prompt
large_stringlengths 70
991k
| completion
large_stringlengths 0
1.02k
|
---|---|
<|file_name|>globalfunc.js<|end_file_name|><|fim▁begin|>var content = '<abcdテスト>';
var result = '';
result = encodeURI(content);<|fim▁hole|>result = encodeURIComponent(content);
// Demo of JavaScript global functions (URI encoding, eval, numeric parsing).
// At this point `result` holds the encodeURIComponent() of `content`
// ('<abcdテスト>') -- presumably assigned just above; verify against caller.
// encodeURIComponent escapes URI-reserved chars AND non-ASCII text,
// unlike encodeURI which leaves reserved characters such as < > intact.
console.log("encodeURIComponent('abcdテスト')", result);
// Round-trip: decodeURIComponent restores the original string exactly.
console.log("decodeURIComponent(encodeURIComponent('abcdテスト'))", decodeURIComponent(encodeURIComponent(content)));
// eval() executes the string as code and logs 3 (a+b).
// NOTE(review): eval is shown here purely as a demo of the global function;
// never call it on untrusted input.
result = 'var a=1,b=2;console.log(a+b);';
eval(result);
// Global isFinite() coerces to Number first: numeric strings, null (-> 0)
// and true (-> 1) all report finite. Prefer Number.isFinite() for a
// non-coercing check.
console.log("isFinite('aa')", isFinite('aa'));
console.log("isFinite('1111')", isFinite('1111'));
console.log("isFinite('1111.23')", isFinite('1111.23'));
console.log("isFinite(null)", isFinite(null));
console.log("isFinite(undefined)", isFinite(undefined));
console.log("isFinite(NaN)", isFinite(NaN));
console.log("isFinite(true)", isFinite(true));
// Global isNaN() also coerces: 'aa' -> NaN -> true, null -> 0 -> false.
// Prefer Number.isNaN() to test for the NaN value itself.
console.log("isNaN('aa')", isNaN('aa'));
console.log("isNaN('1111')", isNaN('1111'));
console.log("isNaN('1111.23')", isNaN('1111.23'));
console.log("isNaN(null)", isNaN(null));
console.log("isNaN(undefined)", isNaN(undefined));
console.log("isNaN(NaN)", isNaN(NaN));
console.log("isNaN(true)", isNaN(true));
// parseFloat parses a leading numeric prefix; non-numeric input -> NaN.
// Note it stringifies its argument first: parseFloat(null) parses "null" -> NaN.
console.log("parseFloat('aa')", parseFloat('aa'));
console.log("parseFloat('1111')", parseFloat('1111'));
console.log("parseFloat('1111.23')", parseFloat('1111.23'));
console.log("parseFloat(null)", parseFloat(null));
console.log("parseFloat(undefined)", parseFloat(undefined));
console.log("parseFloat(NaN)", parseFloat(NaN));
console.log("parseFloat(true)", parseFloat(true));
// parseInt truncates at the decimal point ('1111.73' -> 1111).
// NOTE(review): no radix argument is passed; always supply 10 in real code.
console.log("parseInt('aa')", parseInt('aa'));
console.log("parseInt('1111')", parseInt('1111'));
console.log("parseInt('1111.73')", parseInt('1111.73'));
console.log("parseInt(null)", parseInt(null));
console.log("parseInt(undefined)", parseInt(undefined));
console.log("parseInt(NaN)", parseInt(NaN));
console.log("parseInt(true)", parseInt(true));<|fim▁end|> | console.log("encodeURI('abcdテスト')", result);
console.log("decodeURI(encodeURI('abcdテスト'))", decodeURI(encodeURI(content)));
|
<|file_name|>BaseRepository.java<|end_file_name|><|fim▁begin|>/*
* Copyright © 2013-2021, The SeedStack authors <http://seedstack.org>
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package org.seedstack.business.domain;
import java.lang.reflect.Type;
import javax.inject.Inject;
import org.seedstack.business.internal.utils.BusinessUtils;
import org.seedstack.business.specification.dsl.SpecificationBuilder;
/**
* An helper base class that can be extended to create an <strong>implementation</strong> of a
* repository interface which, in turn, must extend {@link Repository}.<|fim▁hole|> *
* @param <A> Type of the aggregate root.
* @param <I> Type of the aggregate root identifier.
* @see Repository
* @see org.seedstack.business.util.inmemory.BaseInMemoryRepository
*/
public abstract class BaseRepository<A extends AggregateRoot<I>, I> implements Repository<A, I> {
private static final int AGGREGATE_INDEX = 0;
private static final int KEY_INDEX = 1;
private final Class<A> aggregateRootClass;
private final Class<I> idClass;
@Inject
private SpecificationBuilder specificationBuilder;
/**
 * Creates a base domain repository. The actual classes managed by the repository (the
 * aggregate root class and its identifier class) are determined by reflection on the
 * generic parameters of the concrete subclass.
 */
@SuppressWarnings("unchecked")
protected BaseRepository() {
    // Resolve <A, I> from the subclass declaration; AGGREGATE_INDEX / KEY_INDEX
    // select the positions of the two type parameters.
    Type[] generics = BusinessUtils.resolveGenerics(BaseRepository.class, getClass());
    this.aggregateRootClass = (Class<A>) generics[AGGREGATE_INDEX];
    this.idClass = (Class<I>) generics[KEY_INDEX];
}
/**
 * Creates a base domain repository. The actual classes managed by the repository are
 * specified explicitly, bypassing reflection. This can be used to create a dynamic
 * implementation of a repository.
 *
 * @param aggregateRootClass the aggregate root class.
 * @param idClass            the aggregate root identifier class.
 */
protected BaseRepository(Class<A> aggregateRootClass, Class<I> idClass) {
    this.aggregateRootClass = aggregateRootClass;
    this.idClass = idClass;
}
/**
 * Returns the class of the aggregate root managed by this repository.
 *
 * @return the aggregate root class resolved at construction time.
 */
@Override
public Class<A> getAggregateRootClass() {
    return this.aggregateRootClass;
}
/**
 * Returns the class of the identifier of the aggregate root managed by this repository.
 *
 * @return the identifier class resolved at construction time.
 */
@Override
public Class<I> getIdentifierClass() {
    return this.idClass;
}
/**
 * Returns the injected {@link SpecificationBuilder} DSL entry point.
 *
 * @return the specification builder.
 */
@Override
public SpecificationBuilder getSpecificationBuilder() {
    return this.specificationBuilder;
}
}<|fim▁end|> | *
* <p> This class is mainly used as a common base for specialized technology-specific
* implementations. Client code will often extend these more specialized classes instead of this
* one. </p> |
<|file_name|>omp_target_update.cpp<|end_file_name|><|fim▁begin|><|fim▁hole|>
// Plugin callback fired when the front end finishes processing a type.
// Currently only traces the tree node for the OMP "target update" handler.
// NOTE(review): the trailing ';' after the body is a stray empty
// declaration -- harmless in C++11, but could be removed.
void
TC_OMP_TARGET_UPDATE::finish_type (tree t)
{
  cerr << "finish_type: OMP_TARGET_UPDATE" << t << endl;
};
// Plugin callback fired when the front end finishes processing a
// declaration; traces the tree node for the OMP "target update" handler.
void
TC_OMP_TARGET_UPDATE::finish_decl (tree t)
{
  cerr << "finish_decl: OMP_TARGET_UPDATE" << t << endl;
};
void
TC_OMP_TARGET_UPDATE::finish_unit (tree t)
{
cerr << "finish_unit: OMP_TARGET_UPDATE" << t << endl;
};<|fim▁end|> | #include "omp_target_update.hpp"
TC_OMP_TARGET_UPDATE aOMP_TARGET_UPDATE; |
<|file_name|>sync-send-iterators-in-libcore.rs<|end_file_name|><|fim▁begin|>// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// pretty-expanded FIXME #23616
#![feature(collections)]
/// Compile-time check that the argument's type implements `Sync`.
fn is_sync<T: Sync>(_: T) {}
/// Compile-time check that the argument's type implements `Send`.
fn is_send<T: Send>(_: T) {}
<|fim▁hole|> let mut x = $ctor;
is_sync(x.$iter());
let mut y = $ctor;
is_send(y.$iter());
)+
})
}
fn main() {
// for char.rs
all_sync_send!("Я", escape_default, escape_unicode);
// for iter.rs
// FIXME
// for option.rs
// FIXME
// for result.rs
// FIXME
// for slice.rs
// FIXME
// for str/mod.rs
// FIXME
}<|fim▁end|> | macro_rules! all_sync_send {
($ctor:expr, $($iter:ident),+) => ({
$( |
<|file_name|>poes.py<|end_file_name|><|fim▁begin|>"""
.. module:: poes
:synopsis: A module for reading, writing, and storing poes Data
.. moduleauthor:: AJ, 20130129
*********************
**Module**: gme.sat.poes
*********************
**Classes**:
* :class:`poesRec`
**Functions**:
* :func:`readPoes`
* :func:`readPoesFtp`
* :func:`mapPoesMongo`
* :func:`overlayPoesTed`
"""
from davitpy.gme.base.gmeBase import gmeData
class poesRec(gmeData):
    """a class to represent a record of poes data. Extends :class:`gmeBase.gmeData`. Insight on the class members can be obtained from `the NOAA NGDC site <ftp://satdat.ngdc.noaa.gov/sem/poes/data/readme.txt>`_. Note that Poes data is available from 1998-present day (or whatever the latest NOAA has uploaded is). **The data are the 16-second averages**

    **Members**:
        * **time** (`datetime <http://tinyurl.com/bl352yx>`_): an object identifying which time these data are for
        * **info** (str): information about where the data come from. *Please be courteous and give credit to data providers when credit is due.*
        * **dataSet** (str): the name of the data set
        * **satnum** (ind): the noaa satellite number
        * **sslat** (float): Geographic Latitude of sub-satellite point, degrees
        * **sslon** (float): Geographic Longitude of sub-satellite point, degrees
        * **folat** (float): Geographic Latitude of foot-of-field-line, degrees
        * **folon** (float): Geographic Longitude of foot-of-field-line, degrees
        * **lval** (float): L-value
        * **mlt** (float): Magnetic local time of foot-of-field-line, degrees
        * **pas0** (float): MEPED-0 pitch angle at satellite, degrees
        * **pas90** (float): MEPED-90 pitch angle at satellite, degrees
        * **mep0e1** (float): MEPED-0 > 30 keV electrons, counts/sec
        * **mep0e2** (float): MEPED-0 > 100 keV electrons, counts/sec
        * **mep0e3** (float): MEPED-0 > 300 keV electrons, counts/sec
        * **mep0p1** (float): MEPED-0 30 keV to 80 keV protons, counts/sec
        * **mep0p2** (float): MEPED-0 80 keV to 240 keV protons, counts/sec
        * **mep0p3** (float): 240 kev to 800 keV protons, counts/sec
        * **mep0p4** (float): MEPED-0 800 keV to 2500 keV protons, counts/sec
        * **mep0p5** (float): MEPED-0 2500 keV to 6900 keV protons, counts/sec
        * **mep0p6** (float): MEPED-0 > 6900 keV protons, counts/sec,
        * **mep90e1** (float): MEPED-90 > 30 keV electrons, counts/sec,
        * **mep90e2** (float): MEPED-90 > 100 keV electrons, counts/sec
        * **mep90e3** (float): MEPED-90 > 300 keV electrons, counts/sec
        * **mep90p1** (float): MEPED-90 30 keV to 80 keV protons, counts/sec
        * **mep90p2** (float): MEPED-90 80 keV to 240 keV protons, counts/sec
        * **mep90p3** (float): MEPED-90 240 kev to 800 keV protons, counts/sec,
        * **mep90p4** (float): MEPED-90 800 keV to 2500 keV protons, counts/sec
        * **mep90p5** (float): MEPED-90 2500 keV to 6900 keV protons, counts/sec
        * **mep90p6** (float): MEPED-90 > 6900 keV protons, counts/sec
        * **mepomp6** (float): MEPED omni-directional > 16 MeV protons, counts/sec
        * **mepomp7** (float): MEPED omni-directional > 36 Mev protons, counts/sec
        * **mepomp8** (float): MEPED omni-directional > 70 MeV protons, counts/sec
        * **mepomp9** (float): MEPED omni-directional >= 140 MeV protons
        * **ted** (float): TED, Total Energy Detector Average, ergs/cm2/sec
        * **echar** (float): TED characteristic energy of electrons, eV
        * **pchar** (float): TED characteristic energy of protons, eV
        * **econtr** (float): TED electron contribution, Electron Energy/Total Energy

    .. note::
        If any of the members have a value of None, this means that they could not be read for that specific time

    **Methods**:
        * :func:`parseFtp`

    **Example**:
        ::

            emptyPoesObj = gme.sat.poesRec()

    written by AJ, 20130131
    """

    def parseFtp(self,line, header):
        """This method is used to convert a line of poes data read from the NOAA NGDC FTP site into a :class:`poesRec` object.

        .. note::
            In general, users will not need to worry about this.

        **Belongs to**: :class:`poesRec`

        **Args**:
            * **line** (str): the ASCII line from the FTP server
            * **header** (str): the header line of the same ASCII file; column
              names in it are matched against this record's attribute names

        **Returns**:
            * Nothing.

        **Example**:
            ::

                myPoesObj.parseFtp(ftpLine)

        written by AJ, 20130131
        """
        import datetime as dt
        #split the line into cols
        cols = line.split()
        head = header.split()
        # columns 0-5 are year, month, day, hour, minute, fractional seconds;
        # the fractional part is converted to microseconds for datetime
        self.time = dt.datetime(int(cols[0]), int(cols[1]), int(cols[2]), int(cols[3]),int(cols[4]), \
            int(float(cols[5])),int(round((float(cols[5])-int(float(cols[5])))*1e6)))
        # map every remaining attribute to its column by matching the header
        for key in self.__dict__.iterkeys():
            if(key == 'dataSet' or key == 'info' or key == 'satnum' or key == 'time'): continue
            try: ind = head.index(key)
            except Exception,e:
                print e
                print 'problem setting attribute',key
            # NOTE(review): if head.index() raised above, `ind` is stale (or
            # unbound on the first failure) and the line below still runs --
            # the attribute may be filled from the wrong column.
            #check for a good value (-999. is the NOAA missing-data sentinel)
            if(float(cols[ind]) != -999.): setattr(self,key,float(cols[ind]))

    def __init__(self, ftpLine=None, dbDict=None, satnum=None, header=None):
        """the initialization function for a :class:`poesRec` object.

        .. note::
            In general, users will not need to worry about this.

        **Belongs to**: :class:`poesRec`

        **Args**:
            * [**ftpLine**] (str): an ASCII line from the FTP server. if this is provided, the object is initialized from it. header must be provided in conjunction with this. default=None
            * [**header**] (str): the header from the ASCII FTP file. default=None
            * [**dbDict**] (dict): a dictionary read from the mongodb. if this is provided, the object is initialized from it. default = None
            * [**satnum**] (int): the satellite number. default=None

        **Returns**:
            * Nothing.

        **Example**:
            ::

                myPoesObj = poesRec(ftpLine=aftpLine)

        written by AJ, 20130131
        """
        #note about where data came from
        self.dataSet = 'Poes'
        # NOTE(review): attribution string says "NASA SPDF" but the readers in
        # this module fetch from the NOAA NGDC FTP server -- confirm source.
        self.info = 'These data were downloaded from NASA SPDF. *Please be courteous and give credit to data providers when credit is due.*'
        self.satnum = satnum
        # all data members default to None, meaning "not read for this time"
        self.sslat = None
        self.sslon = None
        self.folat = None
        self.folon = None
        self.lval = None
        self.mlt = None
        self.pas0 = None
        self.pas90 = None
        self.mep0e1 = None
        self.mep0e2 = None
        self.mep0e3 = None
        self.mep0p1 = None
        self.mep0p2 = None
        self.mep0p3 = None
        self.mep0p4 = None
        self.mep0p5 = None
        self.mep0p6 = None
        self.mep90e1 = None
        self.mep90e2 = None
        self.mep90e3 = None
        self.mep90p1 = None
        self.mep90p2 = None
        self.mep90p3 = None
        self.mep90p4 = None
        self.mep90p5 = None
        self.mep90p6 = None
        self.mepomp6 = None
        self.mepomp7 = None
        self.mepomp8 = None
        self.mepomp9 = None
        self.ted = None
        self.echar = None
        self.pchar = None
        self.econtr = None
        #if we're initializing from an object, do it!
        # (parseDb is inherited from the gmeData base class)
        if(ftpLine != None): self.parseFtp(ftpLine,header)
        if(dbDict != None): self.parseDb(dbDict)
def readPoes(sTime,eTime=None,satnum=None,folat=None,folon=None,ted=None,echar=None,pchar=None):
    """This function reads poes data. First, it will try to get it from the mongodb, and if it can't find it, it will look on the NOAA NGDC FTP server using :func:`readPoesFtp`. The data are 16-second averages

    **Args**:
        * **sTime** (`datetime <http://tinyurl.com/bl352yx>`_ or None): the earliest time you want data for
        * [**eTime**] (`datetime <http://tinyurl.com/bl352yx>`_ or None): the latest time you want data for. if this is None, end Time will be 1 day after sTime. default = None
        * [**satnum**] (int): the satellite you want data for. eg 17 for noaa17. if this is None, data for all satellites will be returned. default = None
        * [**folat**] (list or None): if this is not None, it must be a 2-element list of numbers, [a,b]. In this case, only data with foot-of-field-line latitude values in the range [a,b] will be returned. default = None
        * [**folon**] (list or None): if this is not None, it must be a 2-element list of numbers, [a,b]. In this case, only data with foot-of-field-line longitude values in the range [a,b] will be returned. default = None
        * [**ted**] (list or None): if this is not None, it must be a 2-element list of numbers, [a,b]. In this case, only data with ted values in the range [a,b] will be returned. default = None
        * [**echar**] (list or None): if this is not None, it must be a 2-element list of numbers, [a,b]. In this case, only data with echar values in the range [a,b] will be returned. default = None
        * [**pchar**] (list or None): if this is not None, it must be a 2-element list of numbers, [a,b]. In this case, only data with pchar values in the range [a,b] will be returned. default = None

    **Returns**:
        * **poesList** (list or None): if data is found, a list of :class:`poesRec` objects matching the input parameters is returned. If no data is found, None is returned.

    **Example**:
        ::

            import datetime as dt
            poesList = gme.sat.readPoes(sTime=dt.datetime(2011,1,1),eTime=dt.datetime(2011,6,1),folat=[60,80])

    written by AJ, 20130131
    """
    import datetime as dt
    import davitpy.pydarn.sdio.dbUtils as db
    #check all the inputs for validity
    assert(isinstance(sTime,dt.datetime)), \
        'error, sTime must be a datetime object'
    assert(eTime == None or isinstance(eTime,dt.datetime)), \
        'error, eTime must be either None or a datetime object'
    assert(satnum == None or isinstance(satnum,int)), 'error, satnum must be an int'
    # locals() snapshot lets the range filters be validated generically by name
    var = locals()
    for name in ['folat','folon','ted','echar','pchar']:
        assert(var[name] == None or (isinstance(var[name],list) and \
            isinstance(var[name][0],(int,float)) and isinstance(var[name][1],(int,float)))), \
            'error,'+name+' must None or a list of 2 numbers'
    if(eTime == None): eTime = sTime+dt.timedelta(days=1)
    qryList = []
    #if arguments are provided, query for those
    qryList.append({'time':{'$gte':sTime}})
    if(eTime != None): qryList.append({'time':{'$lte':eTime}})
    if(satnum != None): qryList.append({'satnum':satnum})
    # re-snapshot locals() (eTime may have been defaulted above)
    var = locals()
    for name in ['folat','folon','ted','echar','pchar']:
        if(var[name] != None):
            # each [a,b] filter becomes a pair of $gte/$lte clauses
            qryList.append({name:{'$gte':min(var[name])}})
            qryList.append({name:{'$lte':max(var[name])}})
    #construct the final query definition
    qryDict = {'$and': qryList}
    #connect to the database (read-only connection to gme.poes)
    poesData = db.getDataConn(dbName='gme',collName='poes')
    #do the query
    # NOTE(review): qryList always contains at least the sTime clause, so the
    # else branch below is effectively unreachable.
    if(qryList != []): qry = poesData.find(qryDict)
    else: qry = poesData.find()
    if(qry.count() > 0):
        poesList = []
        # materialize the cursor in time order as poesRec objects
        for rec in qry.sort('time'):
            poesList.append(poesRec(dbDict=rec))
        print '\nreturning a list with',len(poesList),'records of poes data'
        return poesList
    #if we didn't find anything on the mongodb
    else:
        print '\ncould not find requested data in the mongodb'
        return None
        # FTP fallback deliberately disabled (dead code below the return):
        #print 'we will look on the ftp server, but your conditions will be (mostly) ignored'
        ##read from ftp server
        #poesList = readPoesFtp(sTime, eTime)
        #if(poesList != None):
            #print '\nreturning a list with',len(poesList),'recs of poes data'
            #return poesList
        #else:
            #print '\n no data found on FTP server, returning None...'
            #return None
def readPoesFtp(sTime,eTime=None):
    """This function reads poes data from the NOAA NGDC server via anonymous FTP connection.

    .. warning::
        You should not use this. Use the general function :func:`readPoes` instead.

    **Args**:
        * **sTime** (`datetime <http://tinyurl.com/bl352yx>`_): the earliest time you want data for
        * [**eTime**] (`datetime <http://tinyurl.com/bl352yx>`_ or None): the latest time you want data for. if this is None, eTime will be equal 1 day after sTime. default = None

    **Returns**:
        * **poesList** (list or None): if data is found, a list of :class:`poesRec` objects matching the input parameters is returned. If no data is found, None is returned.

    **Example**:
        ::

            import datetime as dt
            poesList = gme.sat.readpoesFtp(dt.datetime(2011,1,1,1,50),eTime=dt.datetime(2011,1,1,10,0))

    written by AJ, 20130128
    """
    from ftplib import FTP
    import datetime as dt
    assert(isinstance(sTime,dt.datetime)),'error, sTime must be datetime'
    if(eTime == None): eTime=sTime+dt.timedelta(days=1)
    assert(isinstance(eTime,dt.datetime)),'error, eTime must be datetime'
    assert(eTime >= sTime), 'error, end time greater than start time'
    #connect to the server
    try: ftp = FTP('satdat.ngdc.noaa.gov')
    except Exception,e:
        print e
        print 'problem connecting to NOAA server'
        return None
    #login as anonymous
    try: l=ftp.login()
    except Exception,e:
        print e
        print 'problem logging in to NOAA server'
        return None
    myPoes = []
    #get the poes data, one UTC day per directory listing
    myTime = dt.datetime(sTime.year,sTime.month,sTime.day)
    while(myTime <= eTime):
        #go to the data directory for this year
        try: ftp.cwd('/sem/poes/data/avg/txt/'+str(myTime.year))
        except Exception,e:
            print e
            print 'error getting to data directory'
            return None
        #list directory contents; one subdirectory per satellite ('noaaNN')
        dirlist = ftp.nlst()
        for dire in dirlist:
            #check for satellite directory
            if(dire.find('noaa') == -1): continue
            satnum = dire.replace('noaa','')
            #change to the satellite's file directory
            ftp.cwd('/sem/poes/data/avg/txt/'+str(myTime.year)+'/'+dire)
            fname = 'poes_n'+satnum+'_'+myTime.strftime("%Y%m%d")+'.txt'
            print 'poes: RETR '+fname
            #list to hold the lines
            lines = []
            #get the data; on failure `lines` stays empty and the parse loop
            #below simply does nothing for this file
            try: ftp.retrlines('RETR '+fname,lines.append)
            except Exception,e:
                print e
                print 'error retrieving',fname
            #convert the ascii lines into a list of poesRec objects
            #skip first (header) line
            for line in lines[1:]:
                cols = line.split()
                t = dt.datetime(int(cols[0]), int(cols[1]), int(cols[2]), int(cols[3]),int(cols[4]))
                #keep only records inside the requested window
                if(sTime <= t <= eTime):
                    myPoes.append(poesRec(ftpLine=line,satnum=int(satnum),header=lines[0]))
        #increment myTime
        myTime += dt.timedelta(days=1)
    if(len(myPoes) > 0): return myPoes
    else: return None
def mapPoesMongo(sYear,eYear=None):
"""This function reads poes data from the NOAA NGDC FTP server via anonymous FTP connection and maps it to the mongodb.
.. warning::
In general, nobody except the database admins will need to use this function
**Args**:
* **sYear** (int): the year to begin mapping data
* [**eYear**] (int or None): the end year for mapping data. if this is None, eYear will be sYear
**Returns**:
* Nothing.
**Example**:
::
gme.sat.mapPoesMongo(2004)
written by AJ, 20130131
"""
import davitpy.pydarn.sdio.dbUtils as db
from davitpy import rcParams
import datetime as dt
#check inputs
assert(isinstance(sYear,int)),'error, sYear must be int'
if(eYear == None): eYear=sYear
assert(isinstance(eYear,int)),'error, sYear must be None or int'
assert(eYear >= sYear), 'error, end year greater than start year'
#get data connection
mongoData = db.getDataConn(username=rcParams['DBWRITEUSER'],password=rcParams['DBWRITEPASS'],\
dbAddress=rcParams['SDDB'],dbName='gme',collName='poes')
#set up all of the indices
mongoData.ensure_index('time')
mongoData.ensure_index('satnum')
mongoData.ensure_index('folat')
mongoData.ensure_index('folon')
mongoData.ensure_index('ted')
mongoData.ensure_index('echar')
mongoData.ensure_index('pchar')
#read the poes data from the FTP server
myTime = dt.datetime(sYear,1,1)
while(myTime < dt.datetime(eYear+1,1,1)):
#10 day at a time, to not fill up RAM
templist = readPoesFtp(myTime,myTime+dt.timedelta(days=10))
if(templist == None): continue
for rec in templist:
#check if a duplicate record exists
qry = mongoData.find({'$and':[{'time':rec.time},{'satnum':rec.satnum}]})
print rec.time, rec.satnum
tempRec = rec.toDbDict()
cnt = qry.count()
#if this is a new record, insert it
if(cnt == 0): mongoData.insert(tempRec)
#if this is an existing record, update it
elif(cnt == 1):
print 'foundone!!'
dbDict = qry.next()
temp = dbDict['_id']
dbDict = tempRec
dbDict['_id'] = temp
mongoData.save(dbDict)
else:
print 'strange, there is more than 1 record for',rec.time
del templist
myTime += dt.timedelta(days=10)
def overlayPoesTed( baseMapObj, axisHandle, startTime, endTime = None, coords = 'geo', \
    hemi = 1, folat = [45., 90.], satNum = None, param='ted', scMin=-3.,scMax=0.5) :
    """This function overlays POES TED data onto a map object.

    **Args**:
        * **baseMapObj** (`datetime <http://tinyurl.com/bl352yx>`_ or None): the map object you want data to be overlayed on.
        * **axisHandle** (`datetime <http://tinyurl.com/bl352yx>`_ or None): the Axis Handle used.
        * **startTime** (`datetime <http://tinyurl.com/bl352yx>`_ or None): the starttime you want data for. If endTime is not given overlays data from satellites with in +/- 45 min of the startTime
        * [**endTime**] (`datetime <http://tinyurl.com/bl352yx>`_ or None): the latest time you want data for. if this is None, data from satellites with in +/- 45 min of the startTime is overlayed. default = None
        * [**satNum**] (list or None): the satellites you want data for. eg [17] for noaa17. if this is None, data for all satellites will be returned. default = None
        * [**coords**] (str): Coordinates of the map object on which you want data to be overlayed on, 'geo', 'mag', 'mlt'. Default 'geo'
        * [**hemi**] (int): Hemisphere of the map object on which you want data to be overlayed on. Value is 1 for northern hemisphere and -1 for the southern hemisphere.Default 1
        * [**folat**] (list): a 2-element list of numbers, [a,b]. Only data with foot-of-field-line latitude values in the range [a,b] will be plotted. default = [45., 90.]
        * [**param**] (str): the name of the poes parameter to be plotted. default='ted'
        * [**scMin**] (float): lower colour-scale limit (log10 units). default=-3.
        * [**scMax**] (float): upper colour-scale limit (log10 units). default=0.5

    .. note::
        NOTE(review): `folat` uses a mutable list as a default argument --
        callers should not mutate it.

    **Returns**:
        POES TED data is overlayed on the map object. If no data is found, None is returned.

    **Example**:
        ::

            import datetime as dt
            poesList = gme.sat.overlayPoesTed(MapObj, sTime=dt.datetime(2011,3,4,4))

    written by Bharat Kunduri, 20130216
    """
    import utils
    import matplotlib as mp
    import datetime
    import numpy
    import matplotlib.pyplot as plt
    import gme.sat.poes as Poes
    import math
    import models
    import matplotlib.cm as cm
    from scipy import optimize
    #check all the inputs for validity
    assert(isinstance(startTime,datetime.datetime)), \
        'error, sTime must be a datetime object'
    assert(endTime == None or isinstance(endTime,datetime.datetime)), \
        'error, eTime must be either None or a datetime object'
    var = locals()
    assert(var['satNum'] == None or (isinstance(var['satNum'],list) )), \
        'error, satNum must None or a list of satellite (integer) numbers'
    if satNum != None :
        assert( len(satNum) <= 5 ), \
            'error, there are only 5 POES satellites in operation (atleast when I wrote this code)'
    assert(var['folat'] == None or (isinstance(var['folat'],list) and \
        isinstance(var['folat'][0],(int,float)) and isinstance(var['folat'][1],(int,float)))), \
        'error, folat must None or a list of 2 numbers'
    # Check the hemisphere and get the appropriate folat
    # (hemi == -1 flips the latitude band to the southern hemisphere)
    folat = [ math.fabs( folat[0] ) * hemi, math.fabs( folat[1] ) * hemi ]
    # Check if the endTime is given in which case the user wants a specific time interval to search for
    # If not we'll give him the best available passes for the selected start time...
    if ( endTime != None ) :
        timeRange = numpy.array( [ startTime, endTime ] )
    else :
        timeRange = None
    pltTimeInterval = numpy.array( datetime.timedelta( minutes = 45 ) )
    # check if the timeRange is set... if not set the timeRange to +/- pltTimeInterval of the startTime
    # NOTE(review): `timeRange == None` on a numpy array relies on legacy
    # comparison behaviour; `timeRange is None` would be safer.
    if timeRange == None:
        timeRange = numpy.array( [ startTime - pltTimeInterval, startTime + pltTimeInterval ] )
    # SatNums - currently operational POES satellites are 15, 16, 17, 18, 19
    if satNum == None:
        satNum = [None]
    # If any particular satellite number is not chosen by user loop through all the available one's
    satNum = numpy.array( satNum ) # I like numpy arrays better that's why I'm converting the satNum list to a numpy array
    # per-satellite accumulators, one sub-list per requested satellite
    latPoesAll = [[] for j in range(len(satNum))]
    lonPoesAll = [[] for j in range(len(satNum))]
    tedPoesAll = [[] for j in range(len(satNum))]
    timePoesAll = [[] for j in range(len(satNum))]
    lenDataAll = [[] for j in range(len(satNum))]
    goodFlg=False
    for sN in range(len(satNum)) :
        # satnum=None means "all satellites" in readPoes
        if(satNum[sN] != None):
            currPoesList = Poes.readPoes(timeRange[0], eTime = timeRange[1], satnum = int(satNum[sN]), folat = folat)
        else:
            currPoesList = Poes.readPoes(timeRange[0], eTime = timeRange[1], satnum = satNum[sN], folat = folat)
        # Check if the data is loaded...
        if currPoesList == None :
            print 'No data found'
            continue
            #return None
        else:
            goodFlg=True
        # Loop through the list and store the data into arrays
        # NOTE(review): lenDataAll was initialized as a list of lists above but
        # is appended to here -- the per-satellite indexing is inconsistent
        # (it is not used after this function builds the plot, though).
        lenDataAll.append(len(currPoesList))
        for l in currPoesList :
            # Store our data in arrays; param values are plotted on a log10 scale
            try:
                tedPoesAll[sN].append(math.log10(getattr(l,param)))
                if coords == 'mag' or coords == 'mlt':
                    # convert geographic foot point to AACGM; MLT is mapped to
                    # degrees (24 h -> 360 deg) for plotting
                    lat,lon,_ = models.aacgm.aacgmConv(l.folat,l.folon, 0., l.time.year, 0)
                    latPoesAll[sN].append(lat)
                    if coords == 'mag':
                        lonPoesAll[sN].append(lon)
                    else:
                        lonPoesAll[sN].append(models.aacgm.mltFromEpoch(utils.timeUtils.datetimeToEpoch(l.time),lon)*360./24.)
                else:
                    latPoesAll[sN].append(l.folat)
                    lonPoesAll[sN].append(l.folon)
                timePoesAll[sN].append(l.time)
            except Exception,e:
                print e
                print 'could not get parameter for time',l.time
    # bail out if no satellite produced any data
    if(not goodFlg): return None
    latPoesAll = numpy.array( latPoesAll )
    lonPoesAll = numpy.array( lonPoesAll )
    tedPoesAll = numpy.array( tedPoesAll )
    timePoesAll = numpy.array( timePoesAll )
    lenDataAll = numpy.array( lenDataAll )
    poesTicks = [ -3.0, -2.5, -2.0, -1.5, -1.0, -0.5, 0.0, 0.5 ]
    # get the axis of the figure...
    ax = axisHandle
    for nn in range( len(satNum) ) :
        # project lon/lat onto map coordinates and scatter-plot, coloured by
        # the (log10) parameter value
        x, y = baseMapObj(lonPoesAll[nn], latPoesAll[nn])
        bpltpoes = baseMapObj.scatter(x,y,c=tedPoesAll[nn], vmin=scMin, vmax=scMax, alpha = 0.7, cmap=cm.jet, zorder = 7., edgecolor='none')
        timeCurr = timePoesAll[nn]
        # annotate every 10th point with its HH:MM time tag
        for aa in range( len(latPoesAll[nn]) ) :
            if aa % 10 == 0:
                str_curr = str(timeCurr[aa].hour)+':'+str(timeCurr[aa].minute)
                ax.annotate( str_curr, xy =( x[aa], y[aa] ), size = 5, zorder = 6. )
    # colorbar drawing left to the caller:
    #cbar = plt.colorbar(bpltpoes, ticks = poesTicks, orientation='horizontal')
    #cbar.ax.set_xticklabels(poesTicks)
    #cbar.set_label(r"Total Log Energy Flux [ergs cm$^{-2}$ s$^{-1}$]")
    return bpltpoes
def overlayPoesBnd( baseMapObj, axisHandle, startTime, coords = 'geo', hemi = 1, equBnd = True, polBnd = False ) :
"""This function reads POES TED data with in +/- 45min of the given time, fits the auroral oval boundaries and overlays them on a map object. The poleward boundary is not accurate all the times due to lesser number of satellite passes identifying it.
**Args**:
* **baseMapObj** (`datetime <http://tinyurl.com/bl352yx>`_ or None): the map object you want data to be overlayed on.
* **axisHandle** (`datetime <http://tinyurl.com/bl352yx>`_ or None): the Axis Handle used.
* **startTime** (`datetime <http://tinyurl.com/bl352yx>`_ or None): the starttime you want data for. If endTime is not given overlays data from satellites with in +/- 45 min of the startTime
* [**coords**] (list or None): Coordinates of the map object on which you want data to be overlayed on. Default 'geo'
* [**hemi**] (list or None): Hemisphere of the map object on which you want data to be overlayed on. Value is 1 for northern hemisphere and -1 for the southern hemisphere.Default 1
* [**equBnd**] (list or None): If this is True the equatorward auroral oval boundary fit from the TED data is overlayed on the map object. Default True
* [**polBnd**] (list or None): If this is True the poleward auroral oval boundary fit from the TED data is overlayed on the map object. Default False
**Returns**:
POES TED data is overlayed on the map object. If no data is found, None is returned.
**Example**:
::
import datetime as dt
poesList = gme.sat.overlayPoesTed(MapObj, sTime=dt.datetime(2011,3,4,4))
written by Bharat Kunduri, 20130216
"""
import utils
import matplotlib as mp
import datetime
import numpy
import matplotlib.pyplot as plt
import gme.sat.poes as Poes
import math
import matplotlib.cm as cm
from scipy import optimize
import models
#check all the inputs for validity
assert(isinstance(startTime,datetime.datetime)), \
'error, sTime must be a datetime object'
# Check the hemisphere and get the appropriate folat
folat = [ 45. * hemi, 90. * hemi ]
# Get the time range we choose +/- 45 minutes....
pltTimeInterval = numpy.array( datetime.timedelta( minutes = 45 ) )
timeRange = numpy.array( [ startTime - pltTimeInterval, startTime + pltTimeInterval ] )
satNum = [ 15, 16, 17, 18, 19 ]
# We set the TED cut-off value to -0.75,
# From observed cases this appeared to do well...
# though fails sometimes especially during geomagnetically quiet times...
# However this is version 1.0 and there always is room for improvement
equBndCutoffVal = -0.75
# If any particular satellite number is not chosen by user loop through all the available one's
satNum = numpy.array( satNum ) # I like numpy arrays better that's why I'm converting the satNum list to a numpy array
latPoesAll = [[] for j in range(len(satNum))]
lonPoesAll = [[] for j in range(len(satNum))]
tedPoesAll = [[] for j in range(len(satNum))]
timePoesAll = [[] for j in range(len(satNum))]
lenDataAll = [[] for j in range(len(satNum))]
for sN in range( len(satNum) ) :
currPoesList = Poes.readPoes( timeRange[0], eTime = timeRange[1], satnum = int(satNum[sN]), folat = folat )
# Check if the data is loaded...
if currPoesList == None :
print 'No data found'
continue
# Loop through the list and store the data into arrays
lenDataAll.append( len( currPoesList ) )
for l in range( lenDataAll[-1] ) :
# Store our data in arrays if the TED data value is > than the cutoff value
try:
x = math.log10(currPoesList[l].ted)
except:
continue
if x > equBndCutoffVal:
if coords == 'mag' or coords == 'mlt':
lat,lon,_ = models.aacgm.aacgmConv(currPoesList[l].folat,currPoesList[l].folon, 0., currPoesList[l].time.year, 0)
latPoesAll[sN].append(lat)
if coords == 'mag':
lonPoesAll[sN].append(lon)
else:
lonPoesAll[sN].append(models.aacgm.mltFromEpoch(utils.timeUtils.datetimeToEpoch(currPoesList[l].time),lon)*360./24.)
else:
latPoesAll[sN].append(currPoesList[l].folat)
lonPoesAll[sN].append(currPoesList[l].folon)
# latPoesAll[sN].append( currPoesList[l].folat )
# lonPoesAll[sN].append( currPoesList[l].folon )
tedPoesAll[sN].append( math.log10(currPoesList[l].ted) )
timePoesAll[sN].append( currPoesList[l].time )
latPoesAll = numpy.array( latPoesAll )
lonPoesAll = numpy.array( lonPoesAll )
tedPoesAll = numpy.array( tedPoesAll )
timePoesAll = numpy.array( timePoesAll )
lenDataAll = numpy.array( lenDataAll )
# Now to identify the boundaries...
# Also need to check if the boundary is equatorward or poleward..
# When satellite is moving from high-lat to low-lat decrease in flux would mean equatorward boundary
# When satellite is moving from low-lat to high-lat increase in flux would mean equatorward boundary
# that is what we are trying to check here
eqBndLats = []
eqBndLons = []
poBndLats = []
poBndLons = []
for n1 in range( len(satNum) ) :
currSatLats = latPoesAll[n1]
currSatLons = lonPoesAll[n1]
currSatTeds = tedPoesAll[n1]
testLatArrLtoh = []
testLonArrLtoh = []
testLatArrHtol = []
testLonArrHtol = []
testLatArrLtohP = []
testLonArrLtohP = []
testLatArrHtolP = []
testLonArrHtolP = []
for n2 in range( len(currSatLats)-1 ) :
#Check if the satellite is moving form low-lat to high-lat or otherwise
if ( math.fabs( currSatLats[n2] ) < math.fabs( currSatLats[n2+1] ) ) :
if ( currSatTeds[n2] < currSatTeds[n2+1] ) :
testLatArrLtoh.append( currSatLats[n2] )
testLonArrLtoh.append( currSatLons[n2] )
if ( currSatTeds[n2] > currSatTeds[n2+1] ) :
testLatArrLtohP.append( currSatLats[n2] )
testLonArrLtohP.append( currSatLons[n2] )
if ( math.fabs( currSatLats[n2] ) > math.fabs( currSatLats[n2+1] ) ) :
if ( currSatTeds[n2] > currSatTeds[n2+1] ) :
testLatArrHtol.append( currSatLats[n2] )
testLonArrHtol.append( currSatLons[n2] )
if ( currSatTeds[n2] < currSatTeds[n2+1] ) :
testLatArrHtolP.append( currSatLats[n2] )
testLonArrHtolP.append( currSatLons[n2] )
# I do this to find the index of the min lat...
if ( testLatArrLtoh != [] ) :
testLatArrLtoh = numpy.array( testLatArrLtoh )
testLonArrLtoh = numpy.array( testLonArrLtoh )
VarEqLat1 = testLatArrLtoh[ numpy.where( testLatArrLtoh == min(testLatArrLtoh) ) ]
VarEqLon1 = testLonArrLtoh[ numpy.where( testLatArrLtoh == min(testLatArrLtoh) ) ]
eqBndLats.append( VarEqLat1[0] )
eqBndLons.append( VarEqLon1[0] )
if ( testLatArrHtol != [] ) :
testLatArrHtol = numpy.array( testLatArrHtol )
testLonArrHtol = numpy.array( testLonArrHtol )
VarEqLat2 = testLatArrHtol[ numpy.where( testLatArrHtol == min(testLatArrHtol) ) ]
VarEqLon2 = testLonArrHtol[ numpy.where( testLatArrHtol == min(testLatArrHtol) ) ]
eqBndLats.append( VarEqLat2[0] )
eqBndLons.append( VarEqLon2[0] )
<|fim▁hole|> if ( testLatArrLtohP != [] ) :
testLatArrLtohP = numpy.array( testLatArrLtohP )
testLonArrLtohP = numpy.array( testLonArrLtohP )
VarEqLatP1 = testLatArrLtohP[ numpy.where( testLatArrLtohP == min(testLatArrLtohP) ) ]
VarEqLonP1 = testLonArrLtohP[ numpy.where( testLatArrLtohP == min(testLatArrLtohP) ) ]
if VarEqLatP1[0] > 64. :
poBndLats.append( VarEqLatP1[0] )
poBndLons.append( VarEqLonP1[0] )
if ( testLatArrHtolP != [] ) :
testLatArrHtolP = numpy.array( testLatArrHtolP )
testLonArrHtolP = numpy.array( testLonArrHtolP )
VarEqLatP2 = testLatArrHtolP[ numpy.where( testLatArrHtolP == min(testLatArrHtolP) ) ]
VarEqLonP2 = testLonArrHtolP[ numpy.where( testLatArrHtolP == min(testLatArrHtolP) ) ]
if VarEqLatP2[0] > 64 :
poBndLats.append( VarEqLatP2[0] )
poBndLons.append( VarEqLonP2[0] )
eqBndLats = numpy.array( eqBndLats )
eqBndLons = numpy.array( eqBndLons )
poBndLats = numpy.array( poBndLats )
poBndLons = numpy.array( poBndLons )
#get the axis Handle used
ax = axisHandle
# Now we do the fitting part...
fitfunc = lambda p, x: p[0] + p[1]*numpy.cos(2*math.pi*(x/360.)+p[2]) # Target function
errfunc = lambda p, x, y: fitfunc(p, x) - y # Distance to the target function
# Initial guess for the parameters
# Equatorward boundary
p0Equ = [ 1., 1., 1.]
p1Equ, successEqu = optimize.leastsq(errfunc, p0Equ[:], args=(eqBndLons, eqBndLats))
if polBnd == True :
p0Pol = [ 1., 1., 1.]
p1Pol, successPol = optimize.leastsq(errfunc, p0Pol[:], args=(poBndLons, poBndLats))
allPlotLons = numpy.linspace(0., 360., 25.)
allPlotLons[-1] = 0.
eqPlotLats = []
if polBnd == True :
poPlotLats = []
for xx in allPlotLons :
if equBnd == True :
eqPlotLats.append( p1Equ[0] + p1Equ[1]*numpy.cos(2*math.pi*(xx/360.)+p1Equ[2] ) )
if polBnd == True :
poPlotLats.append( p1Pol[0] + p1Pol[1]*numpy.cos(2*math.pi*(xx/360.)+p1Pol[2] ) )
xEqu, yEqu = baseMapObj(allPlotLons, eqPlotLats)
bpltpoes = baseMapObj.plot( xEqu,yEqu, zorder = 7., color = 'b' )
if polBnd == True :
xPol, yPol = baseMapObj(allPlotLons, poPlotLats)
bpltpoes = baseMapObj.plot( xPol,yPol, zorder = 7., color = 'r' )<|fim▁end|> | |
<|file_name|>users.js<|end_file_name|><|fim▁begin|>'use strict';<|fim▁hole|>
module.exports = function(app, passport) {
app.route('/logout')
.get(users.signout);
app.route('/users/me')
.get(users.me);
// Setting up the users api
app.route('/register')
.post(users.create);
// Setting up the userId param
app.param('userId', users.user);
// AngularJS route to check for authentication
app.route('/loggedin')
.get(function(req, res) {
res.send(req.isAuthenticated() ? req.user : '0');
});
// Setting the local strategy route
app.route('/login')
.post(passport.authenticate('local', {
failureFlash: true
}), function(req, res) {
res.send({
user: req.user,
redirect: (req.user.roles.indexOf('admin') !== -1) ? req.get('referer') : false
});
});
};<|fim▁end|> |
// User routes use users controller
var users = require('../controllers/users'); |
<|file_name|>test-framework.js<|end_file_name|><|fim▁begin|>'use strict';
var path = require('path');
var helpers = require('yeoman-generator').test;
var assert = require('yeoman-assert');
describe('test framework', function () {
describe('mocha', function () {
before(function (done) {
helpers.run(path.join(__dirname, '../app'))
.inDir(path.join(__dirname, '.tmp'))
.withOptions({
'skip-install': true,
'test-framework': 'mocha'
})
.withPrompts({features: []})
.on('end', done);<|fim▁hole|>
it('adds the Grunt plugin', function () {
assert.fileContent('package.json', '"grunt-mocha"');
});
it('adds the Grunt task', function () {
assert.fileContent('Gruntfile.js', 'mocha');
});
it('uses the ESLint environment', function () {
assert.fileContent('package.json', '"mocha"');
});
});
describe('jasmine', function () {
before(function (done) {
helpers.run(path.join(__dirname, '../app'))
.inDir(path.join(__dirname, '.tmp'))
.withOptions({
'skip-install': true,
'test-framework': 'jasmine'
})
.withPrompts({features: []})
.on('end', done);
});
it('adds the Grunt plugin', function () {
assert.fileContent('package.json', '"grunt-contrib-jasmine"');
});
it('adds the Grunt task', function () {
assert.fileContent('Gruntfile.js', 'jasmine');
});
it('uses the ESLint environment', function () {
assert.fileContent('package.json', '"jasmine"');
});
});
});<|fim▁end|> | }); |
<|file_name|>build-config.js<|end_file_name|><|fim▁begin|>/**
* @license Copyright (c) 2003-2014, CKSource - Frederico Knabben. All rights reserved.
* For licensing, see LICENSE.md or http://ckeditor.com/license
*/
/**
* This file was added automatically by CKEditor builder.
* You may re-use it at any time to build CKEditor again.
*
* If you would like to build CKEditor online again
* (for example to upgrade), visit one the following links:
*
* (1) http://ckeditor.com/builder
* Visit online builder to build CKEditor from scratch.
*
* (2) http://ckeditor.com/builder/e41bccb8290b6d530f8478ddafe95c48
* Visit online builder to build CKEditor, starting with the same setup as before.
*
* (3) http://ckeditor.com/builder/download/e41bccb8290b6d530f8478ddafe95c48
* Straight download link to the latest version of CKEditor (Optimized) with the same setup as before.
*
* NOTE:
* This file is not used by CKEditor, you may remove it.
* Changing this file will not change your CKEditor configuration.
*/
var CKBUILDER_CONFIG = {
skin: 'moono',
preset: 'standard',
ignore: [
'dev',
'.gitignore',
'.gitattributes',
'README.md',
'.mailmap'
],
plugins : {<|fim▁hole|> 'clipboard' : 1,
'contextmenu' : 1,
'elementspath' : 1,
'enterkey' : 1,
'entities' : 1,
'filebrowser' : 1,
'floatingspace' : 1,
'format' : 1,
'horizontalrule' : 1,
'htmlwriter' : 1,
'image' : 1,
'indentlist' : 1,
'link' : 1,
'list' : 1,
'magicline' : 1,
'maximize' : 1,
'pastefromword' : 1,
'pastetext' : 1,
'removeformat' : 1,
'resize' : 1,
'scayt' : 1,
'showborders' : 1,
'sourcearea' : 1,
'specialchar' : 1,
'stylescombo' : 1,
'tab' : 1,
'table' : 1,
'tabletools' : 1,
'toolbar' : 1,
'undo' : 1,
'wsc' : 1,
'wysiwygarea' : 1
},
languages : {
'af' : 1,
'ar' : 1,
'bg' : 1,
'bn' : 1,
'bs' : 1,
'ca' : 1,
'cs' : 1,
'cy' : 1,
'da' : 1,
'de' : 1,
'el' : 1,
'en' : 1,
'en-au' : 1,
'en-ca' : 1,
'en-gb' : 1,
'eo' : 1,
'es' : 1,
'et' : 1,
'eu' : 1,
'fa' : 1,
'fi' : 1,
'fo' : 1,
'fr' : 1,
'fr-ca' : 1,
'gl' : 1,
'gu' : 1,
'he' : 1,
'hi' : 1,
'hr' : 1,
'hu' : 1,
'id' : 1,
'is' : 1,
'it' : 1,
'ja' : 1,
'ka' : 1,
'km' : 1,
'ko' : 1,
'ku' : 1,
'lt' : 1,
'lv' : 1,
'mk' : 1,
'mn' : 1,
'ms' : 1,
'nb' : 1,
'nl' : 1,
'no' : 1,
'pl' : 1,
'pt' : 1,
'pt-br' : 1,
'ro' : 1,
'ru' : 1,
'si' : 1,
'sk' : 1,
'sl' : 1,
'sq' : 1,
'sr' : 1,
'sr-latn' : 1,
'sv' : 1,
'th' : 1,
'tr' : 1,
'tt' : 1,
'ug' : 1,
'uk' : 1,
'vi' : 1,
'zh' : 1,
'zh-cn' : 1
}
};<|fim▁end|> | 'a11yhelp' : 1,
'about' : 1,
'basicstyles' : 1,
'blockquote' : 1, |
<|file_name|>copy-asset.js<|end_file_name|><|fim▁begin|>$(document).ready(function() {
$('#btn-create-version').on('click', function(e) {
e.preventDefault();
var newVersion = $('#new-version').val();
if (newVersion) {
var assetId = $('#asset-id').val();
var assetType = $('#asset-type').val();
var path = caramel.url('/apis/asset/' + assetId + '/create-version?type=' + assetType);
var assetPath = caramel.url('/assets/' + assetType + '/details/');
$('#btn-create-version').addClass('disabled');
$('#new-version-loading').removeClass('hide');
var alertMessage = $("#alertSection");
$.ajax({
url: path,
data: JSON.stringify({
"attributes": {<|fim▁hole|> "overview_version": newVersion
}
}),
type: 'POST',
success: function(response) {
messages.alertSuccess('Asset version created successfully!,You will be redirected to new asset details page in few seconds.....');
setTimeout(function() {
var path = caramel.url('assets/' + assetType + '/details/' + response.data);
window.location = path;
}, 3000);
},
error: function(error) {
var errorText = JSON.parse(error.responseText).error;
messages.alertError(errorText);
$('#btn-create-version').removeClass('disabled');
$('#new-version-loading').addClass('hide');
}
});
}
});
$('#btn-cancel-version').on('click', function(e) {
var assetId = $('#asset-id').val();
var assetType = $('#asset-type').val();
var path = caramel.url('/assets/' + assetType + '/details/' + assetId);
$.ajax({
success: function(response) {
window.location = path;
}
});
});
});<|fim▁end|> | |
<|file_name|>metadata_test.go<|end_file_name|><|fim▁begin|>package bimg
import (
"io/ioutil"
"os"
"path"
"testing"
)
func TestSize(t *testing.T) {
files := []struct {
name string
width int
height int
}{
{"test.jpg", 1680, 1050},
{"test.png", 400, 300},
{"test.webp", 550, 368},
}
for _, file := range files {
size, err := Size(readFile(file.name))
if err != nil {
t.Fatalf("Cannot read the image: %#v", err)
}
if size.Width != file.width || size.Height != file.height {
t.Fatalf("Unexpected image size: %dx%d", size.Width, size.Height)
}
}
}
func TestMetadata(t *testing.T) {
files := []struct {
name string
format string
orientation int
alpha bool
profile bool
space string
}{
{"test.jpg", "jpeg", 0, false, false, "srgb"},
{"test_icc_prophoto.jpg", "jpeg", 0, false, true, "srgb"},
{"test.png", "png", 0, true, false, "srgb"},
{"test.webp", "webp", 0, false, false, "srgb"},
{"test.avif", "avif", 0, false, false, "srgb"},
}
for _, file := range files {
metadata, err := Metadata(readFile(file.name))
if err != nil {
t.Fatalf("Cannot read the image: %s -> %s", file.name, err)
}
if metadata.Type != file.format {
t.Fatalf("Unexpected image format: %s", file.format)
}
if metadata.Orientation != file.orientation {
t.Fatalf("Unexpected image orientation: %d != %d", metadata.Orientation, file.orientation)
}
if metadata.Alpha != file.alpha {
t.Fatalf("Unexpected image alpha: %t != %t", metadata.Alpha, file.alpha)
}
if metadata.Profile != file.profile {
t.Fatalf("Unexpected image profile: %t != %t", metadata.Profile, file.profile)
}
if metadata.Space != file.space {
t.Fatalf("Unexpected image profile: %t != %t", metadata.Profile, file.profile)
}
}
}
func TestImageInterpretation(t *testing.T) {
files := []struct {
name string
interpretation Interpretation
}{
{"test.jpg", InterpretationSRGB},
{"test.png", InterpretationSRGB},
{"test.webp", InterpretationSRGB},
}
for _, file := range files {
interpretation, err := ImageInterpretation(readFile(file.name))
if err != nil {
t.Fatalf("Cannot read the image: %s -> %s", file.name, err)
}
if interpretation != file.interpretation {
t.Fatalf("Unexpected image interpretation")
}
}
}
func TestEXIF(t *testing.T) {
if VipsMajorVersion <= 8 && VipsMinorVersion < 10 {
t.Skip("Skip test in libvips < 8.10")
return
}
files := map[string]EXIF{
"test.jpg": {},
"exif/Landscape_1.jpg": {
Orientation: 1,
XResolution: "72/1",
YResolution: "72/1",
ResolutionUnit: 2,
YCbCrPositioning: 1,
ExifVersion: "Exif Version 2.1",
ColorSpace: 65535,
},
"test_exif.jpg": {
Make: "Jolla",
Model: "Jolla",
XResolution: "72/1",
YResolution: "72/1",
ResolutionUnit: 2,
Orientation: 1,
Datetime: "2014:09:21 16:00:56",
ExposureTime: "1/25",
FNumber: "12/5",
ISOSpeedRatings: 320,
ExifVersion: "Exif Version 2.3",
DateTimeOriginal: "2014:09:21 16:00:56",
ShutterSpeedValue: "205447286/44240665",
ApertureValue: "334328577/132351334",
ExposureBiasValue: "0/1",
MeteringMode: 1,
Flash: 0,
FocalLength: "4/1",
WhiteBalance: 1,
ColorSpace: 65535,
},
"test_exif_canon.jpg": {
Make: "Canon",
Model: "Canon EOS 40D",
Orientation: 1,
XResolution: "72/1",
YResolution: "72/1",
ResolutionUnit: 2,
Software: "GIMP 2.4.5",
Datetime: "2008:07:31 10:38:11",
YCbCrPositioning: 2,
Compression: 6,
ExposureTime: "1/160",
FNumber: "71/10",
ExposureProgram: 1,
ISOSpeedRatings: 100,
ExifVersion: "Exif Version 2.21",
DateTimeOriginal: "2008:05:30 15:56:01",
DateTimeDigitized: "2008:05:30 15:56:01",
ComponentsConfiguration: "Y Cb Cr -",
ShutterSpeedValue: "483328/65536",
ApertureValue: "368640/65536",
ExposureBiasValue: "0/1",
MeteringMode: 5,
Flash: 9,
FocalLength: "135/1",
SubSecTimeOriginal: "00",
SubSecTimeDigitized: "00",
ColorSpace: 1,
PixelXDimension: 100,
PixelYDimension: 68,
ExposureMode: 1,
WhiteBalance: 0,
SceneCaptureType: 0,
},
"test_exif_full.jpg": {
Make: "Apple",
Model: "iPhone XS",
Orientation: 6,
XResolution: "72/1",
YResolution: "72/1",
ResolutionUnit: 2,
Software: "13.3.1",
Datetime: "2020:07:28 19:18:49",
YCbCrPositioning: 1,
Compression: 6,
ExposureTime: "1/835",
FNumber: "9/5",
ExposureProgram: 2,
ISOSpeedRatings: 25,
ExifVersion: "Unknown Exif Version",
DateTimeOriginal: "2020:07:28 19:18:49",
DateTimeDigitized: "2020:07:28 19:18:49",
ComponentsConfiguration: "Y Cb Cr -",
ShutterSpeedValue: "77515/7986",
ApertureValue: "54823/32325",
BrightnessValue: "77160/8623",
ExposureBiasValue: "0/1",
MeteringMode: 5,
Flash: 16,
FocalLength: "17/4",
SubjectArea: "2013 1511 2217 1330",
MakerNote: "1110 bytes undefined data",
SubSecTimeOriginal: "777",
SubSecTimeDigitized: "777",
ColorSpace: 65535,
PixelXDimension: 4032,
PixelYDimension: 3024,
SensingMethod: 2,
SceneType: "Directly photographed",
ExposureMode: 0,
WhiteBalance: 0,
FocalLengthIn35mmFilm: 26,
SceneCaptureType: 0,
GPSLatitudeRef: "N",
GPSLatitude: "55/1 43/1 5287/100",
GPSLongitudeRef: "E",
GPSLongitude: "37/1 35/1 5571/100",
GPSAltitudeRef: "Sea level",
GPSAltitude: "90514/693",
GPSSpeedRef: "K",
GPSSpeed: "114272/41081",
GPSImgDirectionRef: "M",
GPSImgDirection: "192127/921",
GPSDestBearingRef: "M",
GPSDestBearing: "192127/921",
GPSDateStamp: "2020:07:28",
},
}
for name, file := range files {
metadata, err := Metadata(readFile(name))
if err != nil {
t.Fatalf("Cannot read the image: %s -> %s", name, err)
}
if metadata.EXIF.Make != file.Make {
t.Fatalf("Unexpected image exif Make: %s != %s", metadata.EXIF.Make, file.Make)
}
if metadata.EXIF.Model != file.Model {
t.Fatalf("Unexpected image exif Model: %s != %s", metadata.EXIF.Model, file.Model)
}
if metadata.EXIF.Orientation != file.Orientation {
t.Fatalf("Unexpected image exif Orientation: %d != %d", metadata.EXIF.Orientation, file.Orientation)
}
if metadata.EXIF.XResolution != file.XResolution {
t.Fatalf("Unexpected image exif XResolution: %s != %s", metadata.EXIF.XResolution, file.XResolution)
}
if metadata.EXIF.YResolution != file.YResolution {
t.Fatalf("Unexpected image exif YResolution: %s != %s", metadata.EXIF.YResolution, file.YResolution)
}
if metadata.EXIF.ResolutionUnit != file.ResolutionUnit {
t.Fatalf("Unexpected image exif ResolutionUnit: %d != %d", metadata.EXIF.ResolutionUnit, file.ResolutionUnit)
}
if metadata.EXIF.Software != file.Software {
t.Fatalf("Unexpected image exif Software: %s != %s", metadata.EXIF.Software, file.Software)
}
if metadata.EXIF.Datetime != file.Datetime {
t.Fatalf("Unexpected image exif Datetime: %s != %s", metadata.EXIF.Datetime, file.Datetime)
}
if metadata.EXIF.YCbCrPositioning != file.YCbCrPositioning {
t.Fatalf("Unexpected image exif YCbCrPositioning: %d != %d", metadata.EXIF.YCbCrPositioning, file.YCbCrPositioning)
}
if metadata.EXIF.Compression != file.Compression {
t.Fatalf("Unexpected image exif Compression: %d != %d", metadata.EXIF.Compression, file.Compression)
}
if metadata.EXIF.ExposureTime != file.ExposureTime {
t.Fatalf("Unexpected image exif ExposureTime: %s != %s", metadata.EXIF.ExposureTime, file.ExposureTime)
}
if metadata.EXIF.FNumber != file.FNumber {
t.Fatalf("Unexpected image exif FNumber: %s != %s", metadata.EXIF.FNumber, file.FNumber)
}
if metadata.EXIF.ExposureProgram != file.ExposureProgram {
t.Fatalf("Unexpected image exif ExposureProgram: %d != %d", metadata.EXIF.ExposureProgram, file.ExposureProgram)
}
if metadata.EXIF.ISOSpeedRatings != file.ISOSpeedRatings {
t.Fatalf("Unexpected image exif ISOSpeedRatings: %d != %d", metadata.EXIF.ISOSpeedRatings, file.ISOSpeedRatings)
}
if metadata.EXIF.ExifVersion != file.ExifVersion {
t.Fatalf("Unexpected image exif ExifVersion: %s != %s", metadata.EXIF.ExifVersion, file.ExifVersion)
}
if metadata.EXIF.DateTimeOriginal != file.DateTimeOriginal {
t.Fatalf("Unexpected image exif DateTimeOriginal: %s != %s", metadata.EXIF.DateTimeOriginal, file.DateTimeOriginal)
}
if metadata.EXIF.DateTimeDigitized != file.DateTimeDigitized {
t.Fatalf("Unexpected image exif DateTimeDigitized: %s != %s", metadata.EXIF.DateTimeDigitized, file.DateTimeDigitized)
}
if metadata.EXIF.ComponentsConfiguration != file.ComponentsConfiguration {
t.Fatalf("Unexpected image exif ComponentsConfiguration: %s != %s", metadata.EXIF.ComponentsConfiguration, file.ComponentsConfiguration)
}
if metadata.EXIF.ShutterSpeedValue != file.ShutterSpeedValue {
t.Fatalf("Unexpected image exif ShutterSpeedValue: %s != %s", metadata.EXIF.ShutterSpeedValue, file.ShutterSpeedValue)
}<|fim▁hole|> }
if metadata.EXIF.BrightnessValue != file.BrightnessValue {
t.Fatalf("Unexpected image exif BrightnessValue: %s != %s", metadata.EXIF.BrightnessValue, file.BrightnessValue)
}
if metadata.EXIF.ExposureBiasValue != file.ExposureBiasValue {
t.Fatalf("Unexpected image exif ExposureBiasValue: %s != %s", metadata.EXIF.ExposureBiasValue, file.ExposureBiasValue)
}
if metadata.EXIF.MeteringMode != file.MeteringMode {
t.Fatalf("Unexpected image exif MeteringMode: %d != %d", metadata.EXIF.MeteringMode, file.MeteringMode)
}
if metadata.EXIF.Flash != file.Flash {
t.Fatalf("Unexpected image exif Flash: %d != %d", metadata.EXIF.Flash, file.Flash)
}
if metadata.EXIF.FocalLength != file.FocalLength {
t.Fatalf("Unexpected image exif FocalLength: %s != %s", metadata.EXIF.FocalLength, file.FocalLength)
}
if metadata.EXIF.SubjectArea != file.SubjectArea {
t.Fatalf("Unexpected image exif SubjectArea: %s != %s", metadata.EXIF.SubjectArea, file.SubjectArea)
}
if metadata.EXIF.MakerNote != file.MakerNote {
t.Fatalf("Unexpected image exif MakerNote: %s != %s", metadata.EXIF.MakerNote, file.MakerNote)
}
if metadata.EXIF.SubSecTimeOriginal != file.SubSecTimeOriginal {
t.Fatalf("Unexpected image exif SubSecTimeOriginal: %s != %s", metadata.EXIF.SubSecTimeOriginal, file.SubSecTimeOriginal)
}
if metadata.EXIF.SubSecTimeDigitized != file.SubSecTimeDigitized {
t.Fatalf("Unexpected image exif SubSecTimeDigitized: %s != %s", metadata.EXIF.SubSecTimeDigitized, file.SubSecTimeDigitized)
}
if metadata.EXIF.ColorSpace != file.ColorSpace {
t.Fatalf("Unexpected image exif ColorSpace: %d != %d", metadata.EXIF.ColorSpace, file.ColorSpace)
}
if metadata.EXIF.PixelXDimension != file.PixelXDimension {
t.Fatalf("Unexpected image exif PixelXDimension: %d != %d", metadata.EXIF.PixelXDimension, file.PixelXDimension)
}
if metadata.EXIF.PixelYDimension != file.PixelYDimension {
t.Fatalf("Unexpected image exif PixelYDimension: %d != %d", metadata.EXIF.PixelYDimension, file.PixelYDimension)
}
if metadata.EXIF.SensingMethod != file.SensingMethod {
t.Fatalf("Unexpected image exif SensingMethod: %d != %d", metadata.EXIF.SensingMethod, file.SensingMethod)
}
if metadata.EXIF.SceneType != file.SceneType {
t.Fatalf("Unexpected image exif SceneType: %s != %s", metadata.EXIF.SceneType, file.SceneType)
}
if metadata.EXIF.ExposureMode != file.ExposureMode {
t.Fatalf("Unexpected image exif ExposureMode: %d != %d", metadata.EXIF.ExposureMode, file.ExposureMode)
}
if metadata.EXIF.WhiteBalance != file.WhiteBalance {
t.Fatalf("Unexpected image exif WhiteBalance: %d != %d", metadata.EXIF.WhiteBalance, file.WhiteBalance)
}
if metadata.EXIF.FocalLengthIn35mmFilm != file.FocalLengthIn35mmFilm {
t.Fatalf("Unexpected image exif FocalLengthIn35mmFilm: %d != %d", metadata.EXIF.FocalLengthIn35mmFilm, file.FocalLengthIn35mmFilm)
}
if metadata.EXIF.SceneCaptureType != file.SceneCaptureType {
t.Fatalf("Unexpected image exif SceneCaptureType: %d != %d", metadata.EXIF.SceneCaptureType, file.SceneCaptureType)
}
if metadata.EXIF.GPSLongitudeRef != file.GPSLongitudeRef {
t.Fatalf("Unexpected image exif GPSLongitudeRef: %s != %s", metadata.EXIF.GPSLongitudeRef, file.GPSLongitudeRef)
}
if metadata.EXIF.GPSLongitude != file.GPSLongitude {
t.Fatalf("Unexpected image exif GPSLongitude: %s != %s", metadata.EXIF.GPSLongitude, file.GPSLongitude)
}
if metadata.EXIF.GPSAltitudeRef != file.GPSAltitudeRef {
t.Fatalf("Unexpected image exif GPSAltitudeRef: %s != %s", metadata.EXIF.GPSAltitudeRef, file.GPSAltitudeRef)
}
if metadata.EXIF.GPSAltitude != file.GPSAltitude {
t.Fatalf("Unexpected image exif GPSAltitude: %s != %s", metadata.EXIF.GPSAltitude, file.GPSAltitude)
}
if metadata.EXIF.GPSSpeedRef != file.GPSSpeedRef {
t.Fatalf("Unexpected image exif GPSSpeedRef: %s != %s", metadata.EXIF.GPSSpeedRef, file.GPSSpeedRef)
}
if metadata.EXIF.GPSSpeed != file.GPSSpeed {
t.Fatalf("Unexpected image exif GPSSpeed: %s != %s", metadata.EXIF.GPSSpeed, file.GPSSpeed)
}
if metadata.EXIF.GPSImgDirectionRef != file.GPSImgDirectionRef {
t.Fatalf("Unexpected image exif GPSImgDirectionRef: %s != %s", metadata.EXIF.GPSImgDirectionRef, file.GPSImgDirectionRef)
}
if metadata.EXIF.GPSImgDirection != file.GPSImgDirection {
t.Fatalf("Unexpected image exif GPSImgDirection: %s != %s", metadata.EXIF.GPSImgDirection, file.GPSImgDirection)
}
if metadata.EXIF.GPSDestBearingRef != file.GPSDestBearingRef {
t.Fatalf("Unexpected image exif GPSDestBearingRef: %s != %s", metadata.EXIF.GPSDestBearingRef, file.GPSDestBearingRef)
}
if metadata.EXIF.GPSDestBearing != file.GPSDestBearing {
t.Fatalf("Unexpected image exif GPSDestBearing: %s != %s", metadata.EXIF.GPSDestBearing, file.GPSDestBearing)
}
if metadata.EXIF.GPSDateStamp != file.GPSDateStamp {
t.Fatalf("Unexpected image exif GPSDateStamp: %s != %s", metadata.EXIF.GPSDateStamp, file.GPSDateStamp)
}
}
}
func TestColourspaceIsSupported(t *testing.T) {
files := []struct {
name string
}{
{"test.jpg"},
{"test.png"},
{"test.webp"},
}
for _, file := range files {
supported, err := ColourspaceIsSupported(readFile(file.name))
if err != nil {
t.Fatalf("Cannot read the image: %s -> %s", file.name, err)
}
if supported != true {
t.Fatalf("Unsupported image colourspace")
}
}
supported, err := initImage("test.jpg").ColourspaceIsSupported()
if err != nil {
t.Errorf("Cannot process the image: %#v", err)
}
if supported != true {
t.Errorf("Non-supported colourspace")
}
}
func readFile(file string) []byte {
data, _ := os.Open(path.Join("testdata", file))
buf, _ := ioutil.ReadAll(data)
return buf
}<|fim▁end|> | if metadata.EXIF.ApertureValue != file.ApertureValue {
t.Fatalf("Unexpected image exif ApertureValue: %s != %s", metadata.EXIF.ApertureValue, file.ApertureValue) |
<|file_name|>aoncmd_taskupdate.py<|end_file_name|><|fim▁begin|>"""Update a task in maniphest.
you can use the 'task id' output from the 'arcyon task-create' command as input
to this command.
usage examples:
update task '99' with a new title, only show id:
$ arcyon task-update 99 -t 'title' --format-id
99
"""
# =============================================================================
# CONTENTS
# -----------------------------------------------------------------------------
# aoncmd_taskupdate
#
# Public Functions:
# getFromfilePrefixChars
# setupParser
# process
#
# -----------------------------------------------------------------------------
# (this contents block is generated, edits will be lost)
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import textwrap
import phlcon_maniphest
import phlcon_project
import phlcon_user
import phlsys_makeconduit
def getFromfilePrefixChars():
return ""
def setupParser(parser):
# make a list of priority names in increasing order of importance
priority_name_list = phlcon_maniphest.PRIORITIES.keys()
priority_name_list.sort(
key=lambda x: phlcon_maniphest.PRIORITIES[x])
priorities = parser.add_argument_group(
'optional priority arguments',
'use any of ' + textwrap.fill(
str(priority_name_list)))
output_group = parser.add_argument_group(
'Output format arguments',
'Mutually exclusive, defaults to "--format-summary"')
output = output_group.add_mutually_exclusive_group()
opt = parser.add_argument_group(
'Optional task arguments',
'You can supply these later via the web interface if you wish')
priorities.add_argument(
'--priority',
'-p',
choices=priority_name_list,
metavar="PRIORITY",
default=None,
type=str,
help="the priority or importance of the task")
parser.add_argument(
'id',
metavar='INT',
help='the id of the task',
type=str)
parser.add_argument(
'--title',
'-t',
metavar='STRING',
help='the short title of the task',
default=None,
type=str)
opt.add_argument(
'--description',
'-d',
metavar='STRING',
help='the long description of the task',
default=None,
type=str)
opt.add_argument(
'--owner',
'-o',
metavar='USER',
help='the username of the owner',
type=str)
opt.add_argument(
'--ccs',
'-c',
nargs="*",
metavar='USER',<|fim▁hole|> nargs="*",
metavar='PROJECT',
default=[],
help='a list of project names to add the task to',
type=str)
opt.add_argument(
'--comment',
'-m',
metavar='STRING',
help='an optional comment to make on the task',
default=None,
type=str)
output.add_argument(
'--format-summary',
action='store_true',
help='will print a human-readable summary of the result.')
output.add_argument(
'--format-id',
action='store_true',
help='will print just the id of the new task, for scripting.')
output.add_argument(
'--format-url',
action='store_true',
help='will print just the url of the new task, for scripting.')
phlsys_makeconduit.add_argparse_arguments(parser)
def process(args):
if args.title and not args.title.strip():
print('you must supply a non-empty title', file=sys.stderr)
return 1
conduit = phlsys_makeconduit.make_conduit(
args.uri, args.user, args.cert, args.act_as_user)
# create_task expects an integer
priority = None
if args.priority is not None:
priority = phlcon_maniphest.PRIORITIES[args.priority]
# conduit expects PHIDs not plain usernames
user_phids = phlcon_user.UserPhidCache(conduit)
if args.owner:
user_phids.add_hint(args.owner)
if args.ccs:
user_phids.add_hint_list(args.ccs)
owner = user_phids.get_phid(args.owner) if args.owner else None
ccs = [user_phids.get_phid(u) for u in args.ccs] if args.ccs else None
# conduit expects PHIDs not plain project names
projects = None
if args.projects:
project_to_phid = phlcon_project.make_project_to_phid_dict(conduit)
projects = [project_to_phid[p] for p in args.projects]
result = phlcon_maniphest.update_task(
conduit,
args.id,
args.title,
args.description,
priority,
owner,
ccs,
projects,
args.comment)
if args.format_id:
print(result.id)
elif args.format_url:
print(result.uri)
else: # args.format_summary:
message = (
"Updated task '{task_id}', you can view it at this URL:\n"
" {url}"
).format(
task_id=result.id,
url=result.uri)
print(message)
# -----------------------------------------------------------------------------
# Copyright (C) 2013-2014 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------<|fim▁end|> | help='a list of usernames to cc on the task',
type=str)
opt.add_argument(
'--projects', |
<|file_name|>tests.py<|end_file_name|><|fim▁begin|>import os
import shutil
import unittest
from django.utils import six
from django_node import node, npm
from django_node.node_server import NodeServer
from django_node.server import server
from django_node.base_service import BaseService
from django_node.exceptions import (
OutdatedDependency, MalformedVersionInput, NodeServiceError, NodeServerAddressInUseError, NodeServerTimeoutError,
ServiceSourceDoesNotExist, MalformedServiceName
)<|fim▁hole|>TEST_DIR = os.path.abspath(os.path.dirname(__file__))
PATH_TO_NODE_MODULES = os.path.join(TEST_DIR, 'node_modules')
DEPENDENCY_PACKAGE = 'yargs'
PATH_TO_INSTALLED_PACKAGE = os.path.join(PATH_TO_NODE_MODULES, DEPENDENCY_PACKAGE)
PACKAGE_TO_INSTALL = 'jquery'
PATH_TO_PACKAGE_TO_INSTALL = os.path.join(PATH_TO_NODE_MODULES, PACKAGE_TO_INSTALL)
PATH_TO_PACKAGE_JSON = os.path.join(TEST_DIR, 'package.json')
echo_service = EchoService()
timeout_service = TimeoutService()
error_service = ErrorService()
class TestDjangoNode(unittest.TestCase):
maxDiff = None
def setUp(self):
self.package_json_contents = self.read_package_json()
def tearDown(self):
if os.path.exists(PATH_TO_NODE_MODULES):
shutil.rmtree(PATH_TO_NODE_MODULES)
self.write_package_json(self.package_json_contents)
if server.is_running:
# Reset the server
server.stop()
def read_package_json(self):
with open(PATH_TO_PACKAGE_JSON, 'r') as package_json_file:
return package_json_file.read()
def write_package_json(self, contents):
with open(PATH_TO_PACKAGE_JSON, 'w+') as package_json_file:
package_json_file.write(contents)
def test_node_is_installed(self):
self.assertTrue(node.is_installed)
def test_node_version_raw(self):
self.assertTrue(isinstance(node.version_raw, six.string_types))
self.assertGreater(len(node.version_raw), 0)
def test_node_version(self):
self.assertTrue(isinstance(node.version, tuple))
self.assertGreaterEqual(len(node.version), 3)
def test_npm_is_installed(self):
self.assertTrue(npm.is_installed)
def test_npm_version_raw(self):
self.assertTrue(isinstance(npm.version_raw, six.string_types))
self.assertGreater(len(npm.version_raw), 0)
def test_npm_version(self):
self.assertTrue(isinstance(npm.version, tuple))
self.assertGreaterEqual(len(npm.version), 3)
def test_ensure_node_installed(self):
node.ensure_installed()
def test_ensure_npm_installed(self):
npm.ensure_installed()
def test_ensure_node_version_greater_than(self):
self.assertRaises(MalformedVersionInput, node.ensure_version_gte, 'v99999.0.0')
self.assertRaises(MalformedVersionInput, node.ensure_version_gte, '99999.0.0')
self.assertRaises(MalformedVersionInput, node.ensure_version_gte, (None,))
self.assertRaises(MalformedVersionInput, node.ensure_version_gte, (10,))
self.assertRaises(MalformedVersionInput, node.ensure_version_gte, (999999999,))
self.assertRaises(MalformedVersionInput, node.ensure_version_gte, (999999999, 0,))
self.assertRaises(OutdatedDependency, node.ensure_version_gte, (999999999, 0, 0,))
node.ensure_version_gte((0, 0, 0,))
node.ensure_version_gte((0, 9, 99999999))
node.ensure_version_gte((0, 10, 33,))
def test_ensure_npm_version_greater_than(self):
self.assertRaises(MalformedVersionInput, npm.ensure_version_gte, 'v99999.0.0')
self.assertRaises(MalformedVersionInput, npm.ensure_version_gte, '99999.0.0')
self.assertRaises(MalformedVersionInput, npm.ensure_version_gte, (None,))
self.assertRaises(MalformedVersionInput, npm.ensure_version_gte, (10,))
self.assertRaises(MalformedVersionInput, npm.ensure_version_gte, (999999999,))
self.assertRaises(MalformedVersionInput, npm.ensure_version_gte, (999999999, 0,))
self.assertRaises(OutdatedDependency, npm.ensure_version_gte, (999999999, 0, 0,))
npm.ensure_version_gte((0, 0, 0,))
npm.ensure_version_gte((0, 9, 99999999))
npm.ensure_version_gte((2, 1, 8,))
def test_node_run_returns_output(self):
stderr, stdout = node.run('--version',)
stdout = stdout.strip()
self.assertEqual(stdout, node.version_raw)
def test_npm_run_returns_output(self):
stderr, stdout = npm.run('--version',)
stdout = stdout.strip()
self.assertEqual(stdout, npm.version_raw)
def test_npm_install_can_install_dependencies(self):
npm.install(TEST_DIR)
self.assertTrue(os.path.exists(PATH_TO_NODE_MODULES))
self.assertTrue(os.path.exists(PATH_TO_INSTALLED_PACKAGE))
def test_node_server_services_can_be_validated(self):
class MissingSource(BaseService):
pass
self.assertRaises(ServiceSourceDoesNotExist, MissingSource.validate)
class AbsoluteUrlName(EchoService):
name = 'http://foo.com'
self.assertRaises(MalformedServiceName, AbsoluteUrlName.validate)
class MissingOpeningSlashName(EchoService):
name = 'foo/bar'
self.assertRaises(MalformedServiceName, MissingOpeningSlashName.validate)
def test_node_server_services_are_discovered(self):
for service in (EchoService, ErrorService, TimeoutService):
self.assertIn(service, server.services)
def test_node_server_can_start_and_stop(self):
self.assertIsInstance(server, NodeServer)
server.start()
self.assertTrue(server.is_running)
self.assertTrue(server.test())
server.stop()
self.assertFalse(server.is_running)
self.assertFalse(server.test())
server.start()
self.assertTrue(server.is_running)
self.assertTrue(server.test())
server.stop()
self.assertFalse(server.is_running)
self.assertFalse(server.test())
def test_node_server_process_can_rely_on_externally_controlled_processes(self):
self.assertFalse(server.test())
new_server = NodeServer()
new_server.start()
self.assertTrue(server.test())
new_server.stop()
self.assertFalse(new_server.test())
self.assertFalse(server.test())
def test_node_server_process_can_raise_on_port_collisions(self):
self.assertFalse(server.test())
new_server = NodeServer()
new_server.start()
self.assertTrue(server.test())
self.assertEqual(server.address, new_server.address)
self.assertEqual(server.port, new_server.port)
self.assertRaises(NodeServerAddressInUseError, server.start, use_existing_process=False)
new_server.stop()
self.assertFalse(server.test())
server.start(use_existing_process=False)
self.assertTrue(server.test())
def test_node_server_config_is_as_expected(self):
config = server.get_config()
self.assertEqual(config['address'], server.address)
self.assertEqual(config['port'], server.port)
self.assertEqual(config['startup_output'], server.get_startup_output())
services = (EchoService, ErrorService, TimeoutService)
self.assertEqual(len(config['services']), len(services))
service_names = [obj['name'] for obj in config['services']]
service_sources = [obj['path_to_source'] for obj in config['services']]
for service in services:
self.assertIn(service.get_name(), service_names)
self.assertIn(service.get_path_to_source(), service_sources)
def test_node_server_echo_service_pumps_output_back(self):
response = echo_service.send(echo='test content')
self.assertEqual(response.text, 'test content')
def test_node_server_throws_timeout_on_long_running_services(self):
self.assertRaises(NodeServerTimeoutError, timeout_service.send)
def test_node_server_error_service_works(self):
self.assertRaises(NodeServiceError, error_service.send)
def test_node_server_config_management_command_provides_the_expected_output(self):
from django_node.management.commands.node_server_config import Command
with StdOutTrap() as output:
Command().handle()
self.assertEqual(''.join(output), server.get_serialised_config())<|fim▁end|> | from django_node.services import EchoService
from .services import TimeoutService, ErrorService
from .utils import StdOutTrap
|
<|file_name|>automatic_questioner.py<|end_file_name|><|fim▁begin|>"""<|fim▁hole|>described structure and which contains information about functions and
variables of other packages.
Scheme of the db
----------------
# {'function_name':
# {'variables':
# {'variable_name':
# {'question_info':
# {'qtype': ['simple_input', 'confirmation_question',
# 'selection_options', 'selection_list_options'],
# 'question_spec': 'question_spec'},
# 'default': default}},
########
# 'descendants': [{'agg_description':
# {variable_name:
# {'variable_value': 'function_name'}
# },
# 'agg_name': 'aggregated_parameter_name'}]
# }}
######## OR
# 'descendants': [{'agg_description': 'function_name'
# 'agg_name': 'aggregated_parameter_name'}]
# }}
#TODO: checker 1 function with list of functions and dicts of dicts
"""
from tui_questioner import general_questioner
def check_quest_info(db):
"""Function which carry out the automatic checking of the database of
function and variables.
Parameters
----------
db: dict
the dictionary of all the information about the system with all its
functions and dependencies between them in order to ask for their
variables authomatically.
Returns
-------
check: boolean
returns the correctness of the database.
path: list
path of the possible error.
message: str
message of the error if it exists.
"""
## 0. Initial preset variables needed
# Function to compare lists
def equality_elements_list(a, b):
a = a.keys() if type(a) == dict else a
b = b.keys() if type(b) == dict else b
c = a[-1::-1]
return a == b or c == b
# List of elements available in some dicts at some levels
first_level = ['descendants', 'variables']
desc_2_level = ['agg_description', 'agg_name']
vars_2_level = ['question_info', 'default']
vars_3_level = ['qtype', 'question_spec']
# Messages of errors
m0 = "The given database of functions is not a dictionary."
m1 = "The function '%s' does not have "+str(first_level)+" as keys."
m2 = "The variables of function '%s' is not a dict."
m3 = "Incorrect keys "+str(vars_2_level)+" in function %s and variable %s."
m4 = "Incorrect question_info format for function %s and variable %s."
m5 = "Not a string the 'qtype' of function %s and variable %s."
m6 = "Incorrect 'question_spec' format for function %s and variable %s."
m7 = "Descendants of the function %s is not a list."
m8 = "Elements of the list of descendants not a dict for function %s."
m9 = "Incorrect structure of a dict in descendants for function %s."
m10 = "Incorrect type of agg_description for function %s and variable %s."
m11 = "Incorrect type of agg_description for function %s."
## Check db is a dict
if type(db) != dict:
return False, [], m0
## Loop for check each function in db
for funct in db.keys():
## Check main keys:
first_bl = equality_elements_list(db[funct], first_level)
if not first_bl:
return False, [funct], m1 % funct
## Check variables
if not type(db[funct]['variables']) == dict:
check = False
path = [funct, 'variables']
message = m2 % funct
return check, path, message
for var in db[funct]['variables']:
varsbles = db[funct]['variables']
v2_bl = equality_elements_list(varsbles[var], vars_2_level)
v3_bl = equality_elements_list(varsbles[var]['question_info'],
vars_3_level)
qtype_bl = db[funct]['variables'][var]['question_info']['qtype']
qtype_bl = type(qtype_bl) != str
qspec_bl = db[funct]['variables'][var]['question_info']
qspec_bl = type(qspec_bl['question_spec']) != dict
if not v2_bl:
check = False
path = [funct, 'variables', var]
message = m3 % (funct, var)
return check, path, message
### Check question_info
if not v3_bl:
check = False
path = [funct, 'variables', 'question_info']
message = m4 % (funct, var)
return check, path, message
if qtype_bl:
check = False
path = [funct, 'variables', 'question_info', 'qtype']
message = m5 % (funct, var)
return check, path, message
if qspec_bl:
check = False
path = [funct, 'variables', 'question_info', 'question_spec']
message = m6 % (funct, var)
return check, path, message
## Check descendants
if not type(db[funct]['descendants']) == list:
check = False
path = [funct, 'descendants']
message = m7 % funct
return check, path, message
for var_desc in db[funct]['descendants']:
if not type(var_desc) == dict:
check = False
path = [funct, 'descendants']
message = m8 % funct
return check, path, message
d2_bl = equality_elements_list(var_desc.keys(), desc_2_level)
if not d2_bl:
check = False
path = [funct, 'descendants']
message = m9 % funct
return check, path, message
if type(var_desc['agg_description']) == str:
pass
elif type(var_desc['agg_description']) == dict:
for varname in var_desc['agg_description']:
if not type(var_desc['agg_description'][varname]) == dict:
check = False
path = [funct, 'descendants', 'agg_description']
message = m10 % (funct, varname)
return check, path, message
else:
check = False
path = [funct, 'descendants', 'agg_description']
message = m11 % funct
return check, path, message
return True, [], ''
def automatic_questioner(function_name, db, choosen={}):
"""Function which carry out the automatic questioning task.
Parameters
----------
function_name: str
the function for which we are interested in their params in order to
call it.
db: dict
the dictionary of all the information about the system with all its
functions and dependencies between them in order to ask for their
variables authomatically.
choosen: dict
previous choosen parameters. The function will avoid to ask for the
pre-set parameters.
Returns
-------
choosen_values: dict
the selected values which are disposed to input in the function we want
to call.
"""
## Initialize variables needed
m1 = "Not value for a variables in order to create aggregate variables."
choosen_values = choosen
if function_name in db.keys():
data_f = db[function_name]
else:
# Better raise error?
return choosen_values
# Put the variables
for var in data_f['variables'].keys():
# Put the variables if there are still not selected
if var not in choosen_values.keys():
question = data_f['variables'][var]['question_info']
choosen_values[var] = general_questioner(**question)
# Put aggregated variables (descendants)
for var_desc in data_f['descendants']:
# Possible variables and aggregated parameter name
agg_description = var_desc['agg_description']
agg_param = var_desc['agg_name']
# prepare possible input for existant aggregated value in choosen
ifaggvar = agg_param in choosen_values
aggvarval = choosen_values[agg_param] if ifaggvar else {}
## Without dependant variable
if type(agg_description) == str:
# Obtain function name
fn = choosen_values[agg_param]
# Recurrent call
aux = automatic_questioner(fn, db, aggvarval)
# Aggregate to our values
choosen_values[agg_param] = aux
## With dependant variable
elif type(agg_description) == dict:
for var in var_desc['agg_description']:
if not var in choosen_values:
raise Exception(m1)
## Give a list and return a dict in the aggparam variable
elif type(choosen_values[var]) == str:
# Obtain function name
fn = var_desc['agg_description'][var][choosen_values[var]]
# Recurrent call
aux = automatic_questioner(fn, db, aggvarval)
# Aggregate to our values
choosen_values[agg_param] = aux
## Give a list and return a list in the aggparam variable
elif type(choosen_values[var]) == list:
choosen_values[agg_param] = []
aggvarval = [] if type(aggvarval) != list else aggvarval
for i in range(len(choosen_values[var])):
val = choosen_values[var][i]
fn = var_desc['agg_description'][var][val]
aux = automatic_questioner(fn, db, aggvarval[i])
choosen_values.append(aux)
return choosen_values
def get_default(function_name, db, choosen={}):
"""Function which returns a dictionary of choosen values by default.
Parameters
----------
function_name: str
the function for which we are interested in their params in order to
call it.
db: dict
the dictionary of all the information about the system with all its
functions and dependencies between them in order to ask for their
variables authomatically.
choosen: dict
previous choosen parameters. The function will avoid to ask for the
pre-set parameters.
Returns
-------
choosen_values: dict
the selected values which are disposed to input in the function we want
to call.
-----
TODO: Possibility of being integrated with authomatic_questioner after
testing.
"""
## Initialize variables needed
m1 = "Not value for a variables in order to create aggregate variables."
choosen_values = choosen
if function_name in db.keys():
data_f = db[function_name]
else:
# Better raise error?
return choosen_values
# Get the variables
for var in data_f['variables'].keys():
# Put the variables if there are still not selected
if var not in choosen_values.keys():
default = data_f['variables'][var]['default']
choosen_values[var] = default
# Get aggregated variables (descendants)
for var_desc in data_f['descendants']:
# Possible variables and aggregated parameter name
agg_description = var_desc['agg_description']
agg_param = var_desc['agg_name']
# prepare possible input for existant aggregated value in choosen
ifaggvar = agg_param in choosen_values
aggvarval = choosen_values[agg_param] if ifaggvar else {}
## Without dependant variable
if type(agg_description) == str:
# Obtain function name
fn = choosen_values[agg_param]
# Recurrent call
aux = get_default(fn, db, aggvarval)
# Aggregate to our values
choosen_values[agg_param] = aux
## With dependant variable
elif type(agg_description) == dict:
for var in var_desc['agg_description']:
if not var in choosen_values:
raise Exception(m1)
## Give a list and return a dict in the aggparam variable
elif type(choosen_values[var]) == str:
# Obtain function name
fn = var_desc['agg_description'][var][choosen_values[var]]
# Recurrent call
aux = get_default(fn, db, aggvarval)
# Aggregate to our values
choosen_values[agg_param] = aux
## Give a list and return a list in the aggparam variable
elif type(choosen_values[var]) == list:
choosen_values[agg_param] = []
aggvarval = [] if type(aggvarval) != list else aggvarval
for i in range(len(choosen_values[var])):
val = choosen_values[var][i]
fn = var_desc['agg_description'][var][val]
aux = get_default(fn, db, aggvarval[i])
choosen_values.append(aux)
return choosen_values
###############################################################################
###############################################################################
###############################################################################
def get_default3(function_name, db, choosen={}):
"""Function which returns a dictionary of choosen values by default.
Parameters
----------
function_name: str
the function for which we are interested in their params in order to
call it.
db: dict
the dictionary of all the information about the system with all its
functions and dependencies between them in order to ask for their
variables authomatically.
choosen: dict
previous choosen parameters. The function will avoid to ask for the
pre-set parameters.
Returns
-------
choosen_values: dict
the selected values which are disposed to input in the function we want
to call.
-----
TODO: Possibility of being integrated with authomatic_questioner after
testing.
"""
choosen_values = choosen
if function_name in db.keys():
data_f = db[function_name]
else:
# Better raise error?
return choosen_values
# Get the variables
for var in data_f['variables'].keys():
# Put the variables if there are still not selected
if var not in choosen_values.keys():
default = data_f['variables'][var]['default']
choosen_values[var] = default
# Get the aggregated variables (descendants)
for i in range(len(data_f['descendants'])):
# Possible variables and aggregated parameter name
vars_values = data_f['descendants'][i]['variable_values']
agg_param = data_f['descendants'][i]['parameters']
variables = vars_values.keys()
# prepare possible input for existant aggregated value in choosen
ifaggvar = agg_param in choosen_values
aggvarval = choosen_values[agg_param] if ifaggvar else {}
for var in variables:
# boolean variables
value = choosen_values[var]
iflist = type(value) == list
ifvars = var in choosen_values.keys()
# if we have to return a list
if ifvars and iflist:
# Initialization values
n = len(value)
aggvarval = aggvarval if ifaggvar else [{} for i in range(n)]
results = []
i = 0
for val in value:
# Obtain function_name
f_name = vars_values[var][value]
# Recurrent call
aux = get_default(f_name, db, aggvarval[i])
# Insert in the correspondent list
results.append(aux)
i += 1
# if we have to return a dict
elif ifvars and not iflist:
# Obtain function_name
f_name = vars_values[var][value]
# Recurrent call
choosen_values[agg_param] = get_default(f_name, db, aggvarval)
return choosen_values
def automatic_questioner3(function_name, db, choosen={}):
"""Function which carry out the automatic questioning task.
Parameters
----------
function_name: str
the function for which we are interested in their params in order to
call it.
db: dict
the dictionary of all the information about the system with all its
functions and dependencies between them in order to ask for their
variables authomatically.
choosen: dict
previous choosen parameters. The function will avoid to ask for the
pre-set parameters.
Returns
-------
choosen_values: dict
the selected values which are disposed to input in the function we want
to call.
"""
## Initialize variables needed
m1 = "Not value for a variables in order to create aggregate variables."
choosen_values = choosen
if function_name in db.keys():
data_f = db[function_name]
else:
# Better raise error?
return choosen_values
# Put the variables
for var in data_f['variables'].keys():
# Put the variables if there are still not selected
if var not in choosen_values.keys():
question = data_f['variables'][var]['question_info']
choosen_values[var] = general_questioner(**question)
# Put aggregated variables (descendants)
for i in range(len(data_f['descendants'])):
# Possible variables and aggregated parameter name
vars_values = data_f['descendants'][i]['variable_values']
agg_param = data_f['descendants'][i]['parameters']
variables = vars_values.keys()
# prepare possible input for existant aggregated value in choosen
ifaggvar = agg_param in choosen_values
aggvarval = choosen_values[agg_param] if ifaggvar else {}
for var in variables:
# boolean variables
value = choosen_values[var]
iflist = type(value) == list
ifvars = var in choosen_values.keys()
# if we have to return a list
if ifvars and iflist:
# Initialization values
n = len(value)
aggvarval = aggvarval if ifaggvar else [{} for i in range(n)]
results = []
i = 0
for val in value:
# Obtain function_name
f_name = vars_values[var][value]
# Recurrent call
aux = authomatic_questioner(f_name, db, aggvarval[i])
# Insert in the correspondent list
results.append(aux)
i += 1
# if we have to return a dict
elif ifvars and not iflist:
# Obtain function_name
f_name = vars_values[var][value]
# Recurrent call
choosen_values[agg_param] = authomatic_questioner(f_name, db,
aggvarval)
return choosen_values<|fim▁end|> | automatic_questioner
--------------------
Module which serves as a interactor between the possible database with the |
<|file_name|>custom_tests.rs<|end_file_name|><|fim▁begin|>extern crate rusoto_mock;
use crate::generated::{LexRuntime, LexRuntimeClient, PostTextRequest, PostTextResponse};
use rusoto_core::Region;
use std::collections::HashMap;
use self::rusoto_mock::*;
#[tokio::test]
async fn test_post_text_resposnse_serialization() {
let mock_resp_body = r#"{
"dialogState": "ElicitSlot",<|fim▁hole|> "intentName": "BookCar",
"message": "In what city do you need to rent a car?",
"messageFormat": "PlainText",
"responseCard": null,
"sessionAttributes": {},
"slotToElicit": "PickUpCity",
"slots": {
"CarType": null,
"PickUpCity": "Boston"
}
}"#;
let mock_request = MockRequestDispatcher::with_status(200).with_body(mock_resp_body);
let lex_client =
LexRuntimeClient::new_with(mock_request, MockCredentialsProvider, Region::UsEast1);
let post_text_req = PostTextRequest {
input_text: "Book a car".to_owned(),
user_id: "rs".to_owned(),
..Default::default()
};
let mut slots = HashMap::new();
slots.insert("CarType".to_owned(), None);
slots.insert("PickUpCity".to_owned(), Some("Boston".to_owned()));
let expected = PostTextResponse {
active_contexts: None,
alternative_intents: None,
bot_version: None,
dialog_state: Some("ElicitSlot".to_owned()),
intent_name: Some("BookCar".to_owned()),
message: Some("In what city do you need to rent a car?".to_owned()),
message_format: Some("PlainText".to_owned()),
nlu_intent_confidence: None,
slot_to_elicit: Some("PickUpCity".to_owned()),
slots: Some(slots),
response_card: None,
session_attributes: Some(HashMap::new()),
sentiment_response: None,
session_id: None,
};
let result: PostTextResponse = lex_client.post_text(post_text_req).await.unwrap();
assert_eq!(result, expected);
}<|fim▁end|> | |
<|file_name|>test_config.py<|end_file_name|><|fim▁begin|>import unittest
from flumine import config
class ConfigTest(unittest.TestCase):
def test_init(self):
self.assertFalse(config.simulated)
self.assertTrue(config.simulated_strategy_isolation)
self.assertIsInstance(config.customer_strategy_ref, str)
self.assertIsInstance(config.process_id, int)
self.assertIsNone(config.current_time)
self.assertFalse(config.raise_errors)
self.assertEqual(config.max_execution_workers, 32)
self.assertFalse(config.async_place_orders)
self.assertEqual(config.place_latency, 0.120)
self.assertEqual(config.cancel_latency, 0.170)<|fim▁hole|><|fim▁end|> | self.assertEqual(config.update_latency, 0.150)
self.assertEqual(config.replace_latency, 0.280)
self.assertEqual(config.order_sep, "-")
self.assertEqual(config.execution_retry_attempts, 10) |
<|file_name|>users-routing.module.ts<|end_file_name|><|fim▁begin|>import { NgModule } from '@angular/core';
import { Routes, RouterModule } from '@angular/router';
import { UserListComponent } from "../user-list/user-list.component";
import { UserDetailComponent } from "../user-detail/user-detail.component";
const routes: Routes = [
{
path: '',
children: [
{ path: '', component: UserListComponent, data: { title: 'Users' } },
{ path: 'add', component: UserDetailComponent, data: { title: 'Add User' } }
]<|fim▁hole|>
@NgModule({
imports: [RouterModule.forChild(routes)],
exports: [RouterModule]
})
export class UsersRoutingModule { }<|fim▁end|> | }
]; |
<|file_name|>notification.py<|end_file_name|><|fim▁begin|>import requests
import logging
import redis
from requests.packages.urllib3.exceptions import ConnectionError
from core.serialisers import json
from dss import localsettings
# TODO([email protected]): refactor these out to
# classes to avoid duplicating constants below
HEADERS = {
'content-type': 'application/json'
}
logger = logging.getLogger('spa')
def post_notification(session_id, image, message):
try:
payload = {<|fim▁hole|> }
data = json.dumps(payload)
r = requests.post(
localsettings.REALTIME_HOST + 'notification',
data=data,
headers=HEADERS
)
if r.status_code == 200:
return ""
else:
return r.text
except ConnectionError:
#should probably implement some sort of retry in here
pass<|fim▁end|> | 'sessionid': session_id,
'image': image,
'message': message |
<|file_name|>number_parser.spec.js<|end_file_name|><|fim▁begin|>// Copyright 2012 Twitter, Inc
// http://www.apache.org/licenses/LICENSE-2.0
var TwitterCldr = require('../../../lib/assets/javascripts/twitter_cldr/core.js');
var data = require('../../../lib/assets/javascripts/twitter_cldr/en.js');
describe("NumberParser", function() {
var separators, parser;
beforeEach(function() {
TwitterCldr.set_data(data);
separators = [",", "\\."];
parser = new TwitterCldr.NumberParser();
});
describe("#group_separator()", function() {
it("returns the correct group separator", function() {
expect(parser.group_separator()).toEqual(",");
});
});
describe("#decimal_separator()", function() {
it("returns the correct decimal separator", function() {
expect(parser.decimal_separator()).toEqual("\\\.");
});
});
describe("#identify", function() {
it("properly identifies a numeric value", function() {
expect(
parser.identify("7841", separators[0], separators[1])
).toEqual({value: "7841", type: "numeric"});
});
it("properly identifies a decimal separator", function() {
expect(
parser.identify(".", separators[0], separators[1])
).toEqual({value: ".", type: "decimal"});
});
it("properly identifies a group separator", function() {
expect(
parser.identify(",", separators[0], separators[1])
).toEqual({value: ",", type: "group"});
});
it("returns nil if the text doesn't match a number or either separators", function() {
expect(
parser.identify("abc", separators[0], separators[1])
).toEqual({value: "abc", type: null});
});
});
describe("#tokenize", function() {
it("splits text by numericality and group/decimal separators", function() {
expect(
parser.tokenize("1,33.00", separators[0], separators[1])
).toEqual([
{value: "1", type: "numeric"},
{value: ",", type: "group"},<|fim▁hole|> {value: "00", type: "numeric"}
]);
});
it("returns an empty array for a non-numeric string", function() {
expect(parser.tokenize("abc", separators[0], separators[1])).toEqual([]);
});
});
describe("#get_separators", function() {
it("returns all separators when strict mode is off", function() {
var found_separators = parser.get_separators(false);
expect(found_separators.group).toEqual('\\.,\\s');
expect(found_separators.decimal).toEqual('\\.,\\s');
});
it("returns only locale-specific separators when strict mode is on", function() {
var found_separators = parser.get_separators(true);
expect(found_separators.group).toEqual(',');
expect(found_separators.decimal).toEqual('\\.');
});
});
describe("#is_punct_valid", function() {
function strip_numerics(token_list) {
var tokens = [];
for (var idx in token_list) {
if (token_list[idx].type != "numeric") {
tokens.push(token_list[idx]);
}
}
return tokens;
}
it("correctly validates a number with no decimal", function() {
var tokens = strip_numerics(parser.tokenize("1.337", separators[0], separators[1]));
expect(parser.is_punct_valid(tokens)).toEqual(true);
});
it("correctly validates a number with a decimal", function() {
var tokens = strip_numerics(parser.tokenize("1,337.00", separators[0], separators[1]));
expect(parser.is_punct_valid(tokens)).toEqual(true);
});
it("reports on an invalid number when it has more than one decimal", function() {
var tokens = strip_numerics(parser.tokenize("1.337.00", separators[0], separators[1]));
expect(parser.is_punct_valid(tokens)).toEqual(false);
});
});
describe("#is_numeric?", function() {
it("returns true if the text is numeric", function() {
expect(TwitterCldr.NumberParser.is_numeric("4839", "")).toEqual(true);
expect(TwitterCldr.NumberParser.is_numeric("1", "")).toEqual(true);
});
it("returns false if the text is not purely numeric", function() {
expect(TwitterCldr.NumberParser.is_numeric("abc", "")).toEqual(false);
expect(TwitterCldr.NumberParser.is_numeric("123abc", "")).toEqual(false);
});
it("returns false if the text is blank", function() {
expect(TwitterCldr.NumberParser.is_numeric("", "")).toEqual(false);
});
it("accepts the given characters as valid numerics", function() {
expect(TwitterCldr.NumberParser.is_numeric("a123a", "a")).toEqual(true);
expect(TwitterCldr.NumberParser.is_numeric("1,234.56")).toEqual(true); // default separator chars used here
});
});
describe("#valid?", function() {
it("correctly identifies a series of valid cases", function() {
var nums = ["5", "5.0", "1,337", "1,337.0", "0.05", ".5", "1,337,000.00"];
for (var idx in nums) {
expect(parser.is_valid(nums[idx])).toEqual(true);
}
});
it("correctly identifies a series of invalid cases", function() {
var nums = ["12.0.0", "5.", "5,"];
for (var idx in nums) {
expect(parser.is_valid(nums[idx])).toEqual(false);
}
});
});
describe("#parse", function() {
it("correctly parses a series of valid numbers", function() {
var cases = {
"5": 5,
"5.0": 5.0,
"1,337": 1337,
"1,337.0": 1337.0,
"0.05": 0.05,
".5": 0.5, // Borked
"1,337,000.00": 1337000.0
};
for (var text in cases) {
var expected = cases[text];
expect(parser.parse(text)).toEqual(expected);
}
});
it("correctly raises an error when asked to parse invalid numbers", function() {
var cases = ["12.0.0", "5.", "5,"];
for (var idx in cases) {
expect(function() {
parser.parse(cases[idx])
}).toThrow(new Error("Invalid number"));
}
});
describe("non-strict", function() {
it("succeeds in parsing even if inexact punctuation is used", function() {
expect(parser.parse("5 100", {strict: false})).toEqual(5100);
});
});
});
describe("#try_parse", function() {
it("parses correctly with a valid number", function() {
expect(parser.try_parse("1,234")).should == 1234;
});
it("parses correctly with a valid number and calls the callback", function() {
var pre_result = null;
parser.try_parse("1,234", null, function(result) { pre_result = result; });
pre_result.should == 1234
});
it("falls back on the default value if the number is invalid", function() {
expect(parser.try_parse("5.")).toEqual(null);
expect(parser.try_parse("5.", 0)).toEqual(0);
});
it("falls back on the block if the number is invalid", function() {
var pre_result = null;
parser.try_parse("5.", null, function(result) { pre_result = 9 });
expect(pre_result).toEqual(9);
});
it("re-raises any unexpected errors", function() {
expect(function() { parser.try_parse({}) }).toThrow();
});
it("parses zero correctly", function() {
expect(parser.try_parse('0')).toEqual(0);
});
});
});<|fim▁end|> | {value: "33", type: "numeric"},
{value: ".", type: "decimal"}, |
<|file_name|>NetworkingActions.ts<|end_file_name|><|fim▁begin|>import { RequestUtil } from "mesosphere-shared-reactjs";
import Config from "#SRC/js/config/Config";
import getFixtureResponses from "#SRC/js/utils/getFixtureResponses";
import {
REQUEST_NETWORKING_BACKEND_CONNECTIONS_ERROR,
REQUEST_NETWORKING_BACKEND_CONNECTIONS_SUCCESS,
REQUEST_NETWORKING_BACKEND_CONNECTIONS_ONGOING,
REQUEST_NETWORKING_NODE_MEMBERSHIPS_ERROR,
REQUEST_NETWORKING_NODE_MEMBERSHIPS_SUCCESS,
REQUEST_NETWORKING_NODE_MEMBERSHIPS_ONGOING,
REQUEST_NETWORKING_VIP_DETAIL_ERROR,
REQUEST_NETWORKING_VIP_DETAIL_SUCCESS,
REQUEST_NETWORKING_VIP_DETAIL_ONGOING,
REQUEST_NETWORKING_VIP_SUMMARIES_ERROR,
REQUEST_NETWORKING_VIP_SUMMARIES_SUCCESS,
REQUEST_NETWORKING_VIP_SUMMARIES_ONGOING,
REQUEST_NETWORKING_VIPS_ERROR,
REQUEST_NETWORKING_VIPS_SUCCESS,
} from "../constants/ActionTypes";
import SDK from "PluginSDK";
const NetworkingActions = {
fetchVIPs() {
RequestUtil.json({
url: `${Config.rootUrl}${Config.networkingAPIPrefix}/vips`,
success(response) {<|fim▁hole|> },
error(xhr) {
SDK.dispatch({
type: REQUEST_NETWORKING_VIPS_ERROR,
data: RequestUtil.getErrorFromXHR(xhr),
});
},
});
},
fetchVIPDetail(protocol, vip, port) {
RequestUtil.json({
url: `${Config.rootUrl}${Config.networkingAPIPrefix}/${vip}/${protocol}/${port}`,
success(response) {
SDK.dispatch({
type: REQUEST_NETWORKING_VIP_DETAIL_SUCCESS,
data: response,
vip: `${protocol}:${vip}:${port}`,
});
},
error(xhr) {
SDK.dispatch({
type: REQUEST_NETWORKING_VIP_DETAIL_ERROR,
data: RequestUtil.getErrorFromXHR(xhr),
vip: `${protocol}:${vip}:${port}`,
});
},
hangingRequestCallback() {
SDK.dispatch({
type: REQUEST_NETWORKING_VIP_DETAIL_ONGOING,
});
},
});
},
fetchVIPBackendConnections(protocol, vip, port) {
RequestUtil.json({
url: `${Config.rootUrl}${Config.networkingAPIPrefix}/backend-connections/${vip}/${protocol}/${port}`,
success(response) {
SDK.dispatch({
type: REQUEST_NETWORKING_BACKEND_CONNECTIONS_SUCCESS,
data: response,
vip: `${protocol}:${vip}:${port}`,
});
},
error(xhr) {
SDK.dispatch({
type: REQUEST_NETWORKING_BACKEND_CONNECTIONS_ERROR,
data: RequestUtil.getErrorFromXHR(xhr),
vip: `${protocol}:${vip}:${port}`,
});
},
hangingRequestCallback() {
SDK.dispatch({
type: REQUEST_NETWORKING_BACKEND_CONNECTIONS_ONGOING,
});
},
});
},
fetchNodeMemberships() {
RequestUtil.json({
url: `${Config.rootUrl}${Config.networkingAPIPrefix}/membership`,
success(response) {
SDK.dispatch({
type: REQUEST_NETWORKING_NODE_MEMBERSHIPS_SUCCESS,
data: response.array,
});
},
error(xhr) {
SDK.dispatch({
type: REQUEST_NETWORKING_NODE_MEMBERSHIPS_ERROR,
data: RequestUtil.getErrorFromXHR(xhr),
});
},
hangingRequestCallback() {
SDK.dispatch({
type: REQUEST_NETWORKING_NODE_MEMBERSHIPS_ONGOING,
});
},
});
},
fetchVIPSummaries() {
RequestUtil.json({
url: `${Config.rootUrl}${Config.networkingAPIPrefix}/summary`,
success(response) {
SDK.dispatch({
type: REQUEST_NETWORKING_VIP_SUMMARIES_SUCCESS,
data: response.array,
});
},
error(xhr) {
SDK.dispatch({
type: REQUEST_NETWORKING_VIP_SUMMARIES_ERROR,
data: RequestUtil.getErrorFromXHR(xhr),
});
},
hangingRequestCallback() {
SDK.dispatch({
type: REQUEST_NETWORKING_VIP_SUMMARIES_ONGOING,
});
},
});
},
};
if (Config.useFixtures) {
const methodFixtureMapping = {
fetchVIPs: import(
/* vipsFixture */ "../../../../../tests/_fixtures/networking/networking-vips.json"
),
fetchVIPDetail: import(
/* vipDetailFixture */ "../../../../../tests/_fixtures/networking/networking-vip-detail.json"
),
fetchVIPBackendConnections: import(
/* backendConnectionsFixture */ "../../../../../tests/_fixtures/networking/networking-backend-connections.json"
),
fetchNodeMemberships: import(
/* nodeMembershipsFixture */ "../../../../../tests/_fixtures/networking/networking-node-memberships.json"
),
fetchVIPSummaries: import(
/* vipSummariesFixture */ "../../../../../tests/_fixtures/networking/networking-vip-summaries.json"
),
};
if (!window.actionTypes) {
window.actionTypes = {};
}
if (!window.actionTypes.NetworkingActions) {
window.actionTypes.NetworkingActions = {};
}
Promise.all(
Object.keys(methodFixtureMapping).map(
(method) => methodFixtureMapping[method]
)
).then((responses) => {
window.actionTypes.NetworkingActions = getFixtureResponses(
methodFixtureMapping,
responses
);
Object.keys(window.actionTypes.NetworkingActions).forEach((method) => {
NetworkingActions[method] = RequestUtil.stubRequest(
NetworkingActions,
"NetworkingActions",
method
);
});
});
}
export default NetworkingActions;<|fim▁end|> | SDK.dispatch({
type: REQUEST_NETWORKING_VIPS_SUCCESS,
data: response.array,
}); |
<|file_name|>AbstractExtension.java<|end_file_name|><|fim▁begin|>/*
* This file is part of Pebble.
*
* Copyright (c) 2014 by Mitchell Bösecke
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
package com.mitchellbosecke.pebble.extension;
import com.mitchellbosecke.pebble.attributes.AttributeResolver;
import com.mitchellbosecke.pebble.operator.BinaryOperator;
import com.mitchellbosecke.pebble.operator.UnaryOperator;
import com.mitchellbosecke.pebble.tokenParser.TokenParser;
import java.util.List;
import java.util.Map;
public abstract class AbstractExtension implements Extension {
@Override
public List<TokenParser> getTokenParsers() {
return null;
}
<|fim▁hole|>
@Override
public List<UnaryOperator> getUnaryOperators() {
return null;
}
@Override
public Map<String, Filter> getFilters() {
return null;
}
@Override
public Map<String, Test> getTests() {
return null;
}
@Override
public Map<String, Function> getFunctions() {
return null;
}
@Override
public Map<String, Object> getGlobalVariables() {
return null;
}
@Override
public List<NodeVisitorFactory> getNodeVisitors() {
return null;
}
@Override
public List<AttributeResolver> getAttributeResolver() {
return null;
}
}<|fim▁end|> | @Override
public List<BinaryOperator> getBinaryOperators() {
return null;
} |
<|file_name|>0007_auto_20160404_1858.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-04-04 18:58
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('snippets', '0006_snippet_last_used'),
]
operations = [
migrations.AlterModelOptions(
name='snippet',
options={'ordering': ('-updated_at',), 'verbose_name': 'snippet', 'verbose_name_plural': 'snippets'},
),
migrations.AlterField(<|fim▁hole|> field=models.CharField(blank=True, max_length=100, null=True, verbose_name='description'),
),
migrations.AlterField(
model_name='snippet',
name='slug',
field=models.SlugField(max_length=255, verbose_name='name'),
),
migrations.AlterField(
model_name='snippet',
name='updated_at',
field=models.DateTimeField(blank=True, null=True, verbose_name='updated at'),
),
migrations.AlterField(
model_name='snippet',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='snippet', to=settings.AUTH_USER_MODEL),
),
]<|fim▁end|> | model_name='snippet',
name='description', |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Copyright (c) 2012-2014 The python-semanticversion project
# This code is distributed under the two-clause BSD License.
try: # pragma: no cover
import django
from django.conf import settings
django_loaded = True
except ImportError: # pragma: no cover
django_loaded = False
<|fim▁hole|>if django_loaded: # pragma: no cover
if not settings.configured:
settings.configure(
DATABASES={
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'tests/db/test.sqlite',
}
},
INSTALLED_APPS=[
'tests.django_test_app',
],
MIDDLEWARE_CLASSES=[],
)
# https://docs.djangoproject.com/en/dev/releases/1.7/#app-loading-changes
if django.VERSION >= (1, 7):
from django.apps import apps
apps.populate(settings.INSTALLED_APPS)<|fim▁end|> | |
<|file_name|>sleeper.test.js<|end_file_name|><|fim▁begin|>import sinon from 'sinon';
import expect from 'expect';
import sleeper from '..';
describe('sleeper', () => {
it('should be a function', () => {
expect(sleeper).toEqual(expect.any(Function));
});
it('should expose the Resource constructor', () => {
expect(sleeper.Resource).toEqual(expect.any(Function));
});
it('should initialize with a URL', () => {
let url = '/api/users';
expect(() => {
sleeper(url);
}).not.toThrow();
let api = sleeper(url);
expect(api).toBeInstanceOf(sleeper.Resource);
expect(api.url).toEqual(url);
});
// CRUD server
let server, users;
beforeAll(() => {
let multi = /^\/api\/users\/?(?:\?.*)?$/,
single = /^\/api\/users\/([^/?]+)\/?(?:\?.*)?$/,
counter = 0;
server = sinon.useFakeServer();
server.autoRespond = true;
server.autoRespondAfter = 1;
users = [];
user(null, { name: 'brian' });
user(null, { name: 'betty' });
user(null, { name: 'bob' });
function reply(xhr, json) {
json = json || { success: false };
xhr.respond(
json.status || (json.success === false ? 500 : 200),
{ 'Content-Type': 'application/json' },
JSON.stringify(json)
);
}
// get(id), set(id, user), create(null, user), delete(id, false)
function user(id, update) {
if (!id) {
users.push({ id: id = ++counter + '' });
}
for (let i = users.length, u; i--;) {
u = users[i];
if (u.id === id) {
if (update) {
u = users[i] = update;
u.id = id;
}
if (update === false) {
users.splice(i, 1);
}
return u;
}
}
return false;
}
// Index
server.respondWith('GET', multi, (r) => {
reply(r, users);
});
// Create
server.respondWith('POST', multi, (r) => {
reply(r, user(null, JSON.parse(r.requestBody)));
});
// Read
server.respondWith('GET', single, (r, id) => {
reply(r, user(id) || { success: false, status: 404, message: 'Not Found' });
});
// Update
server.respondWith('PUT', single, (r, id) => {
reply(r, user(id, JSON.parse(r.requestBody)));
});
// Delete
server.respondWith('DELETE', single, (r, id) => {
let rem = user(id, false);
reply(r, { success: !!rem });
});
});
afterAll(() => {
server.restore();
});
describe('#index()', () => {
it('should issue a request to /', (done) => {
let api = sleeper('/api/users');
api.index((err, list) => {
expect(err).toEqual(null);
expect(list).toMatchObject(users);
done();
});
});
});
describe('#get(id)', () => {
it('should issue a GET request to /:id', (done) => {
let api = sleeper('/api/users');
api.get(users[0].id, (err, user) => {
expect(err).toEqual(null);
expect(user).toMatchObject(users[0]);
done();
});
});
it('should return an error if status>=400', (done) => {
let api = sleeper('/api/users');
api.get('does-not-exist', (err, user) => {
expect(err).toEqual('Not Found');
//expect(user).toEqual(null);
done();
});
});
it('should return an error property if messageProp is set', (done) => {
let api = sleeper('/api/users');
api.messageProp = 'message';
api.get('also-does-not-exist', (err, user) => {
expect(err).toEqual('Not Found');
//expect(user).toEqual(null);
done();
});
});
});
describe('#post(obj)', () => {
it('should issue a form-encoded POST request to /', (done) => {
let api = sleeper('/api/users'),
newUser = {
name: 'billiam'
};
api.post(newUser, (err, user) => {
expect(err).toEqual(null);
// simpler
newUser.id = users[users.length - 1].id;
expect(user).toMatchObject(newUser);
done();
});
});
});
describe('#put([id, ] obj)', () => {
it('should issue a JSON-encoded PUT request to /:id', (done) => {
let api = sleeper('/api/users'),
updatedUser = {};
updatedUser = Object.assign({}, users[0], {
name: 'sheryll',
concern: 'Who is this sheryll?'
});
api.put(updatedUser.id, updatedUser, (err, user) => {
expect(err).toEqual(null);
expect(user).toMatchObject(updatedUser);
done();
});
});
it('should use an `id` property for an object via #idKey', (done) => {
let api = sleeper('/api/users'),
updatedUser = {
id: users[1].id,
name: 'benny',
associations: ['The Jets']
};
api.put(updatedUser, (err, user) => {
expect(err).toEqual(null);
expect(user).toMatchObject(updatedUser);
done();
});
});
});
describe('#del(id)', () => {
it('should issue a DELETE request to /:id', (done) => {
let api = sleeper('/api/users'),
id = users[1].id;
api.del(id, (err, info) => {
expect(err).toEqual(null);
expect(info).toMatchObject({ success: true });
// make sure Benny's really gone:
expect(users[1].id).not.toEqual(id);
done();
});
});
});
describe('#param(key [, value])', () => {
it('should set a value when given (key, value)', () => {
let api = sleeper('/api/users');
expect(api.query).toEqual({});
api.param('some_key', 'some_value');
expect(api.query).toEqual({
some_key: 'some_value'
});
});
it('should add values from an object when given (hash)', () => {
let api = sleeper('/api/users'),
vals = {
k1: 'v1',
k2: 'v2'
};
api.param('k', 'v');
api.param(vals);
expect(api.query).toEqual({
k: 'v',
k1: 'v1',
k2: 'v2'
});
});
it('should return the value for a key when given (key)', () => {
let api = sleeper('/api/users');
api.query = {
k1: 'v1',
k2: 'v2'
};
expect(api.param('k1')).toEqual('v1');
expect(api.param('k2')).toEqual('v2');
expect(api.param('foo')).toEqual(undefined);
});
it('should send params on each request', (done) => {
let api = sleeper('/api/users');
api.param('auth_token', 'asdf1234');
api.index((err, list) => {
expect(server.requests[server.requests.length - 1].url).toMatch(/\?auth_token=asdf1234$/g);
done();
});
});
});
describe('#create(obj)', () => {
it('should be an alias of post()', () => {
let api = sleeper();
expect(api.create).toEqual(api.post);
});
});
describe('#read(id)', () => {
it('should be an alias of get()', () => {
let api = sleeper();
expect(api.get).toEqual(api.read);
});
});
<|fim▁hole|> let api = sleeper();
expect(api.update).toEqual(api.put);
});
});
describe('#delete(id)', () => {
it('should be an alias of del()', () => {
let api = sleeper();
expect(api.delete).toEqual(api.del);
});
});
describe('#remove(id)', () => {
it('should be an alias of del()', () => {
let api = sleeper();
expect(api.remove).toEqual(api.del);
});
});
});<|fim▁end|> |
describe('#update(id, obj)', () => {
it('should be an alias of put()', () => { |
<|file_name|>AboutBoxAction.java<|end_file_name|><|fim▁begin|>/*
* DBeaver - Universal Database Manager
* Copyright (C) 2010-2022 DBeaver Corp and others
*
* Licensed under the Apache License, Version 2.0 (the "License");<|fim▁hole|> *
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jkiss.dbeaver.ui.app.standalone.about;
import org.eclipse.jface.action.IAction;
import org.eclipse.ui.IWorkbenchWindow;
import org.eclipse.ui.actions.ActionDelegate;
public class AboutBoxAction extends ActionDelegate
{
private IWorkbenchWindow window;
public AboutBoxAction(IWorkbenchWindow window) {
this.window = window;
}
@Override
public void run(IAction action)
{
// new AboutDialog(window.getShell()).open();
AboutBoxDialog dialog = new AboutBoxDialog(window.getShell());
dialog.open();
}
}<|fim▁end|> | * you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0 |
<|file_name|>tcpstream.cc<|end_file_name|><|fim▁begin|>/*
* "$Id: tcpstream.cc,v 1.11 2007-03-01 01:09:39 rmf24 Exp $"
*
* TCP-on-UDP (tou) network interface for RetroShare.
*
* Copyright 2004-2006 by Robert Fernie.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License Version 2 as published by the Free Software Foundation.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
* USA.
*
* Please report all bugs and problems to "[email protected]".
*
*/
#include <stdlib.h>
#include <string.h>
#include "tcpstream.h"
#include <iostream>
#include <iomanip>
#include <assert.h>
#include <errno.h>
#include <math.h>
#include <limits.h>
#include <sys/time.h>
#include <time.h>
/* Debugging for STATE change, and Startup SYNs */
#include "util/rsdebug.h"
#include "util/rsstring.h"
#include "util/rsrandom.h"
static struct RsLog::logInfo rstcpstreamzoneInfo = {RsLog::Default, "rstcpstream"};
#define rstcpstreamzone &rstcpstreamzoneInfo
/*
* #define DEBUG_TCP_STREAM 1
* #define DEBUG_TCP_STREAM_RETRANS 1
* #define DEBUG_TCP_STREAM_CLOSE 1
*/
//#define DEBUG_TCP_STREAM_RETRANS 1
//#define DEBUG_TCP_STREAM_CLOSE 1
/*
*#define DEBUG_TCP_STREAM_EXTRA 1
*/
/*
* #define TCP_NO_PARTIAL_READ 1
*/
#ifdef DEBUG_TCP_STREAM
int checkData(uint8 *data, int size, int idx);
int setupBinaryCheck(std::string fname);
#endif
static const uint32 kMaxQueueSize = 300; // Was 100, which means max packet size of 100k (smaller than max packet size).
static const uint32 kMaxPktRetransmit = 10;
static const uint32 kMaxSynPktRetransmit = 100; // 100 => 200secs = over 3 minutes startup
static const int TCP_STD_TTL = 64;
static const int TCP_DEFAULT_FIREWALL_TTL = 4;
static const double RTT_ALPHA = 0.875;
int dumpPacket(std::ostream &out, unsigned char *pkt, uint32_t size);
// platform independent fractional timestamp.
static double getCurrentTS();
TcpStream::TcpStream(UdpSubReceiver *lyr)
: tcpMtx("TcpStream"), inSize(0), outSizeRead(0), outSizeNet(0),
state(TCP_CLOSED),
inStreamActive(false),
outStreamActive(false),
outSeqno(0), outAcked(0), outWinSize(0),
inAckno(0), inWinSize(0),
maxWinSize(TCP_MAX_WIN),
keepAliveTimeout(TCP_ALIVE_TIMEOUT),
retransTimerOn(false),
retransTimeout(TCP_RETRANS_TIMEOUT),
retransTimerTs(0),
keepAliveTimer(0),
lastIncomingPkt(0),
lastSentAck(0),
lastSentWinSize(0),
initOurSeqno(0),
initPeerSeqno(0),
lastWriteTF(0),lastReadTF(0),
wcount(0), rcount(0),
errorState(0),
/* retranmission variables - init to large */
rtt_est(TCP_RETRANS_TIMEOUT),
rtt_dev(0),
congestThreshold(TCP_MAX_WIN),
congestWinSize(MAX_SEG),
congestUpdate(0),
ttl(0),
mTTL_period(0),
mTTL_start(0),
mTTL_end(0),
peerKnown(false),
udp(lyr)
{
sockaddr_clear(&peeraddr);
return;
}
/* Stream Control! */
int TcpStream::connect(const struct sockaddr_in &raddr, uint32_t conn_period)
{
tcpMtx.lock(); /********** LOCK MUTEX *********/
setRemoteAddress(raddr);
/* check state */
if (state != TCP_CLOSED)
{
if (state == TCP_ESTABLISHED)
{
tcpMtx.unlock(); /******** UNLOCK MUTEX *********/
return 0;
}
else if (state < TCP_ESTABLISHED)
{
errorState = EAGAIN;
}
else
{
// major issues!
errorState = EFAULT;
}
tcpMtx.unlock(); /******** UNLOCK MUTEX *********/
return -1;
}
/* setup Seqnos */
outSeqno = genSequenceNo();
initOurSeqno = outSeqno;
outAcked = outSeqno; /* min - 1 expected */
inWinSize = maxWinSize;
congestThreshold = TCP_MAX_WIN;
congestWinSize = MAX_SEG;
congestUpdate = outAcked + congestWinSize;
/* Init Connection */
/* send syn packet */
TcpPacket *pkt = new TcpPacket();
pkt -> setSyn();
#ifdef DEBUG_TCP_STREAM
std::cerr << "TcpStream::connect() Send Init Pkt" << std::endl;
#endif
/* ********* SLOW START *************
* As this is the only place where a syn
* is sent ..... we switch the ttl to 0,
* and increment it as we retransmit the packet....
* This should help the firewalls along.
*/
setTTL(1);
mTTL_start = getCurrentTS();
mTTL_period = conn_period;
mTTL_end = mTTL_start + mTTL_period;
toSend(pkt);
/* change state */
state = TCP_SYN_SENT;
errorState = EAGAIN;
#ifdef DEBUG_TCP_STREAM
std::cerr << "TcpStream STATE -> TCP_SYN_SENT" << std::endl;
#endif
{
rslog(RSL_WARNING,rstcpstreamzone,"TcpStream::state => TCP_SYN_SENT (Connect)");
}
tcpMtx.unlock(); /******** UNLOCK MUTEX *********/
return -1;
}
int TcpStream::listenfor(const struct sockaddr_in &raddr)
{
tcpMtx.lock(); /********** LOCK MUTEX *********/
setRemoteAddress(raddr);
/* check state */
if (state != TCP_CLOSED)
{
if (state == TCP_ESTABLISHED)
{
tcpMtx.unlock(); /******** UNLOCK MUTEX *********/
return 0;
}
else if (state < TCP_ESTABLISHED)
{
errorState = EAGAIN;
}
else
{
// major issues!
errorState = EFAULT;
}
tcpMtx.unlock(); /******** UNLOCK MUTEX *********/
return -1;
}
errorState = EAGAIN;
tcpMtx.unlock(); /******** UNLOCK MUTEX *********/
return -1;
}
/* Stream Control! */
int TcpStream::close()
{
tcpMtx.lock(); /********** LOCK MUTEX *********/
cleanup();
tcpMtx.unlock(); /******** UNLOCK MUTEX *********/
return 0;
}
int TcpStream::closeWrite()
{
tcpMtx.lock(); /********** LOCK MUTEX *********/
/* check state */
/* will always close socket.... */
/* if in TCP_ESTABLISHED....
* -> to state: TCP_FIN_WAIT_1
* and shutdown outward stream.
*/
/* if in CLOSE_WAIT....
* -> to state: TCP_LAST_ACK
* and shutdown outward stream.
* do this one first!.
*/
outStreamActive = false;
if (state == TCP_CLOSE_WAIT)
{
/* don't think we need to be
* graceful at this point...
* connection already closed by other end.
* XXX might fix later with scheme
*
* flag stream closed, and when outqueue
* emptied then fin will be sent.
*/
/* do nothing */
}
if (state == TCP_ESTABLISHED)
{
/* fire off the damned thing. */
/* by changing state */
/* again this is handled by internals
* the flag however indicates that
* no more data can be send,
* and once the queue empties
* the FIN will be sent.
*/
}
if (state == TCP_CLOSED)
{
#ifdef DEBUG_TCP_STREAM
std::cerr << "TcpStream::close() Flag Set" << std::endl;
#endif
tcpMtx.unlock(); /******** UNLOCK MUTEX *********/
return 0;
}
#ifdef DEBUG_TCP_STREAM
std::cerr << "TcpStream::close() pending" << std::endl;
#endif
errorState = EAGAIN;
tcpMtx.unlock(); /******** UNLOCK MUTEX *********/
return -1;
}
bool TcpStream::isConnected()
{
tcpMtx.lock(); /********** LOCK MUTEX *********/
bool isConn = (state == TCP_ESTABLISHED);
tcpMtx.unlock(); /******** UNLOCK MUTEX *********/
return isConn;
}
int TcpStream::status(std::ostream &out)
{
tcpMtx.lock(); /********** LOCK MUTEX *********/
int s = status_locked(out);
tcpMtx.unlock(); /******** UNLOCK MUTEX *********/
return s;
}
int TcpStream::status_locked(std::ostream &out)
{
int tmpstate = state;
// can leave the timestamp here as time()... rough but okay.
out << "TcpStream::status @ (" << time(NULL) << ")" << std::endl;
out << "TcpStream::state = " << (int) state << std::endl;
out << std::endl;
out << "writeBuffer: " << inSize << " + MAX_SEG * " << inQueue.size();
out << " bytes Queued for transmission" << std::endl;
out << "readBuffer: " << outSizeRead << " + MAX_SEG * ";
out << outQueue.size() << " + " << outSizeNet;
out << " incoming bytes waiting" << std::endl;
out << std::endl;
out << "inPkts: " << inPkt.size() << " packets waiting for processing";
out << std::endl;
out << "outPkts: " << outPkt.size() << " packets waiting for acks";
out << std::endl;
out << "us -> peer: nextSeqno: " << outSeqno << " lastAcked: " << outAcked;
out << " winsize: " << outWinSize;
out << std::endl;
out << "peer -> us: Expected SeqNo: " << inAckno;
out << " winsize: " << inWinSize;
out << std::endl;
out << std::endl;
return tmpstate;
}
int TcpStream::write_allowed()
{
tcpMtx.lock(); /********** LOCK MUTEX *********/
int ret = 1;
if (state == TCP_CLOSED)
{
errorState = EBADF;
ret = -1;
}
else if (state < TCP_ESTABLISHED)
{
errorState = EAGAIN;
ret = -1;
}
else if (!outStreamActive)
{
errorState = EBADF;
ret = -1;
}
if (ret < 1)
{
tcpMtx.unlock(); /******** UNLOCK MUTEX *********/
return ret;
}
int maxwrite = (kMaxQueueSize - inQueue.size()) * MAX_SEG;
tcpMtx.unlock(); /******** UNLOCK MUTEX *********/
return maxwrite;
}
int TcpStream::read_pending()
{
tcpMtx.lock(); /********** LOCK MUTEX *********/
/* error should be detected next time */
int maxread = int_read_pending();
if (state == TCP_CLOSED)
{
errorState = EBADF;
maxread = -1;
}
else if (state < TCP_ESTABLISHED)
{
errorState = EAGAIN;
maxread = -1;
}
tcpMtx.unlock(); /******** UNLOCK MUTEX *********/
return maxread;
}
/* INTERNAL */
int TcpStream::int_read_pending()
{
return outSizeRead + outQueue.size() * MAX_SEG + outSizeNet;
}
/* stream Interface */
int TcpStream::write(char *dta, int size) /* write -> pkt -> net */
{
tcpMtx.lock(); /********** LOCK MUTEX *********/
int ret = 1; /* initial error checking */
#ifdef DEBUG_TCP_STREAM_EXTRA
static uint32 TMPtotalwrite = 0;
#endif
if (state == TCP_CLOSED)
{
#ifdef DEBUG_TCP_STREAM
std::cerr << "TcpStream::write() Error TCP_CLOSED" << std::endl;
#endif
errorState = EBADF;
ret = -1;
}
else if (state < TCP_ESTABLISHED)
{
#ifdef DEBUG_TCP_STREAM
std::cerr << "TcpStream::write() Error TCP Not Established" << std::endl;
#endif
errorState = EAGAIN;
ret = -1;
}
else if (inQueue.size() > kMaxQueueSize)
{
#ifdef DEBUG_TCP_STREAM
std::cerr << "TcpStream::write() Error EAGAIN" << std::endl;
#endif
errorState = EAGAIN;
ret = -1;
}
else if (!outStreamActive)
{
#ifdef DEBUG_TCP_STREAM
std::cerr << "TcpStream::write() Error TCP_CLOSED" << std::endl;
#endif
errorState = EBADF;
ret = -1;
}
if (ret < 1) /* check for initial error */
{
tcpMtx.unlock(); /******** UNLOCK MUTEX *********/
return ret;
}
#ifdef DEBUG_TCP_STREAM_EXTRA
std::cerr << "TcpStream::write() = Will Succeed " << size << std::endl;
std::cerr << "TcpStream::write() Write Start: " << TMPtotalwrite << std::endl;
std::cerr << printPktOffset(TMPtotalwrite, dta, size) << std::endl;
TMPtotalwrite += size;
#endif
if (size + inSize < MAX_SEG)
{
#ifdef DEBUG_TCP_STREAM_EXTRA
std::cerr << "TcpStream::write() Add Itty Bit" << std::endl;
std::cerr << "TcpStream::write() inData: " << (void *) inData;
std::cerr << " inSize: " << inSize << " dta: " << (void *) dta;
std::cerr << " size: " << size << " dest: " << (void *) &(inData[inSize]);
std::cerr << std::endl;
#endif
#ifdef DEBUG_TCP_STREAM
std::cerr << "TcpStream::write() = " << size << std::endl;
#endif
memcpy((void *) &(inData[inSize]), dta, size);
inSize += size;
//std::cerr << "Small Packet - write to net:" << std::endl;
//std::cerr << printPkt(dta, size) << std::endl;
tcpMtx.unlock(); /******** UNLOCK MUTEX *********/
return size;
}
/* otherwise must construct a dataBuffer.
*/
#ifdef DEBUG_TCP_STREAM_EXTRA
std::cerr << "TcpStream::write() filling 1 dataBuffer" << std::endl;
std::cerr << "TcpStream::write() from inData(" << inSize << ")" << std::endl;
std::cerr << "TcpStream::write() + dta(" << MAX_SEG - inSize;
std::cerr << "/" << size << ")" << std::endl;
#endif
/* first create 1. */
dataBuffer *db = new dataBuffer;
memcpy((void *) db->data, (void *) inData, inSize);
int remSize = size;
memcpy((void *) &(db->data[inSize]), dta, MAX_SEG - inSize);
inQueue.push_back(db);
remSize -= (MAX_SEG - inSize);
#ifdef DEBUG_TCP_STREAM_EXTRA
std::cerr << "TcpStream::write() remaining " << remSize << " bytes to load" << std::endl;
#endif
while(remSize >= MAX_SEG)
{
#ifdef DEBUG_TCP_STREAM_EXTRA
std::cerr << "TcpStream::write() filling whole dataBuffer" << std::endl;
std::cerr << "TcpStream::write() from dta[" << size-remSize << "]" << std::endl;
#endif
db = new dataBuffer;
memcpy((void *) db->data, (void *) &(dta[size-remSize]), MAX_SEG);
inQueue.push_back(db);
remSize -= MAX_SEG;
}
#ifdef DEBUG_TCP_STREAM
std::cerr << "TcpStream::write() = " << size << std::endl;
#endif
if (remSize > 0)
{
#ifdef DEBUG_TCP_STREAM_EXTRA
std::cerr << "TcpStream::write() putting last bit in inData" << std::endl;
std::cerr << "TcpStream::write() from dta[" << size-remSize << "] size: ";
std::cerr << remSize << std::endl;
#endif
memcpy((void *) inData, (void *) &(dta[size-remSize]), remSize);
inSize = remSize;
}
else
{
#ifdef DEBUG_TCP_STREAM_EXTRA
std::cerr << "TcpStream::write() Data fitted exactly in dataBuffer!" << std::endl;
#endif
inSize = 0;
}
tcpMtx.unlock(); /******** UNLOCK MUTEX *********/
return size;
}
int TcpStream::read(char *dta, int size) /* net -> pkt -> read */
{
tcpMtx.lock(); /********** LOCK MUTEX *********/
#ifdef DEBUG_TCP_STREAM_EXTRA
static uint32 TMPtotalread = 0;
#endif
/* max available data is
* outDataRead + outQueue + outDataNet
*/
int maxread = outSizeRead + outQueue.size() * MAX_SEG + outSizeNet;
int ret = 1; /* used only for initial errors */
if (state == TCP_CLOSED)
{
errorState = EBADF;
ret = -1;
}
else if (state < TCP_ESTABLISHED)
{
errorState = EAGAIN;
ret = -1;
}
else if ((!inStreamActive) && (maxread == 0))
{
// finished stream.
ret = 0;
}
else if (maxread == 0)
{
/* must wait for more data */
errorState = EAGAIN;
ret = -1;
}
if (ret < 1) /* if ret has been changed */
{
tcpMtx.unlock(); /******** UNLOCK MUTEX *********/
return ret;
}
if (maxread < size)
{
#ifdef TCP_NO_PARTIAL_READ
if (inStreamActive)
{
#ifdef DEBUG_TCP_STREAM
std::cerr << "TcpStream::read() No Partial Read! ";
std::cerr << "Can only supply " << maxread << " of ";
std::cerr << size;
std::cerr << std::endl;
#endif
errorState = EAGAIN;
tcpMtx.unlock(); /******** UNLOCK MUTEX *********/
return -1;
}
#endif /* TCP_NO_PARTIAL_READ */
size = maxread;
}
/* if less than outDataRead size */
if (((unsigned) (size) < outSizeRead) && (outSizeRead))
{
#ifdef DEBUG_TCP_STREAM_EXTRA
std::cerr << "TcpStream::read() Add Itty Bit" << std::endl;
std::cerr << "TcpStream::read() outSizeRead: " << outSizeRead;
std::cerr << " size: " << size << " remaining: " << outSizeRead - size;
std::cerr << std::endl;
#endif
memcpy(dta,(void *) outDataRead, size);
memmove((void *) outDataRead,
(void *) &(outDataRead[size]), outSizeRead - (size));
outSizeRead -= size;
#ifdef DEBUG_TCP_STREAM_EXTRA
std::cerr << "TcpStream::read() = Succeeded " << size << std::endl;
std::cerr << "TcpStream::read() Read Start: " << TMPtotalread << std::endl;
std::cerr << printPktOffset(TMPtotalread, dta, size) << std::endl;
#endif
#ifdef DEBUG_TCP_STREAM_EXTRA
checkData((uint8 *) dta, size, TMPtotalread);
TMPtotalread += size;
#endif
/* can allow more in! - update inWinSize */
UpdateInWinSize();
tcpMtx.unlock(); /******** UNLOCK MUTEX *********/
return size;
}
/* move the whole of outDataRead. */
if (outSizeRead)
{
#ifdef DEBUG_TCP_STREAM_EXTRA
std::cerr << "TcpStream::read() Move All outSizeRead" << std::endl;
std::cerr << "TcpStream::read() outSizeRead: " << outSizeRead;
std::cerr << " size: " << size;
std::cerr << std::endl;
#endif
memcpy(dta,(void *) outDataRead, outSizeRead);
}
int remSize = size - outSizeRead;
outSizeRead = 0;
#ifdef DEBUG_TCP_STREAM_EXTRA
std::cerr << "TcpStream::read() remaining size: " << remSize << std::endl;
#endif
while((outQueue.size() > 0) && (remSize > 0))
{
dataBuffer *db = outQueue.front();
outQueue.pop_front(); /* remove */
#ifdef DEBUG_TCP_STREAM_EXTRA
std::cerr << "TcpStream::read() Taking Data from outQueue" << std::endl;
#endif
/* load into outDataRead */
if (remSize < MAX_SEG)
{
#ifdef DEBUG_TCP_STREAM_EXTRA
std::cerr << "TcpStream::read() Partially using Segment" << std::endl;
std::cerr << "TcpStream::read() moving: " << remSize << " to dta @: " << size-remSize;
std::cerr << std::endl;
std::cerr << "TcpStream::read() rest to outDataRead: " << MAX_SEG - remSize;
std::cerr << std::endl;
#endif
memcpy((void *) &(dta[(size)-remSize]), (void *) db->data, remSize);
memcpy((void *) outDataRead, (void *) &(db->data[remSize]), MAX_SEG - remSize);
outSizeRead = MAX_SEG - remSize;
delete db;
#ifdef DEBUG_TCP_STREAM_EXTRA
std::cerr << "TcpStream::read() = Succeeded " << size << std::endl;
std::cerr << "TcpStream::read() Read Start: " << TMPtotalread << std::endl;
std::cerr << printPktOffset(TMPtotalread, dta, size) << std::endl;
#endif
#ifdef DEBUG_TCP_STREAM_EXTRA
checkData((uint8 *) dta, size, TMPtotalread);
TMPtotalread += size;
#endif
/* can allow more in! - update inWinSize */
UpdateInWinSize();
tcpMtx.unlock(); /******** UNLOCK MUTEX *********/
return size;
}
#ifdef DEBUG_TCP_STREAM_EXTRA
std::cerr << "TcpStream::read() Move Whole Segment to dta @ " << size-remSize << std::endl;
#endif
/* else copy whole segment */
memcpy((void *) &(dta[(size)-remSize]), (void *) db->data, MAX_SEG);
remSize -= MAX_SEG;
delete db;
}
/* assumes that outSizeNet >= remSize due to initial
* constraint
*/
if ((remSize > 0))
{
#ifdef DEBUG_TCP_STREAM_EXTRA
std::cerr << "TcpStream::read() Using up : " << remSize;
std::cerr << " last Bytes, leaving: " << outSizeNet - remSize << std::endl;
#endif
memcpy((void *) &(dta[(size)-remSize]),(void *) outDataNet, remSize);
outSizeNet -= remSize;
if (outSizeNet > 0)
{
/* move to the outDataRead */
memcpy((void *) outDataRead,(void *) &(outDataNet[remSize]), outSizeNet);
outSizeRead = outSizeNet;
outSizeNet = 0;
#ifdef DEBUG_TCP_STREAM_EXTRA
std::cerr << "TcpStream::read() moving last of outSizeNet to outSizeRead: " << outSizeRead;
std::cerr << std::endl;
#endif
}
#ifdef DEBUG_TCP_STREAM_EXTRA
std::cerr << "TcpStream::read() = Succeeded " << size << std::endl;
std::cerr << "TcpStream::read() Read Start: " << TMPtotalread << std::endl;
std::cerr << printPktOffset(TMPtotalread, dta, size) << std::endl;
#endif
#ifdef DEBUG_TCP_STREAM_EXTRA
checkData((uint8 *) dta, size, TMPtotalread);
TMPtotalread += size;
#endif
/* can allow more in! - update inWinSize */
UpdateInWinSize();
tcpMtx.unlock(); /******** UNLOCK MUTEX *********/
return size;
}
#ifdef DEBUG_TCP_STREAM_EXTRA
std::cerr << "TcpStream::read() = Succeeded " << size << std::endl;
std::cerr << "TcpStream::read() Read Start: " << TMPtotalread << std::endl;
std::cerr << printPktOffset(TMPtotalread, dta, size) << std::endl;
#endif
#ifdef DEBUG_TCP_STREAM_EXTRA
checkData((uint8 *) dta, size, TMPtotalread);
TMPtotalread += size;
#endif
/* can allow more in! - update inWinSize */
UpdateInWinSize();
tcpMtx.unlock(); /******** UNLOCK MUTEX *********/
return size;
}
/* Callback from lower Layers */
void TcpStream::recvPkt(void *data, int size)
{
#ifdef DEBUG_TCP_STREAM
std::cerr << "TcpStream::recvPkt()";
std::cerr << std::endl;
#endif
tcpMtx.lock(); /********** LOCK MUTEX *********/
uint8 *input = (uint8 *) data;
#ifdef DEBUG_TCP_STREAM
std::cerr << "TcpStream::recvPkt() Past Lock!";
std::cerr << std::endl;
#endif
#ifdef DEBUG_TCP_STREAM
if (state > TCP_SYN_RCVD)
{
int availRead = outSizeRead + outQueue.size() * MAX_SEG + outSizeNet;
std::cerr << "TcpStream::recvPkt() CC: ";
std::cerr << " iWS: " << inWinSize;
std::cerr << " aRead: " << availRead;
std::cerr << " iAck: " << inAckno;
std::cerr << std::endl;
}
else
{
std::cerr << "TcpStream::recv() Not Connected";
std::cerr << std::endl;
}
#endif
#ifdef DEBUG_TCP_STREAM
std::cerr << "TcpStream::recv() ReadPkt(" << size << ")" << std::endl;
//std::cerr << printPkt(input, size);
//std::cerr << std::endl;
#endif
TcpPacket *pkt = new TcpPacket();
if (0 < pkt -> readPacket(input, size))
{
lastIncomingPkt = getCurrentTS();
handleIncoming(pkt);
}
else
{
#ifdef DEBUG_TCP_STREAM
std::cerr << "TcpStream::recv() Bad Packet Deleting!";
std::cerr << std::endl;
#endif
delete pkt;
}
tcpMtx.unlock(); /******** UNLOCK MUTEX *********/
return;
}
int TcpStream::tick()
{
tcpMtx.lock(); /********** LOCK MUTEX *********/
//std::cerr << "TcpStream::tick()" << std::endl;
recv_check(); /* recv is async */
send();
tcpMtx.unlock(); /******** UNLOCK MUTEX *********/
return 1;
}
/* Copy the peer's address into raddr if it is known.
 * Returns true when the peer address has been set. */
bool TcpStream::getRemoteAddress(struct sockaddr_in &raddr)
{
	tcpMtx.lock();   /********** LOCK MUTEX *********/
	const bool known = peerKnown;
	if (known)
	{
		raddr = peeraddr;
	}
	tcpMtx.unlock(); /******** UNLOCK MUTEX *********/
	return known;
}
/* Thread-safe accessor: returns the current TCP state constant. */
uint8 TcpStream::TcpState()
{
	tcpMtx.lock();   /********** LOCK MUTEX *********/
	const uint8 snapshot = state;
	tcpMtx.unlock(); /******** UNLOCK MUTEX *********/
	return snapshot;
}
/* Thread-safe accessor: returns the stored error state code. */
int TcpStream::TcpErrorState()
{
	tcpMtx.lock();   /********** LOCK MUTEX *********/
	const int snapshot = errorState;
	tcpMtx.unlock(); /******** UNLOCK MUTEX *********/
	return snapshot;
}
/********************* SOME EXPOSED DEBUGGING FNS ******************/
/* number of consecutive idle calls before widle()/ridle() report true */
static int ilevel = 100;
/* Write-side idle detector.  Returns true only after the total bytes
 * written (int_wbytes()) has been unchanged - with empty write buffers -
 * for more than 'ilevel' consecutive calls.  First-ever call just
 * initialises the baseline and returns false.
 * NOTE(review): the init test (!lastWriteTF) also fires whenever the
 * byte counter is still 0 - presumably acceptable at startup; confirm.
 */
bool TcpStream::widle()
{
tcpMtx.lock(); /********** LOCK MUTEX *********/
/* init */
if (!lastWriteTF)
{
lastWriteTF = int_wbytes();
tcpMtx.unlock(); /******** UNLOCK MUTEX *********/
return false;
}
/* idle = no new bytes written AND nothing queued for sending */
if ((lastWriteTF == int_wbytes()) && (inSize == 0) && inQueue.empty())
{
wcount++;
if (wcount > ilevel)
{
tcpMtx.unlock(); /******** UNLOCK MUTEX *********/
return true;
}
tcpMtx.unlock(); /******** UNLOCK MUTEX *********/
return false;
}
/* activity seen: reset the idle counter and the baseline */
wcount = 0;
lastWriteTF = int_wbytes();
tcpMtx.unlock(); /******** UNLOCK MUTEX *********/
return false;
}
/* Read-side idle detector (mirror of widle()).  Returns true only after
 * the total bytes read (int_rbytes()) has been unchanged - with empty
 * read buffers - for more than 'ilevel' consecutive calls.  First-ever
 * call just initialises the baseline and returns false.
 */
bool TcpStream::ridle()
{
tcpMtx.lock(); /********** LOCK MUTEX *********/
/* init */
if (!lastReadTF)
{
lastReadTF = int_rbytes();
tcpMtx.unlock(); /******** UNLOCK MUTEX *********/
return false;
}
/* idle = no new bytes read AND no data pending for the application */
if ((lastReadTF == int_rbytes()) && (outSizeRead + outQueue.size() + outSizeNet== 0))
{
rcount++;
if (rcount > ilevel)
{
tcpMtx.unlock(); /******** UNLOCK MUTEX *********/
return true;
}
tcpMtx.unlock(); /******** UNLOCK MUTEX *********/
return false;
}
/* activity seen: reset the idle counter and the baseline */
rcount = 0;
lastReadTF = int_rbytes();
tcpMtx.unlock(); /******** UNLOCK MUTEX *********/
return false;
}
/* Thread-safe accessor: total bytes written to the stream. */
uint32 TcpStream::wbytes()
{
	tcpMtx.lock();   /********** LOCK MUTEX *********/
	const uint32 total = int_wbytes();
	tcpMtx.unlock(); /******** UNLOCK MUTEX *********/
	return total;
}
/* Thread-safe accessor: total bytes read from the stream. */
uint32 TcpStream::rbytes()
{
	tcpMtx.lock();   /********** LOCK MUTEX *********/
	const uint32 total = int_rbytes();
	tcpMtx.unlock(); /******** UNLOCK MUTEX *********/
	return total;
}
/********************* ALL BELOW HERE IS INTERNAL ******************
 ******************* AND ALWAYS PROTECTED BY A MUTEX ***************/
/* Watchdog run from tick(): if the connection is established (past
 * SYN_RCVD) but nothing has arrived for kNoPktTimeout seconds, tear
 * the whole stream down (state -> TCP_CLOSED + cleanup()).
 * Always returns 1.
 */
int TcpStream::recv_check()
{
double cts = getCurrentTS(); // fractional seconds.
#ifdef DEBUG_TCP_STREAM
if (state > TCP_SYN_RCVD)
{
int availRead = outSizeRead + outQueue.size() * MAX_SEG + outSizeNet;
std::cerr << "TcpStream::recv_check() CC: ";
std::cerr << " iWS: " << inWinSize;
std::cerr << " aRead: " << availRead;
std::cerr << " iAck: " << inAckno;
std::cerr << std::endl;
}
else
{
std::cerr << "TcpStream::recv_check() Not Connected";
std::cerr << std::endl;
}
#endif
// make sure we've rcvd something!
if ((state > TCP_SYN_RCVD) &&
(cts - lastIncomingPkt > kNoPktTimeout))
{
/* shut it all down */
/* this period should be equivalent
 * to the firewall timeouts ???
 *
 * for max efficiency
 */
#ifdef DEBUG_TCP_STREAM_CLOSE
std::cerr << "TcpStream::recv_check() state = CLOSED (NoPktTimeout)";
std::cerr << std::endl;
dumpstate_locked(std::cerr);
#endif
rslog(RSL_WARNING, rstcpstreamzone, "TcpStream::state => TCP_CLOSED (kNoPktTimeout)");
outStreamActive = false;
inStreamActive = false;
state = TCP_CLOSED;
cleanup();
}
return 1;
}
/* Unconditionally shut the stream down: mark both directions closed,
 * reset the TTL, and free every queued buffer and packet in both the
 * write path (inSize/inQueue/outPkt) and the read path
 * (outSizeRead/outSizeNet/outQueue/inPkt).  Always returns 1. */
int TcpStream::cleanup()
{
	// This shuts it all down! no matter what.
	rslog(RSL_WARNING, rstcpstreamzone, "TcpStream::cleanup() state = TCP_CLOSED");

	outStreamActive = false;
	inStreamActive = false;
	state = TCP_CLOSED;
#ifdef DEBUG_TCP_STREAM
	std::cerr << "TcpStream STATE -> TCP_CLOSED" << std::endl;
#endif
	//peerKnown = false; //??? NOT SURE -> for a rapid reconnetion this might be key??

	/* reset TTL */
	setTTL(TCP_STD_TTL);

	/* drain the write-side buffers and unacknowledged packets */
	inSize = 0;
	while (!inQueue.empty())
	{
		dataBuffer *db = inQueue.front();
		inQueue.pop_front();
		delete db;
	}
	while (!outPkt.empty())
	{
		TcpPacket *pkt = outPkt.front();
		outPkt.pop_front();
		delete pkt;
	}

	/* drain the read-side buffers and pending packets */
	outSizeRead = 0;
	outSizeNet = 0;
	while (!outQueue.empty())
	{
		dataBuffer *db = outQueue.front();
		outQueue.pop_front();
		delete db;
	}
	while (!inPkt.empty())
	{
		TcpPacket *pkt = inPkt.front();
		inPkt.pop_front();
		delete pkt;
	}
	return 1;
}
/* Dispatch an incoming, already-parsed packet according to the current
 * connection state (classic TCP state machine).  Ownership of pkt
 * passes to the incoming_*() handler for every state that returns;
 * only the TCP_TIMED_WAIT case falls through to the trailing delete.
 */
int TcpStream::handleIncoming(TcpPacket *pkt)
{
#ifdef DEBUG_TCP_STREAM
std::cerr << "TcpStream::handleIncoming()" << std::endl;
#endif
switch(state)
{
case TCP_CLOSED:
case TCP_LISTEN:
/* if receive SYN
 * -> respond SYN/ACK
 * To State: SYN_RCVD
 *
 * else Discard.
 */
return incoming_Closed(pkt);
break;
case TCP_SYN_SENT:
/* if receive SYN
 * -> respond SYN/ACK
 * To State: SYN_RCVD
 *
 * if receive SYN+ACK
 * -> respond ACK
 * To State: TCP_ESTABLISHED
 *
 * else Discard.
 */
return incoming_SynSent(pkt);
break;
case TCP_SYN_RCVD:
/* if receive ACK
 * To State: TCP_ESTABLISHED
 */
return incoming_SynRcvd(pkt);
break;
case TCP_ESTABLISHED:
/* if receive FIN
 * -> respond ACK
 * To State: TCP_CLOSE_WAIT
 * else Discard.
 */
return incoming_Established(pkt);
break;
case TCP_FIN_WAIT_1:
/* state entered by close() call.
 * if receive FIN
 * -> respond ACK
 * To State: TCP_CLOSING
 *
 * if receive ACK
 * -> no response
 * To State: TCP_FIN_WAIT_2
 *
 * if receive FIN+ACK
 * -> respond ACK
 * To State: TCP_TIMED_WAIT
 *
 */
return incoming_Established(pkt);
//return incoming_FinWait1(pkt);
break;
case TCP_FIN_WAIT_2:
/* if receive FIN
 * -> respond ACK
 * To State: TCP_TIMED_WAIT
 */
return incoming_Established(pkt);
//return incoming_FinWait2(pkt);
break;
case TCP_CLOSING:
/* if receive ACK
 * To State: TCP_TIMED_WAIT
 */
/* all handled in Established */
return incoming_Established(pkt);
//return incoming_Closing(pkt);
break;
case TCP_CLOSE_WAIT:
/*
 * wait for our close to be called.
 */
/* all handled in Established */
return incoming_Established(pkt);
//return incoming_CloseWait(pkt);
break;
case TCP_LAST_ACK:
/* entered by the local close() after sending FIN.
 * if receive ACK
 * To State: TCP_CLOSED
 */
/* all handled in Established */
return incoming_Established(pkt);
/*
return incoming_LastAck(pkt);
*/
break;
/* this is actually the only
 * final state where packets not expected!
 */
case TCP_TIMED_WAIT:
/* State: TCP_TIMED_WAIT
 *
 * discard all -> both connections FINed
 * timeout of this state.
 *
 */
#ifdef DEBUG_TCP_STREAM_CLOSE
std::cerr << "TcpStream::handleIncoming() state = CLOSED (TimedWait)";
std::cerr << std::endl;
dumpstate_locked(std::cerr);
#endif
state = TCP_CLOSED;
// return incoming_TimedWait(pkt);
{
rslog(RSL_WARNING, rstcpstreamzone, "TcpStream::state => TCP_CLOSED (recvd TCP_TIMED_WAIT?)");
}
break;
}
/* reached only via the TIMED_WAIT break above: discard the packet */
delete pkt;
return 1;
}
/* Handle an incoming packet while CLOSED/LISTEN (also reused from
 * SYN_SENT for a simultaneous open).  A bare SYN starts the passive
 * handshake: record the peer's initial seqno/window, reply with
 * SYN+ACK (plain ACK if our SYN was already sent), and move to
 * TCP_SYN_RCVD.  Anything else is silently dropped.
 * Takes ownership of pkt (always deleted here).
 */
int TcpStream::incoming_Closed(TcpPacket *pkt)
{
/* if receive SYN
 * -> respond SYN/ACK
 * To State: SYN_RCVD
 *
 * else Discard.
 */
#ifdef DEBUG_TCP_STREAM
std::cerr << "TcpStream::incoming_Closed()" << std::endl;
#endif
if ((pkt -> hasSyn()) && (!pkt -> hasAck()))
{
/* Init Connection */
/* save seqno */
initPeerSeqno = pkt -> seqno;
inAckno = initPeerSeqno + 1;
outWinSize = pkt -> winsize;
inWinSize = maxWinSize;
/* we can get from SynSent as well,
 * but only send one SYN packet
 */
/* start packet */
TcpPacket *rsp = new TcpPacket();
if (state == TCP_CLOSED)
{
/* truly passive open: pick our initial sequence number and
 * initialise slow-start congestion parameters */
outSeqno = genSequenceNo();
initOurSeqno = outSeqno;
outAcked = outSeqno; /* min - 1 expected */
/* setup Congestion Charging */
congestThreshold = TCP_MAX_WIN;
congestWinSize = MAX_SEG;
congestUpdate = outAcked + congestWinSize;
rsp -> setSyn();
}
rsp -> setAck(inAckno);
/* seq + winsize set in toSend() */
/* as we have received something ... we can up the TTL */
setTTL(TCP_STD_TTL);
#ifdef DEBUG_TCP_STREAM
std::cerr << "TcpStream::incoming_Closed() Sending reply" << std::endl;
std::cerr << "SeqNo: " << rsp->seqno << " Ack: " << rsp->ackno;
std::cerr << std::endl;
#endif
toSend(rsp);
/* change state */
state = TCP_SYN_RCVD;
#ifdef DEBUG_TCP_STREAM
std::cerr << "TcpStream STATE -> TCP_SYN_RCVD" << std::endl;
#endif
rslog(RSL_WARNING, rstcpstreamzone, "TcpStream::state => TCP_SYN_RECVD (recvd SYN & !ACK)");
}
delete pkt;
return 1;
}
/* Handle an incoming packet while in TCP_SYN_SENT (we initiated the
 * connection and are waiting for the reply).
 *
 *  - SYN+ACK with the correct ackno completes the handshake: record
 *    the peer's initial seqno/window, ACK it, move to ESTABLISHED.
 *  - SYN+ACK with a wrong ackno is dropped (returns -1).
 *  - anything else (e.g. bare SYN = simultaneous open) is handled
 *    exactly as in the CLOSED state via incoming_Closed().
 *
 * Takes ownership of pkt (deleted here or by incoming_Closed()).
 */
int TcpStream::incoming_SynSent(TcpPacket *pkt)
{
#ifdef DEBUG_TCP_STREAM
	std::cerr << "TcpStream::incoming_SynSent()" << std::endl;
#endif
	if ((pkt -> hasSyn()) && (pkt -> hasAck()))
	{
		/* check stuff */
		if (pkt -> getAck() != outSeqno)
		{
#ifdef DEBUG_TCP_STREAM
			std::cerr << "TcpStream::incoming_SynSent() Bad Ack - Deleting " << std::endl;
#endif
			/* bad ignore */
			delete pkt;
			return -1;
		}
		/* Complete Connection */
		/* save seqno */
		initPeerSeqno = pkt -> seqno;
		inAckno = initPeerSeqno + 1;
		outWinSize = pkt -> winsize;
		outAcked = pkt -> getAck();
		/* before ACK, reset the TTL
		 * As they have sent something, and we have received
		 * through the firewall, set to STD.
		 */
		setTTL(TCP_STD_TTL);
		/* ack the Syn Packet */
		sendAck();
		/* change state */
		state = TCP_ESTABLISHED;
		outStreamActive = true;
		inStreamActive = true;
#ifdef DEBUG_TCP_STREAM
		std::cerr << "TcpStream STATE -> TCP_ESTABLISHED" << std::endl;
#endif
		/* FIX: log message previously read "recvd SUN & ACK" */
		rslog(RSL_WARNING, rstcpstreamzone, "TcpStream::state => TCP_ESTABLISHED (recvd SYN & ACK)");
		delete pkt;
	}
	else /* same as if closed! (simultaneous open) */
	{
		return incoming_Closed(pkt);
	}
	return 1;
}
/* Handle an incoming packet while in TCP_SYN_RCVD (we sent SYN+ACK and
 * are waiting for the final handshake ACK).
 *
 *  - RST aborts: state -> TCP_CLOSED.
 *  - ACK with the correct ackno completes the handshake and moves to
 *    TCP_ESTABLISHED.  If that ACK also carries data (our initial ACK
 *    may have been lost), the same packet is then processed as a
 *    normal established-state packet.
 *  - anything else is dropped.
 *
 * Takes ownership of pkt (deleted here or by incoming_Established()).
 */
int TcpStream::incoming_SynRcvd(TcpPacket *pkt)
{
/* if receive ACK
 * To State: TCP_ESTABLISHED
 */
if (pkt -> hasRst())
{
/* trouble */
state = TCP_CLOSED;
#ifdef DEBUG_TCP_STREAM
std::cerr << "TcpStream STATE -> TCP_CLOSED" << std::endl;
#endif
rslog(RSL_WARNING, rstcpstreamzone, "TcpStream::state => TCP_CLOSED (recvd RST)");
delete pkt;
return 1;
}
bool ackWithData = false;
if (pkt -> hasAck())
{
if (pkt -> hasSyn())
{
/* has resent syn -> check it matches */
#ifdef DEBUG_TCP_STREAM
std::cerr << "incoming_SynRcvd -> Pkt with ACK + SYN" << std::endl;
#endif
}
/* check stuff */
if (pkt -> getAck() != outSeqno)
{
/* bad ignore */
#ifdef DEBUG_TCP_STREAM
std::cerr << "incoming_SynRcvd -> Ignoring Pkt with bad ACK" << std::endl;
#endif
delete pkt;
return -1;
}
/* Complete Connection */
/* save seqno */
if (pkt -> datasize > 0)
{
#ifdef DEBUG_TCP_STREAM
std::cerr << "TcpStream::incoming_SynRcvd() ACK with Data!" << std::endl;
std::cerr << "TcpStream::incoming_SynRcvd() Shoudn't recv ... unless initACK lost!" << std::endl;
#endif
// managed to trigger this under windows...
// perhaps the initial Ack was lost,
// believe we should just pass this packet
// directly to the incoming_Established... once
// the following has been done.
// and it should all work!
//exit(1);
ackWithData = true;
}
inAckno = pkt -> seqno; /* + pkt -> datasize; */
outWinSize = pkt -> winsize;
outAcked = pkt -> getAck();
/* As they have sent something, and we have received
 * through the firewall, set to STD.
 */
setTTL(TCP_STD_TTL);
/* change state */
state = TCP_ESTABLISHED;
outStreamActive = true;
inStreamActive = true;
#ifdef DEBUG_TCP_STREAM
std::cerr << "TcpStream STATE -> TCP_ESTABLISHED" << std::endl;
#endif
rslog(RSL_WARNING, rstcpstreamzone, "TcpStream::state => TCP_ESTABLISHED (have SYN, recvd ACK)");
}
if (ackWithData)
{
/* connection Established -> handle normally */
#ifdef DEBUG_TCP_STREAM
std::cerr << "incoming_SynRcvd -> Handling Data with Ack Pkt!";
std::cerr << std::endl;
#endif
incoming_Established(pkt);
}
else
{
#ifdef DEBUG_TCP_STREAM
std::cerr << "incoming_SynRcvd -> Ignoring Pkt!" << std::endl;
#endif
/* else nothing */
delete pkt;
}
return 1;
}
/* Handle an incoming packet while the connection is up (also used for
 * all the closing states - see handleIncoming()).
 *
 * 1. If the seqno is inside the valid receive window
 *    [inAckno, inAckno + maxWinSize), update outAcked/outWinSize from
 *    the packet's ack/window fields immediately; otherwise send an ACK
 *    so the peer can resynchronise.
 * 2. Queue the packet on inPkt (bounded by kMaxQueueSize - oldest is
 *    dropped on overflow) and let check_InPkts() consume whatever is
 *    now in-order.
 *
 * Ownership of pkt passes to the inPkt queue / check_InPkts().
 */
int TcpStream::incoming_Established(TcpPacket *pkt)
{
/* first handle the Ack ...
 * this must be done before the queue,
 * to keep the values as up-to-date as possible.
 *
 * must sanity check .....
 * make sure that the sequence number is within the correct range.
 */
#ifdef DEBUG_TCP_STREAM
std::cerr << "TcpStream::incoming_Established() ";
std::cerr << " Pkt->seqno: " << std::hex << pkt->seqno;
std::cerr << " Pkt->datasize: " << std::hex << pkt->datasize;
std::cerr << std::dec << std::endl;
#endif
if ((!isOldSequence(pkt->seqno, inAckno)) && // seq >= inAckno
isOldSequence(pkt->seqno, inAckno + maxWinSize)) // seq < inAckno + maxWinSize.
{
#ifdef DEBUG_TCP_STREAM
std::cerr << "TcpStream::incoming_Established() valid Packet Seqno.";
std::cerr << std::endl;
#endif
if (pkt->hasAck())
{
#ifdef DEBUG_TCP_STREAM
if (outAcked != pkt->ackno)
{
std::cerr << "TcpStream::incoming_Established() valid Packet Seqno & new Ackno.";
std::cerr << std::endl;
std::cerr << "\tUpdating OutAcked to: " << outAcked;
std::cerr << std::endl;
}
#endif
outAcked = pkt->ackno;
}
outWinSize = pkt->winsize;
#ifdef DEBUG_TCP_STREAM
std::cerr << "\tUpdating OutWinSize to: " << outWinSize;
std::cerr << std::endl;
#endif
}
else
{
/* what we do! (This is actually okay - and happens occasionally) */
#ifdef DEBUG_TCP_STREAM
std::cerr << "TcpStream::incoming_Established() ERROR out-of-range Packet Seqno.";
std::cerr << std::endl;
std::cerr << "\t Pkt->SeqNo: " << std::hex << pkt->seqno;
std::cerr << std::endl;
std::cerr << "\t inAckno: " << std::hex << inAckno;
std::cerr << std::endl;
std::cerr << "\t inAckno + maxWinSize: " << std::hex << inAckno + maxWinSize;
std::cerr << std::endl;
std::cerr << "\t outAcked: " << std::hex << outAcked;
std::cerr << std::endl;
std::cerr << "\t Pkt->SeqNo: " << std::hex << pkt->seqno;
std::cerr << std::dec << std::endl;
std::cerr << "\t !isOldSequence(pkt->seqno, inAckno): " << (!isOldSequence(pkt->seqno, inAckno));
std::cerr << std::endl;
std::cerr << "\t isOldSequence(pkt->seqno, inAckno + maxWinSize): " << isOldSequence(pkt->seqno, inAckno + maxWinSize);
std::cerr << std::endl;
std::cerr << std::endl;
std::cerr << "TcpStream::incoming_Established() Sending Ack to update Peer";
std::cerr << std::endl;
#endif
sendAck();
}
/* add to queue */
/* note: even out-of-range packets are queued here; check_InPkts()
 * discards stale ones */
inPkt.push_back(pkt);
if (inPkt.size() > kMaxQueueSize)
{
TcpPacket *pkt = inPkt.front();
inPkt.pop_front();
delete pkt;
#ifdef DEBUG_TCP_STREAM
std::cerr << "TcpStream::incoming_Established() inPkt reached max size...Discarding Oldest Pkt";
std::cerr << std::endl;
#endif
}
/* use as many packets as possible */
return check_InPkts();
}
/* In-order reassembly: repeatedly scan the inPkt queue for the packet
 * whose seqno equals the next expected byte (inAckno), discarding any
 * packets that are entirely old.  Each in-order packet:
 *   - advances inAckno,
 *   - (safety net) re-applies its ack/window if still newer,
 *   - appends its payload to the partial buffer outDataNet, spilling
 *     full MAX_SEG chunks onto outQueue for the application to read,
 *   - drives the FIN / close-handshake state transitions.
 * Loops until no in-order packet remains.  Always returns 1.
 */
int TcpStream::check_InPkts()
{
bool found = true;
TcpPacket *pkt;
std::list<TcpPacket *>::iterator it;
while(found)
{
found = false;
for(it = inPkt.begin(); (!found) && (it != inPkt.end());)
{
#ifdef DEBUG_TCP_STREAM
std::cerr << "Checking expInAck: " << std::hex << inAckno;
std::cerr << " vs: " << std::hex << (*it)->seqno << std::dec << std::endl;
#endif
pkt = *it;
if ((*it)->seqno == inAckno)
{
//std::cerr << "\tFOUND MATCH!";
//std::cerr << std::endl;
/* in-order packet: unlink it (pkt keeps ownership; deleted at
 * the end of the processing section below) */
found = true;
it = inPkt.erase(it);
}
/* see if we can discard it */
/* if smaller seqno, and not wrapping around */
else if (isOldSequence((*it)->seqno, inAckno))
{
#ifdef DEBUG_TCP_STREAM
std::cerr << "Discarding Old Packet expAck: " << std::hex << inAckno;
std::cerr << " seqno: " << std::hex << (*it)->seqno;
std::cerr << " pkt->size: " << std::hex << (*it)->datasize;
std::cerr << " pkt->seqno+size: " << std::hex << (*it)->seqno + (*it)->datasize;
std::cerr << std::dec << std::endl;
#endif
/* discard */
it = inPkt.erase(it);
delete pkt;
}
else
{
++it;
}
}
if (found)
{
#ifdef DEBUG_TCP_STREAM_EXTRA
if (pkt->datasize)
{
checkData(pkt->data, pkt->datasize, pkt->seqno-initPeerSeqno-1);
}
#endif
#ifdef DEBUG_TCP_STREAM
std::cerr << "TcpStream::check_inPkts() Updating inAckno from: " << std::hex << inAckno;
#endif
/* update ack number - let it rollover */
inAckno = pkt->seqno + pkt->datasize;
#ifdef DEBUG_TCP_STREAM
std::cerr << " to: " << std::hex << inAckno;
std::cerr << std::dec << std::endl;
#endif
/* XXX This shouldn't be here, as it prevents
 * the Ack being used until the packet is.
 * This means that a dropped packet will stop traffic in both
 * directions....
 *
 * Moved it to incoming_Established .... but extra
 * check here to be sure!
 */
if (pkt->hasAck())
{
if (isOldSequence(outAcked, pkt->ackno))
{
#ifdef DEBUG_TCP_STREAM
std::cerr << "TcpStream::check_inPkts() ERROR Ack Not Already Used!";
std::cerr << std::endl;
std::cerr << "\t Pkt->ackno: " << std::hex << pkt->ackno;
std::cerr << std::endl;
std::cerr << "\t outAcked: " << std::hex << outAcked;
std::cerr << std::endl;
std::cerr << "\t Pkt->winsize: " << std::hex << pkt->winsize;
std::cerr << std::endl;
std::cerr << "\t outWinSize: " << std::hex << outWinSize;
std::cerr << std::endl;
std::cerr << "\t isOldSequence(outAcked, pkt->ackno): " << isOldSequence(outAcked, pkt->ackno);
std::cerr << std::endl;
std::cerr << std::endl;
#endif
outAcked = pkt->ackno;
outWinSize = pkt->winsize;
#ifdef DEBUG_TCP_STREAM
std::cerr << "\tUpdating OutAcked to: " << outAcked;
std::cerr << std::endl;
std::cerr << "\tUpdating OutWinSize to: " << outWinSize;
std::cerr << std::endl;
#endif
}
else
{
#ifdef DEBUG_TCP_STREAM
std::cerr << "TcpStream::check_inPkts() GOOD Ack Already Used!";
std::cerr << std::endl;
#endif
}
}
/* push onto queue */
if (outSizeNet + pkt->datasize < MAX_SEG)
{
/* move onto outSizeNet */
if (pkt->datasize)
{
memcpy((void *) &(outDataNet[outSizeNet]), pkt->data, pkt->datasize);
outSizeNet += pkt->datasize;
}
}
else
{
/* if it'll overflow the buffer. */
dataBuffer *db = new dataBuffer();
/* move outDatNet -> buffer */
memcpy((void *) db->data, (void *) outDataNet, outSizeNet);
/* fill rest of space */
int remSpace = MAX_SEG - outSizeNet;
memcpy((void *) &(db->data[outSizeNet]), (void *) pkt->data, remSpace);
/* push packet onto queue */
outQueue.push_back(db);
/* any big chunks that will take up a full dataBuffer */
int remData = pkt->datasize - remSpace;
while(remData >= MAX_SEG)
{
db = new dataBuffer();
memcpy((void *) db->data, (void *) &(pkt->data[remSpace]), MAX_SEG);
remData -= MAX_SEG;
outQueue.push_back(db);
}
/* remove any remaining to outDataNet */
/* NOTE(review): chunks after the first full buffer appear to be
 * copied from offset remSpace each time - presumably correct only
 * when at most one full chunk follows; confirm against callers'
 * maximum packet payload (datasize <= MAX_SEG). */
outSizeNet = remData;
if (outSizeNet > 0)
{
memcpy((void *) outDataNet, (void *) &(pkt->data[pkt->datasize - remData]), outSizeNet);
}
}
/* can allow more in! - update inWinSize */
UpdateInWinSize();
/* if pkt is FIN */
/* these must be here -> at the end of the reliable stream */
/* if the fin is set, ack it specially close stream */
if (pkt->hasFin())
{
/* send final ack */
sendAck();
/* closedown stream */
inStreamActive = false;
if (state == TCP_ESTABLISHED)
{
state = TCP_CLOSE_WAIT;
#ifdef DEBUG_TCP_STREAM
std::cerr << "TcpStream::state = TCP_CLOSE_WAIT";
std::cerr << std::endl;
#endif
rslog(RSL_WARNING, rstcpstreamzone, "TcpStream::state => TCP_CLOSE_WAIT (recvd FIN)");
}
else if (state == TCP_FIN_WAIT_1)
{
state = TCP_CLOSING;
#ifdef DEBUG_TCP_STREAM
std::cerr << "TcpStream::state = TCP_CLOSING";
std::cerr << std::endl;
#endif
rslog(RSL_WARNING, rstcpstreamzone, "TcpStream::state => TCP_CLOSING (FIN_WAIT_1, recvd FIN)");
}
else if (state == TCP_FIN_WAIT_2)
{
state = TCP_TIMED_WAIT;
#ifdef DEBUG_TCP_STREAM
std::cerr << "TcpStream::state = TCP_TIMED_WAIT";
std::cerr << std::endl;
#endif
rslog(RSL_WARNING, rstcpstreamzone, "TcpStream::state => TCP_TIMED_WAIT (FIN_WAIT_2, recvd FIN)");
cleanup();
}
}
/* if ack for our FIN */
if ((pkt->hasAck()) && (!outStreamActive)
&& (pkt->ackno == outSeqno))
{
if (state == TCP_FIN_WAIT_1)
{
state = TCP_FIN_WAIT_2;
#ifdef DEBUG_TCP_STREAM
std::cerr << "TcpStream::state = TCP_FIN_WAIT_2";
std::cerr << std::endl;
#endif
rslog(RSL_WARNING, rstcpstreamzone, "TcpStream::state => TCP_FIN_WAIT_2 (FIN_WAIT_1, recvd ACK)");
}
else if (state == TCP_LAST_ACK)
{
#ifdef DEBUG_TCP_STREAM_CLOSE
std::cerr << "TcpStream::state = TCP_CLOSED (LastAck)";
std::cerr << std::endl;
dumpstate_locked(std::cerr);
#endif
state = TCP_CLOSED;
rslog(RSL_WARNING, rstcpstreamzone, "TcpStream::state => TCP_CLOSED (LAST_ACK, recvd ACK)");
cleanup();
}
else if (state == TCP_CLOSING)
{
state = TCP_TIMED_WAIT;
#ifdef DEBUG_TCP_STREAM
std::cerr << "TcpStream::state = TCP_TIMED_WAIT";
std::cerr << std::endl;
#endif
rslog(RSL_WARNING, rstcpstreamzone, "TcpStream::state => TCP_TIMED_WAIT (TCP_CLOSING, recvd ACK)");
cleanup();
}
}
delete pkt;
} /* end of found */
} /* while(found) */
return 1;
}
/* This Fn should be called after each read, or recvd data (thats added to the buffer)
 *
 * Recompute the advertised receive window from the amount of data
 * already queued for the application:
 *   queued <  maxWinSize      -> full window (maxWinSize)
 *   queued <  2 * maxWinSize  -> linearly shrinking window
 *   otherwise                 -> window closed (0)
 * Returns the new inWinSize.
 */
int TcpStream::UpdateInWinSize()
{
	uint32 queuedData = int_read_pending();
	if (queuedData >= 2 * maxWinSize)
	{
		inWinSize = 0;
	}
	else if (queuedData >= maxWinSize)
	{
		inWinSize = 2 * maxWinSize - queuedData;
	}
	else
	{
		inWinSize = maxWinSize;
	}
	return inWinSize;
}
int TcpStream::sendAck()
{
/* simple -> toSend fills in ack/winsize
* and the rest is history
*/
#ifdef DEBUG_TCP_STREAM
std::cerr << "TcpStream::sendAck()";
std::cerr << std::endl;
#endif
return toSend(new TcpPacket(), false);
}
/* Record the peer's address.  The flag is raised after the address is
 * stored, so once peerKnown reads true the address is valid. */
void TcpStream::setRemoteAddress(const struct sockaddr_in &raddr)
{
	peeraddr = raddr;
	peerKnown = true;
}
/* Serialise and transmit pkt to the peer via the UDP layer.
 * Fills in seqno / ackno / winsize, advances outSeqno by the payload
 * size (or by 1 for a SYN), and records what was last advertised
 * (lastSentAck / lastSentWinSize / keepAliveTimer).
 * When retrans is true the packet is kept on outPkt for possible
 * retransmission (and the retransmit timer is started); otherwise it
 * is deleted after sending.  Takes ownership of pkt in all paths.
 * Returns 1 on success, 0 if the peer address is unknown.
 */
int TcpStream::toSend(TcpPacket *pkt, bool retrans)
{
int outPktSize = MAX_SEG + TCP_PSEUDO_HDR_SIZE;
char tmpOutPkt[outPktSize];
if (!peerKnown)
{
/* Major Error! */
#ifdef DEBUG_TCP_STREAM
std::cerr << "TcpStream::toSend() peerUnknown ERROR!!!";
std::cerr << std::endl;
#endif
delete pkt;
return 0;
}
/* get accurate timestamp */
double cts = getCurrentTS();
pkt -> winsize = inWinSize;
pkt -> seqno = outSeqno;
/* increment seq no */
if (pkt->datasize)
{
#ifdef DEBUG_TCP_STREAM_EXTRA
checkData(pkt->data, pkt->datasize, outSeqno-initOurSeqno-1);
#endif
outSeqno += pkt->datasize;
}
if (pkt->hasSyn())
{
/* should not have data! */
if (pkt->datasize)
{
#ifdef DEBUG_TCP_STREAM
std::cerr << "SYN Packet shouldn't contain data!" << std::endl;
#endif
}
/* SYN consumes one sequence number */
outSeqno++;
}
else
{
/* cannot auto Ack SynPackets */
pkt -> setAck(inAckno);
}
pkt -> winsize = inWinSize;
/* store old info */
lastSentAck = pkt -> ackno;
lastSentWinSize = pkt -> winsize;
keepAliveTimer = cts;
pkt -> writePacket(tmpOutPkt, outPktSize);
#ifdef DEBUG_TCP_STREAM
std::cerr << "TcpStream::toSend() Seqno: ";
std::cerr << pkt->seqno << " size: " << pkt->datasize;
std::cerr << " Ackno: ";
std::cerr << pkt->ackno << " winsize: " << pkt->winsize;
std::cerr << std::endl;
//std::cerr << printPkt(tmpOutPkt, outPktSize) << std::endl;
#endif
udp -> sendPkt(tmpOutPkt, outPktSize, peeraddr, ttl);
if (retrans)
{
/* restart timers */
pkt -> ts = cts;
pkt -> retrans = 0;
startRetransmitTimer();
#ifdef DEBUG_TCP_STREAM
std::cerr << "TcpStream::toSend() Adding to outPkt --> Seqno: ";
std::cerr << pkt->seqno << " size: " << pkt->datasize;
std::cerr << std::endl;
#endif
outPkt.push_back(pkt);
}
else
{
delete pkt;
}
return 1;
}
/* single retransmit timer.
 *
 * Arm the retransmit timer from "now".  A no-op if it is already
 * running (the timer covers the oldest outstanding packet). */
void TcpStream::startRetransmitTimer()
{
	if (retransTimerOn)
	{
		return;
	}
	retransTimerOn = true;
	retransTimerTs = getCurrentTS();
#ifdef DEBUG_TCP_STREAM
	std::cerr << "TcpStream::startRetransmitTimer() peer: " << peeraddr;
	std::cerr << " retransTimeout: " << retransTimeout;
	std::cerr << " retransTimerTs: " << std::setprecision(12) <<retransTimerTs;
	std::cerr << std::endl;
#endif
}
/* Force the retransmit timer to restart from "now". */
void TcpStream::restartRetransmitTimer()
{
stopRetransmitTimer();
startRetransmitTimer();
}
/* Cancel the retransmit timer (a no-op when it is not running). */
void TcpStream::stopRetransmitTimer()
{
	if (retransTimerOn)
	{
#ifdef DEBUG_TCP_STREAM
		std::cerr << "TcpStream::stopRetransmitTimer() peer: " << peeraddr;
		std::cerr << std::endl;
#endif
		retransTimerOn = false;
	}
}
/* Stop the timer and recompute the timeout from the current RTT
 * estimate (Jacobson: est + 4*dev, doubled here for headroom). */
void TcpStream::resetRetransmitTimer()
{
	retransTimeout = 2.0 * (rtt_est + 4.0 * rtt_dev);
	retransTimerOn = false;
	// happens too often for RETRANS debugging.
#ifdef DEBUG_TCP_STREAM
	std::cerr << "TcpStream::resetRetransmitTimer() peer: " << peeraddr;
	std::cerr << " retransTimeout: " << std::setprecision(12) << retransTimeout;
	std::cerr << std::endl;
#endif
}
/* Exponential backoff of the retransmit timeout (Karn's algorithm),
 * capped at TCP_RETRANS_MAX_TIMEOUT. */
void TcpStream::incRetransmitTimeout()
{
	retransTimeout *= 2;
	if (retransTimeout > TCP_RETRANS_MAX_TIMEOUT)
	{
		retransTimeout = TCP_RETRANS_MAX_TIMEOUT;
	}
#ifdef DEBUG_TCP_STREAM_RETRANS
	std::cerr << "TcpStream::incRetransmitTimer() peer: " << peeraddr;
	std::cerr << " retransTimeout: " << std::setprecision(12) << retransTimeout;
	std::cerr << std::endl;
#endif
}
/* Retransmit the oldest unacknowledged packet once the retransmit
 * timer has expired.  Also applies the congestion backoff, ramps the
 * TTL up during SYN startup, and tears the stream down after too many
 * retransmission attempts.
 * Returns 1 if a packet was retransmitted, 0 otherwise.
 *
 * FIX: the DEBUG_TCP_STREAM branch after rs_sprintf() previously did
 * "std::cerr << out.str()" - but 'out' is a std::string, which has no
 * str() member (that is an ostringstream method), so the debug build
 * failed to compile.  Now streams 'out' directly.
 */
int TcpStream::retrans()
{
	int outPktSize = MAX_SEG + TCP_PSEUDO_HDR_SIZE;
	char tmpOutPkt[outPktSize];
	if (!peerKnown)
	{
		/* Major Error! */
#ifdef DEBUG_TCP_STREAM
		std::cerr << "TcpStream::retrans() peerUnknown ERROR!!!";
		std::cerr << std::endl;
#endif
		return 0;
	}
	/* nothing to do unless the timer is running and has expired */
	if (!retransTimerOn)
	{
		return 0;
	}
	double cts = getCurrentTS();
	if (cts - retransTimerTs < retransTimeout)
	{
		return 0;
	}
	if (outPkt.begin() == outPkt.end())
	{
		/* timer running but nothing outstanding -> stop it */
		resetRetransmitTimer();
		return 0;
	}
	TcpPacket *pkt = outPkt.front();
	if (!pkt)
	{
		/* error */
		return 0;
	}
	/* retransmission -> adjust the congestWinSize and congestThreshold
	 */
	congestThreshold = congestWinSize / 2;
	congestWinSize = MAX_SEG;
	congestUpdate = outAcked + congestWinSize; // point when we can up the winSize.
#ifdef DEBUG_TCP_STREAM
	std::cerr << "TcpStream::retrans() Adjusting Congestion Parameters: ";
	std::cerr << std::endl;
	std::cerr << "\tcongestWinSize: " << congestWinSize;
	std::cerr << " congestThreshold: " << congestThreshold;
	std::cerr << " congestUpdate: " << congestUpdate;
	std::cerr << std::endl;
#endif
	/* update ackno and winsize */
	if (!(pkt->hasSyn()))
	{
		pkt->setAck(inAckno);
		lastSentAck = pkt -> ackno;
	}
	pkt->winsize = inWinSize;
	lastSentWinSize = pkt -> winsize;
	keepAliveTimer = cts;
	pkt->writePacket(tmpOutPkt, outPktSize);
#ifdef DEBUG_TCP_STREAM_RETRANS
	std::cerr << "TcpStream::retrans()";
	std::cerr << " peer: " << peeraddr;
	std::cerr << " hasSyn: " << pkt->hasSyn();
	std::cerr << " Seqno: ";
	std::cerr << pkt->seqno << " size: " << pkt->datasize;
	std::cerr << " Ackno: ";
	std::cerr << pkt->ackno << " winsize: " << pkt->winsize;
	std::cerr << " retrans: " << (int) pkt->retrans;
	std::cerr << " timeout: " << std::setprecision(12) << retransTimeout;
	std::cerr << std::endl;
	//std::cerr << printPkt(tmpOutPkt, outPktSize) << std::endl;
#endif
	/* if its a syn packet ** thats been
	 * transmitting for a while, maybe
	 * we should increase the ttl.
	 */
	if ((pkt->hasSyn()) && (getTTL() < TCP_STD_TTL))
	{
		/* calculate a new TTL */
		if (mTTL_end > cts)
		{
			setTTL(TCP_DEFAULT_FIREWALL_TTL);
		}
		else
		{
			setTTL(getTTL() + 1);
		}
		std::string out;
		rs_sprintf(out, "TcpStream::retrans() Startup SYNs retrans count: %u New TTL: %d", pkt->retrans, getTTL());
		rslog(RSL_WARNING, rstcpstreamzone, out);
#ifdef DEBUG_TCP_STREAM
		std::cerr << out << std::endl;
#endif
	}
	/* catch excessive retransmits
	 * - Allow Syn case more....
	 * - if not SYN or TTL has reached STD then timeout quickly.
	 * OLD 2nd Logic (below) has been replaced with lower logic.
	 * (((!pkt->hasSyn()) || (TCP_STD_TTL == getTTL()))
	 * && (pkt->retrans > kMaxPktRetransmit)))
	 * Problem was that the retransmit of Syn packet had STD_TTL, and was triggering Close (and SeqNo change).
	 * It seemed to work anyway.... But might cause connection failures. Will reduce the MaxSyn Retransmit
	 * so something more reasonable as well.
	 * ((!pkt->hasSyn()) && (pkt->retrans > kMaxPktRetransmit)))
	 */
	if ((pkt->hasSyn() && (pkt->retrans > kMaxSynPktRetransmit)) ||
		((!pkt->hasSyn()) && (pkt->retrans > kMaxPktRetransmit)))
	{
		/* too many attempts close stream */
#ifdef DEBUG_TCP_STREAM_CLOSE
		std::cerr << "TcpStream::retrans() Too many Retransmission Attempts (";
		std::cerr << (int) pkt->retrans << ") for Peer: " << peeraddr << std::endl;
		std::cerr << "TcpStream::retrans() Closing Socket Connection";
		std::cerr << std::endl;
		//dumpPacket(std::cerr, (unsigned char *) tmpOutPkt, outPktSize);
		dumpstate_locked(std::cerr);
#endif
		rslog(RSL_WARNING,rstcpstreamzone,"TcpStream::state => TCP_CLOSED (Too Many Retransmits)");
		outStreamActive = false;
		inStreamActive = false;
		state = TCP_CLOSED;
		cleanup();
		return 0;
	}
	udp -> sendPkt(tmpOutPkt, outPktSize, peeraddr, ttl);
	/* restart timers */
	pkt->ts = cts;
	pkt->retrans++;
	/*
	 * finally - double the retransTimeout ... (Karn's Algorithm)
	 * except if we are starting a connection... i.e. hasSyn()
	 */
	if (!pkt->hasSyn())
	{
		incRetransmitTimeout();
		restartRetransmitTimer();
	}
	else
	{
		resetRetransmitTimer();
		startRetransmitTimer();
	}
	return 1;
}
/* Remove every fully-acknowledged packet (seqno < outAcked) from the
 * front of outPkt.  While doing so:
 *   - grow the congestion window (slow-start doubling below the
 *     threshold, linear growth above it, capped at maxWinSize),
 *   - update the RTT estimate with Jacobson's EWMA - skipped for any
 *     retransmitted packet and all packets after it (Karn's rule),
 *   - reset (and possibly restart) the retransmit timer when progress
 *     was made, undoing any exponential backoff.
 */
void TcpStream::acknowledge()
{
/* cleans up acknowledge packets */
/* packets are pushed back in order */
std::list<TcpPacket *>::iterator it;
double cts = getCurrentTS();
bool updateRTT = true;
bool clearedPkts = false;
for(it = outPkt.begin(); (it != outPkt.end()) &&
(isOldSequence((*it)->seqno, outAcked));
it = outPkt.erase(it))
{
TcpPacket *pkt = (*it);
clearedPkts = true;
/* adjust the congestWinSize and congestThreshold
 * congestUpdate <= outAcked
 *
 ***/
if (!isOldSequence(outAcked, congestUpdate))
{
if (congestWinSize < congestThreshold)
{
/* double it baby! */
congestWinSize *= 2;
}
else
{
/* linear increase */
congestWinSize += MAX_SEG;
}
if (congestWinSize > maxWinSize)
{
congestWinSize = maxWinSize;
}
congestUpdate = outAcked + congestWinSize; // point when we can up the winSize.
#ifdef DEBUG_TCP_STREAM
std::cerr << "TcpStream::acknowledge() Adjusting Congestion Parameters: ";
std::cerr << std::endl;
std::cerr << "\tcongestWinSize: " << congestWinSize;
std::cerr << " congestThreshold: " << congestThreshold;
std::cerr << " congestUpdate: " << congestUpdate;
std::cerr << std::endl;
#endif
}
/* update the RoundTripTime,
 * using Jacobson's values.
 * RTT = a RTT + (1-a) M
 * where
 * RTT is RoundTripTime estimate.
 * a = 7/8,
 * M = time for ack.
 *
 * D = a D + (1 - a) | RTT - M |
 * where
 * D is approx Deviation.
 * a,RTT & M are the same as above.
 *
 * Timeout = RTT + 4 * D.
 *
 * And Karn's Algorithm...
 * which says
 * (1) do not update RTT or D for retransmitted packets.
 * + the ones that follow .... (the ones whos ack was
 * delayed by the retranmission)
 * (2) double timeout, when packets fail. (done in retrans).
 */
if (pkt->retrans)
{
/* sticky: once a retransmitted packet is seen, no later packet
 * in this pass updates the RTT either (Karn rule 1) */
updateRTT = false;
}
if (updateRTT) /* can use for RTT calc */
{
double ack_time = cts - pkt->ts;
rtt_est = RTT_ALPHA * rtt_est + (1.0 - RTT_ALPHA) * ack_time;
rtt_dev = RTT_ALPHA * rtt_dev + (1.0 - RTT_ALPHA) * fabs(rtt_est - ack_time);
retransTimeout = rtt_est + 4.0 * rtt_dev;
#ifdef DEBUG_TCP_STREAM
std::cerr << "TcpStream::acknowledge() Updating RTT: ";
std::cerr << std::endl;
std::cerr << "\tAckTime: " << ack_time;
std::cerr << std::endl;
std::cerr << "\tRRT_est: " << rtt_est;
std::cerr << std::endl;
std::cerr << "\tRTT_dev: " << rtt_dev;
std::cerr << std::endl;
std::cerr << "\tTimeout: " << retransTimeout;
std::cerr << std::endl;
#endif
}
#ifdef DEBUG_TCP_STREAM
else
{
std::cerr << "TcpStream::acknowledge() Not Updating RTT for retransmitted Pkt Sequence";
std::cerr << std::endl;
}
#endif
#ifdef DEBUG_TCP_STREAM
std::cerr << "TcpStream::acknowledge() Removing Seqno: ";
std::cerr << pkt->seqno << " size: " << pkt->datasize;
std::cerr << std::endl;
#endif
delete pkt;
}
/* This is triggered if we have recieved acks for retransmitted packets....
 * In this case we want to reset the timeout, and remove the doubling.
 *
 * If we don't do this, and there have been more dropped packets,
 * the the timeout gets continually doubled. which will virtually stop
 * all communication.
 *
 * This will effectively trigger the retransmission of the next dropped packet.
 */
/*
 * if have acked all data - resetRetransTimer()
 */
if (it == outPkt.end())
{
#ifdef DEBUG_TCP_STREAM
std::cerr << "TcpStream::acknowledge() peer: " << peeraddr;
std::cerr << " Backlog cleared, resetRetransmitTimer";
std::cerr << std::endl;
#endif
resetRetransmitTimer();
}
else if (clearedPkts)
{
#ifdef DEBUG_TCP_STREAM
std::cerr << "TcpStream::acknowledge() peer: " << peeraddr;
std::cerr << " Cleared some packets -> resetRetransmitTimer + start";
std::cerr << std::endl;
#endif
resetRetransmitTimer();
startRetransmitTimer();
}
return;
}
int TcpStream::send()
{
/* handle network interface always */
/* clean up as much as possible */
acknowledge();
/* send any old packets */
retrans();
if (state < TCP_ESTABLISHED)
{
return -1;
}
/* get the inQueue, can send */
/* determine exactly how much we can send */
uint32 maxsend = congestWinSize;
uint32 inTransit;
if (outWinSize < congestWinSize)
{
maxsend = outWinSize;
}
if (outSeqno < outAcked)
{
inTransit = (TCP_MAX_SEQ - outAcked) + outSeqno;
}
else
{
inTransit = outSeqno - outAcked;
}
if (maxsend > inTransit)
{
maxsend -= inTransit;
}
else
{
maxsend = 0;
}
#ifdef DEBUG_TCP_STREAM
int availSend = inQueue.size() * MAX_SEG + inSize;
std::cerr << "TcpStream::send() CC: ";
std::cerr << "oWS: " << outWinSize;
std::cerr << " cWS: " << congestWinSize;
std::cerr << " | inT: " << inTransit;
std::cerr << " mSnd: " << maxsend;
std::cerr << " aSnd: " << availSend;
std::cerr << " | oSeq: " << outSeqno;
std::cerr << " oAck: " << outAcked;
std::cerr << " cUpd: " << congestUpdate;
std::cerr << std::endl;
#endif
int sent = 0;
while((inQueue.size() > 0) && (maxsend >= MAX_SEG))
{
dataBuffer *db = inQueue.front();
inQueue.pop_front();
TcpPacket *pkt = new TcpPacket(db->data, MAX_SEG);
#ifdef DEBUG_TCP_STREAM
std::cerr << "TcpStream::send() Segment ===> Seqno: ";
std::cerr << pkt->seqno << " size: " << pkt->datasize;
std::cerr << std::endl;
#endif
sent++;
maxsend -= MAX_SEG;
toSend(pkt);
delete db;
}
/* if inqueue empty, and enough window space, send partial stuff */
if ((!sent) && (inQueue.empty()) && (maxsend >= inSize) && (inSize))
{
TcpPacket *pkt = new TcpPacket(inData, inSize);
#ifdef DEBUG_TCP_STREAM
std::cerr << "TcpStream::send() Remaining ===>";
std::cerr << std::endl;
#endif
inSize = 0;
sent++;
maxsend -= inSize;
toSend(pkt);
}
/* if send nothing */
bool needsAck = false;
if (!sent)
{
double cts = getCurrentTS();
/* if needs ack */
if (isOldSequence(lastSentAck,inAckno))
{
#ifdef DEBUG_TCP_STREAM
std::cerr << "TcpStream::send() Ack Triggered (Ackno)";
std::cerr << std::endl;
#endif
needsAck = true;
}
/* if needs window
* if added enough space for packet, or
* (this case is equivalent to persistence timer)
* haven't sent anything for a while, and the
* window size has drastically increased.
* */
if (((lastSentWinSize < MAX_SEG) && (inWinSize > MAX_SEG)) ||
((cts - keepAliveTimer > retransTimeout * 4) &&
(inWinSize > lastSentWinSize + 4 * MAX_SEG)))
{
#ifdef DEBUG_TCP_STREAM
std::cerr << "TcpStream::send() Ack Triggered (Window)";
std::cerr << std::endl;
#endif
needsAck = true;
}
/* if needs keepalive */
if (cts - keepAliveTimer > keepAliveTimeout)
{
#ifdef DEBUG_TCP_STREAM
std::cerr << "TcpStream::send() Ack Triggered (KAlive)";
std::cerr << std::endl;
#endif
needsAck = true;
}
/* if end of stream -> switch mode -> send fin (with ack) */
if ((!outStreamActive) && (inQueue.size() + inSize == 0) &&
((state == TCP_ESTABLISHED) || (state == TCP_CLOSE_WAIT)))
{
/* finish the stream */
TcpPacket *pkt = new TcpPacket();
pkt -> setFin();
needsAck = false;
toSend(pkt, false);
#ifdef DEBUG_TCP_STREAM
std::cerr << "TcpStream::send() Fin Triggered";
std::cerr << std::endl;
#endif
if (state == TCP_ESTABLISHED)
{
state = TCP_FIN_WAIT_1;
#ifdef DEBUG_TCP_STREAM
std::cerr << "TcpStream::state = TCP_FIN_WAIT_1";
std::cerr << std::endl;
#endif
rslog(RSL_WARNING, rstcpstreamzone, "TcpStream::state => TCP_FIN_WAIT_1 (End of Stream)");
}
else if (state == TCP_CLOSE_WAIT)
{
state = TCP_LAST_ACK;
#ifdef DEBUG_TCP_STREAM
std::cerr << "TcpStream::state = TCP_LAST_ACK";
std::cerr << std::endl;
#endif
rslog(RSL_WARNING, rstcpstreamzone, "TcpStream::state => TCP_LAST_ACK (CLOSE_WAIT, End of Stream)");
}
}
if (needsAck)
{
sendAck();
}
#ifdef DEBUG_TCP_STREAM_EXTRA
else
{
std::cerr << "TcpStream::send() No Ack";
std::cerr << std::endl;
}
#endif
}
#ifdef DEBUG_TCP_STREAM_EXTRA
else
{
std::cerr << "TcpStream::send() Stuff Sent";
std::cerr << std::endl;
}
#endif
return 1;
}
uint32 TcpStream::genSequenceNo()
{
return RSRandom::random_u32();
//return 1000; // TCP_MAX_SEQ - 1000; //1000; //(rand() - 100000) + time(NULL) % 100000;
//return (rand() - 100000) + time(NULL) % 100000;
}
bool TcpStream::isOldSequence(uint32 tst, uint32 curr)
{
return ((int)((tst)-(curr)) < 0);
std::cerr << "TcpStream::isOldSequence(): Case ";
/* if tst < curr */
if ((int)((tst)-(curr)) < 0)
{
if (curr - tst < TCP_MAX_SEQ/2) /* diff less than half span -> old */
{
std::cerr << "1T" << std::endl;
return true;
}
std::cerr << "2F" << std::endl;
return false;
}
else if ((tst - curr) > TCP_MAX_SEQ/2)
{
std::cerr << "3T: tst-curr:" << (tst-curr) << std::endl;
return true;
}
std::cerr << "4F: tst-curr:" << (tst-curr) << std::endl;
return false;
}
#ifdef WINDOWS_SYS
#include <time.h>
#include <sys/timeb.h>
#endif
// Little fn to get current timestamp in an independent manner.
static double getCurrentTS()
{
#ifndef WINDOWS_SYS
struct timeval cts_tmp;
gettimeofday(&cts_tmp, NULL);
double cts = (cts_tmp.tv_sec) + ((double) cts_tmp.tv_usec) / 1000000.0;
#else
struct _timeb timebuf;
_ftime( &timebuf);
double cts = (timebuf.time) + ((double) timebuf.millitm) / 1000.0;
#endif
return cts;
}
uint32 TcpStream::int_wbytes()
{
return outSeqno - initOurSeqno - 1;
}
uint32 TcpStream::int_rbytes()
{
return inAckno - initPeerSeqno - 1;
}
/********* Special debugging stuff *****/
#ifdef DEBUG_TCP_STREAM_EXTRA
#include <stdio.h>
static FILE *bc_fd = 0;
int setupBinaryCheck(std::string fname)
{
bc_fd = RsDirUtil::rs_fopen(fname.c_str(), "r");
return 1;
}
/* uses seq number to track position -> ensure no rollover */
int checkData(uint8 *data, int size, int idx)
{
if (bc_fd <= 0)
{
return -1;
}
std::cerr << "checkData(" << idx << "+" << size << ")";
int tmpsize = size;
uint8 tmpdata[tmpsize];
if (-1 == fseek(bc_fd, idx, SEEK_SET))
{
std::cerr << "Fseek Issues!" << std::endl;
exit(1);
return -1;
}
if (1 != fread(tmpdata, tmpsize, 1, bc_fd))
{
std::cerr << "Length Difference!" << std::endl;
exit(1);
return -1;
}
for(int i = 0; i < size; i++)
{
if (data[i] != tmpdata[i])
{
std::cerr << "Byte Difference!" << std::endl;
exit(1);
return -1;
}
}
std::cerr << "OK" << std::endl;
return 1;
}
#endif
/***** Dump state of TCP Stream - to workout why it was closed ****/
int TcpStream::dumpstate_locked(std::ostream &out)
{
out << "TcpStream::dumpstate()";
out << "=======================================================";
out << std::endl;
out << "state: " << (int) state;
out << " errorState: " << (int) errorState;
out << std::endl;
out << "(Streams) inStreamActive: " << inStreamActive;
out << " outStreamActive: " << outStreamActive;
out << std::endl;
out << "(Timeouts) maxWinSize: " << maxWinSize;
out << " keepAliveTimeout: " << keepAliveTimeout;
out << " retransTimeout: " << retransTimeout;
out << std::endl;
out << "(Timers) keepAliveTimer: " << std::setprecision(12) << keepAliveTimer;
out << " lastIncomingPkt: " << std::setprecision(12) << lastIncomingPkt;
out << std::endl;
out << "(Tracking) lastSendAck: " << lastSentAck;
out << " lastSendWinSize: " << lastSentWinSize;
out << std::endl;
out << "(Init) initOutSeqno: " << initOurSeqno;
out << " initPeerSeqno: " << initPeerSeqno;
out << std::endl;
out << "(r/w) lastWriteTF: " << lastWriteTF;
out << " lastReadTF: " << lastReadTF;
out << " wcount: " << wcount;
out << " rcount: " << rcount;
out << std::endl;
out << "(rtt) rtt_est: " << rtt_est;
out << " rtt_dev: " << rtt_dev;
out << std::endl;
out << "(congestion) congestThreshold: " << congestThreshold;
out << " congestWinSize: " << congestWinSize;
out << " congestUpdate: " << congestUpdate;
out << std::endl;
out << "(TTL) mTTL_period: " << mTTL_period;
out << " mTTL_start: " << std::setprecision(12) << mTTL_start;
out << " mTTL_end: " << std::setprecision(12) << mTTL_end;
out << std::endl;
out << "(Peer) peerKnown: " << peerKnown;
out << " peerAddr: " << peeraddr;
out << std::endl;
out << "-------------------------------------------------------";
out << std::endl;
status_locked(out);
out << "=======================================================";
out << std::endl;
return 1;
}
int TcpStream::dumpstate(std::ostream &out)
{
tcpMtx.lock(); /********** LOCK MUTEX *********/
dumpstate_locked(out);
tcpMtx.unlock(); /******** UNLOCK MUTEX *********/
return 1;
}
int dumpPacket(std::ostream &out, unsigned char *pkt, uint32_t size)
{
uint32_t i;
out << "dumpPacket() Size: " << size;
out << std::endl;
out << "------------------------------------------------------";
for(i = 0; i < size; i++)
{
if (i % 16 == 0)
{
out << std::endl;
}
out << std::hex << std::setfill('0') << std::setw(2) << (int) pkt[i] << ":";
}
if ((i - 1) % 16 != 0)
{
out << std::endl;
}
out << "------------------------------------------------------";
out << std::dec << std::endl;
return 1;
}<|fim▁end|> | |
<|file_name|>package.py<|end_file_name|><|fim▁begin|>##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.<|fim▁hole|># This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Sleef(CMakePackage):
"""SIMD Library for Evaluating Elementary Functions,
vectorized libm and DFT."""
homepage = "http://sleef.org"
url = "https://github.com/shibatch/sleef/archive/3.2.tar.gz"
version('3.2', '459215058f2c8d55cd2b644d56c8c4f0')<|fim▁end|> | # |
<|file_name|>sensor.py<|end_file_name|><|fim▁begin|>"""Support for the Foobot indoor air quality monitor."""
import asyncio
from datetime import timedelta
import logging
import aiohttp
from foobot_async import FoobotClient
import voluptuous as vol
from homeassistant.const import (
ATTR_TEMPERATURE,
ATTR_TIME,
CONF_TOKEN,
CONF_USERNAME,
TEMP_CELSIUS,
)
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.config_validation import PLATFORM_SCHEMA
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
ATTR_HUMIDITY = "humidity"
ATTR_PM2_5 = "PM2.5"
ATTR_CARBON_DIOXIDE = "CO2"
ATTR_VOLATILE_ORGANIC_COMPOUNDS = "VOC"
ATTR_FOOBOT_INDEX = "index"
SENSOR_TYPES = {
"time": [ATTR_TIME, "s"],
"pm": [ATTR_PM2_5, "µg/m3", "mdi:cloud"],
"tmp": [ATTR_TEMPERATURE, TEMP_CELSIUS, "mdi:thermometer"],
"hum": [ATTR_HUMIDITY, "%", "mdi:water-percent"],
"co2": [ATTR_CARBON_DIOXIDE, "ppm", "mdi:periodic-table-co2"],
"voc": [ATTR_VOLATILE_ORGANIC_COMPOUNDS, "ppb", "mdi:cloud"],
"allpollu": [ATTR_FOOBOT_INDEX, "%", "mdi:percent"],
}
SCAN_INTERVAL = timedelta(minutes=10)
PARALLEL_UPDATES = 1
TIMEOUT = 10
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(CONF_TOKEN): cv.string, vol.Required(CONF_USERNAME): cv.string}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the devices associated with the account."""
token = config.get(CONF_TOKEN)
username = config.get(CONF_USERNAME)
client = FoobotClient(
token, username, async_get_clientsession(hass), timeout=TIMEOUT
)
dev = []
try:
devices = await client.get_devices()
_LOGGER.debug("The following devices were found: %s", devices)
for device in devices:
foobot_data = FoobotData(client, device["uuid"])
for sensor_type in SENSOR_TYPES:
if sensor_type == "time":
continue
foobot_sensor = FoobotSensor(foobot_data, device, sensor_type)
dev.append(foobot_sensor)
except (
aiohttp.client_exceptions.ClientConnectorError,
asyncio.TimeoutError,
FoobotClient.TooManyRequests,
FoobotClient.InternalError,
):
_LOGGER.exception("Failed to connect to foobot servers.")
raise PlatformNotReady
except FoobotClient.ClientError:
_LOGGER.error("Failed to fetch data from foobot servers.")
return
async_add_entities(dev, True)
class FoobotSensor(Entity):
"""Implementation of a Foobot sensor."""
def __init__(self, data, device, sensor_type):
"""Initialize the sensor."""
self._uuid = device["uuid"]
self.foobot_data = data
self._name = "Foobot {} {}".format(device["name"], SENSOR_TYPES[sensor_type][0])
self.type = sensor_type
self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def icon(self):
"""Icon to use in the frontend."""
return SENSOR_TYPES[self.type][2]
@property
def state(self):
"""Return the state of the device."""
try:
data = self.foobot_data.data[self.type]<|fim▁hole|> return data
@property
def unique_id(self):
"""Return the unique id of this entity."""
return f"{self._uuid}_{self.type}"
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity."""
return self._unit_of_measurement
async def async_update(self):
"""Get the latest data."""
await self.foobot_data.async_update()
class FoobotData(Entity):
"""Get data from Foobot API."""
def __init__(self, client, uuid):
"""Initialize the data object."""
self._client = client
self._uuid = uuid
self.data = {}
@Throttle(SCAN_INTERVAL)
async def async_update(self):
"""Get the data from Foobot API."""
interval = SCAN_INTERVAL.total_seconds()
try:
response = await self._client.get_last_data(
self._uuid, interval, interval + 1
)
except (
aiohttp.client_exceptions.ClientConnectorError,
asyncio.TimeoutError,
self._client.TooManyRequests,
self._client.InternalError,
):
_LOGGER.debug("Couldn't fetch data")
return False
_LOGGER.debug("The data response is: %s", response)
self.data = {k: round(v, 1) for k, v in response[0].items()}
return True<|fim▁end|> | except (KeyError, TypeError):
data = None |
<|file_name|>wsgi.py<|end_file_name|><|fim▁begin|>"""
WSGI config for CongCards project.<|fim▁hole|>https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from dj_static import Cling
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "CongCards.settings")
application = Cling(get_wsgi_application())<|fim▁end|> |
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see |
<|file_name|>local.example.py<|end_file_name|><|fim▁begin|>from opendata.settings.dev import *
<|fim▁hole|># Override settings here<|fim▁end|> | |
<|file_name|>main.rs<|end_file_name|><|fim▁begin|>fn main() {
// demonstrate the Option<T> and Some
let five = Some(5);
let six = plus_one(five);
let none = plus_one(None);
println!("Five: {:?}", five);
println!("Six: {:?}", six);
println!("None: {:?}", none);
// simpler syntax if you want to do something with
// only one value (one pattern match)
let some_value = Some(3);
if let Some(3) = some_value {
println!("Found 3");
}
// same as if let but includes an else
if let Some(2) = some_value {
println!("Found 2");
} else {
println!("Found something different");
}
}
fn plus_one(x: Option<i32>) -> Option<i32> {
// if no value, return none, otherwise return
// the addition of the value plus one
match x {
None => None,
Some(i) => Some(i + 1),<|fim▁hole|>}<|fim▁end|> | } |
<|file_name|>TitleIllustrations.java<|end_file_name|><|fim▁begin|>/*
* Created on Oct 18, 2004
*
* TODO To change the template for this generated file go to
* Window - Preferences - Java - Code Style - Code Templates
*/
package org.tolweb.tapestry;
import java.util.Collection;
import org.apache.tapestry.BaseComponent;
import org.apache.tapestry.IRequestCycle;
import org.tolweb.hibernate.TitleIllustration;
import org.tolweb.tapestry.injections.BaseInjectable;
import org.tolweb.tapestry.injections.ImageInjectable;
import org.tolweb.treegrow.main.Contributor;
import org.tolweb.treegrow.main.ImageVersion;
import org.tolweb.treegrow.main.NodeImage;
import org.tolweb.treegrow.main.StringUtils;
/**
* @author dmandel
*
* TODO To change the template for this generated type comment go to
* Window - Preferences - Java - Code Style - Code Templates
*/
public abstract class TitleIllustrations extends BaseComponent implements <|fim▁hole|> public abstract Collection getIllustrations();
public abstract TitleIllustration getCurrentIllustration();
public abstract void setCurrentIllustration(TitleIllustration value);
public abstract void setIsSingleIllustration(boolean value);
public abstract boolean getIsSingleIllustration();
public abstract int getIndex();
public abstract void setContributor(Contributor contributor);
public String getAltText() {
if (getCurrentIllustration().getImage() != null) {
NodeImage img = getCurrentIllustration().getImage();
if (StringUtils.notEmpty(img.getAltText())) {
return img.getAltText();
} else {
return " ";
}
} else {
return " ";
}
}
public void prepareForRender(IRequestCycle cycle) {
super.prepareForRender(cycle);
if (getIllustrations() != null && getIllustrations().size() == 1) {
setIsSingleIllustration(true);
} else {
setIsSingleIllustration(false);
}
}
public String getCurrentImageLocation() {
TitleIllustration currentIllustration = getCurrentIllustration();
ImageVersion version = currentIllustration.getVersion();
String url;
if (StringUtils.isEmpty(version.getFileName())) {
url = getImageDAO().generateAndSaveVersion(version);
} else {
url = getImageUtils().getVersionUrl(currentIllustration.getVersion());
}
return url;
}
public String getCurrentImageClass() {
if (getIsSingleIllustration()) {
return "singletillus";
} else {
return null;
}
}
}<|fim▁end|> | ImageInjectable, BaseInjectable {
@SuppressWarnings("unchecked") |
<|file_name|>package.py<|end_file_name|><|fim▁begin|>##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.<|fim▁hole|># Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyPyani(PythonPackage):
"""pyani is a Python3 module that provides support for calculating
average nucleotide identity (ANI) and related measures for whole genome
comparisons, and rendering relevant graphical summary output. Where
available, it takes advantage of multicore systems, and can integrate
with SGE/OGE-type job schedulers for the sequence comparisons."""
homepage = "http://widdowquinn.github.io/pyani"
url = "https://pypi.io/packages/source/p/pyani/pyani-0.2.7.tar.gz"
version('0.2.7', '239ba630d375a81c35b7c60fb9bec6fa')
version('0.2.6', 'd5524b9a3c62c36063ed474ea95785c9')
depends_on('[email protected]:')
depends_on('py-setuptools', type='build')
depends_on('py-matplotlib', type=('build', 'run'))
depends_on('py-seaborn', type=('build', 'run'))
# Required for ANI analysis
depends_on('py-biopython', type=('build', 'run'))
depends_on('py-pandas', type=('build', 'run'))
depends_on('py-scipy', type=('build', 'run'))
# Required for ANIb analysis
depends_on('blast-plus~python', type='run')
# Required for ANIm analysis
depends_on('mummer', type='run')<|fim▁end|> | #
# This file is part of Spack. |
<|file_name|>registration-embedded.js<|end_file_name|><|fim▁begin|>import Component from '@glimmer/component';
import VerifiLogoSvg from 'dummy/images/media-registry/verifi-logo.svg';
export default class RegistrationEmbedded extends Component {
get registrationEmbedded() {
let verifi_id = this.args.model?.verifi_id;
<|fim▁hole|> type: 'registration',
imgURL: VerifiLogoSvg,
title: 'Verifi Registry',
description: verifi_id,
fields: [
{
title: 'asset type',
value: this.args.model.asset_type || 'Master Recording',
},
{
title: 'created',
value: this.args.model.verifi_reg_date,
type: 'date',
},
],
};
}
return null;
}
}<|fim▁end|> | if (verifi_id) {
return {
id: verifi_id, |
<|file_name|>setup.py<|end_file_name|><|fim▁begin|># Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'DESCRIPTION.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='melody_scripter',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='0.0.8',
description='Melody Scripter, for parsing melodies from a simple textual format',
long_description=long_description,
# The project's main homepage.
url='https://github.com/pdorrell/melody_scripter',
# Author details
author='Philip Dorrell',
author_email='http://thinkinghard.com/email.html',
<|fim▁hole|> license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Intended Audience :: Musicians',
'Topic :: Software Development :: Music',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
],
# What does your project relate to?
keywords='music parsing',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['regex>=2015.09.15',
'midi>=0.2.3'],
dependency_links=[
"https://github.com/vishnubob/python-midi/archive/v0.2.3.zip#egg=midi-0.2.3"
],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
extras_require={
'dev': ['check-manifest'],
'test': ['nose'],
},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
package_data={
},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
data_files=[],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'song2midi=melody_scripter.song2midi:main',
'play_song=melody_scripter.play_song:main',
],
},
)<|fim▁end|> | # Choose your license |
<|file_name|>BatchCheckLayerAvailabilityResult.cpp<|end_file_name|><|fim▁begin|>/*
* Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
#include <aws/ecr/model/BatchCheckLayerAvailabilityResult.h>
#include <aws/core/utils/json/JsonSerializer.h>
#include <aws/core/AmazonWebServiceResult.h>
#include <aws/core/utils/StringUtils.h>
#include <aws/core/utils/UnreferencedParam.h>
#include <utility>
using namespace Aws::ECR::Model;
using namespace Aws::Utils::Json;
using namespace Aws::Utils;
using namespace Aws;
BatchCheckLayerAvailabilityResult::BatchCheckLayerAvailabilityResult()
{
}
BatchCheckLayerAvailabilityResult::BatchCheckLayerAvailabilityResult(const AmazonWebServiceResult<JsonValue>& result)
{
*this = result;
}
BatchCheckLayerAvailabilityResult& BatchCheckLayerAvailabilityResult::operator =(const AmazonWebServiceResult<JsonValue>& result)
{
const JsonValue& jsonValue = result.GetPayload();
if(jsonValue.ValueExists("layers"))
{
Array<JsonValue> layersJsonList = jsonValue.GetArray("layers");
for(unsigned layersIndex = 0; layersIndex < layersJsonList.GetLength(); ++layersIndex)
{
m_layers.push_back(layersJsonList[layersIndex].AsObject());
}
}
if(jsonValue.ValueExists("failures"))
{
Array<JsonValue> failuresJsonList = jsonValue.GetArray("failures");
for(unsigned failuresIndex = 0; failuresIndex < failuresJsonList.GetLength(); ++failuresIndex)
{
m_failures.push_back(failuresJsonList[failuresIndex].AsObject());<|fim▁hole|>
return *this;
}<|fim▁end|> | }
} |
<|file_name|>noncentral_distr.py<|end_file_name|><|fim▁begin|>from numpy import sqrt
from pacal.standard_distr import NormalDistr, ChiSquareDistr
from pacal.distr import Distr, SumDistr, DivDistr, InvDistr
from pacal.distr import sqrt as distr_sqrt
class NoncentralTDistr(DivDistr):
def __init__(self, df = 2, mu = 0):
d1 = NormalDistr(mu, 1)
d2 = distr_sqrt(ChiSquareDistr(df) / df)
super(NoncentralTDistr, self).__init__(d1, d2)
self.df = df
self.mu = mu
def __str__(self):
return "NoncentralTDistr(df={0},mu={1})#{2}".format(self.df, self.mu, self.id())
def getName(self):<|fim▁hole|>
class NoncentralChiSquareDistr(SumDistr):
def __new__(cls, df, lmbda = 0):
assert df >= 1
d1 = NormalDistr(sqrt(lmbda))**2
if df == 1:
return d1
d2 = ChiSquareDistr(df - 1)
ncc2 = super(NoncentralChiSquareDistr, cls).__new__(cls, d1, d2)
super(NoncentralChiSquareDistr, ncc2).__init__(d1, d2)
ncc2.df = df
ncc2.lmbda = lmbda
return ncc2
def __init__(self, df, lmbda = 0):
pass
def __str__(self):
return "NoncentralChiSquare(df={0},lambda={1})#{2}".format(self.df, self.lmbda, self.id())
def getName(self):
return "NoncChi2({0},{1})".format(self.df, self.lmbda)
class NoncentralBetaDistr(InvDistr):
def __init__(self, alpha = 1, beta = 1, lmbda = 0):
d = 1 + ChiSquareDistr(2.0 * beta) / NoncentralChiSquareDistr(2 * alpha, lmbda)
super(NoncentralBetaDistr, self).__init__(d)
self.alpha = alpha
self.beta = beta
self.lmbda = lmbda
def __str__(self):
return "NoncentralBetaDistr(alpha={0},beta={1},lambda={2})#{3}".format(self.alpha, self.beta, self.lmbda, self.id())
def getName(self):
return "NoncBeta({0},{1},{2})".format(self.alpha, self.beta, self.lmbda)
class NoncentralFDistr(DivDistr):
def __init__(self, df1 = 1, df2 = 1, lmbda = 0):
d1 = NoncentralChiSquareDistr(df1, lmbda) / df1
d2 = ChiSquareDistr(df2) / df2
super(NoncentralFDistr, self).__init__(d1, d2)
self.df1 = df1
self.df2 = df2
self.lmbda = lmbda
def __str__(self):
return "NoncentralFDistr(df1={0},df2={1},lambda={2})#{3}".format(self.df1, self.df2, self.lmbda, self.id())
def getName(self):
return "NoncF({0},{1},{2})".format(self.df1, self.df2, self.lmbda)<|fim▁end|> | return "NoncT({0},{1})".format(self.df, self.mu) |
<|file_name|>resolve.js<|end_file_name|><|fim▁begin|>'use strict';
var _Object$assign = require('babel-runtime/core-js/object/assign')['default'];
var _getIterator = require('babel-runtime/core-js/get-iterator')['default'];
var fs = require('fs'),
path = require('path'),
resolve = require('resolve');
var CASE_INSENSITIVE = fs.existsSync(path.join(__dirname, 'reSOLVE.js'));
// http://stackoverflow.com/a/27382838
function fileExistsWithCaseSync(_x) {
var _again = true;
_function: while (_again) {
var filepath = _x;
dir = filenames = undefined;
_again = false;
// shortcut exit
if (!fs.existsSync(filepath)) return false;
var dir = path.dirname(filepath);
if (dir === '/' || dir === '.' || /^[A-Z]:\\$/.test(dir)) return true;
var filenames = fs.readdirSync(dir);
if (filenames.indexOf(path.basename(filepath)) === -1) {
return false;
}
_x = dir;
_again = true;
continue _function;
}
}
function fileExists(filepath) {
if (CASE_INSENSITIVE) {
return fileExistsWithCaseSync(filepath);
} else {
return fs.existsSync(filepath);
}
}
function opts(basedir, settings) {
// pulls all items from 'import/resolve'
return _Object$assign({}, settings['import/resolve'], { basedir: basedir });
}
/**
* wrapper around resolve
* @param {string} p - module path
* @param {object} context - ESLint context
* @return {string} - the full module filesystem path
*/
module.exports = function (p, context) {
function withResolver(resolver) {
// resolve just returns the core module id, which won't appear to exist
if (resolver.isCore(p)) return p;
try {
var file = resolver.sync(p, opts(path.dirname(context.getFilename()), context.settings));
if (!fileExists(file)) return null;
return file;
} catch (err) {
// probably want something more general here
if (err.message.indexOf('Cannot find module') === 0) {
return null;
}
throw err;
}
}
var resolvers = (context.settings['import/resolvers'] || ['resolve']).map(require);
var _iteratorNormalCompletion = true;
var _didIteratorError = false;
var _iteratorError = undefined;
try {
for (var _iterator = _getIterator(resolvers), _step; !(_iteratorNormalCompletion = (_step = _iterator.next()).done); _iteratorNormalCompletion = true) {
var resolver = _step.value;
var file = withResolver(resolver);
if (file) return file;
}<|fim▁hole|> _iteratorError = err;
} finally {
try {
if (!_iteratorNormalCompletion && _iterator['return']) {
_iterator['return']();
}
} finally {
if (_didIteratorError) {
throw _iteratorError;
}
}
}
return null;
};
module.exports.relative = function (p, r, settings) {
try {
var file = resolve.sync(p, opts(path.dirname(r), settings));
if (!fileExists(file)) return null;
return file;
} catch (err) {
if (err.message.indexOf('Cannot find module') === 0) return null;
throw err; // else
}
};<|fim▁end|> | } catch (err) {
_didIteratorError = true; |
<|file_name|>pool-test.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# Copyright (C) 2010 Red Hat, Inc.
#
# This is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of
# the License, or (at your option) any later version.<|fim▁hole|># This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this software; if not, write to the Free
# Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA, or see the FSF site: http://www.fsf.org.
import http
import xmlfmt
import yamlfmt
import jsonfmt
from testutils import *
# Parse the common command-line options shared by these REST test scripts.
opts = parseOptions()

# Optional positional arguments name the cluster and template used to create
# a pool; without them the script only lists the existing pools (read-only).
(cluster, template) = (None, None)
if len(opts['oargs']) >= 2:
    (cluster, template) = opts['oargs'][0:2]
# Discover the API collection URIs advertised by the server via HEAD.
links = http.HEAD_for_links(opts)
# NOTE(review): only the XML representation is exercised even though yamlfmt
# and jsonfmt are imported above -- presumably intentional; confirm.
for fmt in [xmlfmt]:
    t = TestUtils(opts, fmt)
    print "=== ", fmt.MEDIA_TYPE, " ==="
    # GET each existing VM pool individually to exercise the detail URI.
    for pool in t.get(links['vmpools']):
        t.get(pool.href)
    # Creation test requires the cluster/template arguments.
    if cluster is None:
        continue
    # Build and POST a two-VM pool on the given cluster from the template.
    pool = fmt.VmPool()
    pool.name = randomName('foo')
    pool.size = "2"
    pool.cluster = fmt.Cluster()
    pool.cluster.id = t.find(links['clusters'], cluster).id
    pool.template = fmt.Template()
    pool.template.id = t.find(links['templates'], template).id
    pool = t.create(links['vmpools'], pool)
    # Collect the VMs instantiated as members of the new pool.
    vms_in_pool = []
    for vm in t.get(links['vms']):
        if not hasattr(vm, "vmpool"):
            continue
        if vm.vmpool.id == pool.id:
            vms_in_pool.append(vm)
    assert len(vms_in_pool) == 2, "Expected 2 VMs with pool ID '" + pool.id + "', got " + str(len(vms_in_pool))
    # Tear down: detach and delete each member VM, then the pool itself.
    for vm in vms_in_pool:
        t.syncAction(vm.actions, "detach")
        t.delete(vm.href)
    t.delete(pool.href)
<|file_name|>tensor_signature.py<|end_file_name|><|fim▁begin|># Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TensorSignature class and utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
<|fim▁hole|>class TensorSignature(collections.namedtuple(
"TensorSignature", ["dtype", "shape", "is_sparse"])):
"""Signature of the `Tensor` object.
Useful to check compatibility of tensors.
Attributes:
dtype: `DType` object.
shape: `TensorShape` object.
"""
def __new__(cls, tensor):
if isinstance(tensor, ops.SparseTensor):
return super(TensorSignature, cls).__new__(
cls, dtype=tensor.values.dtype, shape=None, is_sparse=True)
return super(TensorSignature, cls).__new__(
cls, dtype=tensor.dtype, shape=tensor.get_shape(), is_sparse=False)
def is_compatible_with(self, other):
"""Returns True if signatures are compatible."""
def _shape_is_compatible_0dim(this, other):
other = tensor_shape.as_shape(other)
if this.ndims != other.ndims:
return False
for dim, (x_dim, y_dim) in enumerate(zip(this.dims, other.dims)):
if dim == 0:
continue
if not x_dim.is_compatible_with(y_dim):
return False
return True
if other.is_sparse:
return self.is_sparse and self.dtype.is_compatible_with(other.dtype)
return (self.dtype.is_compatible_with(other.dtype) and
_shape_is_compatible_0dim(self.shape, other.shape) and
not self.is_sparse)
def get_placeholder(self):
if self.is_sparse:
return array_ops.sparse_placeholder(dtype=self.dtype)
return array_ops.placeholder(dtype=self.dtype, shape=self.shape)
def tensors_compatible(tensors, signatures):
  """Check that tensors are compatible with signatures.

  Args:
    tensors: Dict of `Tensor` objects or single `Tensor` object.
    signatures: Dict of `TensorSignature` objects or
      single `TensorSignature` object.

  Returns:
    True if all tensors are compatible, False otherwise.
  """
  tensors_are_dict = isinstance(tensors, dict)
  # A dict on one side requires a dict on the other.
  if tensors_are_dict != isinstance(signatures, dict):
    return False
  if not tensors_are_dict:
    # Single tensor against single signature.
    return TensorSignature(tensors).is_compatible_with(signatures)
  # Every signature key must be present and its tensor compatible.
  return all(
      key in tensors and
      TensorSignature(tensors[key]).is_compatible_with(signatures[key])
      for key in signatures)
def create_signatures(tensors):
  """Creates TensorSignature objects for given tensors.

  Args:
    tensors: Dict of `Tensor` objects or single `Tensor`.

  Returns:
    Dict of `TensorSignature` objects or single `TensorSignature`.
  """
  if not isinstance(tensors, dict):
    # Single-tensor case mirrors the single-tensor input.
    return TensorSignature(tensors)
  return dict((key, TensorSignature(value))
              for key, value in tensors.items())
def create_placeholders_from_signatures(signatures):
  """Creates placeholders from given signatures.

  Args:
    signatures: Dict of `TensorSignature` objects or single `TensorSignature`.

  Returns:
    Dict of `tf.placeholder` objects or single `tf.placeholder`.
  """
  if isinstance(signatures, dict):
    # Mirror the input structure: one placeholder per signature key.
    return dict((key, signature.get_placeholder())
                for key, signature in signatures.items())
  return signatures.get_placeholder()
<|file_name|>mnist_lenet2.py<|end_file_name|><|fim▁begin|>import gzip
import cPickle
import numpy as np
import theano
import theano.tensor as T
from neuralmind import NeuralNetwork
from layers import HiddenLayer
from layers import ConvolutionLayer
from layers import FlattenLayer
import activations
def load_data(dataset):
print '... loading data'
<|fim▁hole|> f = gzip.open(dataset, 'rb')
train_set, valid_set, test_set = cPickle.load(f)
f.close()
def shared_dataset(data_xy, borrow=True):
data_x, data_y = data_xy
shared_x = theano.shared(np.asarray(data_x, dtype=theano.config.floatX), borrow=borrow)
shared_y = theano.shared(np.asarray(data_y, dtype=theano.config.floatX), borrow=borrow)
return shared_x, T.cast(shared_y, 'int32')
test_set_x, test_set_y = shared_dataset(test_set)
valid_set_x, valid_set_y = shared_dataset(valid_set)
train_set_x, train_set_y = shared_dataset(train_set)
rval = [(train_set_x, train_set_y), (valid_set_x, valid_set_y), (test_set_x, test_set_y)]
return rval
# Load MNIST as [(train), (valid), (test)] theano shared-variable pairs.
datasets = load_data("mnist.pkl.gz")

# Conv -> flatten -> two dense layers; final softmax over the 10 digits.
model = NeuralNetwork(
	n_inputs=28*28,
	batch_size=20,
	layers = [
		# 40 feature maps of 5x5 filters over the 28x28 input image.
		(ConvolutionLayer,
		{
			'image_shape': (1, 28, 28),
			'filter_shape': (1, 5, 5),
			'n_kernels': 40,
			'non_linearity': activations.rectify
		}),
		# Collapse the conv output into a vector for the dense layers.
		(FlattenLayer, {}),
		(HiddenLayer,
		{
			'n_units': 80,
			'non_linearity': activations.rectify
		}),
		# Output layer: class probabilities for the 10 digits.
		(HiddenLayer,
		{
			'n_units': 10,
			'non_linearity': activations.softmax
		})
	]
)

# Train on the training split, validating against the validation split.
model.train(datasets[0], datasets[1])
<|file_name|>compositor.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use compositor_data::{CompositorData, DoesntWantScrollEvents, WantsScrollEvents};
use compositor_task::{Msg, CompositorTask, Exit, ChangeReadyState, SetIds, LayerProperties};
use compositor_task::{GetGraphicsMetadata, CreateOrUpdateRootLayer, CreateOrUpdateDescendantLayer};
use compositor_task::{SetLayerOrigin, Paint, ScrollFragmentPoint, LoadComplete};
use compositor_task::{ShutdownComplete, ChangeRenderState, RenderMsgDiscarded};
use constellation::SendableFrameTree;
use events;
use events::ScrollPositionChanged;
use pipeline::CompositionPipeline;
use windowing;
use windowing::{FinishedWindowEvent, IdleWindowEvent, LoadUrlWindowEvent, MouseWindowClickEvent};
use windowing::{MouseWindowEvent, MouseWindowEventClass, MouseWindowMouseDownEvent};
use windowing::{MouseWindowMouseUpEvent, MouseWindowMoveEventClass, NavigationWindowEvent};
use windowing::{QuitWindowEvent, RefreshWindowEvent, ResizeWindowEvent, ScrollWindowEvent};
use windowing::{WindowEvent, WindowMethods, WindowNavigateMsg, ZoomWindowEvent};
use windowing::PinchZoomWindowEvent;
use azure::azure_hl;
use std::cmp;
use std::num::Zero;
use std::time::duration::Duration;
use geom::point::{Point2D, TypedPoint2D};
use geom::rect::{Rect, TypedRect};
use geom::size::TypedSize2D;
use geom::scale_factor::ScaleFactor;
use gfx::render_task::{RenderChan, RenderMsg, RenderRequest, UnusedBufferMsg};
use layers::geometry::{DevicePixel, LayerPixel};
use layers::layers::{BufferRequest, Layer, LayerBufferSet};
use layers::rendergl;
use layers::rendergl::RenderContext;
use layers::scene::Scene;
use opengles::gl2;
use png;
use servo_msg::compositor_msg::{Blank, Epoch, FinishedLoading, IdleRenderState, LayerId};
use servo_msg::compositor_msg::{ReadyState, RenderingRenderState, RenderState, Scrollable};
use servo_msg::constellation_msg::{ConstellationChan, ExitMsg, LoadUrlMsg, NavigateMsg};
use servo_msg::constellation_msg::{LoadData, PipelineId, ResizedWindowMsg, WindowSizeData};
use servo_msg::constellation_msg;
use servo_util::geometry::{PagePx, ScreenPx, ViewportPx};
use servo_util::memory::MemoryProfilerChan;
use servo_util::opts;
use servo_util::time::{profile, TimeProfilerChan};
use servo_util::{memory, time};
use std::io::timer::sleep;
use std::collections::hashmap::HashMap;
use std::path::Path;
use std::rc::Rc;
use time::precise_time_s;
use url::Url;
/// The compositor: owns the layer scene graph, translates windowing-system
/// events into constellation/render-task messages, and draws the page into
/// the native window (or an output image file).
pub struct IOCompositor<Window: WindowMethods> {
    /// The application window.
    window: Rc<Window>,
    /// The port on which we receive messages.
    port: Receiver<Msg>,
    /// The render context.
    context: RenderContext,
    /// The root pipeline.
    root_pipeline: Option<CompositionPipeline>,
    /// The canvas to paint a page.
    scene: Scene<CompositorData>,
    /// The application window size.
    window_size: TypedSize2D<DevicePixel, uint>,
    /// "Mobile-style" zoom that does not reflow the page.
    viewport_zoom: ScaleFactor<PagePx, ViewportPx, f32>,
    /// "Desktop-style" zoom that resizes the viewport to fit the window.
    /// See `ViewportPx` docs in util/geom.rs for details.
    page_zoom: ScaleFactor<ViewportPx, ScreenPx, f32>,
    /// The device pixel ratio for this window.
    hidpi_factor: ScaleFactor<ScreenPx, DevicePixel, f32>,
    /// Tracks whether the renderer has finished its first rendering
    composite_ready: bool,
    /// Tracks whether we are in the process of shutting down, or have shut down and should close
    /// the compositor.
    shutdown_state: ShutdownState,
    /// Tracks whether we need to re-composite a page.
    recomposite: bool,
    /// Tracks outstanding render_msg's sent to the render tasks.
    /// Only maintained when compositing to an output file
    /// (see `has_render_msg_tracking`).
    outstanding_render_msgs: uint,
    /// Tracks whether the zoom action has happend recently.
    zoom_action: bool,
    /// The time of the last zoom action has started.
    zoom_time: f64,
    /// Current display/reflow status of each pipeline.
    ready_states: HashMap<PipelineId, ReadyState>,
    /// Current render status of each pipeline.
    render_states: HashMap<PipelineId, RenderState>,
    /// Whether the page being rendered has loaded completely.
    /// Differs from ReadyState because we can finish loading (ready)
    /// many times for a single page.
    got_load_complete_message: bool,
    /// The channel on which messages can be sent to the constellation.
    constellation_chan: ConstellationChan,
    /// The channel on which messages can be sent to the time profiler.
    time_profiler_chan: TimeProfilerChan,
    /// The channel on which messages can be sent to the memory profiler.
    memory_profiler_chan: MemoryProfilerChan,
    /// Pending scroll to fragment event, if any
    fragment_point: Option<Point2D<f32>>
}
/// Shutdown progress of the compositor, advanced by `handle_message`.
#[deriving(PartialEq)]
enum ShutdownState {
    /// Normal operation.
    NotShuttingDown,
    /// An exit was requested; waiting for the constellation to confirm.
    ShuttingDown,
    /// Shutdown is complete; the event loop should terminate.
    FinishedShuttingDown,
}
impl<Window: WindowMethods> IOCompositor<Window> {
    /// Builds a compositor with an empty initial scene sized to the window's
    /// framebuffer. Does not enter the event loop; callers use `create`.
    fn new(window: Rc<Window>,
           port: Receiver<Msg>,
           constellation_chan: ConstellationChan,
           time_profiler_chan: TimeProfilerChan,
           memory_profiler_chan: MemoryProfilerChan) -> IOCompositor<Window> {
        // Create an initial layer tree.
        //
        // TODO: There should be no initial layer tree until the renderer creates one from the
        // display list. This is only here because we don't have that logic in the renderer yet.
        let window_size = window.framebuffer_size();
        let hidpi_factor = window.hidpi_factor();
        let show_debug_borders = opts::get().show_debug_borders;
        IOCompositor {
            window: window,
            port: port,
            context: rendergl::RenderContext::new(CompositorTask::create_graphics_context(),
                                                  show_debug_borders),
            root_pipeline: None,
            scene: Scene::new(Rect {
                origin: Zero::zero(),
                size: window_size.as_f32(),
            }),
            window_size: window_size,
            hidpi_factor: hidpi_factor,
            composite_ready: false,
            shutdown_state: NotShuttingDown,
            recomposite: false,
            page_zoom: ScaleFactor(1.0),
            viewport_zoom: ScaleFactor(1.0),
            zoom_action: false,
            zoom_time: 0f64,
            ready_states: HashMap::new(),
            render_states: HashMap::new(),
            got_load_complete_message: false,
            constellation_chan: constellation_chan,
            time_profiler_chan: time_profiler_chan,
            memory_profiler_chan: memory_profiler_chan,
            fragment_point: None,
            outstanding_render_msgs: 0,
        }
    }
    /// Constructs a compositor and runs its event loop until shutdown.
    /// This is the public entry point; it does not return until the
    /// compositor has finished shutting down.
    pub fn create(window: Rc<Window>,
                  port: Receiver<Msg>,
                  constellation_chan: ConstellationChan,
                  time_profiler_chan: TimeProfilerChan,
                  memory_profiler_chan: MemoryProfilerChan) {
        let mut compositor = IOCompositor::new(window,
                                               port,
                                               constellation_chan,
                                               time_profiler_chan,
                                               memory_profiler_chan);
        // Apply the initial page/viewport zoom to the scene scale.
        compositor.update_zoom_transform();
        // Starts the compositor, which listens for messages on the specified port.
        compositor.run();
    }
    /// The compositor main loop: alternately drains compositor-task messages
    /// and windowing-system events, recompositing when needed, until
    /// shutdown completes. On exit it releases all tile buffers and tells
    /// the profilers to shut down.
    fn run (&mut self) {
        // Tell the constellation about the initial window size.
        self.send_window_size();
        // Enter the main event loop.
        while self.shutdown_state != FinishedShuttingDown {
            // Check for new messages coming from the rendering task.
            self.handle_message();
            if self.shutdown_state == FinishedShuttingDown {
                // We have exited the compositor and passing window
                // messages to script may crash.
                debug!("Exiting the compositor due to a request from script.");
                break;
            }
            // Check for messages coming from the windowing system.
            let msg = self.window.recv();
            self.handle_window_message(msg);
            // If asked to recomposite and renderer has run at least once
            if self.recomposite && self.composite_ready {
                self.recomposite = false;
                self.composite();
            }
            // Throttle the loop to roughly 100 iterations per second.
            sleep(Duration::milliseconds(10));
            // If a pinch-zoom happened recently, ask for tiles at the new resolution
            if self.zoom_action && precise_time_s() - self.zoom_time > 0.3 {
                self.zoom_action = false;
                self.scene.mark_layer_contents_as_changed_recursively();
                self.send_buffer_requests_for_all_layers();
            }
        }
        // Clear out the compositor layers so that painting tasks can destroy the buffers.
        match self.scene.root {
            None => {}
            Some(ref layer) => CompositorData::forget_all_tiles(layer.clone()),
        }
        // Drain compositor port, sometimes messages contain channels that are blocking
        // another task from finishing (i.e. SetIds)
        loop {
            match self.port.try_recv() {
                Err(_) => break,
                Ok(_) => {},
            }
        }
        // Tell the profiler and memory profiler to shut down.
        let TimeProfilerChan(ref time_profiler_chan) = self.time_profiler_chan;
        time_profiler_chan.send(time::ExitMsg);
        let MemoryProfilerChan(ref memory_profiler_chan) = self.memory_profiler_chan;
        memory_profiler_chan.send(memory::ExitMsg);
    }
    /// Drains all pending compositor-task messages without blocking.
    /// Messages are dispatched against the current shutdown state: once
    /// shutdown has begun, only Exit/ShutdownComplete/SetIds are acted on.
    fn handle_message(&mut self) {
        loop {
            match (self.port.try_recv(), self.shutdown_state) {
                (_, FinishedShuttingDown) =>
                    fail!("compositor shouldn't be handling messages after shutting down"),
                (Err(_), _) => break,
                (Ok(Exit(chan)), _) => {
                    debug!("shutting down the constellation");
                    let ConstellationChan(ref con_chan) = self.constellation_chan;
                    con_chan.send(ExitMsg);
                    // Acknowledge the exit request before tearing down.
                    chan.send(());
                    self.shutdown_state = ShuttingDown;
                }
                (Ok(ShutdownComplete), _) => {
                    debug!("constellation completed shutdown");
                    self.shutdown_state = FinishedShuttingDown;
                    break;
                }
                (Ok(ChangeReadyState(pipeline_id, ready_state)), NotShuttingDown) => {
                    self.change_ready_state(pipeline_id, ready_state);
                }
                (Ok(ChangeRenderState(pipeline_id, render_state)), NotShuttingDown) => {
                    self.change_render_state(pipeline_id, render_state);
                }
                (Ok(RenderMsgDiscarded), NotShuttingDown) => {
                    self.remove_outstanding_render_msg();
                }
                (Ok(SetIds(frame_tree, response_chan, new_constellation_chan)), _) => {
                    self.set_frame_tree(&frame_tree,
                                        response_chan,
                                        new_constellation_chan);
                }
                (Ok(GetGraphicsMetadata(chan)), NotShuttingDown) => {
                    chan.send(Some(azure_hl::current_graphics_metadata()));
                }
                (Ok(CreateOrUpdateRootLayer(layer_properties)), NotShuttingDown) => {
                    self.create_or_update_root_layer(layer_properties);
                }
                (Ok(CreateOrUpdateDescendantLayer(layer_properties)), NotShuttingDown) => {
                    self.create_or_update_descendant_layer(layer_properties);
                }
                (Ok(SetLayerOrigin(pipeline_id, layer_id, origin)), NotShuttingDown) => {
                    self.set_layer_origin(pipeline_id, layer_id, origin);
                }
                (Ok(Paint(pipeline_id, epoch, replies)), NotShuttingDown) => {
                    for (layer_id, new_layer_buffer_set) in replies.into_iter() {
                        self.paint(pipeline_id, layer_id, new_layer_buffer_set, epoch);
                    }
                    self.remove_outstanding_render_msg();
                }
                (Ok(ScrollFragmentPoint(pipeline_id, layer_id, point)), NotShuttingDown) => {
                    self.scroll_fragment_to_point(pipeline_id, layer_id, point);
                }
                (Ok(LoadComplete(..)), NotShuttingDown) => {
                    self.got_load_complete_message = true;
                }
                // When we are shutting_down, we need to avoid performing operations
                // such as Paint that may crash because we have begun tearing down
                // the rest of our resources.
                (_, ShuttingDown) => { }
            }
        }
    }
    /// Records the new ready state for `pipeline_id` and refreshes the
    /// window chrome with the most conservative state across all pipelines.
    fn change_ready_state(&mut self, pipeline_id: PipelineId, ready_state: ReadyState) {
        self.ready_states.insert_or_update_with(pipeline_id,
                                                ready_state,
                                                |_key, value| *value = ready_state);
        self.window.set_ready_state(self.get_earliest_pipeline_ready_state());
    }
    /// Returns the minimum (least-advanced) ready state over all known
    /// pipelines, or `Blank` when no pipeline has been registered yet.
    fn get_earliest_pipeline_ready_state(&self) -> ReadyState {
        if self.ready_states.len() == 0 {
            return Blank;
        }
        // FinishedLoading is the maximum state, so folding with min yields
        // the least-advanced pipeline's state.
        return self.ready_states.values().fold(FinishedLoading, |a, &b| cmp::min(a, b));
    }
    /// Records the new render state for `pipeline_id`, mirrors it to the
    /// window chrome, and marks the compositor ready to composite once any
    /// pipeline goes idle.
    fn change_render_state(&mut self, pipeline_id: PipelineId, render_state: RenderState) {
        self.render_states.insert_or_update_with(pipeline_id,
                                                 render_state,
                                                 |_key, value| *value = render_state);
        self.window.set_render_state(render_state);
        if render_state == IdleRenderState {
            self.composite_ready = true;
        }
    }
fn all_pipelines_in_idle_render_state(&self) -> bool {
if self.ready_states.len() == 0 {
return false;
}
return self.render_states.values().all(|&value| value == IdleRenderState);
}
fn has_render_msg_tracking(&self) -> bool {
// only track RenderMsg's if the compositor outputs to a file.
opts::get().output_file.is_some()
}
fn has_outstanding_render_msgs(&self) -> bool {
self.has_render_msg_tracking() && self.outstanding_render_msgs > 0
}
    /// Bumps the outstanding RenderMsg counter by `count`; a no-op unless
    /// tracking is enabled (output-to-file mode).
    fn add_outstanding_render_msg(&mut self, count: uint) {
        // return early if not tracking render_msg's
        if !self.has_render_msg_tracking() {
            return;
        }
        debug!("add_outstanding_render_msg {}", self.outstanding_render_msgs);
        self.outstanding_render_msgs += count;
    }
    /// Decrements the outstanding RenderMsg counter; a no-op unless tracking
    /// is enabled. Logs (rather than underflows) if more completions arrive
    /// than were recorded.
    fn remove_outstanding_render_msg(&mut self) {
        if !self.has_render_msg_tracking() {
            return;
        }
        if self.outstanding_render_msgs > 0 {
            self.outstanding_render_msgs -= 1;
        } else {
            debug!("too many rerender msgs completed");
        }
    }
    /// Replaces the scene with a new layer skeleton built from `frame_tree`,
    /// releasing the old root's tiles first, and rebinds the constellation
    /// channel. Acknowledges the sender immediately via `response_chan`.
    fn set_frame_tree(&mut self,
                      frame_tree: &SendableFrameTree,
                      response_chan: Sender<()>,
                      new_constellation_chan: ConstellationChan) {
        response_chan.send(());
        self.root_pipeline = Some(frame_tree.pipeline.clone());
        // If we have an old root layer, release all old tiles before replacing it.
        match self.scene.root {
            Some(ref mut layer) => CompositorData::clear_all_tiles(layer.clone()),
            None => { }
        }
        self.scene.root = Some(self.create_frame_tree_root_layers(frame_tree, None));
        self.scene.set_root_layer_size(self.window_size.as_f32());
        // Initialize the new constellation channel by sending it the root window size.
        self.constellation_chan = new_constellation_chan;
        self.send_window_size();
    }
    /// Recursively builds the skeleton root layer for `frame_tree` and all
    /// of its child (iframe) frames, registering initial Ready/Render states
    /// for each pipeline. `frame_rect` clips an iframe's layer to its frame.
    fn create_frame_tree_root_layers(&mut self,
                                     frame_tree: &SendableFrameTree,
                                     frame_rect: Option<TypedRect<PagePx, f32>>)
                                     -> Rc<Layer<CompositorData>> {
        // Initialize the ReadyState and RenderState for this pipeline.
        self.ready_states.insert(frame_tree.pipeline.id, Blank);
        self.render_states.insert(frame_tree.pipeline.id, RenderingRenderState);
        // Placeholder properties; real ones arrive via CreateOrUpdateRootLayer.
        let layer_properties = LayerProperties {
            pipeline_id: frame_tree.pipeline.id,
            epoch: Epoch(0),
            id: LayerId::null(),
            rect: Rect::zero(),
            background_color: azure_hl::Color::new(0., 0., 0., 0.),
            scroll_policy: Scrollable,
        };
        let root_layer = CompositorData::new_layer(frame_tree.pipeline.clone(),
                                                   layer_properties,
                                                   WantsScrollEvents,
                                                   opts::get().tile_size);
        match frame_rect {
            Some(ref frame_rect) => {
                // An iframe layer is clipped to its frame's bounds.
                *root_layer.masks_to_bounds.borrow_mut() = true;
                let frame_rect = frame_rect.to_untyped();
                *root_layer.bounds.borrow_mut() = Rect::from_untyped(&frame_rect);
            }
            None => {}
        }
        for kid in frame_tree.children.iter() {
            root_layer.add_child(self.create_frame_tree_root_layers(&kid.frame_tree, kid.rect));
        }
        return root_layer;
    }
fn find_layer_with_pipeline_and_layer_id(&self,
pipeline_id: PipelineId,
layer_id: LayerId)
-> Option<Rc<Layer<CompositorData>>> {
match self.scene.root {
Some(ref root_layer) => {
CompositorData::find_layer_with_pipeline_and_layer_id(root_layer.clone(),
pipeline_id,
layer_id)
}
None => None,
}
}
fn find_pipeline_root_layer(&self, pipeline_id: PipelineId) -> Rc<Layer<CompositorData>> {
match self.find_layer_with_pipeline_and_layer_id(pipeline_id, LayerId::null()) {
Some(ref layer) => layer.clone(),
None => fail!("Tried to create or update layer for unknown pipeline"),
}
}
fn update_layer_if_exists(&mut self, properties: LayerProperties) -> bool {
match self.find_layer_with_pipeline_and_layer_id(properties.pipeline_id, properties.id) {
Some(existing_layer) => {
CompositorData::update_layer(existing_layer.clone(), properties);
true
}
None => false,
}
}
    /// Updates the base content layer for a pipeline, creating it as the
    /// first child of the pipeline's skeleton root layer if necessary, then
    /// applies any pending fragment scroll and requests fresh tiles.
    fn create_or_update_root_layer(&mut self, layer_properties: LayerProperties) {
        let need_new_root_layer = !self.update_layer_if_exists(layer_properties);
        if need_new_root_layer {
            let root_layer = self.find_pipeline_root_layer(layer_properties.pipeline_id);
            CompositorData::update_layer_except_size(root_layer.clone(), layer_properties);
            let root_layer_pipeline = root_layer.extra_data.borrow().pipeline.clone();
            let first_child = CompositorData::new_layer(root_layer_pipeline.clone(),
                                                        layer_properties,
                                                        DoesntWantScrollEvents,
                                                        opts::get().tile_size);
            // Add the first child / base layer to the front of the child list, so that
            // child iframe layers are rendered on top of the base layer. These iframe
            // layers were added previously when creating the layer tree skeleton in
            // create_frame_tree_root_layers.
            root_layer.children().insert(0, first_child);
        }
        self.scroll_layer_to_fragment_point_if_necessary(layer_properties.pipeline_id,
                                                         layer_properties.id);
        self.send_buffer_requests_for_all_layers();
    }
    /// Updates a non-root layer for a pipeline, creating it under the
    /// pipeline's root if it does not exist, then applies any pending
    /// fragment scroll and requests fresh tiles.
    fn create_or_update_descendant_layer(&mut self, layer_properties: LayerProperties) {
        if !self.update_layer_if_exists(layer_properties) {
            self.create_descendant_layer(layer_properties);
        }
        self.scroll_layer_to_fragment_point_if_necessary(layer_properties.pipeline_id,
                                                         layer_properties.id);
        self.send_buffer_requests_for_all_layers();
    }
    /// Creates a new scroll-transparent layer with `layer_properties` and
    /// attaches it to the owning pipeline's root layer.
    fn create_descendant_layer(&self, layer_properties: LayerProperties) {
        let root_layer = self.find_pipeline_root_layer(layer_properties.pipeline_id);
        let root_layer_pipeline = root_layer.extra_data.borrow().pipeline.clone();
        let new_layer = CompositorData::new_layer(root_layer_pipeline,
                                                  layer_properties,
                                                  DoesntWantScrollEvents,
                                                  root_layer.tile_size);
        root_layer.add_child(new_layer);
    }
    /// Reports the current window size to the constellation, expressed in
    /// viewport pixels (after page zoom) and page pixels (after both zooms).
    fn send_window_size(&self) {
        let dppx = self.page_zoom * self.device_pixels_per_screen_px();
        let initial_viewport = self.window_size.as_f32() / dppx;
        let visible_viewport = initial_viewport / self.viewport_zoom;
        let ConstellationChan(ref chan) = self.constellation_chan;
        chan.send(ResizedWindowMsg(WindowSizeData {
            device_pixel_ratio: dppx,
            initial_viewport: initial_viewport,
            visible_viewport: visible_viewport,
        }));
    }
    /// Scrolls the identified layer so that `origin` becomes its top-left,
    /// clamping to the layer's bounds. Only layers that accept scroll events
    /// are moved. Returns false when the layer cannot be found.
    pub fn move_layer(&self,
                      pipeline_id: PipelineId,
                      layer_id: LayerId,
                      origin: TypedPoint2D<LayerPixel, f32>)
                      -> bool {
        match self.find_layer_with_pipeline_and_layer_id(pipeline_id, layer_id) {
            Some(ref layer) => {
                if layer.extra_data.borrow().wants_scroll_events == WantsScrollEvents {
                    // Scroll offsets are negative layer origins.
                    events::clamp_scroll_offset_and_scroll_layer(layer.clone(),
                                                                 TypedPoint2D(0f32, 0f32) - origin);
                }
                true
            }
            None => false,
        }
    }
    /// If a fragment scroll is pending (from a URL #fragment), consumes it
    /// and scrolls the given layer there, scheduling a recomposite.
    fn scroll_layer_to_fragment_point_if_necessary(&mut self,
                                                   pipeline_id: PipelineId,
                                                   layer_id: LayerId) {
        match self.fragment_point.take() {
            Some(point) => {
                if !self.move_layer(pipeline_id, layer_id, Point2D::from_untyped(&point)) {
                    fail!("Compositor: Tried to scroll to fragment with unknown layer.");
                }
                self.recomposite = true;
            }
            None => {}
        };
    }
    /// Moves the identified layer's bounds to `new_origin` (layout-driven
    /// placement, as opposed to user scrolling) and requests fresh tiles.
    fn set_layer_origin(&mut self,
                        pipeline_id: PipelineId,
                        layer_id: LayerId,
                        new_origin: Point2D<f32>) {
        match self.find_layer_with_pipeline_and_layer_id(pipeline_id, layer_id) {
            Some(ref layer) => {
                layer.bounds.borrow_mut().origin = Point2D::from_untyped(&new_origin)
            }
            None => fail!("Compositor received SetLayerOrigin for nonexistent layer"),
        };
        self.send_buffer_requests_for_all_layers();
    }
    /// Accepts freshly rendered tile buffers for a layer and schedules a
    /// recomposite. Fails if the target layer no longer exists.
    fn paint(&mut self,
             pipeline_id: PipelineId,
             layer_id: LayerId,
             new_layer_buffer_set: Box<LayerBufferSet>,
             epoch: Epoch) {
        debug!("compositor received new frame");
        // From now on, if we destroy the buffers, they will leak.
        let mut new_layer_buffer_set = new_layer_buffer_set;
        new_layer_buffer_set.mark_will_leak();
        match self.find_layer_with_pipeline_and_layer_id(pipeline_id, layer_id) {
            Some(ref layer) => {
                // add_buffers returns false on an epoch mismatch.
                assert!(CompositorData::add_buffers(layer.clone(), new_layer_buffer_set, epoch));
                self.recomposite = true;
            }
            None => {
                // FIXME: This may potentially be triggered by a race condition where a
                // buffers are being rendered but the layer is removed before rendering
                // completes.
                fail!("compositor given paint command for non-existent layer");
            }
        }
    }
    /// Scrolls the given layer to `point` now if it exists; otherwise stores
    /// the point so the scroll is applied once the layer is created.
    fn scroll_fragment_to_point(&mut self,
                                pipeline_id: PipelineId,
                                layer_id: LayerId,
                                point: Point2D<f32>) {
        if self.move_layer(pipeline_id, layer_id, Point2D::from_untyped(&point)) {
            self.recomposite = true;
            self.send_buffer_requests_for_all_layers();
        } else {
            // Layer not created yet; defer until create_or_update_*_layer.
            self.fragment_point = Some(point);
        }
    }
    /// Dispatches a single windowing-system event to its handler. Quit and
    /// (optionally) Finished events start constellation shutdown.
    fn handle_window_message(&mut self, event: WindowEvent) {
        match event {
            IdleWindowEvent => {}
            RefreshWindowEvent => {
                self.recomposite = true;
            }
            ResizeWindowEvent(size) => {
                self.on_resize_window_event(size);
            }
            LoadUrlWindowEvent(url_string) => {
                self.on_load_url_window_event(url_string);
            }
            MouseWindowEventClass(mouse_window_event) => {
                self.on_mouse_window_event_class(mouse_window_event);
            }
            MouseWindowMoveEventClass(cursor) => {
                self.on_mouse_window_move_event_class(cursor);
            }
            ScrollWindowEvent(delta, cursor) => {
                self.on_scroll_window_event(delta, cursor);
            }
            ZoomWindowEvent(magnification) => {
                self.on_zoom_window_event(magnification);
            }
            PinchZoomWindowEvent(magnification) => {
                self.on_pinch_zoom_window_event(magnification);
            }
            NavigationWindowEvent(direction) => {
                self.on_navigation_window_event(direction);
            }
            FinishedWindowEvent => {
                // Optionally exit once the page has finished loading
                // (used by headless/test runs).
                let exit = opts::get().exit_after_load;
                if exit {
                    debug!("shutting down the constellation for FinishedWindowEvent");
                    let ConstellationChan(ref chan) = self.constellation_chan;
                    chan.send(ExitMsg);
                    self.shutdown_state = ShuttingDown;
                }
            }
            QuitWindowEvent => {
                debug!("shutting down the constellation for QuitWindowEvent");
                let ConstellationChan(ref chan) = self.constellation_chan;
                chan.send(ExitMsg);
                self.shutdown_state = ShuttingDown;
            }
        }
    }
    /// Handles a window resize: refreshes the hidpi factor (a resize can
    /// accompany a monitor change), then propagates a genuine size change to
    /// the scene and the constellation.
    fn on_resize_window_event(&mut self, new_size: TypedSize2D<DevicePixel, uint>) {
        // A size change could also mean a resolution change.
        let new_hidpi_factor = self.window.hidpi_factor();
        if self.hidpi_factor != new_hidpi_factor {
            self.hidpi_factor = new_hidpi_factor;
            self.update_zoom_transform();
        }
        if self.window_size == new_size {
            return;
        }
        debug!("osmain: window resized to {:?}", new_size);
        self.window_size = new_size;
        self.scene.set_root_layer_size(new_size.as_f32());
        self.send_window_size();
    }
    /// Asks the constellation to load `url_string` into the root pipeline.
    /// Fails if called before the layer tree has been initialized.
    fn on_load_url_window_event(&mut self, url_string: String) {
        debug!("osmain: loading URL `{:s}`", url_string);
        // A new load invalidates the previous page's load-complete flag.
        self.got_load_complete_message = false;
        let root_pipeline_id = match self.scene.root {
            Some(ref layer) => layer.extra_data.borrow().pipeline.id.clone(),
            None => fail!("Compositor: Received LoadUrlWindowEvent without initialized compositor \
                           layers"),
        };
        let msg = LoadUrlMsg(root_pipeline_id, LoadData::new(Url::parse(url_string.as_slice()).unwrap()));
        let ConstellationChan(ref chan) = self.constellation_chan;
        chan.send(msg);
    }
    /// Forwards a click/mouse-down/mouse-up event into the layer tree,
    /// converting the cursor position from device to page coordinates.
    fn on_mouse_window_event_class(&self, mouse_window_event: MouseWindowEvent) {
        let point = match mouse_window_event {
            MouseWindowClickEvent(_, p) => p,
            MouseWindowMouseDownEvent(_, p) => p,
            MouseWindowMouseUpEvent(_, p) => p,
        };
        // Option::iter(): runs once when a root layer exists, else not at all.
        for layer in self.scene.root.iter() {
            events::send_mouse_event(layer.clone(), mouse_window_event, point / self.scene.scale);
        }
    }
    /// Forwards a mouse-move event into the layer tree, converting the
    /// cursor position from device to page coordinates.
    fn on_mouse_window_move_event_class(&self, cursor: TypedPoint2D<DevicePixel, f32>) {
        for layer in self.scene.root.iter() {
            events::send_mouse_move_event(layer.clone(), cursor / self.scene.scale);
        }
    }
    /// Applies a scroll-wheel/trackpad delta at `cursor` to the layer tree
    /// (both converted to page coordinates), recompositing only if the
    /// scroll position actually changed, and requests fresh tiles.
    fn on_scroll_window_event(&mut self,
                              delta: TypedPoint2D<DevicePixel, f32>,
                              cursor: TypedPoint2D<DevicePixel, i32>) {
        let delta = delta / self.scene.scale;
        let cursor = cursor.as_f32() / self.scene.scale;
        let mut scroll = false;
        match self.scene.root {
            Some(ref mut layer) => {
                scroll = events::handle_scroll_event(layer.clone(),
                                                     delta,
                                                     cursor) == ScrollPositionChanged;
            }
            None => { }
        }
        self.recomposite_if(scroll);
        self.send_buffer_requests_for_all_layers();
    }
    /// Device pixels per screen pixel: the command-line override wins;
    /// otherwise 1.0 when compositing to a file (deterministic output),
    /// else the window's actual hidpi factor.
    fn device_pixels_per_screen_px(&self) -> ScaleFactor<ScreenPx, DevicePixel, f32> {
        match opts::get().device_pixels_per_px {
            Some(device_pixels_per_px) => device_pixels_per_px,
            None => match opts::get().output_file {
                Some(_) => ScaleFactor(1.0),
                None => self.hidpi_factor
            }
        }
    }
    /// Device pixels per page pixel: pinch zoom x page zoom x screen density.
    fn device_pixels_per_page_px(&self) -> ScaleFactor<PagePx, DevicePixel, f32> {
        self.viewport_zoom * self.page_zoom * self.device_pixels_per_screen_px()
    }
    /// Recomputes the scene scale from the current zoom levels and device
    /// density, then re-applies the root layer size under the new scale.
    fn update_zoom_transform(&mut self) {
        let scale = self.device_pixels_per_page_px();
        self.scene.scale = ScaleFactor(scale.get());
        // We need to set the size of the root layer again, since the window size
        // has changed in unscaled layer pixels.
        self.scene.set_root_layer_size(self.window_size.as_f32());
    }
    /// Desktop-style zoom: scales the page zoom (clamped to >= 1.0), which
    /// triggers a reflow via the new window size sent to the constellation.
    fn on_zoom_window_event(&mut self, magnification: f32) {
        self.page_zoom = ScaleFactor((self.page_zoom.get() * magnification).max(1.0));
        self.update_zoom_transform();
        self.send_window_size();
    }
    /// Mobile-style pinch zoom: scales the viewport zoom (clamped to >= 1.0)
    /// without reflow, scrolls so the zoom stays centered in the window, and
    /// records the time so `run` can request higher-resolution tiles later.
    fn on_pinch_zoom_window_event(&mut self, magnification: f32) {
        self.zoom_action = true;
        self.zoom_time = precise_time_s();
        let old_viewport_zoom = self.viewport_zoom;
        self.viewport_zoom = ScaleFactor((self.viewport_zoom.get() * magnification).max(1.0));
        let viewport_zoom = self.viewport_zoom;
        self.update_zoom_transform();
        // Scroll as needed
        // Keep the point at the center of the window fixed: shift by half the
        // change in visible page extent along each axis.
        let window_size = self.window_size.as_f32();
        let page_delta: TypedPoint2D<LayerPixel, f32> = TypedPoint2D(
            window_size.width.get() * (viewport_zoom.inv() - old_viewport_zoom.inv()).get() * 0.5,
            window_size.height.get() * (viewport_zoom.inv() - old_viewport_zoom.inv()).get() * 0.5);
        let cursor = TypedPoint2D(-1f32, -1f32); // Make sure this hits the base layer.
        match self.scene.root {
            Some(ref mut layer) => {
                events::handle_scroll_event(layer.clone(),
                                            page_delta,
                                            cursor);
            }
            None => { }
        }
        self.recomposite = true;
    }
    /// Translates a window back/forward request into a constellation
    /// NavigateMsg.
    fn on_navigation_window_event(&self, direction: WindowNavigateMsg) {
        let direction = match direction {
            windowing::Forward => constellation_msg::Forward,
            windowing::Back => constellation_msg::Back,
        };
        let ConstellationChan(ref chan) = self.constellation_chan;
        chan.send(NavigateMsg(direction))
    }
    /// Groups per-layer tile requests by owning pipeline, pairing each group
    /// with that pipeline's render channel and rescaling the request rects
    /// from device to page coordinates.
    fn convert_buffer_requests_to_pipeline_requests_map(&self,
                                                        requests: Vec<(Rc<Layer<CompositorData>>,
                                                                       Vec<BufferRequest>)>) ->
                                                        HashMap<PipelineId, (RenderChan,
                                                                             Vec<RenderRequest>)> {
        let scale = self.device_pixels_per_page_px();
        let mut results:
            HashMap<PipelineId, (RenderChan, Vec<RenderRequest>)> = HashMap::new();
        for (layer, mut layer_requests) in requests.into_iter() {
            let pipeline_id = layer.extra_data.borrow().pipeline.id;
            // Lazily create one (chan, requests) entry per pipeline.
            let &(_, ref mut vec) = results.find_or_insert_with(pipeline_id, |_| {
                (layer.extra_data.borrow().pipeline.render_chan.clone(), Vec::new())
            });
            // All the BufferRequests are in layer/device coordinates, but the render task
            // wants to know the page coordinates. We scale them before sending them.
            for request in layer_requests.iter_mut() {
                request.page_rect = request.page_rect / scale.get();
            }
            vec.push(RenderRequest {
                buffer_requests: layer_requests,
                scale: scale.get(),
                layer_id: layer.extra_data.borrow().id,
                epoch: layer.extra_data.borrow().epoch,
            });
        }
        return results;
    }
    /// Returns all tile buffers the scene no longer needs to the root
    /// pipeline's render task for reuse, scheduling a recomposite when any
    /// were released.
    fn send_back_unused_buffers(&mut self) {
        match self.root_pipeline {
            Some(ref pipeline) => {
                let unused_buffers = self.scene.collect_unused_buffers();
                let have_unused_buffers = unused_buffers.len() > 0;
                self.recomposite = self.recomposite || have_unused_buffers;
                if have_unused_buffers {
                    let message = UnusedBufferMsg(unused_buffers);
                    // send_opt: ignore failure if the render task is gone.
                    let _ = pipeline.render_chan.send_opt(message);
                }
            },
            None => {}
        }
    }
fn send_buffer_requests_for_all_layers(&mut self) {
let mut layers_and_requests = Vec::new();
self.scene.get_buffer_requests(&mut layers_and_requests,
Rect(TypedPoint2D(0f32, 0f32), self.window_size.as_f32()));
// Return unused tiles first, so that they can be reused by any new BufferRequests.
self.send_back_unused_buffers();
if layers_and_requests.len() == 0 {
return;
}
// We want to batch requests for each pipeline to avoid race conditions
// when handling the resulting BufferRequest responses.
let pipeline_requests =
self.convert_buffer_requests_to_pipeline_requests_map(layers_and_requests);
let mut num_render_msgs_sent = 0;
for (_pipeline_id, (chan, requests)) in pipeline_requests.into_iter() {
num_render_msgs_sent += 1;
let _ = chan.send_opt(RenderMsg(requests));
}
self.add_outstanding_render_msg(num_render_msgs_sent);
}
fn is_ready_to_render_image_output(&self) -> bool {
if !self.got_load_complete_message {
return false;
}
if self.get_earliest_pipeline_ready_state() != FinishedLoading {
return false;
}
if self.has_outstanding_render_msgs() {
return false;
}
if !self.all_pipelines_in_idle_render_state() {
return false;
}
return true;
}
fn composite(&mut self) {
let output_image = opts::get().output_file.is_some() &&
self.is_ready_to_render_image_output();
let mut framebuffer_ids = vec!();
let mut texture_ids = vec!();
let (width, height) = (self.window_size.width.get(), self.window_size.height.get());
if output_image {
framebuffer_ids = gl2::gen_framebuffers(1);
gl2::bind_framebuffer(gl2::FRAMEBUFFER, framebuffer_ids[0]);
texture_ids = gl2::gen_textures(1);
gl2::bind_texture(gl2::TEXTURE_2D, texture_ids[0]);
gl2::tex_image_2d(gl2::TEXTURE_2D, 0, gl2::RGB as gl2::GLint, width as gl2::GLsizei,
height as gl2::GLsizei, 0, gl2::RGB, gl2::UNSIGNED_BYTE, None);
gl2::tex_parameter_i(gl2::TEXTURE_2D, gl2::TEXTURE_MAG_FILTER, gl2::NEAREST as gl2::GLint);
gl2::tex_parameter_i(gl2::TEXTURE_2D, gl2::TEXTURE_MIN_FILTER, gl2::NEAREST as gl2::GLint);
gl2::framebuffer_texture_2d(gl2::FRAMEBUFFER, gl2::COLOR_ATTACHMENT0, gl2::TEXTURE_2D,
texture_ids[0], 0);
gl2::bind_texture(gl2::TEXTURE_2D, 0);
}
profile(time::CompositingCategory, None, self.time_profiler_chan.clone(), || {
debug!("compositor: compositing");
// Adjust the layer dimensions as necessary to correspond to the size of the window.
self.scene.viewport = Rect {
origin: Zero::zero(),
size: self.window_size.as_f32(),
};
// Render the scene.
match self.scene.root {
Some(ref layer) => {
rendergl::render_scene(layer.clone(), self.context, &self.scene);
}
None => {}
}
});
if output_image {
let path = from_str::<Path>(opts::get().output_file.as_ref().unwrap().as_slice()).unwrap();
let mut pixels = gl2::read_pixels(0, 0,
width as gl2::GLsizei,
height as gl2::GLsizei,
gl2::RGB, gl2::UNSIGNED_BYTE);
gl2::bind_framebuffer(gl2::FRAMEBUFFER, 0);
gl2::delete_buffers(texture_ids.as_slice());
gl2::delete_frame_buffers(framebuffer_ids.as_slice());
// flip image vertically (texture is upside down)
let orig_pixels = pixels.clone();
let stride = width * 3;
for y in range(0, height) {
let dst_start = y * stride;
let src_start = (height - y - 1) * stride;
unsafe {
let src_slice = orig_pixels.slice(src_start, src_start + stride);
pixels.slice_mut(dst_start, dst_start + stride)
.copy_memory(src_slice.slice_to(stride));
}
}
let mut img = png::Image {
width: width as u32,
height: height as u32,
pixels: png::RGB8(pixels),
};
let res = png::store_png(&mut img, &path);
assert!(res.is_ok());
debug!("shutting down the constellation after generating an output file");
let ConstellationChan(ref chan) = self.constellation_chan;
chan.send(ExitMsg);
self.shutdown_state = ShuttingDown;
}
self.window.present();
let exit = opts::get().exit_after_load;
if exit {
debug!("shutting down the constellation for exit_after_load");
let ConstellationChan(ref chan) = self.constellation_chan;
chan.send(ExitMsg);
}
}
fn recomposite_if(&mut self, result: bool) {
self.recomposite = result || self.recomposite;
}
}<|fim▁end|> | }
|
<|file_name|>ArDPPTU.java<|end_file_name|><|fim▁begin|>/*
MobileRobots Advanced Robotics Interface for Applications (ARIA)
Copyright (C) 2004, 2005 ActivMedia Robotics LLC
Copyright (C) 2006, 2007, 2008, 2009 MobileRobots Inc.
Copyright (C) 2010, 2011 Adept Technology, Inc.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
If you wish to redistribute ARIA under different terms, contact
Adept MobileRobots for information about a commercial version of ARIA at
[email protected] or
Adept MobileRobots, 10 Columbia Drive, Amherst, NH 03031; 800-639-9481
*/
/* ----------------------------------------------------------------------------
* This file was automatically generated by SWIG (http://www.swig.org).
* Version 1.3.36
*
* Do not make changes to this file unless you know what you are doing--modify
* the SWIG interface file instead.
* ----------------------------------------------------------------------------- */
package com.mobilerobots.Aria;
public class ArDPPTU extends ArPTZ {
/* (begin code from javabody_derived typemap) */
private long swigCPtr;
/* for internal use by swig only */
public ArDPPTU(long cPtr, boolean cMemoryOwn) {
super(AriaJavaJNI.SWIGArDPPTUUpcast(cPtr), cMemoryOwn);
swigCPtr = cPtr;
}
/* for internal use by swig only */
public static long getCPtr(ArDPPTU obj) {
return (obj == null) ? 0 : obj.swigCPtr;
}
/* (end code from javabody_derived typemap) */
protected void finalize() {
delete();
}
public synchronized void delete() {
if(swigCPtr != 0 && swigCMemOwn) {
swigCMemOwn = false;
AriaJavaJNI.delete_ArDPPTU(swigCPtr);
}
swigCPtr = 0;
super.delete();
}
public ArDPPTU(ArRobot robot, ArDPPTU.DeviceType deviceType) {
this(AriaJavaJNI.new_ArDPPTU__SWIG_0(ArRobot.getCPtr(robot), robot, deviceType.swigValue()), true);
}
public ArDPPTU(ArRobot robot) {
this(AriaJavaJNI.new_ArDPPTU__SWIG_1(ArRobot.getCPtr(robot), robot), true);
}
public boolean init() {
return AriaJavaJNI.ArDPPTU_init(swigCPtr, this);
}
public boolean canZoom() {
return AriaJavaJNI.ArDPPTU_canZoom(swigCPtr, this);
}
public boolean blank() {
return AriaJavaJNI.ArDPPTU_blank(swigCPtr, this);
}
public boolean resetCalib() {
return AriaJavaJNI.ArDPPTU_resetCalib(swigCPtr, this);
}
public boolean disableReset() {
return AriaJavaJNI.ArDPPTU_disableReset(swigCPtr, this);
}
public boolean resetTilt() {
return AriaJavaJNI.ArDPPTU_resetTilt(swigCPtr, this);
}
public boolean resetPan() {
return AriaJavaJNI.ArDPPTU_resetPan(swigCPtr, this);<|fim▁hole|> }
public boolean saveSet() {
return AriaJavaJNI.ArDPPTU_saveSet(swigCPtr, this);
}
public boolean restoreSet() {
return AriaJavaJNI.ArDPPTU_restoreSet(swigCPtr, this);
}
public boolean factorySet() {
return AriaJavaJNI.ArDPPTU_factorySet(swigCPtr, this);
}
public boolean panTilt(double pdeg, double tdeg) {
return AriaJavaJNI.ArDPPTU_panTilt(swigCPtr, this, pdeg, tdeg);
}
public boolean pan(double deg) {
return AriaJavaJNI.ArDPPTU_pan(swigCPtr, this, deg);
}
public boolean panRel(double deg) {
return AriaJavaJNI.ArDPPTU_panRel(swigCPtr, this, deg);
}
public boolean tilt(double deg) {
return AriaJavaJNI.ArDPPTU_tilt(swigCPtr, this, deg);
}
public boolean tiltRel(double deg) {
return AriaJavaJNI.ArDPPTU_tiltRel(swigCPtr, this, deg);
}
public boolean panTiltRel(double pdeg, double tdeg) {
return AriaJavaJNI.ArDPPTU_panTiltRel(swigCPtr, this, pdeg, tdeg);
}
public boolean limitEnforce(boolean val) {
return AriaJavaJNI.ArDPPTU_limitEnforce(swigCPtr, this, val);
}
public boolean immedExec() {
return AriaJavaJNI.ArDPPTU_immedExec(swigCPtr, this);
}
public boolean slaveExec() {
return AriaJavaJNI.ArDPPTU_slaveExec(swigCPtr, this);
}
public boolean awaitExec() {
return AriaJavaJNI.ArDPPTU_awaitExec(swigCPtr, this);
}
public boolean haltAll() {
return AriaJavaJNI.ArDPPTU_haltAll(swigCPtr, this);
}
public boolean haltPan() {
return AriaJavaJNI.ArDPPTU_haltPan(swigCPtr, this);
}
public boolean haltTilt() {
return AriaJavaJNI.ArDPPTU_haltTilt(swigCPtr, this);
}
public double getMaxPosPan() {
return AriaJavaJNI.ArDPPTU_getMaxPosPan(swigCPtr, this);
}
public double getMaxNegPan() {
return AriaJavaJNI.ArDPPTU_getMaxNegPan(swigCPtr, this);
}
public double getMaxPosTilt() {
return AriaJavaJNI.ArDPPTU_getMaxPosTilt(swigCPtr, this);
}
public double getMaxNegTilt() {
return AriaJavaJNI.ArDPPTU_getMaxNegTilt(swigCPtr, this);
}
public double getMaxPanSlew() {
return AriaJavaJNI.ArDPPTU_getMaxPanSlew(swigCPtr, this);
}
public double getMinPanSlew() {
return AriaJavaJNI.ArDPPTU_getMinPanSlew(swigCPtr, this);
}
public double getMaxTiltSlew() {
return AriaJavaJNI.ArDPPTU_getMaxTiltSlew(swigCPtr, this);
}
public double getMinTiltSlew() {
return AriaJavaJNI.ArDPPTU_getMinTiltSlew(swigCPtr, this);
}
public double getMaxPanAccel() {
return AriaJavaJNI.ArDPPTU_getMaxPanAccel(swigCPtr, this);
}
public double getMinPanAccel() {
return AriaJavaJNI.ArDPPTU_getMinPanAccel(swigCPtr, this);
}
public double getMaxTiltAccel() {
return AriaJavaJNI.ArDPPTU_getMaxTiltAccel(swigCPtr, this);
}
public double getMinTiltAccel() {
return AriaJavaJNI.ArDPPTU_getMinTiltAccel(swigCPtr, this);
}
public boolean initMon(double deg1, double deg2, double deg3, double deg4) {
return AriaJavaJNI.ArDPPTU_initMon(swigCPtr, this, deg1, deg2, deg3, deg4);
}
public boolean enMon() {
return AriaJavaJNI.ArDPPTU_enMon(swigCPtr, this);
}
public boolean disMon() {
return AriaJavaJNI.ArDPPTU_disMon(swigCPtr, this);
}
public boolean offStatPower() {
return AriaJavaJNI.ArDPPTU_offStatPower(swigCPtr, this);
}
public boolean regStatPower() {
return AriaJavaJNI.ArDPPTU_regStatPower(swigCPtr, this);
}
public boolean lowStatPower() {
return AriaJavaJNI.ArDPPTU_lowStatPower(swigCPtr, this);
}
public boolean highMotPower() {
return AriaJavaJNI.ArDPPTU_highMotPower(swigCPtr, this);
}
public boolean regMotPower() {
return AriaJavaJNI.ArDPPTU_regMotPower(swigCPtr, this);
}
public boolean lowMotPower() {
return AriaJavaJNI.ArDPPTU_lowMotPower(swigCPtr, this);
}
public boolean panAccel(double deg) {
return AriaJavaJNI.ArDPPTU_panAccel(swigCPtr, this, deg);
}
public boolean tiltAccel(double deg) {
return AriaJavaJNI.ArDPPTU_tiltAccel(swigCPtr, this, deg);
}
public boolean basePanSlew(double deg) {
return AriaJavaJNI.ArDPPTU_basePanSlew(swigCPtr, this, deg);
}
public boolean baseTiltSlew(double deg) {
return AriaJavaJNI.ArDPPTU_baseTiltSlew(swigCPtr, this, deg);
}
public boolean upperPanSlew(double deg) {
return AriaJavaJNI.ArDPPTU_upperPanSlew(swigCPtr, this, deg);
}
public boolean lowerPanSlew(double deg) {
return AriaJavaJNI.ArDPPTU_lowerPanSlew(swigCPtr, this, deg);
}
public boolean upperTiltSlew(double deg) {
return AriaJavaJNI.ArDPPTU_upperTiltSlew(swigCPtr, this, deg);
}
public boolean lowerTiltSlew(double deg) {
return AriaJavaJNI.ArDPPTU_lowerTiltSlew(swigCPtr, this, deg);
}
public boolean indepMove() {
return AriaJavaJNI.ArDPPTU_indepMove(swigCPtr, this);
}
public boolean velMove() {
return AriaJavaJNI.ArDPPTU_velMove(swigCPtr, this);
}
public boolean panSlew(double deg) {
return AriaJavaJNI.ArDPPTU_panSlew(swigCPtr, this, deg);
}
public boolean tiltSlew(double deg) {
return AriaJavaJNI.ArDPPTU_tiltSlew(swigCPtr, this, deg);
}
public boolean panSlewRel(double deg) {
return AriaJavaJNI.ArDPPTU_panSlewRel(swigCPtr, this, deg);
}
public boolean tiltSlewRel(double deg) {
return AriaJavaJNI.ArDPPTU_tiltSlewRel(swigCPtr, this, deg);
}
public double getPan() {
return AriaJavaJNI.ArDPPTU_getPan(swigCPtr, this);
}
public double getTilt() {
return AriaJavaJNI.ArDPPTU_getTilt(swigCPtr, this);
}
public double getPanSlew() {
return AriaJavaJNI.ArDPPTU_getPanSlew(swigCPtr, this);
}
public double getTiltSlew() {
return AriaJavaJNI.ArDPPTU_getTiltSlew(swigCPtr, this);
}
public double getBasePanSlew() {
return AriaJavaJNI.ArDPPTU_getBasePanSlew(swigCPtr, this);
}
public double getBaseTiltSlew() {
return AriaJavaJNI.ArDPPTU_getBaseTiltSlew(swigCPtr, this);
}
public double getPanAccel() {
return AriaJavaJNI.ArDPPTU_getPanAccel(swigCPtr, this);
}
public double getTiltAccel() {
return AriaJavaJNI.ArDPPTU_getTiltAccel(swigCPtr, this);
}
public final static class DeviceType {
public final static DeviceType PANTILT_DEFAULT = new DeviceType("PANTILT_DEFAULT");
public final static DeviceType PANTILT_PTUD47 = new DeviceType("PANTILT_PTUD47");
public final int swigValue() {
return swigValue;
}
public String toString() {
return swigName;
}
public static DeviceType swigToEnum(int swigValue) {
if (swigValue < swigValues.length && swigValue >= 0 && swigValues[swigValue].swigValue == swigValue)
return swigValues[swigValue];
for (int i = 0; i < swigValues.length; i++)
if (swigValues[i].swigValue == swigValue)
return swigValues[i];
throw new IllegalArgumentException("No enum " + DeviceType.class + " with value " + swigValue);
}
private DeviceType(String swigName) {
this.swigName = swigName;
this.swigValue = swigNext++;
}
private DeviceType(String swigName, int swigValue) {
this.swigName = swigName;
this.swigValue = swigValue;
swigNext = swigValue+1;
}
private DeviceType(String swigName, DeviceType swigEnum) {
this.swigName = swigName;
this.swigValue = swigEnum.swigValue;
swigNext = this.swigValue+1;
}
private static DeviceType[] swigValues = { PANTILT_DEFAULT, PANTILT_PTUD47 };
private static int swigNext = 0;
private final int swigValue;
private final String swigName;
}
}<|fim▁end|> | }
public boolean resetAll() {
return AriaJavaJNI.ArDPPTU_resetAll(swigCPtr, this); |
<|file_name|>task10.cc<|end_file_name|><|fim▁begin|>#include <iostream>
using namespace std;
#define FLOAT(name, value) float name = value
#define CONST_FLOAT(name, value) const FLOAT(name, value)
#define VAR_FLOAT(name) FLOAT(name, 0)
#define VAR_INT(name) int name = 0
#define DEFINE_LINEAR_EQUATION(n)\
VAR_FLOAT(a##n);\
VAR_FLOAT(b##n);\
VAR_FLOAT(c##n)\
#define READ(input) cin >> input
#define IS_LESS_THAN_EPSILON(diff) ((diff < EPSILON) && (diff > -EPSILON))
#define READ_LINEAR_EQUATION(n)\
READ(a##n);\
READ(b##n);\
READ(c##n)\
#define IS_ZERO(value) IS_LESS_THAN_EPSILON(value)
#define IS_NOT_ZERO(value) (!IS_LESS_THAN_EPSILON(value))
#define IS_NOT_ZERO_LINE(line) (IS_NOT_ZERO(a##line) || IS_NOT_ZERO(b##line) || IS_NOT_ZERO(c##line))
#define SOLUTION_PART_1(n, divisor) c##n / divisor##n
#define SOLUTION_PART_2(n, multiplier, found, divisor) (c##n - (multiplier##n * found)) / divisor##n
#define ONE_SOLUTION() solution = One
#define NO_SOLUTION() solution = No
#define GAUSE(n1, n2, n3, cnt , calc)\
CONST_FLOAT(k##n1, (-cnt##n2) / cnt##n1);\
CONST_FLOAT(calc##n3, (calc##n1 * k##n1) + calc##n2);\
CONST_FLOAT(c##n3, (c##n1 * k##n1) + c##n2)\
#define IF_ZERO_AND_NO_SOLUTION_THAN_SOLVE(zero, first, second, divisor1, divisor2, n1, n2)\
if(IS_ZERO(zero) && (solution == Many)) {\
if IS_ZERO(divisor1##n1) {\
NO_SOLUTION();\
} else {\
first = SOLUTION_PART_1(n1, divisor1);\<|fim▁hole|> second = SOLUTION_PART_2(n2, divisor1, first, divisor2);\
ONE_SOLUTION();\
}\
}\
#define FIND_SOLUTION(n, first, second, k, divisor1, divisor2)\
CONST_FLOAT(first##n, SOLUTION_PART_1(k, divisor1));\
CONST_FLOAT(second##n, SOLUTION_PART_2(n, divisor1, first##n, divisor2))\
#define DIFF(F, f) CONST_FLOAT(DIFF_##F, f##1 - f##2)
#define PRINT(value) cout << value
#define PRINT_CASE(what, message)\
case what:\
PRINT(message);\
break\
#define IF_ABS_WHOLE_PART_BETWEEN_THAN_MULTIPLIER(a, b, m)\
if((absWholePart >= a) && (absWholePart < b)) {\
multiplier = m;\
}\
#define FORMAT(num)\
sign = (num <= -EPSILON ? -1 : 1);\
wholePart = num + (sign * EPSILON);\
multiplier = 1;\
absWholePart = sign * wholePart;\
IF_ABS_WHOLE_PART_BETWEEN_THAN_MULTIPLIER(0, 10, 10000)\
IF_ABS_WHOLE_PART_BETWEEN_THAN_MULTIPLIER(10, 100, 1000)\
IF_ABS_WHOLE_PART_BETWEEN_THAN_MULTIPLIER(100, 1000, 100)\
IF_ABS_WHOLE_PART_BETWEEN_THAN_MULTIPLIER(1000, 10000, 10)\
floatPart = (int)((num + (sign * EPSILON)) * multiplier) - (wholePart * multiplier);\
num = ((float)floatPart / multiplier) + wholePart\
/* @begin */
enum Solution {
No,
One,
Many
};
CONST_FLOAT(EPSILON, 0.00001);
int main() {
DEFINE_LINEAR_EQUATION(1);
DEFINE_LINEAR_EQUATION(2);
VAR_FLOAT(x);
VAR_FLOAT(y);
Solution solution = Many;
READ_LINEAR_EQUATION(1);
READ_LINEAR_EQUATION(2);
if(IS_NOT_ZERO_LINE(1) && IS_NOT_ZERO_LINE(2)) {
IF_ZERO_AND_NO_SOLUTION_THAN_SOLVE(a1, y, x, b, a, 1, 2)
IF_ZERO_AND_NO_SOLUTION_THAN_SOLVE(a2, y, x, b, a, 2, 1)
IF_ZERO_AND_NO_SOLUTION_THAN_SOLVE(b1, x, y, a, b, 1, 2)
IF_ZERO_AND_NO_SOLUTION_THAN_SOLVE(b2, x, y, a, b, 2, 1)
if(solution == Many) {
GAUSE(1, 2, 3, a, b);
GAUSE(2, 1, 4, b, a);
if((IS_ZERO(b3) && IS_NOT_ZERO(c3)) || (IS_ZERO(a4) && IS_NOT_ZERO(c4))) {
NO_SOLUTION();
} else {
FIND_SOLUTION(1, y, x, 3, b, a);
FIND_SOLUTION(2, x, y, 4, a, b);
DIFF(X, x);
DIFF(Y, y);
if(IS_LESS_THAN_EPSILON(DIFF_X) && IS_LESS_THAN_EPSILON(DIFF_Y)) {
x = x1;
y = y1;
ONE_SOLUTION();
}
}
}
}
switch(solution) {
PRINT_CASE(No, "No solution");
PRINT_CASE(Many, "Many solutions");
case One:
VAR_INT(sign);
VAR_INT(wholePart);
VAR_INT(absWholePart);
VAR_INT(floatPart);
VAR_INT(multiplier);
FORMAT(x);
FORMAT(y);
PRINT(x) << ' ' << y;
break;
}
PRINT('\n');
return 0;
}<|fim▁end|> | |
<|file_name|>DummyVerticle.java<|end_file_name|><|fim▁begin|>package io.vertx.ext.auth.test.jwt;
import io.vertx.core.AbstractVerticle;
import io.vertx.ext.auth.KeyStoreOptions;
import io.vertx.ext.auth.jwt.JWTAuth;
import io.vertx.ext.auth.jwt.JWTAuthOptions;
public class DummyVerticle extends AbstractVerticle {
private static final JWTAuthOptions config = new JWTAuthOptions()
.setKeyStore(new KeyStoreOptions()
.setPath("keystore.jceks")
.setPassword("secret"));
public void start() {
System.out.println(this);
JWTAuth.create(vertx, config);
}<|fim▁hole|><|fim▁end|> | } |
<|file_name|>website.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, models, _
from odoo.addons.http_routing.models.ir_http import url_for
class Website(models.Model):
_inherit = "website"
@api.model
def page_search_dependencies(self, page_id=False):
dep = super(Website, self).page_search_dependencies(page_id=page_id)
page = self.env['website.page'].browse(int(page_id))
path = page.url
dom = [
('content', 'ilike', path)
]
posts = self.env['blog.post'].search(dom)
if posts:
page_key = _('Blog Post')
if len(posts) > 1:<|fim▁hole|> dep[page_key] = []
for p in posts:
dep[page_key].append({
'text': _('Blog Post <b>%s</b> seems to have a link to this page !') % p.name,
'item': p.name,
'link': p.website_url,
})
return dep
@api.model
def page_search_key_dependencies(self, page_id=False):
dep = super(Website, self).page_search_key_dependencies(page_id=page_id)
page = self.env['website.page'].browse(int(page_id))
key = page.key
dom = [
('content', 'ilike', key)
]
posts = self.env['blog.post'].search(dom)
if posts:
page_key = _('Blog Post')
if len(posts) > 1:
page_key = _('Blog Posts')
dep[page_key] = []
for p in posts:
dep[page_key].append({
'text': _('Blog Post <b>%s</b> seems to be calling this file !') % p.name,
'item': p.name,
'link': p.website_url,
})
return dep
def get_suggested_controllers(self):
suggested_controllers = super(Website, self).get_suggested_controllers()
suggested_controllers.append((_('Blog'), url_for('/blog'), 'website_blog'))
return suggested_controllers<|fim▁end|> | page_key = _('Blog Posts') |
<|file_name|>views.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.core.urlresolvers import reverse
from django.views.generic import DetailView, ListView, RedirectView, UpdateView
from django.contrib.auth.mixins import LoginRequiredMixin
from .models import User
class UserDetailView(LoginRequiredMixin, DetailView):
model = User
# These next two lines tell the view to index lookups by username
slug_field = 'username'
slug_url_kwarg = 'username'
<|fim▁hole|>
def get_redirect_url(self):
return reverse('users:detail',
kwargs={'username': self.request.user.username})
class UserUpdateView(LoginRequiredMixin, UpdateView):
fields = ['name', 'phone']
# we already imported User in the view code above, remember?
model = User
# send the user back to their own page after a successful update
def get_success_url(self):
return reverse('users:detail',
kwargs={'username': self.request.user.username})
def get_object(self):
# Only get the User record for the user making the request
return User.objects.get(username=self.request.user.username)
class UserListView(LoginRequiredMixin, ListView):
model = User
# These next two lines tell the view to index lookups by username
slug_field = 'username'
slug_url_kwarg = 'username'<|fim▁end|> |
class UserRedirectView(LoginRequiredMixin, RedirectView):
permanent = False |
<|file_name|>zeroconf.py<|end_file_name|><|fim▁begin|>"""
This module exposes Home Assistant via Zeroconf.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/zeroconf/
"""
import logging
import socket
import voluptuous as vol
from homeassistant import util
from homeassistant.const import (EVENT_HOMEASSISTANT_STOP, __version__)
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['api']
DOMAIN = 'zeroconf'
REQUIREMENTS = ['zeroconf==0.19.1']
ZEROCONF_TYPE = '_home-assistant._tcp.local.'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({}),
}, extra=vol.ALLOW_EXTRA)
def setup(hass, config):
"""Set up Zeroconf and make Home Assistant discoverable."""
from zeroconf import Zeroconf, ServiceInfo
zeroconf = Zeroconf()
zeroconf_name = '{}.{}'.format(hass.config.location_name, ZEROCONF_TYPE)
requires_api_password = hass.config.api.api_password is not None
params = {
'version': __version__,
'base_url': hass.config.api.base_url,
'requires_api_password': requires_api_password,
}
host_ip = util.get_local_ip()
try:
host_ip_pton = socket.inet_pton(socket.AF_INET, host_ip)
except socket.error:
host_ip_pton = socket.inet_pton(socket.AF_INET6, host_ip)
info = ServiceInfo(ZEROCONF_TYPE, zeroconf_name, host_ip_pton,
hass.http.server_port, 0, 0, params)
zeroconf.register_service(info)
def stop_zeroconf(event):
"""Stop Zeroconf."""
zeroconf.unregister_service(info)
zeroconf.close()
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_zeroconf)<|fim▁hole|><|fim▁end|> |
return True |
<|file_name|>TestAllCodes.java<|end_file_name|><|fim▁begin|>/*
* This file is part of lanterna (http://code.google.com/p/lanterna/).
*
* lanterna is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* Copyright (C) 2010-2014 Martin
*/
package com.googlecode.lanterna.test;
/**
* Prints the whole symbol table, this is debug stuff for UTF-8 to non-UTF-8
* symbol character conversions...
* @author Martin
*/
public class TestAllCodes {
public static void main(String[] args) throws Exception
{
System.out.write(new byte[] { (byte)0x1B, 0x28, 0x30 });
for(int i = 0; i < 200; i++) {
System.out.write((i + " = " + ((char)i) + "\n").getBytes());
}
System.out.write(new byte[] { (byte)0x1B, 0x28, 0x42 });
//System.out.write(new byte[] { (byte)0x1B, (byte)0x21, (byte)0x40, 15 });<|fim▁hole|>}<|fim▁end|> | } |
<|file_name|>models.py<|end_file_name|><|fim▁begin|>from sqlalchemy import Column, Integer, String, DateTime, Boolean, ForeignKey
from project.database import Base
from project.database import db_session
from sqlalchemy.orm import relationship
from sqlalchemy_utils import EmailType
from flask.ext.babel import lazy_gettext as _
# FIXME: move to extensions
from flask.ext.sqlalchemy import SQLAlchemy
db = SQLAlchemy()
class Roles(Base):
__tablename__ = 'role'
id = Column(Integer, primary_key=True)
role_name = Column(String(50), unique=True, nullable=False, info={'label': _('role name')})
url = Column(String(250), nullable=False, info={'label': _('website')})
parent = Column(Integer, nullable=False, info={'label': _('parent')})
description = Column(String(50), info={'label': _('description')})
def __init__(self, **kwargs):
super(Roles, self).__init__(**kwargs)
def __repr__(self):
return self.role_name
class User_Role(Base):
__tablename__ = 'user_role'
id = Column(Integer, primary_key=True)
role = Column(Integer,ForeignKey('role.id'), info={'label': _('role')})
user = Column(Integer,ForeignKey('profile.id'), info={'label': _('user')})
def __init__(self, **kwargs):
super(User_Role, self).__init__(**kwargs)
<|fim▁hole|> return self.role
class Profile(Base):
__tablename__ = 'profile'
id = Column(Integer, primary_key=True)
username = Column(String(50), unique=True, nullable=False, info={'label': _('username')})
password = Column(String(100), nullable=False, info={'label': _('password')})
group_list = Column(String(50), info={'label': _('group list')})
phone = Column(String(50), info={'label': _('phone')})
email = Column(EmailType, info={'label': _('email')})
registered_at = Column(String(50), info={'label': _('registered at')})
firstName = Column(String(50), info={'label': _('first name')})
lastName = Column(String(50), info={'label': _('last name')})
sex = Column(Boolean, info={'label': _('sex')})
birthday = Column(String(50), info={'label': _('birthday')})
avatar = Column(String(50), info={'label': _('avatar')})
country = Column(String(50), info={'label': _('country')})
city = Column(String(50), info={'label': _('city')})
creator = Column(Integer, info={'label': _('creator')})
def __init__(self, **kwargs):
super(Profile, self).__init__(**kwargs)
def __repr__(self):
return self.username
def can(self, roles):
for item in roles:
request_role = Roles.query.filter(Roles.role_name == item).first()
try:
user_have_role = User_Role.query.filter(
User_Role.role == request_role.id ,
User_Role.user == self.id
).first()
if user_have_role:
return True
except:
pass
return False
def has_group(self, group):
"""
"""
if group == self.group_list:
return True
return False
def __unicode__(self):
return self.username
class Log(Base):
'''
table log
'''
__tablename__ = 'log_actions'
id = Column(Integer, primary_key=True)
log_date = Column(String(50), nullable=False, info={'label': _('date')})
log_desc = Column(String(300), info={'label': _('description')})
log_user = Column(Integer, ForeignKey('profile.id'),info={'label': _('user name')})
user = relationship(
Profile,
# backref='profile'
)
def __init__(self, **kwargs):
super(Log, self).__init__(**kwargs)
def __repr__(self):
return self.log_desc<|fim▁end|> | def __repr__(self): |
<|file_name|>WhoisKeyBlock.java<|end_file_name|><|fim▁begin|>package be.dnsbelgium.rdap.sample.parser;
import be.dnsbelgium.rdap.sample.dto.Contact;<|fim▁hole|>import be.dnsbelgium.rdap.sample.dto.DnsSecKey;
import be.dnsbelgium.rdap.sample.dto.SimpleContact;
public enum WhoisKeyBlock {
MAIN(),
DOMAIN(),
REGISTRAR(),
REGISTRANT(),
ADMIN(Contact.class),
TECH(Contact.class),
DNSSEC(),
DNSSECKEY(DnsSecKey.class, true),
HOST(),
SIMPLE_ADMIN(SimpleContact.class),
SIMPLE_TECH(SimpleContact.class);
private Class repeatClass = null;
private boolean hasIndexSuffix = false;
WhoisKeyBlock() {
}
WhoisKeyBlock(Class repeatClass) {
this.repeatClass = repeatClass;
}
WhoisKeyBlock(Class repeatClass, boolean hasIndexSuffix) {
this.repeatClass = repeatClass;
this.hasIndexSuffix = hasIndexSuffix;
}
public Class getRepeatClass() {
return repeatClass;
}
public boolean hasIndexSuffix() {
return hasIndexSuffix;
}
public boolean isRepeatable() {
return repeatClass != null;
}
}<|fim▁end|> | |
<|file_name|>WriteSegmentRequestMarshaller.java<|end_file_name|><|fim▁begin|>/*
* Copyright 2012-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.pinpoint.model.transform;
import javax.annotation.Generated;
import com.amazonaws.SdkClientException;
import com.amazonaws.services.pinpoint.model.*;
import com.amazonaws.protocol.*;
import com.amazonaws.annotation.SdkInternalApi;
/**
* WriteSegmentRequestMarshaller
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
@SdkInternalApi
public class WriteSegmentRequestMarshaller {
private static final MarshallingInfo<StructuredPojo> DIMENSIONS_BINDING = MarshallingInfo.builder(MarshallingType.STRUCTURED)<|fim▁hole|> .marshallLocationName("Name").build();
private static final WriteSegmentRequestMarshaller instance = new WriteSegmentRequestMarshaller();
public static WriteSegmentRequestMarshaller getInstance() {
return instance;
}
/**
* Marshall the given parameter object.
*/
public void marshall(WriteSegmentRequest writeSegmentRequest, ProtocolMarshaller protocolMarshaller) {
if (writeSegmentRequest == null) {
throw new SdkClientException("Invalid argument passed to marshall(...)");
}
try {
protocolMarshaller.marshall(writeSegmentRequest.getDimensions(), DIMENSIONS_BINDING);
protocolMarshaller.marshall(writeSegmentRequest.getName(), NAME_BINDING);
} catch (Exception e) {
throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
}
}
}<|fim▁end|> | .marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("Dimensions").build();
private static final MarshallingInfo<String> NAME_BINDING = MarshallingInfo.builder(MarshallingType.STRING).marshallLocation(MarshallLocation.PAYLOAD) |
<|file_name|>0011_setting_logo.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-02-09 20:48
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('elearning', '0010_auto_20160209_2042'),
]
operations = [
migrations.AddField(
model_name='setting',
name='logo',
field=models.CharField(max_length=256, null=True),<|fim▁hole|><|fim▁end|> | ),
] |
<|file_name|>EditCounterNoBots.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# Abbas (Ar:User:Elph), 2012
# -*- coding: utf-8 -*-
import catlib ,pagegenerators<|fim▁hole|>import MySQLdb as mysqldb
import config
pagetop=u"'''تاریخ آخری تجدید:''''': ~~~~~ '''بذریعہ:''' [[user:{{subst:Currentuser}}|{{subst:Currentuser}}]]''\n\n"
pagetop+=u'\nفہرست 100 بلند پایہ صارفین بلحاظ شراکت بدون روبہ جات۔\n'
pagetop+=u'\nمزید دیکھیں: [[ویکیپیڈیا:رودادہائے ڈیٹابیس/فہرست ویکیپیڈیا صارفین بلحاظ شراکت|شماریات مع روبہ جات]]۔\n'
pagetop+=u'\n{| class="wikitable sortable"\n'
pagetop+=u'!شمار!!صارف!!شراکت\n|-\n'
pagedown=u'\n|}\n[[زمرہ:ویکیپیڈیا شماریات]]'
adress=u"ویکیپیڈیا:رودادہائے ڈیٹابیس/فہرست ویکیپیڈیا صارفین بلحاظ شراکت/بدون روبہ جات"
#adress=u"user:محمد شعیب/test44"
message=u"روبالہ:تجدید شماریات"
count=0
line_items=' '
rowfa=' '
rowic=' '
rowi=' '
rowit=' '
rowfi=' '
rowfia=' '
#---------------------------------------------- sql part--------------
site = wikipedia.getSite("ur")
query = "SELECT user_name, user_editcount FROM user WHERE user_name NOT IN (SELECT user_name FROM user_groups INNER JOIN user ON user_id = ug_user WHERE ug_group = 'bot') ORDER BY user_editcount DESC LIMIT 100;"
#query = "SELECT user_name, user_editcount FROM user WHERE user_name NOT 'روبہ خوش آمدید' AND user_name NOT IN (SELECT user_name FROM user_groups INNER JOIN user ON user_id = ug_user WHERE ug_group = 'bot') ORDER BY user_editcount DESC LIMIT 100;"
wikipedia.output(u'Executing query:\n%s' % query)
conn = mysqldb.connect("urwiki.labsdb", db = site.dbName(),
user = config.db_username,
passwd = config.db_password)
cursor = conn.cursor()
query = query.encode(site.encoding())
cursor.execute(query)
results = cursor.fetchall()
#---------------------------------------------- end of sql part---------
count=0
for row in results:
count+=1
rowi=unicode(str(row[0]),'UTF-8')
rowi2=unicode(str(row[1]),'UTF-8')
rowfa+=u'\n|'+str(count)+u'||[[user:'+rowi+u'|'+rowi+u']]||'
rowfa+=u'[[special:Contributions/{{subst:formatnum:'+rowi+u'}}|{{subst:formatnum:'+rowi2+u'}}]]\n|-\n'
text=rowfa.strip()
text=pagetop+text+pagedown
page = wikipedia.Page(site,adress)
page.put(text,message)<|fim▁end|> | import wikipedia,urllib,gzip,codecs,re |
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>// DO NOT EDIT !
// This file was generated automatically from 'src/mako/api/lib.rs.mako'
// DO NOT EDIT !
//! This documentation was generated from *logging* crate version *0.1.8+20150326*, where *20150326* is the exact revision of the *logging:v1beta3* schema built by the [mako](http://www.makotemplates.org/) code generator *v0.1.8*.
//! The original source code is [on github](https://github.com/Byron/google-apis-rs/tree/master/gen/logging1_beta3).
//! # Features
//!
//! Handle the following *Resources* with ease from the central [hub](struct.Logging.html) ...
//!
//! * projects
//! * [*log services indexes list*](struct.ProjectLogServiceIndexeListCall.html), [*log services list*](struct.ProjectLogServiceListCall.html), [*log services sinks create*](struct.ProjectLogServiceSinkCreateCall.html), [*log services sinks delete*](struct.ProjectLogServiceSinkDeleteCall.html), [*log services sinks get*](struct.ProjectLogServiceSinkGetCall.html), [*log services sinks list*](struct.ProjectLogServiceSinkListCall.html), [*log services sinks update*](struct.ProjectLogServiceSinkUpdateCall.html), [*logs delete*](struct.ProjectLogDeleteCall.html), [*logs entries write*](struct.ProjectLogEntryWriteCall.html), [*logs list*](struct.ProjectLogListCall.html), [*logs sinks create*](struct.ProjectLogSinkCreateCall.html), [*logs sinks delete*](struct.ProjectLogSinkDeleteCall.html), [*logs sinks get*](struct.ProjectLogSinkGetCall.html), [*logs sinks list*](struct.ProjectLogSinkListCall.html) and [*logs sinks update*](struct.ProjectLogSinkUpdateCall.html)
//!
//!
//!
//!
//! Not what you are looking for ? Find all other Google APIs in their Rust [documentation index](../index.html).
//!
//! # Structure of this Library
//!
//! The API is structured into the following primary items:
//!
//! * **[Hub](struct.Logging.html)**
//! * a central object to maintain state and allow accessing all *Activities*
//! * creates [*Method Builders*](trait.MethodsBuilder.html) which in turn
//! allow access to individual [*Call Builders*](trait.CallBuilder.html)
//! * **[Resources](trait.Resource.html)**
//! * primary types that you can apply *Activities* to
//! * a collection of properties and *Parts*
//! * **[Parts](trait.Part.html)**
//! * a collection of properties
//! * never directly used in *Activities*
//! * **[Activities](trait.CallBuilder.html)**
//! * operations to apply to *Resources*
//!
//! All *structures* are marked with applicable traits to further categorize them and ease browsing.
//!
//! Generally speaking, you can invoke *Activities* like this:
//!
//! ```Rust,ignore
//! let r = hub.resource().activity(...).doit()
//! ```
//!
//! Or specifically ...
//!
//! ```ignore
//! let r = hub.projects().logs_sinks_get(...).doit()
//! let r = hub.projects().log_services_sinks_update(...).doit()
//! let r = hub.projects().logs_sinks_update(...).doit()
//! let r = hub.projects().log_services_sinks_create(...).doit()
//! let r = hub.projects().logs_sinks_create(...).doit()
//! let r = hub.projects().log_services_sinks_get(...).doit()
//! ```
//!
//! The `resource()` and `activity(...)` calls create [builders][builder-pattern]. The second one dealing with `Activities`
//! supports various methods to configure the impending operation (not shown here). It is made such that all required arguments have to be
//! specified right away (i.e. `(...)`), whereas all optional ones can be [build up][builder-pattern] as desired.
//! The `doit()` method performs the actual communication with the server and returns the respective result.
//!
//! # Usage
//!
//! ## Setting up your Project
//!
//! To use this library, you would put the following lines into your `Cargo.toml` file:
//!
//! ```toml
//! [dependencies]
//! google-logging1_beta3 = "*"
//! ```
//!
//! ## A complete example
//!
//! ```test_harness,no_run
//! extern crate hyper;
//! extern crate yup_oauth2 as oauth2;
//! extern crate google_logging1_beta3 as logging1_beta3;
//! use logging1_beta3::LogSink;
//! use logging1_beta3::{Result, Error};
//! # #[test] fn egal() {
//! use std::default::Default;
//! use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage};
//! use logging1_beta3::Logging;
//!
//! // Get an ApplicationSecret instance by some means. It contains the `client_id` and
//! // `client_secret`, among other things.
//! let secret: ApplicationSecret = Default::default();
//! // Instantiate the authenticator. It will choose a suitable authentication flow for you,
//! // unless you replace `None` with the desired Flow.
//! // Provide your own `AuthenticatorDelegate` to adjust the way it operates and get feedback about <|fim▁hole|>//! // what's going on. You probably want to bring in your own `TokenStorage` to persist tokens and
//! // retrieve them from storage.
//! let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate,
//! hyper::Client::new(),
//! <MemoryStorage as Default>::default(), None);
//! let mut hub = Logging::new(hyper::Client::new(), auth);
//! // As the method needs a request, you would usually fill it with the desired information
//! // into the respective structure. Some of the parts shown here might not be applicable !
//! // Values shown here are possibly random and not representative !
//! let mut req = LogSink::default();
//!
//! // You can configure optional parameters by calling the respective setters at will, and
//! // execute the final call using `doit()`.
//! // Values shown here are possibly random and not representative !
//! let result = hub.projects().log_services_sinks_update(req, "projectsId", "logServicesId", "sinksId")
//! .doit();
//!
//! match result {
//! Err(e) => match e {
//! // The Error enum provides details about what exactly happened.
//! // You can also just use its `Debug`, `Display` or `Error` traits
//! Error::HttpError(_)
//! |Error::MissingAPIKey
//! |Error::MissingToken(_)
//! |Error::Cancelled
//! |Error::UploadSizeLimitExceeded(_, _)
//! |Error::Failure(_)
//! |Error::BadRequest(_)
//! |Error::FieldClash(_)
//! |Error::JsonDecodeError(_, _) => println!("{}", e),
//! },
//! Ok(res) => println!("Success: {:?}", res),
//! }
//! # }
//! ```
//! ## Handling Errors
//!
//! All errors produced by the system are provided either as [Result](enum.Result.html) enumeration as return value of
//! the doit() methods, or handed as possibly intermediate results to either the
//! [Hub Delegate](trait.Delegate.html), or the [Authenticator Delegate](../yup-oauth2/trait.AuthenticatorDelegate.html).
//!
//! When delegates handle errors or intermediate values, they may have a chance to instruct the system to retry. This
//! makes the system potentially resilient to all kinds of errors.
//!
//! ## Uploads and Downloads
//! If a method supports downloads, the response body, which is part of the [Result](enum.Result.html), should be
//! read by you to obtain the media.
//! If such a method also supports a [Response Result](trait.ResponseResult.html), it will return that by default.
//! You can see it as meta-data for the actual media. To trigger a media download, you will have to set up the builder by making
//! this call: `.param("alt", "media")`.
//!
//! Methods supporting uploads can do so using up to 2 different protocols:
//! *simple* and *resumable*. The distinctiveness of each is represented by customized
//! `doit(...)` methods, which are then named `upload(...)` and `upload_resumable(...)` respectively.
//!
//! ## Customization and Callbacks
//!
//! You may alter the way an `doit()` method is called by providing a [delegate](trait.Delegate.html) to the
//! [Method Builder](trait.CallBuilder.html) before making the final `doit()` call.
//! Respective methods will be called to provide progress information, as well as determine whether the system should
//! retry on failure.
//!
//! The [delegate trait](trait.Delegate.html) is default-implemented, allowing you to customize it with minimal effort.
//!
//! ## Optional Parts in Server-Requests
//!
//! All structures provided by this library are made to be [enocodable](trait.RequestValue.html) and
//! [decodable](trait.ResponseResult.html) via *json*. Optionals are used to indicate that partial requests are responses
//! are valid.
//! Most optionals are are considered [Parts](trait.Part.html) which are identifiable by name, which will be sent to
//! the server to indicate either the set parts of the request or the desired parts in the response.
//!
//! ## Builder Arguments
//!
//! Using [method builders](trait.CallBuilder.html), you are able to prepare an action call by repeatedly calling it's methods.
//! These will always take a single argument, for which the following statements are true.
//!
//! * [PODs][wiki-pod] are handed by copy
//! * strings are passed as `&str`
//! * [request values](trait.RequestValue.html) are moved
//!
//! Arguments will always be copied or cloned into the builder, to make them independent of their original life times.
//!
//! [wiki-pod]: http://en.wikipedia.org/wiki/Plain_old_data_structure
//! [builder-pattern]: http://en.wikipedia.org/wiki/Builder_pattern
//! [google-go-api]: https://github.com/google/google-api-go-client
//!
//!
// Unused attributes happen thanks to defined, but unused structures
// We don't warn about this, as depending on the API, some data structures or facilities are never used.
// Instead of pre-determining this, we just disable the lint. It's manually tuned to not have any
// unused imports in fully featured APIs. Same with unused_mut ... .
#![allow(unused_imports, unused_mut, dead_code)]
include!(concat!(env!("OUT_DIR"), "/lib.rs"));<|fim▁end|> | |
<|file_name|>match-static-const-lc.rs<|end_file_name|><|fim▁begin|>// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license<|fim▁hole|>// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Issue #7526: lowercase static constants in patterns look like bindings
#![allow(dead_code)]
#![deny(non_uppercase_statics)]
#[allow(non_uppercase_statics)]
pub const a : int = 97;
fn f() {
let r = match (0,0) {
(0, a) => 0,
//~^ ERROR static constant in pattern `a` should have an uppercase name such as `A`
(x, y) => 1 + x + y,
};
assert!(r == 1);
}
mod m {
#[allow(non_uppercase_statics)]
pub const aha : int = 7;
}
fn g() {
use self::m::aha;
let r = match (0,0) {
(0, aha) => 0,
//~^ ERROR static constant in pattern `aha` should have an uppercase name such as `AHA`
(x, y) => 1 + x + y,
};
assert!(r == 1);
}
mod n {
pub const OKAY : int = 8;
}
fn h() {
use self::n::OKAY as not_okay;
let r = match (0,0) {
(0, not_okay) => 0,
//~^ ERROR static constant in pattern `not_okay` should have an uppercase name such as `NOT_OKAY`
(x, y) => 1 + x + y,
};
assert!(r == 1);
}
fn main () {
f();
g();
h();
}<|fim▁end|> | // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your |
<|file_name|>pref.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
#
# File: src/webframe/management/commands/pref.py
# Date: 2020-04-22 21:35
# Author: Kenson Man <[email protected]>
# Desc: Import / Create / Update / Delete preference
#
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from django.db.models import Q
from pathlib import Path
from webframe.functions import TRUE_VALUES, LogMessage as lm, getTime
from webframe.models import Preference, AbstractPreference
from uuid import UUID
import logging, os, glob, sys, re
logger=logging.getLogger('webframe.commands.prefs')
class Command(BaseCommand):
help = '''Mainpulate the preference in database. Including insert/update/delete/view/import/gensecret/gendoc; Importing support csv|xlsx file.'''
def __getIndent__(self, indent=0, ch=' '):
return ch*indent
def create_parser(self, cmdName, subcommand, **kwargs):
parser=super().create_parser(cmdName, subcommand, **kwargs)
parser.epilog='''Example:\r\n
\tpref import path_to_prefs #Import a folder or a csv/xlsx file\r\n
\tpref set ABC --value="def" #Set the preference "ABC" to value "def"\r\n
\tpref gensecret #Generate the encryption secret; PLEASE backup in secure way.\r\n
\tpref gendoc prefsDoc.html #Generate the documentation and save as as output.html
'''
return parser
def add_arguments(self, parser):
#Default Value
pattern='Pref({pref.id}:{pref.name}): {pref.value}'
action='show'
max=256
wildcard='*'
tmpl='webframe/prefsDoc.html'
#Adding arguments
parser.add_argument('action', type=str, help='The action to be taken. One of import/export/show/set/delete/gensecret/gendoc; Default is {0}'.format(action), default=action)
parser.add_argument('name', type=str, nargs='?', help='[import/export/show/set/delete/gendoc]; The name of the preference or path of importing/exporting file (csv|xlsx);')
parser.add_argument('--file', dest='file', type=str, help='[import/export/gendoc]; The file path for import/export/output.')
parser.add_argument('--value', dest='value', type=str, help='[set/delete]; The value of the preference;', default=None)
parser.add_argument('--owner', dest='owner', type=str, help='[set/delete]; The owner of the preference; Optional;', default=None)
parser.add_argument('--noowner', dest='noowner', action='store_true', help='[show/set/delete]; The target preference has no owner; Optional; Default False')
parser.add_argument('--parent', dest='parent', type=str, help='[show/set/delete]; The parent\'s name of the preference. Optional;', default=None)
parser.add_argument('--noparent', dest='noparent', action='store_true', help='[show/set/delete]; The target preference has no parent; Optional; Default False')
parser.add_argument('--pattern', dest='pattern', type=str, help='[show]; The output pattern. {0}'.format(pattern), default=pattern)
parser.add_argument('--max', dest='max', type=int, help='[show]; The maximum number of preference to show. Default is {0}'.format(max), default=max)
parser.add_argument('--wildcard', dest='wildcard', type=str, help='[show]; Specify the wildcard; Default is {0}'.format(wildcard), default=wildcard)
#Importing
parser.add_argument('--sep', dest='separator', type=str, default=',', help='[import]; The separator when CSV importing; Default \",\"')
parser.add_argument('--encoding', dest='encoding', type=str, default='utf-8', help='[import]; The encoding when CSV importing; Default \"utf-8\"')
parser.add_argument('--quotechar', dest='quotechar', type=str, default='\"', help='[import]; The quote-char when CSV importing; Default double quote: \"')
parser.add_argument('--filepath', dest='filepath', action='store_true', help='[import]; Import the file-path in preferences; Default False')
parser.add_argument('--force', '-f ', dest='force', action='store_true', help='[import]; Force the import', default=False)
#Generate Doc
parser.add_argument('--tmpl', dest='tmpl', type=str, help="[gendoc]; The template name when generating document; Default: {0}".format(tmpl), default=tmpl)
def __get_owner__(self, owner=None):
if not owner: return None
logger.debug('Getting owner by: "%s"', owner)
owner=owner if owner else self.kwargs['owner']
return get_user_model().objects.get(username=owner) if owner else None
def __get_parent__(self, parent=None):
parent=parent if parent else self.kwargs['parent']
if parent:
try:
#Get parent by uuid
return Preference.objects.get(id=parent)
except:
try:
#Get parent by name
return Preference.objects.get(name=parent)
except:
pass
return None
def __get_pref__(self, **kwargs):
owner=kwargs['owner'] if 'owner' in kwargs else self.__get_owner__()
parent=kwargs['parent'] if 'parent' in kwargs else self.__get_parent__()
name=kwargs['name'] if 'name' in kwargs else self.kwargs['name']
lang=kwargs['lang'] if 'lang' in kwargs else None
if self.kwargs['filepath']: name=os.path.basename(name)
if self.kwargs['parent'] and parent==None:
raise Preference.DoesNotExist('Parent Preference not found: {0}'.format(self.kwargs['parent']))
rst=Preference.objects.all()
if name and name!='*':
rst=rst.filter(name=name)
if owner:
rst=rst.filter(owner=owner)
elif self.kwargs['noowner']:
rst=rst.filter(owner__isnull=True)
if parent:
rst=rst.filter(parent=parent)
elif self.kwargs['noparent']:
rst=rst.filter(parent__isnull=True)
if self.kwargs['filepath']:
rst=rst.filter(tipe=AbstractPreference.TYPE_FILEPATH)
rst=rst.order_by('owner', 'parent', 'sequence', 'name')
return rst
def __get_name__( self, name ):
'''
Get the name and sequence according to the name.
@param name The string including the sequence and name. For example, '01.Target' will return a tuple (1, 'Target')
@return A tuple including the sequence and the name
'''
p=re.search(r'^\d+\.', name)
if p:
s=p.group(0)
return name[len(s):].strip(), int(name[0:len(s)-1])
return (name, sys.maxsize if hasattr(sys, 'maxsize') else sys.maxint) #Default append
<|fim▁hole|> print(pattern.format(pref=pref))
pattern=' {0}'.format(pattern)
for ch in pref.childs:
self.output(ch, pattern)
def handle(self, *args, **kwargs):
verbosity=int(kwargs['verbosity'])
if verbosity==3:
logger.setLevel(logging.DEBUG)
elif verbosity==2:
logger.setLevel(logging.INFO)
elif verbosity==1:
logger.setLevel(logging.WARNING)
else:
logger.setLevel(logging.ERROR)
self.kwargs=kwargs
action=kwargs['action']
if action=='import':
self.imp()
elif action=='create': #for backward compatibility
self.set()
elif action=='update': #for backward compatibility
self.set()
elif action=='set':
self.set()
elif action=='delete':
self.delete()
elif action=='show':
self.show()
elif action=='gensecret':
self.gensecret()
elif action=='gendoc':
self.gendoc()
elif action=='export':
self.expCsv()
else:
logger.warning('Unknown action: {0}'.format(action))
logger.warn('DONE!')
def show(self):
logger.info('Showing the preference ...')
q=Preference.objects.all()
if self.kwargs['name']:
logger.info(' with the name filter: {0}'.format(self.kwargs['name']))
if self.kwargs['wildcard'] in self.kwargs['name']:
q=q.filter(name__icontains=self.kwargs['name'].replace(self.kwargs['wildcard'], ''))
else:
q=q.filter(name=self.kwargs['name'])
if self.kwargs['value']:
logger.info(' with the value filter: {0}'.format(self.kwargs['value']))
q=q.filter(value__icontains=self.kwargs['value'])
if self.kwargs['owner']:
logger.info(' which belongs to user: {0}'.format(self.kwargs['owner']))
q=q.filter(owner__username=self.kwargs['owner'])
if self.kwargs['parent']:
logger.info(' which belongs to preference: {0}'.format(self.kwargs['parent']))
q=q.filter(parent__name__iexact=self.kwargs['parent'])
else:
q=q.filter(parent__isnull=True)
for p in q:
self.output(p)
logger.warning('There have {0} preference(s) has been shown'.format(len(q)))
def set(self):
with transaction.atomic():
try:
pref=self.__get_pref__()
if pref.count()<1: raise Preference.DoesNotExist
cnt=pref.update(value=self.kwargs['value'])
logger.info('{0} of Preference(s) has been updated'.format(cnt))
except Preference.DoesNotExist:
p=Preference(name=self.kwargs['name'], value=self.kwargs['value'], owner=owner, parent=parent)
p.save()
logger.info('The preference<{0}> has been created with value: {1}'.format(p.name, p.value))
def delete(self):
pref=self.__get_pref__()
cnt=pref.count()
pref.delete()
logger.warning('{0} of Preference(s) has been deleted'.format(cnt))
def expRow( self, wr, pref, indent=0 ):
'''
Import the specified preference to csv.
'''
cnt=0
tab=self.__getIndent__(indent)
logger.debug(lm('{0}Exporting preference: {1}::{2}...', tab, pref.id, pref.name))
wr.writerow([
pref.name # [0]
, pref.realValue # [1]
, pref.parent.id if pref.parent else '' # [2]
, pref.owner.username if pref.owner else '' # [3]
, pref.helptext # [4]
, Preference.TYPES[pref.tipe][1] # [5]
, pref.encrypted # [6]
, pref.regex # [7]
])
cnt+=1
for p in pref.childs:
cnt+=self.expRow(wr, p, indent+3)
return cnt
def expCsv( self ):
'''
Import the specified list of preferences to csv.
'''
import csv
f=self.kwargs['file']
with open(f, 'w', encoding=self.kwargs['encoding']) as fp:
wr=csv.writer(fp, delimiter=self.kwargs['separator'], quotechar=self.kwargs['quotechar'], quoting=csv.QUOTE_MINIMAL, skipinitialspace=True)
cnt=0
for p in self.__get_pref__():
cnt+=self.expRow(wr, p, 0)
logger.info(lm('Exported {0} records', cnt))
def improw( self, cols, idx=0 ):
try:
name=cols[0]
val=cols[1]
parent=self.__get_parent__(cols[2])
owner=self.__get_owner__(cols[3])
helptext=cols[4]
tipe=cols[5]
encrypted=cols[6] in TRUE_VALUES
regex=cols[7]
lang=cols[8] if len(cols)>8 else None
logger.debug(' Importing row: {0}: {1} [{2}]'.format(idx, name, 'encrypted' if encrypted else 'clear-text'))
self.kwargs['name']=name
pref=self.__get_pref__(name=name, owner=owner, parent=parent, lang=lang)
if pref.count()<1: raise Preference.DoesNotExist
for p in pref:
p.encrypted=encrypted
p.helptext=helptext
p.tipe=tipe
p.regex=regex
#The value must be the last steps to set due to validation. Otherwise, once importing/assign a new value into this field, the last validation rule may be applied incorrectly
p.value=val
p.save()
except Preference.DoesNotExist:
Preference(name=name, _value=val, owner=owner, parent=parent, encrypted=encrypted, helptext=helptext, regex=regex, lang=lang).save()
except:
logger.debug(cols)
logger.exception('Error when handling the column')
raise
def impXlsx( self, f ):
'''
Import xlsx file.
'''
from openpyxl import load_workbook
wb=load_workbook(filename=f)
ws=wb.active
logger.info(' Importing worksheet: {0}!{1}'.format(f, ws.title))
cnt=0
with transaction.atomic():
for r in range(1, ws.max_row+1):
cols=list()
name=ws.cell(row=r, column=1).value
if isinstance(name, str): name=name.strip()
if not name: continue #Skip the row when it has no pref.name
if r==1 and (name.upper()=='ID' or name.upper()=='NAME' or name.upper()=='ID/Name'): continue #Skip the first row if header row
cols.append(name) #Name/ID
cols.append(ws.cell(row=r, column=2).value) #Value
cols.append(ws.cell(row=r, column=3).value) #Parent
cols.append(ws.cell(row=r, column=4).value) #Owner
cols.append(ws.cell(row=r, column=5).value) #Reserved
cols.append(ws.cell(row=r, column=6).value) #Tipe
cols.append(ws.cell(row=r, column=7).value) #encrypted
self.improw( cols, r )
cnt+=1
logger.info(' Imported {0} row(s)'.format(cnt))
def impCsv( self, f ):
'''
Import the csv file.
'''
import csv
with transaction.atomic():
logger.info(' Importing csv: {0}'.format(f))
cnt=0
with open(f, 'r', encoding=self.kwargs['encoding']) as fp:
if self.kwargs['quotechar']:
rows=csv.reader(fp, delimiter=self.kwargs['separator'], quotechar=self.kwargs['quotechar'], quoting=csv.QUOTE_MINIMAL, skipinitialspace=True)
else:
rows=csv.reader(fp, delimiter=self.kwargs['separator'], quoting=csv.QUOTE_NONE, skipinitialspace=True)
for row in rows:
if len(row)<1: continue #Skip the empty row
name=row[0].strip()
if not name: continue #Skip the row when it has no name
if cnt==0 and (name.upper()=='ID' or name.upper()=='NAME' or name.upper()=='ID/NAME'): continue #Skip the first row if header row
self.improw( row, cnt )
cnt+=1
logger.info(' Imported {0} row(s)'.format(cnt))
def impdir( self, d ):
if os.path.isdir(d):
logger.info('Importing directory: {0}'.format(d))
else:
logger.warning('This is not the directory: {0}'.format(d))
return
cnt=0
with transaction.atomic():
p=Preference.objects.pref('IMPORTED_PREFERENCES', returnValue=False)
p.helptext='<p>Sysetm use only! <strong>DO NOT MODIFY</strong> youself unless you understand the risk.</p>'
p.save()
for f in os.listdir(d):
if not (f.upper().endswith('.XLSX') or f.upper().endswith('.CSV')): continue #only support *.xlsx and *.csv
f=os.path.join(d, f)
try:
Preference.objects.get(name=f, parent=p)
if self.kwargs['force']: raise Preference.DoesNotExist
except Preference.DoesNotExist:
self.impfile( f )
cnt+=1
Preference(name=f, parent=p).save()
logger.debug('Imported {0} file(s)'.format(cnt))
def impfile( self, f ):
if not (os.path.isfile(f) and os.access(f, os.R_OK)):
logger.warning('The file is not readable: {0}'.format(f))
return
fn=f.lower()
if fn.endswith('.xlsx'):
self.impXlsx(f)
elif fn.endswith('.csv'):
self.impCsv(f)
else:
logger.info('Unsupported file: {0}'.format(f))
def imppath( self, p, parent=None):
name, seq=self.__get_name__(os.path.basename(p))
if os.path.isdir(p):
try:
pref=self.__get_pref__(name=name)
if pref.count()<1: raise Preference.DoesNotExist
pref=pref[0]
except Preference.DoesNotExist:
pref=Preference(name=name, parent=parent)
pref.tipe=AbstractPreference.TYPE_FILEPATH
pref.sequence=seq
pref.save()
for f in os.listdir(p):
path=os.path.join(p, f)
self.imppath(path, pref)
#Handling the ordering after import all the childs
ord=1
for c in pref.childs:
c.sequence=ord
c.save()
ord+=1
else:
try:
pref=self.__get_pref__(name=name)
if pref.count()<1: raise Preference.DoesNotExist
pref=pref[0]
except Preference.DoesNotExist:
pref=Preference(name=name, parent=parent)
pref.pathValue=p if os.path.isabs(p) else os.path.abspath(p)
pref.tipe=AbstractPreference.TYPE_FILEPATH
pref.sequence=seq
pref.save()
def imp(self):
disableOrder=getattr(settings, 'DISABLE_REORDER', False)
setattr(settings, 'DISABLE_REORDER', True) #Disable the re-ordering features during importing
try:
f=self.kwargs['file']
if self.kwargs['filepath']:
self.imppath(f)
elif os.path.isdir(f):
self.impdir(f)
elif os.path.isfile(f):
self.impfile(f)
finally:
setattr(settings, 'DISABLE_REORDER', disableOrder) #Resume the re-ordering features after importing
def gensecret(self):
from webframe.models import AbstractPreference
key=AbstractPreference.__getSecret__()
logger.warning(lm('Your secret is: {0}', key))
def gendoc(self):
from django.shortcuts import render
from django.template import loader, Template, Context
from webframe.providers import template_injection, fmt_injection
tmpl=getattr(self.kwargs, 'tmpl', 'webframe/prefDoc.html')
logger.warning(lm('Generating the documents according template: {0}', tmpl))
tmpl=loader.get_template(tmpl)
params=dict()
params.update(template_injection(None))
params.update(fmt_injection(None))
#params['target']=Preference.objects.filter(parent__isnull=True)
params['target']=self.__get_pref__()
params['TYPES']=Preference.TYPES
params['now']=getTime('now')
txt=tmpl.render(params)
output=self.kwargs.get('file')
if not output: output='prefsDoc.html'
logger.warning(lm('Generated! Outputing into: {0}', output))
with open(output, 'w') as f:
f.write(txt)<|fim▁end|> | def output( self, pref, pattern=None ):
pattern=pattern if pattern else self.kwargs['pattern'] |
<|file_name|>translatable.tsx<|end_file_name|><|fim▁begin|>import * as React from 'react';
import { LangConsumer } from '../LangContext';
import { Schema } from 'jsoninput';
import { infoStyle } from './commonView';
import { css } from 'glamor';
import IconButton from '../Components/IconButton';
interface Translation {
translation: string;
status: string;
}
interface TranslatableProps {
value: {
[code: string]: Translation;
};
onChange: (value: { [code: string]: Translation }) => void;
view: Schema['view'] & {
label?: string;
readOnly: boolean;
};
}
interface EndProps {
value?: string | number;
onChange: (value: string) => void;
view: {};
}
/**
* HOC: Transform a hashmap (lang:value) into value based on current language
* @param Comp
*/
export default function translatable<P extends EndProps>(
Comp: React.ComponentType<P>,
): React.SFC<TranslatableProps & P> {
function Translated(props: TranslatableProps) {
if (!props.value) {
return null;
}
function catchUp(code: string) {
const value = props.value[code] ? props.value[code].translation : '';
const newValue = {
...props.value,
[code]: {
translation: value,<|fim▁hole|> },
};
props.onChange(newValue);
}
function outdate(code: string) {
const value = props.value[code] ? props.value[code].translation : '';
const newValue = {
...props.value,
[code]: {
translation: value,
status: 'outdated:manual',
},
};
props.onChange(newValue);
}
function markAsMajor(code: string, allLanguages: { code: string; label: string }[]) {
const newValue = {};
for (const lang of allLanguages) {
newValue[lang.code] = {
translation: props.value[lang.code] ? props.value[lang.code].translation : '',
status: 'outdated:' + code,
};
}
newValue[code].status = '';
props.onChange(newValue);
}
return (
<LangConsumer>
{({ lang, availableLang }) => {
// Updade label
const curCode = (
availableLang.find(l => l.code.toUpperCase() === lang.toUpperCase()) || {
code: '',
}
).code;
let translation;
let status;
if (props.value.hasOwnProperty(lang.toUpperCase())) {
translation = props.value[lang.toUpperCase()].translation;
status = props.value[lang.toUpperCase()].status;
} else if (props.value.hasOwnProperty(lang.toLowerCase())) {
translation = props.value[lang.toLowerCase()].translation;
status = props.value[lang.toLowerCase()].status;
}
const view = {
...props.view,
label: (
<span>
{(props.view || { label: '' }).label}{' '}
<span className={String(infoStyle)}>
[{curCode.toLowerCase()}] {status ? '(' + status + ')' : ''}
</span>
</span>
),
};
if (view.readOnly) {
// variable is protected by the model
const theLanguage = availableLang.find(al => al.code === lang);
if (theLanguage != null && theLanguage.visibility === 'PRIVATE') {
// but this language is not defined by the model
if (
Object.entries(props.value).find(([key, value]) => {
const theLang = availableLang.find(al => al.code === key);
return (
theLang && theLang.visibility !== 'PRIVATE' && value.translation
);
})
) {
view.readOnly = false;
}
}
}
const editor = (
// @ts-ignore https://github.com/Microsoft/TypeScript/issues/28748
<Comp
{...props}
value={translation}
view={view}
onChange={value => {
const theStatus = props.value[lang] ? props.value[lang].status : '';
const v = {
...props.value,
[lang]: {
translation: value,
status: theStatus,
},
};
props.onChange(v);
}}
/>
);
// return editor;
const readOnly = view.readOnly;
const orangeStyle = css({
color: '#F57C00',
});
const greenStyle = css({
color: '#388E3C',
});
const majorButton = !readOnly ? (
<IconButton
icon={[
`fa fa-toggle-on fa-stack-1x ${orangeStyle}`,
`fa fa-expand fa-stack-1x ${css({
transform: 'translate(0, 8px) rotate(45deg)',
})}`,
]}
className={`wegas-advanced-feature ${css({
lineHeight: '1.2em',
})}`}
tooltip="Major update"
onClick={() => {
markAsMajor(curCode, availableLang);
}}
/>
) : (
''
);
const outdateButton = !readOnly ? (
<IconButton
className="wegas-advanced-feature"
icon={[`fa fa-toggle-on ${greenStyle}`]}
tooltip="Mark as outdated "
onClick={() => {
outdate(curCode);
}}
/>
) : (
''
);
if (!props.value[curCode] || !props.value[curCode].status) {
return (
<span>
{editor}
{majorButton}
{outdateButton}
</span>
);
} else {
return (
<span>
{editor}
{majorButton}
{!readOnly ? (
<IconButton
icon={[`fa fa-toggle-on fa-flip-horizontal ${orangeStyle}`]}
className="wegas-advanced-feature"
tooltip="Mark as up-to-date"
onClick={() => {
catchUp(curCode);
}}
/>
) : (
''
)}
</span>
);
}
}}
</LangConsumer>
);
}
return Translated;
}<|fim▁end|> | status: '', |
<|file_name|>target_nrf51.py<|end_file_name|><|fim▁begin|>"""
mbed CMSIS-DAP debugger
Copyright (c) 2006-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.<|fim▁hole|> Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cortex_m import CortexM, DHCSR, DBGKEY, C_DEBUGEN, C_MASKINTS, C_STEP, DEMCR, VC_CORERESET, NVIC_AIRCR, NVIC_AIRCR_VECTKEY, NVIC_AIRCR_SYSRESETREQ
from .memory_map import (FlashRegion, RamRegion, MemoryMap)
from pyOCD.target.target import TARGET_RUNNING, TARGET_HALTED
import logging
# NRF51 specific registers
RESET = 0x40000544
RESET_ENABLE = (1 << 0)
class NRF51(CortexM):
    """nRF51 target driver.

    Extends the generic Cortex-M target with the nRF51-specific reset
    sequence: the reset-pin behaviour must be enabled through the RESET
    peripheral register before triggering the regular Cortex-M reset,
    because a plain reset kicks the nRF51 out of debug mode.
    """

    # On-chip memory layout: 256 kB flash (1 kB pages, boot memory) and
    # 16 kB of RAM.
    memoryMap = MemoryMap(
        FlashRegion(start=0, length=0x40000, blocksize=0x400, isBootMemory=True),
        RamRegion(start=0x20000000, length=0x4000)
    )

    def __init__(self, transport):
        super(NRF51, self).__init__(transport, self.memoryMap)

    def resetn(self):
        """
        reset a core. After a call to this function, the core
        is running
        """
        # A regular reset will kick the NRF out of DBG mode, so first
        # enable the reset-pin behaviour via the RESET register.
        logging.debug("target_nrf51.reset: enable reset pin")
        self.writeMemory(RESET, RESET_ENABLE)
        # Then trigger the actual reset through the standard Cortex-M path.
        logging.debug("target_nrf51.reset: trigger nRST pin")
        CortexM.reset(self)
http://www.apache.org/licenses/LICENSE-2.0
|
<|file_name|>util.ts<|end_file_name|><|fim▁begin|>import readdirRecursive from "fs-readdir-recursive";
import * as babel from "@babel/core";
import path from "path";
import fs from "fs";
import * as watcher from "./watcher";
// Copy the permission bits of `src` onto `dest`.
// Failures are deliberately non-fatal: a warning is printed and
// compilation continues (e.g. on filesystems without chmod support).
export function chmod(src: string, dest: string): void {
  try {
    fs.chmodSync(dest, fs.statSync(src).mode);
  } catch (err) {
    console.warn(`Cannot change permissions of ${dest}`);
  }
}
// Predicate deciding whether a (relative) filename is included in a listing.
type ReaddirFilter = (filename: string) => boolean;

// Recursively list files under `dirname`, returning paths relative to it.
// Directories are always descended into; a file is kept only when it is not
// a dotfile (unless `includeDotfiles` is set) and it passes `filter`.
export function readdir(
  dirname: string,
  includeDotfiles: boolean,
  filter?: ReaddirFilter,
): Array<string> {
  return readdirRecursive(dirname, (filename, _index, currentDirectory) => {
    const stat = fs.statSync(path.join(currentDirectory, filename));

    // Returning true for a directory tells fs-readdir-recursive to recurse.
    if (stat.isDirectory()) return true;

    return (
      (includeDotfiles || filename[0] !== ".") && (!filter || filter(filename))
    );
  });
}
/**
 * Recursively list the files under `dirname` that Babel can compile.
 *
 * @param dirname Root directory to scan.
 * @param includeDotfiles Whether dotfiles are eligible for inclusion.
 * @param altExts Optional extension whitelist overriding the Babel defaults.
 */
export function readdirForCompilable(
  dirname: string,
  includeDotfiles: boolean,
  altExts?: Array<string>,
): Array<string> {
  const compilableOnly = (filename: string) =>
    isCompilableExtension(filename, altExts);
  return readdir(dirname, includeDotfiles, compilableOnly);
}
/**
 * Test if a filename ends with a compilable extension.
 *
 * @param filename Path or bare filename to check.
 * @param altExts Optional extension list; defaults to Babel's own set.
 */
export function isCompilableExtension(
  filename: string,
  altExts?: readonly string[],
): boolean {
  // Fall back to Babel's defaults only when no explicit list was given.
  const candidates = altExts ?? babel.DEFAULT_EXTENSIONS;
  return candidates.includes(path.extname(filename));
}
/**
 * Append a `//# sourceMappingURL=` trailer to `code`, pointing at the
 * basename of the source-map file `loc`.
 */
export function addSourceMappingUrl(code: string, loc: string): string {
  const trailer = `\n//# sourceMappingURL=${path.basename(loc)}`;
  return `${code}${trailer}`;
}
// Caller metadata handed to @babel/core so config resolution and plugins
// can identify that the transform was requested by the CLI.
const CALLER = {
  name: "@babel/cli",
};
// Transform an in-memory `code` string with Babel (used by the REPL path),
// wrapping the callback-style babel.transform in a Promise. `filename` is
// injected into the options so config lookup and source maps work.
export function transformRepl(
  filename: string,
  code: string,
  opts: any,
): Promise<any> {
  opts = {
    ...opts,
    caller: CALLER,
    filename,
  };

  return new Promise((resolve, reject) => {
    babel.transform(code, opts, (err, result) => {
      if (err) reject(err);
      else resolve(result);
    });
  });
}
// Compile `filename` with Babel. When the transform reports external
// dependencies (e.g. referenced config files), they are registered with the
// watcher so edits to those files retrigger compilation.
export async function compile(
  filename: string,
  opts: any | Function,
): Promise<any> {
  opts = {
    ...opts,
    caller: CALLER,
  };

  // TODO (Babel 8): Use `babel.transformFileAsync`
  const result: any = await new Promise((resolve, reject) => {
    babel.transformFile(filename, opts, (err, result) => {
      if (err) reject(err);
      else resolve(result);
    });
  });

  if (result) {
    if (!process.env.BABEL_8_BREAKING) {
      // Babel 7 transforms may not report externalDependencies at all;
      // in that case there is nothing to register.
      if (!result.externalDependencies) return result;
    }
    watcher.updateExternalDependencies(filename, result.externalDependencies);
  }

  return result;
}
/**
 * Recursively delete the directory at `dirname`. A no-op when the
 * directory does not exist.
 *
 * The parameter was originally named `path`, shadowing the imported `path`
 * module and forcing manual string concatenation; it is renamed so
 * `path.join` can be used for child paths.
 */
export function deleteDir(dirname: string): void {
  if (!fs.existsSync(dirname)) return;

  fs.readdirSync(dirname).forEach(function (file) {
    const curPath = path.join(dirname, file);
    if (fs.lstatSync(curPath).isDirectory()) {
      // Recurse into subdirectories first so they are empty when removed.
      deleteDir(curPath);
    } else {
      // Delete plain files (and symlinks -- lstat does not follow them).
      fs.unlinkSync(curPath);
    }
  });

  fs.rmdirSync(dirname);
}
process.on("uncaughtException", function (err) {
console.error(err);
process.exitCode = 1;
});<|fim▁hole|> const newBasename = path.basename(filename, path.extname(filename)) + ext;
return path.join(path.dirname(filename), newBasename);
}
export function debounce(fn: () => void, time: number) {
let timer;
function debounced() {
clearTimeout(timer);
timer = setTimeout(fn, time);
}
debounced.flush = () => {
clearTimeout(timer);
fn();
};
return debounced;
}<|fim▁end|> |
export function withExtension(filename: string, ext: string = ".js") { |
<|file_name|>userdata.rs<|end_file_name|><|fim▁begin|>extern crate hlua;
#[test]
fn readwrite() {
    // Minimal userdata type: pushed into Lua as userdata and read back by
    // cloning the value found on the stack.
    #[derive(Clone)]
    struct Foo;

    impl<'lua, L> hlua::Push<L> for Foo
        where L: hlua::AsMutLua<'lua>
    {
        type Err = hlua::Void;

        fn push_to_lua(self, lua: L) -> Result<hlua::PushGuard<L>, (hlua::Void, L)> {
            // No metatable customisation is needed for this test.
            Ok(hlua::push_userdata(self, lua, |_| {}))
        }
    }

    impl<'lua, L> hlua::PushOne<L> for Foo where L: hlua::AsMutLua<'lua> {}

    impl<'lua, L> hlua::LuaRead<L> for Foo
        where L: hlua::AsMutLua<'lua>
    {
        fn lua_read_at_position(lua: L, index: i32) -> Result<Foo, L> {
            // Read the raw userdata wrapper, then clone the contained Foo.
            let val: Result<hlua::UserdataOnStack<Foo, _>, _> =
                hlua::LuaRead::lua_read_at_position(lua, index);
            val.map(|d| d.clone())
        }
    }

    // Round-trip: store a userdata global, then read it back out.
    let mut lua = hlua::Lua::new();

    lua.set("a", Foo);
    let _: Foo = lua.get("a").unwrap();
}
#[test]
fn destructor_called() {
    use std::sync::{Arc, Mutex};

    // Shared flag flipped by Foo's destructor so the test can observe it
    // after the Lua context has been dropped.
    let called = Arc::new(Mutex::new(false));

    struct Foo {
        called: Arc<Mutex<bool>>,
    }

    impl Drop for Foo {
        fn drop(&mut self) {
            let mut called = self.called.lock().unwrap();
            (*called) = true;
        }
    }

    impl<'lua, L> hlua::Push<L> for Foo
        where L: hlua::AsMutLua<'lua>
    {
        type Err = hlua::Void;

        fn push_to_lua(self, lua: L) -> Result<hlua::PushGuard<L>, (hlua::Void, L)> {
            Ok(hlua::push_userdata(self, lua, |_| {}))
        }
    }

    impl<'lua, L> hlua::PushOne<L> for Foo where L: hlua::AsMutLua<'lua> {}

    // Dropping the Lua context at the end of this scope must run the
    // userdata's Drop implementation.
    {
        let mut lua = hlua::Lua::new();
        lua.set("a", Foo { called: called.clone() });
    }

    let locked = called.lock().unwrap();
    assert!(*locked);
}
#[test]
fn type_check() {
#[derive(Clone)]
struct Foo;
impl<'lua, L> hlua::Push<L> for Foo
where L: hlua::AsMutLua<'lua>
{
type Err = hlua::Void;
fn push_to_lua(self, lua: L) -> Result<hlua::PushGuard<L>, (hlua::Void, L)> {
Ok(hlua::push_userdata(self, lua, |_| {}))
}
}
impl<'lua, L> hlua::PushOne<L> for Foo where L: hlua::AsMutLua<'lua> {}
impl<'lua, L> hlua::LuaRead<L> for Foo
where L: hlua::AsMutLua<'lua><|fim▁hole|> hlua::LuaRead::lua_read_at_position(lua, index);
val.map(|d| d.clone())
}
}
#[derive(Clone)]
struct Bar;
impl<'lua, L> hlua::Push<L> for Bar
where L: hlua::AsMutLua<'lua>
{
type Err = hlua::Void;
fn push_to_lua(self, lua: L) -> Result<hlua::PushGuard<L>, (hlua::Void, L)> {
Ok(hlua::push_userdata(self, lua, |_| {}))
}
}
impl<'lua, L> hlua::PushOne<L> for Bar where L: hlua::AsMutLua<'lua> {}
impl<'lua, L> hlua::LuaRead<L> for Bar
where L: hlua::AsMutLua<'lua>
{
fn lua_read_at_position(lua: L, index: i32) -> Result<Bar, L> {
let val: Result<hlua::UserdataOnStack<Bar, _>, _> =
hlua::LuaRead::lua_read_at_position(lua, index);
val.map(|d| d.clone())
}
}
let mut lua = hlua::Lua::new();
lua.set("a", Foo);
let x: Option<Bar> = lua.get("a");
assert!(x.is_none())
}
#[test]
fn metatables() {
    #[derive(Clone)]
    struct Foo;

    impl<'lua, L> hlua::Push<L> for Foo
        where L: hlua::AsMutLua<'lua>
    {
        type Err = hlua::Void;

        fn push_to_lua(self, lua: L) -> Result<hlua::PushGuard<L>, (hlua::Void, L)> {
            // Install an __index metamethod exposing a `test` function
            // that always returns 5.
            Ok(hlua::push_userdata(self, lua, |mut table| {
                table.set("__index".to_string(),
                          vec![("test".to_string(), hlua::function0(|| 5))]);
            }))
        }
    }

    impl<'lua, L> hlua::PushOne<L> for Foo where L: hlua::AsMutLua<'lua> {}

    // Methods reached through the metatable are callable from Lua code.
    let mut lua = hlua::Lua::new();

    lua.set("a", Foo);
    let x: i32 = lua.execute("return a.test()").unwrap();
    assert_eq!(x, 5);
}
// Exercises several distinct userdata types registered in one Lua context,
// together with functions mixing them, verifying values computed in Lua
// against native reference implementations.
//
// Note: a stray `<|fim...|>` data-corpus token trailing the closing brace
// has been removed.
#[test]
fn multiple_userdata() {
    // A thin wrapper around u32, exposed to Lua as userdata.
    #[derive(Clone)]
    struct Integer(u32);

    impl<'lua, L> hlua::Push<L> for Integer
        where L: hlua::AsMutLua<'lua>
    {
        type Err = hlua::Void;

        fn push_to_lua(self, lua: L) -> Result<hlua::PushGuard<L>, (hlua::Void, L)> {
            Ok(hlua::push_userdata(self, lua, |_| { }))
        }
    }

    impl<'lua, L> hlua::PushOne<L> for Integer where L: hlua::AsMutLua<'lua> {}

    impl<'lua, L> hlua::LuaRead<L> for Integer
        where L: hlua::AsMutLua<'lua>
    {
        fn lua_read_at_position(lua: L, index: i32) -> Result<Integer, L> {
            let val: Result<hlua::UserdataOnStack<Integer, _>, _> =
                hlua::LuaRead::lua_read_at_position(lua, index);
            val.map(|d| d.clone())
        }
    }

    // A four-lane u32 tuple struct, also exposed as userdata.
    #[derive(Debug, Clone, PartialEq, Eq)]
    struct BigInteger(u32, u32, u32, u32);

    impl<'lua, L> hlua::Push<L> for BigInteger
        where L: hlua::AsMutLua<'lua>
    {
        type Err = hlua::Void;

        fn push_to_lua(self, lua: L) -> Result<hlua::PushGuard<L>, (hlua::Void, L)> {
            Ok(hlua::push_userdata(self, lua, |_| { }))
        }
    }

    impl<'lua, L> hlua::PushOne<L> for BigInteger where L: hlua::AsMutLua<'lua> {}

    impl<'lua, L> hlua::LuaRead<L> for BigInteger
        where L: hlua::AsMutLua<'lua>
    {
        fn lua_read_at_position(lua: L, index: i32) -> Result<BigInteger, L> {
            let val: Result<hlua::UserdataOnStack<BigInteger, _>, _> =
                hlua::LuaRead::lua_read_at_position(lua, index);
            val.map(|d| d.clone())
        }
    }

    // Native reference implementations mirrored by the Lua-visible functions.
    let axpy_float = |a: f64, x: Integer, y: Integer| a * x.0 as f64 + y.0 as f64;
    let axpy_float_2 = |a: f64, x: Integer, y: f64| a * x.0 as f64 + y;
    let broadcast_mul = |k: Integer, v: BigInteger|
        BigInteger(k.0 * v.0, k.0 * v.1, k.0 * v.2, k.0 * v.3);
    let collapse = |a: f32, k: Integer, v: BigInteger|
        (k.0 * v.0) as f32 * a + (k.0 * v.1) as f32 * a + (k.0 * v.2) as f32 * a + (k.0 * v.3) as f32 * a;

    let mut lua = hlua::Lua::new();
    let big_integer = BigInteger(531,246,1,953);

    // Register globals of both userdata types plus the mixing functions.
    lua.set("a", Integer(19));
    lua.set("b", Integer(114));
    lua.set("c", Integer(96));
    lua.set("d", Integer(313));
    lua.set("v", big_integer.clone());
    lua.set("add", hlua::function2(|x: Integer, y: Integer| Integer(x.0 + y.0)));
    lua.set("axpy", hlua::function3(|a: Integer, x: Integer, y: Integer|
        Integer(a.0 * x.0 + y.0)));
    lua.set("axpy_float", hlua::function3(&axpy_float));
    lua.set("axpy_float_2", hlua::function3(&axpy_float_2));
    lua.set("broadcast_mul", hlua::function2(&broadcast_mul));
    lua.set("collapse", hlua::function3(&collapse));

    // Evaluate every function through the Lua interpreter and compare
    // against the native results.
    assert_eq!(lua.execute::<Integer>("return add(a, b)").unwrap().0, 19 + 114);
    assert_eq!(lua.execute::<Integer>("return add(b, c)").unwrap().0, 114 + 96);
    assert_eq!(lua.execute::<Integer>("return add(c, d)").unwrap().0, 96 + 313);
    assert_eq!(lua.execute::<Integer>("return axpy(a, b, c)").unwrap().0, 19 * 114 + 96);
    assert_eq!(lua.execute::<Integer>("return axpy(b, c, d)").unwrap().0, 114 * 96 + 313);
    assert_eq!(lua.execute::<f64>("return axpy_float(2.5, c, d)").unwrap(),
               axpy_float(2.5, Integer(96), Integer(313)));
    assert_eq!(lua.execute::<BigInteger>("return broadcast_mul(a, v)").unwrap(),
               broadcast_mul(Integer(19), big_integer.clone()));
    assert_eq!(lua.execute::<BigInteger>("return broadcast_mul(b, v)").unwrap(),
               broadcast_mul(Integer(114), big_integer.clone()));
    assert_eq!(lua.execute::<f32>("return collapse(19.25, c, v)").unwrap(),
               collapse(19.25, Integer(96), big_integer.clone()));
}
fn lua_read_at_position(lua: L, index: i32) -> Result<Foo, L> {
let val: Result<hlua::UserdataOnStack<Foo, _>, _> = |
<|file_name|>plot_rbf_parameters.py<|end_file_name|><|fim▁begin|>'''
==================
RBF SVM parameters
==================
This example illustrates the effect of the parameters ``gamma`` and ``C`` of
the Radial Basis Function (RBF) kernel SVM.
Intuitively, the ``gamma`` parameter defines how far the influence of a single
training example reaches, with low values meaning 'far' and high values meaning
'close'. The ``gamma`` parameters can be seen as the inverse of the radius of
influence of samples selected by the model as support vectors.
The ``C`` parameter trades off misclassification of training examples against
simplicity of the decision surface. A low ``C`` makes the decision surface
smooth, while a high ``C`` aims at classifying all training examples correctly
by giving the model freedom to select more samples as support vectors.
The first plot is a visualization of the decision function for a variety of
parameter values on a simplified classification problem involving only 2 input
features and 2 possible target classes (binary classification). Note that this
kind of plot is not possible to do for problems with more features or target
classes.
The second plot is a heatmap of the classifier's cross-validation accuracy as a
function of ``C`` and ``gamma``. For this example we explore a relatively large
grid for illustration purposes. In practice, a logarithmic grid from
:math:`10^{-3}` to :math:`10^3` is usually sufficient. If the best parameters
lie on the boundaries of the grid, it can be extended in that direction in a
subsequent search.
Note that the heat map plot has a special colorbar with a midpoint value close
to the score values of the best performing models so as to make it easy to tell
them appart in the blink of an eye.
The behavior of the model is very sensitive to the ``gamma`` parameter. If
``gamma`` is too large, the radius of the area of influence of the support
vectors only includes the support vector itself and no amount of
regularization with ``C`` will be able to prevent overfitting.
When ``gamma`` is very small, the model is too constrained and cannot capture
the complexity or "shape" of the data. The region of influence of any selected
support vector would include the whole training set. The resulting model will
behave similarly to a linear model with a set of hyperplanes that separate the
centers of high density of any pair of two classes.
For intermediate values, we can see on the second plot that good models can
be found on a diagonal of ``C`` and ``gamma``. Smooth models (lower ``gamma``
values) can be made more complex by selecting a larger number of support
vectors (larger ``C`` values) hence the diagonal of good performing models.
Finally one can also observe that for some intermediate values of ``gamma`` we
get equally performing models when ``C`` becomes very large: it is not
necessary to regularize by limiting the number of support vectors. The radius of
the RBF kernel alone acts as a good structural regularizer. In practice though
it might still be interesting to limit the number of support vectors with a
lower value of ``C`` so as to favor models that use less memory and that are
faster to predict.
We should also note that small differences in scores results from the random
splits of the cross-validation procedure. Those spurious variations can be
smoothed out by increasing the number of CV iterations ``n_iter`` at the
expense of compute time. Increasing the value number of ``C_range`` and
``gamma_range`` steps will increase the resolution of the hyper-parameter heat
map.
'''
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colors import Normalize
from sklearn.datasets import load_iris
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
# Utility class to move the midpoint of a colormap to be around
# the values of interest.
class MidpointNormalize(Normalize):
    """Colormap normalizer mapping ``midpoint`` to the middle (0.5) of the
    colormap, interpolating linearly below and above it."""

    def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
        # Extra pivot value on top of the standard Normalize (vmin, vmax).
        self.midpoint = midpoint
        Normalize.__init__(self, vmin, vmax, clip)

    def __call__(self, value, clip=None):
        # Piecewise-linear map: vmin -> 0, midpoint -> 0.5, vmax -> 1.
        x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
        # NOTE(review): no mask is passed to masked_array, so nothing is
        # actually masked here -- presumably kept for API compatibility.
        return np.ma.masked_array(np.interp(value, x, y))
##############################################################################
# Load and prepare data set
#
# dataset for grid search
iris = load_iris()
X = iris.data
y = iris.target
# Dataset for decision function visualization: we only keep the first two
# features in X and sub-sample the dataset to keep only 2 classes and
# make it a binary classification problem.
X_2d = X[:, :2]
X_2d = X_2d[y > 0]
y_2d = y[y > 0]
y_2d -= 1
# It is usually a good idea to scale the data for SVM training.
# We are cheating a bit in this example in scaling all of the data,
# instead of fitting the transformation on the training set and
# just applying it on the test set.
scaler = StandardScaler()
X = scaler.fit_transform(X)
X_2d = scaler.fit_transform(X_2d)
##############################################################################
# Train classifiers
#
# For an initial search, a logarithmic grid with basis
# 10 is often helpful. Using a basis of 2, a finer
# tuning can be achieved but at a much higher cost.
C_range = np.logspace(-2, 10, 13)
gamma_range = np.logspace(-9, 3, 13)
param_grid = dict(gamma=gamma_range, C=C_range)
cv = StratifiedShuffleSplit(n_iter=5, test_size=0.2, random_state=42)
grid = GridSearchCV(SVC(), param_grid=param_grid, cv=cv)
grid.fit(X, y)
print("The best parameters are %s with a score of %0.2f"
% (grid.best_params_, grid.best_score_))
# Now we need to fit a classifier for all parameters in the 2d version
# (we use a smaller set of parameters here because it takes a while to train)
C_2d_range = [1e-2, 1, 1e2]
gamma_2d_range = [1e-1, 1, 1e1]
classifiers = []
for C in C_2d_range:
for gamma in gamma_2d_range:
clf = SVC(C=C, gamma=gamma)
clf.fit(X_2d, y_2d)
classifiers.append((C, gamma, clf))
##############################################################################
# visualization
#
# draw visualization of parameter effects
plt.figure(figsize=(8, 6))
xx, yy = np.meshgrid(np.linspace(-3, 3, 200), np.linspace(-3, 3, 200))
for (k, (C, gamma, clf)) in enumerate(classifiers):
# evaluate decision function in a grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])<|fim▁hole|>
# visualize decision function for these parameters
plt.subplot(len(C_2d_range), len(gamma_2d_range), k + 1)
plt.title("gamma=10^%d, C=10^%d" % (np.log10(gamma), np.log10(C)),
size='medium')
# visualize parameter's effect on decision function
plt.pcolormesh(xx, yy, -Z, cmap=plt.cm.RdBu)
plt.scatter(X_2d[:, 0], X_2d[:, 1], c=y_2d, cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.axis('tight')
# plot the scores of the grid
# grid_scores_ contains parameter settings and scores
# We extract just the scores
scores = [x[1] for x in grid.grid_scores_]
scores = np.array(scores).reshape(len(C_range), len(gamma_range))
# Draw heatmap of the validation accuracy as a function of gamma and C
#
# The score are encoded as colors with the hot colormap which varies from dark
# red to bright yellow. As the most interesting scores are all located in the
# 0.92 to 0.97 range we use a custom normalizer to set the mid-point to 0.92 so
# as to make it easier to visualize the small variations of score values in the
# interesting range while not brutally collapsing all the low score values to
# the same color.
plt.figure(figsize=(8, 6))
plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
plt.imshow(scores, interpolation='nearest', cmap=plt.cm.hot,
norm=MidpointNormalize(vmin=0.2, midpoint=0.92))
plt.xlabel('gamma')
plt.ylabel('C')
plt.colorbar()
plt.xticks(np.arange(len(gamma_range)), gamma_range, rotation=45)
plt.yticks(np.arange(len(C_range)), C_range)
plt.title('Validation accuracy')
plt.show()<|fim▁end|> | Z = Z.reshape(xx.shape) |
<|file_name|>hr.py<|end_file_name|><|fim▁begin|>from __future__ import unicode_literals
from frappe import _
def get_data():
return [
{
"label": _("Employee and Attendance"),
"items": [
{
"type": "doctype",
"name": "Employee",
"description": _("Employee records."),
},
{
"type": "doctype",
"name": "Employee Attendance Tool",
"label": _("Employee Attendance Tool"),
"description":_("Mark Attendance for multiple employees"),
"hide_count": True
},
{
"type": "doctype",
"name": "Attendance",
"description": _("Attendance record."),
},
{
"type": "doctype",
"name": "Upload Attendance",
"description":_("Upload attendance from a .csv file"),
"hide_count": True
},
]
},
{
"label": _("Recruitment"),
"items": [
{
"type": "doctype",
"name": "Job Applicant",
"description": _("Applicant for a Job."),
},
{
"type": "doctype",
"name": "Job Opening",
"description": _("Opening for a Job."),
},
{
"type": "doctype",
"name": "Offer Letter",
"description": _("Offer candidate a Job."),
},
]
},
{
"label": _("Leaves and Holiday"),
"items": [
{
"type": "doctype",
"name": "Leave Application",
"description": _("Applications for leave."),
},
{
"type": "doctype",
"name":"Leave Type",
"description": _("Type of leaves like casual, sick etc."),
},
{
"type": "doctype",
"name": "Holiday List",
"description": _("Holiday master.")
},
{
"type": "doctype",
"name": "Leave Allocation",
"description": _("Allocate leaves for a period.")
},
{
"type": "doctype",
"name": "Leave Control Panel",
"label": _("Leave Allocation Tool"),
"description":_("Allocate leaves for the year."),
"hide_count": True
},
{
"type": "doctype",
"name": "Leave Block List",
"description": _("Block leave applications by department.")
},
]
},
{
"label": _("Payroll"),
"items": [
{
"type": "doctype",
"name": "Salary Slip",
"description": _("Monthly salary statement."),
},
{
"type": "doctype",
"name": "Process Payroll",
"label": _("Process Payroll"),
"description":_("Generate Salary Slips"),
"hide_count": True
},
{
"type": "doctype",
"name": "Salary Structure",
"description": _("Salary template master.")
},
{
"type": "doctype",
"name": "Salary Component",
"label": _("Salary Components"),
"description": _("Earnings, Deductions and other Salary components")
},
<|fim▁hole|> },
{
"label": _("Expense Claims"),
"items": [
{
"type": "doctype",
"name": "Expense Claim",
"description": _("Claims for company expense."),
},
{
"type": "doctype",
"name": "Expense Claim Type",
"description": _("Types of Expense Claim.")
},
]
},
{
"label": _("Appraisals"),
"items": [
{
"type": "doctype",
"name": "Appraisal",
"description": _("Performance appraisal."),
},
{
"type": "doctype",
"name": "Appraisal Template",
"description": _("Template for performance appraisals.")
},
]
},
{
"label": _("Training"),
"items": [
{
"type": "doctype",
"name": "Training Event"
},
{
"type": "doctype",
"name": "Training Result"
},
{
"type": "doctype",
"name": "Training Feedback"
},
]
},
{
"label": _("Setup"),
"icon": "fa fa-cog",
"items": [
{
"type": "doctype",
"name": "HR Settings",
"description": _("Settings for HR Module")
},
{
"type": "doctype",
"name": "Employment Type",
"description": _("Types of employment (permanent, contract, intern etc.).")
},
{
"type": "doctype",
"name": "Branch",
"description": _("Organization branch master.")
},
{
"type": "doctype",
"name": "Department",
"description": _("Organization unit (department) master.")
},
{
"type": "doctype",
"name": "Designation",
"description": _("Employee designation (e.g. CEO, Director etc.).")
},
{
"type": "doctype",
"name": "Daily Work Summary Settings"
},
]
},
{
"label": _("Reports"),
"icon": "fa fa-list",
"items": [
{
"type": "report",
"is_query_report": True,
"name": "Employee Leave Balance",
"doctype": "Leave Application"
},
{
"type": "report",
"is_query_report": True,
"name": "Employee Birthday",
"doctype": "Employee"
},
{
"type": "report",
"is_query_report": True,
"name": "Employees working on a holiday",
"doctype": "Employee"
},
{
"type": "report",
"name": "Employee Information",
"doctype": "Employee"
},
{
"type": "report",
"is_query_report": True,
"name": "Monthly Salary Register",
"doctype": "Salary Slip"
},
{
"type": "report",
"is_query_report": True,
"name": "Monthly Attendance Sheet",
"doctype": "Attendance"
},
]
},
]<|fim▁end|> | ] |
<|file_name|>analyse_check_q_boundary_condition.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Runs the conduction example, produces some output
#
nproc = 1 # Number of processors to use
from boututils import shell, launch, plotdata
from boutdata import collect
import numpy as np
from sys import argv
from math import sqrt, log10, log, pi
from matplotlib import pyplot
gamma = 3.
if len(argv)>1:
data_path = str(argv[1])
else:
data_path = "data"
electron_mass = 9.10938291e-31
ion_mass = 3.34358348e-27<|fim▁hole|>Te = collect("T_electron", path=data_path, info=True, yguards=True)
Ti = collect("T_ion", path=data_path, info=True, yguards=True)
n = collect("n_ion", path=data_path, info=True, yguards=True)
V = collect("Vpar_ion", path=data_path, info=True, yguards=True)
q = collect("heat_flux", path=data_path, info=True, yguards=True)
q_electron_left = []
q_electron_right = []
right_index = len(Te[0,2,:,0])-4
for i in range(len(Te[:,2,0,0])):
Te_left = (Te[i,2,2,0]+Te[i,2,1,0])/2.
Ti_left = (Ti[i,2,2,0]+Ti[i,2,1,0])/2.
n_left = (n[i,2,2,0]+n[i,2,1,0])/2.
Te_right = (Te[i,2,right_index,0]+Te[i,2,right_index+1,0])/2
Ti_right = (Ti[i,2,right_index,0]+Ti[i,2,right_index+1,0])/2
n_right = (n[i,2,right_index,0]+n[i,2,right_index+1,0])/2
sheath_potential = 0.5*Te_left*log(2*pi*electron_mass/ion_mass*(1+gamma*Ti_left/Te_left))
q_electron_left.append((2.0*Te_left-sheath_potential)*n_left*V[i,2,2,0]) # in W/m^2
sheath_potential = 0.5*Te_right*log(2*pi*electron_mass/ion_mass*(1+gamma*Ti_right/Te_right))
q_electron_right.append((2.0*Te_right-sheath_potential)*n_right*V[i,2,right_index+1,0]) # in W/m^2
pyplot.figure(1)
pyplot.plot(q_electron_left,'r',q[:,2,2,0],'b',q_electron_right,'r',q[:,2,right_index+1,0],'b')
pyplot.title("Electron heat flux at the boundaries (blue) and calculated boundary value (red)\n\n")
pyplot.xlabel(u"t/μs")
pyplot.ylabel("Q/eV.m$^{-2}$")
pyplot.figure(2)
pyplot.plot(q[:,2,2,0]-q_electron_left,'b',q[:,2,right_index+1,0]-q_electron_right,'r')
pyplot.title("Difference between heat flux and its calculated boundary value at the left (blue) and right (red) boundaries\n\n")
pyplot.xlabel(u"t/μs")
pyplot.ylabel("dQ/eV.m$^{-2}$")
pyplot.show()<|fim▁end|> |
# Collect the data |
<|file_name|>issue-57410.rs<|end_file_name|><|fim▁begin|>// check-pass
// Tests that the `unreachable_pub` lint doesn't fire for `pub self::imp::f`.
#![deny(unreachable_pub)]
<|fim▁hole|>mod m {
mod imp {
pub fn f() {}
}
pub use self::imp::f;
}
pub use self::m::f;
fn main() {}<|fim▁end|> | |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># Copyright 2017 Starbot Discord Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.<|fim▁hole|>from api.database.db import DB
def db_open(db_in):
    """Open ``db_in`` with the configured backend (SQLite only for now)."""
    if DB.type != "SQLite":
        return
    SQLite.db_open(db_in)
def db_close(db_in):
    """Close ``db_in`` with the configured backend (SQLite only for now)."""
    if DB.type != "SQLite":
        return
    SQLite.close(db_in)
def db_create_table(db_in, tablename):
    # Create `tablename` in the database. Only the SQLite backend is
    # implemented; any other DB.type value is silently ignored.
    if DB.type == "SQLite":
        SQLite.db_create_table(db_in, tablename)
def db_insert(db_in, table, dict_in):
    """Insert ``dict_in`` as a row of ``table``.

    Returns the backend's result, or None when the configured backend is
    not SQLite (the only backend currently implemented).
    """
    if DB.type != "SQLite":
        return None
    return SQLite.db_insert(db_in, table, dict_in)
def db_get_contents_of_table(db_in, table, rows):
    # Fetch `rows` from `table`. Only the SQLite backend is implemented;
    # implicitly returns None for any other DB.type value.
    if DB.type == "SQLite":
        return SQLite.db_get_contents_of_table(db_in, table, rows)
def db_get_latest_id(db_in, table):
    """Return the most recent row id of ``table``.

    Only the SQLite backend is implemented; returns None for any other
    configured backend. (A stray data-corpus token trailing the return
    statement has been removed.)
    """
    if DB.type == "SQLite":
        return SQLite.db_get_latest_id(db_in, table)
    return None
# limitations under the License.
from api.database.DAL import SQLite |
<|file_name|>riscv_instr_base.py<|end_file_name|><|fim▁begin|>"""Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import utils
import random
import constraint
from bitstring import BitArray, BitStream
class riscv_instr_base:
max_instr_length = 11
# Missing parts: latency
def __init__(self, name=""):
self.name = name
self.instr_group = "Instruction Group"
self.instr_format = "Instruction Format"
self.instr_category = "Instruction Category"
self.instr_name = "Instruction Name"
self.instr_imm_t = "Instruction Immediate Type"
self.instr_src2 = "Instruction Source 2"
self.instr_src1 = "Instruction Source 1"
self.instr_rd = "Instruction Destination"
self.imm = "Instruction Immediate"
self.imm_length = "Instruction Immediate Length"
self.imm_str = ""
self.csr = "CSR"
self.comment = ""
self.has_label = 1
self.label = ""
self.idx = -1
self.atomic = 0 # As of now, we don't support atomic instructions.
self.is_compressed = 0 # As of now, compressed instructions are not supported
self.is_illegal_instr = 0
self.is_local_numeric_label = 0
self.is_pseudo_instr = "Is it a pseudo instruction or not"
self.branch_assigned = 0
self.process_load_store = 1
self.solution = "A random solution which meets given constraints"
self.problem = constraint.Problem(constraint.MinConflictsSolver())
# Convert an instruction to its assembly form.
def convert2asm(self):
    """Render the solved instruction as a lowercase assembly string.

    Reads the solved operands out of self.solution and formats them
    according to the instruction's format/category. An optional comment
    is appended after '#'.
    """
    asm = name = self.solution[self.instr_name]
    format = self.solution[self.instr_format]
    category = self.solution[self.instr_category]
    src2 = self.solution[self.instr_src2]
    src1 = self.solution[self.instr_src1]
    destination = self.solution[self.instr_rd]
    csr = self.solution[self.csr]
    if category != "SYSTEM":
        if format == "J_FORMAT" or format == "U_FORMAT":
            # rd, imm
            asm += " {}, {}".format(destination, self.get_imm())
        elif format == "I_FORMAT":
            if name == "NOP":
                asm = "nop"
            elif name == "FENCE":
                asm = "fence"
            elif name == "FENCEI":
                asm = "fence.i"
            elif category == "LOAD":
                # rd, imm(rs1)
                asm += " {}, {}({})".format(destination, self.get_imm(), src1)
            elif category == "CSR":
                # rd, csr, imm
                asm += " {}, {}, {}".format(destination, hex(csr), self.get_imm())
            else:
                # rd, rs1, imm
                asm += " {}, {}, {}".format(destination, src1, self.get_imm())
        elif format == "S_FORMAT" or format == "B_FORMAT":
            if category == "STORE":
                # rs2, imm(rs1)
                asm += " {}, {}({})".format(src2, self.get_imm(), src1)
            else:
                # rs1, rs2, imm (branches)
                asm += " {}, {}, {}".format(src1, src2, self.get_imm())
        elif format == "R_FORMAT":
            if category == "CSR":
                asm += " {}, {}, {}".format(destination, hex(csr), src1)
            else:
                asm += " {}, {}, {}".format(destination, src1, src2)
    else:
        if name == "BREAK":
            # Temporarily disable compressed encoding around ebreak.
            asm = ".option norvc;ebreak;.option rvc;"
    if self.comment != "":
        asm += " # {}".format(self.comment)
    return asm.lower()
# Instruction to binary format
# TODO: to do -- this is an unfinished stub: it only starts handling
# J_FORMAT and never returns the assembled binary.
def convert2bin(self, sol):
    name = sol[self.instr_name]
    format = sol[self.instr_format]
    imm = sol[self.imm]
    rd = sol[self.instr_rd]
    if format == "J_FORMAT":
        binary = ""
def post_randomize(self):
    """Truncate/extend the randomized immediate to imm_length bits.

    UIMM/NZUIMM types are zero-extended (upper bits cleared); all other
    types are sign-extended from bit imm_length-1. Non-zero immediate
    types (NZIMM/NZUIMM) that came out as 0 are redrawn.
    """
    imm_length = self.solution[self.imm_length]
    imm_t = self.solution[self.instr_imm_t]
    imm = self.solution[self.imm]
    imm_bit = BitArray(int=imm, length=32)
    # Mask covering every bit above the immediate width.
    imm_mask = BitArray(uint=4294967295, length=32)
    imm_mask = imm_mask << imm_length
    if imm_t == "UIMM" or imm_t == "NZUIMM":
        # Zero-extension: clear all bits above imm_length.
        imm_bit = imm_bit & ~imm_mask
        imm = imm_bit.int
    else:
        if imm_bit[-imm_length]:
            # Sign bit set: propagate ones through the upper bits.
            imm_bit = imm_bit | imm_mask
            imm = imm_bit.int
        else:
            imm_bit = imm_bit & ~imm_mask
            imm = imm_bit.int
    if (imm_t == "NZIMM" or imm_t == "NZUIMM") and imm == 0:
        # Non-zero immediate types must not be 0; redraw within range.
        imm = random.randrange(1, 2**(imm_length - 1) - 1)
    if self.imm_str == "":
        # NOTE(review): stores an int despite the *_str name; downstream
        # formatting appears to rely on str.format coercion -- confirm.
        self.imm_str = int(imm)
def get_imm(self):
return self.imm_str
def problem_definition(self,
no_branch=0,
no_load_store=1,
enable_hint_instr=0,
no_name_c=0):
# Adding variables to the problem
self.problem.addVariable(self.instr_group, utils.riscv_instr_group_t)
self.problem.addVariable(self.instr_format, utils.riscv_instr_format_t)
self.problem.addVariable(self.instr_category, utils.riscv_instr_category_t)
self.problem.addVariable(self.instr_name, utils.riscv_instr_name_t)
self.problem.addVariable(self.instr_imm_t, utils.imm_t)
self.problem.addVariables([self.instr_src2, self.instr_src1, self.instr_rd],
utils.riscv_reg_t)
self.problem.addVariable(self.imm_length, [5, 6, 8, 11, 20])
# problem.addVariable(self.imm, range(0x00000000, 0xffffffff)) # doesn't
# work because: OverflowError: Python int too large to convert to C ssize_t
# Need to change the constraint to a soft constraint, as the default_c in
# the pseudo instruction class is in conflict with this one
if self.imm not in self.problem._variables:
self.problem.addVariable(self.imm, range(0x0000, 0xffff))
self.problem.addVariable(self.csr, range(0x000, 0xfff))
def default_c(is_pseudo_instr):
if not is_pseudo_instr:
return True
def name_c(name, group, format, category, imm_t):
condition = (
# Load instructions
(name == "LB" and group == "RV32I" and format == "I_FORMAT" and
category == "LOAD" and imm_t == "IMM") or
(name == "LH" and group == "RV32I" and format == "I_FORMAT" and
category == "LOAD" and imm_t == "IMM") or
(name == "LW" and group == "RV32I" and format == "I_FORMAT" and
category == "LOAD" and imm_t == "IMM") or
(name == "LBU" and group == "RV32I" and format == "I_FORMAT" and
category == "LOAD" and imm_t == "IMM") or
(name == "LHU" and group == "RV32I" and format == "I_FORMAT" and
category == "LOAD" and imm_t == "IMM")
# Store instructions
or (name == "SB" and group == "RV32I" and format == "S_FORMAT" and
category == "STORE" and imm_t == "IMM") or
(name == "SH" and group == "RV32I" and format == "S_FORMAT" and
category == "STORE" and imm_t == "IMM") or
(name == "SW" and group == "RV32I" and format == "S_FORMAT" and
category == "STORE" and imm_t == "IMM")
# Shift istructions
or (name == "SLL" and group == "RV32I" and format == "R_FORMAT" and
category == "SHIFT" and imm_t == "IMM") or
(name == "SLLI" and group == "RV32I" and format == "I_FORMAT" and
category == "SHIFT" and imm_t == "IMM") or
(name == "SRL" and group == "RV32I" and format == "R_FORMAT" and
category == "SHIFT" and imm_t == "IMM") or
(name == "SRLI" and group == "RV32I" and format == "I_FORMAT" and
category == "SHIFT" and imm_t == "IMM") or
(name == "SRA" and group == "RV32I" and format == "R_FORMAT" and
category == "SHIFT" and imm_t == "IMM") or
(name == "SRAI" and group == "RV32I" and format == "I_FORMAT" and
category == "SHIFT" and imm_t == "IMM")
# Arithmetic instructions
or (name == "ADD" and group == "RV32I" and format == "R_FORMAT" and
category == "ARITHMETIC" and imm_t == "IMM") or
(name == "ADDI" and group == "RV32I" and format == "I_FORMAT" and
category == "ARITHMETIC" and imm_t == "IMM") or
(name == "NOP" and group == "RV32I" and format == "I_FORMAT" and
category == "ARITHMETIC" and imm_t == "IMM") or
(name == "SUB" and group == "RV32I" and format == "R_FORMAT" and
category == "ARITHMETIC" and imm_t == "IMM") or
(name == "LUI" and group == "RV32I" and format == "U_FORMAT" and
category == "ARITHMETIC" and imm_t == "UIMM") or
(name == "AUIPC" and group == "RV32I" and format == "U_FORMAT" and
category == "ARITHMETIC" and imm_t == "UIMM")
# Logical instructions
or (name == "XOR" and group == "RV32I" and format == "R_FORMAT" and
category == "LOGICAL" and imm_t == "IMM") or
(name == "XORI" and group == "RV32I" and format == "I_FORMAT" and
category == "LOGICAL" and imm_t == "IMM") or
(name == "OR" and group == "RV32I" and format == "R_FORMAT" and
category == "LOGICAL" and imm_t == "IMM") or
(name == "ORI" and group == "RV32I" and format == "I_FORMAT" and
category == "LOGICAL" and imm_t == "IMM") or
(name == "AND" and group == "RV32I" and format == "R_FORMAT" and
category == "LOGICAL" and imm_t == "IMM") or
(name == "ANDI" and group == "RV32I" and format == "I_FORMAT" and
category == "LOGICAL" and imm_t == "IMM")
# Compare instructions
or (name == "SLT" and group == "RV32I" and format == "R_FORMAT" and
category == "COMPARE" and imm_t == "IMM") or
(name == "SLTI" and group == "RV32I" and format == "I_FORMAT" and
category == "COMPARE" and imm_t == "IMM") or
(name == "SLTU" and group == "RV32I" and format == "R_FORMAT" and
category == "COMPARE" and imm_t == "IMM") or<|fim▁hole|> # Branch instructions
or (name == "BEQ" and group == "RV32I" and format == "B_FORMAT" and
category == "BRANCH" and imm_t == "IMM") or
(name == "BNE" and group == "RV32I" and format == "B_FORMAT" and
category == "BRANCH" and imm_t == "IMM") or
(name == "BLT" and group == "RV32I" and format == "B_FORMAT" and
category == "BRANCH" and imm_t == "IMM") or
(name == "BGE" and group == "RV32I" and format == "B_FORMAT" and
category == "BRANCH" and imm_t == "IMM") or
(name == "BLTU" and group == "RV32I" and format == "B_FORMAT" and
category == "BRANCH" and imm_t == "IMM") or
(name == "BGEU" and group == "RV32I" and format == "B_FORMAT" and
category == "BRANCH" and imm_t == "IMM")
# Jump instructions
or (name == "JAL" and group == "RV32I" and format == "J_FORMAT" and
category == "JUMP" and imm_t == "IMM") or
(name == "JALR" and group == "RV32I" and format == "I_FORMAT" and
category == "JUMP" and imm_t == "IMM")
# Synch instructions
or (name == "FENCE" and group == "RV32I" and format == "I_FORMAT" and
category == "SYNCH" and imm_t == "IMM") or
(name == "FENCEI" and group == "RV32I" and format == "I_FORMAT" and
category == "SYNCH" and imm_t == "IMM")
# System instructions
or (name == "ECALL" and group == "RV32I" and format == "I_FORMAT" and
category == "SYSTEM" and imm_t == "IMM") or
(name == "EBREAK" and group == "RV32I" and format == "I_FORMAT" and
category == "SYSTEM" and imm_t == "IMM") or
(name == "URET" and group == "RV32I" and format == "I_FORMAT" and
category == "SYSTEM" and imm_t == "IMM") or
(name == "SRET" and group == "RV32I" and format == "I_FORMAT" and
category == "SYSTEM" and imm_t == "IMM") or
(name == "MRET" and group == "RV32I" and format == "I_FORMAT" and
category == "SYSTEM" and imm_t == "IMM") or
(name == "WFI" and group == "RV32I" and format == "I_FORMAT" and
category == "SYSTEM" and imm_t == "IMM")
# CSR instructions
or (name == "CSRRW" and group == "RV32I" and format == "R_FORMAT" and
category == "CSR" and imm_t == "UIMM") or
(name == "CSRRS" and group == "RV32I" and format == "R_FORMAT" and
category == "CSR" and imm_t == "UIMM") or
(name == "CSRRC" and group == "RV32I" and format == "R_FORMAT" and
category == "CSR" and imm_t == "UIMM") or
(name == "CSRRWI" and group == "RV32I" and format == "I_FORMAT" and
category == "CSR" and imm_t == "UIMM") or
(name == "CSRRSI" and group == "RV32I" and format == "I_FORMAT" and
category == "CSR" and imm_t == "UIMM") or
(name == "CSRRCI" and group == "RV32I" and format == "I_FORMAT" and
category == "CSR" and imm_t == "UIMM"))
if condition:
return True
def fence_c(name, source1, destination, imm):
if name == "FENCE" or name == "FENCEI":
if source1 == "ZERO" and destination == "ZERO" and imm == 0:
return True
else:
return True
def load_store_c(category, source1):
if category == "LOAD" or category == "STORE":
if source1 != "ZERO":
return True
else:
return True
def nop_c(name, source1, source2, destination):
if name == "NOP":
if source1 == "ZERO" and source2 == "ZERO" and destination == "ZERO":
return True
else:
return True
def system_instr_c(category, source1, destination):
if category == "SYSTEM" or category == "SYNCH":
if source1 == "ZERO" and destination == "ZERO":
return True
else:
return True
def imm_len_c(format, imm_t, imm_length):
if format == "U_FORMAT" or format == "J_FORMAT":
return imm_length == 20
elif format == "I_FORMAT" or format == "S_FORMAT" or format == "B_FORMAT":
if imm_t == "UIMM":
return imm_length == 5
else:
return imm_length == 11
else:
return True
def imm_val_c(imm_type, imm):
if imm_type == "NZIMM" or imm_type == "NZUIMM":
return imm != 0
else:
return True
def shift_imm_val_c(category, imm):
if category == "SHIFT":
return imm < utils.XLEN
else:
return True
def only_arithmetic_and_logical_c(category):
if category == "ARITHMETIC" or category == "LOGICAL" or \
category == "BRANCH" or category == "LOAD" or category == "STORE":
return True
def non_system(category):
if category != "SYSTEM":
return True
def non_csr(category):
if category != "CSR":
return True
def non_synch(category):
if category != "SYNCH":
return True
def no_branch_c(category):
if category != "BRANCH":
return True
def no_load_store_c(category):
if category != "LOAD" and category != "STORE":
return True
# Refer to pseudo class for explanation
if not no_name_c:
self.problem.addConstraint(name_c, [
self.instr_name, self.instr_group, self.instr_format,
self.instr_category, self.instr_imm_t
])
# TODO: add a temporarily constraint for generating only arithmetic random instructions
# self.problem.addConstraint(only_arithmetic_and_logical_c, [self.instr_category])
# self.problem.addConstraint(default_c, [self.is_pseudo_instr])
self.problem.addConstraint(non_csr, [self.instr_category])
self.problem.addConstraint(non_system, [self.instr_category])
self.problem.addConstraint(non_synch, [self.instr_category])
if no_branch:
self.problem.addConstraint(no_branch_c, [self.instr_category])
if no_load_store:
self.problem.addConstraint(no_load_store_c, [self.instr_category])
self.problem.addConstraint(
fence_c, [self.instr_name, self.instr_src1, self.instr_rd, self.imm])
self.problem.addConstraint(load_store_c,
[self.instr_category, self.instr_src1])
self.problem.addConstraint(
nop_c,
[self.instr_name, self.instr_src1, self.instr_src2, self.instr_rd
]) #: takes too long, don't know why
self.problem.addConstraint(
system_instr_c, [self.instr_category, self.instr_src1, self.instr_rd])
self.problem.addConstraint(
imm_len_c, [self.instr_format, self.instr_imm_t, self.imm_length])
self.problem.addConstraint(imm_val_c, [self.instr_imm_t, self.imm])
self.problem.addConstraint(shift_imm_val_c, [self.instr_category, self.imm])
# return
# return self.problem.getSolution()
def randomize(self):
# old randomize()
# self.solution = self.problem.getSolution()
# self.post_randomize()
self.solution = self.problem.getSolution()
if self.solution:
# print("TODO: randomized with steps: {}".format(self.problem._solver._steps))
pass
else:
i = 1
while self.solution is None:
for j in range(10):
self.solution = self.problem.getSolution()
if self.solution:
# print("TODO: randomized with steps: {}".format(self.problem._solver._steps))
break
i *= 5
self.problem._solver._steps *= i
self.post_randomize()
# Psuedo instructions are used to simplify assembly program writing
class riscv_pseudo_instr(riscv_instr_base):
def __init__(self, name=""):
# calling super constructor
riscv_instr_base.__init__(self, name)
# Important: Constraint solver gets too slow in pseudo class. We have three solutions:
# 1- change the type of the constraint solver, from MinConflict to regular, this one
# also takes fairly good amount of time, but it's good for validity check, to see
# if constraints are valid and there is no conflict between them.
# 2- Increase the number of steps for MinConflict...
# 3- Since we don't need to check the name_c constraint here, we can get rid of it
# for pseudo class! We're going to use this option for now
# self.problem = constraint.Problem(constraint.MinConflictsSolver(steps=10000))
# self.problem = constraint.Problem()
self.process_load_store = 0
self.pseudo_instr_name = "Pseudo instruction name"
def problem_definition(self, la_instr=0):
# Calling the super problem_definition, to apply all the constraints to the base object
# super().problem_definition(no_load_store=0, no_name_c=1)
super().problem_definition(no_load_store=0)
# To add the new constraint carried by the problem_definition
# fun()
self.problem.addVariable(self.pseudo_instr_name,
utils.riscv_pseudo_instr_name_t)
self.problem.addVariable(self.is_pseudo_instr, range(2))
def pseudo_name_c(name, group, format, category):
condition = (((name == "LI" or name == "LA") and group == "RV32I" and
format == "I_FORMAT" and category == "LOAD"))
if condition:
return True
def la_c(name):
if name == "LA":
return True
def default_c(is_pseudo_instr):
if is_pseudo_instr:
return True
self.problem.addConstraint(pseudo_name_c, [
self.pseudo_instr_name, self.instr_group, self.instr_format,
self.instr_category
])
if la_instr:
self.problem.addConstraint(la_c, [self.pseudo_instr_name])
self.problem.addConstraint(default_c, [self.is_pseudo_instr])
return
# Convert the instruction to assembly code
def convert2asm(self):
asm_str = self.get_instr_name()
destination = self.solution[self.instr_rd]
# instr rd,imm
asm_str = "{} {}, {}".format(asm_str, destination, self.get_imm())
if self.comment != "":
asm_str = asm_str + " #" + self.comment
return asm_str.lower()
def get_instr_name(self):
return self.solution[self.pseudo_instr_name]<|fim▁end|> | (name == "SLTIU" and group == "RV32I" and format == "I_FORMAT" and
category == "COMPARE" and imm_t == "IMM") |
<|file_name|>register.py<|end_file_name|><|fim▁begin|>from django.db import models
from sampledatahelper.helper import SampleDataHelper
from sampledatahelper import handlers
class Register(object):
fields = {}
ignored_fields = []
_instance = None
sd = SampleDataHelper()
def __new__(cls, *args, **kwargs):
if not cls._instance:
cls._instance = super(Register, cls).__new__(cls, *args, **kwargs)
return cls._instance
def register(self, field_class, handler_class):
self.fields[field_class] = handler_class
def ignore(self, field_class):
self.ignored_fields.append(field_class)
def get_handler(self, field_instance):
if field_instance.__class__ in self.ignored_fields:
return None
handler = self.fields.get(field_instance.__class__, None)
if handler:
return handler(self.sd, field_instance)
return None
register = Register()
register.register(models.CharField, handlers.CharHandler)
register.register(models.BigIntegerField, handlers.BigIntegerHandler)
register.register(models.CharField, handlers.CharHandler)
register.register(models.SlugField, handlers.SlugHandler)
register.register(models.EmailField, handlers.EmailHandler)
register.register(models.URLField, handlers.URLHandler)
register.register(models.TextField, handlers.TextHandler)
register.register(models.IntegerField, handlers.IntegerHandler)
register.register(models.SmallIntegerField, handlers.SmallIntegerHandler)
register.register(models.PositiveIntegerField, handlers.PositiveIntegerHandler)
register.register(models.PositiveSmallIntegerField, handlers.PositiveSmallIntegerHandler)
register.register(models.BigIntegerField, handlers.BigIntegerHandler)
register.register(models.FloatField, handlers.FloatHandler)
register.register(models.BooleanField, handlers.BooleanHandler)
register.register(models.NullBooleanField, handlers.NullBooleanHandler)
register.register(models.CommaSeparatedIntegerField, handlers.CommaSeparatedIntegerHandler)
register.register(models.DecimalField, handlers.DecimalHandler)
register.register(models.DateField, handlers.DateHandler)
register.register(models.DateTimeField, handlers.DateTimeHandler)
register.register(models.TimeField, handlers.TimeHandler)
register.register(models.FileField, handlers.FileHandler)<|fim▁hole|>register.register(models.ForeignKey, handlers.ForeignKeyHandler)
register.register(models.OneToOneField, handlers.OneToOneHandler)
register.ignore(models.ManyToManyField)
register.ignore(models.AutoField)<|fim▁end|> | register.register(models.FilePathField, handlers.FilePathHandler)
register.register(models.ImageField, handlers.ImageHandler)
register.register(models.IPAddressField, handlers.IPAddressHandler)
register.register(models.GenericIPAddressField, handlers.GenericIPAddressHandler) |
<|file_name|>resource_dns_record_set_test.go<|end_file_name|><|fim▁begin|>package google
import (
"fmt"
"testing"
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
func TestAccDnsRecordSet_basic(t *testing.T) {
zoneName := fmt.Sprintf("dnszone-test-%s", acctest.RandString(10))
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckDnsRecordSetDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccDnsRecordSet_basic(zoneName, "127.0.0.10", 300),
Check: resource.ComposeTestCheckFunc(
testAccCheckDnsRecordSetExists(
"google_dns_record_set.foobar", zoneName),
),
},
},
})
}
func TestAccDnsRecordSet_modify(t *testing.T) {
zoneName := fmt.Sprintf("dnszone-test-%s", acctest.RandString(10))
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckDnsRecordSetDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccDnsRecordSet_basic(zoneName, "127.0.0.10", 300),
Check: resource.ComposeTestCheckFunc(
testAccCheckDnsRecordSetExists(
"google_dns_record_set.foobar", zoneName),
),
},
resource.TestStep{
Config: testAccDnsRecordSet_basic(zoneName, "127.0.0.11", 300),
Check: resource.ComposeTestCheckFunc(
testAccCheckDnsRecordSetExists(
"google_dns_record_set.foobar", zoneName),
),
},
resource.TestStep{
Config: testAccDnsRecordSet_basic(zoneName, "127.0.0.11", 600),
Check: resource.ComposeTestCheckFunc(
testAccCheckDnsRecordSetExists(
"google_dns_record_set.foobar", zoneName),
),
},
},
})
}
func TestAccDnsRecordSet_changeType(t *testing.T) {
zoneName := fmt.Sprintf("dnszone-test-%s", acctest.RandString(10))
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckDnsRecordSetDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccDnsRecordSet_basic(zoneName, "127.0.0.10", 300),
Check: resource.ComposeTestCheckFunc(
testAccCheckDnsRecordSetExists(
"google_dns_record_set.foobar", zoneName),
),
},
resource.TestStep{
Config: testAccDnsRecordSet_bigChange(zoneName, 600),
Check: resource.ComposeTestCheckFunc(
testAccCheckDnsRecordSetExists(
"google_dns_record_set.foobar", zoneName),
),
},
},
})
}
func testAccCheckDnsRecordSetDestroy(s *terraform.State) error {
config := testAccProvider.Meta().(*Config)
for _, rs := range s.RootModule().Resources {
// Deletion of the managed_zone implies everything is gone
if rs.Type == "google_dns_managed_zone" {
_, err := config.clientDns.ManagedZones.Get(
config.Project, rs.Primary.ID).Do()
if err == nil {
return fmt.Errorf("DNS ManagedZone still exists")
}
}
}
return nil
}
func testAccCheckDnsRecordSetExists(resourceType, resourceName string) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[resourceType]
if !ok {
return fmt.Errorf("Not found: %s", resourceName)
}
dnsName := rs.Primary.Attributes["name"]
dnsType := rs.Primary.Attributes["type"]
if rs.Primary.ID == "" {
return fmt.Errorf("No ID is set")
}
config := testAccProvider.Meta().(*Config)
resp, err := config.clientDns.ResourceRecordSets.List(
config.Project, resourceName).Name(dnsName).Type(dnsType).Do()
if err != nil {
return fmt.Errorf("Error confirming DNS RecordSet existence: %#v", err)
}
switch len(resp.Rrsets) {
case 0:
// The resource doesn't exist anymore
return fmt.Errorf("DNS RecordSet not found")
case 1:
return nil
default:
return fmt.Errorf("Only expected 1 record set, got %d", len(resp.Rrsets))
}
}
}
func testAccDnsRecordSet_basic(zoneName string, addr2 string, ttl int) string {
return fmt.Sprintf(`
resource "google_dns_managed_zone" "parent-zone" {
name = "%s"
dns_name = "hashicorptest.com."
description = "Test Description"
}
resource "google_dns_record_set" "foobar" {
managed_zone = "${google_dns_managed_zone.parent-zone.name}"
name = "test-record.hashicorptest.com."
type = "A"
rrdatas = ["127.0.0.1", "%s"]
ttl = %d
}
`, zoneName, addr2, ttl)
}
func testAccDnsRecordSet_bigChange(zoneName string, ttl int) string {
return fmt.Sprintf(`
resource "google_dns_managed_zone" "parent-zone" {
name = "%s"
dns_name = "hashicorptest.com."
description = "Test Description"
}
resource "google_dns_record_set" "foobar" {
managed_zone = "${google_dns_managed_zone.parent-zone.name}"
name = "test-record.hashicorptest.com."
type = "CNAME"
rrdatas = ["www.terraform.io."]
ttl = %d
}<|fim▁hole|><|fim▁end|> | `, zoneName, ttl)
} |
<|file_name|>autoderef-method-priority.rs<|end_file_name|><|fim▁begin|><|fim▁hole|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(unknown_features)]
#![feature(box_syntax)]
trait double {
fn double(self) -> uint;
}
impl double for uint {
fn double(self) -> uint { self }
}
impl double for Box<uint> {
fn double(self) -> uint { *self * 2_usize }
}
pub fn main() {
let x = box 3_usize;
assert_eq!(x.double(), 6_usize);
}<|fim▁end|> | |
<|file_name|>riot.js<|end_file_name|><|fim▁begin|><|fim▁hole|>};<|fim▁end|> | exports.translate = function(tag) {
return this.import("riot").compile(tag); |
<|file_name|>ReteWorkingMemory.java<|end_file_name|><|fim▁begin|>/*
* Copyright 2015 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.drools.reteoo.common;
import org.drools.core.SessionConfiguration;
import org.drools.core.WorkingMemoryEntryPoint;
import org.drools.core.base.DroolsQuery;
import org.drools.core.common.BaseNode;
import org.drools.core.common.InternalAgenda;
import org.drools.core.common.InternalFactHandle;
import org.drools.core.common.InternalWorkingMemory;
import org.drools.core.common.WorkingMemoryAction;
import org.drools.core.event.AgendaEventSupport;
import org.drools.core.event.RuleEventListenerSupport;
import org.drools.core.event.RuleRuntimeEventSupport;
import org.drools.core.impl.InternalKnowledgeBase;
import org.drools.core.impl.StatefulKnowledgeSessionImpl;
import org.drools.core.phreak.PropagationEntry;
import org.drools.core.reteoo.LIANodePropagation;
import org.drools.core.spi.FactHandleFactory;
import org.drools.core.spi.PropagationContext;
import org.kie.api.runtime.Environment;
import org.kie.api.runtime.rule.AgendaFilter;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicBoolean;
public class ReteWorkingMemory extends StatefulKnowledgeSessionImpl {
private List<LIANodePropagation> liaPropagations;
private Queue<WorkingMemoryAction> actionQueue;
private AtomicBoolean evaluatingActionQueue = new AtomicBoolean(false);
/** Flag to determine if a rule is currently being fired. */
private volatile AtomicBoolean firing = new AtomicBoolean(false);
public ReteWorkingMemory() {
}
public ReteWorkingMemory(long id, InternalKnowledgeBase kBase) {
super(id, kBase);
}
public ReteWorkingMemory(long id, InternalKnowledgeBase kBase, boolean initInitFactHandle, SessionConfiguration config, Environment environment) {
super(id, kBase, initInitFactHandle, config, environment);
}
public ReteWorkingMemory(long id, InternalKnowledgeBase kBase, FactHandleFactory handleFactory, long propagationContext, SessionConfiguration config, InternalAgenda agenda, Environment environment) {
super(id, kBase, handleFactory, propagationContext, config, agenda, environment);
}
public ReteWorkingMemory(long id, InternalKnowledgeBase kBase, FactHandleFactory handleFactory, InternalFactHandle initialFactHandle, long propagationContext, SessionConfiguration config, Environment environment, RuleRuntimeEventSupport workingMemoryEventSupport, AgendaEventSupport agendaEventSupport, RuleEventListenerSupport ruleEventListenerSupport, InternalAgenda agenda) {
super(id, kBase, handleFactory, false, propagationContext, config, environment, workingMemoryEventSupport, agendaEventSupport, ruleEventListenerSupport, agenda);
}
@Override
protected void init() {
this.actionQueue = new ConcurrentLinkedQueue<WorkingMemoryAction>();
this.propagationList = new RetePropagationList(this);
}
@Override
public void reset() {
super.reset();
actionQueue.clear();
}
@Override
public void reset(int handleId,
long handleCounter,
long propagationCounter) {
super.reset(handleId, handleCounter, propagationCounter );
if (liaPropagations != null) liaPropagations.clear();
actionQueue.clear();
}
@Override
public WorkingMemoryEntryPoint getWorkingMemoryEntryPoint(String name) {
WorkingMemoryEntryPoint ep = this.entryPoints.get(name);
return ep != null ? new ReteWorkingMemoryEntryPoint( this, ep ) : null;
}
public void addLIANodePropagation(LIANodePropagation liaNodePropagation) {
if (liaPropagations == null) liaPropagations = new ArrayList<LIANodePropagation>();
liaPropagations.add( liaNodePropagation );
}
private final Object syncLock = new Object();
public void initInitialFact() {
if ( initialFactHandle == null ) {
synchronized ( syncLock ) {
if ( initialFactHandle == null ) {
// double check, inside of sync point incase some other thread beat us to it.
initInitialFact(kBase, null);
}
}
}
}<|fim▁hole|> super.fireUntilHalt( agendaFilter );
}
@Override
public int fireAllRules(final AgendaFilter agendaFilter,
int fireLimit) {
checkAlive();
if ( this.firing.compareAndSet( false,
true ) ) {
initInitialFact();
try {
startOperation();
return internalFireAllRules(agendaFilter, fireLimit);
} finally {
endOperation();
this.firing.set( false );
}
}
return 0;
}
private int internalFireAllRules(AgendaFilter agendaFilter, int fireLimit) {
int fireCount = 0;
try {
kBase.readLock();
// If we're already firing a rule, then it'll pick up the firing for any other assertObject(..) that get
// nested inside, avoiding concurrent-modification exceptions, depending on code paths of the actions.
if ( liaPropagations != null && isSequential() ) {
for ( LIANodePropagation liaPropagation : liaPropagations ) {
( liaPropagation ).doPropagation( this );
}
}
// do we need to call this in advance?
executeQueuedActionsForRete();
fireCount = this.agenda.fireAllRules( agendaFilter,
fireLimit );
} finally {
kBase.readUnlock();
if (kBase.flushModifications()) {
fireCount += internalFireAllRules(agendaFilter, fireLimit);
}
}
return fireCount;
}
@Override
public void closeLiveQuery(final InternalFactHandle factHandle) {
try {
startOperation();
this.kBase.readLock();
this.lock.lock();
final PropagationContext pCtx = pctxFactory.createPropagationContext(getNextPropagationIdCounter(), PropagationContext.INSERTION,
null, null, factHandle, getEntryPoint());
getEntryPointNode().retractQuery( factHandle,
pCtx,
this );
pCtx.evaluateActionQueue(this);
getFactHandleFactory().destroyFactHandle( factHandle );
} finally {
this.lock.unlock();
this.kBase.readUnlock();
endOperation();
}
}
@Override
protected BaseNode[] evalQuery(String queryName, DroolsQuery queryObject, InternalFactHandle handle, PropagationContext pCtx) {
initInitialFact();
BaseNode[] tnodes = kBase.getReteooBuilder().getTerminalNodesForQuery( queryName );
// no need to call retract, as no leftmemory used.
getEntryPointNode().assertQuery( handle,
pCtx,
this );
pCtx.evaluateActionQueue( this );
return tnodes;
}
public Collection<WorkingMemoryAction> getActionQueue() {
return actionQueue;
}
@Override
public void queueWorkingMemoryAction(final WorkingMemoryAction action) {
try {
startOperation();
actionQueue.add(action);
notifyWaitOnRest();
} finally {
endOperation();
}
}
public void addPropagation(PropagationEntry propagationEntry) {
if (propagationEntry instanceof WorkingMemoryAction) {
actionQueue.add((WorkingMemoryAction) propagationEntry);
} else {
super.addPropagation(propagationEntry);
}
}
@Override
public void executeQueuedActionsForRete() {
try {
startOperation();
if ( evaluatingActionQueue.compareAndSet( false,
true ) ) {
try {
if ( actionQueue!= null && !actionQueue.isEmpty() ) {
WorkingMemoryAction action;
while ( (action = actionQueue.poll()) != null ) {
try {
action.execute( (InternalWorkingMemory) this );
} catch ( Exception e ) {
throw new RuntimeException( "Unexpected exception executing action " + action.toString(),
e );
}
}
}
} finally {
evaluatingActionQueue.compareAndSet( true,
false );
}
}
} finally {
endOperation();
}
}
@Override
public Iterator<? extends PropagationEntry> getActionsIterator() {
return actionQueue.iterator();
}
}<|fim▁end|> |
@Override
public void fireUntilHalt(final AgendaFilter agendaFilter) {
initInitialFact(); |
<|file_name|>sample5__unittest_8cc.js<|end_file_name|><|fim▁begin|>var sample5__unittest_8cc =
[
[ "QuickTest", "class_quick_test.html", "class_quick_test" ],<|fim▁hole|> [ "QueueTest", "class_queue_test.html", "class_queue_test" ],
[ "TEST_F", "sample5__unittest_8cc.html#ad7679328025ef6d95ee2f45d5604aa89", null ],
[ "TEST_F", "sample5__unittest_8cc.html#ab941323e5a68b9aa8125cb81f5ff3d7c", null ],
[ "TEST_F", "sample5__unittest_8cc.html#ac3e547171299114162b1e8cd5946eb5c", null ],
[ "TEST_F", "sample5__unittest_8cc.html#a0149784588d6ea2a29e0f50a05ee198e", null ]
];<|fim▁end|> | [ "IntegerFunctionTest", "class_integer_function_test.html", null ], |
<|file_name|>_hassourceofbase.py<|end_file_name|><|fim▁begin|>#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2006 Donald N. Allingham
# Copyright (C) 2011 Tim G L Lyons
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Standard Python modules
#<|fim▁hole|>from ...const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from . import Rule
#-------------------------------------------------------------------------
#
# HasSourceOf
#
#-------------------------------------------------------------------------
class HasSourceOfBase(Rule):
"""Rule that checks for objects that have a particular source."""
labels = [ _('Source ID:') ]
name = 'Object with the <source>'
category = _('Citation/source filters')
description = 'Matches objects who have a particular source'
def prepare(self, db, user):
if self.list[0] == '':
self.source_handle = None
self.nosource = True
return
self.nosource = False
try:
self.source_handle = db.get_source_from_gramps_id(
self.list[0]).get_handle()
except:
self.source_handle = None
def apply(self, db, object):
if not self.source_handle:
if self.nosource:
# check whether the citation list is empty as a proxy for
# there being no sources
return len(object.get_all_citation_lists()) == 0
else:
return False
else:
for citation_handle in object.get_all_citation_lists():
citation = db.get_citation_from_handle(citation_handle)
if citation.get_reference_handle() == self.source_handle:
return True
return False<|fim▁end|> | #------------------------------------------------------------------------- |
<|file_name|>ColorManager.test.js<|end_file_name|><|fim▁begin|>"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
var jsdom = require("jsdom");
var chai_1 = require("chai");
var ColorManager_1 = require("./ColorManager");
describe('ColorManager', function () {
var cm;
var dom;
var document;
var window;
beforeEach(function () {
dom = new jsdom.JSDOM('');
window = dom.window;
document = window.document;
window.HTMLCanvasElement.prototype.getContext = function () { return ({
createLinearGradient: function () {
return null;
},
fillRect: function () { },
getImageData: function () {
return { data: [0, 0, 0, 0xFF] };
}
}); };
cm = new ColorManager_1.ColorManager(document, false);
});
describe('constructor', function () {
it('should fill all colors with values', function () {
for (var _i = 0, _a = Object.keys(cm.colors); _i < _a.length; _i++) {
var key = _a[_i];
if (key !== 'ansi') {
chai_1.assert.ok(cm.colors[key].css.length >= 7);
}
}
chai_1.assert.equal(cm.colors.ansi.length, 256);
});
it('should fill 240 colors with expected values', function () {
chai_1.assert.equal(cm.colors.ansi[16].css, '#000000');
chai_1.assert.equal(cm.colors.ansi[17].css, '#00005f');
chai_1.assert.equal(cm.colors.ansi[18].css, '#000087');
chai_1.assert.equal(cm.colors.ansi[19].css, '#0000af');
chai_1.assert.equal(cm.colors.ansi[20].css, '#0000d7');
chai_1.assert.equal(cm.colors.ansi[21].css, '#0000ff');
chai_1.assert.equal(cm.colors.ansi[22].css, '#005f00');
chai_1.assert.equal(cm.colors.ansi[23].css, '#005f5f');
chai_1.assert.equal(cm.colors.ansi[24].css, '#005f87');
chai_1.assert.equal(cm.colors.ansi[25].css, '#005faf');
chai_1.assert.equal(cm.colors.ansi[26].css, '#005fd7');
chai_1.assert.equal(cm.colors.ansi[27].css, '#005fff');
chai_1.assert.equal(cm.colors.ansi[28].css, '#008700');
chai_1.assert.equal(cm.colors.ansi[29].css, '#00875f');
chai_1.assert.equal(cm.colors.ansi[30].css, '#008787');
chai_1.assert.equal(cm.colors.ansi[31].css, '#0087af');
chai_1.assert.equal(cm.colors.ansi[32].css, '#0087d7');
chai_1.assert.equal(cm.colors.ansi[33].css, '#0087ff');
chai_1.assert.equal(cm.colors.ansi[34].css, '#00af00');
chai_1.assert.equal(cm.colors.ansi[35].css, '#00af5f');
chai_1.assert.equal(cm.colors.ansi[36].css, '#00af87');
chai_1.assert.equal(cm.colors.ansi[37].css, '#00afaf');
chai_1.assert.equal(cm.colors.ansi[38].css, '#00afd7');
chai_1.assert.equal(cm.colors.ansi[39].css, '#00afff');
chai_1.assert.equal(cm.colors.ansi[40].css, '#00d700');
chai_1.assert.equal(cm.colors.ansi[41].css, '#00d75f');
chai_1.assert.equal(cm.colors.ansi[42].css, '#00d787');
chai_1.assert.equal(cm.colors.ansi[43].css, '#00d7af');
chai_1.assert.equal(cm.colors.ansi[44].css, '#00d7d7');
chai_1.assert.equal(cm.colors.ansi[45].css, '#00d7ff');
chai_1.assert.equal(cm.colors.ansi[46].css, '#00ff00');
chai_1.assert.equal(cm.colors.ansi[47].css, '#00ff5f');
chai_1.assert.equal(cm.colors.ansi[48].css, '#00ff87');
chai_1.assert.equal(cm.colors.ansi[49].css, '#00ffaf');
chai_1.assert.equal(cm.colors.ansi[50].css, '#00ffd7');
chai_1.assert.equal(cm.colors.ansi[51].css, '#00ffff');
chai_1.assert.equal(cm.colors.ansi[52].css, '#5f0000');
chai_1.assert.equal(cm.colors.ansi[53].css, '#5f005f');
chai_1.assert.equal(cm.colors.ansi[54].css, '#5f0087');
chai_1.assert.equal(cm.colors.ansi[55].css, '#5f00af');
chai_1.assert.equal(cm.colors.ansi[56].css, '#5f00d7');
chai_1.assert.equal(cm.colors.ansi[57].css, '#5f00ff');
chai_1.assert.equal(cm.colors.ansi[58].css, '#5f5f00');
chai_1.assert.equal(cm.colors.ansi[59].css, '#5f5f5f');
chai_1.assert.equal(cm.colors.ansi[60].css, '#5f5f87');
chai_1.assert.equal(cm.colors.ansi[61].css, '#5f5faf');
chai_1.assert.equal(cm.colors.ansi[62].css, '#5f5fd7');
chai_1.assert.equal(cm.colors.ansi[63].css, '#5f5fff');
chai_1.assert.equal(cm.colors.ansi[64].css, '#5f8700');
chai_1.assert.equal(cm.colors.ansi[65].css, '#5f875f');
chai_1.assert.equal(cm.colors.ansi[66].css, '#5f8787');
chai_1.assert.equal(cm.colors.ansi[67].css, '#5f87af');
chai_1.assert.equal(cm.colors.ansi[68].css, '#5f87d7');
chai_1.assert.equal(cm.colors.ansi[69].css, '#5f87ff');
chai_1.assert.equal(cm.colors.ansi[70].css, '#5faf00');
chai_1.assert.equal(cm.colors.ansi[71].css, '#5faf5f');
chai_1.assert.equal(cm.colors.ansi[72].css, '#5faf87');
chai_1.assert.equal(cm.colors.ansi[73].css, '#5fafaf');
chai_1.assert.equal(cm.colors.ansi[74].css, '#5fafd7');
chai_1.assert.equal(cm.colors.ansi[75].css, '#5fafff');
chai_1.assert.equal(cm.colors.ansi[76].css, '#5fd700');
chai_1.assert.equal(cm.colors.ansi[77].css, '#5fd75f');
chai_1.assert.equal(cm.colors.ansi[78].css, '#5fd787');
chai_1.assert.equal(cm.colors.ansi[79].css, '#5fd7af');
chai_1.assert.equal(cm.colors.ansi[80].css, '#5fd7d7');
chai_1.assert.equal(cm.colors.ansi[81].css, '#5fd7ff');
chai_1.assert.equal(cm.colors.ansi[82].css, '#5fff00');
chai_1.assert.equal(cm.colors.ansi[83].css, '#5fff5f');
chai_1.assert.equal(cm.colors.ansi[84].css, '#5fff87');
chai_1.assert.equal(cm.colors.ansi[85].css, '#5fffaf');
chai_1.assert.equal(cm.colors.ansi[86].css, '#5fffd7');
chai_1.assert.equal(cm.colors.ansi[87].css, '#5fffff');
chai_1.assert.equal(cm.colors.ansi[88].css, '#870000');
chai_1.assert.equal(cm.colors.ansi[89].css, '#87005f');
chai_1.assert.equal(cm.colors.ansi[90].css, '#870087');
chai_1.assert.equal(cm.colors.ansi[91].css, '#8700af');
chai_1.assert.equal(cm.colors.ansi[92].css, '#8700d7');
chai_1.assert.equal(cm.colors.ansi[93].css, '#8700ff');
chai_1.assert.equal(cm.colors.ansi[94].css, '#875f00');
chai_1.assert.equal(cm.colors.ansi[95].css, '#875f5f');
chai_1.assert.equal(cm.colors.ansi[96].css, '#875f87');
chai_1.assert.equal(cm.colors.ansi[97].css, '#875faf');
chai_1.assert.equal(cm.colors.ansi[98].css, '#875fd7');
chai_1.assert.equal(cm.colors.ansi[99].css, '#875fff');
chai_1.assert.equal(cm.colors.ansi[100].css, '#878700');
chai_1.assert.equal(cm.colors.ansi[101].css, '#87875f');
chai_1.assert.equal(cm.colors.ansi[102].css, '#878787');
chai_1.assert.equal(cm.colors.ansi[103].css, '#8787af');
chai_1.assert.equal(cm.colors.ansi[104].css, '#8787d7');
chai_1.assert.equal(cm.colors.ansi[105].css, '#8787ff');
chai_1.assert.equal(cm.colors.ansi[106].css, '#87af00');
chai_1.assert.equal(cm.colors.ansi[107].css, '#87af5f');
chai_1.assert.equal(cm.colors.ansi[108].css, '#87af87');
chai_1.assert.equal(cm.colors.ansi[109].css, '#87afaf');
chai_1.assert.equal(cm.colors.ansi[110].css, '#87afd7');
chai_1.assert.equal(cm.colors.ansi[111].css, '#87afff');
chai_1.assert.equal(cm.colors.ansi[112].css, '#87d700');
chai_1.assert.equal(cm.colors.ansi[113].css, '#87d75f');
chai_1.assert.equal(cm.colors.ansi[114].css, '#87d787');
chai_1.assert.equal(cm.colors.ansi[115].css, '#87d7af');
chai_1.assert.equal(cm.colors.ansi[116].css, '#87d7d7');
chai_1.assert.equal(cm.colors.ansi[117].css, '#87d7ff');
chai_1.assert.equal(cm.colors.ansi[118].css, '#87ff00');
chai_1.assert.equal(cm.colors.ansi[119].css, '#87ff5f');
chai_1.assert.equal(cm.colors.ansi[120].css, '#87ff87');
chai_1.assert.equal(cm.colors.ansi[121].css, '#87ffaf');
chai_1.assert.equal(cm.colors.ansi[122].css, '#87ffd7');
chai_1.assert.equal(cm.colors.ansi[123].css, '#87ffff');
chai_1.assert.equal(cm.colors.ansi[124].css, '#af0000');
chai_1.assert.equal(cm.colors.ansi[125].css, '#af005f');
chai_1.assert.equal(cm.colors.ansi[126].css, '#af0087');
chai_1.assert.equal(cm.colors.ansi[127].css, '#af00af');
chai_1.assert.equal(cm.colors.ansi[128].css, '#af00d7');
chai_1.assert.equal(cm.colors.ansi[129].css, '#af00ff');
chai_1.assert.equal(cm.colors.ansi[130].css, '#af5f00');
chai_1.assert.equal(cm.colors.ansi[131].css, '#af5f5f');
chai_1.assert.equal(cm.colors.ansi[132].css, '#af5f87');
chai_1.assert.equal(cm.colors.ansi[133].css, '#af5faf');
chai_1.assert.equal(cm.colors.ansi[134].css, '#af5fd7');
chai_1.assert.equal(cm.colors.ansi[135].css, '#af5fff');
chai_1.assert.equal(cm.colors.ansi[136].css, '#af8700');
chai_1.assert.equal(cm.colors.ansi[137].css, '#af875f');
chai_1.assert.equal(cm.colors.ansi[138].css, '#af8787');
chai_1.assert.equal(cm.colors.ansi[139].css, '#af87af');
chai_1.assert.equal(cm.colors.ansi[140].css, '#af87d7');
chai_1.assert.equal(cm.colors.ansi[141].css, '#af87ff');
chai_1.assert.equal(cm.colors.ansi[142].css, '#afaf00');
chai_1.assert.equal(cm.colors.ansi[143].css, '#afaf5f');
chai_1.assert.equal(cm.colors.ansi[144].css, '#afaf87');<|fim▁hole|> chai_1.assert.equal(cm.colors.ansi[148].css, '#afd700');
chai_1.assert.equal(cm.colors.ansi[149].css, '#afd75f');
chai_1.assert.equal(cm.colors.ansi[150].css, '#afd787');
chai_1.assert.equal(cm.colors.ansi[151].css, '#afd7af');
chai_1.assert.equal(cm.colors.ansi[152].css, '#afd7d7');
chai_1.assert.equal(cm.colors.ansi[153].css, '#afd7ff');
chai_1.assert.equal(cm.colors.ansi[154].css, '#afff00');
chai_1.assert.equal(cm.colors.ansi[155].css, '#afff5f');
chai_1.assert.equal(cm.colors.ansi[156].css, '#afff87');
chai_1.assert.equal(cm.colors.ansi[157].css, '#afffaf');
chai_1.assert.equal(cm.colors.ansi[158].css, '#afffd7');
chai_1.assert.equal(cm.colors.ansi[159].css, '#afffff');
chai_1.assert.equal(cm.colors.ansi[160].css, '#d70000');
chai_1.assert.equal(cm.colors.ansi[161].css, '#d7005f');
chai_1.assert.equal(cm.colors.ansi[162].css, '#d70087');
chai_1.assert.equal(cm.colors.ansi[163].css, '#d700af');
chai_1.assert.equal(cm.colors.ansi[164].css, '#d700d7');
chai_1.assert.equal(cm.colors.ansi[165].css, '#d700ff');
chai_1.assert.equal(cm.colors.ansi[166].css, '#d75f00');
chai_1.assert.equal(cm.colors.ansi[167].css, '#d75f5f');
chai_1.assert.equal(cm.colors.ansi[168].css, '#d75f87');
chai_1.assert.equal(cm.colors.ansi[169].css, '#d75faf');
chai_1.assert.equal(cm.colors.ansi[170].css, '#d75fd7');
chai_1.assert.equal(cm.colors.ansi[171].css, '#d75fff');
chai_1.assert.equal(cm.colors.ansi[172].css, '#d78700');
chai_1.assert.equal(cm.colors.ansi[173].css, '#d7875f');
chai_1.assert.equal(cm.colors.ansi[174].css, '#d78787');
chai_1.assert.equal(cm.colors.ansi[175].css, '#d787af');
chai_1.assert.equal(cm.colors.ansi[176].css, '#d787d7');
chai_1.assert.equal(cm.colors.ansi[177].css, '#d787ff');
chai_1.assert.equal(cm.colors.ansi[178].css, '#d7af00');
chai_1.assert.equal(cm.colors.ansi[179].css, '#d7af5f');
chai_1.assert.equal(cm.colors.ansi[180].css, '#d7af87');
chai_1.assert.equal(cm.colors.ansi[181].css, '#d7afaf');
chai_1.assert.equal(cm.colors.ansi[182].css, '#d7afd7');
chai_1.assert.equal(cm.colors.ansi[183].css, '#d7afff');
chai_1.assert.equal(cm.colors.ansi[184].css, '#d7d700');
chai_1.assert.equal(cm.colors.ansi[185].css, '#d7d75f');
chai_1.assert.equal(cm.colors.ansi[186].css, '#d7d787');
chai_1.assert.equal(cm.colors.ansi[187].css, '#d7d7af');
chai_1.assert.equal(cm.colors.ansi[188].css, '#d7d7d7');
chai_1.assert.equal(cm.colors.ansi[189].css, '#d7d7ff');
chai_1.assert.equal(cm.colors.ansi[190].css, '#d7ff00');
chai_1.assert.equal(cm.colors.ansi[191].css, '#d7ff5f');
chai_1.assert.equal(cm.colors.ansi[192].css, '#d7ff87');
chai_1.assert.equal(cm.colors.ansi[193].css, '#d7ffaf');
chai_1.assert.equal(cm.colors.ansi[194].css, '#d7ffd7');
chai_1.assert.equal(cm.colors.ansi[195].css, '#d7ffff');
chai_1.assert.equal(cm.colors.ansi[196].css, '#ff0000');
chai_1.assert.equal(cm.colors.ansi[197].css, '#ff005f');
chai_1.assert.equal(cm.colors.ansi[198].css, '#ff0087');
chai_1.assert.equal(cm.colors.ansi[199].css, '#ff00af');
chai_1.assert.equal(cm.colors.ansi[200].css, '#ff00d7');
chai_1.assert.equal(cm.colors.ansi[201].css, '#ff00ff');
chai_1.assert.equal(cm.colors.ansi[202].css, '#ff5f00');
chai_1.assert.equal(cm.colors.ansi[203].css, '#ff5f5f');
chai_1.assert.equal(cm.colors.ansi[204].css, '#ff5f87');
chai_1.assert.equal(cm.colors.ansi[205].css, '#ff5faf');
chai_1.assert.equal(cm.colors.ansi[206].css, '#ff5fd7');
chai_1.assert.equal(cm.colors.ansi[207].css, '#ff5fff');
chai_1.assert.equal(cm.colors.ansi[208].css, '#ff8700');
chai_1.assert.equal(cm.colors.ansi[209].css, '#ff875f');
chai_1.assert.equal(cm.colors.ansi[210].css, '#ff8787');
chai_1.assert.equal(cm.colors.ansi[211].css, '#ff87af');
chai_1.assert.equal(cm.colors.ansi[212].css, '#ff87d7');
chai_1.assert.equal(cm.colors.ansi[213].css, '#ff87ff');
chai_1.assert.equal(cm.colors.ansi[214].css, '#ffaf00');
chai_1.assert.equal(cm.colors.ansi[215].css, '#ffaf5f');
chai_1.assert.equal(cm.colors.ansi[216].css, '#ffaf87');
chai_1.assert.equal(cm.colors.ansi[217].css, '#ffafaf');
chai_1.assert.equal(cm.colors.ansi[218].css, '#ffafd7');
chai_1.assert.equal(cm.colors.ansi[219].css, '#ffafff');
chai_1.assert.equal(cm.colors.ansi[220].css, '#ffd700');
chai_1.assert.equal(cm.colors.ansi[221].css, '#ffd75f');
chai_1.assert.equal(cm.colors.ansi[222].css, '#ffd787');
chai_1.assert.equal(cm.colors.ansi[223].css, '#ffd7af');
chai_1.assert.equal(cm.colors.ansi[224].css, '#ffd7d7');
chai_1.assert.equal(cm.colors.ansi[225].css, '#ffd7ff');
chai_1.assert.equal(cm.colors.ansi[226].css, '#ffff00');
chai_1.assert.equal(cm.colors.ansi[227].css, '#ffff5f');
chai_1.assert.equal(cm.colors.ansi[228].css, '#ffff87');
chai_1.assert.equal(cm.colors.ansi[229].css, '#ffffaf');
chai_1.assert.equal(cm.colors.ansi[230].css, '#ffffd7');
chai_1.assert.equal(cm.colors.ansi[231].css, '#ffffff');
chai_1.assert.equal(cm.colors.ansi[232].css, '#080808');
chai_1.assert.equal(cm.colors.ansi[233].css, '#121212');
chai_1.assert.equal(cm.colors.ansi[234].css, '#1c1c1c');
chai_1.assert.equal(cm.colors.ansi[235].css, '#262626');
chai_1.assert.equal(cm.colors.ansi[236].css, '#303030');
chai_1.assert.equal(cm.colors.ansi[237].css, '#3a3a3a');
chai_1.assert.equal(cm.colors.ansi[238].css, '#444444');
chai_1.assert.equal(cm.colors.ansi[239].css, '#4e4e4e');
chai_1.assert.equal(cm.colors.ansi[240].css, '#585858');
chai_1.assert.equal(cm.colors.ansi[241].css, '#626262');
chai_1.assert.equal(cm.colors.ansi[242].css, '#6c6c6c');
chai_1.assert.equal(cm.colors.ansi[243].css, '#767676');
chai_1.assert.equal(cm.colors.ansi[244].css, '#808080');
chai_1.assert.equal(cm.colors.ansi[245].css, '#8a8a8a');
chai_1.assert.equal(cm.colors.ansi[246].css, '#949494');
chai_1.assert.equal(cm.colors.ansi[247].css, '#9e9e9e');
chai_1.assert.equal(cm.colors.ansi[248].css, '#a8a8a8');
chai_1.assert.equal(cm.colors.ansi[249].css, '#b2b2b2');
chai_1.assert.equal(cm.colors.ansi[250].css, '#bcbcbc');
chai_1.assert.equal(cm.colors.ansi[251].css, '#c6c6c6');
chai_1.assert.equal(cm.colors.ansi[252].css, '#d0d0d0');
chai_1.assert.equal(cm.colors.ansi[253].css, '#dadada');
chai_1.assert.equal(cm.colors.ansi[254].css, '#e4e4e4');
chai_1.assert.equal(cm.colors.ansi[255].css, '#eeeeee');
});
});
describe('setTheme', function () {
it('should not throw when not setting all colors', function () {
chai_1.assert.doesNotThrow(function () {
cm.setTheme({});
});
});
it('should set a partial set of colors, using the default if not present', function () {
chai_1.assert.equal(cm.colors.background.css, '#000000');
chai_1.assert.equal(cm.colors.foreground.css, '#ffffff');
cm.setTheme({
background: '#FF0000',
foreground: '#00FF00'
});
chai_1.assert.equal(cm.colors.background.css, '#FF0000');
chai_1.assert.equal(cm.colors.foreground.css, '#00FF00');
cm.setTheme({
background: '#0000FF'
});
chai_1.assert.equal(cm.colors.background.css, '#0000FF');
chai_1.assert.equal(cm.colors.foreground.css, '#ffffff');
});
});
});
//# sourceMappingURL=ColorManager.test.js.map<|fim▁end|> | chai_1.assert.equal(cm.colors.ansi[145].css, '#afafaf');
chai_1.assert.equal(cm.colors.ansi[146].css, '#afafd7');
chai_1.assert.equal(cm.colors.ansi[147].css, '#afafff'); |
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>mod approach;
mod character;
mod friend;
mod context;
mod chat;
mod authorized;
mod player_status;
mod error;
use super::{Session, GameState, AccountData, SocialInformations, SocialState};
use super::chunk::{ChunkImpl, Ref, SocialUpdateType};
use character::CharacterMinimal;
use protocol::{Protocol, VarShort};
use protocol::messages::handshake::*;
use protocol::messages::game::approach::*;
use protocol::messages::queues::*;
use protocol::messages::game::basic::TextInformationMessage;
use protocol::enums::text_information_type;
use std::io::{self, Result};
use std::sync::atomic::Ordering;
use shared::{self, database};
use diesel::*;
use server::SERVER;
use character::Character;
use std::mem;
use std::collections::HashMap;
use shared::database::schema::{accounts, social_relations};
impl shared::session::Session<ChunkImpl> for Session {
fn new(base: shared::session::SessionBase) -> Self {
let mut buf = ProtocolRequired {
required_version: 1658,
current_version: 1658,
}.unwrap();
HelloGameMessage.unwrap_with_buf(&mut buf);
write!(SERVER, base.token, buf);
Session {
base: base,
account: None,
state: GameState::None,
last_sales_chat_request: 0,
last_seek_chat_request: 0,
friends_cache: HashMap::new(),
ignored_cache: HashMap::new(),
}
}
fn handle<'a>(&mut self, chunk: Ref<'a>, id: i16, mut data: io::Cursor<Vec<u8>>)
-> Result<()> {
use protocol::messages::game::friend::{
FriendsGetListMessage,
FriendSetWarnOnConnectionMessage,
FriendSetWarnOnLevelGainMessage,
IgnoredGetListMessage,
FriendAddRequestMessage,
FriendDeleteRequestMessage,
IgnoredAddRequestMessage,
IgnoredDeleteRequestMessage,
};
use protocol::messages::game::character::status::PlayerStatusUpdateRequestMessage;
use protocol::messages::game::chat::{
ChatClientMultiMessage,
ChatClientMultiWithObjectMessage,
ChatClientPrivateMessage,
ChatClientPrivateWithObjectMessage,
};
use protocol::messages::game::chat::channel::ChannelEnablingMessage;
use protocol::messages::game::chat::smiley::{
ChatSmileyRequestMessage,
MoodSmileyRequestMessage,
};
use protocol::messages::game::character::choice::{
CharactersListRequestMessage,
CharacterSelectionMessage,
};
use protocol::messages::game::character::creation::{
CharacterCreationRequestMessage,
};
use protocol::messages::authorized::{
AdminQuietCommandMessage,
};
use protocol::messages::game::context::{
GameContextCreateRequestMessage,
GameMapMovementRequestMessage,
GameMapMovementCancelMessage,
GameMapMovementConfirmMessage,
};
use protocol::messages::game::context::roleplay::{
MapInformationsRequestMessage,
ChangeMapMessage,
};
handle!(self, chunk, id, data)
}
fn close<'a>(mut self, mut chunk: Ref<'a>) {
let account = mem::replace(&mut self.account, None);
if let Some(account) = account {
SERVER.with(|s| database::execute(&s.auth_db, move |conn| {
if let Err(err) = Session::save_auth(conn, account) {
error!("error while saving session to auth db: {:?}", err);
}
}));
}
let state = mem::replace(&mut self.state, GameState::None);
if let GameState::InContext(ch) = state {
let map_id = ch.map_id;
let ch = chunk.maps
.get_mut(&ch.map_id).unwrap()
.remove_actor(ch.id).unwrap()
.into_character();
SERVER.with(|s| database::execute(&s.db, move |conn| {
if let Err(err) = self.base.save_logs(conn, ch.minimal().account_id()) {
error!("error while saving logs: {:?}", err);
}
if let Err(err) = self.save_game(conn, ch, map_id) {
error!("error while saving session to game db: {:?}", err);
}
}));
}
}
}
impl Session {
pub fn update_queue(&self) {
let (global_queue_size, global_queue_counter) = match self.state {
GameState::TicketQueue(..) => {
use self::approach::{QUEUE_COUNTER, QUEUE_SIZE};
(QUEUE_COUNTER.load(Ordering::Relaxed), QUEUE_SIZE.load(Ordering::Relaxed))
}
GameState::GameQueue(..) => {
use self::character::{QUEUE_COUNTER, QUEUE_SIZE};
(QUEUE_COUNTER.load(Ordering::Relaxed), QUEUE_SIZE.load(Ordering::Relaxed))
}
_ => return (),
};
let (former_queue_size, former_queue_counter) = match self.state {
GameState::TicketQueue(qs, qc) | GameState::GameQueue(qs, qc) => (qs, qc),
_ => unreachable!(),
};
let mut pos = former_queue_size - (global_queue_counter - former_queue_counter);
if pos < 0 {
pos = 0;
}
let buf = QueueStatusMessage {
position: pos as i16,
total: global_queue_size as i16,
}.unwrap();
write!(SERVER, self.base.token, buf);
}
pub fn update_social(&mut self, ch: &CharacterMinimal, social: Option<&SocialInformations>,
ty: SocialUpdateType) {
let account = match self.account.as_ref() {
Some(account) => account,
None => return,
};
let account_id = ch.account_id();
if account.social.has_relation_with(account_id, SocialState::Friend) {
let _ = self.friends_cache.insert(
account_id,
ch.as_relation_infos(account.id, social, SocialState::Friend).as_friend()
);
match ty {
SocialUpdateType::Online if account.social.warn_on_connection => {
let buf = TextInformationMessage {
msg_type: text_information_type::MESSAGE,
msg_id: VarShort(143),
parameters: vec![ch.name().to_string(), ch.account_nickname().to_string(),
account_id.to_string()],
}.unwrap();
write!(SERVER, self.base.token, buf);
},
SocialUpdateType::WithLevel(_) if account.social.warn_on_level_gain => {
// TODO
},
_ => (),
}
}
if account.social.has_relation_with(account_id, SocialState::Ignored) {
let _ = self.ignored_cache.insert(
account_id,
ch.as_relation_infos(account.id, social, SocialState::Ignored).as_ignored()
);
}
}
}
#[changeset_for(accounts)]
struct UpdateSqlAccount {
already_logged: Option<i16>,
last_server: Option<i16>,
channels: Option<Vec<i16>>,
}
#[derive(Queryable)]
#[changeset_for(social_relations)]
struct SqlRelations {
friends: Vec<i32>,
ignored: Vec<i32>,
warn_on_connection: bool,
warn_on_level_gain: bool,
}
impl Session {
fn save_auth(conn: &Connection, account: AccountData) -> QueryResult<()> {
try!(conn.transaction(move || {
let _ = try!(
update(
accounts::table.filter(accounts::id.eq(&account.id))
).set(&UpdateSqlAccount {
already_logged: Some(0),
last_server: None,
channels: Some(account.channels.into_iter().map(|c| c as i16).collect()),
}).execute(conn)
);
let _ = try!(
update(
social_relations::table.filter(social_relations::id.eq(&account.id))
).set(&SqlRelations {
friends: account.social.friends.into_iter().collect(),
ignored: account.social.ignored.into_iter().collect(),<|fim▁hole|> }).execute(conn)
);
Ok(())
}));
Ok(())
}
fn save_game(&self, conn: &Connection, ch: Character, map: i32) -> QueryResult<()> {
try!(conn.transaction(|| {
ch.save(conn, map)
}));
Ok(())
}
}<|fim▁end|> | warn_on_connection: account.social.warn_on_connection,
warn_on_level_gain: account.social.warn_on_level_gain, |
<|file_name|>mysql_monitor_test.py<|end_file_name|><|fim▁begin|># Copyright 2019 Scalyr Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
#
# author: Imron Alston <[email protected]>
from __future__ import absolute_import
from __future__ import print_function
__author__ = "[email protected]"
import sys
from scalyr_agent import UnsupportedSystem
from scalyr_agent.test_base import ScalyrTestCase
class MySqlMonitorTest(ScalyrTestCase):
def _import_mysql_monitor(self):
import scalyr_agent.builtin_monitors.mysql_monitor # NOQA
self.assertTrue(True)
def test_min_python_version(self):
if sys.version_info[:2] < (2, 7):<|fim▁hole|> self._import_mysql_monitor()
def test_missing_qcache_hits(self):
if sys.version_info[:2] < (2, 7):
print(
"Skipping test 'test_missing_qcache_hits'.\n"
"This test is non-critical for pre-2.7 testing.\n"
)
return
from scalyr_agent.builtin_monitors.mysql_monitor import MysqlDB
class TestMysqlDB(MysqlDB):
def __init__(self):
# do nothing, because we don't actually want to connect to a DB
# for this test
pass
db = TestMysqlDB()
globalVars = {}
globalStatusMap = {"global.com_select": 10}
expected = 0
actual = db._derived_stat_query_cache_efficiency(globalVars, globalStatusMap)
self.assertEqual(expected, actual)<|fim▁end|> | self.assertRaises(UnsupportedSystem, lambda: self._import_mysql_monitor())
else: |
<|file_name|>bitcoin_zh_CN.ts<|end_file_name|><|fim▁begin|><?xml version="1.0" ?><!DOCTYPE TS><TS language="zh_CN" version="2.0">
<defaultcodec>UTF-8</defaultcodec>
<context>
<name>AboutDialog</name>
<message>
<location filename="../forms/aboutdialog.ui" line="+14"/>
<source>About EAcoin</source>
<translation>关于莱特币</translation>
</message>
<message>
<location line="+39"/>
<source><b>EAcoin</b> version</source>
<translation><b>莱特币</b>版本</translation>
</message>
<message>
<location line="+57"/>
<source>
This is experimental software.
Distributed under the MIT/X11 software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php.
This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit (http://www.openssl.org/) and cryptographic software written by Eric Young ([email protected]) and UPnP software written by Thomas Bernard.</source>
<translation>
This is experimental software.
Distributed under the MIT/X11 software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php.
This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit (http://www.openssl.org/) and cryptographic software written by Eric Young ([email protected]) and UPnP software written by Thomas Bernard.</translation>
</message>
<message>
<location filename="../aboutdialog.cpp" line="+14"/>
<source>Copyright</source>
<translation>版权</translation>
</message>
<message>
<location line="+0"/>
<source>The EAcoin developers</source>
<translation>EAcoin-qt 客户端开发团队</translation>
</message>
</context>
<context>
<name>AddressBookPage</name>
<message>
<location filename="../forms/addressbookpage.ui" line="+14"/>
<source>Address Book</source>
<translation>通讯录</translation>
</message>
<message>
<location line="+19"/>
<source>Double-click to edit address or label</source>
<translation>双击以编辑地址或标签</translation>
</message>
<message>
<location line="+27"/>
<source>Create a new address</source>
<translation>创建新地址</translation>
</message>
<message>
<location line="+14"/>
<source>Copy the currently selected address to the system clipboard</source>
<translation>复制当前选中地址到系统剪贴板</translation>
</message>
<message>
<location line="-11"/>
<source>&New Address</source>
<translation>&新建地址</translation>
</message>
<message>
<location filename="../addressbookpage.cpp" line="+63"/>
<source>These are your EAcoin addresses for receiving payments. You may want to give a different one to each sender so you can keep track of who is paying you.</source>
<translation>这是您用来收款的莱特币地址。为了标记不同的资金来源,建议为每个付款人保留不同的收款地址。</translation>
</message>
<message>
<location filename="../forms/addressbookpage.ui" line="+14"/>
<source>&Copy Address</source>
<translation>&复制地址</translation>
</message>
<message>
<location line="+11"/>
<source>Show &QR Code</source>
<translation>显示二维码</translation>
</message>
<message>
<location line="+11"/>
<source>Sign a message to prove you own a EAcoin address</source>
<translation>签名消息,证明这个地址属于您。</translation>
</message>
<message>
<location line="+3"/>
<source>Sign &Message</source>
<translation>对消息签名</translation>
</message>
<message>
<location line="+25"/>
<source>Delete the currently selected address from the list</source>
<translation>从列表中删除选中的地址</translation>
</message>
<message>
<location line="+27"/>
<source>Export the data in the current tab to a file</source>
<translation>导出当前数据到文件</translation>
</message>
<message>
<location line="+3"/>
<source>&Export</source>
<translation>&导出</translation>
</message>
<message>
<location line="-44"/>
<source>Verify a message to ensure it was signed with a specified EAcoin address</source>
<translation>验证消息,确保消息是由指定的莱特币地址签名过的。</translation>
</message>
<message>
<location line="+3"/>
<source>&Verify Message</source>
<translation>&验证消息</translation>
</message>
<message>
<location line="+14"/>
<source>&Delete</source>
<translation>&删除</translation>
</message>
<message>
<location filename="../addressbookpage.cpp" line="-5"/>
<source>These are your EAcoin addresses for sending payments. Always check the amount and the receiving address before sending coins.</source>
<translation>这是您用来付款的莱特币地址。在付款前,请总是核实付款金额和收款地址。</translation>
</message>
<message>
<location line="+13"/>
<source>Copy &Label</source>
<translation>复制 &标签</translation>
</message>
<message>
<location line="+1"/>
<source>&Edit</source>
<translation>&编辑</translation>
</message>
<message>
<location line="+1"/>
<source>Send &Coins</source>
<translation>付款</translation>
</message>
<message>
<location line="+260"/>
<source>Export Address Book Data</source>
<translation>导出通讯录数据</translation>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
<translation>逗号分隔文件 (*.csv)</translation>
</message>
<message>
<location line="+13"/>
<source>Error exporting</source>
<translation>导出错误</translation>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
<translation>无法写入文件 %1。</translation>
</message>
</context>
<context>
<name>AddressTableModel</name>
<message>
<location filename="../addresstablemodel.cpp" line="+144"/>
<source>Label</source>
<translation>标签</translation>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation>地址</translation>
</message>
<message>
<location line="+36"/>
<source>(no label)</source>
<translation>(没有标签)</translation>
</message>
</context>
<context>
<name>AskPassphraseDialog</name>
<message>
<location filename="../forms/askpassphrasedialog.ui" line="+26"/>
<source>Passphrase Dialog</source>
<translation>密码对话框</translation>
</message>
<message>
<location line="+21"/>
<source>Enter passphrase</source>
<translation>输入密码</translation>
</message>
<message>
<location line="+14"/>
<source>New passphrase</source>
<translation>新密码</translation>
</message>
<message>
<location line="+14"/>
<source>Repeat new passphrase</source>
<translation>重复新密码</translation>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="+33"/>
<source>Enter the new passphrase to the wallet.<br/>Please use a passphrase of <b>10 or more random characters</b>, or <b>eight or more words</b>.</source>
<translation>输入钱包的新密码。<br/>使用的密码请至少包含<b>10个以上随机字符</>,或者是<b>8个以上的单词</b>。</translation>
</message>
<message>
<location line="+1"/>
<source>Encrypt wallet</source>
<translation>加密钱包</translation>
</message>
<message>
<location line="+3"/>
<source>This operation needs your wallet passphrase to unlock the wallet.</source>
<translation>该操作需要您首先使用密码解锁钱包。</translation>
</message>
<message>
<location line="+5"/>
<source>Unlock wallet</source>
<translation>解锁钱包</translation>
</message>
<message>
<location line="+3"/>
<source>This operation needs your wallet passphrase to decrypt the wallet.</source>
<translation>该操作需要您首先使用密码解密钱包。</translation>
</message>
<message>
<location line="+5"/>
<source>Decrypt wallet</source>
<translation>解密钱包</translation>
</message>
<message>
<location line="+3"/>
<source>Change passphrase</source>
<translation>修改密码</translation>
</message>
<message>
<location line="+1"/>
<source>Enter the old and new passphrase to the wallet.</source>
<translation>请输入钱包的旧密码与新密码。</translation>
</message>
<message>
<location line="+46"/>
<source>Confirm wallet encryption</source>
<translation>确认加密钱包</translation>
</message>
<message>
<location line="+1"/>
<source>Warning: If you encrypt your wallet and lose your passphrase, you will <b>LOSE ALL OF YOUR EACOINS</b>!</source>
<translation>警告:如果您加密了您的钱包,但是忘记了密码,你将会<b>丢失所有的莱特币</b>!</translation>
</message>
<message>
<location line="+0"/>
<source>Are you sure you wish to encrypt your wallet?</source>
<translation>您确定需要为钱包加密吗?</translation>
</message>
<message>
<location line="+15"/>
<source>IMPORTANT: Any previous backups you have made of your wallet file should be replaced with the newly generated, encrypted wallet file. For security reasons, previous backups of the unencrypted wallet file will become useless as soon as you start using the new, encrypted wallet.</source>
<translation>重要提示:您以前备份的钱包文件应该替换成最新生成的加密钱包文件(重新备份)。从安全性上考虑,您以前备份的未加密的钱包文件,在您使用新的加密钱包后将无效,请重新备份。</translation>
</message>
<message>
<location line="+100"/>
<location line="+24"/>
<source>Warning: The Caps Lock key is on!</source>
<translation>警告:大写锁定键处于打开状态!</translation>
</message>
<message>
<location line="-130"/>
<location line="+58"/>
<source>Wallet encrypted</source>
<translation>钱包已加密</translation>
</message>
<message>
<location line="-56"/>
<source>EAcoin will close now to finish the encryption process. Remember that encrypting your wallet cannot fully protect your eacoins from being stolen by malware infecting your computer.</source>
<translation>将关闭软件以完成加密过程。 请您谨记:钱包加密并不是万能的,电脑中毒,您的莱特币还是有可能丢失。</translation>
</message>
<message>
<location line="+13"/>
<location line="+7"/>
<location line="+42"/>
<location line="+6"/>
<source>Wallet encryption failed</source>
<translation>钱包加密失败</translation>
</message>
<message>
<location line="-54"/>
<source>Wallet encryption failed due to an internal error. Your wallet was not encrypted.</source>
<translation>由于一个本地错误,加密钱包操作已经失败。您的钱包没有被加密。</translation>
</message>
<message>
<location line="+7"/>
<location line="+48"/>
<source>The supplied passphrases do not match.</source>
<translation>密码不匹配。</translation>
</message>
<message>
<location line="-37"/>
<source>Wallet unlock failed</source>
<translation>钱包解锁失败</translation>
</message>
<message>
<location line="+1"/>
<location line="+11"/>
<location line="+19"/>
<source>The passphrase entered for the wallet decryption was incorrect.</source>
<translation>用于解密钱包的密码不正确。</translation>
</message>
<message>
<location line="-20"/>
<source>Wallet decryption failed</source>
<translation>钱包解密失败。</translation>
</message>
<message>
<location line="+14"/>
<source>Wallet passphrase was successfully changed.</source>
<translation>修改钱包密码成功。</translation>
</message>
</context>
<context>
<name>BitcoinGUI</name>
<message>
<location filename="../bitcoingui.cpp" line="+233"/>
<source>Sign &message...</source>
<translation>对&消息签名...</translation>
</message>
<message>
<location line="+280"/>
<source>Synchronizing with network...</source>
<translation>正在与网络同步...</translation>
</message>
<message>
<location line="-349"/>
<source>&Overview</source>
<translation>&概况</translation>
</message>
<message>
<location line="+1"/>
<source>Show general overview of wallet</source>
<translation>显示钱包概况</translation>
</message>
<message>
<location line="+20"/>
<source>&Transactions</source>
<translation>&交易记录</translation>
</message>
<message>
<location line="+1"/>
<source>Browse transaction history</source>
<translation>查看交易历史</translation>
</message>
<message>
<location line="+7"/>
<source>Edit the list of stored addresses and labels</source>
<translation>修改存储的地址和标签列表</translation>
</message>
<message>
<location line="-14"/>
<source>Show the list of addresses for receiving payments</source>
<translation>显示接收支付的地址列表</translation>
</message>
<message>
<location line="+31"/>
<source>E&xit</source>
<translation>退出</translation>
</message>
<message>
<location line="+1"/>
<source>Quit application</source>
<translation>退出程序</translation>
</message>
<message>
<location line="+4"/>
<source>Show information about EAcoin</source>
<translation>显示莱特币的相关信息</translation>
</message>
<message>
<location line="+2"/>
<source>About &Qt</source>
<translation>关于 &Qt</translation>
</message>
<message>
<location line="+1"/>
<source>Show information about Qt</source>
<translation>显示Qt相关信息</translation>
</message>
<message>
<location line="+2"/>
<source>&Options...</source>
<translation>&选项...</translation>
</message>
<message>
<location line="+6"/>
<source>&Encrypt Wallet...</source>
<translation>&加密钱包...</translation>
</message>
<message>
<location line="+3"/>
<source>&Backup Wallet...</source>
<translation>&备份钱包...</translation>
</message>
<message>
<location line="+2"/>
<source>&Change Passphrase...</source>
<translation>&修改密码...</translation>
</message>
<message>
<location line="+285"/>
<source>Importing blocks from disk...</source>
<translation>正在从磁盘导入数据块...</translation>
</message>
<message>
<location line="+3"/>
<source>Reindexing blocks on disk...</source>
<translation>正在为数据块建立索引...</translation>
</message>
<message>
<location line="-347"/>
<source>Send coins to a EAcoin address</source>
<translation>向一个莱特币地址发送莱特币</translation>
</message>
<message>
<location line="+49"/>
<source>Modify configuration options for EAcoin</source>
<translation>设置选项</translation>
</message>
<message>
<location line="+9"/>
<source>Backup wallet to another location</source>
<translation>备份钱包到其它文件夹</translation>
</message>
<message>
<location line="+2"/>
<source>Change the passphrase used for wallet encryption</source>
<translation>修改钱包加密口令</translation>
</message>
<message>
<location line="+6"/>
<source>&Debug window</source>
<translation>&调试窗口</translation>
</message>
<message>
<location line="+1"/>
<source>Open debugging and diagnostic console</source>
<translation>在诊断控制台调试</translation>
</message>
<message>
<location line="-4"/>
<source>&Verify message...</source>
<translation>&验证消息...</translation>
</message>
<message>
<location line="-165"/>
<location line="+530"/>
<source>EAcoin</source>
<translation>莱特币</translation>
</message>
<message>
<location line="-530"/>
<source>Wallet</source>
<translation>钱包</translation>
</message>
<message>
<location line="+101"/>
<source>&Send</source>
<translation>&发送</translation>
</message>
<message>
<location line="+7"/>
<source>&Receive</source>
<translation>&接收</translation>
</message>
<message>
<location line="+14"/>
<source>&Addresses</source>
<translation>&地址</translation>
</message>
<message>
<location line="+22"/>
<source>&About EAcoin</source>
<translation>&关于莱特币</translation>
</message>
<message>
<location line="+9"/>
<source>&Show / Hide</source>
<translation>&显示 / 隐藏</translation>
</message>
<message>
<location line="+1"/>
<source>Show or hide the main Window</source>
<translation>显示或隐藏主窗口</translation>
</message>
<message>
<location line="+3"/>
<source>Encrypt the private keys that belong to your wallet</source>
<translation>对钱包中的私钥加密</translation>
</message>
<message>
<location line="+7"/>
<source>Sign messages with your EAcoin addresses to prove you own them</source>
<translation>用莱特币地址关联的私钥为消息签名,以证明您拥有这个莱特币地址</translation>
</message>
<message>
<location line="+2"/>
<source>Verify messages to ensure they were signed with specified EAcoin addresses</source>
<translation>校验消息,确保该消息是由指定的莱特币地址所有者签名的</translation>
</message>
<message>
<location line="+28"/>
<source>&File</source>
<translation>&文件</translation>
</message>
<message>
<location line="+7"/>
<source>&Settings</source>
<translation>&设置</translation>
</message>
<message>
<location line="+6"/>
<source>&Help</source>
<translation>&帮助</translation>
</message>
<message>
<location line="+9"/>
<source>Tabs toolbar</source>
<translation>分页工具栏</translation>
</message>
<message>
<location line="+17"/>
<location line="+10"/>
<source>[testnet]</source>
<translation>[testnet]</translation>
</message>
<message>
<location line="+47"/>
<source>EAcoin client</source>
<translation>莱特币客户端</translation>
</message>
<message numerus="yes">
<location line="+141"/>
<source>%n active connection(s) to EAcoin network</source>
<translation><numerusform>到莱特币网络的连接共有%n条</numerusform></translation>
</message>
<message>
<location line="+22"/>
<source>No block source available...</source>
        <translation>没有可用的区块来源...</translation>
</message>
<message>
<location line="+12"/>
<source>Processed %1 of %2 (estimated) blocks of transaction history.</source>
<translation>%1 / %2 个交易历史的区块已下载</translation>
</message>
<message>
<location line="+4"/>
<source>Processed %1 blocks of transaction history.</source>
<translation>已处理 %1 个交易历史数据块。</translation>
</message>
<message numerus="yes">
<location line="+20"/>
<source>%n hour(s)</source>
<translation><numerusform>%n 小时前</numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n day(s)</source>
<translation><numerusform>%n 天前</numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n week(s)</source>
<translation><numerusform>%n 周前</numerusform></translation>
</message>
<message>
<location line="+4"/>
<source>%1 behind</source>
<translation>落后 %1 </translation>
</message>
<message>
<location line="+14"/>
<source>Last received block was generated %1 ago.</source>
<translation>最新收到的区块产生于 %1。</translation>
</message>
<message>
<location line="+2"/>
<source>Transactions after this will not yet be visible.</source>
        <translation>在此之后的交易尚未可见。</translation>
</message>
<message>
<location line="+22"/>
<source>Error</source>
<translation>错误</translation>
</message>
<message>
<location line="+3"/>
<source>Warning</source>
<translation>警告</translation>
</message>
<message>
<location line="+3"/>
<source>Information</source>
<translation>信息</translation>
</message>
<message>
<location line="+70"/>
<source>This transaction is over the size limit. You can still send it for a fee of %1, which goes to the nodes that process your transaction and helps to support the network. Do you want to pay the fee?</source>
<translation>该交易的字节数超标。您可以选择支付%1的交易费给处理您的交易的网络节点,有助于莱特币网络的运行。您愿意支付这笔交易费用吗?</translation>
</message>
<message>
<location line="-140"/>
<source>Up to date</source>
<translation>最新状态</translation>
</message>
<message>
<location line="+31"/>
<source>Catching up...</source>
<translation>更新中...</translation>
</message>
<message>
<location line="+113"/>
<source>Confirm transaction fee</source>
<translation>确认交易费</translation>
</message>
<message>
<location line="+8"/>
<source>Sent transaction</source>
<translation>已发送交易</translation>
</message>
<message>
<location line="+0"/>
<source>Incoming transaction</source>
<translation>流入交易</translation>
</message>
<message>
<location line="+1"/>
<source>Date: %1
Amount: %2
Type: %3
Address: %4
</source>
<translation>日期: %1
金额: %2
类别: %3
地址: %4
</translation>
</message>
<message>
<location line="+33"/>
<location line="+23"/>
<source>URI handling</source>
<translation>URI 处理</translation>
</message>
<message>
<location line="-23"/>
<location line="+23"/>
<source>URI can not be parsed! This can be caused by an invalid EAcoin address or malformed URI parameters.</source>
<translation>URI无法解析!原因可能是莱特币地址不正确,或者URI参数错误。</translation>
</message>
<message>
<location line="+17"/>
<source>Wallet is <b>encrypted</b> and currently <b>unlocked</b></source><|fim▁hole|> <location line="+8"/>
<source>Wallet is <b>encrypted</b> and currently <b>locked</b></source>
<translation>钱包已被<b>加密</b>,当前为<b>锁定</b>状态</translation>
</message>
<message>
<location filename="../bitcoin.cpp" line="+111"/>
<source>A fatal error occurred. EAcoin can no longer continue safely and will quit.</source>
        <translation>发生严重错误。EAcoin 已无法继续安全运行,程序即将退出。</translation>
</message>
</context>
<context>
<name>ClientModel</name>
<message>
<location filename="../clientmodel.cpp" line="+104"/>
<source>Network Alert</source>
<translation>网络警报</translation>
</message>
</context>
<context>
<name>EditAddressDialog</name>
<message>
<location filename="../forms/editaddressdialog.ui" line="+14"/>
<source>Edit Address</source>
<translation>编辑地址</translation>
</message>
<message>
<location line="+11"/>
<source>&Label</source>
<translation>&标签</translation>
</message>
<message>
<location line="+10"/>
<source>The label associated with this address book entry</source>
<translation>与此地址条目关联的标签</translation>
</message>
<message>
<location line="+7"/>
<source>&Address</source>
<translation>&地址</translation>
</message>
<message>
<location line="+10"/>
<source>The address associated with this address book entry. This can only be modified for sending addresses.</source>
<translation>该地址与地址簿中的条目已关联,无法作为发送地址编辑。</translation>
</message>
<message>
<location filename="../editaddressdialog.cpp" line="+21"/>
<source>New receiving address</source>
<translation>新接收地址</translation>
</message>
<message>
<location line="+4"/>
<source>New sending address</source>
<translation>新发送地址</translation>
</message>
<message>
<location line="+3"/>
<source>Edit receiving address</source>
<translation>编辑接收地址</translation>
</message>
<message>
<location line="+4"/>
<source>Edit sending address</source>
<translation>编辑发送地址</translation>
</message>
<message>
<location line="+76"/>
<source>The entered address "%1" is already in the address book.</source>
<translation>输入的地址 "%1" 已经存在于地址簿。</translation>
</message>
<message>
<location line="-5"/>
<source>The entered address "%1" is not a valid EAcoin address.</source>
<translation>您输入的 "%1" 不是合法的莱特币地址.</translation>
</message>
<message>
<location line="+10"/>
<source>Could not unlock wallet.</source>
<translation>无法解锁钱包</translation>
</message>
<message>
<location line="+5"/>
<source>New key generation failed.</source>
<translation>密钥创建失败.</translation>
</message>
</context>
<context>
<name>GUIUtil::HelpMessageBox</name>
<message>
<location filename="../guiutil.cpp" line="+424"/>
<location line="+12"/>
<source>EAcoin-Qt</source>
<translation>EAcoin-Qt</translation>
</message>
<message>
<location line="-12"/>
<source>version</source>
<translation>版本</translation>
</message>
<message>
<location line="+2"/>
<source>Usage:</source>
<translation>使用:</translation>
</message>
<message>
<location line="+1"/>
<source>command-line options</source>
<translation>命令行选项</translation>
</message>
<message>
<location line="+4"/>
<source>UI options</source>
<translation>UI选项</translation>
</message>
<message>
<location line="+1"/>
<source>Set language, for example "de_DE" (default: system locale)</source>
<translation>设置语言, 例如 "de_DE" (缺省: 系统语言)</translation>
</message>
<message>
<location line="+1"/>
<source>Start minimized</source>
        <translation>启动时最小化</translation>
</message>
<message>
<location line="+1"/>
<source>Show splash screen on startup (default: 1)</source>
<translation>启动时显示版权页 (缺省: 1)</translation>
</message>
</context>
<context>
<name>OptionsDialog</name>
<message>
<location filename="../forms/optionsdialog.ui" line="+14"/>
<source>Options</source>
<translation>选项</translation>
</message>
<message>
<location line="+16"/>
<source>&Main</source>
<translation>&主要的</translation>
</message>
<message>
<location line="+6"/>
<source>Optional transaction fee per kB that helps make sure your transactions are processed quickly. Most transactions are 1 kB.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>Pay transaction &fee</source>
<translation>支付交易 &费用</translation>
</message>
<message>
<location line="+31"/>
<source>Automatically start EAcoin after logging in to the system.</source>
<translation>登录系统后自动开启莱特币客户端</translation>
</message>
<message>
<location line="+3"/>
<source>&Start EAcoin on system login</source>
<translation>启动时&运行</translation>
</message>
<message>
<location line="+35"/>
<source>Reset all client options to default.</source>
<translation>恢复客户端的缺省设置</translation>
</message>
<message>
<location line="+3"/>
<source>&Reset Options</source>
<translation>恢复缺省设置</translation>
</message>
<message>
<location line="+13"/>
<source>&Network</source>
<translation>&网络</translation>
</message>
<message>
<location line="+6"/>
<source>Automatically open the EAcoin client port on the router. This only works when your router supports UPnP and it is enabled.</source>
<translation>自动在路由器中打开莱特币端口。只有当您的路由器开启 UPnP 选项时此功能才有效。</translation>
</message>
<message>
<location line="+3"/>
<source>Map port using &UPnP</source>
<translation>使用 &UPnP 映射端口</translation>
</message>
<message>
<location line="+7"/>
<source>Connect to the EAcoin network through a SOCKS proxy (e.g. when connecting through Tor).</source>
<translation>通过代理服务器连接莱特币网络(例如:通过Tor连接)</translation>
</message>
<message>
<location line="+3"/>
<source>&Connect through SOCKS proxy:</source>
<translation>&通过Socks代理连接:</translation>
</message>
<message>
<location line="+9"/>
<source>Proxy &IP:</source>
<translation>代理服务器&IP:</translation>
</message>
<message>
<location line="+19"/>
<source>IP address of the proxy (e.g. 127.0.0.1)</source>
<translation>代理服务器IP (如 127.0.0.1)</translation>
</message>
<message>
<location line="+7"/>
<source>&Port:</source>
<translation>&端口:</translation>
</message>
<message>
<location line="+19"/>
<source>Port of the proxy (e.g. 9050)</source>
<translation>代理端口(例如 9050)</translation>
</message>
<message>
<location line="+7"/>
<source>SOCKS &Version:</source>
<translation>Socks &版本</translation>
</message>
<message>
<location line="+13"/>
<source>SOCKS version of the proxy (e.g. 5)</source>
<translation>Socks代理版本 (例如 5)</translation>
</message>
<message>
<location line="+36"/>
<source>&Window</source>
<translation>&窗口</translation>
</message>
<message>
<location line="+6"/>
<source>Show only a tray icon after minimizing the window.</source>
<translation>最小化窗口后仅显示托盘图标</translation>
</message>
<message>
<location line="+3"/>
<source>&Minimize to the tray instead of the taskbar</source>
<translation>&最小化到托盘</translation>
</message>
<message>
<location line="+7"/>
<source>Minimize instead of exit the application when the window is closed. When this option is enabled, the application will be closed only after selecting Quit in the menu.</source>
<translation>当窗口关闭时程序最小化而不是退出。当使用该选项时,程序只能通过在菜单中选择退出来关闭</translation>
</message>
<message>
<location line="+3"/>
<source>M&inimize on close</source>
<translation>单击关闭按钮最小化</translation>
</message>
<message>
<location line="+21"/>
<source>&Display</source>
<translation>&显示</translation>
</message>
<message>
<location line="+8"/>
<source>User Interface &language:</source>
<translation>用户界面&语言:</translation>
</message>
<message>
<location line="+13"/>
<source>The user interface language can be set here. This setting will take effect after restarting EAcoin.</source>
<translation>在这里设置用户界面的语言。设置将在客户端重启后生效。</translation>
</message>
<message>
<location line="+11"/>
<source>&Unit to show amounts in:</source>
<translation>&莱特币金额单位:</translation>
</message>
<message>
<location line="+13"/>
<source>Choose the default subdivision unit to show in the interface and when sending coins.</source>
<translation>选择莱特币单位。</translation>
</message>
<message>
<location line="+9"/>
<source>Whether to show EAcoin addresses in the transaction list or not.</source>
<translation>是否需要在交易清单中显示莱特币地址。</translation>
</message>
<message>
<location line="+3"/>
<source>&Display addresses in transaction list</source>
<translation>在交易清单中&显示莱特币地址</translation>
</message>
<message>
<location line="+71"/>
<source>&OK</source>
<translation>&确定</translation>
</message>
<message>
<location line="+7"/>
<source>&Cancel</source>
<translation>&取消</translation>
</message>
<message>
<location line="+10"/>
<source>&Apply</source>
<translation>&应用</translation>
</message>
<message>
<location filename="../optionsdialog.cpp" line="+53"/>
<source>default</source>
<translation>缺省</translation>
</message>
<message>
<location line="+130"/>
<source>Confirm options reset</source>
<translation>确认恢复缺省设置</translation>
</message>
<message>
<location line="+1"/>
<source>Some settings may require a client restart to take effect.</source>
<translation>某些设置选项需要重启客户端才能生效</translation>
</message>
<message>
<location line="+0"/>
<source>Do you want to proceed?</source>
<translation>您希望继续吗?</translation>
</message>
<message>
<location line="+42"/>
<location line="+9"/>
<source>Warning</source>
<translation>警告</translation>
</message>
<message>
<location line="-9"/>
<location line="+9"/>
<source>This setting will take effect after restarting EAcoin.</source>
<translation>需要重启客户端软件才能生效。</translation>
</message>
<message>
<location line="+29"/>
<source>The supplied proxy address is invalid.</source>
<translation>提供的代理服务器地址无效。</translation>
</message>
</context>
<context>
<name>OverviewPage</name>
<message>
<location filename="../forms/overviewpage.ui" line="+14"/>
<source>Form</source>
<translation>表单</translation>
</message>
<message>
<location line="+50"/>
<location line="+166"/>
<source>The displayed information may be out of date. Your wallet automatically synchronizes with the EAcoin network after a connection is established, but this process has not completed yet.</source>
<translation>现在显示的消息可能是过期的. 在连接上莱特币网络节点后,您的钱包将自动与网络同步,但是这个过程还没有完成.</translation>
</message>
<message>
<location line="-124"/>
<source>Balance:</source>
<translation>余额:</translation>
</message>
<message>
<location line="+29"/>
<source>Unconfirmed:</source>
<translation>未确认:</translation>
</message>
<message>
<location line="-78"/>
<source>Wallet</source>
<translation>钱包</translation>
</message>
<message>
<location line="+107"/>
<source>Immature:</source>
<translation>未成熟的:</translation>
</message>
<message>
<location line="+13"/>
<source>Mined balance that has not yet matured</source>
<translation>尚未成熟的挖矿收入余额</translation>
</message>
<message>
<location line="+46"/>
<source><b>Recent transactions</b></source>
<translation><b>最近交易记录</b></translation>
</message>
<message>
<location line="-101"/>
<source>Your current balance</source>
<translation>您的当前余额</translation>
</message>
<message>
<location line="+29"/>
<source>Total of transactions that have yet to be confirmed, and do not yet count toward the current balance</source>
<translation>尚未确认的交易总额, 未计入当前余额</translation>
</message>
<message>
<location filename="../overviewpage.cpp" line="+116"/>
<location line="+1"/>
<source>out of sync</source>
<translation>数据同步中</translation>
</message>
</context>
<context>
<name>PaymentServer</name>
<message>
<location filename="../paymentserver.cpp" line="+107"/>
<source>Cannot start eacoin: click-to-pay handler</source>
<translation>暂时无法启动莱特币:点击支付功能</translation>
</message>
</context>
<context>
<name>QRCodeDialog</name>
<message>
<location filename="../forms/qrcodedialog.ui" line="+14"/>
<source>QR Code Dialog</source>
<translation>二维码对话框</translation>
</message>
<message>
<location line="+59"/>
<source>Request Payment</source>
<translation>请求付款</translation>
</message>
<message>
<location line="+56"/>
<source>Amount:</source>
<translation>金额:</translation>
</message>
<message>
<location line="-44"/>
<source>Label:</source>
<translation>标签:</translation>
</message>
<message>
<location line="+19"/>
<source>Message:</source>
<translation>消息:</translation>
</message>
<message>
<location line="+71"/>
<source>&Save As...</source>
<translation>&另存为</translation>
</message>
<message>
<location filename="../qrcodedialog.cpp" line="+62"/>
<source>Error encoding URI into QR Code.</source>
<translation>将 URI 转换成二维码失败.</translation>
</message>
<message>
<location line="+40"/>
<source>The entered amount is invalid, please check.</source>
<translation>输入的金额非法,请检查。</translation>
</message>
<message>
<location line="+23"/>
<source>Resulting URI too long, try to reduce the text for label / message.</source>
<translation>URI 太长, 请试着精简标签/消息的内容.</translation>
</message>
<message>
<location line="+25"/>
<source>Save QR Code</source>
<translation>保存二维码</translation>
</message>
<message>
<location line="+0"/>
<source>PNG Images (*.png)</source>
<translation>PNG图像文件(*.png)</translation>
</message>
</context>
<context>
<name>RPCConsole</name>
<message>
<location filename="../forms/rpcconsole.ui" line="+46"/>
<source>Client name</source>
<translation>客户端名称</translation>
</message>
<message>
<location line="+10"/>
<location line="+23"/>
<location line="+26"/>
<location line="+23"/>
<location line="+23"/>
<location line="+36"/>
<location line="+53"/>
<location line="+23"/>
<location line="+23"/>
<location filename="../rpcconsole.cpp" line="+339"/>
<source>N/A</source>
<translation>不可用</translation>
</message>
<message>
<location line="-217"/>
<source>Client version</source>
<translation>客户端版本</translation>
</message>
<message>
<location line="-45"/>
<source>&Information</source>
<translation>&信息</translation>
</message>
<message>
<location line="+68"/>
<source>Using OpenSSL version</source>
<translation>使用OpenSSL版本</translation>
</message>
<message>
<location line="+49"/>
<source>Startup time</source>
<translation>启动时间</translation>
</message>
<message>
<location line="+29"/>
<source>Network</source>
<translation>网络</translation>
</message>
<message>
<location line="+7"/>
<source>Number of connections</source>
<translation>连接数</translation>
</message>
<message>
<location line="+23"/>
<source>On testnet</source>
<translation>当前为莱特币测试网络</translation>
</message>
<message>
<location line="+23"/>
<source>Block chain</source>
<translation>数据链</translation>
</message>
<message>
<location line="+7"/>
<source>Current number of blocks</source>
<translation>当前数据块数量</translation>
</message>
<message>
<location line="+23"/>
<source>Estimated total blocks</source>
<translation>预计数据块数量</translation>
</message>
<message>
<location line="+23"/>
<source>Last block time</source>
<translation>上一数据块时间</translation>
</message>
<message>
<location line="+52"/>
<source>&Open</source>
<translation>&打开</translation>
</message>
<message>
<location line="+16"/>
<source>Command-line options</source>
<translation>命令行选项</translation>
</message>
<message>
<location line="+7"/>
<source>Show the EAcoin-Qt help message to get a list with possible EAcoin command-line options.</source>
<translation>显示EAcoin命令行选项帮助信息</translation>
</message>
<message>
<location line="+3"/>
<source>&Show</source>
<translation>&显示</translation>
</message>
<message>
<location line="+24"/>
<source>&Console</source>
<translation>&控制台</translation>
</message>
<message>
<location line="-260"/>
<source>Build date</source>
<translation>创建时间</translation>
</message>
<message>
<location line="-104"/>
<source>EAcoin - Debug window</source>
<translation>莱特币 - 调试窗口</translation>
</message>
<message>
<location line="+25"/>
<source>EAcoin Core</source>
<translation>莱特币核心</translation>
</message>
<message>
<location line="+279"/>
<source>Debug log file</source>
<translation>调试日志文件</translation>
</message>
<message>
<location line="+7"/>
<source>Open the EAcoin debug log file from the current data directory. This can take a few seconds for large log files.</source>
<translation>打开当前目录中的调试日志文件。日志文件大的话可能要等上几秒钟。</translation>
</message>
<message>
<location line="+102"/>
<source>Clear console</source>
<translation>清空控制台</translation>
</message>
<message>
<location filename="../rpcconsole.cpp" line="-30"/>
<source>Welcome to the EAcoin RPC console.</source>
<translation>欢迎来到 RPC 控制台.</translation>
</message>
<message>
<location line="+1"/>
<source>Use up and down arrows to navigate history, and <b>Ctrl-L</b> to clear screen.</source>
<translation>使用上下方向键浏览历史, <b>Ctrl-L</b>清除屏幕.</translation>
</message>
<message>
<location line="+1"/>
<source>Type <b>help</b> for an overview of available commands.</source>
<translation>使用 <b>help</b> 命令显示帮助信息.</translation>
</message>
</context>
<context>
<name>SendCoinsDialog</name>
<message>
<location filename="../forms/sendcoinsdialog.ui" line="+14"/>
<location filename="../sendcoinsdialog.cpp" line="+124"/>
<location line="+5"/>
<location line="+5"/>
<location line="+5"/>
<location line="+6"/>
<location line="+5"/>
<location line="+5"/>
<source>Send Coins</source>
<translation>发送货币</translation>
</message>
<message>
<location line="+50"/>
<source>Send to multiple recipients at once</source>
<translation>一次发送给多个接收者</translation>
</message>
<message>
<location line="+3"/>
<source>Add &Recipient</source>
<translation>添加收款人</translation>
</message>
<message>
<location line="+20"/>
<source>Remove all transaction fields</source>
<translation>移除所有交易项</translation>
</message>
<message>
<location line="+3"/>
<source>Clear &All</source>
<translation>清除 &所有</translation>
</message>
<message>
<location line="+22"/>
<source>Balance:</source>
<translation>余额:</translation>
</message>
<message>
<location line="+10"/>
<source>123.456 BTC</source>
<translation>123.456 BTC</translation>
</message>
<message>
<location line="+31"/>
<source>Confirm the send action</source>
<translation>确认并发送货币</translation>
</message>
<message>
<location line="+3"/>
<source>S&end</source>
<translation>发送</translation>
</message>
<message>
<location filename="../sendcoinsdialog.cpp" line="-59"/>
<source><b>%1</b> to %2 (%3)</source>
<translation><b>%1</b> 到 %2 (%3)</translation>
</message>
<message>
<location line="+5"/>
<source>Confirm send coins</source>
<translation>确认发送货币</translation>
</message>
<message>
<location line="+1"/>
<source>Are you sure you want to send %1?</source>
<translation>确定您要发送 %1?</translation>
</message>
<message>
<location line="+0"/>
<source> and </source>
<translation> 和 </translation>
</message>
<message>
<location line="+23"/>
<source>The recipient address is not valid, please recheck.</source>
<translation>收款人地址不合法,请检查。</translation>
</message>
<message>
<location line="+5"/>
<source>The amount to pay must be larger than 0.</source>
<translation>支付金额必须大于0.</translation>
</message>
<message>
<location line="+5"/>
<source>The amount exceeds your balance.</source>
<translation>金额超出您的账上余额。</translation>
</message>
<message>
<location line="+5"/>
<source>The total exceeds your balance when the %1 transaction fee is included.</source>
<translation>计入 %1 交易费后的金额超出您的账上余额。</translation>
</message>
<message>
<location line="+6"/>
<source>Duplicate address found, can only send to each address once per send operation.</source>
<translation>发现重复的地址, 每次只能对同一地址发送一次.</translation>
</message>
<message>
<location line="+5"/>
<source>Error: Transaction creation failed!</source>
<translation>错误:创建交易失败!</translation>
</message>
<message>
<location line="+5"/>
<source>Error: The transaction was rejected. This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation>错误: 交易被拒绝. 如果您使用的是备份钱包,可能存在两个钱包不同步的情况,另一个钱包中的莱特币已经被使用,但本地的这个钱包尚没有记录。</translation>
</message>
</context>
<context>
<name>SendCoinsEntry</name>
<message>
<location filename="../forms/sendcoinsentry.ui" line="+14"/>
<source>Form</source>
<translation>表单</translation>
</message>
<message>
<location line="+15"/>
<source>A&mount:</source>
<translation>金额</translation>
</message>
<message>
<location line="+13"/>
<source>Pay &To:</source>
<translation>付款&给:</translation>
</message>
<message>
<location line="+34"/>
<source>The address to send the payment to (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source>
<translation>付款给这个地址 (例如 Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</translation>
</message>
<message>
<location line="+60"/>
<location filename="../sendcoinsentry.cpp" line="+26"/>
<source>Enter a label for this address to add it to your address book</source>
<translation>为这个地址输入一个标签,以便将它添加到您的地址簿</translation>
</message>
<message>
<location line="-78"/>
<source>&Label:</source>
<translation>&标签:</translation>
</message>
<message>
<location line="+28"/>
<source>Choose address from address book</source>
<translation>从地址簿选择地址</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<location line="+7"/>
<source>Paste address from clipboard</source>
<translation>从剪贴板粘贴地址</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation>Alt+P</translation>
</message>
<message>
<location line="+7"/>
<source>Remove this recipient</source>
<translation>移除此接收者</translation>
</message>
<message>
<location filename="../sendcoinsentry.cpp" line="+1"/>
<source>Enter a EAcoin address (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source>
<translation>请输入莱特币地址 (例如: Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</translation>
</message>
</context>
<context>
<name>SignVerifyMessageDialog</name>
<message>
<location filename="../forms/signverifymessagedialog.ui" line="+14"/>
<source>Signatures - Sign / Verify a Message</source>
<translation>签名 - 为消息签名/验证签名消息</translation>
</message>
<message>
<location line="+13"/>
<source>&Sign Message</source>
<translation>&签名消息</translation>
</message>
<message>
<location line="+6"/>
<source>You can sign messages with your addresses to prove you own them. Be careful not to sign anything vague, as phishing attacks may try to trick you into signing your identity over to them. Only sign fully-detailed statements you agree to.</source>
<translation>您可以用你的地址对消息进行签名,以证明您是该地址的所有人。注意不要对模棱两可的消息签名,以免遭受钓鱼式攻击。请确保消息内容准确的表达了您的真实意愿。</translation>
</message>
<message>
<location line="+18"/>
<source>The address to sign the message with (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source>
<translation>用于签名消息的地址(例如: Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</translation>
</message>
<message>
<location line="+10"/>
<location line="+213"/>
<source>Choose an address from the address book</source>
<translation>从地址簿选择地址</translation>
</message>
<message>
<location line="-203"/>
<location line="+213"/>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<location line="-203"/>
<source>Paste address from clipboard</source>
<translation>从剪贴板粘贴地址</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation>Alt+P</translation>
</message>
<message>
<location line="+12"/>
<source>Enter the message you want to sign here</source>
<translation>请输入您要发送的签名消息</translation>
</message>
<message>
<location line="+7"/>
<source>Signature</source>
<translation>签名</translation>
</message>
<message>
<location line="+27"/>
<source>Copy the current signature to the system clipboard</source>
<translation>复制当前签名至剪切板</translation>
</message>
<message>
<location line="+21"/>
<source>Sign the message to prove you own this EAcoin address</source>
<translation>签名消息,证明这个地址属于您。</translation>
</message>
<message>
<location line="+3"/>
<source>Sign &Message</source>
<translation>消息签名</translation>
</message>
<message>
<location line="+14"/>
<source>Reset all sign message fields</source>
<translation>清空所有签名消息栏</translation>
</message>
<message>
<location line="+3"/>
<location line="+146"/>
<source>Clear &All</source>
<translation>清除 &所有</translation>
</message>
<message>
<location line="-87"/>
<source>&Verify Message</source>
<translation>&验证消息</translation>
</message>
<message>
<location line="+6"/>
<source>Enter the signing address, message (ensure you copy line breaks, spaces, tabs, etc. exactly) and signature below to verify the message. Be careful not to read more into the signature than what is in the signed message itself, to avoid being tricked by a man-in-the-middle attack.</source>
<translation>在下面输入签名地址,消息(请确保换行符、空格符、制表符等等一个不漏)和签名以验证消息。请确保签名信息准确,提防中间人攻击。</translation>
</message>
<message>
<location line="+21"/>
<source>The address the message was signed with (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source>
<translation>用于签名消息的地址(例如: Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</translation>
</message>
<message>
<location line="+40"/>
<source>Verify the message to ensure it was signed with the specified EAcoin address</source>
<translation>验证消息,确保消息是由指定的莱特币地址签名过的。</translation>
</message>
<message>
<location line="+3"/>
<source>Verify &Message</source>
<translation>验证消息签名</translation>
</message>
<message>
<location line="+14"/>
<source>Reset all verify message fields</source>
<translation>清空所有验证消息栏</translation>
</message>
<message>
<location filename="../signverifymessagedialog.cpp" line="+27"/>
<location line="+3"/>
<source>Enter a EAcoin address (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source>
<translation>请输入莱特币地址 (例如: Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</translation>
</message>
<message>
<location line="-2"/>
<source>Click "Sign Message" to generate signature</source>
<translation>单击“签名消息”产生签名。</translation>
</message>
<message>
<location line="+3"/>
<source>Enter EAcoin signature</source>
<translation>输入莱特币签名</translation>
</message>
<message>
<location line="+82"/>
<location line="+81"/>
<source>The entered address is invalid.</source>
<translation>输入的地址非法。</translation>
</message>
<message>
<location line="-81"/>
<location line="+8"/>
<location line="+73"/>
<location line="+8"/>
<source>Please check the address and try again.</source>
<translation>请检查地址后重试。</translation>
</message>
<message>
<location line="-81"/>
<location line="+81"/>
<source>The entered address does not refer to a key.</source>
<translation>输入的地址没有关联的公私钥对。</translation>
</message>
<message>
<location line="-73"/>
<source>Wallet unlock was cancelled.</source>
<translation>钱包解锁动作取消。</translation>
</message>
<message>
<location line="+8"/>
<source>Private key for the entered address is not available.</source>
<translation>找不到输入地址关联的私钥。</translation>
</message>
<message>
<location line="+12"/>
<source>Message signing failed.</source>
<translation>消息签名失败。</translation>
</message>
<message>
<location line="+5"/>
<source>Message signed.</source>
<translation>消息已签名。</translation>
</message>
<message>
<location line="+59"/>
<source>The signature could not be decoded.</source>
<translation>签名无法解码。</translation>
</message>
<message>
<location line="+0"/>
<location line="+13"/>
<source>Please check the signature and try again.</source>
<translation>请检查签名后重试。</translation>
</message>
<message>
<location line="+0"/>
<source>The signature did not match the message digest.</source>
<translation>签名与消息摘要不匹配。</translation>
</message>
<message>
<location line="+7"/>
<source>Message verification failed.</source>
<translation>消息验证失败。</translation>
</message>
<message>
<location line="+5"/>
<source>Message verified.</source>
<translation>消息验证成功。</translation>
</message>
</context>
<context>
<name>SplashScreen</name>
<message>
<location filename="../splashscreen.cpp" line="+22"/>
<source>The EAcoin developers</source>
<translation>EAcoin-qt 客户端开发团队</translation>
</message>
<message>
<location line="+1"/>
<source>[testnet]</source>
<translation>[testnet]</translation>
</message>
</context>
<context>
<name>TransactionDesc</name>
<message>
<location filename="../transactiondesc.cpp" line="+20"/>
<source>Open until %1</source>
<translation>至 %1 个数据块时开启</translation>
</message>
<message>
<location line="+6"/>
<source>%1/offline</source>
<translation>%1 / 离线</translation>
</message>
<message>
<location line="+2"/>
<source>%1/unconfirmed</source>
<translation>%1/未确认</translation>
</message>
<message>
<location line="+2"/>
<source>%1 confirmations</source>
<translation>%1 确认项</translation>
</message>
<message>
<location line="+18"/>
<source>Status</source>
<translation>状态</translation>
</message>
<message numerus="yes">
<location line="+7"/>
<source>, broadcast through %n node(s)</source>
<translation><numerusform>通过 %n 个节点广播</numerusform></translation>
</message>
<message>
<location line="+4"/>
<source>Date</source>
<translation>日期</translation>
</message>
<message>
<location line="+7"/>
<source>Source</source>
<translation>源</translation>
</message>
<message>
<location line="+0"/>
<source>Generated</source>
<translation>生成</translation>
</message>
<message>
<location line="+5"/>
<location line="+17"/>
<source>From</source>
<translation>来自</translation>
</message>
<message>
<location line="+1"/>
<location line="+22"/>
<location line="+58"/>
<source>To</source>
<translation>到</translation>
</message>
<message>
<location line="-77"/>
<location line="+2"/>
<source>own address</source>
<translation>自己的地址</translation>
</message>
<message>
<location line="-2"/>
<source>label</source>
<translation>标签</translation>
</message>
<message>
<location line="+37"/>
<location line="+12"/>
<location line="+45"/>
<location line="+17"/>
<location line="+30"/>
<source>Credit</source>
<translation>收入</translation>
</message>
<message numerus="yes">
<location line="-102"/>
<source>matures in %n more block(s)</source>
<translation><numerusform>将在 %n 个数据块后成熟</numerusform></translation>
</message>
<message>
<location line="+2"/>
<source>not accepted</source>
<translation>未被接受</translation>
</message>
<message>
<location line="+44"/>
<location line="+8"/>
<location line="+15"/>
<location line="+30"/>
<source>Debit</source>
<translation>支出</translation>
</message>
<message>
<location line="-39"/>
<source>Transaction fee</source>
<translation>交易费</translation>
</message>
<message>
<location line="+16"/>
<source>Net amount</source>
<translation>净额</translation>
</message>
<message>
<location line="+6"/>
<source>Message</source>
<translation>消息</translation>
</message>
<message>
<location line="+2"/>
<source>Comment</source>
<translation>备注</translation>
</message>
<message>
<location line="+2"/>
<source>Transaction ID</source>
<translation>交易ID</translation>
</message>
<message>
<location line="+3"/>
<source>Generated coins must mature 10 blocks before they can be spent. When you generated this block, it was broadcast to the network to be added to the block chain. If it fails to get into the chain, its state will change to "not accepted" and it won't be spendable. This may occasionally happen if another node generates a block within a few seconds of yours.</source>
<translation>新挖出的莱特币必须等待10个确认才能使用。您生产出的数据块,将被广播到全网并添加到数据块链。如果入链失败,状态将变为“未被接受”,意味着您的数据块竞争失败,挖出的莱特币将不能使用。当某个节点先于你几秒生产出新的数据块,这种情况会偶尔发生。</translation>
</message>
<message>
<location line="+7"/>
<source>Debug information</source>
<translation>调试信息</translation>
</message>
<message>
<location line="+8"/>
<source>Transaction</source>
<translation>交易</translation>
</message>
<message>
<location line="+3"/>
<source>Inputs</source>
<translation>输入</translation>
</message>
<message>
<location line="+23"/>
<source>Amount</source>
<translation>金额</translation>
</message>
<message>
<location line="+1"/>
<source>true</source>
<translation>正确</translation>
</message>
<message>
<location line="+0"/>
<source>false</source>
<translation>错误</translation>
</message>
<message>
<location line="-209"/>
<source>, has not been successfully broadcast yet</source>
<translation>, 未被成功广播</translation>
</message>
<message numerus="yes">
<location line="-35"/>
<source>Open for %n more block(s)</source>
<translation><numerusform>将在 %n 个数据块后开启</numerusform></translation>
</message>
<message>
<location line="+70"/>
<source>unknown</source>
<translation>未知</translation>
</message>
</context>
<context>
<name>TransactionDescDialog</name>
<message>
<location filename="../forms/transactiondescdialog.ui" line="+14"/>
<source>Transaction details</source>
<translation>交易明细</translation>
</message>
<message>
<location line="+6"/>
<source>This pane shows a detailed description of the transaction</source>
<translation>当前面板显示了交易的详细信息</translation>
</message>
</context>
<context>
<name>TransactionTableModel</name>
<message>
<location filename="../transactiontablemodel.cpp" line="+225"/>
<source>Date</source>
<translation>日期</translation>
</message>
<message>
<location line="+0"/>
<source>Type</source>
<translation>类型</translation>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation>地址</translation>
</message>
<message>
<location line="+0"/>
<source>Amount</source>
<translation>数量</translation>
</message>
<message numerus="yes">
<location line="+57"/>
<source>Open for %n more block(s)</source>
<translation><numerusform>将在 %n 个数据块后开启</numerusform></translation>
</message>
<message>
<location line="+3"/>
<source>Open until %1</source>
<translation>至 %1 个数据块时开启</translation>
</message>
<message>
<location line="+3"/>
<source>Offline (%1 confirmations)</source>
<translation>离线 (%1 个确认项)</translation>
</message>
<message>
<location line="+3"/>
<source>Unconfirmed (%1 of %2 confirmations)</source>
<translation>未确认 (%1 / %2 条确认信息)</translation>
</message>
<message>
<location line="+3"/>
<source>Confirmed (%1 confirmations)</source>
<translation>已确认 (%1 条确认信息)</translation>
</message>
<message numerus="yes">
<location line="+8"/>
<source>Mined balance will be available when it matures in %n more block(s)</source>
<translation><numerusform>挖矿收入余额将在 %n 个数据块后可用</numerusform></translation>
</message>
<message>
<location line="+5"/>
<source>This block was not received by any other nodes and will probably not be accepted!</source>
<translation>此数据块未被其他节点接收,并可能不被接受!</translation>
</message>
<message>
<location line="+3"/>
<source>Generated but not accepted</source>
<translation>已生成但未被接受</translation>
</message>
<message>
<location line="+43"/>
<source>Received with</source>
<translation>接收于</translation>
</message>
<message>
<location line="+2"/>
<source>Received from</source>
<translation>收款来自</translation>
</message>
<message>
<location line="+3"/>
<source>Sent to</source>
<translation>发送到</translation>
</message>
<message>
<location line="+2"/>
<source>Payment to yourself</source>
<translation>付款给自己</translation>
</message>
<message>
<location line="+2"/>
<source>Mined</source>
<translation>挖矿所得</translation>
</message>
<message>
<location line="+38"/>
<source>(n/a)</source>
<translation>(n/a)</translation>
</message>
<message>
<location line="+199"/>
<source>Transaction status. Hover over this field to show number of confirmations.</source>
<translation>交易状态。 鼠标移到此区域上可显示确认消息项的数目。</translation>
</message>
<message>
<location line="+2"/>
<source>Date and time that the transaction was received.</source>
<translation>收到交易的日期和时间。</translation>
</message>
<message>
<location line="+2"/>
<source>Type of transaction.</source>
<translation>交易类别。</translation>
</message>
<message>
<location line="+2"/>
<source>Destination address of transaction.</source>
<translation>交易目的地址。</translation>
</message>
<message>
<location line="+2"/>
<source>Amount removed from or added to balance.</source>
<translation>从余额添加或移除的金额。</translation>
</message>
</context>
<context>
<name>TransactionView</name>
<message>
<location filename="../transactionview.cpp" line="+52"/>
<location line="+16"/>
<source>All</source>
<translation>全部</translation>
</message>
<message>
<location line="-15"/>
<source>Today</source>
<translation>今天</translation>
</message>
<message>
<location line="+1"/>
<source>This week</source>
<translation>本周</translation>
</message>
<message>
<location line="+1"/>
<source>This month</source>
<translation>本月</translation>
</message>
<message>
<location line="+1"/>
<source>Last month</source>
<translation>上月</translation>
</message>
<message>
<location line="+1"/>
<source>This year</source>
<translation>今年</translation>
</message>
<message>
<location line="+1"/>
<source>Range...</source>
<translation>范围...</translation>
</message>
<message>
<location line="+11"/>
<source>Received with</source>
<translation>接收于</translation>
</message>
<message>
<location line="+2"/>
<source>Sent to</source>
<translation>发送到</translation>
</message>
<message>
<location line="+2"/>
<source>To yourself</source>
<translation>到自己</translation>
</message>
<message>
<location line="+1"/>
<source>Mined</source>
<translation>挖矿所得</translation>
</message>
<message>
<location line="+1"/>
<source>Other</source>
<translation>其他</translation>
</message>
<message>
<location line="+7"/>
<source>Enter address or label to search</source>
<translation>输入地址或标签进行搜索</translation>
</message>
<message>
<location line="+7"/>
<source>Min amount</source>
<translation>最小金额</translation>
</message>
<message>
<location line="+34"/>
<source>Copy address</source>
<translation>复制地址</translation>
</message>
<message>
<location line="+1"/>
<source>Copy label</source>
<translation>复制标签</translation>
</message>
<message>
<location line="+1"/>
<source>Copy amount</source>
<translation>复制金额</translation>
</message>
<message>
<location line="+1"/>
<source>Copy transaction ID</source>
<translation>复制交易编号</translation>
</message>
<message>
<location line="+1"/>
<source>Edit label</source>
<translation>编辑标签</translation>
</message>
<message>
<location line="+1"/>
<source>Show transaction details</source>
<translation>显示交易详情</translation>
</message>
<message>
<location line="+139"/>
<source>Export Transaction Data</source>
<translation>导出交易数据</translation>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
<translation>逗号分隔文件(*.csv)</translation>
</message>
<message>
<location line="+8"/>
<source>Confirmed</source>
<translation>已确认</translation>
</message>
<message>
<location line="+1"/>
<source>Date</source>
<translation>日期</translation>
</message>
<message>
<location line="+1"/>
<source>Type</source>
<translation>类别</translation>
</message>
<message>
<location line="+1"/>
<source>Label</source>
<translation>标签</translation>
</message>
<message>
<location line="+1"/>
<source>Address</source>
<translation>地址</translation>
</message>
<message>
<location line="+1"/>
<source>Amount</source>
<translation>金额</translation>
</message>
<message>
<location line="+1"/>
<source>ID</source>
<translation>ID</translation>
</message>
<message>
<location line="+4"/>
<source>Error exporting</source>
<translation>导出错误</translation>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
<translation>无法写入文件 %1。</translation>
</message>
<message>
<location line="+100"/>
<source>Range:</source>
<translation>范围:</translation>
</message>
<message>
<location line="+8"/>
<source>to</source>
<translation>到</translation>
</message>
</context>
<context>
<name>WalletModel</name>
<message>
<location filename="../walletmodel.cpp" line="+193"/>
<source>Send Coins</source>
<translation>发送莱特币</translation>
</message>
</context>
<context>
<name>WalletView</name>
<message>
<location filename="../walletview.cpp" line="+42"/>
<source>&Export</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Export the data in the current tab to a file</source>
<translation>导出当前数据到文件</translation>
</message>
<message>
<location line="+193"/>
<source>Backup Wallet</source>
<translation>备份钱包</translation>
</message>
<message>
<location line="+0"/>
<source>Wallet Data (*.dat)</source>
<translation>钱包文件(*.dat)</translation>
</message>
<message>
<location line="+3"/>
<source>Backup Failed</source>
<translation>备份失败</translation>
</message>
<message>
<location line="+0"/>
<source>There was an error trying to save the wallet data to the new location.</source>
<translation>备份钱包到其它文件夹失败.</translation>
</message>
<message>
<location line="+4"/>
<source>Backup Successful</source>
<translation>备份成功</translation>
</message>
<message>
<location line="+0"/>
<source>The wallet data was successfully saved to the new location.</source>
<translation>钱包数据成功存储到新位置</translation>
</message>
</context>
<context>
<name>bitcoin-core</name>
<message>
<location filename="../bitcoinstrings.cpp" line="+94"/>
<source>EAcoin version</source>
<translation>莱特币版本</translation>
</message>
<message>
<location line="+102"/>
<source>Usage:</source>
<translation>使用:</translation>
</message>
<message>
<location line="-29"/>
<source>Send command to -server or eacoind</source>
<translation>发送命令到服务器或者 eacoind
</translation>
</message>
<message>
<location line="-23"/>
<source>List commands</source>
<translation>列出命令
</translation>
</message>
<message>
<location line="-12"/>
<source>Get help for a command</source>
<translation>获得某条命令的帮助
</translation>
</message>
<message>
<location line="+24"/>
<source>Options:</source>
<translation>选项:
</translation>
</message>
<message>
<location line="+24"/>
<source>Specify configuration file (default: eacoin.conf)</source>
<translation>指定配置文件 (默认为 eacoin.conf)
</translation>
</message>
<message>
<location line="+3"/>
<source>Specify pid file (default: eacoind.pid)</source>
<translation>指定 pid 文件 (默认为 eacoind.pid)
</translation>
</message>
<message>
<location line="-1"/>
<source>Specify data directory</source>
<translation>指定数据目录
</translation>
</message>
<message>
<location line="-9"/>
<source>Set database cache size in megabytes (default: 25)</source>
<translation>设置数据库缓冲区大小 (缺省: 25MB)</translation>
</message>
<message>
<location line="-28"/>
<source>Listen for connections on <port> (default: 5389 or testnet: 15389)</source>
<translation>监听端口连接 <port> (缺省: 5389 or testnet: 15389)</translation>
</message>
<message>
<location line="+5"/>
<source>Maintain at most <n> connections to peers (default: 125)</source>
<translation>最大连接数 <n> (缺省: 125)</translation>
</message>
<message>
<location line="-48"/>
<source>Connect to a node to retrieve peer addresses, and disconnect</source>
<translation>连接一个节点并获取对端地址, 然后断开连接</translation>
</message>
<message>
<location line="+82"/>
<source>Specify your own public address</source>
<translation>指定您的公共地址</translation>
</message>
<message>
<location line="+3"/>
<source>Threshold for disconnecting misbehaving peers (default: 100)</source>
<translation>Threshold for disconnecting misbehaving peers (缺省: 100)</translation>
</message>
<message>
<location line="-134"/>
<source>Number of seconds to keep misbehaving peers from reconnecting (default: 86400)</source>
<translation>Number of seconds to keep misbehaving peers from reconnecting (缺省: 86400)</translation>
</message>
<message>
<location line="-29"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv4: %s</source>
<translation>设置RPC监听端口%u时发生错误, IPv4:%s</translation>
</message>
<message>
<location line="+27"/>
<source>Listen for JSON-RPC connections on <port> (default: 5390 or testnet: 15390)</source>
<translation>JSON-RPC连接监听端口<port> (缺省:5390 testnet:15390)</translation>
</message>
<message>
<location line="+37"/>
<source>Accept command line and JSON-RPC commands</source>
<translation>接受命令行和 JSON-RPC 命令
</translation>
</message>
<message>
<location line="+76"/>
<source>Run in the background as a daemon and accept commands</source>
<translation>在后台运行并接受命令
</translation>
</message>
<message>
<location line="+37"/>
<source>Use the test network</source>
<translation>使用测试网络
</translation>
</message>
<message>
<location line="-112"/>
<source>Accept connections from outside (default: 1 if no -proxy or -connect)</source>
<translation>接受来自外部的连接 (缺省: 如果不带 -proxy or -connect 参数设置为1)</translation>
</message>
<message>
<location line="-80"/>
<source>%s, you must set a rpcpassword in the configuration file:
%s
It is recommended you use the following random password:
rpcuser=eacoinrpc
rpcpassword=%s
(you do not need to remember this password)
The username and password MUST NOT be the same.
If the file does not exist, create it with owner-readable-only file permissions.
It is also recommended to set alertnotify so you are notified of problems;
for example: alertnotify=echo %%s | mail -s "EAcoin Alert" [email protected]
</source>
<translation>%s, 您必须在配置文件设置rpcpassword:
%s
建议您使用下面的随机密码:
rpcuser=eacoinrpc
rpcpassword=%s
(您无需记住此密码)
用户名和密码 必! 须! 不一样。
如果配置文件不存在,请自行建立一个只有所有者拥有只读权限的文件。
推荐您开启提示通知以便收到错误通知,
像这样: alertnotify=echo %%s | mail -s "EAcoin Alert" [email protected]
</translation>
</message>
<message>
<location line="+17"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv6, falling back to IPv4: %s</source>
<translation>在IPv6模式下设置RPC监听端口 %u 失败,返回到IPv4模式: %s</translation>
</message>
<message>
<location line="+3"/>
<source>Bind to given address and always listen on it. Use [host]:port notation for IPv6</source>
<translation>绑定指定的IP地址开始监听。IPv6地址请使用[host]:port 格式</translation>
</message>
<message>
<location line="+3"/>
<source>Cannot obtain a lock on data directory %s. EAcoin is probably already running.</source>
<translation>无法给数据目录 %s上锁。本软件可能已经在运行。</translation>
</message>
<message>
<location line="+3"/>
<source>Error: The transaction was rejected! This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation>错误:该交易被拒绝!发生这种错误的原因可能是:钱包中的莱特币已经被用掉,有可能您复制了wallet.dat钱包文件,然后用复制的钱包文件支付了莱特币,但是这个钱包文件中没有记录。</translation>
</message>
<message>
<location line="+4"/>
<source>Error: This transaction requires a transaction fee of at least %s because of its amount, complexity, or use of recently received funds!</source>
<translation>错误:因为该交易的数量、复杂度或者动用了刚收到不久的资金,您需要支付不少于%s的交易费用。</translation>
</message>
<message>
<location line="+3"/>
<source>Execute command when a relevant alert is received (%s in cmd is replaced by message)</source>
<translation>当收到相关通知时执行命令(命令行中的 %s 的替换为消息)</translation>
</message>
<message>
<location line="+3"/>
<source>Execute command when a wallet transaction changes (%s in cmd is replaced by TxID)</source>
<translation>当钱包交易发生变化时执行命令(命令行中的 %s 会被替换成交易ID)</translation>
</message>
<message>
<location line="+11"/>
<source>Set maximum size of high-priority/low-fee transactions in bytes (default: 27000)</source>
<translation>Set maximum size of high-priority/low-fee transactions in bytes (default: 27000)</translation>
</message>
<message>
<location line="+6"/>
<source>This is a pre-release test build - use at your own risk - do not use for mining or merchant applications</source>
<translation>这是测试用的预发布版本 - 请谨慎使用 - 不要用来挖矿,或者在正式商用环境下使用</translation>
</message>
<message>
<location line="+5"/>
<source>Warning: -paytxfee is set very high! This is the transaction fee you will pay if you send a transaction.</source>
<translation>警告:-paytxfee 交易费设置得太高了!每笔交易都将支付交易费。</translation>
</message>
<message>
<location line="+3"/>
<source>Warning: Displayed transactions may not be correct! You may need to upgrade, or other nodes may need to upgrade.</source>
<translation>警告:显示的交易可能不正确!您需要升级客户端软件,或者网络上的其他节点需要升级。</translation>
</message>
<message>
<location line="+3"/>
<source>Warning: Please check that your computer's date and time are correct! If your clock is wrong EAcoin will not work properly.</source>
<translation>警告:请检查电脑的日期时间设置是否正确!时间错误可能会导致莱特币客户端运行异常。</translation>
</message>
<message>
<location line="+3"/>
<source>Warning: error reading wallet.dat! All keys read correctly, but transaction data or address book entries might be missing or incorrect.</source>
<translation>警告:钱包文件wallet.dat读取失败!最重要的公钥、私钥数据都没有问题,但是交易记录或地址簿数据不正确,或者存在数据丢失。</translation>
</message>
<message>
<location line="+3"/>
<source>Warning: wallet.dat corrupt, data salvaged! Original wallet.dat saved as wallet.{timestamp}.bak in %s; if your balance or transactions are incorrect you should restore from a backup.</source>
<translation>警告:钱包文件wallet.dat损坏! 原始的钱包文件已经备份到%s目录下并重命名为{timestamp}.bak 。如果您的账户余额或者交易记录不正确,请使用您的钱包备份文件恢复。</translation>
</message>
<message>
<location line="+14"/>
<source>Attempt to recover private keys from a corrupt wallet.dat</source>
<translation>尝试从损坏的钱包文件wallet.dat中恢复私钥</translation>
</message>
<message>
<location line="+2"/>
<source>Block creation options:</source>
<translation>数据块创建选项:</translation>
</message>
<message>
<location line="+5"/>
<source>Connect only to the specified node(s)</source>
<translation>仅连接到指定节点</translation>
</message>
<message>
<location line="+3"/>
<source>Corrupted block database detected</source>
<translation>检测到数据块数据库损坏</translation>
</message>
<message>
<location line="+1"/>
<source>Discover own IP address (default: 1 when listening and no -externalip)</source>
<translation>发现自己的IP地址(缺省:不带 -externalip 参数监听时设置为1)</translation>
</message>
<message>
<location line="+1"/>
<source>Do you want to rebuild the block database now?</source>
<translation>你想现在就重建块数据库吗?</translation>
</message>
<message>
<location line="+2"/>
<source>Error initializing block database</source>
<translation>初始化数据块数据库出错</translation>
</message>
<message>
<location line="+1"/>
<source>Error initializing wallet database environment %s!</source>
<translation>Error initializing wallet database environment %s!</translation>
</message>
<message>
<location line="+1"/>
<source>Error loading block database</source>
<translation>导入数据块数据库出错</translation>
</message>
<message>
<location line="+4"/>
<source>Error opening block database</source>
<translation>导入数据块数据库出错</translation>
</message>
<message>
<location line="+2"/>
<source>Error: Disk space is low!</source>
<translation>错误:磁盘剩余空间低!</translation>
</message>
<message>
<location line="+1"/>
<source>Error: Wallet locked, unable to create transaction!</source>
<translation>错误:钱包被锁定,无法创建交易!</translation>
</message>
<message>
<location line="+1"/>
<source>Error: system error: </source>
<translation>错误:系统错误: </translation>
</message>
<message>
<location line="+1"/>
<source>Failed to listen on any port. Use -listen=0 if you want this.</source>
<translation>监听端口失败。请使用 -listen=0 参数。</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to read block info</source>
<translation>无法读取数据块信息</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to read block</source>
<translation>读取数据块失败</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to sync block index</source>
<translation>无法同步数据块索引</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to write block index</source>
<translation>无法写入数据块索引</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to write block info</source>
<translation>无法写入数据块信息</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to write block</source>
<translation>无法写数据块</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to write file info</source>
<translation>无法写入文件信息</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to write to coin database</source>
<translation>无法写入coin数据库</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to write transaction index</source>
<translation>无法写入交易索引</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to write undo data</source>
<translation>无法写入回滚信息</translation>
</message>
<message>
<location line="+2"/>
<source>Find peers using DNS lookup (default: 1 unless -connect)</source>
<translation>通过DNS查找节点(缺省:1 除非使用 -connect 选项)</translation>
</message>
<message>
<location line="+1"/>
<source>Generate coins (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>How many blocks to check at startup (default: 288, 0 = all)</source>
<translation>启动时检测多少个数据块(缺省:288,0=所有)</translation>
</message>
<message>
<location line="+1"/>
<source>How thorough the block verification is (0-4, default: 3)</source>
<translation>How thorough the block verification is (0-4, default: 3)</translation>
</message>
<message>
<location line="+19"/>
<source>Not enough file descriptors available.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Rebuild block chain index from current blk000??.dat files</source>
<translation>重新为当前的blk000??.dat文件建立索引</translation>
</message>
<message>
<location line="+16"/>
<source>Set the number of threads to service RPC calls (default: 4)</source>
<translation>设置使用调用服务 RPC 的线程数量(默认:4)</translation>
</message>
<message>
<location line="+26"/>
<source>Verifying blocks...</source>
<translation>正在验证数据库的完整性...</translation>
</message>
<message>
<location line="+1"/>
<source>Verifying wallet...</source>
<translation>正在检测钱包的完整性...</translation>
</message>
<message>
<location line="-69"/>
<source>Imports blocks from external blk000??.dat file</source>
<translation>从blk000??.dat文件导入数据块</translation>
</message>
<message>
<location line="-76"/>
<source>Set the number of script verification threads (up to 16, 0 = auto, <0 = leave that many cores free, default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+77"/>
<source>Information</source>
<translation>信息</translation>
</message>
<message>
<location line="+3"/>
<source>Invalid -tor address: '%s'</source>
<translation>非法的 -tor 地址:'%s' </translation>
</message>
<message>
<location line="+1"/>
<source>Invalid amount for -minrelaytxfee=<amount>: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Invalid amount for -mintxfee=<amount>: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Maintain a full transaction index (default: 0)</source>
<translation>维护一份完整的交易索引(缺省:0)</translation>
</message>
<message>
<location line="+2"/>
<source>Maximum per-connection receive buffer, <n>*1000 bytes (default: 5000)</source>
<translation>每个连接的最大接收缓存,<n>*1000 字节(缺省:5000)</translation>
</message>
<message>
<location line="+1"/>
<source>Maximum per-connection send buffer, <n>*1000 bytes (default: 1000)</source>
<translation>每个连接的最大发送缓存,<n>*1000 字节(缺省:1000)</translation>
</message>
<message>
<location line="+2"/>
<source>Only accept block chain matching built-in checkpoints (default: 1)</source>
<translation>仅接受符合客户端检查点设置的数据块文件</translation>
</message>
<message>
<location line="+1"/>
<source>Only connect to nodes in network <net> (IPv4, IPv6 or Tor)</source>
<translation>仅连接至指定网络的节点<net>(IPv4, IPv6 或者 Tor)</translation>
</message>
<message>
<location line="+2"/>
<source>Output extra debugging information. Implies all other -debug* options</source>
<translation>输出额外的调试信息。打开所有 -debug* 开关</translation>
</message>
<message>
<location line="+1"/>
<source>Output extra network debugging information</source>
<translation>输出额外的网络调试信息</translation>
</message>
<message>
<location line="+2"/>
<source>Prepend debug output with timestamp</source>
<translation>为调试输出信息添加时间戳</translation>
</message>
<message>
<location line="+5"/>
<source>SSL options: (see the EAcoin Wiki for SSL setup instructions)</source>
<translation>SSL选项:(参见EAcoin Wiki关于SSL设置栏目)</translation>
</message>
<message>
<location line="+1"/>
<source>Select the version of socks proxy to use (4-5, default: 5)</source>
<translation>请选择Socks代理服务器版本 (4 或 5, 缺省: 5)</translation>
</message>
<message>
<location line="+3"/>
<source>Send trace/debug info to console instead of debug.log file</source>
<translation>跟踪/调试信息输出到控制台,不输出到debug.log文件</translation>
</message>
<message>
<location line="+1"/>
<source>Send trace/debug info to debugger</source>
<translation>跟踪/调试信息输出到 调试器debugger</translation>
</message>
<message>
<location line="+5"/>
<source>Set maximum block size in bytes (default: 250000)</source>
<translation>设置最大数据块大小(缺省:250000)</translation>
</message>
<message>
<location line="+1"/>
<source>Set minimum block size in bytes (default: 0)</source>
<translation>设置最小数据块大小(缺省:0)</translation>
</message>
<message>
<location line="+2"/>
<source>Shrink debug.log file on client startup (default: 1 when no -debug)</source>
<translation>客户端启动时压缩debug.log文件(缺省:no-debug模式时为1)</translation>
</message>
<message>
<location line="+1"/>
<source>Signing transaction failed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Specify connection timeout in milliseconds (default: 5000)</source>
<translation>设置连接超时时间(缺省:5000毫秒)</translation>
</message>
<message>
<location line="+4"/>
<source>System error: </source>
<translation>系统错误:</translation>
</message>
<message>
<location line="+4"/>
<source>Transaction amount too small</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Transaction amounts must be positive</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Transaction too large</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Use UPnP to map the listening port (default: 0)</source>
<translation>使用UPnp映射监听端口(缺省: 0)</translation>
</message>
<message>
<location line="+1"/>
<source>Use UPnP to map the listening port (default: 1 when listening)</source>
<translation>使用UPnp映射监听端口(缺省: 监听状态设为1)</translation>
</message>
<message>
<location line="+1"/>
<source>Use proxy to reach tor hidden services (default: same as -proxy)</source>
<translation>使用代理服务器访问隐藏服务(缺省:同 -proxy)</translation>
</message>
<message>
<location line="+2"/>
<source>Username for JSON-RPC connections</source>
<translation>JSON-RPC连接用户名
</translation>
</message>
<message>
<location line="+4"/>
<source>Warning</source>
<translation>警告</translation>
</message>
<message>
<location line="+1"/>
<source>Warning: This version is obsolete, upgrade required!</source>
<translation>警告:该软件版本已过时,请升级!</translation>
</message>
<message>
<location line="+1"/>
<source>You need to rebuild the databases using -reindex to change -txindex</source>
<translation>You need to rebuild the databases using -reindex to change -txindex</translation>
</message>
<message>
<location line="+1"/>
<source>wallet.dat corrupt, salvage failed</source>
<translation>钱包文件wallet.dat损坏,抢救备份失败</translation>
</message>
<message>
<location line="-50"/>
<source>Password for JSON-RPC connections</source>
<translation>JSON-RPC连接密码
</translation>
</message>
<message>
<location line="-67"/>
<source>Allow JSON-RPC connections from specified IP address</source>
<translation>允许从指定IP接受到的JSON-RPC连接
</translation>
</message>
<message>
<location line="+76"/>
<source>Send commands to node running on <ip> (default: 127.0.0.1)</source>
<translation>向IP地址为 <ip> 的节点发送指令 (缺省: 127.0.0.1)
</translation>
</message>
<message>
<location line="-120"/>
<source>Execute command when the best block changes (%s in cmd is replaced by block hash)</source>
<translation>当最佳数据块变化时执行命令 (命令行中的 %s 会被替换成数据块哈希值)</translation>
</message>
<message>
<location line="+147"/>
<source>Upgrade wallet to latest format</source>
<translation>将钱包升级到最新的格式</translation>
</message>
<message>
<location line="-21"/>
<source>Set key pool size to <n> (default: 100)</source>
<translation>设置密钥池大小为 <n> (缺省: 100)
</translation>
</message>
<message>
<location line="-12"/>
<source>Rescan the block chain for missing wallet transactions</source>
<translation>重新扫描数据链以查找遗漏的交易
</translation>
</message>
<message>
<location line="+35"/>
<source>Use OpenSSL (https) for JSON-RPC connections</source>
<translation>为 JSON-RPC 连接使用 OpenSSL (https)连接</translation>
</message>
<message>
<location line="-26"/>
<source>Server certificate file (default: server.cert)</source>
<translation>服务器证书 (默认为 server.cert)
</translation>
</message>
<message>
<location line="+1"/>
<source>Server private key (default: server.pem)</source>
<translation>服务器私钥 (默认为 server.pem)
</translation>
</message>
<message>
<location line="-151"/>
<source>Acceptable ciphers (default: TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)</source>
<translation>可接受的加密器 (默认为 TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)
</translation>
</message>
<message>
<location line="+165"/>
<source>This help message</source>
<translation>该帮助信息
</translation>
</message>
<message>
<location line="+6"/>
<source>Unable to bind to %s on this computer (bind returned error %d, %s)</source>
<translation>无法绑定本机端口 %s (返回错误消息 %d, %s)</translation>
</message>
<message>
<location line="-91"/>
<source>Connect through socks proxy</source>
<translation>通过 socks 代理连接</translation>
</message>
<message>
<location line="-10"/>
<source>Allow DNS lookups for -addnode, -seednode and -connect</source>
<translation>使用 -addnode, -seednode 和 -connect选项时允许DNS查找</translation>
</message>
<message>
<location line="+55"/>
<source>Loading addresses...</source>
<translation>正在加载地址...</translation>
</message>
<message>
<location line="-35"/>
<source>Error loading wallet.dat: Wallet corrupted</source>
<translation>wallet.dat钱包文件加载错误:钱包损坏</translation>
</message>
<message>
<location line="+1"/>
<source>Error loading wallet.dat: Wallet requires newer version of EAcoin</source>
<translation>wallet.dat钱包文件加载错误:请升级到最新EAcoin客户端</translation>
</message>
<message>
<location line="+93"/>
<source>Wallet needed to be rewritten: restart EAcoin to complete</source>
<translation>钱包文件需要重写:请退出并重新启动EAcoin客户端</translation>
</message>
<message>
<location line="-95"/>
<source>Error loading wallet.dat</source>
<translation>wallet.dat钱包文件加载错误</translation>
</message>
<message>
<location line="+28"/>
<source>Invalid -proxy address: '%s'</source>
<translation>非法的代理地址: '%s'</translation>
</message>
<message>
<location line="+56"/>
<source>Unknown network specified in -onlynet: '%s'</source>
<translation>被指定的是未知网络 -onlynet: '%s'</translation>
</message>
<message>
<location line="-1"/>
<source>Unknown -socks proxy version requested: %i</source>
<translation>被指定的是未知socks代理版本: %i</translation>
</message>
<message>
<location line="-96"/>
<source>Cannot resolve -bind address: '%s'</source>
<translation>无法解析 -bind 端口地址: '%s'</translation>
</message>
<message>
<location line="+1"/>
<source>Cannot resolve -externalip address: '%s'</source>
<translation>无法解析 -externalip 地址: '%s'</translation>
</message>
<message>
<location line="+44"/>
<source>Invalid amount for -paytxfee=<amount>: '%s'</source>
<translation>非法金额 -paytxfee=<amount>: '%s'</translation>
</message>
<message>
<location line="+1"/>
<source>Invalid amount</source>
<translation>金额不对</translation>
</message>
<message>
<location line="-6"/>
<source>Insufficient funds</source>
<translation>金额不足</translation>
</message>
<message>
<location line="+10"/>
<source>Loading block index...</source>
<translation>加载数据块索引...</translation>
</message>
<message>
<location line="-57"/>
<source>Add a node to connect to and attempt to keep the connection open</source>
<translation>添加节点并与其保持连接</translation>
</message>
<message>
<location line="-25"/>
<source>Unable to bind to %s on this computer. EAcoin is probably already running.</source>
<translation>无法在本机绑定 %s 端口 . EAcoin客户端软件可能已经在运行.</translation>
</message>
<message>
<location line="+64"/>
<source>Fee per KB to add to transactions you send</source>
<translation>每发送1KB交易所需的费用</translation>
</message>
<message>
<location line="+19"/>
<source>Loading wallet...</source>
<translation>正在加载钱包...</translation>
</message>
<message>
<location line="-52"/>
<source>Cannot downgrade wallet</source>
<translation>无法降级钱包格式</translation>
</message>
<message>
<location line="+3"/>
<source>Cannot write default address</source>
<translation>无法写入缺省地址</translation>
</message>
<message>
<location line="+64"/>
<source>Rescanning...</source>
<translation>正在重新扫描...</translation>
</message>
<message>
<location line="-57"/>
<source>Done loading</source>
<translation>加载完成</translation>
</message>
<message>
<location line="+82"/>
<source>To use the %s option</source>
<translation>使用 %s 选项</translation>
</message>
<message>
<location line="-74"/>
<source>Error</source>
<translation>错误</translation>
</message>
<message>
<location line="-31"/>
<source>You must set rpcpassword=<password> in the configuration file:
%s
If the file does not exist, create it with owner-readable-only file permissions.</source>
<translation>您必须在配置文件中加入选项 rpcpassword :
%s
如果配置文件不存在,请新建,并将文件权限设置为仅允许文件所有者读取.</translation>
</message>
</context>
</TS><|fim▁end|> | <translation>钱包已被<b>加密</b>,当前为<b>解锁</b>状态</translation>
</message>
<message> |
<|file_name|>island.js<|end_file_name|><|fim▁begin|>/**
* echarts组件:孤岛数据<|fim▁hole|> */
define(function (require) {
    /**
     * Constructor for the "island" chart component: free-floating data
     * bubbles created by dragging data points out of other charts.
     * @param {Object} messageCenter echarts message center
     * @param {ZRender} zr zrender instance
     */
    function Island(messageCenter, zr) {
        // Mix in the component base class.
        var ComponentBase = require('../component/base');
        ComponentBase.call(this, zr);
        // Mix in the "calculable" behaviour (drag & drop interaction support).
        var CalculableBase = require('./calculableBase');
        CalculableBase.call(this, zr);
        var ecConfig = require('../config');
        var ecData = require('../util/ecData');
        var zrEvent = require('zrender/tool/event');
        var self = this;
        self.type = ecConfig.CHART_TYPE_ISLAND;
        // Chart option; populated by refresh().
        var option;
        var _zlevelBase = self.getZlevelBase();
        // Connector strings used when composing island labels; read from option.
        var _nameConnector;
        var _valueConnector;
        // Cached canvas size, used by resize() to compute scaling factors.
        var _zrHeight = zr.getHeight();
        var _zrWidth = zr.getWidth();
        /**
         * Merges two islands: folds the value, name and color of srcShape
         * into tarShape. The caller is responsible for removing srcShape.
         *
         * @param {Object} tarShape target shape (kept, updated in place)
         * @param {Object} srcShape source shape (merged in)
         */
        function _combine(tarShape, srcShape) {
            var zrColor = require('zrender/tool/color');
            // Sum the values and join the names with the configured connector.
            var value = ecData.get(tarShape, 'value')
                        + ecData.get(srcShape, 'value');
            var name = ecData.get(tarShape, 'name')
                       + _nameConnector
                       + ecData.get(srcShape, 'name');
            tarShape.style.text = name + _valueConnector + value;
            ecData.set(tarShape, 'value', value);
            ecData.set(tarShape, 'name', name);
            // Reset radius to the configured default and blend the two colors.
            tarShape.style.r = option.island.r;
            tarShape.style.color = zrColor.mix(
                tarShape.style.color,
                srcShape.style.color
            );
        }
        /**
         * Refresh: re-reads the island option and the label connectors.
         * A missing newOption leaves current state untouched.
         */
        function refresh(newOption) {
            if (newOption) {
                newOption.island = self.reformOption(newOption.island);
                option = newOption;
                _nameConnector = option.nameConnector;
                _valueConnector = option.valueConnector;
            }
        }
        // Refresh options, then (re-)add every known island shape to zrender.
        function render(newOption) {
            refresh(newOption);
            for (var i = 0, l = self.shapeList.length; i < l; i++) {
                zr.addShape(self.shapeList[i]);
            }
        }
        // Accessor for the last option passed to refresh()/render().
        function getOption() {
            return option;
        }
        // Reposition islands proportionally when the canvas is resized.
        // (sic: the local name "newHieght" is a pre-existing typo, kept as-is.)
        function resize() {
            var newWidth = zr.getWidth();
            var newHieght = zr.getHeight();
            // Guard against division by zero when a cached size is 0/undefined.
            var xScale = newWidth / (_zrWidth || newWidth);
            var yScale = newHieght / (_zrHeight || newHieght);
            if (xScale == 1 && yScale == 1) {
                return;
            }
            _zrWidth = newWidth;
            _zrHeight = newHieght;
            for (var i = 0, l = self.shapeList.length; i < l; i++) {
                zr.modShape(
                    self.shapeList[i].id,
                    {
                        style: {
                            x: Math.round(self.shapeList[i].style.x * xScale),
                            y: Math.round(self.shapeList[i].style.y * yScale)
                        }
                    }
                );
            }
        }
        /**
         * Creates a new island circle from a shape dragged out of a chart,
         * copying its name/value/color, and registers it with zrender.
         */
        function add(shape) {
            var name = ecData.get(shape, 'name');
            var value = ecData.get(shape, 'value');
            var seriesName = typeof ecData.get(shape, 'series') != 'undefined'
                             ? ecData.get(shape, 'series').name
                             : '';
            var font = self.getFont(option.island.textStyle);
            var islandShape = {
                shape : 'circle',
                id : zr.newShapeId(self.type),
                zlevel : _zlevelBase,
                style : {
                    x : shape.style.x,
                    y : shape.style.y,
                    r : option.island.r,
                    color : shape.style.color || shape.style.strokeColor,
                    text : name + _valueConnector + value,
                    textFont : font
                },
                draggable : true,
                hoverable : true,
                onmousewheel : self.shapeHandler.onmousewheel,
                _type : 'island'
            };
            // White fill would be invisible; fall back to the stroke color.
            if (islandShape.style.color == '#fff') {
                islandShape.style.color = shape.style.strokeColor;
            }
            self.setCalculable(islandShape);
            ecData.pack(
                islandShape,
                {name:seriesName}, -1,
                value, -1,
                name
            );
            self.shapeList.push(islandShape);
            zr.addShape(islandShape);
        }
        // Remove an island from zrender and from the internal shape list.
        function del(shape) {
            zr.delShape(shape.id);
            var newShapeList = [];
            for (var i = 0, l = self.shapeList.length; i < l; i++) {
                if (self.shapeList[i].id != shape.id) {
                    newShapeList.push(self.shapeList[i]);
                }
            }
            self.shapeList = newShapeList;
        }
        /**
         * Drop handler (overrides the base class): a shape dropped onto an
         * island is merged into it via _combine().
         */
        function ondrop(param, status) {
            if (!self.isDrop || !param.target) {
                // No drag happened on this instance; nothing to do.
                return;
            }
            var target = param.target;   // shape the drop landed on
            var dragged = param.dragged; // shape that was being dragged
            _combine(target, dragged);
            zr.modShape(target.id, target);
            // Tell the framework the dragged data was accepted here.
            status.dragIn = true;
            // Reset the flag after handling the drag event.
            self.isDrop = false;
            return;
        }
        /**
         * Drag-end handler (overrides the base class): creates a new island
         * when chart data was dropped on empty canvas, or deletes an island
         * that was dragged into another chart.
         */
        function ondragend(param, status) {
            var target = param.target; // dragged shape
            if (!self.isDragend) {
                // Dragged data is not island data; if no chart accepted it,
                // turn it into a new island at the mouse position.
                if (!status.dragIn) {
                    target.style.x = zrEvent.getX(param.event);
                    target.style.y = zrEvent.getY(param.event);
                    add(target);
                    status.needRefresh = true;
                }
            }
            else {
                // Dragged data is island data; if a chart accepted it,
                // the island is no longer needed and is deleted.
                if (status.dragIn) {
                    del(target);
                    status.needRefresh = true;
                }
            }
            // Reset the flag after handling the drag event.
            self.isDragend = false;
            return;
        }
        /**
         * Mouse wheel over an island grows/shrinks it and scales its value
         * by option.island.calculateStep per notch (radius floored at 5).
         */
        self.shapeHandler.onmousewheel = function(param) {
            var shape = param.target;
            var event = param.event;
            var delta = zrEvent.getDelta(event);
            // Normalize wheel direction to +/-1.
            delta = delta > 0 ? (-1) : 1;
            shape.style.r -= delta;
            shape.style.r = shape.style.r < 5 ? 5 : shape.style.r;
            var value = ecData.get(shape, 'value');
            var dvalue = value * option.island.calculateStep;
            if (dvalue > 1) {
                value = Math.round(value - dvalue * delta);
            }
            else {
                // Keep two decimals; "- 0" coerces the string back to a number.
                value = (value - dvalue * delta).toFixed(2) - 0;
            }
            var name = ecData.get(shape, 'name');
            // NOTE(review): uses a literal ':' here while labels elsewhere use
            // _valueConnector -- looks inconsistent; confirm intended.
            shape.style.text = name + ':' + value;
            ecData.set(shape, 'value', value);
            ecData.set(shape, 'name', name);
            zr.modShape(shape.id, shape);
            zr.refresh();
            zrEvent.stop(event);
        };
        // Public interface.
        self.refresh = refresh;
        self.render = render;
        self.resize = resize;
        self.getOption = getOption;
        self.add = add;
        self.del = del;
        self.ondrop = ondrop;
        self.ondragend = ondragend;
    }
    // Register the chart type with the echarts chart registry.
    require('../chart').define('island', Island);
    return Island;
});
* @desc echarts基于Canvas,纯Javascript图表库,提供直观,生动,可交互,可个性化定制的数据统计图表。
* @author Kener (@Kener-林峰, [email protected])
* |
<|file_name|>transform_old_config.py<|end_file_name|><|fim▁begin|># Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import sys
import yaml_util
from run import check_run_quick
class Processor(object):
  """Migrates old-style Spinnaker settings into their new locations.

  Reads an environment-variable file and a local YAML config, applies the
  key/value bindings parsed from |config|, and rewrites:
    * the environment file (selected keys become NAME=value lines),
    * the YAML file (remaining keys spliced in place),
    * an AWS credentials file (when AWS keys are present in the bindings).
  """

  def __init__(self, config, environ_path, yml_path, aws_path):
    """Load current file contents and parse |config| into YAML bindings.

    Args:
      config: string of YAML key bindings to apply.
      environ_path: path to the environment-variable file (read and rewritten).
      yml_path: path to the local YAML config (read and rewritten).
      aws_path: path where AWS credentials are written (write-only).
    """
    with open(environ_path, 'r') as f:
      self.__environ_content = f.read()
    # Normalize so appended assignments always start on their own line.
    if not self.__environ_content.endswith('\n'):
      self.__environ_content += '\n'
    with open(yml_path, 'r') as f:
      self.__output = f.read()
    self.__bindings = yaml_util.YamlBindings()
    self.__bindings.import_string(config)
    self.__write_yml_path = yml_path
    self.__write_aws_path = aws_path
    self.__write_environ_path = environ_path
    # Keys already exported as environment variables; these are skipped by
    # update_remaining_keys() so they are not duplicated into the YAML.
    self.__environ_keys = set()

  def update_environ(self, key, name):
    """Export binding |key| as environment variable |name|.

    Replaces an existing NAME=... line in place, or appends a new one.
    No-op when the key has no bound value.
    """
    value = self.lookup(key)
    if value is None:
      return
    self.__environ_keys.add(key)
    assignment = '{name}={value}'.format(name=name, value=value)
    # Match an existing assignment line for this variable, if any.
    match = re.search('^{name}=.*'.format(name=name),
                      self.__environ_content,
                      re.MULTILINE)
    if match:
      # Splice the new assignment over the old line.
      self.__environ_content = ''.join([
          self.__environ_content[0:match.start(0)],
          assignment,
          self.__environ_content[match.end(0):]
      ])
    else:
      self.__environ_content += assignment + '\n'

  def update_in_place(self, key):
    # Rewrite the pending YAML text with the bound value for |key|.
    self.__output = self.__bindings.transform_yaml_source(self.__output, key)

  def lookup(self, key):
    """Return the bound value for |key|, or None when it is not bound."""
    try:
      return self.__bindings.get(key)
    except KeyError:
      return None

  def update_remaining_keys(self):
    # Walk all dotted keys in the bindings and splice each value into the
    # YAML text, skipping keys already exported as environment variables.
    stack = [('', self.__bindings.map)]
    while stack:
      prefix, root = stack.pop()
      # NOTE(review): the iteration that binds `name`/`value`/`key` (e.g.
      # `for name, value in root.items(): key = prefix + name`) is missing
      # from this copy of the file -- as written this method would raise
      # NameError. Restore the loop before relying on it.
      if isinstance(value, dict):
        stack.append((key + '.', value))
      elif not key in self.__environ_keys:
        try:
          self.update_in_place(key)
        except ValueError:
          pass

  def process(self):
    """Apply the migration and write all output files."""
    # Known provider toggles become environment variables.
    self.update_environ('providers.aws.enabled', 'SPINNAKER_AWS_ENABLED')
    self.update_environ('providers.aws.defaultRegion',
                        'SPINNAKER_AWS_DEFAULT_REGION')
    self.update_environ('providers.google.enabled',
                        'SPINNAKER_GOOGLE_ENABLED')
    self.update_environ('providers.google.primaryCredentials.project',
                        'SPINNAKER_GOOGLE_PROJECT_ID')
    self.update_environ('providers.google.defaultRegion',
                        'SPINNAKER_GOOGLE_DEFAULT_REGION')
    self.update_environ('providers.google.defaultZone',
                        'SPINNAKER_GOOGLE_DEFAULT_ZONE')
    self.update_in_place('providers.aws.primaryCredentials.name')
    aws_name = self.lookup('providers.aws.primaryCredentials.name')
    aws_key = self.lookup('providers.aws.primaryCredentials.access_key_id')
    aws_secret = self.lookup('providers.aws.primaryCredentials.secret_key')
    # Write an AWS credentials file only when both secrets are available.
    # NOTE(review): the `name=aws_name` format argument is unused by the
    # template below -- confirm whether the profile name was meant to be
    # interpolated instead of the literal "[default]".
    if aws_key and aws_secret:
      with open(self.__write_aws_path, 'w') as f:
        f.write("""
[default]
aws_secret_access_key = {secret}
aws_access_key_id = {key}
""".format(name=aws_name, secret=aws_secret, key=aws_key))
    # Everything not exported as an environment variable goes into the YAML.
    self.update_remaining_keys()
    with open(self.__write_environ_path, 'w') as f:
      f.write(self.__environ_content)
    with open(self.__write_yml_path, 'w') as f:
      f.write(self.__output)
if __name__ == '__main__':
  # CLI entry point:
  #   transform_old_config.py <content> <environ-path> <local-yml-path> <aws-cred-path>
  # where <content> is the YAML binding string to apply.
  if len(sys.argv) != 5:
    sys.stderr.write('Usage: <content> <environ-path> <local-yml-path> <aws-cred-path>\n')
    sys.exit(-1)
  content = sys.argv[1]
  environ_path = sys.argv[2]
  local_yml_path = sys.argv[3]
  aws_credentials_path = sys.argv[4]
  processor = Processor(content,
                        environ_path, local_yml_path, aws_credentials_path)
  processor.process()
  sys.exit(0)
key = '{prefix}{child}'.format(prefix=prefix, child=name) |
<|file_name|>virtual_network_peering.py<|end_file_name|><|fim▁begin|># coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class VirtualNetworkPeering(SubResource):
    """Peerings in a virtual network resource.

    :param id: Resource ID.
    :type id: str
    :param allow_virtual_network_access: Whether the VMs in the linked virtual
     network space would be able to access all the VMs in local Virtual network
     space.
    :type allow_virtual_network_access: bool
    :param allow_forwarded_traffic: Whether the forwarded traffic from the VMs
     in the remote virtual network will be allowed/disallowed.
    :type allow_forwarded_traffic: bool
    :param allow_gateway_transit: If gateway links can be used in remote
     virtual networking to link to this virtual network.
    :type allow_gateway_transit: bool
    :param use_remote_gateways: If remote gateways can be used on this virtual
     network. If the flag is set to true, and allowGatewayTransit on remote
     peering is also true, virtual network will use gateways of remote virtual
     network for transit. Only one peering can have this flag set to true. This
     flag cannot be set if virtual network already has a gateway.
    :type use_remote_gateways: bool
    :param remote_virtual_network: The reference of the remote virtual
     network. The remote virtual network can be in the same or different region
     (preview). See here to register for the preview and learn more
     (https://docs.microsoft.com/en-us/azure/virtual-network/virtual-network-create-peering).
    :type remote_virtual_network:
     ~azure.mgmt.network.v2017_11_01.models.SubResource
    :param remote_address_space: The reference of the remote virtual network
     address space.
    :type remote_address_space:
     ~azure.mgmt.network.v2017_11_01.models.AddressSpace
    :param peering_state: The status of the virtual network peering. Possible
     values are 'Initiated', 'Connected', and 'Disconnected'. Possible values
     include: 'Initiated', 'Connected', 'Disconnected'
    :type peering_state: str or
     ~azure.mgmt.network.v2017_11_01.models.VirtualNetworkPeeringState
    :param provisioning_state: The provisioning state of the resource.
    :type provisioning_state: str
    :param name: The name of the resource that is unique within a resource
     group. This name can be used to access the resource.
    :type name: str
    :param etag: A unique read-only string that changes whenever the resource
     is updated.
    :type etag: str
    """

    # Maps Python attribute names to the REST API's JSON keys (nested keys use
    # dotted 'properties.*' paths) and serialization types; consumed by the
    # msrest (de)serializer. Do not edit by hand: this model is generated by
    # AutoRest and manual changes will be lost on regeneration.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'allow_virtual_network_access': {'key': 'properties.allowVirtualNetworkAccess', 'type': 'bool'},
        'allow_forwarded_traffic': {'key': 'properties.allowForwardedTraffic', 'type': 'bool'},
        'allow_gateway_transit': {'key': 'properties.allowGatewayTransit', 'type': 'bool'},
        'use_remote_gateways': {'key': 'properties.useRemoteGateways', 'type': 'bool'},
        'remote_virtual_network': {'key': 'properties.remoteVirtualNetwork', 'type': 'SubResource'},
        'remote_address_space': {'key': 'properties.remoteAddressSpace', 'type': 'AddressSpace'},
        'peering_state': {'key': 'properties.peeringState', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
    }

    def __init__(self, id=None, allow_virtual_network_access=None, allow_forwarded_traffic=None, allow_gateway_transit=None, use_remote_gateways=None, remote_virtual_network=None, remote_address_space=None, peering_state=None, provisioning_state=None, name=None, etag=None):
        # 'id' is handled by the SubResource base class; all other fields are
        # plain attribute assignments with None defaults.
        super(VirtualNetworkPeering, self).__init__(id=id)
        self.allow_virtual_network_access = allow_virtual_network_access
        self.allow_forwarded_traffic = allow_forwarded_traffic
        self.allow_gateway_transit = allow_gateway_transit
        self.use_remote_gateways = use_remote_gateways
        self.remote_virtual_network = remote_virtual_network
        self.remote_address_space = remote_address_space
        self.peering_state = peering_state
        self.provisioning_state = provisioning_state
        self.name = name
        self.etag = etag
<|file_name|>reduce.rs<|end_file_name|><|fim▁begin|>//! Applies a reduction function on records grouped by key.
//!
//! The `reduce` operator acts on `(key, val)` data.
//! Records with the same key are grouped together, and a user-supplied reduction function is applied
//! to the key and the list of values.
//! The function is expected to populate a list of output values.
use hashable::Hashable;
use ::{Data, ExchangeData, Collection};
use ::difference::{Semigroup, Abelian};
use timely::order::PartialOrder;
use timely::progress::frontier::Antichain;
use timely::dataflow::*;
use timely::dataflow::operators::Operator;
use timely::dataflow::channels::pact::Pipeline;
use timely::dataflow::operators::Capability;
use operators::arrange::{Arranged, ArrangeByKey, ArrangeBySelf, TraceAgent};
use lattice::Lattice;
use trace::{Batch, BatchReader, Cursor, Trace, Builder};
use trace::cursor::CursorList;
use trace::implementations::ord::OrdValSpine as DefaultValTrace;
use trace::implementations::ord::OrdKeySpine as DefaultKeyTrace;
use trace::TraceReader;
/// Extension trait for the `reduce` differential dataflow method.
pub trait Reduce<G: Scope, K: Data, V: Data, R: Semigroup> where G::Timestamp: Lattice+Ord {
    /// Applies a reduction function on records grouped by key.
    ///
    /// Input data must be structured as `(key, val)` pairs.
    /// The user-supplied reduction function takes as arguments
    ///
    /// 1. a reference to the key,
    /// 2. a reference to the slice of values and their accumulated updates,
    /// 3. a mutable reference to a vector to populate with output values and accumulated updates.
    ///
    /// The user logic is only invoked for non-empty input collections, and it is safe to assume that the
    /// slice of input values is non-empty. The values are presented in sorted order, as defined by their
    /// `Ord` implementations.
    ///
    /// # Examples
    ///
    /// ```
    /// extern crate timely;
    /// extern crate differential_dataflow;
    ///
    /// use differential_dataflow::input::Input;
    /// use differential_dataflow::operators::Reduce;
    ///
    /// fn main() {
    ///     ::timely::example(|scope| {
    ///         // report the smallest value for each group
    ///         scope.new_collection_from(1 .. 10).1
    ///              .map(|x| (x / 3, x))
    ///              .reduce(|_key, input, output| {
    ///                  output.push((*input[0].0, 1))
    ///              });
    ///     });
    /// }
    /// ```
    fn reduce<L, V2: Data, R2: Abelian>(&self, logic: L) -> Collection<G, (K, V2), R2>
    where L: FnMut(&K, &[(&V, R)], &mut Vec<(V2, R2)>)+'static {
        // Default implementation: forward to the named variant.
        self.reduce_named("Reduce", logic)
    }

    /// As `reduce` with the ability to name the operator.
    fn reduce_named<L, V2: Data, R2: Abelian>(&self, name: &str, logic: L) -> Collection<G, (K, V2), R2>
    where L: FnMut(&K, &[(&V, R)], &mut Vec<(V2, R2)>)+'static;
}

// Reduce on an un-arranged collection: arrange the records by key first,
// then delegate to the arranged implementation below.
impl<G, K, V, R> Reduce<G, K, V, R> for Collection<G, (K, V), R>
    where
        G: Scope,
        G::Timestamp: Lattice+Ord,
        K: ExchangeData+Hashable,
        V: ExchangeData,
        R: ExchangeData+Semigroup,
 {
    fn reduce_named<L, V2: Data, R2: Abelian>(&self, name: &str, logic: L) -> Collection<G, (K, V2), R2>
    where L: FnMut(&K, &[(&V, R)], &mut Vec<(V2, R2)>)+'static {
        self.arrange_by_key()
            .reduce_named(name, logic)
    }
}

// Reduce on an already-arranged collection: build the output trace via
// `reduce_abelian` and read it back out as (key, value) pairs.
impl<G: Scope, K: Data, V: Data, T1, R: Semigroup> Reduce<G, K, V, R> for Arranged<G, T1>
where
    G::Timestamp: Lattice+Ord,
    T1: TraceReader<Key=K, Val=V, Time=G::Timestamp, R=R>+Clone+'static,
    T1::Batch: BatchReader<K, V, G::Timestamp, R>,
    T1::Cursor: Cursor<K, V, G::Timestamp, R>,
{
    fn reduce_named<L, V2: Data, R2: Abelian>(&self, name: &str, logic: L) -> Collection<G, (K, V2), R2>
    where L: FnMut(&K, &[(&V, R)], &mut Vec<(V2, R2)>)+'static {
        self.reduce_abelian::<_,DefaultValTrace<_,_,_,_>>(name, logic)
            .as_collection(|k,v| (k.clone(), v.clone()))
    }
}
/// Extension trait for the `threshold` and `distinct` differential dataflow methods.
pub trait Threshold<G: Scope, K: Data, R1: Semigroup> where G::Timestamp: Lattice+Ord {
    /// Transforms the multiplicity of records.
    ///
    /// The `threshold` function is obliged to map `R1::zero` to `R2::zero`, or at
    /// least the computation may behave as if it does. Otherwise, the transformation
    /// can be nearly arbitrary: the code does not assume any properties of `threshold`.
    ///
    /// # Examples
    ///
    /// ```
    /// extern crate timely;
    /// extern crate differential_dataflow;
    ///
    /// use differential_dataflow::input::Input;
    /// use differential_dataflow::operators::Threshold;
    ///
    /// fn main() {
    ///     ::timely::example(|scope| {
    ///         // report at most one of each key.
    ///         scope.new_collection_from(1 .. 10).1
    ///              .map(|x| x / 3)
    ///              .threshold(|_,c| c % 2);
    ///     });
    /// }
    /// ```
    fn threshold<R2: Abelian, F: FnMut(&K, &R1)->R2+'static>(&self, thresh: F) -> Collection<G, K, R2> {
        // Default implementation: forward to the named variant.
        self.threshold_named("Threshold", thresh)
    }

    /// A `threshold` with the ability to name the operator.
    fn threshold_named<R2: Abelian, F: FnMut(&K, &R1)->R2+'static>(&self, name: &str, thresh: F) -> Collection<G, K, R2>;

    /// Reduces the collection to one occurrence of each distinct element.
    ///
    /// # Examples
    ///
    /// ```
    /// extern crate timely;
    /// extern crate differential_dataflow;
    ///
    /// use differential_dataflow::input::Input;
    /// use differential_dataflow::operators::Threshold;
    ///
    /// fn main() {
    ///     ::timely::example(|scope| {
    ///         // report at most one of each key.
    ///         scope.new_collection_from(1 .. 10).1
    ///              .map(|x| x / 3)
    ///              .distinct();
    ///     });
    /// }
    /// ```
    fn distinct(&self) -> Collection<G, K, isize> {
        self.distinct_core()
    }

    /// Distinct for general integer differences.
    ///
    /// This method allows `distinct` to produce collections whose difference
    /// type is something other than an `isize` integer, for example perhaps an
    /// `i32`.
    fn distinct_core<R2: Abelian+From<i8>>(&self) -> Collection<G, K, R2> {
        // Every present key maps to multiplicity one, regardless of its count.
        self.threshold_named("Distinct", |_,_| R2::from(1i8))
    }
}

// Threshold on an un-arranged collection: arrange keys (with unit values)
// first, then delegate to the arranged implementation below.
impl<G: Scope, K: ExchangeData+Hashable, R1: ExchangeData+Semigroup> Threshold<G, K, R1> for Collection<G, K, R1>
where G::Timestamp: Lattice+Ord {
    fn threshold_named<R2: Abelian, F: FnMut(&K,&R1)->R2+'static>(&self, name: &str, thresh: F) -> Collection<G, K, R2> {
        self.arrange_by_self()
            .threshold_named(name, thresh)
    }
}

// Threshold on an arranged (key, ()) collection: per key, apply `thresh` to
// the single accumulated count and emit the key with the new multiplicity.
impl<G: Scope, K: Data, T1, R1: Semigroup> Threshold<G, K, R1> for Arranged<G, T1>
where
    G::Timestamp: Lattice+Ord,
    T1: TraceReader<Key=K, Val=(), Time=G::Timestamp, R=R1>+Clone+'static,
    T1::Batch: BatchReader<K, (), G::Timestamp, R1>,
    T1::Cursor: Cursor<K, (), G::Timestamp, R1>,
{
    fn threshold_named<R2: Abelian, F: FnMut(&K,&R1)->R2+'static>(&self, name: &str, mut thresh: F) -> Collection<G, K, R2> {
        self.reduce_abelian::<_,DefaultKeyTrace<_,_,_>>(name, move |k,s,t| t.push(((), thresh(k, &s[0].1))))
            .as_collection(|k,_| k.clone())
    }
}
/// Extension trait for the `count` differential dataflow method.
pub trait Count<G: Scope, K: Data, R: Semigroup> where G::Timestamp: Lattice+Ord {
    /// Counts the number of occurrences of each element.
    ///
    /// # Examples
    ///
    /// ```
    /// extern crate timely;
    /// extern crate differential_dataflow;
    ///
    /// use differential_dataflow::input::Input;
    /// use differential_dataflow::operators::Count;
    ///
    /// fn main() {
    ///     ::timely::example(|scope| {
    ///         // report the number of occurrences of each key
    ///         scope.new_collection_from(1 .. 10).1
    ///              .map(|x| x / 3)
    ///              .count();
    ///     });
    /// }
    /// ```
    fn count(&self) -> Collection<G, (K, R), isize>;
}

// Count on an un-arranged collection: arrange keys (with unit values) first,
// then delegate to the arranged implementation below.
impl<G: Scope, K: ExchangeData+Hashable, R: ExchangeData+Semigroup> Count<G, K, R> for Collection<G, K, R>
where
    G::Timestamp: Lattice+Ord,
{
    fn count(&self) -> Collection<G, (K, R), isize> {
        self.arrange_by_self()
            .count()
    }
}

// Count on an arranged (key, ()) collection: per key, emit the accumulated
// count as the output value with multiplicity one.
impl<G: Scope, K: Data, T1, R: Semigroup> Count<G, K, R> for Arranged<G, T1>
where
    G::Timestamp: Lattice+Ord,
    T1: TraceReader<Key=K, Val=(), Time=G::Timestamp, R=R>+Clone+'static,
    T1::Batch: BatchReader<K, (), G::Timestamp, R>,
    T1::Cursor: Cursor<K, (), G::Timestamp, R>,
{
    fn count(&self) -> Collection<G, (K, R), isize> {
        self.reduce_abelian::<_,DefaultValTrace<_,_,_,_>>("Count", |_k,s,t| t.push((s[0].1.clone(), 1)))
            .as_collection(|k,c| (k.clone(), c.clone()))
    }
}
/// Extension trait for the `group_arranged` differential dataflow method.
pub trait ReduceCore<G: Scope, K: Data, V: Data, R: Semigroup> where G::Timestamp: Lattice+Ord {
    /// Applies `group` to arranged data, and returns an arrangement of output data.
    ///
    /// This method is used by the more ergonomic `group`, `distinct`, and `count` methods, although
    /// it can be very useful if one needs to manually attach and re-use existing arranged collections.
    ///
    /// # Examples
    ///
    /// ```
    /// extern crate timely;
    /// extern crate differential_dataflow;
    ///
    /// use differential_dataflow::input::Input;
    /// use differential_dataflow::operators::reduce::ReduceCore;
    /// use differential_dataflow::trace::Trace;
    /// use differential_dataflow::trace::implementations::ord::OrdValSpine;
    /// use differential_dataflow::hashable::OrdWrapper;
    ///
    /// fn main() {
    ///     ::timely::example(|scope| {
    ///
    ///         let trace =
    ///         scope.new_collection_from(1 .. 10u32).1
    ///              .map(|x| (x, x))
    ///              .reduce_abelian::<_,OrdValSpine<_,_,_,_>>(
    ///                 "Example",
    ///                  move |_key, src, dst| dst.push((*src[0].0, 1))
    ///              )
    ///              .trace;
    ///     });
    /// }
    /// ```
    fn reduce_abelian<L, T2>(&self, name: &str, mut logic: L) -> Arranged<G, TraceAgent<T2>>
        where
            T2: Trace+TraceReader<Key=K, Time=G::Timestamp>+'static,
            T2::Val: Data,
            T2::R: Abelian,
            T2::Batch: Batch<K, T2::Val, G::Timestamp, T2::R>,
            T2::Cursor: Cursor<K, T2::Val, G::Timestamp, T2::R>,
            L: FnMut(&K, &[(&V, R)], &mut Vec<(T2::Val, T2::R)>)+'static,
        {
            self.reduce_core::<_,T2>(name, move |key, input, output, change| {
                // Re-run user logic only when there is input; empty input must yield empty output.
                if !input.is_empty() {
                    logic(key, input, change);
                }
                // Subtract the previously produced output, so that `change` holds exactly
                // the corrections required to reach the newly desired output.
                for (value, diff) in output.drain(..) {
                    change.push((value, -diff));
                }
                crate::consolidation::consolidate(change);
            })
        }

    /// Solves for output updates when presented with inputs and would-be outputs.
    ///
    /// Unlike `reduce_arranged`, this method may be called with an empty `input`,
    /// and it may not be safe to index into the first element.
    /// At least one of the two collections will be non-empty.
    fn reduce_core<L, T2>(&self, name: &str, logic: L) -> Arranged<G, TraceAgent<T2>>
        where
            T2: Trace+TraceReader<Key=K, Time=G::Timestamp>+'static,
            T2::Val: Data,
            T2::R: Semigroup,
            T2::Batch: Batch<K, T2::Val, G::Timestamp, T2::R>,
            T2::Cursor: Cursor<K, T2::Val, G::Timestamp, T2::R>,
            L: FnMut(&K, &[(&V, R)], &mut Vec<(T2::Val,T2::R)>, &mut Vec<(T2::Val,T2::R)>)+'static
            ;
}
impl<G, K, V, R> ReduceCore<G, K, V, R> for Collection<G, (K, V), R>
where
    G: Scope,
    G::Timestamp: Lattice+Ord,
    K: ExchangeData+Hashable,
    V: ExchangeData,
    R: ExchangeData+Semigroup,
{
    /// Routes the reduction through an arrangement of the collection by key.
    fn reduce_core<L, T2>(&self, name: &str, logic: L) -> Arranged<G, TraceAgent<T2>>
        where
            T2::Val: Data,
            T2::R: Semigroup,
            T2: Trace+TraceReader<Key=K, Time=G::Timestamp>+'static,
            T2::Batch: Batch<K, T2::Val, G::Timestamp, T2::R>,
            T2::Cursor: Cursor<K, T2::Val, G::Timestamp, T2::R>,
            L: FnMut(&K, &[(&V, R)], &mut Vec<(T2::Val,T2::R)>, &mut Vec<(T2::Val, T2::R)>)+'static
    {
        let arranged = self.arrange_by_key();
        arranged.reduce_core(name, logic)
    }
}
impl<G: Scope, K: Data, V: Data, T1, R: Semigroup> ReduceCore<G, K, V, R> for Arranged<G, T1>
where
    G::Timestamp: Lattice+Ord,
    T1: TraceReader<Key=K, Val=V, Time=G::Timestamp, R=R>+Clone+'static,
    T1::Batch: BatchReader<K, V, G::Timestamp, R>,
    T1::Cursor: Cursor<K, V, G::Timestamp, R>,
{
    /// Builds a unary dataflow operator that consumes arranged input batches, re-evaluates
    /// `logic` for keys and times that may have changed, and maintains an arranged output trace.
    fn reduce_core<L, T2>(&self, name: &str, mut logic: L) -> Arranged<G, TraceAgent<T2>>
        where
            T2: Trace+TraceReader<Key=K, Time=G::Timestamp>+'static,
            T2::Val: Data,
            T2::R: Semigroup,
            T2::Batch: Batch<K, T2::Val, G::Timestamp, T2::R>,
            T2::Cursor: Cursor<K, T2::Val, G::Timestamp, T2::R>,
            L: FnMut(&K, &[(&V, R)], &mut Vec<(T2::Val,T2::R)>, &mut Vec<(T2::Val, T2::R)>)+'static {

        // Shared slot through which the operator construction closure hands back a reader
        // handle on the output trace; populated before the operator first runs.
        let mut result_trace = None;

        // fabricate a data-parallel operator using the `unary_notify` pattern.
        let stream = {

            let result_trace = &mut result_trace;
            self.stream.unary_frontier(Pipeline, name, move |_capability, operator_info| {

                let logger = {
                    let scope = self.stream.scope();
                    let register = scope.log_register();
                    register.get::<::logging::DifferentialEvent>("differential/arrange")
                };

                // Determine if we should regularly exert the trace maintenance machinery,
                // and with what amount of effort each time.
                let (activator, effort) =
                    if let Ok(text) = ::std::env::var("DIFFERENTIAL_EAGER_MERGE") {
                        let effort = text.parse::<isize>().expect("DIFFERENTIAL_EAGER_MERGE must be set to an integer");
                        (Some(self.stream.scope().activator_for(&operator_info.address[..])), Some(effort))
                    }
                    else {
                        (None, None)
                    };

                let empty = T2::new(operator_info.clone(), logger.clone(), activator);
                let mut source_trace = self.trace.clone();
                let (mut output_reader, mut output_writer) = TraceAgent::new(empty, operator_info, logger);

                // let mut output_trace = TraceRc::make_from(agent).0;
                *result_trace = Some(output_reader.clone());

                // let mut thinker1 = history_replay_prior::HistoryReplayer::<V, V2, G::Timestamp, R, R2>::new();
                // let mut thinker = history_replay::HistoryReplayer::<V, V2, G::Timestamp, R, R2>::new();
                let mut new_interesting_times = Vec::<G::Timestamp>::new();

                // Our implementation maintains a list of outstanding `(key, time)` synthetic interesting times,
                // as well as capabilities for these times (or their lower envelope, at least).
                let mut interesting = Vec::<(K, G::Timestamp)>::new();
                let mut capabilities = Vec::<Capability<G::Timestamp>>::new();

                // buffers and logic for computing per-key interesting times "efficiently".
                let mut interesting_times = Vec::<G::Timestamp>::new();

                // Upper and lower frontiers for the pending input and output batches to process.
                let mut upper_limit = Antichain::from_elem(<G::Timestamp as Lattice>::minimum());
                let mut lower_limit = Antichain::from_elem(<G::Timestamp as Lattice>::minimum());

                // Output batches may need to be built piecemeal, and these temp storage help there.
                let mut output_upper = Antichain::from_elem(<G::Timestamp as Lattice>::minimum());
                let mut output_lower = Antichain::from_elem(<G::Timestamp as Lattice>::minimum());

                let mut input_buffer = Vec::new();

                let id = self.stream.scope().index(); // worker index; used only in the diagnostic prints below.

                move |input, output| {

                    // The `reduce` operator receives fully formed batches, which each serve as an indication
                    // that the frontier has advanced to the upper bound of their description.
                    //
                    // Although we could act on each individually, several may have been sent, and it makes
                    // sense to accumulate them first to coordinate their re-evaluation. We will need to pay
                    // attention to which times need to be collected under which capability, so that we can
                    // assemble output batches correctly. We will maintain several builders concurrently, and
                    // place output updates into the appropriate builder.
                    //
                    // It turns out we must use notificators, as we cannot await empty batches from arrange to
                    // indicate progress, as the arrange may not hold the capability to send such. Instead, we
                    // must watch for progress here (and the upper bound of received batches) to tell us how
                    // far we can process work.
                    //
                    // We really want to retire all batches we receive, so we want a frontier which reflects
                    // both information from batches as well as progress information. I think this means that
                    // we keep times that are greater than or equal to a time in the other frontier, deduplicated.

                    let mut batch_cursors = Vec::new();
                    let mut batch_storage = Vec::new();

                    // Downgrade previous upper limit to be current lower limit.
                    lower_limit.clear();
                    lower_limit.extend(upper_limit.elements().iter().cloned());

                    // Drain the input stream of batches, validating the contiguity of the batch descriptions and
                    // capturing a cursor for each of the batches as well as ensuring we hold a capability for the
                    // times in the batch.
                    input.for_each(|capability, batches| {

                        batches.swap(&mut input_buffer);
                        for batch in input_buffer.drain(..) {
                            upper_limit.clear();
                            upper_limit.extend(batch.upper().iter().cloned());
                            batch_cursors.push(batch.cursor());
                            batch_storage.push(batch);
                        }

                        // Ensure that `capabilities` covers the capability of the batch.
                        capabilities.retain(|cap| !capability.time().less_than(&cap.time()));
                        if !capabilities.iter().any(|cap| cap.time().less_equal(&capability.time())) {
                            capabilities.push(capability.retain());
                        }
                    });

                    // Pull in any subsequent empty batches we believe to exist.
                    source_trace.advance_upper(&mut upper_limit);

                    // Only if our upper limit has advanced should we do work.
                    if upper_limit != lower_limit {

                        // If we have no capabilities, then we (i) should not produce any outputs and (ii) could not send
                        // any produced outputs even if they were (incorrectly) produced. We cannot even send empty batches
                        // to indicate forward progress, and must hope that downstream operators look at progress frontiers
                        // as well as batch descriptions.
                        //
                        // We can (and should) advance source and output traces if `upper_limit` indicates this is possible.
                        if capabilities.iter().any(|c| !upper_limit.less_equal(c.time())) {

                            // `interesting` contains "warnings" about keys and times that may need to be re-considered.
                            // We first extract those times from this list that lie in the interval we will process.
                            sort_dedup(&mut interesting);
                            // `exposed` contains interesting (key, time)s now below `upper_limit`
                            let exposed = {
                                let (exposed, new_interesting) = interesting.drain(..).partition(|&(_, ref time)| !upper_limit.less_equal(time));
                                interesting = new_interesting;
                                exposed
                            };

                            // Prepare an output buffer and builder for each capability.
                            //
                            // We buffer and build separately, as outputs are produced grouped by time, whereas the
                            // builder wants to see outputs grouped by value. While the per-key computation could
                            // do the re-sorting itself, buffering per-key outputs lets us double check the results
                            // against other implementations for accuracy.
                            //
                            // TODO: It would be better if all updates went into one batch, but timely dataflow prevents
                            //       this as long as it requires that there is only one capability for each message.
                            let mut buffers = Vec::<(G::Timestamp, Vec<(T2::Val, G::Timestamp, T2::R)>)>::new();
                            let mut builders = Vec::new();
                            for i in 0 .. capabilities.len() {
                                buffers.push((capabilities[i].time().clone(), Vec::new()));
                                builders.push(<T2::Batch as Batch<K,T2::Val,G::Timestamp,T2::R>>::Builder::new());
                            }

                            // cursors for navigating input and output traces.
                            let (mut source_cursor, source_storage): (T1::Cursor, _) = source_trace.cursor_through(lower_limit.elements()).expect("failed to acquire source cursor");
                            let source_storage = &source_storage;
                            let (mut output_cursor, output_storage): (T2::Cursor, _) = output_reader.cursor_through(lower_limit.elements()).expect("failed to acquire output cursor");
                            let output_storage = &output_storage;
                            let (mut batch_cursor, batch_storage) = (CursorList::new(batch_cursors, &batch_storage), batch_storage);
                            let batch_storage = &batch_storage;

                            let mut thinker = history_replay::HistoryReplayer::<V, T2::Val, G::Timestamp, R, T2::R>::new();

                            // We now march through the keys we must work on, drawing from `batch_cursors` and `exposed`.
                            //
                            // We only keep valid cursors (those with more data) in `batch_cursors`, and so its length
                            // indicates whether more data remain. We move through `exposed` using (index) `exposed_position`.
                            // There could perhaps be a less provocative variable name.
                            let mut exposed_position = 0;
                            while batch_cursor.key_valid(batch_storage) || exposed_position < exposed.len() {

                                // Determine the next key we will work on; could be synthetic, could be from a batch.
                                let key1 = exposed.get(exposed_position).map(|x| x.0.clone());
                                let key2 = batch_cursor.get_key(&batch_storage).map(|k| k.clone());
                                let key = match (key1, key2) {
                                    (Some(key1), Some(key2)) => ::std::cmp::min(key1, key2),
                                    (Some(key1), None) => key1,
                                    (None, Some(key2)) => key2,
                                    (None, None) => unreachable!(),
                                };

                                // `interesting_times` contains those times between `lower_issued` and `upper_limit`
                                // that we need to re-consider. We now populate it, but perhaps this should be left
                                // to the per-key computation, which may be able to avoid examining the times of some
                                // values (for example, in the case of min/max/topk).
                                interesting_times.clear();

                                // Populate `interesting_times` with synthetic interesting times (below `upper_limit`) for this key.
                                while exposed.get(exposed_position).map(|x| &x.0) == Some(&key) {
                                    interesting_times.push(exposed[exposed_position].1.clone());
                                    exposed_position += 1;
                                }

                                // tidy up times, removing redundancy.
                                sort_dedup(&mut interesting_times);

                                // do the per-key computation.
                                let _counters = thinker.compute(
                                    &key,
                                    (&mut source_cursor, source_storage),
                                    (&mut output_cursor, output_storage),
                                    (&mut batch_cursor, batch_storage),
                                    &mut interesting_times,
                                    &mut logic,
                                    &upper_limit,
                                    &mut buffers[..],
                                    &mut new_interesting_times,
                                );

                                if batch_cursor.get_key(batch_storage) == Some(&key) {
                                    batch_cursor.step_key(batch_storage);
                                }

                                // Record future warnings about interesting times (and assert they should be "future").
                                for time in new_interesting_times.drain(..) {
                                    debug_assert!(upper_limit.less_equal(&time));
                                    interesting.push((key.clone(), time));
                                }

                                // Sort each buffer by value and move into the corresponding builder.
                                // TODO: This makes assumptions about at least one of (i) the stability of `sort_by`,
                                //       (ii) that the buffers are time-ordered, and (iii) that the builders accept
                                //       arbitrarily ordered times.
                                for index in 0 .. buffers.len() {
                                    buffers[index].1.sort_by(|x,y| x.0.cmp(&y.0));
                                    for (val, time, diff) in buffers[index].1.drain(..) {
                                        builders[index].push((key.clone(), val, time, diff));
                                    }
                                }
                            }

                            // We start sealing output batches from the lower limit (previous upper limit).
                            // In principle, we could update `lower_limit` itself, and it should arrive at
                            // `upper_limit` by the end of the process.
                            output_lower.clear();
                            output_lower.extend(lower_limit.elements().iter().cloned());

                            // build and ship each batch (because only one capability per message).
                            for (index, builder) in builders.drain(..).enumerate() {

                                // Form the upper limit of the next batch, which includes all times greater
                                // than the input batch, or the capabilities from i + 1 onward.
                                output_upper.clear();
                                output_upper.extend(upper_limit.elements().iter().cloned());
                                for capability in &capabilities[index + 1 ..] {
                                    output_upper.insert(capability.time().clone());
                                }

                                if output_upper.elements() != output_lower.elements() {

                                    let batch = builder.done(output_lower.elements(), output_upper.elements(), output_lower.elements());

                                    // ship batch to the output, and commit to the output trace.
                                    output.session(&capabilities[index]).give(batch.clone());
                                    output_writer.insert(batch, Some(capabilities[index].time().clone()));

                                    output_lower.clear();
                                    output_lower.extend(output_upper.elements().iter().cloned());
                                }
                            }

                            // This should be true, as the final iteration introduces no capabilities, and
                            // uses exactly `upper_limit` to determine the upper bound. Good to check though.
                            assert!(output_upper.elements() == upper_limit.elements());

                            // Determine the frontier of our interesting times.
                            let mut frontier = Antichain::<G::Timestamp>::new();
                            for &(_, ref time) in &interesting {
                                frontier.insert(time.clone());
                            }

                            // Update `capabilities` to reflect interesting pairs described by `frontier`.
                            let mut new_capabilities = Vec::new();
                            for time in frontier.elements().iter() {
                                if let Some(cap) = capabilities.iter().find(|c| c.time().less_equal(time)) {
                                    new_capabilities.push(cap.delayed(time));
                                }
                                else {
                                    println!("{}:\tfailed to find capability less than new frontier time:", id);
                                    println!("{}:\t  time: {:?}", id, time);
                                    println!("{}:\t  caps: {:?}", id, capabilities);
                                    println!("{}:\t  uppr: {:?}", id, upper_limit);
                                }
                            }
                            capabilities = new_capabilities;

                            // ensure that observed progress is reflected in the output.
                            output_writer.seal(upper_limit.elements());
                        }
                        else {
                            output_writer.seal(upper_limit.elements());
                        }

                        // We only anticipate future times in advance of `upper_limit`.
                        source_trace.advance_by(upper_limit.elements());
                        output_reader.advance_by(upper_limit.elements());

                        // We will only slice the data between future batches.
                        source_trace.distinguish_since(upper_limit.elements());
                        output_reader.distinguish_since(upper_limit.elements());
                    }

                    // Exert trace maintenance if we have been so requested.
                    if let Some(mut fuel) = effort.clone() {
                        output_writer.exert(&mut fuel);
                    }
                }
            }
            )
        };

        Arranged { stream: stream, trace: result_trace.unwrap() }
    }
}
#[inline(never)]
/// Sorts `list` and removes all duplicate elements.
///
/// The leading `dedup` is a deliberate cheap pre-pass: it removes runs of
/// consecutive duplicates before paying for the O(n log n) sort; the trailing
/// `dedup` then removes any duplicates brought adjacent by sorting.
fn sort_dedup<T: Ord>(list: &mut Vec<T>) {
    list.dedup();
    list.sort();
    list.dedup();
}
/// Internal strategy interface for per-key (re-)computation.
///
/// Implementors replay a key's input, output, and batch update histories against a set of
/// times of interest, invoke the user logic as needed, and stage output updates.
trait PerKeyCompute<'a, V1, V2, T, R1, R2>
where
    V1: Ord+Clone+'a,
    V2: Ord+Clone+'a,
    T: Lattice+Ord+Clone,
    R1: Semigroup,
    R2: Semigroup,
{
    /// Creates a new instance of the strategy, with empty internal state.
    fn new() -> Self;
    /// Processes one `key`: replays the three update histories, re-evaluates `logic` at each
    /// relevant time below `upper_limit`, stages results in `outputs` (grouped by capability
    /// time), and records not-yet-processable times in `new_interesting`.
    ///
    /// Returns `(times evaluated, times that produced output)`.
    fn compute<K, C1, C2, C3, L>(
        &mut self,
        key: &K,
        source_cursor: (&mut C1, &'a C1::Storage),
        output_cursor: (&mut C2, &'a C2::Storage),
        batch_cursor: (&mut C3, &'a C3::Storage),
        times: &mut Vec<T>,
        logic: &mut L,
        upper_limit: &Antichain<T>,
        outputs: &mut [(T, Vec<(V2, T, R2)>)],
        new_interesting: &mut Vec<T>) -> (usize, usize)
    where
        K: Eq+Clone,
        C1: Cursor<K, V1, T, R1>,
        C2: Cursor<K, V2, T, R2>,
        C3: Cursor<K, V1, T, R1>,
        L: FnMut(&K, &[(&V1, R1)], &mut Vec<(V2, R2)>, &mut Vec<(V2, R2)>);
}
/// Implementation based on replaying historical and new updates together.
mod history_replay {
use ::difference::Semigroup;
use lattice::Lattice;
use trace::Cursor;
use operators::ValueHistory;
use timely::progress::Antichain;
use super::{PerKeyCompute, sort_dedup};
/// The `HistoryReplayer` is a compute strategy based on moving through existing inputs, interesting times, etc in
/// time order, maintaining consolidated representations of updates with respect to future interesting times.
pub struct HistoryReplayer<'a, V1, V2, T, R1, R2>
where
    V1: Ord+Clone+'a,
    V2: Ord+Clone+'a,
    T: Lattice+Ord+Clone,
    R1: Semigroup,
    R2: Semigroup,
{
    batch_history: ValueHistory<'a, V1, T, R1>,     // updates drawn from the newly received batch.
    input_history: ValueHistory<'a, V1, T, R1>,     // updates drawn from the source (input) trace.
    output_history: ValueHistory<'a, V2, T, R2>,    // updates drawn from the previously produced output trace.
    input_buffer: Vec<(&'a V1, R1)>,                // staged input collection handed to user logic.
    output_buffer: Vec<(V2, R2)>,                   // staged prior-output accumulation handed to user logic.
    update_buffer: Vec<(V2, R2)>,                   // output corrections proposed by user logic.
    output_produced: Vec<((V2, T), R2)>,            // outputs produced during this invocation, with their times.
    synth_times: Vec<T>,                            // synthetic interesting times still to process (kept reverse-sorted).
    meets: Vec<T>,                                  // suffix meets of the warned `times` slice.
    times_current: Vec<T>,                          // times that can still render future times interesting.
    temporary: Vec<T>,                              // scratch space for candidate joined times.
}
impl<'a, V1, V2, T, R1, R2> PerKeyCompute<'a, V1, V2, T, R1, R2> for HistoryReplayer<'a, V1, V2, T, R1, R2>
where
    V1: Ord+Clone,
    V2: Ord+Clone,
    T: Lattice+Ord+Clone,
    R1: Semigroup,
    R2: Semigroup,
{
    /// Creates a new `HistoryReplayer` with all histories and scratch buffers empty.
    fn new() -> Self {
        HistoryReplayer {
            batch_history: ValueHistory::new(),
            input_history: ValueHistory::new(),
            output_history: ValueHistory::new(),
            input_buffer: Vec::new(),
            output_buffer: Vec::new(),
            update_buffer: Vec::new(),
            output_produced: Vec::new(),
            synth_times: Vec::new(),
            meets: Vec::new(),
            times_current: Vec::new(),
            temporary: Vec::new(),
        }
    }
    #[inline(never)]
    /// Replays this key's batch, input, and output histories in time order, re-evaluating
    /// the user `logic` at each candidate time below `upper_limit` and staging results.
    /// Returns `(times evaluated, times that produced output)`.
    fn compute<K, C1, C2, C3, L>(
        &mut self,
        key: &K,
        (source_cursor, source_storage): (&mut C1, &'a C1::Storage),
        (output_cursor, output_storage): (&mut C2, &'a C2::Storage),
        (batch_cursor, batch_storage): (&mut C3, &'a C3::Storage),
        times: &mut Vec<T>,
        logic: &mut L,
        upper_limit: &Antichain<T>,
        outputs: &mut [(T, Vec<(V2, T, R2)>)],
        new_interesting: &mut Vec<T>) -> (usize, usize)
    where
        K: Eq+Clone,
        C1: Cursor<K, V1, T, R1>,
        C2: Cursor<K, V2, T, R2>,
        C3: Cursor<K, V1, T, R1>,
        L: FnMut(&K, &[(&V1, R1)], &mut Vec<(V2, R2)>, &mut Vec<(V2, R2)>)
    {

        // The work we need to perform is at times defined principally by the contents of `batch_cursor`
        // and `times`, respectively "new work we just received" and "old times we were warned about".
        //
        // Our first step is to identify these times, so that we can use them to restrict the amount of
        // information we need to recover from `input` and `output`; as all times of interest will have
        // some time from `batch_cursor` or `times`, we can compute their meet and advance all other
        // loaded times by performing the lattice `join` with this value.

        // Load the batch contents.
        let mut batch_replay = self.batch_history.replay_key(batch_cursor, batch_storage, key, |time| time.clone());

        // We determine the meet of times we must reconsider (those from `batch` and `times`). This meet
        // can be used to advance other historical times, which may consolidate their representation. As
        // a first step, we determine the meets of each *suffix* of `times`, which we will use as we play
        // history forward.
        self.meets.clear();
        self.meets.extend(times.iter().cloned());
        for index in (1 .. self.meets.len()).rev() {
            self.meets[index-1] = self.meets[index-1].meet(&self.meets[index]);
        }

        // Determine the meet of times in `batch` and `times`.
        let mut meet = None;
        update_meet(&mut meet, self.meets.get(0));
        update_meet(&mut meet, batch_replay.meet());
        // if let Some(time) = self.meets.get(0) {
        //     meet = match meet {
        //         None => Some(self.meets[0].clone()),
        //         Some(x) => Some(x.meet(&self.meets[0])),
        //     };
        // }
        // if let Some(time) = batch_replay.meet() {
        //     meet = match meet {
        //         None => Some(time.clone()),
        //         Some(x) => Some(x.meet(&time)),
        //     };
        // }

        // Having determined the meet, we can load the input and output histories, where we
        // advance all times by joining them with `meet`. The resulting times are more compact
        // and guaranteed to accumulate identically for times greater or equal to `meet`.

        // Load the input and output histories.
        let mut input_replay = if let Some(meet) = meet.as_ref() {
            self.input_history.replay_key(source_cursor, source_storage, key, |time| time.join(&meet))
        }
        else {
            self.input_history.replay_key(source_cursor, source_storage, key, |time| time.clone())
        };
        let mut output_replay = if let Some(meet) = meet.as_ref() {
            self.output_history.replay_key(output_cursor, output_storage, key, |time| time.join(&meet))
        }
        else {
            self.output_history.replay_key(output_cursor, output_storage, key, |time| time.clone())
        };

        self.synth_times.clear();
        self.times_current.clear();
        self.output_produced.clear();

        // The frontier of times we may still consider.
        // Derived from frontiers of our update histories, supplied times, and synthetic times.

        let mut times_slice = &times[..];
        let mut meets_slice = &self.meets[..];

        let mut compute_counter = 0; // number of times at which `logic` was (potentially) re-evaluated.
        let mut output_counter = 0;  // number of times at which output updates were produced.

        // We have candidate times from `batch` and `times`, as well as times identified by either
        // `input` or `output`. Finally, we may have synthetic times produced as the join of times
        // we consider in the course of evaluation. As long as any of these times exist, we need to
        // keep examining times.
        while let Some(next_time) = [   batch_replay.time(),
                                        times_slice.first(),
                                        input_replay.time(),
                                        output_replay.time(),
                                        self.synth_times.last(),
                                    ].iter().cloned().filter_map(|t| t).min().map(|t| t.clone()) {

            // Advance input and output history replayers. This marks applicable updates as active.
            input_replay.step_while_time_is(&next_time);
            output_replay.step_while_time_is(&next_time);

            // One of our goals is to determine if `next_time` is "interesting", meaning whether we
            // have any evidence that we should re-evaluate the user logic at this time. For a time
            // to be "interesting" it would need to be the join of times that include either a time
            // from `batch`, `times`, or `synth`. Neither `input` nor `output` times are sufficient.

            // Advance batch history, and capture whether an update exists at `next_time`.
            let mut interesting = batch_replay.step_while_time_is(&next_time);
            if interesting {
                if let Some(meet) = meet.as_ref() {
                    batch_replay.advance_buffer_by(&meet);
                }
            }

            // advance both `synth_times` and `times_slice`, marking this time interesting if in either.
            while self.synth_times.last() == Some(&next_time) {
                // We don't know enough about `next_time` to avoid putting it in to `times_current`.
                // TODO: If we knew that the time derived from a canceled batch update, we could remove the time.
                self.times_current.push(self.synth_times.pop().expect("failed to pop from synth_times")); // <-- TODO: this could be a min-heap.
                interesting = true;
            }
            while times_slice.first() == Some(&next_time) {
                // We know nothing about why we were warned about `next_time`, and must include it to scare future times.
                self.times_current.push(times_slice[0].clone());
                times_slice = &times_slice[1..];
                meets_slice = &meets_slice[1..];
                interesting = true;
            }

            // Times could also be interesting if an interesting time is less than them, as they would join
            // and become the time itself. They may not equal the current time because whatever frontier we
            // are tracking may not have advanced far enough.
            // TODO: `batch_history` may or may not be super compact at this point, and so this check might
            //       yield false positives if not sufficiently compact. Maybe we should into this and see.
            interesting = interesting || batch_replay.buffer().iter().any(|&((_, ref t),_)| t.less_equal(&next_time));
            interesting = interesting || self.times_current.iter().any(|t| t.less_equal(&next_time));

            // We should only process times that are not in advance of `upper_limit`.
            //
            // We have no particular guarantee that known times will not be in advance of `upper_limit`.
            // We may have the guarantee that synthetic times will not be, as we test against the limit
            // before we add the time to `synth_times`.
            if !upper_limit.less_equal(&next_time) {

                // We should re-evaluate the computation if this is an interesting time.
                // If the time is uninteresting (and our logic is sound) it is not possible for there to be
                // output produced. This sounds like a good test to have for debug builds!
                if interesting {

                    compute_counter += 1;

                    // Assemble the input collection at `next_time`. (`self.input_buffer` cleared just after use).
                    debug_assert!(self.input_buffer.is_empty());
                    meet.as_ref().map(|meet| input_replay.advance_buffer_by(&meet));
                    for &((value, ref time), ref diff) in input_replay.buffer().iter() {
                        if time.less_equal(&next_time) {
                            self.input_buffer.push((value, diff.clone()));
                        }
                        else {
                            self.temporary.push(next_time.join(time));
                        }
                    }
                    for &((value, ref time), ref diff) in batch_replay.buffer().iter() {
                        if time.less_equal(&next_time) {
                            self.input_buffer.push((value, diff.clone()));
                        }
                        else {
                            self.temporary.push(next_time.join(time));
                        }
                    }
                    crate::consolidation::consolidate(&mut self.input_buffer);

                    meet.as_ref().map(|meet| output_replay.advance_buffer_by(&meet));
                    for &((ref value, ref time), ref diff) in output_replay.buffer().iter() {
                        if time.less_equal(&next_time) {
                            self.output_buffer.push(((*value).clone(), diff.clone()));
                        }
                        else {
                            self.temporary.push(next_time.join(time));
                        }
                    }
                    for &((ref value, ref time), ref diff) in self.output_produced.iter() {
                        if time.less_equal(&next_time) {
                            self.output_buffer.push(((*value).clone(), diff.clone()));
                        }
                        else {
                            self.temporary.push(next_time.join(&time));
                        }
                    }
                    crate::consolidation::consolidate(&mut self.output_buffer);

                    // Apply user logic if non-empty input and see what happens!
                    if self.input_buffer.len() > 0 || self.output_buffer.len() > 0 {
                        logic(key, &self.input_buffer[..], &mut self.output_buffer, &mut self.update_buffer);
                        self.input_buffer.clear();
                        self.output_buffer.clear();
                    }

                    // output_replay.advance_buffer_by(&meet);
                    // for &((ref value, ref time), diff) in output_replay.buffer().iter() {
                    //     if time.less_equal(&next_time) {
                    //         self.output_buffer.push(((*value).clone(), -diff));
                    //     }
                    //     else {
                    //         self.temporary.push(next_time.join(time));
                    //     }
                    // }
                    // for &((ref value, ref time), diff) in self.output_produced.iter() {
                    //     if time.less_equal(&next_time) {
                    //         self.output_buffer.push(((*value).clone(), -diff));
                    //     }
                    //     else {
                    //         self.temporary.push(next_time.join(&time));
                    //     }
                    // }

                    // Having subtracted output updates from user output, consolidate the results to determine
                    // if there is anything worth reporting. Note: this also orders the results by value, so
                    // that could make the above merging plan even easier.
                    crate::consolidation::consolidate(&mut self.update_buffer);

                    // Stash produced updates into both capability-indexed buffers and `output_produced`.
                    // The two locations are important, in that we will compact `output_produced` as we move
                    // through times, but we cannot compact the output buffers because we need their actual
                    // times.
                    if self.update_buffer.len() > 0 {

                        output_counter += 1;

                        // We *should* be able to find a capability for `next_time`. Any thing else would
                        // indicate a logical error somewhere along the way; either we release a capability
                        // we should have kept, or we have computed the output incorrectly (or both!)
                        let idx = outputs.iter().rev().position(|&(ref time, _)| time.less_equal(&next_time));
                        let idx = outputs.len() - idx.expect("failed to find index") - 1;
                        for (val, diff) in self.update_buffer.drain(..) {
                            self.output_produced.push(((val.clone(), next_time.clone()), diff.clone()));
                            outputs[idx].1.push((val, next_time.clone(), diff));
                        }

                        // Advance times in `self.output_produced` and consolidate the representation.
                        // NOTE: We only do this when we add records; it could be that there are situations
                        //       where we want to consolidate even without changes (because an initially
                        //       large collection can now be collapsed).
                        if let Some(meet) = meet.as_ref() {
                            for entry in &mut self.output_produced {
                                (entry.0).1 = (entry.0).1.join(&meet);
                            }
                        }
                        crate::consolidation::consolidate(&mut self.output_produced);
                    }
                }

                // Determine synthetic interesting times.
                //
                // Synthetic interesting times are produced differently for interesting and uninteresting
                // times. An uninteresting time must join with an interesting time to become interesting,
                // which means joins with `self.batch_history` and `self.times_current`. I think we can
                // skip `self.synth_times` as we haven't gotten to them yet, but we will and they will be
                // joined against everything.

                // Any time, even uninteresting times, must be joined with the current accumulation of
                // batch times as well as the current accumulation of `times_current`.
                for &((_, ref time), _) in batch_replay.buffer().iter() {
                    if !time.less_equal(&next_time) {
                        self.temporary.push(time.join(&next_time));
                    }
                }
                for time in self.times_current.iter() {
                    if !time.less_equal(&next_time) {
                        self.temporary.push(time.join(&next_time));
                    }
                }

                sort_dedup(&mut self.temporary);

                // Introduce synthetic times, and re-organize if we add any.
                let synth_len = self.synth_times.len();
                for time in self.temporary.drain(..) {
                    // We can either service `join` now, or must delay for the future.
                    if upper_limit.less_equal(&time) {
                        debug_assert!(outputs.iter().any(|&(ref t,_)| t.less_equal(&time)));
                        new_interesting.push(time);
                    }
                    else {
                        self.synth_times.push(time);
                    }
                }
                if self.synth_times.len() > synth_len {
                    self.synth_times.sort_by(|x,y| y.cmp(x));
                    self.synth_times.dedup();
                }
            }
            else {

                if interesting {
                    // We cannot process `next_time` now, and must delay it.
                    //
                    // I think we are probably only here because of an uninteresting time declared interesting,
                    // as initial interesting times are filtered to be in interval, and synthetic times are also
                    // filtered before introducing them to `self.synth_times`.
                    new_interesting.push(next_time.clone());
                    debug_assert!(outputs.iter().any(|&(ref t,_)| t.less_equal(&next_time)))
                }
            }

            // Update `meet` to track the meet of each source of times.
            meet = None;//T::maximum();
            update_meet(&mut meet, batch_replay.meet());
            update_meet(&mut meet, input_replay.meet());
            update_meet(&mut meet, output_replay.meet());
            for time in self.synth_times.iter() { update_meet(&mut meet, Some(time)); }
            // if let Some(time) = batch_replay.meet() { meet = meet.meet(time); }
            // if let Some(time) = input_replay.meet() { meet = meet.meet(time); }
            // if let Some(time) = output_replay.meet() { meet = meet.meet(time); }
            // for time in self.synth_times.iter() { meet = meet.meet(time); }
            update_meet(&mut meet, meets_slice.first());
            // if let Some(time) = meets_slice.first() { meet = meet.meet(time); }

            // Update `times_current` by the frontier.
            if let Some(meet) = meet.as_ref() {
                for time in self.times_current.iter_mut() {
                    *time = time.join(&meet);
                }
            }
            sort_dedup(&mut self.times_current);
        }

        // Normalize the representation of `new_interesting`, deduplicating and ordering.
        sort_dedup(new_interesting);

        (compute_counter, output_counter)
    }
}
/// Folds an optional `other` time into an optional running `meet`.
///
/// When `other` is `Some`, the accumulator becomes the lattice meet of the two
/// (or a clone of `other` if the accumulator was empty). When `other` is `None`,
/// the accumulator is left untouched.
fn update_meet<T: Lattice+Clone>(meet: &mut Option<T>, other: Option<&T>) {
    if let Some(time) = other {
        let folded = match meet.take() {
            Some(current) => current.meet(time),
            None => time.clone(),
        };
        *meet = Some(folded);
    }
}
}<|fim▁end|> | |
<|file_name|>MeasureUtils.java<|end_file_name|><|fim▁begin|>/*
* SonarQube
* Copyright (C) 2009-2016 SonarSource SA
* mailto:contact AT sonarsource DOT com
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 3 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
package org.sonar.api.measures;
import org.apache.commons.lang.StringUtils;
import javax.annotation.Nullable;
import java.util.Collection;
/**
* An utility class to manipulate measures
*
* @since 1.10
*/
public final class MeasureUtils {
/**
* Class cannot be instantiated, it should only be access through static methods
*/
private MeasureUtils() {
}
/**
* Return true if all measures have numeric value
*
* @param measures the measures
* @return true if all measures numeric values
*/
public static boolean haveValues(Measure... measures) {
if (measures == null || measures.length == 0) {
return false;
}
for (Measure measure : measures) {
if (!hasValue(measure)) {
return false;
}
}
return true;
}
/**
* Get the value of a measure, or alternatively a default value
*
* @param measure the measure
* @param defaultValue the default value
* @return <code>defaultValue</code> if measure is null or has no values.
*/
public static Double getValue(Measure measure, @Nullable Double defaultValue) {
if (MeasureUtils.hasValue(measure)) {
return measure.getValue();
}
return defaultValue;
}
public static Long getValueAsLong(Measure measure, Long defaultValue) {
if (MeasureUtils.hasValue(measure)) {
return measure.getValue().longValue();
}
return defaultValue;
}
public static Double getVariation(Measure measure, int periodIndex) {
return getVariation(measure, periodIndex, null);
}
public static Double getVariation(Measure measure, int periodIndex, @Nullable Double defaultValue) {
Double result = null;
if (measure != null) {
result = measure.getVariation(periodIndex);
}
return result != null ? result : defaultValue;
}
public static Long getVariationAsLong(Measure measure, int periodIndex) {
return getVariationAsLong(measure, periodIndex, null);
}
public static Long getVariationAsLong(Measure measure, int periodIndex, @Nullable Long defaultValue) {
Double result = null;
if (measure != null) {
result = measure.getVariation(periodIndex);
}
return result == null ? defaultValue : Long.valueOf(result.longValue());
}
/**
* Tests if a measure has a value
*
* @param measure the measure
* @return whether the measure has a value
*/
public static boolean hasValue(Measure measure) {
return measure != null && measure.getValue() != null;
}
/**
* Tests if a measure has a data field
*
* @param measure the measure<|fim▁hole|> return measure != null && StringUtils.isNotBlank(measure.getData());
}
/**
* Sums a series of measures
*
* @param zeroIfNone whether to return 0 or null in case measures is null
* @param measures the series of measures
* @return the sum of the measure series
*/
public static Double sum(boolean zeroIfNone, Collection<Measure> measures) {
if (measures != null) {
return sum(zeroIfNone, measures.toArray(new Measure[measures.size()]));
}
return zeroIfNone(zeroIfNone);
}
/**
* Sums a series of measures
*
* @param zeroIfNone whether to return 0 or null in case measures is null
* @param measures the series of measures
* @return the sum of the measure series
*/
public static Double sum(boolean zeroIfNone, Measure... measures) {
if (measures == null) {
return zeroIfNone(zeroIfNone);
}
Double sum = 0d;
boolean hasValue = false;
for (Measure measure : measures) {
if (measure != null && measure.getValue() != null) {
hasValue = true;
sum += measure.getValue();
}
}
if (hasValue) {
return sum;
}
return zeroIfNone(zeroIfNone);
}
/**
* Sums a series of measures for the given variation index
*
* @param zeroIfNone whether to return 0 or null in case measures is null
* @param variationIndex the index of the variation to use
* @param measures the series of measures
* @return the sum of the variations for the measure series
*/
public static Double sumOnVariation(boolean zeroIfNone, int variationIndex, Collection<Measure> measures) {
if (measures == null) {
return zeroIfNone(zeroIfNone);
}
Double sum = 0d;
for (Measure measure : measures) {
Double var = measure.getVariation(variationIndex);
if (var != null) {
sum += var;
}
}
return sum;
}
private static Double zeroIfNone(boolean zeroIfNone) {
return zeroIfNone ? 0d : null;
}
}<|fim▁end|> | * @return whether the measure has a data field
*/
public static boolean hasData(Measure measure) { |
<|file_name|>utils.py<|end_file_name|><|fim▁begin|><|fim▁hole|>
def _asdict(self):
result = OrderedDict()
for key in self.__mapper__.c.keys():
result[key] = getattr(self, key)
return result<|fim▁end|> | from collections import OrderedDict
class SerializableModel(object): |
<|file_name|>content.js<|end_file_name|><|fim▁begin|>module.exports = {
before: [function () {
console.log('global beforeAll1');
}, 'alias1'],
'alias1': 'alias2',
'alias2': function () {
console.log('global beforeAll2');
},
'One': function () {
this.sum = 1;
},<|fim▁hole|> if (this.sum !== 2) {
throw new Error(this.sum + ' !== 2');
}
}
};<|fim▁end|> | 'plus one': function () {
this.sum += 1;
},
'equals two': function () { |
<|file_name|>tarpc_server_missing_async.rs<|end_file_name|><|fim▁begin|>#[tarpc::service(derive_serde = false)]
trait World {
async fn hello(name: String) -> String;
}
<|fim▁hole|> fn hello(name: String) -> String {
format!("Hello, {}!", name)
}
}
fn main() {}<|fim▁end|> | struct HelloServer;
#[tarpc::server]
impl World for HelloServer { |
<|file_name|>progParser.py<|end_file_name|><|fim▁begin|>import xml.etree.ElementTree as ET
import csv
'''
Tara O'Kelly - G00322214,
Graph Theory Assignment,
Third Year, Graph Theory, Software Development.
A program to parse the xml file with data taken from http://timetable.gmit.ie/.
'''
# adapted from https://docs.python.org/3/library/xml.etree.elementtree.html
out = csv.writer(open("programmes.csv","w"), delimiter=',',quoting=csv.QUOTE_ALL)
headerRow = ["course", "code", "degree", "dept"]
out.writerow(headerRow)
dept = ["CCAM","CE","CS","CA","EE","HT","LPS","MIE","SI","SOB_AIS","SOB_M","EFDT", "Mayo"]
#dept = ["Centre for the Creative Arts and Media","Dept of Building and Civil Engineering","Dept of Computer Science & Applied Physics","Dept of Culinary Arts","Dept of Electronic and Electrical Engineering","Dept of Heritage and Tourism", "Dept of Life and Physical Sciences", "Dept of Mechanical and Industrial Engineering","Dept of Service Industries","Department of Accounting & Information Systems","Department of Management", "Letterfrack","Mayo"]
c = 0
for d in dept:
print(d)
tree = ET.parse("prog"+d+".xml")
root = tree.getroot()
for child in root:
count = 0
ss = ""
row = []
s = child.text.split(" ", 3)
# course names that adhered to the common naming convention
if "BA" == s[1] or "BSc" == s[1] or s[1] == "BB" or s[1] == "BBs" or s[1] == "HDip" or s[1] == "HC" or s[1] == "BEng" or s[1] == "NCC" or s[1] == "MSc" or s[1] == "Certificate" or s[1] == "Assc" or s[1] == "BICT62":
if s[2] == "in" or s[2] == "of":
row = [s[3], s[0], s[1], d]
else:
row = [s[2] + " " + s[3], s[0], s[1], d]
elif s[1] == "Bachelor":
s = child.text.split(" ", 5)<|fim▁hole|> if s[4] == "in":
row = [s[5], s[0], s[1] + " " + s[2] + " " + s[3],d]
else:
if s[4] == "L8":
row = [s[0] + " " + s[4] + " " + s[5], s[0], s[1] + " " + s[2] + " " + s[3], d]
else:
row = [s[4] + " " + s[5], s[0], s[1] + " " + s[2] + " " + s[3], d]
elif s[1] == "SPA" or s[1] == "Higher" or s[1] == "Advanced" or s[1] == "Prof" or s[1] == "Post":
s = child.text.split(" ", 4)
if s[3] == "in":
row = [s[4], s[0], s[1] + " " + s[2],d]
else:
row = [s[3] + " " + s[4], s[0], s[1] + " " + s[2],d]
elif s[1] == "National":
s = child.text.split(" ", 6)
if s[5] == "in":
row = [s[6], s[0], s[1] + " " + s[2] + " " + s[3] + " " + s[4], d]
else:
row = [s[5] + " " + s[6], s[0], s[1] + " " + s[2] + " " + s[3] + " " + s[4], d]
# courses distinguished by groups (art students)
elif s[1] == "Gr":
s = child.text.split(" ", 5)
if s[4] == "in":
row = [s[5] + " " + s[1] + " " + s[2], s[0], s[3], d]
else:
row = [s[4] + " " + s[5] + " " + s[1] + " " + s[2], s[0], s[3], d]
elif s[1] == "L8" or s[1] == "L7":
row = [s[0], s[0], s[1] + " " + s[2] + " " + s[3], d]
elif s[1] == "" :
s = child.text.split(" ", 4)
row = [s[4], s[0], s[2], d]
elif s[1] == "Cons" :
row = [s[3], s[0] + s[1], "SPA", d]
# courses with no specified name
else:
for subs in s:
if count == 1:
ss += subs
if count > 1:
ss += " " + subs
count += 1
row = [ss, s[0], "Unknown", d]
out.writerow(row)
c += 1<|fim▁end|> | |
<|file_name|>main.js<|end_file_name|><|fim▁begin|>var DND_START_EVENT = 'dnd-start',
DND_END_EVENT = 'dnd-end',
DND_DRAG_EVENT = 'dnd-drag';
angular
.module( 'app' )
.config( [ 'iScrollServiceProvider', function(iScrollServiceProvider){
iScrollServiceProvider.configureDefaults({
iScroll: {
momentum: false,
mouseWheel: true,
disableMouse: false,
useTransform: true,
scrollbars: true,
interactiveScrollbars: true,
resizeScrollbars: false,
probeType: 2,
preventDefault: false
// preventDefaultException: {
// tagName: /^.*$/
// }
},
directive: {
asyncRefreshDelay: 0,
refreshInterval: false
}
});
} ] )
.controller( 'main', function( $scope, draggingIndicator, iScrollService ){
'use strict';
this.iScrollState = iScrollService.state;
var DND_SCROLL_IGNORED_HEIGHT = 20, // ignoring 20px touch-scroll,
// TODO: this might be stored somewhere in browser env
DND_ACTIVATION_TIMEOUT = 500, // milliseconds needed to touch-activate d-n-d
MOUSE_OVER_EVENT = 'mousemove';
var self = this,
items = [],
touchTimerId;
$scope.dragging = draggingIndicator;
for( var i = 0; i< 25; i++ ){
items.push( i );
}
$scope.items = items;
this.disable = function ( ){
$scope.iScrollInstance.disable();
};
this.log = function ( msg ){<|fim▁hole|>} );<|fim▁end|> | console.log( 'got msg', msg );
};
|
<|file_name|>networks.js<|end_file_name|><|fim▁begin|>'use strict';
Object.defineProperty(exports, '__esModule', { value: true });
exports.bitcoin = {
messagePrefix: '\x18Bitcoin Signed Message:\n',
bech32: 'bc',
bip32: {
public: 0x0488b21e,
private: 0x0488ade4,
},
pubKeyHash: 0x00,
scriptHash: 0x05,
wif: 0x80,
};
exports.regtest = {
messagePrefix: '\x18Bitcoin Signed Message:\n',
bech32: 'bcrt',
bip32: {
public: 0x043587cf,
private: 0x04358394,
},
pubKeyHash: 0x6f,
scriptHash: 0xc4,
wif: 0xef,
};<|fim▁hole|> bip32: {
public: 0x043587cf,
private: 0x04358394,
},
pubKeyHash: 0x6f,
scriptHash: 0xc4,
wif: 0xef,
};<|fim▁end|> | exports.testnet = {
messagePrefix: '\x18Bitcoin Signed Message:\n',
bech32: 'tb', |
<|file_name|>settings.component.js<|end_file_name|><|fim▁begin|>var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
return c > 3 && r && Object.defineProperty(target, key, r), r;
};
var __metadata = (this && this.__metadata) || function (k, v) {
if (typeof Reflect === "object" && typeof Reflect.metadata === "function") return Reflect.metadata(k, v);
};
import { Component } from '@angular/core';
import { SettingsService } from "../../shared/service/settings.service";
import { DataService } from "../../shared/service/data.service";
var SettingsComponent = (function () {
function SettingsComponent(settingsService, dataService) {
this.settingsService = settingsService;
this.dataService = dataService;
// Do stuff
}
SettingsComponent.prototype.save = function () {
this.settingsService.save();
this.dataService.getParameterNames();
};
return SettingsComponent;
}());
SettingsComponent = __decorate([
Component({
selector: 'my-home',
templateUrl: 'settings.component.html',
styleUrls: ['settings.component.scss']
}),
__metadata("design:paramtypes", [SettingsService, DataService])
], SettingsComponent);
export { SettingsComponent };<|fim▁hole|>//# sourceMappingURL=settings.component.js.map<|fim▁end|> | |
<|file_name|>board-file-gallery.component.ts<|end_file_name|><|fim▁begin|>import { Component, ViewChild, NgZone} from '@angular/core';
<|fim▁hole|>import { CanvasFile } from '../../models/canvas-file';
@Component({
selector: 'boad-file-gallery',
templateUrl: './board-file-gallery.component.html'
})
export class BoardFileGalleryComponent {
private files :Array<CanvasFile>;
constructor(private boardDialogRef: MdDialogRef<BoardFileGalleryComponent>,
private boardService :BoardService,
private zone: NgZone) {
this.boardService
.getFilesForUser()
.subscribe(data => {
console.log(1);
console.log(data);
this.zone.run(() => {
this.files = data;
});
}, error => {});
}
closeAndInsertIntoEditor(url) {
console.log(url);
this.boardDialogRef.close();
this.boardService.insertFromGallery(url);
}
}<|fim▁end|> | import { MdDialog, MdDialogRef } from '@angular/material';
import { BoardService } from '../board/board.service';
|
<|file_name|>ThreadGroupDebuggee.java<|end_file_name|><|fim▁begin|>/*<|fim▁hole|> * (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @author Vitaly A. Provodin
*/
/**
* Created on 18.02.2005
*/
package org.apache.harmony.jpda.tests.jdwp.ThreadReference;
import org.apache.harmony.jpda.tests.framework.DebuggeeSynchronizer;
import org.apache.harmony.jpda.tests.framework.LogWriter;
import org.apache.harmony.jpda.tests.share.JPDADebuggeeSynchronizer;
import org.apache.harmony.jpda.tests.share.SyncDebuggee;
/**
* The class specifies debuggee for <code>org.apache.harmony.jpda.tests.jdwp.ThreadReference.ThreadGroupTest</code>.
* This debuggee is started as follow:
* <ol>
* <li>the tested group <code>TESTED_GROUP</code> is created
* <li>the tested thread <code>TESTED_THREAD</code> is started so this
* thread belongs to that thread group
* </ol>
* For different goals of tests, the debuggee sends the <code>SGNL_READY</code>
* signal to and waits for the <code>SGNL_CONTINUE</code> signal from debugger
* in two places:
* <ul>
* <li>right away when the tested thread has been started
* <li>when the tested thread has been finished
* </ul>
*/
public class ThreadGroupDebuggee extends SyncDebuggee {
public static final String TESTED_GROUP = "TestedGroup";
public static final String TESTED_THREAD = "TestedThread";
static Object waitForStart = new Object();
static Object waitForFinish = new Object();
static Object waitTimeObject = new Object();
static void waitMlsecsTime(long mlsecsTime) {
synchronized(waitTimeObject) {
try {
waitTimeObject.wait(mlsecsTime);
} catch (Throwable throwable) {
// ignore
}
}
}
public void run() {
ThreadGroup thrdGroup = new ThreadGroup(TESTED_GROUP);
DebuggeeThread thrd = new DebuggeeThread(thrdGroup, TESTED_THREAD,
logWriter, synchronizer);
synchronized(waitForStart){
thrd.start();
try {
waitForStart.wait();
} catch (InterruptedException e) {
}
}
while ( thrd.isAlive() ) {
waitMlsecsTime(100);
}
// synchronized(waitForFinish){
logWriter.println("thread is finished");
// }
logWriter.println("send SGNL_READY");
synchronizer.sendMessage(JPDADebuggeeSynchronizer.SGNL_READY);
synchronizer.receiveMessage(JPDADebuggeeSynchronizer.SGNL_CONTINUE);
}
class DebuggeeThread extends Thread {
LogWriter logWriter;
DebuggeeSynchronizer synchronizer;
public DebuggeeThread(ThreadGroup thrdGroup, String name,
LogWriter logWriter, DebuggeeSynchronizer synchronizer) {
super(thrdGroup, name);
this.logWriter = logWriter;
this.synchronizer = synchronizer;
}
public void run() {
synchronized(ThreadGroupDebuggee.waitForFinish){
synchronized(ThreadGroupDebuggee.waitForStart){
ThreadGroupDebuggee.waitForStart.notifyAll();
logWriter.println(getName() + ": started");
synchronizer.sendMessage(JPDADebuggeeSynchronizer.SGNL_READY);
logWriter.println(getName() + ": wait for SGNL_CONTINUE");
synchronizer.receiveMessage(JPDADebuggeeSynchronizer.SGNL_CONTINUE);
logWriter.println(getName() + ": finished");
}
}
}
}
public static void main(String [] args) {
runDebuggee(ThreadGroupDebuggee.class);
}
}<|fim▁end|> | * Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0 |
<|file_name|>bitcoin_nb.ts<|end_file_name|><|fim▁begin|><?xml version="1.0" ?><!DOCTYPE TS><TS language="nb" version="2.0">
<defaultcodec>UTF-8</defaultcodec>
<context>
<name>AboutDialog</name>
<message>
<location filename="../forms/aboutdialog.ui" line="+14"/>
<source>About RupayaCoin</source>
<translation>Om RupayaCoin</translation>
</message>
<message>
<location line="+39"/>
<source><b>RupayaCoin</b> version</source>
<translation><b>RupayaCoin</b> versjon</translation>
</message>
<message>
<location line="+57"/>
<source>
This is experimental software.
Distributed under the MIT/X11 software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php.
This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit (http://www.openssl.org/) and cryptographic software written by Eric Young ([email protected]) and UPnP software written by Thomas Bernard.</source>
<translation>
Dette er eksperimentell programvare.
Distribuert under MIT/X11 programvarelisensen, se medfølgende fil COPYING eller http://www.opensource.org/licenses/mit-license.php.
Dette produktet inneholder programvare utviklet av OpenSSL prosjektet for bruk i OpenSSL Toolkit (http://www.openssl.org/) og kryptografisk programvare skrevet av Eric Young ([email protected]) og UPnP programvare skrevet av Thomas Bernard.</translation>
</message>
<message>
<location filename="../aboutdialog.cpp" line="+14"/>
<source>Copyright</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>The RupayaCoin developers</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>AddressBookPage</name>
<message>
<location filename="../forms/addressbookpage.ui" line="+14"/>
<source>Address Book</source>
<translation>Adressebok</translation>
</message>
<message>
<location line="+19"/>
<source>Double-click to edit address or label</source>
<translation>Dobbeltklikk for å redigere adresse eller merkelapp</translation>
</message>
<message>
<location line="+27"/>
<source>Create a new address</source>
<translation>Lag en ny adresse</translation>
</message>
<message>
<location line="+14"/>
<source>Copy the currently selected address to the system clipboard</source>
<translation>Kopier den valgte adressen til systemets utklippstavle</translation>
</message>
<message>
<location line="-11"/>
<source>&New Address</source>
<translation>&Ny Adresse</translation>
</message>
<message>
<location filename="../addressbookpage.cpp" line="+63"/>
<source>These are your RupayaCoin addresses for receiving payments. You may want to give a different one to each sender so you can keep track of who is paying you.</source>
<translation>Dette er dine RupayaCoin-adresser for mottak av betalinger. Du kan gi forskjellige adresser til alle som skal betale deg for å holde bedre oversikt.</translation>
</message>
<message>
<location filename="../forms/addressbookpage.ui" line="+14"/>
<source>&Copy Address</source>
<translation>&Kopier Adresse</translation>
</message>
<message>
<location line="+11"/>
<source>Show &QR Code</source>
<translation>Vis &QR Kode</translation>
</message>
<message>
<location line="+11"/>
<source>Sign a message to prove you own a RupayaCoin address</source>
<translation>Signer en melding for å bevise at du eier en RupayaCoin-adresse</translation>
</message>
<message>
<location line="+3"/>
<source>Sign &Message</source>
<translation>Signér &Melding</translation>
</message>
<message>
<location line="+25"/>
<source>Delete the currently selected address from the list</source>
<translation>Slett den valgte adressen fra listen.</translation>
</message>
<message>
<location line="+27"/>
<source>Export the data in the current tab to a file</source>
<translation>Eksporter data fra nåværende fane til fil</translation>
</message>
<message>
<location line="+3"/>
<source>&Export</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-44"/>
<source>Verify a message to ensure it was signed with a specified RupayaCoin address</source>
<translation>Verifiser en melding for å være sikker på at den ble signert av en angitt RupayaCoin-adresse</translation>
</message>
<message>
<location line="+3"/>
<source>&Verify Message</source>
<translation>&Verifiser Melding</translation>
</message>
<message>
<location line="+14"/>
<source>&Delete</source>
<translation>&Slett</translation>
</message>
<message>
<location filename="../addressbookpage.cpp" line="-5"/>
<source>These are your RupayaCoin addresses for sending payments. Always check the amount and the receiving address before sending coins.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>Copy &Label</source>
<translation>Kopier &Merkelapp</translation>
</message>
<message>
<location line="+1"/>
<source>&Edit</source>
<translation>&Rediger</translation>
</message>
<message>
<location line="+1"/>
<source>Send &Coins</source>
<translation>Send &Coins</translation>
</message>
<message>
<location line="+260"/>
<source>Export Address Book Data</source>
<translation>Eksporter adressebok</translation>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
<translation>Kommaseparert fil (*.csv)</translation>
</message>
<message>
<location line="+13"/>
<source>Error exporting</source>
<translation>Feil ved eksportering</translation>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
<translation>Kunne ikke skrive til filen %1.</translation>
</message>
</context>
<context>
<name>AddressTableModel</name>
<message>
<location filename="../addresstablemodel.cpp" line="+144"/>
<source>Label</source>
<translation>Merkelapp</translation>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation>Adresse</translation>
</message>
<message>
<location line="+36"/>
<source>(no label)</source>
<translation>(ingen merkelapp)</translation>
</message>
</context>
<context>
<name>AskPassphraseDialog</name>
<message>
<location filename="../forms/askpassphrasedialog.ui" line="+26"/>
<source>Passphrase Dialog</source>
<translation>Dialog for Adgangsfrase</translation>
</message>
<message>
<location line="+21"/>
<source>Enter passphrase</source>
<translation>Angi adgangsfrase</translation>
</message>
<message>
<location line="+14"/>
<source>New passphrase</source>
<translation>Ny adgangsfrase</translation>
</message>
<message>
<location line="+14"/>
<source>Repeat new passphrase</source>
<translation>Gjenta ny adgangsfrase</translation>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="+33"/>
<source>Enter the new passphrase to the wallet.<br/>Please use a passphrase of <b>10 or more random characters</b>, or <b>eight or more words</b>.</source>
<translation>Skriv inn den nye adgangsfrasen for lommeboken.<br/>Vennligst bruk en adgangsfrase med <b>10 eller flere tilfeldige tegn</b>, eller <b>åtte eller flere ord</b>.</translation>
</message>
<message>
<location line="+1"/>
<source>Encrypt wallet</source>
<translation>Krypter lommebok</translation>
</message>
<message>
<location line="+3"/>
<source>This operation needs your wallet passphrase to unlock the wallet.</source>
<translation>Denne operasjonen krever adgangsfrasen til lommeboken for å låse den opp.</translation>
</message>
<message>
<location line="+5"/>
<source>Unlock wallet</source>
<translation>Lås opp lommebok</translation>
</message>
<message>
<location line="+3"/>
<source>This operation needs your wallet passphrase to decrypt the wallet.</source>
<translation>Denne operasjonen krever adgangsfrasen til lommeboken for å dekryptere den.</translation>
</message>
<message>
<location line="+5"/>
<source>Decrypt wallet</source>
<translation>Dekrypter lommebok</translation>
</message>
<message>
<location line="+3"/>
<source>Change passphrase</source>
<translation>Endre adgangsfrase</translation>
</message>
<message>
<location line="+1"/>
<source>Enter the old and new passphrase to the wallet.</source>
<translation>Skriv inn gammel og ny adgangsfrase for lommeboken.</translation>
</message>
<message>
<location line="+46"/>
<source>Confirm wallet encryption</source>
<translation>Bekreft kryptering av lommebok</translation>
</message>
<message>
<location line="+1"/>
<source>Warning: If you encrypt your wallet and lose your passphrase, you will <b>LOSE ALL OF YOUR RupayaCoinS</b>!</source>
<translation>Advarsel: Hvis du krypterer lommeboken og mister adgangsfrasen, så vil du <b>MISTE ALLE DINE RupayaCoinS</b>!</translation>
</message>
<message>
<location line="+0"/>
<source>Are you sure you wish to encrypt your wallet?</source>
<translation>Er du sikker på at du vil kryptere lommeboken?</translation>
</message>
<message>
<location line="+15"/>
<source>IMPORTANT: Any previous backups you have made of your wallet file should be replaced with the newly generated, encrypted wallet file. For security reasons, previous backups of the unencrypted wallet file will become useless as soon as you start using the new, encrypted wallet.</source>
<translation>VIKTIG: Tidligere sikkerhetskopier av din lommebok-fil, bør erstattes med den nylig genererte, krypterte filen, da de blir ugyldiggjort av sikkerhetshensyn så snart du begynner å bruke den nye krypterte lommeboken.</translation>
</message>
<message>
<location line="+100"/>
<location line="+24"/>
<source>Warning: The Caps Lock key is on!</source>
<translation>Advarsel: Caps Lock er på !</translation>
</message>
<message>
<location line="-130"/>
<location line="+58"/>
<source>Wallet encrypted</source>
<translation>Lommebok kryptert</translation>
</message>
<message>
<location line="-56"/>
<source>RupayaCoin will close now to finish the encryption process. Remember that encrypting your wallet cannot fully protect your RupayaCoins from being stolen by malware infecting your computer.</source>
<translation>RupayaCoin vil nå lukkes for å fullføre krypteringsprosessen. Husk at kryptering av lommeboken ikke fullt ut kan beskytte dine RupayaCoins fra å bli stjålet om skadevare infiserer datamaskinen.</translation>
</message>
<message>
<location line="+13"/>
<location line="+7"/>
<location line="+42"/>
<location line="+6"/>
<source>Wallet encryption failed</source>
<translation>Kryptering av lommebok feilet</translation>
</message>
<message>
<location line="-54"/>
<source>Wallet encryption failed due to an internal error. Your wallet was not encrypted.</source>
<translation>Kryptering av lommebok feilet på grunn av en intern feil. Din lommebok ble ikke kryptert.</translation>
</message>
<message>
<location line="+7"/>
<location line="+48"/>
<source>The supplied passphrases do not match.</source>
<translation>De angitte adgangsfrasene er ulike.</translation>
</message>
<message>
<location line="-37"/>
<source>Wallet unlock failed</source>
<translation>Opplåsing av lommebok feilet</translation>
</message>
<message>
<location line="+1"/>
<location line="+11"/>
<location line="+19"/>
<source>The passphrase entered for the wallet decryption was incorrect.</source>
<translation>Adgangsfrasen angitt for dekryptering av lommeboken var feil.</translation>
</message>
<message>
<location line="-20"/>
<source>Wallet decryption failed</source>
<translation>Dekryptering av lommebok feilet</translation>
</message>
<message>
<location line="+14"/>
<source>Wallet passphrase was successfully changed.</source>
<translation>Adgangsfrase for lommebok endret.</translation>
</message>
</context>
<context>
<name>BitcoinGUI</name>
<message>
<location filename="../bitcoingui.cpp" line="+233"/>
<source>Sign &message...</source>
<translation>Signer &melding...</translation>
</message>
<message>
<location line="+280"/>
<source>Synchronizing with network...</source>
<translation>Synkroniserer med nettverk...</translation>
</message>
<message>
<location line="-349"/>
<source>&Overview</source>
<translation>&Oversikt</translation>
</message>
<message>
<location line="+1"/>
<source>Show general overview of wallet</source>
<translation>Vis generell oversikt over lommeboken</translation>
</message>
<message>
<location line="+20"/>
<source>&Transactions</source>
<translation>&Transaksjoner</translation>
</message>
<message>
<location line="+1"/>
<source>Browse transaction history</source>
<translation>Vis transaksjonshistorikk</translation>
</message>
<message>
<location line="+7"/>
<source>Edit the list of stored addresses and labels for sending</source>
<translation>Rediger listen over adresser og deres merkelapper</translation>
</message>
<message>
<location line="-14"/>
<source>Show the list of addresses for receiving payments</source>
<translation>Vis listen over adresser for mottak av betalinger</translation>
</message>
<message>
<location line="+31"/>
<source>E&xit</source>
<translation>&Avslutt</translation>
</message>
<message>
<location line="+1"/>
<source>Quit application</source>
<translation>Avslutt applikasjonen</translation>
</message>
<message>
<location line="+4"/>
<source>Show information about RupayaCoin</source>
<translation>Vis informasjon om RupayaCoin</translation>
</message>
<message>
<location line="+2"/>
<source>About &Qt</source>
<translation>Om &Qt</translation>
</message>
<message>
<location line="+1"/>
<source>Show information about Qt</source>
<translation>Vis informasjon om Qt</translation>
</message>
<message>
<location line="+2"/>
<source>&Options...</source>
<translation>&Innstillinger...</translation>
</message>
<message>
<location line="+6"/>
<source>&Encrypt Wallet...</source>
<translation>&Krypter Lommebok...</translation>
</message>
<message>
<location line="+3"/>
<source>&Backup Wallet...</source>
<translation>Lag &Sikkerhetskopi av Lommebok...</translation>
</message>
<message>
<location line="+2"/>
<source>&Change Passphrase...</source>
<translation>&Endre Adgangsfrase...</translation>
</message>
<message>
<location line="+285"/>
<source>Importing blocks from disk...</source>
<translation>Importere blokker...</translation>
</message>
<message>
<location line="+3"/>
<source>Reindexing blocks on disk...</source>
<translation>Re-indekserer blokker på disk...</translation>
</message>
<message>
<location line="-347"/>
<source>Send coins to a RupayaCoin address</source>
<translation>Send til en RupayaCoin-adresse</translation>
</message>
<message>
<location line="+49"/>
<source>Modify configuration options for RupayaCoin</source>
<translation>Endre oppsett for RupayaCoin</translation>
</message>
<message>
<location line="+9"/>
<source>Backup wallet to another location</source>
<translation>Sikkerhetskopiér lommebok til annet sted</translation>
</message>
<message>
<location line="+2"/>
<source>Change the passphrase used for wallet encryption</source>
<translation>Endre adgangsfrasen brukt for kryptering av lommebok</translation>
</message>
<message>
<location line="+6"/>
<source>&Debug window</source>
<translation>&Feilsøkingsvindu</translation>
</message>
<message>
<location line="+1"/>
<source>Open debugging and diagnostic console</source>
<translation>Åpne konsoll for feilsøk og diagnostikk</translation>
</message>
<message>
<location line="-4"/>
<source>&Verify message...</source>
<translation>&Verifiser melding...</translation>
</message>
<message>
<location line="-165"/>
<location line="+530"/>
<source>RupayaCoin</source>
<translation>RupayaCoin</translation>
</message>
<message>
<location line="-530"/>
<source>Wallet</source>
<translation>Lommebok</translation>
</message>
<message>
<location line="+101"/>
<source>&Send</source>
<translation>&Send</translation>
</message>
<message>
<location line="+7"/>
<source>&Receive</source>
<translation>&Motta</translation>
</message>
<message>
<location line="+14"/>
<source>&Addresses</source>
<translation>&Adressebok</translation>
</message>
<message>
<location line="+22"/>
<source>&About RupayaCoin</source>
<translation>&Om RupayaCoin</translation>
</message>
<message>
<location line="+9"/>
<source>&Show / Hide</source>
<translation>&Vis / Gjem</translation>
</message>
<message>
<location line="+1"/>
<source>Show or hide the main Window</source>
<translation>Vis eller skjul hovedvinduet</translation>
</message>
<message>
<location line="+3"/>
<source>Encrypt the private keys that belong to your wallet</source>
<translation>Krypter de private nøklene som tilhører lommeboken din</translation>
</message>
<message>
<location line="+7"/>
<source>Sign messages with your RupayaCoin addresses to prove you own them</source>
<translation>Signér en melding for å bevise at du eier denne adressen</translation>
</message>
<message>
<location line="+2"/>
<source>Verify messages to ensure they were signed with specified RupayaCoin addresses</source>
<translation>Bekreft meldinger for å være sikker på at de ble signert av en angitt RupayaCoin-adresse</translation>
</message>
<message>
<location line="+28"/>
<source>&File</source>
<translation>&Fil</translation>
</message>
<message>
<location line="+7"/>
<source>&Settings</source>
<translation>&Innstillinger</translation>
</message>
<message>
<location line="+6"/>
<source>&Help</source>
<translation>&Hjelp</translation>
</message>
<message>
<location line="+9"/>
<source>Tabs toolbar</source>
<translation>Verktøylinje for faner</translation>
</message>
<message>
<location line="+17"/>
<location line="+10"/>
<source>[testnet]</source>
<translation>[testnett]</translation>
</message>
<message>
<location line="+47"/>
<source>RupayaCoin client</source>
<translation>RupayaCoinklient</translation>
</message>
<message numerus="yes">
<location line="+141"/>
<source>%n active connection(s) to RupayaCoin network</source>
<translation><numerusform>%n aktiv forbindelse til RupayaCoin-nettverket</numerusform><numerusform>%n aktive forbindelser til RupayaCoin-nettverket</numerusform></translation>
</message>
<message>
<location line="+22"/>
<source>No block source available...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+12"/>
<source>Processed %1 of %2 (estimated) blocks of transaction history.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Processed %1 blocks of transaction history.</source>
<translation>Behandlet %1 blokker med transaksjonshistorikk.</translation>
</message>
<message numerus="yes">
<location line="+20"/>
<source>%n hour(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n day(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n week(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location line="+4"/>
<source>%1 behind</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>Last received block was generated %1 ago.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Transactions after this will not yet be visible.</source>
<translation>Transaksjoner etter dette vil ikke være synlige enda.</translation>
</message>
<message>
<location line="+22"/>
<source>Error</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Warning</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Information</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+70"/>
<source>This transaction is over the size limit. You can still send it for a fee of %1, which goes to the nodes that process your transaction and helps to support the network. Do you want to pay the fee?</source>
<translation>Denne transaksjonen overstiger størrelsesbegrensningen. Du kan likevel sende den med et gebyr på %1, som går til nodene som prosesserer transaksjonen din og støtter nettverket. Vil du betale gebyret?</translation>
</message>
<message>
<location line="-140"/>
<source>Up to date</source>
<translation>Ajour</translation>
</message>
<message>
<location line="+31"/>
<source>Catching up...</source>
<translation>Kommer ajour...</translation>
</message>
<message>
<location line="+113"/>
<source>Confirm transaction fee</source>
<translation>Bekreft transaksjonsgebyr</translation>
</message>
<message>
<location line="+8"/>
<source>Sent transaction</source>
<translation>Sendt transaksjon</translation>
</message>
<message>
<location line="+0"/>
<source>Incoming transaction</source>
<translation>Innkommende transaksjon</translation>
</message>
<message>
<location line="+1"/>
<source>Date: %1
Amount: %2
Type: %3
Address: %4
</source>
<translation>Dato: %1
Beløp: %2
Type: %3
Adresse: %4
</translation>
</message>
<message>
<location line="+33"/>
<location line="+23"/>
<source>URI handling</source>
<translation>URI håndtering</translation>
</message>
<message>
<location line="-23"/>
<location line="+23"/>
<source>URI can not be parsed! This can be caused by an invalid RupayaCoin address or malformed URI parameters.</source>
<translation>URI kunne ikke tolkes! Dette kan forårsakes av en ugyldig RupayaCoin-adresse eller feil i URI-parametere.</translation>
</message>
<message>
<location line="+17"/>
<source>Wallet is <b>encrypted</b> and currently <b>unlocked</b></source>
<translation>Lommeboken er <b>kryptert</b> og for tiden <b>ulåst</b></translation>
</message>
<message>
<location line="+8"/>
<source>Wallet is <b>encrypted</b> and currently <b>locked</b></source>
<translation>Lommeboken er <b>kryptert</b> og for tiden <b>låst</b></translation>
</message>
<message>
<location filename="../bitcoin.cpp" line="+111"/>
<source>A fatal error occurred. RupayaCoin can no longer continue safely and will quit.</source>
<translation>En fatal feil har inntruffet. Det er ikke trygt å fortsette og RupayaCoin må derfor avslutte.</translation>
</message>
</context>
<context>
<name>ClientModel</name>
<message>
<location filename="../clientmodel.cpp" line="+104"/>
<source>Network Alert</source>
<translation>Nettverksvarsel</translation>
</message>
</context>
<context>
<name>EditAddressDialog</name>
<message>
<location filename="../forms/editaddressdialog.ui" line="+14"/>
<source>Edit Address</source>
<translation>Rediger adresse</translation>
</message>
<message>
<location line="+11"/>
<source>&Label</source>
<translation>&Merkelapp</translation>
</message>
<message>
<location line="+10"/>
<source>The label associated with this address book entry</source>
<translation>Merkelappen koblet til denne adressen i adresseboken</translation>
</message>
<message>
<location line="+7"/>
<source>&Address</source>
<translation>&Adresse</translation>
</message>
<message>
<location line="+10"/>
<source>The address associated with this address book entry. This can only be modified for sending addresses.</source>
<translation>Adressen til denne oppføringen i adresseboken. Denne kan kun endres for utsendingsadresser.</translation>
</message>
<message>
<location filename="../editaddressdialog.cpp" line="+21"/>
<source>New receiving address</source>
<translation>Ny mottaksadresse</translation>
</message>
<message>
<location line="+4"/>
<source>New sending address</source>
<translation>Ny utsendingsadresse</translation>
</message>
<message>
<location line="+3"/>
<source>Edit receiving address</source>
<translation>Rediger mottaksadresse</translation>
</message>
<message>
<location line="+4"/>
<source>Edit sending address</source>
<translation>Rediger utsendingsadresse</translation>
</message>
<message>
<location line="+76"/>
<source>The entered address "%1" is already in the address book.</source>
<translation>Den oppgitte adressen "%1" er allerede i adresseboken.</translation>
</message>
<message>
<location line="-5"/>
<source>The entered address "%1" is not a valid RupayaCoin address.</source>
<translation>Den angitte adressed "%1" er ikke en gyldig RupayaCoin-adresse.</translation>
</message>
<message>
<location line="+10"/>
<source>Could not unlock wallet.</source>
<translation>Kunne ikke låse opp lommeboken.</translation>
</message>
<message>
<location line="+5"/>
<source>New key generation failed.</source>
<translation>Generering av ny nøkkel feilet.</translation>
</message>
</context>
<context>
<name>GUIUtil::HelpMessageBox</name>
<message>
<location filename="../guiutil.cpp" line="+424"/>
<location line="+12"/>
<source>RupayaCoin-Qt</source>
<translation>RupayaCoin-Qt</translation>
</message>
<message>
<location line="-12"/>
<source>version</source>
<translation>versjon</translation>
</message>
<message>
<location line="+2"/>
<source>Usage:</source>
<translation>Bruk:</translation>
</message>
<message>
<location line="+1"/>
<source>command-line options</source>
<translation>kommandolinjevalg</translation>
</message>
<message>
<location line="+4"/>
<source>UI options</source>
<translation>valg i brukergrensesnitt</translation>
</message>
<message>
<location line="+1"/>
<source>Set language, for example "de_DE" (default: system locale)</source>
<translation>Sett språk, for eksempel "nb_NO" (standardverdi: fra operativsystem)</translation>
</message>
<message>
<location line="+1"/>
<source>Start minimized</source>
<translation>Start minimert</translation>
</message>
<message>
<location line="+1"/>
<source>Show splash screen on startup (default: 1)</source>
<translation>Vis splashskjerm ved oppstart (standardverdi: 1)</translation>
</message>
</context>
<context>
<name>OptionsDialog</name>
<message>
<location filename="../forms/optionsdialog.ui" line="+14"/>
<source>Options</source>
<translation>Innstillinger</translation>
</message>
<message>
<location line="+16"/>
<source>&Main</source>
<translation>&Hoved</translation>
</message>
<message>
<location line="+6"/>
<source>Optional transaction fee per kB that helps make sure your transactions are processed quickly. Most transactions are 1 kB.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>Pay transaction &fee</source>
<translation>Betal transaksjons&gebyr</translation>
</message>
<message>
<location line="+31"/>
<source>Automatically start RupayaCoin after logging in to the system.</source>
<translation>Start RupayaCoin automatisk etter innlogging.</translation>
</message>
<message>
<location line="+3"/>
<source>&Start RupayaCoin on system login</source>
<translation>&Start RupayaCoin ved systeminnlogging</translation>
</message>
<message>
<location line="+35"/>
<source>Reset all client options to default.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Reset Options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>&Network</source>
<translation>&Nettverk</translation>
</message>
<message>
<location line="+6"/>
<source>Automatically open the RupayaCoin client port on the router. This only works when your router supports UPnP and it is enabled.</source>
<translation>Åpne automatisk RupayaCoin klientporten på ruteren. Dette virker kun om din ruter støtter UPnP og dette er påslått.</translation>
</message>
<message>
<location line="+3"/>
<source>Map port using &UPnP</source>
<translation>Sett opp port vha. &UPnP</translation>
</message>
<message>
<location line="+7"/>
<source>Connect to the RupayaCoin network through a SOCKS proxy (e.g. when connecting through Tor).</source>
<translation>Koble til RupayaCoin-nettverket gjennom en SOCKS proxy (f.eks. ved tilkobling gjennom Tor).</translation>
</message>
<message>
<location line="+3"/>
<source>&Connect through SOCKS proxy:</source>
<translation>&Koble til gjennom SOCKS proxy:</translation>
</message>
<message>
<location line="+9"/>
<source>Proxy &IP:</source>
<translation>Proxy &IP:</translation>
</message>
<message>
<location line="+19"/>
<source>IP address of the proxy (e.g. 127.0.0.1)</source>
<translation>IP-adresse for mellomtjener (f.eks. 127.0.0.1)</translation>
</message>
<message>
<location line="+7"/>
<source>&Port:</source>
<translation>&Port:</translation>
</message>
<message>
<location line="+19"/>
<source>Port of the proxy (e.g. 9050)</source>
<translation>Proxyens port (f.eks. 9050)</translation>
</message>
<message>
<location line="+7"/>
<source>SOCKS &Version:</source>
<translation>SOCKS &Versjon:</translation>
</message>
<message>
<location line="+13"/>
<source>SOCKS version of the proxy (e.g. 5)</source>
<translation>Proxyens SOCKS versjon (f.eks. 5)</translation>
</message>
<message>
<location line="+36"/>
<source>&Window</source>
<translation>&Vindu</translation>
</message>
<message>
<location line="+6"/>
<source>Show only a tray icon after minimizing the window.</source>
<translation>Vis kun ikon i systemkurv etter minimering av vinduet.</translation>
</message>
<message>
<location line="+3"/>
<source>&Minimize to the tray instead of the taskbar</source>
<translation>&Minimer til systemkurv istedenfor oppgavelinjen</translation>
</message>
<message>
<location line="+7"/>
<source>Minimize instead of exit the application when the window is closed. When this option is enabled, the application will be closed only after selecting Quit in the menu.</source>
<translation>Minimerer vinduet istedenfor å avslutte applikasjonen når vinduet lukkes. Når dette er slått på avsluttes applikasjonen kun ved å velge avslutt i menyen.</translation>
</message>
<message>
<location line="+3"/>
<source>M&inimize on close</source>
<translation>M&inimer ved lukking</translation>
</message>
<message>
<location line="+21"/>
<source>&Display</source>
<translation>&Visning</translation>
</message>
<message>
<location line="+8"/>
<source>User Interface &language:</source>
<translation>&Språk for brukergrensesnitt:</translation>
</message>
<message>
<location line="+13"/>
<source>The user interface language can be set here. This setting will take effect after restarting RupayaCoin.</source>
<translation>Språket for brukergrensesnittet kan settes her. Innstillingen trer i kraft ved omstart av RupayaCoin.</translation>
</message>
<message>
<location line="+11"/>
<source>&Unit to show amounts in:</source>
<translation>&Enhet for visning av beløper:</translation>
</message>
<message>
<location line="+13"/>
<source>Choose the default subdivision unit to show in the interface and when sending coins.</source>
<translation>Velg standard delt enhet for visning i grensesnittet og for sending av RupayaCoins.</translation>
</message>
<message>
<location line="+9"/>
<source>Whether to show RupayaCoin addresses in the transaction list or not.</source>
<translation>Om RupayaCoin-adresser skal vises i transaksjonslisten eller ikke.</translation>
</message>
<message>
<location line="+3"/>
<source>&Display addresses in transaction list</source>
<translation>&Vis adresser i transaksjonslisten</translation>
</message>
<message>
<location line="+71"/>
<source>&OK</source>
<translation>&OK</translation>
</message>
<message>
<location line="+7"/>
<source>&Cancel</source>
<translation>&Avbryt</translation>
</message>
<message>
<location line="+10"/>
<source>&Apply</source>
<translation>&Bruk</translation>
</message>
<message>
<location filename="../optionsdialog.cpp" line="+53"/>
<source>default</source>
<translation>standardverdi</translation>
</message>
<message>
<location line="+130"/>
<source>Confirm options reset</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Some settings may require a client restart to take effect.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Do you want to proceed?</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+42"/>
<location line="+9"/>
<source>Warning</source>
<translation>Advarsel</translation>
</message>
<message>
<location line="-9"/>
<location line="+9"/>
<source>This setting will take effect after restarting RupayaCoin.</source>
<translation>Denne innstillingen trer i kraft etter omstart av RupayaCoin.</translation>
</message>
<message>
<location line="+29"/>
<source>The supplied proxy address is invalid.</source>
<translation>Angitt proxyadresse er ugyldig.</translation>
</message>
</context>
<context>
<name>OverviewPage</name>
<message>
<location filename="../forms/overviewpage.ui" line="+14"/>
<source>Form</source>
<translation>Skjema</translation>
</message>
<message>
<location line="+50"/>
<location line="+166"/>
<source>The displayed information may be out of date. Your wallet automatically synchronizes with the RupayaCoin network after a connection is established, but this process has not completed yet.</source>
<translation>Informasjonen som vises kan være foreldet. Din lommebok synkroniseres automatisk med RupayaCoin-nettverket etter at tilkobling er opprettet, men denne prosessen er ikke ferdig enda.</translation>
</message>
<message>
<location line="-124"/>
<source>Balance:</source>
<translation>Saldo:</translation>
</message>
<message>
<location line="+29"/>
<source>Unconfirmed:</source>
<translation>Ubekreftet:</translation>
</message>
<message>
<location line="-78"/>
<source>Wallet</source>
<translation>Lommebok</translation>
</message>
<message>
<location line="+107"/>
<source>Immature:</source>
<translation>Umoden:</translation>
</message>
<message>
<location line="+13"/>
<source>Mined balance that has not yet matured</source>
<translation>Minet saldo som ikke har modnet enda</translation>
</message>
<message>
<location line="+46"/>
<source><b>Recent transactions</b></source>
<translation><b>Siste transaksjoner</b></translation>
</message>
<message>
<location line="-101"/>
<source>Your current balance</source>
<translation>Din nåværende saldo</translation>
</message>
<message>
<location line="+29"/>
<source>Total of transactions that have yet to be confirmed, and do not yet count toward the current balance</source>
<translation>Totalt antall ubekreftede transaksjoner som ikke telles med i saldo enda</translation>
</message>
<message>
<location filename="../overviewpage.cpp" line="+116"/>
<location line="+1"/>
<source>out of sync</source>
<translation>ute av synk</translation>
</message>
</context>
<context>
<name>PaymentServer</name>
<message>
<location filename="../paymentserver.cpp" line="+107"/>
<source>Cannot start RupayaCoin: click-to-pay handler</source>
<translation>Kan ikke starte RupayaCoin: klikk-og-betal håndterer</translation>
</message>
</context>
<context>
<name>QRCodeDialog</name>
<message>
<location filename="../forms/qrcodedialog.ui" line="+14"/>
<source>QR Code Dialog</source>
<translation>Dialog for QR Kode</translation>
</message>
<message>
<location line="+59"/>
<source>Request Payment</source>
<translation>Etterspør Betaling</translation>
</message>
<message>
<location line="+56"/>
<source>Amount:</source>
<translation>Beløp:</translation>
</message>
<message>
<location line="-44"/>
<source>Label:</source>
<translation>Merkelapp:</translation>
</message>
<message>
<location line="+19"/>
<source>Message:</source>
<translation>Melding:</translation>
</message>
<message>
<location line="+71"/>
<source>&Save As...</source>
<translation>&Lagre Som...</translation>
</message>
<message>
<location filename="../qrcodedialog.cpp" line="+62"/>
<source>Error encoding URI into QR Code.</source>
<translation>Feil ved koding av URI i QR kode.</translation>
</message>
<message>
<location line="+40"/>
<source>The entered amount is invalid, please check.</source>
<translation>Angitt beløp er ugyldig.</translation>
</message>
<message>
<location line="+23"/>
<source>Resulting URI too long, try to reduce the text for label / message.</source>
<translation>Resulterende URI for lang, prøv å redusere teksten for merkelapp / melding.</translation>
</message>
<message>
<location line="+25"/>
<source>Save QR Code</source>
<translation>Lagre QR Kode</translation>
</message>
<message>
<location line="+0"/>
<source>PNG Images (*.png)</source>
<translation>PNG bilder (*.png)</translation>
</message>
</context>
<context>
<name>RPCConsole</name>
<message>
<location filename="../forms/rpcconsole.ui" line="+46"/>
<source>Client name</source>
<translation>Klientnavn</translation>
</message>
<message>
<location line="+10"/>
<location line="+23"/>
<location line="+26"/>
<location line="+23"/>
<location line="+23"/>
<location line="+36"/>
<location line="+53"/>
<location line="+23"/>
<location line="+23"/>
<location filename="../rpcconsole.cpp" line="+339"/>
<source>N/A</source>
<translation>-</translation>
</message>
<message>
<location line="-217"/>
<source>Client version</source>
<translation>Klientversjon</translation>
</message>
<message>
<location line="-45"/>
<source>&Information</source>
<translation>&Informasjon</translation>
</message>
<message>
<location line="+68"/>
<source>Using OpenSSL version</source>
<translation>Bruker OpenSSL versjon</translation>
</message>
<message>
<location line="+49"/>
<source>Startup time</source>
<translation>Oppstartstidspunkt</translation>
</message>
<message>
<location line="+29"/>
<source>Network</source>
<translation>Nettverk</translation>
</message>
<message>
<location line="+7"/>
<source>Number of connections</source>
<translation>Antall tilkoblinger</translation>
</message>
<message>
<location line="+23"/>
<source>On testnet</source>
<translation>På testnett</translation>
</message>
<message>
<location line="+23"/>
<source>Block chain</source>
<translation>Blokkjeden</translation>
</message>
<message>
<location line="+7"/>
<source>Current number of blocks</source>
<translation>Nåværende antall blokker</translation>
</message>
<message>
<location line="+23"/>
<source>Estimated total blocks</source>
<translation>Estimert totalt antall blokker</translation>
</message>
<message>
<location line="+23"/>
<source>Last block time</source>
<translation>Tidspunkt for siste blokk</translation>
</message>
<message>
<location line="+52"/>
<source>&Open</source>
<translation>&Åpne</translation>
</message>
<message>
<location line="+16"/>
<source>Command-line options</source>
<translation>Kommandolinjevalg</translation>
</message>
<message>
<location line="+7"/>
<source>Show the RupayaCoin-Qt help message to get a list with possible RupayaCoin command-line options.</source>
<translation>Vis RupayaCoin-Qt hjelpemelding for å få en liste med mulige kommandolinjevalg.</translation>
</message>
<message>
<location line="+3"/>
<source>&Show</source>
<translation>&Vis</translation>
</message>
<message>
<location line="+24"/>
<source>&Console</source>
<translation>&Konsoll</translation>
</message>
<message>
<location line="-260"/>
<source>Build date</source>
<translation>Byggedato</translation>
</message>
<message>
<location line="-104"/>
<source>RupayaCoin - Debug window</source>
<translation>RupayaCoin - vindu for feilsøk</translation>
</message>
<message>
<location line="+25"/>
<source>RupayaCoin Core</source>
<translation>RupayaCoin Kjerne</translation>
</message>
<message>
<location line="+279"/>
<source>Debug log file</source>
<translation>Loggfil for feilsøk</translation>
</message>
<message>
<location line="+7"/>
<source>Open the RupayaCoin debug log file from the current data directory. This can take a few seconds for large log files.</source>
<translation>Åpne RupayaCoin loggfil for feilsøk fra datamappen. Dette kan ta noen sekunder for store loggfiler.</translation>
</message>
<message>
<location line="+102"/>
<source>Clear console</source>
<translation>Tøm konsoll</translation>
</message>
<message>
<location filename="../rpcconsole.cpp" line="-30"/>
<source>Welcome to the RupayaCoin RPC console.</source>
<translation>Velkommen til RupayaCoin RPC konsoll.</translation>
</message>
<message>
<location line="+1"/>
<source>Use up and down arrows to navigate history, and <b>Ctrl-L</b> to clear screen.</source>
<translation>Bruk opp og ned pil for å navigere historikken, og <b>Ctrl-L</b> for å tømme skjermen.</translation>
</message>
<message>
<location line="+1"/>
<source>Type <b>help</b> for an overview of available commands.</source>
<translation>Skriv <b>help</b> for en oversikt over kommandoer.</translation>
</message>
</context>
<context>
<name>SendCoinsDialog</name>
<message>
<location filename="../forms/sendcoinsdialog.ui" line="+14"/>
<location filename="../sendcoinsdialog.cpp" line="+124"/>
<location line="+5"/>
<location line="+5"/>
<location line="+5"/>
<location line="+6"/>
<location line="+5"/>
<location line="+5"/>
<source>Send Coins</source>
<translation>Send RupayaCoins</translation>
</message>
<message>
<location line="+50"/>
<source>Send to multiple recipients at once</source>
<translation>Send til flere enn én mottaker</translation>
</message>
<message>
<location line="+3"/>
<source>Add &Recipient</source>
<translation>&Legg til Mottaker</translation>
</message>
<message>
<location line="+20"/>
<source>Remove all transaction fields</source>
<translation>Fjern alle transaksjonsfelter</translation>
</message>
<message>
<location line="+3"/>
<source>Clear &All</source>
<translation>Fjern &Alt</translation>
</message>
<message>
<location line="+22"/>
<source>Balance:</source>
<translation>Saldo:</translation>
</message>
<message>
<location line="+10"/>
<source>123.456 BTC</source>
<translation>123.456 BTC</translation>
</message>
<message>
<location line="+31"/>
<source>Confirm the send action</source>
<translation>Bekreft sending</translation>
</message>
<message>
<location line="+3"/>
<source>S&end</source>
<translation>S&end</translation>
</message>
<message>
<location filename="../sendcoinsdialog.cpp" line="-59"/>
<source><b>%1</b> to %2 (%3)</source>
<translation><b>%1</b> til %2 (%3)</translation>
</message>
<message>
<location line="+5"/>
<source>Confirm send coins</source>
<translation>Bekreft sending av RupayaCoins</translation>
</message>
<message>
<location line="+1"/>
<source>Are you sure you want to send %1?</source>
<translation>Er du sikker på at du vil sende %1?</translation>
</message>
<message>
<location line="+0"/>
<source> and </source>
<translation> og </translation>
</message>
<message>
<location line="+23"/>
<source>The recipient address is not valid, please recheck.</source>
<translation>Adresse for mottaker er ugyldig.</translation>
</message>
<message>
<location line="+5"/>
<source>The amount to pay must be larger than 0.</source>
<translation>Beløpet som skal betales må være over 0.</translation>
</message>
<message>
<location line="+5"/>
<source>The amount exceeds your balance.</source>
<translation>Beløpet overstiger saldo.</translation>
</message>
<message>
<location line="+5"/>
<source>The total exceeds your balance when the %1 transaction fee is included.</source>
<translation>Totalbeløpet overstiger saldo etter at %1 transaksjonsgebyr er lagt til.</translation>
</message>
<message>
<location line="+6"/>
<source>Duplicate address found, can only send to each address once per send operation.</source>
<translation>Duplikate adresser funnet. Kan bare sende én gang til hver adresse per operasjon.</translation>
</message>
<message>
<location line="+5"/>
<source>Error: Transaction creation failed!</source>
<translation>Feil: Opprettelse av transaksjon feilet!</translation>
</message>
<message>
<location line="+5"/>
<source>Error: The transaction was rejected. This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation>Feil: Transaksjonen ble avvist. Dette kan skje om noe av beløpet allerede var brukt, f.eks. hvis du kopierte wallet.dat og noen RupayaCoins ble brukt i kopien men ikke ble markert som brukt her.</translation>
</message>
</context>
<context>
<name>SendCoinsEntry</name>
<message>
<location filename="../forms/sendcoinsentry.ui" line="+14"/>
<source>Form</source>
<translation>Skjema</translation>
</message>
<message>
<location line="+15"/>
<source>A&mount:</source>
<translation>&Beløp:</translation>
</message>
<message>
<location line="+13"/>
<source>Pay &To:</source>
<translation>Betal &Til:</translation>
</message>
<message>
<location line="+34"/>
<source>The address to send the payment to (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source>
<translation>Adressen betalingen skal sendes til (f.eks. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</translation>
</message>
<message>
<location line="+60"/>
<location filename="../sendcoinsentry.cpp" line="+26"/>
<source>Enter a label for this address to add it to your address book</source>
<translation>Skriv inn en merkelapp for denne adressen for å legge den til i din adressebok</translation>
</message>
<message>
<location line="-78"/>
<source>&Label:</source>
<translation>&Merkelapp:</translation>
</message>
<message>
<location line="+28"/>
<source>Choose address from address book</source>
<translation>Velg adresse fra adresseboken</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<location line="+7"/>
<source>Paste address from clipboard</source>
<translation>Lim inn adresse fra utklippstavlen</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation>Alt+P</translation>
</message>
<message>
<location line="+7"/>
<source>Remove this recipient</source>
<translation>Fjern denne mottakeren</translation>
</message>
<message>
<location filename="../sendcoinsentry.cpp" line="+1"/>
<source>Enter a RupayaCoin address (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source>
<translation>Skriv inn en RupayaCoin adresse (f.eks. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</translation>
</message>
</context>
<context>
<name>SignVerifyMessageDialog</name>
<message>
<location filename="../forms/signverifymessagedialog.ui" line="+14"/>
<source>Signatures - Sign / Verify a Message</source>
<translation>Signaturer - Signer / Verifiser en melding</translation>
</message>
<message>
<location line="+13"/>
<source>&Sign Message</source>
<translation>&Signér Melding</translation>
</message>
<message>
<location line="+6"/>
<source>You can sign messages with your addresses to prove you own them. Be careful not to sign anything vague, as phishing attacks may try to trick you into signing your identity over to them. Only sign fully-detailed statements you agree to.</source>
<translation>Du kan signere meldinger med dine adresser for å bevise at du eier dem. Ikke signér vage meldinger da phishing-angrep kan prøve å lure deg til å signere din identitet over til andre. Signér kun fullt detaljerte utsagn som du er enig i.</translation>
</message>
<message>
<location line="+18"/>
<source>The address to sign the message with (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source>
<translation>Adressen for signering av meldingen (f.eks. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</translation>
</message>
<message>
<location line="+10"/>
<location line="+213"/>
<source>Choose an address from the address book</source>
<translation>Velg en adresse fra adresseboken</translation>
</message>
<message>
<location line="-203"/>
<location line="+213"/>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<location line="-203"/>
<source>Paste address from clipboard</source>
<translation>Lim inn adresse fra utklippstavlen</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation>Alt+P</translation>
</message>
<message>
<location line="+12"/>
<source>Enter the message you want to sign here</source>
<translation>Skriv inn meldingen du vil signere her</translation>
</message>
<message>
<location line="+7"/>
<source>Signature</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+27"/>
<source>Copy the current signature to the system clipboard</source>
<translation>Kopier valgt signatur til utklippstavle</translation>
</message>
<message>
<location line="+21"/>
<source>Sign the message to prove you own this RupayaCoin address</source>
<translation>Signer meldingen for å bevise at du eier denne RupayaCoin-adressen</translation>
</message>
<message>
        <location line="+3"/>
        <source>Sign &Message</source>
        <translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>Reset all sign message fields</source>
<translation>Tilbakestill alle felter for meldingssignering</translation>
</message>
<message>
<location line="+3"/>
<location line="+146"/>
<source>Clear &All</source>
<translation>Fjern &Alt</translation>
</message>
<message>
<location line="-87"/>
<source>&Verify Message</source>
<translation>&Verifiser Melding</translation>
</message>
<message>
<location line="+6"/>
<source>Enter the signing address, message (ensure you copy line breaks, spaces, tabs, etc. exactly) and signature below to verify the message. Be careful not to read more into the signature than what is in the signed message itself, to avoid being tricked by a man-in-the-middle attack.</source>
<translation>Angi adresse for signering, melding (vær sikker på at du kopierer linjeskift, mellomrom, tab, etc. helt nøyaktig) og signatur under for å verifisere meldingen. Vær forsiktig med at du ikke gir signaturen mer betydning enn det som faktisk står i meldingen, for å unngå å bli lurt av såkalte "man-in-the-middle" angrep.</translation>
</message>
<message>
<location line="+21"/>
<source>The address the message was signed with (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source>
<translation>Adressen meldingen var signert med (f.eks. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</translation>
</message>
<message>
<location line="+40"/>
<source>Verify the message to ensure it was signed with the specified RupayaCoin address</source>
<translation>Verifiser meldingen for å være sikker på at den ble signert av den angitte RupayaCoin-adressen</translation>
</message>
<message>
<location line="+3"/>
<source>Verify &Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>Reset all verify message fields</source>
<translation>Tilbakestill alle felter for meldingsverifikasjon</translation>
</message>
<message>
<location filename="../signverifymessagedialog.cpp" line="+27"/>
<location line="+3"/>
<source>Enter a RupayaCoin address (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source>
<translation>Skriv inn en RupayaCoin adresse (f.eks. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</translation>
</message>
<message>
<location line="-2"/>
<source>Click "Sign Message" to generate signature</source>
<translation>Klikk "Signer Melding" for å generere signatur</translation>
</message>
<message>
<location line="+3"/>
<source>Enter RupayaCoin signature</source>
<translation>Angi RupayaCoin signatur</translation>
</message>
<message>
<location line="+82"/>
<location line="+81"/>
<source>The entered address is invalid.</source>
<translation>Angitt adresse er ugyldig.</translation>
</message>
<message>
<location line="-81"/>
<location line="+8"/>
<location line="+73"/>
<location line="+8"/>
<source>Please check the address and try again.</source>
<translation>Vennligst sjekk adressen og prøv igjen.</translation>
</message>
<message>
<location line="-81"/>
<location line="+81"/>
<source>The entered address does not refer to a key.</source>
<translation>Angitt adresse refererer ikke til en nøkkel.</translation>
</message>
<message>
<location line="-73"/>
<source>Wallet unlock was cancelled.</source>
<translation>Opplåsing av lommebok ble avbrutt.</translation>
</message>
<message>
<location line="+8"/>
<source>Private key for the entered address is not available.</source>
<translation>Privat nøkkel for den angitte adressen er ikke tilgjengelig.</translation>
</message>
<message>
<location line="+12"/>
<source>Message signing failed.</source>
<translation>Signering av melding feilet.</translation>
</message>
<message>
<location line="+5"/>
<source>Message signed.</source>
<translation>Melding signert.</translation>
</message>
<message>
<location line="+59"/>
<source>The signature could not be decoded.</source>
<translation>Signaturen kunne ikke dekodes.</translation>
</message>
<message>
<location line="+0"/>
<location line="+13"/>
<source>Please check the signature and try again.</source>
<translation>Vennligst sjekk signaturen og prøv igjen.</translation>
</message>
<message>
<location line="+0"/>
<source>The signature did not match the message digest.</source>
<translation>Signaturen passer ikke til meldingen.</translation>
</message>
<message>
<location line="+7"/>
<source>Message verification failed.</source>
<translation>Verifikasjon av melding feilet.</translation>
</message>
<message>
<location line="+5"/>
<source>Message verified.</source>
<translation>Melding verifisert.</translation>
</message>
</context>
<context>
<name>SplashScreen</name>
<message>
<location filename="../splashscreen.cpp" line="+22"/>
<source>The RupayaCoin developers</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>[testnet]</source>
<translation>[testnett]</translation>
</message>
</context>
<context>
<name>TransactionDesc</name>
<message>
<location filename="../transactiondesc.cpp" line="+20"/>
<source>Open until %1</source>
<translation>Åpen til %1</translation>
</message>
<message>
<location line="+6"/>
<source>%1/offline</source>
<translation>%1/frakoblet</translation>
</message>
<message>
<location line="+2"/>
<source>%1/unconfirmed</source>
<translation>%1/ubekreftet</translation>
</message>
<message>
<location line="+2"/>
<source>%1 confirmations</source>
<translation>%1 bekreftelser</translation>
</message>
<message>
<location line="+18"/>
<source>Status</source>
<translation>Status</translation>
</message>
<message numerus="yes">
<location line="+7"/>
<source>, broadcast through %n node(s)</source>
<translation><numerusform>, kringkast gjennom %n node</numerusform><numerusform>, kringkast gjennom %n noder</numerusform></translation>
</message>
<message>
<location line="+4"/>
<source>Date</source>
<translation>Dato</translation>
</message>
<message>
<location line="+7"/>
<source>Source</source>
<translation>Kilde</translation>
</message>
<message>
<location line="+0"/>
<source>Generated</source>
<translation>Generert</translation>
</message>
<message>
<location line="+5"/>
<location line="+17"/>
<source>From</source>
<translation>Fra</translation>
</message>
<message>
<location line="+1"/>
<location line="+22"/>
<location line="+58"/>
<source>To</source>
<translation>Til</translation>
</message>
<message>
<location line="-77"/>
<location line="+2"/>
<source>own address</source>
<translation>egen adresse</translation>
</message>
<message>
<location line="-2"/>
<source>label</source>
<translation>merkelapp</translation>
</message>
<message>
<location line="+37"/>
<location line="+12"/>
<location line="+45"/>
<location line="+17"/>
<location line="+30"/>
<source>Credit</source>
<translation>Kredit</translation>
</message>
<message numerus="yes">
<location line="-102"/>
<source>matures in %n more block(s)</source>
<translation><numerusform>blir moden om %n blokk</numerusform><numerusform>blir moden om %n blokker</numerusform></translation>
</message>
<message>
<location line="+2"/>
<source>not accepted</source>
<translation>ikke akseptert</translation>
</message>
<message>
<location line="+44"/>
<location line="+8"/>
<location line="+15"/>
<location line="+30"/>
<source>Debit</source>
<translation>Debet</translation>
</message>
<message>
<location line="-39"/>
<source>Transaction fee</source>
<translation>Transaksjonsgebyr</translation>
</message>
<message>
<location line="+16"/>
<source>Net amount</source>
<translation>Nettobeløp</translation>
</message>
<message>
<location line="+6"/>
<source>Message</source>
<translation>Melding</translation>
</message>
<message>
<location line="+2"/>
<source>Comment</source>
<translation>Kommentar</translation>
</message>
<message>
<location line="+2"/>
<source>Transaction ID</source>
<translation>Transaksjons-ID</translation>
</message>
<message>
<location line="+3"/>
<source>Generated coins must mature 120 blocks before they can be spent. When you generated this block, it was broadcast to the network to be added to the block chain. If it fails to get into the chain, its state will change to "not accepted" and it won't be spendable. This may occasionally happen if another node generates a block within a few seconds of yours.</source>
<translation>Genererte RupayaCoins må modnes 120 blokker før de kan brukes. Da du genererte denne blokken ble den kringkastet til nettverket for å legges til i blokkjeden. Hvis den ikke kommer inn i kjeden får den tilstanden "ikke akseptert" og vil ikke kunne brukes. Dette skjer noen ganger hvis en annen node genererer en blokk noen sekunder fra din.</translation>
</message>
<message>
<location line="+7"/>
<source>Debug information</source>
<translation>Informasjon for feilsøk</translation>
</message>
<message>
<location line="+8"/>
<source>Transaction</source>
<translation>Transaksjon</translation>
</message>
<message>
<location line="+3"/>
<source>Inputs</source>
<translation>Inndata</translation>
</message>
<message>
<location line="+23"/>
<source>Amount</source>
<translation>Beløp</translation>
</message>
<message>
<location line="+1"/>
<source>true</source>
<translation>sann</translation>
</message>
<message>
<location line="+0"/>
<source>false</source>
<translation>usann</translation>
</message>
<message>
<location line="-209"/>
<source>, has not been successfully broadcast yet</source>
<translation>, har ikke blitt kringkastet uten problemer enda.</translation>
</message>
<message numerus="yes">
<location line="-35"/>
<source>Open for %n more block(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location line="+70"/>
<source>unknown</source>
<translation>ukjent</translation>
</message>
</context>
<context>
<name>TransactionDescDialog</name>
<message>
<location filename="../forms/transactiondescdialog.ui" line="+14"/>
<source>Transaction details</source>
<translation>Transaksjonsdetaljer</translation>
</message>
<message>
<location line="+6"/>
<source>This pane shows a detailed description of the transaction</source>
<translation>Her vises en detaljert beskrivelse av transaksjonen</translation>
</message>
</context>
<context>
<name>TransactionTableModel</name>
<message>
<location filename="../transactiontablemodel.cpp" line="+225"/>
<source>Date</source>
<translation>Dato</translation>
</message>
<message>
<location line="+0"/>
<source>Type</source>
<translation>Type</translation>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation>Adresse</translation>
</message>
<message>
<location line="+0"/>
<source>Amount</source>
<translation>Beløp</translation>
</message>
<message numerus="yes">
<location line="+57"/>
<source>Open for %n more block(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location line="+3"/>
<source>Open until %1</source>
<translation>Åpen til %1</translation>
</message>
<message>
<location line="+3"/>
<source>Offline (%1 confirmations)</source>
<translation>Frakoblet (%1 bekreftelser)</translation>
</message>
<message>
<location line="+3"/>
<source>Unconfirmed (%1 of %2 confirmations)</source>
<translation>Ubekreftet (%1 av %2 bekreftelser)</translation>
</message>
<message>
<location line="+3"/>
<source>Confirmed (%1 confirmations)</source>
<translation>Bekreftet (%1 bekreftelser)</translation>
</message>
<message numerus="yes">
<location line="+8"/>
<source>Mined balance will be available when it matures in %n more block(s)</source>
<translation><numerusform>Minet saldo blir tilgjengelig når den modner om %n blokk</numerusform><numerusform>Minet saldo blir tilgjengelig når den modner om %n blokker</numerusform></translation>
</message>
<message>
<location line="+5"/>
<source>This block was not received by any other nodes and will probably not be accepted!</source>
<translation>Denne blokken har ikke blitt mottatt av noen andre noder og vil sannsynligvis ikke bli akseptert!</translation>
</message>
<message>
<location line="+3"/>
<source>Generated but not accepted</source>
<translation>Generert men ikke akseptert</translation>
</message>
<message>
<location line="+43"/>
<source>Received with</source>
<translation>Mottatt med</translation>
</message>
<message>
<location line="+2"/>
<source>Received from</source>
<translation>Mottatt fra</translation>
</message>
<message>
<location line="+3"/>
<source>Sent to</source>
<translation>Sendt til</translation>
</message>
<message>
<location line="+2"/>
<source>Payment to yourself</source>
<translation>Betaling til deg selv</translation>
</message>
<message>
<location line="+2"/>
<source>Mined</source>
<translation>Utvunnet</translation>
</message>
<message>
<location line="+38"/>
<source>(n/a)</source>
<translation>-</translation>
</message>
<message>
<location line="+199"/>
<source>Transaction status. Hover over this field to show number of confirmations.</source>
<translation>Transaksjonsstatus. Hold muspekeren over dette feltet for å se antall bekreftelser.</translation>
</message>
<message>
<location line="+2"/>
<source>Date and time that the transaction was received.</source>
        <translation>Dato og tid for da transaksjonen ble mottatt.</translation>
</message>
<message>
<location line="+2"/>
<source>Type of transaction.</source>
<translation>Type transaksjon.</translation>
</message>
<message>
<location line="+2"/>
<source>Destination address of transaction.</source>
        <translation>Mottaksadresse for transaksjonen.</translation>
</message>
<message>
<location line="+2"/>
<source>Amount removed from or added to balance.</source>
<translation>Beløp fjernet eller lagt til saldo.</translation>
</message>
</context>
<context>
<name>TransactionView</name>
<message>
<location filename="../transactionview.cpp" line="+52"/>
<location line="+16"/>
<source>All</source>
<translation>Alle</translation>
</message>
<message>
<location line="-15"/>
<source>Today</source>
<translation>I dag</translation>
</message>
<message>
<location line="+1"/>
<source>This week</source>
<translation>Denne uken</translation>
</message>
<message>
<location line="+1"/>
<source>This month</source>
<translation>Denne måneden</translation>
</message>
<message>
<location line="+1"/>
<source>Last month</source>
<translation>Forrige måned</translation>
</message>
<message>
<location line="+1"/>
<source>This year</source>
<translation>Dette året</translation>
</message>
<message>
<location line="+1"/>
<source>Range...</source>
<translation>Intervall...</translation>
</message>
<message>
<location line="+11"/>
<source>Received with</source>
<translation>Mottatt med</translation>
</message>
<message>
<location line="+2"/>
<source>Sent to</source>
<translation>Sendt til</translation>
</message>
<message>
<location line="+2"/>
<source>To yourself</source>
<translation>Til deg selv</translation>
</message>
<message>
<location line="+1"/>
<source>Mined</source>
<translation>Utvunnet</translation>
</message>
<message>
<location line="+1"/>
<source>Other</source>
<translation>Andre</translation>
</message>
<message>
<location line="+7"/>
<source>Enter address or label to search</source>
<translation>Skriv inn adresse eller merkelapp for søk</translation>
</message>
<message>
<location line="+7"/>
<source>Min amount</source>
<translation>Minimumsbeløp</translation>
</message>
<message>
<location line="+34"/>
<source>Copy address</source>
<translation>Kopier adresse</translation>
</message>
<message>
<location line="+1"/>
<source>Copy label</source>
<translation>Kopier merkelapp</translation>
</message>
<message>
<location line="+1"/>
<source>Copy amount</source>
<translation>Kopiér beløp</translation>
</message>
<message>
<location line="+1"/>
<source>Copy transaction ID</source>
<translation>Kopier transaksjons-ID</translation>
</message>
<message>
<location line="+1"/>
<source>Edit label</source>
<translation>Rediger merkelapp</translation>
</message>
<message>
<location line="+1"/>
<source>Show transaction details</source>
<translation>Vis transaksjonsdetaljer</translation>
</message>
<message>
<location line="+139"/>
<source>Export Transaction Data</source>
<translation>Eksporter transaksjonsdata</translation>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
<translation>Kommaseparert fil (*.csv)</translation>
</message>
<message>
<location line="+8"/>
<source>Confirmed</source>
<translation>Bekreftet</translation>
</message>
<message>
<location line="+1"/>
<source>Date</source>
<translation>Dato</translation>
</message>
<message>
<location line="+1"/>
<source>Type</source>
<translation>Type</translation>
</message>
<message>
<location line="+1"/>
<source>Label</source>
<translation>Merkelapp</translation>
</message>
<message>
<location line="+1"/>
<source>Address</source>
<translation>Adresse</translation>
</message>
<message>
<location line="+1"/>
<source>Amount</source>
<translation>Beløp</translation>
</message>
<message>
<location line="+1"/>
<source>ID</source>
<translation>ID</translation>
</message>
<message>
<location line="+4"/>
<source>Error exporting</source>
<translation>Feil ved eksport</translation>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
<translation>Kunne ikke skrive til filen %1.</translation>
</message>
<message>
<location line="+100"/>
<source>Range:</source>
<translation>Intervall:</translation>
</message>
<message>
<location line="+8"/>
<source>to</source>
<translation>til</translation>
</message>
</context>
<context>
<name>WalletModel</name>
<message>
<location filename="../walletmodel.cpp" line="+193"/>
<source>Send Coins</source>
<translation>Send RupayaCoins</translation>
</message>
</context>
<context>
<name>WalletView</name>
<message>
<location filename="../walletview.cpp" line="+42"/>
<source>&Export</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Export the data in the current tab to a file</source>
<translation>Eksporter data fra nåværende fane til fil</translation>
</message>
<message>
<location line="+193"/>
<source>Backup Wallet</source>
<translation>Sikkerhetskopier lommebok</translation>
</message>
<message>
<location line="+0"/>
<source>Wallet Data (*.dat)</source>
<translation>Lommebokdata (*.dat)</translation>
</message>
<message>
<location line="+3"/>
<source>Backup Failed</source>
<translation>Sikkerhetskopiering feilet</translation>
</message>
<message>
<location line="+0"/>
<source>There was an error trying to save the wallet data to the new location.</source>
<translation>En feil oppstod under lagringen av lommeboken til den nye plasseringen.</translation>
</message>
<message>
<location line="+4"/>
<source>Backup Successful</source>
<translation>Sikkerhetskopiering fullført</translation>
</message>
<message>
<location line="+0"/>
<source>The wallet data was successfully saved to the new location.</source>
<translation>Lommebokdata ble lagret til den nye plasseringen. </translation>
</message>
</context>
<context>
<name>bitcoin-core</name>
<message>
<location filename="../bitcoinstrings.cpp" line="+94"/>
<source>RupayaCoin version</source>
<translation>RupayaCoin versjon</translation>
</message>
<message>
<location line="+102"/>
<source>Usage:</source>
<translation>Bruk:</translation>
</message>
<message>
<location line="-29"/>
<source>Send command to -server or RupayaCoind</source>
<translation>Send kommando til -server eller RupayaCoind</translation>
</message>
<message>
<location line="-23"/>
<source>List commands</source>
<translation>List opp kommandoer</translation>
</message>
<message>
<location line="-12"/>
<source>Get help for a command</source>
<translation>Vis hjelpetekst for en kommando</translation>
</message>
<message>
<location line="+24"/>
<source>Options:</source>
<translation>Innstillinger:</translation>
</message>
<message>
<location line="+24"/>
<source>Specify configuration file (default: RupayaCoin.conf)</source>
<translation>Angi konfigurasjonsfil (standardverdi: RupayaCoin.conf)</translation>
</message>
<message>
<location line="+3"/>
<source>Specify pid file (default: RupayaCoind.pid)</source>
<translation>Angi pid-fil (standardverdi: RupayaCoind.pid)</translation>
</message>
<message>
<location line="-1"/>
<source>Specify data directory</source>
<translation>Angi mappe for datafiler</translation>
</message>
<message>
<location line="-9"/>
<source>Set database cache size in megabytes (default: 25)</source>
<translation>Sett størrelse på mellomlager for database i megabytes (standardverdi: 25)</translation>
</message>
<message>
<location line="-28"/>
<source>Listen for connections on <port> (default: 22556 or testnet: 44556)</source>
<translation>Lytt etter tilkoblinger på <port> (standardverdi: 22556 eller testnet: 44556)</translation>
</message>
<message>
<location line="+5"/>
<source>Maintain at most <n> connections to peers (default: 125)</source>
<translation>Hold maks <n> koblinger åpne til andre noder (standardverdi: 125)</translation>
</message>
<message>
<location line="-48"/>
<source>Connect to a node to retrieve peer addresses, and disconnect</source>
<translation>Koble til node for å hente adresser til andre noder, koble så fra igjen</translation>
</message>
<message>
<location line="+82"/>
<source>Specify your own public address</source>
<translation>Angi din egen offentlige adresse</translation>
</message>
<message>
<location line="+3"/>
<source>Threshold for disconnecting misbehaving peers (default: 100)</source>
<translation>Grenseverdi for å koble fra noder med dårlig oppførsel (standardverdi: 100)</translation>
</message>
<message>
<location line="-134"/>
<source>Number of seconds to keep misbehaving peers from reconnecting (default: 86400)</source>
<translation>Antall sekunder noder med dårlig oppførsel hindres fra å koble til på nytt (standardverdi: 86400)</translation>
</message>
<message>
<location line="-29"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv4: %s</source>
        <translation>En feil oppstod ved opprettelse av RPC port %u for lytting på IPv4: %s</translation>
</message>
<message>
<location line="+27"/>
<source>Listen for JSON-RPC connections on <port> (default: 22555 or testnet: 44555)</source>
        <translation>Lytt etter JSON-RPC tilkoblinger på <port> (standardverdi: 22555 eller testnet: 44555)</translation>
</message>
<message>
<location line="+37"/>
<source>Accept command line and JSON-RPC commands</source>
<translation>Ta imot kommandolinje- og JSON-RPC-kommandoer</translation>
</message>
<message>
<location line="+76"/>
<source>Run in the background as a daemon and accept commands</source>
<translation>Kjør i bakgrunnen som daemon og ta imot kommandoer</translation>
</message>
<message>
<location line="+37"/>
<source>Use the test network</source>
<translation>Bruk testnettverket</translation>
</message>
<message>
<location line="-112"/>
<source>Accept connections from outside (default: 1 if no -proxy or -connect)</source>
<translation>Ta imot tilkoblinger fra utsiden (standardverdi: 1 hvis uten -proxy eller -connect)</translation>
</message>
<message>
<location line="-80"/>
<source>%s, you must set a rpcpassword in the configuration file:
%s
It is recommended you use the following random password:
rpcuser=RupayaCoinrpc
rpcpassword=%s
(you do not need to remember this password)
The username and password MUST NOT be the same.
If the file does not exist, create it with owner-readable-only file permissions.
It is also recommended to set alertnotify so you are notified of problems;
for example: alertnotify=echo %%s | mail -s "RupayaCoin Alert" [email protected]
</source>
        <translation>%s, du må angi rpcpassword i konfigurasjonsfilen:
%s
Det anbefales at du bruker det følgende tilfeldige passordet:
rpcuser=RupayaCoinrpc
rpcpassword=%s
(du behøver ikke å huske passordet)
Brukernavnet og passordet MÅ IKKE være like.
Om filen ikke eksisterer, opprett den nå med eier-kun-les filrettigheter.
Det er også anbefalt å sette alertnotify slik at du får melding om problemer;
for eksempel: alertnotify=echo %%s | mail -s "RupayaCoin varsel" [email protected]</translation>
</message>
<message>
<location line="+17"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv6, falling back to IPv4: %s</source>
<translation>En feil oppstod under oppsettet av RPC port %u for IPv6, tilbakestilles til IPv4: %s</translation>
</message>
<message>
<location line="+3"/>
<source>Bind to given address and always listen on it. Use [host]:port notation for IPv6</source>
<translation>Bind til angitt adresse. Bruk [vertsmaskin]:port notasjon for IPv6</translation>
</message>
<message>
<location line="+3"/>
<source>Cannot obtain a lock on data directory %s. RupayaCoin is probably already running.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Error: The transaction was rejected! This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Error: This transaction requires a transaction fee of at least %s because of its amount, complexity, or use of recently received funds!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Execute command when a relevant alert is received (%s in cmd is replaced by message)</source>
        <translation>Kjør kommando når relevant varsel blir mottatt (%s i cmd er erstattet med melding)</translation>
</message>
<message>
<location line="+3"/>
<source>Execute command when a wallet transaction changes (%s in cmd is replaced by TxID)</source>
<translation>Kjør kommando når en lommeboktransaksjon endres (%s i cmd er erstattet med TxID)</translation>
</message>
<message>
<location line="+11"/>
<source>Set maximum size of high-priority/low-fee transactions in bytes (default: 27000)</source>
<translation>Sett maks størrelse for transaksjoner med høy prioritet / lavt gebyr, i bytes (standardverdi: 27000)</translation>
</message>
<message>
<location line="+6"/>
<source>This is a pre-release test build - use at your own risk - do not use for mining or merchant applications</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Warning: -paytxfee is set very high! This is the transaction fee you will pay if you send a transaction.</source>
<translation>Advarsel: -paytxfee er satt veldig høyt! Dette er transaksjonsgebyret du betaler når du sender transaksjoner.</translation>
</message>
<message>
<location line="+3"/>
<source>Warning: Displayed transactions may not be correct! You may need to upgrade, or other nodes may need to upgrade.</source>
<translation>Advarsel: Viste transaksjoner kan være feil! Du, eller andre noder, kan trenge en oppgradering.</translation>
</message>
<message>
<location line="+3"/>
<source>Warning: Please check that your computer's date and time are correct! If your clock is wrong RupayaCoin will not work properly.</source>
<translation>Advarsel: Vennligst undersøk at din datamaskin har riktig dato og klokkeslett! Hvis klokken er stilt feil vil ikke RupayaCoin fungere riktig.</translation>
</message>
<message>
<location line="+3"/>
<source>Warning: error reading wallet.dat! All keys read correctly, but transaction data or address book entries might be missing or incorrect.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Warning: wallet.dat corrupt, data salvaged! Original wallet.dat saved as wallet.{timestamp}.bak in %s; if your balance or transactions are incorrect you should restore from a backup.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>Attempt to recover private keys from a corrupt wallet.dat</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Block creation options:</source>
<translation>Valg for opprettelse av blokker:</translation>
</message>
<message>
<location line="+5"/>
<source>Connect only to the specified node(s)</source>
<translation>Koble kun til angitt(e) node(r)</translation>
</message>
<message>
<location line="+3"/>
<source>Corrupted block database detected</source>
<translation>Oppdaget korrupt blokkdatabase</translation>
</message>
<message>
<location line="+1"/>
<source>Discover own IP address (default: 1 when listening and no -externalip)</source>
<translation>Oppdag egen IP-adresse (standardverdi: 1 ved lytting og uten -externalip)</translation>
</message>
<message>
<location line="+1"/>
<source>Do you want to rebuild the block database now?</source>
<translation>Ønsker du å gjenopprette blokkdatabasen nå?</translation>
</message>
<message>
<location line="+2"/>
<source>Error initializing block database</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Error initializing wallet database environment %s!</source>
<translation>Feil under oppstart av lommebokdatabasemiljø %s!</translation>
</message>
<message>
<location line="+1"/>
<source>Error loading block database</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Error opening block database</source>
<translation>Feil under åpning av blokkdatabase</translation>
</message>
<message>
<location line="+2"/>
<source>Error: Disk space is low!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Error: Wallet locked, unable to create transaction!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Error: system error: </source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to listen on any port. Use -listen=0 if you want this.</source>
<translation>Kunne ikke lytte på noen port. Bruk -listen=0 hvis det er dette du vil.</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to read block info</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to read block</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to sync block index</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write block index</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write block info</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write block</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write file info</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write to coin database</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write transaction index</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write undo data</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Find peers using DNS lookup (default: 1 unless -connect)</source>
<translation>Finn andre noder gjennom DNS-oppslag (standardverdi: 1 med mindre -connect er oppgitt)</translation>
</message>
<message>
<location line="+1"/>
<source>Generate coins (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>How many blocks to check at startup (default: 288, 0 = all)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>How thorough the block verification is (0-4, default: 3)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+19"/>
<source>Not enough file descriptors available.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Rebuild block chain index from current blk000??.dat files</source>
<translation>Gjenopprett blokkjedeindeks fra blk000??.dat filer</translation>
</message>
<message>
<location line="+16"/>
<source>Set the number of threads to service RPC calls (default: 4)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+26"/>
<source>Verifying blocks...</source>
<translation>Verifiserer blokker...</translation>
</message>
<message>
<location line="+1"/>
<source>Verifying wallet...</source>
<translation>Verifiserer lommebok...</translation>
</message>
<message>
<location line="-69"/>
<source>Imports blocks from external blk000??.dat file</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-76"/>
<source>Set the number of script verification threads (up to 16, 0 = auto, <0 = leave that many cores free, default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+77"/>
<source>Information</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Invalid -tor address: '%s'</source>
<translation>Ugyldig -tor adresse: '%s'</translation>
</message>
<message>
<location line="+1"/>
<source>Invalid amount for -minrelaytxfee=<amount>: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Invalid amount for -mintxfee=<amount>: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Maintain a full transaction index (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Maximum per-connection receive buffer, <n>*1000 bytes (default: 5000)</source>
<translation>Maks mottaksbuffer per forbindelse, <n>*1000 bytes (standardverdi: 5000)</translation>
</message>
<message>
<location line="+1"/>
<source>Maximum per-connection send buffer, <n>*1000 bytes (default: 1000)</source>
<translation>Maks sendebuffer per forbindelse, <n>*1000 bytes (standardverdi: 1000)</translation>
</message>
<message>
<location line="+2"/>
<source>Only accept block chain matching built-in checkpoints (default: 1)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Only connect to nodes in network <net> (IPv4, IPv6 or Tor)</source>
<translation>Koble kun til noder i nettverket <nett> (IPv4, IPv6 eller Tor)</translation>
</message>
<message>
<location line="+2"/>
<source>Output extra debugging information. Implies all other -debug* options</source>
<translation>Skriv ekstra informasjon for feilsøk. Medfører at alle -debug* valg tas med</translation>
</message>
<message>
<location line="+1"/>
<source>Output extra network debugging information</source>
<translation>Skriv ekstra informasjon for feilsøk av nettverk</translation>
</message>
<message>
<location line="+2"/>
<source>Prepend debug output with timestamp (default: 1)</source>
<translation>Sett tidsstempel på debugmeldinger</translation>
</message>
<message>
<location line="+5"/>
<source>SSL options: (see the RupayaCoin Wiki for SSL setup instructions)</source>
<translation>SSL valg: (se RupayaCoin Wiki for instruksjoner for oppsett av SSL)</translation>
</message>
<message>
<location line="+1"/>
<source>Select the version of socks proxy to use (4-5, default: 5)</source>
<translation>Velg versjon av socks proxy (4-5, standardverdi 5)</translation>
</message>
<message>
<location line="+3"/>
<source>Send trace/debug info to console instead of debug.log file</source>
<translation>Send spor/debug informasjon til konsollet istedenfor debug.log filen</translation>
</message>
<message>
<location line="+1"/>
<source>Send trace/debug info to debugger</source>
<translation>Send spor/debug informasjon til debugger</translation>
</message>
<message>
<location line="+5"/>
<source>Set maximum block size in bytes (default: 250000)</source>
<translation>Sett maks blokkstørrelse i bytes (standardverdi: 250000)</translation>
</message>
<message>
<location line="+1"/>
<source>Set minimum block size in bytes (default: 0)</source>
<translation>Sett minimum blokkstørrelse i bytes (standardverdi: 0)</translation>
</message>
<message>
<location line="+2"/>
<source>Shrink debug.log file on client startup (default: 1 when no -debug)</source>
<translation>Krymp debug.log filen når klienten starter (standardverdi: 1 hvis uten -debug)</translation>
</message>
<message>
<location line="+1"/>
<source>Signing transaction failed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Specify connection timeout in milliseconds (default: 5000)</source>
<translation>Angi tidsavbrudd for forbindelse i millisekunder (standardverdi: 5000)</translation>
</message>
<message>
<location line="+4"/>
<source>System error: </source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Transaction amount too small</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Transaction amounts must be positive</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Transaction too large</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Use UPnP to map the listening port (default: 0)</source>
<translation>Bruk UPnP for lytteport (standardverdi: 0)</translation>
</message>
<message>
<location line="+1"/>
<source>Use UPnP to map the listening port (default: 1 when listening)</source>
<translation>Bruk UPnP for lytteport (standardverdi: 1 ved lytting)</translation>
</message>
<message>
<location line="+1"/>
<source>Use proxy to reach tor hidden services (default: same as -proxy)</source>
<translation>Bruk en proxy for å nå skjulte tor tjenester (standardverdi: samme som -proxy)</translation>
</message>
<message>
<location line="+2"/>
<source>Username for JSON-RPC connections</source>
<translation>Brukernavn for JSON-RPC forbindelser</translation>
</message>
<message>
<location line="+4"/>
<source>Warning</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Warning: This version is obsolete, upgrade required!</source>
<translation>Advarsel: Denne versjonen er foreldet, oppgradering kreves!</translation>
</message>
<message>
<location line="+1"/>
<source>You need to rebuild the databases using -reindex to change -txindex</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>wallet.dat corrupt, salvage failed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-50"/>
<source>Password for JSON-RPC connections</source>
<translation>Passord for JSON-RPC forbindelser</translation>
</message>
<message>
<location line="-67"/>
<source>Allow JSON-RPC connections from specified IP address</source>
<translation>Tillat JSON-RPC tilkoblinger fra angitt IP-adresse</translation>
</message>
<message>
<location line="+76"/>
<source>Send commands to node running on <ip> (default: 127.0.0.1)</source>
<translation>Send kommandoer til node på <ip> (standardverdi: 127.0.0.1)</translation>
</message>
<message>
<location line="-120"/>
<source>Execute command when the best block changes (%s in cmd is replaced by block hash)</source>
<translation>Eksekvér kommando når beste blokk endrer seg (%s i kommandoen erstattes med blokkens hash)</translation>
</message>
<message>
<location line="+147"/>
<source>Upgrade wallet to latest format</source>
<translation>Oppgradér lommebok til nyeste format</translation>
</message>
<message>
<location line="-21"/>
<source>Set key pool size to <n> (default: 100)</source>
<translation>Angi størrelse på nøkkel-lager til <n> (standardverdi: 100)</translation>
</message>
<message>
<location line="-12"/>
<source>Rescan the block chain for missing wallet transactions</source>
<translation>Se gjennom blokk-kjeden etter manglende lommeboktransaksjoner</translation>
</message>
<message>
<location line="+35"/>
<source>Use OpenSSL (https) for JSON-RPC connections</source>
<translation>Bruk OpenSSL (https) for JSON-RPC forbindelser</translation>
</message>
<message>
<location line="-26"/>
<source>Server certificate file (default: server.cert)</source>
<translation>Servers sertifikat (standardverdi: server.cert)</translation>
</message>
<message>
<location line="+1"/>
<source>Server private key (default: server.pem)</source>
<translation>Servers private nøkkel (standardverdi: server.pem)</translation>
</message>
<message>
<location line="-151"/>
<source>Acceptable ciphers (default: TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)</source>
<translation>Akseptable krypteringsmetoder (standardverdi: TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)</translation>
</message>
<message>
<location line="+165"/>
<source>This help message</source>
<translation>Denne hjelpemeldingen</translation>
</message>
<message>
<location line="+6"/>
<source>Unable to bind to %s on this computer (bind returned error %d, %s)</source>
<translation>Kan ikke binde til %s på denne datamaskinen (bind returnerte feil %d, %s)</translation>
</message>
<message>
<location line="-91"/>
<source>Connect through socks proxy</source>
<translation>Koble til gjennom socks proxy</translation>
</message>
<message>
<location line="-10"/>
<source>Allow DNS lookups for -addnode, -seednode and -connect</source>
<translation>Tillat DNS oppslag for -addnode, -seednode og -connect</translation>
</message>
<message>
<location line="+55"/>
<source>Loading addresses...</source>
<translation>Laster adresser...</translation>
</message>
<message>
<location line="-35"/>
<source>Error loading wallet.dat: Wallet corrupted</source>
<translation>Feil ved lasting av wallet.dat: Lommeboken er skadet</translation>
</message>
<message>
<location line="+1"/>
<source>Error loading wallet.dat: Wallet requires newer version of RupayaCoin</source>
<translation>Feil ved lasting av wallet.dat: Lommeboken krever en nyere versjon av RupayaCoin</translation>
</message>
<message>
<location line="+93"/>
<source>Wallet needed to be rewritten: restart RupayaCoin to complete</source>
<translation>Lommeboken måtte skrives om: start RupayaCoin på nytt for å fullføre</translation>
</message>
<message>
<location line="-95"/>
<source>Error loading wallet.dat</source>
<translation>Feil ved lasting av wallet.dat</translation>
</message>
<message>
<location line="+28"/>
<source>Invalid -proxy address: '%s'</source>
<translation>Ugyldig -proxy adresse: '%s'</translation>
</message>
<message>
<location line="+56"/>
<source>Unknown network specified in -onlynet: '%s'</source>
<translation>Ukjent nettverk angitt i -onlynet: '%s'</translation>
</message>
<message>
<location line="-1"/>
<source>Unknown -socks proxy version requested: %i</source>
<translation>Ukjent -socks proxy versjon angitt: %i</translation>
</message>
<message>
<location line="-96"/>
<source>Cannot resolve -bind address: '%s'</source>
<translation>Kunne ikke slå opp -bind adresse: '%s'</translation>
</message>
<message>
<location line="+1"/>
<source>Cannot resolve -externalip address: '%s'</source>
<translation>Kunne ikke slå opp -externalip adresse: '%s'</translation>
</message>
<message>
<location line="+44"/>
<source>Invalid amount for -paytxfee=<amount>: '%s'</source>
<translation>Ugyldig beløp for -paytxfee=<beløp>: '%s'</translation>
</message>
<message>
<location line="+1"/>
<source>Invalid amount</source>
<translation>Ugyldig beløp</translation>
</message>
<message>
<location line="-6"/>
<source>Insufficient funds</source>
<translation>Utilstrekkelige midler</translation>
</message>
<message>
<location line="+10"/>
<source>Loading block index...</source>
<translation>Laster blokkindeks...</translation>
</message>
<message>
<location line="-57"/>
<source>Add a node to connect to and attempt to keep the connection open</source>
<translation>Legg til node for tilkobling og hold forbindelsen åpen</translation>
</message>
<message>
<location line="-25"/>
<source>Unable to bind to %s on this computer. RupayaCoin is probably already running.</source>
<translation>Kan ikke binde til %s på denne datamaskinen. Sannsynligvis kjører RupayaCoin allerede.</translation>
</message>
<message>
<location line="+64"/>
<source>Fee per KB to add to transactions you send</source>
<translation>Gebyr per KB for transaksjoner du sender</translation>
</message>
<message>
<location line="+19"/>
<source>Loading wallet...</source>
<translation>Laster lommebok...</translation>
</message>
<message>
<location line="-52"/>
<source>Cannot downgrade wallet</source>
<translation>Kan ikke nedgradere lommebok</translation>
</message>
<message>
<location line="+3"/>
<source>Cannot write default address</source>
<translation>Kan ikke skrive standardadresse</translation>
</message>
<message>
<location line="+64"/>
<source>Rescanning...</source>
<translation>Leser gjennom...</translation>
</message>
<message>
<location line="-57"/>
<source>Done loading</source>
<translation>Ferdig med lasting</translation>
</message>
<message>
<location line="+82"/>
<source>To use the %s option</source>
<translation>For å bruke %s opsjonen</translation>
</message>
<message>
<location line="-74"/>
<source>Error</source>
<translation>Feil</translation>
</message>
<message>
<location line="-31"/>
<source>You must set rpcpassword=<password> in the configuration file:
%s
If the file does not exist, create it with owner-readable-only file permissions.</source>
<translation>Du må sette rpcpassword=<passord> i konfigurasjonsfilen:
%s
Hvis filen ikke finnes, opprett den med leserettighet kun for eier av filen.</translation>
</message>
</context>
</TS><|fim▁end|> | <source>Sign &Message</source> |
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Utilities for querying the layout, as needed by the layout thread.
use app_units::Au;
use construct::ConstructionResult;
use euclid::point::Point2D;
use euclid::rect::Rect;
use euclid::size::Size2D;
use flow;
use flow_ref::FlowRef;
use fragment::{Fragment, FragmentBorderBoxIterator, SpecificFragmentInfo};
use gfx::display_list::{DisplayItemMetadata, DisplayList, OpaqueNode, ScrollOffsetMap};
use gfx_traits::LayerId;
use ipc_channel::ipc::IpcSender;
use opaque_node::OpaqueNodeMethods;
use script_layout_interface::rpc::{ContentBoxResponse, ContentBoxesResponse};
use script_layout_interface::rpc::{HitTestResponse, LayoutRPC};
use script_layout_interface::rpc::{MarginStyleResponse, NodeGeometryResponse};
use script_layout_interface::rpc::{NodeLayerIdResponse, NodeOverflowResponse};
use script_layout_interface::rpc::{OffsetParentResponse, ResolvedStyleResponse};
use script_layout_interface::wrapper_traits::{LayoutNode, ThreadSafeLayoutNode};
use script_traits::LayoutMsg as ConstellationMsg;
use script_traits::UntrustedNodeAddress;
use sequential;
use std::cmp::{min, max};
use std::ops::Deref;
use std::sync::{Arc, Mutex};
use string_cache::Atom;
use style::computed_values;
use style::logical_geometry::{WritingMode, BlockFlowDirection, InlineBaseDirection};
use style::properties::ComputedValues;
use style::properties::longhands::{display, position};
use style::properties::style_structs;
use style::selector_impl::PseudoElement;
use style::servo::Stylist;
use style::values::AuExtensionMethods;
use style_traits::cursor::Cursor;
use wrapper::ThreadSafeLayoutNodeHelpers;
/// Mutable data belonging to the LayoutThread.
///
/// This needs to be protected by a mutex so we can do fast RPCs.
pub struct LayoutThreadData {
/// The channel on which messages can be sent to the constellation.
pub constellation_chan: IpcSender<ConstellationMsg>,
/// The root stacking context.
pub display_list: Option<Arc<DisplayList>>,
/// Performs CSS selector matching and style resolution.
pub stylist: Arc<Stylist>,
/// A queued response for the union of the content boxes of a node.
pub content_box_response: Rect<Au>,
/// A queued response for the content boxes of a node.
pub content_boxes_response: Vec<Rect<Au>>,
/// A queued response for the client {top, left, width, height} of a node in pixels.
pub client_rect_response: Rect<i32>,
pub layer_id_response: Option<LayerId>,
/// A queued response for the node at a given point
pub hit_test_response: (Option<DisplayItemMetadata>, bool),
/// A pair of overflow property in x and y
pub overflow_response: NodeOverflowResponse,
/// A queued response for the scroll {top, left, width, height} of a node in pixels.
pub scroll_area_response: Rect<i32>,
/// A queued response for the resolved style property of an element.
pub resolved_style_response: Option<String>,
/// A queued response for the offset parent/rect of a node.
pub offset_parent_response: OffsetParentResponse,
/// A queued response for the offset parent/rect of a node.
pub margin_style_response: MarginStyleResponse,
<|fim▁hole|>}
/// RPC endpoint handed to the script thread; shares the layout thread's
/// queued query responses behind an `Arc<Mutex<..>>` so queries stay fast.
pub struct LayoutRPCImpl(pub Arc<Mutex<LayoutThreadData>>);
// https://drafts.csswg.org/cssom-view/#overflow-directions
/// Maps a writing mode to the pair of physical directions in which its
/// content overflows, per the spec table linked above.
fn overflow_direction(writing_mode: &WritingMode) -> OverflowDirection {
    // Dispatch on the inline base direction first, then refine by the block
    // flow direction; the combined mapping is identical to the spec's table.
    match writing_mode.inline_base_direction() {
        InlineBaseDirection::LeftToRight => {
            match writing_mode.block_flow_direction() {
                BlockFlowDirection::TopToBottom |
                BlockFlowDirection::LeftToRight => OverflowDirection::RightAndDown,
                BlockFlowDirection::RightToLeft => OverflowDirection::LeftAndDown,
            }
        }
        InlineBaseDirection::RightToLeft => {
            match writing_mode.block_flow_direction() {
                BlockFlowDirection::TopToBottom => OverflowDirection::LeftAndDown,
                BlockFlowDirection::RightToLeft => OverflowDirection::LeftAndUp,
                BlockFlowDirection::LeftToRight => OverflowDirection::RightAndUp,
            }
        }
    }
}
impl LayoutRPC for LayoutRPCImpl {
    // The neat thing here is that in order to answer the following two queries we only
    // need to compare nodes for equality. Thus we can safely work only with `OpaqueNode`.
    /// Returns the queued union of the content boxes for the last queried node.
    fn content_box(&self) -> ContentBoxResponse {
        let &LayoutRPCImpl(ref rw_data) = self;
        let rw_data = rw_data.lock().unwrap();
        ContentBoxResponse(rw_data.content_box_response)
    }
    /// Requests the dimensions of all the content boxes, as in the `getClientRects()` call.
    fn content_boxes(&self) -> ContentBoxesResponse {
        let &LayoutRPCImpl(ref rw_data) = self;
        let rw_data = rw_data.lock().unwrap();
        ContentBoxesResponse(rw_data.content_boxes_response.clone())
    }
    /// Requests the node containing the point of interest.
    fn hit_test(&self) -> HitTestResponse {
        let &LayoutRPCImpl(ref rw_data) = self;
        let rw_data = rw_data.lock().unwrap();
        let &(ref result, update_cursor) = &rw_data.hit_test_response;
        if update_cursor {
            // Compute the new cursor.
            let cursor = match *result {
                None => Cursor::Default,
                // NOTE(review): `pointing` is unwrapped, so a hit-test result
                // queued with `update_cursor == true` is assumed to always
                // carry a cursor -- verify against the producer of this field.
                Some(dim) => dim.pointing.unwrap(),
            };
            // Ask the constellation to update the OS-level cursor.
            rw_data.constellation_chan.send(ConstellationMsg::SetCursor(cursor)).unwrap();
        }
        HitTestResponse {
            node_address: result.map(|dim| dim.node.to_untrusted_node_address()),
        }
    }
    /// Returns the addresses of all nodes whose display items contain `point`
    /// (given in CSS pixels), by hit testing the current display list.
    fn nodes_from_point(&self, point: Point2D<f32>) -> Vec<UntrustedNodeAddress> {
        // Convert CSS pixels to app units before hit testing.
        let point = Point2D::new(Au::from_f32_px(point.x), Au::from_f32_px(point.y));
        let nodes_from_point_list = {
            let &LayoutRPCImpl(ref rw_data) = self;
            let rw_data = rw_data.lock().unwrap();
            let result = match rw_data.display_list {
                None => panic!("Tried to hit test without a DisplayList"),
                Some(ref display_list) => {
                    display_list.hit_test(&point, &rw_data.stacking_context_scroll_offsets)
                }
            };
            result
        };
        nodes_from_point_list.iter()
            .map(|metadata| metadata.node.to_untrusted_node_address())
            .collect()
    }
    /// Returns the queued client rectangle of a node, in pixels.
    fn node_geometry(&self) -> NodeGeometryResponse {
        let &LayoutRPCImpl(ref rw_data) = self;
        let rw_data = rw_data.lock().unwrap();
        NodeGeometryResponse {
            client_rect: rw_data.client_rect_response
        }
    }
    /// Returns the queued overflow values for the x and y axes.
    fn node_overflow(&self) -> NodeOverflowResponse {
        NodeOverflowResponse(self.0.lock().unwrap().overflow_response.0)
    }
    /// Returns the queued scroll area rectangle of a node, in pixels.
    fn node_scroll_area(&self) -> NodeGeometryResponse {
        NodeGeometryResponse {
            client_rect: self.0.lock().unwrap().scroll_area_response
        }
    }
    /// Returns the queued layer id; panics if the query was not run first.
    fn node_layer_id(&self) -> NodeLayerIdResponse {
        NodeLayerIdResponse {
            layer_id: self.0.lock().unwrap().layer_id_response
                .expect("layer_id is not correctly fetched, see PR #9968")
        }
    }
    /// Retrieves the resolved value for a CSS style property.
    fn resolved_style(&self) -> ResolvedStyleResponse {
        let &LayoutRPCImpl(ref rw_data) = self;
        let rw_data = rw_data.lock().unwrap();
        ResolvedStyleResponse(rw_data.resolved_style_response.clone())
    }
    /// Returns the queued offset parent/rect response.
    fn offset_parent(&self) -> OffsetParentResponse {
        let &LayoutRPCImpl(ref rw_data) = self;
        let rw_data = rw_data.lock().unwrap();
        rw_data.offset_parent_response.clone()
    }
    /// Returns the queued margin style response.
    fn margin_style(&self) -> MarginStyleResponse {
        let &LayoutRPCImpl(ref rw_data) = self;
        let rw_data = rw_data.lock().unwrap();
        rw_data.margin_style_response.clone()
    }
}
/// Accumulates the union of the border boxes of every fragment belonging to
/// the queried node.
struct UnioningFragmentBorderBoxIterator {
    /// The node whose fragments we are interested in.
    node_address: OpaqueNode,
    /// Running union of matching border boxes; `None` until the first match.
    rect: Option<Rect<Au>>,
}
impl UnioningFragmentBorderBoxIterator {
    /// Creates an iterator that has not yet seen any matching fragment.
    fn new(node_address: OpaqueNode) -> UnioningFragmentBorderBoxIterator {
        UnioningFragmentBorderBoxIterator {
            rect: None,
            node_address: node_address,
        }
    }
}
impl FragmentBorderBoxIterator for UnioningFragmentBorderBoxIterator {
    fn process(&mut self, _: &Fragment, _: i32, border_box: &Rect<Au>) {
        // The first match seeds the accumulator; later matches grow it.
        let accumulated = match self.rect.take() {
            Some(current) => current.union(border_box),
            None => *border_box,
        };
        self.rect = Some(accumulated);
    }
    fn should_process(&mut self, fragment: &Fragment) -> bool {
        fragment.contains_node(self.node_address)
    }
}
/// Collects the border box of every fragment belonging to the queried node,
/// in flow-tree traversal order.
struct CollectingFragmentBorderBoxIterator {
    /// The node whose fragments we are interested in.
    node_address: OpaqueNode,
    /// Border boxes gathered so far.
    rects: Vec<Rect<Au>>,
}
impl CollectingFragmentBorderBoxIterator {
    /// Creates an iterator with an empty collection of boxes.
    fn new(node_address: OpaqueNode) -> CollectingFragmentBorderBoxIterator {
        CollectingFragmentBorderBoxIterator {
            rects: Vec::new(),
            node_address: node_address,
        }
    }
}
impl FragmentBorderBoxIterator for CollectingFragmentBorderBoxIterator {
    fn process(&mut self, _: &Fragment, _: i32, border_box: &Rect<Au>) {
        // Record every matching fragment's border box verbatim.
        self.rects.push(*border_box);
    }
    fn should_process(&mut self, fragment: &Fragment) -> bool {
        fragment.contains_node(self.node_address)
    }
}
/// A physical side of a box, used when querying margin and padding values.
enum Side {
    Left,
    Right,
    Bottom,
    Top
}
/// Selects whether a margin or a padding value is being queried.
enum MarginPadding {
    Margin,
    Padding
}
/// The geometric property requested from a fragment's border box.
enum PositionProperty {
    Left,
    Right,
    Top,
    Bottom,
    Width,
    Height,
}
/// The pair of physical directions in which a box's content may overflow.
/// See https://drafts.csswg.org/cssom-view/#overflow-directions.
#[derive(Debug)]
enum OverflowDirection {
    RightAndDown,
    LeftAndDown,
    LeftAndUp,
    RightAndUp,
}
/// Retrieves a single geometric property (position or size) from the
/// fragments that belong to the queried node.
struct PositionRetrievingFragmentBorderBoxIterator {
    /// The node whose fragments we are interested in.
    node_address: OpaqueNode,
    /// The retrieved value, in app units; `None` until a fragment matches.
    result: Option<Au>,
    /// The fragment's position, supplied by the caller.
    position: Point2D<Au>,
    /// Which property of the border box to retrieve.
    property: PositionProperty,
}
impl PositionRetrievingFragmentBorderBoxIterator {
    /// Creates an iterator that will record the requested property.
    fn new(node_address: OpaqueNode,
           property: PositionProperty,
           position: Point2D<Au>) -> PositionRetrievingFragmentBorderBoxIterator {
        PositionRetrievingFragmentBorderBoxIterator {
            node_address: node_address,
            position: position,
            property: property,
            result: None,
        }
    }
}
impl FragmentBorderBoxIterator for PositionRetrievingFragmentBorderBoxIterator {
    fn process(&mut self, fragment: &Fragment, _: i32, border_box: &Rect<Au>) {
        // Physical border + padding, subtracted from the border box to get the
        // width/height the query expects.
        let border_padding = fragment.border_padding.to_physical(fragment.style.writing_mode);
        self.result =
            Some(match self.property {
                PositionProperty::Left => self.position.x,
                PositionProperty::Top => self.position.y,
                PositionProperty::Width => border_box.size.width - border_padding.horizontal(),
                PositionProperty::Height => border_box.size.height - border_padding.vertical(),
                // TODO: the following 2 calculations are completely wrong.
                // They should return the difference between the parent's and this
                // fragment's border boxes.
                PositionProperty::Right => border_box.max_x() + self.position.x,
                PositionProperty::Bottom => border_box.max_y() + self.position.y,
            });
    }
    fn should_process(&mut self, fragment: &Fragment) -> bool {
        fragment.contains_node(self.node_address)
    }
}
/// Fetches one margin or padding value, on one physical side, from the
/// fragments that belong to the queried node.
struct MarginRetrievingFragmentBorderBoxIterator {
    /// The node whose fragments we are interested in.
    node_address: OpaqueNode,
    /// The retrieved edge value; `None` until a fragment matches.
    result: Option<Au>,
    /// Writing mode used to translate logical edges to physical sides.
    writing_mode: WritingMode,
    /// Whether the margin or the padding rectangle is consulted.
    margin_padding: MarginPadding,
    /// Which physical side to read.
    side: Side,
}
impl MarginRetrievingFragmentBorderBoxIterator {
    /// Creates an iterator that will record the requested edge value.
    fn new(node_address: OpaqueNode,
           side: Side,
           margin_padding: MarginPadding,
           writing_mode: WritingMode)
           -> MarginRetrievingFragmentBorderBoxIterator {
        MarginRetrievingFragmentBorderBoxIterator {
            node_address: node_address,
            side: side,
            margin_padding: margin_padding,
            writing_mode: writing_mode,
            result: None,
        }
    }
}
impl FragmentBorderBoxIterator for MarginRetrievingFragmentBorderBoxIterator {
    fn process(&mut self, fragment: &Fragment, _: i32, _: &Rect<Au>) {
        // Choose which logical rectangle we are reading from.
        let edges = match self.margin_padding {
            MarginPadding::Margin => &fragment.margin,
            MarginPadding::Padding => &fragment.border_padding,
        };
        // Map the requested physical side through the writing mode.
        self.result = Some(match self.side {
            Side::Left => edges.left(self.writing_mode),
            Side::Right => edges.right(self.writing_mode),
            Side::Bottom => edges.bottom(self.writing_mode),
            Side::Top => edges.top(self.writing_mode),
        });
    }
    fn should_process(&mut self, fragment: &Fragment) -> bool {
        fragment.contains_node(self.node_address)
    }
}
/// Computes the union of the border boxes of all fragments generated by
/// `requested_node`; an empty rectangle if the node produced no fragments.
pub fn process_content_box_request<N: LayoutNode>(
        requested_node: N, layout_root: &mut FlowRef) -> Rect<Au> {
    // FIXME(pcwalton): This has not been updated to handle the stacking context relative
    // stuff. So the position is wrong in most cases.
    let mut accumulator = UnioningFragmentBorderBoxIterator::new(requested_node.opaque());
    sequential::iterate_through_flow_tree_fragment_border_boxes(layout_root, &mut accumulator);
    accumulator.rect.unwrap_or(Rect::zero())
}
/// Collects the border boxes of every fragment generated by `requested_node`,
/// in traversal order, as needed by the `getClientRects()` call.
pub fn process_content_boxes_request<N: LayoutNode>(requested_node: N, layout_root: &mut FlowRef)
                                                    -> Vec<Rect<Au>> {
    // FIXME(pcwalton): This has not been updated to handle the stacking context relative
    // stuff. So the position is wrong in most cases.
    let mut collector = CollectingFragmentBorderBoxIterator::new(requested_node.opaque());
    sequential::iterate_through_flow_tree_fragment_border_boxes(layout_root, &mut collector);
    collector.rects
}
/// Locates the fragment for a specific node and records its client rectangle
/// (the border box shrunk by its borders), in pixels.
struct FragmentLocatingFragmentIterator {
    /// The node whose fragment we are looking for.
    node_address: OpaqueNode,
    /// The computed client rectangle; zero until the fragment is found.
    client_rect: Rect<i32>,
}
impl FragmentLocatingFragmentIterator {
    /// Creates an iterator with an empty client rectangle.
    fn new(node_address: OpaqueNode) -> FragmentLocatingFragmentIterator {
        FragmentLocatingFragmentIterator {
            node_address: node_address,
            client_rect: Rect::zero()
        }
    }
}
/// Computes a node's scrolling area by unioning the padding and margin
/// rectangles of its descendant fragments with the node's own padding
/// rectangle (https://drafts.csswg.org/cssom-view/#scrolling-area).
struct UnioningFragmentScrollAreaIterator {
    /// The node whose scroll area is being measured.
    node_address: OpaqueNode,
    /// Union of the descendants' padding and margin rectangles, in pixels.
    union_rect: Rect<i32>,
    /// The queried node's own padding rectangle, in pixels.
    origin_rect: Rect<i32>,
    /// The flow-tree level at which the queried node was found; `None` until then.
    level: Option<i32>,
    /// Whether the fragment currently being processed is inside the node's subtree.
    is_child: bool,
    /// The node's overflow directions, derived from its writing mode.
    overflow_direction: OverflowDirection
}
impl UnioningFragmentScrollAreaIterator {
    /// Creates an iterator with empty rectangles and a default overflow direction.
    fn new(node_address: OpaqueNode) -> UnioningFragmentScrollAreaIterator {
        UnioningFragmentScrollAreaIterator {
            node_address: node_address,
            union_rect: Rect::zero(),
            origin_rect: Rect::zero(),
            level: None,
            is_child: false,
            overflow_direction: OverflowDirection::RightAndDown
        }
    }
}
/// Border box information about a candidate offset parent, recorded while
/// walking the flow tree.
struct ParentBorderBoxInfo {
    /// The address of the candidate parent node.
    node_address: OpaqueNode,
    /// That node's border box.
    border_box: Rect<Au>,
}
/// Walks the flow tree looking for a node's offset parent, per
/// https://drafts.csswg.org/cssom-view/#extensions-to-the-htmlelement-interface.
struct ParentOffsetBorderBoxIterator {
    /// The node whose offset parent is being looked up.
    node_address: OpaqueNode,
    /// The tree level of the fragment processed on a previous call.
    last_level: i32,
    /// Whether the fragment for `node_address` has been found yet.
    has_found_node: bool,
    /// The border box of the node's own fragment, once found.
    node_border_box: Rect<Au>,
    /// Stack of candidate offset parents on the path from the root; entries
    /// are `None` for ancestors that cannot act as an offset parent.
    parent_nodes: Vec<Option<ParentBorderBoxInfo>>,
}
impl ParentOffsetBorderBoxIterator {
    /// Creates an iterator positioned before the root (level -1).
    fn new(node_address: OpaqueNode) -> ParentOffsetBorderBoxIterator {
        ParentOffsetBorderBoxIterator {
            node_address: node_address,
            last_level: -1,
            has_found_node: false,
            node_border_box: Rect::zero(),
            parent_nodes: Vec::new(),
        }
    }
}
impl FragmentBorderBoxIterator for FragmentLocatingFragmentIterator {
    fn process(&mut self, fragment: &Fragment, _: i32, border_box: &Rect<Au>) {
        // The client rect excludes the borders: its origin is the top/left
        // border width and its size is the border box shrunk by the borders
        // on both axes, converted to integer pixels.
        let border = fragment.style.get_border();
        self.client_rect.origin.y = border.border_top_width.to_px();
        self.client_rect.origin.x = border.border_left_width.to_px();
        self.client_rect.size.width =
            (border_box.size.width - border.border_left_width - border.border_right_width).to_px();
        self.client_rect.size.height =
            (border_box.size.height - border.border_top_width - border.border_bottom_width).to_px();
    }
    fn should_process(&mut self, fragment: &Fragment) -> bool {
        fragment.node == self.node_address
    }
}
// https://drafts.csswg.org/cssom-view/#scrolling-area
impl FragmentBorderBoxIterator for UnioningFragmentScrollAreaIterator {
    fn process(&mut self, fragment: &Fragment, level: i32, border_box: &Rect<Au>) {
        // In cases in which smaller child elements contain less padding than the parent
        // a union of the two elements' padding rectangles could result in an unwanted
        // increase in size. To work around this, we store the original element's padding
        // rectangle as `origin_rect` and the union of all child elements' padding and
        // margin rectangles as `union_rect`.
        let style_structs::ServoBorder {
            border_top_width: top_border,
            border_right_width: right_border,
            border_bottom_width: bottom_border,
            border_left_width: left_border,
            ..
        } = *fragment.style.get_border();
        // Extents of the padding rectangle, in pixels, relative to the border box.
        let right_padding = (border_box.size.width - right_border - left_border).to_px();
        let bottom_padding = (border_box.size.height - bottom_border - top_border).to_px();
        let top_padding = top_border.to_px();
        let left_padding = left_border.to_px();
        match self.level {
            // Back at (or above) the queried node's level: we have left its subtree.
            Some(start_level) if level <= start_level => { self.is_child = false; }
            // A descendant fragment: fold its padding and margin rects into the union.
            Some(_) => {
                let padding = Rect::new(Point2D::new(left_padding, top_padding),
                                        Size2D::new(right_padding, bottom_padding));
                let top_margin = fragment.margin.top(fragment.style.writing_mode).to_px();
                let left_margin = fragment.margin.left(fragment.style.writing_mode).to_px();
                let bottom_margin = fragment.margin.bottom(fragment.style.writing_mode).to_px();
                let right_margin = fragment.margin.right(fragment.style.writing_mode).to_px();
                let margin = Rect::new(Point2D::new(left_margin, top_margin),
                                       Size2D::new(right_margin, bottom_margin));
                self.union_rect = self.union_rect.union(&margin).union(&padding);
            }
            // First match: this fragment is the queried node itself.
            None => {
                self.level = Some(level);
                self.is_child = true;
                self.overflow_direction = overflow_direction(&fragment.style.writing_mode);
                self.origin_rect = Rect::new(Point2D::new(left_padding, top_padding),
                                             Size2D::new(right_padding, bottom_padding));
            },
        };
    }
    fn should_process(&mut self, fragment: &Fragment) -> bool {
        fragment.contains_node(self.node_address) || self.is_child
    }
}
// https://drafts.csswg.org/cssom-view/#extensions-to-the-htmlelement-interface
impl FragmentBorderBoxIterator for ParentOffsetBorderBoxIterator {
    fn process(&mut self, fragment: &Fragment, level: i32, border_box: &Rect<Au>) {
        if fragment.node == self.node_address {
            // Found the fragment in the flow tree that matches the
            // DOM node being looked for.
            self.has_found_node = true;
            self.node_border_box = *border_box;
            // offsetParent returns null if the node is fixed.
            if fragment.style.get_box().position == computed_values::position::T::fixed {
                self.parent_nodes.clear();
            }
        } else if level > self.last_level {
            // NOTE(review): `last_level` is never reassigned in this iterator, so
            // with its initial value of -1 this branch is taken for every
            // non-matching fragment and the `level < self.last_level` branch
            // below appears unreachable -- verify against upstream history.
            // TODO(gw): Is there a less fragile way of checking whether this
            // fragment is the body element, rather than just checking that
            // the parent nodes stack contains the root node only?
            let is_body_element = self.parent_nodes.len() == 1;
            let is_valid_parent = match (is_body_element,
                                         fragment.style.get_box().position,
                                         &fragment.specific) {
                // Spec says it's valid if any of these are true:
                //  1) Is the body element
                //  2) Is static position *and* is a table or table cell
                //  3) Is not static position
                (true, _, _) |
                (false, computed_values::position::T::static_, &SpecificFragmentInfo::Table) |
                (false, computed_values::position::T::static_, &SpecificFragmentInfo::TableCell) |
                (false, computed_values::position::T::absolute, _) |
                (false, computed_values::position::T::relative, _) |
                (false, computed_values::position::T::fixed, _) => true,
                // Otherwise, it's not a valid parent
                (false, computed_values::position::T::static_, _) => false,
            };
            let parent_info = if is_valid_parent {
                Some(ParentBorderBoxInfo {
                    border_box: *border_box,
                    node_address: fragment.node,
                })
            } else {
                None
            };
            self.parent_nodes.push(parent_info);
        } else if level < self.last_level {
            self.parent_nodes.pop();
        }
    }
    fn should_process(&mut self, _: &Fragment) -> bool {
        !self.has_found_node
    }
}
/// Returns the border box ("client rect") of `requested_node`, located by
/// walking the flow tree's fragment border boxes sequentially.
pub fn process_node_geometry_request<N: LayoutNode>(requested_node: N, layout_root: &mut FlowRef)
        -> Rect<i32> {
    let mut locator = FragmentLocatingFragmentIterator::new(requested_node.opaque());
    sequential::iterate_through_flow_tree_fragment_border_boxes(layout_root, &mut locator);
    locator.client_rect
}
/// Returns the compositor layer ID associated with `requested_node`.
pub fn process_node_layer_id_request<N: LayoutNode>(requested_node: N) -> LayerId {
    requested_node.to_threadsafe().layer_id()
}
/// Computes the scroll area of `requested_node`: the element's own padding
/// rectangle (`origin_rect`) extended, in the direction content overflows,
/// by the union of the descendants' padding/margin rectangles (`union_rect`)
/// collected by `UnioningFragmentScrollAreaIterator`.
pub fn process_node_scroll_area_request<N: LayoutNode>(requested_node: N, layout_root: &mut FlowRef)
        -> Rect<i32> {
    let mut iterator = UnioningFragmentScrollAreaIterator::new(requested_node.opaque());
    sequential::iterate_through_flow_tree_fragment_border_boxes(layout_root, &mut iterator);
    match iterator.overflow_direction {
        OverflowDirection::RightAndDown => {
            // Overflow grows the rect rightwards and downwards.
            let right = max(iterator.union_rect.size.width, iterator.origin_rect.size.width);
            let bottom = max(iterator.union_rect.size.height, iterator.origin_rect.size.height);
            Rect::new(iterator.origin_rect.origin, Size2D::new(right, bottom))
        },
        OverflowDirection::LeftAndDown => {
            // Overflow grows the rect leftwards and downwards. The left edge
            // must move to the smaller (leftmost) x origin, mirroring the
            // `LeftAndUp` arm below; using `max` here would ignore content
            // that overflows to the left.
            let bottom = max(iterator.union_rect.size.height, iterator.origin_rect.size.height);
            let left = min(iterator.union_rect.origin.x, iterator.origin_rect.origin.x);
            Rect::new(Point2D::new(left, iterator.origin_rect.origin.y),
                      Size2D::new(iterator.origin_rect.size.width, bottom))
        },
        OverflowDirection::LeftAndUp => {
            // Overflow grows the rect leftwards and upwards.
            let top = min(iterator.union_rect.origin.y, iterator.origin_rect.origin.y);
            let left = min(iterator.union_rect.origin.x, iterator.origin_rect.origin.x);
            Rect::new(Point2D::new(left, top), iterator.origin_rect.size)
        },
        OverflowDirection::RightAndUp => {
            // Overflow grows the rect rightwards and upwards.
            let top = min(iterator.union_rect.origin.y, iterator.origin_rect.origin.y);
            let right = max(iterator.union_rect.size.width, iterator.origin_rect.size.width);
            Rect::new(Point2D::new(iterator.origin_rect.origin.x, top),
                      Size2D::new(right, iterator.origin_rect.size.height))
        }
    }
}
/// Return the resolved value of property for a given (pseudo)element.
/// https://drafts.csswg.org/cssom/#resolved-value
pub fn process_resolved_style_request<N: LayoutNode>(
            requested_node: N, pseudo: &Option<PseudoElement>,
            property: &Atom, layout_root: &mut FlowRef) -> Option<String> {
    let layout_node = requested_node.to_threadsafe();
    // Redirect the query to the appropriate pseudo-element node, if any.
    let layout_node = match *pseudo {
        Some(PseudoElement::Before) => layout_node.get_before_pseudo(),
        Some(PseudoElement::After) => layout_node.get_after_pseudo(),
        Some(PseudoElement::DetailsSummary) |
        Some(PseudoElement::DetailsContent) |
        Some(PseudoElement::Selection) => None,
        _ => Some(layout_node)
    };

    let layout_node = match layout_node {
        None => {
            // The pseudo doesn't exist, return nothing. Chrome seems to query
            // the element itself in this case, Firefox uses the resolved value.
            // https://www.w3.org/Bugs/Public/show_bug.cgi?id=29006
            return None;
        }
        Some(layout_node) => layout_node
    };

    let style = &*layout_node.resolved_style();
    // left/top/right/bottom resolve to used values only for positioned boxes.
    let positioned = match style.get_box().position {
        position::computed_value::T::relative |
        /*position::computed_value::T::sticky |*/
        position::computed_value::T::fixed |
        position::computed_value::T::absolute => true,
        _ => false
    };

    //TODO: determine whether requested property applies to the element.
    // eg. width does not apply to non-replaced inline elements.
    // Existing browsers disagree about when left/top/right/bottom apply
    // (Chrome seems to think they never apply and always returns resolved values).
    // There are probably other quirks.
    let applies = true;

    // Helper: walk the flow tree to obtain the *used* value of one of the
    // positional properties (left/top/right/bottom/width/height).
    fn used_value_for_position_property<N: LayoutNode>(
            layout_node: N::ConcreteThreadSafeLayoutNode,
            layout_root: &mut FlowRef,
            requested_node: N,
            property: &Atom) -> Option<String> {
        let maybe_data = layout_node.borrow_layout_data();
        // Falls back to the zero point when no flow was constructed for the node.
        let position = maybe_data.map_or(Point2D::zero(), |data| {
            match (*data).flow_construction_result {
                ConstructionResult::Flow(ref flow_ref, _) =>
                    flow::base(flow_ref.deref()).stacking_relative_position,
                // TODO(dzbarsky) search parents until we find node with a flow ref.
                // https://github.com/servo/servo/issues/8307
                _ => Point2D::zero()
            }
        });
        let property = match *property {
            atom!("bottom") => PositionProperty::Bottom,
            atom!("top") => PositionProperty::Top,
            atom!("left") => PositionProperty::Left,
            atom!("right") => PositionProperty::Right,
            atom!("width") => PositionProperty::Width,
            atom!("height") => PositionProperty::Height,
            _ => unreachable!()
        };
        let mut iterator =
            PositionRetrievingFragmentBorderBoxIterator::new(requested_node.opaque(),
                                                             property,
                                                             position);
        sequential::iterate_through_flow_tree_fragment_border_boxes(layout_root,
                                                                    &mut iterator);
        iterator.result.map(|r| r.to_css_string())
    }

    // TODO: we will return neither the computed nor used value for margin and padding.
    // Firefox returns blank strings for the computed value of shorthands,
    // so this should be web-compatible.
    match *property {
        atom!("margin-bottom") | atom!("margin-top") |
        atom!("margin-left") | atom!("margin-right") |
        atom!("padding-bottom") | atom!("padding-top") |
        atom!("padding-left") | atom!("padding-right")
        if applies && style.get_box().display != display::computed_value::T::none => {
            let (margin_padding, side) = match *property {
                atom!("margin-bottom") => (MarginPadding::Margin, Side::Bottom),
                atom!("margin-top") => (MarginPadding::Margin, Side::Top),
                atom!("margin-left") => (MarginPadding::Margin, Side::Left),
                atom!("margin-right") => (MarginPadding::Margin, Side::Right),
                atom!("padding-bottom") => (MarginPadding::Padding, Side::Bottom),
                atom!("padding-top") => (MarginPadding::Padding, Side::Top),
                atom!("padding-left") => (MarginPadding::Padding, Side::Left),
                atom!("padding-right") => (MarginPadding::Padding, Side::Right),
                _ => unreachable!()
            };
            let mut iterator =
                MarginRetrievingFragmentBorderBoxIterator::new(requested_node.opaque(),
                                                               side,
                                                               margin_padding,
                                                               style.writing_mode);
            sequential::iterate_through_flow_tree_fragment_border_boxes(layout_root,
                                                                        &mut iterator);
            iterator.result.map(|r| r.to_css_string())
        },

        atom!("bottom") | atom!("top") | atom!("right") |
        atom!("left")
        if applies && positioned && style.get_box().display !=
                display::computed_value::T::none => {
            used_value_for_position_property(layout_node, layout_root, requested_node, property)
        }
        atom!("width") | atom!("height")
        if applies && style.get_box().display !=
                display::computed_value::T::none => {
            used_value_for_position_property(layout_node, layout_root, requested_node, property)
        }
        // FIXME: implement used value computation for line-height
        // Every other property: serialize the computed value.
        ref property => {
            style.computed_value_to_string(&*property).ok()
        }
    }
}
/// Implements the offsetParent query: finds the deepest valid positioned
/// ancestor recorded by `ParentOffsetBorderBoxIterator` and returns the
/// node's border box expressed relative to that ancestor's border box.
pub fn process_offset_parent_query<N: LayoutNode>(requested_node: N, layout_root: &mut FlowRef)
        -> OffsetParentResponse {
    let mut iterator = ParentOffsetBorderBoxIterator::new(requested_node.opaque());
    sequential::iterate_through_flow_tree_fragment_border_boxes(layout_root, &mut iterator);
    // The offset parent is the deepest ancestor for which info was recorded.
    match iterator.parent_nodes.iter().rposition(|info| info.is_some()) {
        None => OffsetParentResponse::empty(),
        Some(index) => {
            let parent = iterator.parent_nodes[index].as_ref().unwrap();
            OffsetParentResponse {
                node_address: Some(parent.node_address.to_untrusted_node_address()),
                rect: Rect::new(iterator.node_border_box.origin - parent.border_box.origin,
                                iterator.node_border_box.size),
            }
        }
    }
}
pub fn process_node_overflow_request<N: LayoutNode>(requested_node: N) -> NodeOverflowResponse {
let layout_node = requested_node.to_threadsafe();
let style = &*layout_node.resolved_style();
let style_box = style.get_box();
NodeOverflowResponse(Some((Point2D::new(style_box.overflow_x, style_box.overflow_y.0))))
}
pub fn process_margin_style_query<N: LayoutNode>(requested_node: N)
-> MarginStyleResponse {
let layout_node = requested_node.to_threadsafe();
let style = &*layout_node.resolved_style();
let margin = style.get_margin();
MarginStyleResponse {
top: margin.margin_top,
right: margin.margin_right,
bottom: margin.margin_bottom,
left: margin.margin_left,
}
}<|fim▁end|> | /// Scroll offsets of stacking contexts. This will only be populated if WebRender is in use.
pub stacking_context_scroll_offsets: ScrollOffsetMap, |
<|file_name|>hashing.py<|end_file_name|><|fim▁begin|># Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras hashing preprocessing layer."""
# pylint: disable=g-classes-have-attributes
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras.engine import base_preprocessing_layer
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import gen_sparse_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops.ragged import ragged_functional_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.util.tf_export import keras_export
# Default key from tf.sparse.cross_hashed
_DEFAULT_SALT_KEY = [0xDECAFCAFFE, 0xDECAFCAFFE]
@keras_export('keras.layers.experimental.preprocessing.Hashing')
class Hashing(base_preprocessing_layer.PreprocessingLayer):
"""Implements categorical feature hashing, also known as "hashing trick".
This layer transforms single or multiple categorical inputs to hashed output.
It converts a sequence of int or string to a sequence of int. The stable hash
function uses tensorflow::ops::Fingerprint to produce universal output that
is consistent across platforms.
This layer uses [FarmHash64](https://github.com/google/farmhash) by default,
which provides a consistent hashed output across different platforms and is
stable across invocations, regardless of device and context, by mixing the
input bits thoroughly.
If you want to obfuscate the hashed output, you can also pass a random `salt`
argument in the constructor. In that case, the layer will use the
[SipHash64](https://github.com/google/highwayhash) hash function, with
the `salt` value serving as additional input to the hash function.
Example (FarmHash64):
>>> layer = tf.keras.layers.experimental.preprocessing.Hashing(num_bins=3)
>>> inp = [['A'], ['B'], ['C'], ['D'], ['E']]
>>> layer(inp)
<tf.Tensor: shape=(5, 1), dtype=int64, numpy=
array([[1],
[0],
[1],
[1],
[2]])>
Example (FarmHash64) with list of inputs:
>>> layer = tf.keras.layers.experimental.preprocessing.Hashing(num_bins=3)
>>> inp_1 = [['A'], ['B'], ['C'], ['D'], ['E']]
>>> inp_2 = np.asarray([[5], [4], [3], [2], [1]])
>>> layer([inp_1, inp_2])
<tf.Tensor: shape=(5, 1), dtype=int64, numpy=
array([[1],
[1],
[0],
[2],
[0]])>
Example (SipHash64):
>>> layer = tf.keras.layers.experimental.preprocessing.Hashing(num_bins=3,
... salt=[133, 137])
>>> inp = [['A'], ['B'], ['C'], ['D'], ['E']]
>>> layer(inp)
<tf.Tensor: shape=(5, 1), dtype=int64, numpy=
array([[1],
[2],
[1],
[0],
[2]])>
Example (SipHash64 with a single integer, same as `salt=[133, 133]`):
>>> layer = tf.keras.layers.experimental.preprocessing.Hashing(num_bins=3,
... salt=133)
>>> inp = [['A'], ['B'], ['C'], ['D'], ['E']]
>>> layer(inp)
<tf.Tensor: shape=(5, 1), dtype=int64, numpy=
array([[0],
[0],
[2],
[1],
[0]])>
Reference: [SipHash with salt](https://www.131002.net/siphash/siphash.pdf)
Arguments:
num_bins: Number of hash bins.
salt: A single unsigned integer or None.
If passed, the hash function used will be SipHash64, with these values
used as an additional input (known as a "salt" in cryptography).
These should be non-zero. Defaults to `None` (in that
case, the FarmHash64 hash function is used). It also supports
tuple/list of 2 unsigned integer numbers, see reference paper for details.
name: Name to give to the layer.
**kwargs: Keyword arguments to construct a layer.
Input shape: A single or list of string, int32 or int64 `Tensor`,
`SparseTensor` or `RaggedTensor` of shape `[batch_size, ...,]`
Output shape: An int64 `Tensor`, `SparseTensor` or `RaggedTensor` of shape
`[batch_size, ...]`. If any input is `RaggedTensor` then output is
`RaggedTensor`, otherwise if any input is `SparseTensor` then output is
`SparseTensor`, otherwise the output is `Tensor`.
"""
def __init__(self, num_bins, salt=None, name=None, **kwargs):
  """Creates a `Hashing` layer.

  Args:
    num_bins: Positive integer; number of hash bins.
    salt: `None`, a single unsigned integer, or a tuple/list of 2 unsigned
      integers. When provided, SipHash64 is used with this salt; otherwise
      the unkeyed FarmHash64 is used.
    name: Name to give to the layer.
    **kwargs: Keyword arguments forwarded to the base layer.

  Raises:
    ValueError: If `num_bins` is `None` or not positive, or if `salt` is of
      an unsupported type/shape.
  """
  if num_bins is None or num_bins <= 0:
    raise ValueError('`num_bins` cannot be `None` or non-positive values.')
  super(Hashing, self).__init__(name=name, **kwargs)
  base_preprocessing_layer.keras_kpl_gauge.get_cell('Hashing').set(True)
  self.num_bins = num_bins
  # Any user-supplied salt switches the layer to the keyed SipHash64 hash.
  self.strong_hash = salt is not None
  if salt is None:
    # Unkeyed path: keep the default key from tf.sparse.cross_hashed.
    self.salt = _DEFAULT_SALT_KEY
  elif isinstance(salt, (tuple, list)) and len(salt) == 2:
    self.salt = salt
  elif isinstance(salt, int):
    # A single integer is expanded to the [salt, salt] pair SipHash expects.
    self.salt = [salt, salt]
  else:
    raise ValueError('`salt` can only be a tuple of size 2 integers, or a '
                     'single integer, given {}'.format(salt))
def _preprocess_single_input(self, inp):
  """Converts lists/tuples/ndarrays to tensors; passes anything else through."""
  if not isinstance(inp, (list, tuple, np.ndarray)):
    return inp
  return ops.convert_to_tensor_v2_with_dispatch(inp)
def _preprocess_inputs(self, inputs):
  """Preprocesses either a single input or a list of inputs."""
  if not isinstance(inputs, (tuple, list)):
    return self._preprocess_single_input(inputs)
  # A tuple/list containing any tensor or ndarray is treated as a list of
  # separate inputs; otherwise the sequence itself is one (nested) input.
  has_tensor_like = any(
      tensor_util.is_tensor(inp) or isinstance(inp, np.ndarray)
      for inp in inputs)
  if has_tensor_like:
    return [self._preprocess_single_input(inp) for inp in inputs]
  return self._preprocess_single_input(inputs)
def call(self, inputs):
  """Hashes a single input, or crosses-and-hashes a list of inputs."""
  prepared = self._preprocess_inputs(inputs)
  if isinstance(prepared, (tuple, list)):
    return self._process_input_list(prepared)
  return self._process_single_input(prepared)
def _process_single_input(self, inputs):
  """Hashes one tensor-like input into integer ids in `[0, num_bins)`.

  Dense, sparse, and ragged inputs are supported; the input's structure
  (indices/dense shape, ragged row partitions) is preserved in the output.
  """
  # Converts integer inputs to string.
  if inputs.dtype.is_integer:
    if isinstance(inputs, sparse_tensor.SparseTensor):
      # Only the values need converting; indices/shape stay as-is.
      inputs = sparse_tensor.SparseTensor(
          indices=inputs.indices,
          values=string_ops.as_string(inputs.values),
          dense_shape=inputs.dense_shape)
    else:
      inputs = string_ops.as_string(inputs)
  str_to_hash_bucket = self._get_string_to_hash_bucket_fn()
  if tf_utils.is_ragged(inputs):
    # Hash only the flat values so the ragged row structure is kept intact.
    return ragged_functional_ops.map_flat_values(
        str_to_hash_bucket, inputs, num_buckets=self.num_bins, name='hash')
  elif isinstance(inputs, sparse_tensor.SparseTensor):
    sparse_values = inputs.values
    sparse_hashed_values = str_to_hash_bucket(
        sparse_values, self.num_bins, name='hash')
    return sparse_tensor.SparseTensor(
        indices=inputs.indices,
        values=sparse_hashed_values,
        dense_shape=inputs.dense_shape)
  else:
    return str_to_hash_bucket(inputs, self.num_bins, name='hash')
def _process_input_list(self, inputs):
# TODO(momernick): support ragged_cross_hashed with corrected fingerprint<|fim▁hole|> sparse_inputs = [
inp for inp in inputs if isinstance(inp, sparse_tensor.SparseTensor)
]
dense_inputs = [
inp for inp in inputs if not isinstance(inp, sparse_tensor.SparseTensor)
]
all_dense = True if not sparse_inputs else False
indices = [sp_inp.indices for sp_inp in sparse_inputs]
values = [sp_inp.values for sp_inp in sparse_inputs]
shapes = [sp_inp.dense_shape for sp_inp in sparse_inputs]
indices_out, values_out, shapes_out = gen_sparse_ops.SparseCrossHashed(
indices=indices,
values=values,
shapes=shapes,
dense_inputs=dense_inputs,
num_buckets=self.num_bins,
strong_hash=self.strong_hash,
salt=self.salt)
sparse_out = sparse_tensor.SparseTensor(indices_out, values_out, shapes_out)
if all_dense:
return sparse_ops.sparse_tensor_to_dense(sparse_out)
return sparse_out
def _get_string_to_hash_bucket_fn(self):
  """Returns the string_to_hash_bucket op to use based on `hasher_key`."""
  if self.strong_hash:
    # string_to_hash_bucket_strong uses SipHash64, keyed with the salt.
    return functools.partial(
        string_ops.string_to_hash_bucket_strong, key=self.salt)
  # string_to_hash_bucket_fast uses the unkeyed FarmHash64 hash function.
  return string_ops.string_to_hash_bucket_fast
def compute_output_shape(self, input_shape):
  """Computes the output shape.

  A single input keeps its shape (hashing is elementwise). Multiple inputs
  are crossed, producing `[batch_size, None]` since the number of crossed
  values per row depends on the data.
  """
  if not isinstance(input_shape, (tuple, list)):
    return input_shape
  input_shapes = input_shape
  batch_size = None
  for inp_shape in input_shapes:
    inp_tensor_shape = tensor_shape.TensorShape(inp_shape).as_list()
    if len(inp_tensor_shape) != 2:
      raise ValueError('Inputs must be rank 2, get {}'.format(input_shapes))
    # The batch size is taken from the first input's leading dimension.
    if batch_size is None:
      batch_size = inp_tensor_shape[0]
  # The second dimension is dynamic based on inputs.
  output_shape = [batch_size, None]
  return tensor_shape.TensorShape(output_shape)
def compute_output_signature(self, input_spec):
  """Computes the output type spec; the output dtype is always int64.

  For a single input, sparseness is preserved. For multiple inputs the
  result follows the cross: ragged inputs yield a dense spec, otherwise any
  sparse input yields a sparse spec, otherwise a dense spec.
  """
  if not isinstance(input_spec, (tuple, list)):
    output_shape = self.compute_output_shape(input_spec.shape)
    output_dtype = dtypes.int64
    if isinstance(input_spec, sparse_tensor.SparseTensorSpec):
      return sparse_tensor.SparseTensorSpec(
          shape=output_shape, dtype=output_dtype)
    else:
      return tensor_spec.TensorSpec(shape=output_shape, dtype=output_dtype)
  input_shapes = [x.shape for x in input_spec]
  output_shape = self.compute_output_shape(input_shapes)
  if any(
      isinstance(inp_spec, ragged_tensor.RaggedTensorSpec)
      for inp_spec in input_spec):
    return tensor_spec.TensorSpec(shape=output_shape, dtype=dtypes.int64)
  elif any(
      isinstance(inp_spec, sparse_tensor.SparseTensorSpec)
      for inp_spec in input_spec):
    return sparse_tensor.SparseTensorSpec(
        shape=output_shape, dtype=dtypes.int64)
  return tensor_spec.TensorSpec(shape=output_shape, dtype=dtypes.int64)
def get_config(self):
  """Returns the serializable layer configuration (`num_bins` and `salt`)."""
  merged = dict(super(Hashing, self).get_config())
  merged.update({'num_bins': self.num_bins, 'salt': self.salt})
  return merged
if any(isinstance(inp, ragged_tensor.RaggedTensor) for inp in inputs):
raise ValueError('Hashing with ragged input is not supported yet.') |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2016 Pi-Yueh Chuang <[email protected]>
#
# Distributed under terms of the MIT license.
"""__init__.py"""
from utils.errors.Error import Error
from utils.errors.InfLoopError import InfLoopError
<|fim▁hole|><|fim▁end|> | __author__ = "Pi-Yueh Chuang"
__version__ = "alpha" |
<|file_name|>MyRemoteClass.java<|end_file_name|><|fim▁begin|>package rmi;
import java.rmi.RemoteException;
import java.rmi.registry.LocateRegistry;
import java.rmi.registry.Registry;
<|fim▁hole|> public String[] sayYourName(String name) throws RemoteException {
System.err.println("remote reference");
return new String[] { "kick", name };
}
public static void main(String[] args) {
try {
MyRemoteClass myRemoteClass = new MyRemoteClass();
MyRemoteInterface myRemoteInterface = (MyRemoteInterface) UnicastRemoteObject
.exportObject(myRemoteClass, 0);
Registry registry = LocateRegistry.getRegistry();
registry.rebind("myRemoteInterface", myRemoteInterface);
System.err.println("system ready!!");
} catch (RemoteException e) {
e.printStackTrace();
}
}
public Boolean checkIfSuccess() throws RemoteException {
return true;
}
}<|fim▁end|> | import java.rmi.server.UnicastRemoteObject;
public class MyRemoteClass implements MyRemoteInterface {
|
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># Python bindings to oDesk API
# python-odesk version 0.5
# (C) 2010-2015 oDesk
<|fim▁hole|>so you can use::
from odesk import Client
from odesk import raise_http_error
"""
VERSION = '0.5.8'
def get_version():
return VERSION
from odesk.client import Client
from odesk.http import raise_http_error
__all__ = ["get_version", "Client", "raise_http_error"]<|fim▁end|> | # Updated by the script
"""Main package of the python bindings for oDesk API.
For convenience some most commonly used functionalities are imported here, |
<|file_name|>index-test.js<|end_file_name|><|fim▁begin|>/* eslint-env jasmine, jest */
import React from 'react';
import View from '../';
import StyleSheet from '../../StyleSheet';
import { act } from 'react-dom/test-utils';
import { createEventTarget } from 'dom-event-testing-library';
import { render } from '@testing-library/react';
describe('components/View', () => {
test('default', () => {
const { container } = render(<View />);
expect(container.firstChild).toMatchSnapshot();
});
test('non-text is rendered', () => {
const children = <View testID="1" />;
const { container } = render(<View>{children}</View>);
expect(container.firstChild).toMatchSnapshot();
});
// Raw string children are invalid for View; each case asserts an error is
// logged. console.error is stubbed so the expected warning doesn't pollute
// test output, and restored afterwards.
describe('raw text nodes as children', () => {
  beforeEach(() => {
    jest.spyOn(console, 'error');
    console.error.mockImplementation(() => {});
  });

  afterEach(() => {
    console.error.mockRestore();
  });

  test('error logged (single)', () => {
    render(<View>hello</View>);
    expect(console.error).toBeCalled();
  });

  test('error logged (array)', () => {
    render(
      <View>
        <View />
        hello
        <View />
      </View>
    );
    expect(console.error).toBeCalled();
  });
});
// Accessibility props: each snapshot pins how the prop maps onto the DOM.
describe('prop "accessibilityLabel"', () => {
  test('value is set', () => {
    const { container } = render(<View accessibilityLabel="accessibility label" />);
    expect(container.firstChild).toMatchSnapshot();
  });
});

describe('prop "accessibilityLabelledBy"', () => {
  test('value is set', () => {
    const { container } = render(<View accessibilityLabelledBy="123" />);
    expect(container.firstChild).toMatchSnapshot();
  });
});

describe('prop "accessibilityLiveRegion"', () => {
  test('value is set', () => {
    const { container } = render(<View accessibilityLiveRegion="polite" />);
    expect(container.firstChild).toMatchSnapshot();
  });
});

describe('prop "accessibilityRole"', () => {
  test('value is set', () => {
    const { container } = render(<View accessibilityRole="none" />);
    expect(container.firstChild).toMatchSnapshot();
  });

  test('value is "button"', () => {
    const { container } = render(<View accessibilityRole="button" />);
    expect(container.firstChild).toMatchSnapshot();
  });

  // Some roles (e.g. "article") change the rendered HTML tag itself.
  test('value alters HTML element', () => {
    const { container } = render(<View accessibilityRole="article" />);
    expect(container.firstChild).toMatchSnapshot();
  });
});

// An explicit "dir" prop must win over the internally managed direction.
test('allows "dir" to be overridden', () => {
  const { container } = render(<View dir="rtl" />);
  expect(container.firstChild).toMatchSnapshot();
});
// Link-related props: "href" turns the View into a link-capable element.
describe('prop "href"', () => {
  test('value is set', () => {
    const { container } = render(<View href="https://example.com" />);
    expect(container.firstChild).toMatchSnapshot();
  });

  test('href with accessibilityRole', () => {
    const { container } = render(<View accessibilityRole="none" href="https://example.com" />);
    expect(container.firstChild).toMatchSnapshot();
  });
});

// "hrefAttrs" only has an effect when "href" is also set.
describe('prop "hrefAttrs"', () => {
  test('requires "href"', () => {
    const { container } = render(<View hrefAttrs={{ download: 'filename.jpg' }} />);
    expect(container.firstChild).toMatchSnapshot();
  });

  test('value is set', () => {
    const hrefAttrs = {
      download: 'filename.jpg',
      rel: 'nofollow',
      target: '_blank'
    };
    const { container } = render(<View href="https://example.com" hrefAttrs={hrefAttrs} />);
    expect(container.firstChild).toMatchSnapshot();
  });

  // "blank" (without the leading underscore) is an accepted target variant.
  test('target variant is set', () => {
    const hrefAttrs = {
      target: 'blank'
    };
    const { container } = render(<View href="https://example.com" hrefAttrs={hrefAttrs} />);
    expect(container.firstChild).toMatchSnapshot();
  });

  // Null attribute values must not appear as attributes in the DOM.
  test('null values are excluded', () => {
    const hrefAttrs = {
      download: null,
      rel: null,
      target: null
    };
    const { container } = render(<View href="https://example.com" hrefAttrs={hrefAttrs} />);
    expect(container.firstChild).toMatchSnapshot();
  });
});

describe('prop "nativeID"', () => {
  test('value is set', () => {
    const { container } = render(<View nativeID="nativeID" />);
    expect(container.firstChild).toMatchSnapshot();
  });
});
// Focus events are simulated via dom-event-testing-library targets inside
// act() so React processes the synthetic events before the assertions run.
describe('prop "onBlur"', () => {
  test('is called', () => {
    const onBlur = jest.fn();
    const ref = React.createRef();
    act(() => {
      render(<View onBlur={onBlur} ref={ref} />);
    });
    const target = createEventTarget(ref.current);
    const body = createEventTarget(document.body);
    act(() => {
      target.focus();
      // Moving focus to the body (with relatedTarget set) blurs the View.
      body.focus({ relatedTarget: target.node });
    });
    expect(onBlur).toBeCalled();
  });
});

describe('prop "onFocus"', () => {
  test('is called', () => {
    const onFocus = jest.fn();
    const ref = React.createRef();
    act(() => {
      render(<View onFocus={onFocus} ref={ref} />);
    });
    const target = createEventTarget(ref.current);
    act(() => {
      target.focus();
      target.blur();
    });
    expect(onFocus).toBeCalled();
  });
});
describe('prop "ref"', () => {
test('value is set', () => {
const ref = jest.fn();
render(<View ref={ref} />);
expect(ref).toBeCalled();
});
test('is not called for prop changes', () => {
const ref = jest.fn();
let rerender;
act(() => {
({ rerender } = render(<View nativeID="123" ref={ref} style={{ borderWidth: 5 }} />));
});
expect(ref).toHaveBeenCalledTimes(1);
act(() => {
rerender(<View nativeID="1234" ref={ref} style={{ borderWidth: 6 }} />);
});
expect(ref).toHaveBeenCalledTimes(1);
});
test('node has imperative methods', () => {
const ref = React.createRef();
act(() => {
render(<View ref={ref} />);
});
const node = ref.current;
expect(typeof node.measure === 'function');
expect(typeof node.measureLayout === 'function');
expect(typeof node.measureInWindow === 'function');
expect(typeof node.setNativeProps === 'function');
});
describe('setNativeProps method', () => {
test('works with react-native props', () => {
const ref = React.createRef();
const { container } = render(<View ref={ref} />);
const node = ref.current;
node.setNativeProps({
accessibilityLabel: 'label',
pointerEvents: 'box-only',
style: {
marginHorizontal: 10,
shadowColor: 'black',
shadowWidth: 2,
textAlignVertical: 'top'
}
});
expect(container.firstChild).toMatchSnapshot();
});
test('style updates as expected', () => {
const ref = React.createRef();
const styles = StyleSheet.create({ root: { color: 'red' } });
// initial render
const { container, rerender } = render(
<View ref={ref} style={[styles.root, { width: 10 }]} />
);
const node = ref.current;<|fim▁hole|> // set native props again
node.setNativeProps({ style: { width: 30 } });
expect(container.firstChild).toMatchSnapshot();
node.setNativeProps({ style: { width: 30 } });
node.setNativeProps({ style: { width: 30 } });
node.setNativeProps({ style: { width: 30 } });
expect(container.firstChild).toMatchSnapshot();
// update render
rerender(<View ref={ref} style={[styles.root, { width: 40 }]} />);
expect(container.firstChild).toMatchSnapshot();
});
});
});
// Remaining simple props: each case pins the resulting DOM via snapshot.
test('prop "pointerEvents"', () => {
  const { container } = render(<View pointerEvents="box-only" />);
  expect(container.firstChild).toMatchSnapshot();
});

describe('prop "style"', () => {
  test('value is set', () => {
    const { container } = render(<View style={{ borderWidth: 5 }} />);
    expect(container.firstChild).toMatchSnapshot();
  });
});

describe('prop "testID"', () => {
  test('value is set', () => {
    const { container } = render(<View testID="123" />);
    expect(container.firstChild).toMatchSnapshot();
  });
});
});<|fim▁end|> | expect(container.firstChild).toMatchSnapshot();
// set native props
node.setNativeProps({ style: { color: 'orange', height: 20, width: 20 } });
expect(container.firstChild).toMatchSnapshot(); |
<|file_name|>SendAuth.hpp<|end_file_name|><|fim▁begin|>/*================================================================================
code generated by: java2cpp
author: Zoran Angelov, mailto://[email protected]
class: com.tencent.mm.sdk.openapi.SendAuth
================================================================================*/
#ifndef J2CPP_INCLUDE_IMPLEMENTATION
#ifndef J2CPP_COM_TENCENT_MM_SDK_OPENAPI_SENDAUTH_HPP_DECL
#define J2CPP_COM_TENCENT_MM_SDK_OPENAPI_SENDAUTH_HPP_DECL
namespace j2cpp { namespace android { namespace os { class Bundle; } } }
namespace j2cpp { namespace java { namespace lang { class String; } } }
namespace j2cpp { namespace java { namespace lang { class Object; } } }
namespace j2cpp { namespace com { namespace tencent { namespace mm { namespace sdk { namespace openapi { class BaseReq; } } } } } }
namespace j2cpp { namespace com { namespace tencent { namespace mm { namespace sdk { namespace openapi { class BaseResp; } } } } } }
#include <android/os/Bundle.hpp>
#include <com/tencent/mm/sdk/openapi/BaseReq.hpp>
#include <com/tencent/mm/sdk/openapi/BaseResp.hpp>
#include <java/lang/Object.hpp>
#include <java/lang/String.hpp>
namespace j2cpp {
namespace com { namespace tencent { namespace mm { namespace sdk { namespace openapi {
class SendAuth;
namespace SendAuth_ {
// C++ proxy for the Java inner class com.tencent.mm.sdk.openapi.SendAuth.Req.
// Generated by java2cpp: the J2CPP_DECLARE_* macros record the JNI
// class/method/field signatures that are resolved in the implementation part.
class Req;

class Req
    : public object<Req>
{
public:
    J2CPP_DECLARE_CLASS
    J2CPP_DECLARE_METHOD(0)
    J2CPP_DECLARE_METHOD(1)
    J2CPP_DECLARE_METHOD(2)
    J2CPP_DECLARE_METHOD(3)
    J2CPP_DECLARE_METHOD(4)
    J2CPP_DECLARE_METHOD(5)
    J2CPP_DECLARE_FIELD(0)
    J2CPP_DECLARE_FIELD(1)

    // Wraps an existing JNI object reference; field proxies share the same
    // underlying jobject.
    explicit Req(jobject jobj)
    : object<Req>(jobj)
    , scope(jobj)
    , state(jobj)
    {
    }

    // Implicit upcasts to the Java base types of SendAuth.Req.
    operator local_ref<java::lang::Object>() const;
    operator local_ref<com::tencent::mm::sdk::openapi::BaseReq>() const;

    Req();
    Req(local_ref< android::os::Bundle > const&);
    jint getType();
    void fromBundle(local_ref< android::os::Bundle > const&);
    void toBundle(local_ref< android::os::Bundle > const&);

    // Proxies for the Java `scope` and `state` String fields of the request.
    field< J2CPP_CLASS_NAME, J2CPP_FIELD_NAME(0), J2CPP_FIELD_SIGNATURE(0), local_ref< java::lang::String > > scope;
    field< J2CPP_CLASS_NAME, J2CPP_FIELD_NAME(1), J2CPP_FIELD_SIGNATURE(1), local_ref< java::lang::String > > state;
}; //class Req
// C++ proxy for the Java inner class com.tencent.mm.sdk.openapi.SendAuth.Resp
// (the response to an auth request). Generated by java2cpp.
class Resp;

class Resp
    : public object<Resp>
{
public:
    J2CPP_DECLARE_CLASS
    J2CPP_DECLARE_METHOD(0)
    J2CPP_DECLARE_METHOD(1)
    J2CPP_DECLARE_METHOD(2)
    J2CPP_DECLARE_METHOD(3)
    J2CPP_DECLARE_METHOD(4)
    J2CPP_DECLARE_METHOD(5)
    J2CPP_DECLARE_FIELD(0)
    J2CPP_DECLARE_FIELD(1)
    J2CPP_DECLARE_FIELD(2)
    J2CPP_DECLARE_FIELD(3)
    J2CPP_DECLARE_FIELD(4)

    // Wraps an existing JNI object reference; each field proxy is bound to
    // the same underlying jobject.
    explicit Resp(jobject jobj)
    : object<Resp>(jobj)
    , userName(jobj)
    , token(jobj)
    , expireDate(jobj)
    , state(jobj)
    , resultUrl(jobj)
    {
    }

    // Implicit upcasts to the Java base types of SendAuth.Resp.
    operator local_ref<java::lang::Object>() const;
    operator local_ref<com::tencent::mm::sdk::openapi::BaseResp>() const;

    Resp();
    Resp(local_ref< android::os::Bundle > const&);
    jint getType();
    void fromBundle(local_ref< android::os::Bundle > const&);
    void toBundle(local_ref< android::os::Bundle > const&);

    // Proxies for the Java fields carried in the auth response.
    field< J2CPP_CLASS_NAME, J2CPP_FIELD_NAME(0), J2CPP_FIELD_SIGNATURE(0), local_ref< java::lang::String > > userName;
    field< J2CPP_CLASS_NAME, J2CPP_FIELD_NAME(1), J2CPP_FIELD_SIGNATURE(1), local_ref< java::lang::String > > token;
    field< J2CPP_CLASS_NAME, J2CPP_FIELD_NAME(2), J2CPP_FIELD_SIGNATURE(2), jint > expireDate;
    field< J2CPP_CLASS_NAME, J2CPP_FIELD_NAME(3), J2CPP_FIELD_SIGNATURE(3), local_ref< java::lang::String > > state;
    field< J2CPP_CLASS_NAME, J2CPP_FIELD_NAME(4), J2CPP_FIELD_SIGNATURE(4), local_ref< java::lang::String > > resultUrl;
}; //class Resp
: public object<SendAuth>
{
public:
J2CPP_DECLARE_CLASS
J2CPP_DECLARE_METHOD(0)
typedef SendAuth_::Req Req;
typedef SendAuth_::Resp Resp;
explicit SendAuth(jobject jobj)
: object<SendAuth>(jobj)
{
}
operator local_ref<java::lang::Object>() const;
}; //class SendAuth
} //namespace openapi
} //namespace sdk
} //namespace mm
} //namespace tencent
} //namespace com
} //namespace j2cpp
#endif //J2CPP_COM_TENCENT_MM_SDK_OPENAPI_SENDAUTH_HPP_DECL
#else //J2CPP_INCLUDE_IMPLEMENTATION
#ifndef J2CPP_COM_TENCENT_MM_SDK_OPENAPI_SENDAUTH_HPP_IMPL
#define J2CPP_COM_TENCENT_MM_SDK_OPENAPI_SENDAUTH_HPP_IMPL
namespace j2cpp {
com::tencent::mm::sdk::openapi::SendAuth_::Req::operator local_ref<java::lang::Object>() const
{
return local_ref<java::lang::Object>(get_jobject());
}
com::tencent::mm::sdk::openapi::SendAuth_::Req::operator local_ref<com::tencent::mm::sdk::openapi::BaseReq>() const
{
return local_ref<com::tencent::mm::sdk::openapi::BaseReq>(get_jobject());
}
com::tencent::mm::sdk::openapi::SendAuth_::Req::Req()
: object<com::tencent::mm::sdk::openapi::SendAuth_::Req>(
call_new_object<
com::tencent::mm::sdk::openapi::SendAuth_::Req::J2CPP_CLASS_NAME,
com::tencent::mm::sdk::openapi::SendAuth_::Req::J2CPP_METHOD_NAME(0),
com::tencent::mm::sdk::openapi::SendAuth_::Req::J2CPP_METHOD_SIGNATURE(0)
>()
)
, scope(get_jobject())
, state(get_jobject())
{
}
com::tencent::mm::sdk::openapi::SendAuth_::Req::Req(local_ref< android::os::Bundle > const &a0)
: object<com::tencent::mm::sdk::openapi::SendAuth_::Req>(
call_new_object<
com::tencent::mm::sdk::openapi::SendAuth_::Req::J2CPP_CLASS_NAME,
com::tencent::mm::sdk::openapi::SendAuth_::Req::J2CPP_METHOD_NAME(1),
com::tencent::mm::sdk::openapi::SendAuth_::Req::J2CPP_METHOD_SIGNATURE(1)
>(a0)
)
, scope(get_jobject())
, state(get_jobject())
{
}
jint com::tencent::mm::sdk::openapi::SendAuth_::Req::getType()
{
return call_method<
com::tencent::mm::sdk::openapi::SendAuth_::Req::J2CPP_CLASS_NAME,
com::tencent::mm::sdk::openapi::SendAuth_::Req::J2CPP_METHOD_NAME(2),
com::tencent::mm::sdk::openapi::SendAuth_::Req::J2CPP_METHOD_SIGNATURE(2),
jint
>(get_jobject());
}
void com::tencent::mm::sdk::openapi::SendAuth_::Req::fromBundle(local_ref< android::os::Bundle > const &a0)
{
return call_method<
com::tencent::mm::sdk::openapi::SendAuth_::Req::J2CPP_CLASS_NAME,
com::tencent::mm::sdk::openapi::SendAuth_::Req::J2CPP_METHOD_NAME(3),
com::tencent::mm::sdk::openapi::SendAuth_::Req::J2CPP_METHOD_SIGNATURE(3),
void
>(get_jobject(), a0);
}
void com::tencent::mm::sdk::openapi::SendAuth_::Req::toBundle(local_ref< android::os::Bundle > const &a0)
{
return call_method<
com::tencent::mm::sdk::openapi::SendAuth_::Req::J2CPP_CLASS_NAME,
com::tencent::mm::sdk::openapi::SendAuth_::Req::J2CPP_METHOD_NAME(4),
com::tencent::mm::sdk::openapi::SendAuth_::Req::J2CPP_METHOD_SIGNATURE(4),
void
>(get_jobject(), a0);
}
J2CPP_DEFINE_CLASS(com::tencent::mm::sdk::openapi::SendAuth_::Req,"com/tencent/mm/sdk/openapi/SendAuth$Req")
J2CPP_DEFINE_METHOD(com::tencent::mm::sdk::openapi::SendAuth_::Req,0,"<init>","()V")
J2CPP_DEFINE_METHOD(com::tencent::mm::sdk::openapi::SendAuth_::Req,1,"<init>","(Landroid/os/Bundle;)V")
J2CPP_DEFINE_METHOD(com::tencent::mm::sdk::openapi::SendAuth_::Req,2,"getType","()I")
J2CPP_DEFINE_METHOD(com::tencent::mm::sdk::openapi::SendAuth_::Req,3,"fromBundle","(Landroid/os/Bundle;)V")
J2CPP_DEFINE_METHOD(com::tencent::mm::sdk::openapi::SendAuth_::Req,4,"toBundle","(Landroid/os/Bundle;)V")
J2CPP_DEFINE_METHOD(com::tencent::mm::sdk::openapi::SendAuth_::Req,5,"checkArgs","()Z")
J2CPP_DEFINE_FIELD(com::tencent::mm::sdk::openapi::SendAuth_::Req,0,"scope","Ljava/lang/String;")
J2CPP_DEFINE_FIELD(com::tencent::mm::sdk::openapi::SendAuth_::Req,1,"state","Ljava/lang/String;")
com::tencent::mm::sdk::openapi::SendAuth_::Resp::operator local_ref<java::lang::Object>() const
{
return local_ref<java::lang::Object>(get_jobject());
}
com::tencent::mm::sdk::openapi::SendAuth_::Resp::operator local_ref<com::tencent::mm::sdk::openapi::BaseResp>() const
{
return local_ref<com::tencent::mm::sdk::openapi::BaseResp>(get_jobject());
}
com::tencent::mm::sdk::openapi::SendAuth_::Resp::Resp()
: object<com::tencent::mm::sdk::openapi::SendAuth_::Resp>(
call_new_object<
com::tencent::mm::sdk::openapi::SendAuth_::Resp::J2CPP_CLASS_NAME,
com::tencent::mm::sdk::openapi::SendAuth_::Resp::J2CPP_METHOD_NAME(0),
com::tencent::mm::sdk::openapi::SendAuth_::Resp::J2CPP_METHOD_SIGNATURE(0)
>()
)
, userName(get_jobject())
, token(get_jobject())
, expireDate(get_jobject())
, state(get_jobject())
, resultUrl(get_jobject())
{
}
com::tencent::mm::sdk::openapi::SendAuth_::Resp::Resp(local_ref< android::os::Bundle > const &a0)
: object<com::tencent::mm::sdk::openapi::SendAuth_::Resp>(
call_new_object<
com::tencent::mm::sdk::openapi::SendAuth_::Resp::J2CPP_CLASS_NAME,
com::tencent::mm::sdk::openapi::SendAuth_::Resp::J2CPP_METHOD_NAME(1),
com::tencent::mm::sdk::openapi::SendAuth_::Resp::J2CPP_METHOD_SIGNATURE(1)
>(a0)
)
, userName(get_jobject())
, token(get_jobject())
, expireDate(get_jobject())
, state(get_jobject())
, resultUrl(get_jobject())
{
}
jint com::tencent::mm::sdk::openapi::SendAuth_::Resp::getType()
{
return call_method<
com::tencent::mm::sdk::openapi::SendAuth_::Resp::J2CPP_CLASS_NAME,
com::tencent::mm::sdk::openapi::SendAuth_::Resp::J2CPP_METHOD_NAME(2),
com::tencent::mm::sdk::openapi::SendAuth_::Resp::J2CPP_METHOD_SIGNATURE(2),
jint
>(get_jobject());
}
void com::tencent::mm::sdk::openapi::SendAuth_::Resp::fromBundle(local_ref< android::os::Bundle > const &a0)
{
return call_method<
com::tencent::mm::sdk::openapi::SendAuth_::Resp::J2CPP_CLASS_NAME,
com::tencent::mm::sdk::openapi::SendAuth_::Resp::J2CPP_METHOD_NAME(3),
com::tencent::mm::sdk::openapi::SendAuth_::Resp::J2CPP_METHOD_SIGNATURE(3),
void
>(get_jobject(), a0);
}
void com::tencent::mm::sdk::openapi::SendAuth_::Resp::toBundle(local_ref< android::os::Bundle > const &a0)
{
return call_method<
com::tencent::mm::sdk::openapi::SendAuth_::Resp::J2CPP_CLASS_NAME,
com::tencent::mm::sdk::openapi::SendAuth_::Resp::J2CPP_METHOD_NAME(4),
com::tencent::mm::sdk::openapi::SendAuth_::Resp::J2CPP_METHOD_SIGNATURE(4),
void
>(get_jobject(), a0);
}
J2CPP_DEFINE_CLASS(com::tencent::mm::sdk::openapi::SendAuth_::Resp,"com/tencent/mm/sdk/openapi/SendAuth$Resp")
J2CPP_DEFINE_METHOD(com::tencent::mm::sdk::openapi::SendAuth_::Resp,0,"<init>","()V")
J2CPP_DEFINE_METHOD(com::tencent::mm::sdk::openapi::SendAuth_::Resp,1,"<init>","(Landroid/os/Bundle;)V")
J2CPP_DEFINE_METHOD(com::tencent::mm::sdk::openapi::SendAuth_::Resp,2,"getType","()I")
J2CPP_DEFINE_METHOD(com::tencent::mm::sdk::openapi::SendAuth_::Resp,3,"fromBundle","(Landroid/os/Bundle;)V")
J2CPP_DEFINE_METHOD(com::tencent::mm::sdk::openapi::SendAuth_::Resp,4,"toBundle","(Landroid/os/Bundle;)V")
J2CPP_DEFINE_METHOD(com::tencent::mm::sdk::openapi::SendAuth_::Resp,5,"checkArgs","()Z")
J2CPP_DEFINE_FIELD(com::tencent::mm::sdk::openapi::SendAuth_::Resp,0,"userName","Ljava/lang/String;")
J2CPP_DEFINE_FIELD(com::tencent::mm::sdk::openapi::SendAuth_::Resp,1,"token","Ljava/lang/String;")
J2CPP_DEFINE_FIELD(com::tencent::mm::sdk::openapi::SendAuth_::Resp,2,"expireDate","I")
J2CPP_DEFINE_FIELD(com::tencent::mm::sdk::openapi::SendAuth_::Resp,3,"state","Ljava/lang/String;")
J2CPP_DEFINE_FIELD(com::tencent::mm::sdk::openapi::SendAuth_::Resp,4,"resultUrl","Ljava/lang/String;")
com::tencent::mm::sdk::openapi::SendAuth::operator local_ref<java::lang::Object>() const
{
return local_ref<java::lang::Object>(get_jobject());
}
J2CPP_DEFINE_CLASS(com::tencent::mm::sdk::openapi::SendAuth,"com/tencent/mm/sdk/openapi/SendAuth")
J2CPP_DEFINE_METHOD(com::tencent::mm::sdk::openapi::SendAuth,0,"<init>","()V")
} //namespace j2cpp
#endif //J2CPP_COM_TENCENT_MM_SDK_OPENAPI_SENDAUTH_HPP_IMPL
#endif //J2CPP_INCLUDE_IMPLEMENTATION<|fim▁end|> | } //namespace SendAuth_
|
<|file_name|>dim_helpers.py<|end_file_name|><|fim▁begin|>from __future__ import absolute_import, division, print_function
from dynd._pydynd import w_type, \
make_var_dim, make_strided_dim, make_fixed_dim, make_cfixed_dim
__all__ = ['var', 'strided', 'fixed', 'cfixed']
class _Dim(object):
__slots__ = []
def __mul__(self, rhs):
if isinstance(rhs, w_type):
# Apply all the dimensions to get
# produce a type
for dim in reversed(self.dims):
rhs = dim.create(rhs)
return rhs
elif isinstance(rhs, (str, type)):
# Allow:
# ndt.strided * 'int32'
# ndt.strided * int
rhs = w_type(rhs)
for dim in reversed(self.dims):
rhs = dim.create(rhs)
return rhs
elif isinstance(rhs, _Dim):
# Combine the dimension fragments
return _DimFragment(self.dims + rhs.dims)
else:
raise TypeError('Expected a dynd dimension or type, not %r' % rhs)
def __pow__(self, count):
return _DimFragment(self.dims * count)
class _DimFragment(_Dim):
__slots__ = ['dims']
def __init__(self, dims):
self.dims = dims
def __repr__(self):
return ' * '.join(repr(dim) for dim in self.dims)
class _Var(_Dim):
"""
Creates a var dimension when combined with other types.
Examples
--------
>>> ndt.var * ndt.int32
ndt.type('var * int32')
>>> ndt.fixed[5] * ndt.var * ndt.float64
ndt.type('5 * var * float64')
"""
__slots__ = []
@property
def dims(self):
return (self,)
def create(self, eltype):
return make_var_dim(eltype)
def __repr__(self):
return 'ndt.var'
class _Strided(_Dim):
"""
Creates a strided dimension when combined with other types.
Examples
--------
>>> ndt.strided * ndt.int32
ndt.type('strided * int32')<|fim▁hole|> __slots__ = []
@property
def dims(self):
return (self,)
def create(self, eltype):
return make_strided_dim(eltype)
def __repr__(self):
return 'ndt.strided'
class _Fixed(_Dim):
"""
Creates a fixed dimension when combined with other types.
Examples
--------
>>> ndt.fixed[3] * ndt.int32
ndt.type('3 * int32')
>>> ndt.fixed[5] * ndt.var * ndt.float64
ndt.type('5 * var * float64')
"""
__slots__ = ['dim_size']
def __init__(self, dim_size = None):
self.dim_size = dim_size
@property
def dims(self):
if self.dim_size is not None:
return (self,)
else:
raise TypeError('Need to specify ndt.fixed[dim_size],' +
' not just ndt.fixed')
def create(self, eltype):
return make_fixed_dim(self.dim_size, eltype)
def __getitem__(self, dim_size):
return _Fixed(dim_size)
def __repr__(self):
if self.dim_size is not None:
return 'ndt.fixed[%d]' % self.dim_size
else:
return 'ndt.fixed'
class _CFixed(_Dim):
"""
Creates a cfixed dimension when combined with other types.
Examples
--------
>>> ndt.cfixed[3] * ndt.int32
ndt.type('cfixed[3] * int32')
>>> ndt.fixed[5] * ndt.cfixed[2] * ndt.float64
ndt.type('5 * cfixed[2] * float64')
"""
__slots__ = ['dim_size']
def __init__(self, dim_size = None):
self.dim_size = dim_size
@property
def dims(self):
if self.dim_size is not None:
return (self,)
else:
raise TypeError('Need to specify ndt.cfixed[dim_size],' +
' not just ndt.cfixed')
def create(self, eltype):
return make_cfixed_dim(self.dim_size, eltype)
def __getitem__(self, dim_size):
return _CFixed(dim_size)
def __repr__(self):
if self.dim_size is not None:
return 'ndt.cfixed[%d]' % self.dim_size
else:
return 'ndt.cfixed'
var = _Var()
strided = _Strided()
fixed = _Fixed()
cfixed = _CFixed()<|fim▁end|> | >>> ndt.fixed[5] * ndt.strided * ndt.float64
ndt.type('5 * strided * float64')
""" |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.