prompt (large_string, lengths 70 to 991k) | completion (large_string, lengths 0 to 1.02k)
---|---
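Each row below is one fill-in-the-middle (FIM) training example: the prompt column holds a single source file wrapped in <|fim▁begin|> / <|fim▁end|> markers with a <|fim▁hole|> marker where a span was cut out, and the completion column (after the | separator) holds that cut-out span. Assuming this standard FIM convention, a minimal Python sketch for rebuilding the original file from one row (the function name and the string handling are illustrative, not part of the dataset):

# Minimal sketch: reconstruct a source file from one (prompt, completion) row,
# assuming the completion is exactly the text elided at the <|fim▁hole|> marker.
def reassemble(prompt: str, completion: str) -> str:
    body = prompt.split("<|fim▁begin|>", 1)[1]        # drop the <|file_name|> header
    body = body.rsplit("<|fim▁end|>", 1)[0]           # drop the closing marker
    prefix, suffix = body.split("<|fim▁hole|>", 1)    # split around the hole
    return prefix + completion + suffix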
<|file_name|>boxplot.py<|end_file_name|><|fim▁begin|>import numpy as np
import pandas as pd
from bokeh.plotting import *
# Generate some synthetic time series for six different categories
cats = list("abcdef")
y = np.random.randn(2000)
g = np.random.choice(cats, 2000)
for i, l in enumerate(cats):
y[g == l] += i // 2
df = pd.DataFrame(dict(score=y, group=g))
# Find the quartiles, IQR, and outliers for each category
groups = df.groupby('group')<|fim▁hole|>upper = q2 + 1.5*iqr
lower = q2 - 1.5*iqr
def outliers(group):
cat = group.name
return group[(group.score > upper.loc[cat][0]) | (group.score < lower.loc[cat][0])]['score']
out = groups.apply(outliers).dropna()
# Prepare outlier data for plotting: we need an x (categorical) and a y (numeric)
# coordinate for every outlier.
outx = []
outy = []
for cat in cats:
for value in out[cat]:
outx.append(cat)
outy.append(value)
# EXERCISE: output static HTML file
# EXERCISE: turn on plot hold
# Draw the upper segment extending from the box plot using `segment` which
# takes x0, x1 and y0, y1 as data
segment(cats, upper.score, cats, q3.score, x_range=cats, line_width=2,
tools="", background_fill="#EFE8E2", line_color="black", title="")
# EXERCISE: draw the lower segment
# Draw the upper box of the box plot using `rect`
rect(cats, (q3.score+q2.score)/2, 0.7, q3.score-q2.score,
fill_color="#E08E79", line_width=2, line_color="black")
# EXERCISE: use `rect` to draw the bottom box with a different color
# OK here we use `rect` to draw the whiskers. It's slightly cheating, but it's
# easier than using segments or lines, since we can specify widths simply with
# categorical percentage units
rect(cats, lower.score, 0.2, 0, line_color="black")
rect(cats, upper.score, 0.2, 0, line_color="black")
# EXERCISE: use `circle` to draw the outliers
# EXERCISE: use grid(), axis(), etc. to style the plot. Some suggestions:
# - remove the X grid lines, change the Y grid line color
# - make the tick labels bigger
xgrid().grid_line_color = None
ygrid().grid_line_color = "white"
ygrid().grid_line_width = 2
xaxis().major_label_text_font_size="12pt"
show()<|fim▁end|> | q1 = groups.quantile(q=0.25)
q2 = groups.quantile(q=0.5)
q3 = groups.quantile(q=0.75)
iqr = q3 - q1 |
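The hole in the row above covers the per-group quartile computation. Below is a minimal standalone sketch of the same statistics, assuming only pandas; note that the snippet above anchors both whisker fences on the median q2, while the conventional Tukey rule shown here uses q1 and q3.

# Sketch: per-group quartiles, IQR and Tukey fences for box-plot whiskers/outliers.
import pandas as pd

df = pd.DataFrame({"group": list("aabbbb"), "score": [1.0, 2.0, 9.0, 10.0, 11.0, 30.0]})
groups = df.groupby("group")["score"]
q1, q3 = groups.quantile(0.25), groups.quantile(0.75)
iqr = q3 - q1
upper = q3 + 1.5 * iqr      # conventional upper fence
lower = q1 - 1.5 * iqr      # conventional lower fence
is_outlier = (df["score"] > df["group"].map(upper)) | (df["score"] < df["group"].map(lower))
print(df[is_outlier])       # 30.0 in group "b" falls outside its fences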
<|file_name|>gothamadsBidAdapter.js<|end_file_name|><|fim▁begin|>import { registerBidder } from '../src/adapters/bidderFactory.js';
import { BANNER, NATIVE, VIDEO } from '../src/mediaTypes.js';
import * as utils from '../src/utils.js';
import { config } from '../src/config.js';
const BIDDER_CODE = 'gothamads';
const ACCOUNTID_MACROS = '[account_id]';
const URL_ENDPOINT = `https://us-e-node1.gothamads.com/bid?pass=${ACCOUNTID_MACROS}&integration=prebidjs`;
const NATIVE_ASSET_IDS = {
0: 'title',
2: 'icon',
3: 'image',
5: 'sponsoredBy',
4: 'body',
1: 'cta'
};
const NATIVE_PARAMS = {
title: {
id: 0,
name: 'title'
},
icon: {
id: 2,
type: 1,
name: 'img'
},
image: {
id: 3,
type: 3,
name: 'img'
},
sponsoredBy: {
id: 5,
name: 'data',
type: 1
},
body: {
id: 4,
name: 'data',
type: 2
},
cta: {
id: 1,
type: 12,
name: 'data'
}
};
const NATIVE_VERSION = '1.2';
export const spec = {
code: BIDDER_CODE,
supportedMediaTypes: [BANNER, VIDEO, NATIVE],
/**
* Determines whether or not the given bid request is valid.
*
* @param {object} bid The bid to validate.
* @return boolean True if this is a valid bid, and false otherwise.
*/
isBidRequestValid: (bid) => {
return Boolean(bid.params.accountId) && Boolean(bid.params.placementId)
},
/**
* Make a server request from the list of BidRequests.
*
* @param {BidRequest[]} validBidRequests A non-empty list of valid bid requests that should be sent to the Server.
* @return ServerRequest Info describing the request to the server.
*/
buildRequests: (validBidRequests, bidderRequest) => {
if (validBidRequests && validBidRequests.length === 0) return []
let accountId = validBidRequests[0].params.accountId;
const endpointURL = URL_ENDPOINT.replace(ACCOUNTID_MACROS, accountId);
let winTop = window;
let location;
try {
location = new URL(bidderRequest.refererInfo.referer)
winTop = window.top;
} catch (e) {
location = winTop.location;
utils.logMessage(e);
};
let bids = [];
for (let bidRequest of validBidRequests) {
let impObject = prepareImpObject(bidRequest);
let data = {
id: bidRequest.bidId,
test: config.getConfig('debug') ? 1 : 0,
cur: ['USD'],
device: {
w: winTop.screen.width,
h: winTop.screen.height,
language: (navigator && navigator.language) ? navigator.language.indexOf('-') != -1 ? navigator.language.split('-')[0] : navigator.language : '',
},
site: {
page: location.pathname,
host: location.host
},
source: {
tid: bidRequest.transactionId
},
regs: {
coppa: config.getConfig('coppa') === true ? 1 : 0,
ext: {}
},
tmax: bidRequest.timeout,
imp: [impObject],
};
if (bidRequest.gdprConsent && bidRequest.gdprConsent.gdprApplies) {
utils.deepSetValue(data, 'regs.ext.gdpr', bidRequest.gdprConsent.gdprApplies ? 1 : 0);
utils.deepSetValue(data, 'user.ext.consent', bidRequest.gdprConsent.consentString);
}
if (bidRequest.uspConsent !== undefined) {<|fim▁hole|> bids.push(data)
}
return {
method: 'POST',
url: endpointURL,
data: bids
};
},
/**
* Unpack the response from the server into a list of bids.
*
* @param {*} serverResponse A successful response from the server.
* @return {Bid[]} An array of bids which were nested inside the server.
*/
interpretResponse: (serverResponse) => {
if (!serverResponse || !serverResponse.body) return []
let GothamAdsResponse = serverResponse.body;
let bids = [];
for (let response of GothamAdsResponse) {
let mediaType = response.seatbid[0].bid[0].ext && response.seatbid[0].bid[0].ext.mediaType ? response.seatbid[0].bid[0].ext.mediaType : BANNER;
let bid = {
requestId: response.id,
cpm: response.seatbid[0].bid[0].price,
width: response.seatbid[0].bid[0].w,
height: response.seatbid[0].bid[0].h,
ttl: response.ttl || 1200,
currency: response.cur || 'USD',
netRevenue: true,
creativeId: response.seatbid[0].bid[0].crid,
dealId: response.seatbid[0].bid[0].dealid,
mediaType: mediaType
};
bid.meta = {};
if (response.seatbid[0].bid[0].adomain && response.seatbid[0].bid[0].adomain.length > 0) {
bid.meta.advertiserDomains = response.seatbid[0].bid[0].adomain;
}
switch (mediaType) {
case VIDEO:
bid.vastXml = response.seatbid[0].bid[0].adm;
bid.vastUrl = response.seatbid[0].bid[0].ext.vastUrl;
break;
case NATIVE:
bid.native = parseNative(response.seatbid[0].bid[0].adm);
break;
default:
bid.ad = response.seatbid[0].bid[0].adm;
}
bids.push(bid);
}
return bids;
},
};
/**
* Determine type of request
*
* @param bidRequest
* @param type
* @returns {boolean}
*/
const checkRequestType = (bidRequest, type) => {
return (typeof utils.deepAccess(bidRequest, `mediaTypes.${type}`) !== 'undefined');
}
const parseNative = admObject => {
const {
assets,
link,
imptrackers,
jstracker
} = admObject.native;
const result = {
clickUrl: link.url,
clickTrackers: link.clicktrackers || undefined,
impressionTrackers: imptrackers || undefined,
javascriptTrackers: jstracker ? [jstracker] : undefined
};
assets.forEach(asset => {
const kind = NATIVE_ASSET_IDS[asset.id];
const content = kind && asset[NATIVE_PARAMS[kind].name];
if (content) {
result[kind] = content.text || content.value || {
url: content.url,
width: content.w,
height: content.h
};
}
});
return result;
}
const prepareImpObject = (bidRequest) => {
let impObject = {
id: bidRequest.transactionId,
secure: 1,
ext: {
placementId: bidRequest.params.placementId
}
};
if (checkRequestType(bidRequest, BANNER)) {
impObject.banner = addBannerParameters(bidRequest);
}
if (checkRequestType(bidRequest, VIDEO)) {
impObject.video = addVideoParameters(bidRequest);
}
if (checkRequestType(bidRequest, NATIVE)) {
impObject.native = {
ver: NATIVE_VERSION,
request: addNativeParameters(bidRequest)
};
}
return impObject
};
const addNativeParameters = bidRequest => {
let impObject = {
id: bidRequest.transactionId,
ver: NATIVE_VERSION,
};
const assets = utils._map(bidRequest.mediaTypes.native, (bidParams, key) => {
const props = NATIVE_PARAMS[key];
const asset = {
required: bidParams.required & 1,
};
if (props) {
asset.id = props.id;
let wmin, hmin;
let aRatios = bidParams.aspect_ratios;
if (aRatios && aRatios[0]) {
aRatios = aRatios[0];
wmin = aRatios.min_width || 0;
hmin = aRatios.ratio_height * wmin / aRatios.ratio_width | 0;
}
if (bidParams.sizes) {
const sizes = flatten(bidParams.sizes);
wmin = sizes[0];
hmin = sizes[1];
}
asset[props.name] = {}
if (bidParams.len) asset[props.name]['len'] = bidParams.len;
if (props.type) asset[props.name]['type'] = props.type;
if (wmin) asset[props.name]['wmin'] = wmin;
if (hmin) asset[props.name]['hmin'] = hmin;
return asset;
}
}).filter(Boolean);
impObject.assets = assets;
return impObject
}
const addBannerParameters = (bidRequest) => {
let bannerObject = {};
const size = parseSizes(bidRequest, 'banner');
bannerObject.w = size[0];
bannerObject.h = size[1];
return bannerObject;
};
const parseSizes = (bid, mediaType) => {
let mediaTypes = bid.mediaTypes;
if (mediaType === 'video') {
let size = [];
if (mediaTypes.video && mediaTypes.video.w && mediaTypes.video.h) {
size = [
mediaTypes.video.w,
mediaTypes.video.h
];
} else if (Array.isArray(utils.deepAccess(bid, 'mediaTypes.video.playerSize')) && bid.mediaTypes.video.playerSize.length === 1) {
size = bid.mediaTypes.video.playerSize[0];
} else if (Array.isArray(bid.sizes) && bid.sizes.length > 0 && Array.isArray(bid.sizes[0]) && bid.sizes[0].length > 1) {
size = bid.sizes[0];
}
return size;
}
let sizes = [];
if (Array.isArray(mediaTypes.banner.sizes)) {
sizes = mediaTypes.banner.sizes[0];
} else if (Array.isArray(bid.sizes) && bid.sizes.length > 0) {
sizes = bid.sizes
} else {
utils.logWarn('no sizes are setup or found');
}
return sizes
}
const addVideoParameters = (bidRequest) => {
let videoObj = {};
let supportParamsList = ['mimes', 'minduration', 'maxduration', 'protocols', 'startdelay', 'placement', 'skip', 'skipafter', 'minbitrate', 'maxbitrate', 'delivery', 'playbackmethod', 'api', 'linearity']
for (let param of supportParamsList) {
if (bidRequest.mediaTypes.video[param] !== undefined) {
videoObj[param] = bidRequest.mediaTypes.video[param];
}
}
const size = parseSizes(bidRequest, 'video');
videoObj.w = size[0];
videoObj.h = size[1];
return videoObj;
}
const flatten = arr => {
return [].concat(...arr);
}
registerBidder(spec);<|fim▁end|> | utils.deepSetValue(data, 'regs.ext.us_privacy', bidRequest.uspConsent);
}
|
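One detail in the native-asset sizing above is easy to miss: in addNativeParameters, hmin is computed as ratio_height * wmin / ratio_width | 0, where the trailing | 0 is JavaScript's usual bitwise-OR trick for truncating the result to an integer. A quick check of the same arithmetic in Python, with illustrative numbers rather than values taken from the adapter:

# Worked example of the hmin computation: a 300px minimum width at a 4:3 aspect ratio.
wmin = 300
ratio_width, ratio_height = 4, 3
hmin = int(ratio_height * wmin / ratio_width)   # int() plays the role of "| 0" -> 225
print(hmin)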
<|file_name|>word2pinyin.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# PEP 8 check with Pylint
"""Word to pinyin.
"""
from numpy import mat, zeros, where
from pypinyin import pinyin, lazy_pinyin
# from .mytools import time_me
def sum_cosine(matrix, threshold):
"""Calculate the parameters of the semantic Jaccard model based on the
Cosine similarity matrix of semantic word segmentation.
根据语义分词Cosine相似性矩阵计算语义 jaccard 模型的各个参数。
Args:
matrix: Semantic Cosine similarity matrix. 语义分词Cosine相似性矩阵。
threshold: Threshold for semantic matching. 达到语义匹配标准的阈值。
Returns:
total: The semantic intersection of two sentence language fragments.
两个句子语言片段组成集合的语义交集。
num_not_match: The total number of fragments or the maximum value of two sets
that do not meet the semantic matching criteria controlled by the threshold.
两个集合中没有达到语义匹配标准(由阈值threshold控制)的总片段个数或者两者中取最大值。
total_dif: The degree of semantic difference between two sets.
两个集合的语义差异程度。
"""
total = 0
count = 0
row = matrix.shape[0]
col = matrix.shape[1]
<|fim▁hole|> total += max_score
count += 1
pos = where(matrix == max_score)
i = pos[0][0]
j = pos[1][0]
matrix[i, :] = zero_row
matrix[:, j] = zero_col
max_score = matrix.max()
num = (row - count) if row > col else (col - count)
return dict(total=total, num_not_match=num, total_dif=max_score)
def match_pinyin(pinyin1, pinyin2):
"""Similarity score between two pinyin.
计算两个拼音的相似度得分。
"""
assert pinyin1 != "", "pinyin1 can not be empty"
assert pinyin2 != "", "pinyin2 can not be empty"
pv_match = 0
if len(pinyin1) < len(pinyin2):
len_short = len(pinyin1)
len_long = len(pinyin2)
pv_long = pinyin2
pv_short = pinyin1
else:
len_short = len(pinyin2)
len_long = len(pinyin1)
pv_long = pinyin1
pv_short = pinyin2
for i in range(0, len_short):
if pv_short[i] == pv_long[i]:
pv_match += 1
score = pv_match/len_long
return score
def jaccard_pinyin(pv1, pv2, threshold=0.7):
"""Similarity score between two pinyin vectors with jaccard.
计算两个拼音向量的语义 jaccard 相似度得分。
According to the semantic jaccard model to calculate the similarity.
The similarity score interval for each two pinyin sentences was [0, 1].
根据语义jaccard模型来计算相似度。每两个拼音向量的相似度得分区间为为[0, 1]。
"""
sv_matrix = []
sv_rows = []
for pinyin1 in pv1:
for pinyin2 in pv2:
score = match_pinyin(pinyin1, pinyin2)
sv_rows.append(score)
sv_matrix.append(sv_rows)
sv_rows = []
matrix = mat(sv_matrix)
result = sum_cosine(matrix, threshold)
total = result["total"]
total_dif = result["total_dif"]
num = result["num_not_match"]
sim = total/(total + num*(1-total_dif))
return sim
def pinyin_cut(sentence, pattern=None):
"""Cut the sentence into phonetic vectors.
将句子切分为拼音向量。
"""
return lazy_pinyin(sentence)
# @time_me()
def similarity_pinyin(sentence1, sentence2):
"""Similarity score between two based on pinyin vectors with jaccard.
基于拼音向量的语义 jaccard 句子相似度得分。
"""
pv1 = pinyin_cut(sentence1)
pv2 = pinyin_cut(sentence2)
return jaccard_pinyin(pv1, pv2)
if __name__ == '__main__':
print(similarity_pinyin("我想办理粤通卡", "办理悦通卡"))<|fim▁end|> | zero_row = zeros([1, col])
zero_col = zeros([row, 1])
max_score = matrix.max()
while max_score > threshold:
|
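To make the scoring above concrete, here is a hand-worked instance of the semantic-Jaccard formula sim = total / (total + num * (1 - total_dif)) from jaccard_pinyin; the pinyin vectors are illustrative inputs, not taken from the file:

# Worked example for jaccard_pinyin(['yue', 'tong', 'ka'], ['yue', 'tong', 'kan'], threshold=0.7):
# match_pinyin scores 'yue'~'yue' = 1.0 and 'tong'~'tong' = 1.0 (above the 0.7 threshold),
# while 'ka'~'kan' = 2/3 stays below it, so that fragment counts as unmatched.
total = 1.0 + 1.0                 # sum of the matched pair scores
num = 3 - 2                       # fragments left unmatched on the larger side
total_dif = 2 / 3                 # best remaining sub-threshold score
sim = total / (total + num * (1 - total_dif))
print(round(sim, 3))              # ~0.857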
<|file_name|>qp_decorators.py<|end_file_name|><|fim▁begin|>from decorator import decorator
from inspect import getargspec
# ------------------------------------------------------------------------
# decorators
# ------------------------------------------------------------------------
def lazy_property(func):
"""Decorator that makes a property lazy-evaluated.
"""
attr_name = '_lazy_' + func.__name__
@property
def _lazy_property(self):
if not hasattr(self, attr_name):
setattr(self, attr_name, func(self))
return getattr(self, attr_name)
return _lazy_property
def verify(variables=None, categorical=None, text_keys=None, axis=None, is_str=None):
"""
Decorator to verify arguments.
"""
@decorator
def _var_in_ds(func, *args, **kwargs):
all_args = getargspec(func)[0]
ds = args[0]
for variable, collection in variables.items():
nested = False
if collection.endswith('_nested'):
nested = True
collection = collection.split('_')[0]
# get collection for argument
if collection == 'both':
collection = ['columns', 'masks']
else:
collection = [collection]
c = [key for col in collection for key in ds._meta[col].keys()]
# get the variable argument to check
v_index = all_args.index(variable)
var = kwargs.get(variable, args[v_index])
if var is None:
return func(*args, **kwargs)
if not isinstance(var, list):
var = [var]
if nested:
valid = []
for v in var:
if ' > ' in v:
valid.extend(v.replace(' ', '').split('>'))
else:
valid.append(v)
else:
valid = var
# check the variable
not_valid = [v for v in valid if not v in c + ['@']]
if not_valid:
msg = "'{}' argument for {}() must be in {}.\n"
msg += '{} is not in {}.'
msg = msg.format(variable, func.func_name, collection,
not_valid, collection)
raise KeyError(msg)
return func(*args, **kwargs)
@decorator
def _var_is_cat(func, *args, **kwargs):
all_args = getargspec(func)[0]
ds = args[0]
for cat in categorical:
# get the variable argument to check if it is categorical
v_index = all_args.index(cat)
var = kwargs.get(cat, args[v_index])
if var is None: return func(*args, **kwargs)
if not isinstance(var, list): var = [var]
valid = []
for v in var:
if ' > ' in v:
valid.extend(v.replace(' ', '').split('>'))
elif not '@' == v:
valid.append(v)
# check if variables are categorical
not_cat = [v for v in valid if not ds._has_categorical_data(v)]
if not_cat:
msg = "'{}' argument for {}() must reference categorical "
msg += 'variable.\n {} is not categorical.'
msg = msg.format(cat, func.func_name, not_cat)
raise ValueError(msg)
return func(*args, **kwargs)
@decorator
def _verify_text_key(func, *args, **kwargs):
all_args = getargspec(func)[0]
ds = args[0]
for text_key in text_keys:
# get the text_key argument to check
tk_index = all_args.index(text_key)
tks = kwargs.get(text_key, args[tk_index])
if tks is None: return func(*args, **kwargs)
if not isinstance(tks, list): tks = [tks]
# check the text_key
valid_tks = ds.valid_tks
not_supported = [tk for tk in tks if not tk in valid_tks]
if not_supported:
msg = "{} is not a valid text_key! Supported are: \n {}"
raise ValueError(msg.format(not_supported, valid_tks))
return func(*args, **kwargs)
@decorator
def _verify_axis(func, *args, **kwargs):
# get the axis argument to check
all_args = getargspec(func)[0]
ax_index = all_args.index(axis)
a_edit = kwargs.get(axis, args[ax_index])
if a_edit is None: return func(*args, **kwargs)
if not isinstance(a_edit, list): a_edit = [a_edit]
# check the axis
valid_ax = ['x', 'y']
not_supported = [ax for ax in a_edit if not ax in valid_ax]
if not_supported:
msg = "{} is not a valid axis! Supported are: {}"
raise ValueError(msg.format(not_supported, valid_ax))
return func(*args, **kwargs)
@decorator
def _is_str(func, *args, **kwargs):
all_args = getargspec(func)[0]
for val in is_str:
# get the arguments to modify<|fim▁hole|> v = kwargs.get(val, args[val_index])
if not isinstance(v, (list, tuple)): v = [v]
if not all(isinstance(text, (str, unicode)) for text in v):
raise ValueError('Included value must be str or list of str.')
return func(*args, **kwargs)
@decorator
def _deco(func, *args, **kwargs):
p = [variables, categorical, text_keys, axis, is_str]
d = [_var_in_ds, _var_is_cat, _verify_text_key, _verify_axis, _is_str]
for arg, dec in reversed(zip(p, d)):
if arg is None: continue
func = dec(func)
return func(*args, **kwargs)
if categorical and not isinstance(categorical, list): categorical = [categorical]
if text_keys and not isinstance(text_keys, list): text_keys = [text_keys]
if is_str and not isinstance(is_str, list): is_str = [is_str]
return _deco
def modify(to_list=None):
"""
Decorator to modify arguments.
"""
@decorator
def _to_list(func, *args, **kwargs):
all_args = getargspec(func)[0]
for val in to_list:
# get the arguments to modify
val_index = all_args.index(val)
v = kwargs.get(val, args[val_index])
if v is None: v = []
if not isinstance(v, list): v = [v]
if kwargs.get(val):
kwargs[val] = v
else:
args = tuple(a if not x == val_index else v
for x, a in enumerate(args))
return func(*args, **kwargs)
if to_list:
if not isinstance(to_list, list): to_list = [to_list]
return _to_list<|fim▁end|> | val_index = all_args.index(val) |
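The modify/verify decorators above all follow the same pattern: look up an argument's position with getargspec, normalise or validate it, then call the wrapped function. A minimal self-contained sketch of the to_list coercion in modern Python (names are illustrative and the external decorator package is not used):

# Sketch of the "coerce named arguments to lists" idea behind modify(to_list=...).
import functools
import inspect

def to_list(*arg_names):
    def wrap(func):
        sig = inspect.signature(func)
        @functools.wraps(func)
        def inner(*args, **kwargs):
            bound = sig.bind(*args, **kwargs)
            bound.apply_defaults()
            for name in arg_names:
                v = bound.arguments.get(name)
                bound.arguments[name] = [] if v is None else (v if isinstance(v, list) else [v])
            return func(*bound.args, **bound.kwargs)
        return inner
    return wrap

@to_list("variables")
def describe(variables=None):
    return variables

print(describe("age"), describe())   # ['age'] []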
<|file_name|>setAuthorization.js<|end_file_name|><|fim▁begin|>import axios from 'axios'
export const setAuthorization = token => {
if (token) {
axios.defaults.headers.common['Authorization'] = `Bearer ${token}`
} else {
delete axios.defaults.headers.common['Authorization']
}<|fim▁hole|><|fim▁end|> | } |
<|file_name|>credentials.go<|end_file_name|><|fim▁begin|>package trivette
import (
"encoding/json"
"io/ioutil"
)
type Credentials struct {
User, Token string
}
func e(err error) {<|fim▁hole|> }
}
func ReadCredentials(path string) *Credentials {
raw, err := ioutil.ReadFile(path)
var credentials Credentials
err = json.Unmarshal(raw, &credentials)
e(err)
return &credentials
}<|fim▁end|> | if nil != err {
panic(err) |
<|file_name|>ps3_driver.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
#<|fim▁hole|>#
# This file is under the GPLv3 licence.
#
import rospy
from std_msgs.msg import String
from std_msgs.msg import Int32MultiArray
#sudo apt-get install python-pyaudio
import pyaudio
from rospy.numpy_msg import numpy_msg
import numpy as np
import time
import signal
import os
import sys
CHUNK = 3200
FORMAT = pyaudio.paInt16
CHANNELS = 4
RATE = 16000
DEV_IDX = 5
p = pyaudio.PyAudio()
pub_mic_array = rospy.Publisher("/microphone_array_raw", numpy_msg(Int32MultiArray),queue_size=1)
def callback(in_data, frame_count, time_info, status):
global np,pub_mic_array
numpydata = np.fromstring(in_data, dtype=np.int16)
print('sending...')
numpydata_msg = Int32MultiArray()
numpydata_msg.data = numpydata
pub_mic_array.publish(numpydata_msg)
return (in_data, pyaudio.paContinue)
stream = p.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
frames_per_buffer=CHUNK,
input_device_index=DEV_IDX,
stream_callback=callback)
def signal_handler(signal, frame):
print('---stopping---')
stream.close()
p.terminate()
sys.exit()
signal.signal(signal.SIGINT, signal_handler)
def talker():
rospy.init_node('microphone_array_driver', anonymous=True)
print("---recording---")
stream.start_stream()
while stream.is_active():
time.sleep(0.1)
stream.close()
p.terminate()
if __name__ == '__main__':
try:
talker()
except rospy.ROSInterruptException:
pass<|fim▁end|> | # This file is part of the SSM_LinearArray (Sound Sources Mapping
# using a Linear Microphone Array)
# developed by Daobilige Su <daobilige DOT su AT student DOT uts DOT edu DOT au> |
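For context on the capture parameters above: with RATE = 16000 and CHUNK = 3200 frames per callback, each PyAudio callback delivers 0.2 seconds of audio, and with CHANNELS = 4 the interleaved int16 buffer holds 12800 samples. A one-line sanity check:

CHUNK, RATE, CHANNELS = 3200, 16000, 4
print(CHUNK / RATE, CHUNK * CHANNELS)   # 0.2 seconds per callback, 12800 interleaved samples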
<|file_name|>app02-functions.js<|end_file_name|><|fim▁begin|>'use strict'
// const foo = (msg) => { // Anonymous arrow function... BUT this has a different meaning there
// const foo = function(msg) { // Anonymous function expression
function foo(msg) {
// if(msg == undefined) return console.log('Empty msg')
if(!msg) return console.log('Empty msg')
let str = ''
for(let i = 0; i < arguments.length; i++){
str += arguments[i] + ', '
}
console.log('this = ' + this + '; ' + msg + ' (args = ' + str + ')')
}
foo()<|fim▁hole|>foo.apply(null, [3,4,4,5])
foo.call(null, 'ola', 87364, 22, 'abc', 2, 3)
const d = new foo(89) // !!!!!! DO NOT DO THIS
const g = foo
g('Calling foo through g')
new g(76) // !!!!!! DO NOT DO THIS
const obj = {} // <=> new Object()
obj.xpto = foo
obj.xpto(345) // this = obj
obj.xpto.call(d, 6328) // this = d
/* Crazy stuff !!!!
foo.bar = foo
foo.bar(6666)
*/<|fim▁end|> | foo('ola mundo')
foo(3,4,4,5) |
<|file_name|>animated_properties.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use cssparser::RGBA;
use style::properties::animated_properties::{Animatable, IntermediateRGBA};
fn interpolate_rgba(from: RGBA, to: RGBA, progress: f64) -> RGBA {
let from: IntermediateRGBA = from.into();
let to: IntermediateRGBA = to.into();
from.interpolate(&to, progress).unwrap().into()
}
#[test]
fn test_rgba_color_interepolation_preserves_transparent() {
assert_eq!(interpolate_rgba(RGBA::transparent(),
RGBA::transparent(), 0.5),
RGBA::transparent());
}
#[test]
fn test_rgba_color_interepolation_alpha() {
assert_eq!(interpolate_rgba(RGBA::new(200, 0, 0, 100),
RGBA::new(0, 200, 0, 200), 0.5),
RGBA::new(67, 133, 0, 150));
}
#[test]
fn test_rgba_color_interepolation_out_of_range_1() {
// Some cubic-bezier functions produce values that are out of range [0, 1].
// Unclamped cases.
assert_eq!(interpolate_rgba(RGBA::from_floats(0.3, 0.0, 0.0, 0.4),
RGBA::from_floats(0.0, 1.0, 0.0, 0.6), -0.5),
RGBA::new(154, 0, 0, 77));
}
#[test]
fn test_rgba_color_interepolation_out_of_range_2() {
assert_eq!(interpolate_rgba(RGBA::from_floats(1.0, 0.0, 0.0, 0.6),
RGBA::from_floats(0.0, 0.3, 0.0, 0.4), 1.5),
RGBA::new(0, 154, 0, 77));
}
#[test]
fn test_rgba_color_interepolation_out_of_range_clamped_1() {
assert_eq!(interpolate_rgba(RGBA::from_floats(1.0, 0.0, 0.0, 0.8),
RGBA::from_floats(0.0, 1.0, 0.0, 0.2), -0.5),
RGBA::from_floats(1.0, 0.0, 0.0, 1.0));
}
#[test]
fn test_rgba_color_interepolation_out_of_range_clamped_2() {<|fim▁hole|> RGBA::from_floats(0.0, 1.0, 0.0, 0.2), 1.5),
RGBA::from_floats(0.0, 0.0, 0.0, 0.0));
}<|fim▁end|> | assert_eq!(interpolate_rgba(RGBA::from_floats(1.0, 0.0, 0.0, 0.8), |
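The expected values in the alpha test above (67, 133, 0, 150) are consistent with interpolating in premultiplied-alpha space; the following Python check reproduces them under that assumption (the premultiplied representation is an inference here, not taken from the Servo source):

# Check: interpolate (200, 0, 0, a=100) and (0, 200, 0, a=200) at progress 0.5,
# premultiplying each channel by its alpha before mixing, then un-premultiplying.
def lerp(a, b, t):
    return a + (b - a) * t

def interp_premultiplied(c1, c2, t):
    (r1, g1, b1, a1), (r2, g2, b2, a2) = c1, c2
    a = lerp(a1 / 255, a2 / 255, t)
    channels = [lerp(x1 * a1 / 255, x2 * a2 / 255, t) / a for x1, x2 in ((r1, r2), (g1, g2), (b1, b2))]
    return [round(x) for x in channels] + [round(a * 255)]

print(interp_premultiplied((200, 0, 0, 100), (0, 200, 0, 200), 0.5))   # [67, 133, 0, 150]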
<|file_name|>gmainwindow.cpp<|end_file_name|><|fim▁begin|>/***************************************************************************
gmainwindow.cpp
(c) 2004-2006 - Daniel Campos Fernández <[email protected]>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA.
***************************************************************************/
#include <ctype.h>
#include <time.h>
#include "widgets.h"
#ifdef GDK_WINDOWING_X11
#include <X11/extensions/shape.h>
#endif
#include "x11.h"
#include "sm/sm.h"
#include "gapplication.h"
#include "gdesktop.h"
#include "gkey.h"
#include "gmenu.h"
#include "gmessage.h"
#include "gdialog.h"
#include "gmouse.h"
#include "gmainwindow.h"
static gboolean cb_frame(GtkWidget *widget,GdkEventWindowState *event,gMainWindow *data)
{
data->performArrange();
data->emit(SIGNAL(data->onState));
return false;
}
static gboolean cb_show(GtkWidget *widget, gMainWindow *data)
{
data->emitOpen();
if (data->opened)
{
data->setGeometryHints();
//data->performArrange();
data->emitResize();
data->emit(SIGNAL(data->onShow));
data->_not_spontaneous = false;
}
return false;
}
static gboolean cb_hide(GtkWidget *widget, gMainWindow *data)
{
data->emit(SIGNAL(data->onHide));
data->_not_spontaneous = false;
return false;
//if (data == gDesktop::activeWindow())
// gMainWindow::setActiveWindow(NULL);
}
static gboolean cb_close(GtkWidget *widget,GdkEvent *event,gMainWindow *data)
{
if (!gMainWindow::_current || data == gMainWindow::_current)
data->doClose();
return true;
}
static gboolean cb_configure(GtkWidget *widget, GdkEventConfigure *event, gMainWindow *data)
{
gint x, y;
if (data->opened)
{
if (data->isTopLevel())
{
gtk_window_get_position(GTK_WINDOW(data->border), &x, &y);
}
else
{
x = event->x;
y = event->y;
}
//fprintf(stderr, "cb_configure: %s: (%d %d %d %d) -> (%d %d %d %d) window = %p resized = %d\n", data->name(), data->bufX, data->bufY, data->bufW, data->bufH, x, y, event->width, event->height, event->window, data->_resized);
if (x != data->bufX || y != data->bufY)
{
data->bufX = x;
data->bufY = y;
if (data->onMove) data->onMove(data);
}
if ((event->width != data->bufW) || (event->height != data->bufH) || (data->_resized) || !event->window)
{
data->_resized = false;
data->bufW = event->width;
data->bufH = event->height;
data->emitResize();
}
}
return false;
}
#ifdef GTK3
static gboolean cb_draw(GtkWidget *wid, cairo_t *cr, gMainWindow *data)
{
if (data->isTransparent())
{
if (data->background() == COLOR_DEFAULT)
gt_cairo_set_source_color(cr, 0xFF000000);
else
gt_cairo_set_source_color(cr, data->background());
cairo_set_operator (cr, CAIRO_OPERATOR_SOURCE);
cairo_paint(cr);
}
if (data->_picture)
{
cairo_pattern_t *pattern;
pattern = cairo_pattern_create_for_surface(data->_picture->getSurface());
cairo_pattern_set_extend(pattern, CAIRO_EXTEND_REPEAT);
cairo_set_source(cr, pattern);
cairo_paint(cr);
cairo_pattern_destroy(pattern);
}
return false;
}
#else
static gboolean cb_expose(GtkWidget *wid, GdkEventExpose *e, gMainWindow *data)
{
bool draw_bg = data->isTransparent();
bool draw_pic = data->_picture;
if (!draw_bg && !draw_pic)
return false;
cairo_t *cr = gdk_cairo_create(gtk_widget_get_window(wid));
if (draw_bg)
{
if (data->background() == COLOR_DEFAULT)
gt_cairo_set_source_color(cr, 0xFF000000);
else
gt_cairo_set_source_color(cr, data->background());
cairo_set_operator (cr, CAIRO_OPERATOR_SOURCE);
cairo_paint(cr);
}
if (draw_pic)
{
cairo_pattern_t *pattern;
gdk_cairo_region(cr, e->region);
cairo_clip(cr);
pattern = cairo_pattern_create_for_surface(data->_picture->getSurface());
cairo_pattern_set_extend(pattern, CAIRO_EXTEND_REPEAT);
cairo_set_source(cr, pattern);
cairo_paint(cr);
cairo_pattern_destroy(pattern);
}
cairo_destroy(cr);
return false;
}
#endif
GList *gMainWindow::windows = NULL;
gMainWindow *gMainWindow::_active = NULL;
gMainWindow *gMainWindow::_current = NULL;
void gMainWindow::initialize()
{
//fprintf(stderr, "new window: %p in %p\n", this, parent());
opened = false;
sticky = false;
persistent = false;
stack = 0;
_type = 0;
_mask = false;
_masked = false;
_resized = false;
accel = NULL;
_default = NULL;
_cancel = NULL;
menuBar = NULL;
layout = NULL;
top_only = false;
_icon = NULL;
_picture = NULL;
focus = 0;
_closing = false;
_title = NULL;
_not_spontaneous = false;
_skip_taskbar = false;
_current = NULL;
_style = NULL;
_xembed = false;
_activate = false;
_hidden = false;
_hideMenuBar = false;
_showMenuBar = true;
_popup = false;
_maximized = _minimized = _fullscreen = false;
_resize_last_w = _resize_last_h = -1;
_min_w = _min_h = 0;
_transparent = false;
_utility = false;
onOpen = NULL;
onShow = NULL;
onHide = NULL;
onMove = NULL;
onResize = NULL;
onActivate = NULL;
onDeactivate = NULL;
onState = NULL;
accel = gtk_accel_group_new();
}
void gMainWindow::initWindow()
{
//resize(200,150);
if (!isTopLevel())
{
g_signal_connect(G_OBJECT(border), "configure-event", G_CALLBACK(cb_configure), (gpointer)this);
g_signal_connect_after(G_OBJECT(border), "map", G_CALLBACK(cb_show), (gpointer)this);
g_signal_connect(G_OBJECT(border),"unmap",G_CALLBACK(cb_hide),(gpointer)this);
//g_signal_connect_after(G_OBJECT(border), "size-allocate", G_CALLBACK(cb_configure), (gpointer)this);
ON_DRAW_BEFORE(widget, this, cb_expose, cb_draw);
gtk_widget_add_events(border, GDK_STRUCTURE_MASK);
}
else
{
//g_signal_connect(G_OBJECT(border),"size-request",G_CALLBACK(cb_realize),(gpointer)this);
g_signal_connect(G_OBJECT(border), "show",G_CALLBACK(cb_show),(gpointer)this);
g_signal_connect(G_OBJECT(border), "hide",G_CALLBACK(cb_hide),(gpointer)this);
g_signal_connect(G_OBJECT(border), "configure-event",G_CALLBACK(cb_configure),(gpointer)this);
g_signal_connect(G_OBJECT(border), "delete-event",G_CALLBACK(cb_close),(gpointer)this);
g_signal_connect(G_OBJECT(border), "window-state-event",G_CALLBACK(cb_frame),(gpointer)this);
gtk_widget_add_events(widget,GDK_BUTTON_MOTION_MASK);
ON_DRAW_BEFORE(border, this, cb_expose, cb_draw);
}
gtk_window_add_accel_group(GTK_WINDOW(topLevel()->border), accel);
have_cursor = true; //parent() == 0 && !_xembed;
}
#if 0 //def GTK3
static void (*old_fixed_get_preferred_width)(GtkWidget *, gint *, gint *);
static void (*old_fixed_get_preferred_height)(GtkWidget *, gint *, gint *);
static void gtk_fixed_get_preferred_width(GtkWidget *widget, gint *minimum_size, gint *natural_size)
{
(*old_fixed_get_preferred_width)(widget, minimum_size, natural_size);
*minimum_size = 0;
}
static void gtk_fixed_get_preferred_height(GtkWidget *widget, gint *minimum_size, gint *natural_size)
{
(*old_fixed_get_preferred_height)(widget, minimum_size, natural_size);
*minimum_size = 0;
}
#endif
gMainWindow::gMainWindow(int plug) : gContainer(NULL)
{
initialize();
g_typ = Type_gMainWindow;
windows = g_list_append(windows, (gpointer)this);
_xembed = plug != 0;
if (_xembed)
border = gtk_plug_new(plug);
else
border = gtk_window_new(GTK_WINDOW_TOPLEVEL);
widget = gtk_fixed_new(); //gtk_layout_new(0,0);
#if 0 //def GTK3
static bool patch = FALSE;
if (!patch)
{
GtkWidgetClass *klass;
klass = (GtkWidgetClass *)GTK_FIXED_GET_CLASS(widget);
old_fixed_get_preferred_width = klass->get_preferred_width;
klass->get_preferred_width = gtk_fixed_get_preferred_width;
old_fixed_get_preferred_height = klass->get_preferred_height;
klass->get_preferred_height = gtk_fixed_get_preferred_height;
/*klass = (GtkWidgetClass *)GTK_FIXED_GET_CLASS(border);
old_window_get_preferred_width = klass->get_preferred_width;
klass->get_preferred_width = gtk_window_get_preferred_width;
old_window_get_preferred_height = klass->get_preferred_height;
klass->get_preferred_height = gtk_window_get_preferred_height;*/
patch = true;
}
#endif
realize(false);
initWindow();
gtk_widget_realize(border);
gtk_widget_show(widget);
gtk_widget_set_size_request(border, 1, 1);
setCanFocus(false);
}
gMainWindow::gMainWindow(gContainer *par) : gContainer(par)
{
initialize();
g_typ = Type_gMainWindow;
border = gtk_alignment_new(0,0,1,1); //gtk_fixed_new(); //gtk_event_box_new();
widget = gtk_fixed_new();
realize(false);
initWindow();
setCanFocus(false);
}
gMainWindow::~gMainWindow()
{
//fprintf(stderr, "delete window %p %s\n", this, name());
if (opened)
{
emit(SIGNAL(onClose));
opened = false;
if (GTK_IS_WINDOW(border) && isModal())
gApplication::exitLoop(this);
}
gPicture::assign(&_picture);
gPicture::assign(&_icon);
if (_title) g_free(_title);
g_object_unref(accel);
if (_style) g_object_unref(_style);
if (_active == this)
_active = NULL;
if (gApplication::mainWindow() == this)
gApplication::setMainWindow(NULL);
windows = g_list_remove(windows, (gpointer)this);
}
bool gMainWindow::getSticky()
{
return sticky;
}
int gMainWindow::getStacking()
{
return stack;
}
void gMainWindow::setSticky(bool vl)
{
sticky=vl;
if (!isTopLevel()) return;
if (vl) gtk_window_stick(GTK_WINDOW(border));
else gtk_window_unstick(GTK_WINDOW(border));
}
void gMainWindow::setStacking(int vl)
{
stack=vl;
if (!isTopLevel()) return;
switch (vl)
{
case 0:
gtk_window_set_keep_below(GTK_WINDOW(border),FALSE);
gtk_window_set_keep_above(GTK_WINDOW(border),FALSE);
break;
case 1:
gtk_window_set_keep_below(GTK_WINDOW(border),FALSE);
gtk_window_set_keep_above(GTK_WINDOW(border),TRUE);
break;
case 2:
gtk_window_set_keep_above(GTK_WINDOW(border),FALSE);
gtk_window_set_keep_below(GTK_WINDOW(border),TRUE);
break;
}
}
void gMainWindow::setRealBackground(gColor color)
{
if (!_picture)
{
gControl::setRealBackground(color);
gMenu::updateColor(this);
}
}
void gMainWindow::setRealForeground(gColor color)
{
gControl::setRealForeground(color);
gMenu::updateColor(this);
}
void gMainWindow::move(int x, int y)
{
gint ox, oy;
if (isTopLevel())
{
if (x == bufX && y == bufY)
return;
#ifdef GDK_WINDOWING_X11
gdk_window_get_origin(gtk_widget_get_window(border), &ox, &oy);
ox = x + ox - bufX;
oy = y + oy - bufY;
bufX = x;
bufY = y;
if (bufW > 0 && bufH > 0)
{
if (!X11_send_move_resize_event(GDK_WINDOW_XID(gtk_widget_get_window(border)), ox, oy, width(), height()))
return;
}
#else
bufX = x;
bufY = y;
#endif
gtk_window_move(GTK_WINDOW(border), x, y);
}
else
{
gContainer::move(x,y);
}
}
void gMainWindow::resize(int w, int h)
{
if (w == bufW && h == bufH)
return;
_resized = true;
if (isTopLevel())
{
//fprintf(stderr, "resize: %s: %d %d\n", name(), w, h);
//gdk_window_enable_synchronized_configure (border->window);
bufW = w < 0 ? 0 : w;
bufH = h < 0 ? 0 : h;
if (w < 1 || h < 1)
{
if (visible)
gtk_widget_hide(border);
}
else
{
if (isResizable())
gtk_window_resize(GTK_WINDOW(border), w, h);
else
gtk_widget_set_size_request(border, w, h);
if (visible)
gtk_widget_show(border);
}
}
else
{
//fprintf(stderr, "resize %p -> (%d %d) (%d %d)\n", this, bufW, bufH, w, h);
gContainer::resize(w, h);
}
}
void gMainWindow::moveResize(int x, int y, int w, int h)
{
//if (isTopLevel())
// gdk_window_move_resize(border->window, x, y, w, h);
//else
gContainer::moveResize(x, y, w, h);
}
void gMainWindow::emitOpen()
{
//fprintf(stderr, "emit Open: %p (%d %d) %d resizable = %d fullscreen = %d\n", this, width(), height(), opened, isResizable(), fullscreen());
if (!opened)
{
opened = true;
//_no_resize_event = true; // If the event loop is run during emitOpen(), some spurious configure events are received.<|fim▁hole|> _min_w = width();
_min_h = height();
}
gtk_widget_realize(border);
performArrange();
emit(SIGNAL(onOpen));
if (opened)
{
//fprintf(stderr, "emit Move & Resize: %p\n", this);
emit(SIGNAL(onMove));
emitResize();
}
}
//_no_resize_event = false;
}
void gMainWindow::afterShow()
{
if (_activate)
{
gtk_window_present(GTK_WINDOW(border));
_activate = false;
}
}
void gMainWindow::setVisible(bool vl)
{
if (!vl)
_hidden = true;
if (vl == isVisible())
return;
if (vl)
{
bool arr = !isVisible();
emitOpen();
if (!opened)
return;
_not_spontaneous = !visible;
visible = true;
_hidden = false;
setTransparent(_transparent);
if (isTopLevel())
{
if (!_title || !*_title)
gtk_window_set_title(GTK_WINDOW(border), gApplication::defaultTitle());
/*if (!_xembed)
{
fprintf(stderr, "gtk_window_group_add_window: %p -> %p\n", border, gApplication::currentGroup());
gtk_window_group_add_window(gApplication::currentGroup(), GTK_WINDOW(border));
fprintf(stderr, "-> %p\n", gtk_window_get_group(GTK_WINDOW(border)));
}*/
// Thanks for Ubuntu's GTK+ patching :-(
#if GTK_CHECK_VERSION(3,0,0)
gtk_window_set_has_resize_grip(GTK_WINDOW(border), false);
#else
if (g_object_class_find_property(G_OBJECT_GET_CLASS(border), "has-resize-grip"))
g_object_set(G_OBJECT(border), "has-resize-grip", false, (char *)NULL);
#endif
gtk_window_move(GTK_WINDOW(border), bufX, bufY);
if (isPopup())
{
gtk_widget_show_now(border);
gtk_widget_grab_focus(border);
}
else
gtk_window_present(GTK_WINDOW(border));
if (isUtility())
{
gMainWindow *parent = _current;
if (!parent && gApplication::mainWindow() && gApplication::mainWindow() != this)
parent = gApplication::mainWindow();
if (parent)
gtk_window_set_transient_for(GTK_WINDOW(border), GTK_WINDOW(parent->border));
}
if (gApplication::mainWindow() == this)
{
int desktop = session_manager_get_desktop();
if (desktop >= 0)
{
//fprintf(stderr, "X11_window_set_desktop: %d (%d)\n", desktop, true);
X11_window_set_desktop((Window)handle(), true, desktop);
session_manager_set_desktop(-1);
}
}
}
else
{
gtk_widget_show(border);
parent()->performArrange();
}
drawMask();
if (focus)
{
//fprintf(stderr, "focus = %s\n", focus->name());
focus->setFocus();
focus = 0;
}
if (skipTaskBar())
_activate = true;
if (arr)
performArrange();
}
else
{
if (this == _active)
focus = gApplication::activeControl();
_not_spontaneous = visible;
gContainer::setVisible(false);
if (_popup)
gApplication::exitLoop(this);
if (gApplication::_button_grab && !gApplication::_button_grab->isReallyVisible())
gApplication::setButtonGrab(NULL);
}
}
void gMainWindow::setMinimized(bool vl)
{
if (!isTopLevel()) return;
_minimized = vl;
if (vl) gtk_window_iconify(GTK_WINDOW(border));
else gtk_window_deiconify(GTK_WINDOW(border));
}
void gMainWindow::setMaximized(bool vl)
{
if (!isTopLevel())
return;
_maximized = vl;
if (vl)
gtk_window_maximize(GTK_WINDOW(border));
else
gtk_window_unmaximize(GTK_WINDOW(border));
}
void gMainWindow::setFullscreen(bool vl)
{
if (!isTopLevel())
return;
_fullscreen = vl;
if (vl)
{
gtk_window_fullscreen(GTK_WINDOW(border));
if (isVisible())
gtk_window_present(GTK_WINDOW(border));
}
else
gtk_window_unfullscreen(GTK_WINDOW(border));
}
void gMainWindow::center()
{
GdkRectangle rect;
int x, y;
if (!isTopLevel()) return;
gDesktop::availableGeometry(screen(), &rect);
x = rect.x + (rect.width - width()) / 2;
y = rect.y + (rect.height - height()) / 2;
move(x, y);
}
bool gMainWindow::isModal() const
{
if (!isTopLevel()) return false;
return gtk_window_get_modal(GTK_WINDOW(border));
}
void gMainWindow::showModal()
{
gMainWindow *save;
if (!isTopLevel()) return;
if (isModal()) return;
//show();
gtk_window_set_modal(GTK_WINDOW(border), true);
center();
//show();
gtk_grab_add(border);
if (_active)
gtk_window_set_transient_for(GTK_WINDOW(border), GTK_WINDOW(_active->topLevel()->border));
save = _current;
_current = this;
gApplication::enterLoop(this, true);
_current = save;
gtk_grab_remove(border);
gtk_window_set_modal(GTK_WINDOW(border), false);
if (!persistent)
destroyNow();
else
hide();
}
void gMainWindow::showPopup(int x, int y)
{
gMainWindow *save;
bool has_border;
int oldx, oldy;
//int type;
if (!isTopLevel()) return;
if (isModal()) return;
//gtk_widget_unrealize(border);
//((GtkWindow *)border)->type = GTK_WINDOW_POPUP;
//gtk_widget_realize(border);
oldx = left();
oldy = top();
has_border = gtk_window_get_decorated(GTK_WINDOW(border));
//type = getType();
//setType(_NET_WM_WINDOW_TYPE_COMBO);
gtk_window_set_decorated(GTK_WINDOW(border), false);
//gtk_window_set_type_hint(GTK_WINDOW(border), GDK_WINDOW_TYPE_HINT_POPUP_MENU);
move(x, y);
gtk_window_resize(GTK_WINDOW(border), bufW, bufH);
//reparent(NULL, x, y, GTK_WINDOW_POPUP);
_popup = true;
save = _current;
_current = this;
gApplication::enterPopup(this);
_current = save;
_popup = false;
if (!persistent)
{
destroyNow();
}
else
{
hide();
//gdk_window_set_override_redirect(gtk_widget_get_window(GTK_WINDOW(border)), false);
gtk_window_set_decorated(GTK_WINDOW(border), has_border);
//setType(type);
//gtk_window_set_type_hint(GTK_WINDOW(border), type);
move(oldx, oldy);
}
}
void gMainWindow::showActivate()
{
bool v = isTopLevel() && isVisible();
show();
if (v)
gtk_window_present(GTK_WINDOW(border));
}
void gMainWindow::showPopup()
{
int x, y;
gMouse::getScreenPos(&x, &y);
showPopup(x, y);
}
void gMainWindow::raise()
{
if (!isTopLevel()) { gControl::raise(); return; }
gtk_window_present(GTK_WINDOW(border));
}
const char* gMainWindow::text()
{
return _title;
}
bool gMainWindow::skipTaskBar()
{
if (!isTopLevel())
return false;
else
return _skip_taskbar;
}
void gMainWindow::setText(const char *txt)
{
if (_title) g_free(_title);
_title = g_strdup(txt);
if (isTopLevel())
gtk_window_set_title(GTK_WINDOW(border), txt);
}
bool gMainWindow::hasBorder()
{
if (isTopLevel())
return gtk_window_get_decorated(GTK_WINDOW(border));
else
return false;
}
bool gMainWindow::isResizable()
{
if (isTopLevel())
return gtk_window_get_resizable(GTK_WINDOW(border));
else
return false;
}
void gMainWindow::setBorder(bool b)
{
if (!isTopLevel())
return;
gtk_window_set_decorated(GTK_WINDOW(border), b);
/*#ifdef GDK_WINDOWING_X11
XSetWindowAttributes attr;
attr.override_redirect = !b;
XChangeWindowAttributes(GDK_WINDOW_XDISPLAY(border->window), GDK_WINDOW_XID(border->window), CWOverrideRedirect, &attr);
#endif*/
}
void gMainWindow::setResizable(bool b)
{
if (!isTopLevel())
return;
if (b == isResizable())
return;
gtk_window_set_resizable(GTK_WINDOW(border), b);
if (b)
gtk_widget_set_size_request(border, 1, 1);
else
gtk_widget_set_size_request(border, bufW, bufH);
}
void gMainWindow::setSkipTaskBar(bool b)
{
_skip_taskbar = b;
if (!isTopLevel()) return;
gtk_window_set_skip_taskbar_hint (GTK_WINDOW(border), b);
}
/*gPicture* gMainWindow::icon()
{
GdkPixbuf *buf;
gPicture *pic;
if (!isTopLevel()) return NULL;
buf=gtk_window_get_icon(GTK_WINDOW(border));
if (!buf) return NULL;
pic=gPicture::fromPixbuf(buf);
return pic;
}*/
void gMainWindow::setIcon(gPicture *pic)
{
gPicture::assign(&_icon, pic);
if (!isTopLevel()) return;
gtk_window_set_icon(GTK_WINDOW(border), pic ? pic->getPixbuf() : NULL);
}
bool gMainWindow::topOnly()
{
if (!isTopLevel()) return false;
return top_only;
}
void gMainWindow::setTopOnly(bool vl)
{
if (!isTopLevel()) return;
gtk_window_set_keep_above (GTK_WINDOW(border),vl);
top_only=vl;
}
void gMainWindow::setMask(bool vl)
{
if (_mask == vl)
return;
_mask = vl;
drawMask();
}
void gMainWindow::setPicture(gPicture *pic)
{
gPicture::assign(&_picture, pic);
drawMask();
}
void gMainWindow::remap()
{
if (!isVisible())
return;
gtk_widget_unmap(border);
gtk_widget_map(border);
if (_skip_taskbar) { setSkipTaskBar(false); setSkipTaskBar(true); }
if (top_only) { setTopOnly(false); setTopOnly(true); }
if (sticky) { setSticky(false); setSticky(true); }
if (stack) { setStacking(0); setStacking(stack); }
X11_set_window_type(handle(), _type);
}
void gMainWindow::drawMask()
{
bool do_remap = false;
if (!isVisible())
return;
#ifdef GTK3
cairo_region_t *mask;
if (_mask && _picture)
mask = gdk_cairo_region_create_from_surface(_picture->getSurface());
else
mask = NULL;
gdk_window_shape_combine_region(gtk_widget_get_window(border), mask, 0, 0);
if (mask)
cairo_region_destroy(mask);
refresh();
#else
GdkBitmap *mask = (_mask && _picture) ? _picture->getMask() : NULL;
do_remap = !mask && _masked;
gdk_window_shape_combine_mask(border->window, mask, 0, 0);
#endif
if (_picture)
{
gtk_widget_set_app_paintable(border, TRUE);
gtk_widget_realize(border);
gtk_widget_realize(widget);
for (int i = 0; i < controlCount(); i++)
getControl(i)->refresh();
}
else if (!_transparent)
{
gtk_widget_set_app_paintable(border, FALSE);
setRealBackground(background());
}
_masked = mask != NULL;
if (do_remap)
remap();
else
{
if (!_skip_taskbar)
{
setSkipTaskBar(true);
setSkipTaskBar(false);
}
}
}
int gMainWindow::menuCount()
{
if (!menuBar) return 0;
return gMenu::winChildCount(this);
}
void gMainWindow::setPersistent(bool vl)
{
persistent = vl;
}
bool gMainWindow::doClose()
{
if (_closing)
return false;
if (opened)
{
if (isModal() && !gApplication::hasLoop(this))
return true;
_closing = true;
if (onClose)
{
if (!onClose(this))
opened = false;
}
else
opened = false;
_closing = false;
if (!opened && isModal())
gApplication::exitLoop(this);
}
if (!opened) // && !modal())
{
if (_active == this)
setActiveWindow(NULL);
if (!isModal())
{
if (persistent)
hide();
else
destroy();
}
return false;
}
else
return opened;
}
bool gMainWindow::close()
{
return doClose();
}
static void hide_hidden_children(gContainer *cont)
{
int i;
gControl *child;
for (i = 0;; i++)
{
child = cont->child(i);
if (!child)
break;
if (!child->isVisible())
gtk_widget_hide(child->border);
else if (child->isContainer())
hide_hidden_children((gContainer *)child);
}
}
void gMainWindow::reparent(gContainer *newpr, int x, int y)
{
GtkWidget *new_border;
int w, h;
gColor fg, bg;
if (_xembed)
return;
bg = background();
fg = foreground();
if (isTopLevel() && newpr)
{
gtk_window_remove_accel_group(GTK_WINDOW(topLevel()->border), accel);
new_border = gtk_event_box_new();
gtk_widget_reparent(widget, new_border);
embedMenuBar(new_border);
_no_delete = true;
gtk_widget_destroy(border);
_no_delete = false;
border = new_border;
registerControl();
setCanFocus(false);
setParent(newpr);
connectParent();
borderSignals();
initWindow();
setBackground(bg);
setForeground(fg);
setFont(font());
checkMenuBar();
bufX = bufY = 0;
move(x, y);
gtk_widget_set_size_request(border, width(), height());
// Hidden children are incorrectly shown. Fix that!
hideHiddenChildren();
}
else if ((!isTopLevel() && !newpr)
|| (isTopLevel() && isPopup()))
//|| (isTopLevel() && (isPopup() ^ (type == GTK_WINDOW_POPUP))))
{
gtk_window_remove_accel_group(GTK_WINDOW(topLevel()->border), accel);
// TODO: test that
new_border = gtk_window_new(GTK_WINDOW_TOPLEVEL);
gtk_widget_reparent(widget, new_border);
embedMenuBar(new_border);
_no_delete = true;
gtk_widget_destroy(border);
_no_delete = false;
border = new_border;
registerControl();
setCanFocus(true);
if (parent())
{
parent()->remove(this);
parent()->arrange();
setParent(NULL);
}
initWindow();
borderSignals();
setBackground(bg);
setForeground(fg);
setFont(font());
move(x, y);
w = width();
h = height();
bufW = bufH = -1;
gtk_widget_set_size_request(border, 1, 1);
resize(w, h);
hideHiddenChildren();
_popup = false; //type == GTK_WINDOW_POPUP;
}
else
{
gContainer::reparent(newpr, x, y);
}
}
int gMainWindow::controlCount()
{
GList *list = gControl::controlList();
gControl *ctrl;
int n = 0;
while (list)
{
ctrl = (gControl *)list->data;
if (ctrl->window() == this && !ctrl->isDestroyed())
n++;
list = g_list_next(list);
}
return n;
}
gControl *gMainWindow::getControl(char *name)
{
GList *list = gControl::controlList();
gControl *ctrl;
while (list)
{
ctrl = (gControl *)list->data;
if (ctrl->window() == this && !strcasecmp(ctrl->name(), name) && !ctrl->isDestroyed())
return ctrl;
list = g_list_next(list);
}
return NULL;
}
gControl *gMainWindow::getControl(int index)
{
GList *list = gControl::controlList();
gControl *ctrl;
int i = 0;
while (list)
{
ctrl = (gControl *)list->data;
if (ctrl->window() == this && !ctrl->isDestroyed())
{
if (i == index)
return ctrl;
i++;
}
list = g_list_next(list);
}
return NULL;
}
int gMainWindow::clientX()
{
return 0;
}
int gMainWindow::containerX()
{
return 0;
}
int gMainWindow::clientY()
{
if (isMenuBarVisible())
return menuBarHeight();
else
return 0;
}
int gMainWindow::containerY()
{
return 0;
}
int gMainWindow::clientWidth()
{
return width();
}
int gMainWindow::menuBarHeight()
{
int h = 0;
if (menuBar)
{
//gtk_widget_show(GTK_WIDGET(menuBar));
//fprintf(stderr, "menuBarHeight: gtk_widget_get_visible: %d\n", gtk_widget_get_visible(GTK_WIDGET(menuBar)));
#ifdef GTK3
gtk_widget_get_preferred_height(GTK_WIDGET(menuBar), NULL, &h);
#else
GtkRequisition req = { 0, 0 };
gtk_widget_size_request(GTK_WIDGET(menuBar), &req);
h = req.height;
#endif
//fprintf(stderr, "menuBarHeight: %d\n", h);
}
return h;
}
int gMainWindow::clientHeight()
{
if (isMenuBarVisible())
return height() - menuBarHeight();
else
return height();
}
void gMainWindow::setActiveWindow(gControl *control)
{
gMainWindow *window = control ? control->window() : NULL;
gMainWindow *old = _active;
if (window == _active)
return;
_active = window;
//fprintf(stderr, "setActiveWindow: %p %s\n", _active, _active ? _active->name() : "");
if (old)
old->emit(SIGNAL(old->onDeactivate));
if (window)
window->emit(SIGNAL(window->onActivate));
}
#ifdef GDK_WINDOWING_X11
bool gMainWindow::isUtility() const
{
return _utility;
}
void gMainWindow::setUtility(bool v)
{
bool remap = false;
if (!isTopLevel())
return;
// TODO: works only if the window is not mapped!
_utility = v;
if (gtk_widget_get_mapped(border))
{
remap = true;
gtk_widget_unmap(border);
}
gtk_window_set_type_hint(GTK_WINDOW(border), v ? GDK_WINDOW_TYPE_HINT_UTILITY : GDK_WINDOW_TYPE_HINT_NORMAL);
if (remap)
gtk_widget_map(border);
}
#else
bool gMainWindow::isUtility()
{
return _utility;
}
void gMainWindow::setUtility(bool v)
{
_utility = v;
}
#endif
void gMainWindow::configure()
{
int h;
if (bufW < 1 || bufH < 1)
return;
h = menuBarHeight();
//fprintf(stderr, "configure: %s: %d %d - %d %d\n", name(), isMenuBarVisible(), h, width(), height());
if (isMenuBarVisible())
{
gtk_fixed_move(layout, GTK_WIDGET(menuBar), 0, 0);
if (h > 1)
gtk_widget_set_size_request(GTK_WIDGET(menuBar), width(), h);
gtk_fixed_move(layout, widget, 0, h);
gtk_widget_set_size_request(widget, width(), Max(0, height() - h));
}
else
{
if (layout)
{
if (menuBar)
gtk_fixed_move(layout, GTK_WIDGET(menuBar), 0, -h);
gtk_fixed_move(layout, widget, 0, 0);
}
gtk_widget_set_size_request(widget, width(), height());
}
}
void gMainWindow::setMenuBarVisible(bool v)
{
_showMenuBar = v;
if (!menuBar)
return;
configure();
performArrange();
}
bool gMainWindow::isMenuBarVisible()
{
//fprintf(stderr, "isMenuBarVisible: %d\n", !!(menuBar && !_hideMenuBar && _showMenuBar));
return menuBar && !_hideMenuBar && _showMenuBar; //|| (menuBar && GTK_WIDGET_MAPPED(GTK_WIDGET(menuBar)));
}
void gMainWindow::updateFont()
{
gContainer::updateFont();
gMenu::updateFont(this);
}
void gMainWindow::checkMenuBar()
{
int i;
gMenu *menu;
//fprintf(stderr, "gMainWindow::checkMenuBar\n");
if (menuBar)
{
_hideMenuBar = true;
for (i = 0;; i++)
{
menu = gMenu::winChildMenu(this, i);
if (!menu)
break;
if (menu->isVisible() && !menu->isSeparator())
{
_hideMenuBar = false;
break;
}
}
}
configure();
performArrange();
}
void gMainWindow::embedMenuBar(GtkWidget *border)
{
if (menuBar)
{
// layout is automatically destroyed ?
layout = GTK_FIXED(gtk_fixed_new());
g_object_ref(G_OBJECT(menuBar));
if (gtk_widget_get_parent(GTK_WIDGET(menuBar)))
gtk_container_remove(GTK_CONTAINER(gtk_widget_get_parent(GTK_WIDGET(menuBar))), GTK_WIDGET(menuBar));
gtk_fixed_put(layout, GTK_WIDGET(menuBar), 0, 0);
g_object_unref(G_OBJECT(menuBar));
gtk_widget_reparent(widget, GTK_WIDGET(layout));
gtk_container_add(GTK_CONTAINER(border), GTK_WIDGET(layout));
gtk_widget_show(GTK_WIDGET(menuBar));
gtk_widget_show(GTK_WIDGET(layout));
gtk_widget_show(GTK_WIDGET(widget));
gMenu::updateFont(this);
gMenu::updateColor(this);
checkMenuBar();
}
}
/*bool gMainWindow::getScreenPos(int *x, int *y)
{
return gContainer::getScreenPos(x, y);
}*/
double gMainWindow::opacity()
{
if (isTopLevel())
#if GTK_CHECK_VERSION(3, 8, 0)
return gtk_widget_get_opacity(border);
#else
return gtk_window_get_opacity(GTK_WINDOW(border));
#endif
else
return 1.0;
}
void gMainWindow::setOpacity(double v)
{
if (isTopLevel())
#if GTK_CHECK_VERSION(3, 8, 0)
gtk_widget_set_opacity(border, v);
#else
gtk_window_set_opacity(GTK_WINDOW(border), v);
#endif
}
int gMainWindow::screen()
{
gMainWindow *tl = topLevel();
return gdk_screen_get_number(gtk_window_get_screen(GTK_WINDOW(tl->border)));
}
void gMainWindow::emitResize()
{
if (bufW == _resize_last_w && bufH == _resize_last_h)
return;
_resize_last_w = bufW;
_resize_last_h = bufH;
configure();
performArrange();
emit(SIGNAL(onResize));
}
void gMainWindow::setGeometryHints()
{
if (isTopLevel() && isResizable())
{
if (isModal())
{
GdkGeometry geometry;
geometry.min_width = _min_w;
geometry.min_height = _min_h;
gdk_window_set_geometry_hints(gtk_widget_get_window(border), &geometry, (GdkWindowHints)(GDK_HINT_MIN_SIZE | GDK_HINT_POS));
}
}
}
void gMainWindow::setBackground(gColor vl)
{
_bg = vl;
if (!_transparent)
gControl::setBackground(vl);
}
void gMainWindow::setTransparent(bool vl)
{
if (!vl)
return;
_transparent = TRUE;
if (!isVisible())
return;
#ifdef GTK3
GdkScreen *screen = NULL;
GdkVisual *visual = NULL;
screen = gtk_widget_get_screen(border);
visual = gdk_screen_get_rgba_visual(screen);
if (visual == NULL)
return;
#else
GdkScreen *screen;
GdkColormap *colormap;
screen = gtk_widget_get_screen(border);
colormap = gdk_screen_get_rgba_colormap(screen);
if (colormap == NULL)
return;
#endif
gtk_widget_unrealize(border);
gtk_widget_set_app_paintable(border, TRUE);
#ifdef GTK3
gtk_widget_set_visual(border, visual);
#else
gtk_widget_set_colormap(border, colormap);
#endif
gtk_widget_realize(border);
int w = width();
int h = height();
bufW = w - 1;
resize(w, h);
gtk_window_present(GTK_WINDOW(border));
}
bool gMainWindow::closeAll()
{
int i;
gMainWindow *win;
for(i = 0; i < count(); i++)
{
win = get(i);
if (!win)
break;
if (win == gApplication::mainWindow())
continue;
if (win->close())
return true;
}
return false;
}<|fim▁end|> |
if (!_min_w && !_min_h)
{ |
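The completion for this row (the two lines just above) opens a guard that records the window's size at its first open as _min_w/_min_h, which setGeometryHints later applies as the minimum size of resizable modal windows. The same lazy-capture pattern in a small Python sketch (class and method names are illustrative only):

# Sketch of the "remember the first opened size as the minimum size" guard.
class Window:
    def __init__(self):
        self._min_w = 0
        self._min_h = 0

    def emit_open(self, width, height):
        if not self._min_w and not self._min_h:   # only on the very first open
            self._min_w, self._min_h = width, height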
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>//! # MongoDB Rust Driver
//!
//! A driver written in pure Rust, providing a native interface to MongoDB.
//!
//! ## Connecting to MongoDB
//!
//! The Client is an entry-point to interacting with a MongoDB instance.
//!
//! ```no_run
//! # use mongodb::{Client, ClientOptions, ThreadedClient};
//! # use mongodb::common::{ReadMode, ReadPreference};
//! #
//! // Direct connection to a server. Will not look for other servers in the topology.
//! let client = Client::connect("localhost", 27017)
//! .expect("Failed to initialize client.");
//!
//! // Connect to a complex server topology, such as a replica set
//! // or sharded cluster, using a connection string uri.
//! let client = Client::with_uri("mongodb://localhost:27017,localhost:27018/")
//! .expect("Failed to initialize client.");
//!
//! // Specify a read preference, and rely on the driver to find secondaries.
//! let mut options = ClientOptions::new();
//! options.read_preference = Some(ReadPreference::new(ReadMode::SecondaryPreferred, None));
//! let client = Client::with_uri_and_options("mongodb://localhost:27017/", options)
//! .expect("Failed to initialize client.");
//! ```
//!
//! ## Interacting with MongoDB Collections
//!
//! ```no_run
//! # #[macro_use] extern crate bson;
//! # extern crate mongodb;
//! # use mongodb::{Client, ThreadedClient};
//! # use mongodb::db::ThreadedDatabase;
//! # use bson::Bson;
//! #
//! # fn main() {
//! # let client = Client::connect("localhost", 27017).unwrap();
//! #
//! let coll = client.db("media").collection("movies");
//! coll.insert_one(doc!{ "title": "Back to the Future" }, None).unwrap();
//! coll.update_one(doc!{}, doc!{ "director": "Robert Zemeckis" }, None).unwrap();
//! coll.delete_many(doc!{}, None).unwrap();
//!
//! let mut cursor = coll.find(None, None).unwrap();
//! for result in cursor {
//! if let Ok(item) = result {
//! if let Some(&Bson::String(ref title)) = item.get("title") {
//! println!("title: {}", title);
//! }
//! }
//! }
//! # }
//! ```
//!
//! ## Command Monitoring
//!
//! The driver provides an intuitive interface for monitoring and responding to runtime information
//! about commands being executed on the server. Arbitrary functions can be used as start and
//! completion hooks, reacting to command results from the server.
//!
//! ```no_run
//! # use mongodb::{Client, CommandResult, ThreadedClient};
//! fn log_query_duration(client: Client, command_result: &CommandResult) {
//! match command_result {
//! &CommandResult::Success { duration, .. } => {
//! println!("Command took {} nanoseconds.", duration);
//! },
//! _ => println!("Failed to execute command."),
//! }
//! }
//!
//! let mut client = Client::connect("localhost", 27017).unwrap();
//! client.add_completion_hook(log_query_duration).unwrap();
//! ```
//!
//! ## Topology Monitoring
//!
//! Each server within a MongoDB server set is monitored asynchronously for changes in status, and
//! the driver's view of the current topology is updated in response to this. This allows the
//! driver to be aware of the status of the server set it is communicating with, and to make server
//! selections appropriately with regards to the user-specified `ReadPreference` and `WriteConcern`.
//!
//! ## Connection Pooling
//!
//! Each server within a MongoDB server set is maintained by the driver with a separate connection
//! pool. By default, each pool has a maximum of 5 concurrent open connections.
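//!
//! If direct access to a pooled connection is needed, streams can also be checked out
//! manually. The sketch below is illustrative only: it simply combines the
//! `ThreadedClient::acquire_stream` signature declared in this crate with the
//! `ReadPreference` type shown above, and the variable names are assumptions.
//!
//! ```no_run
//! # use mongodb::{Client, ThreadedClient};
//! # use mongodb::common::{ReadMode, ReadPreference};
//! let client = Client::connect("localhost", 27017).unwrap();
//! let read_pref = ReadPreference::new(ReadMode::Primary, None);
//! // The returned tuple also reports the slave_ok and send-read-preference flags.
//! let (_stream, _slave_ok, _send_read_pref) = client.acquire_stream(read_pref).unwrap();
//! ```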
// Clippy lints
#![cfg_attr(feature = "clippy", feature(plugin))]
#![cfg_attr(feature = "clippy", plugin(clippy))]
#![cfg_attr(feature = "clippy", allow(
doc_markdown,
// allow double_parens for bson/doc macro.
double_parens,
// more explicit than catch-alls.
match_wild_err_arm,
too_many_arguments,
))]
#![cfg_attr(feature = "clippy", warn(
cast_precision_loss,
enum_glob_use,
filter_map,
if_not_else,
invalid_upcast_comparisons,
items_after_statements,
mem_forget,
mut_mut,
mutex_integer,
non_ascii_literal,
nonminimal_bool,
option_map_unwrap_or,
option_map_unwrap_or_else,
print_stdout,
shadow_reuse,
shadow_same,
shadow_unrelated,
similar_names,
unicode_not_nfc,
unseparated_literal_suffix,<|fim▁hole|> used_underscore_binding,
wrong_pub_self_convention,
))]
#![doc(html_root_url = "https://docs.rs/mongodb")]
#[macro_use]
extern crate bitflags;
#[macro_use(bson, doc)]
extern crate bson;
extern crate bufstream;
extern crate byteorder;
extern crate chrono;
extern crate data_encoding;
#[cfg(feature = "ssl")]
extern crate openssl;
extern crate rand;
#[macro_use]
extern crate scan_fmt;
extern crate semver;
extern crate separator;
extern crate textnonce;
extern crate time;
extern crate md5;
extern crate sha1;
extern crate hmac;
extern crate pbkdf2;
extern crate hex;
pub mod db;
pub mod coll;
pub mod common;
pub mod connstring;
pub mod cursor;
pub mod error;
pub mod gridfs;
pub mod pool;
pub mod stream;
pub mod topology;
pub mod wire_protocol;
mod apm;
mod auth;
mod command_type;
pub use apm::{CommandStarted, CommandResult};
pub use command_type::CommandType;
pub use error::{Error, ErrorCode, Result};
use std::fmt;
use std::fs::{File, OpenOptions};
use std::io::Write;
use std::ops::DerefMut;
use std::sync::{Arc, Mutex};
use std::sync::atomic::{AtomicIsize, Ordering, ATOMIC_ISIZE_INIT};
use apm::Listener;
use bson::Bson;
use common::{ReadPreference, ReadMode, WriteConcern};
use connstring::ConnectionString;
use db::{Database, ThreadedDatabase};
use error::Error::ResponseError;
use pool::PooledStream;
use stream::StreamConnector;
use topology::{Topology, TopologyDescription, TopologyType, DEFAULT_HEARTBEAT_FREQUENCY_MS,
DEFAULT_LOCAL_THRESHOLD_MS, DEFAULT_SERVER_SELECTION_TIMEOUT_MS};
use topology::server::Server;
pub const DRIVER_NAME: &'static str = "mongo-rust-driver-prototype";
/// Interfaces with a MongoDB server or replica set.
pub struct ClientInner {
/// Indicates how a server should be selected for read operations.
pub read_preference: ReadPreference,
/// Describes the guarantees provided by MongoDB when reporting the success of a write
/// operation.
pub write_concern: WriteConcern,
req_id: Arc<AtomicIsize>,
topology: Topology,
listener: Listener,
log_file: Option<Mutex<File>>,
}
impl fmt::Debug for ClientInner {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("ClientInner")
.field("read_preference", &self.read_preference)
.field("write_concern", &self.write_concern)
.field("req_id", &self.req_id)
.field("topology", &self.topology)
.field("listener", &"Listener { .. }")
.field("log_file", &self.log_file)
.finish()
}
}
/// Configuration options for a client.
#[derive(Default)]
pub struct ClientOptions {
/// File path for command logging.
pub log_file: Option<String>,
/// Client-level server selection preferences for read operations.
pub read_preference: Option<ReadPreference>,
/// Client-level write guarantees when reporting a write success.
pub write_concern: Option<WriteConcern>,
/// Frequency of server monitor updates; default 10000 ms.
pub heartbeat_frequency_ms: u32,
/// Timeout for selecting an appropriate server for operations; default 30000 ms.
pub server_selection_timeout_ms: i64,
/// The size of the latency window for selecting suitable servers; default 15 ms.
pub local_threshold_ms: i64,
/// Options for how to connect to the server.
pub stream_connector: StreamConnector,
}
impl ClientOptions {
/// Creates a new default options struct.
pub fn new() -> ClientOptions {
ClientOptions {
log_file: None,
read_preference: None,
write_concern: None,
heartbeat_frequency_ms: DEFAULT_HEARTBEAT_FREQUENCY_MS,
server_selection_timeout_ms: DEFAULT_SERVER_SELECTION_TIMEOUT_MS,
local_threshold_ms: DEFAULT_LOCAL_THRESHOLD_MS,
stream_connector: StreamConnector::default(),
}
}
/// Creates a new options struct with a specified log file.
pub fn with_log_file(file: &str) -> ClientOptions {
let mut options = ClientOptions::new();
options.log_file = Some(String::from(file));
options
}
#[cfg(feature = "ssl")]
/// Creates a new options struct with a specified SSL certificate and key files.
pub fn with_ssl(
ca_file: &str,
certificate_file: &str,
key_file: &str,
verify_peer: bool,
) -> ClientOptions {
let mut options = ClientOptions::new();
options.stream_connector =
StreamConnector::with_ssl(ca_file, certificate_file, key_file, verify_peer);
options
}
#[cfg(feature = "ssl")]
/// Creates a new options struct with a specified SSL certificate
pub fn with_unauthenticated_ssl(ca_file: &str, verify_peer: bool) -> ClientOptions {
let mut options = ClientOptions::new();
options.stream_connector = StreamConnector::with_unauthenticated_ssl(ca_file, verify_peer);
options
}
}
pub trait ThreadedClient: Sync + Sized {
/// Creates a new Client directly connected to a single MongoDB server.
fn connect(host: &str, port: u16) -> Result<Self>;
/// Creates a new Client directly connected to a single MongoDB server with options.
fn connect_with_options(host: &str, port: u16, ClientOptions) -> Result<Self>;
/// Creates a new Client connected to a complex topology, such as a
/// replica set or sharded cluster.
fn with_uri(uri: &str) -> Result<Self>;
/// Creates a new Client connected to a complex topology, such as a
/// replica set or sharded cluster, with options.
fn with_uri_and_options(uri: &str, options: ClientOptions) -> Result<Self>;
/// Create a new Client with manual connection configurations.
/// `connect` and `with_uri` should generally be used as higher-level constructors.
fn with_config(
config: ConnectionString,
options: Option<ClientOptions>,
description: Option<TopologyDescription>,
) -> Result<Self>;
/// Creates a database representation.
fn db(&self, db_name: &str) -> Database;
/// Creates a database representation with custom read and write controls.
fn db_with_prefs(
&self,
db_name: &str,
read_preference: Option<ReadPreference>,
write_concern: Option<WriteConcern>,
) -> Database;
/// Acquires a connection stream from the pool, along with slave_ok and should_send_read_pref.
fn acquire_stream(&self, read_pref: ReadPreference) -> Result<(PooledStream, bool, bool)>;
/// Acquires a connection stream from the pool for write operations.
fn acquire_write_stream(&self) -> Result<PooledStream>;
/// Returns a unique operational request id.
fn get_req_id(&self) -> i32;
/// Returns a list of all database names that exist on the server.
fn database_names(&self) -> Result<Vec<String>>;
/// Drops the database defined by `db_name`.
fn drop_database(&self, db_name: &str) -> Result<()>;
/// Reports whether this instance is a primary, master, mongos, or standalone mongod instance.
fn is_master(&self) -> Result<bool>;
/// Sets a function to be run every time a command starts.
fn add_start_hook(&mut self, hook: fn(Client, &CommandStarted)) -> Result<()>;
/// Sets a function to be run every time a command completes.
fn add_completion_hook(&mut self, hook: fn(Client, &CommandResult)) -> Result<()>;
}
pub type Client = Arc<ClientInner>;
impl ThreadedClient for Client {
fn connect(host: &str, port: u16) -> Result<Client> {
let config = ConnectionString::new(host, port);
let mut description = TopologyDescription::new(StreamConnector::Tcp);
description.topology_type = TopologyType::Single;
Client::with_config(config, None, Some(description))
}
fn connect_with_options(host: &str, port: u16, options: ClientOptions) -> Result<Client> {
let config = ConnectionString::new(host, port);
let mut description = TopologyDescription::new(options.stream_connector.clone());
description.topology_type = TopologyType::Single;
Client::with_config(config, Some(options), Some(description))
}
fn with_uri(uri: &str) -> Result<Client> {
let config = connstring::parse(uri)?;
Client::with_config(config, None, None)
}
fn with_uri_and_options(uri: &str, options: ClientOptions) -> Result<Client> {
let config = connstring::parse(uri)?;
Client::with_config(config, Some(options), None)
}
fn with_config(
config: ConnectionString,
options: Option<ClientOptions>,
description: Option<TopologyDescription>,
) -> Result<Client> {
let client_options = options.unwrap_or_else(ClientOptions::new);
let rp = client_options.read_preference.unwrap_or_else(|| {
ReadPreference::new(ReadMode::Primary, None)
});
let wc = client_options.write_concern.unwrap_or_else(
WriteConcern::new,
);
let listener = Listener::new();
let file = match client_options.log_file {
Some(string) => {
let _ = listener.add_start_hook(log_command_started);
let _ = listener.add_completion_hook(log_command_completed);
Some(Mutex::new(
OpenOptions::new()
.write(true)
.append(true)
.create(true)
.open(&string)?
))
}
None => None,
};
let client = Arc::new(ClientInner {
req_id: Arc::new(ATOMIC_ISIZE_INIT),
topology: Topology::new(
config.clone(),
description,
client_options.stream_connector.clone(),
)?,
listener: listener,
read_preference: rp,
write_concern: wc,
log_file: file,
});
// Fill servers array and set options
{
let top_description = &client.topology.description;
let mut top = top_description.write()?;
top.heartbeat_frequency_ms = client_options.heartbeat_frequency_ms;
top.server_selection_timeout_ms = client_options.server_selection_timeout_ms;
top.local_threshold_ms = client_options.local_threshold_ms;
for host in config.hosts {
let server = Server::new(
client.clone(),
host.clone(),
top_description.clone(),
true,
client_options.stream_connector.clone(),
);
top.servers.insert(host, server);
}
}
Ok(client)
}
fn db(&self, db_name: &str) -> Database {
Database::open(self.clone(), db_name, None, None)
}
fn db_with_prefs(
&self,
db_name: &str,
read_preference: Option<ReadPreference>,
write_concern: Option<WriteConcern>,
) -> Database {
Database::open(self.clone(), db_name, read_preference, write_concern)
}
fn acquire_stream(
&self,
read_preference: ReadPreference,
) -> Result<(PooledStream, bool, bool)> {
self.topology.acquire_stream(self.clone(), read_preference)
}
fn acquire_write_stream(&self) -> Result<PooledStream> {
self.topology.acquire_write_stream(self.clone())
}
fn get_req_id(&self) -> i32 {
self.req_id.fetch_add(1, Ordering::SeqCst) as i32
}
fn database_names(&self) -> Result<Vec<String>> {
let doc = doc!{ "listDatabases": 1 };
let db = self.db("admin");
let res = db.command(doc, CommandType::ListDatabases, None)?;
if let Some(&Bson::Array(ref batch)) = res.get("databases") {
// Extract database names
let map = batch
.iter()
.filter_map(|bdoc| {
if let Bson::Document(ref doc) = *bdoc {
if let Some(&Bson::String(ref name)) = doc.get("name") {
return Some(name.to_owned());
}
}
None
})
.collect();
Ok(map)
} else {
Err(ResponseError(
String::from("Server reply does not contain 'databases'."),
))
}
}
fn drop_database(&self, db_name: &str) -> Result<()> {
self.db(db_name).drop_database()
}
fn is_master(&self) -> Result<bool> {
let doc = doc!{ "isMaster": 1 };
let db = self.db("local");
let res = db.command(doc, CommandType::IsMaster, None)?;
match res.get("ismaster") {
Some(&Bson::Boolean(is_master)) => Ok(is_master),
_ => Err(ResponseError(
String::from("Server reply does not contain 'ismaster'."),
)),
}
}
fn add_start_hook(&mut self, hook: fn(Client, &CommandStarted)) -> Result<()> {
self.listener.add_start_hook(hook)
}
fn add_completion_hook(&mut self, hook: fn(Client, &CommandResult)) -> Result<()> {
self.listener.add_completion_hook(hook)
}
}
fn log_command_started(client: Client, command_started: &CommandStarted) {
let mutex = match client.log_file {
Some(ref mutex) => mutex,
None => return,
};
let mut guard = match mutex.lock() {
Ok(guard) => guard,
Err(_) => return,
};
let _ = writeln!(guard.deref_mut(), "{}", command_started);
}
fn log_command_completed(client: Client, command_result: &CommandResult) {
let mutex = match client.log_file {
Some(ref mutex) => mutex,
None => return,
};
let mut guard = match mutex.lock() {
Ok(guard) => guard,
Err(_) => return,
};
let _ = writeln!(guard.deref_mut(), "{}", command_result);
}<|fim▁end|> | |
<|file_name|>shared.js<|end_file_name|><|fim▁begin|>"use strict";;
var __importDefault = (this && this.__importDefault) || function (mod) {
return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
var types_1 = __importDefault(require("./types"));
function default_1(fork) {
var types = fork.use(types_1.default);
var Type = types.Type;
var builtin = types.builtInTypes;
var isNumber = builtin.number;
// An example of constructing a new type with arbitrary constraints from
// an existing type.
function geq(than) {
return Type.from(function (value) { return isNumber.check(value) && value >= than; }, isNumber + " >= " + than);
}
;
// Default value-returning functions that may optionally be passed as a
// third argument to Def.prototype.field.
var defaults = {
// Functions were used because (among other reasons) that's the most
// elegant way to allow for the emptyArray one always to give a new
// array instance.<|fim▁hole|> "undefined": function () { },
"use strict": function () { return "use strict"; }
};
var naiveIsPrimitive = Type.or(builtin.string, builtin.number, builtin.boolean, builtin.null, builtin.undefined);
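    // naiveIsPrimitive above is only a plain union type; isPrimitive below performs the
    // actual structural check (null, or any value that is neither an object nor a function)
    // and borrows naiveIsPrimitive's string form as its human-readable description.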
var isPrimitive = Type.from(function (value) {
if (value === null)
return true;
var type = typeof value;
if (type === "object" ||
type === "function") {
return false;
}
return true;
}, naiveIsPrimitive.toString());
return {
geq: geq,
defaults: defaults,
isPrimitive: isPrimitive,
};
}
exports.default = default_1;
module.exports = exports["default"];<|fim▁end|> | "null": function () { return null; },
"emptyArray": function () { return []; },
"false": function () { return false; },
"true": function () { return true; }, |
<|file_name|>browser.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use compositing::compositor_thread::EmbedderMsg;
use compositing::windowing::{WebRenderDebugOption, WindowEvent};
use euclid::{TypedPoint2D, TypedVector2D};
use glutin_app::keyutils::{CMD_OR_CONTROL, CMD_OR_ALT};
use glutin_app::window::{Window, LINE_HEIGHT};
use msg::constellation_msg::{Key, TopLevelBrowsingContextId as BrowserId};
use msg::constellation_msg::{KeyModifiers, KeyState, TraversalDirection};
use script_traits::TouchEventType;
use servo::net_traits::pub_domains::is_reg_domain;
use servo::servo_url::ServoUrl;
use servo_config::prefs::PREFS;
use std::mem;
use std::rc::Rc;
use tinyfiledialogs;
use webrender_api::ScrollLocation;
pub struct Browser {
current_url: Option<ServoUrl>,
/// id of the top level browsing context. It is unique as tabs
/// are not supported yet. None until created.
browser_id: Option<BrowserId>,
title: Option<String>,
status: Option<String>,
favicon: Option<ServoUrl>,
loading_state: Option<LoadingState>,
window: Rc<Window>,
event_queue: Vec<WindowEvent>,
shutdown_requested: bool,
}
enum LoadingState {
Connecting,
Loading,
Loaded,
}
impl Browser {
pub fn new(window: Rc<Window>) -> Browser {
Browser {
title: None,
current_url: None,
browser_id: None,
status: None,
favicon: None,
loading_state: None,
window: window,
event_queue: Vec::new(),
shutdown_requested: false,
}
}
pub fn get_events(&mut self) -> Vec<WindowEvent> {
mem::replace(&mut self.event_queue, Vec::new())
}
pub fn set_browser_id(&mut self, browser_id: BrowserId) {
self.browser_id = Some(browser_id);
}
pub fn handle_window_events(&mut self, events: Vec<WindowEvent>) {
for event in events {
match event {
WindowEvent::KeyEvent(ch, key, state, mods) => {
self.handle_key_from_window(ch, key, state, mods);
},
event => {
self.event_queue.push(event);
}
}
}
}
pub fn shutdown_requested(&self) -> bool {
self.shutdown_requested
}
/// Handle key events before sending them to Servo.
fn handle_key_from_window(&mut self, ch: Option<char>, key: Key, state: KeyState, mods: KeyModifiers) {
match (mods, ch, key) {
(CMD_OR_CONTROL, Some('r'), _) => {
if let Some(id) = self.browser_id {
self.event_queue.push(WindowEvent::Reload(id));
}
}
(CMD_OR_CONTROL, Some('l'), _) => {
if let Some(id) = self.browser_id {
let url: String = if let Some(ref current_url) = self.current_url {
current_url.to_string()
} else {
String::from("")
};
let title = "URL or search query";
if let Some(input) = tinyfiledialogs::input_box(title, title, &url) {
if let Some(url) = sanitize_url(&input) {
self.event_queue.push(WindowEvent::LoadUrl(id, url));
}
}
}
}
(CMD_OR_CONTROL, Some('q'), _) => {
self.event_queue.push(WindowEvent::Quit);
}
(_, Some('3'), _) => if mods ^ KeyModifiers::CONTROL == KeyModifiers::SHIFT {
self.event_queue.push(WindowEvent::CaptureWebRender);
}
(KeyModifiers::CONTROL, None, Key::F10) => {
let event = WindowEvent::ToggleWebRenderDebug(WebRenderDebugOption::RenderTargetDebug);
self.event_queue.push(event);
}
(KeyModifiers::CONTROL, None, Key::F11) => {
let event = WindowEvent::ToggleWebRenderDebug(WebRenderDebugOption::TextureCacheDebug);
self.event_queue.push(event);
}
(KeyModifiers::CONTROL, None, Key::F12) => {
let event = WindowEvent::ToggleWebRenderDebug(WebRenderDebugOption::Profiler);
self.event_queue.push(event);
}
(CMD_OR_ALT, None, Key::Right) | (KeyModifiers::NONE, None, Key::NavigateForward) => {
if let Some(id) = self.browser_id {
let event = WindowEvent::Navigation(id, TraversalDirection::Forward(1));
self.event_queue.push(event);
}
}
(CMD_OR_ALT, None, Key::Left) | (KeyModifiers::NONE, None, Key::NavigateBackward) => {
if let Some(id) = self.browser_id {
let event = WindowEvent::Navigation(id, TraversalDirection::Back(1));
self.event_queue.push(event);
}
}
(KeyModifiers::NONE, None, Key::Escape) => {
self.event_queue.push(WindowEvent::Quit);
}
_ => {
let event = self.platform_handle_key(key, mods);
self.event_queue.push(event.unwrap_or(WindowEvent::KeyEvent(ch, key, state, mods)));
}
}
}
#[cfg(not(target_os = "windows"))]
fn platform_handle_key(&self, key: Key, mods: KeyModifiers) -> Option<WindowEvent> {
match (mods, key, self.browser_id) {
(CMD_OR_CONTROL, Key::LeftBracket, Some(id)) => {
Some(WindowEvent::Navigation(id, TraversalDirection::Back(1)))
}
(CMD_OR_CONTROL, Key::RightBracket, Some(id)) => {
Some(WindowEvent::Navigation(id, TraversalDirection::Forward(1)))
}
_ => None
}
}
#[cfg(target_os = "windows")]
fn platform_handle_key(&self, key: Key, mods: KeyModifiers) -> Option<WindowEvent> {
None
}
/// Handle key events after they have been handled by Servo.
fn handle_key_from_servo(&mut self, _: Option<BrowserId>, ch: Option<char>,
key: Key, _: KeyState, mods: KeyModifiers) {
match (mods, ch, key) {
(_, Some('+'), _) => {
if mods & !KeyModifiers::SHIFT == CMD_OR_CONTROL {
self.event_queue.push(WindowEvent::Zoom(1.1));
} else if mods & !KeyModifiers::SHIFT == CMD_OR_CONTROL | KeyModifiers::ALT {
self.event_queue.push(WindowEvent::PinchZoom(1.1));
}
}
(CMD_OR_CONTROL, Some('-'), _) => {
self.event_queue.push(WindowEvent::Zoom(1.0 / 1.1));
}
(_, Some('-'), _) if mods == CMD_OR_CONTROL | KeyModifiers::ALT => {
self.event_queue.push(WindowEvent::PinchZoom(1.0 / 1.1));
}
(CMD_OR_CONTROL, Some('0'), _) => {
self.event_queue.push(WindowEvent::ResetZoom);
}
(KeyModifiers::NONE, None, Key::PageDown) => {
let scroll_location = ScrollLocation::Delta(TypedVector2D::new(0.0,
-self.window.page_height() + 2.0 * LINE_HEIGHT));
self.scroll_window_from_key(scroll_location, TouchEventType::Move);
}
(KeyModifiers::NONE, None, Key::PageUp) => {
let scroll_location = ScrollLocation::Delta(TypedVector2D::new(0.0,
self.window.page_height() - 2.0 * LINE_HEIGHT));
self.scroll_window_from_key(scroll_location, TouchEventType::Move);
}
(KeyModifiers::NONE, None, Key::Home) => {
self.scroll_window_from_key(ScrollLocation::Start, TouchEventType::Move);<|fim▁hole|> self.scroll_window_from_key(ScrollLocation::End, TouchEventType::Move);
}
(KeyModifiers::NONE, None, Key::Up) => {
self.scroll_window_from_key(ScrollLocation::Delta(TypedVector2D::new(0.0, 3.0 * LINE_HEIGHT)),
TouchEventType::Move);
}
(KeyModifiers::NONE, None, Key::Down) => {
self.scroll_window_from_key(ScrollLocation::Delta(TypedVector2D::new(0.0, -3.0 * LINE_HEIGHT)),
TouchEventType::Move);
}
(KeyModifiers::NONE, None, Key::Left) => {
self.scroll_window_from_key(ScrollLocation::Delta(TypedVector2D::new(LINE_HEIGHT, 0.0)),
TouchEventType::Move);
}
(KeyModifiers::NONE, None, Key::Right) => {
self.scroll_window_from_key(ScrollLocation::Delta(TypedVector2D::new(-LINE_HEIGHT, 0.0)),
TouchEventType::Move);
}
_ => {
}
}
}
fn scroll_window_from_key(&mut self, scroll_location: ScrollLocation, phase: TouchEventType) {
let event = WindowEvent::Scroll(scroll_location, TypedPoint2D::zero(), phase);
self.event_queue.push(event);
}
pub fn handle_servo_events(&mut self, events: Vec<EmbedderMsg>) {
for event in events {
match event {
EmbedderMsg::Status(_browser_id, status) => {
self.status = status;
},
EmbedderMsg::ChangePageTitle(_browser_id, title) => {
self.title = title;
let fallback_title: String = if let Some(ref current_url) = self.current_url {
current_url.to_string()
} else {
String::from("Untitled")
};
let title = match self.title {
Some(ref title) if title.len() > 0 => &**title,
_ => &fallback_title,
};
let title = format!("{} - Servo", title);
self.window.set_title(&title);
}
EmbedderMsg::MoveTo(_browser_id, point) => {
self.window.set_position(point);
}
EmbedderMsg::ResizeTo(_browser_id, size) => {
self.window.set_inner_size(size);
}
EmbedderMsg::AllowNavigation(_browser_id, _url, response_chan) => {
if let Err(e) = response_chan.send(true) {
warn!("Failed to send allow_navigation() response: {}", e);
};
}
EmbedderMsg::KeyEvent(browser_id, ch, key, state, modified) => {
self.handle_key_from_servo(browser_id, ch, key, state, modified);
}
EmbedderMsg::SetCursor(cursor) => {
self.window.set_cursor(cursor);
}
EmbedderMsg::NewFavicon(_browser_id, url) => {
self.favicon = Some(url);
}
EmbedderMsg::HeadParsed(_browser_id) => {
self.loading_state = Some(LoadingState::Loading);
}
EmbedderMsg::HistoryChanged(_browser_id, entries, current) => {
self.current_url = Some(entries[current].url.clone());
}
EmbedderMsg::SetFullscreenState(_browser_id, state) => {
self.window.set_fullscreen(state);
}
EmbedderMsg::LoadStart(_browser_id) => {
self.loading_state = Some(LoadingState::Connecting);
}
EmbedderMsg::LoadComplete(_browser_id) => {
self.loading_state = Some(LoadingState::Loaded);
}
EmbedderMsg::Shutdown => {
self.shutdown_requested = true;
},
EmbedderMsg::Panic(_browser_id, _reason, _backtrace) => {
}
}
}
}
}
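/// Interprets location-bar input: first as a complete URL, then as a bare
/// domain or path (prefixed with "http://"), and finally as a query for the
/// search page configured under the "shell.searchpage" preference.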
fn sanitize_url(request: &str) -> Option<ServoUrl> {
let request = request.trim();
ServoUrl::parse(&request).ok()
.or_else(|| {
if request.contains('/') || is_reg_domain(request) {
ServoUrl::parse(&format!("http://{}", request)).ok()
} else {
None
}
}).or_else(|| {
PREFS.get("shell.searchpage").as_string().and_then(|s: &str| {
let url = s.replace("%s", request);
ServoUrl::parse(&url).ok()
})
})
}<|fim▁end|> | }
(KeyModifiers::NONE, None, Key::End) => { |
<|file_name|>admins.py<|end_file_name|><|fim▁begin|>from canvas_sdk import client, utils
def make_account_admin(request_ctx, account_id, user_id, role=None, role_id=None, send_confirmation=None, **request_kwargs):
"""
Flag an existing user as an admin within the account.
:param request_ctx: The request context
:type request_ctx: :class:RequestContext
:param account_id: (required) ID
:type account_id: string
:param user_id: (required) The id of the user to promote.
:type user_id: integer
:param role: (optional) (deprecated)
The user's admin relationship with the account will be created with the
given role. Defaults to 'AccountAdmin'.
:type role: string or None
:param role_id: (optional) The user's admin relationship with the account will be created with the
given role. Defaults to the built-in role for 'AccountAdmin'.
:type role_id: integer or None
:param send_confirmation: (optional) Send a notification email to
the new admin if true. Default is true.
:type send_confirmation: boolean or None
:return: Make an account admin
:rtype: requests.Response (with Admin data)
"""
path = '/v1/accounts/{account_id}/admins'
payload = {
'user_id' : user_id,
'role' : role,
'role_id' : role_id,
'send_confirmation' : send_confirmation,
}
url = request_ctx.base_api_url + path.format(account_id=account_id)
response = client.post(request_ctx, url, payload=payload, **request_kwargs)
return response
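# Illustrative usage sketch (not part of the SDK itself); `ctx` is an assumed,
# already-configured RequestContext instance and the ids are placeholders:
#
#   response = make_account_admin(ctx, account_id='1', user_id=42,
#                                 send_confirmation=False)
#   new_admin = response.json()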
def remove_account_admin(request_ctx, account_id, user_id, role=None, role_id=None, **request_kwargs):
"""<|fim▁hole|> :type request_ctx: :class:RequestContext
:param account_id: (required) ID
:type account_id: string
:param user_id: (required) ID
:type user_id: string
:param role: (optional) (Deprecated)
Account role to remove from the user. Defaults to 'AccountAdmin'. Any
other account role must be specified explicitly.
:type role: string or None
:param role_id: (optional) The id of the account role to remove from the user.
Defaults to the built-in role for 'AccountAdmin'.
:type role_id: integer or None
:return: Remove account admin
:rtype: requests.Response (with Admin data)
"""
path = '/v1/accounts/{account_id}/admins/{user_id}'
payload = {
'role' : role,
'role_id' : role_id,
}
url = request_ctx.base_api_url + path.format(account_id=account_id, user_id=user_id)
response = client.delete(request_ctx, url, payload=payload, **request_kwargs)
return response
def list_account_admins(request_ctx, account_id, user_id=None, per_page=None, **request_kwargs):
"""
List the admins in the account
:param request_ctx: The request context
:type request_ctx: :class:RequestContext
:param account_id: (required) ID
:type account_id: string
:param user_id: (optional) Scope the results to those with user IDs equal to any of the IDs specified here.
:type user_id: array or None
:param per_page: (optional) Set how many results canvas should return, defaults to config.LIMIT_PER_PAGE
:type per_page: integer or None
:return: List account admins
:rtype: requests.Response (with array data)
"""
if per_page is None:
per_page = request_ctx.per_page
path = '/v1/accounts/{account_id}/admins'
payload = {
'user_id' : user_id,
'per_page' : per_page,
}
url = request_ctx.base_api_url + path.format(account_id=account_id)
response = client.get(request_ctx, url, payload=payload, **request_kwargs)
return response<|fim▁end|> | Remove the rights associated with an account admin role from a user.
:param request_ctx: The request context |
<|file_name|>proof_map_index.rs<|end_file_name|><|fim▁begin|>// Copyright 2020 The Exonum Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Property testing for proofs of existence / absence in `ProofMapIndex`.
//!
//! To adjust the number of test cases for each test, set the `PROPTEST_CASES` environment
//! variable as per `proptest` docs. The number of test cases for large tests will be scaled
//! back automatically. A reasonable value for `PROPTEST_CASES` is `256`
//! (default; results in running time ~30 sec for larger tests) or more. The run time
//! scales linearly with the number of cases.
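//!
//! For example (treat the exact cargo invocation as a sketch; it depends on how the
//! workspace and test targets are laid out):
//!
//! ```text
//! PROPTEST_CASES=256 cargo test
//! ```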
// cspell:ignore proptest
use exonum_merkledb::{
access::{CopyAccessExt, RawAccess},
proof_map::{Hashed, ToProofPath},
BinaryKey, BinaryValue, Database, MapProof, ObjectHash, ProofMapIndex, TemporaryDB,
};
use proptest::{
prelude::prop::{
array,
collection::{btree_map, vec},
},
prelude::*,
test_runner::{Config, TestCaseError, TestCaseResult},
};
use std::{
collections::{BTreeMap, BTreeSet},
fmt::Debug,
ops::{Range, RangeInclusive},
};
use crate::key::Key;
mod key;
const INDEX_NAME: &str = "index";
type Data = BTreeMap<[u8; 32], u64>;
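/// Checks a single-entry proof against `table`: the proof must verify against the
/// table's object hash and must expose exactly the expected `(key, value)` entry,
/// or no entry at all when `key` is `None`.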
fn check_map_proof<T, K, V>(
proof: &MapProof<K, V>,
key: Option<K>,
table: &ProofMapIndex<T, K, V>,
) -> TestCaseResult
where
T: RawAccess,
K: BinaryKey + ObjectHash + PartialEq + Debug,
V: BinaryValue + PartialEq + Debug,
{
let entry = key.map(|key| {
let value = table.get(&key).unwrap();
(key, value)
});
let proof = proof
.check_against_hash(table.object_hash())
.map_err(|e| TestCaseError::fail(e.to_string()))?;
prop_assert!(proof.entries().eq(entry.as_ref().map(|(k, v)| (k, v))));
Ok(())
}
fn check_map_multiproof<T, K, V>(
proof: &MapProof<K, V, Hashed>,
keys: BTreeSet<&K>,
table: &ProofMapIndex<T, K, V>,
) -> TestCaseResult
where
T: RawAccess,
K: BinaryKey + ObjectHash + PartialEq + Debug,
V: BinaryValue + PartialEq + Debug,
{
let mut entries: Vec<(&K, V)> = Vec::new();
let mut missing_keys: Vec<&K> = Vec::new();
for key in keys {
if table.contains(key) {
let value = table.get(key).unwrap();
entries.push((key, value));
} else {
missing_keys.push(key);
}
}
// Sort entries and missing keys by the order imposed by the `ProofPath`
// serialization of the keys
entries.sort_unstable_by(|(x, _), (y, _)| {
Hashed::transform_key(*x)
.partial_cmp(&Hashed::transform_key(*y))
.unwrap()
});
missing_keys.sort_unstable_by(|&x, &y| {
Hashed::transform_key(x)
.partial_cmp(&Hashed::transform_key(y))
.unwrap()
});
let unchecked_proof = proof;
let proof = proof
.check()
.map_err(|e| TestCaseError::fail(e.to_string()))?;
prop_assert!(proof
.all_entries()
.eq(unchecked_proof.all_entries_unchecked()));
prop_assert_eq!(proof.index_hash(), table.object_hash());
let mut actual_keys: Vec<&K> = proof.missing_keys().collect();
actual_keys.sort_unstable_by(|&x, &y| {
Hashed::transform_key(x)
.partial_cmp(&Hashed::transform_key(y))
.unwrap()
});
prop_assert_eq!(missing_keys, actual_keys);
let mut actual_entries: Vec<(&K, &V)> = proof.entries().collect();
actual_entries.sort_unstable_by(|&(x, _), &(y, _)| {
Hashed::transform_key(x)
.partial_cmp(&Hashed::transform_key(y))
.unwrap()
});
prop_assert!(entries.iter().map(|(k, v)| (*k, v)).eq(actual_entries));
Ok(())
}
/// Writes raw data to a database.
fn write_data(db: &TemporaryDB, data: Data) {
let fork = db.fork();
{
let mut table: ProofMapIndex<_, Key, _> = fork.get_proof_map(INDEX_NAME);
table.clear();
for (key, value) in data {
table.put(&key.into(), value);
}
}
db.merge(fork.into_patch()).unwrap();
}
/// Creates data for a random-filled `ProofMapIndex<_, [u8; 32], u64>`.
fn index_data(
key_bytes: impl Strategy<Value = u8>,
sizes: Range<usize>,
) -> impl Strategy<Value = Data> {
btree_map(array::uniform32(key_bytes), any::<u64>(), sizes)
}
fn absent_keys(key_bytes: RangeInclusive<u8>) -> impl Strategy<Value = Vec<Key>> {
vec(array::uniform32(key_bytes).prop_map(Key), 20)
}
/// Generates data to test a proof of presence.
fn data_for_proof_of_presence(
key_bytes: impl Strategy<Value = u8>,
sizes: Range<usize>,
) -> impl Strategy<Value = (Key, Data)> {
index_data(key_bytes, sizes)
.prop_flat_map(|data| (0..data.len(), Just(data)))
.prop_map(|(index, data)| (*data.keys().nth(index).unwrap(), data))
.prop_map(|(index, data)| (index.into(), data))
}
fn data_for_multiproof(
key_bytes: impl Strategy<Value = u8>,
sizes: Range<usize>,
) -> impl Strategy<Value = (Vec<Key>, Data)> {
index_data(key_bytes, sizes)
.prop_flat_map(|data| (vec(0..data.len(), data.len() / 5), Just(data)))
.prop_map(|(indexes, data)| {
// Note that keys may coincide; this is intentional.
let keys: Vec<Key> = indexes
.into_iter()
.map(|i| *data.keys().nth(i).unwrap())
.map(Key)
.collect();
(keys, data)
})
}
fn test_proof(db: &TemporaryDB, key: Key) -> TestCaseResult {
let snapshot = db.snapshot();
let table: ProofMapIndex<_, Key, u64> = snapshot.get_proof_map(INDEX_NAME);
let proof = table.get_proof(key);
let expected_key = if table.contains(&key) {
Some(key)
} else {
None
};
check_map_proof(&proof, expected_key, &table)
}
fn test_multiproof(db: &TemporaryDB, keys: &[Key]) -> TestCaseResult {
let snapshot = db.snapshot();
let table: ProofMapIndex<_, Key, u64> = snapshot.get_proof_map(INDEX_NAME);
let proof = table.get_multiproof(keys.to_vec());
let unique_keys: BTreeSet<_> = keys.iter().collect();
check_map_multiproof(&proof, unique_keys, &table)
}
#[derive(Debug, Clone)]
struct TestParams {
key_bytes: RangeInclusive<u8>,
index_sizes: Range<usize>,
test_cases_divider: u32,
}
impl TestParams {
fn key_bytes(&self) -> RangeInclusive<u8> {
self.key_bytes.clone()
}
fn index_sizes(&self) -> Range<usize> {
self.index_sizes.clone()
}
fn config(&self) -> Config {
Config::with_cases(Config::default().cases / self.test_cases_divider)
}
fn proof_of_presence(&self) {
let db = TemporaryDB::new();
let strategy = data_for_proof_of_presence(self.key_bytes(), self.index_sizes());
proptest!(self.config(), |((key, data) in strategy)| {
write_data(&db, data);
test_proof(&db, key)?;
});
}
fn proof_of_absence(&self) {
let db = TemporaryDB::new();
let key_strategy = array::uniform32(self.key_bytes()).prop_map(Key);
let data_strategy = index_data(self.key_bytes(), self.index_sizes());
proptest!(self.config(), |(key in key_strategy, data in data_strategy)| {
write_data(&db, data);
test_proof(&db, key)?;
});
}
fn multiproof_of_existing_elements(&self) {
let db = TemporaryDB::new();
let strategy = data_for_multiproof(self.key_bytes(), self.index_sizes());
proptest!(self.config(), |((keys, data) in strategy)| {
write_data(&db, data);
test_multiproof(&db, &keys)?;
});
}
fn multiproof_of_absent_elements(&self) {
let db = TemporaryDB::new();
let keys_strategy = absent_keys(self.key_bytes());
let data_strategy = index_data(self.key_bytes(), self.index_sizes());
proptest!(self.config(), |(keys in keys_strategy, data in data_strategy)| {
write_data(&db, data);
test_multiproof(&db, &keys)?;
});
}
fn mixed_multiproof(&self) {
let db = TemporaryDB::new();
let strategy = data_for_multiproof(self.key_bytes(), self.index_sizes());
let absent_keys_strategy = absent_keys(self.key_bytes());<|fim▁hole|> keys.extend_from_slice(&absent_keys);
test_multiproof(&db, &keys)?;
}
);
}
}
mod small_index {
use super::*;
const PARAMS: TestParams = TestParams {
key_bytes: 0..=255,
index_sizes: 10..100,
test_cases_divider: 1,
};
#[test]
fn proof_of_presence() {
PARAMS.proof_of_presence();
}
#[test]
fn proof_of_absence() {
PARAMS.proof_of_absence();
}
#[test]
fn multiproof_of_existing_elements() {
PARAMS.multiproof_of_existing_elements();
}
#[test]
fn multiproof_of_absent_elements() {
PARAMS.multiproof_of_absent_elements();
}
#[test]
fn mixed_multiproof() {
PARAMS.mixed_multiproof();
}
}
mod small_index_skewed {
use super::*;
const PARAMS: TestParams = TestParams {
key_bytes: 0..=2,
index_sizes: 10..100,
test_cases_divider: 1,
};
#[test]
fn proof_of_presence() {
PARAMS.proof_of_presence();
}
#[test]
fn proof_of_absence() {
PARAMS.proof_of_absence();
}
#[test]
fn multiproof_of_existing_elements() {
PARAMS.multiproof_of_existing_elements();
}
#[test]
fn multiproof_of_absent_elements() {
PARAMS.multiproof_of_absent_elements();
}
#[test]
fn mixed_multiproof() {
PARAMS.mixed_multiproof();
}
}
mod large_index {
use super::*;
const PARAMS: TestParams = TestParams {
key_bytes: 0..=255,
index_sizes: 5_000..10_000,
test_cases_divider: 32,
};
#[test]
fn proof_of_presence() {
PARAMS.proof_of_presence();
}
#[test]
fn proof_of_absence() {
PARAMS.proof_of_absence();
}
#[test]
fn multiproof_of_existing_elements() {
PARAMS.multiproof_of_existing_elements();
}
#[test]
fn multiproof_of_absent_elements() {
PARAMS.multiproof_of_absent_elements();
}
#[test]
fn mixed_multiproof() {
PARAMS.mixed_multiproof();
}
}
mod large_index_skewed {
use super::*;
const PARAMS: TestParams = TestParams {
key_bytes: 0..=2,
index_sizes: 5_000..10_000,
test_cases_divider: 32,
};
#[test]
fn proof_of_presence() {
PARAMS.proof_of_presence();
}
#[test]
fn proof_of_absence() {
PARAMS.proof_of_absence();
}
#[test]
fn multiproof_of_existing_elements() {
PARAMS.multiproof_of_existing_elements();
}
#[test]
fn multiproof_of_absent_elements() {
PARAMS.multiproof_of_absent_elements();
}
#[test]
fn mixed_multiproof() {
PARAMS.mixed_multiproof();
}
}<|fim▁end|> | proptest!(
self.config(),
|((mut keys, data) in strategy, absent_keys in absent_keys_strategy)| {
write_data(&db, data); |
<|file_name|>Function.java<|end_file_name|><|fim▁begin|>/**
* Licensed to Odiago, Inc. under one or more contributor license
* agreements. See the NOTICE.txt file distributed with this work for
* additional information regarding copyright ownership. Odiago, Inc.
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
<|fim▁hole|>
import java.util.Collections;
import java.util.List;
/**
* Abstract base class that defines a callable function. Subclasses
* of this exist for scalar, aggregate, and table functions.
*/
public abstract class Function {
/**
* @return the Type of the object returned by the function.
*/
public abstract Type getReturnType();
/**
* @return an ordered list containing the types expected for all mandatory arguments.
*/
public abstract List<Type> getArgumentTypes();
/**
* @return an ordered list containing types expected for variable argument lists.
* If a function takes a variable-length argument list, the varargs must be arranged
* in groups matching the size of the list returned by this method. e.g., to accept
* an arbitrary number of strings, this should return a singleton list of type STRING.
* If pairs of strings and ints are required, this should return a list [STRING, INT].
*/
public List<Type> getVarArgTypes() {
return Collections.emptyList();
}
/**
* Determines whether arguments are promoted to their specified types by
* the runtime. If this returns true, actual arguments are promoted to
* new values that match the types specified in getArgumentTypes().
* If false, the expressions are simply type-checked to ensure that there
* is a valid promotion, but are passed in as-is. The default value of
* this method is true.
*/
public boolean autoPromoteArguments() {
return true;
}
}<|fim▁end|> | package com.odiago.flumebase.lang; |
<|file_name|>ServiceNow.ts<|end_file_name|><|fim▁begin|>export interface IServiceNowCredentials {
username: string;
password: string;
url: string;
}
export interface ICreatedTicket {
tableName: string;
ticketNumber: string;
url: string;
}
<|fim▁hole|> Complete,
Rejected
}
export interface ITicketStage {
stageName: string;
status: StageStatus;
}
export interface ITicketStatus {
ticketNumber: string;
stages: ITicketStage[];
url: string;
}<|fim▁end|> | export enum StageStatus {
NotStarted,
InProgress, |
<|file_name|>hashalgo.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
from binascii import hexlify
class HashAlgo:
"""A generic class for an abstract cryptographic hash algorithm.
:undocumented: block_size
"""
#: The size of the resulting hash in bytes.
digest_size = None
#: The internal block size of the hash algorithm in bytes.
block_size = None
def __init__(self, hashFactory, data=None):
"""Initialize the hash object.
:Parameters:
hashFactory : callable
An object that will generate the actual hash implementation.
*hashFactory* must have a *new()* method, or must be directly
callable.
data : byte string
The very first chunk of the message to hash.
It is equivalent to an early call to `update()`.
"""
if hasattr(hashFactory, 'new'):
self._hash = hashFactory.new()
else:
self._hash = hashFactory()
if data:
self.update(data)
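    # Illustrative usage sketch (not part of the original documentation). The standard
    # library's hashlib factories satisfy the `hashFactory` contract described above:
    #
    #   import hashlib
    #   h = HashAlgo(hashlib.sha256, b"first chunk")
    #   h.update(b"second chunk")
    #   print(h.hexdigest())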
def update(self, data):
"""Continue hashing of a message by consuming the next chunk of data.
Repeated calls are equivalent to a single call with the concatenation
of all the arguments. In other words:
>>> m.update(a); m.update(b)<|fim▁hole|>
>>> m.update(a+b)
:Parameters:
data : byte string
The next chunk of the message being hashed.
"""
return self._hash.update(data)
def digest(self):
"""Return the **binary** (non-printable) digest of the message that has been hashed so far.
This method does not change the state of the hash object.
You can continue updating the object after calling this function.
:Return: A byte string of `digest_size` bytes. It may contain non-ASCII
characters, including null bytes.
"""
return self._hash.digest()
def hexdigest(self):
"""Return the **printable** digest of the message that has been hashed so far.
This method does not change the state of the hash object.
:Return: A string of 2* `digest_size` characters. It contains only
hexadecimal ASCII digits.
"""
return self._hash.hexdigest()
def copy(self):
"""Return a copy ("clone") of the hash object.
The copy will have the same internal state as the original hash
object.
This can be used to efficiently compute the digests of strings that
share a common initial substring.
:Return: A hash object of the same type
"""
return self._hash.copy()
def new(self, data=None):
"""Return a fresh instance of the hash object.
Unlike the `copy` method, the internal state of the object is empty.
:Parameters:
data : byte string
The next chunk of the message being hashed.
:Return: A hash object of the same type
"""
pass<|fim▁end|> |
is equivalent to: |
<|file_name|>AsyncRequest.js<|end_file_name|><|fim▁begin|>//
// Jala Project [http://opensvn.csie.org/traccgi/jala]
//
// Copyright 2004 ORF Online und Teletext GmbH
//
// Licensed under the Apache License, Version 2.0 (the ``License'');
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an ``AS IS'' BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// $Revision: 213 $
// $LastChangedBy: tobi $
// $LastChangedDate: 2007-05-08 16:12:32 +0200 (Die, 08 Mai 2007) $
// $HeadURL: http://dev.orf.at/source/jala/trunk/code/AsyncRequest.js $
//
/**
* @fileoverview Fields and methods of the jala.AsyncRequest class.
*/
// Define the global namespace for Jala modules
if (!global.jala) {
global.jala = {};
}
/**
* Creates a new AsyncRequest instance.
* @class This class is used to create requests of type "INTERNAL"
* (like cron-jobs) that are processed in a separate thread and
* therefore asynchronous.
* @param {Object} obj Object in whose context the method should be called
* @param {String} funcName Name of the function to call
* @param {Array} args Array containing the arguments that should be passed
* to the function (optional). This option is <em>deprecated</em>, instead
* pass the arguments directly to the {@link #run} method.
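 * @example
 * // Illustrative sketch only; `site` and its `rebuildIndex` method are assumed names.
 * var req = new jala.AsyncRequest(site, "rebuildIndex");
 * req.setTimeout(60);  // give the internal invocation a 60 second timeout
 * req.run("full");     // arguments passed to run() are forwarded to rebuildIndex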
* @constructor
* @returns A new instance of AsyncRequest
* @type AsyncRequest
* @deprecated Use the {@link http://helma.zumbrunn.net/reference/core/app.html#invokeAsync
* app.invokeAsync} method instead (built-in into Helma as
* of version 1.6)
*/
jala.AsyncRequest = function(obj, funcName, args) {
app.logger.warn("Use of jala.AsyncRequest is deprecated in this version.");
app.logger.warn("This module will probably be removed in a " +
"future version of Jala.");
/**
* Contains a reference to the thread started by this AsyncRequest
* @type java.lang.Thread
* @private
*/
var thread;
/**
* Contains the timeout defined for this AsyncRequest (in milliseconds)
* @type Number
* @private
*/
var timeout;
/**
* Contains the number of milliseconds to wait before starting
* the asynchronous request.
* @type Number
* @private
*/
var delay;
/**
* Run method necessary to implement java.lang.Runnable.
* @private
*/
var runner = function() {
// evaluator that will handle the request
var ev = app.__app__.getEvaluator();
<|fim▁hole|> java.lang.Thread.sleep(delay);
}
try {
if (args === undefined || args === null || args.constructor != Array) {
args = [];
}
if (timeout != null) {
ev.invokeInternal(obj, funcName, args, timeout);
} else {
ev.invokeInternal(obj, funcName, args);
}
} catch (e) {
// ignore it, but log it
app.log("[Runner] Caught Exception: " + e);
} finally {
// release the ev in any case
app.__app__.releaseEvaluator(ev);
// remove reference to underlying thread
thread = null;
}
return;
};
/**
* Sets the timeout of this asynchronous request.
* @param {Number} seconds Thread-timeout.
*/
this.setTimeout = function(seconds) {
timeout = seconds * 1000;
return;
};
/**
* Defines the delay to wait before evaluating this asynchronous request.
* @param {Number} millis Milliseconds to wait
*/
this.setDelay = function(millis) {
delay = millis;
return;
};
/**
* Starts this asynchronous request. Any arguments passed to
* this method will be passed to the method executed by
* this AsyncRequest instance.
*/
this.run = function() {
if (arguments.length > 0) {
// convert arguments object into array
args = Array.prototype.slice.call(arguments, 0, arguments.length);
}
thread = (new java.lang.Thread(new java.lang.Runnable({"run": runner})));
thread.start();
return;
};
/**
* Starts this asynchronous request.
* @deprecated Use {@link #run} instead
*/
this.evaluate = function() {
this.run.apply(this, arguments);
return;
};
/**
* Returns true if the underlying thread is alive
* @returns True if the underlying thread is alive,
* false otherwise.
* @type Boolean
*/
this.isAlive = function() {
return thread != null && thread.isAlive();
}
/** @ignore */
this.toString = function() {
return "[jala.AsyncRequest]";
};
/**
* Main constructor body
*/
if (!obj || !funcName)
throw "jala.AsyncRequest: insufficient arguments.";
return this;
}<|fim▁end|> | if (delay != null) { |
<|file_name|>fvMeshDistribute.H<|end_file_name|><|fim▁begin|>/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration | Website: https://openfoam.org
\\ / A nd | Copyright (C) 2011-2019 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
Class
Foam::fvMeshDistribute
Description
Sends/receives parts of mesh+fvfields to neighbouring processors.
Used in load balancing.
Input is, per local cell, the processor it should move to. Moves meshes
and volFields/surfaceFields and returns a map which can be used to
distribute other fields and data.
Notes:
- does not handle cyclics. Will probably handle separated proc patches.
- if all cells move off a processor, all of its processor patches get
deleted as well, so communications might break (since e.g. globalMeshData
expects procPatches on all processors)
- initial mesh has to have procPatches last and all normal patches common
to all processors and in the same order. This is checked.
- faces are matched topologically but points on the faces are not, so
expect problems on separated patches (cyclics?) and on zero-sized
processor edges.
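    Usage
        Illustrative sketch only; the mesh and decomposition variable names are
        assumptions, see the constructor and distribute() declarations below:
        \verbatim
            fvMeshDistribute distributor(mesh, mergeTol);
            // newDecomposition holds the target processor for every local cell
            autoPtr<mapDistributePolyMesh> map =
                distributor.distribute(newDecomposition);
        \endverbatim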
SourceFiles
fvMeshDistribute.C
fvMeshDistributeTemplates.C
\*---------------------------------------------------------------------------*/
#ifndef fvMeshDistribute_H
#define fvMeshDistribute_H
#include "Field.H"
#include "fvMeshSubset.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
namespace Foam
{
// Forward declaration of classes
class mapAddedPolyMesh;
class mapDistributePolyMesh;
/*---------------------------------------------------------------------------*\
Class fvMeshDistribute Declaration
\*---------------------------------------------------------------------------*/
class fvMeshDistribute
{
// Private Data
//- Underlying fvMesh
fvMesh& mesh_;
//- Absolute merging tolerance (constructing meshes gets done using
// geometric matching)
const scalar mergeTol_;
// Private Member Functions
static void inplaceRenumberWithFlip
(
const labelUList& oldToNew,
const bool oldToNewHasFlip,
const bool lstHasFlip,
labelUList& lst
);
//- Find indices with value
static labelList select
(
const bool selectEqual,
const labelList& values,
const label value
);
//- Check all procs have same names and in exactly same order.
static void checkEqualWordList(const string&, const wordList&);
//- Merge wordlists over all processors
static wordList mergeWordList(const wordList&);
// Patch handling
//- Find patch to put exposed faces into.
label findNonEmptyPatch() const;
//- Save boundary fields
template<class T, class Mesh>
void saveBoundaryFields
(
PtrList<FieldField<fvsPatchField, T>>& bflds
) const;
//- Map boundary fields
template<class T, class Mesh>
void mapBoundaryFields
(
const mapPolyMesh& map,
const PtrList<FieldField<fvsPatchField, T>>& oldBflds
);
//- Save internal fields of surfaceFields
template<class T>
void saveInternalFields(PtrList<Field<T>>& iflds) const;
//- Set value of patch faces resulting from internal faces
template<class T>
void mapExposedFaces
(
const mapPolyMesh& map,
const PtrList<Field<T>>& oldFlds
);
//- Init patch fields of certain type
template<class GeoField, class PatchFieldType>
void initPatchFields
(
const typename GeoField::value_type& initVal
);
//- Call correctBoundaryConditions on fields
template<class GeoField>
void correctBoundaryConditions();
//- Delete all processor patches. Move any processor faces into
// patchi.
autoPtr<mapPolyMesh> deleteProcPatches(const label patchi);
//- Repatch the mesh. This is only necessary for the proc
// boundary faces. newPatchID is over all boundary faces: -1 or
// new patchID. constructFaceMap is being adapted for the
// possible new face position (since proc faces get automatically
// matched)
autoPtr<mapPolyMesh> repatch
(
const labelList& newPatchID,
labelListList& constructFaceMap
);
//- Merge any local points that were remotely coupled.
// constructPointMap is adapted for the new point labels.
autoPtr<mapPolyMesh> mergeSharedPoints
(
const labelList& pointToGlobalMaster,
labelListList& constructPointMap
);
// Coupling information
//- Construct the local environment of all boundary faces.
void getCouplingData
(
const labelList& distribution,
labelList& sourceFace,
labelList& sourceProc,
labelList& sourcePatch,
labelList& sourceNewProc,
labelList& sourcePointMaster
) const;
// Subset the neighbourCell/neighbourProc fields
static void subsetCouplingData
(
const fvMesh& mesh,
const labelList& pointMap,
const labelList& faceMap,
const labelList& cellMap,
const labelList& oldDistribution,
const labelList& oldFaceOwner,
const labelList& oldFaceNeighbour,
const label oldInternalFaces,
const labelList& sourceFace,
const labelList& sourceProc,
const labelList& sourcePatch,
const labelList& sourceNewProc,
const labelList& sourcePointMaster,
labelList& subFace,
labelList& subProc,
labelList& subPatch,
labelList& subNewProc,
labelList& subPointMaster
);
//- Find cells on mesh whose faceID/procID match the neighbour
// cell/proc of domainMesh. Store the matching face.
static void findCouples
(
const primitiveMesh&,
const labelList& sourceFace,
const labelList& sourceProc,
const labelList& sourcePatch,
const label domain,
const primitiveMesh& domainMesh,
const labelList& domainFace,
const labelList& domainProc,
const labelList& domainPatch,
labelList& masterCoupledFaces,
labelList& slaveCoupledFaces
);
//- Map data on boundary faces to new mesh (resulting from adding
// two meshes)
static labelList mapBoundaryData
(
const primitiveMesh& mesh, // mesh after adding
const mapAddedPolyMesh& map,
const labelList& boundaryData0, // mesh before adding
const label nInternalFaces1,
const labelList& boundaryData1 // added mesh
);
//- Map point data to new mesh (resulting from adding two meshes)
static labelList mapPointData
(
const primitiveMesh& mesh, // mesh after adding
const mapAddedPolyMesh& map,
const labelList& boundaryData0, // on mesh before adding
const labelList& boundaryData1 // on added mesh
);
// Other
//- Remove cells. Add all exposed faces to patch oldInternalPatchi
autoPtr<mapPolyMesh> doRemoveCells
(
const labelList& cellsToRemove,
const label oldInternalPatchi
);
//- Add processor patches. Changes mesh and returns per neighbour
// proc the processor patchID.
void addProcPatches
(
const labelList&, // processor that neighbour is now on
const labelList&, // -1 or patch that face originated from
List<Map<label>>& procPatchID
);
//- Get boundary faces to be repatched. Is -1 or new patchID
static labelList getBoundaryPatch
(
const labelList& neighbourNewProc, // new processor per b. face
const labelList& referPatchID, // -1 or original patch
const List<Map<label>>& procPatchID // patchID
);
//- Send mesh and coupling data.
static void sendMesh
(
const label domain,
const fvMesh& mesh,
const wordList& pointZoneNames,
const wordList& facesZoneNames,<|fim▁hole|> const labelList& sourceProc,
const labelList& sourcePatch,
const labelList& sourceNewProc,
const labelList& sourcePointMaster,
Ostream& toDomain
);
//- Send subset of fields
template<class GeoField>
static void sendFields
(
const label domain,
const wordList& fieldNames,
const fvMeshSubset&,
Ostream& toNbr
);
//- Receive mesh. Opposite of sendMesh
static autoPtr<fvMesh> receiveMesh
(
const label domain,
const wordList& pointZoneNames,
const wordList& facesZoneNames,
const wordList& cellZoneNames,
const Time& runTime,
labelList& domainSourceFace,
labelList& domainSourceProc,
labelList& domainSourcePatch,
labelList& domainSourceNewProc,
labelList& domainSourcePointMaster,
Istream& fromNbr
);
//- Receive fields. Opposite of sendFields
template<class GeoField>
static void receiveFields
(
const label domain,
const wordList& fieldNames,
typename GeoField::Mesh&,
PtrList<GeoField>&,
const dictionary& fieldDicts
);
public:
ClassName("fvMeshDistribute");
// Constructors
//- Construct from mesh and absolute merge tolerance
fvMeshDistribute(fvMesh& mesh, const scalar mergeTol);
//- Disallow default bitwise copy construction
fvMeshDistribute(const fvMeshDistribute&) = delete;
// Member Functions
//- Helper function: count cells per processor in wanted distribution
static labelList countCells(const labelList&);
//- Send cells to neighbours according to distribution
// (for every cell the new proc)
autoPtr<mapDistributePolyMesh> distribute(const labelList& dist);
// Debugging
//- Print some info on coupling data
static void printCoupleInfo
(
const primitiveMesh&,
const labelList&,
const labelList&,
const labelList&,
const labelList&
);
//- Print some field info
template<class GeoField>
static void printFieldInfo(const fvMesh&);
//- Print some info on mesh.
static void printMeshInfo(const fvMesh&);
// Member Operators
//- Disallow default bitwise assignment
void operator=(const fvMeshDistribute&) = delete;
};
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
} // End namespace Foam
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
#ifdef NoRepository
#include "fvMeshDistributeTemplates.C"
#endif
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
#endif
// ************************************************************************* //<|fim▁end|> | const wordList& cellZoneNames,
const labelList& sourceFace, |
<|file_name|>C.controller.js<|end_file_name|><|fim▁begin|>sap.ui.define([
'jquery.sap.global',<|fim▁hole|> 'sap/ui/model/json/JSONModel'
], function(jQuery, Fragment, Controller, Filter, JSONModel) {
"use strict";
var CController = Controller.extend("sap.m.sample.InputAssistedTwoValues.C", {
inputId: '',
onInit: function () {
// set explored app's demo model on this sample
var oModel = new JSONModel(jQuery.sap.getModulePath("sap.ui.demo.mock", "/products.json"));
this.getView().setModel(oModel);
},
handleValueHelp : function (oController) {
this.inputId = oController.oSource.sId;
// create value help dialog
if (!this._valueHelpDialog) {
this._valueHelpDialog = sap.ui.xmlfragment(
"sap.m.sample.InputAssistedTwoValues.Dialog",
this
);
this.getView().addDependent(this._valueHelpDialog);
}
// open value help dialog
this._valueHelpDialog.open();
},
_handleValueHelpSearch : function (evt) {
var sValue = evt.getParameter("value");
var oFilter = new Filter(
"Name",
sap.ui.model.FilterOperator.Contains, sValue
);
evt.getSource().getBinding("items").filter([oFilter]);
},
_handleValueHelpClose : function (evt) {
var oSelectedItem = evt.getParameter("selectedItem");
if (oSelectedItem) {
var productInput = this.byId(this.inputId);
productInput.setValue(oSelectedItem.getTitle());
}
evt.getSource().getBinding("items").filter([]);
}
});
return CController;
});<|fim▁end|> | 'sap/ui/core/Fragment',
'sap/ui/core/mvc/Controller',
'sap/ui/model/Filter', |
<|file_name|>no_shorthand_arrow_function.js<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|> | (this => {}); |
<|file_name|>rs_enum.py<|end_file_name|><|fim▁begin|>#! /usr/bin/env python3
# -*- coding: utf-8 -*-
import gettext
from enum import Enum, unique
_ = gettext.gettext
strategy_descriptions = [_("New resourcelist strategy"),
_("New changelist strategy"),
_("Incremental changelist strategy")]
@unique
class Strategy(Enum):
"""
:samp:`Strategy for ResourceSync Publishing`
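
    Illustrative usage of the helpers defined below (a sketch; results are
    shown as comments)::

        Strategy.strategy_for("new_changelist")   # Strategy.new_changelist
        Strategy.strategy_for(2)                  # Strategy.inc_changelist
        Strategy.sanitize("resourcelist")         # "resourcelist"
        Strategy.inc_changelist.describe()        # "Incremental changelist strategy"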
"""
resourcelist = 0
"""
``0`` :samp:`New resourcelist {strategy}`
Create new resourcelist(s) every run.
"""
new_changelist = 1
"""
``1`` :samp:`New changelist {strategy}`
Create a new changelist every run.
If no resourcelist was found in the metadata directory switch to new resourcelist strategy.
"""
inc_changelist = 2
"""
``2`` :samp:`Incremental changelist {strategy}`
Add changes to an existing changelist. If no changelist exists, create a new one.
If no resourcelist was found in the metadata directory switch to new resourcelist strategy.
"""
# resourcedump = 3 # not implemented
# changedump = 4 # not implemented
@staticmethod
def names():
"""
:samp:`Get Strategy names`
:return: List<str> of names
"""
names = dir(Strategy)
return [x for x in names if not x.startswith("_")]
@staticmethod
def sanitize(name):
"""
:samp:`Verify a {Strategy} name`
:param str name: string to test
:return: name if it is the name of a strategy
:raises: :exc:`ValueError` if the given name is not the name of a strategy
"""
try:
strategy = Strategy[name]
return strategy.name
except KeyError as err:
raise ValueError(err)
@staticmethod
def strategy_for(value):
"""
:samp:`Get a Strategy for the given value`
:param value: may be :class:`Strategy`, str or int
:return: :class:`Strategy`
:raises: :exc:`ValueError` if the given value could not be converted to a :class:`Strategy`
"""
try:
if isinstance(value, Strategy):
return value
elif isinstance(value, int):
return Strategy(value)
else:
return Strategy[value]
except KeyError as err:
raise ValueError(err)
def describe(self):
return strategy_descriptions[self.value]
class Capability(Enum):
"""
:samp:`Capabilities as defined in the ResourceSync Framework`
"""
resourcelist = 0
"""
``0`` :samp:`resourcelist`
"""
changelist = 1
"""
``1`` :samp:`changelist`
"""
resourcedump = 2
"""
``2`` :samp:`resourcedump`
"""
changedump = 3
"""
``3`` :samp:`changedump`
"""
resourcedump_manifest = 4
"""
``4`` :samp:`resourcedump_manifest`
"""
changedump_manifest = 5
"""
``5`` :samp:`changedump_manifest`
"""
capabilitylist = 6
"""
``6`` :samp:`capabilitylist`
"""
description = 7
"""
``7`` :samp:`description`
"""
class SelectMode(Enum):
"""
:samp:`Mode of selection`
"""
simple = 0
selector = 1
@staticmethod
def names():
"""
:samp:`Get SelectMode names`
:return: List<str> of names<|fim▁hole|>
@staticmethod
def select_mode_for(mode):
try:
if isinstance(mode, SelectMode):
return mode
elif isinstance(mode, int):
return SelectMode(mode)
else:
return SelectMode[mode]
except KeyError as err:
raise ValueError(err)<|fim▁end|> | """
names = dir(SelectMode)
return [x for x in names if not x.startswith("_")] |
<|file_name|>cbuster.cpp<|end_file_name|><|fim▁begin|>#include "../vidhrdw/cbuster.cpp"
/***************************************************************************
Crude Buster (World version FX) (c) 1990 Data East Corporation
Crude Buster (World version FU) (c) 1990 Data East Corporation
Crude Buster (Japanese version) (c) 1990 Data East Corporation
Two Crude (USA version) (c) 1990 Data East USA
The 'FX' board is filled with 'FU' roms except for the 4 program roms,
both boards have 'export' stickers which usually indicates a World version.
Maybe one is a UK or European version.
Emulation by Bryan McPhail, [email protected]
***************************************************************************/
#include "driver.h"
#include "vidhrdw/generic.h"
#include "cpu/h6280/h6280.h"
int twocrude_vh_start(void);
void twocrude_vh_stop(void);
void twocrude_vh_screenrefresh(struct osd_bitmap *bitmap,int full_refresh);
WRITE_HANDLER( twocrude_pf1_data_w );
WRITE_HANDLER( twocrude_pf2_data_w );
WRITE_HANDLER( twocrude_pf3_data_w );
WRITE_HANDLER( twocrude_pf4_data_w );
WRITE_HANDLER( twocrude_control_0_w );
WRITE_HANDLER( twocrude_control_1_w );
WRITE_HANDLER( twocrude_palette_24bit_rg_w );
WRITE_HANDLER( twocrude_palette_24bit_b_w );
READ_HANDLER( twocrude_palette_24bit_rg_r );
READ_HANDLER( twocrude_palette_24bit_b_r );
WRITE_HANDLER( twocrude_pf1_rowscroll_w );
WRITE_HANDLER( twocrude_pf2_rowscroll_w );
WRITE_HANDLER( twocrude_pf3_rowscroll_w );
WRITE_HANDLER( twocrude_pf4_rowscroll_w );
extern unsigned char *twocrude_pf1_rowscroll,*twocrude_pf2_rowscroll;
extern unsigned char *twocrude_pf3_rowscroll,*twocrude_pf4_rowscroll;
extern unsigned char *twocrude_pf1_data, *twocrude_pf2_data, *twocrude_pf3_data, *twocrude_pf4_data;
static unsigned char *twocrude_ram;
extern void twocrude_pri_w(int pri);
WRITE_HANDLER( twocrude_update_sprites_w );
static int prot;
/******************************************************************************/
static WRITE_HANDLER( twocrude_control_w )
{
switch (offset) {
case 0: /* DMA flag */
twocrude_update_sprites_w(0,0);
return;
case 6: /* IRQ ack */
return;
case 2: /* Sound CPU write */
soundlatch_w(0,data & 0xff);
cpu_cause_interrupt(1,H6280_INT_IRQ1);
return;
case 4: /* Protection, maybe this is a PAL on the board?
80046 is level number
stop at stage and enter.
see also 8216..
9a 00 = pf4 over pf3 (normal) (level 0)
9a f1 = (level 1 - water), pf3 over ALL sprites + pf4
9a 80 = pf3 over pf4 (Level 2 - copter)
9a 40 = pf3 over ALL sprites + pf4 (snow) level 3
9a c0 = doesn't matter?
9a ff = pf 3 over pf4
I can't find a priority register, I assume it's tied to the
protection?!
*/
if ((data&0xffff)==0x9a00) prot=0;
if ((data&0xffff)==0xaa) prot=0x74;
if ((data&0xffff)==0x0200) prot=0x63<<8;
if ((data&0xffff)==0x9a) prot=0xe;
if ((data&0xffff)==0x55) prot=0x1e;
if ((data&0xffff)==0x0e) {prot=0x0e;twocrude_pri_w(0);} /* start */
if ((data&0xffff)==0x00) {prot=0x0e;twocrude_pri_w(0);} /* level 0 */
if ((data&0xffff)==0xf1) {prot=0x36;twocrude_pri_w(1);} /* level 1 */
if ((data&0xffff)==0x80) {prot=0x2e;twocrude_pri_w(1);} /* level 2 */
if ((data&0xffff)==0x40) {prot=0x1e;twocrude_pri_w(1);} /* level 3 */
if ((data&0xffff)==0xc0) {prot=0x3e;twocrude_pri_w(0);} /* level 4 */
if ((data&0xffff)==0xff) {prot=0x76;twocrude_pri_w(1);} /* level 5 */
break;
}
//logerror("Warning %04x- %02x written to control %02x\n",cpu_get_pc(),data,offset);
}
READ_HANDLER( twocrude_control_r )
{
switch (offset)
{
case 0: /* Player 1 & Player 2 joysticks & fire buttons */
return (readinputport(0) + (readinputport(1) << 8));
case 2: /* Dip Switches */
return (readinputport(3) + (readinputport(4) << 8));
case 4: /* Protection */
//logerror("%04x : protection control read at 30c000 %d\n",cpu_get_pc(),offset);
return prot;
case 6: /* Credits, VBL in byte 7 */
return readinputport(2);
}
return 0xffff;
}
static READ_HANDLER( twocrude_pf1_data_r ) { return READ_WORD(&twocrude_pf1_data[offset]);}
static READ_HANDLER( twocrude_pf2_data_r ) { return READ_WORD(&twocrude_pf2_data[offset]);}
static READ_HANDLER( twocrude_pf3_data_r ) { return READ_WORD(&twocrude_pf3_data[offset]);}
static READ_HANDLER( twocrude_pf4_data_r ) { return READ_WORD(&twocrude_pf4_data[offset]);}
static READ_HANDLER( twocrude_pf1_rowscroll_r ) { return READ_WORD(&twocrude_pf1_rowscroll[offset]);}
static READ_HANDLER( twocrude_pf2_rowscroll_r ) { return READ_WORD(&twocrude_pf2_rowscroll[offset]);}
static READ_HANDLER( twocrude_pf3_rowscroll_r ) { return READ_WORD(&twocrude_pf3_rowscroll[offset]);}
static READ_HANDLER( twocrude_pf4_rowscroll_r ) { return READ_WORD(&twocrude_pf4_rowscroll[offset]);}
/******************************************************************************/
static struct MemoryReadAddress twocrude_readmem[] =
{
{ 0x000000, 0x07ffff, MRA_ROM },
{ 0x080000, 0x083fff, MRA_BANK1 },
{ 0x0a0000, 0x0a1fff, twocrude_pf1_data_r },
{ 0x0a2000, 0x0a2fff, twocrude_pf4_data_r },
{ 0x0a4000, 0x0a47ff, twocrude_pf1_rowscroll_r },
{ 0x0a6000, 0x0a67ff, twocrude_pf4_rowscroll_r },
{ 0x0a8000, 0x0a8fff, twocrude_pf3_data_r },
{ 0x0aa000, 0x0aafff, twocrude_pf2_data_r },
{ 0x0ac000, 0x0ac7ff, twocrude_pf3_rowscroll_r },
{ 0x0ae000, 0x0ae7ff, twocrude_pf2_rowscroll_r },
{ 0x0b0000, 0x0b07ff, MRA_BANK2 },
{ 0x0b8000, 0x0b8fff, twocrude_palette_24bit_rg_r },
{ 0x0b9000, 0x0b9fff, twocrude_palette_24bit_b_r },
{ 0x0bc000, 0x0bc00f, twocrude_control_r },
{ -1 } /* end of table */
};
static struct MemoryWriteAddress twocrude_writemem[] =
{
{ 0x000000, 0x07ffff, MWA_ROM },
{ 0x080000, 0x083fff, MWA_BANK1, &twocrude_ram },
{ 0x0a0000, 0x0a1fff, twocrude_pf1_data_w, &twocrude_pf1_data },
{ 0x0a2000, 0x0a2fff, twocrude_pf4_data_w, &twocrude_pf4_data },
{ 0x0a4000, 0x0a47ff, twocrude_pf1_rowscroll_w, &twocrude_pf1_rowscroll },
{ 0x0a6000, 0x0a67ff, twocrude_pf4_rowscroll_w, &twocrude_pf4_rowscroll },
{ 0x0a8000, 0x0a8fff, twocrude_pf3_data_w, &twocrude_pf3_data },
{ 0x0aa000, 0x0aafff, twocrude_pf2_data_w, &twocrude_pf2_data },
{ 0x0ac000, 0x0ac7ff, twocrude_pf3_rowscroll_w, &twocrude_pf3_rowscroll },
{ 0x0ae000, 0x0ae7ff, twocrude_pf2_rowscroll_w, &twocrude_pf2_rowscroll },
{ 0x0b0000, 0x0b07ff, MWA_BANK2, &spriteram },
{ 0x0b4000, 0x0b4001, MWA_NOP },
{ 0x0b5000, 0x0b500f, twocrude_control_1_w },
{ 0x0b6000, 0x0b600f, twocrude_control_0_w },
{ 0x0b8000, 0x0b8fff, twocrude_palette_24bit_rg_w, &paletteram },
{ 0x0b9000, 0x0b9fff, twocrude_palette_24bit_b_w, &paletteram_2 },
{ 0x0bc000, 0x0bc00f, twocrude_control_w },
{ -1 } /* end of table */
};
/******************************************************************************/
static WRITE_HANDLER( YM2151_w )
{
switch (offset) {
case 0:
YM2151_register_port_0_w(0,data);
break;
case 1:
YM2151_data_port_0_w(0,data);
break;
}
}
static WRITE_HANDLER( YM2203_w )
{
switch (offset) {
case 0:
YM2203_control_port_0_w(0,data);
break;
case 1:
YM2203_write_port_0_w(0,data);
break;
}
}
static struct MemoryReadAddress sound_readmem[] =
{
{ 0x000000, 0x00ffff, MRA_ROM },
{ 0x100000, 0x100001, YM2203_status_port_0_r },
{ 0x110000, 0x110001, YM2151_status_port_0_r },
{ 0x120000, 0x120001, OKIM6295_status_0_r },
{ 0x130000, 0x130001, OKIM6295_status_1_r },
{ 0x140000, 0x140001, soundlatch_r },
{ 0x1f0000, 0x1f1fff, MRA_BANK8 },
{ -1 } /* end of table */
};
static struct MemoryWriteAddress sound_writemem[] =
{
{ 0x000000, 0x00ffff, MWA_ROM },
{ 0x100000, 0x100001, YM2203_w },
{ 0x110000, 0x110001, YM2151_w },
{ 0x120000, 0x120001, OKIM6295_data_0_w },
{ 0x130000, 0x130001, OKIM6295_data_1_w },
{ 0x1f0000, 0x1f1fff, MWA_BANK8 },
{ 0x1fec00, 0x1fec01, H6280_timer_w },
{ 0x1ff402, 0x1ff403, H6280_irq_status_w },
{ -1 } /* end of table */
};
/******************************************************************************/
INPUT_PORTS_START( twocrude )
PORT_START /* Player 1 controls */
PORT_BIT( 0x01, IP_ACTIVE_LOW, IPT_JOYSTICK_UP | IPF_8WAY )
PORT_BIT( 0x02, IP_ACTIVE_LOW, IPT_JOYSTICK_DOWN | IPF_8WAY )
PORT_BIT( 0x04, IP_ACTIVE_LOW, IPT_JOYSTICK_LEFT | IPF_8WAY )
PORT_BIT( 0x08, IP_ACTIVE_LOW, IPT_JOYSTICK_RIGHT | IPF_8WAY )
PORT_BIT( 0x10, IP_ACTIVE_LOW, IPT_BUTTON1 )
PORT_BIT( 0x20, IP_ACTIVE_LOW, IPT_BUTTON2 )
PORT_BIT( 0x40, IP_ACTIVE_LOW, IPT_BUTTON3 )
PORT_BIT( 0x80, IP_ACTIVE_LOW, IPT_START1 )
PORT_START /* Player 2 controls */
PORT_BIT( 0x01, IP_ACTIVE_LOW, IPT_JOYSTICK_UP | IPF_8WAY | IPF_PLAYER2 )
PORT_BIT( 0x02, IP_ACTIVE_LOW, IPT_JOYSTICK_DOWN | IPF_8WAY | IPF_PLAYER2 )
PORT_BIT( 0x04, IP_ACTIVE_LOW, IPT_JOYSTICK_LEFT | IPF_8WAY | IPF_PLAYER2 )
PORT_BIT( 0x08, IP_ACTIVE_LOW, IPT_JOYSTICK_RIGHT | IPF_8WAY | IPF_PLAYER2 )
PORT_BIT( 0x10, IP_ACTIVE_LOW, IPT_BUTTON1 | IPF_PLAYER2 )
PORT_BIT( 0x20, IP_ACTIVE_LOW, IPT_BUTTON2 | IPF_PLAYER2 )
PORT_BIT( 0x40, IP_ACTIVE_LOW, IPT_BUTTON3 | IPF_PLAYER2 )
PORT_BIT( 0x80, IP_ACTIVE_LOW, IPT_START2 )
PORT_START /* Credits */
PORT_BIT( 0x01, IP_ACTIVE_LOW, IPT_COIN1 )
PORT_BIT( 0x02, IP_ACTIVE_LOW, IPT_COIN2 )
PORT_BIT( 0x04, IP_ACTIVE_LOW, IPT_COIN3 )
PORT_BIT( 0x08, IP_ACTIVE_HIGH, IPT_VBLANK )
PORT_BIT( 0x10, IP_ACTIVE_LOW, IPT_UNKNOWN )
PORT_BIT( 0x20, IP_ACTIVE_LOW, IPT_UNKNOWN )
PORT_BIT( 0x40, IP_ACTIVE_LOW, IPT_UNKNOWN )
PORT_BIT( 0x80, IP_ACTIVE_LOW, IPT_UNKNOWN )
PORT_START /* Dip switch bank 1 */
PORT_DIPNAME( 0x07, 0x07, DEF_STR( Coin_A ) )
PORT_DIPSETTING( 0x00, DEF_STR( 3C_1C ) )
PORT_DIPSETTING( 0x01, DEF_STR( 2C_1C ) )
PORT_DIPSETTING( 0x07, DEF_STR( 1C_1C ) )
PORT_DIPSETTING( 0x06, DEF_STR( 1C_2C ) )
PORT_DIPSETTING( 0x05, DEF_STR( 1C_3C ) )
PORT_DIPSETTING( 0x04, DEF_STR( 1C_4C ) )
PORT_DIPSETTING( 0x03, DEF_STR( 1C_5C ) )
PORT_DIPSETTING( 0x02, DEF_STR( 1C_6C ) )
PORT_DIPNAME( 0x38, 0x38, DEF_STR( Coin_B ) )
PORT_DIPSETTING( 0x00, DEF_STR( 3C_1C ) )
PORT_DIPSETTING( 0x08, DEF_STR( 2C_1C ) )
PORT_DIPSETTING( 0x38, DEF_STR( 1C_1C ) )
PORT_DIPSETTING( 0x30, DEF_STR( 1C_2C ) )
PORT_DIPSETTING( 0x28, DEF_STR( 1C_3C ) )
PORT_DIPSETTING( 0x20, DEF_STR( 1C_4C ) )
PORT_DIPSETTING( 0x18, DEF_STR( 1C_5C ) )
PORT_DIPSETTING( 0x10, DEF_STR( 1C_6C ) )
PORT_DIPNAME( 0x40, 0x40, DEF_STR( Flip_Screen ) )
PORT_DIPSETTING( 0x40, DEF_STR( Off ) )
PORT_DIPSETTING( 0x00, DEF_STR( On ) )
PORT_DIPNAME( 0x80, 0x80, DEF_STR( Unused ) )
PORT_DIPSETTING( 0x80, DEF_STR( Off ) )
PORT_DIPSETTING( 0x00, DEF_STR( On ) )
PORT_START /* Dip switch bank 2 */
PORT_DIPNAME( 0x03, 0x03, DEF_STR( Lives ) )
PORT_DIPSETTING( 0x00, "1" )
PORT_DIPSETTING( 0x01, "2" )
PORT_DIPSETTING( 0x03, "3" )
PORT_DIPSETTING( 0x02, "4" )
PORT_DIPNAME( 0x0c, 0x0c, DEF_STR( Difficulty ) )
PORT_DIPSETTING( 0x0c, "Normal" )
PORT_DIPSETTING( 0x08, "Easy" )
PORT_DIPSETTING( 0x04, "Hard" )
PORT_DIPSETTING( 0x00, "Hardest" )
PORT_DIPNAME( 0x10, 0x10, DEF_STR( Unused ) )
PORT_DIPSETTING( 0x10, DEF_STR( Off ) )
PORT_DIPSETTING( 0x00, DEF_STR( On ) )
PORT_DIPNAME( 0x20, 0x20, DEF_STR( Unused ) )
PORT_DIPSETTING( 0x20, DEF_STR( Off ) )
PORT_DIPSETTING( 0x00, DEF_STR( On ) )
PORT_DIPNAME( 0x40, 0x40, "Allow Continue" )
PORT_DIPSETTING( 0x00, DEF_STR( No ) )
PORT_DIPSETTING( 0x40, DEF_STR( Yes ) )
PORT_DIPNAME( 0x80, 0x00, DEF_STR( Demo_Sounds ) )
PORT_DIPSETTING( 0x80, DEF_STR( Off ) )
PORT_DIPSETTING( 0x00, DEF_STR( On ) )
INPUT_PORTS_END
/******************************************************************************/
static struct GfxLayout charlayout =
{
8,8,
4096,
4,
{ 0x10000*8+8, 8, 0x10000*8, 0 },
{ 0, 1, 2, 3, 4, 5, 6, 7 },
{ 0*16, 1*16, 2*16, 3*16, 4*16, 5*16, 6*16, 7*16,
},
16*8
};
static struct GfxLayout tilelayout =
{
16,16,
4096,
4,
{ 24, 16, 8, 0 },
{ 64*8+0, 64*8+1, 64*8+2, 64*8+3, 64*8+4, 64*8+5, 64*8+6, 64*8+7,
0, 1, 2, 3, 4, 5, 6, 7 },
{ 0*32, 1*32, 2*32, 3*32, 4*32, 5*32, 6*32, 7*32,
8*32, 9*32, 10*32, 11*32, 12*32, 13*32, 14*32, 15*32 },
128*8
};
static struct GfxLayout spritelayout =
{
16,16,
(4096*2)+2048, /* Main bank + 4 extra roms */
4,
{ 0xa0000*8+8, 0xa0000*8, 8, 0 },
{ 32*8+0, 32*8+1, 32*8+2, 32*8+3, 32*8+4, 32*8+5, 32*8+6, 32*8+7,
0, 1, 2, 3, 4, 5, 6, 7 },
{ 0*16, 1*16, 2*16, 3*16, 4*16, 5*16, 6*16, 7*16,
8*16, 9*16, 10*16, 11*16, 12*16, 13*16, 14*16, 15*16 },
64*8
};
static struct GfxDecodeInfo gfxdecodeinfo[] =
{
{ REGION_GFX1, 0, &charlayout, 0, 16 }, /* Characters 8x8 */
{ REGION_GFX2, 0, &tilelayout, 1024, 16 }, /* Tiles 16x16 */
{ REGION_GFX2, 0, &tilelayout, 768, 16 }, /* Tiles 16x16 */
{ REGION_GFX3, 0, &tilelayout, 512, 16 }, /* Tiles 16x16 */
{ REGION_GFX4, 0, &spritelayout, 256, 80 }, /* Sprites 16x16 */
{ -1 } /* end of array */
};
/******************************************************************************/
static struct OKIM6295interface okim6295_interface =
{
2, /* 2 chips */
{ 7757, 15514 },/* Frequency */
{ REGION_SOUND1, REGION_SOUND2 }, /* memory regions 3 & 4 */
{ 50, 25 } /* Note! Keep chip 1 (voices) louder than chip 2 */
};
static struct YM2203interface ym2203_interface =
{
1,
32220000/8, /* Accurate, audio section crystal is 32.220 MHz */
{ YM2203_VOL(40,40) },
{ 0 },
{ 0 },
{ 0 },
{ 0 }
};
static void sound_irq(int state)
{
cpu_set_irq_line(1,1,state); /* IRQ 2 */
}
static struct YM2151interface ym2151_interface =
{
1,
32220000/9, /* Accurate, audio section crystal is 32.220 MHz */
{ YM3012_VOL(45,MIXER_PAN_LEFT,45,MIXER_PAN_RIGHT) },
{ sound_irq }
};
static struct MachineDriver machine_driver_twocrude =
{
/* basic machine hardware */
{
{
CPU_M68000,
12000000, /* Accurate */
twocrude_readmem,twocrude_writemem,0,0,
m68_level4_irq,1 /* VBL */
},
{
CPU_H6280 | CPU_AUDIO_CPU,
32220000/8, /* Accurate */
sound_readmem,sound_writemem,0,0,
ignore_interrupt,0
}
},
58, 529, /* frames per second, vblank duration */
1, /* 1 CPU slice per frame - interleaving is forced when a sound command is written */
0,
/* video hardware */
32*8, 32*8, { 0*8, 32*8-1, 1*8, 31*8-1 },
gfxdecodeinfo,
2048, 2048,
0,
VIDEO_TYPE_RASTER | VIDEO_MODIFIES_PALETTE | VIDEO_UPDATE_BEFORE_VBLANK,
0,
twocrude_vh_start,
twocrude_vh_stop,
twocrude_vh_screenrefresh,
/* sound hardware */<|fim▁hole|> SOUND_YM2203,
&ym2203_interface
},
{
SOUND_YM2151,
&ym2151_interface
},
{
SOUND_OKIM6295,
&okim6295_interface
}
}
};
/******************************************************************************/
ROM_START( cbuster )
ROM_REGION( 0x80000, REGION_CPU1 ) /* 68000 code */
ROM_LOAD_EVEN( "fx01.rom", 0x00000, 0x20000, 0xddae6d83 )
ROM_LOAD_ODD ( "fx00.rom", 0x00000, 0x20000, 0x5bc2c0de )
ROM_LOAD_EVEN( "fx03.rom", 0x40000, 0x20000, 0xc3d65bf9 )
ROM_LOAD_ODD ( "fx02.rom", 0x40000, 0x20000, 0xb875266b )
ROM_REGION( 0x10000, REGION_CPU2 ) /* Sound CPU */
ROM_LOAD( "fu11-.rom", 0x00000, 0x10000, 0x65f20f10 )
ROM_REGION( 0x20000, REGION_GFX1 | REGIONFLAG_DISPOSE )
ROM_LOAD( "fu05-.rom", 0x00000, 0x10000, 0x8134d412 ) /* Chars */
ROM_LOAD( "fu06-.rom", 0x10000, 0x10000, 0x2f914a45 )
ROM_REGION( 0x80000, REGION_GFX2 | REGIONFLAG_DISPOSE )
ROM_LOAD( "mab-01", 0x00000, 0x80000, 0x1080d619 ) /* Tiles */
ROM_REGION( 0x80000, REGION_GFX3 | REGIONFLAG_DISPOSE )
ROM_LOAD( "mab-00", 0x00000, 0x80000, 0x660eaabd ) /* Tiles */
ROM_REGION( 0x180000,REGION_GFX4 | REGIONFLAG_DISPOSE )
ROM_LOAD( "mab-02", 0x000000, 0x80000, 0x58b7231d ) /* Sprites */
/* Space for extra sprites to be copied to (0x20000) */
ROM_LOAD( "mab-03", 0x0a0000, 0x80000, 0x76053b9d )
/* Space for extra sprites to be copied to (0x20000) */
ROM_LOAD( "fu07-.rom", 0x140000, 0x10000, 0xca8d0bb3 ) /* Extra sprites */
ROM_LOAD( "fu08-.rom", 0x150000, 0x10000, 0xc6afc5c8 )
ROM_LOAD( "fu09-.rom", 0x160000, 0x10000, 0x526809ca )
ROM_LOAD( "fu10-.rom", 0x170000, 0x10000, 0x6be6d50e )
ROM_REGION( 0x20000, REGION_SOUND1 ) /* ADPCM samples */
ROM_LOAD( "fu12-.rom", 0x00000, 0x20000, 0x2d1d65f2 )
ROM_REGION( 0x20000, REGION_SOUND2 ) /* ADPCM samples */
ROM_LOAD( "fu13-.rom", 0x00000, 0x20000, 0xb8525622 )
ROM_END
ROM_START( cbusterw )
ROM_REGION( 0x80000, REGION_CPU1 ) /* 68000 code */
ROM_LOAD_EVEN( "fu01-.rom", 0x00000, 0x20000, 0x0203e0f8 )
ROM_LOAD_ODD ( "fu00-.rom", 0x00000, 0x20000, 0x9c58626d )
ROM_LOAD_EVEN( "fu03-.rom", 0x40000, 0x20000, 0xdef46956 )
ROM_LOAD_ODD ( "fu02-.rom", 0x40000, 0x20000, 0x649c3338 )
ROM_REGION( 0x10000, REGION_CPU2 ) /* Sound CPU */
ROM_LOAD( "fu11-.rom", 0x00000, 0x10000, 0x65f20f10 )
ROM_REGION( 0x20000, REGION_GFX1 | REGIONFLAG_DISPOSE )
ROM_LOAD( "fu05-.rom", 0x00000, 0x10000, 0x8134d412 ) /* Chars */
ROM_LOAD( "fu06-.rom", 0x10000, 0x10000, 0x2f914a45 )
ROM_REGION( 0x80000, REGION_GFX2 | REGIONFLAG_DISPOSE )
ROM_LOAD( "mab-01", 0x00000, 0x80000, 0x1080d619 ) /* Tiles */
ROM_REGION( 0x80000, REGION_GFX3 | REGIONFLAG_DISPOSE )
ROM_LOAD( "mab-00", 0x00000, 0x80000, 0x660eaabd ) /* Tiles */
ROM_REGION( 0x180000,REGION_GFX4 | REGIONFLAG_DISPOSE )
ROM_LOAD( "mab-02", 0x000000, 0x80000, 0x58b7231d ) /* Sprites */
/* Space for extra sprites to be copied to (0x20000) */
ROM_LOAD( "mab-03", 0x0a0000, 0x80000, 0x76053b9d )
/* Space for extra sprites to be copied to (0x20000) */
ROM_LOAD( "fu07-.rom", 0x140000, 0x10000, 0xca8d0bb3 ) /* Extra sprites */
ROM_LOAD( "fu08-.rom", 0x150000, 0x10000, 0xc6afc5c8 )
ROM_LOAD( "fu09-.rom", 0x160000, 0x10000, 0x526809ca )
ROM_LOAD( "fu10-.rom", 0x170000, 0x10000, 0x6be6d50e )
ROM_REGION( 0x20000, REGION_SOUND1 ) /* ADPCM samples */
ROM_LOAD( "fu12-.rom", 0x00000, 0x20000, 0x2d1d65f2 )
ROM_REGION( 0x20000, REGION_SOUND2 ) /* ADPCM samples */
ROM_LOAD( "fu13-.rom", 0x00000, 0x20000, 0xb8525622 )
ROM_END
ROM_START( cbusterj )
ROM_REGION( 0x80000, REGION_CPU1 ) /* 68000 code */
ROM_LOAD_EVEN( "fr01-1", 0x00000, 0x20000, 0xaf3c014f )
ROM_LOAD_ODD ( "fr00-1", 0x00000, 0x20000, 0xf666ad52 )
ROM_LOAD_EVEN( "fr03", 0x40000, 0x20000, 0x02c06118 )
ROM_LOAD_ODD ( "fr02", 0x40000, 0x20000, 0xb6c34332 )
ROM_REGION( 0x10000, REGION_CPU2 ) /* Sound CPU */
ROM_LOAD( "fu11-.rom", 0x00000, 0x10000, 0x65f20f10 )
ROM_REGION( 0x20000, REGION_GFX1 | REGIONFLAG_DISPOSE )
ROM_LOAD( "fu05-.rom", 0x00000, 0x10000, 0x8134d412 ) /* Chars */
ROM_LOAD( "fu06-.rom", 0x10000, 0x10000, 0x2f914a45 )
ROM_REGION( 0x80000, REGION_GFX2 | REGIONFLAG_DISPOSE )
ROM_LOAD( "mab-01", 0x00000, 0x80000, 0x1080d619 ) /* Tiles */
ROM_REGION( 0x80000, REGION_GFX3 | REGIONFLAG_DISPOSE )
ROM_LOAD( "mab-00", 0x00000, 0x80000, 0x660eaabd ) /* Tiles */
ROM_REGION( 0x180000,REGION_GFX4 | REGIONFLAG_DISPOSE )
ROM_LOAD( "mab-02", 0x000000, 0x80000, 0x58b7231d ) /* Sprites */
/* Space for extra sprites to be copied to (0x20000) */
ROM_LOAD( "mab-03", 0x0a0000, 0x80000, 0x76053b9d )
/* Space for extra sprites to be copied to (0x20000) */
ROM_LOAD( "fr07", 0x140000, 0x10000, 0x52c85318 ) /* Extra sprites */
ROM_LOAD( "fr08", 0x150000, 0x10000, 0xea25fbac )
ROM_LOAD( "fr09", 0x160000, 0x10000, 0xf8363424 )
ROM_LOAD( "fr10", 0x170000, 0x10000, 0x241d5760 )
ROM_REGION( 0x20000, REGION_SOUND1 ) /* ADPCM samples */
ROM_LOAD( "fu12-.rom", 0x00000, 0x20000, 0x2d1d65f2 )
ROM_REGION( 0x20000, REGION_SOUND2 ) /* ADPCM samples */
ROM_LOAD( "fu13-.rom", 0x00000, 0x20000, 0xb8525622 )
ROM_END
ROM_START( twocrude )
ROM_REGION( 0x80000, REGION_CPU1 ) /* 68000 code */
ROM_LOAD_EVEN( "ft01", 0x00000, 0x20000, 0x08e96489 )
ROM_LOAD_ODD ( "ft00", 0x00000, 0x20000, 0x6765c445 )
ROM_LOAD_EVEN( "ft03", 0x40000, 0x20000, 0x28002c99 )
ROM_LOAD_ODD ( "ft02", 0x40000, 0x20000, 0x37ea0626 )
ROM_REGION( 0x10000, REGION_CPU2 ) /* Sound CPU */
ROM_LOAD( "fu11-.rom", 0x00000, 0x10000, 0x65f20f10 )
ROM_REGION( 0x20000, REGION_GFX1 | REGIONFLAG_DISPOSE )
ROM_LOAD( "fu05-.rom", 0x00000, 0x10000, 0x8134d412 ) /* Chars */
ROM_LOAD( "fu06-.rom", 0x10000, 0x10000, 0x2f914a45 )
ROM_REGION( 0x80000, REGION_GFX2 | REGIONFLAG_DISPOSE )
ROM_LOAD( "mab-01", 0x00000, 0x80000, 0x1080d619 ) /* Tiles */
ROM_REGION( 0x80000, REGION_GFX3 | REGIONFLAG_DISPOSE )
ROM_LOAD( "mab-00", 0x00000, 0x80000, 0x660eaabd ) /* Tiles */
ROM_REGION( 0x180000,REGION_GFX4 | REGIONFLAG_DISPOSE )
ROM_LOAD( "mab-02", 0x000000, 0x80000, 0x58b7231d ) /* Sprites */
/* Space for extra sprites to be copied to (0x20000) */
ROM_LOAD( "mab-03", 0x0a0000, 0x80000, 0x76053b9d )
/* Space for extra sprites to be copied to (0x20000) */
ROM_LOAD( "ft07", 0x140000, 0x10000, 0xe3465c25 )
ROM_LOAD( "ft08", 0x150000, 0x10000, 0xc7f1d565 )
ROM_LOAD( "ft09", 0x160000, 0x10000, 0x6e3657b9 )
ROM_LOAD( "ft10", 0x170000, 0x10000, 0xcdb83560 )
ROM_REGION( 0x20000, REGION_SOUND1 ) /* ADPCM samples */
ROM_LOAD( "fu12-.rom", 0x00000, 0x20000, 0x2d1d65f2 )
ROM_REGION( 0x20000, REGION_SOUND2 ) /* ADPCM samples */
ROM_LOAD( "fu13-.rom", 0x00000, 0x20000, 0xb8525622 )
ROM_END
/******************************************************************************/
static void init_twocrude(void)
{
unsigned char *RAM = memory_region(REGION_CPU1);
unsigned char *PTR;
int i,j;
/* Main cpu decrypt */
for (i=0x00000; i<0x80000; i+=2) {
#ifdef LSB_FIRST
RAM[i+1]=(RAM[i+1] & 0xcf) | ((RAM[i+1] & 0x10) << 1) | ((RAM[i+1] & 0x20) >> 1);
RAM[i+1]=(RAM[i+1] & 0x5f) | ((RAM[i+1] & 0x20) << 2) | ((RAM[i+1] & 0x80) >> 2);
RAM[i]=(RAM[i] & 0xbd) | ((RAM[i] & 0x2) << 5) | ((RAM[i] & 0x40) >> 5);
RAM[i]=(RAM[i] & 0xf5) | ((RAM[i] & 0x2) << 2) | ((RAM[i] & 0x8) >> 2);
#else
RAM[i]=(RAM[i] & 0xcf) | ((RAM[i] & 0x10) << 1) | ((RAM[i] & 0x20) >> 1);
RAM[i]=(RAM[i] & 0x5f) | ((RAM[i] & 0x20) << 2) | ((RAM[i] & 0x80) >> 2);
RAM[i+1]=(RAM[i+1] & 0xbd) | ((RAM[i+1] & 0x2) << 5) | ((RAM[i+1] & 0x40) >> 5);
RAM[i+1]=(RAM[i+1] & 0xf5) | ((RAM[i+1] & 0x2) << 2) | ((RAM[i+1] & 0x8) >> 2);
#endif
}
/* Rearrange the 'extra' sprite bank to be in the same format as main sprites */
RAM = memory_region(REGION_GFX4) + 0x080000;
PTR = memory_region(REGION_GFX4) + 0x140000;
for (i=0; i<0x20000; i+=64) {
for (j=0; j<16; j+=1) { /* Copy 16 lines down */
RAM[i+ 0+j*2]=PTR[i/2+ 0+j]; /* Pixels 0-7 for each plane */
RAM[i+ 1+j*2]=PTR[i/2+0x10000+j];
RAM[i+0xa0000+j*2]=PTR[i/2+0x20000+j];
RAM[i+0xa0001+j*2]=PTR[i/2+0x30000+j];
}
for (j=0; j<16; j+=1) { /* Copy 16 lines down */
RAM[i+ 0x20+j*2]=PTR[i/2+ 0x10+j]; /* Pixels 8-15 for each plane */
RAM[i+ 0x21+j*2]=PTR[i/2+0x10010+j];
RAM[i+0xa0020+j*2]=PTR[i/2+0x20010+j];
RAM[i+0xa0021+j*2]=PTR[i/2+0x30010+j];
}
}
}
/******************************************************************************/
GAME( 1990, cbuster, 0, twocrude, twocrude, twocrude, ROT0, "Data East Corporation", "Crude Buster (World FX version)" )
GAME( 1990, cbusterw, cbuster, twocrude, twocrude, twocrude, ROT0, "Data East Corporation", "Crude Buster (World FU version)" )
GAME( 1990, cbusterj, cbuster, twocrude, twocrude, twocrude, ROT0, "Data East Corporation", "Crude Buster (Japan)" )
GAME( 1990, twocrude, cbuster, twocrude, twocrude, twocrude, ROT0, "Data East USA", "Two Crude (US)" )<|fim▁end|> | 0,0,0,0,
{
{ |
<|file_name|>sentiment-satisfied-outlined.js<|end_file_name|><|fim▁begin|>import { h } from 'omi';
import createSvgIcon from './utils/createSvgIcon';<|fim▁hole|> cx: "15.5",
cy: "9.5",
r: "1.5"
}), h("circle", {
cx: "8.5",
cy: "9.5",
r: "1.5"
}), h("path", {
d: "M12 16c-1.48 0-2.75-.81-3.45-2H6.88c.8 2.05 2.79 3.5 5.12 3.5s4.32-1.45 5.12-3.5h-1.67c-.7 1.19-1.97 2-3.45 2zm-.01-14C6.47 2 2 6.48 2 12s4.47 10 9.99 10C17.52 22 22 17.52 22 12S17.52 2 11.99 2zM12 20c-4.42 0-8-3.58-8-8s3.58-8 8-8 8 3.58 8 8-3.58 8-8 8z"
})), 'SentimentSatisfiedOutlined');<|fim▁end|> | export default createSvgIcon(h(h.f, null, h("circle", { |
<|file_name|>counters.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
//! Generic types for counters-related CSS values.
#[cfg(feature = "servo")]
use crate::computed_values::list_style_type::T as ListStyleType;
#[cfg(feature = "gecko")]
use crate::values::generics::CounterStyleOrNone;
#[cfg(feature = "gecko")]
use crate::values::specified::Attr;
use crate::values::CustomIdent;
use std::ops::Deref;
/// A name / value pair for counters.
#[derive(
Clone,
Debug,
MallocSizeOf,
PartialEq,
SpecifiedValueInfo,
ToComputedValue,
ToCss,
ToResolvedValue,<|fim▁hole|> pub name: CustomIdent,
/// The value of the counter / increment / etc.
pub value: Integer,
}
/// A generic value for the `counter-increment` property.
#[derive(
Clone,
Debug,
Default,
MallocSizeOf,
PartialEq,
SpecifiedValueInfo,
ToComputedValue,
ToCss,
ToResolvedValue,
ToShmem,
)]
pub struct CounterIncrement<I>(pub Counters<I>);
impl<I> CounterIncrement<I> {
/// Returns a new value for `counter-increment`.
#[inline]
pub fn new(counters: Vec<CounterPair<I>>) -> Self {
CounterIncrement(Counters(counters.into_boxed_slice()))
}
}
impl<I> Deref for CounterIncrement<I> {
type Target = [CounterPair<I>];
#[inline]
fn deref(&self) -> &Self::Target {
&(self.0).0
}
}
/// A generic value for the `counter-set` and `counter-reset` properties.
#[derive(
Clone,
Debug,
Default,
MallocSizeOf,
PartialEq,
SpecifiedValueInfo,
ToComputedValue,
ToCss,
ToResolvedValue,
ToShmem,
)]
pub struct CounterSetOrReset<I>(pub Counters<I>);
impl<I> CounterSetOrReset<I> {
/// Returns a new value for `counter-set` / `counter-reset`.
#[inline]
pub fn new(counters: Vec<CounterPair<I>>) -> Self {
CounterSetOrReset(Counters(counters.into_boxed_slice()))
}
}
impl<I> Deref for CounterSetOrReset<I> {
type Target = [CounterPair<I>];
#[inline]
fn deref(&self) -> &Self::Target {
&(self.0).0
}
}
/// A generic value for lists of counters.
///
/// Keyword `none` is represented by an empty vector.
#[derive(
Clone,
Debug,
Default,
MallocSizeOf,
PartialEq,
SpecifiedValueInfo,
ToComputedValue,
ToCss,
ToResolvedValue,
ToShmem,
)]
pub struct Counters<I>(#[css(iterable, if_empty = "none")] Box<[CounterPair<I>]>);
impl<I> Counters<I> {
/// Move out the Box into a vector. This could just return the Box<>, but
/// Vec<> is a bit more convenient because Box<[T]> doesn't implement
/// IntoIter: https://github.com/rust-lang/rust/issues/59878
#[inline]
pub fn into_vec(self) -> Vec<CounterPair<I>> {
self.0.into_vec()
}
}
#[cfg(feature = "servo")]
type CounterStyleType = ListStyleType;
#[cfg(feature = "gecko")]
type CounterStyleType = CounterStyleOrNone;
#[cfg(feature = "servo")]
#[inline]
fn is_decimal(counter_type: &CounterStyleType) -> bool {
*counter_type == ListStyleType::Decimal
}
#[cfg(feature = "gecko")]
#[inline]
fn is_decimal(counter_type: &CounterStyleType) -> bool {
*counter_type == CounterStyleOrNone::decimal()
}
/// The specified value for the `content` property.
///
/// https://drafts.csswg.org/css-content/#propdef-content
#[derive(
Clone,
Debug,
Eq,
MallocSizeOf,
PartialEq,
SpecifiedValueInfo,
ToComputedValue,
ToCss,
ToResolvedValue,
ToShmem,
)]
pub enum Content<ImageUrl> {
/// `normal` reserved keyword.
Normal,
/// `none` reserved keyword.
None,
/// `-moz-alt-content`.
#[cfg(feature = "gecko")]
MozAltContent,
/// Content items.
Items(#[css(iterable)] Box<[ContentItem<ImageUrl>]>),
}
impl<ImageUrl> Content<ImageUrl> {
/// Set `content` property to `normal`.
#[inline]
pub fn normal() -> Self {
Content::Normal
}
}
/// Items for the `content` property.
#[derive(
Clone,
Debug,
Eq,
MallocSizeOf,
PartialEq,
SpecifiedValueInfo,
ToComputedValue,
ToCss,
ToResolvedValue,
ToShmem,
)]
pub enum ContentItem<ImageUrl> {
/// Literal string content.
String(Box<str>),
/// `counter(name, style)`.
#[css(comma, function)]
Counter(CustomIdent, #[css(skip_if = "is_decimal")] CounterStyleType),
/// `counters(name, separator, style)`.
#[css(comma, function)]
Counters(
CustomIdent,
Box<str>,
#[css(skip_if = "is_decimal")] CounterStyleType,
),
/// `open-quote`.
OpenQuote,
/// `close-quote`.
CloseQuote,
/// `no-open-quote`.
NoOpenQuote,
/// `no-close-quote`.
NoCloseQuote,
/// `attr([namespace? `|`]? ident)`
#[cfg(feature = "gecko")]
Attr(Attr),
/// `url(url)`
Url(ImageUrl),
}<|fim▁end|> | ToShmem,
)]
pub struct CounterPair<Integer> {
/// The name of the counter. |
<|file_name|>auth.js<|end_file_name|><|fim▁begin|>/**
* Created by IvanIsrael on 06/04/2017.
*/
'use strict';
const express = require('express');
const router = express.Router();
const user_controller = require("../controllers/users");
const methods = user_controller.methods;
const controllers = user_controller.controllers;
const passport = require('passport');
/* Authentication routes: POST / logs the user in, GET /logout logs out. */
router.post('/', passport.authenticate('local', {
successRedirect: '/',
failureRedirect: '../users',
failureFlash: true<|fim▁hole|> req.logOut();
console.log("Cerrando sesion...");
res.redirect('../users');
}
catch (e) {
console.log(e);
res.redirect('../users');
}
});
module.exports = router;<|fim▁end|> | }));
router.get('/logout', function(req, res, next){
try { |
<|file_name|>test_util.py<|end_file_name|><|fim▁begin|>from unittest import TestCase
class Test(TestCase):<|fim▁hole|><|fim▁end|> | pass |
<|file_name|>upload.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3
import sys
import os
import subprocess
import time
filename = sys.argv[1]
print("extracting " + filename)
p = subprocess.Popen(["unzip", filename, "-dintro"], stdout=subprocess.PIPE)
p.communicate()
p = subprocess.Popen(["php","-f","uploadtodb.php","intro/courses.csv","courses"],stdout=subprocess.PIPE, universal_newlines=True)
out, err = p.communicate()
p = subprocess.Popen(["php","-f","uploadtodb.php","intro/users.csv","users"],stdout=subprocess.PIPE, universal_newlines=True)
out, err = p.communicate()
p = subprocess.Popen(["php","-f","uploadtodb.php","intro/course_formative_quiz_grades.csv","course_formative_quiz_grades"],stdout=subprocess.PIPE, universal_newlines=True)
out, err = p.communicate()
p = subprocess.Popen(["php","-f","uploadtodb.php","intro/course_item_passing_states.csv","course_item_passing_states"],stdout=subprocess.PIPE, universal_newlines=True)
out, err = p.communicate()
p = subprocess.Popen(["php","-f","uploadtodb.php","intro/course_passing_states.csv","course_passing_states"],stdout=subprocess.PIPE, universal_newlines=True)
out, err = p.communicate()
<|fim▁hole|>p = subprocess.Popen(["php","-f","uploadtodb.php","intro/course_grades.csv","course_grades"],stdout=subprocess.PIPE, universal_newlines=True)
out, err = p.communicate()
p = subprocess.Popen(["php","-f","uploadtodb.php","intro/course_modules.csv","course_modules"],stdout=subprocess.PIPE, universal_newlines=True)
out, err = p.communicate()
p = subprocess.Popen(["php","-f","uploadtodb.php","intro/course_lessons.csv","course_lessons"],stdout=subprocess.PIPE, universal_newlines=True)
out, err = p.communicate()
p = subprocess.Popen(["php","-f","uploadtodb.php","intro/course_items.csv","course_items"],stdout=subprocess.PIPE, universal_newlines=True)
out, err = p.communicate()
p = subprocess.Popen(["php","-f","uploadtodb.php","intro/course_item_grades.csv","course_item_grades"],stdout=subprocess.PIPE, universal_newlines=True)
out, err = p.communicate()
p = subprocess.Popen(["php","-f","uploadtodb.php","intro/course_item_types.csv","course_item_types"],stdout=subprocess.PIPE, universal_newlines=True)
out, err = p.communicate()
subprocess.call("rm intro/*", shell=True)<|fim▁end|> | |
<|file_name|>get_folder.py<|end_file_name|><|fim▁begin|>from ..errors import ErrorFolderNotFound, ErrorInvalidOperation, ErrorNoPublicFolderReplicaAvailable
from ..util import MNS, create_element
from .common import EWSAccountService, folder_ids_element, parse_folder_elem, shape_element
class GetFolder(EWSAccountService):
"""MSDN: https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/getfolder-operation"""
SERVICE_NAME = "GetFolder"
element_container_name = f"{{{MNS}}}Folders"
ERRORS_TO_CATCH_IN_RESPONSE = EWSAccountService.ERRORS_TO_CATCH_IN_RESPONSE + (
ErrorFolderNotFound,
ErrorNoPublicFolderReplicaAvailable,
ErrorInvalidOperation,
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.folders = [] # A hack to communicate parsing args to _elems_to_objs()
def call(self, folders, additional_fields, shape):
"""Take a folder ID and returns the full information for that folder.
:param folders: a list of Folder objects
:param additional_fields: the extra fields that should be returned with the folder, as FieldPath objects
:param shape: The set of attributes to return
:return: XML elements for the folders, in stable order
"""
# We can't easily find the correct folder class from the returned XML. Instead, return objects with the same
# class as the folder instance it was requested with.
self.folders = list(folders) # Convert to a list, in case 'folders' is a generator. We're iterating twice.
return self._elems_to_objs(
self._chunked_get_elements(
self.get_payload,
items=self.folders,
additional_fields=additional_fields,
shape=shape,
)<|fim▁hole|> )
def _elems_to_objs(self, elems):
for folder, elem in zip(self.folders, elems):
if isinstance(elem, Exception):
yield elem
continue
yield parse_folder_elem(elem=elem, folder=folder, account=self.account)
def get_payload(self, folders, additional_fields, shape):
payload = create_element(f"m:{self.SERVICE_NAME}")
payload.append(
shape_element(
tag="m:FolderShape", shape=shape, additional_fields=additional_fields, version=self.account.version
)
)
payload.append(folder_ids_element(folders=folders, version=self.account.version))
return payload<|fim▁end|> | |
<|file_name|>ie.d.ts<|end_file_name|><|fim▁begin|>import * as webdriver from './index';
/**
* A WebDriver client for Microsoft's Internet Explorer.
*/
export class Driver extends webdriver.WebDriver {
/**
* @param {(capabilities.Capabilities|Options)=} opt_config The configuration
* options.
* @param {promise.ControlFlow=} opt_flow The control flow to use,
* or {@code null} to use the currently active flow.
*/
constructor(opt_config?: webdriver.Capabilities | Options, opt_flow?: webdriver.promise.ControlFlow);
/**
* This function is a no-op as file detectors are not supported by this
* implementation.
* @override
*/
setFileDetector(): void;
}
/**
* Class for managing IEDriver specific options.
*/
export class Options {
constructor();
/**
* Extracts the IEDriver specific options from the given capabilities
* object.
* @param {!capabilities.Capabilities} caps The capabilities object.
* @return {!Options} The IEDriver options.
*/
static fromCapabilities(caps: webdriver.Capabilities): Options;
/**
* Whether to disable the protected mode settings check when the session is
     * created. Disabling this setting may lead to significant instability as the
* browser may become unresponsive/hang. Only 'best effort' support is provided
* when using this capability.
*
* For more information, refer to the IEDriver's
* [required system configuration](http://goo.gl/eH0Yi3).
*
* @param {boolean} ignoreSettings Whether to ignore protected mode settings.
* @return {!Options} A self reference.
*/
introduceFlakinessByIgnoringProtectedModeSettings(ignoreSettings: boolean): Options;
/**
* Indicates whether to skip the check that the browser's zoom level is set to
* 100%.
*
* @param {boolean} ignore Whether to ignore the browser's zoom level settings.
* @return {!Options} A self reference.
*/
ignoreZoomSetting(ignore: boolean): Options;
/**
* Sets the initial URL loaded when IE starts. This is intended to be used with
* {@link #ignoreProtectedModeSettings} to allow the user to initialize IE in
* the proper Protected Mode zone. Setting this option may cause browser
* instability or flaky and unresponsive code. Only 'best effort' support is
* provided when using this option.
*
* @param {string} url The initial browser URL.
* @return {!Options} A self reference.
*/
initialBrowserUrl(url: string): Options;
/**
* Configures whether to enable persistent mouse hovering (true by default).
* Persistent hovering is achieved by continuously firing mouse over events at
* the last location the mouse cursor has been moved to.
*
* @param {boolean} enable Whether to enable persistent hovering.
* @return {!Options} A self reference.
*/
enablePersistentHover(enable: boolean): Options;
/**
* Configures whether the driver should attempt to remove obsolete
* {@linkplain webdriver.WebElement WebElements} from its internal cache on
* page navigation (true by default). Disabling this option will cause the
* driver to run with a larger memory footprint.
*
* @param {boolean} enable Whether to enable element reference cleanup.
* @return {!Options} A self reference.
*/
enableElementCacheCleanup(enable: boolean): Options;
/**
* Configures whether to require the IE window to have input focus before
* performing any user interactions (i.e. mouse or keyboard events). This
* option is disabled by default, but delivers much more accurate interaction
* events when enabled.
*
* @param {boolean} require Whether to require window focus.
* @return {!Options} A self reference.
*/
requireWindowFocus(require: boolean): Options;
/**
* Configures the timeout, in milliseconds, that the driver will attempt to
     * locate and attach to a newly opened instance of Internet Explorer. The
* default is zero, which indicates waiting indefinitely.
*
* @param {number} timeout How long to wait for IE.
* @return {!Options} A self reference.
*/
browserAttachTimeout(timeout: number): Options;
/**
* Configures whether to launch Internet Explorer using the CreateProcess API.
* If this option is not specified, IE is launched using IELaunchURL, if
* available. For IE 8 and above, this option requires the TabProcGrowth
* registry value to be set to 0.
*
* @param {boolean} force Whether to use the CreateProcess API.
* @return {!Options} A self reference.
*/
forceCreateProcessApi(force: boolean): Options;
/**
* Specifies command-line switches to use when launching Internet Explorer.
* This is only valid when used with {@link #forceCreateProcessApi}.
*
* @param {...(string|!Array.<string>)} var_args The arguments to add.
* @return {!Options} A self reference.
*/
addArguments(...var_args: string[]): Options;
/**
* Configures whether proxies should be configured on a per-process basis. If
* not set, setting a {@linkplain #setProxy proxy} will configure the system
* proxy. The default behavior is to use the system proxy.
*
* @param {boolean} enable Whether to enable per-process proxy settings.
* @return {!Options} A self reference.
*/
usePerProcessProxy(enable: boolean): Options;
/**
* Configures whether to clear the cache, cookies, history, and saved form data
* before starting the browser. _Using this capability will clear session data
* for all running instances of Internet Explorer, including those started
* manually._
*
* @param {boolean} cleanSession Whether to clear all session data on startup.
* @return {!Options} A self reference.
*/
ensureCleanSession(cleanSession: boolean): Options;
/**
* Sets the path to the log file the driver should log to.
* @param {string} file The log file path.
* @return {!Options} A self reference.
*/
setLogFile(file: string): Options;
/**
* Sets the IEDriverServer's logging {@linkplain Level level}.
* @param {Level} level The logging level.
* @return {!Options} A self reference.
*/
setLogLevel(level: webdriver.logging.Level): Options;
/**
* Sets the IP address of the driver's host adapter.
* @param {string} host The IP address to use.
* @return {!Options} A self reference.
*/
setHost(host: string): Options;
/**
* Sets the path of the temporary data directory to use.
* @param {string} path The log file path.
* @return {!Options} A self reference.
*/
setExtractPath(path: string): Options;
/**
* Sets whether the driver should start in silent mode.
* @param {boolean} silent Whether to run in silent mode.
* @return {!Options} A self reference.
*/
silent(silent: boolean): Options;
/**
* Sets the proxy settings for the new session.
* @param {capabilities.ProxyConfig} proxy The proxy configuration to use.
* @return {!Options} A self reference.
*/
setProxy(proxy: webdriver.ProxyConfig): Options;
/**
* Converts this options instance to a {@link capabilities.Capabilities}
* object.
* @param {capabilities.Capabilities=} opt_capabilities The capabilities to
* merge these options into, if any.
<|fim▁hole|> */
toCapabilities(opt_capabilities: webdriver.Capabilities): webdriver.Capabilities;
}<|fim▁end|> | * @return {!capabilities.Capabilities} The capabilities.
|
<|file_name|>acf-address.js<|end_file_name|><|fim▁begin|>function init_map(field_id) {
//console.log(field_id);
}
/*acf.fields.address = acf.field.extend({
type: 'address',
$el: null,
$input: null,
status: '', // '', 'loading', 'ready'
geocoder: false,
map: false,
maps: {},
pending: $(),
actions: {
'ready': 'initialize'
},
initialize: function () {
console.log('init');
}
});*/
(function ($) {
function initialize_field($el) {
console.log('init hook');
console.log($el);
initMap($el);
}
if (typeof acf.add_action !== 'undefined') {
/*
* ready append (ACF5)
*
* These are 2 events which are fired during the page load
* ready = on page load similar to $(document).ready()
* append = on new DOM elements appended via repeater field
<|fim▁hole|> *
* @param $el (jQuery selection) the jQuery element which contains the ACF fields
* @return n/a
*/
acf.add_action('ready append', function ($el) {
// search $el for fields of type 'FIELD_NAME'
acf.get_fields({type: 'address'}, $el).each(function () {
initialize_field($(this));
});
});
} else {
/*
* acf/setup_fields (ACF4)
*
* This event is triggered when ACF adds any new elements to the DOM.
*
* @type function
* @since 1.0.0
* @date 01/01/12
*
* @param event e: an event object. This can be ignored
* @param Element postbox: An element which contains the new HTML
*
* @return n/a
*/
$(document).on('acf/setup_fields', function (e, postbox) {
$(postbox).find('.field[data-field_type="address"]').each(function () {
initialize_field($(this));
});
});
}
function initMap($mapElement) {
ymaps.ready(function () {
/**
             * The data object that gets saved
             *
             * address - short address, without the city
             * addressFull - full address, including the city
             * coordinates - coordinates of the address
             * coordinatesMetro - coordinates of the nearest metro station
             * metroDist - distance to the nearest metro station (in metres)
             * addressMetro - address of the nearest metro station
             * addressMetroFull - full address of the nearest metro station
             * metroLine - nearest metro line, in the format line_{number}
*
* @type {{}}
*/
var field = {};
/**
             * Default map center and placemark coordinates
* @type {number[]}
*/
var centerMap = [55.753994, 37.622093];
/**
             * The map
* @type {undefined}
*/
var addressMap = undefined;
/**
             * The placemark
* @type {ymaps.GeoObject}
*/
var geoPoint = new ymaps.GeoObject({
geometry: {
type: "Point",
coordinates: centerMap
}
}, {
preset: 'islands#blackStretchyIcon',
draggable: true
});
geoPoint.events.add('dragend', function () {
changeLocation();
});
/**
             * Geolocation button
* @type {GeolocationButton}
*/
var geolocationButton = new GeolocationButton({
data: {
image: btn.img,
title: 'Определить местоположение'
},
geolocationOptions: {
enableHighAccuracy: true,
noPlacemark: false,
point: geoPoint,
afterSearch: function () {
changeLocation()
}
}
}, {
selectOnClick: false
});
/**
             * Address search box
* @type {ymaps.control.SearchControl}
*/
var searchControl = new ymaps.control.SearchControl({
noPlacemark: true
});
searchControl.events.add('resultselect', function (e) {
var index = e.get("resultIndex");
var result = searchControl.getResult(index);
result.then(function (res) {
var geo = res.geometry.getCoordinates();
geoPoint.geometry.setCoordinates(geo);
changeLocation();
});
});
/**
             * Button for finding the nearest metro station
* @type {Button}
*/
var button = new ymaps.control.Button({
data: {
image: btn.metro,
title: 'Найти ближайшее метро'
}
}, {
selectOnClick: false
});
button.events.add('click', function () {
findMetro();
});
/**
             * Finds the nearest metro station
*/
function findMetro() {
ymaps.geocode(field.coordinates, {
kind: 'metro',
results: 1
}).then(function (res) {
if (res.geoObjects.getLength()) {
var m0 = res.geoObjects.get(0);
var coords = m0.geometry.getCoordinates();
field.coordinatesMetro = coords;
var dist = ymaps.coordSystem.geo.getDistance(field.coordinates, coords);
field.metroDist = Math.round(dist).toFixed(0);
res.geoObjects.options.set('preset', 'twirl#metroMoscowIcon');
addressMap.geoObjects.add(res.geoObjects);
var getObject = res.geoObjects.get(0);
field.addressMetro = getObject.properties.get('name');
field.addressMetroFull = getObject.properties.get('text').replace('Россия,', '').trim();
$('.metro-row').show();
$('input[name="metro"]').val(field.addressMetro);
$('input[name="metro_full"]').val(field.addressMetroFull);
$('input[name="metro_dist"]').val(field.metroDist);
var metroLine = colorMetro(field.addressMetroFull);
if (metroLine != undefined)
field.metroLine = metroLine;
}
});
}
/**
* Событие при смене координат
*/
function changeLocation() {
var coord = geoPoint.geometry.getCoordinates();
field.coordinates = coord;
ymaps.geocode(coord).then(function (res) {
var getObject = res.geoObjects.get(0);
field.address = getObject.properties.get('name');
field.addressFull = getObject.properties.get('text').replace('Россия,', '').trim();
updateField();
});
}
/**
* Обновление полей с адресом
*/
function updateField() {
$('input[name="address"]').val(field.address);
$('input[name="address_full"]').val(field.addressFull);
}
/**
* Загрузка данных
*/
function loadField() {
//field = JSON.parse($('#acf-address-input').val());
updateField();
var loadCoord = (field.coordinates != undefined) ? field.coordinates : centerMap;
var loadZoom = (field.zoom != undefined) ? field.zoom : 10;
geoPoint.geometry.setCoordinates(loadCoord);
addressMap.setCenter(loadCoord);
addressMap.setZoom(loadZoom);
if (field.addressMetro != undefined || field.addressMetroFull != undefined) {
$('.metro-row').show();
$('input[name="metro"]').val(field.addressMetro);
$('input[name="metro_full"]').val(field.addressMetroFull);
$('input[name="metro_dist"]').val(field.metroDist);
}
}
/**
* Возвращает номер линии метро
*
* @param metro
* @returns {*}
*/
function colorMetro(metro) {
var metroArray = metro.split(',');
if (metroArray.length >= 3) {
metro = metroArray[2].replace('линия', '').trim();
} else
return undefined;
var moscowMetro = {};
moscowMetro['Сокольническая'] = 'line_1';
moscowMetro['Замоскворецкая'] = 'line_2';
moscowMetro['Арбатско-Покровская'] = 'line_3';
moscowMetro['Филёвская'] = 'line_4';
moscowMetro['Кольцевая'] = 'line_5';
moscowMetro['Калужско-Рижская'] = 'line_6';
moscowMetro['Таганско-Краснопресненская'] = 'line_7';
moscowMetro['Калининско-Солнцевская'] = 'line_8';
moscowMetro['Калининская'] = 'line_8';
moscowMetro['Серпуховско-Тимирязевская'] = 'line_9';
moscowMetro['Люблинско-Дмитровская'] = 'line_10';
moscowMetro['Каховская'] = 'line_11';
moscowMetro['Бутовская'] = 'line_12';
return moscowMetro[metro];
}
$('.address-btn-cancel').click(function () {
tb_remove();
});
$('#address-btn-ok').click(function () {
$('#acf-address-input').val(JSON.stringify(field));
$('#acf-address-display').val(field.addressFull);
tb_remove();
});
$('#acf-address-btn').click(function () {
if (addressMap != undefined)
addressMap.destroy();
addressMap = new ymaps.Map($mapElement, {
center: centerMap,
zoom: 9,
behaviors: ['default', 'scrollZoom']
});
addressMap.events.add('boundschange', function (e) {
var zoom = e.get("newZoom");
field.zoom = zoom;
});
addressMap.controls
.add(geolocationButton, {top: 5, left: 100})
.add('zoomControl')
.add('typeSelector', {top: 5, right: 5})
.add(button, {top: 5, left: 65})
.add(searchControl, {top: 5, left: 200});
addressMap.geoObjects.add(geoPoint);
loadField();
});
$('#acf-address-clear').click(function () {
field = {};
$('.metro-row').hide();
$('#acf-address-display').val('');
$('#acf-address-display').val('');
$('input[name="metro"]').val('');
$('input[name="metro_full"]').val('');
$('input[name="metro_dist"]').val('');
});
$('#acf-address-display').click(function () {
$('#acf-address-btn').trigger('click');
});
field = JSON.parse($('#acf-address-input').val());
$('#acf-address-display').val(field.addressFull);
});
}
})(jQuery);<|fim▁end|> | *
* @type event
* @date 20/07/13
|
<|file_name|>zkclient.py<|end_file_name|><|fim▁begin|>#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
import os
import gevent
import logging
import kazoo.client
import kazoo.exceptions
import kazoo.handlers.gevent
import kazoo.recipe.election
from kazoo.client import KazooState
from kazoo.retry import KazooRetry
from bitarray import bitarray
from cfgm_common.exceptions import ResourceExhaustionError, ResourceExistsError
from gevent.coros import BoundedSemaphore
import uuid
LOG_DIR = '/var/log/contrail/'
class IndexAllocator(object):
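    """Allocator of integer indices backed by zookeeper.

    Every allocated index is persisted as a child znode of ``path`` while a
    local bitarray mirrors which positions are currently in use.

    Illustrative usage (a sketch; assumes ``zk_client`` is a connected
    ``ZookeeperClient`` and that ``path`` ends with ``/``)::

        allocator = IndexAllocator(zk_client, '/id-alloc/', size=4096)
        idx = allocator.alloc(value='vn-blue')   # next free index
        allocator.read(idx)                      # 'vn-blue'
        allocator.delete(idx)                    # release the index again
    """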
def __init__(self, zookeeper_client, path, size=0, start_idx=0,
reverse=False,alloc_list=None, max_alloc=0):
self._size = size
self._start_idx = start_idx
if alloc_list is None:
self._alloc_list = [{'start':start_idx, 'end':start_idx+size}]
else:
sorted_alloc_list = sorted(alloc_list, key=lambda k: k['start'])
self._alloc_list = sorted_alloc_list
alloc_count = len(self._alloc_list)
total_size = 0
size = 0
#check for overlap in alloc_list --TODO
for alloc_idx in range (0, alloc_count -1):
idx_start_addr = self._alloc_list[alloc_idx]['start']
idx_end_addr = self._alloc_list[alloc_idx]['end']
next_start_addr = self._alloc_list[alloc_idx+1]['start']
if next_start_addr <= idx_end_addr:
raise Exception(
'Allocation Lists Overlapping: %s' %(alloc_list))
size += idx_end_addr - idx_start_addr + 1
size += self._alloc_list[alloc_count-1]['end'] - self._alloc_list[alloc_count-1]['start'] + 1
if max_alloc == 0:
self._max_alloc = size
else:
self._max_alloc = max_alloc
self._zookeeper_client = zookeeper_client
self._path = path
self._in_use = bitarray('0')
self._reverse = reverse
for idx in self._zookeeper_client.get_children(path):
idx_int = self._get_bit_from_zk_index(int(idx))
if idx_int >= 0:
self._set_in_use(idx_int)
# end for idx
# end __init__
def _get_zk_index_from_bit(self, idx):
size = idx
if self._reverse:
for alloc in reversed(self._alloc_list):
size -= alloc['end'] - alloc['start'] + 1
if size < 0:
return alloc['start']-size - 1
else:
for alloc in self._alloc_list:
size -= alloc['end'] - alloc['start'] + 1
if size < 0:
return alloc['end']+size + 1
raise ResourceExhaustionError(
'Cannot get zk index from bit %s' %(idx))
# end _get_zk_index
def _get_bit_from_zk_index(self, idx):
size = 0
if self._reverse:
for alloc in reversed(self._alloc_list):
if alloc['start'] <= idx <= alloc['end']:
return alloc['end'] - idx + size
size += alloc['end'] - alloc['start'] + 1
pass
else:
for alloc in self._alloc_list:
if alloc['start'] <= idx <= alloc['end']:
return idx - alloc['start'] + size
size += alloc['end'] - alloc['start'] + 1
return -1
# end _get_bit_from_zk_index
def _set_in_use(self, bitnum):
# if the index is higher than _max_alloc, do not use the bitarray, in
# order to reduce the size of the bitarray. Otherwise, set the bit
# corresponding to idx to 1 and extend the _in_use bitarray if needed
if bitnum > self._max_alloc:
return
if bitnum >= self._in_use.length():
temp = bitarray(bitnum - self._in_use.length())
temp.setall(0)
temp.append('1')
self._in_use.extend(temp)
else:
self._in_use[bitnum] = 1
# end _set_in_use
def _reset_in_use(self, bitnum):
# if the index is higher than _max_alloc, do not use the bitarray, in
# order to reduce the size of the bitarray. Otherwise, set the bit
# corresponding to idx to 1 and extend the _in_use bitarray if needed
if bitnum > self._max_alloc:
return
if bitnum >= self._in_use.length():
return
else:
self._in_use[bitnum] = 0
# end _reset_in_use
def set_in_use(self, idx):
bit_idx = self._get_bit_from_zk_index(idx)
if bit_idx < 0:
return
self._set_in_use(bit_idx)
# end set_in_use
def reset_in_use(self, idx):
bit_idx = self._get_bit_from_zk_index(idx)
if bit_idx < 0:
return
self._reset_in_use(bit_idx)
# end reset_in_use
def get_alloc_count(self):
return self._in_use.count()
# end get_alloc_count
def alloc(self, value=None):
# Allocates a index from the allocation list
if self._in_use.all():
idx = self._in_use.length()
if idx > self._max_alloc:
raise ResourceExhaustionError()
self._in_use.append(1)
else:
idx = self._in_use.index(0)
self._in_use[idx] = 1
idx = self._get_zk_index_from_bit(idx)
try:
# Create a node at path and return its integer value
id_str = "%(#)010d" % {'#': idx}
self._zookeeper_client.create_node(self._path + id_str, value)
return idx
except ResourceExistsError:
return self.alloc(value)
# end alloc
def reserve(self, idx, value=None):
# Reserves the requested index if available
if not self._start_idx <= idx < self._start_idx + self._size:
return None
try:
# Create a node at path and return its integer value
id_str = "%(#)010d" % {'#': idx}
self._zookeeper_client.create_node(self._path + id_str, value)
self.set_in_use(idx)
return idx
except ResourceExistsError:
self.set_in_use(idx)
existing_value = self.read(idx)
if (value == existing_value):
# idempotent reserve
return idx
msg = 'For index %s reserve conflicts with existing value %s.' \
%(idx, existing_value)
self._zookeeper_client.syslog(msg, level='notice')
raise
# end reserve
def delete(self, idx):
id_str = "%(#)010d" % {'#': idx}
self._zookeeper_client.delete_node(self._path + id_str)
bit_idx = self._get_bit_from_zk_index(idx)
if 0 <= bit_idx < self._in_use.length():
self._in_use[bit_idx] = 0
# end delete
def read(self, idx):
id_str = "%(#)010d" % {'#': idx}
id_val = self._zookeeper_client.read_node(self._path+id_str)
if id_val is not None:
bit_idx = self._get_bit_from_zk_index(idx)
if bit_idx >= 0:
self._set_in_use(bit_idx)
return id_val
# end read
def empty(self):
return not self._in_use.any()
# end empty
@classmethod
def delete_all(cls, zookeeper_client, path):
try:
zookeeper_client.delete_node(path, recursive=True)
except kazoo.exceptions.NotEmptyError:
#TODO: Add retries for NotEmptyError
zookeeper_client.syslog("NotEmptyError while deleting %s" % path)
# end delete_all
#end class IndexAllocator
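# Illustrative usage (assumes a connected ZookeeperClient instance `zk_client`;
# the trailing '/' matters because id strings are appended directly to the path):
#   allocator = IndexAllocator(zk_client, '/id-space/', size=1000)
#   idx = allocator.alloc('owner-uuid')   # pick a free id and record it in ZK
#   allocator.delete(idx)                 # release the id again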
class ZookeeperClient(object):
def __init__(self, module, server_list, logging_fn=None):
# logging
logger = logging.getLogger(module)
logger.setLevel(logging.DEBUG)
try:
handler = logging.handlers.RotatingFileHandler(
LOG_DIR + module + '-zk.log', maxBytes=10*1024*1024, backupCount=5)
except IOError:
print "Cannot open log file in %s" %(LOG_DIR)
else:
log_format = logging.Formatter('%(asctime)s [%(name)s]: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p')
handler.setFormatter(log_format)
logger.addHandler(handler)
if logging_fn:
self.log = logging_fn
else:
self.log = self.syslog
# KazooRetry to retry keeper CRUD operations
self._retry = KazooRetry(max_tries=None, max_delay=300,
sleep_func=gevent.sleep)
self._zk_client = kazoo.client.KazooClient(
server_list,
timeout=400,
handler=kazoo.handlers.gevent.SequentialGeventHandler(),
logger=logger,
connection_retry=self._retry,
command_retry=self._retry)
self._zk_client.add_listener(self._zk_listener)
self._logger = logger
self._election = None
self._server_list = server_list
self._conn_state = None
self._sandesh_connection_info_update(status='INIT', message='')
self._lost_cb = None
self._suspend_cb = None
self.connect()
# end __init__
# start
def connect(self):
while True:
try:
self._zk_client.start()
break
except gevent.event.Timeout as e:
# Update connection info
self._sandesh_connection_info_update(status='DOWN',
message=str(e))
gevent.sleep(1)
# Zookeeper is also throwing exception due to delay in master election
except Exception as e:
# Update connection info
self._sandesh_connection_info_update(status='DOWN',
message=str(e))
gevent.sleep(1)
# Update connection info
self._sandesh_connection_info_update(status='UP', message='')
# end
def is_connected(self):
return self._zk_client.state == KazooState.CONNECTED
# end is_connected
def syslog(self, msg, *args, **kwargs):
if not self._logger:
return
level = kwargs.get('level', 'info')
if isinstance(level, int):
from pysandesh.sandesh_logger import SandeshLogger
level = SandeshLogger.get_py_logger_level(level)
self._logger.log(level, msg)
return
log_method = getattr(self._logger, level, self._logger.info)
log_method(msg)
# end syslog
def set_lost_cb(self, lost_cb=None):
# set a callback to be called when kazoo state is lost
# set to None for default action
self._lost_cb = lost_cb
# end set_lost_cb
def set_suspend_cb(self, suspend_cb=None):
# set a callback to be called when kazoo state is suspend
# set to None for default action
self._suspend_cb = suspend_cb
# end set_suspend_cb
def _zk_listener(self, state):
if state == KazooState.CONNECTED:
if self._election:
self._election.cancel()
# Update connection info
self._sandesh_connection_info_update(status='UP', message='')
elif state == KazooState.LOST:
# Lost the session with ZooKeeper Server
# Best of option we have is to exit the process and restart all
# over again
self._sandesh_connection_info_update(status='DOWN',
message='Connection to Zookeeper lost')
if self._lost_cb:
self._lost_cb()
else:
os._exit(2)
elif state == KazooState.SUSPENDED:
# Update connection info
self._sandesh_connection_info_update(status='INIT',
message = 'Connection to zookeeper lost. Retrying')
if self._suspend_cb:
self._suspend_cb()
# end
def master_election(self, path, identifier, func, *args, **kwargs):
self._election = self._zk_client.Election(path, identifier)
self._election.run(func, *args, **kwargs)
# end master_election
def create_node(self, path, value=None):
try:
if value is None:
value = uuid.uuid4()
retry = self._retry.copy()
retry(self._zk_client.create, path, str(value), makepath=True)
except kazoo.exceptions.NodeExistsError:<|fim▁hole|> # end create_node
def delete_node(self, path, recursive=False):
try:
retry = self._retry.copy()
retry(self._zk_client.delete, path, recursive=recursive)
except kazoo.exceptions.NoNodeError:
pass
except Exception as e:
raise e
# end delete_node
def read_node(self, path, include_timestamp=False):
try:
retry = self._retry.copy()
value = retry(self._zk_client.get, path)
if include_timestamp:
return value
return value[0]
except Exception:
return None
# end read_node
def get_children(self, path):
try:
retry = self._retry.copy()
return retry(self._zk_client.get_children, path)
except Exception:
return []
# end read_node
def exists(self, path):
try:
retry = self._retry.copy()
return retry(self._zk_client.exists, path)
except Exception:
return []
# end exists
def _sandesh_connection_info_update(self, status, message):
from pysandesh.connection_info import ConnectionState
from pysandesh.gen_py.process_info.ttypes import ConnectionStatus
from pysandesh.gen_py.process_info.ttypes import ConnectionType as ConnType
from pysandesh.gen_py.sandesh.ttypes import SandeshLevel
new_conn_state = getattr(ConnectionStatus, status)
ConnectionState.update(conn_type = ConnType.ZOOKEEPER,
name = 'Zookeeper', status = new_conn_state,
message = message,
server_addrs = self._server_list.split(','))
if (self._conn_state and self._conn_state != ConnectionStatus.DOWN and
new_conn_state == ConnectionStatus.DOWN):
msg = 'Connection to Zookeeper down: %s' %(message)
self.log(msg, level=SandeshLevel.SYS_ERR)
if (self._conn_state and self._conn_state != new_conn_state and
new_conn_state == ConnectionStatus.UP):
msg = 'Connection to Zookeeper ESTABLISHED'
self.log(msg, level=SandeshLevel.SYS_NOTICE)
self._conn_state = new_conn_state
# end _sandesh_connection_info_update
# end class ZookeeperClient<|fim▁end|> | current_value = self.read_node(path)
        if current_value == value:
            return True
        raise ResourceExistsError(path, str(current_value), 'zookeeper') |
<|file_name|>jupyter_configure.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# *****************************************************************************
#
# Copyright (c) 2016, EPAM SYSTEMS INC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ******************************************************************************
import logging
import json
import sys
from dlab.fab import *
from dlab.meta_lib import *
from dlab.actions_lib import *
import os
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--uuid', type=str, default='')
args = parser.parse_args()
if __name__ == "__main__":
local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'],
os.environ['request_id'])
local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.DEBUG,
filename=local_log_filepath)
notebook_config = dict()
try:
notebook_config['exploratory_name'] = os.environ['exploratory_name']
except:
notebook_config['exploratory_name'] = ''
notebook_config['service_base_name'] = os.environ['conf_service_base_name']
notebook_config['instance_type'] = os.environ['aws_notebook_instance_type']
notebook_config['key_name'] = os.environ['conf_key_name']
notebook_config['user_keyname'] = os.environ['edge_user_name']
notebook_config['instance_name'] = '{}-{}-nb-{}-{}'.format(notebook_config['service_base_name'],
os.environ['edge_user_name'],
notebook_config['exploratory_name'], args.uuid)
notebook_config['expected_image_name'] = '{}-{}-notebook-image'.format(notebook_config['service_base_name'],
os.environ['application'])
notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
notebook_config['role_profile_name'] = '{}-{}-nb-de-Profile' \
.format(notebook_config['service_base_name'].lower().replace('-', '_'), os.environ['edge_user_name'])
notebook_config['security_group_name'] = '{}-{}-nb-SG'.format(notebook_config['service_base_name'],
os.environ['edge_user_name'])
notebook_config['tag_name'] = '{}-Tag'.format(notebook_config['service_base_name'])
notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
# generating variables regarding EDGE proxy on Notebook instance
instance_hostname = get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
edge_instance_name = os.environ['conf_service_base_name'] + "-" + os.environ['edge_user_name'] + '-edge'
edge_instance_hostname = get_instance_hostname(notebook_config['tag_name'], edge_instance_name)
edge_instance_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Public')
keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
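    # The rest of the script runs a fixed provisioning sequence on the new
    # instance: create the dlab ssh user, point it at the edge proxy, install
    # prerequisites, configure Jupyter itself, install the user key and git
    # credentials, register the reverse proxy entry and (optionally) bake an AMI.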
try:
if os.environ['conf_os_family'] == 'debian':
initial_user = 'ubuntu'
sudo_group = 'sudo'
if os.environ['conf_os_family'] == 'redhat':
initial_user = 'ec2-user'
sudo_group = 'wheel'
logging.info('[CREATING DLAB SSH USER]')
print('[CREATING DLAB SSH USER]')
params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
(instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem", initial_user,
notebook_config['dlab_ssh_user'], sudo_group)
try:
local("~/scripts/{}.py {}".format('create_ssh_user', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
append_result("Failed creating ssh user 'dlab'.", str(err))
remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
# configuring proxy on Notebook instance
try:
logging.info('[CONFIGURE PROXY ON JUPYTER INSTANCE]')
print('[CONFIGURE PROXY ON JUPYTER INSTANCE]')
additional_config = {"proxy_host": edge_instance_hostname, "proxy_port": "3128"}
params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}"\
.format(instance_hostname, notebook_config['instance_name'], keyfile_name, json.dumps(additional_config), notebook_config['dlab_ssh_user'])
try:
local("~/scripts/{}.py {}".format('common_configure_proxy', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
append_result("Failed to configure proxy.", str(err))
remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
# updating repositories & installing python packages
try:
logging.info('[INSTALLING PREREQUISITES TO JUPYTER NOTEBOOK INSTANCE]')
print('[INSTALLING PREREQUISITES TO JUPYTER NOTEBOOK INSTANCE]')
params = "--hostname {} --keyfile {} --user {} --region {}".\
format(instance_hostname, keyfile_name, notebook_config['dlab_ssh_user'], os.environ['aws_region'])
try:
local("~/scripts/{}.py {}".format('install_prerequisites', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
append_result("Failed installing apps: apt & pip.", str(err))
remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
# installing and configuring jupiter and all dependencies
try:
logging.info('[CONFIGURE JUPYTER NOTEBOOK INSTANCE]')
print('[CONFIGURE JUPYTER NOTEBOOK INSTANCE]')
params = "--hostname {} " \
"--keyfile {} " \
"--region {} " \
"--spark_version {} " \
"--hadoop_version {} " \
"--os_user {} " \
"--scala_version {} " \
"--r_mirror {} " \
"--exploratory_name {}".\
format(instance_hostname,
keyfile_name,
os.environ['aws_region'],
os.environ['notebook_spark_version'],
os.environ['notebook_hadoop_version'],
notebook_config['dlab_ssh_user'],
os.environ['notebook_scala_version'],
os.environ['notebook_r_mirror'],
notebook_config['exploratory_name'])
try:
local("~/scripts/{}.py {}".format('configure_jupyter_node', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
append_result("Failed to configure jupyter.", str(err))
remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
try:
print('[INSTALLING USERs KEY]')
logging.info('[INSTALLING USERs KEY]')
additional_config = {"user_keyname": notebook_config['user_keyname'],
"user_keydir": os.environ['conf_key_dir']}
params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
instance_hostname, keyfile_name, json.dumps(additional_config), notebook_config['dlab_ssh_user'])
try:
local("~/scripts/{}.py {}".format('install_user_key', params))
except:
append_result("Failed installing users key")
raise Exception
except Exception as err:
append_result("Failed installing users key.", str(err))
remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
try:
print('[SETUP USER GIT CREDENTIALS]')
logging.info('[SETUP USER GIT CREDENTIALS]')
params = '--os_user {} --notebook_ip {} --keyfile "{}"' \
.format(notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name)
try:
local("~/scripts/{}.py {}".format('common_download_git_certfile', params))
local("~/scripts/{}.py {}".format('manage_git_creds', params))
except:
append_result("Failed setup git credentials")
raise Exception
except Exception as err:<|fim▁hole|> append_result("Failed to setup git credentials.", str(err))
remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
try:
logging.info('[POST CONFIGURING PROCESS]')
        print('[POST CONFIGURING PROCESS]')
if notebook_config['notebook_image_name'] not in [notebook_config['expected_image_name'], 'None']:
params = "--hostname {} --keyfile {} --os_user {} --nb_tag_name {} --nb_tag_value {}" \
.format(instance_hostname, keyfile_name, notebook_config['dlab_ssh_user'],
notebook_config['tag_name'], notebook_config['instance_name'])
try:
local("~/scripts/{}.py {}".format('common_remove_remote_kernels', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
append_result("Failed to post configuring instance.", str(err))
remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
try:
print('[SETUP EDGE REVERSE PROXY TEMPLATE]')
logging.info('[SETUP EDGE REVERSE PROXY TEMPLATE]')
additional_info = {
'instance_hostname': instance_hostname,
'tensor': False
}
params = "--edge_hostname {} " \
"--keyfile {} " \
"--os_user {} " \
"--type {} " \
"--exploratory_name {} " \
"--additional_info '{}'"\
.format(edge_instance_hostname,
keyfile_name,
notebook_config['dlab_ssh_user'],
'jupyter',
notebook_config['exploratory_name'],
json.dumps(additional_info))
try:
local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
except:
append_result("Failed edge reverse proxy template")
raise Exception
except Exception as err:
append_result("Failed to set edge reverse proxy template.", str(err))
remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
if notebook_config['shared_image_enabled'] == 'true':
try:
print('[CREATING AMI]')
ami_id = get_ami_id_by_name(notebook_config['expected_image_name'])
if ami_id == '':
print("Looks like it's first time we configure notebook server. Creating image.")
image_id = create_image_from_instance(tag_name=notebook_config['tag_name'],
instance_name=notebook_config['instance_name'],
image_name=notebook_config['expected_image_name'])
if image_id != '':
print("Image was successfully created. It's ID is {}".format(image_id))
except Exception as err:
append_result("Failed creating image.", str(err))
remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
# generating output information
ip_address = get_instance_ip_address(notebook_config['tag_name'], notebook_config['instance_name']).get('Private')
dns_name = get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
jupyter_ip_url = "http://" + ip_address + ":8888/{}/".format(notebook_config['exploratory_name'])
jupyter_dns_url = "http://" + dns_name + ":8888/{}/".format(notebook_config['exploratory_name'])
jupyter_notebook_acces_url = "http://" + edge_instance_ip + "/{}/".format(notebook_config['exploratory_name'])
jupyter_ungit_acces_url = "http://" + edge_instance_ip + "/{}-ungit/".format(notebook_config['exploratory_name'])
ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
print('[SUMMARY]')
logging.info('[SUMMARY]')
print("Instance name: {}".format(notebook_config['instance_name']))
print("Private DNS: {}".format(dns_name))
print("Private IP: {}".format(ip_address))
print("Instance ID: {}".format(get_instance_by_name(notebook_config['tag_name'], notebook_config['instance_name'])))
print("Instance type: {}".format(notebook_config['instance_type']))
print("Key name: {}".format(notebook_config['key_name']))
print("User key name: {}".format(notebook_config['user_keyname']))
print("Image name: {}".format(notebook_config['notebook_image_name']))
print("Profile name: {}".format(notebook_config['role_profile_name']))
print("SG name: {}".format(notebook_config['security_group_name']))
print("Jupyter URL: {}".format(jupyter_ip_url))
print("Jupyter URL: {}".format(jupyter_dns_url))
print("Ungit URL: {}".format(ungit_ip_url))
    print("ReverseProxyNotebook: {}".format(jupyter_notebook_acces_url))
    print("ReverseProxyUngit: {}".format(jupyter_ungit_acces_url))
print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.
format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
print('SSH access (from Edge node, via FQDN): ssh -i {0}.pem {1}@{2}'.
format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], dns_name))
with open("/root/result.json", 'w') as result:
res = {"hostname": dns_name,
"ip": ip_address,
"instance_id": get_instance_by_name(notebook_config['tag_name'], notebook_config['instance_name']),
"master_keyname": os.environ['conf_key_name'],
"notebook_name": notebook_config['instance_name'],
"notebook_image_name": notebook_config['notebook_image_name'],
"Action": "Create new notebook server",
"exploratory_url": [
{"description": "Jupyter",
"url": jupyter_notebook_acces_url},
{"description": "Ungit",
"url": jupyter_ungit_acces_url},
{"description": "Jupyter (via tunnel)",
"url": jupyter_ip_url},
{"description": "Ungit (via tunnel)",
"url": ungit_ip_url}
]}
result.write(json.dumps(res))<|fim▁end|> | |
<|file_name|>bus.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import zmq
import threading
import logging
import logging.handlers
import util
class Logger(threading.Thread):
"""logger for all messages and events"""
def __init__(self, stop_logging, filename='bus.log'):
super(Logger, self).__init__()
self.filename = filename
self.stop_logging = stop_logging
# receiving socket
self.context = zmq.Context.instance()
self.log_in = self.context.socket(zmq.PAIR)
self.log_in.connect("inproc://logging")
self.log_in.setsockopt(zmq.RCVTIMEO, 1000)
# logger parameters for stdout and compressed file
log_format = logging.Formatter('%(asctime)s [%(levelname)s] %(message)s')
file_log_handler = util.TimedCompressingRotatingFileHandler(self.filename, when='midnight', backupCount=7)
file_log_handler.setFormatter(log_format)
stream_log_handler = logging.StreamHandler()
stream_log_handler.setFormatter(log_format)
self.logger = logging.getLogger('logger')
self.logger.setLevel(logging.INFO)
self.logger.addHandler(file_log_handler)
self.logger.addHandler(stream_log_handler)
def run(self):
while not self.stop_logging.is_set():
try:
# receive message
message = self.log_in.recv_multipart()
if len(message) > 1:
# message with content
[topic, contents] = message
self.logger.info("[msg] {%s} %s", topic, contents)
else:
# subscribe/unsubscribe
message = message[0]
topic = message[1:]
if message.startswith(b'\x00'):
# unsubscribe
self.logger.info("[unsub] {%s}", topic)
elif message.startswith(b'\x01'):
# subscribe
self.logger.info("[sub] {%s}", topic)
else:
self.logger.warning("[unknown message] %s", message)
except zmq.ZMQError as e:
if e.errno == zmq.ETERM:
self.logger.error("socket error, stopped logging")
break
elif e.errno == zmq.EAGAIN:
pass
else:
print(e)
self.logger.error("unknown error occurred during logging")
def main():
context = zmq.Context.instance()
# socket facing clients
frontend = context.socket(zmq.XSUB)<|fim▁hole|> backend = context.socket(zmq.XPUB)
backend.bind("tcp://*:5560")
# log socket
log_out = context.socket(zmq.PAIR)
log_out.bind("inproc://logging")
# start logging thread
stop_logging = threading.Event()
logger = Logger(stop_logging)
logger.start()
try:
zmq.proxy(frontend, backend, log_out)
except KeyboardInterrupt:
print("shutting down")
finally:
frontend.close()
backend.close()
stop_logging.set()
logger.join()
if __name__ == "__main__":
main()<|fim▁end|> | frontend.bind("tcp://*:5559")
# socket facing services |
<|file_name|>linear_operator_test_util.py<|end_file_name|><|fim▁begin|># Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for testing `LinearOperator` and sub-classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import numpy as np
import six
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.linalg import linalg_impl as linalg
from tensorflow.python.ops.linalg import linear_operator_util
from tensorflow.python.platform import test
class OperatorBuildInfo(object):
"""Object encoding expected shape for a test.
Encodes the expected shape of a matrix for a test. Also
allows additional metadata for the test harness.
"""
def __init__(self, shape, **kwargs):
self.shape = shape
self.__dict__.update(kwargs)
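# LinearOperatorDerivedClassTest below drives every test through the abstract
# hooks that concrete subclasses provide: _operator_build_infos (shapes to
# exercise), _operator_and_mat_and_feed_dict (operator plus an equivalent dense
# matrix), and _make_rhs/_make_x (operands for solve/matmul).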
@six.add_metaclass(abc.ABCMeta) # pylint: disable=no-init
class LinearOperatorDerivedClassTest(test.TestCase):
"""Tests for derived classes.
Subclasses should implement every abstractmethod, and this will enable all
test methods to work.
"""
# Absolute/relative tolerance for tests.
_atol = {
dtypes.float16: 1e-3,
dtypes.float32: 1e-6,
dtypes.float64: 1e-12,
dtypes.complex64: 1e-6,
dtypes.complex128: 1e-12
}
_rtol = {
dtypes.float16: 1e-3,
dtypes.float32: 1e-6,
dtypes.float64: 1e-12,
dtypes.complex64: 1e-6,
dtypes.complex128: 1e-12
}
def assertAC(self, x, y):
"""Derived classes can set _atol, _rtol to get different tolerance."""
dtype = dtypes.as_dtype(x.dtype)
atol = self._atol[dtype]
rtol = self._rtol[dtype]
self.assertAllClose(x, y, atol=atol, rtol=rtol)
@property
def _adjoint_options(self):
return [False, True]
@property
def _adjoint_arg_options(self):
return [False, True]
@property
def _dtypes_to_test(self):
# TODO(langmore) Test tf.float16 once tf.matrix_solve works in 16bit.
return [dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128]
@property
def _use_placeholder_options(self):
return [False, True]
@abc.abstractproperty
def _operator_build_infos(self):
"""Returns list of OperatorBuildInfo, encapsulating the shape to test."""
raise NotImplementedError("operator_build_infos has not been implemented.")
@abc.abstractmethod
def _operator_and_mat_and_feed_dict(self, build_info, dtype, use_placeholder):
"""Build a batch matrix and an Operator that should have similar behavior.
Every operator acts like a (batch) matrix. This method returns both
together, and is used by tests.
Args:
build_info: `OperatorBuildInfo`, encoding shape information about the
operator.
dtype: Numpy dtype. Data type of returned array/operator.
use_placeholder: Python bool. If True, initialize the operator with a
placeholder of undefined shape and correct dtype.
Returns:
operator: `LinearOperator` subclass instance.
mat: `Tensor` representing operator.
feed_dict: Dictionary.
If placholder is True, this must contains everything needed to be fed
to sess.run calls at runtime to make the operator work.
"""
# Create a matrix as a numpy array with desired shape/dtype.
# Create a LinearOperator that should have the same behavior as the matrix.
raise NotImplementedError("Not implemented yet.")
@abc.abstractmethod
def _make_rhs(self, operator, adjoint, with_batch=True):
"""Make a rhs appropriate for calling operator.solve(rhs).
Args:
operator: A `LinearOperator`
adjoint: Python `bool`. If `True`, we are making a 'rhs' value for the
adjoint operator.
with_batch: Python `bool`. If `True`, create `rhs` with the same batch
shape as operator, and otherwise create a matrix without any batch
shape.
Returns:
A `Tensor`
"""
raise NotImplementedError("_make_rhs is not defined.")
@abc.abstractmethod
def _make_x(self, operator, adjoint, with_batch=True):
"""Make an 'x' appropriate for calling operator.matmul(x).
Args:
operator: A `LinearOperator`
adjoint: Python `bool`. If `True`, we are making an 'x' value for the
adjoint operator.
with_batch: Python `bool`. If `True`, create `x` with the same batch shape
as operator, and otherwise create a matrix without any batch shape.
Returns:
A `Tensor`
"""
raise NotImplementedError("_make_x is not defined.")
@property
def _tests_to_skip(self):
"""List of test names to skip."""
# Subclasses should over-ride if they want to skip some tests.
# To skip "test_foo", add "foo" to this list.
return []
def _skip_if_tests_to_skip_contains(self, test_name):
"""If self._tests_to_skip contains test_name, raise SkipTest exception.
See tests below for usage.
Args:
test_name: String name corresponding to a test.
Raises:
SkipTest Exception, if test_name is in self._tests_to_skip.
"""
if test_name in self._tests_to_skip:
self.skipTest(
"{} skipped because it was added to self._tests_to_skip.".format(
test_name))
def test_to_dense(self):
self._skip_if_tests_to_skip_contains("to_dense")
for use_placeholder in self._use_placeholder_options:
for build_info in self._operator_build_infos:
for dtype in self._dtypes_to_test:
with self.test_session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
operator, mat, feed_dict = self._operator_and_mat_and_feed_dict(
build_info, dtype, use_placeholder=use_placeholder)
op_dense = operator.to_dense()
if not use_placeholder:
self.assertAllEqual(build_info.shape, op_dense.get_shape())
op_dense_v, mat_v = sess.run([op_dense, mat], feed_dict=feed_dict)
self.assertAC(op_dense_v, mat_v)
def test_det(self):
self._skip_if_tests_to_skip_contains("det")
for use_placeholder in self._use_placeholder_options:
for build_info in self._operator_build_infos:
for dtype in self._dtypes_to_test:
with self.test_session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
operator, mat, feed_dict = self._operator_and_mat_and_feed_dict(
build_info, dtype, use_placeholder=use_placeholder)
op_det = operator.determinant()
if not use_placeholder:
self.assertAllEqual(build_info.shape[:-2], op_det.get_shape())
op_det_v, mat_det_v = sess.run(
[op_det, linalg_ops.matrix_determinant(mat)],
feed_dict=feed_dict)
self.assertAC(op_det_v, mat_det_v)
def test_log_abs_det(self):
self._skip_if_tests_to_skip_contains("log_abs_det")
for use_placeholder in self._use_placeholder_options:
for build_info in self._operator_build_infos:
for dtype in self._dtypes_to_test:
with self.test_session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
operator, mat, feed_dict = self._operator_and_mat_and_feed_dict(
build_info, dtype, use_placeholder=use_placeholder)
op_log_abs_det = operator.log_abs_determinant()
_, mat_log_abs_det = linalg.slogdet(mat)
if not use_placeholder:
self.assertAllEqual(
build_info.shape[:-2], op_log_abs_det.get_shape())
op_log_abs_det_v, mat_log_abs_det_v = sess.run(
[op_log_abs_det, mat_log_abs_det], feed_dict=feed_dict)
self.assertAC(op_log_abs_det_v, mat_log_abs_det_v)
def _test_matmul(self, with_batch):
for use_placeholder in self._use_placeholder_options:
for build_info in self._operator_build_infos:
# If batch dimensions are omitted, but there are
# no batch dimensions for the linear operator, then
# skip the test case. This is already checked with
# with_batch=True.
if not with_batch and len(build_info.shape) <= 2:
continue
for dtype in self._dtypes_to_test:
for adjoint in self._adjoint_options:
for adjoint_arg in self._adjoint_arg_options:
with self.test_session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
operator, mat, feed_dict = self._operator_and_mat_and_feed_dict(
build_info, dtype, use_placeholder=use_placeholder)
x = self._make_x(
operator, adjoint=adjoint, with_batch=with_batch)
# If adjoint_arg, compute A X^H^H = A X.
if adjoint_arg:
op_matmul = operator.matmul(
linalg.adjoint(x),
adjoint=adjoint,
adjoint_arg=adjoint_arg)
else:
op_matmul = operator.matmul(x, adjoint=adjoint)
mat_matmul = linear_operator_util.matmul_with_broadcast(
mat, x, adjoint_a=adjoint)
if not use_placeholder:
self.assertAllEqual(op_matmul.get_shape(),
mat_matmul.get_shape())
op_matmul_v, mat_matmul_v = sess.run(
[op_matmul, mat_matmul], feed_dict=feed_dict)
self.assertAC(op_matmul_v, mat_matmul_v)
def test_matmul(self):
self._skip_if_tests_to_skip_contains("matmul")
self._test_matmul(with_batch=True)
def test_matmul_with_broadcast(self):
self._skip_if_tests_to_skip_contains("matmul_with_broadcast")
self._test_matmul(with_batch=False)
def _test_solve(self, with_batch):
for use_placeholder in self._use_placeholder_options:
for build_info in self._operator_build_infos:
# If batch dimensions are omitted, but there are
# no batch dimensions for the linear operator, then
# skip the test case. This is already checked with
# with_batch=True.
if not with_batch and len(build_info.shape) <= 2:
continue
for dtype in self._dtypes_to_test:
for adjoint in self._adjoint_options:
for adjoint_arg in self._adjoint_arg_options:
with self.test_session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
operator, mat, feed_dict = self._operator_and_mat_and_feed_dict(
build_info, dtype, use_placeholder=use_placeholder)
rhs = self._make_rhs(
operator, adjoint=adjoint, with_batch=with_batch)
# If adjoint_arg, solve A X = (rhs^H)^H = rhs.
if adjoint_arg:
op_solve = operator.solve(
linalg.adjoint(rhs),
adjoint=adjoint,
adjoint_arg=adjoint_arg)
else:
op_solve = operator.solve(
rhs, adjoint=adjoint, adjoint_arg=adjoint_arg)
mat_solve = linear_operator_util.matrix_solve_with_broadcast(
mat, rhs, adjoint=adjoint)
if not use_placeholder:
self.assertAllEqual(op_solve.get_shape(),
mat_solve.get_shape())
op_solve_v, mat_solve_v = sess.run(
[op_solve, mat_solve], feed_dict=feed_dict)
self.assertAC(op_solve_v, mat_solve_v)
def test_solve(self):
self._skip_if_tests_to_skip_contains("solve")
self._test_solve(with_batch=True)
def test_solve_with_broadcast(self):
self._skip_if_tests_to_skip_contains("solve_with_broadcast")
self._test_solve(with_batch=False)
def test_trace(self):
self._skip_if_tests_to_skip_contains("trace")
for use_placeholder in self._use_placeholder_options:
for build_info in self._operator_build_infos:
for dtype in self._dtypes_to_test:
with self.test_session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
operator, mat, feed_dict = self._operator_and_mat_and_feed_dict(
build_info, dtype, use_placeholder=use_placeholder)
op_trace = operator.trace()
mat_trace = math_ops.trace(mat)
if not use_placeholder:
self.assertAllEqual(op_trace.get_shape(), mat_trace.get_shape())
op_trace_v, mat_trace_v = sess.run(
[op_trace, mat_trace], feed_dict=feed_dict)
self.assertAC(op_trace_v, mat_trace_v)
def test_add_to_tensor(self):
self._skip_if_tests_to_skip_contains("add_to_tensor")<|fim▁hole|> for build_info in self._operator_build_infos:
for dtype in self._dtypes_to_test:
with self.test_session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
operator, mat, feed_dict = self._operator_and_mat_and_feed_dict(
build_info, dtype, use_placeholder=use_placeholder)
op_plus_2mat = operator.add_to_tensor(2 * mat)
if not use_placeholder:
self.assertAllEqual(build_info.shape, op_plus_2mat.get_shape())
op_plus_2mat_v, mat_v = sess.run(
[op_plus_2mat, mat], feed_dict=feed_dict)
self.assertAC(op_plus_2mat_v, 3 * mat_v)
def test_diag_part(self):
self._skip_if_tests_to_skip_contains("diag_part")
for use_placeholder in self._use_placeholder_options:
for build_info in self._operator_build_infos:
for dtype in self._dtypes_to_test:
with self.test_session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
operator, mat, feed_dict = self._operator_and_mat_and_feed_dict(
build_info, dtype, use_placeholder=use_placeholder)
op_diag_part = operator.diag_part()
mat_diag_part = array_ops.matrix_diag_part(mat)
if not use_placeholder:
self.assertAllEqual(mat_diag_part.get_shape(),
op_diag_part.get_shape())
op_diag_part_, mat_diag_part_ = sess.run(
[op_diag_part, mat_diag_part], feed_dict=feed_dict)
self.assertAC(op_diag_part_, mat_diag_part_)
@six.add_metaclass(abc.ABCMeta)
class SquareLinearOperatorDerivedClassTest(LinearOperatorDerivedClassTest):
"""Base test class appropriate for square operators.
Sub-classes must still define all abstractmethods from
LinearOperatorDerivedClassTest that are not defined here.
"""
@property
def _operator_build_infos(self):
build_info = OperatorBuildInfo
# non-batch operators (n, n) and batch operators.
return [
build_info((0, 0)),
build_info((1, 1)),
build_info((1, 3, 3)),
build_info((3, 4, 4)),
build_info((2, 1, 4, 4))]
def _make_rhs(self, operator, adjoint, with_batch=True):
# This operator is square, so rhs and x will have same shape.
# adjoint value makes no difference because the operator shape doesn't
# change since it is square, but be pedantic.
return self._make_x(operator, adjoint=not adjoint, with_batch=with_batch)
def _make_x(self, operator, adjoint, with_batch=True):
# Value of adjoint makes no difference because the operator is square.
# Return the number of systems to solve, R, equal to 1 or 2.
r = self._get_num_systems(operator)
# If operator.shape = [B1,...,Bb, N, N] this returns a random matrix of
# shape [B1,...,Bb, N, R], R = 1 or 2.
if operator.shape.is_fully_defined():
batch_shape = operator.batch_shape.as_list()
n = operator.domain_dimension.value
if with_batch:
x_shape = batch_shape + [n, r]
else:
x_shape = [n, r]
else:
batch_shape = operator.batch_shape_tensor()
n = operator.domain_dimension_tensor()
if with_batch:
x_shape = array_ops.concat((batch_shape, [n, r]), 0)
else:
x_shape = [n, r]
return random_normal(x_shape, dtype=operator.dtype)
def _get_num_systems(self, operator):
"""Get some number, either 1 or 2, depending on operator."""
if operator.tensor_rank is None or operator.tensor_rank % 2:
return 1
else:
return 2
@six.add_metaclass(abc.ABCMeta)
class NonSquareLinearOperatorDerivedClassTest(LinearOperatorDerivedClassTest):
"""Base test class appropriate for generic rectangular operators.
Square shapes are never tested by this class, so if you want to test your
operator with a square shape, create two test classes, the other subclassing
SquareLinearOperatorFullMatrixTest.
Sub-classes must still define all abstractmethods from
LinearOperatorDerivedClassTest that are not defined here.
"""
@property
def _tests_to_skip(self):
"""List of test names to skip."""
return ["solve", "solve_with_broadcast", "det", "log_abs_det"]
@property
def _operator_build_infos(self):
build_info = OperatorBuildInfo
# non-batch operators (n, n) and batch operators.
return [
build_info((2, 1)),
build_info((1, 2)),
build_info((1, 3, 2)),
build_info((3, 3, 4)),
build_info((2, 1, 2, 4))]
def _make_rhs(self, operator, adjoint, with_batch=True):
# TODO(langmore) Add once we're testing solve_ls.
raise NotImplementedError(
"_make_rhs not implemented because we don't test solve")
def _make_x(self, operator, adjoint, with_batch=True):
# Return the number of systems for the argument 'x' for .matmul(x)
r = self._get_num_systems(operator)
# If operator.shape = [B1,...,Bb, M, N] this returns a random matrix of
# shape [B1,...,Bb, N, R], R = 1 or 2.
if operator.shape.is_fully_defined():
batch_shape = operator.batch_shape.as_list()
if adjoint:
n = operator.range_dimension.value
else:
n = operator.domain_dimension.value
if with_batch:
x_shape = batch_shape + [n, r]
else:
x_shape = [n, r]
else:
batch_shape = operator.batch_shape_tensor()
if adjoint:
n = operator.range_dimension_tensor()
else:
n = operator.domain_dimension_tensor()
if with_batch:
x_shape = array_ops.concat((batch_shape, [n, r]), 0)
else:
x_shape = [n, r]
return random_normal(x_shape, dtype=operator.dtype)
def _get_num_systems(self, operator):
"""Get some number, either 1 or 2, depending on operator."""
if operator.tensor_rank is None or operator.tensor_rank % 2:
return 1
else:
return 2
def random_positive_definite_matrix(shape, dtype, force_well_conditioned=False):
"""[batch] positive definite matrix.
Args:
shape: `TensorShape` or Python list. Shape of the returned matrix.
dtype: `TensorFlow` `dtype` or Python dtype.
force_well_conditioned: Python bool. If `True`, returned matrix has
eigenvalues with modulus in `(1, 4)`. Otherwise, eigenvalues are
chi-squared random variables.
Returns:
`Tensor` with desired shape and dtype.
"""
dtype = dtypes.as_dtype(dtype)
if not tensor_util.is_tensor(shape):
shape = tensor_shape.TensorShape(shape)
# Matrix must be square.
shape[-1].assert_is_compatible_with(shape[-2])
with ops.name_scope("random_positive_definite_matrix"):
tril = random_tril_matrix(
shape, dtype, force_well_conditioned=force_well_conditioned)
return math_ops.matmul(tril, tril, adjoint_b=True)
def random_tril_matrix(shape,
dtype,
force_well_conditioned=False,
remove_upper=True):
"""[batch] lower triangular matrix.
Args:
shape: `TensorShape` or Python `list`. Shape of the returned matrix.
dtype: `TensorFlow` `dtype` or Python dtype
force_well_conditioned: Python `bool`. If `True`, returned matrix will have
eigenvalues with modulus in `(1, 2)`. Otherwise, eigenvalues are unit
normal random variables.
remove_upper: Python `bool`.
If `True`, zero out the strictly upper triangle.
If `False`, the lower triangle of returned matrix will have desired
properties, but will not have the strictly upper triangle zero'd out.
Returns:
`Tensor` with desired shape and dtype.
"""
with ops.name_scope("random_tril_matrix"):
# Totally random matrix. Has no nice properties.
tril = random_normal(shape, dtype=dtype)
if remove_upper:
tril = array_ops.matrix_band_part(tril, -1, 0)
# Create a diagonal with entries having modulus in [1, 2].
if force_well_conditioned:
maxval = ops.convert_to_tensor(np.sqrt(2.), dtype=dtype.real_dtype)
diag = random_sign_uniform(
shape[:-1], dtype=dtype, minval=1., maxval=maxval)
tril = array_ops.matrix_set_diag(tril, diag)
return tril
def random_normal(shape, mean=0.0, stddev=1.0, dtype=dtypes.float32, seed=None):
"""Tensor with (possibly complex) Gaussian entries.
Samples are distributed like
```
N(mean, stddev^2), if dtype is real,
X + iY, where X, Y ~ N(mean, stddev^2) if dtype is complex.
```
Args:
shape: `TensorShape` or Python list. Shape of the returned tensor.
mean: `Tensor` giving mean of normal to sample from.
stddev: `Tensor` giving stdev of normal to sample from.
dtype: `TensorFlow` `dtype` or numpy dtype
seed: Python integer seed for the RNG.
Returns:
`Tensor` with desired shape and dtype.
"""
dtype = dtypes.as_dtype(dtype)
with ops.name_scope("random_normal"):
samples = random_ops.random_normal(
shape, mean=mean, stddev=stddev, dtype=dtype.real_dtype, seed=seed)
if dtype.is_complex:
if seed is not None:
seed += 1234
more_samples = random_ops.random_normal(
shape, mean=mean, stddev=stddev, dtype=dtype.real_dtype, seed=seed)
samples = math_ops.complex(samples, more_samples)
return samples
def random_uniform(shape,
minval=None,
maxval=None,
dtype=dtypes.float32,
seed=None):
"""Tensor with (possibly complex) Uniform entries.
Samples are distributed like
```
Uniform[minval, maxval], if dtype is real,
X + iY, where X, Y ~ Uniform[minval, maxval], if dtype is complex.
```
Args:
shape: `TensorShape` or Python list. Shape of the returned tensor.
minval: `0-D` `Tensor` giving the minimum values.
maxval: `0-D` `Tensor` giving the maximum values.
dtype: `TensorFlow` `dtype` or Python dtype
seed: Python integer seed for the RNG.
Returns:
`Tensor` with desired shape and dtype.
"""
dtype = dtypes.as_dtype(dtype)
with ops.name_scope("random_uniform"):
samples = random_ops.random_uniform(
shape, dtype=dtype.real_dtype, minval=minval, maxval=maxval, seed=seed)
if dtype.is_complex:
if seed is not None:
seed += 12345
more_samples = random_ops.random_uniform(
shape,
dtype=dtype.real_dtype,
minval=minval,
maxval=maxval,
seed=seed)
samples = math_ops.complex(samples, more_samples)
return samples
def random_sign_uniform(shape,
minval=None,
maxval=None,
dtype=dtypes.float32,
seed=None):
"""Tensor with (possibly complex) random entries from a "sign Uniform".
Letting `Z` be a random variable equal to `-1` and `1` with equal probability,
Samples from this `Op` are distributed like
```
Z * X, where X ~ Uniform[minval, maxval], if dtype is real,
Z * (X + iY), where X, Y ~ Uniform[minval, maxval], if dtype is complex.
```
Args:
shape: `TensorShape` or Python list. Shape of the returned tensor.
minval: `0-D` `Tensor` giving the minimum values.
maxval: `0-D` `Tensor` giving the maximum values.
dtype: `TensorFlow` `dtype` or Python dtype
seed: Python integer seed for the RNG.
Returns:
`Tensor` with desired shape and dtype.
"""
dtype = dtypes.as_dtype(dtype)
with ops.name_scope("random_sign_uniform"):
unsigned_samples = random_uniform(
shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed)
if seed is not None:
seed += 12
signs = math_ops.sign(
random_ops.random_uniform(shape, minval=-1., maxval=1., seed=seed))
return unsigned_samples * math_ops.cast(signs, unsigned_samples.dtype)
def random_normal_correlated_columns(shape,
mean=0.0,
stddev=1.0,
dtype=dtypes.float32,
eps=1e-4,
seed=None):
"""Batch matrix with (possibly complex) Gaussian entries and correlated cols.
Returns random batch matrix `A` with specified element-wise `mean`, `stddev`,
living close to an embedded hyperplane.
Suppose `shape[-2:] = (M, N)`.
If `M < N`, `A` is a random `M x N` [batch] matrix with iid Gaussian entries.
If `M >= N`, then the colums of `A` will be made almost dependent as follows:
```
L = random normal N x N-1 matrix, mean = 0, stddev = 1 / sqrt(N - 1)
B = random normal M x N-1 matrix, mean = 0, stddev = stddev.
G = (L B^H)^H, a random normal M x N matrix, living on N-1 dim hyperplane
E = a random normal M x N matrix, mean = 0, stddev = eps
mu = a constant M x N matrix, equal to the argument "mean"
A = G + E + mu
```
Args:
shape: Python list of integers.
Shape of the returned tensor. Must be at least length two.
mean: `Tensor` giving mean of normal to sample from.
stddev: `Tensor` giving stdev of normal to sample from.
dtype: `TensorFlow` `dtype` or numpy dtype
eps: Distance each column is perturbed from the low-dimensional subspace.
seed: Python integer seed for the RNG.
Returns:
`Tensor` with desired shape and dtype.
Raises:
ValueError: If `shape` is not at least length 2.
"""
dtype = dtypes.as_dtype(dtype)
if len(shape) < 2:
raise ValueError(
"Argument shape must be at least length 2. Found: %s" % shape)
# Shape is the final shape, e.g. [..., M, N]
shape = list(shape)
batch_shape = shape[:-2]
m, n = shape[-2:]
# If there is only one column, "they" are by definition correlated.
if n < 2 or n < m:
return random_normal(
shape, mean=mean, stddev=stddev, dtype=dtype, seed=seed)
# Shape of the matrix with only n - 1 columns that we will embed in higher
# dimensional space.
smaller_shape = batch_shape + [m, n - 1]
# Shape of the embedding matrix, mapping batch matrices
# from [..., N-1, M] to [..., N, M]
embedding_mat_shape = batch_shape + [n, n - 1]
# This stddev for the embedding_mat ensures final result has correct stddev.
stddev_mat = 1 / np.sqrt(n - 1)
with ops.name_scope("random_normal_correlated_columns"):
smaller_mat = random_normal(
smaller_shape, mean=0.0, stddev=stddev_mat, dtype=dtype, seed=seed)
if seed is not None:
seed += 1287
embedding_mat = random_normal(embedding_mat_shape, dtype=dtype, seed=seed)
embedded_t = math_ops.matmul(embedding_mat, smaller_mat, transpose_b=True)
embedded = array_ops.matrix_transpose(embedded_t)
mean_mat = array_ops.ones_like(embedded) * mean
return embedded + random_normal(shape, stddev=eps, dtype=dtype) + mean_mat<|fim▁end|> | for use_placeholder in self._use_placeholder_options: |
<|file_name|>PhotoView.java<|end_file_name|><|fim▁begin|>/*******************************************************************************
* Copyright 2011, 2012 Chris Banes.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
package uk.co.senab.photoview;
import uk.co.senab.photoview.PhotoViewAttacher.OnMatrixChangedListener;
import uk.co.senab.photoview.PhotoViewAttacher.OnPhotoTapListener;
import uk.co.senab.photoview.PhotoViewAttacher.OnViewTapListener;
import android.content.Context;
import android.graphics.Bitmap;
import android.graphics.Matrix;
import android.graphics.RectF;
import android.graphics.drawable.Drawable;
import android.net.Uri;
import android.util.AttributeSet;
import android.widget.ImageView;
public class PhotoView extends ImageView implements IPhotoView {
private final PhotoViewAttacher mAttacher;
private ScaleType mPendingScaleType;
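    // All zoom/pan behaviour is delegated to PhotoViewAttacher. A scale type
    // requested before the attacher exists is parked in mPendingScaleType and
    // applied at the end of the constructor.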
public PhotoView(Context context) {
this(context, null);
}
public PhotoView(Context context, AttributeSet attr) {
this(context, attr, 0);
}
public PhotoView(Context context, AttributeSet attr, int defStyle) {
super(context, attr, defStyle);
super.setScaleType(ScaleType.MATRIX);
mAttacher = new PhotoViewAttacher(this);
if (null != mPendingScaleType) {
setScaleType(mPendingScaleType);
mPendingScaleType = null;
}
}
@Override
public void setPhotoViewRotation(float rotationDegree) {
mAttacher.setPhotoViewRotation(rotationDegree);
}
@Override
public boolean canZoom() {
return mAttacher.canZoom();
}
@Override
public RectF getDisplayRect() {
return mAttacher.getDisplayRect();
}
@Override
public Matrix getDisplayMatrix() {
return mAttacher.getDrawMatrix();
}
@Override
public boolean setDisplayMatrix(Matrix finalRectangle) {
return mAttacher.setDisplayMatrix(finalRectangle);
}
@Override
@Deprecated
public float getMinScale() {
return getMinimumScale();
}
@Override
public float getMinimumScale() {
return mAttacher.getMinimumScale();
}
@Override
@Deprecated
public float getMidScale() {
return getMediumScale();
}
@Override
public float getMediumScale() {
return mAttacher.getMediumScale();
}
@Override
@Deprecated
public float getMaxScale() {
return getMaximumScale();
}
@Override
public float getMaximumScale() {<|fim▁hole|> @Override
public float getScale() {
return mAttacher.getScale();
}
@Override
public ScaleType getScaleType() {
return mAttacher.getScaleType();
}
@Override
public void setAllowParentInterceptOnEdge(boolean allow) {
mAttacher.setAllowParentInterceptOnEdge(allow);
}
@Override
@Deprecated
public void setMinScale(float minScale) {
setMinimumScale(minScale);
}
@Override
public void setMinimumScale(float minimumScale) {
mAttacher.setMinimumScale(minimumScale);
}
@Override
@Deprecated
public void setMidScale(float midScale) {
setMediumScale(midScale);
}
@Override
public void setMediumScale(float mediumScale) {
mAttacher.setMediumScale(mediumScale);
}
@Override
@Deprecated
public void setMaxScale(float maxScale) {
setMaximumScale(maxScale);
}
@Override
public void setMaximumScale(float maximumScale) {
mAttacher.setMaximumScale(maximumScale);
}
@Override
// setImageBitmap calls through to this method
public void setImageDrawable(Drawable drawable) {
super.setImageDrawable(drawable);
if (null != mAttacher) {
mAttacher.update();
}
}
@Override
public void setImageResource(int resId) {
super.setImageResource(resId);
if (null != mAttacher) {
mAttacher.update();
}
}
@Override
public void setImageURI(Uri uri) {
super.setImageURI(uri);
if (null != mAttacher) {
mAttacher.update();
}
}
@Override
public void setOnMatrixChangeListener(OnMatrixChangedListener listener) {
mAttacher.setOnMatrixChangeListener(listener);
}
@Override
public void setOnLongClickListener(OnLongClickListener l) {
mAttacher.setOnLongClickListener(l);
}
@Override
public void setOnPhotoTapListener(OnPhotoTapListener listener) {
mAttacher.setOnPhotoTapListener(listener);
}
@Override
public OnPhotoTapListener getOnPhotoTapListener() {
return mAttacher.getOnPhotoTapListener();
}
@Override
public void setOnViewTapListener(OnViewTapListener listener) {
mAttacher.setOnViewTapListener(listener);
}
@Override
public OnViewTapListener getOnViewTapListener() {
return mAttacher.getOnViewTapListener();
}
@Override
public void setScale(float scale) {
mAttacher.setScale(scale);
}
@Override
public void setScale(float scale, boolean animate) {
mAttacher.setScale(scale, animate);
}
@Override
public void setScale(float scale, float focalX, float focalY, boolean animate) {
mAttacher.setScale(scale, focalX, focalY, animate);
}
@Override
public void setScaleType(ScaleType scaleType) {
if (null != mAttacher) {
mAttacher.setScaleType(scaleType);
} else {
mPendingScaleType = scaleType;
}
}
@Override
public void setZoomable(boolean zoomable) {
mAttacher.setZoomable(zoomable);
}
@Override
public Bitmap getVisibleRectangleBitmap() {
return mAttacher.getVisibleRectangleBitmap();
}
@Override
public void setZoomTransitionDuration(int milliseconds) {
mAttacher.setZoomTransitionDuration(milliseconds);
}
@Override
protected void onDetachedFromWindow() {
mAttacher.cleanup();
super.onDetachedFromWindow();
}
}<|fim▁end|> | return mAttacher.getMaximumScale();
}
|
<|file_name|>issue-54521-3.rs<|end_file_name|><|fim▁begin|>// run-rustfix
// This test checks that the following error is emitted and the suggestion works:
//
// ```
// let _ = vec![1, 2, 3].into_iter().collect::<Vec<usize>>>>();
// ^^ help: remove extra angle brackets<|fim▁hole|>fn main() {
let _ = vec![1, 2, 3].into_iter().collect::<Vec<usize>>>>>>();
//~^ ERROR unmatched angle bracket
let _ = vec![1, 2, 3].into_iter().collect::<Vec<usize>>>>>();
//~^ ERROR unmatched angle bracket
let _ = vec![1, 2, 3].into_iter().collect::<Vec<usize>>>>();
//~^ ERROR unmatched angle bracket
let _ = vec![1, 2, 3].into_iter().collect::<Vec<usize>>>();
//~^ ERROR unmatched angle bracket
}<|fim▁end|> | // ```
|
<|file_name|>RTreeGridRecordWriter.java<|end_file_name|><|fim▁begin|>package org.apache.hadoop.mapred.spatial;
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.RecordWriter;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.spatial.CellInfo;
import org.apache.hadoop.spatial.Shape;
public class RTreeGridRecordWriter
extends org.apache.hadoop.spatial.RTreeGridRecordWriter<Shape>
implements RecordWriter<IntWritable, Text> {
public RTreeGridRecordWriter(FileSystem fileSystem, Path outFile, CellInfo[] cells, boolean overwrite) throws IOException {
super(fileSystem, outFile, cells, overwrite);
}
@Override
public void write(IntWritable key, Text value) throws IOException {
super.write(key.get(), value);
}
@Override
public void close(Reporter reporter) throws IOException {
super.close(reporter);
}<|fim▁hole|>}<|fim▁end|> | |
<|file_name|>lawnchair.js<|end_file_name|><|fim▁begin|>/**
* Lawnchair!
* ---
* clientside json store
*
*/
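// Illustrative usage (callback semantics as implemented by the adapter below):
//   new Lawnchair({name: 'notes'}, function () {
//       // `this` is the store once the chosen adapter's init() has finished
//       this.save({key: 'note-1', text: 'hello'})
//   })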
var Lawnchair = function () {
// lawnchair requires json
if (!JSON) throw 'JSON unavailable! Include http://www.json.org/json2.js to fix.'
// options are optional; callback is not
if (arguments.length <= 2 && arguments.length > 0) {
var callback = (typeof arguments[0] === 'function') ? arguments[0] : arguments[1]
, options = (typeof arguments[0] === 'function') ? {} : arguments[0]
} else {
throw 'Incorrect # of ctor args!'
}
if (typeof callback !== 'function') throw 'No callback was provided';
// default configuration
this.record = options.record || 'record' // default for records
this.name = options.name || 'records' // default name for underlying store
// mixin first valid adapter
var adapter
// if the adapter is passed in we try to load that only
if (options.adapter) {
adapter = Lawnchair.adapters[Lawnchair.adapters.indexOf(options.adapter)]
adapter = adapter.valid() ? adapter : undefined
// otherwise find the first valid adapter for this env
} else {
for (var i = 0, l = Lawnchair.adapters.length; i < l; i++) {
adapter = Lawnchair.adapters[i].valid() ? Lawnchair.adapters[i] : undefined
if (adapter) break
}
}
// we have failed
if (!adapter) throw 'No valid adapter.'
// yay! mixin the adapter
for (var j in adapter) {
this[j] = adapter[j]
}
// call init for each mixed in plugin
for (var i = 0, l = Lawnchair.plugins.length; i < l; i++)
Lawnchair.plugins[i].call(this)
// init the adapter
this.init(options, callback)
}
Lawnchair.adapters = []
/**
* queues an adapter for mixin
* ===
* - ensures an adapter conforms to a specific interface
*
*/
Lawnchair.adapter = function (id, obj) {
// add the adapter id to the adapter obj
// ugly here for a cleaner dsl for implementing adapters
obj['adapter'] = id
// methods required to implement a lawnchair adapter
var implementing = 'adapter valid init keys save batch get exists all remove nuke'.split(' ')
// mix in the adapter
for (var i in obj) if (implementing.indexOf(i) === -1) throw 'Invalid adapter! Nonstandard method: ' + i
// if we made it this far the adapter interface is valid
Lawnchair.adapters.push(obj)
}
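// Illustrative sketch only, not part of the original library: registering a
// hypothetical no-op adapter. The object may only contain method names from the
// `implementing` list above, and `valid()` returns false here so this example
// adapter is never actually selected at runtime.
Lawnchair.adapter('example-noop', {
    valid: function () { return false },
    init: function (options, callback) { this.fn(this.name, callback).call(this, this); return this },
    keys: function (callback) { if (callback) this.lambda(callback).call(this, []); return this },
    save: function (obj, callback) { if (callback) this.lambda(callback).call(this, obj); return this },
    batch: function (objs, callback) { if (callback) this.lambda(callback).call(this, objs); return this },
    get: function (keyOrArray, callback) { if (callback) this.lambda(callback).call(this, null); return this },
    exists: function (key, callback) { if (callback) this.fn('exists', callback).call(this, false); return this },
    all: function (callback) { if (callback) this.lambda(callback).call(this, []); return this },
    remove: function (keyOrObj, callback) { if (callback) this.lambda(callback).call(this); return this },
    nuke: function (callback) { if (callback) this.lambda(callback).call(this); return this }
})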
Lawnchair.plugins = []
/**
* generic shallow extension for plugins
* ===
* - if an init method is found it registers it to be called when the lawnchair is inited
* - yes we could use hasOwnProp but nobody here is an asshole
*/
Lawnchair.plugin = function (obj) {
for (var i in obj)
i === 'init' ? Lawnchair.plugins.push(obj[i]) : this.prototype[i] = obj[i]
}
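// Hypothetical plugin sketch, not in the original source: an `init` key is
// queued and run once per new Lawnchair instance, while every other key is
// copied straight onto Lawnchair.prototype.
Lawnchair.plugin({
    init: function () { /* runs against each store created after registration */ },
    touch: function (callback) { if (callback) this.lambda(callback).call(this); return this }
})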
/**
* helpers
*
*/
Lawnchair.prototype = {
isArray: Array.isArray || function(o) { return Object.prototype.toString.call(o) === '[object Array]' },
// awesome shorthand callbacks as strings. this is shameless theft from dojo.
lambda: function (callback) {
return this.fn(this.record, callback)
},
// first stab at named parameters for terse callbacks; dojo: first != best // ;D
fn: function (name, callback) {
return typeof callback == 'string' ? new Function(name, callback) : callback
},
// returns a unique identifier (by way of Backbone.localStorage.js)
// TODO investigate smaller UUIDs to cut on storage cost
uuid: function () {
var S4 = function () {
return (((1+Math.random())*0x10000)|0).toString(16).substring(1);
}
return (S4()+S4()+"-"+S4()+"-"+S4()+"-"+S4()+"-"+S4()+S4()+S4());
},
// a classic iterator
each: function (callback) {
var cb = this.lambda(callback)
// iterate from chain
if (this.__results) {
for (var i = 0, l = this.__results.length; i < l; i++) cb.call(this, this.__results[i], i)
}
// otherwise iterate the entire collection
else {
this.all(function(r) {
for (var i = 0, l = r.length; i < l; i++) cb.call(this, r[i], i)
})
}
return this
}
// --
};
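// Usage sketch, not from the original source: because `lambda`/`fn` above turn
// string callbacks into `new Function(this.record, body)`, a hypothetical store
// (default record name 'record') can be read with either style:
//     store.get('note1', function (record) { console.log(record) })
//     store.get('note1', 'console.log(record)')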
Lawnchair.adapter('webkit-sqlite', (function () {
// private methods
var fail = function (e, i) { console.log('error in sqlite adaptor!', e, i) }
, now = function () { return new Date() } // FIXME need to use better date fn
// not entirely sure if this is needed...
if (!Function.prototype.bind) {
Function.prototype.bind = function( obj ) {
var slice = [].slice
, args = slice.call(arguments, 1)
, self = this
, nop = function () {}
, bound = function () {
return self.apply(this instanceof nop ? this : (obj || {}), args.concat(slice.call(arguments)))
}
nop.prototype = self.prototype
bound.prototype = new nop()
return bound
}
}
// public methods
return {
valid: function() { return !!(window.openDatabase) },
init: function (options, callback) {
var that = this
, cb = that.fn(that.name, callback)
, create = "CREATE TABLE IF NOT EXISTS " + this.name + " (id NVARCHAR(32) UNIQUE PRIMARY KEY, value TEXT, timestamp REAL)"
, win = cb.bind(this)
// open a connection and create the db if it doesn't exist
this.db = openDatabase(this.name, '1.0.0', this.name, 65536)
this.db.transaction(function (t) {
t.executeSql(create, [], win, fail)
})
},
keys: function (callback) {
var cb = this.lambda(callback)
, that = this
, keys = "SELECT id FROM " + this.name + " ORDER BY timestamp DESC"
this.db.transaction(function(t) {
var win = function (xxx, results) {
if (results.rows.length == 0 ) {
cb.call(that, [])
} else {
var r = [];
for (var i = 0, l = results.rows.length; i < l; i++) {
r.push(results.rows.item(i).id);
}
cb.call(that, r)
}
}
t.executeSql(keys, [], win, fail)
})
return this
},
// you think thats air you're breathing now?
save: function (obj, callback) {
var that = this
, id = obj.key || that.uuid()
, ins = "INSERT INTO " + this.name + " (value, timestamp, id) VALUES (?,?,?)"
, up = "UPDATE " + this.name + " SET value=?, timestamp=? WHERE id=?"
, win = function () { if (callback) { obj.key = id; that.lambda(callback).call(that, obj) }}
, val = [now(), id]
// existential
that.exists(obj.key, function(exists) {
// transactions are like condoms
that.db.transaction(function(t) {
// TODO move timestamp to a plugin
var insert = function (obj) {
val.unshift(JSON.stringify(obj))
t.executeSql(ins, val, win, fail)
}
// TODO move timestamp to a plugin
var update = function (obj) {
delete(obj.key)
val.unshift(JSON.stringify(obj))
t.executeSql(up, val, win, fail)
}
// pretty
exists ? update(obj) : insert(obj)
})
});
return this
},
// FIXME this should be a batch insert / just getting the test to pass...
batch: function (objs, cb) {
var results = []
, done = false
, that = this
var updateProgress = function(obj) {
results.push(obj)
done = results.length === objs.length
}
var checkProgress = setInterval(function() {
if (done) {
if (cb) that.lambda(cb).call(that, results)
clearInterval(checkProgress)
}
}, 200)
for (var i = 0, l = objs.length; i < l; i++)
this.save(objs[i], updateProgress)
return this
},
get: function (keyOrArray, cb) {
var that = this
, sql = ''
// batch selects support
if (this.isArray(keyOrArray)) {
sql = 'SELECT id, value FROM ' + this.name + " WHERE id IN ('" + keyOrArray.join("','") + "')"
} else {<|fim▁hole|> sql = 'SELECT id, value FROM ' + this.name + " WHERE id = '" + keyOrArray + "'"
}
// FIXME
// will always loop the results but cleans it up if not a batch return at the end..
// in other words, this could be faster
var win = function (xxx, results) {
var o = null
, r = []
if (results.rows.length) {
for (var i = 0, l = results.rows.length; i < l; i++) {
o = JSON.parse(results.rows.item(i).value)
o.key = results.rows.item(i).id
r.push(o)
}
}
if (!that.isArray(keyOrArray)) r = r.length ? r[0] : null
if (cb) that.lambda(cb).call(that, r)
}
this.db.transaction(function(t){ t.executeSql(sql, [], win, fail) })
return this
},
exists: function (key, cb) {
var is = "SELECT * FROM " + this.name + " WHERE id = ?"
, that = this
, win = function(xxx, results) { if (cb) that.fn('exists', cb).call(that, (results.rows.length > 0)) }
this.db.transaction(function(t){ t.executeSql(is, [key], win, fail) })
return this
},
all: function (callback) {
var that = this
, all = "SELECT * FROM " + this.name
, r = []
, cb = this.fn(this.name, callback) || undefined
, win = function (xxx, results) {
if (results.rows.length != 0) {
for (var i = 0, l = results.rows.length; i < l; i++) {
var obj = JSON.parse(results.rows.item(i).value)
obj.key = results.rows.item(i).id
r.push(obj)
}
}
if (cb) cb.call(that, r)
}
this.db.transaction(function (t) {
t.executeSql(all, [], win, fail)
})
return this
},
remove: function (keyOrObj, cb) {
var that = this
, key = typeof keyOrObj === 'string' ? keyOrObj : keyOrObj.key
, del = "DELETE FROM " + this.name + " WHERE id = ?"
, win = function () { if (cb) that.lambda(cb).call(that) }
this.db.transaction( function (t) {
t.executeSql(del, [key], win, fail);
});
return this;
},
nuke: function (cb) {
var nuke = "DELETE FROM " + this.name
, that = this
, win = cb ? function() { that.lambda(cb).call(that) } : function(){}
this.db.transaction(function (t) {
t.executeSql(nuke, [], win, fail)
})
return this
}
//////
}})())<|fim▁end|> | |
<|file_name|>pants_daemon.py<|end_file_name|><|fim▁begin|># Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import logging
import os
import sys
import threading
from contextlib import contextmanager
from dataclasses import dataclass
from setproctitle import setproctitle as set_process_title
from pants.base.build_environment import get_buildroot
from pants.base.exception_sink import ExceptionSink, SignalHandler
from pants.base.exiter import Exiter
from pants.bin.daemon_pants_runner import DaemonPantsRunner
from pants.engine.native import Native
from pants.engine.rules import UnionMembership
from pants.init.engine_initializer import EngineInitializer
from pants.init.logging import init_rust_logger, setup_logging
from pants.init.options_initializer import BuildConfigInitializer, OptionsInitializer
from pants.option.options_bootstrapper import OptionsBootstrapper
from pants.option.options_fingerprinter import OptionsFingerprinter
from pants.option.scope import GLOBAL_SCOPE
from pants.pantsd.process_manager import FingerprintedProcessManager
from pants.pantsd.service.fs_event_service import FSEventService
from pants.pantsd.service.pailgun_service import PailgunService
from pants.pantsd.service.pants_service import PantsServices
from pants.pantsd.service.scheduler_service import SchedulerService
from pants.pantsd.service.store_gc_service import StoreGCService
from pants.pantsd.watchman_launcher import WatchmanLauncher
from pants.util.contextutil import stdio_as
from pants.util.memo import memoized_property
from pants.util.strutil import ensure_text
class _LoggerStream(object):
"""A sys.std{out,err} replacement that pipes output to a logger.
N.B. `logging.Logger` expects unicode. However, most of our outstream logic, such as in
`exiter.py`, will use `sys.std{out,err}.buffer` and thus a bytes interface. So, we must provide
a `buffer` property, and change the semantics of the buffer to always convert the message to
unicode. This is an unfortunate code smell, as `logging` does not expose a bytes interface so
this is the best solution we could think of.
"""
def __init__(self, logger, log_level, handler):
"""
:param logging.Logger logger: The logger instance to emit writes to.
:param int log_level: The log level to use for the given logger.
:param Handler handler: The underlying log handler, for determining the fileno
to support faulthandler logging.
"""
self._logger = logger
self._log_level = log_level
self._handler = handler
def write(self, msg):
msg = ensure_text(msg)
for line in msg.rstrip().splitlines():
# The log only accepts text, and will raise a decoding error if the default encoding is ascii
# if provided a bytes input for unicode text.
line = ensure_text(line)
self._logger.log(self._log_level, line.rstrip())
def flush(self):
return
def isatty(self):
return False
def fileno(self):
return self._handler.stream.fileno()
@property
def buffer(self):
return self
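# Illustrative note, not part of the original module: _pantsd_logging() below wires
# this in as sys.stdout = _LoggerStream(logging.getLogger(), logging.INFO, result.log_handler).
# After that, plain text writes such as print("msg") and bytes-oriented callers that
# reach for sys.stdout.buffer both funnel into the logger, because `buffer` returns the
# same _LoggerStream and write() coerces its argument to text via ensure_text().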
class PantsDaemonSignalHandler(SignalHandler):
def __init__(self, daemon):
super().__init__()
self._daemon = daemon
def handle_sigint(self, signum, _frame):
self._daemon.terminate(include_watchman=False)
class PantsDaemon(FingerprintedProcessManager):
"""A daemon that manages PantsService instances."""
JOIN_TIMEOUT_SECONDS = 1
LOG_NAME = "pantsd.log"
class StartupFailure(Exception):
"""Represents a failure to start pantsd."""
class RuntimeFailure(Exception):
"""Represents a pantsd failure at runtime, usually from an underlying service failure."""
@dataclass(frozen=True)
class Handle:
"""A handle to a "probably running" pantsd instance.
We attempt to verify that the pantsd instance is still running when we create a Handle, but
        after it has been created it is entirely possible that the pantsd instance perishes.
"""
pid: int
port: int
metadata_base_dir: str
class Factory:
@classmethod
def maybe_launch(cls, options_bootstrapper):
"""Creates and launches a daemon instance if one does not already exist.
:param OptionsBootstrapper options_bootstrapper: The bootstrap options.
:returns: A Handle for the running pantsd instance.
:rtype: PantsDaemon.Handle
"""
stub_pantsd = cls.create(options_bootstrapper, full_init=False)
with stub_pantsd._services.lifecycle_lock:
if stub_pantsd.needs_restart(stub_pantsd.options_fingerprint):
# Once we determine we actually need to launch, recreate with full initialization.
pantsd = cls.create(options_bootstrapper)
return pantsd.launch()
else:
# We're already launched.
return PantsDaemon.Handle(
stub_pantsd.await_pid(10),
stub_pantsd.read_named_socket("pailgun", int),
stub_pantsd._metadata_base_dir,
)
@classmethod
def restart(cls, options_bootstrapper):
"""Restarts a running daemon instance.
:param OptionsBootstrapper options_bootstrapper: The bootstrap options.
:returns: A Handle for the pantsd instance.
:rtype: PantsDaemon.Handle
"""
pantsd = cls.create(options_bootstrapper)
with pantsd._services.lifecycle_lock:
# N.B. This will call `pantsd.terminate()` before starting.
return pantsd.launch()
<|fim▁hole|> :param OptionsBootstrapper options_bootstrapper: The bootstrap options.
:param bool full_init: Whether or not to fully initialize an engine et al for the purposes
of spawning a new daemon. `full_init=False` is intended primarily
for lightweight lifecycle checks (since there is a ~1s overhead to
initialize the engine). See the impl of `maybe_launch` for an example
of the intended usage.
"""
bootstrap_options = options_bootstrapper.bootstrap_options
bootstrap_options_values = bootstrap_options.for_global_scope()
# TODO: https://github.com/pantsbuild/pants/issues/3479
watchman = WatchmanLauncher.create(bootstrap_options_values).watchman
if full_init:
build_root = get_buildroot()
native = Native()
build_config = BuildConfigInitializer.get(options_bootstrapper)
legacy_graph_scheduler = EngineInitializer.setup_legacy_graph(
native, options_bootstrapper, build_config
)
services = cls._setup_services(
build_root,
bootstrap_options_values,
legacy_graph_scheduler,
watchman,
union_membership=UnionMembership(build_config.union_rules()),
)
else:
build_root = None
native = None
services = PantsServices()
return PantsDaemon(
native=native,
build_root=build_root,
work_dir=bootstrap_options_values.pants_workdir,
log_level=bootstrap_options_values.level.upper(),
services=services,
metadata_base_dir=bootstrap_options_values.pants_subprocessdir,
bootstrap_options=bootstrap_options,
)
@staticmethod
def _setup_services(
build_root,
bootstrap_options,
legacy_graph_scheduler,
watchman,
union_membership: UnionMembership,
):
"""Initialize pantsd services.
:returns: A PantsServices instance.
"""
should_shutdown_after_run = bootstrap_options.shutdown_pantsd_after_run
fs_event_service = FSEventService(watchman, build_root,)
pidfile_absolute = PantsDaemon.metadata_file_path(
"pantsd", "pid", bootstrap_options.pants_subprocessdir
)
if pidfile_absolute.startswith(build_root):
pidfile = os.path.relpath(pidfile_absolute, build_root)
else:
pidfile = None
logging.getLogger(__name__).warning(
"Not watching pantsd pidfile because subprocessdir is outside of buildroot. Having "
"subprocessdir be a child of buildroot (as it is by default) may help avoid stray "
"pantsd processes."
)
scheduler_service = SchedulerService(
fs_event_service=fs_event_service,
legacy_graph_scheduler=legacy_graph_scheduler,
build_root=build_root,
invalidation_globs=OptionsInitializer.compute_pantsd_invalidation_globs(
build_root, bootstrap_options
),
pantsd_pidfile=pidfile,
union_membership=union_membership,
)
pailgun_service = PailgunService(
(bootstrap_options.pantsd_pailgun_host, bootstrap_options.pantsd_pailgun_port),
DaemonPantsRunner,
scheduler_service,
should_shutdown_after_run,
)
store_gc_service = StoreGCService(legacy_graph_scheduler.scheduler)
return PantsServices(
services=(fs_event_service, scheduler_service, pailgun_service, store_gc_service),
port_map=dict(pailgun=pailgun_service.pailgun_port),
)
def __init__(
self,
native,
build_root,
work_dir,
log_level,
services,
metadata_base_dir,
bootstrap_options=None,
):
"""
:param Native native: A `Native` instance.
:param string build_root: The pants build root.
:param string work_dir: The pants work directory.
:param string log_level: The log level to use for daemon logging.
:param PantsServices services: A registry of services to use in this run.
:param string metadata_base_dir: The ProcessManager metadata base dir.
:param Options bootstrap_options: The bootstrap options, if available.
"""
super().__init__(name="pantsd", metadata_base_dir=metadata_base_dir)
self._native = native
self._build_root = build_root
self._work_dir = work_dir
self._log_level = log_level
self._services = services
self._bootstrap_options = bootstrap_options
self._log_show_rust_3rdparty = (
bootstrap_options.for_global_scope().log_show_rust_3rdparty
if bootstrap_options
else True
)
self._log_dir = os.path.join(work_dir, self.name)
self._logger = logging.getLogger(__name__)
# N.B. This Event is used as nothing more than a convenient atomic flag - nothing waits on it.
self._kill_switch = threading.Event()
@memoized_property
def watchman_launcher(self):
return WatchmanLauncher.create(self._bootstrap_options.for_global_scope())
@property
def is_killed(self):
return self._kill_switch.is_set()
@property
def options_fingerprint(self):
return OptionsFingerprinter.combined_options_fingerprint_for_scope(
GLOBAL_SCOPE, self._bootstrap_options, fingerprint_key="daemon", invert=True
)
def shutdown(self, service_thread_map):
"""Gracefully terminate all services and kill the main PantsDaemon loop."""
with self._services.lifecycle_lock:
for service, service_thread in service_thread_map.items():
self._logger.info(f"terminating pantsd service: {service}")
service.terminate()
service_thread.join(self.JOIN_TIMEOUT_SECONDS)
self._logger.info("terminating pantsd")
self._kill_switch.set()
@staticmethod
def _close_stdio():
"""Close stdio streams to avoid output in the tty that launched pantsd."""
for fd in (sys.stdin, sys.stdout, sys.stderr):
file_no = fd.fileno()
fd.flush()
fd.close()
os.close(file_no)
@contextmanager
def _pantsd_logging(self):
"""A context manager that runs with pantsd logging.
Asserts that stdio (represented by file handles 0, 1, 2) is closed to ensure that we can
safely reuse those fd numbers.
"""
# Ensure that stdio is closed so that we can safely reuse those file descriptors.
for fd in (0, 1, 2):
try:
os.fdopen(fd)
raise AssertionError(f"pantsd logging cannot initialize while stdio is open: {fd}")
except OSError:
pass
# Redirect stdio to /dev/null for the rest of the run, to reserve those file descriptors
# for further forks.
with stdio_as(stdin_fd=-1, stdout_fd=-1, stderr_fd=-1):
# Reinitialize logging for the daemon context.
init_rust_logger(self._log_level, self._log_show_rust_3rdparty)
result = setup_logging(
self._log_level,
log_dir=self._log_dir,
log_name=self.LOG_NAME,
native=self._native,
warnings_filter_regexes=self._bootstrap_options.for_global_scope(),
)
self._native.override_thread_logging_destination_to_just_pantsd()
# Do a python-level redirect of stdout/stderr, which will not disturb `0,1,2`.
# TODO: Consider giving these pipes/actual fds, in order to make them "deep" replacements
# for `1,2`, and allow them to be used via `stdio_as`.
sys.stdout = _LoggerStream(logging.getLogger(), logging.INFO, result.log_handler)
sys.stderr = _LoggerStream(logging.getLogger(), logging.WARN, result.log_handler)
self._logger.debug("logging initialized")
yield (result.log_handler.stream, result.log_handler.native_filename)
def _setup_services(self, pants_services):
for service in pants_services.services:
self._logger.info(f"setting up service {service}")
service.setup(self._services)
@staticmethod
def _make_thread(service):
name = f"{service.__class__.__name__}Thread"
def target():
Native().override_thread_logging_destination_to_just_pantsd()
service.run()
t = threading.Thread(target=target, name=name)
t.daemon = True
return t
def _run_services(self, pants_services):
"""Service runner main loop."""
if not pants_services.services:
self._logger.critical("no services to run, bailing!")
return
service_thread_map = {
service: self._make_thread(service) for service in pants_services.services
}
# Start services.
for service, service_thread in service_thread_map.items():
self._logger.info(f"starting service {service}")
try:
service_thread.start()
except (RuntimeError, FSEventService.ServiceError):
self.shutdown(service_thread_map)
raise PantsDaemon.StartupFailure(
f"service {service} failed to start, shutting down!"
)
# Once all services are started, write our pid.
self.write_pid()
self.write_metadata_by_name(
"pantsd", self.FINGERPRINT_KEY, ensure_text(self.options_fingerprint)
)
# Monitor services.
while not self.is_killed:
for service, service_thread in service_thread_map.items():
if not service_thread.is_alive():
self.shutdown(service_thread_map)
raise PantsDaemon.RuntimeFailure(
f"service failure for {service}, shutting down!"
)
else:
# Avoid excessive CPU utilization.
service_thread.join(self.JOIN_TIMEOUT_SECONDS)
def _write_named_sockets(self, socket_map):
"""Write multiple named sockets using a socket mapping."""
for socket_name, socket_info in socket_map.items():
self.write_named_socket(socket_name, socket_info)
def run_sync(self):
"""Synchronously run pantsd."""
os.environ.pop("PYTHONPATH")
# Switch log output to the daemon's log stream from here forward.
# Also, register an exiter using os._exit to ensure we only close stdio streams once.
self._close_stdio()
with self._pantsd_logging() as (log_stream, log_filename), ExceptionSink.exiter_as(
lambda _: Exiter(exiter=os._exit)
):
# We don't have any stdio streams to log to anymore, so we log to a file.
# We don't override the faulthandler destination because the stream we get will proxy things
# via the rust logging code, and faulthandler needs to be writing directly to a real file
# descriptor. When pantsd logging was originally initialised, we already set up faulthandler
# to log to the correct file descriptor, so don't override it.
#
# We can get tracebacks of the pantsd process by tailing the pantsd log and sending it
# SIGUSR2.
ExceptionSink.reset_interactive_output_stream(
log_stream, override_faulthandler_destination=False,
)
# Reset the log location and the backtrace preference from the global bootstrap options.
global_bootstrap_options = self._bootstrap_options.for_global_scope()
ExceptionSink.reset_should_print_backtrace_to_terminal(
global_bootstrap_options.print_exception_stacktrace
)
ExceptionSink.reset_log_location(global_bootstrap_options.pants_workdir)
self._native.set_panic_handler()
# Set the process name in ps output to 'pantsd' vs './pants compile src/etc:: -ldebug'.
set_process_title(f"pantsd [{self._build_root}]")
# Write service socket information to .pids.
self._write_named_sockets(self._services.port_map)
# Enter the main service runner loop.
self._setup_services(self._services)
self._run_services(self._services)
def post_fork_child(self):
"""Post-fork() child callback for ProcessManager.daemon_spawn()."""
spawn_control_env = dict(
PANTS_ENTRYPOINT=f"{__name__}:launch",
# The daemon should run under the same sys.path as us; so we ensure
# this. NB: It will scrub PYTHONPATH once started to avoid infecting
# its own unrelated subprocesses.
PYTHONPATH=os.pathsep.join(sys.path),
)
exec_env = {**os.environ, **spawn_control_env}
# Pass all of sys.argv so that we can proxy arg flags e.g. `-ldebug`.
cmd = [sys.executable] + sys.argv
spawn_control_env_vars = " ".join(f"{k}={v}" for k, v in spawn_control_env.items())
cmd_line = " ".join(cmd)
self._logger.debug(f"cmd is: {spawn_control_env_vars} {cmd_line}")
# TODO: Improve error handling on launch failures.
os.spawnve(os.P_NOWAIT, sys.executable, cmd, env=exec_env)
def needs_launch(self):
"""Determines if pantsd needs to be launched.
N.B. This should always be called under care of the `lifecycle_lock`.
:returns: True if the daemon needs launching, False otherwise.
:rtype: bool
"""
new_fingerprint = self.options_fingerprint
self._logger.debug(
"pantsd: is_alive={self.is_alive()} new_fingerprint={new_fingerprint} current_fingerprint={self.fingerprint}"
)
return self.needs_restart(new_fingerprint)
def launch(self):
"""Launches pantsd in a subprocess.
N.B. This should always be called under care of the `lifecycle_lock`.
:returns: A Handle for the pantsd instance.
:rtype: PantsDaemon.Handle
"""
self.terminate(include_watchman=False)
self.watchman_launcher.maybe_launch()
self._logger.debug("launching pantsd")
self.daemon_spawn()
# Wait up to 60 seconds for pantsd to write its pidfile.
pantsd_pid = self.await_pid(60)
listening_port = self.read_named_socket("pailgun", int)
self._logger.debug(f"pantsd is running at pid {self.pid}, pailgun port is {listening_port}")
return self.Handle(pantsd_pid, listening_port, self._metadata_base_dir)
def terminate(self, include_watchman=True):
"""Terminates pantsd and watchman.
N.B. This should always be called under care of the `lifecycle_lock`.
"""
super().terminate()
if include_watchman:
self.watchman_launcher.terminate()
def needs_restart(self, option_fingerprint):
"""Overrides ProcessManager.needs_restart, to account for the case where pantsd is running
        but we want to shut down after this run.
        :param option_fingerprint: A fingerprint of the global bootstrap options.
:return: True if the daemon needs to restart.
"""
should_shutdown_after_run = (
self._bootstrap_options.for_global_scope().shutdown_pantsd_after_run
)
return super().needs_restart(option_fingerprint) or (
self.is_alive() and should_shutdown_after_run
)
def launch():
"""An external entrypoint that spawns a new pantsd instance."""
PantsDaemon.Factory.create(OptionsBootstrapper.create()).run_sync()<|fim▁end|> | @classmethod
def create(cls, options_bootstrapper, full_init=True):
""" |
<|file_name|>ordinal_split_handler_test.py<|end_file_name|><|fim▁begin|># Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for checking stats accumulator related ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.boosted_trees.lib.learner.batch import ordinal_split_handler
from tensorflow.contrib.boosted_trees.proto import learner_pb2
from tensorflow.contrib.boosted_trees.proto import split_info_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resources
from tensorflow.python.platform import googletest
def get_empty_tensors(gradient_shape, hessian_shape):
empty_hess_shape = [1] + hessian_shape.as_list()
empty_grad_shape = [1] + gradient_shape.as_list()
empty_gradients = constant_op.constant(
[], dtype=dtypes.float32, shape=empty_grad_shape)
empty_hessians = constant_op.constant(
[], dtype=dtypes.float32, shape=empty_hess_shape)
return empty_gradients, empty_hessians
class DenseSplitHandlerTest(test_util.TensorFlowTestCase):
def testGenerateFeatureSplitCandidates(self):
with self.test_session() as sess:
# The data looks like the following:
# Example | Gradients | Partition | Dense Quantile |
# i0 | (0.2, 0.12) | 0 | 1 |
# i1 | (-0.5, 0.07) | 0 | 1 |
# i2 | (1.2, 0.2) | 0 | 0 |
# i3 | (4.0, 0.13) | 1 | 1 |
dense_column = array_ops.constant([0.52, 0.52, 0.3, 0.52])
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
partition_ids = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
class_id = -1
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
split_handler = ordinal_split_handler.DenseSplitHandler(
l1_regularization=0.1,
l2_regularization=1,
tree_complexity_regularization=0,
min_node_weight=0,
epsilon=0.001,
num_quantiles=10,
feature_column_group_id=0,
dense_float_column=dense_column,
init_stamp_token=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(0, 1, class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(1, 2, class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
# During the first iteration, inequality split handlers are not going to
# have any splits. Make sure that we return not_ready in that case.
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
self.assertAllEqual([0, 1], partitions)
# Check the split on partition 0.
# -(1.2 - 0.1) / (0.2 + 1)
expected_left_weight = -0.91666
# expected_left_weight * -(1.2 - 0.1)
expected_left_gain = 1.0083333333333331
# (-0.5 + 0.2 + 0.1) / (0.19 + 1)
expected_right_weight = 0.1680672
# expected_right_weight * -(-0.5 + 0.2 + 0.1))
expected_right_gain = 0.033613445378151252
# (0.2 + -0.5 + 1.2 - 0.1) ** 2 / (0.12 + 0.07 + 0.2 + 1)
expected_bias_gain = 0.46043165467625885
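      # Sketch, not part of the original test: the closed form the hand-computed
      # numbers above and below follow, assuming scalar gradients and the
      # l1_regularization=0.1 / l2_regularization=1 configured for this handler:
      #   shrunk_grad = grad - sign(grad) * l1     (0 when |grad| <= l1)
      #   node_weight = -shrunk_grad / (hessian + l2)
      #   node_gain   = shrunk_grad ** 2 / (hessian + l2)
      # and a candidate split's gain is left_gain + right_gain - bias_gain.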
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.dense_float_binary_split
self.assertAllClose(
expected_left_gain + expected_right_gain - expected_bias_gain, gains[0],
0.00001)
self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
self.assertEqual(0, split_node.feature_column)
self.assertAllClose(0.3, split_node.threshold, 0.00001)
# Check the split on partition 1.
# (-4 + 0.1) / (0.13 + 1)
expected_left_weight = -3.4513274336283186
# (-4 + 0.1) ** 2 / (0.13 + 1)
expected_left_gain = 13.460176991150442
expected_right_weight = 0
expected_right_gain = 0
# (-4 + 0.1) ** 2 / (0.13 + 1)
expected_bias_gain = 13.460176991150442
# Verify candidate for partition 1, there's only one active bucket here
# so zero gain is expected.
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[1])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.dense_float_binary_split
self.assertAllClose(0.0, gains[1], 0.00001)
self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
self.assertEqual(0, split_node.feature_column)
self.assertAllClose(0.52, split_node.threshold, 0.00001)
def testGenerateFeatureSplitCandidatesMulticlassFullHessian(self):
with self.test_session() as sess:
dense_column = array_ops.constant([0.52, 0.52, 0.3, 0.52])
      # Batch size is 4, with 2 gradients per instance.
gradients = array_ops.constant(
[[0.2, 0.1], [-0.5, 0.2], [1.2, 3.4], [4.0, -3.5]], shape=[4, 2])
# 2x2 matrix for each instance
hessian_0 = [[0.12, 0.02], [0.3, 0.11]]
hessian_1 = [[0.07, -0.2], [-0.5, 0.2]]
hessian_2 = [[0.2, -0.23], [-0.8, 0.9]]
hessian_3 = [[0.13, -0.3], [-1.5, 2.2]]
hessians = array_ops.constant(
[hessian_0, hessian_1, hessian_2, hessian_3])
partition_ids = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
class_id = -1
gradient_shape = tensor_shape.TensorShape([2])
hessian_shape = tensor_shape.TensorShape([2, 2])
split_handler = ordinal_split_handler.DenseSplitHandler(
l1_regularization=0,
l2_regularization=1,
tree_complexity_regularization=0,
min_node_weight=0,
epsilon=0.001,
num_quantiles=3,
feature_column_group_id=0,
dense_float_column=dense_column,
init_stamp_token=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.FULL_HESSIAN)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(0, 1, class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(1, 2, class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
# During the first iteration, inequality split handlers are not going to
# have any splits. Make sure that we return not_ready in that case.
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.dense_float_binary_split
# Each leaf has 2 element vector.
self.assertEqual(2, len(left_child.value))
self.assertEqual(2, len(right_child.value))
self.assertEqual(0, split_node.feature_column)
self.assertAllClose(0.3, split_node.threshold, 1e-6)
def testGenerateFeatureSplitCandidatesMulticlassDiagonalHessian(self):
with self.test_session() as sess:
dense_column = array_ops.constant([0.52, 0.52, 0.3, 0.52])
      # Batch size is 4, with 2 gradients per instance.
gradients = array_ops.constant(
[[0.2, 0.1], [-0.5, 0.2], [1.2, 3.4], [4.0, -3.5]], shape=[4, 2])
# Each hessian is a diagonal of a full hessian matrix.
hessian_0 = [0.12, 0.11]
hessian_1 = [0.07, 0.2]
hessian_2 = [0.2, 0.9]
hessian_3 = [0.13, 2.2]
hessians = array_ops.constant(
[hessian_0, hessian_1, hessian_2, hessian_3])
partition_ids = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
class_id = -1
gradient_shape = tensor_shape.TensorShape([2])
hessian_shape = tensor_shape.TensorShape([2])
split_handler = ordinal_split_handler.DenseSplitHandler(
l1_regularization=0,
l2_regularization=1,
tree_complexity_regularization=0,
min_node_weight=0,
epsilon=0.001,
num_quantiles=3,
feature_column_group_id=0,
dense_float_column=dense_column,
init_stamp_token=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.DIAGONAL_HESSIAN)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(0, 1, class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(1, 2, class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
# During the first iteration, inequality split handlers are not going to
# have any splits. Make sure that we return not_ready in that case.
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.dense_float_binary_split
# Each leaf has 2 element vector.
self.assertEqual(2, len(left_child.value))
self.assertEqual(2, len(right_child.value))
self.assertEqual(0, split_node.feature_column)
self.assertAllClose(0.3, split_node.threshold, 1e-6)
def testGenerateFeatureSplitCandidatesInactive(self):
with self.test_session() as sess:
# The data looks like the following:
# Example | Gradients | Partition | Dense Quantile |
# i0 | (0.2, 0.12) | 0 | 1 |
# i1 | (-0.5, 0.07) | 0 | 1 |
# i2 | (1.2, 0.2) | 0 | 0 |
# i3 | (4.0, 0.13) | 1 | 1 |
dense_column = array_ops.constant([0.52, 0.52, 0.3, 0.52])
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
partition_ids = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
class_id = -1
split_handler = ordinal_split_handler.DenseSplitHandler(
l1_regularization=0.1,
l2_regularization=1,
tree_complexity_regularization=0,
min_node_weight=0,
epsilon=0.001,
num_quantiles=10,
feature_column_group_id=0,
dense_float_column=dense_column,
init_stamp_token=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, False]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(0, 1, class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([False, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(1, 2, class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
# During the first iteration, inequality split handlers are not going to
# have any splits. Make sure that we return not_ready in that case.
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
# The handler was inactive, so it shouldn't return any splits.
self.assertEqual(len(partitions), 0)
self.assertEqual(len(gains), 0)
self.assertEqual(len(splits), 0)
def testGenerateFeatureSplitCandidatesWithTreeComplexity(self):
with self.test_session() as sess:
# The data looks like the following:
# Example | Gradients | Partition | Dense Quantile |
# i0 | (0.2, 0.12) | 0 | 1 |
# i1 | (-0.5, 0.07) | 0 | 1 |
# i2 | (1.2, 0.2) | 0 | 0 |
# i3 | (4.0, 0.13) | 1 | 1 |
dense_column = array_ops.constant([0.52, 0.52, 0.3, 0.52])
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
partition_ids = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
class_id = -1
split_handler = ordinal_split_handler.DenseSplitHandler(
l1_regularization=0.1,
l2_regularization=1,
tree_complexity_regularization=0.5,
min_node_weight=0,
epsilon=0.001,
num_quantiles=10,
feature_column_group_id=0,
dense_float_column=dense_column,
init_stamp_token=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(0, 1, class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(1, 2, class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
# During the first iteration, inequality split handlers are not going to
# have any splits. Make sure that we return not_ready in that case.
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
self.assertAllEqual([0, 1], partitions)
# Check the split on partition 0.
# -(1.2 - 0.1) / (0.2 + 1)
expected_left_weight = -0.91666
# expected_left_weight * -(1.2 - 0.1)
expected_left_gain = 1.0083333333333331
# (-0.5 + 0.2 + 0.1) / (0.19 + 1)
expected_right_weight = 0.1680672
# expected_right_weight * -(-0.5 + 0.2 + 0.1))
expected_right_gain = 0.033613445378151252
# (0.2 + -0.5 + 1.2 - 0.1) ** 2 / (0.12 + 0.07 + 0.2 + 1)
expected_bias_gain = 0.46043165467625885
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.dense_float_binary_split
      # Make sure the tree complexity regularization is subtracted from the gain.
self.assertAllClose(
expected_left_gain + expected_right_gain - expected_bias_gain - 0.5,
gains[0], 0.00001)
self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
self.assertEqual(0, split_node.feature_column)
self.assertAllClose(0.3, split_node.threshold, 0.00001)
# Check the split on partition 1.
# (-4 + 0.1) / (0.13 + 1)
expected_left_weight = -3.4513274336283186
# (-4 + 0.1) ** 2 / (0.13 + 1)
expected_left_gain = 13.460176991150442
expected_right_weight = 0
expected_right_gain = 0
# (-4 + 0.1) ** 2 / (0.13 + 1)
expected_bias_gain = 13.460176991150442
# Verify candidate for partition 1, there's only one active bucket here
      # so -0.5 gain is expected (because of tree complexity).
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[1])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.dense_float_binary_split
self.assertAllClose(-0.5, gains[1], 0.00001)
self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
self.assertEqual(0, split_node.feature_column)
self.assertAllClose(0.52, split_node.threshold, 0.00001)
def testGenerateFeatureSplitCandidatesWithMinNodeWeight(self):
with self.test_session() as sess:
# The data looks like the following:
# Example | Gradients | Partition | Dense Quantile |
# i0 | (0.2, 0.12) | 0 | 1 |
# i1 | (-0.5, 0.07) | 0 | 1 |
# i2 | (1.2, 0.2) | 0 | 0 |
# i3 | (4.0, 2.0) | 1 | 1 |
dense_column = array_ops.constant([0.52, 0.52, 0.3, 0.52])
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 2])
partition_ids = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
class_id = -1
split_handler = ordinal_split_handler.DenseSplitHandler(
l1_regularization=0.1,
l2_regularization=1,
tree_complexity_regularization=0.5,
min_node_weight=1.5,
epsilon=0.001,
num_quantiles=10,
feature_column_group_id=0,
dense_float_column=dense_column,
init_stamp_token=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(0, 1, class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(1, 2, class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
# During the first iteration, inequality split handlers are not going to
# have any splits. Make sure that we return not_ready in that case.
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
self.assertAllEqual([0, 1], partitions)
# Check the gain on partition 0 to be -0.5.
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.dense_float_binary_split
      # Make sure the tree complexity regularization is subtracted from the gain.
self.assertAllClose(-0.5, gains[0], 0.00001)
self.assertEqual(0, split_node.feature_column)
# Check the split on partition 1.
# (-4 + 0.1) / (2 + 1)
expected_left_weight = -1.3
expected_right_weight = 0
# Verify candidate for partition 1, there's only one active bucket here
      # so -0.5 gain is expected (because of tree complexity).
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[1])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.dense_float_binary_split
self.assertAllClose(-0.5, gains[1], 0.00001)
self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
self.assertEqual(0, split_node.feature_column)
self.assertAllClose(0.52, split_node.threshold, 0.00001)
class SparseSplitHandlerTest(test_util.TensorFlowTestCase):
def testGenerateFeatureSplitCandidates(self):
with self.test_session() as sess:
# The data looks like the following:
# Example | Gradients | Partition | Sparse Quantile |
# i0 | (0.2, 0.12) | 0 | 1 |
# i1 | (-0.5, 0.07) | 0 | N/A |
# i2 | (1.2, 0.2) | 0 | 0 |
# i3 | (4.0, 0.13) | 1 | 1 |
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
example_partitions = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
indices = array_ops.constant([[0, 0], [2, 0], [3, 0]], dtype=dtypes.int64)
values = array_ops.constant([0.52, 0.3, 0.52])
sparse_column = sparse_tensor.SparseTensor(indices, values, [4, 1])
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
class_id = -1
split_handler = ordinal_split_handler.SparseSplitHandler(
l1_regularization=0,
l2_regularization=2,
tree_complexity_regularization=0,
min_node_weight=0,
epsilon=0.01,
num_quantiles=2,
feature_column_group_id=0,
sparse_float_column=sparse_column,
init_stamp_token=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
example_partitions,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(0, 1, class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,<|fim▁hole|> empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(1, 2, class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
# During the first iteration, inequality split handlers are not going to
# have any splits. Make sure that we return not_ready in that case.
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
self.assertAllEqual([0, 1], partitions)
# Check the split on partition 0.
# -(0.2 + 1.2) / (0.12 + 0.2 + 2)
expected_left_weight = -0.603448275862069
# (0.2 + 1.2) ** 2 / (0.12 + 0.2 + 2)
expected_left_gain = 0.8448275862068965
# 0.5 / (0.07 + 2)
expected_right_weight = 0.24154589371980678
# 0.5 ** 2 / (0.07 + 2)
expected_right_gain = 0.12077294685990339
# (0.2 + 1.2 - 0.5) ** 2 / (0.12 + 0.2 + 0.07 + 2)
expected_bias_gain = 0.3389121338912133
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.sparse_float_binary_split_default_right
self.assertAllClose(
expected_left_gain + expected_right_gain - expected_bias_gain, gains[0])
self.assertAllClose([expected_left_weight], left_child.value)
self.assertAllClose([expected_right_weight], right_child.value)
self.assertEqual(0, split_node.split.feature_column)
self.assertAllClose(0.52, split_node.split.threshold)
# Check the split on partition 1.
expected_left_weight = -1.8779342723004695
expected_right_weight = 0
# Verify candidate for partition 1, there's only one active bucket here
# so zero gain is expected.
split_info.ParseFromString(splits[1])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.sparse_float_binary_split_default_left
self.assertAllClose(0.0, gains[1])
self.assertAllClose([expected_left_weight], left_child.value)
self.assertAllClose([expected_right_weight], right_child.value)
self.assertEqual(0, split_node.split.feature_column)
self.assertAllClose(0.52, split_node.split.threshold)
def testGenerateFeatureSplitCandidatesMulticlassFullHessian(self):
with self.test_session() as sess:
# Batch is 4, 2 classes
gradients = array_ops.constant(
[[0.2, 1.4], [-0.5, 0.1], [1.2, 3], [4.0, -3]])
# 2x2 matrix for each instance
hessian_0 = [[0.12, 0.02], [0.3, 0.11]]
hessian_1 = [[0.07, -0.2], [-0.5, 0.2]]
hessian_2 = [[0.2, -0.23], [-0.8, 0.9]]
hessian_3 = [[0.13, -0.3], [-1.5, 2.2]]
hessians = array_ops.constant(
[hessian_0, hessian_1, hessian_2, hessian_3])
example_partitions = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
indices = array_ops.constant([[0, 0], [2, 0], [3, 0]], dtype=dtypes.int64)
values = array_ops.constant([0.52, 0.3, 0.52])
sparse_column = sparse_tensor.SparseTensor(indices, values, [4, 1])
gradient_shape = tensor_shape.TensorShape([2])
hessian_shape = tensor_shape.TensorShape([2, 2])
class_id = -1
split_handler = ordinal_split_handler.SparseSplitHandler(
l1_regularization=0,
l2_regularization=2,
tree_complexity_regularization=0,
min_node_weight=0,
epsilon=0.01,
num_quantiles=2,
feature_column_group_id=0,
sparse_float_column=sparse_column,
init_stamp_token=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.FULL_HESSIAN)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
example_partitions,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(0, 1, class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,
example_partitions,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(1, 2, class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.sparse_float_binary_split_default_right
# Each leaf has 2 element vector.
self.assertEqual(2, len(left_child.value))
self.assertEqual(2, len(right_child.value))
self.assertEqual(0, split_node.split.feature_column)
self.assertAllClose(0.52, split_node.split.threshold)
split_info.ParseFromString(splits[1])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.sparse_float_binary_split_default_left
self.assertEqual(2, len(left_child.value))
self.assertEqual(0, split_node.split.feature_column)
self.assertAllClose(0.52, split_node.split.threshold)
def testGenerateFeatureSplitCandidatesMulticlassDiagonalHessian(self):
with self.test_session() as sess:
# Batch is 4, 2 classes
gradients = array_ops.constant(
[[0.2, 1.4], [-0.5, 0.1], [1.2, 3], [4.0, -3]])
# Each hessian is a diagonal from a full hessian matrix.
hessian_0 = [0.12, 0.11]
hessian_1 = [0.07, 0.2]
hessian_2 = [0.2, 0.9]
hessian_3 = [0.13, 2.2]
hessians = array_ops.constant(
[hessian_0, hessian_1, hessian_2, hessian_3])
example_partitions = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
indices = array_ops.constant([[0, 0], [2, 0], [3, 0]], dtype=dtypes.int64)
values = array_ops.constant([0.52, 0.3, 0.52])
sparse_column = sparse_tensor.SparseTensor(indices, values, [4, 1])
gradient_shape = tensor_shape.TensorShape([2])
hessian_shape = tensor_shape.TensorShape([2])
class_id = -1
split_handler = ordinal_split_handler.SparseSplitHandler(
l1_regularization=0,
l2_regularization=2,
tree_complexity_regularization=0,
min_node_weight=0,
epsilon=0.01,
num_quantiles=2,
feature_column_group_id=0,
sparse_float_column=sparse_column,
init_stamp_token=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.DIAGONAL_HESSIAN)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
example_partitions,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(0, 1, class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,
example_partitions,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(1, 2, class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.sparse_float_binary_split_default_right
# Each leaf has 2 element vector.
self.assertEqual(2, len(left_child.value))
self.assertEqual(2, len(right_child.value))
self.assertEqual(0, split_node.split.feature_column)
self.assertAllClose(0.52, split_node.split.threshold)
split_info.ParseFromString(splits[1])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.sparse_float_binary_split_default_left
self.assertEqual(2, len(left_child.value))
self.assertEqual(0, split_node.split.feature_column)
self.assertAllClose(0.52, split_node.split.threshold)
def testGenerateFeatureSplitCandidatesInactive(self):
with self.test_session() as sess:
# The data looks like the following:
# Example | Gradients | Partition | Sparse Quantile |
# i0 | (0.2, 0.12) | 0 | 1 |
# i1 | (-0.5, 0.07) | 0 | N/A |
# i2 | (1.2, 0.2) | 0 | 0 |
# i3 | (4.0, 0.13) | 1 | 1 |
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
example_partitions = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
indices = array_ops.constant([[0, 0], [2, 0], [3, 0]], dtype=dtypes.int64)
values = array_ops.constant([0.52, 0.3, 0.52])
sparse_column = sparse_tensor.SparseTensor(indices, values, [4, 1])
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
class_id = -1
split_handler = ordinal_split_handler.SparseSplitHandler(
l1_regularization=0,
l2_regularization=2,
tree_complexity_regularization=0,
min_node_weight=0,
epsilon=0.01,
num_quantiles=2,
feature_column_group_id=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
sparse_float_column=sparse_column,
init_stamp_token=0,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
example_partitions,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, False]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(0, 1, class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,
example_partitions,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([False, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(1, 2, class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
# During the first iteration, inequality split handlers are not going to
# have any splits. Make sure that we return not_ready in that case.
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
      # The handler was inactive so it shouldn't have produced any splits.
self.assertEqual(len(partitions), 0)
self.assertEqual(len(gains), 0)
self.assertEqual(len(splits), 0)
def testEmpty(self):
with self.test_session() as sess:
indices = array_ops.constant([], dtype=dtypes.int64, shape=[0, 2])
# No values in this feature column in this mini-batch.
values = array_ops.constant([], dtype=dtypes.float32)
sparse_column = sparse_tensor.SparseTensor(indices, values, [4, 1])
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
class_id = -1
split_handler = ordinal_split_handler.SparseSplitHandler(
l1_regularization=0,
l2_regularization=2,
tree_complexity_regularization=0,
min_node_weight=0,
epsilon=0.01,
num_quantiles=2,
feature_column_group_id=0,
sparse_float_column=sparse_column,
init_stamp_token=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS)
resources.initialize_resources(resources.shared_resources()).run()
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
partition_ids = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(0, 1, class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(1, 2, class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
self.assertEqual(len(partitions), 0)
self.assertEqual(len(gains), 0)
self.assertEqual(len(splits), 0)
if __name__ == "__main__":
googletest.main()<|fim▁end|> | example_partitions,
gradients,
hessians,
empty_gradients, |
<|file_name|>dag.js<|end_file_name|><|fim▁begin|>'use strict';
var _ = require('./underscore-mixins.js');
var fnToNode = new Map();
exports.load = function (nodes, prefix) {
let uuid = require('uuid');
if (prefix != null && prefix.length > 0) {
prefix = prefix + ':';
} else {
prefix = '';
}
_(nodes).forEach(function (fn, name) {
if (_.isFunction(fn)) {
fnToNode.set(fn, {
name: prefix + name,
id: uuid.v4(),
fn: fn,
ingoingLinks: new Map(),
outgoingLinks: new Map(),
counter: 0
});
} else {
exports.load(fn, name);
}
});
};
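// Usage sketch (illustrative names, not from the original source): load({ build: { css: cssFn, js: jsFn } })
// recurses into the nested object and registers nodes named "build:css" and "build:js";
// top-level functions keep their plain names.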
exports.clone = function (fn, name) {
var node = fnToNode.get(fn);
var newNodeLoader = {};
newNodeLoader[name] = node.fn.bind({});
exports.load(newNodeLoader);
return newNodeLoader[name];
};
exports.debug = function () {
fnToNode.forEach(function (node) {
console.log(
_(Array.from(node.ingoingLinks.keys())).pluck('name'),
node.name,
_(Array.from(node.outgoingLinks.keys())).pluck('name')
);
});
};
<|fim▁hole|>var defaultLinkOptions = {
primary: true
};
exports.link = function (fn) {
var node = fnToNode.get(fn);
return {
to: function (toFn, options) {
options = _(options || {}).defaults(defaultLinkOptions);
var toNode = fnToNode.get(toFn);
toNode.ingoingLinks.set(node, options);
node.outgoingLinks.set(toNode, options);
}
};
};
exports.unlink = function (fn) {
var node = fnToNode.get(fn);
return {
from: function (fromFn) {
var fromNode = fnToNode.get(fromFn);
fromNode.ingoingLinks.delete(node);
node.outgoingLinks.delete(fromNode);
}
};
};
exports.remove = function (fn) {
var node = fnToNode.get(fn),
todo = [];
node.ingoingLinks.forEach(function (inOptions, inNode) {
todo.push(function () {
exports.unlink(inNode.fn).from(fn);
});
node.outgoingLinks.forEach(function (outOptions, outNode) {
todo.push(function () {
exports.unlink(fn).from(outNode.fn);
exports.link(inNode.fn).to(outNode.fn, outOptions);
});
});
});
_(todo).invoke('call');
};
exports.replace = function (fn) {
var node = fnToNode.get(fn),
todo = [];
return {
by: function (byFn) {
node.ingoingLinks.forEach(function (inOptions, inNode) {
todo.push(function () {
exports.unlink(inNode.fn).from(node.fn);
exports.link(inNode.fn).to(byFn, inOptions);
});
});
node.outgoingLinks.forEach(function (outOptions, outNode) {
todo.push(function () {
exports.unlink(node.fn).from(outNode.fn);
exports.link(byFn).to(outNode.fn, outOptions);
});
});
_(todo).invoke('call');
}
}
};
exports.before = function (fn) {
var node = fnToNode.get(fn),
todo = [];
return {
insert: function (beforeFn) {
node.ingoingLinks.forEach(function (inOptions, inNode) {
todo.push(function () {
exports.unlink(inNode.fn).from(node.fn);
exports.link(inNode.fn).to(beforeFn, inOptions);
});
});
todo.push(function () {
exports.link(beforeFn).to(node.fn);
});
_(todo).invoke('call');
}
};
};
exports.after = function (fn) {
var node = fnToNode.get(fn),
todo = [];
return {
insert: function (afterFn) {
node.outgoingLinks.forEach(function (outOptions, outNode) {
todo.push(function () {
exports.unlink(node.fn).from(outNode.fn);
exports.link(afterFn).to(outNode.fn, outOptions);
});
});
todo.push(function () {
exports.link(node.fn).to(afterFn);
});
_(todo).invoke('call');
}
};
};
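// Usage sketch (illustrative): after(compileFn).insert(minifyFn) re-routes every outgoing link of
// compileFn through minifyFn, keeping the original link options on the re-created edges, and then
// links compileFn -> minifyFn; before(...).insert(...) is the mirror image for ingoing links.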
var runEntryNode;
function linksToStream(links) {
let plumber = require('gulp-plumber');
return _(links).chain()
.pluck('fn')
.map(exports.run)
.compact()
.concatVinylStreams()
.value()
.pipe(plumber());
}
exports.run = function (fn) {
var result,
node = fnToNode.get(fn);
runEntryNode = runEntryNode || node;
if (node.lastResult != null) {
return node.lastResult;
}
var primaryStreams = [],
secondaryStreams = [];
node.ingoingLinks.forEach(function (options, node) {
if (options.primary) {
primaryStreams.push(node);
} else {
secondaryStreams.push(node);
}
});
var primaryStream = linksToStream(primaryStreams),
secondaryStream = linksToStream(secondaryStreams);
result = fn(primaryStream, secondaryStream);
if (runEntryNode === node) {
fnToNode.forEach(function (node) {
delete node.lastResult;
});
}
else {
node.lastResult = result;
}
return result;
};
function isNotAlone(node) {
return (node.ingoingLinks.size + node.outgoingLinks.size) > 0;
}
function isSource(node) {
return node.ingoingLinks.size === 0;
}
function isOutput(node) {
return node.outgoingLinks.size === 0;
}
exports.renderGraph = function (renderer, filepath, callback) {
let graphdot = require('./graphdot.js');
var graph = graphdot.digraph('one_gulp_streams_graph');
graph.set('label', 'one-gulp streams graph\n\n');
graph.set('fontname', 'sans-serif');
graph.set('fontsize', '20');
graph.set('labelloc', 't');
graph.set('pad', '0.5,0.5');
graph.set('nodesep', '0.3');
graph.set('splines', 'spline');
graph.set('ranksep', '1');
graph.set('rankdir', 'LR');
var promises = [];
fnToNode.forEach(function (node) {
var nodeOptions = {
label: node.name,
shape: 'rectangle',
fontname: 'sans-serif',
style: 'bold',
margin: '0.2,0.1'
};
if (isSource(node)) {
nodeOptions.shape = 'ellipse';
nodeOptions.color = 'lavender';
nodeOptions.fontcolor = 'lavender';
nodeOptions.margin = '0.1,0.1';
}
if (isOutput(node)) {
nodeOptions.color = 'limegreen';
nodeOptions.fontcolor = 'white';
nodeOptions.style = 'filled';
nodeOptions.margin = '0.25,0.25';
}
if (isNotAlone(node)) {
node.graphNode = graph.addNode(node.id, nodeOptions);
if (isSource(node)) {
var donePromise = new Promise(function (resolve, reject) {
node.fn()
.on('data', function () {
node.counter += 1;
node.graphNode.set('color', 'mediumslateblue');
node.graphNode.set('fontcolor', 'mediumslateblue');
node.graphNode.set('label', node.name + ' (' + node.counter + ')');
})
.on('error', reject)
.on('end', resolve);
});
promises.push(donePromise);
}
}
});
Promise.all(promises).then(function () {
fnToNode.forEach(function (node) {
node.ingoingLinks.forEach(function (options, linkedNode) {
var edgeOptions = {};
if (options.primary) {
edgeOptions.penwidth = '1.5';
} else {
edgeOptions.arrowhead = 'empty';
edgeOptions.style = 'dashed';
}
if (isSource(linkedNode)) {
edgeOptions.color = linkedNode.graphNode.get('color');
}
graph.addEdge(linkedNode.graphNode, node.graphNode, edgeOptions);
});
});
graphdot.renderToSvgFile(graph, renderer, filepath, callback);
});
};<|fim▁end|> | |
<|file_name|>Options.js<|end_file_name|><|fim▁begin|>jQuery(document).ready(function(){
var numSaves, _autoSaveChanges;<|fim▁hole|> systemSettingSave = 0;
_autoSaveChanges = autoSaveChanges;
numSaves = 0;
autoSaveChanges = function() {
numSaves += 1;
return _autoSaveChanges.apply(this, arguments);
}
},
teardown: function() {
numSaves = null;
config.options.chkAutoSave = false;
autoSaveChanges = _autoSaveChanges;
}
});
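	// The wrapper installed in setup() is a minimal call-counting spy around autoSaveChanges;
	// teardown() restores the original function so later tests are unaffected.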
test("save multiple system settings", function() {
saveSystemSetting("foo", true);
saveSystemSetting("foo", false);
saveSystemSetting("foo", true);
strictEqual(numSaves, 0, "The save is asynchronous so no saves have yet been made");
strictEqual(systemSettingSave > 0, true, "However there should be a timeout in progress");
});
});<|fim▁end|> | module("TiddlyWiki options", {
setup: function() {
config.options.chkAutoSave = true; |
<|file_name|>OntologyHelperBuilder.java<|end_file_name|><|fim▁begin|>/**
* Copyright (c) 2016 Lemur Consulting Ltd.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.co.flax.biosolr.elasticsearch;
import org.apache.commons.lang3.StringUtils;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.threadpool.ThreadPool;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import uk.co.flax.biosolr.elasticsearch.mapper.ontology.ElasticOntologyHelperFactory;
import uk.co.flax.biosolr.elasticsearch.mapper.ontology.OntologySettings;
import uk.co.flax.biosolr.ontology.core.OntologyHelper;
import uk.co.flax.biosolr.ontology.core.OntologyHelperException;
import java.io.Closeable;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
/**
* Created by mlp on 09/02/16.
* @author mlp
*/
public class OntologyHelperBuilder implements Closeable {
private static final Logger LOGGER = LoggerFactory.getLogger(OntologyHelperBuilder.class);
private ThreadPool threadPool;
private static OntologyHelperBuilder instance;
private Map<String, OntologyHelper> helpers = new ConcurrentHashMap<>();
@Inject
public OntologyHelperBuilder(ThreadPool threadPool) {
this.threadPool = threadPool;
setInstance(this);
}
private static void setInstance(OntologyHelperBuilder odb) {
instance = odb;
}
public static OntologyHelperBuilder getInstance() {
return instance;
}
private OntologyHelper getHelper(OntologySettings settings) throws OntologyHelperException {
String helperKey = buildHelperKey(settings);
OntologyHelper helper = helpers.get(helperKey);
if (helper == null) {
helper = new ElasticOntologyHelperFactory(settings).buildOntologyHelper();
OntologyCheckRunnable checker = new OntologyCheckRunnable(helperKey, settings.getThreadCheckMs());
threadPool.scheduleWithFixedDelay(checker, TimeValue.timeValueMillis(settings.getThreadCheckMs()));
helpers.put(helperKey, helper);
helper.updateLastCallTime();
}
return helper;
}
public static OntologyHelper getOntologyHelper(OntologySettings settings) throws OntologyHelperException {
OntologyHelperBuilder builder = getInstance();
return builder.getHelper(settings);
}
@Override
public void close() {
// Explicitly dispose of any remaining helpers
for (Map.Entry<String, OntologyHelper> helperEntry : helpers.entrySet()) {
if (helperEntry.getValue() != null) {
LOGGER.info("Disposing of helper for {}", helperEntry.getKey());
helperEntry.getValue().dispose();
}
}
}
private static String buildHelperKey(OntologySettings settings) {<|fim▁hole|> if (StringUtils.isNotBlank(settings.getOntologyUri())) {
key = settings.getOntologyUri();
} else {
if (StringUtils.isNotBlank(settings.getOlsOntology())) {
key = settings.getOlsBaseUrl() + "_" + settings.getOlsOntology();
} else {
key = settings.getOlsBaseUrl();
}
}
return key;
}
private final class OntologyCheckRunnable implements Runnable {
final String threadKey;
final long deleteCheckMs;
public OntologyCheckRunnable(String threadKey, long deleteCheckMs) {
this.threadKey = threadKey;
this.deleteCheckMs = deleteCheckMs;
}
@Override
public void run() {
OntologyHelper helper = helpers.get(threadKey);
if (helper != null) {
// Check if the last call time was longer ago than the maximum
if (System.currentTimeMillis() - deleteCheckMs > helper.getLastCallTime()) {
// Assume helper is out of use - dispose of it to allow memory to be freed
helper.dispose();
helpers.remove(threadKey);
}
}
}
}
}<|fim▁end|> | String key;
|
<|file_name|>xrandr.py<|end_file_name|><|fim▁begin|>import os
from functools import reduce, lru_cache
import logging
import re
import subprocess
from randrctl import DISPLAY, XAUTHORITY
from randrctl.exception import XrandrException, ParseException
from randrctl.model import Profile, Viewport, XrandrConnection, Display
logger = logging.getLogger(__name__)
class Xrandr:
"""
Interface for xrandr application. Provides methods for calling xrandr operating with python objects such as
randrctl.profile.Profile
"""
EXECUTABLE = "/usr/bin/xrandr"
OUTPUT_KEY = "--output"
MODE_KEY = "--mode"
POS_KEY = "--pos"
ROTATE_KEY = "--rotate"
PANNING_KEY = "--panning"
RATE_KEY = "--rate"
SCALE_KEY = "--scale"
PRIMARY_KEY = "--primary"
CRTC_KEY = "--crtc"
QUERY_KEY = "-q"
VERBOSE_KEY = "--verbose"
OFF_KEY = "--off"
OUTPUT_DETAILS_REGEX = re.compile(
'(?P<primary>primary )?(?P<geometry>[\dx\+]+) (?:(?P<rotate>\w+) )?.*?(?:panning (?P<panning>[\dx\+]+))?$')
MODE_REGEX = re.compile("(\d+x\d+)\+(\d+\+\d+)")
CURRENT_MODE_REGEX = re.compile("\s*(\S+)\s+([0-9\.]+)(.*$)")
def __init__(self, display: str, xauthority: str):
env = dict(os.environ)
if display:
env[DISPLAY] = display
if xauthority:
env[XAUTHORITY] = xauthority
self.env = env
def apply(self, profile: Profile):
"""
Apply given profile by calling xrandr
"""
logger.debug("Applying profile %s", profile.name)
args = self._compose_mode_args(profile, self.get_all_outputs())
self._xrandr(*args)
@lru_cache()
def _xrandr(self, *args):
"""
Perform call to xrandr executable with passed arguments.
Returns subprocess.Popen object
"""
args = list(args)
logger.debug("Calling xrandr with args %s", args)
args.insert(0, self.EXECUTABLE)
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False, env=self.env)
err = p.stderr.readlines()
if err:
# close descriptors
p.stderr.close()
p.stdout.close()
err_str = ''.join(map(lambda x: x.decode(), err)).strip()
raise XrandrException(err_str, args)
out = list(map(lambda x: x.decode(), p.stdout.readlines()))
if out:
out.pop(0) # remove first line. It describes Screen
return out
def _compose_mode_args(self, profile: Profile, xrandr_connections: list):
"""
Composes list of arguments to xrandr to apply profile settings and disable the other outputs
"""
args = []
active_names = []
for name, o in profile.outputs.items():
active_names.append(name)
args.append(self.OUTPUT_KEY)
args.append(name)
args.append(self.MODE_KEY)
args.append(o.mode)
args.append(self.POS_KEY)
args.append(o.pos)
args.append(self.ROTATE_KEY)
args.append(o.rotate)
args.append(self.PANNING_KEY)
args.append(o.panning)
args.append(self.SCALE_KEY)
args.append(o.scale)
if o.rate:
args.append(self.RATE_KEY)
args.append(str(o.rate))
if name == profile.primary:
args.append(self.PRIMARY_KEY)
if o.crtc is not None:
args.append(self.CRTC_KEY)
args.append(str(o.crtc))
# turn off the others
for c in xrandr_connections:
if active_names.count(c.name) == 0:
args.append(self.OUTPUT_KEY)
args.append(c.name)
args.append(self.OFF_KEY)
return args
def get_all_outputs(self):
"""
Query xrandr for all supported outputs.
Performs call to xrandr with -q key and parses output.
Returns list of outputs with some properties missing (only name and status are guaranteed)
"""
outputs = []
items = self._xrandr(self.QUERY_KEY)
items = self._group_query_result(items)
logger.debug("Detected total %d outputs", len(items))
crtcs = self._get_verbose_fields('CRTC')
for i in items:
o = self._parse_xrandr_connection(i)
o.crtc = int(crtcs[o.name]) if o.name in crtcs and len(crtcs[o.name]) else None
outputs.append(o)
return outputs
def get_connected_outputs(self):
"""
Query xrandr and return list of connected outputs.
Performs call to xrandr with -q and --verbose keys.
Returns list of connected outputs with all properties set
"""
outputs = list(filter(lambda o: o.display is not None, self.get_all_outputs()))
edids = self._get_verbose_fields('EDID')
for o in outputs:
o.display.edid = edids[o.name]
if logger.isEnabledFor(logging.DEBUG):
logger.debug("Connected outputs: %s", list(map(lambda o: o.name, outputs)))
return outputs
def _get_verbose_fields(self, field):
"""
Get particular field of all connected displays.
Return dictionary of {"connection_name": field_value}
"""
ret = dict()
items = self._xrandr(self.QUERY_KEY, self.VERBOSE_KEY)
items = self._group_query_result(items)
items = filter(lambda x: x[0].find(' connected') > 0, items)
for i in items:
name_idx = i[0].find(' ')
name = i[0][:name_idx]
ret[name] = self._field_from_query_item(i, field)
return ret
def _field_from_query_item(self, item_lines: list, field: str):
"""
Extracts display field from xrandr --verbose output
"""
val = ''
indent = ''
in_field = False
lines_collected = 0
for i, line in enumerate(item_lines):
m = re.match(r'(\s+)(.*):\s*(.*)$', line)
if m and m.group(2).lower() == field.lower():
indent = m.group(1)
in_field = True
val = m.group(3).strip()
elif in_field and m and (len(indent) >= len(m.group(1)) or m.group(1) == indent):
return val<|fim▁hole|> elif in_field:
val += line.strip()
lines_collected += 1
if field == 'EDID' and lines_collected >= 8:
return val
return val
def _parse_xrandr_connection(self, item_lines: list):
"""
Creates XrandrConnection from lines returned by xrandr --query.
Example:
LVDS1 connected primary 1366x768+0+312 (normal left inverted right x axis y axis) 277mm x 156mm
1366x768 60.02*+
1024x768 60.00
"""
connection_info = item_lines[0]
name, status, state = connection_info.split(' ', 2)
if status != 'connected':
# We are not connected, do not parse the rest.
return XrandrConnection(name)
        # We are connected; parse the connected display.
display = self._parse_display(item_lines[1:])
if not display.is_on():
# inactive output
return XrandrConnection(name, display)
parsed = self.OUTPUT_DETAILS_REGEX.match(state)
if parsed is None:
raise ParseException(name, status, state)
primary = parsed.group('primary') is not None
rotate = parsed.group('rotate')
panning = parsed.group('panning')
geometry = parsed.group('geometry')
size, pos = self._parse_geometry(geometry)
is_rotated = rotate in ['left', 'right']
if is_rotated:
size = 'x'.join(size.split('x')[::-1])
scale = '1x1'
if size != display.mode:
dw, dh = map(lambda s: int(s), display.mode.split('x'))
vw, vh = map(lambda s: int(s), size.split('x'))
sw, sh = vw / dw, vh / dh
if is_rotated:
sw, sh = sh, sw
scale = "{}x{}".format(sw, sh)
viewport = Viewport(size, pos, rotate, panning, scale)
return XrandrConnection(name, display, viewport, primary)
def _parse_display(self, lines: list):
supported_modes = []
preferred_mode = None
current_mode = None
current_rate = None
for mode_line in lines:
mode_line = mode_line.strip()
(mode, rate, extra) = self.CURRENT_MODE_REGEX.match(mode_line).groups()
current = (extra.find("*") >= 0)
preferred = (extra.find("+") >= 0)
supported_modes.append(mode)
if current:
current_mode = mode
current_rate = rate
if preferred:
preferred_mode = mode
return Display(supported_modes, preferred_mode, current_mode, current_rate)
def _group_query_result(self, query_result: list):
"""
Group input list of lines such that every line starting with a non-whitespace character is a start of a
group, and every subsequent line starting with whitespace is a member of that group.
:param query_result: list of lines
:return: list of lists of lines
"""
def group_fn(result, line):
            # On the first reduce step `result` is still the first raw line (a str),
            # so start the list of groups from it.
if type(result) is str:
if line.startswith(' ') or line.startswith('\t'):
return [[result, line]]
else:
return [[result], [line]]
else:
if line.startswith(' ') or line.startswith('\t'):
last = result[len(result) - 1]
last.append(line)
return result
else:
result.append([line])
return result
# TODO rewrite in imperative code
grouped = reduce(lambda result, line: group_fn(result, line), query_result)
return grouped
def _parse_geometry(self, s: str):
"""
Parses geometry string (i.e. 1111x2222+333+444) into tuple (widthxheight, leftxtop)
"""
match = self.MODE_REGEX.match(s)
mode = match.group(1)
pos = match.group(2).replace('+', 'x')
return mode, pos<|fim▁end|> | elif in_field and not line.startswith(indent):
return val |
<|file_name|>test_convert.py<|end_file_name|><|fim▁begin|>#
# Copyright © 2012 - 2021 Michal Čihař <[email protected]>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
"""File format specific behavior."""
from weblate.formats.convert import (
HTMLFormat,
IDMLFormat,
OpenDocumentFormat,
PlainTextFormat,
WindowsRCFormat,
)
from weblate.formats.helpers import BytesIOMode
from weblate.formats.tests.test_formats import AutoFormatTest
from weblate.trans.tests.utils import get_test_file
IDML_FILE = get_test_file("en.idml")
HTML_FILE = get_test_file("cs.html")
OPENDOCUMENT_FILE = get_test_file("cs.odt")
TEST_RC = get_test_file("cs-CZ.rc")
TEST_TXT = get_test_file("cs.txt")
class ConvertFormatTest(AutoFormatTest):
NEW_UNIT_MATCH = None
EXPECTED_FLAGS = ""
def parse_file(self, filename):
return self.FORMAT(filename, template_store=self.FORMAT(filename))
class HTMLFormatTest(ConvertFormatTest):
FORMAT = HTMLFormat
FILE = HTML_FILE
MIME = "text/html"
EXT = "html"
COUNT = 5
MASK = "*/translations.html"
EXPECTED_PATH = "cs_CZ/translations.html"
FIND_CONTEXT = "+html.body.p:5-1"
FIND_MATCH = "Orangutan has five bananas."
MATCH = b"<body>"
NEW_UNIT_MATCH = None
BASE = HTML_FILE
EXPECTED_FLAGS = ""
EDIT_OFFSET = 1
class OpenDocumentFormatTest(ConvertFormatTest):
FORMAT = OpenDocumentFormat
FILE = OPENDOCUMENT_FILE
MIME = "application/vnd.oasis.opendocument.text"
EXT = "odt"
COUNT = 4
MASK = "*/translations.odt"
EXPECTED_PATH = "cs_CZ/translations.odt"
FIND_CONTEXT = (
"odf///office:document-content[0]/office:body[0]/office:text[0]/text:p[1]"
)
FIND_MATCH = "Orangutan has five bananas."
MATCH = b"PK"
NEW_UNIT_MATCH = None
BASE = OPENDOCUMENT_FILE
EXPECTED_FLAGS = ""
EDIT_OFFSET = 1
@staticmethod
def extract_document(content):
return bytes(
OpenDocumentFormat.convertfile(BytesIOMode("test.odt", content), None)
).decode()
def assert_same(self, newdata, testdata):
self.assertEqual(
self.extract_document(newdata),
self.extract_document(testdata),
)
class IDMLFormatTest(ConvertFormatTest):
FORMAT = IDMLFormat
FILE = IDML_FILE
MIME = "application/octet-stream"
EXT = "idml"
COUNT = 6
MASK = "*/translations.idml"
EXPECTED_PATH = "cs_CZ/translations.idml"
FIND_CONTEXT = (
"idPkg:Story[0]/{}Story[0]/{}XMLElement[0]/{}ParagraphStyleRange[0]"
"Stories/Story_mainmainmainmainmainmainmainmainmainmainmainu188.xml"
)
FIND_MATCH = """<g id="0"><g id="1">THE HEADLINE HERE</g></g>"""
MATCH = b"PK"
NEW_UNIT_MATCH = None
BASE = IDML_FILE
EXPECTED_FLAGS = ""
EDIT_OFFSET = 1
@staticmethod
def extract_document(content):
return bytes(
IDMLFormat.convertfile(BytesIOMode("test.idml", content), None)
).decode()
def assert_same(self, newdata, testdata):
self.assertEqual(
self.extract_document(newdata),
self.extract_document(testdata),
)
class WindowsRCFormatTest(ConvertFormatTest):
FORMAT = WindowsRCFormat
FILE = TEST_RC
BASE = TEST_RC
MIME = "text/plain"
EXT = "rc"<|fim▁hole|> COUNT = 5
MASK = "rc/*.rc"
EXPECTED_PATH = "rc/cs-CZ.rc"
MATCH = "STRINGTABLE"
FIND_CONTEXT = "STRINGTABLE.IDS_MSG1"
FIND_MATCH = "Hello, world!\n"
EDIT_OFFSET = 1
class PlainTextFormatTest(ConvertFormatTest):
FORMAT = PlainTextFormat
FILE = TEST_TXT
BASE = TEST_TXT
MIME = "text/plain"
EXT = "txt"
COUNT = 5
MASK = "txt/*.txt"
EXPECTED_PATH = "txt/cs_CZ.txt"
MATCH = "Hello"
FIND_CONTEXT = "cs.txt:2"
FIND_MATCH = "Hello, world!"
EDIT_OFFSET = 1<|fim▁end|> | |
<|file_name|>bitcoin_zh_CN.ts<|end_file_name|><|fim▁begin|><?xml version="1.0" ?><!DOCTYPE TS><TS language="zh_CN" version="2.0">
<defaultcodec>UTF-8</defaultcodec>
<context>
<name>AboutDialog</name>
<message>
<location filename="../forms/aboutdialog.ui" line="+14"/>
<source>About Communistcoin</source>
<translation>关于莱特币</translation>
</message>
<message>
<location line="+39"/>
<source><b>Communistcoin</b> version</source>
<translation><b>莱特币</b>版本</translation>
</message>
<message>
<location line="+57"/>
<source>
This is experimental software.
Distributed under the MIT/X11 software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php.
This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit (http://www.openssl.org/) and cryptographic software written by Eric Young ([email protected]) and UPnP software written by Thomas Bernard.</source>
<translation>
This is experimental software.
Distributed under the MIT/X11 software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php.
This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit (http://www.openssl.org/) and cryptographic software written by Eric Young ([email protected]) and UPnP software written by Thomas Bernard.</translation>
</message>
<message>
<location filename="../aboutdialog.cpp" line="+14"/>
<source>Copyright</source>
<translation>版权</translation>
</message>
<message>
<location line="+0"/>
<source>The Communistcoin developers</source>
<translation>Communistcoin-qt 客户端开发团队</translation>
</message>
</context>
<context>
<name>AddressBookPage</name>
<message>
<location filename="../forms/addressbookpage.ui" line="+14"/>
<source>Address Book</source>
<translation>通讯录</translation>
</message>
<message>
<location line="+19"/>
<source>Double-click to edit address or label</source>
<translation>双击以编辑地址或标签</translation>
</message>
<message>
<location line="+27"/>
<source>Create a new address</source>
<translation>创建新地址</translation>
</message>
<message>
<location line="+14"/>
<source>Copy the currently selected address to the system clipboard</source>
<translation>复制当前选中地址到系统剪贴板</translation>
</message>
<message>
<location line="-11"/>
<source>&New Address</source>
<translation>&新建地址</translation>
</message>
<message>
<location filename="../addressbookpage.cpp" line="+63"/>
<source>These are your Communistcoin addresses for receiving payments. You may want to give a different one to each sender so you can keep track of who is paying you.</source>
<translation>这是您用来收款的莱特币地址。为了标记不同的资金来源,建议为每个付款人保留不同的收款地址。</translation>
</message>
<message>
<location filename="../forms/addressbookpage.ui" line="+14"/>
<source>&Copy Address</source>
<translation>&复制地址</translation>
</message>
<message>
<location line="+11"/>
<source>Show &QR Code</source>
<translation>显示二维码</translation>
</message>
<message>
<location line="+11"/>
<source>Sign a message to prove you own a Communistcoin address</source>
<translation>签名消息,证明这个地址属于您。</translation>
</message>
<message>
<location line="+3"/>
<source>Sign &Message</source>
<translation>对消息签名</translation>
</message>
<message>
<location line="+25"/>
<source>Delete the currently selected address from the list</source>
<translation>从列表中删除选中的地址</translation>
</message>
<message>
<location line="+27"/>
<source>Export the data in the current tab to a file</source>
<translation>导出当前数据到文件</translation>
</message>
<message>
<location line="+3"/>
<source>&Export</source>
<translation>&导出</translation>
</message>
<message>
<location line="-44"/>
<source>Verify a message to ensure it was signed with a specified Communistcoin address</source>
<translation>验证消息,确保消息是由指定的莱特币地址签名过的。</translation>
</message>
<message>
<location line="+3"/>
<source>&Verify Message</source><|fim▁hole|> <message>
<location line="+14"/>
<source>&Delete</source>
<translation>&删除</translation>
</message>
<message>
<location filename="../addressbookpage.cpp" line="-5"/>
<source>These are your Communistcoin addresses for sending payments. Always check the amount and the receiving address before sending coins.</source>
<translation>这是您用来付款的莱特币地址。在付款前,请总是核实付款金额和收款地址。</translation>
</message>
<message>
<location line="+13"/>
<source>Copy &Label</source>
<translation>复制 &标签</translation>
</message>
<message>
<location line="+1"/>
<source>&Edit</source>
<translation>&编辑</translation>
</message>
<message>
<location line="+1"/>
<source>Send &Coins</source>
<translation>付款</translation>
</message>
<message>
<location line="+260"/>
<source>Export Address Book Data</source>
<translation>导出通讯录数据</translation>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
<translation>逗号分隔文件 (*.csv)</translation>
</message>
<message>
<location line="+13"/>
<source>Error exporting</source>
<translation>导出错误</translation>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
<translation>无法写入文件 %1。</translation>
</message>
</context>
<context>
<name>AddressTableModel</name>
<message>
<location filename="../addresstablemodel.cpp" line="+144"/>
<source>Label</source>
<translation>标签</translation>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation>地址</translation>
</message>
<message>
<location line="+36"/>
<source>(no label)</source>
<translation>(没有标签)</translation>
</message>
</context>
<context>
<name>AskPassphraseDialog</name>
<message>
<location filename="../forms/askpassphrasedialog.ui" line="+26"/>
<source>Passphrase Dialog</source>
<translation>密码对话框</translation>
</message>
<message>
<location line="+21"/>
<source>Enter passphrase</source>
<translation>输入密码</translation>
</message>
<message>
<location line="+14"/>
<source>New passphrase</source>
<translation>新密码</translation>
</message>
<message>
<location line="+14"/>
<source>Repeat new passphrase</source>
<translation>重复新密码</translation>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="+33"/>
<source>Enter the new passphrase to the wallet.<br/>Please use a passphrase of <b>10 or more random characters</b>, or <b>eight or more words</b>.</source>
<translation>输入钱包的新密码。<br/>使用的密码请至少包含<b>10个以上随机字符</>,或者是<b>8个以上的单词</b>。</translation>
</message>
<message>
<location line="+1"/>
<source>Encrypt wallet</source>
<translation>加密钱包</translation>
</message>
<message>
<location line="+3"/>
<source>This operation needs your wallet passphrase to unlock the wallet.</source>
<translation>该操作需要您首先使用密码解锁钱包。</translation>
</message>
<message>
<location line="+5"/>
<source>Unlock wallet</source>
<translation>解锁钱包</translation>
</message>
<message>
<location line="+3"/>
<source>This operation needs your wallet passphrase to decrypt the wallet.</source>
<translation>该操作需要您首先使用密码解密钱包。</translation>
</message>
<message>
<location line="+5"/>
<source>Decrypt wallet</source>
<translation>解密钱包</translation>
</message>
<message>
<location line="+3"/>
<source>Change passphrase</source>
<translation>修改密码</translation>
</message>
<message>
<location line="+1"/>
<source>Enter the old and new passphrase to the wallet.</source>
<translation>请输入钱包的旧密码与新密码。</translation>
</message>
<message>
<location line="+46"/>
<source>Confirm wallet encryption</source>
<translation>确认加密钱包</translation>
</message>
<message>
<location line="+1"/>
<source>Warning: If you encrypt your wallet and lose your passphrase, you will <b>LOSE ALL OF YOUR LITECOINS</b>!</source>
<translation>警告:如果您加密了您的钱包,但是忘记了密码,你将会<b>丢失所有的莱特币</b>!</translation>
</message>
<message>
<location line="+0"/>
<source>Are you sure you wish to encrypt your wallet?</source>
<translation>您确定需要为钱包加密吗?</translation>
</message>
<message>
<location line="+15"/>
<source>IMPORTANT: Any previous backups you have made of your wallet file should be replaced with the newly generated, encrypted wallet file. For security reasons, previous backups of the unencrypted wallet file will become useless as soon as you start using the new, encrypted wallet.</source>
<translation>重要提示:您以前备份的钱包文件应该替换成最新生成的加密钱包文件(重新备份)。从安全性上考虑,您以前备份的未加密的钱包文件,在您使用新的加密钱包后将无效,请重新备份。</translation>
</message>
<message>
<location line="+100"/>
<location line="+24"/>
<source>Warning: The Caps Lock key is on!</source>
<translation>警告:大写锁定键处于打开状态!</translation>
</message>
<message>
<location line="-130"/>
<location line="+58"/>
<source>Wallet encrypted</source>
<translation>钱包已加密</translation>
</message>
<message>
<location line="-56"/>
<source>Communistcoin will close now to finish the encryption process. Remember that encrypting your wallet cannot fully protect your communistcoins from being stolen by malware infecting your computer.</source>
<translation>将关闭软件以完成加密过程。 请您谨记:钱包加密并不是万能的,电脑中毒,您的莱特币还是有可能丢失。</translation>
</message>
<message>
<location line="+13"/>
<location line="+7"/>
<location line="+42"/>
<location line="+6"/>
<source>Wallet encryption failed</source>
<translation>钱包加密失败</translation>
</message>
<message>
<location line="-54"/>
<source>Wallet encryption failed due to an internal error. Your wallet was not encrypted.</source>
<translation>由于一个本地错误,加密钱包操作已经失败。您的钱包没有被加密。</translation>
</message>
<message>
<location line="+7"/>
<location line="+48"/>
<source>The supplied passphrases do not match.</source>
<translation>密码不匹配。</translation>
</message>
<message>
<location line="-37"/>
<source>Wallet unlock failed</source>
<translation>钱包解锁失败</translation>
</message>
<message>
<location line="+1"/>
<location line="+11"/>
<location line="+19"/>
<source>The passphrase entered for the wallet decryption was incorrect.</source>
<translation>用于解密钱包的密码不正确。</translation>
</message>
<message>
<location line="-20"/>
<source>Wallet decryption failed</source>
<translation>钱包解密失败。</translation>
</message>
<message>
<location line="+14"/>
<source>Wallet passphrase was successfully changed.</source>
<translation>修改钱包密码成功。</translation>
</message>
</context>
<context>
<name>BitcoinGUI</name>
<message>
<location filename="../bitcoingui.cpp" line="+233"/>
<source>Sign &message...</source>
<translation>对&消息签名...</translation>
</message>
<message>
<location line="+280"/>
<source>Synchronizing with network...</source>
<translation>正在与网络同步...</translation>
</message>
<message>
<location line="-349"/>
<source>&Overview</source>
<translation>&概况</translation>
</message>
<message>
<location line="+1"/>
<source>Show general overview of wallet</source>
<translation>显示钱包概况</translation>
</message>
<message>
<location line="+20"/>
<source>&Transactions</source>
<translation>&交易记录</translation>
</message>
<message>
<location line="+1"/>
<source>Browse transaction history</source>
<translation>查看交易历史</translation>
</message>
<message>
<location line="+7"/>
<source>Edit the list of stored addresses and labels</source>
<translation>修改存储的地址和标签列表</translation>
</message>
<message>
<location line="-14"/>
<source>Show the list of addresses for receiving payments</source>
<translation>显示接收支付的地址列表</translation>
</message>
<message>
<location line="+31"/>
<source>E&xit</source>
<translation>退出</translation>
</message>
<message>
<location line="+1"/>
<source>Quit application</source>
<translation>退出程序</translation>
</message>
<message>
<location line="+4"/>
<source>Show information about Communistcoin</source>
<translation>显示莱特币的相关信息</translation>
</message>
<message>
<location line="+2"/>
<source>About &Qt</source>
<translation>关于 &Qt</translation>
</message>
<message>
<location line="+1"/>
<source>Show information about Qt</source>
<translation>显示Qt相关信息</translation>
</message>
<message>
<location line="+2"/>
<source>&Options...</source>
<translation>&选项...</translation>
</message>
<message>
<location line="+6"/>
<source>&Encrypt Wallet...</source>
<translation>&加密钱包...</translation>
</message>
<message>
<location line="+3"/>
<source>&Backup Wallet...</source>
<translation>&备份钱包...</translation>
</message>
<message>
<location line="+2"/>
<source>&Change Passphrase...</source>
<translation>&修改密码...</translation>
</message>
<message>
<location line="+285"/>
<source>Importing blocks from disk...</source>
<translation>正在从磁盘导入数据块...</translation>
</message>
<message>
<location line="+3"/>
<source>Reindexing blocks on disk...</source>
<translation>正在为数据块建立索引...</translation>
</message>
<message>
<location line="-347"/>
<source>Send coins to a Communistcoin address</source>
<translation>向一个莱特币地址发送莱特币</translation>
</message>
<message>
<location line="+49"/>
<source>Modify configuration options for Communistcoin</source>
<translation>设置选项</translation>
</message>
<message>
<location line="+9"/>
<source>Backup wallet to another location</source>
<translation>备份钱包到其它文件夹</translation>
</message>
<message>
<location line="+2"/>
<source>Change the passphrase used for wallet encryption</source>
<translation>修改钱包加密口令</translation>
</message>
<message>
<location line="+6"/>
<source>&Debug window</source>
<translation>&调试窗口</translation>
</message>
<message>
<location line="+1"/>
<source>Open debugging and diagnostic console</source>
<translation>在诊断控制台调试</translation>
</message>
<message>
<location line="-4"/>
<source>&Verify message...</source>
<translation>&验证消息...</translation>
</message>
<message>
<location line="-165"/>
<location line="+530"/>
<source>Communistcoin</source>
<translation>莱特币</translation>
</message>
<message>
<location line="-530"/>
<source>Wallet</source>
<translation>钱包</translation>
</message>
<message>
<location line="+101"/>
<source>&Send</source>
<translation>&发送</translation>
</message>
<message>
<location line="+7"/>
<source>&Receive</source>
<translation>&接收</translation>
</message>
<message>
<location line="+14"/>
<source>&Addresses</source>
<translation>&地址</translation>
</message>
<message>
<location line="+22"/>
<source>&About Communistcoin</source>
<translation>&关于莱特币</translation>
</message>
<message>
<location line="+9"/>
<source>&Show / Hide</source>
<translation>&显示 / 隐藏</translation>
</message>
<message>
<location line="+1"/>
<source>Show or hide the main Window</source>
<translation>显示或隐藏主窗口</translation>
</message>
<message>
<location line="+3"/>
<source>Encrypt the private keys that belong to your wallet</source>
<translation>对钱包中的私钥加密</translation>
</message>
<message>
<location line="+7"/>
<source>Sign messages with your Communistcoin addresses to prove you own them</source>
<translation>用莱特币地址关联的私钥为消息签名,以证明您拥有这个莱特币地址</translation>
</message>
<message>
<location line="+2"/>
<source>Verify messages to ensure they were signed with specified Communistcoin addresses</source>
<translation>校验消息,确保该消息是由指定的莱特币地址所有者签名的</translation>
</message>
<message>
<location line="+28"/>
<source>&File</source>
<translation>&文件</translation>
</message>
<message>
<location line="+7"/>
<source>&Settings</source>
<translation>&设置</translation>
</message>
<message>
<location line="+6"/>
<source>&Help</source>
<translation>&帮助</translation>
</message>
<message>
<location line="+9"/>
<source>Tabs toolbar</source>
<translation>分页工具栏</translation>
</message>
<message>
<location line="+17"/>
<location line="+10"/>
<source>[testnet]</source>
<translation>[testnet]</translation>
</message>
<message>
<location line="+47"/>
<source>Communistcoin client</source>
<translation>莱特币客户端</translation>
</message>
<message numerus="yes">
<location line="+141"/>
<source>%n active connection(s) to Communistcoin network</source>
<translation><numerusform>到莱特币网络的连接共有%n条</numerusform></translation>
</message>
<message>
<location line="+22"/>
<source>No block source available...</source>
<translation>No block source available...</translation>
</message>
<message>
<location line="+12"/>
<source>Processed %1 of %2 (estimated) blocks of transaction history.</source>
<translation>%1 / %2 个交易历史的区块已下载</translation>
</message>
<message>
<location line="+4"/>
<source>Processed %1 blocks of transaction history.</source>
<translation>已处理 %1 个交易历史数据块。</translation>
</message>
<message numerus="yes">
<location line="+20"/>
<source>%n hour(s)</source>
<translation><numerusform>%n 小时前</numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n day(s)</source>
<translation><numerusform>%n 天前</numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n week(s)</source>
<translation><numerusform>%n 周前</numerusform></translation>
</message>
<message>
<location line="+4"/>
<source>%1 behind</source>
<translation>落后 %1 </translation>
</message>
<message>
<location line="+14"/>
<source>Last received block was generated %1 ago.</source>
<translation>最新收到的区块产生于 %1。</translation>
</message>
<message>
<location line="+2"/>
<source>Transactions after this will not yet be visible.</source>
<translation>在此之后的交易尚未可见</translation>
</message>
<message>
<location line="+22"/>
<source>Error</source>
<translation>错误</translation>
</message>
<message>
<location line="+3"/>
<source>Warning</source>
<translation>警告</translation>
</message>
<message>
<location line="+3"/>
<source>Information</source>
<translation>信息</translation>
</message>
<message>
<location line="+70"/>
<source>This transaction is over the size limit. You can still send it for a fee of %1, which goes to the nodes that process your transaction and helps to support the network. Do you want to pay the fee?</source>
<translation>该交易的字节数超标。您可以选择支付%1的交易费给处理您的交易的网络节点,有助于莱特币网络的运行。您愿意支付这笔交易费用吗?</translation>
</message>
<message>
<location line="-140"/>
<source>Up to date</source>
<translation>最新状态</translation>
</message>
<message>
<location line="+31"/>
<source>Catching up...</source>
<translation>更新中...</translation>
</message>
<message>
<location line="+113"/>
<source>Confirm transaction fee</source>
<translation>确认交易费</translation>
</message>
<message>
<location line="+8"/>
<source>Sent transaction</source>
<translation>已发送交易</translation>
</message>
<message>
<location line="+0"/>
<source>Incoming transaction</source>
<translation>流入交易</translation>
</message>
<message>
<location line="+1"/>
<source>Date: %1
Amount: %2
Type: %3
Address: %4
</source>
<translation>日期: %1
金额: %2
类别: %3
地址: %4
</translation>
</message>
<message>
<location line="+33"/>
<location line="+23"/>
<source>URI handling</source>
<translation>URI 处理</translation>
</message>
<message>
<location line="-23"/>
<location line="+23"/>
<source>URI can not be parsed! This can be caused by an invalid Communistcoin address or malformed URI parameters.</source>
<translation>URI无法解析!原因可能是莱特币地址不正确,或者URI参数错误。</translation>
</message>
<message>
<location line="+17"/>
<source>Wallet is <b>encrypted</b> and currently <b>unlocked</b></source>
<translation>钱包已被<b>加密</b>,当前为<b>解锁</b>状态</translation>
</message>
<message>
<location line="+8"/>
<source>Wallet is <b>encrypted</b> and currently <b>locked</b></source>
<translation>钱包已被<b>加密</b>,当前为<b>锁定</b>状态</translation>
</message>
<message>
<location filename="../bitcoin.cpp" line="+111"/>
<source>A fatal error occurred. Communistcoin can no longer continue safely and will quit.</source>
<translation>发生严重错误。</translation>
</message>
</context>
<context>
<name>ClientModel</name>
<message>
<location filename="../clientmodel.cpp" line="+104"/>
<source>Network Alert</source>
<translation>网络警报</translation>
</message>
</context>
<context>
<name>EditAddressDialog</name>
<message>
<location filename="../forms/editaddressdialog.ui" line="+14"/>
<source>Edit Address</source>
<translation>编辑地址</translation>
</message>
<message>
<location line="+11"/>
<source>&Label</source>
<translation>&标签</translation>
</message>
<message>
<location line="+10"/>
<source>The label associated with this address book entry</source>
<translation>与此地址条目关联的标签</translation>
</message>
<message>
<location line="+7"/>
<source>&Address</source>
<translation>&地址</translation>
</message>
<message>
<location line="+10"/>
<source>The address associated with this address book entry. This can only be modified for sending addresses.</source>
<translation>该地址与地址簿中的条目已关联,无法作为发送地址编辑。</translation>
</message>
<message>
<location filename="../editaddressdialog.cpp" line="+21"/>
<source>New receiving address</source>
<translation>新接收地址</translation>
</message>
<message>
<location line="+4"/>
<source>New sending address</source>
<translation>新发送地址</translation>
</message>
<message>
<location line="+3"/>
<source>Edit receiving address</source>
<translation>编辑接收地址</translation>
</message>
<message>
<location line="+4"/>
<source>Edit sending address</source>
<translation>编辑发送地址</translation>
</message>
<message>
<location line="+76"/>
<source>The entered address "%1" is already in the address book.</source>
<translation>输入的地址 "%1" 已经存在于地址簿。</translation>
</message>
<message>
<location line="-5"/>
<source>The entered address "%1" is not a valid Communistcoin address.</source>
<translation>您输入的 "%1" 不是合法的莱特币地址.</translation>
</message>
<message>
<location line="+10"/>
<source>Could not unlock wallet.</source>
<translation>无法解锁钱包</translation>
</message>
<message>
<location line="+5"/>
<source>New key generation failed.</source>
<translation>密钥创建失败.</translation>
</message>
</context>
<context>
<name>GUIUtil::HelpMessageBox</name>
<message>
<location filename="../guiutil.cpp" line="+424"/>
<location line="+12"/>
<source>Communistcoin-Qt</source>
<translation>Communistcoin-Qt</translation>
</message>
<message>
<location line="-12"/>
<source>version</source>
<translation>版本</translation>
</message>
<message>
<location line="+2"/>
<source>Usage:</source>
<translation>使用:</translation>
</message>
<message>
<location line="+1"/>
<source>command-line options</source>
<translation>命令行选项</translation>
</message>
<message>
<location line="+4"/>
<source>UI options</source>
<translation>UI选项</translation>
</message>
<message>
<location line="+1"/>
<source>Set language, for example "de_DE" (default: system locale)</source>
<translation>设置语言, 例如 "de_DE" (缺省: 系统语言)</translation>
</message>
<message>
<location line="+1"/>
<source>Start minimized</source>
<translation>启动时最小化
</translation>
</message>
<message>
<location line="+1"/>
<source>Show splash screen on startup (default: 1)</source>
<translation>启动时显示版权页 (缺省: 1)</translation>
</message>
</context>
<context>
<name>OptionsDialog</name>
<message>
<location filename="../forms/optionsdialog.ui" line="+14"/>
<source>Options</source>
<translation>选项</translation>
</message>
<message>
<location line="+16"/>
<source>&Main</source>
<translation>&主要的</translation>
</message>
<message>
<location line="+6"/>
<source>Optional transaction fee per kB that helps make sure your transactions are processed quickly. Most transactions are 1 kB.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>Pay transaction &fee</source>
<translation>支付交易 &费用</translation>
</message>
<message>
<location line="+31"/>
<source>Automatically start Communistcoin after logging in to the system.</source>
<translation>登录系统后自动开启莱特币客户端</translation>
</message>
<message>
<location line="+3"/>
<source>&Start Communistcoin on system login</source>
<translation>启动时&运行</translation>
</message>
<message>
<location line="+35"/>
<source>Reset all client options to default.</source>
<translation>恢复客户端的缺省设置</translation>
</message>
<message>
<location line="+3"/>
<source>&Reset Options</source>
<translation>恢复缺省设置</translation>
</message>
<message>
<location line="+13"/>
<source>&Network</source>
<translation>&网络</translation>
</message>
<message>
<location line="+6"/>
<source>Automatically open the Communistcoin client port on the router. This only works when your router supports UPnP and it is enabled.</source>
<translation>自动在路由器中打开莱特币端口。只有当您的路由器开启 UPnP 选项时此功能才有效。</translation>
</message>
<message>
<location line="+3"/>
<source>Map port using &UPnP</source>
<translation>使用 &UPnP 映射端口</translation>
</message>
<message>
<location line="+7"/>
<source>Connect to the Communistcoin network through a SOCKS proxy (e.g. when connecting through Tor).</source>
<translation>通过代理服务器连接莱特币网络(例如:通过Tor连接)</translation>
</message>
<message>
<location line="+3"/>
<source>&Connect through SOCKS proxy:</source>
<translation>&通过Socks代理连接:</translation>
</message>
<message>
<location line="+9"/>
<source>Proxy &IP:</source>
<translation>代理服务器&IP:</translation>
</message>
<message>
<location line="+19"/>
<source>IP address of the proxy (e.g. 127.0.0.1)</source>
<translation>代理服务器IP (如 127.0.0.1)</translation>
</message>
<message>
<location line="+7"/>
<source>&Port:</source>
<translation>&端口:</translation>
</message>
<message>
<location line="+19"/>
<source>Port of the proxy (e.g. 9050)</source>
<translation>代理端口(例如 9050)</translation>
</message>
<message>
<location line="+7"/>
<source>SOCKS &Version:</source>
<translation>Socks &版本</translation>
</message>
<message>
<location line="+13"/>
<source>SOCKS version of the proxy (e.g. 5)</source>
<translation>Socks代理版本 (例如 5)</translation>
</message>
<message>
<location line="+36"/>
<source>&Window</source>
<translation>&窗口</translation>
</message>
<message>
<location line="+6"/>
<source>Show only a tray icon after minimizing the window.</source>
<translation>最小化窗口后仅显示托盘图标</translation>
</message>
<message>
<location line="+3"/>
<source>&Minimize to the tray instead of the taskbar</source>
<translation>&最小化到托盘</translation>
</message>
<message>
<location line="+7"/>
<source>Minimize instead of exit the application when the window is closed. When this option is enabled, the application will be closed only after selecting Quit in the menu.</source>
<translation>当窗口关闭时程序最小化而不是退出。当使用该选项时,程序只能通过在菜单中选择退出来关闭</translation>
</message>
<message>
<location line="+3"/>
<source>M&inimize on close</source>
<translation>单击关闭按钮最小化</translation>
</message>
<message>
<location line="+21"/>
<source>&Display</source>
<translation>&显示</translation>
</message>
<message>
<location line="+8"/>
<source>User Interface &language:</source>
<translation>用户界面&语言:</translation>
</message>
<message>
<location line="+13"/>
<source>The user interface language can be set here. This setting will take effect after restarting Communistcoin.</source>
<translation>在这里设置用户界面的语言。设置将在客户端重启后生效。</translation>
</message>
<message>
<location line="+11"/>
<source>&Unit to show amounts in:</source>
<translation>&莱特币金额单位:</translation>
</message>
<message>
<location line="+13"/>
<source>Choose the default subdivision unit to show in the interface and when sending coins.</source>
<translation>选择莱特币单位。</translation>
</message>
<message>
<location line="+9"/>
<source>Whether to show Communistcoin addresses in the transaction list or not.</source>
<translation>是否需要在交易清单中显示莱特币地址。</translation>
</message>
<message>
<location line="+3"/>
<source>&Display addresses in transaction list</source>
<translation>在交易清单中&显示莱特币地址</translation>
</message>
<message>
<location line="+71"/>
<source>&OK</source>
<translation>&确定</translation>
</message>
<message>
<location line="+7"/>
<source>&Cancel</source>
<translation>&取消</translation>
</message>
<message>
<location line="+10"/>
<source>&Apply</source>
<translation>&应用</translation>
</message>
<message>
<location filename="../optionsdialog.cpp" line="+53"/>
<source>default</source>
<translation>缺省</translation>
</message>
<message>
<location line="+130"/>
<source>Confirm options reset</source>
<translation>确认恢复缺省设置</translation>
</message>
<message>
<location line="+1"/>
<source>Some settings may require a client restart to take effect.</source>
<translation>某些设置选项需要重启客户端才能生效</translation>
</message>
<message>
<location line="+0"/>
<source>Do you want to proceed?</source>
<translation>您希望继续吗?</translation>
</message>
<message>
<location line="+42"/>
<location line="+9"/>
<source>Warning</source>
<translation>警告</translation>
</message>
<message>
<location line="-9"/>
<location line="+9"/>
<source>This setting will take effect after restarting Communistcoin.</source>
<translation>需要重启客户端软件才能生效。</translation>
</message>
<message>
<location line="+29"/>
<source>The supplied proxy address is invalid.</source>
<translation>提供的代理服务器地址无效。</translation>
</message>
</context>
<context>
<name>OverviewPage</name>
<message>
<location filename="../forms/overviewpage.ui" line="+14"/>
<source>Form</source>
<translation>表单</translation>
</message>
<message>
<location line="+50"/>
<location line="+166"/>
<source>The displayed information may be out of date. Your wallet automatically synchronizes with the Communistcoin network after a connection is established, but this process has not completed yet.</source>
<translation>现在显示的消息可能是过期的. 在连接上莱特币网络节点后,您的钱包将自动与网络同步,但是这个过程还没有完成.</translation>
</message>
<message>
<location line="-124"/>
<source>Balance:</source>
<translation>余额:</translation>
</message>
<message>
<location line="+29"/>
<source>Unconfirmed:</source>
<translation>未确认:</translation>
</message>
<message>
<location line="-78"/>
<source>Wallet</source>
<translation>钱包</translation>
</message>
<message>
<location line="+107"/>
<source>Immature:</source>
<translation>未成熟的:</translation>
</message>
<message>
<location line="+13"/>
<source>Mined balance that has not yet matured</source>
<translation>尚未成熟的挖矿收入余额</translation>
</message>
<message>
<location line="+46"/>
<source><b>Recent transactions</b></source>
<translation><b>最近交易记录</b></translation>
</message>
<message>
<location line="-101"/>
<source>Your current balance</source>
<translation>您的当前余额</translation>
</message>
<message>
<location line="+29"/>
<source>Total of transactions that have yet to be confirmed, and do not yet count toward the current balance</source>
<translation>尚未确认的交易总额, 未计入当前余额</translation>
</message>
<message>
<location filename="../overviewpage.cpp" line="+116"/>
<location line="+1"/>
<source>out of sync</source>
<translation>数据同步中</translation>
</message>
</context>
<context>
<name>PaymentServer</name>
<message>
<location filename="../paymentserver.cpp" line="+107"/>
<source>Cannot start communistcoin: click-to-pay handler</source>
<translation>暂时无法启动莱特币:点击支付功能</translation>
</message>
</context>
<context>
<name>QRCodeDialog</name>
<message>
<location filename="../forms/qrcodedialog.ui" line="+14"/>
<source>QR Code Dialog</source>
<translation>二维码对话框</translation>
</message>
<message>
<location line="+59"/>
<source>Request Payment</source>
<translation>请求付款</translation>
</message>
<message>
<location line="+56"/>
<source>Amount:</source>
<translation>金额:</translation>
</message>
<message>
<location line="-44"/>
<source>Label:</source>
<translation>标签:</translation>
</message>
<message>
<location line="+19"/>
<source>Message:</source>
<translation>消息:</translation>
</message>
<message>
<location line="+71"/>
<source>&Save As...</source>
<translation>&另存为</translation>
</message>
<message>
<location filename="../qrcodedialog.cpp" line="+62"/>
<source>Error encoding URI into QR Code.</source>
<translation>将 URI 转换成二维码失败.</translation>
</message>
<message>
<location line="+40"/>
<source>The entered amount is invalid, please check.</source>
<translation>输入的金额非法,请检查。</translation>
</message>
<message>
<location line="+23"/>
<source>Resulting URI too long, try to reduce the text for label / message.</source>
<translation>URI 太长, 请试着精简标签/消息的内容.</translation>
</message>
<message>
<location line="+25"/>
<source>Save QR Code</source>
<translation>保存二维码</translation>
</message>
<message>
<location line="+0"/>
<source>PNG Images (*.png)</source>
<translation>PNG图像文件(*.png)</translation>
</message>
</context>
<context>
<name>RPCConsole</name>
<message>
<location filename="../forms/rpcconsole.ui" line="+46"/>
<source>Client name</source>
<translation>客户端名称</translation>
</message>
<message>
<location line="+10"/>
<location line="+23"/>
<location line="+26"/>
<location line="+23"/>
<location line="+23"/>
<location line="+36"/>
<location line="+53"/>
<location line="+23"/>
<location line="+23"/>
<location filename="../rpcconsole.cpp" line="+339"/>
<source>N/A</source>
<translation>不可用</translation>
</message>
<message>
<location line="-217"/>
<source>Client version</source>
<translation>客户端版本</translation>
</message>
<message>
<location line="-45"/>
<source>&Information</source>
<translation>&信息</translation>
</message>
<message>
<location line="+68"/>
<source>Using OpenSSL version</source>
<translation>使用OpenSSL版本</translation>
</message>
<message>
<location line="+49"/>
<source>Startup time</source>
<translation>启动时间</translation>
</message>
<message>
<location line="+29"/>
<source>Network</source>
<translation>网络</translation>
</message>
<message>
<location line="+7"/>
<source>Number of connections</source>
<translation>连接数</translation>
</message>
<message>
<location line="+23"/>
<source>On testnet</source>
<translation>当前为莱特币测试网络</translation>
</message>
<message>
<location line="+23"/>
<source>Block chain</source>
<translation>数据链</translation>
</message>
<message>
<location line="+7"/>
<source>Current number of blocks</source>
<translation>当前数据块数量</translation>
</message>
<message>
<location line="+23"/>
<source>Estimated total blocks</source>
<translation>预计数据块数量</translation>
</message>
<message>
<location line="+23"/>
<source>Last block time</source>
<translation>上一数据块时间</translation>
</message>
<message>
<location line="+52"/>
<source>&Open</source>
<translation>&打开</translation>
</message>
<message>
<location line="+16"/>
<source>Command-line options</source>
<translation>命令行选项</translation>
</message>
<message>
<location line="+7"/>
<source>Show the Communistcoin-Qt help message to get a list with possible Communistcoin command-line options.</source>
<translation>显示Communistcoin命令行选项帮助信息</translation>
</message>
<message>
<location line="+3"/>
<source>&Show</source>
<translation>&显示</translation>
</message>
<message>
<location line="+24"/>
<source>&Console</source>
<translation>&控制台</translation>
</message>
<message>
<location line="-260"/>
<source>Build date</source>
<translation>创建时间</translation>
</message>
<message>
<location line="-104"/>
<source>Communistcoin - Debug window</source>
<translation>莱特币 - 调试窗口</translation>
</message>
<message>
<location line="+25"/>
<source>Communistcoin Core</source>
<translation>莱特币核心</translation>
</message>
<message>
<location line="+279"/>
<source>Debug log file</source>
<translation>调试日志文件</translation>
</message>
<message>
<location line="+7"/>
<source>Open the Communistcoin debug log file from the current data directory. This can take a few seconds for large log files.</source>
<translation>打开当前目录中的调试日志文件。日志文件大的话可能要等上几秒钟。</translation>
</message>
<message>
<location line="+102"/>
<source>Clear console</source>
<translation>清空控制台</translation>
</message>
<message>
<location filename="../rpcconsole.cpp" line="-30"/>
<source>Welcome to the Communistcoin RPC console.</source>
<translation>欢迎来到 RPC 控制台.</translation>
</message>
<message>
<location line="+1"/>
<source>Use up and down arrows to navigate history, and <b>Ctrl-L</b> to clear screen.</source>
<translation>使用上下方向键浏览历史, <b>Ctrl-L</b>清除屏幕.</translation>
</message>
<message>
<location line="+1"/>
<source>Type <b>help</b> for an overview of available commands.</source>
<translation>使用 <b>help</b> 命令显示帮助信息.</translation>
</message>
</context>
<context>
<name>SendCoinsDialog</name>
<message>
<location filename="../forms/sendcoinsdialog.ui" line="+14"/>
<location filename="../sendcoinsdialog.cpp" line="+124"/>
<location line="+5"/>
<location line="+5"/>
<location line="+5"/>
<location line="+6"/>
<location line="+5"/>
<location line="+5"/>
<source>Send Coins</source>
<translation>发送货币</translation>
</message>
<message>
<location line="+50"/>
<source>Send to multiple recipients at once</source>
<translation>一次发送给多个接收者</translation>
</message>
<message>
<location line="+3"/>
<source>Add &Recipient</source>
<translation>添加收款人</translation>
</message>
<message>
<location line="+20"/>
<source>Remove all transaction fields</source>
<translation>移除所有交易项</translation>
</message>
<message>
<location line="+3"/>
<source>Clear &All</source>
<translation>清除 &所有</translation>
</message>
<message>
<location line="+22"/>
<source>Balance:</source>
<translation>余额:</translation>
</message>
<message>
<location line="+10"/>
<source>123.456 BTC</source>
<translation>123.456 BTC</translation>
</message>
<message>
<location line="+31"/>
<source>Confirm the send action</source>
<translation>确认并发送货币</translation>
</message>
<message>
<location line="+3"/>
<source>S&end</source>
<translation>发送</translation>
</message>
<message>
<location filename="../sendcoinsdialog.cpp" line="-59"/>
<source><b>%1</b> to %2 (%3)</source>
<translation><b>%1</b> 到 %2 (%3)</translation>
</message>
<message>
<location line="+5"/>
<source>Confirm send coins</source>
<translation>确认发送货币</translation>
</message>
<message>
<location line="+1"/>
<source>Are you sure you want to send %1?</source>
<translation>确定您要发送 %1?</translation>
</message>
<message>
<location line="+0"/>
<source> and </source>
<translation> 和 </translation>
</message>
<message>
<location line="+23"/>
<source>The recipient address is not valid, please recheck.</source>
<translation>收款人地址不合法,请检查。</translation>
</message>
<message>
<location line="+5"/>
<source>The amount to pay must be larger than 0.</source>
<translation>支付金额必须大于0.</translation>
</message>
<message>
<location line="+5"/>
<source>The amount exceeds your balance.</source>
<translation>金额超出您的账上余额。</translation>
</message>
<message>
<location line="+5"/>
<source>The total exceeds your balance when the %1 transaction fee is included.</source>
<translation>计入 %1 交易费后的金额超出您的账上余额。</translation>
</message>
<message>
<location line="+6"/>
<source>Duplicate address found, can only send to each address once per send operation.</source>
<translation>发现重复的地址, 每次只能对同一地址发送一次.</translation>
</message>
<message>
<location line="+5"/>
<source>Error: Transaction creation failed!</source>
<translation>错误:创建交易失败!</translation>
</message>
<message>
<location line="+5"/>
<source>Error: The transaction was rejected. This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation>错误: 交易被拒绝. 如果您使用的是备份钱包,可能存在两个钱包不同步的情况,另一个钱包中的莱特币已经被使用,但本地的这个钱包尚没有记录。</translation>
</message>
</context>
<context>
<name>SendCoinsEntry</name>
<message>
<location filename="../forms/sendcoinsentry.ui" line="+14"/>
<source>Form</source>
<translation>表单</translation>
</message>
<message>
<location line="+15"/>
<source>A&mount:</source>
<translation>金额</translation>
</message>
<message>
<location line="+13"/>
<source>Pay &To:</source>
<translation>付款&给:</translation>
</message>
<message>
<location line="+34"/>
<source>The address to send the payment to (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source>
<translation>付款给这个地址 (例如 Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</translation>
</message>
<message>
<location line="+60"/>
<location filename="../sendcoinsentry.cpp" line="+26"/>
<source>Enter a label for this address to add it to your address book</source>
<translation>为这个地址输入一个标签,以便将它添加到您的地址簿</translation>
</message>
<message>
<location line="-78"/>
<source>&Label:</source>
<translation>&标签:</translation>
</message>
<message>
<location line="+28"/>
<source>Choose address from address book</source>
<translation>从地址簿选择地址</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<location line="+7"/>
<source>Paste address from clipboard</source>
<translation>从剪贴板粘贴地址</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation>Alt+P</translation>
</message>
<message>
<location line="+7"/>
<source>Remove this recipient</source>
<translation>移除此接收者</translation>
</message>
<message>
<location filename="../sendcoinsentry.cpp" line="+1"/>
<source>Enter a Communistcoin address (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source>
<translation>请输入莱特币地址 (例如: Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</translation>
</message>
</context>
<context>
<name>SignVerifyMessageDialog</name>
<message>
<location filename="../forms/signverifymessagedialog.ui" line="+14"/>
<source>Signatures - Sign / Verify a Message</source>
<translation>签名 - 为消息签名/验证签名消息</translation>
</message>
<message>
<location line="+13"/>
<source>&Sign Message</source>
<translation>&签名消息</translation>
</message>
<message>
<location line="+6"/>
<source>You can sign messages with your addresses to prove you own them. Be careful not to sign anything vague, as phishing attacks may try to trick you into signing your identity over to them. Only sign fully-detailed statements you agree to.</source>
<translation>您可以用你的地址对消息进行签名,以证明您是该地址的所有人。注意不要对模棱两可的消息签名,以免遭受钓鱼式攻击。请确保消息内容准确的表达了您的真实意愿。</translation>
</message>
<message>
<location line="+18"/>
<source>The address to sign the message with (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source>
<translation>用于签名消息的地址(例如: Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</translation>
</message>
<message>
<location line="+10"/>
<location line="+213"/>
<source>Choose an address from the address book</source>
<translation>从地址簿选择地址</translation>
</message>
<message>
<location line="-203"/>
<location line="+213"/>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<location line="-203"/>
<source>Paste address from clipboard</source>
<translation>从剪贴板粘贴地址</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation>Alt+P</translation>
</message>
<message>
<location line="+12"/>
<source>Enter the message you want to sign here</source>
<translation>请输入您要发送的签名消息</translation>
</message>
<message>
<location line="+7"/>
<source>Signature</source>
<translation>签名</translation>
</message>
<message>
<location line="+27"/>
<source>Copy the current signature to the system clipboard</source>
<translation>复制当前签名至剪切板</translation>
</message>
<message>
<location line="+21"/>
<source>Sign the message to prove you own this Communistcoin address</source>
<translation>签名消息,证明这个地址属于您。</translation>
</message>
<message>
<location line="+3"/>
<source>Sign &Message</source>
<translation>消息签名</translation>
</message>
<message>
<location line="+14"/>
<source>Reset all sign message fields</source>
<translation>清空所有签名消息栏</translation>
</message>
<message>
<location line="+3"/>
<location line="+146"/>
<source>Clear &All</source>
<translation>清除 &所有</translation>
</message>
<message>
<location line="-87"/>
<source>&Verify Message</source>
<translation>&验证消息</translation>
</message>
<message>
<location line="+6"/>
<source>Enter the signing address, message (ensure you copy line breaks, spaces, tabs, etc. exactly) and signature below to verify the message. Be careful not to read more into the signature than what is in the signed message itself, to avoid being tricked by a man-in-the-middle attack.</source>
<translation>在下面输入签名地址,消息(请确保换行符、空格符、制表符等等一个不漏)和签名以验证消息。请确保签名信息准确,提防中间人攻击。</translation>
</message>
<message>
<location line="+21"/>
<source>The address the message was signed with (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source>
<translation>用于签名消息的地址(例如: Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</translation>
</message>
<message>
<location line="+40"/>
<source>Verify the message to ensure it was signed with the specified Communistcoin address</source>
<translation>验证消息,确保消息是由指定的莱特币地址签名过的。</translation>
</message>
<message>
<location line="+3"/>
<source>Verify &Message</source>
<translation>验证消息签名</translation>
</message>
<message>
<location line="+14"/>
<source>Reset all verify message fields</source>
<translation>清空所有验证消息栏</translation>
</message>
<message>
<location filename="../signverifymessagedialog.cpp" line="+27"/>
<location line="+3"/>
<source>Enter a Communistcoin address (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source>
<translation>请输入莱特币地址 (例如: Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</translation>
</message>
<message>
<location line="-2"/>
<source>Click "Sign Message" to generate signature</source>
<translation>单击“签名消息“产生签名。</translation>
</message>
<message>
<location line="+3"/>
<source>Enter Communistcoin signature</source>
<translation>输入莱特币签名</translation>
</message>
<message>
<location line="+82"/>
<location line="+81"/>
<source>The entered address is invalid.</source>
<translation>输入的地址非法。</translation>
</message>
<message>
<location line="-81"/>
<location line="+8"/>
<location line="+73"/>
<location line="+8"/>
<source>Please check the address and try again.</source>
<translation>请检查地址后重试。</translation>
</message>
<message>
<location line="-81"/>
<location line="+81"/>
<source>The entered address does not refer to a key.</source>
<translation>输入的地址没有关联的公私钥对。</translation>
</message>
<message>
<location line="-73"/>
<source>Wallet unlock was cancelled.</source>
<translation>钱包解锁动作取消。</translation>
</message>
<message>
<location line="+8"/>
<source>Private key for the entered address is not available.</source>
<translation>找不到输入地址关联的私钥。</translation>
</message>
<message>
<location line="+12"/>
<source>Message signing failed.</source>
<translation>消息签名失败。</translation>
</message>
<message>
<location line="+5"/>
<source>Message signed.</source>
<translation>消息已签名。</translation>
</message>
<message>
<location line="+59"/>
<source>The signature could not be decoded.</source>
<translation>签名无法解码。</translation>
</message>
<message>
<location line="+0"/>
<location line="+13"/>
<source>Please check the signature and try again.</source>
<translation>请检查签名后重试。</translation>
</message>
<message>
<location line="+0"/>
<source>The signature did not match the message digest.</source>
<translation>签名与消息摘要不匹配。</translation>
</message>
<message>
<location line="+7"/>
<source>Message verification failed.</source>
<translation>消息验证失败。</translation>
</message>
<message>
<location line="+5"/>
<source>Message verified.</source>
<translation>消息验证成功。</translation>
</message>
</context>
<context>
<name>SplashScreen</name>
<message>
<location filename="../splashscreen.cpp" line="+22"/>
<source>The Communistcoin developers</source>
<translation>Communistcoin-qt 客户端开发团队</translation>
</message>
<message>
<location line="+1"/>
<source>[testnet]</source>
<translation>[testnet]</translation>
</message>
</context>
<context>
<name>TransactionDesc</name>
<message>
<location filename="../transactiondesc.cpp" line="+20"/>
<source>Open until %1</source>
<translation>至 %1 个数据块时开启</translation>
</message>
<message>
<location line="+6"/>
<source>%1/offline</source>
<translation>%1 / 离线</translation>
</message>
<message>
<location line="+2"/>
<source>%1/unconfirmed</source>
<translation>%1/未确认</translation>
</message>
<message>
<location line="+2"/>
<source>%1 confirmations</source>
<translation>%1 确认项</translation>
</message>
<message>
<location line="+18"/>
<source>Status</source>
<translation>状态</translation>
</message>
<message numerus="yes">
<location line="+7"/>
<source>, broadcast through %n node(s)</source>
<translation><numerusform>通过 %n 个节点广播</numerusform></translation>
</message>
<message>
<location line="+4"/>
<source>Date</source>
<translation>日期</translation>
</message>
<message>
<location line="+7"/>
<source>Source</source>
<translation>源</translation>
</message>
<message>
<location line="+0"/>
<source>Generated</source>
<translation>生成</translation>
</message>
<message>
<location line="+5"/>
<location line="+17"/>
<source>From</source>
<translation>来自</translation>
</message>
<message>
<location line="+1"/>
<location line="+22"/>
<location line="+58"/>
<source>To</source>
<translation>到</translation>
</message>
<message>
<location line="-77"/>
<location line="+2"/>
<source>own address</source>
<translation>自己的地址</translation>
</message>
<message>
<location line="-2"/>
<source>label</source>
<translation>标签</translation>
</message>
<message>
<location line="+37"/>
<location line="+12"/>
<location line="+45"/>
<location line="+17"/>
<location line="+30"/>
<source>Credit</source>
<translation>收入</translation>
</message>
<message numerus="yes">
<location line="-102"/>
<source>matures in %n more block(s)</source>
<translation><numerusform>将在 %n 个数据块后成熟</numerusform></translation>
</message>
<message>
<location line="+2"/>
<source>not accepted</source>
<translation>未被接受</translation>
</message>
<message>
<location line="+44"/>
<location line="+8"/>
<location line="+15"/>
<location line="+30"/>
<source>Debit</source>
<translation>支出</translation>
</message>
<message>
<location line="-39"/>
<source>Transaction fee</source>
<translation>交易费</translation>
</message>
<message>
<location line="+16"/>
<source>Net amount</source>
<translation>净额</translation>
</message>
<message>
<location line="+6"/>
<source>Message</source>
<translation>消息</translation>
</message>
<message>
<location line="+2"/>
<source>Comment</source>
<translation>备注</translation>
</message>
<message>
<location line="+2"/>
<source>Transaction ID</source>
<translation>交易ID</translation>
</message>
<message>
<location line="+3"/>
<source>Generated coins must mature 120 blocks before they can be spent. When you generated this block, it was broadcast to the network to be added to the block chain. If it fails to get into the chain, its state will change to "not accepted" and it won't be spendable. This may occasionally happen if another node generates a block within a few seconds of yours.</source>
<translation>新挖出的莱特币必须等确120个确认才能使用。您生产出的数据块,将被广播到全网并添加到数据块链。如果入链失败,状态将变为“未被接受”,意味着您的数据块竞争失败,挖出的莱特币将不能使用。当某个节点先于你几秒生产出新的数据块,这种情况会偶尔发生。</translation>
</message>
<message>
<location line="+7"/>
<source>Debug information</source>
<translation>调试信息</translation>
</message>
<message>
<location line="+8"/>
<source>Transaction</source>
<translation>交易</translation>
</message>
<message>
<location line="+3"/>
<source>Inputs</source>
<translation>输入</translation>
</message>
<message>
<location line="+23"/>
<source>Amount</source>
<translation>金额</translation>
</message>
<message>
<location line="+1"/>
<source>true</source>
<translation>正确</translation>
</message>
<message>
<location line="+0"/>
<source>false</source>
<translation>错误</translation>
</message>
<message>
<location line="-209"/>
<source>, has not been successfully broadcast yet</source>
<translation>, 未被成功广播</translation>
</message>
<message numerus="yes">
<location line="-35"/>
<source>Open for %n more block(s)</source>
<translation><numerusform>Open for %n more block</numerusform></translation>
</message>
<message>
<location line="+70"/>
<source>unknown</source>
<translation>未知</translation>
</message>
</context>
<context>
<name>TransactionDescDialog</name>
<message>
<location filename="../forms/transactiondescdialog.ui" line="+14"/>
<source>Transaction details</source>
<translation>交易明细</translation>
</message>
<message>
<location line="+6"/>
<source>This pane shows a detailed description of the transaction</source>
<translation>当前面板显示了交易的详细信息</translation>
</message>
</context>
<context>
<name>TransactionTableModel</name>
<message>
<location filename="../transactiontablemodel.cpp" line="+225"/>
<source>Date</source>
<translation>日期</translation>
</message>
<message>
<location line="+0"/>
<source>Type</source>
<translation>类型</translation>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation>地址</translation>
</message>
<message>
<location line="+0"/>
<source>Amount</source>
<translation>数量</translation>
</message>
<message numerus="yes">
<location line="+57"/>
<source>Open for %n more block(s)</source>
<translation><numerusform>Open for %n more block</numerusform></translation>
</message>
<message>
<location line="+3"/>
<source>Open until %1</source>
<translation>至 %1 个数据块时开启</translation>
</message>
<message>
<location line="+3"/>
<source>Offline (%1 confirmations)</source>
<translation>离线 (%1 个确认项)</translation>
</message>
<message>
<location line="+3"/>
<source>Unconfirmed (%1 of %2 confirmations)</source>
<translation>未确认 (%1 / %2 条确认信息)</translation>
</message>
<message>
<location line="+3"/>
<source>Confirmed (%1 confirmations)</source>
<translation>已确认 (%1 条确认信息)</translation>
</message>
<message numerus="yes">
<location line="+8"/>
<source>Mined balance will be available when it matures in %n more block(s)</source>
<translation><numerusform>挖矿收入余额将在 %n 个数据块后可用</numerusform></translation>
</message>
<message>
<location line="+5"/>
<source>This block was not received by any other nodes and will probably not be accepted!</source>
<translation>此数据块未被其他节点接收,并可能不被接受!</translation>
</message>
<message>
<location line="+3"/>
<source>Generated but not accepted</source>
<translation>已生成但未被接受</translation>
</message>
<message>
<location line="+43"/>
<source>Received with</source>
<translation>接收于</translation>
</message>
<message>
<location line="+2"/>
<source>Received from</source>
<translation>收款来自</translation>
</message>
<message>
<location line="+3"/>
<source>Sent to</source>
<translation>发送到</translation>
</message>
<message>
<location line="+2"/>
<source>Payment to yourself</source>
<translation>付款给自己</translation>
</message>
<message>
<location line="+2"/>
<source>Mined</source>
<translation>挖矿所得</translation>
</message>
<message>
<location line="+38"/>
<source>(n/a)</source>
<translation>(n/a)</translation>
</message>
<message>
<location line="+199"/>
<source>Transaction status. Hover over this field to show number of confirmations.</source>
<translation>交易状态。 鼠标移到此区域上可显示确认消息项的数目。</translation>
</message>
<message>
<location line="+2"/>
<source>Date and time that the transaction was received.</source>
<translation>接收到交易的日期和时间。</translation>
</message>
<message>
<location line="+2"/>
<source>Type of transaction.</source>
<translation>交易类别。</translation>
</message>
<message>
<location line="+2"/>
<source>Destination address of transaction.</source>
<translation>交易目的地址。</translation>
</message>
<message>
<location line="+2"/>
<source>Amount removed from or added to balance.</source>
<translation>从余额添加或移除的金额。</translation>
</message>
</context>
<context>
<name>TransactionView</name>
<message>
<location filename="../transactionview.cpp" line="+52"/>
<location line="+16"/>
<source>All</source>
<translation>全部</translation>
</message>
<message>
<location line="-15"/>
<source>Today</source>
<translation>今天</translation>
</message>
<message>
<location line="+1"/>
<source>This week</source>
<translation>本周</translation>
</message>
<message>
<location line="+1"/>
<source>This month</source>
<translation>本月</translation>
</message>
<message>
<location line="+1"/>
<source>Last month</source>
<translation>上月</translation>
</message>
<message>
<location line="+1"/>
<source>This year</source>
<translation>今年</translation>
</message>
<message>
<location line="+1"/>
<source>Range...</source>
<translation>范围...</translation>
</message>
<message>
<location line="+11"/>
<source>Received with</source>
<translation>接收于</translation>
</message>
<message>
<location line="+2"/>
<source>Sent to</source>
<translation>发送到</translation>
</message>
<message>
<location line="+2"/>
<source>To yourself</source>
<translation>到自己</translation>
</message>
<message>
<location line="+1"/>
<source>Mined</source>
<translation>挖矿所得</translation>
</message>
<message>
<location line="+1"/>
<source>Other</source>
<translation>其他</translation>
</message>
<message>
<location line="+7"/>
<source>Enter address or label to search</source>
<translation>输入地址或标签进行搜索</translation>
</message>
<message>
<location line="+7"/>
<source>Min amount</source>
<translation>最小金额</translation>
</message>
<message>
<location line="+34"/>
<source>Copy address</source>
<translation>复制地址</translation>
</message>
<message>
<location line="+1"/>
<source>Copy label</source>
<translation>复制标签</translation>
</message>
<message>
<location line="+1"/>
<source>Copy amount</source>
<translation>复制金额</translation>
</message>
<message>
<location line="+1"/>
<source>Copy transaction ID</source>
<translation>复制交易编号</translation>
</message>
<message>
<location line="+1"/>
<source>Edit label</source>
<translation>编辑标签</translation>
</message>
<message>
<location line="+1"/>
<source>Show transaction details</source>
<translation>显示交易详情</translation>
</message>
<message>
<location line="+139"/>
<source>Export Transaction Data</source>
<translation>导出交易数据</translation>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
<translation>逗号分隔文件(*.csv)</translation>
</message>
<message>
<location line="+8"/>
<source>Confirmed</source>
<translation>已确认</translation>
</message>
<message>
<location line="+1"/>
<source>Date</source>
<translation>日期</translation>
</message>
<message>
<location line="+1"/>
<source>Type</source>
<translation>类别</translation>
</message>
<message>
<location line="+1"/>
<source>Label</source>
<translation>标签</translation>
</message>
<message>
<location line="+1"/>
<source>Address</source>
<translation>地址</translation>
</message>
<message>
<location line="+1"/>
<source>Amount</source>
<translation>金额</translation>
</message>
<message>
<location line="+1"/>
<source>ID</source>
<translation>ID</translation>
</message>
<message>
<location line="+4"/>
<source>Error exporting</source>
<translation>导出错误</translation>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
<translation>无法写入文件 %1。</translation>
</message>
<message>
<location line="+100"/>
<source>Range:</source>
<translation>范围:</translation>
</message>
<message>
<location line="+8"/>
<source>to</source>
<translation>到</translation>
</message>
</context>
<context>
<name>WalletModel</name>
<message>
<location filename="../walletmodel.cpp" line="+193"/>
<source>Send Coins</source>
<translation>发送莱特币</translation>
</message>
</context>
<context>
<name>WalletView</name>
<message>
<location filename="../walletview.cpp" line="+42"/>
<source>&Export</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Export the data in the current tab to a file</source>
<translation>导出当前数据到文件</translation>
</message>
<message>
<location line="+193"/>
<source>Backup Wallet</source>
<translation>备份钱包</translation>
</message>
<message>
<location line="+0"/>
<source>Wallet Data (*.dat)</source>
<translation>钱包文件(*.dat)</translation>
</message>
<message>
<location line="+3"/>
<source>Backup Failed</source>
<translation>备份失败</translation>
</message>
<message>
<location line="+0"/>
<source>There was an error trying to save the wallet data to the new location.</source>
<translation>备份钱包到其它文件夹失败.</translation>
</message>
<message>
<location line="+4"/>
<source>Backup Successful</source>
<translation>备份成功</translation>
</message>
<message>
<location line="+0"/>
<source>The wallet data was successfully saved to the new location.</source>
<translation>钱包数据成功存储到新位置</translation>
</message>
</context>
<context>
<name>bitcoin-core</name>
<message>
<location filename="../bitcoinstrings.cpp" line="+94"/>
<source>Communistcoin version</source>
<translation>莱特币版本</translation>
</message>
<message>
<location line="+102"/>
<source>Usage:</source>
<translation>使用:</translation>
</message>
<message>
<location line="-29"/>
<source>Send command to -server or communistcoind</source>
<translation>发送命令到服务器或者 communistcoind
</translation>
</message>
<message>
<location line="-23"/>
<source>List commands</source>
<translation>列出命令
</translation>
</message>
<message>
<location line="-12"/>
<source>Get help for a command</source>
<translation>获得某条命令的帮助
</translation>
</message>
<message>
<location line="+24"/>
<source>Options:</source>
<translation>选项:
</translation>
</message>
<message>
<location line="+24"/>
<source>Specify configuration file (default: communistcoin.conf)</source>
<translation>指定配置文件 (默认为 communistcoin.conf)
</translation>
</message>
<message>
<location line="+3"/>
<source>Specify pid file (default: communistcoind.pid)</source>
<translation>指定 pid 文件 (默认为 communistcoind.pid)
</translation>
</message>
<message>
<location line="-1"/>
<source>Specify data directory</source>
<translation>指定数据目录
</translation>
</message>
<message>
<location line="-9"/>
<source>Set database cache size in megabytes (default: 25)</source>
<translation>设置数据库缓冲区大小 (缺省: 25MB)</translation>
</message>
<message>
<location line="-28"/>
<source>Listen for connections on <port> (default: 9333 or testnet: 19333)</source>
<translation>监听端口连接 <port> (缺省: 9333 or testnet: 19333)</translation>
</message>
<message>
<location line="+5"/>
<source>Maintain at most <n> connections to peers (default: 125)</source>
<translation>最大连接数 <n> (缺省: 125)</translation>
</message>
<message>
<location line="-48"/>
<source>Connect to a node to retrieve peer addresses, and disconnect</source>
<translation>连接一个节点并获取对端地址, 然后断开连接</translation>
</message>
<message>
<location line="+82"/>
<source>Specify your own public address</source>
<translation>指定您的公共地址</translation>
</message>
<message>
<location line="+3"/>
<source>Threshold for disconnecting misbehaving peers (default: 100)</source>
<translation>Threshold for disconnecting misbehaving peers (缺省: 100)</translation>
</message>
<message>
<location line="-134"/>
<source>Number of seconds to keep misbehaving peers from reconnecting (default: 86400)</source>
<translation>Number of seconds to keep misbehaving peers from reconnecting (缺省: 86400)</translation>
</message>
<message>
<location line="-29"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv4: %s</source>
<translation>设置RPC监听端口%u时发生错误, IPv4:%s</translation>
</message>
<message>
<location line="+27"/>
<source>Listen for JSON-RPC connections on <port> (default: 9332 or testnet: 19332)</source>
<translation>JSON-RPC连接监听端口<port> (缺省:9332 testnet:19332)</translation>
</message>
<message>
<location line="+37"/>
<source>Accept command line and JSON-RPC commands</source>
<translation>接受命令行和 JSON-RPC 命令
</translation>
</message>
<message>
<location line="+76"/>
<source>Run in the background as a daemon and accept commands</source>
<translation>在后台运行并接受命令
</translation>
</message>
<message>
<location line="+37"/>
<source>Use the test network</source>
<translation>使用测试网络
</translation>
</message>
<message>
<location line="-112"/>
<source>Accept connections from outside (default: 1 if no -proxy or -connect)</source>
<translation>接受来自外部的连接 (缺省: 如果不带 -proxy or -connect 参数设置为1)</translation>
</message>
<message>
<location line="-80"/>
<source>%s, you must set a rpcpassword in the configuration file:
%s
It is recommended you use the following random password:
rpcuser=communistcoinrpc
rpcpassword=%s
(you do not need to remember this password)
The username and password MUST NOT be the same.
If the file does not exist, create it with owner-readable-only file permissions.
It is also recommended to set alertnotify so you are notified of problems;
for example: alertnotify=echo %%s | mail -s "Communistcoin Alert" [email protected]
</source>
<translation>%s, 您必须在配置文件设置rpcpassword:
%s
建议您使用下面的随机密码:
rpcuser=communistcoinrpc
rpcpassword=%s
(您无需记住此密码)
用户名和密码 必! 须! 不一样。
如果配置文件不存在,请自行建立一个只有所有者拥有只读权限的文件。
推荐您开启提示通知以便收到错误通知,
像这样: alertnotify=echo %%s | mail -s "Communistcoin Alert" [email protected]
</translation>
</message>
<message>
<location line="+17"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv6, falling back to IPv4: %s</source>
<translation>在IPv6模式下设置RPC监听端口 %u 失败,返回到IPv4模式: %s</translation>
</message>
<message>
<location line="+3"/>
<source>Bind to given address and always listen on it. Use [host]:port notation for IPv6</source>
<translation>绑定指定的IP地址开始监听。IPv6地址请使用[host]:port 格式</translation>
</message>
<message>
<location line="+3"/>
<source>Cannot obtain a lock on data directory %s. Communistcoin is probably already running.</source>
<translation>无法给数据目录 %s上锁。本软件可能已经在运行。</translation>
</message>
<message>
<location line="+3"/>
<source>Error: The transaction was rejected! This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation>错误:该交易被拒绝!发生这种错误的原因可能是:钱包中的莱特币已经被用掉,有可能您复制了wallet.dat钱包文件,然后用复制的钱包文件支付了莱特币,但是这个钱包文件中没有记录。</translation>
</message>
<message>
<location line="+4"/>
<source>Error: This transaction requires a transaction fee of at least %s because of its amount, complexity, or use of recently received funds!</source>
<translation>错误:因为该交易的数量、复杂度或者动用了刚收到不久的资金,您需要支付不少于%s的交易费用。</translation>
</message>
<message>
<location line="+3"/>
<source>Execute command when a relevant alert is received (%s in cmd is replaced by message)</source>
<translation>当收到相关通知时执行命令(命令行中的 %s 的替换为消息)</translation>
</message>
<message>
<location line="+3"/>
<source>Execute command when a wallet transaction changes (%s in cmd is replaced by TxID)</source>
<translation>当钱包交易发生变化时执行命令 (命令行中的 %s 会被替换成交易ID)</translation>
</message>
<message>
<location line="+11"/>
<source>Set maximum size of high-priority/low-fee transactions in bytes (default: 27000)</source>
<translation>Set maximum size of high-priority/low-fee transactions in bytes (default: 27000)</translation>
</message>
<message>
<location line="+6"/>
<source>This is a pre-release test build - use at your own risk - do not use for mining or merchant applications</source>
<translation>这是测试用的预发布版本 - 请谨慎使用 - 不要用来挖矿,或者在正式商用环境下使用</translation>
</message>
<message>
<location line="+5"/>
<source>Warning: -paytxfee is set very high! This is the transaction fee you will pay if you send a transaction.</source>
<translation>警告:-paytxfee 交易费设置得太高了!每笔交易都将支付交易费。</translation>
</message>
<message>
<location line="+3"/>
<source>Warning: Displayed transactions may not be correct! You may need to upgrade, or other nodes may need to upgrade.</source>
<translation>警告:显示的交易可能不正确!您需要升级客户端软件,或者网络上的其他节点需要升级。</translation>
</message>
<message>
<location line="+3"/>
<source>Warning: Please check that your computer's date and time are correct! If your clock is wrong Communistcoin will not work properly.</source>
<translation>警告:请检查电脑的日期时间设置是否正确!时间错误可能会导致莱特币客户端运行异常。</translation>
</message>
<message>
<location line="+3"/>
<source>Warning: error reading wallet.dat! All keys read correctly, but transaction data or address book entries might be missing or incorrect.</source>
<translation>警告:钱包文件wallet.dat读取失败!最重要的公钥、私钥数据都没有问题,但是交易记录或地址簿数据不正确,或者存在数据丢失。</translation>
</message>
<message>
<location line="+3"/>
<source>Warning: wallet.dat corrupt, data salvaged! Original wallet.dat saved as wallet.{timestamp}.bak in %s; if your balance or transactions are incorrect you should restore from a backup.</source>
<translation>警告:钱包文件wallet.dat损坏! 原始的钱包文件已经备份到%s目录下并重命名为{timestamp}.bak 。如果您的账户余额或者交易记录不正确,请使用您的钱包备份文件恢复。</translation>
</message>
<message>
<location line="+14"/>
<source>Attempt to recover private keys from a corrupt wallet.dat</source>
<translation>尝试从损坏的钱包文件wallet.dat中恢复私钥</translation>
</message>
<message>
<location line="+2"/>
<source>Block creation options:</source>
<translation>数据块创建选项:</translation>
</message>
<message>
<location line="+5"/>
<source>Connect only to the specified node(s)</source>
<translation>仅连接到指定节点</translation>
</message>
<message>
<location line="+3"/>
<source>Corrupted block database detected</source>
<translation>检测发现数据块数据库损坏。请使用 -reindex参数重启客户端。</translation>
</message>
<message>
<location line="+1"/>
<source>Discover own IP address (default: 1 when listening and no -externalip)</source>
<translation>发现自己的IP地址(缺省:不带 -externalip 参数监听时设置为1)</translation>
</message>
<message>
<location line="+1"/>
<source>Do you want to rebuild the block database now?</source>
<translation>你想现在就重建块数据库吗?</translation>
</message>
<message>
<location line="+2"/>
<source>Error initializing block database</source>
<translation>初始化数据块数据库出错</translation>
</message>
<message>
<location line="+1"/>
<source>Error initializing wallet database environment %s!</source>
<translation>Error initializing wallet database environment %s!</translation>
</message>
<message>
<location line="+1"/>
<source>Error loading block database</source>
<translation>导入数据块数据库出错</translation>
</message>
<message>
<location line="+4"/>
<source>Error opening block database</source>
<translation>导入数据块数据库出错</translation>
</message>
<message>
<location line="+2"/>
<source>Error: Disk space is low!</source>
<translation>错误:磁盘剩余空间低!</translation>
</message>
<message>
<location line="+1"/>
<source>Error: Wallet locked, unable to create transaction!</source>
<translation>错误:钱包被锁定,无法创建交易!</translation>
</message>
<message>
<location line="+1"/>
<source>Error: system error: </source>
<translation>错误:系统出错。</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to listen on any port. Use -listen=0 if you want this.</source>
<translation>监听端口失败。请使用 -listen=0 参数。</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to read block info</source>
<translation>无法读取数据块信息</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to read block</source>
<translation>读取数据块失败</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to sync block index</source>
<translation>无法同步数据块索引</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to write block index</source>
<translation>无法写入数据块索引</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to write block info</source>
<translation>无法写入数据块信息</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to write block</source>
<translation>无法写数据块</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to write file info</source>
<translation>无法写入文件信息</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to write to coin database</source>
<translation>无法写入coin数据库</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to write transaction index</source>
<translation>无法写入交易索引</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to write undo data</source>
<translation>无法写入回滚信息</translation>
</message>
<message>
<location line="+2"/>
<source>Find peers using DNS lookup (default: 1 unless -connect)</source>
<translation>通过DNS查找节点(缺省:1 除非使用 -connect 选项)</translation>
</message>
<message>
<location line="+1"/>
<source>Generate coins (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>How many blocks to check at startup (default: 288, 0 = all)</source>
<translation>启动时检测多少个数据块(缺省:288,0=所有)</translation>
</message>
<message>
<location line="+1"/>
<source>How thorough the block verification is (0-4, default: 3)</source>
<translation>How thorough the block verification is (0-4, default: 3)</translation>
</message>
<message>
<location line="+19"/>
<source>Not enough file descriptors available.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Rebuild block chain index from current blk000??.dat files</source>
<translation>重新为当前的blk000??.dat文件建立索引</translation>
</message>
<message>
<location line="+16"/>
<source>Set the number of threads to service RPC calls (default: 4)</source>
<translation>设置使用调用服务 RPC 的线程数量(默认:4)</translation>
</message>
<message>
<location line="+26"/>
<source>Verifying blocks...</source>
<translation>正在验证数据库的完整性...</translation>
</message>
<message>
<location line="+1"/>
<source>Verifying wallet...</source>
<translation>正在检测钱包的完整性...</translation>
</message>
<message>
<location line="-69"/>
<source>Imports blocks from external blk000??.dat file</source>
<translation>从blk000??.dat文件导入数据块</translation>
</message>
<message>
<location line="-76"/>
<source>Set the number of script verification threads (up to 16, 0 = auto, <0 = leave that many cores free, default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+77"/>
<source>Information</source>
<translation>信息</translation>
</message>
<message>
<location line="+3"/>
<source>Invalid -tor address: '%s'</source>
<translation>非法的 -tor 地址:'%s' </translation>
</message>
<message>
<location line="+1"/>
<source>Invalid amount for -minrelaytxfee=<amount>: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Invalid amount for -mintxfee=<amount>: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Maintain a full transaction index (default: 0)</source>
<translation>维护一份完整的交易索引(缺省:0)</translation>
</message>
<message>
<location line="+2"/>
<source>Maximum per-connection receive buffer, <n>*1000 bytes (default: 5000)</source>
<translation>每个连接的最大接收缓存,<n>*1000 字节(缺省:5000)</translation>
</message>
<message>
<location line="+1"/>
<source>Maximum per-connection send buffer, <n>*1000 bytes (default: 1000)</source>
<translation>每个连接的最大发送缓存,<n>*1000 字节(缺省:1000)</translation>
</message>
<message>
<location line="+2"/>
<source>Only accept block chain matching built-in checkpoints (default: 1)</source>
<translation>仅接受符合客户端检查点设置的数据块文件</translation>
</message>
<message>
<location line="+1"/>
<source>Only connect to nodes in network <net> (IPv4, IPv6 or Tor)</source>
<translation>仅连接至指定网络的节点<net>(IPv4, IPv6 或者 Tor)</translation>
</message>
<message>
<location line="+2"/>
<source>Output extra debugging information. Implies all other -debug* options</source>
<translation>输出额外的调试信息。打开所有 -debug* 开关</translation>
</message>
<message>
<location line="+1"/>
<source>Output extra network debugging information</source>
<translation>输出额外的网络调试信息</translation>
</message>
<message>
<location line="+2"/>
<source>Prepend debug output with timestamp</source>
<translation>为调试输出信息添加时间戳</translation>
</message>
<message>
<location line="+5"/>
<source>SSL options: (see the Communistcoin Wiki for SSL setup instructions)</source>
<translation>SSL选项:(参见Communistcoin Wiki关于SSL设置栏目)</translation>
</message>
<message>
<location line="+1"/>
<source>Select the version of socks proxy to use (4-5, default: 5)</source>
<translation>请选择Socks代理服务器版本 (4 或 5, 缺省: 5)</translation>
</message>
<message>
<location line="+3"/>
<source>Send trace/debug info to console instead of debug.log file</source>
<translation>跟踪/调试信息输出到控制台,不输出到debug.log文件</translation>
</message>
<message>
<location line="+1"/>
<source>Send trace/debug info to debugger</source>
<translation>跟踪/调试信息输出到 调试器debugger</translation>
</message>
<message>
<location line="+5"/>
<source>Set maximum block size in bytes (default: 250000)</source>
<translation>设置最大数据块大小(缺省:250000)</translation>
</message>
<message>
<location line="+1"/>
<source>Set minimum block size in bytes (default: 0)</source>
<translation>设置最小数据块大小(缺省:0)</translation>
</message>
<message>
<location line="+2"/>
<source>Shrink debug.log file on client startup (default: 1 when no -debug)</source>
<translation>客户端启动时压缩debug.log文件(缺省:no-debug模式时为1)</translation>
</message>
<message>
<location line="+1"/>
<source>Signing transaction failed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Specify connection timeout in milliseconds (default: 5000)</source>
<translation>设置连接超时时间(缺省:5000毫秒)</translation>
</message>
<message>
<location line="+4"/>
<source>System error: </source>
<translation>系统错误:</translation>
</message>
<message>
<location line="+4"/>
<source>Transaction amount too small</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Transaction amounts must be positive</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Transaction too large</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Use UPnP to map the listening port (default: 0)</source>
<translation>使用UPnp映射监听端口(缺省: 0)</translation>
</message>
<message>
<location line="+1"/>
<source>Use UPnP to map the listening port (default: 1 when listening)</source>
<translation>使用UPnp映射监听端口(缺省: 监听状态设为1)</translation>
</message>
<message>
<location line="+1"/>
<source>Use proxy to reach tor hidden services (default: same as -proxy)</source>
<translation>使用代理服务器访问隐藏服务(缺省:同 -proxy)</translation>
</message>
<message>
<location line="+2"/>
<source>Username for JSON-RPC connections</source>
<translation>JSON-RPC连接用户名
</translation>
</message>
<message>
<location line="+4"/>
<source>Warning</source>
<translation>警告</translation>
</message>
<message>
<location line="+1"/>
<source>Warning: This version is obsolete, upgrade required!</source>
<translation>警告:该软件版本已过时,请升级!</translation>
</message>
<message>
<location line="+1"/>
<source>You need to rebuild the databases using -reindex to change -txindex</source>
<translation>You need to rebuild the databases using -reindex to change -txindex</translation>
</message>
<message>
<location line="+1"/>
<source>wallet.dat corrupt, salvage failed</source>
<translation>钱包文件wallet.dat损坏,抢救备份失败</translation>
</message>
<message>
<location line="-50"/>
<source>Password for JSON-RPC connections</source>
<translation>JSON-RPC连接密码
</translation>
</message>
<message>
<location line="-67"/>
<source>Allow JSON-RPC connections from specified IP address</source>
<translation>允许从指定IP接受到的JSON-RPC连接
</translation>
</message>
<message>
<location line="+76"/>
<source>Send commands to node running on <ip> (default: 127.0.0.1)</source>
<translation>向IP地址为 <ip> 的节点发送指令 (缺省: 127.0.0.1)
</translation>
</message>
<message>
<location line="-120"/>
<source>Execute command when the best block changes (%s in cmd is replaced by block hash)</source>
<translation>当最佳数据块变化时执行命令 (命令行中的 %s 会被替换成数据块哈希值)</translation>
</message>
<message>
<location line="+147"/>
<source>Upgrade wallet to latest format</source>
<translation>将钱包升级到最新的格式</translation>
</message>
<message>
<location line="-21"/>
<source>Set key pool size to <n> (default: 100)</source>
<translation>设置密钥池大小为 <n> (缺省: 100)
</translation>
</message>
<message>
<location line="-12"/>
<source>Rescan the block chain for missing wallet transactions</source>
<translation>重新扫描数据链以查找遗漏的交易
</translation>
</message>
<message>
<location line="+35"/>
<source>Use OpenSSL (https) for JSON-RPC connections</source>
<translation>为 JSON-RPC 连接使用 OpenSSL (https)连接</translation>
</message>
<message>
<location line="-26"/>
<source>Server certificate file (default: server.cert)</source>
<translation>服务器证书 (默认为 server.cert)
</translation>
</message>
<message>
<location line="+1"/>
<source>Server private key (default: server.pem)</source>
<translation>服务器私钥 (默认为 server.pem)
</translation>
</message>
<message>
<location line="-151"/>
<source>Acceptable ciphers (default: TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)</source>
<translation>可接受的加密器 (默认为 TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)
</translation>
</message>
<message>
<location line="+165"/>
<source>This help message</source>
<translation>该帮助信息
</translation>
</message>
<message>
<location line="+6"/>
<source>Unable to bind to %s on this computer (bind returned error %d, %s)</source>
<translation>无法绑定本机端口 %s (返回错误消息 %d, %s)</translation>
</message>
<message>
<location line="-91"/>
<source>Connect through socks proxy</source>
<translation>通过 socks 代理连接</translation>
</message>
<message>
<location line="-10"/>
<source>Allow DNS lookups for -addnode, -seednode and -connect</source>
<translation>使用 -addnode, -seednode 和 -connect选项时允许DNS查找</translation>
</message>
<message>
<location line="+55"/>
<source>Loading addresses...</source>
<translation>正在加载地址...</translation>
</message>
<message>
<location line="-35"/>
<source>Error loading wallet.dat: Wallet corrupted</source>
<translation>wallet.dat钱包文件加载错误:钱包损坏</translation>
</message>
<message>
<location line="+1"/>
<source>Error loading wallet.dat: Wallet requires newer version of Communistcoin</source>
<translation>wallet.dat钱包文件加载错误:请升级到最新Communistcoin客户端</translation>
</message>
<message>
<location line="+93"/>
<source>Wallet needed to be rewritten: restart Communistcoin to complete</source>
<translation>钱包文件需要重写:请退出并重新启动Communistcoin客户端</translation>
</message>
<message>
<location line="-95"/>
<source>Error loading wallet.dat</source>
<translation>wallet.dat钱包文件加载错误</translation>
</message>
<message>
<location line="+28"/>
<source>Invalid -proxy address: '%s'</source>
<translation>非法的代理地址: '%s'</translation>
</message>
<message>
<location line="+56"/>
<source>Unknown network specified in -onlynet: '%s'</source>
<translation>被指定的是未知网络 -onlynet: '%s'</translation>
</message>
<message>
<location line="-1"/>
<source>Unknown -socks proxy version requested: %i</source>
<translation>被指定的是未知socks代理版本: %i</translation>
</message>
<message>
<location line="-96"/>
<source>Cannot resolve -bind address: '%s'</source>
<translation>无法解析 -bind 端口地址: '%s'</translation>
</message>
<message>
<location line="+1"/>
<source>Cannot resolve -externalip address: '%s'</source>
<translation>无法解析 -externalip 地址: '%s'</translation>
</message>
<message>
<location line="+44"/>
<source>Invalid amount for -paytxfee=<amount>: '%s'</source>
<translation>非法金额 -paytxfee=<amount>: '%s'</translation>
</message>
<message>
<location line="+1"/>
<source>Invalid amount</source>
<translation>金额不对</translation>
</message>
<message>
<location line="-6"/>
<source>Insufficient funds</source>
<translation>金额不足</translation>
</message>
<message>
<location line="+10"/>
<source>Loading block index...</source>
<translation>加载数据块索引...</translation>
</message>
<message>
<location line="-57"/>
<source>Add a node to connect to and attempt to keep the connection open</source>
<translation>添加节点并与其保持连接</translation>
</message>
<message>
<location line="-25"/>
<source>Unable to bind to %s on this computer. Communistcoin is probably already running.</source>
<translation>无法在本机绑定 %s 端口 . 莱特币客户端软件可能已经在运行.</translation>
</message>
<message>
<location line="+64"/>
<source>Fee per KB to add to transactions you send</source>
<translation>每发送1KB交易所需的费用</translation>
</message>
<message>
<location line="+19"/>
<source>Loading wallet...</source>
<translation>正在加载钱包...</translation>
</message>
<message>
<location line="-52"/>
<source>Cannot downgrade wallet</source>
<translation>无法降级钱包格式</translation>
</message>
<message>
<location line="+3"/>
<source>Cannot write default address</source>
<translation>无法写入缺省地址</translation>
</message>
<message>
<location line="+64"/>
<source>Rescanning...</source>
<translation>正在重新扫描...</translation>
</message>
<message>
<location line="-57"/>
<source>Done loading</source>
<translation>加载完成</translation>
</message>
<message>
<location line="+82"/>
<source>To use the %s option</source>
<translation>使用 %s 选项</translation>
</message>
<message>
<location line="-74"/>
<source>Error</source>
<translation>错误</translation>
</message>
<message>
<location line="-31"/>
<source>You must set rpcpassword=<password> in the configuration file:
%s
If the file does not exist, create it with owner-readable-only file permissions.</source>
<translation>您必须在配置文件中加入选项 rpcpassword :
%s
如果配置文件不存在,请新建,并将文件权限设置为仅允许文件所有者读取.</translation>
</message>
</context>
</TS><|fim▁end|> | <translation>&验证消息</translation>
</message> |
<|file_name|>convert_to_records.py<|end_file_name|><|fim▁begin|>"""Converts image data to TFRecords file format with Example protos.
The image data set is expected to reside in JPEG files located in the
following directory structure.
data_dir/run/video1/00001.jpeg
data_dir/run/video1/00002.jpeg
data_dir/run/video1/00003.jpeg
...
data_dir/run/video2/00001.jpeg
data_dir/run/video2/00002.jpeg
...
where the first-level sub-directory (e.g. 'run') is the unique label associated
with these images and each second-level directory holds the frames of one video.
This TensorFlow script converts the training and evaluation data into
a sharded data set consisting of TFRecord files
train_directory/train-00000-of-00064
train_directory/train-00001-of-00064
...
train_directory/train-00063-of-00064
and
validation_directory/validation-00000-of-00008
validation_directory/validation-00001-of-00008
...
validation_directory/validation-00007-of-00008
where we have selected 64 and 8 shards for each data set. Each record
within the TFRecord file is a serialized Example proto. The Example proto
contains the following fields:
raw/image/001:
...
raw/image/nnn: string containing JPEG encoded image in RGB colorspace
image/height: integer, image height in pixels
image/width: integer, image width in pixels
image/colorspace: string, specifying the colorspace, always 'RGB'
image/channels: integer, specifying the number of channels, always 3
image/format: string, specifying the format, always 'JPEG'
image/filename: string containing the basename of the image file
e.g. '00001.JPEG' or '00002.JPEG'
image/class/label: integer specifying the index in a classification layer.
The label ranges from [0, num_labels] where 0 is unused and left as
the background class.
image/class/text: string specifying the human-readable version of the label
e.g. 'walk'
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import random
import sys
import threading
import numpy as np
import tensorflow as tf
tf.app.flags.DEFINE_string('train_directory', '/tmp/dataset/train_directory',
'Training data directory')
tf.app.flags.DEFINE_string('validation_directory', '/tmp/dataset/train_directory',
'Validation data directory')
tf.app.flags.DEFINE_string('output_directory', '/tmp/dataset/result',
'Output data directory')
tf.app.flags.DEFINE_string('label_file', '/tmp/dataset/label.txt', 'Labels file')
tf.app.flags.DEFINE_integer('train_shards', 64,
'Number of shards in training TFRecord files.')
tf.app.flags.DEFINE_integer('validation_shards', 8,
'Number of shards in validation TFRecord files.')
tf.app.flags.DEFINE_integer('sequence_length', 16,
'The length of one video clips ')
tf.app.flags.DEFINE_integer('num_threads', 4,
'Number of threads to preprocess the images.')
tf.app.flags.DEFINE_boolean('sequence_random', True,
'Determine whether to shuffle the image order or not.')
FLAGS = tf.app.flags.FLAGS
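# Example invocation (illustrative only; the paths and shard counts below are
# assumptions -- substitute your own dataset layout):
#   python convert_to_records.py \
#     --train_directory=/tmp/dataset/train_directory \
#     --validation_directory=/tmp/dataset/validation_directory \
#     --output_directory=/tmp/dataset/result \
#     --label_file=/tmp/dataset/label.txt \
#     --train_shards=64 --validation_shards=8 \
#     --sequence_length=16 --num_threads=4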
def _int64_feature(value):
"""Wrapper for inserting int64 features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _bytes_feature(value):
"""Wrapper for inserting bytes features into Example proto."""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _convert_to_example(foldername, images_buffer, label, text, height, width):
"""Build an Example proto for an example.
Args:
foldername: string, path to an image file, e.g., '/training_data/walk/video1'
images_buffer: list, containing string of JPEG encoding of RGB image
label: integer, identifier for the ground truth for the network
text: string, unique human-readable, e.g. 'dog'
height: integer, image height in pixels
width: integer, image width in pixels
Returns:
Example proto
"""<|fim▁hole|> image_format = 'JPEG'
# create the feature data for the TFRecord example
images = {}
for index, image in enumerate(images_buffer):
images['raw/image/%03d' % index] = _bytes_feature(image)
feature_dict = {
'image/height': _int64_feature(height),
'image/width': _int64_feature(width),
'image/colorspace': _bytes_feature(colorspace),
'image/channels': _int64_feature(channels),
'image/class/label': _int64_feature(label),
'image/class/text': _bytes_feature(text),
'image/format': _bytes_feature(image_format),
'image/filename': _bytes_feature(os.path.basename(foldername)),
}
feature_dict.update(images)
# create the TFRecord Example
example = tf.train.Example(features=tf.train.Features(feature=feature_dict))
return example
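# Sketch of how a consumer could parse one of these serialized Examples back.
# The feature keys come from _convert_to_example above; this helper is not part
# of the original script and is shown only for illustration:
#   def _parse_example(serialized_example, sequence_length=16):
#     features = {'image/height': tf.FixedLenFeature([], tf.int64),
#                 'image/width': tf.FixedLenFeature([], tf.int64),
#                 'image/class/label': tf.FixedLenFeature([], tf.int64)}
#     for i in range(sequence_length):
#       features['raw/image/%03d' % i] = tf.FixedLenFeature([], tf.string)
#     return tf.parse_single_example(serialized_example, features)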
class ImageCoder(object):
"""Helper class that provides TensorFlow image coding utilities."""
def __init__(self):
# Create a single Session to run all image coding calls.
self._sess = tf.Session()
# Initializes function that converts PNG to JPEG data.
self._png_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_png(self._png_data, channels=3)
self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that decodes RGB JPEG data.
self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
def png_to_jpeg(self, image_data):
return self._sess.run(self._png_to_jpeg,
feed_dict={self._png_data: image_data})
def decode_jpeg(self, image_data):
image = self._sess.run(self._decode_jpeg,
feed_dict={self._decode_jpeg_data: image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
def _is_png(filename):
"""Determine if a file contains a PNG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a PNG.
"""
return '.png' in filename
def _split(arr, size):
"""Split an arrary according to the size parameters, the last element of the
output array takes the last `size` elemnts of `arr`
Args:
arr: array, input array
size: the size used to split the array
Returns:
sub-array
Examples:
_split([1,2,3,4,5,6], 5) #=> [[1,2,3,4,5], [2,3,4,5,6]]
_split([1,2,3,4,5,6], 3) #=> [[1,2,3], [4,5,6]]
"""
arr_size = len(arr)
if arr_size < size:
raise ValueError('sequence length is too long, please set the length '
'smaller than the video length')
  elif arr_size == size:
    # Wrap in a list so callers always receive a list of clips, even when the
    # video length equals the sequence length exactly.
    return [arr]
result = []
last_element = arr[-size:]
iter_num = arr_size//size
  for _ in range(iter_num):
    piece = arr[:size]
    result.append(piece)
    arr = arr[size:]
  # Append the trailing window only when leftover frames remain; otherwise an
  # evenly divisible video would end up with a duplicated final clip.
  if arr:
    result.append(last_element)
return result
def _process_video(foldername, coder):
"""Process a single video file.
Args:
foldernames: string, path to a video folder e.g., '/path/to/video'.
coder: instance of ImageCoder to provide TensorFlow image coding utils.
Returns:
videos_buffer: list, contains list of video with specific sequence length.
These video is actually list of strings of JPEG encoding of RGB image.
height: integer, image height in pixels.
width: integer, image width in pixels.
"""
se_size = FLAGS.sequence_length
# Read the image file.
images_data = []
filenames = tf.gfile.Glob(foldername + '/*')
for filename in filenames:
    image_data = tf.gfile.FastGFile(filename, 'rb').read()  # read raw JPEG bytes
# Convert any PNG to JPEG's for consistency.
if _is_png(filename):
image_data = coder.png_to_jpeg(image_data)
# Decode the RGB JPEG.
image = coder.decode_jpeg(image_data)
# Check that image converted to RGB
assert len(image.shape) == 3
height = image.shape[0]
width = image.shape[1]
assert image.shape[2] == 3
# Add the image to the images data
images_data.append(image_data)
videos_data = _split(images_data, se_size)
return videos_data, height, width
def _process_image_files_batch(coder, thread_index, ranges, name, foldernames,
texts, labels, num_shards):
"""Processes and saves list of images as TFRecord in 1 thread.
Args:
coder: instance of ImageCoder to provide TensorFlow image coding utils.
thread_index: integer, unique batch to run index is within [0, len(ranges)).
ranges: list of pairs of integers specifying ranges of each batches to
analyze in parallel.
name: string, unique identifier specifying the data set
foldernames: list of strings; each string is a path to a video file
texts: list of strings; each string is human readable, e.g. 'dog'
labels: list of integer; each integer identifies the ground truth
num_shards: integer number of shards for this data set.
"""
# Each thread produces N shards where N = int(num_shards / num_threads).
# For instance, if num_shards = 128, and the num_threads = 2, then the first
# thread would produce shards [0, 64).
num_threads = len(ranges)
assert not num_shards % num_threads
num_shards_per_batch = num_shards // num_threads
shard_ranges = np.linspace(ranges[thread_index][0],
ranges[thread_index][1],
num_shards_per_batch + 1).astype(int)
num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
counter = 0
for s in range(num_shards_per_batch):
# Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
shard = thread_index * num_shards_per_batch + s
output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
output_file = os.path.join(FLAGS.output_directory, output_filename)
writer = tf.python_io.TFRecordWriter(output_file)
shard_counter = 0
files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
for i in files_in_shard:
foldername = foldernames[i]
label = labels[i]
text = texts[i]
videos_buffer, height, width = _process_video(foldername, coder)
for video_buffer in videos_buffer:
example = _convert_to_example(foldername, video_buffer, label,
text, height, width)
writer.write(example.SerializeToString())
counter += 1
shard_counter += 1
if not counter % 1000:
print('%s [thread %d]: Processed %d videos in thread batch.' %
(datetime.now(), thread_index, counter))
sys.stdout.flush()
print('%s [thread %d]: Wrote %d videos' %
(datetime.now(), thread_index, shard_counter))
sys.stdout.flush()
shard_counter = 0
print('%s [thread %d]: Wrote %d videos in total' %
(datetime.now(), thread_index, counter))
sys.stdout.flush()
def _process_image_files(name, foldernames, texts, labels, num_shards):
"""Process and save list of images as TFRecord of Example protos.
Args:
name: string, unique identifier specifying the data set
foldernames: list of strings; each string is a path to a video folder
texts: list of strings; each string is human readable, e.g. 'dog'
labels: list of integer; each integer identifies the ground truth
num_shards: integer number of shards for this data set.
"""
assert len(foldernames) == len(texts)
assert len(foldernames) == len(labels)
# Break all images into batches with a [ranges[i][0], ranges[i][1]].
spacing = np.linspace(0, len(foldernames), FLAGS.num_threads + 1).astype(np.int)
ranges = []
threads = []
for i in range(len(spacing) - 1):
ranges.append([spacing[i], spacing[i+1]])
# Launch a thread for each batch.
print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
sys.stdout.flush()
# Create a mechanism for monitoring when all threads are finished.
coord = tf.train.Coordinator()
# Create a generic TensorFlow-based utility for converting all image codings.
coder = ImageCoder()
threads = []
for thread_index in range(len(ranges)):
args = (coder, thread_index, ranges, name, foldernames,
texts, labels, num_shards)
t = threading.Thread(target=_process_image_files_batch, args=args)
t.start()
threads.append(t)
# Wait for all the threads to terminate.
coord.join(threads)
def _find_video_folders(data_dir, label_file):
"""Build a list of all video folders and labels in the data set.
Args:
data_dir: string, path to the root directory of video folders.
Assumes that the video data set resides in JPEG files located in
the following directory structure.
data_dir/walk/video1/00001.JPEG
data_dir/walk/video1/00002.JPEG
...
data_dir/walk/video2/00001.jpg
...
where 'walk' is the label associated with these images.
      number 1..n means that all the images in folder video1 belong to one video
label_file: string, path to the label file.
The list of valid labels are held in this file. Assumes that the file
contains entries as such:
walk
run
play
where each line corresponds to a label. We map each label contained in
the file to an integer starting with the integer 0 corresponding to the
label contained in the first line.
Returns:
folders: list of strings; each string is a path to an video folder.
texts: list of strings; each string is the class, e.g. 'walk'
labels: list of integer; each integer identifies the ground truth.
"""
print('Determining list of input files and labels from %s.' % data_dir)
unique_labels = [l.strip() for l in tf.gfile.FastGFile(
label_file, 'r').readlines()]
labels = []
folders = []
texts = []
# Leave label index 0 empty as a background class.
label_index = 1
# Construct the list of video files and labels.
for text in unique_labels:
jpeg_file_path = '%s/%s/*' % (data_dir, text)
matching_files = tf.gfile.Glob(jpeg_file_path)
labels.extend([label_index] * len(matching_files))
texts.extend([text] * len(matching_files))
folders.extend(matching_files)
if not label_index % 100:
print('Finished finding files in %d of %d classes.' % (
label_index, len(labels)))
label_index += 1
# Shuffle the ordering of all video folder in order to guarantee
# random ordering of the videos with respect to label in the
# saved TFRecord files. Make the randomization repeatable.
if FLAGS.sequence_random:
    shuffled_index = list(range(len(folders)))
random.seed(12345)
random.shuffle(shuffled_index)
folders = [folders[i] for i in shuffled_index]
texts = [texts[i] for i in shuffled_index]
labels = [labels[i] for i in shuffled_index]
print('Found %d video files across %d labels inside %s.' %
(len(folders), len(unique_labels), data_dir))
return folders, texts, labels
def _process_dataset(name, directory, num_shards, label_file):
"""Process a complete data set and save it as a TFRecord.
Args:
name: string, unique identifier specifying the data set.
directory: string, root path to the data set.
num_shards: integer number of shards for this data set.
label_file: string, path to the labels file.
"""
foldernames, texts, labels = _find_video_folders(directory, label_file)
_process_image_files(name, foldernames, texts, labels, num_shards)
def main(unused_argv):
assert not FLAGS.train_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards')
assert not FLAGS.validation_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with '
'FLAGS.validation_shards')
print('Saving results to %s' % FLAGS.output_directory)
# Run it!
#_process_dataset('validation', FLAGS.validation_directory,
# FLAGS.validation_shards, FLAGS.label_file)
_process_dataset('train', FLAGS.train_directory,
FLAGS.train_shards, FLAGS.label_file)
if __name__ == '__main__':
tf.app.run()<|fim▁end|> |
colorspace = 'RGB'
channels = 3 |
<|file_name|>issues_loop_mut_cond.rs<|end_file_name|><|fim▁begin|>#![allow(clippy::blocks_in_if_conditions)]<|fim▁hole|>pub fn loop_on_block_condition(u: &mut isize) {
while { *u < 0 } {
*u += 1;
}
}
/// https://github.com/rust-lang/rust-clippy/issues/2584
fn loop_with_unsafe_condition(ptr: *const u8) {
let mut len = 0;
while unsafe { *ptr.offset(len) } != 0 {
len += 1;
}
}
/// https://github.com/rust-lang/rust-clippy/issues/2710
static mut RUNNING: bool = true;
fn loop_on_static_condition() {
unsafe {
while RUNNING {
RUNNING = false;
}
}
}
fn main() {}<|fim▁end|> | #![allow(dead_code)]
/// Issue: https://github.com/rust-lang/rust-clippy/issues/2596 |
<|file_name|>rest.py<|end_file_name|><|fim▁begin|>from __future__ import absolute_import, unicode_literals, division
import hashlib
import hmac
import time
from quadriga.exceptions import RequestError
class RestClient(object):
"""REST client using HMAC SHA256 authentication.
:param url: QuadrigaCX URL.
:type url: str | unicode
:param api_key: QuadrigaCX API key.
:type api_key: str | unicode
:param api_secret: QuadrigaCX API secret.
:type api_secret: str | unicode
:param client_id: QuadrigaCX client ID (number used for user login).
:type client_id: str | unicode | int
:param timeout: Number of seconds to wait for QuadrigaCX to respond to an
API request.
:type timeout: int | float
:param session: User-defined requests.Session object.
:type session: requests.Session
"""
<|fim▁hole|> def __init__(self, url, api_key, api_secret, client_id, timeout, session):
self._url = url
self._api_key = str(api_key)
self._hmac_key = str(api_secret).encode('utf-8')
self._client_id = str(client_id)
self._timeout = timeout
self._session = session
def _handle_response(self, resp):
"""Handle the response from QuadrigaCX.
:param resp: Response from QuadrigaCX.
:type resp: requests.models.Response
:return: Response body.
:rtype: dict
:raise quadriga.exceptions.RequestError: If HTTP OK was not returned.
"""
http_code = resp.status_code
if http_code not in self.http_success_status_codes:
raise RequestError(
response=resp,
message='[HTTP {}] {}'.format(http_code, resp.reason)
)
try:
body = resp.json()
except ValueError:
raise RequestError(
response=resp,
message='[HTTP {}] response body: {}'.format(
http_code,
resp.text
)
)
else:
if 'error' in body:
error_code = body['error'].get('code', '?')
raise RequestError(
response=resp,
message='[HTTP {}][ERR {}] {}'.format(
resp.status_code,
error_code,
body['error'].get('message', 'no error message')
),
error_code=error_code
)
return body
def get(self, endpoint, params=None):
"""Send an HTTP GET request to QuadrigaCX.
:param endpoint: API endpoint.
:type endpoint: str | unicode
:param params: URL parameters.
:type params: dict
:return: Response body from QuadrigaCX.
:rtype: dict
:raise quadriga.exceptions.RequestError: If HTTP OK was not returned.
"""
response = self._session.get(
url=self._url + endpoint,
params=params,
timeout=self._timeout
)
return self._handle_response(response)
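    # Illustrative usage of an unauthenticated call (the endpoint and parameter
    # names here are assumptions based on QuadrigaCX's public API, not part of
    # this module):
    #   rest_client.get('/order_book', params={'book': 'btc_cad'})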
def post(self, endpoint, payload=None):
"""Send an HTTP POST request to QuadrigaCX.
:param endpoint: API endpoint.
:type endpoint: str | unicode
:param payload: Request payload.
:type payload: dict
:return: Response body from QuadrigaCX.
:rtype: dict
:raise quadriga.exceptions.RequestError: If HTTP OK was not returned.
"""
nonce = int(time.time() * 10000)
hmac_msg = str(nonce) + self._client_id + self._api_key
signature = hmac.new(
key=self._hmac_key,
msg=hmac_msg.encode('utf-8'),
digestmod=hashlib.sha256
).hexdigest()
if payload is None:
payload = {}
payload['key'] = self._api_key
payload['nonce'] = nonce
payload['signature'] = signature
response = self._session.post(
url=self._url + endpoint,
json=payload,
timeout=self._timeout
)
return self._handle_response(response)<|fim▁end|> | http_success_status_codes = {200, 201, 202}
|
<|file_name|>c1_Optimizer.hpp<|end_file_name|><|fim▁begin|>/*
* Copyright 1999-2001 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.<|fim▁hole|> * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
*/
class Optimizer VALUE_OBJ_CLASS_SPEC {
private:
IR* _ir;
public:
Optimizer(IR* ir);
IR* ir() const { return _ir; }
// optimizations
void eliminate_conditional_expressions();
void eliminate_blocks();
void eliminate_null_checks();
};<|fim▁end|> | * |
<|file_name|>iostat_darwin.go<|end_file_name|><|fim▁begin|>// +build darwin
package readers<|fim▁hole|> "encoding/json"
"errors"
)
func init() {
Register("IOStat", NewIOStat)
}
func NewIOStat() IReader {
ios := &IOStat{}
ios.Data = make(map[string]interface{})
return ios
}
type IOStat struct {
Data map[string]interface{}
}
// Run gathers load average information from gosigar.
func (ios *IOStat) Run() error {
return errors.New("iostat -x is only available on Linux.")
}
// ToJson serialize Data field to JSON.
func (ios *IOStat) ToJson() ([]byte, error) {
return json.Marshal(ios.Data)
}<|fim▁end|> |
import ( |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Uninett AS
#
# This file is part of Network Administration Visualized (NAV).
#
# NAV is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License version 3 as published by the Free
# Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details. You should have received a copy of the GNU General Public License
# along with NAV. If not, see <http://www.gnu.org/licenses/>.
#
"""Module containing everything regarding patches in SeedDB"""
import logging
from django import forms
from django.http import HttpResponse
from django.shortcuts import render, get_object_or_404
from django.views.decorators.http import require_POST
from nav.models.cabling import Patch, Cabling
from nav.models.manage import Netbox, Interface, Room
from nav.bulkparse import PatchBulkParser
from nav.bulkimport import PatchImporter
from nav.web.seeddb import SeeddbInfo, reverse_lazy
from nav.web.seeddb.constants import SEEDDB_EDITABLE_MODELS
from nav.web.seeddb.page import view_switcher, not_implemented
from nav.web.seeddb.utils.list import render_list
from nav.web.seeddb.utils.bulk import render_bulkimport
from nav.web.seeddb.utils.delete import render_delete
_logger = logging.getLogger(__name__)
class PatchInfo(SeeddbInfo):
"""Class for storing meta information related to patches in SeedDB"""
active = {'patch': True}
active_page = 'patch'
documentation_url = '/doc/reference/cabling_and_patch.html'
caption = 'Patch'
tab_template = 'seeddb/tabs_generic.html'
_title = 'Patch'
verbose_name = Patch._meta.verbose_name
_navpath = [('Patch', reverse_lazy('seeddb-patch'))]
hide_move = True
delete_url = reverse_lazy('seeddb-patch')
delete_url_name = 'seeddb-patch-delete'
back_url = reverse_lazy('seeddb-patch')
add_url = reverse_lazy('seeddb-patch-edit')
bulk_url = reverse_lazy('seeddb-patch-bulk')
class PatchForm(forms.ModelForm):
"""Form for editing and creating patches"""
class Meta(object):
model = Patch
fields = '__all__'
def patch(request):
"""Creates a view switcher containing the appropriate views"""
return view_switcher(
request,
list_view=patch_list,
move_view=not_implemented,
delete_view=patch_delete,
)
<|fim▁hole|>def patch_list(request):
"""The view used when listing all patches"""
query = Patch.objects.none()
info = PatchInfo()
value_list = (
'cabling__room',
'interface__netbox__sysname',
'interface__ifname',
'interface__ifalias',
'cabling__jack',
'split',
)
context = info.template_context
context.update({'rooms': Room.objects.all(), 'netboxes': Netbox.objects.all()})
return render_list(
request,
query,
value_list,
'seeddb-patch-edit',
template='seeddb/list_patches.html',
extra_context=context,
)
def patch_delete(request, object_id=None):
"""The view used when deleting patches"""
info = PatchInfo()
return render_delete(
request,
Patch,
'seeddb-patch',
whitelist=SEEDDB_EDITABLE_MODELS,
extra_context=info.template_context,
object_id=object_id,
)
def patch_edit(request):
"""Renders gui for editing patches"""
context = PatchInfo().template_context
try:
netbox = Netbox.objects.get(pk=request.GET.get('netboxid'))
except (ValueError, Netbox.DoesNotExist):
netbox = Netbox.objects.none()
cables = Cabling.objects.none()
else:
cables = Cabling.objects.filter(room=netbox.room)
context.update(
{'netboxes': Netbox.objects.all(), 'netbox': netbox, 'cables': cables}
)
return render(request, 'seeddb/edit_patch.html', context)
@require_POST
def patch_save(request):
"""Save a patch"""
interface = get_object_or_404(Interface, pk=request.POST.get('interfaceid'))
cable = get_object_or_404(Cabling, pk=request.POST.get('cableid'))
split = request.POST.get('split', '')
_logger.debug('Creating patch for interface %s and cable %s', interface, cable)
try:
Patch.objects.create(interface=interface, cabling=cable, split=split)
except Exception as error:
_logger.debug(error)
return HttpResponse(error, status=500)
return HttpResponse()
@require_POST
def patch_remove(request):
"""Remove all patches from an interface"""
interface = get_object_or_404(Interface, pk=request.POST.get('interfaceid'))
Patch.objects.filter(interface=interface).delete()
return HttpResponse()
def patch_bulk(request):
"""The view used when bulk importing patches"""
info = PatchInfo()
return render_bulkimport(
request,
PatchBulkParser,
PatchImporter,
'seeddb-patch',
extra_context=info.template_context,
)
def load_cell(request):
"""Renders patches for an interface"""
interface = Interface.objects.get(pk=request.GET.get('interfaceid'))
return render(request, 'seeddb/fragments/patches.html', {'interface': interface})<|fim▁end|> | |
<|file_name|>error.py<|end_file_name|><|fim▁begin|>#
# Copyright (C) 2009, 2010 JAGSAT Development Team (see below)
#
# This file is part of JAGSAT.
#
# JAGSAT is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# JAGSAT is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# JAGSAT Development Team is:
# - Juan Pedro Bolivar Puente
# - Alberto Villegas Erce
# - Guillem Medina
# - Sarah Lindstrom
# - Aksel Junkkila
# - Thomas Forss
#
# along with this program. If not, see <http://www.gnu.org/licenses/>.<|fim▁hole|>#
from base.error import *
class CoreError (LoggableError):
pass<|fim▁end|> | |
<|file_name|>config-test.js<|end_file_name|><|fim▁begin|>var should = require('should');
var npg = require('../core/npg').npg;
var config = require('./_test_config_npg');
var request = ({body: {phrase: "111"}});
describe('NPG w/ config', function () {
it('should using fixed key', function () {<|fim▁hole|> })
});<|fim▁end|> | config.useFixedKey.should.be.ok;
});
it('should return correct answer', function () {
npg("111",config.key1,config.key2).should.eql({ md5: '747207ac', b64: '!QWOwMWY3AjM3QzN' }) |
<|file_name|>commands.py<|end_file_name|><|fim▁begin|>import collections
import numbers
import re
import sqlalchemy as sa
from sqlalchemy.ext import compiler as sa_compiler
from sqlalchemy.sql import expression as sa_expression
# At the time of this implementation, no specification for a session token was
# found. After looking at a few session tokens they appear to be the same as
# the aws_secret_access_key pattern, but much longer. An example token can be
# found here:
# https://docs.aws.amazon.com/STS/latest/APIReference/API_GetSessionToken.html
# The regexs for access keys can be found here:
# https://blogs.aws.amazon.com/security/blog/tag/key+rotation
# The pattern of IAM role ARNs can be found here:
# http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-iam
ACCESS_KEY_ID_RE = re.compile('[A-Z0-9]{20}')
SECRET_ACCESS_KEY_RE = re.compile('[A-Za-z0-9/+=]{40}')
TOKEN_RE = re.compile('[A-Za-z0-9/+=]+')
AWS_ACCOUNT_ID_RE = re.compile('[0-9]{12}')
IAM_ROLE_NAME_RE = re.compile('[A-Za-z0-9+=,.@-_]{1,64}')
def _process_aws_credentials(access_key_id=None, secret_access_key=None,
session_token=None, aws_account_id=None,
iam_role_name=None):
if (access_key_id is not None and secret_access_key is not None and
aws_account_id is not None and iam_role_name is not None):
raise TypeError(
'Either access key based credentials or role based credentials '
'should be specified, but not both'
)
credentials = None
if aws_account_id is not None and iam_role_name is not None:
if not AWS_ACCOUNT_ID_RE.match(aws_account_id):
raise ValueError(
'invalid AWS account ID; does not match {pattern}'.format(
pattern=AWS_ACCOUNT_ID_RE.pattern,
)
)
elif not IAM_ROLE_NAME_RE.match(iam_role_name):
raise ValueError(
'invalid IAM role name; does not match {pattern}'.format(
pattern=IAM_ROLE_NAME_RE.pattern,
)
)
credentials = 'aws_iam_role=arn:aws:iam::{0}:role/{1}'.format(
aws_account_id,
iam_role_name,
)
if access_key_id is not None and secret_access_key is not None:
if not ACCESS_KEY_ID_RE.match(access_key_id):
raise ValueError(
'invalid access_key_id; does not match {pattern}'.format(
pattern=ACCESS_KEY_ID_RE.pattern,
)
)
if not SECRET_ACCESS_KEY_RE.match(secret_access_key):
raise ValueError(
'invalid secret_access_key; does not match {pattern}'.format(
pattern=SECRET_ACCESS_KEY_RE.pattern,
)
)
credentials = 'aws_access_key_id={0};aws_secret_access_key={1}'.format(
access_key_id,
secret_access_key,
)
if session_token is not None:
if not TOKEN_RE.match(session_token):
raise ValueError(
'invalid session_token; does not match {pattern}'.format(
pattern=TOKEN_RE.pattern,
)
)
credentials += ';token={0}'.format(session_token)
if credentials is None:
raise TypeError(
'Either access key based credentials or role based credentials '
'should be specified'
)
return credentials
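# Illustrative examples of the credential strings produced above (all key and
# account values are placeholders, not real credentials):
#   _process_aws_credentials(access_key_id='AKIA' + 'A' * 16,
#                            secret_access_key='x' * 40)
#     -> 'aws_access_key_id=AKIAAAAA...;aws_secret_access_key=xxx...'
#   _process_aws_credentials(aws_account_id='123456789012',
#                            iam_role_name='RedshiftCopyUnload')
#     -> 'aws_iam_role=arn:aws:iam::123456789012:role/RedshiftCopyUnload'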
<|fim▁hole|>
class _ExecutableClause(sa_expression.Executable,
sa_expression.ClauseElement):
pass
class UnloadFromSelect(_ExecutableClause):
"""
Prepares a Redshift unload statement to drop a query to Amazon S3
https://docs.aws.amazon.com/redshift/latest/dg/r_UNLOAD_command_examples.html
Parameters
----------
select: sqlalchemy.sql.selectable.Selectable
The selectable Core Table Expression query to unload from.
data_location: str
The Amazon S3 location where the file will be created, or a manifest
file if the `manifest` option is used
access_key_id: str, optional
Access Key. Required unless you supply role-based credentials
(``aws_account_id`` and ``iam_role_name``)
secret_access_key: str, optional
Secret Access Key ID. Required unless you supply role-based credentials
(``aws_account_id`` and ``iam_role_name``)
session_token : str, optional
aws_account_id: str, optional
AWS account ID for role-based credentials. Required unless you supply
key based credentials (``access_key_id`` and ``secret_access_key``)
iam_role_name: str, optional
IAM role name for role-based credentials. Required unless you supply
key based credentials (``access_key_id`` and ``secret_access_key``)
manifest: bool, optional
Boolean value denoting whether data_location is a manifest file.
delimiter: File delimiter, optional
defaults to '|'
fixed_width: iterable of (str, int), optional
List of (column name, length) pairs to control fixed-width output.
encrypted: bool, optional
Write to encrypted S3 key.
gzip: bool, optional
Create file using GZIP compression.
add_quotes: bool, optional
Quote fields so that fields containing the delimiter can be
distinguished.
null: str, optional
Write null values as the given string. Defaults to ''.
escape: bool, optional
For CHAR and VARCHAR columns in delimited unload files, an escape
character (``\\``) is placed before every occurrence of the following
characters: ``\\r``, ``\\n``, ``\\``, the specified delimiter string.
If `add_quotes` is specified, ``"`` and ``'`` are also escaped.
allow_overwrite: bool, optional
Overwrite the key at unload_location in the S3 bucket.
parallel: bool, optional
If disabled unload sequentially as one file.
"""
def __init__(self, select, unload_location, access_key_id=None,
secret_access_key=None, session_token=None,
aws_account_id=None, iam_role_name=None,
manifest=False, delimiter=None, fixed_width=None,
encrypted=False, gzip=False, add_quotes=False, null=None,
escape=False, allow_overwrite=False, parallel=True):
if delimiter is not None and len(delimiter) != 1:
raise ValueError(
'"delimiter" parameter must be a single character'
)
credentials = _process_aws_credentials(
access_key_id=access_key_id,
secret_access_key=secret_access_key,
session_token=session_token,
aws_account_id=aws_account_id,
iam_role_name=iam_role_name,
)
self.select = select
self.unload_location = unload_location
self.credentials = credentials
self.manifest = manifest
self.delimiter = delimiter
self.fixed_width = fixed_width
self.encrypted = encrypted
self.gzip = gzip
self.add_quotes = add_quotes
self.null = null
self.escape = escape
self.allow_overwrite = allow_overwrite
self.parallel = parallel
@sa_compiler.compiles(UnloadFromSelect)
def visit_unload_from_select(element, compiler, **kw):
"""Returns the actual sql query for the UnloadFromSelect class."""
template = """
UNLOAD (:select) TO :unload_location
CREDENTIALS :credentials
{manifest}
{delimiter}
{encrypted}
{fixed_width}
{gzip}
{add_quotes}
{null}
{escape}
{allow_overwrite}
{parallel}
"""
el = element
qs = template.format(
manifest='MANIFEST' if el.manifest else '',
delimiter=(
'DELIMITER AS :delimiter' if el.delimiter is not None else ''
),
encrypted='ENCRYPTED' if el.encrypted else '',
fixed_width='FIXEDWIDTH AS :fixed_width' if el.fixed_width else '',
gzip='GZIP' if el.gzip else '',
add_quotes='ADDQUOTES' if el.add_quotes else '',
escape='ESCAPE' if el.escape else '',
null='NULL AS :null_as' if el.null is not None else '',
allow_overwrite='ALLOWOVERWRITE' if el.allow_overwrite else '',
parallel='PARALLEL OFF' if not el.parallel else '',
)
query = sa.text(qs)
if el.delimiter is not None:
query = query.bindparams(sa.bindparam(
'delimiter', value=element.delimiter, type_=sa.String,
))
if el.fixed_width:
query = query.bindparams(sa.bindparam(
'fixed_width',
value=_process_fixed_width(el.fixed_width),
type_=sa.String,
))
if el.null is not None:
query = query.bindparams(sa.bindparam(
'null_as', value=el.null, type_=sa.String
))
return compiler.process(
query.bindparams(
sa.bindparam('credentials', value=el.credentials, type_=sa.String),
sa.bindparam(
'unload_location', value=el.unload_location, type_=sa.String,
),
sa.bindparam(
'select',
value=compiler.process(
el.select,
literal_binds=True,
),
type_=sa.String,
),
),
**kw
)
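# Rough usage sketch for UnloadFromSelect (the table, bucket and role names are
# made up for illustration):
#   unload = UnloadFromSelect(
#       select=sa.select([my_table.c.id]),
#       unload_location='s3://my-bucket/unload/',
#       aws_account_id='123456789012',
#       iam_role_name='RedshiftCopyUnload',
#       delimiter=',',
#       gzip=True)
#   connection.execute(unload)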
class CopyCommand(_ExecutableClause):
"""
Prepares a Redshift COPY statement.
Parameters
----------
to : sqlalchemy.Table or iterable of sqlalchemy.ColumnElement
The table or columns to copy data into
data_location : str
The Amazon S3 location from where to copy, or a manifest file if
the `manifest` option is used
access_key_id: str, optional
Access Key. Required unless you supply role-based credentials
(``aws_account_id`` and ``iam_role_name``)
secret_access_key: str, optional
Secret Access Key ID. Required unless you supply role-based credentials
(``aws_account_id`` and ``iam_role_name``)
session_token : str, optional
aws_account_id: str, optional
AWS account ID for role-based credentials. Required unless you supply
key based credentials (``access_key_id`` and ``secret_access_key``)
iam_role_name: str, optional
IAM role name for role-based credentials. Required unless you supply
key based credentials (``access_key_id`` and ``secret_access_key``)
format : str, optional
CSV, JSON, or AVRO. Indicates the type of file to copy from
quote : str, optional
Specifies the character to be used as the quote character when using
``format='CSV'``. The default is a double quotation mark ( ``"`` )
delimiter : File delimiter, optional
defaults to ``|``
path_file : str, optional
Specifies an Amazon S3 location to a JSONPaths file to explicitly map
Avro or JSON data elements to columns.
defaults to ``'auto'``
fixed_width: iterable of (str, int), optional
List of (column name, length) pairs to control fixed-width output.
compression : str, optional
GZIP, LZOP, indicates the type of compression of the file to copy
accept_any_date : bool, optional
Allows any date format, including invalid formats such as
``00/00/00 00:00:00``, to be loaded as NULL without generating an error
defaults to False
accept_inv_chars : str, optional
Enables loading of data into VARCHAR columns even if the data contains
invalid UTF-8 characters. When specified each invalid UTF-8 byte is
replaced by the specified replacement character
blanks_as_null : bool, optional
Boolean value denoting whether to load VARCHAR fields with whitespace
only values as NULL instead of whitespace
date_format : str, optional
Specified the date format. If you want Amazon Redshift to automatically
recognize and convert the date format in your source data, specify
``'auto'``
empty_as_null : bool, optional
Boolean value denoting whether to load VARCHAR fields with empty
values as NULL instead of empty string
encoding : str, optional
``'UTF8'``, ``'UTF16'``, ``'UTF16LE'``, ``'UTF16BE'``. Specifies the
encoding type of the load data
defaults to ``'UTF8'``
escape : bool, optional
When this parameter is specified, the backslash character (``\``) in
input data is treated as an escape character. The character that
immediately follows the backslash character is loaded into the table
as part of the current column value, even if it is a character that
normally serves a special purpose
explicit_ids : bool, optional
Override the autogenerated IDENTITY column values with explicit values
from the source data files for the tables
fill_record : bool, optional
Allows data files to be loaded when contiguous columns are missing at
the end of some of the records. The missing columns are filled with
either zero-length strings or NULLs, as appropriate for the data types
of the columns in question.
ignore_blank_lines : bool, optional
Ignores blank lines that only contain a line feed in a data file and
does not try to load them
ignore_header : int, optional
Integer value of number of lines to skip at the start of each file
dangerous_null_delimiter : str, optional
Optional string value denoting what to interpret as a NULL value from
the file. Note that this parameter *is not properly quoted* due to a
difference between redshift's and postgres's COPY commands
interpretation of strings. For example, null bytes must be passed to
redshift's ``NULL`` verbatim as ``'\\0'`` whereas postgres's ``NULL``
accepts ``'\\x00'``.
remove_quotes : bool, optional
Removes surrounding quotation marks from strings in the incoming data.
All characters within the quotation marks, including delimiters, are
retained.
roundec : bool, optional
Rounds up numeric values when the scale of the input value is greater
than the scale of the column
time_format : str, optional
Specified the date format. If you want Amazon Redshift to automatically
recognize and convert the time format in your source data, specify
``'auto'``
trim_blanks : bool, optional
Removes the trailing white space characters from a VARCHAR string
truncate_columns : bool, optional
Truncates data in columns to the appropriate number of characters so
that it fits the column specification
comp_rows : int, optional
Specifies the number of rows to be used as the sample size for
compression analysis
comp_update : bool, optional
Controls whether compression encodings are automatically applied.
If omitted or None, COPY applies automatic compression only if the
target table is empty and all the table columns either have RAW
encoding or no encoding.
If True COPY applies automatic compression if the table is empty, even
if the table columns already have encodings other than RAW.
If False automatic compression is disabled
max_error : int, optional
If the load returns the ``max_error`` number of errors or greater, the
load fails
defaults to 100000
no_load : bool, optional
Checks the validity of the data file without actually loading the data
stat_update : bool, optional
Update statistics automatically regardless of whether the table is
initially empty
manifest : bool, optional
Boolean value denoting whether data_location is a manifest file.
"""
formats = ['CSV', 'JSON', 'AVRO', None]
compression_types = ['GZIP', 'LZOP']
def __init__(self, to, data_location, access_key_id=None,
secret_access_key=None, session_token=None,
aws_account_id=None, iam_role_name=None,
format=None, quote=None,
path_file='auto', delimiter=None, fixed_width=None,
compression=None, accept_any_date=False,
accept_inv_chars=None, blanks_as_null=False, date_format=None,
empty_as_null=False, encoding=None, escape=False,
explicit_ids=False, fill_record=False,
ignore_blank_lines=False, ignore_header=None,
dangerous_null_delimiter=None, remove_quotes=False,
roundec=False, time_format=None, trim_blanks=False,
truncate_columns=False, comp_rows=None, comp_update=None,
max_error=None, no_load=False, stat_update=None,
manifest=False):
credentials = _process_aws_credentials(
access_key_id=access_key_id,
secret_access_key=secret_access_key,
session_token=session_token,
aws_account_id=aws_account_id,
iam_role_name=iam_role_name,
)
if delimiter is not None and len(delimiter) != 1:
raise ValueError('"delimiter" parameter must be a single '
'character')
if ignore_header is not None:
if not isinstance(ignore_header, numbers.Integral):
raise TypeError(
'"ignore_header" parameter should be an integer'
)
if format not in self.formats:
raise ValueError('"format" parameter must be one of %s' %
self.formats)
if compression is not None:
if compression not in self.compression_types:
raise ValueError(
'"compression" parameter must be one of %s' %
self.compression_types
)
table = None
columns = []
if isinstance(to, collections.Iterable):
for column in to:
if table is not None and table != column.table:
raise ValueError(
'All columns must come from the same table: '
'%s comes from %s not %s' % (
column, column.table, table
),
)
columns.append(column)
table = column.table
else:
table = to
self.table = table
self.columns = columns
self.data_location = data_location
self.credentials = credentials
self.format = format
self.quote = quote
self.path_file = path_file
self.delimiter = delimiter
self.fixed_width = fixed_width
self.compression = compression
self.manifest = manifest
self.accept_any_date = accept_any_date
self.accept_inv_chars = accept_inv_chars
self.blanks_as_null = blanks_as_null
self.date_format = date_format
self.empty_as_null = empty_as_null
self.encoding = encoding
self.escape = escape
self.explicit_ids = explicit_ids
self.fill_record = fill_record
self.ignore_blank_lines = ignore_blank_lines
self.ignore_header = ignore_header
self.dangerous_null_delimiter = dangerous_null_delimiter
self.remove_quotes = remove_quotes
self.roundec = roundec
self.time_format = time_format
self.trim_blanks = trim_blanks
self.truncate_columns = truncate_columns
self.comp_rows = comp_rows
self.comp_update = comp_update
self.max_error = max_error
self.no_load = no_load
self.stat_update = stat_update
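# Rough usage sketch for CopyCommand (the identifiers, S3 path and role name
# are illustrative only):
#   copy = CopyCommand(
#       to=my_table,
#       data_location='s3://my-bucket/data/part-',
#       aws_account_id='123456789012',
#       iam_role_name='RedshiftCopyUnload',
#       format='CSV',
#       compression='GZIP',
#       ignore_header=1)
#   connection.execute(copy)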
@sa_compiler.compiles(CopyCommand)
def visit_copy_command(element, compiler, **kw):
"""
Returns the actual sql query for the CopyCommand class.
"""
qs = """COPY {table}{columns} FROM :data_location
WITH CREDENTIALS AS :credentials
{format}
{parameters}"""
parameters = []
bindparams = [
sa.bindparam(
'data_location',
value=element.data_location,
type_=sa.String,
),
sa.bindparam(
'credentials',
value=element.credentials,
type_=sa.String,
),
]
if element.format == 'CSV':
format_ = 'FORMAT AS CSV'
if element.quote is not None:
format_ += ' QUOTE AS :quote_character'
bindparams.append(sa.bindparam(
'quote_character',
value=element.quote,
type_=sa.String,
))
elif element.format == 'JSON':
format_ = 'FORMAT AS JSON AS :json_option'
bindparams.append(sa.bindparam(
'json_option',
value=element.path_file,
type_=sa.String,
))
elif element.format == 'AVRO':
format_ = 'FORMAT AS AVRO AS :avro_option'
bindparams.append(sa.bindparam(
'avro_option',
value=element.path_file,
type_=sa.String,
))
else:
format_ = ''
if element.delimiter is not None:
parameters.append('DELIMITER AS :delimiter_char')
bindparams.append(sa.bindparam(
'delimiter_char',
value=element.delimiter,
type_=sa.String,
))
if element.fixed_width is not None:
parameters.append('FIXEDWIDTH AS :fixedwidth_spec')
bindparams.append(sa.bindparam(
'fixedwidth_spec',
value=_process_fixed_width(element.fixed_width),
type_=sa.String,
))
if element.compression in ['GZIP', 'LZOP']:
parameters.append(element.compression)
if element.manifest:
parameters.append('MANIFEST')
if element.accept_any_date:
parameters.append('ACCEPTANYDATE')
if element.accept_inv_chars is not None:
parameters.append('ACCEPTINVCHARS AS :replacement_char')
bindparams.append(sa.bindparam(
'replacement_char',
value=element.accept_inv_chars,
type_=sa.String
))
if element.blanks_as_null:
parameters.append('BLANKSASNULL')
if element.date_format is not None:
parameters.append('DATEFORMAT AS :dateformat_string')
bindparams.append(sa.bindparam(
'dateformat_string',
value=element.date_format,
type_=sa.String,
))
if element.empty_as_null:
parameters.append('EMPTYASNULL')
if element.encoding in ['UTF8', 'UTF16', 'UTF16LE', 'UTF16BE']:
parameters.append('ENCODING AS ' + element.encoding)
if element.escape:
parameters.append('ESCAPE')
if element.explicit_ids:
parameters.append('EXPLICIT_IDS')
if element.fill_record:
parameters.append('FILLRECORD')
if element.ignore_blank_lines:
parameters.append('IGNOREBLANKLINES')
if element.ignore_header is not None:
parameters.append('IGNOREHEADER AS :number_rows')
bindparams.append(sa.bindparam(
'number_rows',
value=element.ignore_header,
type_=sa.Integer,
))
if element.dangerous_null_delimiter is not None:
parameters.append("NULL AS '%s'" % element.dangerous_null_delimiter)
if element.remove_quotes:
parameters.append('REMOVEQUOTES')
if element.roundec:
parameters.append('ROUNDEC')
if element.time_format is not None:
parameters.append('TIMEFORMAT AS :timeformat_string')
bindparams.append(sa.bindparam(
'timeformat_string',
value=element.time_format,
type_=sa.String,
))
if element.trim_blanks:
parameters.append('TRIMBLANKS')
if element.truncate_columns:
parameters.append('TRUNCATECOLUMNS')
if element.comp_rows:
parameters.append('COMPROWS :numrows')
bindparams.append(sa.bindparam(
'numrows',
value=element.comp_rows,
type_=sa.Integer,
))
if element.comp_update:
parameters.append('COMPUPDATE ON')
elif element.comp_update is not None:
parameters.append('COMPUPDATE OFF')
if element.max_error is not None:
parameters.append('MAXERROR AS :error_count')
bindparams.append(sa.bindparam(
'error_count',
value=element.max_error,
type_=sa.Integer,
))
if element.no_load:
parameters.append('NOLOAD')
if element.stat_update:
parameters.append('STATUPDATE ON')
elif element.stat_update is not None:
parameters.append('STATUPDATE OFF')
columns = ' (%s)' % ', '.join(
compiler.preparer.format_column(column) for column in element.columns
) if element.columns else ''
qs = qs.format(
table=compiler.preparer.format_table(element.table),
columns=columns,
format=format_,
parameters='\n'.join(parameters)
)
return compiler.process(sa.text(qs).bindparams(*bindparams), **kw)<|fim▁end|> |
def _process_fixed_width(spec):
return ','.join(('{0}:{1:d}'.format(col, width) for col, width in spec))
|
<|file_name|>TestLazyPersistFiles.java<|end_file_name|><|fim▁begin|>/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file<|fim▁hole|> * to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
import com.google.common.collect.Iterators;
import com.google.common.util.concurrent.Uninterruptibles;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Assert;
import org.junit.Test;
import java.io.IOException;
import java.util.EnumSet;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
import static org.apache.hadoop.fs.StorageType.RAM_DISK;
import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.fail;
public class TestLazyPersistFiles extends LazyPersistTestCase {
private static final int THREADPOOL_SIZE = 10;
/**
* Append to lazy persist file is denied.
* @throws IOException
*/
@Test
public void testAppendIsDenied() throws IOException {
getClusterBuilder().build();
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path path = new Path("/" + METHOD_NAME + ".dat");
makeTestFile(path, BLOCK_SIZE, true);
try {
client.append(path.toString(), BUFFER_LENGTH,
EnumSet.of(CreateFlag.APPEND), null, null).close();
fail("Append to LazyPersist file did not fail as expected");
} catch (Throwable t) {
LOG.info("Got expected exception ", t);
}
}
/**
* Truncate to lazy persist file is denied.
* @throws IOException
*/
@Test
public void testTruncateIsDenied() throws IOException {
getClusterBuilder().build();
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path path = new Path("/" + METHOD_NAME + ".dat");
makeTestFile(path, BLOCK_SIZE, true);
try {
client.truncate(path.toString(), BLOCK_SIZE/2);
fail("Truncate to LazyPersist file did not fail as expected");
} catch (Throwable t) {
LOG.info("Got expected exception ", t);
}
}
/**
* If one or more replicas of a lazyPersist file are lost, then the file
* must be discarded by the NN, instead of being kept around as a
* 'corrupt' file.
*/
@Test
public void testCorruptFilesAreDiscarded()
throws IOException, InterruptedException, TimeoutException {
getClusterBuilder().setRamDiskReplicaCapacity(2).build();
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
makeTestFile(path1, BLOCK_SIZE, true);
ensureFileReplicasOnStorageType(path1, RAM_DISK);
// Stop the DataNode and sleep for the time it takes the NN to
// detect the DN as being dead.
cluster.shutdownDataNodes();
Thread.sleep(30000L);
assertThat(cluster.getNamesystem().getNumDeadDataNodes(), is(1));
// Next, wait for the redundancy monitor to mark the file as corrupt.
Thread.sleep(2 * DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_DEFAULT * 1000);
// Wait for the LazyPersistFileScrubber to run
Thread.sleep(2 * LAZY_WRITE_FILE_SCRUBBER_INTERVAL_SEC * 1000);
// Ensure that path1 does not exist anymore, whereas path2 does.
assert(!fs.exists(path1));
// We should have zero blocks that needs replication i.e. the one
// belonging to path2.
assertThat(cluster.getNameNode()
.getNamesystem()
.getBlockManager()
.getLowRedundancyBlocksCount(),
is(0L));
}
@Test
public void testDisableLazyPersistFileScrubber()
throws IOException, InterruptedException, TimeoutException {
getClusterBuilder().setRamDiskReplicaCapacity(2).disableScrubber().build();
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
makeTestFile(path1, BLOCK_SIZE, true);
ensureFileReplicasOnStorageType(path1, RAM_DISK);
// Stop the DataNode and sleep for the time it takes the NN to
// detect the DN as being dead.
cluster.shutdownDataNodes();
Thread.sleep(30000L);
// Next, wait for the redundancy monitor to mark the file as corrupt.
Thread.sleep(2 * DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_DEFAULT * 1000);
// Wait for the LazyPersistFileScrubber to run
Thread.sleep(2 * LAZY_WRITE_FILE_SCRUBBER_INTERVAL_SEC * 1000);
// Ensure that path1 exist.
Assert.assertTrue(fs.exists(path1));
}
/**
* If NN restarted then lazyPersist files should not deleted
*/
@Test
public void testFileShouldNotDiscardedIfNNRestarted()
throws IOException, InterruptedException, TimeoutException {
getClusterBuilder().setRamDiskReplicaCapacity(2).build();
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
makeTestFile(path1, BLOCK_SIZE, true);
ensureFileReplicasOnStorageType(path1, RAM_DISK);
cluster.shutdownDataNodes();
cluster.restartNameNodes();
// wait for the redundancy monitor to mark the file as corrupt.
Thread.sleep(2 * DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_DEFAULT * 1000);
Long corruptBlkCount = (long) Iterators.size(cluster.getNameNode()
.getNamesystem().getBlockManager().getCorruptReplicaBlockIterator());
// Check block detected as corrupted
assertThat(corruptBlkCount, is(1L));
// Ensure path1 exist.
Assert.assertTrue(fs.exists(path1));
}
/**
* Concurrent read from the same node and verify the contents.
*/
@Test
public void testConcurrentRead()
throws Exception {
getClusterBuilder().setRamDiskReplicaCapacity(2).build();
final String METHOD_NAME = GenericTestUtils.getMethodName();
final Path path1 = new Path("/" + METHOD_NAME + ".dat");
final int SEED = 0xFADED;
final int NUM_TASKS = 5;
makeRandomTestFile(path1, BLOCK_SIZE, true, SEED);
ensureFileReplicasOnStorageType(path1, RAM_DISK);
//Read from multiple clients
final CountDownLatch latch = new CountDownLatch(NUM_TASKS);
final AtomicBoolean testFailed = new AtomicBoolean(false);
Runnable readerRunnable = new Runnable() {
@Override
public void run() {
try {
Assert.assertTrue(verifyReadRandomFile(path1, BLOCK_SIZE, SEED));
} catch (Throwable e) {
LOG.error("readerRunnable error", e);
testFailed.set(true);
} finally {
latch.countDown();
}
}
};
Thread threads[] = new Thread[NUM_TASKS];
for (int i = 0; i < NUM_TASKS; i++) {
threads[i] = new Thread(readerRunnable);
threads[i].start();
}
Thread.sleep(500);
for (int i = 0; i < NUM_TASKS; i++) {
Uninterruptibles.joinUninterruptibly(threads[i]);
}
Assert.assertFalse(testFailed.get());
}
/**
* Concurrent write with eviction
* RAM_DISK can hold 9 replicas
* 4 threads each write 5 replicas
* @throws IOException
* @throws InterruptedException
*/
@Test
public void testConcurrentWrites()
throws IOException, InterruptedException {
getClusterBuilder().setRamDiskReplicaCapacity(9).build();
final String METHOD_NAME = GenericTestUtils.getMethodName();
final int SEED = 0xFADED;
final int NUM_WRITERS = 4;
final int NUM_WRITER_PATHS = 5;
Path paths[][] = new Path[NUM_WRITERS][NUM_WRITER_PATHS];
for (int i = 0; i < NUM_WRITERS; i++) {
paths[i] = new Path[NUM_WRITER_PATHS];
for (int j = 0; j < NUM_WRITER_PATHS; j++) {
paths[i][j] =
new Path("/" + METHOD_NAME + ".Writer" + i + ".File." + j + ".dat");
}
}
final CountDownLatch latch = new CountDownLatch(NUM_WRITERS);
final AtomicBoolean testFailed = new AtomicBoolean(false);
ExecutorService executor = Executors.newFixedThreadPool(THREADPOOL_SIZE);
for (int i = 0; i < NUM_WRITERS; i++) {
Runnable writer = new WriterRunnable(i, paths[i], SEED, latch, testFailed);
executor.execute(writer);
}
Thread.sleep(3 * LAZY_WRITER_INTERVAL_SEC * 1000);
triggerBlockReport();
// Stop executor from adding new tasks to finish existing threads in queue
latch.await();
assertThat(testFailed.get(), is(false));
}
class WriterRunnable implements Runnable {
private final int id;
private final Path paths[];
private final int seed;
private CountDownLatch latch;
private AtomicBoolean bFail;
public WriterRunnable(int threadIndex, Path[] paths,
int seed, CountDownLatch latch,
AtomicBoolean bFail) {
id = threadIndex;
this.paths = paths;
this.seed = seed;
this.latch = latch;
this.bFail = bFail;
System.out.println("Creating Writer: " + id);
}
public void run() {
System.out.println("Writer " + id + " starting... ");
int i = 0;
try {
for (i = 0; i < paths.length; i++) {
makeRandomTestFile(paths[i], BLOCK_SIZE, true, seed);
        // eviction may fail when all blocks are not persisted yet.
// ensureFileReplicasOnStorageType(paths[i], RAM_DISK);
}
} catch (IOException e) {
bFail.set(true);
LOG.error("Writer exception: writer id:" + id +
" testfile: " + paths[i].toString() +
" " + e);
} finally {
latch.countDown();
}
}
}
}<|fim▁end|> | * distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file |
<|file_name|>computeNodeListNextOptions.js<|end_file_name|><|fim▁begin|>/*
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for
* license information.
*
* Code generated by Microsoft (R) AutoRest Code Generator.
* Changes may cause incorrect behavior and will be lost if the code is
* regenerated.
*/
'use strict';
/**
* @class
* Initializes a new instance of the ComputeNodeListNextOptions class.
* @constructor
* Additional parameters for the listNext operation.
*
* @member {string} [clientRequestId] The caller-generated request identity,
* in the form of a GUID with no decoration such as curly braces, e.g.
* 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
*
* @member {boolean} [returnClientRequestId] Whether the server should return
* the client-request-id identifier in the response.
*
* @member {date} [ocpDate] The time the request was issued. If not specified,
* this header will be automatically populated with the current system clock
* time.
*
*/
function ComputeNodeListNextOptions() {
}
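// Example shape of the options object (the values are illustrative only; the
// GUID is the one quoted in the documentation comment above):
//   {
//     clientRequestId: '9C4D50EE-2D56-4CD3-8152-34347DC9F2B0',
//     returnClientRequestId: true,
//     ocpDate: new Date()
//   }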
/**
* Defines the metadata of ComputeNodeListNextOptions
*
* @returns {object} metadata of ComputeNodeListNextOptions
*
*/
ComputeNodeListNextOptions.prototype.mapper = function () {
return {
required: false,
type: {
name: 'Composite',
className: 'ComputeNodeListNextOptions',
modelProperties: {
clientRequestId: {
required: false,
type: {
name: 'String'
}
},
returnClientRequestId: {
required: false,
type: {
name: 'Boolean'
}<|fim▁hole|> required: false,
type: {
name: 'DateTimeRfc1123'
}
}
}
}
};
};
module.exports = ComputeNodeListNextOptions;<|fim▁end|> | },
ocpDate: { |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>'''
Library for doing fun things with computers.
'''
__author__ = 'Andrew M Bates'
__version__ = '0.001'
import io, os, sys<|fim▁hole|>
# the core imports go here
# this should go in in the mods dir
try:
'''IF RASPBERRY PI & HAS A GPIO BOARD'''
import RPi.GPIO as RPi
except ImportError:
pass<|fim▁end|> | |
<|file_name|>manage.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
import os
import sys
<|fim▁hole|> from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)<|fim▁end|> | if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "MyGarden.settings")
try: |
<|file_name|>test_glance.py<|end_file_name|><|fim▁begin|># Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
import time
import mock
from mox3 import mox
from os_xenapi.client import XenAPI<|fim▁hole|>from nova.compute import utils as compute_utils
from nova import context
from nova import exception
from nova.tests.unit.virt.xenapi import stubs
from nova.virt.xenapi import driver as xenapi_conn
from nova.virt.xenapi import fake
from nova.virt.xenapi.image import glance
from nova.virt.xenapi import vm_utils
class TestGlanceStore(stubs.XenAPITestBaseNoDB):
def setUp(self):
super(TestGlanceStore, self).setUp()
self.store = glance.GlanceStore()
self.flags(api_servers=['http://localhost:9292'], group='glance')
self.flags(connection_url='test_url',
connection_password='test_pass',
group='xenserver')
self.context = context.RequestContext(
'user', 'project', auth_token='foobar')
fake.reset()
stubs.stubout_session(self.stubs, fake.SessionBase)
driver = xenapi_conn.XenAPIDriver(False)
self.session = driver._session
self.stubs.Set(
vm_utils, 'get_sr_path', lambda *a, **kw: '/fake/sr/path')
self.instance = {'uuid': 'blah',
'system_metadata': [],
'auto_disk_config': True,
'os_type': 'default',
'xenapi_use_agent': 'true'}
def _get_params(self):
return {'image_id': 'fake_image_uuid',
'endpoint': 'http://localhost:9292',
'sr_path': '/fake/sr/path',
'api_version': 2,
'extra_headers': {'X-Auth-Token': 'foobar',
'X-Roles': '',
'X-Tenant-Id': 'project',
'X-User-Id': 'user',
'X-Identity-Status': 'Confirmed'}}
def _get_download_params(self):
params = self._get_params()
params['uuid_stack'] = ['uuid1']
return params
def test_download_image(self):
params = self._get_download_params()
self.stubs.Set(vm_utils, '_make_uuid_stack',
lambda *a, **kw: ['uuid1'])
self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
self.session.call_plugin_serialized('glance.py', 'download_vhd2',
**params)
self.mox.ReplayAll()
self.store.download_image(self.context, self.session,
self.instance, 'fake_image_uuid')
self.mox.VerifyAll()
@mock.patch.object(vm_utils, '_make_uuid_stack', return_value=['uuid1'])
@mock.patch.object(random, 'shuffle')
@mock.patch.object(time, 'sleep')
@mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
def test_download_image_retry(self, mock_fault, mock_sleep,
mock_shuffle, mock_make_uuid_stack):
params = self._get_download_params()
self.flags(num_retries=2, group='glance')
params.pop("endpoint")
calls = [mock.call('glance.py', 'download_vhd2',
endpoint='http://10.0.1.1:9292',
**params),
mock.call('glance.py', 'download_vhd2',
endpoint='http://10.0.0.1:9293',
**params)]
glance_api_servers = ['10.0.1.1:9292',
'http://10.0.0.1:9293']
self.flags(api_servers=glance_api_servers, group='glance')
with (mock.patch.object(self.session, 'call_plugin_serialized')
) as mock_call_plugin_serialized:
error_details = ["", "", "RetryableError", ""]
error = self.session.XenAPI.Failure(details=error_details)
mock_call_plugin_serialized.side_effect = [error, "success"]
self.store.download_image(self.context, self.session,
self.instance, 'fake_image_uuid')
mock_call_plugin_serialized.assert_has_calls(calls)
self.assertEqual(1, mock_fault.call_count)
def _get_upload_params(self, auto_disk_config=True,
expected_os_type='default'):
params = self._get_params()
params['vdi_uuids'] = ['fake_vdi_uuid']
params['properties'] = {'auto_disk_config': auto_disk_config,
'os_type': expected_os_type}
return params
def _test_upload_image(self, auto_disk_config, expected_os_type='default'):
params = self._get_upload_params(auto_disk_config, expected_os_type)
self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
self.session.call_plugin_serialized('glance.py', 'upload_vhd2',
**params)
self.mox.ReplayAll()
self.store.upload_image(self.context, self.session, self.instance,
'fake_image_uuid', ['fake_vdi_uuid'])
self.mox.VerifyAll()
def test_upload_image(self):
self._test_upload_image(True)
def test_upload_image_None_os_type(self):
self.instance['os_type'] = None
self._test_upload_image(True, 'linux')
def test_upload_image_no_os_type(self):
del self.instance['os_type']
self._test_upload_image(True, 'linux')
def test_upload_image_auto_config_disk_disabled(self):
sys_meta = [{"key": "image_auto_disk_config", "value": "Disabled"}]
self.instance["system_metadata"] = sys_meta
self._test_upload_image("disabled")
def test_upload_image_raises_exception(self):
params = self._get_upload_params()
self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
self.session.call_plugin_serialized('glance.py', 'upload_vhd2',
**params).AndRaise(RuntimeError)
self.mox.ReplayAll()
self.assertRaises(RuntimeError, self.store.upload_image,
self.context, self.session, self.instance,
'fake_image_uuid', ['fake_vdi_uuid'])
self.mox.VerifyAll()
def test_upload_image_retries_then_raises_exception(self):
self.flags(num_retries=2, group='glance')
params = self._get_upload_params()
self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
self.mox.StubOutWithMock(time, 'sleep')
self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
error_details = ["", "", "RetryableError", ""]
error = self.session.XenAPI.Failure(details=error_details)
self.session.call_plugin_serialized('glance.py', 'upload_vhd2',
**params).AndRaise(error)
compute_utils.add_instance_fault_from_exc(self.context, self.instance,
error, (XenAPI.Failure,
error,
mox.IgnoreArg()))
time.sleep(0.5)
self.session.call_plugin_serialized('glance.py', 'upload_vhd2',
**params).AndRaise(error)
compute_utils.add_instance_fault_from_exc(self.context, self.instance,
error, (XenAPI.Failure,
error,
mox.IgnoreArg()))
time.sleep(1)
self.session.call_plugin_serialized('glance.py', 'upload_vhd2',
**params).AndRaise(error)
compute_utils.add_instance_fault_from_exc(self.context, self.instance,
error, (XenAPI.Failure,
error,
mox.IgnoreArg()))
self.mox.ReplayAll()
self.assertRaises(exception.CouldNotUploadImage,
self.store.upload_image,
self.context, self.session, self.instance,
'fake_image_uuid', ['fake_vdi_uuid'])
self.mox.VerifyAll()
def test_upload_image_retries_on_signal_exception(self):
self.flags(num_retries=2, group='glance')
params = self._get_upload_params()
self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
self.mox.StubOutWithMock(time, 'sleep')
self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
error_details = ["", "task signaled", "", ""]
error = self.session.XenAPI.Failure(details=error_details)
self.session.call_plugin_serialized('glance.py', 'upload_vhd2',
**params).AndRaise(error)
compute_utils.add_instance_fault_from_exc(self.context, self.instance,
error, (XenAPI.Failure,
error,
mox.IgnoreArg()))
time.sleep(0.5)
# Note(johngarbutt) XenServer 6.1 and later has this error
error_details = ["", "signal: SIGTERM", "", ""]
error = self.session.XenAPI.Failure(details=error_details)
self.session.call_plugin_serialized('glance.py', 'upload_vhd2',
**params).AndRaise(error)
compute_utils.add_instance_fault_from_exc(self.context, self.instance,
error, (XenAPI.Failure,
error,
mox.IgnoreArg()))
time.sleep(1)
self.session.call_plugin_serialized('glance.py', 'upload_vhd2',
**params)
self.mox.ReplayAll()
self.store.upload_image(self.context, self.session, self.instance,
'fake_image_uuid', ['fake_vdi_uuid'])
self.mox.VerifyAll()<|fim▁end|> | |
<|file_name|>audioPlayer.js<|end_file_name|><|fim▁begin|>import React from "react";
import basicComponent from "core/basicComponent";
import Radium from "radium";
import ReactAplayer from "react-aplayer";
import mergeAdvanced from "object-merge-advanced";
class audioPlayer extends basicComponent {
constructor(props) {
super(props);
if (!this.isRestored) {
this.state = { ...this.state, audios: [] };
}
this.myRef = React.createRef();
}
onInit = ap => {
this.ap = ap;
};
play = () => this.ap.play();
pause = () => this.ap.pause();
seek = timePos => this.ap.seek(timePos);
addAudio(audioProps) {
this.setState(prevState => {
const prevAudios = prevState.audios;
const nextAudios = prevAudios.concat([audioProps]);
const nextState = mergeAdvanced(prevState, { audios: nextAudios });
return nextState;
});
}
thisComponent = () => {
const state = this.getState();<|fim▁hole|> theme="#F57F17"
lrcType={3}
audio={state.audios || []}
onInit={this.onInit}
style={styles}
{...this.getEvents()}
/>
);
};
}
export default Radium(audioPlayer);<|fim▁end|> | const styles = this.getStyles();
return (
<ReactAplayer
ref={this.myRef} |
<|file_name|>한겨레.ts<|end_file_name|><|fim▁begin|>import * as $ from 'jquery';
import { clearStyles } from '../util';
import { Article } from '..';
export const cleanup = () => {
$('#scrollDiv').remove();
}
export function parse(): Article {
let articleBody = clearStyles($('.article-text')[0].cloneNode(true));
let $subtitle = $('.subtitle', articleBody);
const subtitle = $subtitle.html();
$subtitle.remove();<|fim▁hole|> const content = articleBody.innerHTML;
return {
title: $('#article_view_headline .title').text().trim(),
content: content,
subtitle: subtitle,
timestamp: (() => {
let $span = $('#article_view_headline .date-time span');
if ($span[0].childNodes[1].textContent!.replace(/-/g, '/'))
return {
created: new Date($span[0].childNodes[1].textContent!.replace(/-/g, '/')),
lastModified: $span[1] ? new Date($span[1].childNodes[1].textContent!.replace(/-/g, '/')) : undefined
};
else return undefined;
})(),
reporters: []
};
}<|fim▁end|> | $('.relation2-area', articleBody).remove(); |
<|file_name|>JSONHeader.java<|end_file_name|><|fim▁begin|>package cn.oftenporter.servlet;
/**
 * Objects implementing this interface will have their content type set to {@linkplain ContentType#APP_JSON} when output.<|fim▁hole|>
}<|fim▁end|> | */
public interface JSONHeader
{ |
<|file_name|>rpg_operation.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
"""
RPG: Operation
These are any operations we want to carry out from our YAML files. Operations
are strings that are tied to Python code, to carry out things that aren't
possible to easily make YAML tags for directly.
"""
<|fim▁hole|>import rpg_combat
def HandleOperation(game, operation, data):
"""Handle the operation.
Args:
game: Game object
operation: string, name of the operation to look up
data: dict, data at the level the operation was specified in, which may
contain information the operation needs to operate. Operation specific.
"""
# Pay for a room's night sleep
if operation == 'RoomSleepPay':
Log('RoomSleepPay: You are rested!')
# Max up the player's current health
#NOTE(g): Uses a percentage based increase from the game data. If not
# present, assume full recovery.
modifier = game.data['game'].get('sleep_regeneration_percent', 1.0)
# Add the modified version of the full health
game.player.health_current += game.player.attributes['health'] * modifier
# Max out at full health
if game.player.health_current > game.player.attributes['health']:
game.player.health_current = game.player.attributes['health']
# No longer fatigued (running and such)
game.player.fatigued = False
# Combat with the Player
elif operation == 'CombatPlayer':
if game.dialogue:
      # If a map is specified for the encounter, then fight
map = data.get('map', None)
# Set combat to be with the given actor
game.combat = rpg_combat.Combat(game, [game.dialogue.actor], map=map)
# Clear the dialogue. The time for talking is OVER!
game.dialogue = None
else:
      Log('Operation: CombatPlayer: Not initiated from Dialogue. Unknown actor.')
# Close the Dialogue
if operation == 'CloseDialogue':
game.dialogue = None<|fim▁end|> |
from rpg_log import Log
|
<|file_name|>user.js<|end_file_name|><|fim▁begin|>'use strict';
<|fim▁hole|>var userSchema = new mongoose.Schema({
name: {
type: String,
required: true
},
email: {
type: String,
required: true,
unique: true,
trim: true
},
username: {
type: String,
required: true,
unique: true,
trim: true
},
biography: {
type: String
},
location: {
type: String
},
auth: {
basic: {
username: String,
password: String
}
}
});
userSchema.methods.hashPassword = function(password) {
var hash = this.auth.basic.password = bcrypt.hashSync(password, 8);
return hash;
};
userSchema.methods.checkPassword = function(password) {
return bcrypt.compareSync(password, this.auth.basic.password);
};
userSchema.methods.generateToken = function(callback) {
var id = this._id;
eat.encode({id: id}, process.env.APP_SECRET, callback);
};
module.exports = mongoose.model('User', userSchema);<|fim▁end|> | var mongoose = require('mongoose');
var bcrypt = require('bcrypt');
var eat = require('eat');
|
<|file_name|>main.rs<|end_file_name|><|fim▁begin|>// Copyright 2015 The Athena Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate zeus;
extern crate rustc_serialize;
extern crate docopt;
extern crate toml;
mod commands;
use std::error::Error;
use docopt::Docopt;
static USAGE: &'static str = "
Athena's project build system.
Usage:
zeus <command> [<args>...]
zeus
Some common zeus commands are:
version Display version info and exit
list Display a list of commands
new Create a new athena project
setup Sets up all athena tools for this project
See 'zeus help <command>' for more information on a specific command.
";
#[derive(RustcDecodable, Debug)]
struct Flags {
arg_command: String,
arg_args: Vec<String>
}
fn main() {
// Parse in the command line flags
let flags: Flags = Docopt::new(USAGE)
.and_then(|d| d.decode())
.unwrap_or_else(|e| e.exit());
// Run the actual command
let result = match &flags.arg_command[..] {
"list" => commands::list::execute(),
"new" => commands::new::execute(),<|fim▁hole|> "" => display_usage(),
_ => display_not_found()
};
// Set the exit code depending on the result
match result {
Ok(_) => std::process::exit(0),
Err(err) => {
println!("{}", err);
std::process::exit(1)
}
}
}
// ### Misc Command Handlers ###
fn display_usage() -> Result<(), Box<Error>> {
println!("{}", USAGE);
return Ok(());
}
fn display_not_found() -> Result<(), Box<Error>> {
unimplemented!();
}<|fim▁end|> | "setup" => commands::setup::execute(), |
<|file_name|>SecretsConfigDump.ts<|end_file_name|><|fim▁begin|>// Original file: deps/envoy-api/envoy/admin/v3/config_dump.proto
import type { Timestamp as _google_protobuf_Timestamp, Timestamp__Output as _google_protobuf_Timestamp__Output } from '../../../google/protobuf/Timestamp';
import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__Output } from '../../../google/protobuf/Any';
import type { UpdateFailureState as _envoy_admin_v3_UpdateFailureState, UpdateFailureState__Output as _envoy_admin_v3_UpdateFailureState__Output } from '../../../envoy/admin/v3/UpdateFailureState';
import type { ClientResourceStatus as _envoy_admin_v3_ClientResourceStatus } from '../../../envoy/admin/v3/ClientResourceStatus';
/**
* DynamicSecret contains secret information fetched via SDS.
* [#next-free-field: 7]
*/
export interface _envoy_admin_v3_SecretsConfigDump_DynamicSecret {
/**
* The name assigned to the secret.
*/
'name'?: (string);
/**
* This is the per-resource version information.
*/
'version_info'?: (string);
/**
* The timestamp when the secret was last updated.
*/
'last_updated'?: (_google_protobuf_Timestamp | null);
/**
* The actual secret information.
* Security sensitive information is redacted (replaced with "[redacted]") for
* private keys and passwords in TLS certificates.
*/
'secret'?: (_google_protobuf_Any | null);
/**
* Set if the last update failed, cleared after the next successful update.
* The *error_state* field contains the rejected version of this particular
* resource along with the reason and timestamp. For successfully updated or
* acknowledged resource, this field should be empty.
* [#not-implemented-hide:]
*/
'error_state'?: (_envoy_admin_v3_UpdateFailureState | null);
/**
* The client status of this resource.
* [#not-implemented-hide:]
*/
'client_status'?: (_envoy_admin_v3_ClientResourceStatus | keyof typeof _envoy_admin_v3_ClientResourceStatus);
}
/**
* DynamicSecret contains secret information fetched via SDS.
* [#next-free-field: 7]
*/
export interface _envoy_admin_v3_SecretsConfigDump_DynamicSecret__Output {
/**
* The name assigned to the secret.
*/
'name': (string);
/**
* This is the per-resource version information.
*/
'version_info': (string);
/**
* The timestamp when the secret was last updated.
*/
'last_updated': (_google_protobuf_Timestamp__Output | null);
/**
* The actual secret information.
* Security sensitive information is redacted (replaced with "[redacted]") for
* private keys and passwords in TLS certificates.
*/
'secret': (_google_protobuf_Any__Output | null);
/**
* Set if the last update failed, cleared after the next successful update.
* The *error_state* field contains the rejected version of this particular
* resource along with the reason and timestamp. For successfully updated or
* acknowledged resource, this field should be empty.
* [#not-implemented-hide:]
*/
'error_state': (_envoy_admin_v3_UpdateFailureState__Output | null);
/**
* The client status of this resource.
* [#not-implemented-hide:]
*/
'client_status': (keyof typeof _envoy_admin_v3_ClientResourceStatus);
}
/**
* StaticSecret specifies statically loaded secret in bootstrap.
*/
export interface _envoy_admin_v3_SecretsConfigDump_StaticSecret {
/**
* The name assigned to the secret.
*/
'name'?: (string);
/**
* The timestamp when the secret was last updated.
*/
'last_updated'?: (_google_protobuf_Timestamp | null);
/**
* The actual secret information.
* Security sensitive information is redacted (replaced with "[redacted]") for
* private keys and passwords in TLS certificates.
*/
'secret'?: (_google_protobuf_Any | null);
}
/**
* StaticSecret specifies statically loaded secret in bootstrap.
*/
export interface _envoy_admin_v3_SecretsConfigDump_StaticSecret__Output {
/**
* The name assigned to the secret.
*/
'name': (string);
/**
* The timestamp when the secret was last updated.
*/
'last_updated': (_google_protobuf_Timestamp__Output | null);
/**
* The actual secret information.
* Security sensitive information is redacted (replaced with "[redacted]") for
* private keys and passwords in TLS certificates.
*/
'secret': (_google_protobuf_Any__Output | null);
}
/**
 * Envoy's SDS implementation fills this message with all secrets fetched dynamically via SDS.
*/
export interface SecretsConfigDump {
/**
* The statically loaded secrets.<|fim▁hole|> * clusters or listeners.
*/
'dynamic_active_secrets'?: (_envoy_admin_v3_SecretsConfigDump_DynamicSecret)[];
/**
* The dynamically loaded warming secrets. These are secrets that are currently undergoing
* warming in preparation to service clusters or listeners.
*/
'dynamic_warming_secrets'?: (_envoy_admin_v3_SecretsConfigDump_DynamicSecret)[];
}
/**
 * Envoy's SDS implementation fills this message with all secrets fetched dynamically via SDS.
*/
export interface SecretsConfigDump__Output {
/**
* The statically loaded secrets.
*/
'static_secrets': (_envoy_admin_v3_SecretsConfigDump_StaticSecret__Output)[];
/**
* The dynamically loaded active secrets. These are secrets that are available to service
* clusters or listeners.
*/
'dynamic_active_secrets': (_envoy_admin_v3_SecretsConfigDump_DynamicSecret__Output)[];
/**
* The dynamically loaded warming secrets. These are secrets that are currently undergoing
* warming in preparation to service clusters or listeners.
*/
'dynamic_warming_secrets': (_envoy_admin_v3_SecretsConfigDump_DynamicSecret__Output)[];
}<|fim▁end|> | */
'static_secrets'?: (_envoy_admin_v3_SecretsConfigDump_StaticSecret)[];
/**
* The dynamically loaded active secrets. These are secrets that are available to service |
<|file_name|>datatype.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-<|fim▁hole|>
def __init__(self, type, preprocessor=None):
self.type = type
self.preprocessor = preprocessor
def preprocess(self, value):
return self.preprocessor(value) if self.preprocessor else value
def serialize(self, value):
return value
def unserialize(self, value):
processed = self.preprocess(value)
if isinstance(processed, self.type):
return processed
return self.type(processed)<|fim▁end|> |
class DataType(object): |
<|file_name|>pandas_03a_selecting_data2.py<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|> | males['Age'].mean() |
<|file_name|>pose.py<|end_file_name|><|fim▁begin|>"""
Custom made pose attribute for simulation
"""<|fim▁hole|>
import math
from vector_3 import Vector3
from polar_vector import PolarVector
class Pose:
def __init__(self):
self.position = Vector3()
self.velocity = PolarVector()<|fim▁end|> | |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># *** submodules *** #
from matrix import constants, decompositions, errors, approximation, nearest
# *** functions *** #
from matrix.calculate import (is_positive_semidefinite, is_positive_definite, is_invertible,
decompose, solve)
# *** constants *** #
from matrix.constants import (
DECOMPOSITION_TYPES,
LDL_DECOMPOSITION_TYPE, LDL_DECOMPOSITION_COMPRESSED_TYPE, LL_DECOMPOSITION_TYPE,
UNIVERSAL_PERMUTATION_METHODS, SPARSE_ONLY_PERMUTATION_METHODS,
NO_PERMUTATION_METHOD,
DECREASING_DIAGONAL_VALUES_PERMUTATION_METHOD, INCREASING_DIAGONAL_VALUES_PERMUTATION_METHOD,
DECREASING_ABSOLUTE_DIAGONAL_VALUES_PERMUTATION_METHOD,
INCREASING_ABSOLUTE_DIAGONAL_VALUES_PERMUTATION_METHOD)
DECOMPOSITION_TYPES = DECOMPOSITION_TYPES
""" Supported types of decompositions. """
UNIVERSAL_PERMUTATION_METHODS = UNIVERSAL_PERMUTATION_METHODS
""" Supported permutation methods for decompose dense and sparse matrices. """
SPARSE_ONLY_PERMUTATION_METHODS = SPARSE_ONLY_PERMUTATION_METHODS
""" Supported permutation methods only for sparse matrices. """
# *** version *** #
<|fim▁hole|>del get_versions
# *** logging *** #
import logging
logger = logging.getLogger(__name__)
del logging
# *** deprecated *** #
def __getattr__(name):
deprecated_names = ['decomposition', 'positive_definite_matrix',
'positive_semidefinite_matrix', 'APPROXIMATION_ONLY_PERMUTATION_METHODS']
if name in deprecated_names:
import warnings
warnings.warn(f'"matrix.{name}" is deprecated. Take a look at'
' "matrix.approximation.positive_semidefinite" instead.',
DeprecationWarning, stacklevel=2)
import matrix.approximate
return matrix.approximate.__getattribute__(name)
raise AttributeError(f'Module {__name__} has no attribute {name}.')<|fim▁end|> | from ._version import get_versions
__version__ = get_versions()['version'] |
<|file_name|>settings.py<|end_file_name|><|fim▁begin|>DEBUG = False
TEMPLATE_DEBUG = DEBUG
TIME_ZONE = 'UTC'
LANGUAGE_CODE = 'en-US'
SITE_ID = 1
USE_L10N = True
USE_TZ = True
SECRET_KEY = 'local'
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',<|fim▁hole|>
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'django_flanker',
'tests',
)<|fim▁end|> | }
} |
<|file_name|>secrets.template.py<|end_file_name|><|fim▁begin|># These are the instance-dependent settings. Copy this file to
# secrets.py and apply the desired settings.
#
# Only one variable is required here, SECRET_KEY. Fill this using:
# http://www.miniwebtool.com/django-secret-key-generator/
SECRET_KEY = ''
# In your development setup, you can leave the following variables
# unset:<|fim▁hole|>#STATIC_ROOT =
#MEDIA_ROOT =
#DEBUG =
#DATABASES =
#EMAIL_BACKEND =
#EMAIL_USE_TLS =
#EMAIL_HOST =
#EMAIL_PORT =
#EMAIL_HOST_USER =
#EMAIL_HOST_PASSWORD =
#SESSION_COOKIE_DOMAIN =
#CSRF_COOKIE_DOMAIN =
#SECURE_HSTS_SECONDS =<|fim▁end|> | |
<|file_name|>lot.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from openprocurement.auctions.core.utils import (
apply_patch,
context_unpack,
get_now,
json_view,
opresource,
save_auction,
)
from openprocurement.auctions.core.validation import (
validate_lot_data,
validate_patch_lot_data,
)
from openprocurement.auctions.core.views.mixins import AuctionLotResource
@opresource(name='dgfOtherAssets:Auction Lots',
collection_path='/auctions/{auction_id}/lots',
path='/auctions/{auction_id}/lots/{lot_id}',
auctionsprocurementMethodType="dgfOtherAssets",
description="Auction lots")
class AuctionLotResource(AuctionLotResource):
@json_view(content_type="application/json", validators=(validate_lot_data,), permission='edit_auction')
def collection_post(self):
"""Add a lot
"""
auction = self.request.validated['auction']
if auction.status not in ['active.tendering']:
self.request.errors.add('body', 'data', 'Can\'t add lot in current ({}) auction status'.format(auction.status))
self.request.errors.status = 403
return
lot = self.request.validated['lot']
lot.date = get_now()
auction.lots.append(lot)
if save_auction(self.request):
self.LOGGER.info('Created auction lot {}'.format(lot.id),
extra=context_unpack(self.request, {'MESSAGE_ID': 'auction_lot_create'}, {'lot_id': lot.id}))
self.request.response.status = 201
route = self.request.matched_route.name.replace("collection_", "")
self.request.response.headers['Location'] = self.request.current_route_url(_route_name=route, lot_id=lot.id, _query={})
return {'data': lot.serialize("view")}
@json_view(content_type="application/json", validators=(validate_patch_lot_data,), permission='edit_auction')
def patch(self):
"""Update of lot
"""
auction = self.request.validated['auction']
if auction.status not in ['active.tendering']:
self.request.errors.add('body', 'data', 'Can\'t update lot in current ({}) auction status'.format(auction.status))
self.request.errors.status = 403
return
if apply_patch(self.request, src=self.request.context.serialize()):
self.LOGGER.info('Updated auction lot {}'.format(self.request.context.id),
extra=context_unpack(self.request, {'MESSAGE_ID': 'auction_lot_patch'}))
return {'data': self.request.context.serialize("view")}
@json_view(permission='edit_auction')
def delete(self):
"""Lot deleting
"""
auction = self.request.validated['auction']<|fim▁hole|> return
lot = self.request.context
res = lot.serialize("view")
auction.lots.remove(lot)
if save_auction(self.request):
self.LOGGER.info('Deleted auction lot {}'.format(self.request.context.id),
extra=context_unpack(self.request, {'MESSAGE_ID': 'auction_lot_delete'}))
return {'data': res}<|fim▁end|> | if auction.status not in ['active.tendering']:
self.request.errors.add('body', 'data', 'Can\'t delete lot in current ({}) auction status'.format(auction.status))
self.request.errors.status = 403 |
<|file_name|>restricted.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <[email protected]>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
"""
import sys
import cPickle
import traceback
import types
import os
import logging
from storage import Storage
from http import HTTP
from html import BEAUTIFY
logger = logging.getLogger("web2py")
__all__ = ['RestrictedError', 'restricted', 'TicketStorage', 'compile2']
class TicketStorage(Storage):
"""
defines the ticket object and the default values of its members (None)
"""
def __init__(
self,
db=None,
tablename='web2py_ticket'
):
self.db = db
self.tablename = tablename
def store(self, request, ticket_id, ticket_data):
"""
stores the ticket. It will figure out if this must be on disk or in db
"""
if self.db:
self._store_in_db(request, ticket_id, ticket_data)
else:
self._store_on_disk(request, ticket_id, ticket_data)
def _store_in_db(self, request, ticket_id, ticket_data):
table = self._get_table(self.db, self.tablename, request.application)
table.insert(ticket_id=ticket_id,
ticket_data=cPickle.dumps(ticket_data),
created_datetime=request.now)
logger.error('In FILE: %(layer)s\n\n%(traceback)s\n' % ticket_data)
def _store_on_disk(self, request, ticket_id, ticket_data):
ef = self._error_file(request, ticket_id, 'wb')
try:
cPickle.dump(ticket_data, ef)
finally:
ef.close()
def _error_file(self, request, ticket_id, mode, app=None):
root = request.folder
if app:
root = os.path.join(os.path.join(root, '..'), app)
errors_folder = os.path.abspath(os.path.join(root, 'errors'))#.replace('\\', '/')
return open(os.path.join(errors_folder, ticket_id), mode)
def _get_table(self, db, tablename, app):
tablename = tablename + '_' + app
table = db.get(tablename, None)
if table is None:
db.rollback() # not necessary but one day
# any app may store tickets on DB
table = db.define_table(
tablename,
db.Field('ticket_id', length=100),
db.Field('ticket_data', 'text'),
db.Field('created_datetime', 'datetime'),
)
return table
def load(
self,
request,
app,
ticket_id,
):
if not self.db:
ef = self._error_file(request, ticket_id, 'rb', app)
try:
return cPickle.load(ef)
finally:
ef.close()
table = self._get_table(self.db, self.tablename, app)
rows = self.db(table.ticket_id == ticket_id).select()
if rows:
return cPickle.loads(rows[0].ticket_data)
return None
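# Example (editor's sketch, hypothetical usage -- not part of the original module):
#
#     ts = TicketStorage()                  # file-based storage under <app>/errors/
#     # or TicketStorage(db=db) to keep tickets in the web2py_ticket_<app> table
#     ts.store(request, ticket_id, {'layer': '...', 'code': '...', 'output': '...',
#                                   'traceback': '...', 'snapshot': {}})
#     data = ts.load(request, request.application, ticket_id)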
class RestrictedError(Exception):
"""
class used to wrap an exception that occurs in the restricted environment
below. the traceback is used to log the exception and generate a ticket.
"""
def __init__(
self,
layer='',
code='',
output='',
environment=None,
):
"""
layer here is some description of where in the system the exception
occurred.
"""
if environment is None: environment = {}
self.layer = layer
self.code = code
self.output = output
self.environment = environment
if layer:
try:
self.traceback = traceback.format_exc()
except:
self.traceback = 'no traceback because template parting error'
try:
self.snapshot = snapshot(context=10,code=code,
environment=self.environment)
except:
self.snapshot = {}
else:
self.traceback = '(no error)'
self.snapshot = {}
def log(self, request):
"""
logs the exception.
"""
try:
d = {
'layer': str(self.layer),
'code': str(self.code),
'output': str(self.output),
'traceback': str(self.traceback),
'snapshot': self.snapshot,
}
ticket_storage = TicketStorage(db=request.tickets_db)
ticket_storage.store(request, request.uuid.split('/',1)[1], d)
return request.uuid
except:
logger.error(self.traceback)
return None
def load(self, request, app, ticket_id):
"""
loads a logged exception.
"""
ticket_storage = TicketStorage(db=request.tickets_db)
d = ticket_storage.load(request, app, ticket_id)
self.layer = d['layer']
self.code = d['code']
self.output = d['output']
self.traceback = d['traceback']
self.snapshot = d.get('snapshot')
def __str__(self):
        # safely show a useful message to the user
try:
output = self.output
if isinstance(output, unicode):
output = output.encode("utf8")
elif not isinstance(output, str):
output = str(output)
except:
output = ""
return output
def compile2(code,layer):
"""
    The +'\n' is necessary, otherwise compile fails when the code ends in a comment.
"""
return compile(code.rstrip().replace('\r\n','\n')+'\n', layer, 'exec')
def restricted(code, environment=None, layer='Unknown'):
"""
runs code in environment and returns the output. if an exception occurs
in code it raises a RestrictedError containing the traceback. layer is
passed to RestrictedError to identify where the error occurred.
"""
if environment is None: environment = {}
environment['__file__'] = layer
environment['__name__'] = '__restricted__'
try:
if type(code) == types.CodeType:
ccode = code
else:
ccode = compile2(code,layer)
exec ccode in environment
except HTTP:
raise
except RestrictedError:
# do not encapsulate (obfuscate) the original RestrictedError
raise
except Exception, error:
# extract the exception type and value (used as output message)
etype, evalue, tb = sys.exc_info()
# XXX Show exception in Wing IDE if running in debugger
if __debug__ and 'WINGDB_ACTIVE' in os.environ:
sys.excepthook(etype, evalue, tb)
output = "%s %s" % (etype, evalue)
raise RestrictedError(layer, code, output, environment)
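# Example (editor's sketch, not part of the original web2py source):
#
#     env = {'x': 1}
#     ccode = compile2("y = x + 1", 'example.py')
#     restricted(ccode, env, layer='example.py')   # env['y'] == 2 afterwards
#
# If the executed code raises, restricted() wraps the failure in a RestrictedError;
# calling its .log(request) method stores a ticket through TicketStorage.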
def snapshot(info=None, context=5, code=None, environment=None):
"""Return a dict describing a given traceback (based on cgitb.text)."""
import os, types, time, linecache, inspect, pydoc, cgitb
# if no exception info given, get current:
etype, evalue, etb = info or sys.exc_info()
if type(etype) is types.ClassType:
etype = etype.__name__
# create a snapshot dict with some basic information
s = {}<|fim▁hole|> # start to process frames
records = inspect.getinnerframes(etb, context)
s['frames'] = []
for frame, file, lnum, func, lines, index in records:
file = file and os.path.abspath(file) or '?'
args, varargs, varkw, locals = inspect.getargvalues(frame)
call = ''
if func != '?':
call = inspect.formatargvalues(args, varargs, varkw, locals,
formatvalue=lambda value: '=' + pydoc.text.repr(value))
# basic frame information
f = {'file': file, 'func': func, 'call': call, 'lines': {}, 'lnum': lnum}
highlight = {}
def reader(lnum=[lnum]):
highlight[lnum[0]] = 1
try: return linecache.getline(file, lnum[0])
finally: lnum[0] += 1
vars = cgitb.scanvars(reader, frame, locals)
# if it is a view, replace with generated code
if file.endswith('html'):
lmin = lnum>context and (lnum-context) or 0
lmax = lnum+context
lines = code.split("\n")[lmin:lmax]
index = min(context, lnum) - 1
if index is not None:
i = lnum - index
for line in lines:
f['lines'][i] = line.rstrip()
i += 1
# dump local variables (referenced in current line only)
f['dump'] = {}
for name, where, value in vars:
if name in f['dump']: continue
if value is not cgitb.__UNDEF__:
if where == 'global': name = 'global ' + name
elif where != 'local': name = where + name.split('.')[-1]
f['dump'][name] = pydoc.text.repr(value)
else:
f['dump'][name] = 'undefined'
s['frames'].append(f)
# add exception type, value and attributes
s['etype'] = str(etype)
s['evalue'] = str(evalue)
s['exception'] = {}
if isinstance(evalue, BaseException):
for name in dir(evalue):
# prevent py26 DeprecatedWarning:
if name!='message' or sys.version_info<(2.6):
value = pydoc.text.repr(getattr(evalue, name))
s['exception'][name] = value
# add all local values (of last frame) to the snapshot
s['locals'] = {}
for name, value in locals.items():
s['locals'][name] = pydoc.text.repr(value)
# add web2py environment variables
for k,v in environment.items():
if k in ('request', 'response', 'session'):
s[k] = BEAUTIFY(v)
return s<|fim▁end|> | s['pyver'] = 'Python ' + sys.version.split()[0] + ': ' + sys.executable
s['date'] = time.ctime(time.time())
|
<|file_name|>scu_interrupt.rs<|end_file_name|><|fim▁begin|>#[doc = r"Register block"]
#[repr(C)]
pub struct RegisterBlock {
#[doc = "0x00 - SCU Service Request Status"]
pub srstat: crate::Reg<srstat::SRSTAT_SPEC>,
#[doc = "0x04 - SCU Raw Service Request Status"]
pub srraw: crate::Reg<srraw::SRRAW_SPEC>,
#[doc = "0x08 - SCU Service Request Mask"]
pub srmsk: crate::Reg<srmsk::SRMSK_SPEC>,
#[doc = "0x0c - SCU Service Request Clear"]
pub srclr: crate::Reg<srclr::SRCLR_SPEC>,
#[doc = "0x10 - SCU Service Request Set"]
pub srset: crate::Reg<srset::SRSET_SPEC>,
#[doc = "0x14 - SCU Service Request Mask"]
pub nmireqen: crate::Reg<nmireqen::NMIREQEN_SPEC>,
}
#[doc = "SRSTAT register accessor: an alias for `Reg<SRSTAT_SPEC>`"]
pub type SRSTAT = crate::Reg<srstat::SRSTAT_SPEC>;
#[doc = "SCU Service Request Status"]<|fim▁hole|>pub mod srstat;
#[doc = "SRRAW register accessor: an alias for `Reg<SRRAW_SPEC>`"]
pub type SRRAW = crate::Reg<srraw::SRRAW_SPEC>;
#[doc = "SCU Raw Service Request Status"]
pub mod srraw;
#[doc = "SRMSK register accessor: an alias for `Reg<SRMSK_SPEC>`"]
pub type SRMSK = crate::Reg<srmsk::SRMSK_SPEC>;
#[doc = "SCU Service Request Mask"]
pub mod srmsk;
#[doc = "SRCLR register accessor: an alias for `Reg<SRCLR_SPEC>`"]
pub type SRCLR = crate::Reg<srclr::SRCLR_SPEC>;
#[doc = "SCU Service Request Clear"]
pub mod srclr;
#[doc = "SRSET register accessor: an alias for `Reg<SRSET_SPEC>`"]
pub type SRSET = crate::Reg<srset::SRSET_SPEC>;
#[doc = "SCU Service Request Set"]
pub mod srset;
#[doc = "NMIREQEN register accessor: an alias for `Reg<NMIREQEN_SPEC>`"]
pub type NMIREQEN = crate::Reg<nmireqen::NMIREQEN_SPEC>;
#[doc = "SCU Service Request Mask"]
pub mod nmireqen;<|fim▁end|> | |
<|file_name|>path.py<|end_file_name|><|fim▁begin|>import unicodedata
import re
class PathExtension:
"""
Enables readable url path names instead of ids for object traversal.
Names are stored as meta.pool_filename and generated from
title by default. Automatic generation can be disabled by setting
*meta.customfilename* to False for each object.
Extensions like *.html* are not stored. Path matching works independent
from extensions.
"""
maxlength = 55 # max path length
containerNamespace = True # unique filenames for container or global
extension = None
def Init(self):
if self.id == 0:
# skip roots
return
self.ListenEvent("commit", "TitleToFilename")
self._SetName()<|fim▁hole|>
def TitleToFilename(self, **kw):
"""
Uses title for filename
"""
customfilename = self.data.get("customfilename", None) # might not exist
if customfilename:
self._SetName()
return
# create url compatible filename from title
filename = self.EscapeFilename(self.meta.title)
# make unique filename
filename = self.UniqueFilename(filename)
if self.AddExtension(filename) == self.meta.pool_filename:
# no change
return
if filename:
# update
self.meta["pool_filename"] = self.AddExtension(filename)
else:
# reset filename
self.meta["pool_filename"] = ""
self._SetName()
self.Signal("pathupdate", path=self.meta["pool_filename"])
def UniqueFilename(self, name):
"""
Converts name to valid path/url
"""
if name == "file":
name = "file_"
if self.containerNamespace:
unitref = self.parent.id
else:
unitref = None
cnt = 1
root = self.root
while root.search.FilenameToID(self.AddExtension(name), unitref, parameter=dict(id=self.id), operators=dict(id="!=")) != 0:
if cnt>1:
name = name.rstrip("1234567890-")
name = name+"-"+str(cnt)
cnt += 1
return name
def EscapeFilename(self, path):
"""
Converts name to valid path/url
Path length between *self.maxlength-20* and *self.maxlength* chars. Tries to cut longer names at spaces.
(based on django's slugify)
"""
path = unicodedata.normalize("NFKD", path).encode("ascii", "ignore")
path = path.decode("utf-8")
path = re.sub('[^\w\s-]', '', path).strip().lower()
path = re.sub('[-\s]+', '_', path)
# avoid ids as filenames
try:
int(path)
path += "_n"
except:
pass
# cut long filenames
cutlen = 20
if len(path) <= self.maxlength:
return path
# cut at '_'
pos = path[self.maxlength-cutlen:].find("_")
if pos > cutlen:
# no '_' found. cut at maxlength.
return path[:self.maxlength]
return path[:self.maxlength-cutlen+pos]
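    # Example (editor's sketch, not part of the original class):
    #
    #     self.EscapeFilename(u"Über cool Page!")   # -> "uber_cool_page"
    #     self.EscapeFilename(u"2017")              # -> "2017_n" (avoids pure ids)
    #
    # Titles longer than self.maxlength are shortened, preferably at an underscore.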
def AddExtension(self, filename):
if not self.extension:
return filename
return "%s.%s" % (filename, self.extension)
# system functions -----------------------------------------------------------------
def __getitem__(self, id):
"""
Traversal lookup based on object.pool_filename and object.id. Trailing extensions
are ignored if self.extension is None.
`file` is a reserved name and used in the current object to map file downloads.
"""
if id == "file":
raise KeyError(id)
if self.extension is None:
id = id.split(".")
if len(id)>2:
id = (".").join(id[:-1])
else:
id = id[0]
try:
id = int(id)
except ValueError:
name = id
id = 0
if name:
id = self.root.search.FilenameToID(name, self.id)
if not id:
raise KeyError(id)
obj = self.GetObj(id)
if obj is None:
raise KeyError(id)
return obj
def _SetName(self):
self.__name__ = self.meta["pool_filename"]
if not self.__name__:
self.__name__ = str(self.id)
class RootPathExtension(object):
"""
Extension for nive root objects to handle alternative url names
"""
extension = None
# system functions -----------------------------------------------------------------
def __getitem__(self, id):
"""
Traversal lookup based on object.pool_filename and object.id. Trailing extensions
are ignored.
`file` is a reserved name and used in the current object to map file downloads.
"""
if id == "file":
raise KeyError(id)
if self.extension is None:
id = id.split(".")
if len(id)>2:
id = (".").join(id[:-1])
else:
id = id[0]
try:
id = int(id)
except:
name = id
id = 0
if name:
id = self.search.FilenameToID(name, self.id)
if not id:
raise KeyError(id)
obj = self.GetObj(id)
if not obj:
raise KeyError(id)
return obj
class PersistentRootPath(object):
"""
Extension for nive root objects to handle alternative url names
"""
def Init(self):
self.ListenEvent("commit", "UpdateRouting")
self.ListenEvent("dataloaded", "UpdateRouting")
self.UpdateRouting()
def UpdateRouting(self, **kw):
# check url name of root
if self.meta.get("pool_filename"):
name = self.meta.get("pool_filename")
if name != self.__name__:
# close cached root
self.app._CloseRootObj(name=self.__name__)
# update __name__ and hash
self.__name__ = str(name)
self.path = name
            # unique root id generated from the name; a negative integer.
self.idhash = abs(hash(self.__name__))*-1
from nive.tool import Tool, ToolView
from nive.definitions import ToolConf, FieldConf, ViewConf, IApplication
tool_configuration = ToolConf(
id = "rewriteFilename",
context = "nive.extensions.path.RewriteFilenamesTool",
name = "Rewrite pool_filename based on title",
description = "Rewrites all or empty filenames based on form selection.",
apply = (IApplication,),
mimetype = "text/html",
data = [
FieldConf(id="types", datatype="checkbox", default="", settings=dict(codelist="types"), name="Object types", description=""),
FieldConf(id="testrun", datatype="bool", default=1, name="Testrun, no commits", description=""),
FieldConf(id="resetall", datatype="string", default="", size=15, name="Reset all filenames", description="<b>Urls will change! Enter 'reset all'</b>"),
FieldConf(id="tag", datatype="string", default="rewriteFilename", hidden=1)
],
views = [
ViewConf(name="", view=ToolView, attr="form", permission="admin", context="nive.extensions.path.RewriteFilenamesTool")
]
)
class RewriteFilenamesTool(Tool):
def _Run(self, **values):
parameter = dict()
if values.get("resetall")!="reset all":
parameter["pool_filename"] = ""
if values.get("types"):
tt = values.get("types")
if not isinstance(tt, list):
tt = [tt]
parameter["pool_type"] = tt
operators = dict(pool_type="IN", pool_filename="=")
fields = ("id", "title", "pool_type", "pool_filename")
root = self.app.root
recs = root.search.Search(parameter, fields, max=10000, operators=operators, sort="id", ascending=0)
if len(recs["items"]) == 0:
return "<h2>None found!</h2>", False
user = values["original"]["user"]
testrun = values["testrun"]
result = []
cnt = 0
for rec in recs["items"]:
obj = root.LookupObj(rec["id"])
if obj is None or not hasattr(obj, "TitleToFilename"):
continue
filename = obj.meta["pool_filename"]
obj.TitleToFilename()
if filename!=obj.meta["pool_filename"]:
result.append(filename+" <> "+obj.meta["pool_filename"])
if testrun==False:
obj.dbEntry.Commit(user=user)
#obj.CommitInternal(user=user)
cnt += 1
return "OK. %d filenames updated, %d different!<br>%s" % (cnt, len(result), "<br>".join(result)), True<|fim▁end|> | |
<|file_name|>test_provider.py<|end_file_name|><|fim▁begin|>import pytest
import math
import io
import time
import base64
import hashlib
from http import client
from unittest import mock
import aiohttpretty
from waterbutler.core import streams
from waterbutler.core import metadata
from waterbutler.core import exceptions
from waterbutler.core.path import WaterButlerPath
from waterbutler.providers.azureblobstorage import AzureBlobStorageProvider
from waterbutler.providers.azureblobstorage.metadata import AzureBlobStorageFileMetadata
from waterbutler.providers.azureblobstorage.metadata import AzureBlobStorageFolderMetadata
from waterbutler.providers.azureblobstorage.provider import (
MAX_UPLOAD_BLOCK_SIZE,
)
@pytest.fixture
def auth():
return {
'name': 'cat',
'email': '[email protected]',
}
@pytest.fixture
def credentials():
return {
'account_name': 'dontdead',
'account_key': base64.b64encode(b'open inside'),
}
@pytest.fixture
def settings():
return {
'container': 'thatkerning'
}
@pytest.fixture
def mock_time(monkeypatch):
mock_time = mock.Mock(return_value=1454684930.0)
monkeypatch.setattr(time, 'time', mock_time)
@pytest.fixture
def provider(auth, credentials, settings):
provider = AzureBlobStorageProvider(auth, credentials, settings)
return provider
@pytest.fixture
def file_content():
return b'sleepy'
@pytest.fixture
def file_like(file_content):
return io.BytesIO(file_content)
@pytest.fixture
def file_stream(file_like):
return streams.FileStreamReader(file_like)
@pytest.fixture
def large_file_content():
# 71MB (4MB * 17 + 3MB)
return b'a' * (71 * (2 ** 20))
@pytest.fixture
def large_file_like(large_file_content):
return io.BytesIO(large_file_content)
@pytest.fixture
def large_file_stream(large_file_like):
return streams.FileStreamReader(large_file_like)
@pytest.fixture
def folder_metadata():
return b'''<?xml version="1.0" encoding="utf-8"?>
<EnumerationResults ServiceEndpoint="https://vrosf.blob.core.windows.net/" ContainerName="sample-container1">
<Blobs>
<Blob>
<Name>Photos/test-text.txt</Name>
<Properties>
<Last-Modified>Thu, 10 Nov 2016 11:04:45 GMT</Last-Modified>
<Etag>0x8D40959613D32F6</Etag>
<Content-Length>0</Content-Length>
<Content-Type>text/plain</Content-Type>
<Content-Encoding />
<Content-Language />
<Content-MD5 />
<Cache-Control />
<Content-Disposition />
<BlobType>BlockBlob</BlobType>
<LeaseStatus>unlocked</LeaseStatus>
<LeaseState>available</LeaseState>
</Properties>
</Blob>
<Blob>
<Name>Photos/a/test.txt</Name>
<Properties>
<Last-Modified>Thu, 10 Nov 2016 11:04:45 GMT</Last-Modified>
<Etag>0x8D40959613D32F6</Etag>
<Content-Length>0</Content-Length>
<Content-Type>text/plain</Content-Type>
<Content-Encoding />
<Content-Language />
<Content-MD5 />
<Cache-Control />
<Content-Disposition />
<BlobType>BlockBlob</BlobType>
<LeaseStatus>unlocked</LeaseStatus>
<LeaseState>available</LeaseState>
</Properties>
</Blob>
<Blob>
<Name>top.txt</Name>
<Properties>
<Last-Modified>Thu, 10 Nov 2016 11:04:45 GMT</Last-Modified>
<Etag>0x8D40959613D32F6</Etag>
<Content-Length>0</Content-Length>
<Content-Type>text/plain</Content-Type>
<Content-Encoding />
<Content-Language />
<Content-MD5 />
<Cache-Control />
<Content-Disposition /><|fim▁hole|> </Properties>
</Blob>
</Blobs>
<NextMarker />
</EnumerationResults>'''
@pytest.fixture
def file_metadata():
return {
'CONTENT-LENGTH': '0',
'CONTENT-TYPE': 'text/plain',
'LAST-MODIFIED': 'Thu, 10 Nov 2016 11:04:45 GMT',
'ACCEPT-RANGES': 'bytes',
'ETAG': '"0x8D40959613D32F6"',
'SERVER': 'Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0',
'X-MS-REQUEST-ID': '5b4a3cb6-0001-00ea-4575-895e2c000000',
'X-MS-VERSION': '2015-07-08',
'X-MS-LEASE-STATUS': 'unlocked',
'X-MS-LEASE-STATE': 'available',
'X-MS-BLOB-TYPE': 'BlockBlob',
'DATE': 'Fri, 17 Feb 2017 23:28:33 GMT'
}
@pytest.fixture
def large_file_metadata(large_file_content):
return {
'CONTENT-LENGTH': str(len(large_file_content)),
'CONTENT-TYPE': 'text/plain',
'LAST-MODIFIED': 'Thu, 10 Nov 2016 11:04:45 GMT',
'ACCEPT-RANGES': 'bytes',
'ETAG': '"0x8D40959613D32F6"',
'SERVER': 'Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0',
'X-MS-REQUEST-ID': '5b4a3cb6-0001-00ea-4575-895e2c000000',
'X-MS-VERSION': '2015-07-08',
'X-MS-LEASE-STATUS': 'unlocked',
'X-MS-LEASE-STATE': 'available',
'X-MS-BLOB-TYPE': 'BlockBlob',
'DATE': 'Fri, 17 Feb 2017 23:28:33 GMT'
}
class TestValidatePath:
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_validate_v1_path_file(self, provider, file_metadata,
mock_time):
file_path = 'foobah'
for good_metadata_url in provider.generate_urls(file_path, secondary=True):
aiohttpretty.register_uri('HEAD', good_metadata_url, headers=file_metadata)
for bad_metadata_url in provider.generate_urls(secondary=True):
aiohttpretty.register_uri('GET', bad_metadata_url,
params={'restype': 'container', 'comp': 'list'}, status=404)
try:
wb_path_v1 = await provider.validate_v1_path('/' + file_path)
except Exception as exc:
pytest.fail(str(exc))
with pytest.raises(exceptions.NotFoundError) as exc:
await provider.validate_v1_path('/' + file_path + '/')
assert exc.value.code == client.NOT_FOUND
wb_path_v0 = await provider.validate_path('/' + file_path)
assert wb_path_v1 == wb_path_v0
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_validate_v1_path_folder(self, provider, folder_metadata, mock_time):
folder_path = 'Photos'
for good_metadata_url in provider.generate_urls(secondary=True):
aiohttpretty.register_uri(
'GET', good_metadata_url, params={'restype': 'container', 'comp': 'list'},
body=folder_metadata, headers={'Content-Type': 'application/xml'}
)
for bad_metadata_url in provider.generate_urls(folder_path, secondary=True):
aiohttpretty.register_uri('HEAD', bad_metadata_url, status=404)
try:
wb_path_v1 = await provider.validate_v1_path('/' + folder_path + '/')
except Exception as exc:
pytest.fail(str(exc))
with pytest.raises(exceptions.NotFoundError) as exc:
await provider.validate_v1_path('/' + folder_path)
assert exc.value.code == client.NOT_FOUND
wb_path_v0 = await provider.validate_path('/' + folder_path + '/')
assert wb_path_v1 == wb_path_v0
@pytest.mark.asyncio
async def test_normal_name(self, provider, mock_time):
path = await provider.validate_path('/this/is/a/path.txt')
assert path.name == 'path.txt'
assert path.parent.name == 'a'
assert path.is_file
assert not path.is_dir
assert not path.is_root
@pytest.mark.asyncio
async def test_folder(self, provider, mock_time):
path = await provider.validate_path('/this/is/a/folder/')
assert path.name == 'folder'
assert path.parent.name == 'a'
assert not path.is_file
assert path.is_dir
assert not path.is_root
@pytest.mark.asyncio
async def test_root(self, provider, mock_time):
path = await provider.validate_path('/this/is/a/folder/')
assert path.name == 'folder'
assert path.parent.name == 'a'
assert not path.is_file
assert path.is_dir
assert not path.is_root
class TestCRUD:
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_download(self, provider, mock_time):
path = WaterButlerPath('/muhtriangle')
for url in provider.generate_urls(path.path, secondary=True):
aiohttpretty.register_uri('GET', url, body=b'delicious', auto_length=True)
result = await provider.download(path)
content = await result.read()
assert content == b'delicious'
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_download_folder_400s(self, provider, mock_time):
with pytest.raises(exceptions.DownloadError) as e:
await provider.download(WaterButlerPath('/cool/folder/mom/'))
assert e.value.code == 400
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_delete(self, provider, mock_time):
path = WaterButlerPath('/some-file')
for url in provider.generate_urls(path.path):
aiohttpretty.register_uri('DELETE', url, status=200)
await provider.delete(path)
assert aiohttpretty.has_call(method='DELETE', uri=url)
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_folder_delete(self, provider, folder_metadata, mock_time):
path = WaterButlerPath('/Photos/')
for url in provider.generate_urls(secondary=True):
aiohttpretty.register_uri(
'GET', url, params={'restype': 'container', 'comp': 'list'},
body=folder_metadata, headers={'Content-Type': 'application/xml'}
)
delete_urls = []
for url in provider.generate_urls(path.path + "test-text.txt"):
aiohttpretty.register_uri('DELETE', url, status=200)
delete_urls.append(url)
for url in provider.generate_urls(path.path + "a/test.txt"):
aiohttpretty.register_uri('DELETE', url, status=200)
delete_urls.append(url)
await provider.delete(path)
assert aiohttpretty.has_call(method='DELETE', uri=delete_urls[0])
assert aiohttpretty.has_call(method='DELETE', uri=delete_urls[1])
class TestMetadata:
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_metadata_root(self, provider, folder_metadata, mock_time):
path = WaterButlerPath('/')
assert path.is_root
for url in provider.generate_urls(secondary=True):
aiohttpretty.register_uri('GET', url,
params={'restype': 'container', 'comp': 'list'},
body=folder_metadata,
headers={'Content-Type': 'application/xml'})
result = await provider.metadata(path)
assert isinstance(result, list)
assert len(result) == 2
assert result[0].path == '/Photos/'
assert result[0].name == 'Photos'
assert result[0].is_folder
assert result[1].path == '/top.txt'
assert result[1].name == 'top.txt'
assert not result[1].is_folder
assert result[1].extra['md5'] == None
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_metadata_folder(self, provider, folder_metadata, mock_time):
path = WaterButlerPath('/Photos/')
for url in provider.generate_urls(secondary=True):
aiohttpretty.register_uri('GET', url,
params={'restype': 'container', 'comp': 'list'},
body=folder_metadata,
headers={'Content-Type': 'application/xml'})
result = await provider.metadata(path)
assert isinstance(result, list)
assert len(result) == 2
assert result[0].path == '/Photos/a/'
assert result[0].name == 'a'
assert result[0].is_folder
assert result[1].path == '/Photos/test-text.txt'
assert result[1].name == 'test-text.txt'
assert not result[1].is_folder
assert result[1].extra['md5'] == None
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_metadata_file(self, provider, file_metadata, mock_time):
path = WaterButlerPath('/Foo/Bar/my-image.jpg')
provider.url = 'http://test_url'
provider.token = 'test'
for url in provider.generate_urls(path.path, secondary=True):
aiohttpretty.register_uri('HEAD', url, headers=file_metadata)
result = await provider.metadata(path)
assert isinstance(result, metadata.BaseFileMetadata)
assert result.path == str(path)
assert result.name == 'my-image.jpg'
assert result.modified is not None
assert result.extra['md5'] == None
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_metadata_file_missing(self, provider, mock_time):
path = WaterButlerPath('/notfound.txt')
provider.url = 'http://test_url'
provider.token = 'test'
for url in provider.generate_urls(path.path, secondary=True):
aiohttpretty.register_uri('HEAD', url, status=404)
with pytest.raises(exceptions.MetadataError):
await provider.metadata(path)
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_upload(self, provider, file_content, file_stream, file_metadata, mock_time):
path = WaterButlerPath('/foobah')
for url in provider.generate_urls(path.path):
aiohttpretty.register_uri('PUT', url, status=200)
for metadata_url in provider.generate_urls(path.path):
aiohttpretty.register_uri(
'HEAD',
metadata_url,
responses=[
{'status': 404},
{'headers': file_metadata},
],
)
metadata, created = await provider.upload(file_stream, path)
assert metadata.kind == 'file'
assert created
assert aiohttpretty.has_call(method='PUT', uri=url)
assert aiohttpretty.has_call(method='HEAD', uri=metadata_url)
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_upload_large(self, provider, large_file_content, large_file_stream, large_file_metadata, mock_time):
# upload 4MB data 17 times and 3MB once, and request block_list
upload_times = math.floor(len(large_file_content) / MAX_UPLOAD_BLOCK_SIZE)
block_id_prefix = 'hogefuga'
block_id_list = [AzureBlobStorageProvider._format_block_id(block_id_prefix, i) for i in range(upload_times)]
block_req_params_list = [{'comp': 'block', 'blockid': block_id} for block_id in block_id_list]
block_list_req_params = {'comp': 'blocklist'}
path = WaterButlerPath('/large_foobah')
for url in provider.generate_urls(path.path):
for block_req_params in block_req_params_list:
aiohttpretty.register_uri('PUT', url, status=200, params=block_req_params)
aiohttpretty.register_uri('PUT', url, status=200, params=block_list_req_params)
for metadata_url in provider.generate_urls(path.path):
aiohttpretty.register_uri(
'HEAD',
metadata_url,
responses=[
{'status': 404},
{'headers': large_file_metadata},
],
)
metadata, created = await provider.upload(large_file_stream, path, block_id_prefix=block_id_prefix)
assert metadata.kind == 'file'
assert created
for block_req_params in block_req_params_list:
assert aiohttpretty.has_call(method='PUT', uri=url, params=block_req_params)
assert aiohttpretty.has_call(method='PUT', uri=url, params=block_list_req_params)
assert aiohttpretty.has_call(method='HEAD', uri=metadata_url)
class TestCreateFolder:
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_must_start_with_slash(self, provider, mock_time):
path = WaterButlerPath('/alreadyexists')
with pytest.raises(exceptions.CreateFolderError) as e:
await provider.create_folder(path)
assert e.value.code == 400
assert e.value.message == 'Path must be a directory'
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_errors_conflict(self, provider, folder_metadata, mock_time):
path = WaterButlerPath('/alreadyexists/')
for url in provider.generate_urls(secondary=True):
aiohttpretty.register_uri('GET', url,
params={'restype': 'container', 'comp': 'list'},
body=folder_metadata,
headers={'Content-Type': 'application/xml'})
for url in provider.generate_urls('alreadyexists', secondary=True):
aiohttpretty.register_uri('HEAD', url, status=200)
for url in provider.generate_urls('alreadyexists/.osfkeep'):
aiohttpretty.register_uri('PUT', url, status=200)
with pytest.raises(exceptions.FolderNamingConflict) as e:
await provider.create_folder(path)
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_creates(self, provider, folder_metadata, mock_time):
path = WaterButlerPath('/doesntalreadyexists/')
for url in provider.generate_urls(secondary=True):
aiohttpretty.register_uri('GET', url,
params={'restype': 'container', 'comp': 'list'},
body=folder_metadata,
headers={'Content-Type': 'application/xml'})
for url in provider.generate_urls('doesntalreadyexists', secondary=True):
aiohttpretty.register_uri('HEAD', url, status=404)
for url in provider.generate_urls('doesntalreadyexists/.osfkeep'):
aiohttpretty.register_uri('PUT', url, status=200)
resp = await provider.create_folder(path)
assert resp.kind == 'folder'
assert resp.name == 'doesntalreadyexists'
assert resp.path == '/doesntalreadyexists/'
class TestOperations:
async def test_equality(self, provider, mock_time):
assert provider.can_intra_copy(provider)
assert provider.can_intra_move(provider)<|fim▁end|> | <BlobType>BlockBlob</BlobType>
<LeaseStatus>unlocked</LeaseStatus>
<LeaseState>available</LeaseState> |
<|file_name|>urls.py<|end_file_name|><|fim▁begin|>'''
Copyright 2015
This file is part of Orbach.
Orbach is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or<|fim▁hole|>
Orbach is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Orbach. If not, see <http://www.gnu.org/licenses/>.
'''
from django.conf.urls import url, include
from rest_framework.routers import DefaultRouter
from orbach.core import views
router = DefaultRouter()
router.register(r'galleries', views.GalleryViewSet)
router.register(r'image_files', views.ImageFileViewSet)
router.register(r'users', views.UserViewSet)
urlpatterns = [
url(r'^', include(router.urls)),
]<|fim▁end|> | (at your option) any later version. |
<|file_name|>stream_server.js<|end_file_name|><|fim▁begin|>// Generated by CoffeeScript 1.12.6
(function() {
var Bits, CustomReceiver, DEBUG_INCOMING_PACKET_DATA, DEBUG_INCOMING_PACKET_HASH, DEFAULT_SERVER_NAME, Sequent, StreamServer, aac, avstreams, config, crypto, fs, h264, http, logger, mp4, net, packageJson, ref, rtmp, rtsp, serverName;
var uuid = require('node-uuid');
net = require('net');
fs = require('fs');
crypto = require('crypto');
config = require('./config');
rtmp = require('./rtmp');
http = require('./http');
rtsp = require('./rtsp');
h264 = require('./h264');
aac = require('./aac');
mp4 = require('./mp4');
Bits = require('./bits');
avstreams = require('./avstreams');
CustomReceiver = require('./custom_receiver');
logger = require('./logger');
packageJson = require('./package.json');
Sequent = require('sequent');
// var datastore = require('@google-cloud/datastore')({
// projectId: 'hivecast-syndicate',
// keyFilename: './hivecast-syndicate.json'
// });
DEBUG_INCOMING_PACKET_DATA = false;
DEBUG_INCOMING_PACKET_HASH = false;
DEFAULT_SERVER_NAME = "node-rtsp-rtmp-server/" + packageJson.version;
serverName = (ref = config.serverName) != null ? ref : DEFAULT_SERVER_NAME;
StreamServer = (function() {
function StreamServer(opts) {
var httpHandler, ref1, rtmptCallback;
this.serverName = (ref1 = opts != null ? opts.serverName : void 0) != null ? ref1 : serverName;
if (config.enableRTMP || config.enableRTMPT) {
this.rtmpServer = new rtmp.RTMPServer;
this.rtmpServer.on('video_start', (function(_this) {
return function(streamId) {
var stream;
stream = avstreams.getOrCreate(streamId);
_this.dumpToFile(streamId);
return _this.onReceiveVideoControlBuffer(stream);
};
})(this));
this.rtmpServer.on('video_data', (function(_this) {
return function(streamId, pts, dts, nalUnits) {
var stream;
stream = avstreams.get(streamId);
if (stream != null) {
return _this.onReceiveVideoPacket(stream, nalUnits, pts, dts);
} else {
return logger.warn("warn: Received invalid streamId from rtmp: " + streamId);
}
};
})(this));
this.rtmpServer.on('audio_start', (function(_this) {
return function(streamId) {
var stream;
stream = avstreams.getOrCreate(streamId);
return _this.onReceiveAudioControlBuffer(stream);
};
})(this));
this.rtmpServer.on('audio_data', (function(_this) {
return function(streamId, pts, dts, adtsFrame) {
var stream;
stream = avstreams.get(streamId);
if (stream != null) {
return _this.onReceiveAudioPacket(stream, adtsFrame, pts, dts);
} else {
return logger.warn("warn: Received invalid streamId from rtmp: " + streamId);
}
};
})(this));
}
if (config.enableCustomReceiver) {
this.customReceiver = new CustomReceiver(config.receiverType, {
videoControl: (function(_this) {
return function() {
return _this.onReceiveVideoControlBuffer.apply(_this, arguments);
};
})(this),
audioControl: (function(_this) {
return function() {
return _this.onReceiveAudioControlBuffer.apply(_this, arguments);
};
})(this),
videoData: (function(_this) {
return function() {
return _this.onReceiveVideoDataBuffer.apply(_this, arguments);
};
})(this),
audioData: (function(_this) {
return function() {
return _this.onReceiveAudioDataBuffer.apply(_this, arguments);
};
})(this)
});
this.customReceiver.deleteReceiverSocketsSync();
}
if (config.enableHTTP) {
this.httpHandler = new http.HTTPHandler({
serverName: this.serverName,
documentRoot: opts != null ? opts.documentRoot : void 0
});
}
if (config.enableRTSP || config.enableHTTP || config.enableRTMPT) {
if (config.enableRTMPT) {
rtmptCallback = (function(_this) {
return function() {
var ref2;
return (ref2 = _this.rtmpServer).handleRTMPTRequest.apply(ref2, arguments);
};
})(this);
} else {
rtmptCallback = null;
}
if (config.enableHTTP) {
httpHandler = this.httpHandler;
} else {
httpHandler = null;
}
this.rtspServer = new rtsp.RTSPServer({
serverName: this.serverName,
httpHandler: httpHandler,
rtmptCallback: rtmptCallback
});
this.rtspServer.on('video_start', (function(_this) {
return function(stream) {
return _this.onReceiveVideoControlBuffer(stream);
};
})(this));
this.rtspServer.on('audio_start', (function(_this) {
return function(stream) {
return _this.onReceiveAudioControlBuffer(stream);
};
})(this));
this.rtspServer.on('video', (function(_this) {
return function(stream, nalUnits, pts, dts) {
return _this.onReceiveVideoNALUnits(stream, nalUnits, pts, dts);
};
})(this));
this.rtspServer.on('audio', (function(_this) {
return function(stream, accessUnits, pts, dts) {
return _this.onReceiveAudioAccessUnits(stream, accessUnits, pts, dts);
};
})(this));
}
avstreams.on('new', function(stream) {
if (DEBUG_INCOMING_PACKET_HASH) {
return stream.lastSentVideoTimestamp = 0;
}
});
avstreams.on('reset', function(stream) {<|fim▁hole|> return stream.lastSentVideoTimestamp = 0;
}
});
avstreams.on('end', (function(_this) {
return function(stream) {
if (config.enableRTSP) {
_this.rtspServer.sendEOS(stream);
}
if (config.enableRTMP || config.enableRTMPT) {
return _this.rtmpServer.sendEOS(stream);
}
};
})(this));
avstreams.on('audio_data', (function(_this) {
return function(stream, data, pts) {
return _this.onReceiveAudioAccessUnits(stream, [data], pts, pts);
};
})(this));
avstreams.on('video_data', (function(_this) {
return function(stream, nalUnits, pts, dts) {
if (dts == null) {
dts = pts;
}
return _this.onReceiveVideoNALUnits(stream, nalUnits, pts, dts);
};
})(this));
avstreams.on('audio_start', (function(_this) {
return function(stream) {
return _this.onReceiveAudioControlBuffer(stream);
};
})(this));
avstreams.on('video_start', (function(_this) {
return function(stream) {
return _this.onReceiveVideoControlBuffer(stream);
};
})(this));
}
StreamServer.prototype.dumpToFile = function(streamId) {
var serverAddr = config.serverAddress;
var dumpId = (streamId.split('/'))[1];
var spawn = require('child_process').spawn;
var fileName = dumpId + '_' + uuid.v1() + '.flv';
var dumpCmd = 'rtmpdump';
//ffmpeg -re -i input.mp4 -c:v copy -c:a copy -f flv rtmp://localhost/live/STREAM_NAME
//rtmpdump -v -r rtmp://localhost/live/STREAM_NAME -o dump.flv
var dumpArgs = [
'-v',
'-r', `rtmp://${serverAddr}/` + streamId,
'-o', `public/file/${fileName}`
];
var dumpProc = spawn(dumpCmd, dumpArgs);
//var ds_key = datastore.key(['Stream', ])
dumpProc.stdout.on('data', function(data) {
});
dumpProc.stderr.on('data', function(data) {
});
dumpProc.on('close', function() {
console.log(`Stream dump is finished. File could be found at file/${fileName}`);
/*setTimeout(function() {
var streamCmd = 'ffmpeg';
var streamArgs = [
'-re',
'-i', 'file/' + dumpId + '.flv',
'-c', 'copy',
'-f', 'flv',
`rtmp://${serverAddr}/live/cloned_` + dumpId
];
var streamProc = spawn(streamCmd, streamArgs);
streamProc.on('close', function() {
console.log(`FLV: file/${dumpId}.flv is streamed successfully.`);
});
}, 3000);*/
});
};
StreamServer.prototype.attachRecordedDir = function(dir) {
if (config.recordedApplicationName != null) {
logger.info("attachRecordedDir: dir=" + dir + " app=" + config.recordedApplicationName);
return avstreams.attachRecordedDirToApp(dir, config.recordedApplicationName);
}
};
StreamServer.prototype.attachMP4 = function(filename, streamName) {
var context, generator;
logger.info("attachMP4: file=" + filename + " stream=" + streamName);
context = this;
generator = new avstreams.AVStreamGenerator({
generate: function() {
var ascBuf, ascInfo, audioSpecificConfig, bits, err, mp4File, mp4Stream, streamId;
try {
mp4File = new mp4.MP4File(filename);
} catch (error) {
err = error;
logger.error("error opening MP4 file " + filename + ": " + err);
return null;
}
streamId = avstreams.createNewStreamId();
mp4Stream = new avstreams.MP4Stream(streamId);
logger.info("created stream " + streamId + " from " + filename);
avstreams.emit('new', mp4Stream);
avstreams.add(mp4Stream);
mp4Stream.type = avstreams.STREAM_TYPE_RECORDED;
audioSpecificConfig = null;
mp4File.on('audio_data', function(data, pts) {
return context.onReceiveAudioAccessUnits(mp4Stream, [data], pts, pts);
});
mp4File.on('video_data', function(nalUnits, pts, dts) {
if (dts == null) {
dts = pts;
}
return context.onReceiveVideoNALUnits(mp4Stream, nalUnits, pts, dts);
});
mp4File.on('eof', (function(_this) {
return function() {
return mp4Stream.emit('end');
};
})(this));
mp4File.parse();
mp4Stream.updateSPS(mp4File.getSPS());
mp4Stream.updatePPS(mp4File.getPPS());
ascBuf = mp4File.getAudioSpecificConfig();
bits = new Bits(ascBuf);
ascInfo = aac.readAudioSpecificConfig(bits);
mp4Stream.updateConfig({
audioSpecificConfig: ascBuf,
audioASCInfo: ascInfo,
audioSampleRate: ascInfo.samplingFrequency,
audioClockRate: 90000,
audioChannels: ascInfo.channelConfiguration,
audioObjectType: ascInfo.audioObjectType
});
mp4Stream.durationSeconds = mp4File.getDurationSeconds();
mp4Stream.lastTagTimestamp = mp4File.getLastTimestamp();
mp4Stream.mp4File = mp4File;
mp4File.fillBuffer(function() {
context.onReceiveAudioControlBuffer(mp4Stream);
return context.onReceiveVideoControlBuffer(mp4Stream);
});
return mp4Stream;
},
play: function() {
return this.mp4File.play();
},
pause: function() {
return this.mp4File.pause();
},
resume: function() {
return this.mp4File.resume();
},
seek: function(seekSeconds, callback) {
var actualStartTime;
actualStartTime = this.mp4File.seek(seekSeconds);
return callback(null, actualStartTime);
},
sendVideoPacketsSinceLastKeyFrame: function(endSeconds, callback) {
return this.mp4File.sendVideoPacketsSinceLastKeyFrame(endSeconds, callback);
},
teardown: function() {
this.mp4File.close();
return this.destroy();
},
getCurrentPlayTime: function() {
return this.mp4File.currentPlayTime;
},
isPaused: function() {
return this.mp4File.isPaused();
}
});
return avstreams.addGenerator(streamName, generator);
};
StreamServer.prototype.stop = function(callback) {
if (config.enableCustomReceiver) {
this.customReceiver.deleteReceiverSocketsSync();
}
return typeof callback === "function" ? callback() : void 0;
};
StreamServer.prototype.start = function(callback) {
var seq, waitCount;
seq = new Sequent;
waitCount = 0;
if (config.enableRTMP) {
waitCount++;
this.rtmpServer.start({
port: config.rtmpServerPort
}, function() {
return seq.done();
});
}
if (config.enableCustomReceiver) {
this.customReceiver.start();
}
if (config.enableRTSP || config.enableHTTP || config.enableRTMPT) {
waitCount++;
this.rtspServer.start({
port: config.serverPort
}, function() {
return seq.done();
});
}
return seq.wait(waitCount, function() {
return typeof callback === "function" ? callback() : void 0;
});
};
StreamServer.prototype.setLivePathConsumer = function(func) {
if (config.enableRTSP) {
return this.rtspServer.setLivePathConsumer(func);
}
};
StreamServer.prototype.setAuthenticator = function(func) {
if (config.enableRTSP) {
return this.rtspServer.setAuthenticator(func);
}
};
StreamServer.prototype.onReceiveVideoControlBuffer = function(stream, buf) {
stream.resetFrameRate(stream);
stream.isVideoStarted = true;
stream.timeAtVideoStart = Date.now();
return stream.timeAtAudioStart = stream.timeAtVideoStart;
};
StreamServer.prototype.onReceiveAudioControlBuffer = function(stream, buf) {
stream.isAudioStarted = true;
stream.timeAtAudioStart = Date.now();
return stream.timeAtVideoStart = stream.timeAtAudioStart;
};
StreamServer.prototype.onReceiveVideoDataBuffer = function(stream, buf) {
var dts, nalUnit, pts;
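// bytes 1-6 carry the presentation timestamp as a 48-bit big-endian integer; the NAL unit payload starts at offset 7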
pts = buf[1] * 0x010000000000 + buf[2] * 0x0100000000 + buf[3] * 0x01000000 + buf[4] * 0x010000 + buf[5] * 0x0100 + buf[6];
dts = pts;
nalUnit = buf.slice(7);
return this.onReceiveVideoPacket(stream, nalUnit, pts, dts);
};
StreamServer.prototype.onReceiveAudioDataBuffer = function(stream, buf) {
var adtsFrame, dts, pts;
pts = buf[1] * 0x010000000000 + buf[2] * 0x0100000000 + buf[3] * 0x01000000 + buf[4] * 0x010000 + buf[5] * 0x0100 + buf[6];
dts = pts;
adtsFrame = buf.slice(7);
return this.onReceiveAudioPacket(stream, adtsFrame, pts, dts);
};
StreamServer.prototype.onReceiveVideoNALUnits = function(stream, nalUnits, pts, dts) {
var hasVideoFrame, j, len, md5, nalUnit, nalUnitType, tsDiff;
if (DEBUG_INCOMING_PACKET_DATA) {
logger.info("receive video: num_nal_units=" + nalUnits.length + " pts=" + pts);
}
if (config.enableRTSP) {
this.rtspServer.sendVideoData(stream, nalUnits, pts, dts);
}
if (config.enableRTMP || config.enableRTMPT) {
this.rtmpServer.sendVideoPacket(stream, nalUnits, pts, dts);
}
hasVideoFrame = false;
for (j = 0, len = nalUnits.length; j < len; j++) {
nalUnit = nalUnits[j];
nalUnitType = h264.getNALUnitType(nalUnit);
if (nalUnitType === h264.NAL_UNIT_TYPE_SPS) {
stream.updateSPS(nalUnit);
} else if (nalUnitType === h264.NAL_UNIT_TYPE_PPS) {
stream.updatePPS(nalUnit);
} else if ((nalUnitType === h264.NAL_UNIT_TYPE_IDR_PICTURE) || (nalUnitType === h264.NAL_UNIT_TYPE_NON_IDR_PICTURE)) {
hasVideoFrame = true;
}
if (DEBUG_INCOMING_PACKET_HASH) {
md5 = crypto.createHash('md5');
md5.update(nalUnit);
tsDiff = pts - stream.lastSentVideoTimestamp;
logger.info("video: pts=" + pts + " pts_diff=" + tsDiff + " md5=" + (md5.digest('hex').slice(0, 7)) + " nal_unit_type=" + nalUnitType + " bytes=" + nalUnit.length);
stream.lastSentVideoTimestamp = pts;
}
}
if (hasVideoFrame) {
stream.calcFrameRate(pts);
}
};
StreamServer.prototype.onReceiveVideoPacket = function(stream, nalUnitGlob, pts, dts) {
var nalUnits;
nalUnits = h264.splitIntoNALUnits(nalUnitGlob);
if (nalUnits.length === 0) {
return;
}
this.onReceiveVideoNALUnits(stream, nalUnits, pts, dts);
};
StreamServer.prototype.onReceiveAudioAccessUnits = function(stream, accessUnits, pts, dts) {
var accessUnit, i, j, len, md5, ptsPerFrame;
if (config.enableRTSP) {
this.rtspServer.sendAudioData(stream, accessUnits, pts, dts);
}
if (DEBUG_INCOMING_PACKET_DATA) {
logger.info("receive audio: num_access_units=" + accessUnits.length + " pts=" + pts);
}
ptsPerFrame = 90000 / (stream.audioSampleRate / 1024);
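// each AAC access unit covers 1024 PCM samples, so on the 90 kHz clock the per-frame PTS step is 90000 * 1024 / sampleRate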
for (i = j = 0, len = accessUnits.length; j < len; i = ++j) {
accessUnit = accessUnits[i];
if (DEBUG_INCOMING_PACKET_HASH) {
md5 = crypto.createHash('md5');
md5.update(accessUnit);
logger.info("audio: pts=" + pts + " md5=" + (md5.digest('hex').slice(0, 7)) + " bytes=" + accessUnit.length);
}
if (config.enableRTMP || config.enableRTMPT) {
this.rtmpServer.sendAudioPacket(stream, accessUnit, Math.round(pts + ptsPerFrame * i), Math.round(dts + ptsPerFrame * i));
}
}
};
StreamServer.prototype.onReceiveAudioPacket = function(stream, adtsFrameGlob, pts, dts) {
var adtsFrame, adtsFrames, adtsInfo, i, isConfigUpdated, j, len, rawDataBlock, rawDataBlocks, rtpTimePerFrame;
adtsFrames = aac.splitIntoADTSFrames(adtsFrameGlob);
if (adtsFrames.length === 0) {
return;
}
adtsInfo = aac.parseADTSFrame(adtsFrames[0]);
isConfigUpdated = false;
stream.updateConfig({
audioSampleRate: adtsInfo.sampleRate,
audioClockRate: adtsInfo.sampleRate,
audioChannels: adtsInfo.channels,
audioObjectType: adtsInfo.audioObjectType
});
rtpTimePerFrame = 1024;
rawDataBlocks = [];
for (i = j = 0, len = adtsFrames.length; j < len; i = ++j) {
adtsFrame = adtsFrames[i];
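// drop the 7-byte ADTS header (assumes protection_absent=1, i.e. no CRC) to keep only the raw AAC data block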
rawDataBlock = adtsFrame.slice(7);
rawDataBlocks.push(rawDataBlock);
}
return this.onReceiveAudioAccessUnits(stream, rawDataBlocks, pts, dts);
};
return StreamServer;
})();
module.exports = StreamServer;
}).call(this);<|fim▁end|> | if (DEBUG_INCOMING_PACKET_HASH) { |
<|file_name|>ColorSensor.py<|end_file_name|><|fim▁begin|># coding: utf-8
from Sensor import Sensor
import nxt
class ColorSensor(Sensor):
name = 'color'<|fim▁hole|>
def Scan(self):
return self.sensor.get_sample()<|fim▁end|> | def Initialize(self):
#self.sensor = nxt.Light(self.robot.GetBrick(), self.port)
#self.sensor.set_illuminated(0)
self.sensor = nxt.Color20(self.robot.GetBrick(), self.port) |
<|file_name|>firtFile.py<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|> | # The scripts begin here |
<|file_name|>collections_deque_maxlen.py<|end_file_name|><|fim▁begin|><|fim▁hole|>import collections
import random
# Set the random seed so we see the same output each time
# the script is run.
random.seed(1)
d1 = collections.deque(maxlen=3)
d2 = collections.deque(maxlen=3)
for i in range(5):
n = random.randint(0, 100)
print('n =', n)
d1.append(n)
d2.appendleft(n)
print('D1:', d1)
print('D2:', d2)<|fim▁end|> | |
<|file_name|>gmap.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Run GMAP/GSNAP commands. GMAP/GSNAP manual:
<http://research-pub.gene.com/gmap/src/README>
"""
import os.path as op
import sys
import logging
from jcvi.formats.sam import get_prefix
from jcvi.apps.base import OptionParser, ActionDispatcher, need_update, sh, \
get_abs_path
def main():
actions = (
('index', 'wraps gmap_build'),
('align', 'wraps gsnap'),
('gmap', 'wraps gmap'),
)
p = ActionDispatcher(actions)
p.dispatch(globals())
def check_index(dbfile):
dbfile = get_abs_path(dbfile)
dbdir, filename = op.split(dbfile)
if not dbdir:
dbdir = "."
dbname = filename.rsplit(".", 1)[0]
safile = op.join(dbdir, "{0}/{0}.genomecomp".format(dbname))
if dbname == filename:
dbname = filename + ".db"
if need_update(dbfile, safile):
cmd = "gmap_build -D {0} -d {1} {2}".format(dbdir, dbname, filename)
sh(cmd)
else:
logging.error("`{0}` exists. `gmap_build` already run.".format(safile))
return dbdir, dbname
def index(args):
"""
%prog index database.fasta
Wrapper for `gmap_build`. Same interface.
"""
p = OptionParser(index.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
dbfile, = args
check_index(dbfile)
def gmap(args):
"""
%prog gmap database.fasta fastafile
Wrapper for `gmap`.
"""
p = OptionParser(gmap.__doc__)
p.add_option("--cross", default=False, action="store_true",
help="Cross-species alignment")
p.add_option("--npaths", default=0, type="int",
help="Maximum number of paths to show."
" If set to 0, prints two paths if chimera"
" detected, else one.")
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
dbfile, fastafile = args
assert op.exists(dbfile) and op.exists(fastafile)
prefix = get_prefix(fastafile, dbfile)
logfile = prefix + ".log"
gmapfile = prefix + ".gmap.gff3"
if not need_update((dbfile, fastafile), gmapfile):
logging.error("`{0}` exists. `gmap` already run.".format(gmapfile))
else:
dbdir, dbname = check_index(dbfile)
cmd = "gmap -D {0} -d {1}".format(dbdir, dbname)
cmd += " -f 2 --intronlength=100000" # Output format 2
cmd += " -t {0}".format(opts.cpus)
cmd += " --npaths {0}".format(opts.npaths)
if opts.cross:
cmd += " --cross-species"
cmd += " " + fastafile
sh(cmd, outfile=gmapfile, errfile=logfile)
return gmapfile, logfile
def align(args):
"""
%prog align database.fasta read1.fq read2.fq
Wrapper for `gsnap` single-end or paired-end, depending on the number of
args.
"""
from jcvi.formats.fasta import join
from jcvi.formats.fastq import guessoffset
p = OptionParser(align.__doc__)
p.add_option("--join", default=False, action="store_true",
help="Join sequences with padded 50Ns")
p.add_option("--rnaseq", default=False, action="store_true",
help="Input is RNA-seq reads, turn splicing on")
p.add_option("--snp", default=False, action="store_true",
help="Call SNPs after GSNAP")
p.set_home("eddyyeh")
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) == 2:
logging.debug("Single-end alignment")
elif len(args) == 3:
logging.debug("Paired-end alignment")
else:
sys.exit(not p.print_help())
dbfile, readfile = args[0:2]
if opts.join:
dbfile = join([dbfile, "--gapsize=50", "--newid=chr1"])
assert op.exists(dbfile) and op.exists(readfile)
prefix = get_prefix(readfile, dbfile)
logfile = prefix + ".log"
gsnapfile = prefix + ".gsnap"
if not need_update((dbfile, readfile), gsnapfile):
logging.error("`{0}` exists. `gsnap` already run.".format(gsnapfile))
else:
dbdir, dbname = check_index(dbfile)
cmd = "gsnap -D {0} -d {1}".format(dbdir, dbname)
cmd += " -B 5 -m 0.1 -i 2 -n 3" # memory, mismatch, indel penalty, nhits
if opts.rnaseq:
cmd += " -N 1"
cmd += " -t {0}".format(opts.cpus)
cmd += " --gmap-mode none --nofails"
if readfile.endswith(".gz"):
cmd += " --gunzip"
try:
offset = "sanger" if guessoffset([readfile]) == 33 else "illumina"
cmd += " --quality-protocol {0}".format(offset)
except AssertionError:
pass
cmd += " " + " ".join(args[1:])
sh(cmd, outfile=gsnapfile, errfile=logfile)
if opts.snp:
EYHOME = opts.eddyyeh_home
pf = gsnapfile.rsplit(".", 1)[0]
nativefile = pf + ".unique.native"
if need_update(gsnapfile, nativefile):
cmd = op.join(EYHOME, "convert2native.pl")
cmd += " --gsnap {0} -o {1}".format(gsnapfile, nativefile)
cmd += " -proc {0}".format(opts.cpus)
sh(cmd)
<|fim▁hole|> return gsnapfile, logfile
if __name__ == '__main__':
main()<|fim▁end|> | |
<|file_name|>conf.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
#
# cloudtracker documentation build configuration file, created by
# sphinx-quickstart on Fri Aug 5 12:45:40 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath('../cloudtracker/'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.pngmath']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'cloudtracker'
copyright = u'2011, Jordan Dawe'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#<|fim▁hole|>release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'cloudtrackerdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'cloudtracker.tex', u'cloudtracker Documentation',
u'Jordan Dawe', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'cloudtracker', u'cloudtracker Documentation',
[u'Jordan Dawe'], 1)
]<|fim▁end|> | # The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags. |
<|file_name|>migration.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""Module providing base class migration for blog entry content"""
import lxml
from Acquisition import aq_inner
from Products.CMFCore.utils import getToolByName
from Products.Five.browser import BrowserView
from meetshaus.blog.blogpost import IBlogPost
from plone import api
from plone.portlets.interfaces import ILocalPortletAssignable, IPortletManager, \
IPortletAssignmentMapping
from zope.component import getMultiAdapter, getUtility
from zope.lifecycleevent import modified
from meetshaus.blog.blogentry import IBlogEntry
class BlogMigrationView(BrowserView):
""" Migrate blog content
Move blog entries to folderish blog posting content types and
transfer the associated images to the folder content
"""
def __call__(self):
self.has_blog_entries = len(self.blog_entries()) > 0
return self.render()
def render(self):
return self.index()
def blog_entries(self):
items = api.content.find(
context=api.portal.get(),
object_provides=IBlogEntry,
sort_on='effective',
sort_order='reverse'
)
return items
def blog_entries_count(self):
return len(self.blog_entries())
def used_image_assets(self, uuid):
item = api.content.get(UID=uuid)
html_body = item.text.raw
xhtml = lxml.html.document_fromstring(html_body)
images = xhtml.xpath('//img')
image_idx = len(images)
return image_idx
class BlogMigrationRunnerView(BrowserView):
""" Blog migration runner """
def __call__(self):
return self.render()
def render(self):
context = aq_inner(self.context)
base_url = context.absolute_url()
authenticator = getMultiAdapter((context, self.request),
name=u"authenticator")
next_url = '{0}/@@migration-finished?_authenticator={1}'.format(
base_url, authenticator.token())
self._migrate_blog_posts()
modified(context)
context.reindexObject(idxs='modified')
return self.request.response.redirect(next_url)
def _migrate_blog_posts(self):
context = aq_inner(self.context)
migrated = []
not_migrated = []
results = api.content.find(
context=api.portal.get(),
object_provides=IBlogEntry
)
for brain in results:
obj = brain.getObject()
html_body = obj.text.raw
xhtml = lxml.html.document_fromstring(html_body)
images = xhtml.xpath('//img')
img_list = list()
if images:
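# image srcs of the form 'resolveuid/<uid>' reference Plone content by UID; collect those UIDs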
for i in images:
img_src = i.attrib['src']
if img_src.startswith('resolve'):
uuid = img_src.split('/')[1]
img_list.append(uuid)
new_item = api.content.create(
type='meetshaus.blog.blogpost',
title=obj.Title(),
description=obj.Description(),
container=context
)
setattr(new_item, 'Subject', obj.Subject())
setattr(new_item, 'text', obj.text)
api.content.transition(obj=new_item, transition='publish')
effective = obj.EffectiveDate()
new_item.setEffectiveDate(effective)
modified(new_item)
new_item.reindexObject(idxs='modified')
# for img_uid in img_list:
# img_obj = api.content.get(UID=img_uid)
# api.content.move(source=img_obj, target=new_item)
migrated.append(obj.UID())
info_message_template = 'There are {0} objects migrated.'
warn_message_template = 'There are {0} objects not migrated.'
if migrated:
msg = info_message_template.format(len(migrated))
if not_migrated:
msg = warn_message_template.format(len(not_migrated))
api.portal.show_message(
message=msg,
request=self.request
)
return len(migrated)
class BlogMigrationFinishedView(BrowserView):
""" Migration done """
def __call__(self):<|fim▁hole|> return self.render()
def render(self):
return self.index()
class GatherAssetsView(BrowserView):
""" Gather image assets and move to current context"""
def __call__(self):
return self.render()
def render(self):
context = aq_inner(self.context)
base_url = context.absolute_url()
authenticator = getMultiAdapter((context, self.request),
name=u"authenticator")
next_url = '{0}?_authenticator={1}'.format(
base_url, authenticator.token())
self._gather_assets()
modified(context)
context.reindexObject(idxs='modified')
return self.request.response.redirect(next_url)
def _collect_assets(self):
context = aq_inner(self.context)
html_body = context.text.raw
xhtml = lxml.html.document_fromstring(html_body)
images = xhtml.xpath('//img')
img_list = list()
if images:
for i in images:
img_src = i.attrib['src']
if img_src.startswith('resolve'):
uuid = img_src.split('/')[1]
img_list.append(uuid)
return img_list
def _gather_assets(self):
context = aq_inner(self.context)
migrated = 0
contained_images = self._collect_assets()
for uuid in contained_images:
image = api.content.get(UID=uuid)
try:
api.content.move(source=image, target=context)
migrated += 1
except:
# catch potential errors beforehand and debug
import pdb; pdb.set_trace()
pass
modified(context)
context.reindexObject(idxs='modified')
return migrated
class CollectAssets(BrowserView):
""" Collect all assigned images and assets and move to current context"""
def __call__(self):
return self.render()
def render(self):
context = aq_inner(self.context)
base_url = context.absolute_url()
authenticator = getMultiAdapter((context, self.request),
name=u"authenticator")
next_url = '{0}?_authenticator={1}'.format(
base_url, authenticator.token())
self._collect_assets()
return self.request.response.redirect(next_url)
@staticmethod
def _collect_assets():
results = api.content.find(
context=api.portal.get(),
object_provides=IBlogPost
)
for brain in results:
context = brain.getObject()
context.restrictedTraverse('@@gather-assets')()
return
class RemovePortletAssignments(BrowserView):
""" Gather image assets and move to current context"""
def __call__(self):
return self.render()
def render(self):
context = aq_inner(self.context)
base_url = context.absolute_url()
authenticator = getMultiAdapter((context, self.request),
name=u"authenticator")
next_url = '{0}?_authenticator={1}'.format(
base_url, authenticator.token())
self._cleanup_assignments()
return self.request.response.redirect(next_url)
@staticmethod
def _cleanup_assignments():
catalog = api.portal.get_tool('portal_catalog')
all_brains = catalog.searchResults()
for i in all_brains:
obj = i.getObject()
if not ILocalPortletAssignable.providedBy(obj):
continue
for manager_name in ('plone.leftcolumn','plone.rightcolumn'):
manager = getUtility(IPortletManager, name=manager_name)
assignment_mapping = getMultiAdapter((obj, manager),
IPortletAssignmentMapping)
for item in list(assignment_mapping.keys()):
del assignment_mapping[item]<|fim▁end|> | |
<|file_name|>Paths.java<|end_file_name|><|fim▁begin|>package be.idoneus.hipchat.buildbot.hipchat.server;
<|fim▁hole|> public static String PATH_CAPABILITIES = "";
public static String PATH_INSTALL = "install";
public static String PATH_WEBHOOK_ROOM_MESSAGE = "webhooks/room_message";
public static String PATH_GLANCES = "glances";
}<|fim▁end|> | public class Paths { |
<|file_name|>problem_0001.rs<|end_file_name|><|fim▁begin|>fn triangle_number(n: i32) -> i32{
n*(n+1)/2
}<|fim▁hole|> let brute_force = (1..1000)//All numbers below 1000
.filter(|&x| (x%3)*(x%5) ==0)//which are multiples of 3 or 5
.fold(0, |acc, item| acc + item);//sum them
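// inclusion-exclusion: multiples of both 3 and 5 (i.e. of 15) are counted twice above, so subtract them once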
let mathy = 3*triangle_number(999/3)
+5*triangle_number(999/5)
-15*triangle_number(999/15);
println!("{}, {}", mathy, brute_force);
}<|fim▁end|> |
fn main() { |
<|file_name|>get_ng_build.py<|end_file_name|><|fim▁begin|>import io
import os
import requests
import shutil
import sys
import zipfile
from waxe_image import __version__
API_RELEASES_URL = 'https://api.github.com/repos/waxe/waxe-image/releases'
NG_BUILD_FOLDER = 'website'
def main(argv=sys.argv):
if len(argv) > 2:
print('Too many arguments')
sys.exit(1)
global NG_BUILD_FOLDER
if len(argv) == 2:
NG_BUILD_FOLDER = argv[1]
if os.path.isdir(NG_BUILD_FOLDER):
shutil.rmtree(NG_BUILD_FOLDER)
if os.path.exists(NG_BUILD_FOLDER):
print('There is an issue with the folder %s' % NG_BUILD_FOLDER)
sys.exit(1)
r = requests.get(API_RELEASES_URL)
if r.status_code != 200:
raise ValueError('Bad status code %s' % r.status_code)
releases = r.json()
release = None
for rel in releases:
if rel['tag_name'] == __version__:
release = rel
break
if not release:
raise Exception('No release found for the current version %s' %
__version__)
ng_asset = None
for asset in release['assets']:
if 'waxe-image-ng.zip' in asset['browser_download_url']:
ng_asset = asset
break<|fim▁hole|> r = requests.get(url, stream=True)
if r.status_code != 200:
raise ValueError('Bad status code %s' % r.status_code)
z = zipfile.ZipFile(io.BytesIO(r.content))
z.extractall(NG_BUILD_FOLDER)<|fim▁end|> |
assert(ng_asset)
url = ng_asset['browser_download_url']
|
<|file_name|>fibonacci.rs<|end_file_name|><|fim▁begin|>fn fib(n: i64) -> i64 {
if n == 0 {
0<|fim▁hole|> fib(n - 1) + fib(n - 2)
}
}
fn main() {
let argument = 10;
println!("fib({}) = {}", argument, fib(argument));
}<|fim▁end|> | } else if n == 1 {
1
} else { |
<|file_name|>ProjectBanner.js<|end_file_name|><|fim▁begin|>const m = require('mithril');
const Component = require('../../core/Component');
class ProjectBanner extends Component {
view(vnode) {
return m('.project', {
style: "background-image: url(" + vnode.attrs.bannerImage + ")",
onclick: function() {
m.route.set("/" + vnode.attrs.id)
}
}, [
m('.overlay', [
m('.text-container', [
m('span', [
m('h5', vnode.attrs.title),
m('i.fa.fa-info-circle')
]),
m('p', vnode.attrs.brief)
]),<|fim▁hole|>
module.exports = ProjectBanner;<|fim▁end|> | ])
])
}
} |
<|file_name|>database.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pymongo
from pymongo import MongoClient
from pymongo import errors
import re
class Database(object):
'''Database creation'''
def __init__(self, database_name):
self.client = MongoClient('mongodb://localhost,localhost:27017')
self.db_name = database_name
self.db = self.client[self.db_name]
#self.jobs = self.client[self.db_name].jobs
#self.results = self.db['results']
#self.queue = self.db['queue']
#self.log = self.db['log']
#self.sources = self.db['sources']
#self.jobs = self.db['jobs']
#self.db.x = self.db[x]
# def __repr__(self, database_name):
# print "Using database: %s" %self.client[database_name]
# return self.db
def use_db(self, database_name):
return self.client[str(name)]<|fim▁hole|> def create_coll(self, coll_name):
setattr(self, str(coll_name), self.db[str(coll_name)])
#print "coll : %s has been created in db:%s " %(self.__dict__[str(coll_name)], self.db_name)
return self.__dict__[str(coll_name)]
def create_colls(self, coll_names=["results","sources", "logs", "queue"]):
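'''Create several collections at once, exposing each as an attribute, and return the database's collection names.'''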
for n in coll_names:
setattr(self, n, self.db[str(n)])
# self.queue = self.db['queue']
# self.log = self.db['log']
# self.sources = self.db['sources']
# #print "Creating coll", [n for n in self.db.collection_names()]
return [n for n in self.db.collection_names()]
def show_coll(self):
try:
print "using collection %s in DB : %s" %(self.coll_name, self.db_name)
return self.coll_name
except AttributeError:
return False
#return self.db.collection_names()
def show_coll_items(self, coll_name):
return [n for n in self.db[str(coll_name)].find()]
# def count(self, coll_name):
# self.db_coll = self.db[str(coll_name)]
# return self.db_coll.count()
def drop(self, type, name):
if type == "collection":
return self.db[str(name)].drop()
elif type == "database":
return self.client.drop_database(str(name))
else:
print "Unknown Type"
return False
def drop_all_dbs(self):
'''remove EVERY SINGLE MONGO DATABASE'''
for n in self.show_dbs():
self.use_db(n)
self.drop("database", n)
def stats(self):
'''Output the current stats of database in Terminal'''
title = "===STATS===\n"
name ="Stored results in Mongo Database: %s \n" %(self.db_name)
res = "\t-Nombre de resultats dans la base: %d\n" % (self.db.results.count())
sources = "\t-Nombre de sources: %d\n" % len(self.db.sources.distinct('url'))
url = "\t-urls en cours de traitement: %d\n" % (self.db.queue.count())
url2 = "\t-urls traitees: %d\n" % (self.db.results.count()+ self.db.log.count())
url3 = "\t-urls erronées: %d\n" % (self.db.log.count())
size = "\t-Size of the database %s: %d MB\n" % (self.db_name, (self.db.command('dbStats', 1024)['storageSize'])/1024/1024.)
result = [title, name, res, sources, url, url2, size]
return "".join(result)
def report(self):
''' Output the current stats of the database for the email report '''
res = "<li>Nombre de resultats dans la base: %d</li>" % (self.db.results.count())
sources = "<li>Nombre de sources: %d</li>" % len(self.db.sources.distinct('url'))
url = "<li>urls en cours de traitement: %d\n</li>" % (self.db.queue.count())
url2 = "<li>urls traitees: %d</li>" % (self.db.results.count()+ self.db.log.count())
size = "<li>Size of the database %s: %d MB</li>" % (self.db_name, (self.db.command('dbStats', 1024)['storageSize'])/1024/1024.)
result = [res, sources, url, url2, size]
return "".join(result)
# Define export gephi inside report option
# def create_node(self):
# label = ["url", "outlink", "backlink"]
# urllist = [n for n in self.db.results.distinct("url")]
# # outlist = [u for u in n['outlinks'] for n in self.db.results.find() if u not in outlist]
# # backlist = [u["url"] for u in n['backlinks'] for n in self.db.results.find() if u["url"] not in backlist]
# outlist = []
# backlist = []
# print len(urllist)
# for n in self.db.results.find():
# if n["outlinks"] is None:
# pass
# for o in n["outlinks"]:
# if o is not None:
# outlist.append([o["url"], "backlink"])
# for n in self.db.results.find():
# if n != []:
# for o in n["backlinks"]:
# if o is not None:
# backlist.append([o["url"], "backlink"])
# return
# def export_outlinks(self):
# '''Output url : outlink'''
# print "source; target"
# for n in self.db.results.find():
# for o in n["outlinks"]:
# if o is not None:
# print n['url']+";"+o
# else:
# print n["url"]+";None"
# return
# def export_backlinks(self):
# print "source;target"
# for n in self.db.results.find():
# if n != []:
# for u in n["backlinks"]:
# print n["url"]+";"+u["url"]
# # for o in n["backlinks"]:
# # if o is not None:
# # print n['url']+";"+o
# # else:
# # print n["url"]+";None"
# return
if __name__ == "__main__":
db = Database('RRI')
db.create_node()<|fim▁end|> |
def show_dbs(self):
return self.client.database_names()
|
<|file_name|>utils.py<|end_file_name|><|fim▁begin|># Copyright (C) 2011 Lukas Lalinsky
# Distributed under the MIT license, see the LICENSE file for details.
import re
import syslog
from logging import Handler
from logging.handlers import SysLogHandler<|fim▁hole|> Logging handler that logs to the local syslog using the syslog module
"""
facility_names = {
"auth": syslog.LOG_AUTH,
"cron": syslog.LOG_CRON,
"daemon": syslog.LOG_DAEMON,
"kern": syslog.LOG_KERN,
"lpr": syslog.LOG_LPR,
"mail": syslog.LOG_MAIL,
"news": syslog.LOG_NEWS,
"syslog": syslog.LOG_SYSLOG,
"user": syslog.LOG_USER,
"uucp": syslog.LOG_UUCP,
"local0": syslog.LOG_LOCAL0,
"local1": syslog.LOG_LOCAL1,
"local2": syslog.LOG_LOCAL2,
"local3": syslog.LOG_LOCAL3,
"local4": syslog.LOG_LOCAL4,
"local5": syslog.LOG_LOCAL5,
"local6": syslog.LOG_LOCAL6,
"local7": syslog.LOG_LOCAL7,
}
priority_map = {
"DEBUG": syslog.LOG_DEBUG,
"INFO": syslog.LOG_INFO,
"WARNING": syslog.LOG_WARNING,
"ERROR": syslog.LOG_ERR,
"CRITICAL": syslog.LOG_CRIT
}
def __init__(self, ident=None, facility=syslog.LOG_USER, log_pid=False):
Handler.__init__(self)
self.facility = facility
if isinstance(facility, basestring):
self.facility = self.facility_names[facility]
options = 0
if log_pid:
options |= syslog.LOG_PID
syslog.openlog(ident, options, self.facility)
self.formatter = None
def close(self):
Handler.close(self)
syslog.closelog()
def emit(self, record):
try:
msg = self.format(record)
if isinstance(msg, unicode):
msg = msg.encode('utf-8')
priority = self.priority_map[record.levelname]
for m in msg.splitlines():
syslog.syslog(self.facility | priority, m)
except StandardError:
self.handleError(record)<|fim▁end|> |
class LocalSysLogHandler(Handler):
""" |
<|file_name|>RecentHistory-test.tsx<|end_file_name|><|fim▁begin|>/*
* SonarQube
* Copyright (C) 2009-2022 SonarSource SA
* mailto:info AT sonarsource DOT com
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 3 of the License, or (at your option) any later version.
*<|fim▁hole|> * This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
import { get, remove, save } from '../../../helpers/storage';
import RecentHistory, { History } from '../RecentHistory';
jest.mock('../../../helpers/storage', () => ({
get: jest.fn(),
remove: jest.fn(),
save: jest.fn()
}));
beforeEach(() => {
(get as jest.Mock).mockClear();
(remove as jest.Mock).mockClear();
(save as jest.Mock).mockClear();
});
it('should get existing history', () => {
const history = [{ key: 'foo', name: 'Foo', icon: 'TRK' }];
(get as jest.Mock).mockReturnValueOnce(JSON.stringify(history));
expect(RecentHistory.get()).toEqual(history);
expect(get).toBeCalledWith('sonar_recent_history');
});
it('should get empty history', () => {
(get as jest.Mock).mockReturnValueOnce(null);
expect(RecentHistory.get()).toEqual([]);
expect(get).toBeCalledWith('sonar_recent_history');
});
it('should return [] and clear history in case of failure', () => {
(get as jest.Mock).mockReturnValueOnce('not a json');
expect(RecentHistory.get()).toEqual([]);
expect(get).toBeCalledWith('sonar_recent_history');
expect(remove).toBeCalledWith('sonar_recent_history');
});
it('should save history', () => {
const history = [{ key: 'foo', name: 'Foo', icon: 'TRK' }];
RecentHistory.set(history);
expect(save).toBeCalledWith('sonar_recent_history', JSON.stringify(history));
});
it('should clear history', () => {
RecentHistory.clear();
expect(remove).toBeCalledWith('sonar_recent_history');
});
it('should add item to history', () => {
const history = [{ key: 'foo', name: 'Foo', icon: 'TRK' }];
(get as jest.Mock).mockReturnValueOnce(JSON.stringify(history));
RecentHistory.add('bar', 'Bar', 'VW');
expect(save).toBeCalledWith(
'sonar_recent_history',
JSON.stringify([{ key: 'bar', name: 'Bar', icon: 'VW' }, ...history])
);
});
it('should keep 10 items maximum', () => {
const history: History = [];
for (let i = 0; i < 10; i++) {
history.push({ key: `key-${i}`, name: `name-${i}`, icon: 'TRK' });
}
(get as jest.Mock).mockReturnValueOnce(JSON.stringify(history));
RecentHistory.add('bar', 'Bar', 'VW');
expect(save).toBeCalledWith(
'sonar_recent_history',
JSON.stringify([{ key: 'bar', name: 'Bar', icon: 'VW' }, ...history.slice(0, 9)])
);
});
it('should remove component from history', () => {
const history: History = [];
for (let i = 0; i < 10; i++) {
history.push({ key: `key-${i}`, name: `name-${i}`, icon: 'TRK' });
}
(get as jest.Mock).mockReturnValueOnce(JSON.stringify(history));
RecentHistory.remove('key-5');
expect(save).toBeCalledWith(
'sonar_recent_history',
JSON.stringify([...history.slice(0, 5), ...history.slice(6)])
);
});<|fim▁end|> | |
<|file_name|>real_test.py<|end_file_name|><|fim▁begin|># coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<|fim▁hole|>
__author__ = 'Sean Lip'
from core.tests import test_utils
from extensions.rules import real
class RealRuleUnitTests(test_utils.GenericTestBase):
"""Tests for rules operating on Real objects."""
def test_equals_rule(self):
self.assertTrue(real.Equals(3).eval(3))
self.assertTrue(real.Equals(3.0).eval(3))
self.assertFalse(real.Equals(4).eval(3))
def test_is_less_than_rule(self):
self.assertTrue(real.IsLessThan(4).eval(3))
self.assertTrue(real.IsLessThan(4).eval(3.0))
self.assertTrue(real.IsLessThan(4.0).eval(3.0))
self.assertFalse(real.IsLessThan(3).eval(3))
self.assertFalse(real.IsLessThan(3.0).eval(3.0))
self.assertFalse(real.IsLessThan(3.0).eval(4.0))
self.assertFalse(real.IsLessThan(3).eval(4))
def test_is_greater_than_rule(self):
self.assertTrue(real.IsGreaterThan(3).eval(4))
self.assertTrue(real.IsGreaterThan(3.0).eval(4))
self.assertTrue(real.IsGreaterThan(3.0).eval(4.0))
self.assertFalse(real.IsGreaterThan(3).eval(3))
self.assertFalse(real.IsGreaterThan(3.0).eval(3.0))
self.assertFalse(real.IsGreaterThan(4.0).eval(3.0))
self.assertFalse(real.IsGreaterThan(4).eval(3))
def test_is_less_than_or_equal_to_rule(self):
rule = real.IsLessThanOrEqualTo(3)
self.assertTrue(rule.eval(2))
self.assertTrue(rule.eval(3))
self.assertFalse(rule.eval(4))
def test_is_greater_than_or_equal_to_rule(self):
rule = real.IsGreaterThanOrEqualTo(3)
self.assertTrue(rule.eval(4))
self.assertTrue(rule.eval(3))
self.assertFalse(rule.eval(2))
def test_is_inclusively_between_rule(self):
with self.assertRaises(AssertionError):
real.IsInclusivelyBetween(2, 1)
rule = real.IsInclusivelyBetween(1, 3)
self.assertTrue(rule.eval(2))
self.assertTrue(rule.eval(1))
self.assertTrue(rule.eval(3))
self.assertTrue(rule.eval(1.0))
self.assertFalse(rule.eval(3.001))
def test_is_within_tolerance_rule(self):
rule = real.IsWithinTolerance(0.5, 0)
self.assertTrue(rule.eval(0))
self.assertTrue(rule.eval(0.5))
self.assertFalse(rule.eval(0.51))<|fim▁end|> | """Tests for classification of real numbers.""" |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
##############################################################################
#
# Authors: Stéphane Bidoul & Olivier Laurent
# Copyright (c) 2012 Acsone SA/NV (http://www.acsone.eu)
# All Rights Reserved
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contact a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#<|fim▁hole|># This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from . import hr_utilization_print<|fim▁end|> | |
<|file_name|>export-worker.ts<|end_file_name|><|fim▁begin|>
//this is here for compilation in a web worker
interface CanvasRenderingContext2D {
}
declare var PDFDocument: PDFKit.PDFDocument;
/* module system */
var module = this as NodeModule;
module.require = (id: string): any => {
if (id in module) {
return module[id];
}
return this;
};
importScripts(
'../../target/js/browser.maker.js?' + new Date().valueOf(),
'../../external/bezier-js/bezier.js',
'../iexport.js');
var makerjs: typeof MakerJs = require('makerjs');
var unionCount = 0;
var unionIndex = 0;
var polygonCount = 0;
var polygonIndex = 0;
var incrementUnion: Function;
var incrementPolygon: Function;
var deps: { [format: number]: { loaded: boolean; load?: Function } } = {};
deps[MakerJsPlaygroundExport.ExportFormat.Dxf] = { loaded: true };
deps[MakerJsPlaygroundExport.ExportFormat.Json] = { loaded: true };
deps[MakerJsPlaygroundExport.ExportFormat.OpenJsCad] = { loaded: true };
deps[MakerJsPlaygroundExport.ExportFormat.Svg] = { loaded: true };
deps[MakerJsPlaygroundExport.ExportFormat.Stl] = {
loaded: false,
load: function () {
importScripts(
'../../external/OpenJsCad/csg.js',
'../../external/OpenJsCad/formats.js'
);
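// the overrides below wrap CSG/CAG methods so every call advances a counter, which drives the STL export progress messages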
CSG.Path2D.prototype['appendArc2'] = CSG.Path2D.prototype.appendArc;
CSG.Path2D.prototype.appendArc = function (endpoint: CSG.Vector2D, options: CSG.IEllpiticalArcOptions): CSG.Path2D {
unionIndex++;
incrementUnion();
return this['appendArc2'](endpoint, options);
};
CSG.Path2D.prototype['appendPoint2'] = CSG.Path2D.prototype.appendPoint;
CSG.Path2D.prototype.appendPoint = function (point: CSG.Vector2D): CSG.Path2D {
unionIndex++;
incrementUnion();
return this['appendPoint2'](point);
};
CAG.prototype['union2'] = CAG.prototype.union;
CAG.prototype.union = function (cag: any): CAG {
unionIndex++;
incrementUnion();
return this['union2'](cag);
};
CSG.Polygon.prototype['toStlString2'] = CSG.Polygon.prototype.toStlString;
CSG.Polygon.prototype.toStlString = function (): string {
polygonIndex++;
incrementPolygon();
return this['toStlString2']();
};
CSG.prototype['toStlString2'] = CSG.prototype.toStlString;
CSG.prototype.toStlString = function (): string {
polygonCount = (<CSG>this).polygons.length;
polygonIndex = 0;
return this['toStlString2']();
};
}
};
deps[MakerJsPlaygroundExport.ExportFormat.Pdf] = {
loaded: false,
load: function () {
importScripts(
'../../external/text-encoding/encoding-indexes.js',
'../../external/text-encoding/encoding.js',
'../../external/PDFKit/pdfkit.js',
'string-reader.js'
);
}
//TODO: instrument stringreader for PDF percentage ouput<|fim▁hole|>
interface IExporter {
(modelToExport: MakerJs.IModel, options: MakerJs.exporter.IExportOptions): any;
}
function getExporter(format: MakerJsPlaygroundExport.ExportFormat, result: MakerJsPlaygroundExport.IExportResponse): IExporter {
var f = MakerJsPlaygroundExport.ExportFormat;
if (!deps[format].loaded) {
deps[format].load();
deps[format].loaded = true;
}
switch (format) {
case f.Json:
return JSON.stringify;
case f.Dxf:
function toDXF(model: MakerJs.IModel, options: MakerJs.exporter.IDXFRenderOptions) {
if (!options.units) {
options.units = model.units || makerjs.unitType.Millimeter;
}
return makerjs.exporter.toDXF(model, options);
}
return toDXF;
case f.Svg:
return makerjs.exporter.toSVG;
case f.OpenJsCad:
return makerjs.exporter.toOpenJsCad;
case f.Stl:
function toStl(model: MakerJs.IModel, options: MakerJs.exporter.IOpenJsCadOptions) {
var script = makerjs.exporter.toOpenJsCad(model, options);
script += 'return ' + options.functionName + '();';
unionCount = (script.match(/union/g) || []).length
+ (script.match(/appendArc/g) || []).length
+ (script.match(/appendPoint/g) || []).length;
unionIndex = 0;
var f = new Function(script);
var csg = <CSG>f();
return csg.toStlString();
}
return toStl;
case f.Pdf:
function toPdf(model: MakerJs.IModel, options: MakerJs.exporter.IExportOptions) {
function complete(pdfDataString: string) {
result.text = pdfDataString;
result.percentComplete = 100;
postMessage(result);
}
//TODO: watermark
//TODO: title, author, grid from options
var pdfOptions: PDFKit.PDFDocumentOptions = {
compress: false,
info: {
Producer: 'MakerJs',
Author: 'MakerJs'
}
};
var doc: PDFKit.PDFDocument = new PDFDocument(pdfOptions);
var reader = new StringReader(complete);
var stream = doc.pipe(reader);
//TODO: break up model across pages
//one inch margin
var exportOptions: MakerJs.exporter.IPDFRenderOptions = {
origin: [72, 72]
};
makerjs.exporter.toPDF(doc, model, exportOptions);
doc.end();
}
return toPdf;
}
}
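// Illustrative only: the playground page (not this file) drives the worker with
// something roughly like
//   worker.postMessage({ format: MakerJsPlaygroundExport.ExportFormat.Svg, model: someModel, options: {} });
//   worker.onmessage = e => updateProgress(e.data.percentComplete, e.data.text);
// where `someModel` and `updateProgress` are placeholders and the exact
// IExportRequest fields are defined in iexport.js.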
/* events */
onmessage = (ev: MessageEvent) => {
var request = ev.data as MakerJsPlaygroundExport.IExportRequest;
var result: MakerJsPlaygroundExport.IExportResponse = {
request: request,
text: null,
percentComplete: 0
};
var exporter = getExporter(request.format, result);
if (exporter) {
incrementUnion = function () {
result.percentComplete = 50 * unionIndex / unionCount;
postMessage(result);
};
incrementPolygon = function () {
result.percentComplete = 50 + 50 * polygonIndex / polygonCount;
postMessage(result);
        };
//call the exporter function.
result.text = exporter(request.model, request.options);
result.percentComplete = 100;
postMessage(result);
}
}<|fim▁end|> | }; |
<|file_name|>mock_stats_provider.go<|end_file_name|><|fim▁begin|>/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testing
import (
	cm "k8s.io/kubernetes/pkg/kubelet/cm"
	corev1 "k8s.io/api/core/v1"
	mock "github.com/stretchr/testify/mock"
	types "k8s.io/apimachinery/pkg/types"
	v1 "github.com/google/cadvisor/info/v1"
	v1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
	volume "k8s.io/kubernetes/pkg/volume"
)
// GENERATED BY mockery
// StatsProvider is an autogenerated mock type for the StatsProvider type
type StatsProvider struct {
mock.Mock
}
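// Illustrative usage only (not part of the generated mock): a test configures the
// mock with testify's expectation API before handing it to the code under test.
// The expectations below are hypothetical examples.
//
//	provider := new(StatsProvider)
//	provider.On("GetNodeConfig").Return(cm.NodeConfig{})
//	provider.On("ListPodStats").Return([]v1alpha1.PodStats{}, nil)
//	provider.AssertExpectations(t)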
// GetCgroupStats provides a mock function with given fields: cgroupName, updateStats
func (_m *StatsProvider) GetCgroupStats(cgroupName string, updateStats bool) (*v1alpha1.ContainerStats, *v1alpha1.NetworkStats, error) {
ret := _m.Called(cgroupName, updateStats)
var r0 *v1alpha1.ContainerStats
if rf, ok := ret.Get(0).(func(string, bool) *v1alpha1.ContainerStats); ok {
r0 = rf(cgroupName, updateStats)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*v1alpha1.ContainerStats)
}
}
var r1 *v1alpha1.NetworkStats
if rf, ok := ret.Get(1).(func(string, bool) *v1alpha1.NetworkStats); ok {
r1 = rf(cgroupName, updateStats)
} else {
if ret.Get(1) != nil {
r1 = ret.Get(1).(*v1alpha1.NetworkStats)
}
}
var r2 error
if rf, ok := ret.Get(2).(func(string, bool) error); ok {
r2 = rf(cgroupName, updateStats)
} else {
r2 = ret.Error(2)
}
return r0, r1, r2
}
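// Every generated method follows the pattern above: _m.Called records the call and
// looks up the values registered via On(...).Return(...); each return slot accepts
// either a literal value or a function with the matching signature, letting a test
// compute results from the actual arguments.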
// GetPodByCgroupfs provides the pod that maps to the specified cgroup, as well
// as whether the pod was found. This stub always reports that no pod was found.
func (_m *StatsProvider) GetPodByCgroupfs(cgroupfs string) (*corev1.Pod, bool) {
return nil, false
}
// GetContainerInfo provides a mock function with given fields: podFullName, uid, containerName, req
func (_m *StatsProvider) GetContainerInfo(podFullName string, uid types.UID, containerName string, req *v1.ContainerInfoRequest) (*v1.ContainerInfo, error) {
ret := _m.Called(podFullName, uid, containerName, req)
var r0 *v1.ContainerInfo
if rf, ok := ret.Get(0).(func(string, types.UID, string, *v1.ContainerInfoRequest) *v1.ContainerInfo); ok {
r0 = rf(podFullName, uid, containerName, req)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*v1.ContainerInfo)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(string, types.UID, string, *v1.ContainerInfoRequest) error); ok {
r1 = rf(podFullName, uid, containerName, req)
} else {
r1 = ret.Error(1)
}<|fim▁hole|>
// GetNode provides a mock function with given fields:
func (_m *StatsProvider) GetNode() (*corev1.Node, error) {
ret := _m.Called()
var r0 *corev1.Node
if rf, ok := ret.Get(0).(func() *corev1.Node); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*corev1.Node)
}
}
var r1 error
if rf, ok := ret.Get(1).(func() error); ok {
r1 = rf()
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// GetNodeConfig provides a mock function with given fields:
func (_m *StatsProvider) GetNodeConfig() cm.NodeConfig {
ret := _m.Called()
var r0 cm.NodeConfig
if rf, ok := ret.Get(0).(func() cm.NodeConfig); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(cm.NodeConfig)
}
return r0
}
// GetPodCgroupRoot provides a mock function with given fields:
func (_m *StatsProvider) GetPodCgroupRoot() string {
ret := _m.Called()
var r0 string
if rf, ok := ret.Get(0).(func() string); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(string)
}
return r0
}
// GetPodByName provides a mock function with given fields: namespace, name
func (_m *StatsProvider) GetPodByName(namespace string, name string) (*corev1.Pod, bool) {
ret := _m.Called(namespace, name)
var r0 *corev1.Pod
if rf, ok := ret.Get(0).(func(string, string) *corev1.Pod); ok {
r0 = rf(namespace, name)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*corev1.Pod)
}
}
var r1 bool
if rf, ok := ret.Get(1).(func(string, string) bool); ok {
r1 = rf(namespace, name)
} else {
r1 = ret.Get(1).(bool)
}
return r0, r1
}
// GetPods provides a mock function with given fields:
func (_m *StatsProvider) GetPods() []*corev1.Pod {
ret := _m.Called()
var r0 []*corev1.Pod
if rf, ok := ret.Get(0).(func() []*corev1.Pod); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]*corev1.Pod)
}
}
return r0
}
// GetRawContainerInfo provides a mock function with given fields: containerName, req, subcontainers
func (_m *StatsProvider) GetRawContainerInfo(containerName string, req *v1.ContainerInfoRequest, subcontainers bool) (map[string]*v1.ContainerInfo, error) {
ret := _m.Called(containerName, req, subcontainers)
var r0 map[string]*v1.ContainerInfo
if rf, ok := ret.Get(0).(func(string, *v1.ContainerInfoRequest, bool) map[string]*v1.ContainerInfo); ok {
r0 = rf(containerName, req, subcontainers)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(map[string]*v1.ContainerInfo)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(string, *v1.ContainerInfoRequest, bool) error); ok {
r1 = rf(containerName, req, subcontainers)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// ImageFsStats provides a mock function with given fields:
func (_m *StatsProvider) ImageFsStats() (*v1alpha1.FsStats, error) {
ret := _m.Called()
var r0 *v1alpha1.FsStats
if rf, ok := ret.Get(0).(func() *v1alpha1.FsStats); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*v1alpha1.FsStats)
}
}
var r1 error
if rf, ok := ret.Get(1).(func() error); ok {
r1 = rf()
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// ListPodStats provides a mock function with given fields:
func (_m *StatsProvider) ListPodStats() ([]v1alpha1.PodStats, error) {
ret := _m.Called()
var r0 []v1alpha1.PodStats
if rf, ok := ret.Get(0).(func() []v1alpha1.PodStats); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]v1alpha1.PodStats)
}
}
var r1 error
if rf, ok := ret.Get(1).(func() error); ok {
r1 = rf()
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// ListVolumesForPod provides a mock function with given fields: podUID
func (_m *StatsProvider) ListVolumesForPod(podUID types.UID) (map[string]volume.Volume, bool) {
ret := _m.Called(podUID)
var r0 map[string]volume.Volume
if rf, ok := ret.Get(0).(func(types.UID) map[string]volume.Volume); ok {
r0 = rf(podUID)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(map[string]volume.Volume)
}
}
var r1 bool
if rf, ok := ret.Get(1).(func(types.UID) bool); ok {
r1 = rf(podUID)
} else {
r1 = ret.Get(1).(bool)
}
return r0, r1
}
// RootFsStats provides a mock function with given fields:
func (_m *StatsProvider) RootFsStats() (*v1alpha1.FsStats, error) {
ret := _m.Called()
var r0 *v1alpha1.FsStats
if rf, ok := ret.Get(0).(func() *v1alpha1.FsStats); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*v1alpha1.FsStats)
}
}
var r1 error
if rf, ok := ret.Get(1).(func() error); ok {
r1 = rf()
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// RlimitStats provides a mock function with given fields:
func (_m *StatsProvider) RlimitStats() (*v1alpha1.RlimitStats, error) {
ret := _m.Called()
var r0 *v1alpha1.RlimitStats
if rf, ok := ret.Get(0).(func() *v1alpha1.RlimitStats); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*v1alpha1.RlimitStats)
}
}
var r1 error
if rf, ok := ret.Get(1).(func() error); ok {
r1 = rf()
} else {
r1 = ret.Error(1)
}
return r0, r1
}<|fim▁end|> |
return r0, r1
} |