<|file_name|>FormText.d.ts<|end_file_name|><|fim▁begin|>import * as React from 'react';
import { CSSModule } from '../index';
export type FormTextProps<T = {}> = React.HTMLAttributes<HTMLElement> & {
inline?: boolean;
tag?: React.ReactType;
color?: string;
className?: string;<|fim▁hole|>} & T;
declare class FormText<T = {[key: string]: any}> extends React.Component<FormTextProps<T>> {}
export default FormText;<|fim▁end|> | cssModule?: CSSModule; |
<|file_name|>test_threadable.py<|end_file_name|><|fim▁begin|># Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.python.threadable}.
"""
from __future__ import division, absolute_import
import sys, pickle
try:
import threading
except ImportError:
threadingSkip = "Platform lacks thread support"
else:
threadingSkip = None
from twisted.python.compat import _PY3
from twisted.trial import unittest
from twisted.python import threadable
class TestObject:
synchronized = ['aMethod']
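# Names listed in 'synchronized' are wrapped by threadable.synchronize()
# (applied to this class below) so each call runs under a lock.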
x = -1
y = 1
def aMethod(self):
for i in range(10):
self.x, self.y = self.y, self.x
self.z = self.x + self.y
assert self.z == 0, "z == %d, not 0 as expected" % (self.z,)
<|fim▁hole|>class SynchronizationTestCase(unittest.SynchronousTestCase):
def setUp(self):
"""
Reduce the CPython check interval so that thread switches happen much
more often, hopefully exercising more possible race conditions. Also,
delay actual test startup until the reactor has been started.
"""
if _PY3:
if getattr(sys, 'getswitchinterval', None) is not None:
self.addCleanup(sys.setswitchinterval, sys.getswitchinterval())
sys.setswitchinterval(0.0000001)
else:
if getattr(sys, 'getcheckinterval', None) is not None:
self.addCleanup(sys.setcheckinterval, sys.getcheckinterval())
sys.setcheckinterval(7)
def test_synchronizedName(self):
"""
The name of a synchronized method is unaffected by the synchronization
decorator.
"""
self.assertEqual("aMethod", TestObject.aMethod.__name__)
def test_isInIOThread(self):
"""
L{threadable.isInIOThread} returns C{True} if and only if it is called
in the same thread as L{threadable.registerAsIOThread}.
"""
threadable.registerAsIOThread()
foreignResult = []
t = threading.Thread(
target=lambda: foreignResult.append(threadable.isInIOThread()))
t.start()
t.join()
self.assertFalse(
foreignResult[0], "Non-IO thread reported as IO thread")
self.assertTrue(
threadable.isInIOThread(), "IO thread reported as not IO thread")
def testThreadedSynchronization(self):
o = TestObject()
errors = []
def callMethodLots():
try:
for i in range(1000):
o.aMethod()
except AssertionError as e:
errors.append(str(e))
threads = []
for x in range(5):
t = threading.Thread(target=callMethodLots)
threads.append(t)
t.start()
for t in threads:
t.join()
if errors:
raise unittest.FailTest(errors)
if threadingSkip is not None:
testThreadedSynchronization.skip = threadingSkip
test_isInIOThread.skip = threadingSkip
def testUnthreadedSynchronization(self):
o = TestObject()
for i in range(1000):
o.aMethod()
class SerializationTestCase(unittest.SynchronousTestCase):
def testPickling(self):
lock = threadable.XLock()
lockType = type(lock)
lockPickle = pickle.dumps(lock)
newLock = pickle.loads(lockPickle)
self.assertTrue(isinstance(newLock, lockType))
if threadingSkip is not None:
testPickling.skip = threadingSkip
def testUnpickling(self):
lockPickle = b'ctwisted.python.threadable\nunpickle_lock\np0\n(tp1\nRp2\n.'
lock = pickle.loads(lockPickle)
newPickle = pickle.dumps(lock, 2)
newLock = pickle.loads(newPickle)<|fim▁end|> | threadable.synchronize(TestObject)
|
<|file_name|>eg2_real_world_data.py<|end_file_name|><|fim▁begin|>from collections import Counter
from imblearn.datasets import make_imbalance
from imblearn.metrics import classification_report_imbalanced
from imblearn.pipeline import make_pipeline
from imblearn.under_sampling import ClusterCentroids
from imblearn.under_sampling import NearMiss
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
import seaborn as sns
import numpy as np
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.svm import LinearSVC
def scatter_plot_2d(x_ls, y_ls):
# setup marker generator and color map
markers = ('s', 'x', 'o', '^', 'v')
colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
cmap = ListedColormap(colors[:len(np.unique(y_ls))])
# plot class samples
for idx, c1 in enumerate(np.unique(y_ls)):
plt.scatter(x = x_ls[y_ls == c1, 0], y = x_ls[y_ls == c1, 1],
alpha = .8, c = cmap(idx),
marker = markers[idx], label = c1)
# plt.show()
def deci_bdry_plot_2d(x_ls, y_ls, classifier, resolution = .02):
# setup marker generator and color map
markers = ('s', 'x', 'o', '^', 'v')
colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
cmap = ListedColormap(colors[:len(np.unique(y_ls))])
# plot the decision surface
x1_min, x1_max = x_ls[:, 0].min() - 1, x_ls[:, 0].max() + 1
x2_min, x2_max = x_ls[:, 1].min() - 1, x_ls[:, 1].max() + 1
xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution), np.arange(x2_min, x2_max, resolution))
Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
Z = Z.reshape(xx1.shape)
plt.contourf(xx1, xx2, Z, alpha = .4, cmap = cmap)
plt.xlim(xx1.min(), xx1.max())
plt.ylim(xx2.min(), xx2.max())
# plot class samples
for idx, c1 in enumerate(np.unique(y_ls)):
plt.scatter(x = x_ls[y_ls == c1, 0], y = x_ls[y_ls == c1, 1],
alpha = .8, c = cmap(idx),
marker = markers[idx], label = c1)
# plt.show()
def multi_class_under_sampling():
'''
EXAMPLE: Multiclass classification with under-sampling
'''
RANDOM_STATE = 42
iris = load_iris()
X, y = make_imbalance(iris.data, iris.target, ratio = {0:25, 1:50, 2:50}, random_state = 0)
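# ratio maps each class label to the number of samples to keep:
# 25 samples of class 0 and 50 each of classes 1 and 2.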
# print (X[:, [1, 2]])
# print (type(y))
X_train, X_test, y_train, y_test = train_test_split(X[:, [1, 2]], y, random_state = RANDOM_STATE)
# print ('Training target statistics: {}'.format(Counter(y_train)))
# print ('Testing target statistics: {}'.format(Counter(y_test)))
nm = NearMiss(version = 1, random_state = RANDOM_STATE)
X_resample_nm, y_resample_nm = nm.fit_sample(X_train, y_train)
cc = ClusterCentroids(random_state = 0)
X_resample_cc, y_resample_cc = cc.fit_sample(X_train, y_train)
'''plot two in one frame'''
fig, (ax0, ax1) = plt.subplots(ncols = 2)
# ax0, ax1 = axes.flatten()
ax0 = scatter_plot_2d(X_resample_nm, y_resample_nm)
ax1 = scatter_plot_2d(X_resample_cc, y_resample_cc)
# fig.tight_layout()
plt.show()
# pipeline_nm = make_pipeline(NearMiss(version = 1, random_state = RANDOM_STATE), LinearSVC(random_state = RANDOM_STATE))
# pipeline_nm.fit(X_train, y_train)
# pipeline_cc = make_pipeline(ClusterCentroids(random_state = 0), LinearSVC(random_state = RANDOM_STATE))
# pipeline_cc.fit(X_train, y_train)
# print (classification_report_imbalanced(y_test, pipeline_nm.predict(X_test)))
# deci_bdry_plot_2d(X[:, [1, 2]], y, pipeline_nm)
fig = plt.figure()<|fim▁hole|> pipeline_1 = make_pipeline(NearMiss(version = 1, random_state = RANDOM_STATE), LinearSVC(random_state = RANDOM_STATE))
pipeline_1.fit(X_train, y_train)
ax2 = fig.add_subplot(212)
deci_bdry_plot_2d(X[:, [1, 2]], y, pipeline_1)
plt.show()
def wendy_try_iris():
'''
EXAMPLE: Multiclass classification with under-sampling
'''
RANDOM_STATE = 42
iris = load_iris()
# X, y = make_imbalance(iris.data, iris.target, ratio = {0:25, 1:50, 2:50}, random_state = 0)
X = pd.DataFrame(iris.data, columns = ['Sepal_length', 'Sepal_width', 'Petal_length', 'Petal_width'])
y = pd.DataFrame(iris.target, columns = ['Species'])
df = X
df['Species'] = y
'''pair plot for the features'''
# sns.set(style='whitegrid', context='notebook')
# cols = ['Sepal_length', 'Sepal_width', 'Petal_length', 'Petal_width']
# sns.pairplot(df, vars = cols, size=2.5, hue = 'Species')
# plt.show()
'''dimension reduction'''
# print (classification_report_imbalanced(y_test, pipeline_cc.predict(X_test)))
# deci_bdry_plot_2d(X[:, [1, 2]], y, pipeline_cc)
if __name__ == '__main__':
wendy_try_iris()<|fim▁end|> | ax1 = fig.add_subplot(211)
deci_bdry_plot_2d(X[:, [1, 2]], y, pipeline_1)
# print (classification_report_imbalanced(y_test, pipeline_1.predict(X_test)))
|
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|> | from AutoNetkit.examples.examples import *
import AutoNetkit.examples.examples |
<|file_name|>test.js<|end_file_name|><|fim▁begin|>casper.test.begin('BAS GeoMap Features site shows a home page and has 1 or more Features', 5, function suite(test) {
casper.start("https://geomap-features.web.bas.ac.uk", function() {
// Basic checks
test.assertTitle("BAS GeoMap Features", "homepage title is the one expected");
test.assertExists('.page-header h1', "there is a main header on the homepage");
// View Features button
test.assertExists('.btn.btn-primary.btn-lg.btn-bsk.btn-bsk-primary', "there is a view features button on the homepage");
var featureButtonText = casper.fetchText('.btn.btn-primary.btn-lg.btn-bsk.btn-bsk-primary');
test.assertMatch(featureButtonText, /^View all [1-9]\d* Features/, 'the view features button has a count of one or more features');
// Click button to do next tests
casper.click('.btn.btn-primary.btn-lg.btn-bsk.btn-bsk-primary')
});
casper.then(function() {
test.assertTitle("Index of Features", "features index title is the one expected");
});
casper.run(function() {
test.done();
});<|fim▁hole|><|fim▁end|> | }); |
<|file_name|>mapentity.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# File: mapentity.py
# import pygtk
# pygtk.require('2.0')
from gi.repository import Gtk, Gdk
class MapEntity:
# self.x = None<|fim▁hole|>
def getCoords(self):
return self.x, self.y
def getx(self):
return self.x
def gety(self):
return self.y
def setCoords(self,xcoord,ycoord):
self.x = xcoord
self.y = ycoord
def getName(self):
return self.name
def setName(self, strname):
self.name = strname
def __init__(self, xarg, yarg, namearg):
self.x = xarg
self.y = yarg
self.name = namearg
return<|fim▁end|> | # self.y = None
# self.name = None
# self.texture = None |
<|file_name|>ipkernel.py<|end_file_name|><|fim▁begin|>"""The IPython kernel implementation"""
import getpass
import sys
import traceback
from IPython.core import release
from IPython.html.widgets import Widget
from IPython.utils.py3compat import builtin_mod, PY3
from IPython.utils.tokenutil import token_at_cursor, line_at_cursor
from IPython.utils.traitlets import Instance, Type, Any
from IPython.utils.decorators import undoc
from ..comm import CommManager
from .kernelbase import Kernel as KernelBase
from .serialize import serialize_object, unpack_apply_message
from .zmqshell import ZMQInteractiveShell
class IPythonKernel(KernelBase):
shell = Instance('IPython.core.interactiveshell.InteractiveShellABC')
shell_class = Type(ZMQInteractiveShell)
user_module = Any()
def _user_module_changed(self, name, old, new):
if self.shell is not None:
self.shell.user_module = new
user_ns = Instance(dict, args=None, allow_none=True)
def _user_ns_changed(self, name, old, new):
if self.shell is not None:
self.shell.user_ns = new
self.shell.init_user_ns()
# A reference to the Python builtin 'raw_input' function.
# (i.e., __builtin__.raw_input for Python 2.7, builtins.input for Python 3)
_sys_raw_input = Any()
_sys_eval_input = Any()
def __init__(self, **kwargs):
super(IPythonKernel, self).__init__(**kwargs)
# Initialize the InteractiveShell subclass
self.shell = self.shell_class.instance(parent=self,
profile_dir=self.profile_dir,
user_module=self.user_module,
user_ns=self.user_ns,
kernel=self,
)
self.shell.displayhook.session = self.session
self.shell.displayhook.pub_socket = self.iopub_socket
self.shell.displayhook.topic = self._topic('execute_result')
self.shell.display_pub.session = self.session
self.shell.display_pub.pub_socket = self.iopub_socket
self.shell.data_pub.session = self.session
self.shell.data_pub.pub_socket = self.iopub_socket
# TMP - hack while developing
self.shell._reply_content = None
self.comm_manager = CommManager(shell=self.shell, parent=self,
kernel=self)
self.comm_manager.register_target(
'ipython.widget', Widget.handle_comm_opened)
self.shell.configurables.append(self.comm_manager)
comm_msg_types = ['comm_open', 'comm_msg', 'comm_close']
for msg_type in comm_msg_types:
self.shell_handlers[msg_type] = getattr(
self.comm_manager, msg_type)
# Kernel info fields
implementation = 'ipython'
implementation_version = release.version
language_info = {
'name': 'python',
'version': sys.version.split()[0],
'mimetype': 'text/x-python',
'codemirror_mode': {'name': 'ipython',
'version': sys.version_info[0]},
'pygments_lexer': 'ipython%d' % (3 if PY3 else 2),
'nbconvert_exporter': 'python',
'file_extension': '.py'
}
@property
def banner(self):
return self.shell.banner
def start(self):
self.shell.exit_now = False
super(IPythonKernel, self).start()
def set_parent(self, ident, parent):
"""Overridden from parent to tell the display hook and output streams
about the parent message.
"""
super(IPythonKernel, self).set_parent(ident, parent)
self.shell.set_parent(parent)
def _forward_input(self, allow_stdin=False):
"""Forward raw_input and getpass to the current frontend.
via input_request
"""
self._allow_stdin = allow_stdin
if PY3:
self._sys_raw_input = builtin_mod.input
builtin_mod.input = self.raw_input
else:
self._sys_raw_input = builtin_mod.raw_input
self._sys_eval_input = builtin_mod.input
builtin_mod.raw_input = self.raw_input
builtin_mod.input = lambda prompt='': eval(self.raw_input(prompt))
self._save_getpass = getpass.getpass
getpass.getpass = self.getpass
def _restore_input(self):
"""Restore raw_input, getpass"""
if PY3:
builtin_mod.input = self._sys_raw_input
else:
builtin_mod.raw_input = self._sys_raw_input
builtin_mod.input = self._sys_eval_input
getpass.getpass = self._save_getpass
@property
def execution_count(self):
return self.shell.execution_count
@execution_count.setter
def execution_count(self, value):
# Ignore the incrementing done by KernelBase, in favour of our shell's
# execution counter.
pass
def do_execute(self, code, silent, store_history=True,
user_expressions=None, allow_stdin=False):
shell = self.shell # we'll need this a lot here
self._forward_input(allow_stdin)
reply_content = {}
# FIXME: the shell calls the exception handler itself.
shell._reply_content = None
try:
shell.run_cell(code, store_history=store_history, silent=silent)
except:
status = u'error'
# FIXME: this code right now isn't being used yet by default,
# because the run_cell() call above directly fires off exception
# reporting. This code, therefore, is only active in the scenario
# where runlines itself has an unhandled exception. We need to
# uniformize this, for all exception construction to come from a
# single location in the codebase.
etype, evalue, tb = sys.exc_info()
tb_list = traceback.format_exception(etype, evalue, tb)
reply_content.update(shell._showtraceback(etype, evalue, tb_list))
else:
status = u'ok'
finally:
self._restore_input()
reply_content[u'status'] = status
# Return the execution counter so clients can display prompts
reply_content['execution_count'] = shell.execution_count - 1
# FIXME - fish exception info out of shell, possibly left there by
# runlines. We'll need to clean up this logic later.
if shell._reply_content is not None:
reply_content.update(shell._reply_content)
e_info = dict(
engine_uuid=self.ident, engine_id=self.int_id, method='execute')
reply_content['engine_info'] = e_info
# reset after use
shell._reply_content = None
if 'traceback' in reply_content:
self.log.info(
"Exception in execute request:\n%s", '\n'.join(reply_content['traceback']))
# At this point, we can tell whether the main code execution succeeded
# or not. If it did, we proceed to evaluate user_expressions
if reply_content['status'] == 'ok':
reply_content[u'user_expressions'] = \
shell.user_expressions(user_expressions or {})
else:
# If there was an error, don't even try to compute expressions
reply_content[u'user_expressions'] = {}
# Payloads should be retrieved regardless of outcome, so we can both
# recover partial output (that could have been generated early in a
# block, before an error) and clear the payload system always.
reply_content[u'payload'] = shell.payload_manager.read_payload()
# Be aggressive about clearing the payload because we don't want
# it to sit in memory until the next execute_request comes in.
shell.payload_manager.clear_payload()
return reply_content
def do_complete(self, code, cursor_pos):
# FIXME: IPython completers currently assume single line,
# but completion messages give multi-line context
# For now, extract line from cell, based on cursor_pos:
if cursor_pos is None:
cursor_pos = len(code)
line, offset = line_at_cursor(code, cursor_pos)
line_cursor = cursor_pos - offset
txt, matches = self.shell.complete('', line, line_cursor)
return {'matches': matches,
'cursor_end': cursor_pos,
'cursor_start': cursor_pos - len(txt),
'metadata': {},
'status': 'ok'}
def do_inspect(self, code, cursor_pos, detail_level=0):
name = token_at_cursor(code, cursor_pos)
info = self.shell.object_inspect(name)
reply_content = {'status': 'ok'}
reply_content['data'] = data = {}
reply_content['metadata'] = {}
reply_content['found'] = info['found']
if info['found']:
info_text = self.shell.object_inspect_text(
name,
detail_level=detail_level,
)
data['text/plain'] = info_text
return reply_content
def do_history(self, hist_access_type, output, raw, session=None, start=None,
stop=None, n=None, pattern=None, unique=False):
if hist_access_type == 'tail':
hist = self.shell.history_manager.get_tail(n, raw=raw, output=output,
include_latest=True)
elif hist_access_type == 'range':
hist = self.shell.history_manager.get_range(session, start, stop,
raw=raw, output=output)
elif hist_access_type == 'search':
hist = self.shell.history_manager.search(
pattern, raw=raw, output=output, n=n, unique=unique)
else:
hist = []
return {'history': list(hist)}
def do_shutdown(self, restart):
self.shell.exit_now = True
return dict(status='ok', restart=restart)
def do_is_complete(self, code):
status, indent_spaces = self.shell.input_transformer_manager.check_complete(
code)
r = {'status': status}
if status == 'incomplete':
r['indent'] = ' ' * indent_spaces
return r
def do_apply(self, content, bufs, msg_id, reply_metadata):
shell = self.shell
try:
working = shell.user_ns
prefix = "_" + str(msg_id).replace("-", "") + "_"
f, args, kwargs = unpack_apply_message(bufs, working, copy=False)
fname = getattr(f, '__name__', 'f')
fname = prefix + "f"
argname = prefix + "args"
kwargname = prefix + "kwargs"
resultname = prefix + "result"
ns = {fname: f, argname: args, kwargname: kwargs, resultname: None}
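# Inject the function, args and kwargs into the user namespace under
# collision-safe prefixed names, exec the call there, then pop them again.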
# print ns
working.update(ns)
code = "%s = %s(*%s,**%s)" % (resultname,
fname, argname, kwargname)
try:
exec(code, shell.user_global_ns, shell.user_ns)
result = working.get(resultname)
finally:
for key in ns:
working.pop(key)
result_buf = serialize_object(result,
buffer_threshold=self.session.buffer_threshold,
item_threshold=self.session.item_threshold,
)
except:
# invoke IPython traceback formatting
shell.showtraceback()
# FIXME - fish exception info out of shell, possibly left there by
# run_code. We'll need to clean up this logic later.
reply_content = {}
if shell._reply_content is not None:
reply_content.update(shell._reply_content)
e_info = dict(
engine_uuid=self.ident, engine_id=self.int_id, method='apply')
reply_content['engine_info'] = e_info
# reset after use
shell._reply_content = None
self.send_response(self.iopub_socket, u'error', reply_content,
ident=self._topic('error'))
self.log.info(<|fim▁hole|>
if reply_content['ename'] == 'UnmetDependency':
reply_metadata['dependencies_met'] = False
else:
reply_content = {'status': 'ok'}
return reply_content, result_buf
def do_clear(self):
self.shell.reset(False)
return dict(status='ok')
# This exists only for backwards compatibility - use IPythonKernel instead
@undoc
class Kernel(IPythonKernel):
def __init__(self, *args, **kwargs):
import warnings
warnings.warn('Kernel is a deprecated alias of IPython.kernel.zmq.ipkernel.IPythonKernel',
DeprecationWarning)
super(Kernel, self).__init__(*args, **kwargs)<|fim▁end|> | "Exception in apply request:\n%s", '\n'.join(reply_content['traceback']))
result_buf = [] |
<|file_name|>electrode_ocv_gen.py<|end_file_name|><|fim▁begin|>import pickle
from matplotlib import pyplot as plt
plt.style.use('classic')
import matplotlib as mpl
fs = 12.
fw = 'bold'
mpl.rc('lines', linewidth=2., color='k')
mpl.rc('font', size=fs, weight=fw, family='Arial')
mpl.rc('legend', fontsize='small')
import numpy
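# Finite-difference estimate of du/dx on a possibly non-uniform grid.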
def grad( x, u ) :
return numpy.gradient(u) / numpy.gradient(x)
date = '20160519'
base = '/home/mk-sim-linux/Battery_TempGrad/Python/batt_simulation/battsimpy/'
base_dir = '/home/mk-sim-linux/Battery_TempGrad/JournalPaper2/Paper2/ocv_unif35/'
fig_dir = '/home/mk-sim-linux/Battery_TempGrad/JournalPaper3/modeling_paper_p3/figs/'
#base_dir = '/home/m_klein/tgs_data/ocv_unif35/'
#base_dir = '/Volumes/Data/Paper2/ocv_dat/'
#bsp_path = '/Users/mk/Desktop/battsim/battsimpy/'
nmc_rest_523 = numpy.loadtxt( base+'data/Model_nmc/Model_Pars/solid/thermodynamics/2012Yang_523NMC_dchg_restOCV.csv', delimiter=',' )
nmc_cby25_111 = numpy.loadtxt( base+'data/Model_nmc/Model_Pars/solid/thermodynamics/2012Wu_NMC111_Cby25_dchg.csv' , delimiter=',' )
nmc_YangWu_mix = numpy.loadtxt( base+'data/Model_nmc/Model_Pars/solid/thermodynamics/YangWuMix_NMC_20170607.csv' , delimiter=',' )
lfp_prada_dchg = numpy.loadtxt( base+'data/Model_v1/Model_Pars/solid/thermodynamics/2012Prada_LFP_U_dchg.csv' , delimiter=',' )
graph_hess_dchg = numpy.loadtxt( base+'data/Model_nmc/Model_Pars/solid/thermodynamics/Ua_cell4Fit_NMC_2012Yang_refx.csv' , delimiter=',' ) #graphite_Hess_discharge_x.csv
#xin, Uin = 1.-lfp_prada_dchg[:,0], lfp_prada_dchg[:,1]
#xin, Uin = 1.-nmc_rest_523[:,0], nmc_rest_523[:,1]
xin, Uin = 1.-nmc_YangWu_mix[:,0], nmc_YangWu_mix[:,1]
#xin, Uin = 1.-nmc_cby25_111[:,0], nmc_cby25_111[:,1]
xin2, Uin2 = graph_hess_dchg[:,0], graph_hess_dchg[:,1]#-0.025
pfiles2 = [ base_dir+'slowOCVdat_cell4_slow_ocv_'+date+'.p', ]
# Load the cell ocv c/60 data
d = pickle.load( open( pfiles2[0], 'rb' ) )
max_cap = numpy.amax( d['interp']['cap'] )
x_cell, U_cell = 1-numpy.array(d['interp']['cap'])/max_cap*1., d['interp']['dchg']['volt']
# NMC 532 scale - NMC cyl cells (cell 4)
#scale_x = 1.8#1.5 # 1.55
#shift_x = -.01#-.06 #-.12
scale_x = 1.42 # 1.55
shift_x = -.03 #-.12<|fim▁hole|>#scale_x1 = 1.9
#shift_x1 = -.03
## LFP Prada - (cell 2)
#scale_x = 1.25
#shift_x = 1.05-scale_x
# Graphite - scale NMC cyl cells (cell 4)
scale_x2 = 1/.8 #1./0.83 #
shift_x2 = -.06 #-.035
#scale_x2 = 1/.74
#shift_x2 = -.04
figres = 300
figname = base_dir+'ocv-plots_'+date+'.pdf'
sty = [ '-', '--' ]
fsz = (190./25.4,120./25.4)
f1, axes = plt.subplots(1,2,figsize=fsz)
a1,a2 = axes
# Plot the full cell ocv
a1.plot( x_cell, U_cell, '-b', label='Cell C/60 Data' )
# Plot the cathode curve for the shifted soc operating window
a1.plot( xin*scale_x+shift_x, Uin, '-g', label='Cathode' )
# Plot the anode curve for the shifted soc operating window
#a1t = a1.twinx()
a1.plot( xin2*scale_x2+shift_x2, Uin2, '-k', label='Anode' )
# Compute the cathode ocv for the full cell soc operating window
if xin[1] < xin[0] :
Uc = numpy.interp( x_cell, numpy.flipud(xin*scale_x+shift_x), numpy.flipud(Uin) )
else :
Uc = numpy.interp( x_cell, xin*scale_x+shift_x, Uin )
Ua = numpy.interp( x_cell, xin2*scale_x2+shift_x2, Uin2 )
# Plot the estimated full cell ocv curve for the aligned anode and cathode equilibrium curves
#a1.plot( x_cell, Uc-U_cell, ':k', label='U$_{anode}$ fit' )
#a1t.set_ylim([0.,2.])
a1.plot( x_cell, Uc-Ua, ':k', label='U$_{cell}$ fit' )
# Calculate the alignment stoichs for anode and cathode
Ua_out = Uc - U_cell
xa_out = (x_cell-shift_x2)/scale_x2
#numpy.savetxt( base+'data/Model_v1/Model_Pars/solid/thermodynamics/Ua_lfp_2012Prada.csv', numpy.array([xa_out, Ua_out]).T, delimiter=',' )
#numpy.savetxt( base+'data/Model_v1/Model_Pars/solid/thermodynamics/Ua_nmc_2012Yang.csv', numpy.array([xa_out, Ua_out]).T, delimiter=',' )
yin = 1.-xin
xc_lo = 1. - (-shift_x/scale_x)
xc_hi = 1. - (1.-shift_x)/scale_x
xa_lo = (-shift_x2/scale_x2)
xa_hi = (1.-shift_x2)/scale_x2
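# The stoich limits invert the linear map soc = scale*x + shift at soc = 0
# and soc = 1 (the cathode stoichiometry axis is flipped, x_c = 1 - x).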
# Print out the stoich limits for the anode and cathode
print 'xc_lo, xc_hi:',xc_lo, xc_hi
print 'xa_lo, xa_hi:',xa_lo, xa_hi
a1.set_xlabel( 'State of Charge', fontsize=fs, fontweight=fw )
a1.set_ylabel( 'Voltage vs. Li [V]', fontsize=fs, fontweight=fw )
a1.set_title( 'Full and Half Cell OCV', fontsize=fs, fontweight=fw )
a1.legend(loc='best')
a1.set_axisbelow(True)
a1.grid(color='gray')
a2.plot( x_cell, grad(x_cell, U_cell), label=r'$\frac{\partial U_{cell}}{\partial SOC}$' )
a2.plot( x_cell, -grad(x_cell, Ua), label=r'$\frac{\partial U_{anode}}{\partial SOC}$' )
a2.set_xlabel( 'State of Charge', fontsize=fs, fontweight=fw )
a2.set_ylabel( '$\partial U / \partial SOC$', fontsize=fs, fontweight=fw )
a2.set_title( 'OCV Gradients for Anode Alignment', fontsize=fs, fontweight=fw )
a2.legend(loc='best')
a2.set_axisbelow(True)
a2.grid(color='gray')
a2.set_ylim([-0.1,1.5])
#plt.suptitle('LFP/C$_6$ Half Cell OCV Alignment', fontsize=fs, fontweight=fw)
plt.suptitle('NMC/C$_6$ Half Cell OCV Alignment', fontsize=fs, fontweight=fw)
plt.tight_layout(rect=[0,0.03,1,0.97])
plt.show()
#f1.savefig( fig_dir+'ocv_alignment_cell2_lfp.pdf', dpi=figres)
#f1.savefig( fig_dir+'ocv_alignment_cell4_nmc.pdf', dpi=figres)<|fim▁end|> | |
<|file_name|>archive_migration.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python2.7
#! -*- encoding: utf-8 -*-
import simplejson as json
import threading
from sqlobject import SQLObject, connectionForURI
from sqlobject.sqlbuilder import Insert, Select, Delete
from octopus.dispatcher.model.node import FolderNode, TaskNode
from octopus.dispatcher.model.task import Task, TaskGroup
from octopus.dispatcher.model.command import Command
from octopus.dispatcher.model.rendernode import RenderNode
from octopus.dispatcher.model.pool import Pool, PoolShare
from octopus.dispatcher.db.pulidb import FolderNodes, TaskNodes, Dependencies, TaskGroups, Rules, Tasks, Commands, Pools, PoolShares, RenderNodes
BUFFER_SIZE = 1000
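# Rows are archived in batches of BUFFER_SIZE to bound memory use per query.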
def deleteElementFromMainDB(table, elementId):
mainConn.query(mainConn.sqlrepr(Delete(table.q, where=(table.q.id==elementId))))
def insertElementIntoStatDB(table, values):
statConn.query(statConn.sqlrepr(Insert(table.q, values=values)))
def archiveTaskNodesDependencies(taskNodeId):
Dependencies._connection = mainConn
dependencies = Dependencies.select(Dependencies.q.taskNodes == taskNodeId)
for dependency in dependencies:
duplicateDependencyIntoStatDB(dependency)
deleteElementFromMainDB(Dependencies, dependency.id)
def archiveFolderNodesDependencies(folderNodeId):
Dependencies._connection = mainConn
dependencies = Dependencies.select(Dependencies.q.folderNodes ==folderNodeId)
for dependency in dependencies:
duplicateDependencyIntoStatDB(dependency)
deleteElementFromMainDB(Dependencies, dependency.id)
def archiveTaskNodesRules(taskNodeId):
Rules._connection = mainConn
rules = Rules.select(Rules.q.taskNodeId == taskNodeId )
for rule in rules:
duplicateRuleIntoStatDB(rule)
deleteElementFromMainDB(Rules, rule.id)
def archiveFolderNodesRules(folderNodeId):
Rules._connection = mainConn
rules = Rules.select(Rules.q.folderNodeId == folderNodeId )
for rule in rules:
duplicateRuleIntoStatDB(rule)
deleteElementFromMainDB(Rules, rule.id)
def archivePoolShares():
PoolShares._connection = mainConn
print "Starting to archive PoolShares"
poolSharestoArchive = PoolShares.select(PoolShares.q.archived==True)
processedItems = 0
totalItems = poolSharestoArchive.count()
print "Found " + str(totalItems) + " PoolShares to archive"
while totalItems > processedItems:
for poolShare in poolSharestoArchive.limit(BUFFER_SIZE):
duplicatePoolSharesIntoStatDB(poolShare)
deleteElementFromMainDB(PoolShares, poolShare.id)
processedItems+=1
print str(totalItems - processedItems) + " PoolShares remaining"
print "Finished to archive PoolShares"
def archivePools():
Pools._connection = mainConn
print "Starting to archive Pools"
poolstoArchive = Pools.select(Pools.q.archived==True)
processedItems = 0
totalItems = poolstoArchive.count()
print "Found " + str(totalItems) + " Pools to archive"
while totalItems > processedItems:
for pool in poolstoArchive.limit(BUFFER_SIZE):
duplicatePoolsIntoStatDB(pool)
deleteElementFromMainDB(Pools, pool.id)
processedItems+=1
print str(totalItems - processedItems) + " Pools remaining"
print "Finished to archive Pools"
def archiveFolderNodes():
FolderNodes._connection = mainConn
print "Starting to archive FolderNodes"
folderNodestoArchive = FolderNodes.select(FolderNodes.q.archived==True)
processedItems = 0
totalItems = folderNodestoArchive.count()
print "Found " + str(totalItems) + " FolderNodes to archive"
while totalItems > processedItems:
for node in folderNodestoArchive.limit(BUFFER_SIZE):
manageFolderNode(node)
processedItems+=1
print str(totalItems - processedItems) + " FolderNodes remaining"
print "Finished to archive FolderNodes"
def manageFolderNode(node):
duplicateFolderNodesIntoStatDB(node)
deleteElementFromMainDB(FolderNodes, node.id)
archiveFolderNodesDependencies(node.id)
archiveFolderNodesRules(node.id)
def archiveTaskNodes():
TaskNodes._connection = mainConn
print "Starting to archive TaskNodes"
taskNodestoArchive = TaskNodes.select(TaskNodes.q.archived==True)
processedItems = 0
totalItems = taskNodestoArchive.count()
print "Found " + str(totalItems) + " TaskNodes to archive"
while totalItems > processedItems:
for node in taskNodestoArchive.limit(BUFFER_SIZE):
manageTaskNode(node)
processedItems+=1
print str(totalItems - processedItems) + " TaskNodes remaining"
print "Finished to archive TaskNodes"
def manageTaskNode(node):
duplicateTaskNodesIntoStatDB(node)
deleteElementFromMainDB(TaskNodes, node.id)
archiveTaskNodesDependencies(node.id)
archiveTaskNodesRules(node.id)
def archiveCommands():
Commands._connection = mainConn
print "Starting to archive Commands"
commandsToArchive = Commands.select(Commands.q.archived==True)
processedItems = 0
totalItems = commandsToArchive.count()
print "Found " + str(totalItems) + " Commands to archive"
while totalItems > processedItems:
for commands in commandsToArchive.limit(BUFFER_SIZE):
duplicateCommandIntoStatDB(commands)
deleteElementFromMainDB(Commands, commands.id)
processedItems+=1
print str(totalItems - processedItems) + " Commands remaining"
print "Finished to archive Commands"
def archiveTaskGroups():
TaskGroups._connection = mainConn
print "Starting to archive taskGroups"
tasksGroupsToArchive = TaskGroups.select(TaskGroups.q.archived==True)
processedItems = 0
totalItems = tasksGroupsToArchive.count()
print "Found " + str(totalItems) + " taskGroups to archive"
while totalItems > processedItems:
for taskGroup in tasksGroupsToArchive.limit(BUFFER_SIZE):
duplicateTaskGroupIntoStatDB(taskGroup)
deleteElementFromMainDB(TaskGroups, taskGroup.id)
processedItems+=1
print str(totalItems - processedItems) + " taskGroups remaining"
print "Finished to archive taskGroups"
def archiveTasks():
Tasks._connection = mainConn
print "Starting to archive tasks"
tasksToArchive = Tasks.select(Tasks.q.archived==True)
processedItems = 0
totalItems = tasksToArchive.count()
print "Found " + str(totalItems) + " tasks to archive"
while totalItems > processedItems:
for task in tasksToArchive.limit(BUFFER_SIZE):
duplicateTaskIntoStatDB(task)
deleteElementFromMainDB(Tasks, task.id)
processedItems+=1
print str(totalItems - processedItems) + " tasks remaining"
print "Finished to archive tasks"
def duplicateRuleIntoStatDB(rule):
fields = {Rules.q.id.fieldName: rule.id,
Rules.q.name.fieldName: rule.name,
Rules.q.taskNodeId.fieldName: rule.taskNodeId,
Rules.q.folderNodeId.fieldName: rule.folderNodeId}
insertElementIntoStatDB(Rules, fields)
def duplicateDependencyIntoStatDB(element):
fields = {Dependencies.q.toNodeId.fieldName: element.toNodeId,
Dependencies.q.statusList.fieldName: element.statusList,
Dependencies.q.taskNodes.fieldName: element.taskNodesID,
Dependencies.q.folderNodes.fieldName: element.folderNodesID,
Dependencies.q.archived.fieldName: False}
insertElementIntoStatDB(Dependencies,fields)
def duplicateRenderNodesIntoStatDB(element):
fields = {RenderNodes.q.id.fieldName: element.id,
RenderNodes.q.name.fieldName: element.name,
RenderNodes.q.coresNumber.fieldName: element.coresNumber,
RenderNodes.q.speed.fieldName: element.speed,
RenderNodes.q.ip.fieldName: element.ip,
RenderNodes.q.port.fieldName: element.port,<|fim▁hole|>
def duplicatePoolSharesIntoStatDB(element):
fields = {PoolShares.q.id.fieldName: element.id,
PoolShares.q.poolId.fieldName: element.poolId,
PoolShares.q.nodeId.fieldName: element.nodeId,
PoolShares.q.maxRN.fieldName: element.maxRN,
PoolShares.q.archived.fieldName: True}
insertElementIntoStatDB(PoolShares,fields)
def duplicatePoolsIntoStatDB(element):
fields = {Pools.q.id.fieldName: element.id,
Pools.q.name.fieldName: element.name,
Pools.q.archived.fieldName: True}
insertElementIntoStatDB(Pools,fields)
def duplicateFolderNodesIntoStatDB(element):
fields = {FolderNodes.q.id.fieldName: element.id,
FolderNodes.q.name.fieldName: element.name,
FolderNodes.q.parentId.fieldName: element.parentId,
FolderNodes.q.user.fieldName: element.user,
FolderNodes.q.priority.fieldName: element.priority,
FolderNodes.q.dispatchKey.fieldName: element.dispatchKey,
FolderNodes.q.maxRN.fieldName: element.maxRN,
FolderNodes.q.taskGroupId.fieldName: element.taskGroupId,
FolderNodes.q.strategy.fieldName: element.strategy,
FolderNodes.q.creationTime.fieldName: element.creationTime,
FolderNodes.q.startTime.fieldName: element.startTime,
FolderNodes.q.updateTime.fieldName: element.updateTime,
FolderNodes.q.endTime.fieldName: element.endTime,
FolderNodes.q.archived.fieldName: True}
insertElementIntoStatDB(FolderNodes,fields)
def duplicateTaskNodesIntoStatDB(element):
fields = {TaskNodes.q.id.fieldName: element.id,
TaskNodes.q.name.fieldName: element.name,
TaskNodes.q.parentId.fieldName: element.parentId,
TaskNodes.q.user.fieldName: element.user,
TaskNodes.q.priority.fieldName: element.priority,
TaskNodes.q.dispatchKey.fieldName: element.dispatchKey,
TaskNodes.q.maxRN.fieldName: element.maxRN,
TaskNodes.q.taskId.fieldName: element.taskId,
TaskNodes.q.creationTime.fieldName: element.creationTime,
TaskNodes.q.startTime.fieldName: element.startTime,
TaskNodes.q.updateTime.fieldName: element.updateTime,
TaskNodes.q.endTime.fieldName: element.endTime,
TaskNodes.q.maxAttempt.fieldName: element.maxAttempt,
TaskNodes.q.archived.fieldName: True}
insertElementIntoStatDB(TaskNodes,fields)
def duplicateCommandIntoStatDB(element):
fields = {Commands.q.id.fieldName: element.id,
Commands.q.description.fieldName: element.description,
Commands.q.taskId.fieldName: element.taskId,
Commands.q.status.fieldName: element.status,
Commands.q.completion.fieldName: element.completion,
Commands.q.creationTime.fieldName: element.creationTime,
Commands.q.startTime.fieldName: element.startTime,
Commands.q.updateTime.fieldName: element.updateTime,
Commands.q.endTime.fieldName: element.endTime,
Commands.q.message.fieldName: element.message,
Commands.q.stats.fieldName: str(element.stats),
Commands.q.archived.fieldName: True,
Commands.q.args.fieldName: str(element.args),
Commands.q.attempt.fieldName: str(element.attempt),
Commands.q.runnerPackages.fieldName: json.dumps(element.runnerPackages),
Commands.q.watcherPackages.fieldName: json.dumps(element.watcherPackages)}
insertElementIntoStatDB(Commands,fields)
def duplicateTaskGroupIntoStatDB(element):
fields = {TaskGroups.q.id.fieldName: element.id,
TaskGroups.q.name.fieldName: element.name,
TaskGroups.q.parentId.fieldName: element.parentId,
TaskGroups.q.user.fieldName: element.user,
TaskGroups.q.priority.fieldName: element.priority,
TaskGroups.q.dispatchKey.fieldName: element.dispatchKey,
TaskGroups.q.maxRN.fieldName: element.maxRN,
TaskGroups.q.environment.fieldName: json.dumps(element.environment),
TaskGroups.q.requirements.fieldName: json.dumps(element.requirements),
TaskGroups.q.tags.fieldName: json.dumps(element.tags),
TaskGroups.q.strategy.fieldName: element.strategy,
TaskGroups.q.archived.fieldName: True,
TaskGroups.q.args.fieldName: str(element.args)}
insertElementIntoStatDB(TaskGroups,fields)
def duplicateTaskIntoStatDB(element):
fields = {Tasks.q.id.fieldName: element.id,
Tasks.q.name.fieldName: element.name,
Tasks.q.parentId.fieldName: element.parentId,
Tasks.q.user.fieldName: element.user,
Tasks.q.priority.fieldName: element.priority,
Tasks.q.dispatchKey.fieldName: element.dispatchKey,
Tasks.q.maxRN.fieldName: element.maxRN,
Tasks.q.runner.fieldName: element.runner,
Tasks.q.environment.fieldName: json.dumps(element.environment),
Tasks.q.requirements.fieldName: json.dumps(element.requirements),
Tasks.q.minNbCores.fieldName: element.minNbCores,
Tasks.q.maxNbCores.fieldName: element.maxNbCores,
Tasks.q.ramUse.fieldName: element.ramUse,
Tasks.q.licence.fieldName: element.licence,
Tasks.q.tags.fieldName: json.dumps(element.tags),
Tasks.q.validationExpression.fieldName: element.validationExpression,
Tasks.q.archived.fieldName: True,
Tasks.q.args.fieldName: str(element.args),
Tasks.q.maxAttempt.fieldName: element.maxAttempt,
Tasks.q.runnerPackages.fieldName: json.dumps(element.runnerPackages),
Tasks.q.watcherPackages.fieldName: json.dumps(element.watcherPackages)}
insertElementIntoStatDB(Tasks,fields)
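# The archive work is split into three groups of tables and run on three
# threads (below) to shorten the total migration time.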
def groupForThread1():
archivePoolShares()
archivePools()
archiveTaskNodes()
def groupForThread2():
archiveTasks()
archiveFolderNodes()
def groupForThread3():
archiveTaskGroups()
archiveCommands()
DB_URL = "mysql://[email protected]/pulidb"
STAT_DB_URL = "mysql://[email protected]/pulistatdb"
mainConn = connectionForURI(DB_URL)
statConn = connectionForURI(STAT_DB_URL)
threading.Thread(target=groupForThread1).start()
threading.Thread(target=groupForThread2).start()
threading.Thread(target=groupForThread3).start()<|fim▁end|> | RenderNodes.q.ramSize.fieldName: element.ramSize,
RenderNodes.q.caracteristics.fieldName: json.dumps(element.caracteristics),
RenderNodes.q.performance.fieldName: element.performance}
insertElementIntoStatDB(RenderNodes,fields) |
<|file_name|>plugin.js<|end_file_name|><|fim▁begin|>// @require core/widget/helpers.js
(function ( $, _, Svelto ) {
/* PLUGIN */
let Plugin = {
call ( Widget, $ele, args ) {
let options = args[0],
isMethodCall = ( _.isString ( options ) && options.charAt ( 0 ) !== '_' ); // Methods starting with '_' are private
for ( let i = 0, l = $ele.length; i < l; i++ ) {
let instance = $.widget.get ( $ele[i], Widget, options );
if ( isMethodCall && _.isFunction ( instance[options] ) ) {
let returnValue = args.length > 1 ? instance[options]( ...Array.prototype.slice.call ( args, 1 ) ) : instance[options]();
<|fim▁hole|>
}
return $ele;
},
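// Hypothetical usage, for a widget registered with name 'tooltip':
//   $('.el').tooltip ({ delay: 100 }); // initialize with options
//   $('.el').tooltip ('open');         // public method call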
make ( Widget ) {
if ( !Widget.config.plugin ) return;
$.fn[Widget.config.name] = function () {
return Plugin.call ( Widget, this, arguments );
};
},
unmake ( Widget ) {
if ( !Widget.config.plugin ) return;
delete $.fn[Widget.config.name];
}
};
/* EXPORT */
Svelto.Plugin = Plugin;
}( Svelto.$, Svelto._, Svelto ));<|fim▁end|> | if ( !_.isNil ( returnValue ) ) return returnValue;
} |
<|file_name|>AnimationToolbox.hh<|end_file_name|><|fim▁begin|>#include <ui_animationControls.hh>
#if QT_VERSION >= 0x050000 <|fim▁hole|>#endif
class AnimationToolboxWidget : public QWidget, public Ui::AnimationControls
{
Q_OBJECT
public:
AnimationToolboxWidget(QWidget *parent = 0);
};<|fim▁end|> | #include <QtWidgets>
#else
#include <QtGui> |
<|file_name|>stack1.go<|end_file_name|><|fim▁begin|>// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import "unsafe"
const (
// stackDebug == 0: no logging
// == 1: logging of per-stack operations
// == 2: logging of per-frame operations
// == 3: logging of per-word updates
// == 4: logging of per-word reads
stackDebug = 0
stackFromSystem = 0 // allocate stacks from system memory instead of the heap
stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free
stackPoisonCopy = 0 // fill stack that should not be accessed with garbage, to detect bad dereferences during copy
stackCache = 1
)
const (
uintptrMask = 1<<(8*ptrSize) - 1
poisonStack = uintptrMask & 0x6868686868686868
// Goroutine preemption request.
// Stored into g->stackguard0 to cause split stack check failure.
// Must be greater than any real sp.
// 0xfffffade in hex.
stackPreempt = uintptrMask & -1314
// Thread is forking.
// Stored into g->stackguard0 to cause split stack check failure.
// Must be greater than any real sp.
stackFork = uintptrMask & -1234
)
// Global pool of spans that have free stacks.
// Stacks are assigned an order according to size.
// order = log_2(size/FixedStack)
// There is a free list for each order.
// TODO: one lock per order?
var stackpool [_NumStackOrders]mspan
var stackpoolmu mutex
// List of stack spans to be freed at the end of GC. Protected by
// stackpoolmu.
var stackFreeQueue mspan
// Cached value of haveexperiment("framepointer")
var framepointer_enabled bool
func stackinit() {
if _StackCacheSize&_PageMask != 0 {
throw("cache size must be a multiple of page size")
}
for i := range stackpool {
mSpanList_Init(&stackpool[i])
}
mSpanList_Init(&stackFreeQueue)
}
// Allocates a stack from the free pool. Must be called with
// stackpoolmu held.
func stackpoolalloc(order uint8) gclinkptr {
list := &stackpool[order]
s := list.next
if s == list {
// no free stacks. Allocate another span worth.
s = mHeap_AllocStack(&mheap_, _StackCacheSize>>_PageShift)
if s == nil {
throw("out of memory")
}
if s.ref != 0 {
throw("bad ref")
}
if s.freelist.ptr() != nil {
throw("bad freelist")
}
for i := uintptr(0); i < _StackCacheSize; i += _FixedStack << order {
x := gclinkptr(uintptr(s.start)<<_PageShift + i)
x.ptr().next = s.freelist
s.freelist = x
}
mSpanList_Insert(list, s)
}
x := s.freelist
if x.ptr() == nil {
throw("span has no free stacks")
}
s.freelist = x.ptr().next
s.ref++
if s.freelist.ptr() == nil {
// all stacks in s are allocated.
mSpanList_Remove(s)
}
return x
}
// Adds stack x to the free pool. Must be called with stackpoolmu held.
func stackpoolfree(x gclinkptr, order uint8) {
s := mHeap_Lookup(&mheap_, (unsafe.Pointer)(x))
if s.state != _MSpanStack {
throw("freeing stack not in a stack span")
}
if s.freelist.ptr() == nil {
// s will now have a free stack
mSpanList_Insert(&stackpool[order], s)
}
x.ptr().next = s.freelist
s.freelist = x
s.ref--
if gcphase == _GCoff && s.ref == 0 {
// Span is completely free. Return it to the heap
// immediately if we're sweeping.
//
// If GC is active, we delay the free until the end of
// GC to avoid the following type of situation:
//
// 1) GC starts, scans a SudoG but does not yet mark the SudoG.elem pointer
// 2) The stack that pointer points to is copied
// 3) The old stack is freed
// 4) The containing span is marked free
// 5) GC attempts to mark the SudoG.elem pointer. The
// marking fails because the pointer looks like a
// pointer into a free span.
//
// By not freeing, we prevent step #4 until GC is done.
mSpanList_Remove(s)
s.freelist = 0
mHeap_FreeStack(&mheap_, s)
}
}
// stackcacherefill/stackcacherelease implement a global pool of stack segments.
// The pool is required to prevent unlimited growth of per-thread caches.
func stackcacherefill(c *mcache, order uint8) {
if stackDebug >= 1 {
print("stackcacherefill order=", order, "\n")
}
// Grab some stacks from the global cache.
// Grab half of the allowed capacity (to prevent thrashing).
var list gclinkptr
var size uintptr
lock(&stackpoolmu)
for size < _StackCacheSize/2 {
x := stackpoolalloc(order)
x.ptr().next = list
list = x
size += _FixedStack << order
}
unlock(&stackpoolmu)
c.stackcache[order].list = list
c.stackcache[order].size = size
}
func stackcacherelease(c *mcache, order uint8) {
if stackDebug >= 1 {
print("stackcacherelease order=", order, "\n")
}
x := c.stackcache[order].list
size := c.stackcache[order].size
lock(&stackpoolmu)
for size > _StackCacheSize/2 {
y := x.ptr().next
stackpoolfree(x, order)
x = y
size -= _FixedStack << order
}
unlock(&stackpoolmu)
c.stackcache[order].list = x
c.stackcache[order].size = size
}
func stackcache_clear(c *mcache) {
if stackDebug >= 1 {
print("stackcache clear\n")
}
lock(&stackpoolmu)
for order := uint8(0); order < _NumStackOrders; order++ {
x := c.stackcache[order].list
for x.ptr() != nil {
y := x.ptr().next
stackpoolfree(x, order)
x = y
}
c.stackcache[order].list = 0
c.stackcache[order].size = 0
}
unlock(&stackpoolmu)
}
func stackalloc(n uint32) (stack, []stkbar) {
// Stackalloc must be called on scheduler stack, so that we
// never try to grow the stack during the code that stackalloc runs.
// Doing so would cause a deadlock (issue 1547).
thisg := getg()
if thisg != thisg.m.g0 {
throw("stackalloc not on scheduler stack")
}
if n&(n-1) != 0 {
throw("stack size not a power of 2")
}
if stackDebug >= 1 {
print("stackalloc ", n, "\n")
}
// Compute the size of stack barrier array.
maxstkbar := gcMaxStackBarriers(int(n))
nstkbar := unsafe.Sizeof(stkbar{}) * uintptr(maxstkbar)
if debug.efence != 0 || stackFromSystem != 0 {
v := sysAlloc(round(uintptr(n), _PageSize), &memstats.stacks_sys)
if v == nil {
throw("out of memory (stackalloc)")
}
top := uintptr(n) - nstkbar
stkbarSlice := slice{add(v, top), 0, maxstkbar}
return stack{uintptr(v), uintptr(v) + top}, *(*[]stkbar)(unsafe.Pointer(&stkbarSlice))
}
// Small stacks are allocated with a fixed-size free-list allocator.
// If we need a stack of a bigger size, we fall back on allocating
// a dedicated span.
var v unsafe.Pointer
if stackCache != 0 && n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
order := uint8(0)
n2 := n
for n2 > _FixedStack {
order++
n2 >>= 1
}
var x gclinkptr
c := thisg.m.mcache
if c == nil || thisg.m.preemptoff != "" || thisg.m.helpgc != 0 {
// c == nil can happen in the guts of exitsyscall or
// procresize. Just get a stack from the global pool.
// Also don't touch stackcache during gc
// as it's flushed concurrently.
lock(&stackpoolmu)
x = stackpoolalloc(order)
unlock(&stackpoolmu)
} else {
x = c.stackcache[order].list
if x.ptr() == nil {
stackcacherefill(c, order)
x = c.stackcache[order].list
}
c.stackcache[order].list = x.ptr().next
c.stackcache[order].size -= uintptr(n)
}
v = (unsafe.Pointer)(x)
} else {
s := mHeap_AllocStack(&mheap_, round(uintptr(n), _PageSize)>>_PageShift)
if s == nil {
throw("out of memory")
}
v = (unsafe.Pointer)(s.start << _PageShift)
}
if raceenabled {
racemalloc(v, uintptr(n))
}
if stackDebug >= 1 {
print(" allocated ", v, "\n")
}
top := uintptr(n) - nstkbar
stkbarSlice := slice{add(v, top), 0, maxstkbar}
return stack{uintptr(v), uintptr(v) + top}, *(*[]stkbar)(unsafe.Pointer(&stkbarSlice))
}
func stackfree(stk stack, n uintptr) {
gp := getg()
v := (unsafe.Pointer)(stk.lo)
if n&(n-1) != 0 {
throw("stack not a power of 2")
}
if stk.lo+n < stk.hi {
throw("bad stack size")
}
if stackDebug >= 1 {
println("stackfree", v, n)
memclr(v, n) // for testing, clobber stack data
}
if debug.efence != 0 || stackFromSystem != 0 {
if debug.efence != 0 || stackFaultOnFree != 0 {
sysFault(v, n)
} else {
sysFree(v, n, &memstats.stacks_sys)
}
return
}
if stackCache != 0 && n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
order := uint8(0)
n2 := n
for n2 > _FixedStack {
order++
n2 >>= 1
}
x := gclinkptr(v)
c := gp.m.mcache
if c == nil || gp.m.preemptoff != "" || gp.m.helpgc != 0 {
lock(&stackpoolmu)
stackpoolfree(x, order)
unlock(&stackpoolmu)
} else {
if c.stackcache[order].size >= _StackCacheSize {
stackcacherelease(c, order)
}
x.ptr().next = c.stackcache[order].list
c.stackcache[order].list = x
c.stackcache[order].size += n
}
} else {
s := mHeap_Lookup(&mheap_, v)
if s.state != _MSpanStack {
println(hex(s.start<<_PageShift), v)
throw("bad span state")
}
if gcphase == _GCoff {
// Free the stack immediately if we're
// sweeping.
mHeap_FreeStack(&mheap_, s)
} else {
// Otherwise, add it to a list of stack spans
// to be freed at the end of GC.
//
// TODO(austin): Make it possible to re-use
// these spans as stacks, like we do for small
// stack spans. (See issue #11466.)
lock(&stackpoolmu)
mSpanList_Insert(&stackFreeQueue, s)
unlock(&stackpoolmu)
}
}
}
var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real
var ptrnames = []string{
0: "scalar",
1: "ptr",
}
// Stack frame layout
//
// (x86)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// | return address |
// +------------------+
// | caller's BP (*) | (*) if framepointer_enabled && varp < sp
// +------------------+ <- frame->varp
// | locals |
// +------------------+
// | args to callee |
// +------------------+ <- frame->sp
//
// (arm)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// | caller's retaddr |
// +------------------+ <- frame->varp
// | locals |
// +------------------+
// | args to callee |
// +------------------+
// | return address |
// +------------------+ <- frame->sp
type adjustinfo struct {
old stack
delta uintptr // ptr distance from old to new stack (newbase - oldbase)
}
// Adjustpointer checks whether *vpp is in the old stack described by adjinfo.
// If so, it rewrites *vpp to point into the new stack.
func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
pp := (*unsafe.Pointer)(vpp)
p := *pp
if stackDebug >= 4 {
print(" ", pp, ":", p, "\n")
}
if adjinfo.old.lo <= uintptr(p) && uintptr(p) < adjinfo.old.hi {
*pp = add(p, adjinfo.delta)
if stackDebug >= 3 {
print(" adjust ptr ", pp, ":", p, " -> ", *pp, "\n")
}
}
}
// Information from the compiler about the layout of stack frames.
type bitvector struct {
n int32 // # of bits
bytedata *uint8
}
type gobitvector struct {
n uintptr
bytedata []uint8
}
func gobv(bv bitvector) gobitvector {
return gobitvector{
uintptr(bv.n),
(*[1 << 30]byte)(unsafe.Pointer(bv.bytedata))[:(bv.n+7)/8],
}
}
func ptrbit(bv *gobitvector, i uintptr) uint8 {
return (bv.bytedata[i/8] >> (i % 8)) & 1
}
// bv describes the memory starting at address scanp.
// Adjust any pointers contained therein.
func adjustpointers(scanp unsafe.Pointer, cbv *bitvector, adjinfo *adjustinfo, f *_func) {
bv := gobv(*cbv)
minp := adjinfo.old.lo
maxp := adjinfo.old.hi
delta := adjinfo.delta
num := uintptr(bv.n)
for i := uintptr(0); i < num; i++ {
if stackDebug >= 4 {
print(" ", add(scanp, i*ptrSize), ":", ptrnames[ptrbit(&bv, i)], ":", hex(*(*uintptr)(add(scanp, i*ptrSize))), " # ", i, " ", bv.bytedata[i/8], "\n")
}
if ptrbit(&bv, i) == 1 {
pp := (*uintptr)(add(scanp, i*ptrSize))
p := *pp
if f != nil && 0 < p && p < _PageSize && debug.invalidptr != 0 || p == poisonStack {
// Looks like a junk value in a pointer slot.
// Live analysis wrong?
getg().m.traceback = 2
print("runtime: bad pointer in frame ", funcname(f), " at ", pp, ": ", hex(p), "\n")
throw("invalid stack pointer")
}
if minp <= p && p < maxp {
if stackDebug >= 3 {
print("adjust ptr ", p, " ", funcname(f), "\n")
}
*pp = p + delta
}
}
}
}
// Note: the argument/return area is adjusted by the callee.
func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
adjinfo := (*adjustinfo)(arg)
targetpc := frame.continpc
if targetpc == 0 {
// Frame is dead.
return true
}
f := frame.fn
if stackDebug >= 2 {
print(" adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n")
}
if f.entry == systemstack_switchPC {
// A special routine at the bottom of stack of a goroutine that does an systemstack call.
// We will allow it to be copied even though we don't
// have full GC info for it (because it is written in asm).
return true
}
if targetpc != f.entry {
targetpc--
}
pcdata := pcdatavalue(f, _PCDATA_StackMapIndex, targetpc)
if pcdata == -1 {
pcdata = 0 // in prologue
}
// Adjust local variables if stack frame has been allocated.
size := frame.varp - frame.sp
var minsize uintptr
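// thechar is the historical architecture character: '6' is amd64,
// '8' is 386, '7' is arm64.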
switch thechar {
case '6', '8':
minsize = 0
case '7':
minsize = spAlign
default:
minsize = ptrSize
}
if size > minsize {
var bv bitvector
stackmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
if stackmap == nil || stackmap.n <= 0 {
print("runtime: frame ", funcname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
throw("missing stackmap")
}
// Locals bitmap information, scan just the pointers in locals.
if pcdata < 0 || pcdata >= stackmap.n {
// don't know where we are
print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " locals stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
throw("bad symbol table")
}
bv = stackmapdata(stackmap, pcdata)
size = uintptr(bv.n) * ptrSize
if stackDebug >= 3 {
print(" locals ", pcdata, "/", stackmap.n, " ", size/ptrSize, " words ", bv.bytedata, "\n")
}
adjustpointers(unsafe.Pointer(frame.varp-size), &bv, adjinfo, f)
}
// Adjust saved base pointer if there is one.
if thechar == '6' && frame.argp-frame.varp == 2*regSize {
if !framepointer_enabled {
print("runtime: found space for saved base pointer, but no framepointer experiment\n")
print("argp=", hex(frame.argp), " varp=", hex(frame.varp), "\n")
throw("bad frame layout")
}
if stackDebug >= 3 {
print(" saved bp\n")
}
adjustpointer(adjinfo, unsafe.Pointer(frame.varp))
}
// Adjust arguments.
if frame.arglen > 0 {
var bv bitvector
if frame.argmap != nil {
bv = *frame.argmap
} else {
stackmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
if stackmap == nil || stackmap.n <= 0 {
print("runtime: frame ", funcname(f), " untyped args ", frame.argp, "+", uintptr(frame.arglen), "\n")
throw("missing stackmap")
}
if pcdata < 0 || pcdata >= stackmap.n {
// don't know where we are
print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " args stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
throw("bad symbol table")
}
bv = stackmapdata(stackmap, pcdata)
}
if stackDebug >= 3 {
print(" args\n")
}
adjustpointers(unsafe.Pointer(frame.argp), &bv, adjinfo, nil)
}
return true
}
func adjustctxt(gp *g, adjinfo *adjustinfo) {
adjustpointer(adjinfo, (unsafe.Pointer)(&gp.sched.ctxt))
}
func adjustdefers(gp *g, adjinfo *adjustinfo) {
// Adjust defer argument blocks the same way we adjust active stack frames.
tracebackdefers(gp, adjustframe, noescape(unsafe.Pointer(adjinfo)))
// Adjust pointers in the Defer structs.
// Defer structs themselves are never on the stack.
for d := gp._defer; d != nil; d = d.link {
adjustpointer(adjinfo, (unsafe.Pointer)(&d.fn))
adjustpointer(adjinfo, (unsafe.Pointer)(&d.sp))
adjustpointer(adjinfo, (unsafe.Pointer)(&d._panic))
}
}
func adjustpanics(gp *g, adjinfo *adjustinfo) {
// Panics are on stack and already adjusted.
// Update pointer to head of list in G.
adjustpointer(adjinfo, (unsafe.Pointer)(&gp._panic))
}
func adjustsudogs(gp *g, adjinfo *adjustinfo) {
// the data elements pointed to by a SudoG structure
// might be in the stack.
for s := gp.waiting; s != nil; s = s.waitlink {
adjustpointer(adjinfo, (unsafe.Pointer)(&s.elem))
adjustpointer(adjinfo, (unsafe.Pointer)(&s.selectdone))
}
}
func adjuststkbar(gp *g, adjinfo *adjustinfo) {
for i := int(gp.stkbarPos); i < len(gp.stkbar); i++ {
adjustpointer(adjinfo, (unsafe.Pointer)(&gp.stkbar[i].savedLRPtr))
}
}
func fillstack(stk stack, b byte) {
for p := stk.lo; p < stk.hi; p++ {
*(*byte)(unsafe.Pointer(p)) = b
}
}
// Copies gp's stack to a new stack of a different size.
// Caller must have changed gp status to Gcopystack.
func copystack(gp *g, newsize uintptr) {
if gp.syscallsp != 0 {
throw("stack growth not allowed in system call")
}
old := gp.stack
if old.lo == 0 {
throw("nil stackbase")
}
used := old.hi - gp.sched.sp
// allocate new stack
new, newstkbar := stackalloc(uint32(newsize))
if stackPoisonCopy != 0 {
fillstack(new, 0xfd)
}
if stackDebug >= 1 {
print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]/", gp.stackAlloc, " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
}
// adjust pointers in the to-be-copied frames
var adjinfo adjustinfo
adjinfo.old = old
adjinfo.delta = new.hi - old.hi
gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, adjustframe, noescape(unsafe.Pointer(&adjinfo)), 0)
// adjust other miscellaneous things that have pointers into stacks.
adjustctxt(gp, &adjinfo)
adjustdefers(gp, &adjinfo)
adjustpanics(gp, &adjinfo)
adjustsudogs(gp, &adjinfo)
adjuststkbar(gp, &adjinfo)
// copy the stack to the new location
if stackPoisonCopy != 0 {
fillstack(new, 0xfb)
}
memmove(unsafe.Pointer(new.hi-used), unsafe.Pointer(old.hi-used), used)
// copy old stack barriers to new stack barrier array
newstkbar = newstkbar[:len(gp.stkbar)]
copy(newstkbar, gp.stkbar)
// Swap out old stack for new one
gp.stack = new
gp.stackguard0 = new.lo + _StackGuard // NOTE: might clobber a preempt request
gp.sched.sp = new.hi - used
oldsize := gp.stackAlloc
gp.stackAlloc = newsize
gp.stkbar = newstkbar
// free old stack
if stackPoisonCopy != 0 {
fillstack(old, 0xfc)
}
stackfree(old, oldsize)
}
// round x up to a power of 2.
func round2(x int32) int32 {
s := uint(0)
for 1<<s < x {
s++
}
return 1 << s
}
// Called from runtime·morestack when more stack is needed.
// Allocate larger stack and relocate to new stack.
// Stack growth is multiplicative, for constant amortized cost.
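// (Illustrative: growing 8K -> 16K -> 32K copies at most 8K + 16K bytes over
// both growths, so the total copying work stays proportional to the final size.)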
//
// g->atomicstatus will be Grunning or Gscanrunning upon entry.
// If the GC is trying to stop this g then it will set preemptscan to true.
func newstack() {
thisg := getg()
// TODO: double check all gp. shouldn't be getg().
if thisg.m.morebuf.g.ptr().stackguard0 == stackFork {
throw("stack growth after fork")
}
if thisg.m.morebuf.g.ptr() != thisg.m.curg {
print("runtime: newstack called from g=", thisg.m.morebuf.g, "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
morebuf := thisg.m.morebuf
traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g.ptr())
throw("runtime: wrong goroutine in newstack")
}
if thisg.m.curg.throwsplit {
gp := thisg.m.curg
// Update syscallsp, syscallpc in case traceback uses them.
morebuf := thisg.m.morebuf
gp.syscallsp = morebuf.sp
gp.syscallpc = morebuf.pc
print("runtime: newstack sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
traceback(morebuf.pc, morebuf.sp, morebuf.lr, gp)
throw("runtime: stack split at bad time")
}
gp := thisg.m.curg
morebuf := thisg.m.morebuf
thisg.m.morebuf.pc = 0
thisg.m.morebuf.lr = 0
thisg.m.morebuf.sp = 0
thisg.m.morebuf.g = 0
rewindmorestack(&gp.sched)
// NOTE: stackguard0 may change underfoot, if another thread
// is about to try to preempt gp. Read it just once and use that same
// value now and below.
preempt := atomicloaduintptr(&gp.stackguard0) == stackPreempt
// Be conservative about where we preempt.
// We are interested in preempting user Go code, not runtime code.
// If we're holding locks, mallocing, or preemption is disabled, don't
// preempt.
// This check is very early in newstack so that even the status change
// from Grunning to Gwaiting and back doesn't happen in this case.
// That status change by itself can be viewed as a small preemption,
// because the GC might change Gwaiting to Gscanwaiting, and then
// this goroutine has to wait for the GC to finish before continuing.
// If the GC is in some way dependent on this goroutine (for example,
// it needs a lock held by the goroutine), that small preemption turns
// into a real deadlock.
if preempt {
if thisg.m.locks != 0 || thisg.m.mallocing != 0 || thisg.m.preemptoff != "" || thisg.m.p.ptr().status != _Prunning {
// Let the goroutine keep running for now.
// gp->preempt is set, so it will be preempted next time.
gp.stackguard0 = gp.stack.lo + _StackGuard
gogo(&gp.sched) // never return
}
}
// The goroutine must be executing in order to call newstack,
// so it must be Grunning (or Gscanrunning).
casgstatus(gp, _Grunning, _Gwaiting)
gp.waitreason = "stack growth"
if gp.stack.lo == 0 {
throw("missing stack in newstack")
}
sp := gp.sched.sp
if thechar == '6' || thechar == '8' {
// The call to morestack cost a word.
sp -= ptrSize
}
if stackDebug >= 1 || sp < gp.stack.lo {
print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
}
if sp < gp.stack.lo {
print("runtime: gp=", gp, ", gp->status=", hex(readgstatus(gp)), "\n ")
print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
throw("runtime: split stack overflow")
}
if gp.sched.ctxt != nil {
// morestack wrote sched.ctxt on its way in here,
// without a write barrier. Run the write barrier now.
// It is not possible to be preempted between then
// and now, so it's okay.
writebarrierptr_nostore((*uintptr)(unsafe.Pointer(&gp.sched.ctxt)), uintptr(gp.sched.ctxt))
}
if preempt {
if gp == thisg.m.g0 {
throw("runtime: preempt g0")
}
if thisg.m.p == 0 && thisg.m.locks == 0 {
throw("runtime: g is running but p is not")
}
if gp.preemptscan {
for !castogscanstatus(gp, _Gwaiting, _Gscanwaiting) {
// Likely to be racing with the GC as
// it sees a _Gwaiting and does the
// stack scan. If so, gcworkdone will
// be set and gcphasework will simply
// return.
}
if !gp.gcscandone {
scanstack(gp)
gp.gcscandone = true
}
gp.preemptscan = false
gp.preempt = false
casfrom_Gscanstatus(gp, _Gscanwaiting, _Gwaiting)
casgstatus(gp, _Gwaiting, _Grunning)
gp.stackguard0 = gp.stack.lo + _StackGuard
gogo(&gp.sched) // never return
}
// Act like goroutine called runtime.Gosched.
casgstatus(gp, _Gwaiting, _Grunning)
gopreempt_m(gp) // never return
}
// Allocate a bigger segment and move the stack.
oldsize := int(gp.stackAlloc)
newsize := oldsize * 2
if uintptr(newsize) > maxstacksize {
print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
throw("stack overflow")
}
<|fim▁hole|> // the gp is in a Gcopystack status.
copystack(gp, uintptr(newsize))
if stackDebug >= 1 {
print("stack grow done\n")
}
casgstatus(gp, _Gcopystack, _Grunning)
gogo(&gp.sched)
}
//go:nosplit
func nilfunc() {
*(*uint8)(nil) = 0
}
// adjust Gobuf as if it executed a call to fn
// and then did an immediate gosave.
func gostartcallfn(gobuf *gobuf, fv *funcval) {
var fn unsafe.Pointer
if fv != nil {
fn = (unsafe.Pointer)(fv.fn)
} else {
fn = unsafe.Pointer(funcPC(nilfunc))
}
gostartcall(gobuf, fn, (unsafe.Pointer)(fv))
}
// Maybe shrink the stack being used by gp.
// Called at garbage collection time.
func shrinkstack(gp *g) {
if readgstatus(gp) == _Gdead {
if gp.stack.lo != 0 {
// Free whole stack - it will get reallocated
// if G is used again.
stackfree(gp.stack, gp.stackAlloc)
gp.stack.lo = 0
gp.stack.hi = 0
gp.stkbar = nil
gp.stkbarPos = 0
}
return
}
if gp.stack.lo == 0 {
throw("missing stack in shrinkstack")
}
if debug.gcshrinkstackoff > 0 {
return
}
oldsize := gp.stackAlloc
newsize := oldsize / 2
// Don't shrink the allocation below the minimum-sized stack
// allocation.
if newsize < _FixedStack {
return
}
// Compute how much of the stack is currently in use and only
// shrink the stack if gp is using less than a quarter of its
// current stack. The currently used stack includes everything
// down to the SP plus the stack guard space that ensures
// there's room for nosplit functions.
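	// Illustrative example: with avail = 8K, the shrink (to 4K) proceeds
	// only while used = hi - sp + _StackLimit stays below 2K (avail/4).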
avail := gp.stack.hi - gp.stack.lo
if used := gp.stack.hi - gp.sched.sp + _StackLimit; used >= avail/4 {
return
}
// We can't copy the stack if we're in a syscall.
// The syscall might have pointers into the stack.
if gp.syscallsp != 0 {
return
}
if goos_windows != 0 && gp.m != nil && gp.m.libcallsp != 0 {
return
}
if stackDebug > 0 {
print("shrinking stack ", oldsize, "->", newsize, "\n")
}
oldstatus := casgcopystack(gp)
copystack(gp, newsize)
casgstatus(gp, _Gcopystack, oldstatus)
}
// freeStackSpans frees unused stack spans at the end of GC.
func freeStackSpans() {
lock(&stackpoolmu)
// Scan stack pools for empty stack spans.
for order := range stackpool {
list := &stackpool[order]
for s := list.next; s != list; {
next := s.next
if s.ref == 0 {
mSpanList_Remove(s)
s.freelist = 0
mHeap_FreeStack(&mheap_, s)
}
s = next
}
}
// Free queued stack spans.
for stackFreeQueue.next != &stackFreeQueue {
s := stackFreeQueue.next
mSpanList_Remove(s)
mHeap_FreeStack(&mheap_, s)
}
unlock(&stackpoolmu)
}
//go:nosplit
func morestackc() {
systemstack(func() {
throw("attempt to execute C code on Go stack")
})
}<|fim▁end|> | casgstatus(gp, _Gwaiting, _Gcopystack)
// The concurrent GC will not scan the stack while we are doing the copy since |
<|file_name|>test_fields.py<|end_file_name|><|fim▁begin|># -*- coding:utf-8 -*-
from django import forms
try:
from django.utils.encoding import smart_unicode as smart_text
except ImportError:
from django.utils.encoding import smart_text
from cached_modelforms.tests.utils import SettingsTestCase
from cached_modelforms.tests.models import SimpleModel
from cached_modelforms import (
CachedModelChoiceField, CachedModelMultipleChoiceField)
class TestFields(SettingsTestCase):
def setUp(self):
self.settings_manager.set(INSTALLED_APPS=('cached_modelforms.tests',))
self.obj1 = SimpleModel.objects.create(name='name1')
self.obj2 = SimpleModel.objects.create(name='name2')
self.obj3 = SimpleModel.objects.create(name='name3')
self.cached_list = [self.obj1, self.obj2, self.obj3]
class FormSingle(forms.Form):
obj = CachedModelChoiceField(
objects=lambda:self.cached_list,
required=False
)
class FormMultiple(forms.Form):
obj = CachedModelMultipleChoiceField(
objects=lambda:self.cached_list,
required=False
)
self.FormSingle = FormSingle
self.FormMultiple = FormMultiple
def test_modelchoicefield_objects_arg(self):
'''
        Test how the field accepts different types for the ``objects`` argument.
'''
as_list = CachedModelChoiceField(objects=lambda:self.cached_list)
as_iterable = CachedModelChoiceField(
objects=lambda:iter(self.cached_list)
)
list_of_tuples = [(x.pk, x) for x in self.cached_list]
as_list_of_tuples = CachedModelChoiceField(
objects=lambda:list_of_tuples
)
as_dict = CachedModelChoiceField(objects=lambda:dict(list_of_tuples))
choices_without_empty_label = as_list.choices[:]
if as_list.empty_label is not None:
choices_without_empty_label.pop(0)
# make sure all of the ``choices`` attrs are the same
self.assertTrue(
as_list.choices ==
as_iterable.choices ==
as_list_of_tuples.choices ==
as_dict.choices
)
# same for ``objects``
self.assertTrue(
as_list.objects ==
as_iterable.objects ==
as_list_of_tuples.objects ==
as_dict.objects
)
# ``objects`` should be a dict as ``{smart_text(pk1): obj1, ...}``
self.assertEqual(
set(as_list.objects.keys()),
set(smart_text(x.pk) for x in self.cached_list)
)
self.assertEqual(set(as_list.objects.values()), set(self.cached_list))
<|fim▁hole|> )
def test_modelmultiplechoicefield_objects_arg(self):
'''
        Test how the field accepts different types for the ``objects`` argument.
'''
as_list = CachedModelMultipleChoiceField(
objects=lambda:self.cached_list
)
as_iterable = CachedModelMultipleChoiceField(
objects=lambda:iter(self.cached_list)
)
list_of_tuples = [(x.pk, x) for x in self.cached_list]
as_list_of_tuples = CachedModelMultipleChoiceField(
objects=lambda:list_of_tuples
)
as_dict = CachedModelMultipleChoiceField(objects=dict(list_of_tuples))
# make sure all of the ``choices`` attrs are the same
self.assertTrue(
as_list.choices ==
as_iterable.choices ==
as_list_of_tuples.choices ==
as_dict.choices)
# same for ``objects``
self.assertTrue(
as_list.objects ==
as_iterable.objects ==
as_list_of_tuples.objects ==
as_dict.objects)
# ``objects`` should be a dict as ``{smart_text(pk1): obj1, ...}``
self.assertEqual(
set(as_list.objects.keys()),
set(smart_text(x.pk) for x in self.cached_list)
)
self.assertEqual(set(as_list.objects.values()), set(self.cached_list))
# ``choices`` should be a list as ``[(smart_text(pk1), smart_text(obj1)), ...]``
self.assertEqual(
as_list.choices,
[(smart_text(x.pk), smart_text(x)) for x in self.cached_list]
)
def test_modelchoicefield_behavior(self):
'''
        Test how the field handles data in a form.
'''
# some value
form = self.FormSingle({'obj': smart_text(self.obj1.pk)})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['obj'], self.obj1)
# no value
form = self.FormSingle({})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['obj'], None)
# invalid value
form = self.FormSingle({'obj': '-1'})
self.assertFalse(form.is_valid())
self.assertTrue(form._errors['obj'])
def test_modelmultiplechoicefield_behavior(self):
'''
        Test how the field handles data in a form.
'''
# some value
form = self.FormMultiple({'obj': [smart_text(self.obj1.pk), smart_text(self.obj2.pk)]})
self.assertTrue(form.is_valid())
self.assertEqual(set(form.cleaned_data['obj']), set([self.obj1, self.obj2]))
# no value
form = self.FormMultiple({})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['obj'], [])
# invalid value
form = self.FormMultiple({'obj': [smart_text(self.obj1.pk), '-1']})
self.assertFalse(form.is_valid())
self.assertTrue(form._errors['obj'])
# invalid list
form = self.FormMultiple({'obj': '-1'})
self.assertFalse(form.is_valid())
self.assertTrue(form._errors['obj'])
def test_modelchoicefield_objects_assignment(self):
field = CachedModelChoiceField(objects=self.cached_list)
field2 = CachedModelChoiceField(objects=self.cached_list[:2])
field.objects = self.cached_list[:2]
self.assertEqual(field.objects, field2.objects)
self.assertEqual(field.choices, field2.choices)
def test_modelmultiplechoicefield_objects_assignment(self):
field = CachedModelMultipleChoiceField(objects=self.cached_list)
field2 = CachedModelMultipleChoiceField(objects=self.cached_list[:2])
field.objects = self.cached_list[:2]
self.assertEqual(field.objects, field2.objects)
self.assertEqual(field.choices, field2.choices)<|fim▁end|> | # ``choices`` should be a list as ``[(smart_text(pk1), smart_text(obj1)), ...]``
self.assertEqual(
choices_without_empty_label,
[(smart_text(x.pk), smart_text(x)) for x in self.cached_list] |
<|file_name|>test_v3_two_factor_auth.py<|end_file_name|><|fim▁begin|># Copyright (C) 2015 Universidad Politecnica de Madrid
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import urllib
from keystone.tests import test_v3
from keystone.common import config as common_cfg
from keystone.contrib.two_factor_auth import controllers
from keystone.contrib.two_factor_auth import core
from keystone.openstack.common import log
from keystone import exception
import pyotp
import json
LOG = log.getLogger(__name__)
TWO_FACTOR_USER_URL = '/users/{user_id}'
TWO_FACTOR_BASE_URL = '/OS-TWO-FACTOR'
AUTH_ENDPOINT = '/two_factor_auth'
QUESTION_ENDPOINT = '/sec_question'
DATA_ENDPOINT = '/two_factor_data'
DEVICES_ENDPOINT = '/devices'
TWO_FACTOR_URL = TWO_FACTOR_USER_URL + TWO_FACTOR_BASE_URL + AUTH_ENDPOINT
TWO_FACTOR_QUESTION_URL = TWO_FACTOR_USER_URL + TWO_FACTOR_BASE_URL + QUESTION_ENDPOINT
TWO_FACTOR_DATA_URL = TWO_FACTOR_USER_URL + TWO_FACTOR_BASE_URL + DATA_ENDPOINT
TWO_FACTOR_DEVICES_URL = TWO_FACTOR_USER_URL + TWO_FACTOR_BASE_URL + DEVICES_ENDPOINT
class TwoFactorBaseTests(test_v3.RestfulTestCase):
EXTENSION_NAME = 'two_factor_auth'
EXTENSION_TO_ADD = 'two_factor_auth_extension'
SAMPLE_SECURITY_QUESTION = 'Sample question'
SAMPLE_SECURITY_ANSWER = 'Sample answer'
def setUp(self):
super(TwoFactorBaseTests, self).setUp()
# Now that the app has been served, we can query CONF values
self.base_url = 'http://localhost/v3'
self.controller = controllers.TwoFactorV3Controller()
self.manager = core.TwoFactorAuthManager()
def _create_two_factor_key(self, user_id, expected_status=None):
data = self.new_ref()
data['security_question'] = self.SAMPLE_SECURITY_QUESTION
data['security_answer'] = self.SAMPLE_SECURITY_ANSWER
return self.post(
TWO_FACTOR_URL.format(user_id=user_id),
body={'two_factor_auth': data},
expected_status=expected_status
)
def _create_two_factor_key_no_data(self, user_id, expected_status=None):
return self.post(
TWO_FACTOR_URL.format(user_id=user_id),
expected_status=expected_status
)
def _delete_two_factor_key(self, user_id, expected_status=None):
return self.delete(TWO_FACTOR_URL.format(user_id=user_id), expected_status=expected_status)
def _check_is_two_factor_enabled(self, expected_status=None, **kwargs):
return self.head(
            TWO_FACTOR_BASE_URL + AUTH_ENDPOINT + '?' + urllib.urlencode(kwargs),
expected_status=expected_status)
def _check_security_question(self, user_id, sec_answer, expected_status=None):
body = {
'two_factor_auth': {
'security_answer': sec_answer
}
}
return self.get(TWO_FACTOR_QUESTION_URL.format(user_id=user_id),
expected_status=expected_status,
body=body)
def _get_two_factor_data(self, user_id, expected_status=None):
return self.get(TWO_FACTOR_DATA_URL.format(user_id=user_id),
expected_status=expected_status)
def _remember_device(self, user_id, expected_status=None, **kwargs):
try:
kwargs['user_id'] = user_id
self.manager.is_two_factor_enabled(user_id=user_id)
except exception.NotFound:
self._create_two_factor_key(user_id=user_id)
return json.loads(self.post(TWO_FACTOR_BASE_URL + DEVICES_ENDPOINT + '?' + urllib.urlencode(kwargs)).body)['two_factor_auth']
def _check_for_device(self, expected_status=None, **kwargs):
        return self.head(TWO_FACTOR_BASE_URL + DEVICES_ENDPOINT + '?' + urllib.urlencode(kwargs), expected_status=expected_status)
def _delete_devices(self, user_id, expected_status=None):
return self.delete(TWO_FACTOR_DEVICES_URL.format(user_id=user_id), expected_status=expected_status)
def _create_user(self):
user = self.new_user_ref(domain_id=self.domain_id)
password = user['password']
user = self.identity_api.create_user(user)
user['password'] = password
return user
def _delete_user(self, user_id):
self.delete(TWO_FACTOR_USER_URL.format(user_id=user_id))
class TwoFactorCRUDTests(TwoFactorBaseTests):
def test_two_factor_enable(self):
self._create_two_factor_key(user_id=self.user_id)
def test_two_factor_new_code(self):
key1 = self._create_two_factor_key(user_id=self.user_id)
key2 = self._create_two_factor_key(user_id=self.user_id)
self.assertNotEqual(key1, key2)
def test_two_factor_new_code_no_data_right(self):
self._create_two_factor_key(user_id=self.user_id)
self._create_two_factor_key_no_data(user_id=self.user_id)
def test_two_factor_new_code_no_data_wrong(self):
self._create_two_factor_key_no_data(user_id=self.user_id, expected_status=400)
def test_two_factor_disable_after_enabling(self):<|fim▁hole|> def test_two_factor_disable_without_enabling(self):
self._delete_two_factor_key(user_id=self.user_id, expected_status=404)
def test_two_factor_is_enabled(self):
self._create_two_factor_key(user_id=self.user_id)
self._check_is_two_factor_enabled(user_id=self.user_id)
def test_two_factor_is_enabled_name_and_domain(self):
self._create_two_factor_key(user_id=self.user_id)
self._check_is_two_factor_enabled(
user_name=self.user['name'],
domain_id=self.user['domain_id'])
def test_two_factor_is_disabled(self):
self._check_is_two_factor_enabled(user_id=self.user_id, expected_status=404)
def test_two_factor_is_disabled_name_and_domain(self):
self._check_is_two_factor_enabled(
user_name=self.user['name'],
domain_id=self.user['domain_id'],
expected_status=404)
def test_two_factor_check_no_params(self):
self._check_is_two_factor_enabled(expected_status=400)
def test_two_factor_check_no_domain(self):
self._check_is_two_factor_enabled(
user_name=self.user['name'],
expected_status=400)
def test_two_factor_check_no_username(self):
self._check_is_two_factor_enabled(
domain_id=self.user['domain_id'],
expected_status=400)
def test_two_factor_is_enabled_after_deleting(self):
self._create_two_factor_key(user_id=self.user_id)
self._check_is_two_factor_enabled(user_id=self.user_id)
self._delete_two_factor_key(user_id=self.user_id)
self._check_is_two_factor_enabled(user_id=self.user_id, expected_status=404)
def test_two_factor_create_key_for_nonexistent_user(self):
self._create_two_factor_key(user_id='nonexistent_user', expected_status=404)
def test_two_factor_delete_user(self):
user = self._create_user()
self._create_two_factor_key(user_id=user['id'])
self._check_is_two_factor_enabled(user_id=user['id'])
self._delete_user(user['id'])
self._check_is_two_factor_enabled(user_id=user['id'], expected_status=404)
class TwoFactorSecQuestionTests(TwoFactorBaseTests):
def test_security_question_get(self):
self._create_two_factor_key(user_id=self.user_id)
data = self._get_two_factor_data(user_id=self.user_id)
self.assertEqual(data.result['two_factor_auth']['security_question'],
self.SAMPLE_SECURITY_QUESTION)
def test_security_question_correct(self):
self._create_two_factor_key(user_id=self.user_id)
self._check_security_question(user_id=self.user_id,
sec_answer=self.SAMPLE_SECURITY_ANSWER)
def test_security_question_wrong(self):
self._create_two_factor_key(user_id=self.user_id)
self._check_security_question(user_id=self.user_id,
sec_answer='Wrong answer',
expected_status=401)
def test_security_question_nonexistent(self):
self._check_security_question(user_id=self.user_id,
sec_answer='Does not matter',
expected_status=404)
class TwoFactorDevicesCRUDTests(TwoFactorBaseTests):
def test_remember_device(self):
self._remember_device(user_id=self.user_id)
def test_remember_device_name_and_domain(self):
self._remember_device(user_id=self.user_id,
user_name=self.user['name'],
domain_id=self.user['domain_id'])
def test_device_right_data(self):
data = self._remember_device(user_id=self.user_id)
self._check_for_device(user_id=self.user_id,
device_id=data['device_id'],
device_token=data['device_token'])
def test_device_right_data_name_and_domain(self):
data = self._remember_device(user_id=self.user_id,
user_name=self.user['name'],
domain_id=self.user['domain_id'])
self._check_for_device(user_name=self.user['name'],
domain_id=self.user['domain_id'],
device_id=data['device_id'],
device_token=data['device_token'])
def test_device_updates_token(self):
data = self._remember_device(user_id=self.user_id)
new_data = self._remember_device(user_id=self.user_id,
device_id=data['device_id'],
device_token=data['device_token'])
self.assertEqual(new_data['device_id'], data['device_id'])
self.assertNotEqual(new_data['device_token'], data['device_token'])
def test_device_wrong_user(self):
user = self._create_user()
data = self._remember_device(user_id=self.user_id)
self._check_for_device(user_id=user['id'],
device_id=data['device_id'],
device_token=data['device_token'],
expected_status=404)
def test_device_wrong_device(self):
data = self._remember_device(user_id=self.user_id)
self._check_for_device(user_id=self.user_id,
device_id='just_another_device',
device_token=data['device_token'],
expected_status=404)
def test_device_fake_token(self):
data = self._remember_device(user_id=self.user_id)
self._check_for_device(user_id=self.user_id,
device_id=data['device_id'],
device_token='fake_token',
expected_status=404)
def test_device_old_token(self):
data = self._remember_device(user_id=self.user_id)
self._remember_device(user_id=self.user_id,
device_id=data['device_id'],
device_token=data['device_token'])
self._check_for_device(user_id=self.user_id,
device_id=data['device_id'],
device_token=data['device_token'],
expected_status=403)
def test_device_delete_all(self):
data = self._remember_device(user_id=self.user_id)
self._delete_devices(user_id=self.user_id)
self._check_for_device(user_id=self.user_id,
device_id=data['device_id'],
device_token=data['device_token'],
expected_status=404)
def test_device_does_not_delete_all_devices_when_fake_token(self):
data = self._remember_device(user_id=self.user_id)
self._check_for_device(user_id=self.user_id,
device_id=data['device_id'],
device_token='fake_token',
expected_status=404)
self._check_for_device(user_id=self.user_id,
device_id=data['device_id'],
device_token=data['device_token'])
def test_device_deletes_all_devices_when_old_token(self):
data = self._remember_device(user_id=self.user_id)
new_data = self._remember_device(user_id=self.user_id,
device_id=data['device_id'],
device_token=data['device_token'])
self._check_for_device(user_id=self.user_id,
device_id=data['device_id'],
device_token=data['device_token'],
expected_status=403)
self._check_for_device(user_id=self.user_id,
device_id=new_data['device_id'],
device_token=new_data['device_token'],
expected_status=404)
def test_device_delete_user(self):
user = self._create_user()
data = self._remember_device(user_id=user['id'])
self._delete_user(user['id'])
self._check_for_device(user_id=user['id'],
device_id=data['device_id'],
device_token=data['device_token'],
expected_status=404)
def test_device_disable_two_factor(self):
data = self._remember_device(user_id=self.user_id)
self._delete_two_factor_key(user_id=self.user_id)
self._check_for_device(user_id=self.user_id,
device_id=data['device_id'],
device_token=data['device_token'],
expected_status=404)
class TwoFactorAuthTests(TwoFactorBaseTests):
def auth_plugin_config_override(self, methods=None, **method_classes):
if methods is None:
methods = ['external', 'password', 'token', 'oauth1', 'saml2', 'oauth2']
if not method_classes:
method_classes = dict(
external='keystone.auth.plugins.external.DefaultDomain',
password='keystone.auth.plugins.two_factor.TwoFactor',
token='keystone.auth.plugins.token.Token',
oauth1='keystone.auth.plugins.oauth1.OAuth',
saml2='keystone.auth.plugins.saml2.Saml2',
oauth2='keystone.auth.plugins.oauth2.OAuth2',
)
self.config_fixture.config(group='auth', methods=methods)
common_cfg.setup_authentication()
if method_classes:
self.config_fixture.config(group='auth', **method_classes)
def _auth_body(self, **kwargs):
body = {
"auth": {
"identity": {
"methods": [
"password"
],
"password": {
"user": {
}
},
}
}
}
payload = body['auth']['identity']['password']
if 'user_id' in kwargs:
payload['user']['id'] = kwargs['user_id']
if 'password' in kwargs:
payload['user']['password'] = kwargs['password']
if 'user_name' in kwargs:
payload['user']['name'] = kwargs['user_name']
if 'domain_id' in kwargs:
payload['user']['domain'] = {}
payload['user']['domain']['id'] = kwargs['domain_id']
if 'verification_code' in kwargs:
payload['user']['verification_code'] = kwargs['verification_code']
if 'device_data' in kwargs:
payload['user']['device_data'] = kwargs['device_data']
return body
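    # Illustrative result of _auth_body(user_id='u1', password='p',
    # verification_code='123456'):
    #   {'auth': {'identity': {'methods': ['password'],
    #             'password': {'user': {'id': 'u1', 'password': 'p',
    #                                   'verification_code': '123456'}}}}}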
def _authenticate(self, auth_body, expected_status=201):
return self.post('/auth/tokens', body=auth_body, expected_status=expected_status, noauth=True)
def _get_current_code(self, user_id):
two_factor_info = self.manager.get_two_factor_info(user_id)
totp = pyotp.TOTP(two_factor_info.two_factor_key)
return totp.now()
def test_auth_correct(self):
self._create_two_factor_key(user_id=self.user_id)
req = self._auth_body(user_id=self.user_id,
password=self.user['password'],
verification_code=self._get_current_code(self.user_id))
self._authenticate(auth_body=req)
def test_auth_correct_two_factor_disabled(self):
req = self._auth_body(
user_id=self.user_id,
password=self.user['password'])
self._authenticate(auth_body=req)
def test_auth_correct_name_and_domain(self):
self._create_two_factor_key(user_id=self.user_id)
req = self._auth_body(
user_name=self.user['name'],
domain_id=self.user['domain_id'],
verification_code=self._get_current_code(self.user_id),
password=self.user['password'])
self._authenticate(auth_body=req)
def test_auth_correct_two_factor_disabled_name_and_domain(self):
req = self._auth_body(
user_name=self.user['name'],
domain_id=self.user['domain_id'],
password=self.user['password'])
self._authenticate(auth_body=req)
def test_auth_no_code(self):
self._create_two_factor_key(user_id=self.user_id)
req = self._auth_body(
user_id=self.user_id,
password=self.user['password'])
self._authenticate(auth_body=req, expected_status=400)
def test_auth_wrong_code(self):
self._create_two_factor_key(user_id=self.user_id)
req = self._auth_body(
user_id=self.user_id,
verification_code='123456',
password=self.user['password'])
self._authenticate(auth_body=req, expected_status=401)
def test_auth_right_device_data(self):
self._create_two_factor_key(user_id=self.user_id)
data = self.manager.remember_device(user_id=self.user_id)
req = self._auth_body(
user_id=self.user_id,
device_data=data,
password=self.user['password'])
self._authenticate(auth_body=req)
def test_auth_device_data_from_another_user(self):
user = self._create_user()
self._create_two_factor_key(user_id=user['id'])
self._create_two_factor_key(user_id=self.user_id)
user_device = self.manager.remember_device(user_id=self.user_id)
new_user_device = self.manager.remember_device(user_id=user['id'])
req = self._auth_body(
user_id=self.user_id,
device_data=new_user_device,
password=self.user['password'])
self._authenticate(auth_body=req, expected_status=401)<|fim▁end|> | self._create_two_factor_key(user_id=self.user_id)
self._delete_two_factor_key(user_id=self.user_id)
|
<|file_name|>models.go<|end_file_name|><|fim▁begin|>package main
import (
"errors"
"log"
"os"
_ "github.com/go-sql-driver/mysql"
"github.com/go-xorm/xorm"
)
// Account is a bank account.
type Account struct {
Id int64
Name string `xorm:"unique"`
Balance float64
	Version int `xorm:"version"` // optimistic locking
}
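// The `version` tag enables xorm's optimistic locking: each Update increments
// Version and adds the old value as a WHERE condition, so a concurrent writer
// makes the statement match zero rows instead of silently overwriting data.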
// BeforeInsert is called by xorm before a record is inserted.
func (a *Account) BeforeInsert() {
log.Printf("before insert: %s", a.Name)
}
// AfterInsert is called by xorm after a record is inserted.
func (a *Account) AfterInsert() {
log.Printf("after insert: %s", a.Name)
}
// The global ORM engine
var x *xorm.Engine
func init() {
	// Create the ORM engine and connect to the database
var err error
x, err = xorm.NewEngine("mysql", "root:password@tcp(10.10.0.122)/test?charset=utf8&parseTime=True&loc=Local")
if err != nil {
log.Fatalf("Fail to create engine: %v\n", err)
}
	// Sync the struct definition with the database table
if err = x.Sync(new(Account)); err != nil {
log.Fatalf("Fail to sync database: %v\n", err)
}
	// Log SQL statements to a file
f, err := os.Create("sql.log")
if err != nil {
log.Fatalf("Fail to create log file: %v\n", err)
return
}
x.SetLogger(xorm.NewSimpleLogger(f))
x.ShowSQL()
	// Set up the default LRU cache
cacher := xorm.NewLRUCacher(xorm.NewMemoryStore(), 1000)
x.SetDefaultCacher(cacher)
}
// newAccount creates a new account
func newAccount(name string, balance float64) error {
	// Insert a record that does not yet exist<|fim▁hole|>
func getAccountCount() (int64, error) {
return x.Count(new(Account))
}
// getAccount looks up an account by id
func getAccount(id int64) (*Account, error) {
a := &Account{}
	// Convenience method that queries directly by ID
has, err := x.ID(id).Get(a)
	// Check whether the query errored or the record does not exist
if err != nil {
return nil, err
} else if !has {
return nil, errors.New("Account does not exist")
}
return a, nil
}<|fim▁end|> | _, err := x.Insert(&Account{Name: name, Balance: balance})
return err
}
|
<|file_name|>selectionSort.py<|end_file_name|><|fim▁begin|># idea is to scan the elements from left to right
# partition the list into two parts:
# one empty [ where we shall keep things sorted ] and the other the given list
# scan and swap elements between the two parts
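# illustrative trace on the sample input used below:
#   [3,4,3,5,-1] -> [-1,4,3,5,3] -> [-1,3,4,5,3] -> [-1,3,3,5,4] -> [-1,3,3,4,5]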
def ss(a):
    for i in range(0, len(a) - 1):
        smallest = i
        for j in range(i, len(a)):
            if a[j] < a[smallest]:
                smallest = j
        a[i], a[smallest] = a[smallest], a[i]
    print(a)
<|fim▁hole|><|fim▁end|> | a=[3,4,3,5,-1]
ss(a) |
<|file_name|>config.js<|end_file_name|><|fim▁begin|>'use strict';
var program = require('commander');
var Q = require('q');
var fs = require('fs');
var resolve = require('path').resolve;
var stat = Q.denodeify(fs.stat);
var readFile = Q.denodeify(fs.readFile);
var writeFile = Q.denodeify(fs.writeFile);<|fim▁hole|>
program
.option('--get', 'Gets configuration property')
.option('--set', 'Sets a configuration value: Ex: "hello=world". If no value is set, then the property is unset from the config file');
program.parse(process.argv);
var args = program.args;
var opts = program.opts();
// Remove options that are not set in the same order as described above
var selectedOpt = Object.keys(opts).reduce(function (arr, next) {
if (opts[next]) {
arr.push(next);
}
return arr;
}, [])[0];
if (!args.length) {
process.exit(1);
}
args = [];
args.push(program.args[0].replace(/=.*$/, ''));
args.push(program.args[0].replace(new RegExp(args[0] + '=?'), ''));
if (args[1] === '') {
args[1] = undefined;
}
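// Illustrative: `config --set hello=world` yields args = ['hello', 'world'],
// while `config --get hello` yields args = ['hello', undefined].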
stat(resolve('./fxc.json'))
.then(function (stat) {
if (stat.isFile()) {
return readFile(resolve('./fxc.json'));
}
})
.catch(function () {
return Q.when(null);
})
.then(function (fileContent) {
fileContent = assign({}, fileContent && JSON.parse(fileContent.toString('utf8')));
if (selectedOpt === 'get') {
return fileContent[args[0]];
}
if (typeof args[1] !== 'undefined') {
fileContent[args[0]] = args[1];
} else {
delete fileContent[args[0]];
}
return writeFile(resolve('./fxc.json'), JSON.stringify(fileContent));
})
.then(function (output) {
if (output) {
console.log(output);
} else {
console.log('Property "' + args[0] + '" ' + (args[1] ? ('set to "' + args[1] + '"') : 'removed'));
}
process.exit();
});<|fim▁end|> | var assign = require('./util/assign'); |
<|file_name|>dependencies.go<|end_file_name|><|fim▁begin|>// Code generated by zanzibar
// @generated
// Copyright (c) 2018 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package module
import (
zanzibar "github.com/uber/zanzibar/runtime"
)
<|fim▁hole|>type Dependencies struct {
Default *zanzibar.DefaultDependencies
}<|fim▁end|> | // Dependencies contains dependencies for the bar client module |
<|file_name|>node.js<|end_file_name|><|fim▁begin|>import {Parser} from "./state"
import {SourceLocation} from "./locutil"
export class Node {
constructor(parser, pos, loc) {<|fim▁hole|> this.end = 0
if (parser.options.locations)
this.loc = new SourceLocation(parser, loc)
if (parser.options.directSourceFile)
this.sourceFile = parser.options.directSourceFile
if (parser.options.ranges)
this.range = [pos, 0]
}
}
// Start an AST node, attaching a start offset.
const pp = Parser.prototype
pp.startNode = function() {
return new Node(this, this.start, this.startLoc)
}
pp.startNodeAt = function(pos, loc) {
return new Node(this, pos, loc)
}
// Finish an AST node, adding `type` and `end` properties.
function finishNodeAt(node, type, pos, loc) {
node.type = type
node.end = pos
if (this.options.locations)
node.loc.end = loc
if (this.options.ranges)
node.range[1] = pos
return node
}
pp.finishNode = function(node, type) {
return finishNodeAt.call(this, node, type, this.lastTokEnd, this.lastTokEndLoc)
}
// Finish node at given position
pp.finishNodeAt = function(node, type, pos, loc) {
return finishNodeAt.call(this, node, type, pos, loc)
}<|fim▁end|> | this.type = ""
this.start = pos |
<|file_name|>accounts.py<|end_file_name|><|fim▁begin|>"""
sentry.web.forms.accounts
~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import pytz
import six
from datetime import datetime
from django import forms
from django.conf import settings
from django.contrib.auth import authenticate, get_user_model
from django.db.models import Q
from django.utils.text import capfirst, mark_safe
from django.utils.translation import ugettext_lazy as _
from sentry import newsletter, options
from sentry.auth import password_validation
from sentry.app import ratelimiter
from sentry.constants import LANGUAGES
from sentry.models import (Organization, OrganizationStatus, User, UserOption, UserOptionValue)
from sentry.security import capture_security_activity
from sentry.utils.auth import find_users, logger
from sentry.web.forms.fields import CustomTypedChoiceField, ReadOnlyTextField
from six.moves import range
def _get_timezone_choices():
results = []
for tz in pytz.common_timezones:
now = datetime.now(pytz.timezone(tz))
offset = now.strftime('%z')
results.append((int(offset), tz, '(UTC%s) %s' % (offset, tz)))
results.sort()
for i in range(len(results)):
results[i] = results[i][1:]
return results
TIMEZONE_CHOICES = _get_timezone_choices()
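# Illustrative entries, sorted by numeric UTC offset (offsets vary with DST at
# call time): [('America/New_York', '(UTC-0400) America/New_York'), ...,
#              ('UTC', '(UTC+0000) UTC'), ...]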
class AuthenticationForm(forms.Form):
username = forms.CharField(
label=_('Account'),
max_length=128,
widget=forms.TextInput(attrs={
'placeholder': _('username or email'),
'tabindex': 1,
}),
)
password = forms.CharField(
label=_('Password'),
widget=forms.PasswordInput(attrs={
'placeholder': _('password'),
'tabindex': 2,
}),
)
error_messages = {
'invalid_login':
_(
"Please enter a correct %(username)s and password. "
"Note that both fields may be case-sensitive."
),
'rate_limited':
_("You have made too many failed authentication "
"attempts. Please try again later."),
'no_cookies':
_(
"Your Web browser doesn't appear to have cookies "
"enabled. Cookies are required for logging in."
),
'inactive':
_("This account is inactive."),
}
def __init__(self, request=None, *args, **kwargs):
"""
If request is passed in, the form will validate that cookies are
enabled. Note that the request (a HttpRequest object) must have set a
cookie with the key TEST_COOKIE_NAME and value TEST_COOKIE_VALUE before
running this validation.
"""
self.request = request
self.user_cache = None
super(AuthenticationForm, self).__init__(*args, **kwargs)
# Set the label for the "username" field.
UserModel = get_user_model()
self.username_field = UserModel._meta.get_field(UserModel.USERNAME_FIELD)
if not self.fields['username'].label:
self.fields['username'].label = capfirst(self.username_field.verbose_name)
def clean_username(self):
value = (self.cleaned_data.get('username') or '').strip()
if not value:
return
return value.lower()
def is_rate_limited(self):
if self._is_ip_rate_limited():
return True
if self._is_user_rate_limited():
return True
return False
def _is_ip_rate_limited(self):
limit = options.get('auth.ip-rate-limit')
if not limit:
return False
ip_address = self.request.META['REMOTE_ADDR']
return ratelimiter.is_limited(
'auth:ip:{}'.format(ip_address),
limit,
)
def _is_user_rate_limited(self):
limit = options.get('auth.user-rate-limit')
if not limit:
return False
username = self.cleaned_data.get('username')
if not username:
return False
return ratelimiter.is_limited(
u'auth:username:{}'.format(username),
limit,
)
def clean(self):
username = self.cleaned_data.get('username')
password = self.cleaned_data.get('password')
if not (username and password):
raise forms.ValidationError(
self.error_messages['invalid_login'] %
{'username': self.username_field.verbose_name}
)
if self.is_rate_limited():
logger.info(
'user.auth.rate-limited',
extra={
'ip_address': self.request.META['REMOTE_ADDR'],
'username': username,
}
)
raise forms.ValidationError(self.error_messages['rate_limited'])
self.user_cache = authenticate(username=username, password=password)
if self.user_cache is None:
raise forms.ValidationError(
self.error_messages['invalid_login'] %
{'username': self.username_field.verbose_name}
)
self.check_for_test_cookie()
return self.cleaned_data
def check_for_test_cookie(self):
if self.request and not self.request.session.test_cookie_worked():
raise forms.ValidationError(self.error_messages['no_cookies'])
def get_user_id(self):
if self.user_cache:
return self.user_cache.id
return None
def get_user(self):
return self.user_cache
class PasswordlessRegistrationForm(forms.ModelForm):
name = forms.CharField(
label=_('Name'),
max_length=30,
widget=forms.TextInput(attrs={'placeholder': 'Jane Doe'}),
required=True
)
username = forms.EmailField(
label=_('Email'),
max_length=128,
widget=forms.TextInput(attrs={'placeholder': '[email protected]'}),
required=True
)
subscribe = CustomTypedChoiceField(
coerce=lambda x: six.text_type(x) == u'1',
label=_("Email updates"),
choices=(
(1, 'Yes, I would like to receive updates via email'),
(0, "No, I'd prefer not to receive these updates"),
),
widget=forms.RadioSelect,
required=True,
initial=False,
)
def __init__(self, *args, **kwargs):
super(PasswordlessRegistrationForm, self).__init__(*args, **kwargs)
if not newsletter.is_enabled():
del self.fields['subscribe']
else:
# NOTE: the text here is duplicated within the ``NewsletterConsent`` component
# in the UI
notice = (
"We'd love to keep you updated via email with product and feature "
"announcements, promotions, educational materials, and events. "
"Our updates focus on relevant information, and we'll never sell "
"your data to third parties. See our "
"<a href=\"{privacy_link}\">Privacy Policy</a> for more details."
)
self.fields['subscribe'].help_text = mark_safe(
notice.format(privacy_link=settings.PRIVACY_URL))
class Meta:
fields = ('username', 'name')
model = User
def clean_username(self):
value = (self.cleaned_data.get('username') or '').strip()
if not value:
return
if User.objects.filter(username__iexact=value).exists():
raise forms.ValidationError(
_('An account is already registered with that email address.'))
return value.lower()
def save(self, commit=True):
user = super(PasswordlessRegistrationForm, self).save(commit=False)
user.email = user.username
if commit:
user.save()
if self.cleaned_data.get('subscribe'):
newsletter.create_or_update_subscriptions(
user, list_ids=newsletter.get_default_list_ids())
return user
class RegistrationForm(PasswordlessRegistrationForm):
password = forms.CharField(
required=True,
widget=forms.PasswordInput(attrs={'placeholder': 'something super secret'}),
)
def clean_password(self):
password = self.cleaned_data['password']
password_validation.validate_password(password)
return password
def save(self, commit=True):
user = super(RegistrationForm, self).save(commit=False)
user.set_password(self.cleaned_data['password'])
if commit:
user.save()
if self.cleaned_data.get('subscribe'):
newsletter.create_or_update_subscriptions(
user, list_ids=newsletter.get_default_list_ids())
return user
class RecoverPasswordForm(forms.Form):
user = forms.CharField(
label=_('Account'),
max_length=128,
widget=forms.TextInput(attrs={'placeholder': _('username or email')}),
)
def clean_user(self):
value = (self.cleaned_data.get('user') or '').strip()
if not value:
return
users = find_users(value, with_valid_password=False)
if not users:
raise forms.ValidationError(_("We were unable to find a matching user."))
users = [u for u in users if not u.is_managed]
if not users:
raise forms.ValidationError(
_(
"The account you are trying to recover is managed and does not support password recovery."
)
)
if len(users) > 1:
raise forms.ValidationError(
_("Multiple accounts were found matching this email address.")
)
return users[0]
class ChangePasswordRecoverForm(forms.Form):
password = forms.CharField(widget=forms.PasswordInput())
def clean_password(self):
password = self.cleaned_data['password']
password_validation.validate_password(password)
return password
class EmailForm(forms.Form):
alt_email = forms.EmailField(
label=_('New Email'),
required=False,
help_text='Designate an alternative email for this account',
)
password = forms.CharField(
label=_('Current password'),
widget=forms.PasswordInput(),
help_text=_('You will need to enter your current account password to make changes.'),
required=True,
)
def __init__(self, user, *args, **kwargs):
self.user = user
super(EmailForm, self).__init__(*args, **kwargs)
needs_password = user.has_usable_password()
if not needs_password:
del self.fields['password']
def clean_password(self):
value = self.cleaned_data.get('password')
if value and not self.user.check_password(value):
raise forms.ValidationError(_('The password you entered is not correct.'))
elif not value:
raise forms.ValidationError(
_('You must confirm your current password to make changes.')
)
return value
class AccountSettingsForm(forms.Form):
name = forms.CharField(required=True, label=_('Name'), max_length=30)
username = forms.CharField(label=_('Username'), max_length=128)
email = forms.EmailField(label=_('Email'))
new_password = forms.CharField(
label=_('New password'),
widget=forms.PasswordInput(),
required=False,
# help_text=password_validation.password_validators_help_text_html(),
)
verify_new_password = forms.CharField(
label=_('Verify new password'),
widget=forms.PasswordInput(),
required=False,
)
password = forms.CharField(
label=_('Current password'),
widget=forms.PasswordInput(),
help_text='You will need to enter your current account password to make changes.',
required=False,
)
def __init__(self, user, request, *args, **kwargs):
self.user = user
self.request = request
super(AccountSettingsForm, self).__init__(*args, **kwargs)
needs_password = user.has_usable_password()
if self.user.is_managed:
# username and password always managed, email and
# name optionally managed
for field in ('email', 'name', 'username'):
if field == 'username' or field in settings.SENTRY_MANAGED_USER_FIELDS:
self.fields[field] = ReadOnlyTextField(label=self.fields[field].label)
if field == 'email':
needs_password = False
del self.fields['new_password']
del self.fields['verify_new_password']
# don't show username field if its the same as their email address
if self.user.email == self.user.username:
del self.fields['username']
if not needs_password:
del self.fields['password']
def is_readonly(self):
if self.user.is_managed:
return set(('email', 'name')) == set(settings.SENTRY_MANAGED_USER_FIELDS)
return False
def _clean_managed_field(self, field):
if self.user.is_managed and (
field == 'username' or field in settings.SENTRY_MANAGED_USER_FIELDS
):
return getattr(self.user, field)
return self.cleaned_data[field]
def clean_email(self):
value = self._clean_managed_field('email').lower()
if self.user.email.lower() == value:
return value
if User.objects.filter(Q(email__iexact=value) | Q(username__iexact=value)).exclude(
id=self.user.id
).exists():
raise forms.ValidationError(
_("There was an error adding %s: that email is already in use") %
self.cleaned_data['email']
)
return value
def clean_name(self):
return self._clean_managed_field('name')
def clean_username(self):
value = self._clean_managed_field('username')
if User.objects.filter(username__iexact=value).exclude(id=self.user.id).exists():
raise forms.ValidationError(_("That username is already in use."))
return value
def clean_password(self):
value = self.cleaned_data.get('password')
if value and not self.user.check_password(value):
raise forms.ValidationError('The password you entered is not correct.')
elif not value and (
self.cleaned_data.get('email', self.user.email) != self.user.email or
self.cleaned_data.get('new_password')
):
raise forms.ValidationError('You must confirm your current password to make changes.')
return value
def clean_verify_new_password(self):
new_password = self.cleaned_data.get('new_password')
if new_password:
verify_new_password = self.cleaned_data.get('verify_new_password')
if verify_new_password is None:
raise forms.ValidationError('You must verify your new password.')
if new_password != verify_new_password:
raise forms.ValidationError('Your new password and verify new password must match.')
return verify_new_password
def clean_new_password(self):
new_password = self.cleaned_data.get('new_password')
if new_password:
password_validation.validate_password(new_password)
return new_password
def save(self, commit=True):
if self.cleaned_data.get('new_password'):
self.user.set_password(self.cleaned_data['new_password'])
self.user.refresh_session_nonce(self.request)
capture_security_activity(
account=self.user,
type='password-changed',
actor=self.request.user,
ip_address=self.request.META['REMOTE_ADDR'],
send_email=True,
)
self.user.name = self.cleaned_data['name']
if self.cleaned_data['email'] != self.user.email:
new_username = self.user.email == self.user.username
else:
new_username = False
self.user.email = self.cleaned_data['email']
if self.cleaned_data.get('username'):
self.user.username = self.cleaned_data['username']
elif new_username and not User.objects.filter(username__iexact=self.user.email).exists():
self.user.username = self.user.email
if commit:
self.user.save()
return self.user
class AppearanceSettingsForm(forms.Form):
language = forms.ChoiceField(
label=_('Language'),
choices=LANGUAGES,
required=False,
widget=forms.Select(attrs={'class': 'input-xlarge'})
)
stacktrace_order = forms.ChoiceField(<|fim▁hole|> ('-1', _('Default (let Sentry decide)')), ('1', _('Most recent call last')),
('2', _('Most recent call first')),
),
help_text=_('Choose the default ordering of frames in stacktraces.'),
required=False,
widget=forms.Select(attrs={'class': 'input-xlarge'})
)
timezone = forms.ChoiceField(
label=_('Time zone'),
choices=TIMEZONE_CHOICES,
required=False,
widget=forms.Select(attrs={'class': 'input-xxlarge'})
)
clock_24_hours = forms.BooleanField(
label=_('Use a 24-hour clock'),
required=False,
)
def __init__(self, user, *args, **kwargs):
self.user = user
super(AppearanceSettingsForm, self).__init__(*args, **kwargs)
def save(self):
# Save user language
UserOption.objects.set_value(
user=self.user,
key='language',
value=self.cleaned_data['language'],
)
# Save stacktrace options
UserOption.objects.set_value(
user=self.user,
key='stacktrace_order',
value=self.cleaned_data['stacktrace_order'],
)
# Save time zone options
UserOption.objects.set_value(
user=self.user,
key='timezone',
value=self.cleaned_data['timezone'],
)
# Save clock 24 hours option
UserOption.objects.set_value(
user=self.user,
key='clock_24_hours',
value=self.cleaned_data['clock_24_hours'],
)
return self.user
class NotificationReportSettingsForm(forms.Form):
organizations = forms.ModelMultipleChoiceField(
queryset=Organization.objects.none(),
required=False,
widget=forms.CheckboxSelectMultiple(),
)
def __init__(self, user, *args, **kwargs):
self.user = user
super(NotificationReportSettingsForm, self).__init__(*args, **kwargs)
org_queryset = Organization.objects.filter(
status=OrganizationStatus.VISIBLE,
member_set__user=user,
)
disabled_orgs = set(
UserOption.objects.get_value(
user=user,
key='reports:disabled-organizations',
default=[],
)
)
self.fields['organizations'].queryset = org_queryset
self.fields['organizations'].initial = [
o.id for o in org_queryset if o.id not in disabled_orgs
]
def save(self):
enabled_orgs = set((o.id for o in self.cleaned_data.get('organizations')))
all_orgs = set(self.fields['organizations'].queryset.values_list('id', flat=True))
UserOption.objects.set_value(
user=self.user,
key='reports:disabled-organizations',
value=list(all_orgs.difference(enabled_orgs)),
)
class NotificationDeploySettingsForm(forms.Form):
CHOICES = [
(UserOptionValue.all_deploys, _('All deploys')),
(UserOptionValue.committed_deploys_only,
_('Deploys with your commits')), (UserOptionValue.no_deploys, _('Never'))
]
notifications = forms.ChoiceField(
choices=CHOICES,
required=False,
widget=forms.RadioSelect(),
)
def __init__(self, user, organization, *args, **kwargs):
self.user = user
self.organization = organization
super(NotificationDeploySettingsForm, self).__init__(*args, **kwargs)
self.fields['notifications'].label = "" # hide the label
deploy_setting = UserOption.objects.get_value(
user=user,
organization=self.organization,
key='deploy-emails',
default=UserOptionValue.committed_deploys_only,
)
self.fields['notifications'].initial = deploy_setting
def save(self):
value = self.data.get('{}-notifications'.format(self.prefix), None)
if value is not None:
UserOption.objects.set_value(
user=self.user,
organization=self.organization,
key='deploy-emails',
value=value,
)
class NotificationSettingsForm(forms.Form):
alert_email = forms.EmailField(
label=_('Email'),
help_text=_('Designate an alternative email address to send email notifications to.'),
required=False
)
subscribe_by_default = forms.BooleanField(
label=_('Automatically subscribe to alerts for new projects'),
help_text=_(
"When enabled, you'll automatically subscribe to alerts when you create or join a project."
),
required=False,
)
workflow_notifications = forms.ChoiceField(
label=_('Preferred workflow subscription level for new projects'),
choices=[
(UserOptionValue.all_conversations, "Receive workflow updates for all issues."),
(UserOptionValue.participating_only,
"Receive workflow updates only for issues that I am participating in or have subscribed to."),
(UserOptionValue.no_conversations, "Never receive workflow updates."),
],
help_text=_("This will be automatically set as your subscription preference when you create or join a project. It has no effect on existing projects."),
required=False,
)
self_notifications = forms.BooleanField(
label=_('Receive notifications about my own activity'),
help_text=_(
'Enable this if you wish to receive emails for your own actions, as well as others.'
),
required=False,
)
self_assign_issue = forms.BooleanField(
label=_('Claim unassigned issues when resolving them'),
help_text=_(
"When enabled, you'll automatically be assigned to unassigned issues when marking them as resolved."
),
required=False,
)
def __init__(self, user, *args, **kwargs):
self.user = user
super(NotificationSettingsForm, self).__init__(*args, **kwargs)
self.fields['alert_email'].initial = UserOption.objects.get_value(
user=self.user,
key='alert_email',
default=user.email,
)
self.fields['subscribe_by_default'].initial = (
UserOption.objects.get_value(
user=self.user,
key='subscribe_by_default',
default='1',
) == '1'
)
self.fields['workflow_notifications'].initial = UserOption.objects.get_value(
user=self.user,
key='workflow:notifications',
default=UserOptionValue.all_conversations,
project=None,
)
self.fields['self_notifications'].initial = UserOption.objects.get_value(
user=self.user, key='self_notifications', default='0'
) == '1'
self.fields['self_assign_issue'].initial = UserOption.objects.get_value(
user=self.user, key='self_assign_issue', default='0'
) == '1'
def get_title(self):
return "General"
def save(self):
UserOption.objects.set_value(
user=self.user,
key='alert_email',
value=self.cleaned_data['alert_email'],
)
UserOption.objects.set_value(
user=self.user,
key='subscribe_by_default',
value='1' if self.cleaned_data['subscribe_by_default'] else '0',
)
UserOption.objects.set_value(
user=self.user,
key='self_notifications',
value='1' if self.cleaned_data['self_notifications'] else '0',
)
UserOption.objects.set_value(
user=self.user,
key='self_assign_issue',
value='1' if self.cleaned_data['self_assign_issue'] else '0',
)
workflow_notifications_value = self.cleaned_data.get('workflow_notifications')
if not workflow_notifications_value:
UserOption.objects.unset_value(
user=self.user,
key='workflow:notifications',
project=None,
)
else:
UserOption.objects.set_value(
user=self.user,
key='workflow:notifications',
value=workflow_notifications_value,
project=None,
)
class ProjectEmailOptionsForm(forms.Form):
alert = forms.BooleanField(required=False)
workflow = forms.ChoiceField(
choices=[
(UserOptionValue.no_conversations, 'Nothing'),
(UserOptionValue.participating_only, 'Participating'),
(UserOptionValue.all_conversations, 'Everything'),
],
)
email = forms.ChoiceField(label="", choices=(), required=False,
widget=forms.Select())
def __init__(self, project, user, *args, **kwargs):
self.project = project
self.user = user
super(ProjectEmailOptionsForm, self).__init__(*args, **kwargs)
has_alerts = project.is_user_subscribed_to_mail_alerts(user)
# This allows users who have entered an alert_email value or have specified an email
# for notifications to keep their settings
emails = [e.email for e in user.get_verified_emails()]
alert_email = UserOption.objects.get_value(self.user, 'alert_email')
specified_email = UserOption.objects.get_value(self.user, 'mail:email', project=project)
emails.extend([user.email, alert_email, specified_email])
choices = [(email, email) for email in sorted(set(emails)) if email]
self.fields['email'].choices = choices
self.fields['alert'].initial = has_alerts
self.fields['workflow'].initial = UserOption.objects.get_value(
user=self.user,
project=self.project,
key='workflow:notifications',
default=UserOption.objects.get_value(
user=self.user,
project=None,
key='workflow:notifications',
default=UserOptionValue.all_conversations,
),
)
self.fields['email'].initial = specified_email or alert_email or user.email
def save(self):
UserOption.objects.set_value(
user=self.user,
key='mail:alert',
value=int(self.cleaned_data['alert']),
project=self.project,
)
UserOption.objects.set_value(
user=self.user,
key='workflow:notifications',
value=self.cleaned_data['workflow'],
project=self.project,
)
if self.cleaned_data['email']:
UserOption.objects.set_value(
user=self.user,
key='mail:email',
value=self.cleaned_data['email'],
project=self.project,
)
else:
UserOption.objects.unset_value(self.user, self.project, 'mail:email')
class TwoFactorForm(forms.Form):
otp = forms.CharField(
label=_('Authenticator code'),
max_length=20,
widget=forms.TextInput(
attrs={
'placeholder': _('Code from authenticator'),
'autofocus': True,
}
),
)
class ConfirmPasswordForm(forms.Form):
password = forms.CharField(
label=_('Sentry account password'),
widget=forms.PasswordInput(),
help_text='You will need to enter your current Sentry account password to make changes.',
required=True,
)
def __init__(self, user, *args, **kwargs):
self.user = user
super(ConfirmPasswordForm, self).__init__(*args, **kwargs)
needs_password = user.has_usable_password()
if not needs_password:
del self.fields['password']
def clean_password(self):
value = self.cleaned_data.get('password')
if value and not self.user.check_password(value):
raise forms.ValidationError(_('The password you entered is not correct.'))
elif not value:
raise forms.ValidationError(
_('You must confirm your current password to make changes.')
)
return value<|fim▁end|> | label=_('Stacktrace order'),
choices=( |
<|file_name|>require_recaptcha.js<|end_file_name|><|fim▁begin|>function recaptcha_verified () {<|fim▁hole|>}
function recaptcha_expired () {
document.querySelector('.recaptcha_submit').setAttribute('disabled', 'disabled')
}<|fim▁end|> | document.querySelector('.recaptcha_submit').removeAttribute('disabled') |
<|file_name|>feature_nulldummy.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3
# Copyright (c) 2016-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test NULLDUMMY softfork.
Connect to a single node.
Generate 2 blocks (save the coinbases for later).
Generate COINBASE_MATURITY (CB) more blocks to ensure the coinbases are mature.
[Policy/Consensus] Check that NULLDUMMY compliant transactions are accepted in block CB + 3.
[Policy] Check that non-NULLDUMMY transactions are rejected before activation.
[Consensus] Check that the new NULLDUMMY rules are not enforced on block CB + 4.
[Policy/Consensus] Check that the new NULLDUMMY rules are enforced on block CB + 5.
"""
import time
from test_framework.blocktools import (
COINBASE_MATURITY,
NORMAL_GBT_REQUEST_PARAMS,
add_witness_commitment,
create_block,
create_transaction,
)
from test_framework.messages import CTransaction
from test_framework.script import CScript
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
NULLDUMMY_ERROR = "non-mandatory-script-verify-flag (Dummy CHECKMULTISIG argument must be zero)"
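# Replace the NULLDUMMY dummy element (the empty initial push) in the scriptSig
# with OP_TRUE (0x51), yielding a transaction that violates the NULLDUMMY rule.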
def trueDummy(tx):
scriptSig = CScript(tx.vin[0].scriptSig)
newscript = []
for i in scriptSig:
if len(newscript) == 0:
assert len(i) == 0
newscript.append(b'\x51')
else:
newscript.append(i)<|fim▁hole|> tx.rehash()
class NULLDUMMYTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
# This script tests NULLDUMMY activation, which is part of the 'segwit' deployment, so we go through
# normal segwit activation here (and don't use the default always-on behaviour).
self.extra_args = [[
f'-segwitheight={COINBASE_MATURITY + 5}',
'-addresstype=legacy',
]]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
self.nodes[0].createwallet(wallet_name='wmulti', disable_private_keys=True)
wmulti = self.nodes[0].get_wallet_rpc('wmulti')
w0 = self.nodes[0].get_wallet_rpc(self.default_wallet_name)
self.address = w0.getnewaddress()
self.pubkey = w0.getaddressinfo(self.address)['pubkey']
self.ms_address = wmulti.addmultisigaddress(1, [self.pubkey])['address']
self.wit_address = w0.getnewaddress(address_type='p2sh-segwit')
self.wit_ms_address = wmulti.addmultisigaddress(1, [self.pubkey], '', 'p2sh-segwit')['address']
if not self.options.descriptors:
# Legacy wallets need to import these so that they are watched by the wallet. This is unnecessary (and does not need to be tested) for descriptor wallets
wmulti.importaddress(self.ms_address)
wmulti.importaddress(self.wit_ms_address)
self.coinbase_blocks = self.nodes[0].generate(2) # block height = 2
coinbase_txid = []
for i in self.coinbase_blocks:
coinbase_txid.append(self.nodes[0].getblock(i)['tx'][0])
self.nodes[0].generate(COINBASE_MATURITY) # block height = COINBASE_MATURITY + 2
self.lastblockhash = self.nodes[0].getbestblockhash()
self.lastblockheight = COINBASE_MATURITY + 2
self.lastblocktime = int(time.time()) + self.lastblockheight
self.log.info(f"Test 1: NULLDUMMY compliant base transactions should be accepted to mempool and mined before activation [{COINBASE_MATURITY + 3}]")
test1txs = [create_transaction(self.nodes[0], coinbase_txid[0], self.ms_address, amount=49)]
txid1 = self.nodes[0].sendrawtransaction(test1txs[0].serialize_with_witness().hex(), 0)
test1txs.append(create_transaction(self.nodes[0], txid1, self.ms_address, amount=48))
txid2 = self.nodes[0].sendrawtransaction(test1txs[1].serialize_with_witness().hex(), 0)
test1txs.append(create_transaction(self.nodes[0], coinbase_txid[1], self.wit_ms_address, amount=49))
txid3 = self.nodes[0].sendrawtransaction(test1txs[2].serialize_with_witness().hex(), 0)
self.block_submit(self.nodes[0], test1txs, False, True)
self.log.info("Test 2: Non-NULLDUMMY base multisig transaction should not be accepted to mempool before activation")
test2tx = create_transaction(self.nodes[0], txid2, self.ms_address, amount=47)
trueDummy(test2tx)
assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, test2tx.serialize_with_witness().hex(), 0)
self.log.info(f"Test 3: Non-NULLDUMMY base transactions should be accepted in a block before activation [{COINBASE_MATURITY + 4}]")
self.block_submit(self.nodes[0], [test2tx], False, True)
self.log.info("Test 4: Non-NULLDUMMY base multisig transaction is invalid after activation")
test4tx = create_transaction(self.nodes[0], test2tx.hash, self.address, amount=46)
test6txs = [CTransaction(test4tx)]
trueDummy(test4tx)
assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, test4tx.serialize_with_witness().hex(), 0)
self.block_submit(self.nodes[0], [test4tx])
self.log.info("Test 5: Non-NULLDUMMY P2WSH multisig transaction invalid after activation")
test5tx = create_transaction(self.nodes[0], txid3, self.wit_address, amount=48)
test6txs.append(CTransaction(test5tx))
test5tx.wit.vtxinwit[0].scriptWitness.stack[0] = b'\x01'
assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, test5tx.serialize_with_witness().hex(), 0)
self.block_submit(self.nodes[0], [test5tx], True)
self.log.info(f"Test 6: NULLDUMMY compliant base/witness transactions should be accepted to mempool and in block after activation [{COINBASE_MATURITY + 5}]")
for i in test6txs:
self.nodes[0].sendrawtransaction(i.serialize_with_witness().hex(), 0)
self.block_submit(self.nodes[0], test6txs, True, True)
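    # Build a block from the node's current template, append the given txs,
    # optionally add a witness commitment, and check whether the node accepts it.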
def block_submit(self, node, txs, witness=False, accept=False):
tmpl = node.getblocktemplate(NORMAL_GBT_REQUEST_PARAMS)
assert_equal(tmpl['previousblockhash'], self.lastblockhash)
assert_equal(tmpl['height'], self.lastblockheight + 1)
block = create_block(tmpl=tmpl, ntime=self.lastblocktime + 1)
for tx in txs:
tx.rehash()
block.vtx.append(tx)
block.hashMerkleRoot = block.calc_merkle_root()
        if witness:
            add_witness_commitment(block)
block.rehash()
block.solve()
assert_equal(None if accept else 'block-validation-failed', node.submitblock(block.serialize().hex()))
        if accept:
assert_equal(node.getbestblockhash(), block.hash)
self.lastblockhash = block.hash
self.lastblocktime += 1
self.lastblockheight += 1
else:
assert_equal(node.getbestblockhash(), self.lastblockhash)
if __name__ == '__main__':
NULLDUMMYTest().main()<|fim▁end|> | tx.vin[0].scriptSig = CScript(newscript) |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|><|fim▁hole|>import IRT
print "Loading comatmor version 0.0.1"<|fim▁end|> | # module includes
import elliptic
import heat |
<|file_name|>RenderTableSection.cpp<|end_file_name|><|fim▁begin|>/*
* Copyright (C) 1997 Martin Jones ([email protected])
* (C) 1997 Torben Weis ([email protected])
* (C) 1998 Waldo Bastian ([email protected])
* (C) 1999 Lars Knoll ([email protected])
* (C) 1999 Antti Koivisto ([email protected])
* Copyright (C) 2003, 2004, 2005, 2006, 2008, 2009, 2010 Apple Inc. All rights reserved.
* Copyright (C) 2006 Alexey Proskuryakov ([email protected])
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public License
* along with this library; see the file COPYING.LIB. If not, write to
* the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA 02110-1301, USA.
*/
#include "config.h"
#include "RenderTableSection.h"
#include "CachedImage.h"
#include "Document.h"
#include "HitTestResult.h"
#include "HTMLNames.h"
#include "PaintInfo.h"
#include "RenderTableCell.h"
#include "RenderTableCol.h"
#include "RenderTableRow.h"
#include "RenderView.h"
#include <limits>
#include <wtf/HashSet.h>
#include <wtf/Vector.h>
using namespace std;
namespace WebCore {
using namespace HTMLNames;
// These two variables balance memory consumption against repaint time on big tables.
static unsigned gMinTableSizeToUseFastPaintPathWithOverflowingCell = 75 * 75;
static float gMaxAllowedOverflowingCellRatioForFastPaintPath = 0.1f;
static inline void setRowLogicalHeightToRowStyleLogicalHeightIfNotRelative(RenderTableSection::RowStruct* row)
{
ASSERT(row && row->rowRenderer);
row->logicalHeight = row->rowRenderer->style()->logicalHeight();
if (row->logicalHeight.isRelative())
row->logicalHeight = Length();
}
RenderTableSection::RenderTableSection(Node* node)
: RenderBox(node)
, m_gridRows(0)
, m_cCol(0)
, m_cRow(-1)
, m_outerBorderStart(0)
, m_outerBorderEnd(0)
, m_outerBorderBefore(0)
, m_outerBorderAfter(0)
, m_needsCellRecalc(false)
, m_hasMultipleCellLevels(false)
{
// init RenderObject attributes
setInline(false); // our object is not Inline
}
RenderTableSection::~RenderTableSection()
{
clearGrid();
}
void RenderTableSection::styleDidChange(StyleDifference diff, const RenderStyle* oldStyle)
{
RenderBox::styleDidChange(diff, oldStyle);
propagateStyleToAnonymousChildren();
}
void RenderTableSection::willBeDestroyed()
{
RenderTable* recalcTable = table();
RenderBox::willBeDestroyed();
    // Recalc cell info because RenderTable stores unguarded pointers
    // to this RenderTableSection.
if (recalcTable)
recalcTable->setNeedsSectionRecalc();
}
void RenderTableSection::addChild(RenderObject* child, RenderObject* beforeChild)
{
// Make sure we don't append things after :after-generated content if we have it.
if (!beforeChild) {
if (RenderObject* afterContentRenderer = findAfterContentRenderer())
beforeChild = anonymousContainer(afterContentRenderer);
}
if (!child->isTableRow()) {
RenderObject* last = beforeChild;
if (!last)
last = lastChild();
if (last && last->isAnonymous() && !last->isBeforeOrAfterContent()) {
if (beforeChild == last)
beforeChild = last->firstChild();
last->addChild(child, beforeChild);
return;
}
// If beforeChild is inside an anonymous cell/row, insert into the cell or into
// the anonymous row containing it, if there is one.
RenderObject* lastBox = last;
while (lastBox && lastBox->parent()->isAnonymous() && !lastBox->isTableRow())
lastBox = lastBox->parent();
if (lastBox && lastBox->isAnonymous() && !lastBox->isBeforeOrAfterContent()) {
lastBox->addChild(child, beforeChild);
return;
}
RenderObject* row = new (renderArena()) RenderTableRow(document() /* anonymous table row */);
RefPtr<RenderStyle> newStyle = RenderStyle::create();
newStyle->inheritFrom(style());
newStyle->setDisplay(TABLE_ROW);
row->setStyle(newStyle.release());
addChild(row, beforeChild);
row->addChild(child);
return;
}
if (beforeChild)
setNeedsCellRecalc();
++m_cRow;
m_cCol = 0;
// make sure we have enough rows
if (!ensureRows(m_cRow + 1))
return;
m_grid[m_cRow].rowRenderer = toRenderTableRow(child);
if (!beforeChild)
setRowLogicalHeightToRowStyleLogicalHeightIfNotRelative(&m_grid[m_cRow]);
// If the next renderer is actually wrapped in an anonymous table row, we need to go up and find that.
while (beforeChild && beforeChild->parent() != this)
beforeChild = beforeChild->parent();
ASSERT(!beforeChild || beforeChild->isTableRow());
RenderBox::addChild(child, beforeChild);
toRenderTableRow(child)->updateBeforeAndAfterContent();
}
void RenderTableSection::removeChild(RenderObject* oldChild)
{
setNeedsCellRecalc();
RenderBox::removeChild(oldChild);
}
bool RenderTableSection::ensureRows(int numRows)
{
int nRows = m_gridRows;
if (numRows > nRows) {
if (numRows > static_cast<int>(m_grid.size())) {
size_t maxSize = numeric_limits<size_t>::max() / sizeof(RowStruct);
if (static_cast<size_t>(numRows) > maxSize)
return false;
m_grid.grow(numRows);
}
m_gridRows = numRows;
int nCols = max(1, table()->numEffCols());
for (int r = nRows; r < numRows; r++) {
m_grid[r].row = new Row(nCols);
m_grid[r].rowRenderer = 0;
m_grid[r].baseline = 0;
m_grid[r].logicalHeight = Length();
}
}
return true;
}
void RenderTableSection::addCell(RenderTableCell* cell, RenderTableRow* row)
{
int rSpan = cell->rowSpan();
int cSpan = cell->colSpan();
Vector<RenderTable::ColumnStruct>& columns = table()->columns();
int nCols = columns.size();
// ### mozilla still seems to do the old HTML way, even for strict DTD
    // (see the annotation on table cell layout in the CSS specs and the testcase below:
// <TABLE border>
// <TR><TD>1 <TD rowspan="2">2 <TD>3 <TD>4
// <TR><TD colspan="2">5
// </TABLE>
while (m_cCol < nCols && (cellAt(m_cRow, m_cCol).hasCells() || cellAt(m_cRow, m_cCol).inColSpan))
m_cCol++;
if (rSpan == 1) {
// we ignore height settings on rowspan cells
Length logicalHeight = cell->style()->logicalHeight();
if (logicalHeight.isPositive() || (logicalHeight.isRelative() && logicalHeight.value() >= 0)) {
Length cRowLogicalHeight = m_grid[m_cRow].logicalHeight;
switch (logicalHeight.type()) {
case Percent:
if (!(cRowLogicalHeight.isPercent()) ||
(cRowLogicalHeight.isPercent() && cRowLogicalHeight.percent() < logicalHeight.percent()))
m_grid[m_cRow].logicalHeight = logicalHeight;
break;
case Fixed:
if (cRowLogicalHeight.type() < Percent ||
(cRowLogicalHeight.isFixed() && cRowLogicalHeight.value() < logicalHeight.value()))
m_grid[m_cRow].logicalHeight = logicalHeight;
break;
case Relative:
default:
break;
}
}
}
// make sure we have enough rows
if (!ensureRows(m_cRow + rSpan))
return;
m_grid[m_cRow].rowRenderer = row;
int col = m_cCol;
// tell the cell where it is
bool inColSpan = false;
while (cSpan) {
int currentSpan;
if (m_cCol >= nCols) {
table()->appendColumn(cSpan);
currentSpan = cSpan;
} else {
if (cSpan < (int)columns[m_cCol].span)
table()->splitColumn(m_cCol, cSpan);
currentSpan = columns[m_cCol].span;
}
for (int r = 0; r < rSpan; r++) {
CellStruct& c = cellAt(m_cRow + r, m_cCol);
ASSERT(cell);
c.cells.append(cell);
// If cells overlap then we take the slow path for painting.
if (c.cells.size() > 1)
m_hasMultipleCellLevels = true;
if (inColSpan)
c.inColSpan = true;
}
m_cCol++;
cSpan -= currentSpan;
inColSpan = true;
}
cell->setRow(m_cRow);
cell->setCol(table()->effColToCol(col));
}
void RenderTableSection::setCellLogicalWidths()
{
Vector<LayoutUnit>& columnPos = table()->columnPositions();
LayoutStateMaintainer statePusher(view());
for (int i = 0; i < m_gridRows; i++) {
Row& row = *m_grid[i].row;
int cols = row.size();
for (int j = 0; j < cols; j++) {
CellStruct& current = row[j];
RenderTableCell* cell = current.primaryCell();
if (!cell || current.inColSpan)
continue;
int endCol = j;
int cspan = cell->colSpan();
while (cspan && endCol < cols) {
ASSERT(endCol < (int)table()->columns().size());
cspan -= table()->columns()[endCol].span;
endCol++;
}
int w = columnPos[endCol] - columnPos[j] - table()->hBorderSpacing();
int oldLogicalWidth = cell->logicalWidth();
if (w != oldLogicalWidth) {<|fim▁hole|> // rows don't push a coordinate transform, that's not necessary.
statePusher.push(this, IntSize(x(), y()));
}
cell->repaint();
}
cell->updateLogicalWidth(w);
}
}
}
statePusher.pop(); // only pops if we pushed
}
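// Compute the cumulative row positions (m_rowPos) from row/cell heights and
// baselines; returns the total logical height of the section.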
LayoutUnit RenderTableSection::calcRowLogicalHeight()
{
#ifndef NDEBUG
setNeedsLayoutIsForbidden(true);
#endif
ASSERT(!needsLayout());
RenderTableCell* cell;
LayoutUnit spacing = table()->vBorderSpacing();
LayoutStateMaintainer statePusher(view());
m_rowPos.resize(m_gridRows + 1);
m_rowPos[0] = spacing;
for (int r = 0; r < m_gridRows; r++) {
m_rowPos[r + 1] = 0;
m_grid[r].baseline = 0;
LayoutUnit baseline = 0;
LayoutUnit bdesc = 0;
LayoutUnit ch = m_grid[r].logicalHeight.calcMinValue(0);
LayoutUnit pos = m_rowPos[r] + ch + (m_grid[r].rowRenderer ? spacing : 0);
m_rowPos[r + 1] = max(m_rowPos[r + 1], pos);
Row* row = m_grid[r].row;
int totalCols = row->size();
for (int c = 0; c < totalCols; c++) {
CellStruct& current = cellAt(r, c);
cell = current.primaryCell();
if (!cell || current.inColSpan)
continue;
if ((cell->row() + cell->rowSpan() - 1) > r)
continue;
int indx = max(r - cell->rowSpan() + 1, 0);
if (cell->hasOverrideHeight()) {
if (!statePusher.didPush()) {
// Technically, we should also push state for the row, but since
// rows don't push a coordinate transform, that's not necessary.
statePusher.push(this, locationOffset());
}
cell->clearIntrinsicPadding();
cell->clearOverrideSize();
cell->setChildNeedsLayout(true, false);
cell->layoutIfNeeded();
}
LayoutUnit adjustedPaddingBefore = cell->paddingBefore() - cell->intrinsicPaddingBefore();
LayoutUnit adjustedPaddingAfter = cell->paddingAfter() - cell->intrinsicPaddingAfter();
LayoutUnit adjustedLogicalHeight = cell->logicalHeight() - (cell->intrinsicPaddingBefore() + cell->intrinsicPaddingAfter());
// Explicit heights use the border box in quirks mode. In strict mode do the right
// thing and actually add in the border and padding.
ch = cell->style()->logicalHeight().calcValue(0) +
(document()->inQuirksMode() ? 0 : (adjustedPaddingBefore + adjustedPaddingAfter +
cell->borderBefore() + cell->borderAfter()));
ch = max(ch, adjustedLogicalHeight);
pos = m_rowPos[indx] + ch + (m_grid[r].rowRenderer ? spacing : 0);
m_rowPos[r + 1] = max(m_rowPos[r + 1], pos);
// find out the baseline
EVerticalAlign va = cell->style()->verticalAlign();
if (va == BASELINE || va == TEXT_BOTTOM || va == TEXT_TOP || va == SUPER || va == SUB) {
LayoutUnit b = cell->cellBaselinePosition();
if (b > cell->borderBefore() + cell->paddingBefore()) {
baseline = max(baseline, b - cell->intrinsicPaddingBefore());
bdesc = max(bdesc, m_rowPos[indx] + ch - (b - cell->intrinsicPaddingBefore()));
}
}
}
// do we have baseline aligned elements?
if (baseline) {
// increase rowheight if baseline requires
m_rowPos[r + 1] = max(m_rowPos[r + 1], baseline + bdesc + (m_grid[r].rowRenderer ? spacing : 0));
m_grid[r].baseline = baseline;
}
m_rowPos[r + 1] = max(m_rowPos[r + 1], m_rowPos[r]);
}
#ifndef NDEBUG
setNeedsLayoutIsForbidden(false);
#endif
ASSERT(!needsLayout());
statePusher.pop();
return m_rowPos[m_gridRows];
}
void RenderTableSection::layout()
{
ASSERT(needsLayout());
LayoutStateMaintainer statePusher(view(), this, locationOffset(), style()->isFlippedBlocksWritingMode());
for (RenderObject* child = children()->firstChild(); child; child = child->nextSibling()) {
if (child->isTableRow()) {
child->layoutIfNeeded();
ASSERT(!child->needsLayout());
}
}
statePusher.pop();
setNeedsLayout(false);
}
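// Distribute any extra logical height (toAdd) first to percent-height rows,
// then to auto-height rows, then proportionally to all rows, before setting
// the final positions and sizes of rows and cells.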
LayoutUnit RenderTableSection::layoutRows(LayoutUnit toAdd)
{
#ifndef NDEBUG
setNeedsLayoutIsForbidden(true);
#endif
ASSERT(!needsLayout());
LayoutUnit rHeight;
int rindx;
int totalRows = m_gridRows;
// Set the width of our section now. The rows will also be this width.
setLogicalWidth(table()->contentLogicalWidth());
m_overflow.clear();
m_overflowingCells.clear();
m_forceSlowPaintPathWithOverflowingCell = false;
if (toAdd && totalRows && (m_rowPos[totalRows] || !nextSibling())) {
LayoutUnit totalHeight = m_rowPos[totalRows] + toAdd;
LayoutUnit dh = toAdd;
int totalPercent = 0;
int numAuto = 0;
for (int r = 0; r < totalRows; r++) {
if (m_grid[r].logicalHeight.isAuto())
numAuto++;
else if (m_grid[r].logicalHeight.isPercent())
totalPercent += m_grid[r].logicalHeight.percent();
}
if (totalPercent) {
// try to satisfy percent
LayoutUnit add = 0;
totalPercent = min(totalPercent, 100);
int rh = m_rowPos[1] - m_rowPos[0];
for (int r = 0; r < totalRows; r++) {
if (totalPercent > 0 && m_grid[r].logicalHeight.isPercent()) {
LayoutUnit toAdd = min(dh, static_cast<LayoutUnit>((totalHeight * m_grid[r].logicalHeight.percent() / 100) - rh));
// If toAdd is negative, then we don't want to shrink the row (this bug
// affected Outlook Web Access).
toAdd = max<LayoutUnit>(0, toAdd);
add += toAdd;
dh -= toAdd;
totalPercent -= m_grid[r].logicalHeight.percent();
}
if (r < totalRows - 1)
rh = m_rowPos[r + 2] - m_rowPos[r + 1];
m_rowPos[r + 1] += add;
}
}
if (numAuto) {
            // distribute the remainder over auto-height rows
LayoutUnit add = 0;
for (int r = 0; r < totalRows; r++) {
if (numAuto > 0 && m_grid[r].logicalHeight.isAuto()) {
LayoutUnit toAdd = dh / numAuto;
add += toAdd;
dh -= toAdd;
numAuto--;
}
m_rowPos[r + 1] += add;
}
}
if (dh > 0 && m_rowPos[totalRows]) {
            // If some height is left over, distribute it in proportion to the original row heights.
LayoutUnit tot = m_rowPos[totalRows];
LayoutUnit add = 0;
LayoutUnit prev = m_rowPos[0];
for (int r = 0; r < totalRows; r++) {
// weight with the original height
add += dh * (m_rowPos[r + 1] - prev) / tot;
prev = m_rowPos[r + 1];
m_rowPos[r + 1] += add;
}
}
}
LayoutUnit hspacing = table()->hBorderSpacing();
LayoutUnit vspacing = table()->vBorderSpacing();
LayoutUnit nEffCols = table()->numEffCols();
LayoutStateMaintainer statePusher(view(), this, LayoutSize(x(), y()), style()->isFlippedBlocksWritingMode());
for (int r = 0; r < totalRows; r++) {
// Set the row's x/y position and width/height.
if (RenderTableRow* rowRenderer = m_grid[r].rowRenderer) {
rowRenderer->setLocation(LayoutPoint(0, m_rowPos[r]));
rowRenderer->setLogicalWidth(logicalWidth());
rowRenderer->setLogicalHeight(m_rowPos[r + 1] - m_rowPos[r] - vspacing);
rowRenderer->updateLayerTransform();
}
for (int c = 0; c < nEffCols; c++) {
CellStruct& cs = cellAt(r, c);
RenderTableCell* cell = cs.primaryCell();
if (!cell || cs.inColSpan)
continue;
rindx = cell->row();
rHeight = m_rowPos[rindx + cell->rowSpan()] - m_rowPos[rindx] - vspacing;
// Force percent height children to lay themselves out again.
// This will cause these children to grow to fill the cell.
// FIXME: There is still more work to do here to fully match WinIE (should
// it become necessary to do so). In quirks mode, WinIE behaves like we
// do, but it will clip the cells that spill out of the table section. In
// strict mode, Mozilla and WinIE both regrow the table to accommodate the
// new height of the cell (thus letting the percentages cause growth one
// time only). We may also not be handling row-spanning cells correctly.
//
// Note also the oddity where replaced elements always flex, and yet blocks/tables do
// not necessarily flex. WinIE is crazy and inconsistent, and we can't hope to
// match the behavior perfectly, but we'll continue to refine it as we discover new
// bugs. :)
bool cellChildrenFlex = false;
bool flexAllChildren = cell->style()->logicalHeight().isFixed()
|| (!table()->style()->logicalHeight().isAuto() && rHeight != cell->logicalHeight());
for (RenderObject* o = cell->firstChild(); o; o = o->nextSibling()) {
if (!o->isText() && o->style()->logicalHeight().isPercent() && (flexAllChildren || o->isReplaced() || (o->isBox() && toRenderBox(o)->scrollsOverflow()))) {
// Tables with no sections do not flex.
if (!o->isTable() || toRenderTable(o)->hasSections()) {
o->setNeedsLayout(true, false);
cellChildrenFlex = true;
}
}
}
if (HashSet<RenderBox*>* percentHeightDescendants = cell->percentHeightDescendants()) {
HashSet<RenderBox*>::iterator end = percentHeightDescendants->end();
for (HashSet<RenderBox*>::iterator it = percentHeightDescendants->begin(); it != end; ++it) {
RenderBox* box = *it;
if (!box->isReplaced() && !box->scrollsOverflow() && !flexAllChildren)
continue;
while (box != cell) {
if (box->normalChildNeedsLayout())
break;
box->setChildNeedsLayout(true, false);
box = box->containingBlock();
ASSERT(box);
if (!box)
break;
}
cellChildrenFlex = true;
}
}
if (cellChildrenFlex) {
cell->setChildNeedsLayout(true, false);
// Alignment within a cell is based off the calculated
// height, which becomes irrelevant once the cell has
// been resized based off its percentage.
cell->setOverrideHeightFromRowHeight(rHeight);
cell->layoutIfNeeded();
// If the baseline moved, we may have to update the data for our row. Find out the new baseline.
EVerticalAlign va = cell->style()->verticalAlign();
if (va == BASELINE || va == TEXT_BOTTOM || va == TEXT_TOP || va == SUPER || va == SUB) {
LayoutUnit baseline = cell->cellBaselinePosition();
if (baseline > cell->borderBefore() + cell->paddingBefore())
m_grid[r].baseline = max(m_grid[r].baseline, baseline);
}
}
LayoutUnit oldIntrinsicPaddingBefore = cell->intrinsicPaddingBefore();
LayoutUnit oldIntrinsicPaddingAfter = cell->intrinsicPaddingAfter();
LayoutUnit logicalHeightWithoutIntrinsicPadding = cell->logicalHeight() - oldIntrinsicPaddingBefore - oldIntrinsicPaddingAfter;
LayoutUnit intrinsicPaddingBefore = 0;
switch (cell->style()->verticalAlign()) {
case SUB:
case SUPER:
case TEXT_TOP:
case TEXT_BOTTOM:
case BASELINE: {
LayoutUnit b = cell->cellBaselinePosition();
if (b > cell->borderBefore() + cell->paddingBefore())
intrinsicPaddingBefore = getBaseline(r) - (b - oldIntrinsicPaddingBefore);
break;
}
case TOP:
break;
case MIDDLE:
intrinsicPaddingBefore = (rHeight - logicalHeightWithoutIntrinsicPadding) / 2;
break;
case BOTTOM:
intrinsicPaddingBefore = rHeight - logicalHeightWithoutIntrinsicPadding;
break;
default:
break;
}
LayoutUnit intrinsicPaddingAfter = rHeight - logicalHeightWithoutIntrinsicPadding - intrinsicPaddingBefore;
cell->setIntrinsicPaddingBefore(intrinsicPaddingBefore);
cell->setIntrinsicPaddingAfter(intrinsicPaddingAfter);
LayoutRect oldCellRect(cell->x(), cell->y() , cell->width(), cell->height());
LayoutPoint cellLocation(0, m_rowPos[rindx]);
if (!style()->isLeftToRightDirection())
cellLocation.setX(table()->columnPositions()[nEffCols] - table()->columnPositions()[table()->colToEffCol(cell->col() + cell->colSpan())] + hspacing);
else
cellLocation.setX(table()->columnPositions()[c] + hspacing);
cell->setLogicalLocation(cellLocation);
view()->addLayoutDelta(oldCellRect.location() - cell->location());
if (intrinsicPaddingBefore != oldIntrinsicPaddingBefore || intrinsicPaddingAfter != oldIntrinsicPaddingAfter)
cell->setNeedsLayout(true, false);
if (!cell->needsLayout() && view()->layoutState()->pageLogicalHeight() && view()->layoutState()->pageLogicalOffset(cell->logicalTop()) != cell->pageLogicalOffset())
cell->setChildNeedsLayout(true, false);
cell->layoutIfNeeded();
// FIXME: Make pagination work with vertical tables.
if (style()->isHorizontalWritingMode() && view()->layoutState()->pageLogicalHeight() && cell->height() != rHeight)
cell->setHeight(rHeight); // FIXME: Pagination might have made us change size. For now just shrink or grow the cell to fit without doing a relayout.
LayoutSize childOffset(cell->location() - oldCellRect.location());
if (childOffset.width() || childOffset.height()) {
view()->addLayoutDelta(childOffset);
// If the child moved, we have to repaint it as well as any floating/positioned
// descendants. An exception is if we need a layout. In this case, we know we're going to
// repaint ourselves (and the child) anyway.
if (!table()->selfNeedsLayout() && cell->checkForRepaintDuringLayout())
cell->repaintDuringLayoutIfMoved(oldCellRect);
}
}
}
#ifndef NDEBUG
setNeedsLayoutIsForbidden(false);
#endif
ASSERT(!needsLayout());
setLogicalHeight(m_rowPos[totalRows]);
unsigned totalCellsCount = nEffCols * totalRows;
int maxAllowedOverflowingCellsCount = totalCellsCount < gMinTableSizeToUseFastPaintPathWithOverflowingCell ? 0 : gMaxAllowedOverflowingCellRatioForFastPaintPath * totalCellsCount;
#ifndef NDEBUG
bool hasOverflowingCell = false;
#endif
// Now that our height has been determined, add in overflow from cells.
for (int r = 0; r < totalRows; r++) {
for (int c = 0; c < nEffCols; c++) {
CellStruct& cs = cellAt(r, c);
RenderTableCell* cell = cs.primaryCell();
if (!cell || cs.inColSpan)
continue;
if (r < totalRows - 1 && cell == primaryCellAt(r + 1, c))
continue;
addOverflowFromChild(cell);
#ifndef NDEBUG
hasOverflowingCell |= cell->hasVisualOverflow();
#endif
if (cell->hasVisualOverflow() && !m_forceSlowPaintPathWithOverflowingCell) {
m_overflowingCells.add(cell);
if (m_overflowingCells.size() > maxAllowedOverflowingCellsCount) {
                    // We need to set m_forceSlowPaintPathWithOverflowingCell only if there is at least
                    // one overflowing cell, as the hit-testing code relies on this information.
                    m_forceSlowPaintPathWithOverflowingCell = true;
                    // The slow path does not make any use of the overflowing-cell info, so don't hold on to the memory.
m_overflowingCells.clear();
}
}
}
}
ASSERT(hasOverflowingCell == this->hasOverflowingCell());
statePusher.pop();
return height();
}
LayoutUnit RenderTableSection::calcOuterBorderBefore() const
{
int totalCols = table()->numEffCols();
if (!m_gridRows || !totalCols)
return 0;
unsigned borderWidth = 0;
const BorderValue& sb = style()->borderBefore();
if (sb.style() == BHIDDEN)
return -1;
if (sb.style() > BHIDDEN)
borderWidth = sb.width();
const BorderValue& rb = firstChild()->style()->borderBefore();
if (rb.style() == BHIDDEN)
return -1;
if (rb.style() > BHIDDEN && rb.width() > borderWidth)
borderWidth = rb.width();
bool allHidden = true;
for (int c = 0; c < totalCols; c++) {
const CellStruct& current = cellAt(0, c);
if (current.inColSpan || !current.hasCells())
continue;
const BorderValue& cb = current.primaryCell()->style()->borderBefore(); // FIXME: Make this work with perpendicular and flipped cells.
// FIXME: Don't repeat for the same col group
RenderTableCol* colGroup = table()->colElement(c);
if (colGroup) {
const BorderValue& gb = colGroup->style()->borderBefore();
if (gb.style() == BHIDDEN || cb.style() == BHIDDEN)
continue;
allHidden = false;
if (gb.style() > BHIDDEN && gb.width() > borderWidth)
borderWidth = gb.width();
if (cb.style() > BHIDDEN && cb.width() > borderWidth)
borderWidth = cb.width();
} else {
if (cb.style() == BHIDDEN)
continue;
allHidden = false;
if (cb.style() > BHIDDEN && cb.width() > borderWidth)
borderWidth = cb.width();
}
}
if (allHidden)
return -1;
return borderWidth / 2;
}
LayoutUnit RenderTableSection::calcOuterBorderAfter() const
{
int totalCols = table()->numEffCols();
if (!m_gridRows || !totalCols)
return 0;
unsigned borderWidth = 0;
const BorderValue& sb = style()->borderAfter();
if (sb.style() == BHIDDEN)
return -1;
if (sb.style() > BHIDDEN)
borderWidth = sb.width();
const BorderValue& rb = lastChild()->style()->borderAfter();
if (rb.style() == BHIDDEN)
return -1;
if (rb.style() > BHIDDEN && rb.width() > borderWidth)
borderWidth = rb.width();
bool allHidden = true;
for (int c = 0; c < totalCols; c++) {
const CellStruct& current = cellAt(m_gridRows - 1, c);
if (current.inColSpan || !current.hasCells())
continue;
const BorderValue& cb = current.primaryCell()->style()->borderAfter(); // FIXME: Make this work with perpendicular and flipped cells.
// FIXME: Don't repeat for the same col group
RenderTableCol* colGroup = table()->colElement(c);
if (colGroup) {
const BorderValue& gb = colGroup->style()->borderAfter();
if (gb.style() == BHIDDEN || cb.style() == BHIDDEN)
continue;
allHidden = false;
if (gb.style() > BHIDDEN && gb.width() > borderWidth)
borderWidth = gb.width();
if (cb.style() > BHIDDEN && cb.width() > borderWidth)
borderWidth = cb.width();
} else {
if (cb.style() == BHIDDEN)
continue;
allHidden = false;
if (cb.style() > BHIDDEN && cb.width() > borderWidth)
borderWidth = cb.width();
}
}
if (allHidden)
return -1;
return (borderWidth + 1) / 2;
}
LayoutUnit RenderTableSection::calcOuterBorderStart() const
{
int totalCols = table()->numEffCols();
if (!m_gridRows || !totalCols)
return 0;
unsigned borderWidth = 0;
const BorderValue& sb = style()->borderStart();
if (sb.style() == BHIDDEN)
return -1;
if (sb.style() > BHIDDEN)
borderWidth = sb.width();
if (RenderTableCol* colGroup = table()->colElement(0)) {
const BorderValue& gb = colGroup->style()->borderStart();
if (gb.style() == BHIDDEN)
return -1;
if (gb.style() > BHIDDEN && gb.width() > borderWidth)
borderWidth = gb.width();
}
bool allHidden = true;
for (int r = 0; r < m_gridRows; r++) {
const CellStruct& current = cellAt(r, 0);
if (!current.hasCells())
continue;
// FIXME: Don't repeat for the same cell
const BorderValue& cb = current.primaryCell()->style()->borderStart(); // FIXME: Make this work with perpendicular and flipped cells.
const BorderValue& rb = current.primaryCell()->parent()->style()->borderStart();
if (cb.style() == BHIDDEN || rb.style() == BHIDDEN)
continue;
allHidden = false;
if (cb.style() > BHIDDEN && cb.width() > borderWidth)
borderWidth = cb.width();
if (rb.style() > BHIDDEN && rb.width() > borderWidth)
borderWidth = rb.width();
}
if (allHidden)
return -1;
return (borderWidth + (table()->style()->isLeftToRightDirection() ? 0 : 1)) / 2;
}
LayoutUnit RenderTableSection::calcOuterBorderEnd() const
{
int totalCols = table()->numEffCols();
if (!m_gridRows || !totalCols)
return 0;
unsigned borderWidth = 0;
const BorderValue& sb = style()->borderEnd();
if (sb.style() == BHIDDEN)
return -1;
if (sb.style() > BHIDDEN)
borderWidth = sb.width();
if (RenderTableCol* colGroup = table()->colElement(totalCols - 1)) {
const BorderValue& gb = colGroup->style()->borderEnd();
if (gb.style() == BHIDDEN)
return -1;
if (gb.style() > BHIDDEN && gb.width() > borderWidth)
borderWidth = gb.width();
}
bool allHidden = true;
for (int r = 0; r < m_gridRows; r++) {
const CellStruct& current = cellAt(r, totalCols - 1);
if (!current.hasCells())
continue;
// FIXME: Don't repeat for the same cell
const BorderValue& cb = current.primaryCell()->style()->borderEnd(); // FIXME: Make this work with perpendicular and flipped cells.
const BorderValue& rb = current.primaryCell()->parent()->style()->borderEnd();
if (cb.style() == BHIDDEN || rb.style() == BHIDDEN)
continue;
allHidden = false;
if (cb.style() > BHIDDEN && cb.width() > borderWidth)
borderWidth = cb.width();
if (rb.style() > BHIDDEN && rb.width() > borderWidth)
borderWidth = rb.width();
}
if (allHidden)
return -1;
return (borderWidth + (table()->style()->isLeftToRightDirection() ? 1 : 0)) / 2;
}
void RenderTableSection::recalcOuterBorder()
{
m_outerBorderBefore = calcOuterBorderBefore();
m_outerBorderAfter = calcOuterBorderAfter();
m_outerBorderStart = calcOuterBorderStart();
m_outerBorderEnd = calcOuterBorderEnd();
}
LayoutUnit RenderTableSection::firstLineBoxBaseline() const
{
if (!m_gridRows)
return -1;
LayoutUnit firstLineBaseline = m_grid[0].baseline;
if (firstLineBaseline)
return firstLineBaseline + m_rowPos[0];
firstLineBaseline = -1;
Row* firstRow = m_grid[0].row;
for (size_t i = 0; i < firstRow->size(); ++i) {
CellStruct& cs = firstRow->at(i);
RenderTableCell* cell = cs.primaryCell();
if (cell)
firstLineBaseline = max(firstLineBaseline, cell->logicalTop() + cell->paddingBefore() + cell->borderBefore() + cell->contentLogicalHeight());
}
return firstLineBaseline;
}
void RenderTableSection::paint(PaintInfo& paintInfo, const LayoutPoint& paintOffset)
{
// put this back in when all layout tests can handle it
// ASSERT(!needsLayout());
// avoid crashing on bugs that cause us to paint with dirty layout
if (needsLayout())
return;
unsigned totalRows = m_gridRows;
unsigned totalCols = table()->columns().size();
if (!totalRows || !totalCols)
return;
LayoutPoint adjustedPaintOffset = paintOffset + location();
PaintPhase phase = paintInfo.phase;
bool pushedClip = pushContentsClip(paintInfo, adjustedPaintOffset);
paintObject(paintInfo, adjustedPaintOffset);
if (pushedClip)
popContentsClip(paintInfo, phase, adjustedPaintOffset);
}
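// Used with std::stable_sort below: cells are collected in row-major order, so
// a stable sort on the row alone keeps cells within a row in column order.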
static inline bool compareCellPositions(RenderTableCell* elem1, RenderTableCell* elem2)
{
return elem1->row() < elem2->row();
}
// This comparison is used only when we have overflowing cells as we have an unsorted array to sort. We thus need
// to sort both on rows and columns to properly repaint.
static inline bool compareCellPositionsWithOverflowingCells(RenderTableCell* elem1, RenderTableCell* elem2)
{
if (elem1->row() != elem2->row())
return elem1->row() < elem2->row();
return elem1->col() < elem2->col();
}
void RenderTableSection::paintCell(RenderTableCell* cell, PaintInfo& paintInfo, const LayoutPoint& paintOffset)
{
LayoutPoint cellPoint = flipForWritingMode(cell, paintOffset, ParentToChildFlippingAdjustment);
PaintPhase paintPhase = paintInfo.phase;
RenderTableRow* row = toRenderTableRow(cell->parent());
if (paintPhase == PaintPhaseBlockBackground || paintPhase == PaintPhaseChildBlockBackground) {
// We need to handle painting a stack of backgrounds. This stack (from bottom to top) consists of
// the column group, column, row group, row, and then the cell.
RenderObject* col = table()->colElement(cell->col());
RenderObject* colGroup = 0;
if (col && col->parent()->style()->display() == TABLE_COLUMN_GROUP)
colGroup = col->parent();
// Column groups and columns first.
// FIXME: Columns and column groups do not currently support opacity, and they are being painted "too late" in
// the stack, since we have already opened a transparency layer (potentially) for the table row group.
// Note that we deliberately ignore whether or not the cell has a layer, since these backgrounds paint "behind" the
// cell.
cell->paintBackgroundsBehindCell(paintInfo, cellPoint, colGroup);
cell->paintBackgroundsBehindCell(paintInfo, cellPoint, col);
// Paint the row group next.
cell->paintBackgroundsBehindCell(paintInfo, cellPoint, this);
// Paint the row next, but only if it doesn't have a layer. If a row has a layer, it will be responsible for
// painting the row background for the cell.
if (!row->hasSelfPaintingLayer())
cell->paintBackgroundsBehindCell(paintInfo, cellPoint, row);
}
if ((!cell->hasSelfPaintingLayer() && !row->hasSelfPaintingLayer()) || paintInfo.phase == PaintPhaseCollapsedTableBorders)
cell->paint(paintInfo, cellPoint);
}
void RenderTableSection::paintObject(PaintInfo& paintInfo, const LayoutPoint& paintOffset)
{
// Check which rows and cols are visible and only paint these.
unsigned totalRows = m_gridRows;
unsigned totalCols = table()->columns().size();
PaintPhase paintPhase = paintInfo.phase;
LayoutUnit os = 2 * maximalOutlineSize(paintPhase);
unsigned startrow = 0;
unsigned endrow = totalRows;
LayoutRect localRepaintRect = paintInfo.rect;
localRepaintRect.moveBy(-paintOffset);
if (style()->isFlippedBlocksWritingMode()) {
if (style()->isHorizontalWritingMode())
localRepaintRect.setY(height() - localRepaintRect.maxY());
else
localRepaintRect.setX(width() - localRepaintRect.maxX());
}
if (!m_forceSlowPaintPathWithOverflowingCell) {
LayoutUnit before = (style()->isHorizontalWritingMode() ? localRepaintRect.y() : localRepaintRect.x()) - os;
// binary search to find a row
startrow = std::lower_bound(m_rowPos.begin(), m_rowPos.end(), before) - m_rowPos.begin();
// The binary search above gives us the first row with
        // a y position >= the top of the paint rect. Thus, the previous row
        // may need to be repainted as well.
if (startrow == m_rowPos.size() || (startrow > 0 && (m_rowPos[startrow] > before)))
--startrow;
LayoutUnit after = (style()->isHorizontalWritingMode() ? localRepaintRect.maxY() : localRepaintRect.maxX()) + os;
endrow = std::lower_bound(m_rowPos.begin(), m_rowPos.end(), after) - m_rowPos.begin();
if (endrow == m_rowPos.size())
--endrow;
if (!endrow && m_rowPos[0] - table()->outerBorderBefore() <= after)
++endrow;
}
unsigned startcol = 0;
unsigned endcol = totalCols;
// FIXME: Implement RTL.
if (!m_forceSlowPaintPathWithOverflowingCell && style()->isLeftToRightDirection()) {
LayoutUnit start = (style()->isHorizontalWritingMode() ? localRepaintRect.x() : localRepaintRect.y()) - os;
Vector<LayoutUnit>& columnPos = table()->columnPositions();
startcol = std::lower_bound(columnPos.begin(), columnPos.end(), start) - columnPos.begin();
if ((startcol == columnPos.size()) || (startcol > 0 && (columnPos[startcol] > start)))
--startcol;
LayoutUnit end = (style()->isHorizontalWritingMode() ? localRepaintRect.maxX() : localRepaintRect.maxY()) + os;
endcol = std::lower_bound(columnPos.begin(), columnPos.end(), end) - columnPos.begin();
if (endcol == columnPos.size())
--endcol;
if (!endcol && columnPos[0] - table()->outerBorderStart() <= end)
++endcol;
}
if (startcol < endcol) {
if (!m_hasMultipleCellLevels && !m_overflowingCells.size()) {
// Draw the dirty cells in the order that they appear.
for (unsigned r = startrow; r < endrow; r++) {
for (unsigned c = startcol; c < endcol; c++) {
CellStruct& current = cellAt(r, c);
RenderTableCell* cell = current.primaryCell();
if (!cell || (r > startrow && primaryCellAt(r - 1, c) == cell) || (c > startcol && primaryCellAt(r, c - 1) == cell))
continue;
paintCell(cell, paintInfo, paintOffset);
}
}
} else {
// The overflowing cells should be scarce to avoid adding a lot of cells to the HashSet.
ASSERT(m_overflowingCells.size() < totalRows * totalCols * gMaxAllowedOverflowingCellRatioForFastPaintPath);
// To make sure we properly repaint the section, we repaint all the overflowing cells that we collected.
Vector<RenderTableCell*> cells;
copyToVector(m_overflowingCells, cells);
HashSet<RenderTableCell*> spanningCells;
for (unsigned r = startrow; r < endrow; r++) {
for (unsigned c = startcol; c < endcol; c++) {
CellStruct& current = cellAt(r, c);
if (!current.hasCells())
continue;
for (unsigned i = 0; i < current.cells.size(); ++i) {
if (m_overflowingCells.contains(current.cells[i]))
continue;
if (current.cells[i]->rowSpan() > 1 || current.cells[i]->colSpan() > 1) {
if (spanningCells.contains(current.cells[i]))
continue;
spanningCells.add(current.cells[i]);
}
cells.append(current.cells[i]);
}
}
}
// Sort the dirty cells by paint order.
if (!m_overflowingCells.size())
std::stable_sort(cells.begin(), cells.end(), compareCellPositions);
else
std::sort(cells.begin(), cells.end(), compareCellPositionsWithOverflowingCells);
int size = cells.size();
// Paint the cells.
for (int i = 0; i < size; ++i)
paintCell(cells[i], paintInfo, paintOffset);
}
}
}
void RenderTableSection::imageChanged(WrappedImagePtr, const IntRect*)
{
// FIXME: Examine cells and repaint only the rect the image paints in.
repaint();
}
void RenderTableSection::recalcCells()
{
m_cCol = 0;
m_cRow = -1;
clearGrid();
m_gridRows = 0;
for (RenderObject* row = firstChild(); row; row = row->nextSibling()) {
if (row->isTableRow()) {
m_cRow++;
m_cCol = 0;
if (!ensureRows(m_cRow + 1))
break;
RenderTableRow* tableRow = toRenderTableRow(row);
m_grid[m_cRow].rowRenderer = tableRow;
setRowLogicalHeightToRowStyleLogicalHeightIfNotRelative(&m_grid[m_cRow]);
for (RenderObject* cell = row->firstChild(); cell; cell = cell->nextSibling()) {
if (cell->isTableCell())
addCell(toRenderTableCell(cell), tableRow);
}
}
}
m_needsCellRecalc = false;
setNeedsLayout(true);
}
void RenderTableSection::setNeedsCellRecalc()
{
m_needsCellRecalc = true;
if (RenderTable* t = table())
t->setNeedsSectionRecalc();
}
void RenderTableSection::clearGrid()
{
int rows = m_gridRows;
while (rows--)
delete m_grid[rows].row;
}
int RenderTableSection::numColumns() const
{
int result = 0;
for (int r = 0; r < m_gridRows; ++r) {
for (int c = result; c < table()->numEffCols(); ++c) {
const CellStruct& cell = cellAt(r, c);
if (cell.hasCells() || cell.inColSpan)
result = c;
}
}
return result + 1;
}
void RenderTableSection::appendColumn(int pos)
{
for (int row = 0; row < m_gridRows; ++row)
m_grid[row].row->resize(pos + 1);
}
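// Split effective column 'pos' in two; 'first' appears to be the span assigned
// to the first part. Cell lists spanning the split are copied into the new
// column with the inColSpan bookkeeping updated.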
void RenderTableSection::splitColumn(int pos, int first)
{
if (m_cCol > pos)
m_cCol++;
for (int row = 0; row < m_gridRows; ++row) {
Row& r = *m_grid[row].row;
r.insert(pos + 1, CellStruct());
if (r[pos].hasCells()) {
r[pos + 1].cells.append(r[pos].cells);
RenderTableCell* cell = r[pos].primaryCell();
ASSERT(cell);
int colleft = cell->colSpan() - r[pos].inColSpan;
if (first > colleft)
r[pos + 1].inColSpan = 0;
else
r[pos + 1].inColSpan = first + r[pos].inColSpan;
} else {
r[pos + 1].inColSpan = 0;
}
}
}
// Hit Testing
bool RenderTableSection::nodeAtPoint(const HitTestRequest& request, HitTestResult& result, const LayoutPoint& pointInContainer, const LayoutPoint& accumulatedOffset, HitTestAction action)
{
// If we have no children then we have nothing to do.
if (!firstChild())
return false;
// Table sections cannot ever be hit tested. Effectively they do not exist.
// Just forward to our children always.
LayoutPoint adjustedLocation = accumulatedOffset + location();
if (hasOverflowClip() && !overflowClipRect(adjustedLocation).intersects(result.rectForPoint(pointInContainer)))
return false;
if (hasOverflowingCell()) {
for (RenderObject* child = lastChild(); child; child = child->previousSibling()) {
// FIXME: We have to skip over inline flows, since they can show up inside table rows
// at the moment (a demoted inline <form> for example). If we ever implement a
// table-specific hit-test method (which we should do for performance reasons anyway),
// then we can remove this check.
if (child->isBox() && !toRenderBox(child)->hasSelfPaintingLayer()) {
LayoutPoint childPoint = flipForWritingMode(toRenderBox(child), adjustedLocation, ParentToChildFlippingAdjustment);
if (child->nodeAtPoint(request, result, pointInContainer, childPoint, action)) {
updateHitTestResult(result, toLayoutPoint(pointInContainer - childPoint));
return true;
}
}
}
return false;
}
LayoutPoint location = pointInContainer - toLayoutSize(adjustedLocation);
if (style()->isFlippedBlocksWritingMode()) {
if (style()->isHorizontalWritingMode())
location.setY(height() - location.y());
else
location.setX(width() - location.x());
}
LayoutUnit offsetInColumnDirection = style()->isHorizontalWritingMode() ? location.y() : location.x();
// Find the first row that starts after offsetInColumnDirection.
unsigned nextRow = std::upper_bound(m_rowPos.begin(), m_rowPos.end(), offsetInColumnDirection) - m_rowPos.begin();
if (nextRow == m_rowPos.size())
return false;
// Now set hitRow to the index of the hit row, or 0.
unsigned hitRow = nextRow > 0 ? nextRow - 1 : 0;
Vector<LayoutUnit>& columnPos = table()->columnPositions();
LayoutUnit offsetInRowDirection = style()->isHorizontalWritingMode() ? location.x() : location.y();
if (!style()->isLeftToRightDirection())
offsetInRowDirection = columnPos[columnPos.size() - 1] - offsetInRowDirection;
unsigned nextColumn = std::lower_bound(columnPos.begin(), columnPos.end(), offsetInRowDirection) - columnPos.begin();
if (nextColumn == columnPos.size())
return false;
unsigned hitColumn = nextColumn > 0 ? nextColumn - 1 : 0;
CellStruct& current = cellAt(hitRow, hitColumn);
// If the cell is empty, there's nothing to do
if (!current.hasCells())
return false;
for (int i = current.cells.size() - 1; i >= 0; --i) {
RenderTableCell* cell = current.cells[i];
LayoutPoint cellPoint = flipForWritingMode(cell, adjustedLocation, ParentToChildFlippingAdjustment);
if (static_cast<RenderObject*>(cell)->nodeAtPoint(request, result, pointInContainer, cellPoint, action)) {
updateHitTestResult(result, toLayoutPoint(pointInContainer - cellPoint));
return true;
}
}
return false;
}
} // namespace WebCore<|fim▁end|> | cell->setNeedsLayout(true);
if (!table()->selfNeedsLayout() && cell->checkForRepaintDuringLayout()) {
if (!statePusher.didPush()) {
// Technically, we should also push state for the row, but since |
<|file_name|>vaccines.js<|end_file_name|><|fim▁begin|>import { fromJS, OrderedMap } from 'immutable';
const vaccines = fromJS([
{
name: 'AVA (BioThrax)',
id: '8b013618-439e-4829-b88f-98a44b420ee8',
diseases: ['Anthrax'],
},
{
name: 'VAR (Varivax)',
id: 'f3e08a56-003c-4b46-9dea-216298401ca0',
diseases: ['Varicella (Chickenpox)'],
},
{
name: 'MMRV (ProQuad)',
id: '3373721d-3d14-490c-9fa9-69a223888322',
diseases: [
'Varicella (Chickenpox)',
'Measles',
'Mumps',
'Rubella (German Measles)',
],
},
{
name: 'HepA (Havrix, Vaqta)',
id: 'a9144edf-13a2-4ce5-b6af-14eb38fd848c',
diseases: ['Hepatitis A'],
},
{
name: 'HepA-HepB (Twinrix)',
id: '6888fd1a-af4f-4f33-946d-40d4c473c9cc',
diseases: ['Hepatitis A', 'Hepatitis B'],
},
{
name: 'HepB (Engerix-B, Recombivax HB)',
id: 'ca079856-a561-4bc9-9bef-e62429ed3a38',
diseases: ['Hepatitis B'],
},
{
name: 'Hib-HepB (Comvax)',
id: '7305d769-0d1e-4bef-bd09-6998dc839825',
diseases: ['Hepatitis B', 'Haemophilus influenzae type b (Hib)'],
},
{
name: 'Hib (ActHIB, PedvaxHIB, Hiberix)',
id: 'd241f0c7-9920-4bc6-8f34-288a13e03f4d',
diseases: ['Haemophilus influenzae type b (Hib)'],
},
{
name: 'HPV4 (Gardasil)',
id: 'c2fef03c-db7f-483b-af70-50560712b189',
diseases: ['Human Papillomavirus (HPV)'],
},
{
name: 'HPV2 (Cervarix)',
id: '286f55e4-e727-4fc4-86b0-5a08ea712a77',
diseases: ['Human Papillomavirus (HPV)'],
},
{
name: 'TIV (Afluria, Agriflu, FluLaval, Fluarix, Fluvirin, Fluzone, Fluzone High-Dose, Fluzone Intradermal)', // eslint-disable-line max-len
id: '60e85a31-6a54-48e1-b0b7-deb28120675b',
diseases: ['Seasonal Influenza (Flu)'],
},
{
name: 'LAIV (FluMist)',
id: '9e67e321-9a7f-426f-ba9b-28885f93f9b9',
diseases: ['Seasonal Influenza (Flu)'],
},
{
name: 'JE (Ixiaro)',
id: '5ce00584-3350-442d-ac6c-7f19567eff8a',
diseases: ['Japanese Encephalitis'],<|fim▁hole|> {
name: 'MMR (M-M-R II)',
id: 'd10b7bf0-d51e-4117-a6a4-08bdb5cb682a',
diseases: ['Measles', 'Mumps', 'Rubella (German Measles)'],
},
{
name: 'MCV4 (Menactra)',
id: '6295fe11-f0ce-4967-952c-f271416cc300',
diseases: ['Meningococcal'],
},
{
name: 'MPSV4 (Menomune)',
id: '65f6d6d0-6dd8-49c9-95da-ed9fa403ae96',
diseases: ['Meningococcal'],
},
{
name: 'MODC (Menveo)',
id: 'be10b480-7934-46be-a488-66540aac2881',
diseases: ['Meningococcal'],
},
{
name: 'Tdap (Adacel, Boostrix)',
id: '0c6c33fb-f4dc-44c6-8684-625099f6fa21',
diseases: ['Pertussis (Whooping Cough)', 'Tetanus (Lockjaw)', 'Diphtheria'],
},
{
name: 'PCV13 (Prevnar13)',
id: 'd8c5a723-21e2-49a6-a921-705da16563e1',
diseases: ['Pneumococcal'],
},
{
name: 'PPSV23 (Pneumovax 23)',
id: '4005de2f-8e6d-40ae-bb5f-068ac56885b8',
diseases: ['Pneumococcal'],
},
{
name: 'Polio (Ipol)',
id: '9c1582f2-8a7b-4bae-8ba5-656efe33fb29',
diseases: ['Polio'],
},
{
name: 'Rabies (Imovax Rabies, RabAvert)',
id: '2bfeeb1f-b7a7-4ce6-aae1-72e840a93e2e',
diseases: ['Rabies'],
},
{
name: 'RV1 (Rotarix)',
id: '8ddfa840-7558-469a-a53b-19a40d016518',
diseases: ['Rotavirus'],
},
{
name: 'RV5 (RotaTeq)',
id: '9281ddcb-5ef3-47e6-a249-6b2b8bee1e7f',
diseases: ['Rotavirus'],
},
{
name: 'ZOS (Zostavax)',
id: '2921b034-8a4c-46f5-9753-70a112dfec3f',
diseases: ['Shingles (Herpes Zoster)'],
},
{
name: 'Vaccinia (ACAM2000)',
id: 'e26378f4-5d07-4b5f-9c93-53816c0faf9f',
diseases: ['Smallpox'],
},
{
name: 'DTaP (Daptacel, Infanrix)',
id: 'b23e765e-a05b-4a24-8095-03d79e47a8aa',
diseases: [
'Tetanus (Lockjaw)',
'Pertussis (Whooping Cough)',
'Diphtheria',
],
},
{
name: 'Td (Decavac, generic)',
id: '1af45230-cb2a-4242-81ac-2430cd64f8ce',
diseases: ['Tetanus (Lockjaw)', 'Diphtheria'],
},
{
name: 'DT (-generic-)',
id: '6eb77e28-aaa1-4e29-b124-5793a4bd6f1f',
diseases: ['Tetanus (Lockjaw)', 'Diphtheria'],
},
{
name: 'TT (-generic-)',
id: 'd6cf7277-831c-43c6-a1fa-7109d3325168',
diseases: ['Tetanus (Lockjaw)'],
},
{
name: 'DTaP-IPV (Kinrix)',
id: 'a8ecfef5-5f09-442c-84c3-4dfbcd99b3b8',
diseases: [
'Tetanus (Lockjaw)',
'Polio',
'Pertussis (Whooping Cough)',
'Diphtheria',
],
},
{
name: 'DTaP-HepB-IPV (Pediarix)',
id: '10bc0626-7b0a-4a42-b1bf-2742f0435c37',
diseases: [
'Tetanus (Lockjaw)',
'Polio',
'Hepatitis B',
'Pertussis (Whooping Cough)',
'Diphtheria',
],
},
{
name: 'DTaP-IPV/Hib (Pentacel)',
id: 'dcbb9691-1544-44fc-a9ca-351946010876',
diseases: [
'Tetanus (Lockjaw)',
'Polio',
'Haemophilus influenzae type b (Hib)',
'Pertussis (Whooping Cough)',
'Diphtheria',
],
},
{
name: 'DTaP/Hib',
id: 'e817c55d-e3db-4963-9fec-04d5823f6915',
diseases: [
'Tetanus (Lockjaw)',
'Diphtheria',
'Haemophilus influenzae type b (Hib)',
'Pertussis (Whooping Cough)',
],
},
{
name: 'BCG (TICE BCG, Mycobax)',
id: '8f2049a1-a1e3-44e1-947e-debbf3cafecc',
diseases: ['Tuberculosis (TB)'],
},
{
name: 'Typhoid Oral (Vivotif)',
id: '060f44be-e1e7-4575-ba0f-62611f03384b',
diseases: ['Typhoid Fever'],
},
{
name: 'Typhoid Polysaccharide (Typhim Vi)',
id: '87009829-1a48-4330-91e1-6bcd7ab04ee1',
diseases: ['Typhoid Fever'],
},
{
name: 'YF (YF-Vax)',
id: '24d5bfc4-d69a-4311-bb10-8980dddafa20',
diseases: ['Yellow Fever'],
},
]);
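// Index the vaccines by id, then sort case-insensitively by name for display.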
const keyedVaccines = vaccines.reduce((result, item) => (
result.set(item.get('id'), item)
), OrderedMap());
export default keyedVaccines.sortBy(vaccine => vaccine.get('name').toLowerCase());<|fim▁end|> | }, |
<|file_name|>InvFFT.java<|end_file_name|><|fim▁begin|>/*
* Zorbage: an algebraic data hierarchy for use in numeric processing.
*
* Copyright (c) 2016-2021 Barry DeZonia All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this list
* of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice, this
* list of conditions and the following disclaimer in the documentation and/or other
* materials provided with the distribution.
*
* Neither the name of the <copyright holder> nor the names of its contributors may
* be used to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
package nom.bdezonia.zorbage.algorithm;
import java.math.BigDecimal;
import java.math.MathContext;
import nom.bdezonia.zorbage.algebra.Addition;
import nom.bdezonia.zorbage.algebra.Algebra;
import nom.bdezonia.zorbage.algebra.Conjugate;
import nom.bdezonia.zorbage.algebra.Invertible;
import nom.bdezonia.zorbage.algebra.Multiplication;
import nom.bdezonia.zorbage.algebra.RealConstants;
import nom.bdezonia.zorbage.algebra.SetComplex;
import nom.bdezonia.zorbage.algebra.Trigonometric;
import nom.bdezonia.zorbage.algebra.Unity;
import nom.bdezonia.zorbage.datasource.IndexedDataSource;
/**
*
* @author Barry DeZonia
*
*/
public class InvFFT {
// do not instantiate
private InvFFT() {}
	/**
	 * Compute the inverse fast Fourier transform of a complex data source,
	 * writing the result into an output source of the same power-of-two size.
	 *
	 * @param <T> the complex algebra type
	 * @param <U> the complex number type
	 * @param <V> the real algebra type
	 * @param <W> the real number type
	 * @param cmplxAlg the complex algebra used for arithmetic
	 * @param realAlg the real algebra used for trigonometry and constants
	 * @param a the input complex data source (size must be a power of two)
	 * @param b the output complex data source (size must match the input)
	 */
public static
<T extends Algebra<T,U> & Addition<U> & Multiplication<U> & Conjugate<U>,
U extends SetComplex<W>,
V extends Algebra<V,W> & Trigonometric<W> & RealConstants<W> & Unity<W> &
Multiplication<W> & Addition<W> & Invertible<W>,
W>
void compute(T cmplxAlg, V realAlg, IndexedDataSource<U> a,IndexedDataSource<U> b)
{<|fim▁hole|> if (aSize != FFT.enclosingPowerOf2(aSize))
throw new IllegalArgumentException("input size is not a power of 2");
if (aSize != bSize)
throw new IllegalArgumentException("output size does not match input size");
U one_over_n = cmplxAlg.construct((BigDecimal.ONE.divide(BigDecimal.valueOf(aSize), new MathContext(100))).toString());
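		// Inverse DFT via the conjugation identity: invfft(a) = conj(fft(conj(a))) / n.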
nom.bdezonia.zorbage.algorithm.Conjugate.compute(cmplxAlg, a, b);
FFT.compute(cmplxAlg, realAlg, b, b);
nom.bdezonia.zorbage.algorithm.Conjugate.compute(cmplxAlg, b, b);
Scale.compute(cmplxAlg, one_over_n, b, b);
}
}<|fim▁end|> | long aSize = a.size();
long bSize = b.size(); |
<|file_name|>parse-server.py<|end_file_name|><|fim▁begin|>import json
import sys
import requests
from collections import Counter
from wapy.api import Wapy
from http.server import BaseHTTPRequestHandler, HTTPServer
wapy = Wapy('frt6ajvkqm4aexwjksrukrey')
def removes(yes):
no = ["Walmart.com", ".", ","]
for x in no:
yes = yes.replace(x, '')
return yes
def post_some_dict(dict):
headers = {'Content-type': 'application/json'}
r = requests.post("http://127.0.0.1:5000/search", data=json.dumps(dict), headers=headers)
return r.text
def parse_image(image):
out = json.loads(post_some_dict({"image_url": image}))['titles']
print(out)
#out = [x for x in out if 'walmart' in x]
threshold = len(out)-1
#out = [x[27:-9] for x in out]
#print(out)
large = []
for line in out:
line = line.replace('-', '')
line = removes(line)
line = line.split(' ')
for word in line:
large.append(word)
#print(large)
c = Counter(large).most_common()
keywords = []
<|fim▁hole|> if x[1] > threshold:
keywords.append(x[0])
print(keywords)
return ' '.join(keywords)
def parse_wallmart(keywords):
products = wapy.search(' '.join(keywords))
out = {}
out['name'] = products[0].name
out['rating'] = products[0].customer_rating
out['price'] = products[0].sale_price
return json.dumps(out)
class StoreHandler(BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header('Content-type', 'text/json')
self.end_headers()
        # `fh` was undefined here in the original code; read the stored
        # upload explicitly so the handler does not raise a NameError
        with open('/var/www/html/image.jpg', 'rb') as fh:
            self.wfile.write(fh.read())
def do_POST(self):
self.send_response(200)
length = self.headers['content-length']
data = self.rfile.read(int(length))
with open('/var/www/html/image.jpg', 'wb') as fh:
fh.write(data)
self.send_header('Content-type', 'text/json')
self.end_headers()
self.wfile.write(parse_wallmart(parse_image('http://45.33.95.66/image.jpg')).encode())
server = HTTPServer(('', 8081), StoreHandler)
server.serve_forever()<|fim▁end|> | for x in c: |
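A note on the row above: the completion `for x in c:` closes the keyword-extraction loop in parse_image(), which keeps words occurring in more than `threshold` of the scraped titles. A self-contained sketch of that filter (the sample titles are made up):

from collections import Counter

titles = ["red mug 12 oz", "red coffee mug", "mug red ceramic"]
words = [w for t in titles for w in t.split()]
threshold = len(titles) - 1                       # same rule as the handler above
keywords = [w for w, n in Counter(words).most_common() if n > threshold]
print(keywords)                                   # ['red', 'mug']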
<|file_name|>unreachable-arm.rs<|end_file_name|><|fim▁begin|>#![feature(box_patterns)]
#![feature(box_syntax)]
#![allow(dead_code)]
#![deny(unreachable_patterns)]
enum Foo { A(Box<Foo>, isize), B(usize), }
fn main() {
match Foo::B(1) {<|fim▁hole|> Foo::A(_, 1) => { } //~ ERROR unreachable pattern
_ => { }
}
}<|fim▁end|> | Foo::B(_) | Foo::A(box _, 1) => { } |
<|file_name|>views.py<|end_file_name|><|fim▁begin|># Standard library imports
import logging
# Third party imports
from django.contrib import messages
from django.contrib.auth.decorators import user_passes_test
from django.contrib.admin.utils import NestedObjects
from django.urls import reverse
from django.db import DEFAULT_DB_ALIAS
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render, get_object_or_404
from django.views.decorators.csrf import csrf_exempt
from github import Github
# Local application/library imports
from dojo.forms import GITHUBForm, DeleteGITHUBConfForm
from dojo.models import GITHUB_Conf
from dojo.utils import add_breadcrumb
logger = logging.getLogger(__name__)
@csrf_exempt
def webhook(request):
return HttpResponse('')
@user_passes_test(lambda u: u.is_staff)
def express_new_github(request):
return HttpResponse('')
@user_passes_test(lambda u: u.is_staff)
def new_github(request):
if request.method == 'POST':
gform = GITHUBForm(request.POST, instance=GITHUB_Conf())
if gform.is_valid():
try:
api_key = gform.cleaned_data.get('api_key')
g = Github(api_key)
user = g.get_user()
logger.debug('Using user ' + user.login)
new_j = gform.save(commit=False)
new_j.api_key = api_key
new_j.save()
messages.add_message(request,
messages.SUCCESS,
'Github Configuration Successfully Created.',
extra_tags='alert-success')
return HttpResponseRedirect(reverse('github', ))
except Exception as info:
logger.error(info)
messages.add_message(request,
messages.ERROR,
'Unable to authenticate on github.',
extra_tags='alert-danger')
return HttpResponseRedirect(reverse('github', ))
else:
gform = GITHUBForm()
add_breadcrumb(title="New Github Configuration", top_level=False, request=request)
return render(request, 'dojo/new_github.html',
{'gform': gform})
@user_passes_test(lambda u: u.is_staff)
def github(request):
confs = GITHUB_Conf.objects.all()
add_breadcrumb(title="Github List", top_level=not len(request.GET), request=request)
return render(request,
'dojo/github.html',
{'confs': confs,
})
@user_passes_test(lambda u: u.is_staff)
def delete_github(request, tid):
github_instance = get_object_or_404(GITHUB_Conf, pk=tid)
# eng = test.engagement
# TODO Make Form<|fim▁hole|>
if request.method == 'POST':
if 'id' in request.POST and str(github_instance.id) == request.POST['id']:
form = DeleteGITHUBConfForm(request.POST, instance=github_instance)
if form.is_valid():
github_instance.delete()
messages.add_message(request,
messages.SUCCESS,
'Github Conf and relationships removed.',
extra_tags='alert-success')
return HttpResponseRedirect(reverse('github'))
collector = NestedObjects(using=DEFAULT_DB_ALIAS)
collector.collect([github_instance])
rels = collector.nested()
add_breadcrumb(title="Delete", top_level=False, request=request)
return render(request, 'dojo/delete_github.html',
{'inst': github_instance,
'form': form,
'rels': rels,
'deletable_objects': rels,
})<|fim▁end|> | form = DeleteGITHUBConfForm(instance=github_instance) |
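A note on the row above: delete_github previews a cascade delete with Django's NestedObjects collector before anything is removed. The same pattern in isolation, as a hedged sketch (works for any model instance; Django must be configured):

from django.contrib.admin.utils import NestedObjects
from django.db import DEFAULT_DB_ALIAS

def deletion_preview(instance):
    # collect the instance plus every row a cascade delete would take with it
    collector = NestedObjects(using=DEFAULT_DB_ALIAS)
    collector.collect([instance])
    return collector.nested()  # nested list: [obj, [dependent1, dependent2, ...]]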
<|file_name|>components.py<|end_file_name|><|fim▁begin|>"""
Checking for connected components in a graph.
"""
__author__ = "Sergio J. Rey <[email protected]>"
__all__ = ["check_contiguity"]
from operator import lt
def is_component(w, ids):
"""Check if the set of ids form a single connected component
Parameters
----------
    w : spatial weights object
ids : list<|fim▁hole|> identifiers of units that are tested to be a single connected
component
Returns
-------
True : if the list of ids represents a single connected component
False : if the list of ids forms more than a single connected component
"""
components = 0
marks = dict([(node, 0) for node in ids])
q = []
for node in ids:
if marks[node] == 0:
components += 1
q.append(node)
if components > 1:
return False
while q:
node = q.pop()
marks[node] = components
others = [neighbor for neighbor in w.neighbors[node]
if neighbor in ids]
for other in others:
if marks[other] == 0 and other not in q:
q.append(other)
return True
def check_contiguity(w, neighbors, leaver):
"""Check if contiguity is maintained if leaver is removed from neighbors
Parameters
----------
w : spatial weights object
simple contiguity based weights
neighbors : list
                nodes that are to be checked if they form a single
connected component
leaver : id
a member of neighbors to check for removal
Returns
-------
True : if removing leaver from neighbors does not break contiguity
of remaining set
in neighbors
False : if removing leaver from neighbors breaks contiguity
Example
-------
Setup imports and a 25x25 spatial weights matrix on a 5x5 square region.
>>> import pysal.lib as lps
>>> w = lps.weights.lat2W(5, 5)
Test removing various areas from a subset of the region's areas. In the
first case the subset is defined as observations 0, 1, 2, 3 and 4. The
test shows that observations 0, 1, 2 and 3 remain connected even if
observation 4 is removed.
>>> check_contiguity(w,[0,1,2,3,4],4)
True
>>> check_contiguity(w,[0,1,2,3,4],3)
False
>>> check_contiguity(w,[0,1,2,3,4],0)
True
>>> check_contiguity(w,[0,1,2,3,4],1)
False
>>>
"""
ids = neighbors[:]
ids.remove(leaver)
return is_component(w, ids)
class Graph(object):
def __init__(self, undirected=True):
self.nodes = set()
self.edges = {}
self.cluster_lookup = {}
self.no_link = {}
self.undirected = undirected
def add_edge(self, n1, n2, w):
self.nodes.add(n1)
self.nodes.add(n2)
self.edges.setdefault(n1, {}).update({n2: w})
if self.undirected:
self.edges.setdefault(n2, {}).update({n1: w})
def connected_components(self, threshold=0.9, op=lt):
if not self.undirected:
warn = "Warning, connected _components not "
warn += "defined for a directed graph"
print(warn)
return None
else:
nodes = set(self.nodes)
components, visited = [], set()
while len(nodes) > 0:
connected, visited = self.dfs(
nodes.pop(), visited, threshold, op)
connected = set(connected)
for node in connected:
if node in nodes:
nodes.remove(node)
subgraph = Graph()
subgraph.nodes = connected
subgraph.no_link = self.no_link
for s in subgraph.nodes:
for k, v in list(self.edges.get(s, {}).items()):
if k in subgraph.nodes:
subgraph.edges.setdefault(s, {}).update({k: v})
if s in self.cluster_lookup:
subgraph.cluster_lookup[s] = self.cluster_lookup[s]
components.append(subgraph)
return components
def dfs(self, v, visited, threshold, op=lt, first=None):
aux = [v]
visited.add(v)
if first is None:
first = v
for i in (n for n, w in list(self.edges.get(v, {}).items())
if op(w, threshold) and n not in visited):
x, y = self.dfs(i, visited, threshold, op, first)
aux.extend(x)
visited = visited.union(y)
return aux, visited<|fim▁end|> | |
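A note on the row above: Graph.connected_components partitions the node set with a DFS that only follows edges whose weight passes op(w, threshold) (strictly less-than by default). A small usage sketch, assuming the Graph class above is in scope:

g = Graph()
g.add_edge('a', 'b', 0.2)
g.add_edge('b', 'c', 0.3)
g.add_edge('x', 'y', 0.1)                        # nothing links {a, b, c} to {x, y}
parts = g.connected_components(threshold=0.9)    # edges with w < 0.9 are traversed
print([sorted(p.nodes) for p in parts])          # two components: ['a', 'b', 'c'] and ['x', 'y']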
<|file_name|>models.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8; -*-
#
# @file models.py
# @brief coll-gate accession module models.
# @author Frédéric SCHERMA (INRA UMR1095), Medhi BOULNEMOUR (INRA UMR1095)
# @date 2016-09-01
# @copyright Copyright (c) 2016 INRA/CIRAD
# @license MIT (see LICENSE file)
# @details
import re
from django.contrib.auth.models import User
from django.contrib.postgres.fields import JSONField
from django.db import models
from django.db.models import Q, Prefetch
from django.utils import translation
from django.utils.translation import ugettext_lazy as _
from igdectk.common.models import ChoiceEnum, IntegerChoice
from accession import localsettings
from classification.models import ClassificationEntry
from descriptor.models import DescribableEntity
from descriptor.models import Layout
from main.models import Entity, EntitySynonym, ContentType, EntitySynonymType
class AccessionClassificationEntry(models.Model):
"""
M2M accession to classification entry with additional flags.
"""
# accession object
accession = models.ForeignKey('Accession', on_delete=models.PROTECT)
# classification entry object
classification_entry = models.ForeignKey(ClassificationEntry, on_delete=models.PROTECT)
# is a primary or secondary classification association
primary = models.BooleanField(default=False, db_index=True)
def natural_name(self):
return self.accession.name
class Meta:
index_together = (
('accession', 'classification_entry'),
('accession', 'primary')
)
class Accession(DescribableEntity):
"""
Accession entity defines a physical or virtual accession.
"""
# name pattern
NAME_RE = re.compile(r"^\S+.+\S+$", re.IGNORECASE)
# default name validator
NAME_VALIDATOR = {"type": "string", "minLength": 1, "maxLength": 255, "pattern": "^\S+.+\S+$"}
# non-unique primary name of the accession
name = models.CharField(max_length=255, db_index=True)
# unique GRC code of the accession
code = models.CharField(unique=True, max_length=255, db_index=True)
# primary classification as simple FK for a simple join
primary_classification_entry = models.ForeignKey(
ClassificationEntry, on_delete=models.PROTECT, related_name='primary_accessions', null=True)
# accession can have many classification but at least a primary
classifications_entries = models.ManyToManyField(
through=AccessionClassificationEntry, to=ClassificationEntry, related_name='accession_set')
@classmethod
def get_defaults_columns(cls):
columns = {
'primary_classification_entry': {
                'label': _('Primary classification'),
'field': 'name',
'query': True, # False, # could be later, for the moment LEFT JOIN into the queryset
'format': {
'type': 'entity',
'model': 'classification.classificationentry',
'details': True
},
'available_operators': ['isnull', 'notnull', 'eq', 'neq', 'in', 'notin']
},
'layout': {
'label': _('Layout'),
'field': 'name',
'query': True,
'format': {
'type': 'layout',
'model': 'accession.accession'
},
'available_operators': ['isnull', 'notnull', 'eq', 'neq', 'in', 'notin']
},
'name': {
'label': _('Name'),
'query': False, # done by a prefetch related
'format': {
'type': 'string',
'model': 'accession.accession'
},
'available_operators': ['isnull', 'notnull', 'eq', 'neq', 'icontains']
},
'code': {
'label': _('Code'),
'query': False, # done by a prefetch related
'format': {
'type': 'string',
'model': 'accession.accession'
},
'available_operators': ['isnull', 'notnull', 'eq', 'neq', 'icontains']
},
'panels': {
'label': _('Linked panels'),
'field': 'name',
'query': False, # done by a prefetch related
'format': {
'type': 'entity',
'model': 'accession.accessionpanel'
},
'available_operators': [
'contains',
'not_contains',
'overlap',
'not_overlap'
],
'column_display': False,
'search_display': True
},
'classifications': {
'label': _('Classifications'),
'field': 'name',
'query': False, # done by a prefetch related
'format': {
'type': 'entity',
'model': 'classification.classificationentry',
},
'available_operators': [
'contains',
'not_contains',
'overlap',
'not_overlap'
],
'column_display': False,
'search_display': True
}
}
synonym_types = EntitySynonymType.objects.filter(target_model=ContentType.objects.get_for_model(Accession))
for synonym_type in synonym_types:
columns['&' + synonym_type.name] = {
'label': synonym_type.get_label(),
# 'field': 'synonym',
'query': False,
'format': {
'type': 'string',
'model': 'accession.accessionsynonym',
},
'available_operators': ['isnull', 'notnull', 'eq', 'neq', 'icontains']
}
if synonym_type.multiple_entry:
columns['&' + synonym_type.name]['column_display'] = False
columns['&' + synonym_type.name]['search_display'] = True
return columns
class Meta:
verbose_name = _("accession")
permissions = (
("get_accession", "Can get an accession"),
("list_accession", "Can list accessions"),
("search_accession", "Can search for accessions")
)
def natural_name(self):
return self.name
def details(self):
return {
'id': self.id,
'name': self.name,
}
@classmethod
def make_search_by_name(cls, term):
return Q(name__istartswith=term)
def audit_create(self, user):
return {
'name': self.name,
'code': self.code,
'primary_classification_entry': self.primary_classification_entry_id,
'layout': self.layout_id,
'descriptors': self.descriptors,
'comments': self.comments
}
def audit_update(self, user):
if hasattr(self, 'updated_fields'):
result = {'updated_fields': self.updated_fields}
if 'code' in self.updated_fields:
result['code'] = self.code
if 'name' in self.updated_fields:
result['name'] = self.name
if 'primary_classification_entry' in self.updated_fields:
result['primary_classification_entry'] = self.primary_classification_entry_id
if 'descriptors' in self.updated_fields:
if hasattr(self, 'updated_descriptors'):
result['descriptors'] = self.updated_descriptors
else:
result['descriptors'] = self.descriptors
if 'comments' in self.updated_fields:
result['comments'] = self.comments
return result
else:
return {
'name': self.name,
'code': self.code,
'primary_classification_entry': self.primary_classification_entry_id,
'descriptors': self.descriptors,
'comments': self.comments
}
def audit_delete(self, user):
return {
'name': self.name
}
def data(self, field=None, default=None):
data = self.layout.parameters.get('data')
if data and field in data:
return data.get(field)
else:
return default
@classmethod
def export_list(cls, columns, cursor, search, filters, order_by, limit, user):
res_columns = []
items = []
if not order_by:
order_by = ['id']
from main.cursor import CursorQuery
cq = CursorQuery(Accession)
if search:
cq.filter(search)
if filters:
cq.filter(filters)
# @todo filter given user permission per accession (v2)
# accession panels ids
cq.m2m_to_array_field(
relationship=AccessionPanel.accessions,
selected_field='accessionpanel_id',
from_related_field='id',
to_related_field='accession_id',
alias='panels'
)
# classifications entries
cq.m2m_to_array_field(
relationship=Accession.classifications_entries,
selected_field='classification_entry_id',
from_related_field='id',
to_related_field='accession_id',
alias='classifications'
)
# synonyms
cq.set_synonym_model(AccessionSynonym)
cq.prefetch_related(Prefetch(
"synonyms",
queryset=AccessionSynonym.objects.all().order_by('synonym_type', 'language')
))
cq.select_related('primary_classification_entry->name', 'primary_classification_entry->rank')
cq.cursor(cursor, order_by)
cq.order_by(order_by).limit(limit)
synonym_types = dict(EntitySynonymType.objects.filter(target_model=ContentType.objects.get_for_model(Accession)).values_list('id', 'name'))
for accession in cq:
item = []
for col in columns:
if col == 'id':
item.append(str(accession.pk))
elif col == 'name':
item.append(accession.name)
elif col == 'code':
item.append(accession.code)
elif col == 'primary_classification_entry':
item.append(str(accession.primary_classification_entry.name))
elif col == 'layout':
item.append(str(accession.layout.name))
elif col.startswith('#'):
# descriptors (@todo how to format at this level...)
descr = accession.descriptors[col[1:]]
if isinstance(descr, list):
v = '-'.join([str(x) for x in descr])
else:
v = str(descr)
item.append(v)
elif col.startswith('&'):
# synonyms
vals = []
for synonym in accession.synonyms.all():
synonym_type_name = synonym_types.get(synonym.synonym_type_id)
if col[1:] == synonym_type_name:
vals.append(synonym.language + ':' + synonym.name)
item.append("/".join(vals))
elif col.startswith('$'):
item.append("") # format
elif col.startswith('@'):
item.append("") # label
else:
item.append("")
items.append(item)
res_columns = columns
return res_columns, items
class AccessionSynonym(EntitySynonym):
"""
Synonym of accession model.
"""
# name validator, used with content validation, to avoid any whitespace before and after
NAME_VALIDATOR = {"type": "string", "minLength": 1, "maxLength": 128, "pattern": "^\S+.+\S+$"}
# code validator, used with content validation, to avoid any whitespace before and after
CODE_VALIDATOR = {"type": "string", "minLength": 1, "maxLength": 128, "pattern": r"^\S+.+\S+$"}
entity = models.ForeignKey(Accession, related_name='synonyms', on_delete=models.CASCADE)
class Meta:
verbose_name = _("accession synonym")
def is_primary(self):
"""
Is a primary name synonym.
:return: True if primary
"""
return self.synonym_type_id == localsettings.synonym_type_accession_name
def is_code(self):
"""
Is a code synonym.
:return: True if primary
"""
return self.synonym_type_id == localsettings.synonym_type_accession_code
class Batch(DescribableEntity):
"""
Batch for an accession.
"""
# name validator, used with content validation, to avoid any whitespace before and after
NAME_VALIDATOR = {"type": "string", "minLength": 3, "maxLength": 128, "pattern": r"^\S+.+\S+$"}
# unique name of the batch
name = models.CharField(unique=True, max_length=255, db_index=True)
# parent accession
accession = models.ForeignKey('Accession', related_name='batches', on_delete=models.PROTECT)
# direct parent batches
batches = models.ManyToManyField('Batch', related_name='children')
# parent location
location = models.ForeignKey('StorageLocation', related_name='batches', on_delete=models.PROTECT, null=True)
@classmethod
def get_defaults_columns(cls):
return {
'layout': {
'label': _('Layout'),
'field': 'name',
'query': True,
'format': {
'type': 'layout',
'model': 'accession.batch'
},
'available_operators': ['isnull', 'notnull', 'eq', 'neq', 'in', 'notin']
},
'name': {
'label': _('Name'),
'query': False, # done by a prefetch related
'format': {
'type': 'string',
'model': 'accession.batch'
},
'available_operators': ['isnull', 'notnull', 'eq', 'neq', 'icontains']
},
'location': {
'label': _('Storage location'),
'field': 'name',
'query': False, # done by a prefetch related
'format': {
'type': 'entity',
'model': 'accession.storagelocation'
},
'available_operators': [
'contains',
'not_contains',
'overlap',
'not_overlap'
],
},
'panels': {
'label': _('Linked panels'),
'field': 'name',
'query': False, # done by a prefetch related
'format': {
'type': 'entity',
'model': 'accession.batchpanel'
},
'available_operators': [
'contains',
'not_contains',
'overlap',
'not_overlap'
],
'column_display': False,
'search_display': True
}
}
class Meta:
verbose_name = _("batch")
default_related_name = "batches"
permissions = (
("get_batch", "Can get a batch"),
("list_batch", "Can list batch"),
("search_batch", "Can search for batches")
)
def natural_name(self):
return self.name
@classmethod
def make_search_by_name(cls, term):
return Q(name__istartswith=term)
def audit_create(self, user):
return {
'name': self.name,
'accession': self.accession_id,
'layout': self.layout_id,
'descriptors': self.descriptors,
'comments': self.comments
}
def audit_update(self, user):
if hasattr(self, 'updated_fields'):
result = {'updated_fields': self.updated_fields}
if 'name' in self.updated_fields:
result['name'] = self.name
if 'descriptors' in self.updated_fields:
if hasattr(self, 'updated_descriptors'):
result['descriptors'] = self.updated_descriptors
else:
result['descriptors'] = self.descriptors
if 'comments' in self.updated_fields:
result['comments'] = self.comments
return result
else:
return {
'name': self.name,
'descriptors': self.descriptors,
'comments': self.comments
}
def audit_delete(self, user):
return {
'name': self.name
}
class ActionType(Entity):
"""
Type of action.
"""
# unique name of the action
name = models.CharField(unique=True, max_length=128, db_index=True)
<|fim▁hole|> # Customisable label of the action.
# It is i18nized using a JSON dict with language code as key and label as string value.
label = JSONField(default={})
# Format of the action (can define a lot of parameters, like input, output, process...)
format = JSONField(default={"steps": []})
# informative description.
description = models.TextField(blank=True, default="")
class Meta:
verbose_name = _("batch action type")
def natural_name(self):
return self.get_label()
@classmethod
def get_defaults_columns(cls):
return {
'name': {
'label': _('Name'),
'query': False,
'format': {
'type': 'string',
'model': 'accession.action'
},
'available_operators': ['isnull', 'notnull', 'eq', 'neq', 'icontains']
},
'@label': {
'label': _('Label'),
'query': False,
'format': {
'type': 'string',
'model': 'accession.action'
},
'available_operators': ['isnull', 'notnull', 'eq', 'neq', 'icontains']
}
}
def set_label(self, lang, label):
"""
Set the label for a specific language.
:param str lang: language code string
:param str label: Localized label
:note Model instance save() is not called.
"""
self.label[lang] = label
def get_label(self):
"""
        Get the label for this action type in the current locale.
"""
lang = translation.get_language()
return self.label.get(lang, "")
def on_client_cache_update(self):
return [{
'category': 'accession',
'name': "action_types:*",
'values': None
}]
def on_server_cache_update(self):
return [{
'category': 'accession',
'name': "action_types:*",
'values': None
}]
def in_usage(self):
return Action.objects.filter(type_id=self.id).exists()
def data(self, field=None, default=None):
data = self.format.get('data')
if data and field in data:
return data.get(field)
else:
return default
class Action(Entity):
"""
An action defines a process of creation or update of one or more entities like accessions or batches.
And considers a suit of steps, as a sequential pipeline.
"""
# free name, starting and finishing by a non space character and at least 3 length
NAME_RE = re.compile(r'^\S+.+\S+$', re.IGNORECASE)
# default name validator
NAME_VALIDATOR = {"type": "string", "minLength": 3, "maxLength": 32, "pattern": "^\S+.+\S+$"}
# default name validator optional
NAME_VALIDATOR_OPTIONAL = {
"type": "string", "minLength": 3, "maxLength": 32, "pattern": "^\S+.+\S+$", "required": False}
# display name
name = models.CharField(max_length=128, unique=True, db_index=True)
# actor of the action
user = models.ForeignKey(User, null=True, on_delete=models.SET_NULL)
# action type
action_type = models.ForeignKey(to=ActionType, on_delete=models.PROTECT)
# associated steps data
data = JSONField(default={"steps": []})
# is the action completed
completed = models.BooleanField(default=False, null=False, blank=False)
# informative description.
description = models.TextField(blank=True, default="")
# format of the action, it is a replication of the format field of the action type to keep consistency for audit
format = JSONField(default={"steps": []})
class Meta:
verbose_name = _("action")
default_permissions = list()
@classmethod
def get_defaults_columns(cls):
return {
'name': {
'label': _('Name'),
'query': False,
'format': {
'type': 'string',
'model': 'accession.action'
},
'available_operators': ['isnull', 'notnull', 'eq', 'neq', 'icontains']
},
'created_date': {
'label': _('Creation'),
'query': False,
'format': {
'type': 'datetime',
},
'available_operators': ['lte', 'gte', 'eq', 'neq']
},
'action_type': {
'label': _('Type'),
'field': 'name',
'query': True,
'format': {
'type': 'entity',
'model': 'accession.actiontype'
},
'available_operators': ['eq', 'neq', 'in', 'notin']
},
'completed': {
'label': _('Completed'),
'query': False,
'format': {
'type': 'boolean'
},
'available_operators': ['eq']
},
'user': {
'label': _('Author'),
'query': False,
'format': {
'type': 'user',
},
'available_operators': ['lte', 'gte', 'eq', 'neq']
},
}
def natural_name(self):
return self.name
class ActionDataType(ChoiceEnum):
"""
Type of a action data.
"""
INPUT = IntegerChoice(0, _('Input'))
OUTPUT = IntegerChoice(1, _('Output'))
class ActionData(models.Model):
"""
Purely the data (input or output) for each step of each action.
Input can be not defined.
"""
# related action
action = models.ForeignKey(Action, on_delete=models.CASCADE)
# step 0 based index
step_index = models.IntegerField(default=0)
# data array, empty by default
data = JSONField(default=[])
    # type of data (input or output)
data_type = models.IntegerField(choices=ActionDataType.choices(), default=ActionDataType.INPUT.value)
class Meta:
unique_together = (('action', 'step_index', 'data_type'),)
class ActionToEntity(models.Model):
"""
List of managed entities per action.
"""
# related action
action = models.ForeignKey(Action, on_delete=models.CASCADE)
# content type of the target
entity_type = models.ForeignKey(ContentType, on_delete=models.DO_NOTHING)
# target entity id
entity_id = models.IntegerField(null=False, blank=False)
class Meta:
index_together = (("entity_type", "entity_id"),)
class PanelType(ChoiceEnum):
"""
Type of a panel.
"""
PERSISTENT = IntegerChoice(0, _('Persistent'))
WORKING = IntegerChoice(1, _('Working'))
class Panel(Entity):
"""
Panel abstract model
"""
# free name, starting and finishing by a non space character and at least 3 length
NAME_RE = re.compile(r'^\S+.+\S+$', re.IGNORECASE)
# default name validator
NAME_VALIDATOR = {"type": "string", "minLength": 3, "maxLength": 32, "pattern": "^\S+.+\S+$"}
# default name validator optional
NAME_VALIDATOR_OPTIONAL = {
"type": "string", "minLength": 3, "maxLength": 32, "pattern": "^\S+.+\S+$", "required": False}
# unique name of the panel
name = models.CharField(unique=True, max_length=255, db_index=True)
# panel type (persistent, working...)
panel_type = models.IntegerField(default=PanelType.PERSISTENT.value)
    # JSONB field mapping each descriptor model type id (key) to a descriptor value or value code.
descriptors = JSONField(default={})
    # It refers to a set of descriptor type models through a descriptor layout.
    # It can be null because defining descriptors is optional.
layout = models.ForeignKey(Layout, null=True, on_delete=models.PROTECT)
class Meta:
abstract = True
def natural_name(self):
return self.name
@classmethod
def make_search_by_name(cls, term):
return Q(name__istartswith=term)
class BatchPanel(Panel):
"""
Defines a collection of batches
"""
# list of batches
batches = models.ManyToManyField(Batch, related_name='panels')
@classmethod
def get_defaults_columns(cls):
return {
'layout': {
'label': _('Layout'),
'field': 'name',
'query': True,
'format': {
'type': 'layout',
'model': 'accession.batchpanel'
},
'available_operators': ['isnull', 'notnull', 'eq', 'neq', 'in', 'notin']
},
'name': {
'label': _('Name'),
'query': False, # done by a prefetch related
'format': {
'type': 'string',
'model': 'accession.batchpanel'
},
'available_operators': ['isnull', 'notnull', 'eq', 'neq', 'icontains']
},
'batches_amount': {
'label': _('Batches amount'),
'field': 'batches_amount',
'query': False,
'format': {
'type': 'int',
'model': 'accession.batchpanel'
},
'available_operators': ['isnull', 'notnull', 'eq', 'neq', 'gte', 'lte']
}
}
class Meta:
verbose_name = _("batch panel")
permissions = (
("get_batchpanel", "Can get a batch panel"),
("list_batchpanel", "Can list batch panel"),
)
class AccessionPanel(Panel):
"""
Defines a collection of accessions
"""
# related accessions
accessions = models.ManyToManyField(Accession, related_name='panels')
@classmethod
def get_defaults_columns(cls):
return {
'layout': {
'label': _('Layout'),
'field': 'name',
'query': True,
'format': {
'type': 'layout',
'model': 'accession.accessionpanel'
},
'available_operators': ['isnull', 'notnull', 'eq', 'neq', 'in', 'notin']
},
'name': {
'label': _('Name'),
'query': False, # done by a prefetch related
'format': {
'type': 'string',
'model': 'accession.accessionpanel'
},
'available_operators': ['isnull', 'notnull', 'eq', 'neq', 'icontains']
},
'accessions_amount': {
'label': _('Accessions amount'),
'field': 'accessions_amount',
'query': False,
'format': {
'type': 'int',
'model': 'accession.accessionpanel'
},
'available_operators': ['isnull', 'notnull', 'eq', 'neq', 'gte', 'lte']
}
}
class Meta:
verbose_name = _("accession panel")
permissions = (
("get_accessionpanel", "Can get a accession panel"),
("list_accessionpanel", "Can list accession panels"),
)
class StorageLocation(models.Model):
"""
Defines storage locations of batches.
    @todo why not an entity?
"""
# default name validator
NAME_VALIDATOR = {"type": "string", "minLength": 3, "maxLength": 32, "pattern": "^[a-zA-Z0-9\-\_]+$"}
# label validator
LABEL_VALIDATOR = {"type": "string", "minLength": 1, "maxLength": 128, "pattern": r"^[^\s]+(\s+[^\s]+)*$"}
# unique name of the panel
name = models.CharField(unique=True, max_length=255, db_index=True)
# Customisable label of the action.
# It is i18nized using a JSON dict with language code as key and label as string value.
label = JSONField(default={})
# Parent location
parent = models.ForeignKey('self', blank=True, null=True, related_name='children', on_delete=models.PROTECT)
class Meta:
verbose_name = _("storage location")
permissions = (
("get_storagelocation", "Can get a storage location"),
("list_storagelocation", "Can list storage locations"),
)
def get_label(self):
"""
        Get the label for this storage location in the current locale.
"""
lang = translation.get_language()
return self.label.get(lang, "")
def set_label(self, lang, label):
"""
Set the label for a specific language.
:param str lang: language code string
:param str label: Localized label
:note Model instance save() is not called.
"""
self.label[lang] = label
def natural_name(self):
return self.name
@classmethod
def make_search_by_name(cls, term):
return Q(name__istartswith=term)
def audit_create(self, user):
return {
'name': self.name,
# 'layout': self.layout_id,
# 'descriptors': self.descriptors,
# 'comments': self.comments
}
def audit_update(self, user):
if hasattr(self, 'updated_fields'):
result = {'updated_fields': self.updated_fields}
if 'name' in self.updated_fields:
result['name'] = self.name
# if 'descriptors' in self.updated_fields:
# if hasattr(self, 'updated_descriptors'):
# result['descriptors'] = self.updated_descriptors
# else:
# result['descriptors'] = self.descriptors
#
# if 'comments' in self.updated_fields:
# result['comments'] = self.comments
return result
else:
return {
'name': self.name,
# 'descriptors': self.descriptors,
# 'comments': self.comments
}
def audit_delete(self, user):
return {
'name': self.name
}<|fim▁end|> | |
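A note on the row above: ActionType and StorageLocation both store their translated labels as a plain JSON dict keyed by language code and read them back via translation.get_language(). The lookup reduces to ordinary dict access, sketched here without Django (the example strings are invented):

def get_label(labels, lang, fallback=""):
    # labels is e.g. {"en": "Seed lot", "fr": "Lot de semences"}
    return labels.get(lang, fallback)

print(get_label({"en": "Seed lot", "fr": "Lot de semences"}, "fr"))  # Lot de semences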
<|file_name|>WSQueryImpl.java<|end_file_name|><|fim▁begin|><|fim▁hole|> * Copyright SAIC
*
* Distributed under the OSI-approved BSD 3-Clause License.
* See http://ncip.github.com/cacore-sdk-pre411/LICENSE.txt for details.
*/
package gov.nih.nci.system.webservice;
import gov.nih.nci.system.applicationservice.ApplicationService;
import gov.nih.nci.system.client.proxy.ListProxy;
import gov.nih.nci.system.query.hibernate.HQLCriteria;
import gov.nih.nci.system.query.nestedcriteria.NestedCriteriaPath;
import gov.nih.nci.system.util.ClassCache;
import gov.nih.nci.system.webservice.util.WSUtils;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import java.util.StringTokenizer;
import javax.xml.rpc.ServiceException;
import org.apache.log4j.Logger;
import org.springframework.remoting.jaxrpc.ServletEndpointSupport;
public class WSQueryImpl extends ServletEndpointSupport implements WSQuery{
private static Logger log = Logger.getLogger(WSQueryImpl.class);
private static ApplicationService applicationService;
private static ClassCache classCache;
private static int resultCountPerQuery = 1000;
public void destroy() {
applicationService = null;
classCache = null;
resultCountPerQuery = 0;
}
protected void onInit() throws ServiceException {
classCache = (ClassCache)getWebApplicationContext().getBean("ClassCache");
applicationService = (ApplicationService)getWebApplicationContext().getBean("ApplicationServiceImpl");
Properties systemProperties = (Properties) getWebApplicationContext().getBean("SystemProperties");
try {
String count = systemProperties.getProperty("resultCountPerQuery");
log.debug("resultCountPerQuery: " + count);
if (count != null) {
resultCountPerQuery = Integer.parseInt(count);
}
} catch (Exception ex) {
log.error("Exception initializing resultCountPerQuery: ", ex);
throw new ServiceException("Exception initializing resultCountPerQuery: ", ex);
}
}
public int getTotalNumberOfRecords(String targetClassName, Object criteria) throws Exception{
return getNestedCriteriaResultSet(targetClassName, criteria, 0).size();
}
public List queryObject(String targetClassName, Object criteria) throws Exception
{
return query(targetClassName,criteria,0);
}
public List query(String targetClassName, Object criteria, int startIndex) throws Exception
{
List results = new ArrayList();
results = getNestedCriteriaResultSet(targetClassName, criteria, startIndex);
List alteredResults = alterResultSet(results);
return alteredResults;
}
private List getNestedCriteriaResultSet(String targetClassName, Object searchCriteria, int startIndex) throws Exception{
List results = new ArrayList();
String searchClassName = getSearchClassName(targetClassName);
try
{
if(searchClassName != null && searchCriteria != null){
List<Object> paramList = new ArrayList<Object>();
paramList.add(searchCriteria);
NestedCriteriaPath pathCriteria = new NestedCriteriaPath(targetClassName,paramList);
results = applicationService.query(pathCriteria, startIndex, targetClassName);
}
else{
throw new Exception("Invalid arguments passed over to the server");
}
}
catch(Exception e)
{
log.error("WSQuery caught an exception: ", e);
throw e;
}
return results;
}
public List getAssociation(Object source, String associationName, int startIndex) throws Exception
{
List results = new ArrayList();
String targetClassName = source.getClass().getName();
log.debug("targetClassName: " + targetClassName);
String hql = "select obj."+associationName+" from "+targetClassName+" obj where obj = ?";
log.debug("hql: " + hql);
List<Object> params = new ArrayList<Object>();
params.add(source);
HQLCriteria criteria = new HQLCriteria(hql,params);
results = getHQLResultSet(targetClassName, criteria, startIndex);
List alteredResults = alterResultSet(results);
return alteredResults;
}
private List getHQLResultSet(String targetClassName, Object searchCriteria, int startIndex) throws Exception{
List results = new ArrayList();
String searchClassName = getSearchClassName(targetClassName);
try
{
if(searchClassName != null && searchCriteria != null){
results = applicationService.query(searchCriteria, startIndex, targetClassName);
}
else{
throw new Exception("Invalid arguments passed over to the server");
}
}
catch(Exception e)
{
log.error("WSQuery caught an exception: ", e);
throw e;
}
return results;
}
private String getSearchClassName(String targetClassName)throws Exception {
String searchClassName = "";
if(targetClassName.indexOf(",")>0){
StringTokenizer st = new StringTokenizer(targetClassName, ",");
while(st.hasMoreTokens()){
String className = st.nextToken();
String validClassName = classCache.getQualifiedClassName(className);
log.debug("validClassName: " + validClassName);
searchClassName += validClassName + ",";
}
searchClassName = searchClassName.substring(0,searchClassName.lastIndexOf(","));
} else{
searchClassName = classCache.getQualifiedClassName(targetClassName);
}
if(searchClassName == null){
throw new Exception("Invalid class name: " + targetClassName);
}
return searchClassName;
}
private List alterResultSet(List results) {
List objList;
if (results instanceof ListProxy)
{
ListProxy listProxy = (ListProxy)results;
objList = listProxy.getListChunk();
}
else
{
objList = results;
}
WSUtils util = new WSUtils();
objList = (List)util.convertToProxy(null, objList);
return objList;
}
}<|fim▁end|> | /*L
* Copyright Ekagra Software Technologies Ltd. |
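A note on the row above: WSQueryImpl.getAssociation builds a parameterized HQL string of the form "select obj.<assoc> from <Class> obj where obj = ?" and lets the query API bind the source object. The string assembly, restated in Python for clarity (the class and association names are placeholders):

def association_hql(target_class, association):
    # parameter binding is left to the query layer, as in the Java code
    return "select obj.%s from %s obj where obj = ?" % (association, target_class)

print(association_hql("gov.nih.nci.Gene", "chromosome"))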
<|file_name|>grammar.rs<|end_file_name|><|fim▁begin|>use crate::parser::Parser;
use crate::syntax::SyntaxKind;
pub(crate) mod atom;
pub(crate) mod block;
pub(crate) mod container;
pub(crate) mod error_recovery;
pub(crate) mod expr;
pub(crate) mod items;
pub(crate) mod macros;
pub(crate) mod stmt;
pub(crate) mod var;
<|fim▁hole|>}<|fim▁end|> | pub(super) fn root(p: &mut Parser) {
let m = p.mark();
self::block::parse_block(p, false);
m.complete(p, SyntaxKind::NODE_SOURCE_FILE); |
<|file_name|>tree.rs<|end_file_name|><|fim▁begin|><|fim▁hole|>use cargo::ops::tree::{self, EdgeKind};
use cargo::ops::Packages;
use cargo::util::print_available_packages;
use cargo::util::CargoResult;
use std::collections::HashSet;
use std::str::FromStr;
pub fn cli() -> App {
subcommand("tree")
.about("Display a tree visualization of a dependency graph")
.arg(opt("quiet", "Suppress status messages").short("q"))
.arg_manifest_path()
.arg_package_spec_no_all(
"Package to be used as the root of the tree",
"Display the tree for all packages in the workspace",
"Exclude specific workspace members",
)
.arg(Arg::with_name("all").long("all").short("a").hidden(true))
.arg(
Arg::with_name("all-targets")
.long("all-targets")
.hidden(true),
)
.arg_features()
.arg_target_triple(
"Filter dependencies matching the given target-triple (default host platform). \
Pass `all` to include all targets.",
)
.arg(
Arg::with_name("no-dev-dependencies")
.long("no-dev-dependencies")
.hidden(true),
)
.arg(
multi_opt(
"edges",
"KINDS",
"The kinds of dependencies to display \
(features, normal, build, dev, all, no-dev, no-build, no-normal)",
)
.short("e"),
)
.arg(
optional_multi_opt(
"invert",
"SPEC",
"Invert the tree direction and focus on the given package",
)
.short("i"),
)
.arg(Arg::with_name("no-indent").long("no-indent").hidden(true))
.arg(
Arg::with_name("prefix-depth")
.long("prefix-depth")
.hidden(true),
)
.arg(
opt(
"prefix",
"Change the prefix (indentation) of how each entry is displayed",
)
.value_name("PREFIX")
.possible_values(&["depth", "indent", "none"])
.default_value("indent"),
)
.arg(opt(
"no-dedupe",
"Do not de-duplicate (repeats all shared dependencies)",
))
.arg(
opt(
"duplicates",
"Show only dependencies which come in multiple versions (implies -i)",
)
.short("d")
.alias("duplicate"),
)
.arg(
opt("charset", "Character set to use in output: utf8, ascii")
.value_name("CHARSET")
.possible_values(&["utf8", "ascii"])
.default_value("utf8"),
)
.arg(
opt("format", "Format string used for printing dependencies")
.value_name("FORMAT")
.short("f")
.default_value("{p}"),
)
.arg(
// Backwards compatibility with old cargo-tree.
Arg::with_name("version")
.long("version")
.short("V")
.hidden(true),
)
.after_help("Run `cargo help tree` for more detailed information.\n")
}
pub fn exec(config: &mut Config, args: &ArgMatches<'_>) -> CliResult {
if args.is_present("version") {
let verbose = args.occurrences_of("verbose") > 0;
let version = cli::get_version_string(verbose);
cargo::drop_print!(config, "{}", version);
return Ok(());
}
let prefix = if args.is_present("no-indent") {
config
.shell()
.warn("the --no-indent flag has been changed to --prefix=none")?;
"none"
} else if args.is_present("prefix-depth") {
config
.shell()
.warn("the --prefix-depth flag has been changed to --prefix=depth")?;
"depth"
} else {
args.value_of("prefix").unwrap()
};
let prefix = tree::Prefix::from_str(prefix).map_err(|e| anyhow::anyhow!("{}", e))?;
let no_dedupe = args.is_present("no-dedupe") || args.is_present("all");
if args.is_present("all") {
config.shell().warn(
"The `cargo tree` --all flag has been changed to --no-dedupe, \
and may be removed in a future version.\n\
If you are looking to display all workspace members, use the --workspace flag.",
)?;
}
let targets = if args.is_present("all-targets") {
config
.shell()
.warn("the --all-targets flag has been changed to --target=all")?;
vec!["all".to_string()]
} else {
args._values_of("target")
};
let target = tree::Target::from_cli(targets);
let edge_kinds = parse_edge_kinds(config, args)?;
let graph_features = edge_kinds.contains(&EdgeKind::Feature);
let packages = args.packages_from_flags()?;
let mut invert = args
.values_of("invert")
.map_or_else(|| Vec::new(), |is| is.map(|s| s.to_string()).collect());
if args.is_present_with_zero_values("invert") {
match &packages {
Packages::Packages(ps) => {
// Backwards compatibility with old syntax of `cargo tree -i -p foo`.
invert.extend(ps.clone());
}
_ => {
return Err(format_err!(
"The `-i` flag requires a package name.\n\
\n\
The `-i` flag is used to inspect the reverse dependencies of a specific\n\
package. It will invert the tree and display the packages that depend on the\n\
given package.\n\
\n\
Note that in a workspace, by default it will only display the package's\n\
reverse dependencies inside the tree of the workspace member in the current\n\
directory. The --workspace flag can be used to extend it so that it will show\n\
the package's reverse dependencies across the entire workspace. The -p flag\n\
can be used to display the package's reverse dependencies only with the\n\
subtree of the package given to -p.\n\
"
)
.into());
}
}
}
let ws = args.workspace(config)?;
if args.is_present_with_zero_values("package") {
print_available_packages(&ws)?;
}
let charset = tree::Charset::from_str(args.value_of("charset").unwrap())
.map_err(|e| anyhow::anyhow!("{}", e))?;
let opts = tree::TreeOptions {
features: values(args, "features"),
all_features: args.is_present("all-features"),
no_default_features: args.is_present("no-default-features"),
packages,
target,
edge_kinds,
invert,
prefix,
no_dedupe,
duplicates: args.is_present("duplicates"),
charset,
format: args.value_of("format").unwrap().to_string(),
graph_features,
};
tree::build_and_print(&ws, &opts)?;
Ok(())
}
fn parse_edge_kinds(config: &Config, args: &ArgMatches<'_>) -> CargoResult<HashSet<EdgeKind>> {
let mut kinds: Vec<&str> = args
.values_of("edges")
.map_or_else(|| Vec::new(), |es| es.flat_map(|e| e.split(',')).collect());
if args.is_present("no-dev-dependencies") {
config
.shell()
.warn("the --no-dev-dependencies flag has changed to -e=no-dev")?;
kinds.push("no-dev");
}
if kinds.is_empty() {
kinds.extend(&["normal", "build", "dev"]);
}
let mut result = HashSet::new();
let insert_defaults = |result: &mut HashSet<EdgeKind>| {
result.insert(EdgeKind::Dep(DepKind::Normal));
result.insert(EdgeKind::Dep(DepKind::Build));
result.insert(EdgeKind::Dep(DepKind::Development));
};
let unknown = |k| {
bail!(
"unknown edge kind `{}`, valid values are \
\"normal\", \"build\", \"dev\", \
\"no-normal\", \"no-build\", \"no-dev\", \
\"features\", or \"all\"",
k
)
};
if kinds.iter().any(|k| k.starts_with("no-")) {
insert_defaults(&mut result);
for kind in &kinds {
match *kind {
"no-normal" => result.remove(&EdgeKind::Dep(DepKind::Normal)),
"no-build" => result.remove(&EdgeKind::Dep(DepKind::Build)),
"no-dev" => result.remove(&EdgeKind::Dep(DepKind::Development)),
"features" => result.insert(EdgeKind::Feature),
"normal" | "build" | "dev" | "all" => {
bail!("`no-` dependency kinds cannot be mixed with other dependency kinds")
}
k => return unknown(k),
};
}
return Ok(result);
}
for kind in &kinds {
match *kind {
"all" => {
insert_defaults(&mut result);
result.insert(EdgeKind::Feature);
}
"features" => {
result.insert(EdgeKind::Feature);
}
"normal" => {
result.insert(EdgeKind::Dep(DepKind::Normal));
}
"build" => {
result.insert(EdgeKind::Dep(DepKind::Build));
}
"dev" => {
result.insert(EdgeKind::Dep(DepKind::Development));
}
k => return unknown(k),
}
}
if kinds.len() == 1 && kinds[0] == "features" {
insert_defaults(&mut result);
}
Ok(result)
}<|fim▁end|> | use crate::cli;
use crate::command_prelude::*;
use anyhow::{bail, format_err};
use cargo::core::dependency::DepKind; |
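A note on the row above: parse_edge_kinds gives `no-...` flags subtractive semantics: start from the full default kind set, remove the negated kinds, and reject any mix with plain kinds. A compact Python model of just that branch (names invented):

DEFAULTS = {"normal", "build", "dev"}

def subtractive_kinds(flags):
    for f in flags:
        if f in DEFAULTS or f == "all":
            raise ValueError("`no-` kinds cannot be mixed with other dependency kinds")
    kinds = set(DEFAULTS)
    for f in flags:
        if f.startswith("no-"):
            kinds.discard(f[3:])
        elif f == "features":
            kinds.add("features")
    return kinds

print(subtractive_kinds(["no-dev"]))              # {'normal', 'build'}
print(subtractive_kinds(["no-dev", "features"]))  # feature edges are still allowed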
<|file_name|>htmlstyleelement.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use cssparser::Parser as CssParser;
use dom::bindings::cell::DOMRefCell;
use dom::bindings::codegen::Bindings::HTMLStyleElementBinding;
use dom::bindings::codegen::Bindings::HTMLStyleElementBinding::HTMLStyleElementMethods;
use dom::bindings::codegen::Bindings::NodeBinding::NodeMethods;
use dom::bindings::inheritance::Castable;
use dom::bindings::js::{JS, MutNullableHeap, Root};
use dom::bindings::str::DOMString;
use dom::cssstylesheet::CSSStyleSheet;
use dom::document::Document;
use dom::element::Element;
use dom::htmlelement::HTMLElement;
use dom::node::{ChildrenMutation, Node, document_from_node, window_from_node};
use dom::stylesheet::StyleSheet as DOMStyleSheet;
use dom::virtualmethods::VirtualMethods;
use html5ever_atoms::LocalName;
use script_layout_interface::message::Msg;
use std::sync::Arc;
use style::media_queries::parse_media_query_list;
use style::parser::ParserContextExtraData;
use style::stylesheets::{Stylesheet, Origin};
#[dom_struct]
pub struct HTMLStyleElement {
htmlelement: HTMLElement,
#[ignore_heap_size_of = "Arc"]
stylesheet: DOMRefCell<Option<Arc<Stylesheet>>>,
cssom_stylesheet: MutNullableHeap<JS<CSSStyleSheet>>,
}
impl HTMLStyleElement {
fn new_inherited(local_name: LocalName,
prefix: Option<DOMString>,
document: &Document) -> HTMLStyleElement {
HTMLStyleElement {
htmlelement: HTMLElement::new_inherited(local_name, prefix, document),
stylesheet: DOMRefCell::new(None),
cssom_stylesheet: MutNullableHeap::new(None),
}
}
#[allow(unrooted_must_root)]
pub fn new(local_name: LocalName,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLStyleElement> {
Node::reflect_node(box HTMLStyleElement::new_inherited(local_name, prefix, document),
document,
HTMLStyleElementBinding::Wrap)
}
pub fn parse_own_css(&self) {
let node = self.upcast::<Node>();
let element = self.upcast::<Element>();
assert!(node.is_in_doc());
let win = window_from_node(node);
let url = win.get_url();
let mq_attribute = element.get_attribute(&ns!(), &local_name!("media"));
let mq_str = match mq_attribute {
Some(a) => String::from(&**a.value()),
None => String::new(),
};
let data = node.GetTextContent().expect("Element.textContent must be a string");
let mq = parse_media_query_list(&mut CssParser::new(&mq_str));
let sheet = Stylesheet::from_str(&data, url, Origin::Author, mq, win.css_error_reporter(),
ParserContextExtraData::default());
let sheet = Arc::new(sheet);
win.layout_chan().send(Msg::AddStylesheet(sheet.clone())).unwrap();
*self.stylesheet.borrow_mut() = Some(sheet);
let doc = document_from_node(self);
doc.invalidate_stylesheets();
}
pub fn get_stylesheet(&self) -> Option<Arc<Stylesheet>> {
self.stylesheet.borrow().clone()
}
pub fn get_cssom_stylesheet(&self) -> Option<Root<CSSStyleSheet>> {
self.get_stylesheet().map(|sheet| {
self.cssom_stylesheet.or_init(|| {
CSSStyleSheet::new(&window_from_node(self),
"text/css".into(),
None, // todo handle location
None, // todo handle title
sheet)<|fim▁hole|> })
})
}
}
impl VirtualMethods for HTMLStyleElement {
fn super_type(&self) -> Option<&VirtualMethods> {
Some(self.upcast::<HTMLElement>() as &VirtualMethods)
}
fn children_changed(&self, mutation: &ChildrenMutation) {
if let Some(ref s) = self.super_type() {
s.children_changed(mutation);
}
if self.upcast::<Node>().is_in_doc() {
self.parse_own_css();
}
}
fn bind_to_tree(&self, tree_in_doc: bool) {
if let Some(ref s) = self.super_type() {
s.bind_to_tree(tree_in_doc);
}
if tree_in_doc {
self.parse_own_css();
}
}
}
impl HTMLStyleElementMethods for HTMLStyleElement {
// https://drafts.csswg.org/cssom/#dom-linkstyle-sheet
fn GetSheet(&self) -> Option<Root<DOMStyleSheet>> {
self.get_cssom_stylesheet().map(Root::upcast)
}
}<|fim▁end|> | |
<|file_name|>category_tags.py<|end_file_name|><|fim▁begin|>from django import template
from django.shortcuts import render_to_response, redirect, get_object_or_404
# from product.models import Slide
register = template.Library()
# @register.inclusion_tag('slides/slides.html')
# def get_main_slides():
# slides = Slide.objects.filter(published_main=1).order_by('ordering')
# return {'slides': slides}
# @register.inclusion_tag('comments/comments.html')
# def comments(paket, item_model, item_id):
# from comments.models import Comments
# nodes = Comments.objects.filter(paket=paket, item_model=item_model,item_id=item_id, published=1)
# return {'nodes':nodes, 'paket':paket, 'item_model':item_model, 'item_id':item_id}
<|fim▁hole|># @register.filter(name='suit_conf')
# def suit_conf(name):
# value = get_config(name)
# return mark_safe(value) if isinstance(value, str) else value
# @register.tag
# def suit_date(parser, token):
# return NowNode(get_config('HEADER_DATE_FORMAT'))
# @register.tag
# def suit_time(parser, token):
# return NowNode(get_config('HEADER_TIME_FORMAT'))
# @register.filter
# def field_contents_foreign_linked(admin_field):
# """Return the .contents attribute of the admin_field, and if it
# is a foreign key, wrap it in a link to the admin page for that
# object.
# Use by replacing '{{ field.contents }}' in an admin template (e.g.
# fieldset.html) with '{{ field|field_contents_foreign_linked }}'.
# """
# fieldname = admin_field.field['field']
# displayed = admin_field.contents()
# obj = admin_field.form.instance
# if not hasattr(admin_field.model_admin,
# 'linked_readonly_fields') or fieldname not in admin_field \
# .model_admin \
# .linked_readonly_fields:
# return displayed
# try:
# fieldtype, attr, value = lookup_field(fieldname, obj,
# admin_field.model_admin)
# except ObjectDoesNotExist:
# fieldtype = None
# if isinstance(fieldtype, ForeignKey):
# try:
# url = admin_url(value)
# except NoReverseMatch:
# url = None
# if url:
# displayed = "<a href='%s'>%s</a>" % (url, displayed)
# return mark_safe(displayed)
# @register.filter
# def admin_url(obj):
# info = (obj._meta.app_label, obj._meta.module_name)
# return reverse("admin:%s_%s_change" % info, args=[obj.pk])
# @register.simple_tag
# def suit_bc(*args):
# return utils.value_by_version(args)
# @register.assignment_tag
# def suit_bc_value(*args):
# return utils.value_by_version(args)<|fim▁end|> | |
<|file_name|>buffer.rs<|end_file_name|><|fim▁begin|>use buffer::{BufferView, BufferViewAny, BufferType, BufferCreationError};
use uniforms::{AsUniformValue, UniformValue, UniformBlock, UniformType};
use std::ops::{Deref, DerefMut};
use backend::Facade;
/// Buffer that contains a uniform block.
#[derive(Debug)]
pub struct UniformBuffer<T> where T: Copy {
buffer: BufferView<T>,
}
/// Same as `UniformBuffer` but doesn't contain any information about the type.
#[derive(Debug)]
pub struct TypelessUniformBuffer {
buffer: BufferViewAny,
}
impl<T> UniformBuffer<T> where T: Copy {
/// Uploads data in the uniforms buffer.
///
/// # Features
///
/// Only available if the `gl_uniform_blocks` feature is enabled.
#[cfg(feature = "gl_uniform_blocks")]
pub fn new<F>(facade: &F, data: T) -> UniformBuffer<T> where F: Facade {
UniformBuffer::new_if_supported(facade, data).unwrap()
}
/// Uploads data in the uniforms buffer.
pub fn new_if_supported<F>(facade: &F, data: T) -> Option<UniformBuffer<T>> where F: Facade {
let buffer = match BufferView::new(facade, &data, BufferType::UniformBuffer, true) {
Ok(b) => b,
Err(BufferCreationError::BufferTypeNotSupported) => return None,
e @ Err(_) => e.unwrap(),
};
Some(UniformBuffer {
buffer: buffer,
})
}
/// Creates an empty buffer.
///
/// # Features
///
/// Only available if the `gl_uniform_blocks` feature is enabled.
#[cfg(feature = "gl_uniform_blocks")]
pub fn empty<F>(facade: &F) -> UniformBuffer<T> where F: Facade {
UniformBuffer::empty_if_supported(facade).unwrap()
}
/// Creates an empty buffer.
pub fn empty_if_supported<F>(facade: &F) -> Option<UniformBuffer<T>> where F: Facade {
let buffer = match BufferView::empty(facade, BufferType::UniformBuffer, true) {
Ok(b) => b,
Err(BufferCreationError::BufferTypeNotSupported) => return None,
e @ Err(_) => e.unwrap(),
};
Some(UniformBuffer {
buffer: buffer,<|fim▁hole|>}
impl<T> Deref for UniformBuffer<T> where T: Copy {
type Target = BufferView<T>;
fn deref(&self) -> &BufferView<T> {
&self.buffer
}
}
impl<T> DerefMut for UniformBuffer<T> where T: Copy {
fn deref_mut(&mut self) -> &mut BufferView<T> {
&mut self.buffer
}
}
impl<'a, T> AsUniformValue for &'a UniformBuffer<T> where T: UniformBlock + Copy {
fn as_uniform_value(&self) -> UniformValue {
UniformValue::Block(self.buffer.as_slice_any(), <T as UniformBlock>::matches)
}
fn matches(_: &UniformType) -> bool {
false
}
}<|fim▁end|> | })
} |
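A note on the row above: the `*_if_supported` constructors return an Option (None on unsupported backends) instead of failing, so callers can probe for uniform-buffer support. The convention, sketched in Python with an invented feature set:

def uniform_buffer_if_supported(features, data):
    # return None rather than raising when the backend lacks the buffer type
    if "uniform_buffer" not in features:
        return None
    return {"type": "uniform_buffer", "data": data}

print(uniform_buffer_if_supported({"uniform_buffer"}, b"\x00" * 16) is not None)  # True
print(uniform_buffer_if_supported(set(), b"") is None)                            # True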
<|file_name|>index.js<|end_file_name|><|fim▁begin|>var DB = require('./lib/db.js');
function SQLContext(options) {
this.readOnly = options.isReadOnly;
this.db = options.db;
}
function _put(db, key, value, callback) {
db.createOrUpdate(key, value, function(err) {
if(err) {
return callback(err);
}
callback();
});
}
SQLContext.prototype.putObject = function(key, value, callback) {
if(this.readOnly) {
return callback(new Error('write operation on read-only context.'));
}
var json = JSON.stringify(value);
var buf = new Buffer(json, 'utf8');
_put(this.db, key, buf, callback);
};
SQLContext.prototype.putBuffer = function(key, value, callback) {
if(this.readOnly) {
return callback(new Error('write operation on read-only context.'));
}
_put(this.db, key, value, callback);
};
SQLContext.prototype.delete = function (key, callback) {
if(this.readOnly) {
return callback(new Error('write operation on read-only context.'));
}
this.db.remove(key, function(err) {
if(err) {
return callback(err);
}
callback();
});
};
SQLContext.prototype.clear = function (callback) {
if(this.readOnly) {
return callback(new Error('write operation on read-only context.'));
}
this.db.clearAll(callback);
};
function _get(db, key, callback) {
db.find(key, callback);
}
SQLContext.prototype.getObject = function(key, callback) {
_get(this.db, key, function(err, data) {
if(err) {
return callback(err);
}<|fim▁hole|> if(data) {
try {
data = JSON.parse(data.toString('utf8'));
} catch(e) {
return callback(e);
}
}
callback(null, data);
});
};
SQLContext.prototype.getBuffer = function(key, callback) {
_get(this.db, key, callback);
};
function SQLProvider(options) {
this.options = options || {};
this.user = options.user;
}
SQLProvider.isSupported = function() {
return (typeof module !== 'undefined' && module.exports);
};
SQLProvider.prototype.open = function(callback) {
if(!this.user) {
return callback(new Error('missing user'));
}
this.db = new DB(this.options, function(err) {
if (err) {
return callback(err);
}
callback();
});
};
SQLProvider.prototype.getReadOnlyContext = function() {
return new SQLContext({isReadOnly: true, db: this.db});
};
SQLProvider.prototype.getReadWriteContext = function() {
return new SQLContext({isReadOnly: false, db: this.db});
};
// Forward db type constants
SQLProvider.MYSQL = DB.MYSQL;
SQLProvider.SQLITE = DB.SQLITE;
SQLProvider.POSTGRES = DB.POSTGRES;
SQLProvider.MARIADB = DB.MARIADB;
module.exports = SQLProvider;<|fim▁end|> | |
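A note on the row above: the provider hands out read-only and read-write contexts over the same database handle, and every mutating method checks the flag first. The guard pattern in miniature (the class and backing store are illustrative):

class Context:
    def __init__(self, db, read_only):
        self.db = db
        self.read_only = read_only

    def put(self, key, value):
        if self.read_only:
            raise PermissionError("write operation on read-only context")
        self.db[key] = value

store = {}
Context(store, read_only=False).put("k", b"v")   # allowed
try:
    Context(store, read_only=True).put("k", b"v")
except PermissionError as err:
    print(err)                                   # write operation on read-only context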
<|file_name|>axis_demux_wrap.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
"""
Generates an AXI Stream demux wrapper with the specified number of ports
"""
import argparse
from jinja2 import Template
def main():
parser = argparse.ArgumentParser(description=__doc__.strip())
parser.add_argument('-p', '--ports', type=int, default=4, help="number of ports")
parser.add_argument('-n', '--name', type=str, help="module name")
parser.add_argument('-o', '--output', type=str, help="output file name")
args = parser.parse_args()
try:
generate(**args.__dict__)
except IOError as ex:
print(ex)
exit(1)
def generate(ports=4, name=None, output=None):
n = ports
if name is None:
name = "axis_demux_wrap_{0}".format(n)
if output is None:
output = name + ".v"
print("Generating {0} port AXI stream demux wrapper {1}...".format(n, name))
cn = (n-1).bit_length()
t = Template(u"""/*
Copyright (c) 2018-2021 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
// Language: Verilog 2001
`timescale 1ns / 1ps
/*
* AXI4-Stream {{n}} port demux (wrapper)
*/
module {{name}} #
(
// Width of AXI stream interfaces in bits
parameter DATA_WIDTH = 8,
// Propagate tkeep signal
parameter KEEP_ENABLE = (DATA_WIDTH>8),
// tkeep signal width (words per cycle)
parameter KEEP_WIDTH = (DATA_WIDTH/8),
// Propagate tid signal
parameter ID_ENABLE = 0,
// tid signal width<|fim▁hole|> parameter ID_WIDTH = 8,
// Propagate tdest signal
parameter DEST_ENABLE = 0,
// tdest signal width
parameter DEST_WIDTH = 8,
// Propagate tuser signal
parameter USER_ENABLE = 1,
// tuser signal width
parameter USER_WIDTH = 1
)
(
input wire clk,
input wire rst,
/*
* AXI Stream input
*/
input wire [DATA_WIDTH-1:0] s_axis_tdata,
input wire [KEEP_WIDTH-1:0] s_axis_tkeep,
input wire s_axis_tvalid,
output wire s_axis_tready,
input wire s_axis_tlast,
input wire [ID_WIDTH-1:0] s_axis_tid,
input wire [DEST_WIDTH-1:0] s_axis_tdest,
input wire [USER_WIDTH-1:0] s_axis_tuser,
/*
* AXI Stream outputs
*/
{%- for p in range(n) %}
output wire [DATA_WIDTH-1:0] m{{'%02d'%p}}_axis_tdata,
output wire [KEEP_WIDTH-1:0] m{{'%02d'%p}}_axis_tkeep,
output wire m{{'%02d'%p}}_axis_tvalid,
input wire m{{'%02d'%p}}_axis_tready,
output wire m{{'%02d'%p}}_axis_tlast,
output wire [ID_WIDTH-1:0] m{{'%02d'%p}}_axis_tid,
output wire [DEST_WIDTH-1:0] m{{'%02d'%p}}_axis_tdest,
output wire [USER_WIDTH-1:0] m{{'%02d'%p}}_axis_tuser,
{% endfor -%}
/*
* Control
*/
input wire enable,
input wire drop,
input wire [{{cn-1}}:0] select
);
axis_demux #(
.M_COUNT({{n}}),
.DATA_WIDTH(DATA_WIDTH),
.KEEP_ENABLE(KEEP_ENABLE),
.KEEP_WIDTH(KEEP_WIDTH),
.ID_ENABLE(ID_ENABLE),
.ID_WIDTH(ID_WIDTH),
.DEST_ENABLE(DEST_ENABLE),
.DEST_WIDTH(DEST_WIDTH),
.USER_ENABLE(USER_ENABLE),
.USER_WIDTH(USER_WIDTH)
)
axis_demux_inst (
.clk(clk),
.rst(rst),
// AXI inputs
.s_axis_tdata(s_axis_tdata),
.s_axis_tkeep(s_axis_tkeep),
.s_axis_tvalid(s_axis_tvalid),
.s_axis_tready(s_axis_tready),
.s_axis_tlast(s_axis_tlast),
.s_axis_tid(s_axis_tid),
.s_axis_tdest(s_axis_tdest),
.s_axis_tuser(s_axis_tuser),
// AXI output
.m_axis_tdata({ {% for p in range(n-1,-1,-1) %}m{{'%02d'%p}}_axis_tdata{% if not loop.last %}, {% endif %}{% endfor %} }),
.m_axis_tkeep({ {% for p in range(n-1,-1,-1) %}m{{'%02d'%p}}_axis_tkeep{% if not loop.last %}, {% endif %}{% endfor %} }),
.m_axis_tvalid({ {% for p in range(n-1,-1,-1) %}m{{'%02d'%p}}_axis_tvalid{% if not loop.last %}, {% endif %}{% endfor %} }),
.m_axis_tready({ {% for p in range(n-1,-1,-1) %}m{{'%02d'%p}}_axis_tready{% if not loop.last %}, {% endif %}{% endfor %} }),
.m_axis_tlast({ {% for p in range(n-1,-1,-1) %}m{{'%02d'%p}}_axis_tlast{% if not loop.last %}, {% endif %}{% endfor %} }),
.m_axis_tid({ {% for p in range(n-1,-1,-1) %}m{{'%02d'%p}}_axis_tid{% if not loop.last %}, {% endif %}{% endfor %} }),
.m_axis_tdest({ {% for p in range(n-1,-1,-1) %}m{{'%02d'%p}}_axis_tdest{% if not loop.last %}, {% endif %}{% endfor %} }),
.m_axis_tuser({ {% for p in range(n-1,-1,-1) %}m{{'%02d'%p}}_axis_tuser{% if not loop.last %}, {% endif %}{% endfor %} }),
// Control
.enable(enable),
.drop(drop),
.select(select)
);
endmodule
""")
print(f"Writing file '{output}'...")
with open(output, 'w') as f:
f.write(t.render(
n=n,
cn=cn,
name=name
))
f.flush()
print("Done")
if __name__ == "__main__":
main()<|fim▁end|> | |
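The same generator can also be driven from Python rather than the CLI; a minimal sketch using the generate() function defined above (the output filename is just an example):

from axis_demux_wrap import generate

# Emit an 8-output wrapper; the select input is (8-1).bit_length() = 3 bits wide.
generate(ports=8, output="axis_demux_wrap_8.v")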
<|file_name|>nmap_scanner.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
'''
Author: Christopher Duffy
Date: February 2015
Name: nmap_scanner.py
Purpose: To scan a network
Copyright (c) 2015, Christopher Duffy All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met: * Redistributions
of source code must retain the above copyright notice, this list of conditions and
the following disclaimer. * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution. * Neither the
name of the nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL CHRISTOPHER DUFFY BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import sys
try:
import nmap
except:
sys.exit("[!] Install the nmap library: pip install python-nmap")
# Argument Validator
if len(sys.argv) != 3:
sys.exit("Please provide two arguments the first being the targets the second the ports")
ports = str(sys.argv[2])
addrs = str(sys.argv[1])
scanner = nmap.PortScanner()
scanner.scan(addrs, ports)
for host in scanner.all_hosts():
    if scanner[host].hostname() == "":
print("The host's IP address is %s and it's hostname was not found") % (host, scanner[host].hostname())
else:<|fim▁hole|><|fim▁end|> | print("The host's IP address is %s and it's hostname is %s") % (host, scanner[host].hostname()) |
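python-nmap also records per-protocol port state on the same scanner object; a minimal sketch that could follow the loop above (assumes nmap itself is installed and the scan has completed):

for host in scanner.all_hosts():
    for proto in scanner[host].all_protocols():
        for port in sorted(scanner[host][proto].keys()):
            state = scanner[host][proto][port]['state']
            print("%s %s/%s is %s" % (host, proto, port, state))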
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*! See doc.rs for a thorough explanation of the borrow checker */
#![allow(non_camel_case_types)]
use middle::dataflow::DataFlowContext;
use middle::dataflow::DataFlowOperator;
use euv = middle::expr_use_visitor;
use mc = middle::mem_categorization;
use middle::ty;
use util::ppaux::{note_and_explain_region, Repr, UserString};
use std::cell::{Cell};
use std::ops::{BitOr, BitAnd};
use std::rc::Rc;
use std::strbuf::StrBuf;
use syntax::ast;
use syntax::ast_map;
use syntax::ast_util;
use syntax::codemap::Span;
use syntax::parse::token;
use syntax::visit;
use syntax::visit::{Visitor, FnKind};
use syntax::ast::{FnDecl, Block, NodeId};
macro_rules! if_ok(
($inp: expr) => (
match $inp {
Ok(v) => { v }
Err(e) => { return Err(e); }
}
)
)
pub mod doc;
pub mod check_loans;
pub mod gather_loans;
pub mod move_data;
#[deriving(Clone)]
pub struct LoanDataFlowOperator;
pub type LoanDataFlow<'a> = DataFlowContext<'a, LoanDataFlowOperator>;
impl<'a> Visitor<()> for BorrowckCtxt<'a> {
fn visit_fn(&mut self, fk: &FnKind, fd: &FnDecl,
b: &Block, s: Span, n: NodeId, _: ()) {
borrowck_fn(self, fk, fd, b, s, n);
}
fn visit_item(&mut self, item: &ast::Item, _: ()) {
borrowck_item(self, item);
}
}
pub fn check_crate(tcx: &ty::ctxt,
krate: &ast::Crate) {
let mut bccx = BorrowckCtxt {
tcx: tcx,
stats: @BorrowStats {
loaned_paths_same: Cell::new(0),
loaned_paths_imm: Cell::new(0),
stable_paths: Cell::new(0),
guaranteed_paths: Cell::new(0),
}
};
visit::walk_crate(&mut bccx, krate, ());
if tcx.sess.borrowck_stats() {
println!("--- borrowck stats ---");
println!("paths requiring guarantees: {}",
bccx.stats.guaranteed_paths.get());
println!("paths requiring loans : {}",
make_stat(&bccx, bccx.stats.loaned_paths_same.get()));
println!("paths requiring imm loans : {}",
make_stat(&bccx, bccx.stats.loaned_paths_imm.get()));
println!("stable paths : {}",
make_stat(&bccx, bccx.stats.stable_paths.get()));
}
fn make_stat(bccx: &BorrowckCtxt, stat: uint) -> StrBuf {
let stat_f = stat as f64;
let total = bccx.stats.guaranteed_paths.get() as f64;
format_strbuf!("{} ({:.0f}%)", stat , stat_f * 100.0 / total)
}
}
fn borrowck_item(this: &mut BorrowckCtxt, item: &ast::Item) {
// Gather loans for items. Note that we don't need
// to check loans for single expressions. The check
// loan step is intended for things that have a data
// flow dependent conditions.
match item.node {
ast::ItemStatic(_, _, ex) => {
gather_loans::gather_loans_in_static_initializer(this, ex);
}
_ => {
visit::walk_item(this, item, ());
}
}
}
fn borrowck_fn(this: &mut BorrowckCtxt,
fk: &FnKind,
decl: &ast::FnDecl,
body: &ast::Block,
sp: Span,
id: ast::NodeId) {
debug!("borrowck_fn(id={})", id);
// Check the body of fn items.
let id_range = ast_util::compute_id_range_for_fn_body(fk, decl, body, sp, id);
let (all_loans, move_data) =
gather_loans::gather_loans_in_fn(this, decl, body);
let mut loan_dfcx =
DataFlowContext::new(this.tcx,
LoanDataFlowOperator,
id_range,
all_loans.len());
for (loan_idx, loan) in all_loans.iter().enumerate() {
loan_dfcx.add_gen(loan.gen_scope, loan_idx);
loan_dfcx.add_kill(loan.kill_scope, loan_idx);
}
loan_dfcx.propagate(body);
let flowed_moves = move_data::FlowedMoveData::new(move_data,
this.tcx,
id_range,
body);
check_loans::check_loans(this, &loan_dfcx, flowed_moves,
all_loans.as_slice(), body);
visit::walk_fn(this, fk, decl, body, sp, ());
}
// ----------------------------------------------------------------------
// Type definitions
pub struct BorrowckCtxt<'a> {
tcx: &'a ty::ctxt,
// Statistics:
stats: @BorrowStats
}
pub struct BorrowStats {
loaned_paths_same: Cell<uint>,
loaned_paths_imm: Cell<uint>,
stable_paths: Cell<uint>,
guaranteed_paths: Cell<uint>,
}
pub type BckResult<T> = Result<T, BckError>;
#[deriving(Eq)]
pub enum PartialTotal {
Partial, // Loan affects some portion
Total // Loan affects entire path
}
///////////////////////////////////////////////////////////////////////////
// Loans and loan paths
/// Record of a loan that was issued.
pub struct Loan {
index: uint,
loan_path: Rc<LoanPath>,
cmt: mc::cmt,
kind: ty::BorrowKind,
restrictions: Vec<Restriction>,
gen_scope: ast::NodeId,
kill_scope: ast::NodeId,
span: Span,
cause: euv::LoanCause,
}
#[deriving(Eq, TotalEq, Hash)]
pub enum LoanPath {
LpVar(ast::NodeId), // `x` in doc.rs
LpExtend(Rc<LoanPath>, mc::MutabilityCategory, LoanPathElem)
}
#[deriving(Eq, TotalEq, Hash)]
pub enum LoanPathElem {
LpDeref(mc::PointerKind), // `*LV` in doc.rs
LpInterior(mc::InteriorKind) // `LV.f` in doc.rs
}
impl LoanPath {
pub fn node_id(&self) -> ast::NodeId {
match *self {
LpVar(local_id) => local_id,
LpExtend(ref base, _, _) => base.node_id()
}
}
}
pub fn opt_loan_path(cmt: &mc::cmt) -> Option<Rc<LoanPath>> {
//! Computes the `LoanPath` (if any) for a `cmt`.
//! Note that this logic is somewhat duplicated in
//! the method `compute()` found in `gather_loans::restrictions`,
//! which allows it to share common loan path pieces as it
//! traverses the CMT.
match cmt.cat {
mc::cat_rvalue(..) |
mc::cat_static_item |
mc::cat_copied_upvar(mc::CopiedUpvar { onceness: ast::Many, .. }) => {
None
}
mc::cat_local(id) |
mc::cat_arg(id) |
mc::cat_copied_upvar(mc::CopiedUpvar { upvar_id: id, .. }) |
mc::cat_upvar(ty::UpvarId {var_id: id, ..}, _) => {
Some(Rc::new(LpVar(id)))
}
mc::cat_deref(ref cmt_base, _, pk) => {
opt_loan_path(cmt_base).map(|lp| {
Rc::new(LpExtend(lp, cmt.mutbl, LpDeref(pk)))
})
}
mc::cat_interior(ref cmt_base, ik) => {
opt_loan_path(cmt_base).map(|lp| {
Rc::new(LpExtend(lp, cmt.mutbl, LpInterior(ik)))
})
}
mc::cat_downcast(ref cmt_base) |
mc::cat_discr(ref cmt_base, _) => {
opt_loan_path(cmt_base)
}
}
}
///////////////////////////////////////////////////////////////////////////
// Restrictions
//
// Borrowing an lvalue often results in *restrictions* that limit what
// can be done with this lvalue during the scope of the loan:
//
// - `RESTR_MUTATE`: The lvalue may not be modified or `&mut` borrowed.
// - `RESTR_FREEZE`: `&` borrows of the lvalue are forbidden.
//
// In addition, no value which is restricted may be moved. Therefore,
// restrictions are meaningful even if the RestrictionSet is empty,
// because the restriction against moves is implied.
pub struct Restriction {
loan_path: Rc<LoanPath>,
set: RestrictionSet
}
#[deriving(Eq)]
pub struct RestrictionSet {
bits: u32
}
#[allow(dead_code)] // potentially useful
pub static RESTR_EMPTY: RestrictionSet = RestrictionSet {bits: 0b0000};
pub static RESTR_MUTATE: RestrictionSet = RestrictionSet {bits: 0b0001};
pub static RESTR_FREEZE: RestrictionSet = RestrictionSet {bits: 0b0010};
impl RestrictionSet {
pub fn intersects(&self, restr: RestrictionSet) -> bool {
(self.bits & restr.bits) != 0
}
}
impl BitOr<RestrictionSet,RestrictionSet> for RestrictionSet {
fn bitor(&self, rhs: &RestrictionSet) -> RestrictionSet {
RestrictionSet {bits: self.bits | rhs.bits}
}
}
impl BitAnd<RestrictionSet,RestrictionSet> for RestrictionSet {
fn bitand(&self, rhs: &RestrictionSet) -> RestrictionSet {
RestrictionSet {bits: self.bits & rhs.bits}
}
}
impl Repr for RestrictionSet {
fn repr(&self, _tcx: &ty::ctxt) -> StrBuf {
format_strbuf!("RestrictionSet(0x{:x})", self.bits as uint)
}
}
///////////////////////////////////////////////////////////////////////////
// Errors
// Errors that can occur
#[deriving(Eq)]
pub enum bckerr_code {
err_mutbl,
err_out_of_scope(ty::Region, ty::Region), // superscope, subscope
err_borrowed_pointer_too_short(
ty::Region, ty::Region, RestrictionSet), // loan, ptr
}
// Combination of an error code and the categorization of the expression
// that caused it
#[deriving(Eq)]
pub struct BckError {
span: Span,
cause: euv::LoanCause,
cmt: mc::cmt,
code: bckerr_code
}
pub enum AliasableViolationKind {
MutabilityViolation,
BorrowViolation(euv::LoanCause)
}
pub enum MovedValueUseKind {
MovedInUse,
MovedInCapture,
}
///////////////////////////////////////////////////////////////////////////
// Misc
impl<'a> BorrowckCtxt<'a> {
pub fn is_subregion_of(&self, r_sub: ty::Region, r_sup: ty::Region)
-> bool {
self.tcx.region_maps.is_subregion_of(r_sub, r_sup)
}
pub fn is_subscope_of(&self, r_sub: ast::NodeId, r_sup: ast::NodeId)
-> bool {
self.tcx.region_maps.is_subscope_of(r_sub, r_sup)
}
pub fn mc(&self) -> mc::MemCategorizationContext<'a,ty::ctxt> {
mc::MemCategorizationContext::new(self.tcx)
}
pub fn cat_expr(&self, expr: &ast::Expr) -> mc::cmt {
match self.mc().cat_expr(expr) {
Ok(c) => c,
Err(()) => {
self.tcx.sess.span_bug(expr.span, "error in mem categorization");
}
}
}
pub fn cat_expr_unadjusted(&self, expr: &ast::Expr) -> mc::cmt {
match self.mc().cat_expr_unadjusted(expr) {
Ok(c) => c,
Err(()) => {
self.tcx.sess.span_bug(expr.span, "error in mem categorization");
}
}
}
pub fn cat_expr_autoderefd(&self,
expr: &ast::Expr,
adj: &ty::AutoAdjustment)
-> mc::cmt {
let r = match *adj {
ty::AutoAddEnv(..) | ty::AutoObject(..) => {
// no autoderefs
self.mc().cat_expr_unadjusted(expr)
}
ty::AutoDerefRef(
ty::AutoDerefRef {
autoderefs: autoderefs, ..}) => {
self.mc().cat_expr_autoderefd(expr, autoderefs)
}
};
match r {
Ok(c) => c,
Err(()) => {
self.tcx.sess.span_bug(expr.span,
"error in mem categorization");
}
}
}
pub fn cat_def(&self,
id: ast::NodeId,
span: Span,
ty: ty::t,
def: ast::Def)
-> mc::cmt {
match self.mc().cat_def(id, span, ty, def) {
Ok(c) => c,
Err(()) => {
self.tcx.sess.span_bug(span, "error in mem categorization");
}
}
}
pub fn cat_captured_var(&self,<|fim▁hole|> // Create the cmt for the variable being borrowed, from the
// caller's perspective
let var_id = ast_util::def_id_of_def(upvar_def).node;
let var_ty = ty::node_id_to_type(self.tcx, var_id);
self.cat_def(closure_id, closure_span, var_ty, upvar_def)
}
pub fn cat_discr(&self, cmt: mc::cmt, match_id: ast::NodeId) -> mc::cmt {
Rc::new(mc::cmt_ {
cat: mc::cat_discr(cmt.clone(), match_id),
mutbl: cmt.mutbl.inherit(),
..*cmt
})
}
pub fn cat_pattern(&self,
cmt: mc::cmt,
pat: &ast::Pat,
op: |mc::cmt, &ast::Pat|) {
let r = self.mc().cat_pattern(cmt, pat, |_,x,y| op(x,y));
assert!(r.is_ok());
}
pub fn report(&self, err: BckError) {
self.span_err(
err.span,
self.bckerr_to_str(&err).as_slice());
self.note_and_explain_bckerr(err);
}
pub fn report_use_of_moved_value(&self,
use_span: Span,
use_kind: MovedValueUseKind,
lp: &LoanPath,
move: &move_data::Move,
moved_lp: &LoanPath) {
let verb = match use_kind {
MovedInUse => "use",
MovedInCapture => "capture",
};
match move.kind {
move_data::Declared => {
self.tcx.sess.span_err(
use_span,
format!("{} of possibly uninitialized variable: `{}`",
verb,
self.loan_path_to_str(lp)));
}
_ => {
let partially = if lp == moved_lp {""} else {"partially "};
self.tcx.sess.span_err(
use_span,
format!("{} of {}moved value: `{}`",
verb,
partially,
self.loan_path_to_str(lp)));
}
}
match move.kind {
move_data::Declared => {}
move_data::MoveExpr => {
let (expr_ty, expr_span) = match self.tcx.map.find(move.id) {
Some(ast_map::NodeExpr(expr)) => {
(ty::expr_ty_adjusted(self.tcx, expr), expr.span)
}
r => self.tcx.sess.bug(format!("MoveExpr({:?}) maps to {:?}, not Expr",
move.id, r))
};
let suggestion = move_suggestion(self.tcx, expr_ty,
"moved by default (use `copy` to override)");
self.tcx.sess.span_note(
expr_span,
format!("`{}` moved here because it has type `{}`, which is {}",
self.loan_path_to_str(moved_lp),
expr_ty.user_string(self.tcx), suggestion));
}
move_data::MovePat => {
let pat_ty = ty::node_id_to_type(self.tcx, move.id);
self.tcx.sess.span_note(self.tcx.map.span(move.id),
format!("`{}` moved here because it has type `{}`, \
which is moved by default (use `ref` to override)",
self.loan_path_to_str(moved_lp),
pat_ty.user_string(self.tcx)));
}
move_data::Captured => {
let (expr_ty, expr_span) = match self.tcx.map.find(move.id) {
Some(ast_map::NodeExpr(expr)) => {
(ty::expr_ty_adjusted(self.tcx, expr), expr.span)
}
r => self.tcx.sess.bug(format!("Captured({:?}) maps to {:?}, not Expr",
move.id, r))
};
let suggestion = move_suggestion(self.tcx, expr_ty,
"moved by default (make a copy and \
capture that instead to override)");
self.tcx.sess.span_note(
expr_span,
format!("`{}` moved into closure environment here because it \
has type `{}`, which is {}",
self.loan_path_to_str(moved_lp),
expr_ty.user_string(self.tcx), suggestion));
}
}
fn move_suggestion(tcx: &ty::ctxt, ty: ty::t, default_msg: &'static str)
-> &'static str {
match ty::get(ty).sty {
ty::ty_closure(box ty::ClosureTy {
store: ty::RegionTraitStore(..),
..
}) =>
"a non-copyable stack closure (capture it in a new closure, \
e.g. `|x| f(x)`, to override)",
_ if ty::type_moves_by_default(tcx, ty) =>
"non-copyable (perhaps you meant to use clone()?)",
_ => default_msg,
}
}
}
pub fn report_reassigned_immutable_variable(&self,
span: Span,
lp: &LoanPath,
assign:
&move_data::Assignment) {
self.tcx.sess.span_err(
span,
format!("re-assignment of immutable variable `{}`",
self.loan_path_to_str(lp)));
self.tcx.sess.span_note(
assign.span,
format!("prior assignment occurs here"));
}
pub fn span_err(&self, s: Span, m: &str) {
self.tcx.sess.span_err(s, m);
}
pub fn span_note(&self, s: Span, m: &str) {
self.tcx.sess.span_note(s, m);
}
pub fn span_end_note(&self, s: Span, m: &str) {
self.tcx.sess.span_end_note(s, m);
}
pub fn bckerr_to_str(&self, err: &BckError) -> StrBuf {
match err.code {
err_mutbl => {
let descr = match opt_loan_path(&err.cmt) {
None => {
format_strbuf!("{} {}",
err.cmt.mutbl.to_user_str(),
self.cmt_to_str(&*err.cmt))
}
Some(lp) => {
format_strbuf!("{} {} `{}`",
err.cmt.mutbl.to_user_str(),
self.cmt_to_str(&*err.cmt),
self.loan_path_to_str(&*lp))
}
};
match err.cause {
euv::ClosureCapture(_) => {
format_strbuf!("closure cannot assign to {}", descr)
}
euv::OverloadedOperator |
euv::AddrOf |
euv::RefBinding |
euv::AutoRef => {
format_strbuf!("cannot borrow {} as mutable", descr)
}
euv::ClosureInvocation => {
self.tcx.sess.span_bug(err.span,
"err_mutbl with a closure invocation");
}
}
}
err_out_of_scope(..) => {
let msg = match opt_loan_path(&err.cmt) {
None => "borrowed value".to_strbuf(),
Some(lp) => {
format_strbuf!("`{}`", self.loan_path_to_str(&*lp))
}
};
format_strbuf!("{} does not live long enough", msg)
}
err_borrowed_pointer_too_short(..) => {
let descr = match opt_loan_path(&err.cmt) {
Some(lp) => {
format_strbuf!("`{}`", self.loan_path_to_str(&*lp))
}
None => self.cmt_to_str(&*err.cmt),
};
format_strbuf!("lifetime of {} is too short to guarantee \
its contents can be safely reborrowed",
descr)
}
}
}
pub fn report_aliasability_violation(&self,
span: Span,
kind: AliasableViolationKind,
cause: mc::AliasableReason) {
let prefix = match kind {
MutabilityViolation => {
"cannot assign to data"
}
BorrowViolation(euv::ClosureCapture(_)) => {
// I don't think we can get aliasability violations
// with closure captures, so no need to come up with a
// good error message. The reason this cannot happen
// is because we only capture local variables in
// closures, and those are never aliasable.
self.tcx.sess.span_bug(
span,
"aliasability violation with closure");
}
BorrowViolation(euv::OverloadedOperator) |
BorrowViolation(euv::AddrOf) |
BorrowViolation(euv::AutoRef) |
BorrowViolation(euv::RefBinding) => {
"cannot borrow data mutably"
}
BorrowViolation(euv::ClosureInvocation) => {
"closure invocation"
}
};
match cause {
mc::AliasableOther => {
self.tcx.sess.span_err(
span,
format!("{} in an aliasable location",
prefix));
}
mc::AliasableStatic(..) |
mc::AliasableStaticMut(..) => {
self.tcx.sess.span_err(
span,
format!("{} in a static location", prefix));
}
mc::AliasableManaged => {
self.tcx.sess.span_err(
span,
format!("{} in a `@` pointer", prefix));
}
mc::AliasableBorrowed => {
self.tcx.sess.span_err(
span,
format!("{} in a `&` reference", prefix));
}
}
}
pub fn note_and_explain_bckerr(&self, err: BckError) {
let code = err.code;
match code {
err_mutbl(..) => { }
err_out_of_scope(super_scope, sub_scope) => {
note_and_explain_region(
self.tcx,
"reference must be valid for ",
sub_scope,
"...");
note_and_explain_region(
self.tcx,
"...but borrowed value is only valid for ",
super_scope,
"");
}
err_borrowed_pointer_too_short(loan_scope, ptr_scope, _) => {
let descr = match opt_loan_path(&err.cmt) {
Some(lp) => {
format_strbuf!("`{}`", self.loan_path_to_str(&*lp))
}
None => self.cmt_to_str(&*err.cmt),
};
note_and_explain_region(
self.tcx,
format!("{} would have to be valid for ", descr),
loan_scope,
"...");
note_and_explain_region(
self.tcx,
format!("...but {} is only valid for ", descr),
ptr_scope,
"");
}
}
}
pub fn append_loan_path_to_str(&self,
loan_path: &LoanPath,
out: &mut StrBuf) {
match *loan_path {
LpVar(id) => {
out.push_str(ty::local_var_name_str(self.tcx, id).get());
}
LpExtend(ref lp_base, _, LpInterior(mc::InteriorField(fname))) => {
self.append_autoderefd_loan_path_to_str(&**lp_base, out);
match fname {
mc::NamedField(fname) => {
out.push_char('.');
out.push_str(token::get_name(fname).get());
}
mc::PositionalField(idx) => {
out.push_char('#'); // invent a notation here
out.push_str(idx.to_str());
}
}
}
LpExtend(ref lp_base, _, LpInterior(mc::InteriorElement(_))) => {
self.append_autoderefd_loan_path_to_str(&**lp_base, out);
out.push_str("[..]");
}
LpExtend(ref lp_base, _, LpDeref(_)) => {
out.push_char('*');
self.append_loan_path_to_str(&**lp_base, out);
}
}
}
pub fn append_autoderefd_loan_path_to_str(&self,
loan_path: &LoanPath,
out: &mut StrBuf) {
match *loan_path {
LpExtend(ref lp_base, _, LpDeref(_)) => {
// For a path like `(*x).f` or `(*x)[3]`, autoderef
// rules would normally allow users to omit the `*x`.
            // So just serialize such paths to `x.f` or `x[3]` respectively.
self.append_autoderefd_loan_path_to_str(&**lp_base, out)
}
LpVar(..) | LpExtend(_, _, LpInterior(..)) => {
self.append_loan_path_to_str(loan_path, out)
}
}
}
pub fn loan_path_to_str(&self, loan_path: &LoanPath) -> StrBuf {
let mut result = StrBuf::new();
self.append_loan_path_to_str(loan_path, &mut result);
result
}
pub fn cmt_to_str(&self, cmt: &mc::cmt_) -> StrBuf {
self.mc().cmt_to_str(cmt)
}
}
impl DataFlowOperator for LoanDataFlowOperator {
#[inline]
fn initial_value(&self) -> bool {
false // no loans in scope by default
}
#[inline]
fn join(&self, succ: uint, pred: uint) -> uint {
succ | pred // loans from both preds are in scope
}
}
impl Repr for Loan {
fn repr(&self, tcx: &ty::ctxt) -> StrBuf {
(format!("Loan_{:?}({}, {:?}, {:?}-{:?}, {})",
self.index,
self.loan_path.repr(tcx),
self.kind,
self.gen_scope,
self.kill_scope,
self.restrictions.repr(tcx))).to_strbuf()
}
}
impl Repr for Restriction {
fn repr(&self, tcx: &ty::ctxt) -> StrBuf {
(format!("Restriction({}, {:x})",
self.loan_path.repr(tcx),
self.set.bits as uint)).to_strbuf()
}
}
impl Repr for LoanPath {
fn repr(&self, tcx: &ty::ctxt) -> StrBuf {
match self {
&LpVar(id) => {
(format!("$({})", tcx.map.node_to_str(id))).to_strbuf()
}
&LpExtend(ref lp, _, LpDeref(_)) => {
(format!("{}.*", lp.repr(tcx))).to_strbuf()
}
&LpExtend(ref lp, _, LpInterior(ref interior)) => {
(format!("{}.{}",
lp.repr(tcx),
interior.repr(tcx))).to_strbuf()
}
}
}
}<|fim▁end|> | closure_id: ast::NodeId,
closure_span: Span,
upvar_def: ast::Def)
-> mc::cmt { |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># Copyright 2001 by Tarjei Mikkelsen. All rights reserved.
# Copyright 2007 by Michiel de Hoon. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Code to work with the KEGG Enzyme database.
Functions:
parse - Returns an iterator giving Record objects.
Classes:
Record -- Holds the information from a KEGG Enzyme record.
"""
from __future__ import print_function
from Bio.KEGG import _write_kegg
from Bio.KEGG import _wrap_kegg
# Set up line wrapping rules (see Bio.KEGG._wrap_kegg)
rxn_wrap = [0, "",
(" + ", "", 1, 1),
(" = ", "", 1, 1),
(" ", "$", 1, 1),
("-", "$", 1, 1)]
name_wrap = [0, "",
(" ", "$", 1, 1),
("-", "$", 1, 1)]
id_wrap = lambda indent: [indent, "", (" ", "", 1, 0)]
struct_wrap = lambda indent: [indent, "", (" ", "", 1, 1)]
class Record(object):
"""Holds info from a KEGG Enzyme record.
Members:
    entry       The EC number (without the 'EC ').
name A list of the enzyme names.
classname A list of the classification terms.
sysname The systematic name of the enzyme.
reaction A list of the reaction description strings.
substrate A list of the substrates.
product A list of the products.
inhibitor A list of the inhibitors.
cofactor A list of the cofactors.
effector A list of the effectors.
comment A list of the comment strings.
pathway A list of 3-tuples: (database, id, pathway)
genes A list of 2-tuples: (organism, list of gene ids)
disease A list of 3-tuples: (database, id, disease)
structures A list of 2-tuples: (database, list of struct ids)
dblinks A list of 2-tuples: (database, list of db ids)
"""
def __init__(self):
"""__init___(self)
Create a new Record.
"""
self.entry = ""
self.name = []
self.classname = []
self.sysname = []
self.reaction = []
self.substrate = []
self.product = []
self.inhibitor = []
self.cofactor = []
self.effector = []
self.comment = []
self.pathway = []
self.genes = []
self.disease = []
self.structures = []
self.dblinks = []
def __str__(self):
"""__str__(self)
Returns a string representation of this Record.
"""
return self._entry() + \
self._name() + \
self._classname() + \
self._sysname() + \
self._reaction() + \
self._substrate() + \
self._product() + \
self._inhibitor() + \
self._cofactor() + \
self._effector() + \
self._comment() + \
self._pathway() + \
self._genes() + \
self._disease() + \
self._structures() + \
self._dblinks() + \
"///"
def _entry(self):
return _write_kegg("ENTRY",
["EC " + self.entry])
def _name(self):
return _write_kegg("NAME",
[_wrap_kegg(l, wrap_rule=name_wrap)
for l in self.name])
def _classname(self):
return _write_kegg("CLASS",
self.classname)
def _sysname(self):
return _write_kegg("SYSNAME",
[_wrap_kegg(l, wrap_rule=name_wrap)
for l in self.sysname])
def _reaction(self):
return _write_kegg("REACTION",
[_wrap_kegg(l, wrap_rule=rxn_wrap)
for l in self.reaction])
def _substrate(self):
return _write_kegg("SUBSTRATE",
[_wrap_kegg(l, wrap_rule=name_wrap)
for l in self.substrate])
def _product(self):
return _write_kegg("PRODUCT",
[_wrap_kegg(l, wrap_rule=name_wrap)
for l in self.product])
def _inhibitor(self):
return _write_kegg("INHIBITOR",
[_wrap_kegg(l, wrap_rule=name_wrap)
for l in self.inhibitor])
def _cofactor(self):
return _write_kegg("COFACTOR",
[_wrap_kegg(l, wrap_rule=name_wrap)
for l in self.cofactor])
def _effector(self):
return _write_kegg("EFFECTOR",
[_wrap_kegg(l, wrap_rule=name_wrap)
for l in self.effector])
def _comment(self):
return _write_kegg("COMMENT",
[_wrap_kegg(l, wrap_rule=id_wrap(0))
for l in self.comment])
def _pathway(self):
s = []
for entry in self.pathway:
s.append(entry[0] + ": " + entry[1] + " " + entry[2])
return _write_kegg("PATHWAY",
[_wrap_kegg(l, wrap_rule=id_wrap(16))
for l in s])
def _genes(self):
s = []
for entry in self.genes:
s.append(entry[0] + ": " + " ".join(entry[1]))
return _write_kegg("GENES",
[_wrap_kegg(l, wrap_rule=id_wrap(5))
for l in s])
def _disease(self):
s = []
for entry in self.disease:
s.append(entry[0] + ": " + entry[1] + " " + entry[2])
return _write_kegg("DISEASE",
[_wrap_kegg(l, wrap_rule=id_wrap(13))
for l in s])
def _structures(self):
s = []
for entry in self.structures:
s.append(entry[0] + ": " + " ".join(entry[1]) + " ")
return _write_kegg("STRUCTURES",
[_wrap_kegg(l, wrap_rule=struct_wrap(5))
for l in s])
def _dblinks(self):
# This is a bit of a cheat that won't work if enzyme entries
# have more than one link id per db id. For now, that's not
# the case - storing links ids in a list is only to make
# this class similar to the Compound.Record class.
s = []
for entry in self.dblinks:
s.append(entry[0] + ": " + " ".join(entry[1]))
return _write_kegg("DBLINKS", s)
def parse(handle):
"""Parse a KEGG Enzyme file, returning Record objects.
This is an iterator function, typically used in a for loop. For
example, using one of the example KEGG files in the Biopython
test suite,
>>> with open("KEGG/enzyme.sample") as handle:
... for record in parse(handle):
... print("%s %s" % (record.entry, record.name[0]))
...
1.1.1.1 Alcohol dehydrogenase<|fim▁hole|> 1.1.1.62 Estradiol 17beta-dehydrogenase
1.1.1.68 Transferred to EC 1.7.99.5
1.6.5.3 NADH dehydrogenase (ubiquinone)
1.14.13.28 3,9-Dihydroxypterocarpan 6a-monooxygenase
2.4.1.68 Glycoprotein 6-alpha-L-fucosyltransferase
3.1.1.6 Acetylesterase
2.7.2.1 Acetate kinase
"""
record = Record()
for line in handle:
if line[:3] == "///":
yield record
record = Record()
continue
if line[:12] != " ":
keyword = line[:12]
data = line[12:].strip()
if keyword == "ENTRY ":
words = data.split()
record.entry = words[1]
elif keyword == "CLASS ":
record.classname.append(data)
elif keyword == "COFACTOR ":
record.cofactor.append(data)
elif keyword == "COMMENT ":
record.comment.append(data)
elif keyword == "DBLINKS ":
if ":" in data:
key, values = data.split(":")
values = values.split()
row = (key, values)
record.dblinks.append(row)
else:
row = record.dblinks[-1]
key, values = row
values.extend(data.split())
row = key, values
record.dblinks[-1] = row
elif keyword == "DISEASE ":
if ":" in data:
database, data = data.split(":")
number, name = data.split(None, 1)
row = (database, number, name)
record.disease.append(row)
else:
row = record.disease[-1]
database, number, name = row
name = name + " " + data
row = database, number, name
record.disease[-1] = row
elif keyword == "EFFECTOR ":
record.effector.append(data.strip(";"))
elif keyword == "GENES ":
if data[3:5] == ': ':
key, values = data.split(":", 1)
values = [value.split("(")[0] for value in values.split()]
row = (key, values)
record.genes.append(row)
else:
row = record.genes[-1]
key, values = row
for value in data.split():
value = value.split("(")[0]
values.append(value)
row = key, values
record.genes[-1] = row
elif keyword == "INHIBITOR ":
record.inhibitor.append(data.strip(";"))
elif keyword == "NAME ":
record.name.append(data.strip(";"))
elif keyword == "PATHWAY ":
if data[:5] == 'PATH:':
_, map_num, name = data.split(None, 2)
pathway = ('PATH', map_num, name)
record.pathway.append(pathway)
else:
ec_num, name = data.split(None, 1)
pathway = 'PATH', ec_num, name
record.pathway.append(pathway)
elif keyword == "PRODUCT ":
record.product.append(data.strip(";"))
elif keyword == "REACTION ":
record.reaction.append(data.strip(";"))
elif keyword == "STRUCTURES ":
if data[:4] == 'PDB:':
database = data[:3]
accessions = data[4:].split()
row = (database, accessions)
record.structures.append(row)
else:
row = record.structures[-1]
database, accessions = row
accessions.extend(data.split())
row = (database, accessions)
record.structures[-1] = row
elif keyword == "SUBSTRATE ":
record.substrate.append(data.strip(";"))
elif keyword == "SYSNAME ":
record.sysname.append(data.strip(";"))
if __name__ == "__main__":
from Bio._utils import run_doctest
run_doctest()<|fim▁end|> | |
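Record objects can also be built by hand and serialized back to the KEGG flat-file layout through __str__; a small sketch using only the fields defined above (the sysname value is illustrative):

record = Record()
record.entry = "1.1.1.1"
record.name = ["Alcohol dehydrogenase"]
record.sysname = ["alcohol:NAD+ oxidoreductase"]
print(record)  # emits ENTRY/NAME/SYSNAME sections terminated by ///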
<|file_name|>test_storage.py<|end_file_name|><|fim▁begin|>from nose.plugins.skip import SkipTest<|fim▁hole|>from oyster.storage.gridfs import GridFSStorage
from oyster.storage.dummy import DummyStorage
def _simple_storage_test(StorageCls):
kernel = Kernel(mongo_db='oyster_test')
kernel.doc_classes['default'] = {}
storage = StorageCls(kernel)
# ensure the class has a storage_type attribute
assert hasattr(storage, 'storage_type')
doc = {'_id': 'aabbccddeeff', 'url': 'http://localhost:8000/#test',
'doc_class': 'default', 'metadata': {} }
storage_id = storage.put(doc, 'hello oyster', 'text/plain')
assert storage_id
assert storage.get(storage_id) == 'hello oyster'
def test_s3():
if not hasattr(settings, 'AWS_BUCKET'):
raise SkipTest('S3 not configured')
from oyster.storage.s3 import S3Storage
_simple_storage_test(S3Storage)
def test_gridfs():
_simple_storage_test(GridFSStorage)
def test_dummy():
_simple_storage_test(DummyStorage)<|fim▁end|> |
from oyster.conf import settings
from oyster.core import Kernel |
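The contract exercised by _simple_storage_test above is small: a storage_type attribute, put(doc, data, content_type) returning an id, and get(id) returning the stored data. A hypothetical in-memory backend that would satisfy it:

class MemoryStorage(object):
    storage_type = 'memory'

    def __init__(self, kernel):
        self._blobs = {}

    def put(self, tracked_doc, data, content_type):
        storage_id = '%s/%s' % (tracked_doc['doc_class'], tracked_doc['_id'])
        self._blobs[storage_id] = data
        return storage_id

    def get(self, storage_id):
        return self._blobs[storage_id]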
<|file_name|>Cache.py<|end_file_name|><|fim▁begin|>from __future__ import generators
import ConfigParser
import copy
import email.Message
import email.Parser
import email.Utils
import errno
import hmac
import inspect
import md5
import os
import popen2
import random
import re
import select
import sha
import shutil
import socket
import sys
import tempfile
import time
import urllib2
import isconf
from isconf.Errno import iserrno
from isconf.Globals import *
from isconf.fbp822 import fbp822
from isconf.Kernel import kernel
(START,IHAVE,SENDME) = range(3)
# XXX the following were migrated from 4.1.7 for now -- really need to
# be FBP components, at least in terms of logging
class Cache:
"""a combined cache manager and UDP mesh -- XXX needs to be split
>>> pid = os.fork()
>>> if not pid:
... time.sleep(999)
... sys.exit(0)
>>> os.environ["HOSTNAME"] = "testhost"
>>> os.environ["IS_HOME"] = "/tmp/var/is"
>>> cache = Cache(54321,54322)
>>> assert cache
>>> os.kill(pid,9)
"""
def __init__(self,udpport,httpport,timeout=2):
# XXX kludge -- what we really need is a dict which
# shows the "mirror list" of all known locations for
# files, rather than self.req
self.req = {}
self.udpport = udpport
self.httpport = httpport
self.timeout = float(timeout)
self.lastSend = 0
self.sock = None
self.fetched = {}
self.nets = self.readnets()
self.sendq = []
# temporary uid -- uniquely identifies host in non-persistent
# packets. If we want something permanent we should store it
# somewhere under private.
self.tuid = "%s@%s" % (random.random(),
os.environ['HOSTNAME'])
class Path: pass
self.p = Path()
home = os.environ['IS_HOME']
# XXX redundant with definitions in ISFS.py -- use a common lib?
self.p.cache = os.path.join(home,"fs/cache")
self.p.private = os.path.join(home,"fs/private")
self.p.announce = "%s/.announce" % (self.p.private)
self.p.pull = "%s/.pull" % (self.p.private)
for d in (self.p.cache,self.p.private):
if not os.path.isdir(d):
os.makedirs(d,0700)
def readnets(self):
# read network list
nets = {'udp': [], 'tcp': []}
netsfn = os.environ.get('IS_NETS',None)
debug("netsfn", netsfn)
if netsfn and os.path.exists(netsfn):
netsfd = open(netsfn,'r')
for line in netsfd:
(scheme,addr) = line.strip().split()
nets[scheme].append(addr)
debug("nets", str(nets))
return nets
def ihaveTx(self,path):
path = path.lstrip('/')
fullpath = os.path.join(self.p.cache,path)
mtime = 0
if not os.path.exists(fullpath):
warn("file gone: %s" % fullpath)
return
mtime = getmtime_int(fullpath)
reply = FBP.msg('ihave',tuid=self.tuid,
file=path,mtime=mtime,port=self.httpport,scheme='http')
HMAC.msgset(reply)
self.bcast(str(reply))
def bcast(self,msg):
# XXX only udp supported so far
debug("bcast")
addrs = self.nets['udp']
if not os.environ.get('IS_NOBROADCAST',None):
addrs.append('<broadcast>')
for addr in addrs:
if len(self.sendq) > 20:
debug("sendq overflow")
return
self.sendq.append((msg,addr,self.udpport))
def sender(self):
while True:
yield None
yield kernel.sigsleep, 1
while len(self.sendq):
msg,addr,udpport = self.sendq.pop(0)
try:
debug("sendto", addr, msg)
self.sock.sendto(msg,0,(addr,udpport))
except:
info("sendto failed: %s" % addr)
self.sendq.append((msg,addr,udpport))
yield kernel.sigsleep, 1
yield kernel.sigsleep, self.timeout/5.0
def ihaveRx(self,msg,ip):
yield None
scheme = msg['scheme']
port = msg['port']
path = msg['file']
mtime = msg.head.mtime
# XXX is python's pseudo-random good enough here?
#
# probably, but for other cases, use 'gpg --gen-random 2 16'
# to generate 128 bits of random data from entropy
#
challenge = str(random.random())
url = "%s://%s:%s/%s?challenge=%s" % (scheme,ip,port,path,challenge)
path = path.lstrip('/')
# simple check to ignore foreign domains
# XXX probably want to make this a list of domains
domain = os.environ['IS_DOMAIN']
if not path.startswith(domain + '/'):
debug("foreign domain, ignoring: %s" % path)
return
fullpath = os.path.join(self.p.cache,path)
mymtime = 0
debug("checking",url)
if os.path.exists(fullpath):
mymtime = getmtime_int(fullpath)
if mtime > mymtime:
debug("remote is newer:",url)
if self.req.has_key(path):
self.req[path]['state'] = SENDME
yield kernel.wait(self.wget(path,url,challenge))
elif mtime < mymtime:
debug("remote is older:",url)
self.ihaveTx(path)
else:
debug("remote and local times are the same:",path,mtime,mymtime)
def puller(self):
tmp = "%s.tmp" % self.p.pull
while True:
timeout= self.timeout
yield None
# get list of files
if not os.path.exists(self.p.pull):
# hmm. we must have died while pulling
if os.path.exists(tmp):
old = open(tmp,'r').read()
open(self.p.pull,'a').write(old)
open(self.p.pull,'a')
os.rename(self.p.pull,tmp)
# files = open(tmp,'r').read().strip().split("\n")
data = open(tmp,'r').read()
if not len(data):
open(self.p.pull,'a')
yield kernel.sigsleep, 1
continue
files = data.strip().split("\n")
# create requests
for path in files:
path = path.lstrip('/')
fullpath = os.path.join(self.p.cache,path)
mtime = 0
if os.path.exists(fullpath):
mtime = getmtime_int(fullpath)
req = FBP.msg('whohas',file=path,newer=mtime,tuid=self.tuid)
HMAC.msgset(req)
self.req.setdefault(path,{})
self.req[path]['msg'] = req
self.req[path]['expires'] = time.time() + timeout
self.req[path]['state'] = START
while True:
# send requests
yield None
debug("calling resend")
self.resend()
yield kernel.sigsleep, 1<|fim▁hole|> open(self.p.pull,'a')
break
def resend(self):
"""(re)send outstanding requests"""
if time.time() < self.lastSend + .5:
return
self.lastSend = time.time()
paths = self.req.keys()
for path in paths:
debug("resend", self.req[path]['expires'], path, self.req[path])
if self.req[path]['state'] > START:
# file is being fetched
debug("resend fetching")
pass
elif time.time() > self.req[path]['expires']:
# fetch never started
debug("timeout",path)
del self.req[path]
continue
req = self.req[path]['msg']
debug("calling bcast")
self.bcast(str(req))
def flush(self):
if not os.path.exists(self.p.announce):
return
tmp = "%s.tmp" % self.p.announce
os.rename(self.p.announce,tmp)
files = open(tmp,'r').read().strip().split("\n")
for path in files:
self.ihaveTx(path)
def wget(self,path,url,challenge):
"""
# >>> port=random.randrange(50000,60000)
# >>> class fakesock:
# ... def sendto(self,msg,foo,bar):
# ... print "sendto called"
# >>> srcdir="/tmp/var/is/fs/cache/"
# >>> pridir="/tmp/var/isdst/fs/private/"
# >>> if not os.path.exists(srcdir):
# ... os.makedirs(srcdir)
# >>> if not os.path.exists(pridir):
# ... os.makedirs(pridir)
# >>> open(srcdir + "foo", 'w').write("lakfdsjl")
# >>> open(pridir + ".pull",'w').write("foo\\n")
# >>> h = kernel.spawn(httpServer(port=port,dir=srcdir))
# >>> kernel.run(steps=1000)
# >>> os.environ["HOSTNAME"] = "testhost"
# >>> os.environ["IS_HOME"] = "/tmp/var/isdst"
# >>> shutil.rmtree("/tmp/var/isdst",ignore_errors=True)
# >>> cache = Cache(54321,port)
# >>> assert cache
# >>> cache.sock = fakesock()
# >>> url = "http://localhost:%d/foo" % port
# >>> w = kernel.spawn(cache.wget("foo",url,"abc"))
# >>> kernel.run(steps=1000)
# >>> open("/tmp/var/isdst/fs/cache/foo",'r').read()
"""
yield None
# XXX kludge to keep from beating up HTTP servers
if self.fetched.get(url,0) > time.time() - 5:
debug("toosoon",path,url)
if self.req.has_key(path):
del self.req[path]
return
self.fetched[path] = time.time()
info("fetching", url)
path = path.lstrip('/')
fullpath = os.path.join(self.p.cache,path)
(dir,file) = os.path.split(fullpath)
# XXX security checks on pathname
mtime = 0
if os.path.exists(fullpath):
mtime = getmtime_int(fullpath)
if not os.path.exists(dir):
os.makedirs(dir,0700)
try:
u = urllib2.urlopen(url)
except:
debug("HTTP failed opening %s" % url)
return
uinfo = u.info()
response = uinfo.get('x-hmac')
if not HMAC.ck(challenge,response):
debug("HMAC failed, abort fetching: %s" % url)
return
mod = uinfo.get('last-modified')
size = uinfo.get('content-length')
mod_secs = email.Utils.mktime_tz(email.Utils.parsedate_tz(mod))
if mod_secs <= mtime:
warn("not newer:",url,mod,mod_secs,mtime)
if self.req.has_key(path):
del self.req[path]
return
debug(url,size,mod)
tmp = os.path.join(dir,".%s.tmp" % file)
# XXX set umask somewhere early
# XXX use the following algorithm everywhere else as a more
# secure way of creating files that aren't world readable
# -- also see os.mkstemp()
if os.path.exists(tmp): os.unlink(tmp)
open(tmp,'w')
os.chmod(tmp,0600)
        open(tmp,'w') # reopen with mode 'w' to truncate now that the restrictive permissions are set
tmpfd = open(tmp,'a')
while True:
# XXX move timeout to here
yield kernel.sigbusy
try:
(r,w,e) = select.select([u],[],[u],0)
if e:
# XXX not sure if we should break or raise here
break
if not r:
continue
except:
# python 2.4 throws a "no fileno attribute" exception if
# the entire page content has already arrived
pass
try:
rxd = u.read(8192)
except:
break
if len(rxd) == 0:
break
# XXX show progress
tmpfd.write(rxd)
tmpfd.close()
actual_size = os.stat(tmp).st_size
if size is None:
warn("""
The host at %s is running an older version of
ISconf; that older version does not send content-length
headers, so we can't check the length of files it sends
us; we might store a corrupt file as a result. You should
upgrade that host to a more recent ISconf version soon.
""")
else:
size = int(size)
if size != actual_size:
debug("size mismatch: wanted %d got %d, abort fetching: %s" %
(size, actual_size, url))
return
meta = (mod_secs,mod_secs)
os.rename(tmp,fullpath)
os.utime(fullpath,meta)
if self.req.has_key(path):
del self.req[path]
self.ihaveTx(path)
def run(self):
from SocketServer import UDPServer
from isconf.fbp822 import fbp822, Error822
kernel.spawn(self.puller())
kernel.spawn(self.sender())
# XXX most of the following should be broken out into a receiver() task
dir = self.p.cache
udpport = self.udpport
debug("UDP server serving %s on port %d" % (dir,udpport))
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock = sock
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, True)
sock.setblocking(0)
sock.bind(('',udpport))
# laddr = sock.getsockname()
# localip = os.environ['HOSTNAME']
while True:
yield None
self.flush()
yield None
try:
data,addr = sock.recvfrom(8192)
# XXX check against addrs or nets
debug("from %s: %s" % (addr,data))
factory = fbp822()
msg = factory.parse(data)
type = msg.type().strip()
if msg.head.tuid == self.tuid:
# debug("one of ours -- ignore",str(msg))
continue
if not HMAC.msgck(msg):
debug("HMAC failed, dropping: %s" % msg)
continue
if type == 'whohas':
path = msg['file']
path = path.lstrip('/')
fullpath = os.path.join(dir,path)
fullpath = os.path.normpath(fullpath)
newer = int(msg.get('newer',None))
# security checks
bad=0
if fullpath != os.path.normpath(fullpath):
bad += 1
if dir != os.path.commonprefix(
(dir,os.path.abspath(fullpath))):
print dir,os.path.commonprefix(
(dir,os.path.abspath(fullpath)))
bad += 2
if bad:
warn("unsafe request %d from %s: %s" % (
bad,addr,fullpath))
continue
if not os.path.isfile(fullpath):
debug("ignoring whohas from %s: not found: %s" % (addr,fullpath))
continue
if newer is not None and newer >= getmtime_int(
fullpath):
debug("ignoring whohas from %s: not newer: %s" % (addr,fullpath))
continue
# url = "http://%s:%d/%s" % (localip,httpport,path)
self.ihaveTx(path)
continue
if type == 'ihave':
debug("gotihave:",str(msg))
ip = addr[0]
yield kernel.wait(self.ihaveRx(msg,ip))
continue
warn("unsupported message type from %s: %s" % (addr,type))
except socket.error:
yield kernel.sigsleep, 1
continue
except Exception, e:
warn("%s from %s: %s" % (e,addr,str(msg)))
continue
def httpServer(port,dir):
from BaseHTTPServer import HTTPServer
from isconf.HTTPServer import SimpleHTTPRequestHandler
from SocketServer import ThreadingMixIn
"""
# >>> port=random.randrange(50000,60000)
# >>> srcdir="/tmp/var/is/fs/cache/"
# >>> if not os.path.exists(srcdir):
# >>> os.makedirs(srcdir)
# >>> open(srcdir + "foo",'w').write("lakfdsjl")
# >>> pid = os.fork()
# >>> if not pid:
# >>> kernel.run(httpServer(port=port,dir=srcdir))
# >>> time.sleep(1)
# >>> u = urllib2.urlopen("http://localhost:%d/foo" % port)
# >>> k = u.info().keys()
# >>> k.sort()
# >>> k
# ['content-length', 'content-type', 'date', 'last-modified', 'server']
# >>> u.read()
# 'lakfdsjl'
# >>> os.kill(pid,9)
"""
# Note: Switched from ForkingMixIn to ThreadingMixIn around
# 4.2.8.206 in order to remove nasty race condition between the
# waitpid() calls generated by the popen2 library in
# ISFS.updateExec and by the SocketServer.ForkingMixIn. The HTTP
# server was sometimes reaping exec processes and stealing the
# exit status... ForkingMixIn is *not* thread-safe or
# microtask-safe, because it calls waitpid(0, ...) rather than
# using the child pid list it already has. Argh.
def logger(*args):
msg = str(args)
open("/tmp/isconf.http.log",'a').write(msg+"\n")
SimpleHTTPRequestHandler.log_message = logger
if not os.path.isdir(dir):
os.makedirs(dir,0700)
os.chdir(dir)
class ThreadingServer(ThreadingMixIn,HTTPServer): pass
serveraddr = ('',port)
svr = ThreadingServer(serveraddr,SimpleHTTPRequestHandler)
svr.daemon_threads = True
svr.socket.setblocking(0)
debug("HTTP server serving %s on port %d" % (dir,port))
while True:
yield None
try:
request, client_address = svr.get_request()
except socket.error:
yield kernel.sigsleep, .1
# includes EAGAIN
continue
except Exception, e:
debug("get_request exception:", str(e))
yield kernel.sigsleep, 1
continue
# XXX filter request -- e.g. do we need directory listings?
try:
# process_request does the fork... For now we're going to
# say that it's okay that the Kernel and other tasks fork
# with it; since process_request does not yield, nothing
# else will run in the child before it exits.
os.chdir(dir)
svr.process_request(request, client_address)
except:
svr.handle_error(request, client_address)
svr.close_request(request)
class Hmac:
'''HMAC key management
>>> HMAC = Hmac(ckfreq=1)
>>> keyfile = "/tmp/hmac_keys-test-case-data"
>>> factory = fbp822()
>>> msg = factory.mkmsg('red apple')
>>> os.environ['IS_HMAC_KEYS'] = ""
>>> msg.hmacset('foo')
'8ca8301bb1a077358ce8c3e9a601d83a2643f33d'
>>> HMAC.msgck(msg)
True
>>> os.environ['IS_HMAC_KEYS'] = keyfile
>>> open(keyfile,'w').write("\\n\\n")
>>> time.sleep(2)
>>> msg.hmacset('foo')
'8ca8301bb1a077358ce8c3e9a601d83a2643f33d'
>>> HMAC.msgck(msg)
True
>>> open(keyfile,'w').write("someauthenticationkey\\nanotherkey\\n")
>>> time.sleep(2)
>>> HMAC.msgset(msg)
'0abf42fd374fc75cdc4bd0284f4c9ec48f9e0569'
>>> HMAC.msgck(msg)
True
>>> msg.hmacset('foo')
'8ca8301bb1a077358ce8c3e9a601d83a2643f33d'
>>> HMAC.msgck(msg)
False
>>> msg.hmacset('anotherkey')
'51116aaa8bc9de5078850b9347aa95ada066b259'
>>> HMAC.msgck(msg)
True
>>> msg.hmacset('someauthenticationkey')
'0abf42fd374fc75cdc4bd0284f4c9ec48f9e0569'
>>> HMAC.msgck(msg)
True
>>> res = HMAC.response('foo')
>>> res
'525a59615b881ab282ca60b2ab31e82aec7e31db'
>>> HMAC.ck('foo',res)
True
>>> HMAC.ck('foo','afds')
False
>>> HMAC.ck('bar',res)
False
>>> open(keyfile,'a').write("+ANY+\\n")
>>> time.sleep(2)
>>> HMAC.msgset(msg)
'0abf42fd374fc75cdc4bd0284f4c9ec48f9e0569'
>>> HMAC.msgck(msg)
True
>>> msg.hmacset('foo')
'8ca8301bb1a077358ce8c3e9a601d83a2643f33d'
>>> HMAC.msgck(msg)
True
>>> HMAC.ck('foo','afds')
True
'''
def __init__(self,ckfreq=10):
self.expires = 0
self.mtime = 0
self.ckfreq = ckfreq
self.reset()
def reset(self):
self._keys = []
self.any = False
def reload(self):
path = os.environ.get('IS_HMAC_KEYS',None)
if not path:
return []
if time.time() > self.expires \
and os.path.exists(path) \
and self.mtime < getmtime_int(path):
self.expires = time.time() + self.ckfreq
debug("reloading",path)
self.mtime = getmtime_int(path)
self.reset()
for line in open(path,'r').readlines():
line = line.strip()
if line.startswith('#'):
continue
if not len(line):
continue
if line == '+ANY+':
self.any = True
continue
self._keys.append(line)
# debug('XXX keys',self._keys)
return self._keys
def msgck(self,msg):
keys = self.reload()
if not len(keys):
return True
if self.any:
return True
for key in keys:
if msg.hmacok(key):
return True
return False
def msgset(self,msg):
keys = self.reload()
if not len(keys):
return
key = keys[0]
return msg.hmacset(key)
def ck(self,challenge,response):
debug('ck(): challenge',challenge)
debug('ck(): response',response)
keys = self.reload()
if not len(keys):
return True
if self.any:
return True
for key in keys:
h = hmac.new(key,msg=challenge,digestmod=sha)
digest = h.hexdigest()
if digest == response:
debug('ck: response ok')
debug('XXX ck(): key',key)
return True
debug('ck: bad response')
return False
def response(self,challenge):
keys = self.reload()
if not len(keys):
return
key = keys[0]
h = hmac.new(key,msg=challenge,digestmod=sha)
response = h.hexdigest()
debug('response(): challenge',challenge)
debug('response(): response',response)
return response
HMAC = Hmac()<|fim▁end|> | # see if they've all been filled or timed out
# debug(str(self.req))
if not self.req:
# okay, all done -- touch the file so ISFS knows |
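The challenge/response exchanged between wget() and the HTTP server above boils down to HMAC-SHA1 over a shared key; a standalone sketch of the same handshake (Python 3 stdlib; the key value is hypothetical):

import binascii, hashlib, hmac, os

key = b'someauthenticationkey'                          # shared via IS_HMAC_KEYS
challenge = binascii.hexlify(os.urandom(16)).decode()   # requester's nonce, sent in the URL
response = hmac.new(key, challenge.encode(), hashlib.sha1).hexdigest()  # server's x-hmac header
assert hmac.new(key, challenge.encode(), hashlib.sha1).hexdigest() == response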
<|file_name|>model.py<|end_file_name|><|fim▁begin|>import os
path = os.path.dirname(os.path.realpath(__file__))
sbmlFilePath = os.path.join(path, 'MODEL0912503622.xml')
<|fim▁hole|> sbmlString = f.read()
def module_exists(module_name):
try:
__import__(module_name)
except ImportError:
return False
else:
return True
if module_exists('libsbml'):
import libsbml
sbml = libsbml.readSBMLFromString(sbmlString)<|fim▁end|> | with open(sbmlFilePath,'r') as f: |
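Continuing the guarded-import pattern, callers can reuse module_exists() before touching the parsed document; a short sketch (the model id printed depends on the SBML file):

if module_exists('libsbml'):
    model = sbml.getModel()  # SBMLDocument.getModel()
    if model is not None:
        print(model.getId())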
<|file_name|>community-modlog.component.ts<|end_file_name|><|fim▁begin|>import { Component, OnInit } from '@angular/core';
import {Router, ActivatedRoute} from '@angular/router';
import {CommunityService} from '../../services';
import {ToasterService} from 'angular2-toaster/angular2-toaster';
import {CommunityRole, Community} from '../../shared';
@Component({
selector: 'app-community-modlog',
templateUrl: './community-modlog.component.html',<|fim▁hole|>
public sub: any;
public modlog: Array<any>;
constructor(private route: ActivatedRoute,
private router: Router,
private communityService: CommunityService,
private toasterService: ToasterService) { }
ngOnInit() {
this.sub = this.route.params.subscribe(params => {
let communityId: number = +params['communityId'];
this.getCommunityModlog(communityId);
});
}
ngOnDestroy() {
this.sub.unsubscribe();
}
getCommunityModlog(communityId: number) {
this.communityService.getCommunityModlog(communityId).subscribe(c => {
this.modlog = c;
},
error => {
this.toasterService.pop("error", "Error", error);
this.router.navigate(['/']);
});
}
getRole(id: number): string {
return CommunityRole[id];
}
}<|fim▁end|> | styleUrls: ['./community-modlog.component.css'],
})
export class CommunityModlogComponent implements OnInit { |
<|file_name|>setup.js<|end_file_name|><|fim▁begin|>//configure requirejs
var requirejs = require('requirejs');<|fim▁hole|>global.RENDER = false;
//export requirejs
module.exports = {
require: requirejs
};<|fim▁end|> | requirejs.config({ baseUrl: __dirname + '/../javascripts', nodeRequire: require });
//turn off rendering for commandline unit tests
var global = requirejs('global'); |
<|file_name|>weka.py<|end_file_name|><|fim▁begin|># Natural Language Toolkit: Interface to Weka Classifiers
#
# Copyright (C) 2001-2008 University of Pennsylvania
# Author: Edward Loper <[email protected]>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
#
# $Id: naivebayes.py 2063 2004-07-17 21:02:24Z edloper $
import time, tempfile, os, os.path, subprocess, re
from api import *
from nltk.probability import *
from nltk.internals import java, config_java
"""
Classifiers that make use of the external 'Weka' package.
"""
_weka_classpath = None
_weka_search = ['.',
'/usr/share/weka',
'/usr/local/share/weka',
'/usr/lib/weka',
'/usr/local/lib/weka',]
def config_weka(classpath=None):
global _weka_classpath
# Make sure java's configured first.
config_java()
if classpath is not None:
_weka_classpath = classpath
if _weka_classpath is None:
searchpath = _weka_search
if 'WEKAHOME' in os.environ:
searchpath.insert(0, os.environ['WEKAHOME'])
for path in searchpath:
if os.path.exists(os.path.join(path, 'weka.jar')):
_weka_classpath = os.path.join(path, 'weka.jar')
print '[Found Weka: %s]' % _weka_classpath
if _weka_classpath is None:
raise LookupError('Unable to find weka.jar! Use config_weka() '
'or set the WEKAHOME environment variable. '
'For more information about Weka, please see '
'http://www.cs.waikato.ac.nz/ml/weka/')
class WekaClassifier(ClassifierI):
def __init__(self, formatter, model_filename):
self._formatter = formatter
self._model = model_filename
def batch_prob_classify(self, featuresets):
return self._batch_classify(featuresets, ['-p', '0', '-distribution'])
def batch_classify(self, featuresets):
return self._batch_classify(featuresets, ['-p', '0'])
def _batch_classify(self, featuresets, options):
# Make sure we can find java & weka.
config_weka()
temp_dir = tempfile.mkdtemp()
try:
# Write the test data file.
test_filename = os.path.join(temp_dir, 'test.arff')
self._formatter.write(test_filename, featuresets)
# Call weka to classify the data.
cmd = ['weka.classifiers.bayes.NaiveBayes',
'-l', self._model, '-T', test_filename] + options
(stdout, stderr) = java(cmd, classpath=_weka_classpath,
stdout=subprocess.PIPE)
# Parse weka's output.
return self.parse_weka_output(stdout.split('\n'))
finally:
for f in os.listdir(temp_dir):
os.remove(os.path.join(temp_dir, f))
os.rmdir(temp_dir)
def parse_weka_distribution(self, s):
probs = [float(v) for v in re.split('[*,]+', s) if v.strip()]
probs = dict(zip(self._formatter.labels(), probs))
return DictionaryProbDist(probs)
def parse_weka_output(self, lines):
if lines[0].split() == ['inst#', 'actual', 'predicted',
'error', 'prediction']:
return [line.split()[2].split(':')[1]
for line in lines[1:] if line.strip()]
elif lines[0].split() == ['inst#', 'actual', 'predicted',
'error', 'distribution']:
return [self.parse_weka_distribution(line.split()[-1])
for line in lines[1:] if line.strip()]
else:
for line in lines[:10]: print line
raise ValueError('Unhandled output format -- your version '
'of weka may not be supported.\n'
' Header: %s' % lines[0])
@staticmethod
def train(model_filename, featuresets, quiet=True):
# Make sure we can find java & weka.
config_weka()
# Build an ARFF formatter.
formatter = ARFF_Formatter.from_train(featuresets)
temp_dir = tempfile.mkdtemp()
try:
# Write the training data file.
train_filename = os.path.join(temp_dir, 'train.arff')
formatter.write(train_filename, featuresets)
# Train the weka model.
cmd = ['weka.classifiers.bayes.NaiveBayes',
'-d', model_filename, '-t', train_filename]
if quiet: stdout = subprocess.PIPE
else: stdout = None
java(cmd, classpath=_weka_classpath, stdout=stdout)
# Return the new classifier.
return WekaClassifier(formatter, model_filename)
finally:
for f in os.listdir(temp_dir):
os.remove(os.path.join(temp_dir, f))
os.rmdir(temp_dir)
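# Rough end-to-end sketch (hedged): assumes weka.jar is locatable and that
# featuresets are shaped like [(feature_dict, label), ...]; the names and
# the model path below are illustrative.
#
#   >>> train_toks = [({'last_letter': 'a'}, 'female'),
#   ...               ({'last_letter': 'k'}, 'male')]
#   >>> classifier = WekaClassifier.train('/tmp/names.model', train_toks)  # doctest: +SKIP
#   >>> classifier.batch_classify([{'last_letter': 'a'}])                  # doctest: +SKIP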
class ARFF_Formatter:
"""
Converts featuresets and labeled featuresets to ARFF-formatted
strings, appropriate for input into Weka.
"""
def __init__(self, labels, features):
"""
@param labels: A list of all labels that can be generated.
@param features: A list of feature specifications, where
each feature specification is a tuple (fname, ftype);
and ftype is an ARFF type string such as NUMERIC or
STRING.
"""
self._labels = labels
self._features = features
def format(self, tokens):
return self.header_section() + self.data_section(tokens)
def labels(self):
return list(self._labels)
def write(self, filename, tokens):
f = open(filename, 'w')
f.write(self.format(tokens))
f.close()
@staticmethod
def from_train(tokens):
# Find the set of all attested labels.
labels = set(label for (tok,label) in tokens)
# Determine the types of all features.
features = {}
for tok, label in tokens:
for (fname, fval) in tok.items():
if issubclass(type(fval), bool):
ftype = '{True, False}'
elif issubclass(type(fval), (int, float, long, bool)):
ftype = 'NUMERIC'
elif issubclass(type(fval), basestring):
ftype = 'STRING'
elif fval is None:
continue # can't tell the type.
else:
                    raise ValueError('Unsupported value type %r' % fval)
if features.get(fname, ftype) != ftype:
raise ValueError('Inconsistent type for %s' % fname)
features[fname] = ftype
features = sorted(features.items())
return ARFF_Formatter(labels, features)
def header_section(self):
# Header comment.
s = ('% Weka ARFF file\n' +
'% Generated automatically by NLTK\n' +
'%% %s\n\n' % time.ctime())
# Relation name
s += '@RELATION rel\n\n'
# Input attribute specifications
for fname, ftype in self._features:
s += '@ATTRIBUTE %-30r %s\n' % (fname, ftype)
# Label attribute specification
s += '@ATTRIBUTE %-30r {%s}\n' % ('-label-', ','.join(self._labels))
return s
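    # For reference, the header produced above looks roughly like the
    # sketch below (attribute names and labels are illustrative; note
    # that %-30r repr-quotes each attribute name):
    #
    #   % Weka ARFF file
    #   % Generated automatically by NLTK
    #
    #   @RELATION rel
    #
    #   @ATTRIBUTE 'last_letter'           STRING
    #   @ATTRIBUTE '-label-'               {male,female}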
def data_section(self, tokens, labeled=None):
"""
@param labeled: Indicates whether the given tokens are labeled
or not. If C{None}, then the tokens will be assumed to be
labeled if the first token's value is a tuple or list.
"""
        # Check whether the tokens are labeled or unlabeled.  If they are
        # unlabeled, pair each token with a label of None.
if labeled is None:
labeled = tokens and isinstance(tokens[0], (tuple, list))
if not labeled:
tokens = [(tok, None) for tok in tokens]
# Data section<|fim▁hole|> for fname, ftype in self._features:
s += '%s,' % self._fmt_arff_val(tok.get(fname))
s += '%s\n' % self._fmt_arff_val(label)
return s
def _fmt_arff_val(self, fval):
if fval is None:
return '?'
elif isinstance(fval, (bool, int, long)):
return '%s' % fval
elif isinstance(fval, float):
return '%r' % fval
else:
return '%r' % fval
if __name__ == '__main__':
    from nltk.classify.util import names_demo, binary_names_demo_features
def make_classifier(featuresets):
return WekaClassifier.train('/tmp/name.model', featuresets)
classifier = names_demo(make_classifier,binary_names_demo_features)<|fim▁end|> | s = '\n@DATA\n'
for (tok, label) in tokens: |
<|file_name|>SMB2_Header.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# ============================================================================ #
# SMB2_Header.py
#
# Copyright:
# Copyright (C) 2016 by Christopher R. Hertel
#
# $Id: SMB2_Header.py; 2019-06-18 17:56:20 -0500; crh$
#
# ---------------------------------------------------------------------------- #
#
# Description:
# Carnaval Toolkit: SMB2+ message header parsing and composition.
#
# ---------------------------------------------------------------------------- #
#
# License:
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# See Also:
# The 0.README file included with the distribution.
#
# ---------------------------------------------------------------------------- #
# This code was developed in participation with the
# Protocol Freedom Information Foundation.
# <www.protocolfreedom.org>
# ---------------------------------------------------------------------------- #
#
# Notes:
#
# - This module provides the basic tools used to compose and decompose
# SMB2/3 message headers. This module can be used by both client and
# server implementations.
#
# - The specific dialects considered by this module are:
# Common Name | Official Name | Dialect ID
# ============|===============|===========
# SMB2.0 | SMB 2.0.2 | 0x0202
# SMB2.1 | SMB 2.1 | 0x0210
# SMB3.0 | SMB 3.0 | 0x0300
# SMB3.02 | SMB 3.0.2 | 0x0302
# SMB3.11 | SMB 3.1.1 | 0x0311
#
# Others can be added as they are conjured up from the underworld.
#
# - The Python <int> type is "at least" 32 bits, but it's signed, so to
# be safe we use the <long> type to handle ULONG field values. That
# ensures that unsigned 32-bit field values are handled correctly.
# The <long> type can be used to store UINT32 and UINT64 values, as
# well as shorter integer types.
# See: https://docs.python.org/2/library/stdtypes.html#typesnumeric
#
# - This project, overall, is designed to protect against sending invalid
# field values. It also, to some extent, protects against invalid values
# in received messages. However, to make it easy to do protocol testing,
# these protections can be easily bypassed.
#
# References:
#
# [MS-SMB2] Microsoft Corporation, "Server Message Block (SMB)
# Protocol Versions 2 and 3",
# http://msdn.microsoft.com/en-us/library/cc246482.aspx
#
# ToDo:
# - Add more unit tests.
# - Add support for "related commands" (NextCommand chaining).
# - Add support for transform headers (\xfdSMB).
# - Extend the context information to include more connection-related
# data, including GUID, flags, etc.
# - Check the assert() calls in setters when decomposing a message header.
# We want consistent error handling, and asserts() can be compiled out.
# - Allow (and keep) invalid values where docs say "must ignore".
#
# FIX:
# - Use exceptions from SMB_Core.
#
# Moose:
#
# \_\_ _/_/
# \__/
# (oo)
# (..)
# --
#
# ============================================================================ #
#
"""Carnaval Toolkit: SMB2+ message header packing and parsing.
Common classes, functions, etc., for packing and unpacking SMB2+ Headers.
This module deals with structures common to both the client and server.
CONSTANTS:
Protocol constants:
SMB2_MSG_PROTOCOL : \\xFESMB; SMB2 message prefix (protocol ID).
4 bytes.
SMB2_HDR_SIZE : The fixed length of an SMB2+ message header
(64 bytes).
Supported SMB2+ dialect revision codes:
SMB2_DIALECT_202 : SMB 2.0.2 dialect revision (Vista, W2K8 Server)
SMB2_DIALECT_210 : SMB 2.1 dialect revision (Win7, W2K8r2 Server)
SMB2_DIALECT_300 : SMB 3.0 dialect revision (Win8, W2K12 Server)
SMB2_DIALECT_302 : SMB 3.0.2 dialect revision (Win8.1, W2K12r2 Server)
SMB2_DIALECT_311 : SMB 3.1.1 dialect revision (Win10, 2016 Server)
SMB2_DIALECT_LIST : A list of all supported dialects, ordered from
lowest to highest.
SMB2_DIALECT_MIN : The lowest supported dialect.
SMB2_DIALECT_MAX : The highest supported dialect.
SMB2+ command codes:
SMB2_COM_NEGOTIATE : Dialect and feature support negotiation.
SMB2_COM_SESSION_SETUP : Authentication and session establishment.
SMB2_COM_LOGOFF : Close a session; log out.
SMB2_COM_TREE_CONNECT : Connect to a remote share; mount.
SMB2_COM_TREE_DISCONNECT : Disconnect a connected share; umount.
SMB2_COM_CREATE : Create/open a filesystem object (file).
SMB2_COM_CLOSE : Close a previously opened handle.
SMB2_COM_FLUSH : Push data to disk (or thereabouts).
SMB2_COM_READ : Get some data.
SMB2_COM_WRITE : Put some data.
SMB2_COM_LOCK : Byte-range locks.
SMB2_COM_IOCTL : Do fiddly stuff.
SMB2_COM_CANCEL : Don't do whatever you're waiting to do.
SMB2_COM_ECHO : Ping!
SMB2_COM_QUERY_DIRECTORY : Find things in the Object Store.
SMB2_COM_CHANGE_NOTIFY : Let me know if something happens.
SMB2_COM_QUERY_INFO : Get some metadata.
SMB2_COM_SET_INFO : Put some metadata.
SMB2_COM_OPLOCK_BREAK : Server->client lease/oplock break.
SMB2+ header flags:
SMB2_FLAGS_SERVER_TO_REDIR : Response
SMB2_FLAGS_ASYNC_COMMAND : Async
SMB2_FLAGS_RELATED_OPERATIONS : Chained command
SMB2_FLAGS_SIGNED : Signed packet
SMB2_FLAGS_DFS_OPERATIONS : Distributed File System
SMB2_FLAGS_REPLAY_OPERATION : SMB3 Replay
SMB2_FLAGS_MASK : Flags Bitmask
"""
# Imports -------------------------------------------------------------------- #
#
import struct # Binary data handling.
from SMB_Status import * # Windows NT Status Codes.
from common.HexDump import hexstr # Convert binary data to readable output.
from common.HexDump import hexstrchop # Ditto, but with linewrap.
from common.HexDump import hexdump # Formatted hex dump à la hexdump(1).
# Constants ------------------------------------------------------------------ #
#
# Protocol constants
SMB2_MSG_PROTOCOL = '\xFESMB' # Standard SMB2 message prefix (protocol ID).
SMB2_HDR_SIZE = 64 # Fixed SMB2+ header size.
# Known SMB2+ dialect revision codes.
# An unknown or undefined dialect is indicated using <None>.
SMB2_DIALECT_202 = 0x0202 # SMB 2.0.2 dialect revision (Vista/W2K8 Server)
SMB2_DIALECT_210 = 0x0210 # SMB 2.1 dialect revision (Win7/W2K8r2 Server)
SMB2_DIALECT_300 = 0x0300 # SMB 3.0 dialect revision (Win8/W2K12 Server)
SMB2_DIALECT_302 = 0x0302 # SMB 3.0.2 dialect revision (Win8.1/W2K12r2 Server)
SMB2_DIALECT_311 = 0x0311 # SMB 3.1.1 dialect revision (Win10/W2K16 Server)
# List of supported dialects, in order from oldest to newest.
SMB2_DIALECT_LIST = [ SMB2_DIALECT_202,
SMB2_DIALECT_210,
SMB2_DIALECT_300,
SMB2_DIALECT_302,
SMB2_DIALECT_311 ]
SMB2_DIALECT_MIN = SMB2_DIALECT_LIST[0] # Oldest supported revision.
SMB2_DIALECT_MAX = SMB2_DIALECT_LIST[-1] # Newest supported revision.
# SMB2/3 command codes (there are, currently, 19 SMB2+ command codes).
SMB2_COM_NEGOTIATE = 0x0000 # 0
SMB2_COM_SESSION_SETUP = 0x0001 # 1
SMB2_COM_LOGOFF = 0x0002 # 2
SMB2_COM_TREE_CONNECT = 0x0003 # 3
SMB2_COM_TREE_DISCONNECT = 0x0004 # 4
SMB2_COM_CREATE = 0x0005 # 5
SMB2_COM_CLOSE = 0x0006 # 6
SMB2_COM_FLUSH = 0x0007 # 7
SMB2_COM_READ = 0x0008 # 8
SMB2_COM_WRITE = 0x0009 # 9
SMB2_COM_LOCK = 0x000A # 10
SMB2_COM_IOCTL = 0x000B # 11
SMB2_COM_CANCEL = 0x000C # 12
SMB2_COM_ECHO = 0x000D # 13
SMB2_COM_QUERY_DIRECTORY = 0x000E # 14
SMB2_COM_CHANGE_NOTIFY = 0x000F # 15
SMB2_COM_QUERY_INFO = 0x0010 # 16
SMB2_COM_SET_INFO = 0x0011 # 17
SMB2_COM_OPLOCK_BREAK = 0x0012 # 18
# SMB2/3 header flags
SMB2_FLAGS_SERVER_TO_REDIR = 0x00000001 # Response
SMB2_FLAGS_ASYNC_COMMAND = 0x00000002 # Async
SMB2_FLAGS_RELATED_OPERATIONS = 0x00000004 # ANDX
SMB2_FLAGS_SIGNED = 0x00000008 # Signed packet
SMB2_FLAGS_DFS_OPERATIONS = 0x10000000 # Distributed File System (DFS)
SMB2_FLAGS_REPLAY_OPERATION = 0x20000000 # SMB3 Replay
SMB2_FLAGS_PRIORITY_MASK = 0x00000070 # SMB311 priority bits
SMB2_FLAGS_MASK = 0x3000007F # Bitmask
# Max Size values
_UCHAR_MAX = 0xFF # Bitmask for Unsigned 8-bit (UCHAR) values.
_USHORT_MAX = 0xFFFF # Bitmask for Unsigned 16-bit (USHORT) values.
_ULONG_MAX = 0xFFFFFFFF # Bitmask for Unsigned 32-bit (ULONG) values.
_UINT64_MAX = (2**64) - 1 # Bitmask for Unsigned 64-bit (UINT64) values.
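# For orientation, the 64-byte header layout encoded by the struct formats
# below (see [MS-SMB2; 2.2.1]; the two "--or--" fields are the overloaded
# variants described in the class notes):
#
#   Offset  Size  Field
#        0     4  ProtocolId ('\xFESMB')
#        4     2  StructureSize (always 64)
#        6     2  CreditCharge
#        8     4  Status  --or--  ChannelSequence + Reserved1
#       12     2  Command
#       14     2  CreditRequest/CreditResponse
#       16     4  Flags
#       20     4  NextCommand
#       24     8  MessageId
#       32     8  AsyncId  --or--  Reserved2 + TreeId
#       40     8  SessionId
#       48    16  Signature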
# Classes -------------------------------------------------------------------- #
#
class _SMB2_Header( object ):
  # SMB2/SMB3 Message Header; [MS-SMB2; 2.2.1].
#
# This class is used to format both Sync and Async SMB2 headers.
#
# Reminder: SMB2 and SMB3 are names for different sets of dialects of the
# same protocol; SMB3.0 was originally SMB2.2. Can you say
# "Marketing Upgrade"?
#
# Class values:
  # Values instantiated once for the class (so that all instances can use them).
#
# These represent the four possible header formats defined for the
# supported SMB2 dialects. It's basically a 2x2 matrix.
#
# _format_SMB2_StatAsync - Async header, with <status> and <asyncId>.
# _format_SMB2_StatTreeId - Sync header, with <status> and <treeId>.
# _format_SMB2_cSeqAsync - Async header, with <channelSeq> and <asyncId>.
# _format_SMB2_cSeqTreeId - Sync header, with <channelSeq> and <treeId>.
#
# In general, Async headers are sent in server responses that are used to
# tell the client to wait for a pending operation to complete. That is,
# they are "hang on a bit" messages, telling the client not to time out.
#
# A client uses an async header when it is sending a CANCEL request for
# a command for which the server has already sent an Async response.
# That is:
# Command --> (sync)
# <-- Hang on a bit (async)
# Nevermind --> (async)
# <-- Command canceled (sync)
# The middle two are sent using Async headers.
#
# These two additional patterns are used for decoding header variants.
# _format_2H - Two unsigned 16-bit integers.
# _format_Q - One unsigned 64-bit integer.
#
# [MS-SMB2; 2.2.1] also mystically says that the Async header "MAY be used
# for any request", but doesn't explain when or why a client would do such
# a confusing thing.
#
# _cmd_LookupDict - A dictionary that maps command codes to strings.
# This is used for composing error messages, and when
# providing a header dump.
#
_format_SMB2_StatAsync = struct.Struct( '<4s H H L H H L L Q Q Q 16s' )
_format_SMB2_StatTreeId = struct.Struct( '<4s H H L H H L L Q L L Q 16s' )
_format_SMB2_cSeqAsync = struct.Struct( '<4s H H H H H H L L Q Q Q 16s' )
_format_SMB2_cSeqTreeId = struct.Struct( '<4s H H H H H H L L Q L L Q 16s' )
_format_2H = struct.Struct( "<H H" )
_format_Q = struct.Struct( "<Q" )
_cmd_LookupDict = \
{
SMB2_COM_NEGOTIATE : "NEGOTIATE",
SMB2_COM_SESSION_SETUP : "SESSION_SETUP",
SMB2_COM_LOGOFF : "LOGOFF",
SMB2_COM_TREE_CONNECT : "TREE_CONNECT",
SMB2_COM_TREE_DISCONNECT: "TREE_DISCONNECT",
SMB2_COM_CREATE : "CREATE",
SMB2_COM_CLOSE : "CLOSE",
SMB2_COM_FLUSH : "FLUSH",
SMB2_COM_READ : "READ",
SMB2_COM_WRITE : "WRITE",
SMB2_COM_LOCK : "LOCK",
SMB2_COM_IOCTL : "IOCTL",
SMB2_COM_CANCEL : "CANCEL",
SMB2_COM_ECHO : "ECHO",
SMB2_COM_QUERY_DIRECTORY: "QUERY_DIRECTORY",
SMB2_COM_CHANGE_NOTIFY : "CHANGE_NOTIFY",
SMB2_COM_QUERY_INFO : "QUERY_INFO",
SMB2_COM_SET_INFO : "SET_INFO",
SMB2_COM_OPLOCK_BREAK : "OPLOCK_BREAK"
}
# _SMB2_Header class methods:
#
@classmethod
def parseMsg( cls, msgBlob=None, dialect=SMB2_DIALECT_MIN ):
"""Decompose wire data and return an _SMB2_Header object.
Input:
cls - This class.
msgBlob - An array of at least 64 bytes, representing an SMB2+
message in wire format.
dialect - The minimum dialect under which to parse the header.
Output:
An <_SMB2_Header> object.
Errors:
AssertionError - Thrown if:
+ The length of <msgBlob> is less than the
minimum of 64 bytes.
+ The command code parsed from the message is
not a valid command code.
+ The given dialect is not known.
ValueError - Thrown if the packet cannot possibly contain a
valid SMB2+ message header. This exception is
raised if either the ProtocolId field doesn't
contain the correct string, or if the
StructureSize value is incorrect.
Notes:
- This function does not parse SMB3 Transform Headers. An SMB3
Transform header will be rejected with a ValueError.
- Beyond the basics of verifying that ProtocolId and StructureSize
are correct, this function does _no_ validation of the input.
"""
# Fundamental sanity check.
assert( SMB2_HDR_SIZE <= len( msgBlob ) ), "Incomplete message header."
# Parse it. Use the simple sync response format.
tup = cls._format_SMB2_StatTreeId.unpack( msgBlob[:SMB2_HDR_SIZE] )
# Look for trouble.
if( SMB2_MSG_PROTOCOL != tup[0] ):
raise ValueError( "Malformed SMB2 ProtocolId: [%s]." % repr( tup[0] ) )
elif( SMB2_HDR_SIZE != tup[1] ):
s = "The SMB2 Header StructureSize must be 64, not %d." % tup[1]
raise ValueError( s )
# Create and populate a header record instance.
hdr = cls( tup[4], dialect )
hdr._creditCharge = tup[2]
# 3: Status/ChannelSeq/Reserved1; see below
hdr.command = tup[4]
hdr._creditReqResp = tup[5]
hdr._flags = tup[6]
hdr._nextCommand = tup[7]
hdr._messageId = tup[8]
# 9, 10: Reserved2/TreeId/AsyncId; see below
hdr._sessionId = tup[11]
hdr._signature = tup[12]
# Handle the overloaded fields.
if( hdr.flagReply or (dialect < SMB2_DIALECT_300) ):
hdr._status = tup[3]
else:
hdr._channelSeq, hdr._reserved1 = cls._format_2H.unpack( msgBlob[8:12] )
if( hdr.flagAsync ):
      hdr._asyncId = cls._format_Q.unpack( msgBlob[32:40] )[0]
else:
hdr._reserved2 = tup[9]
hdr._treeId = tup[10]
# All done.
return( hdr )
@classmethod
  def commandName( cls, CmdId=0xFF ):
"""Given an SMB2 command code, return the name of the command.
Input:
CmdId - An SMB2/3 command code.
Output: A string.
If <CmdId> is a known SMB2/3 command code, the string
will be the command name. Otherwise, the empty string
is returned.
"""
    if( CmdId in cls._cmd_LookupDict ):
      return( cls._cmd_LookupDict[CmdId] )
return( '' )
def __init__( self, command=None, dialect=SMB2_DIALECT_MIN ):
# Create an SMB2 message header object.
#
# Input:
# command - The command code; one of the SMB2_COM_* values.
# dialect - The dialect version under which this header is being
# created. This is contextual information; in future
# revisions we may need to expand the context data to
# include things like negotiated flag settings, etc.
# Errors:
# AssertionError - Thrown if the given command code is not a
# known code, or if the given dialect is not
# in the list of supported dialects.
# [ TypeError, - Either of these may be thrown if an input value
# ValueError ] cannot be converted into the expected type.
#
# Notes:
# Several SMB2 Header fields are overloaded. For example, the
# <Status> field is a four byte field at offset 8.
# * In the 2.0 and 2.1 dialects, this field MUST be zero in
# Request messages.
# * In the 3.x dalects, in a request message only, the same
# bytes are used for a 2-byte <ChannelSequence> field,
# followed by a 2-byte Reserved-must-be-zero field.
# * In SMB2/3 Response messages, the field is always the 4-byte
# <Status> field.
#
# Similarly, in an Async header the 8 bytes at offset 32 are used
# for the <AsyncId>. In a Sync header, the first four bytes are
# Reserved-must-be-zero, and the next four bytes are the TreeID.
#
self._protocolId = SMB2_MSG_PROTOCOL # 4 bytes
self._headerSize = SMB2_HDR_SIZE # 2 bytes
self._creditCharge = 0 # 2 bytes
self._status = 0 # 4 bytes -- <status> --
self._channelSeq = 0 # 2 bytes \ Same bytes
self._reserved1 = 0 # 2 bytes / as <status>
self.command = command # 2 bytes
self._creditReqResp = 0 # 2 bytes
self._flags = 0 # 4 bytes
self._nextCommand = 0 # 4 bytes
self._messageId = 0 # 8 bytes
self._reserved2 = 0 # 4 bytes \ Same bytes
self._treeId = 0 # 4 bytes / as <asyncId>
self._asyncId = 0 # 8 bytes -- <asyncId> --
self._sessionId = 0 # 8 bytes
self._signature = (16 * '\0') # 16 bytes
# 64 bytes total.
# Context information:
#
    assert( dialect in SMB2_DIALECT_LIST ), "Unknown Dialect: 0x%04X" % dialect
self._dialect = int( dialect )
@property
def creditCharge( self ):
"""Get/set the SMB2_Header.CreditCharge field value (USHORT).
Errors:
AssertionError - Thrown if the assigned value (after conversion
to an <int>) is either negative or greater
than 0xFFFF.
- Thrown if the assigned value is non-zero and
the current dialect is SMBv2.0.2.
[ TypeError, - Either of these may be thrown if the assigned
ValueError ] value cannot be converted into an <int>.
Notes:
It is out of character to throw an exception based on the given
dialect level. This layer does minimal enforcement of
per-dialect syntax rules, generally allowing the caller to make
their own mess. You can, of course, still bypass the assertion
by setting <instance>._creditCharge directly.
"""
return( self._creditCharge )
@creditCharge.setter
def creditCharge( self, cc ):
cc = int( cc )
assert( 0 <= cc <= _USHORT_MAX ), "Assigned value (%d) out of range." % cc
assert( (cc == 0) or (self._dialect > SMB2_DIALECT_202) ), \
"Reserved; Value must be zero in SMBv2.0.2."
self._creditCharge = cc
@property
def status( self ):
"""Get/set the SMB2_Header.status field (ULONG).
Errors:
AssertionError - Thrown if the assigned value (after conversion
to a <long>) is either negative or greater
than 0xFFFFFFFF.
[ TypeError, - Either of these may be thrown if the assigned
ValueError ] value cannot be converted into a <long>.
Notes:
This field should only be set in response messages, and should
be considered "reserved; must be zero" in all requests.
    Starting with SMBv3.0.0, this field is superseded in request
messages by the 16-bit ChannelSequence field (plus an additional
16-bit Reserved field).
It is probably easiest to think of it this way:
- There is no <Status> field in request messages; it only exists
in response messages.
- If the dialect is less than 0x0300, then there is a 32-bit
"Reserved Must Be Zero" field where the <Status> field might
otherwise exist.
- If the dialect is 0x0300 or greater, then there is a 16-bit
<ChannelSequence> field followed by a 16-bit "Reserved Must Be
Zero" field where the <Status> might otherwise exist.
"""
return( self._status )
@status.setter
def status( self, st ):
st = 0L if( not st ) else long( st )
assert( 0 <= st <= _ULONG_MAX ), \
"Assigned value (0x%08X) out of range." % st
self._status = st
@property
def channelSeq( self ):
"""Get/set the Channel Sequence value (USHORT).
AssertionError - Thrown if the assigned value (after conversion
to an <int>) is either negative or greater
than 0xFFFF.
[ TypeError, - Either of these may be thrown if the assigned
ValueError ] value cannot be converted into an <int>.
Notes:
The ChannelSequence value is only recognized in request messages,
and only if the dialect is 0x0300 or greater. That is, this
      field does not exist in SMB2.x, only in SMB3.x.  In all
      responses, and in dialects prior to 0x0300, the bytes of this
field are always seen as part of the Status field.
"""
return( self._channelSeq )
@channelSeq.setter
def channelSeq( self, cs ):
cs = int( cs )
assert( 0 <= cs <= _USHORT_MAX ), "Assigned value (%d) out of range." % cs
self._channelSeq = cs
@property
def command( self ):
"""Get/set the SMB2_Header.Command (UCHAR).
Errors: [ AssertionError, TypeError, ValueError ]
Thrown if the assigned value cannot be converted into a valid
SMB2 command code.
"""
return( self._command )
@command.setter
def command( self, cmd ):
cmd = int( cmd )
assert( 0 <= cmd <= 0x12 ), "Unknown command code: 0x%04X." % cmd
self._command = cmd
@property
def creditReqResp( self ):
"""Get/set the Credit Request / Credit Response value (USHORT).
Errors:
AssertionError - Thrown if the assigned value (after conversion
to an <int>) is either negative or greater
than 0xFFFF.
[ TypeError, - Either of these may be thrown if the assigned
ValueError ] value cannot be converted into an <int>.
ToDo: Document how and when this is used; references.
The credit management subsystem needs study.
"""
return( self._creditReqResp )
@creditReqResp.setter
def creditReqResp( self, crr ):
crr = int( crr )
assert( 0 <= crr <= _USHORT_MAX ), \
"Assigned value (%d) out of range." % crr
self._creditReqResp = crr
@property
def flags( self ):
"""Get/set the Flags field (ULONG).
Errors:
AssertionError - Thrown if the assigned value (after conversion
to a <long>) has bits that are set which do not
represent a known SMB2+ flag.
[ TypeError, - Either of these may be thrown if the assigned
ValueError ] value cannot be converted into a <long>.
"""
return( self._flags )
@flags.setter
def flags( self, flags ):
flgs = long( flags )
assert( flgs == (flgs & SMB2_FLAGS_MASK) ), "Unrecognized flag bit(s)."
self._flags = flgs
# Note: See below for per-flag get/set properties.
@property
def nextCommand( self ):
"""Get/set the Next Command offset value (ULONG).
Errors:
AssertionError - Thrown if the assigned value (after conversion
to a <long>) is either negative, or greater
than (2^32)-1.
[ TypeError, - Either of these may be thrown if the assigned
ValueError ] value cannot be converted into a <long>.
"""
return( self._nextCommand )
@nextCommand.setter
def nextCommand( self, nextOffset ):
nc = long( nextOffset )
assert( 0 <= nc <= _ULONG_MAX ), \
"Invalid Related Command Offset: %d." % nc
self._nextCommand = nc
@property
def messageId( self ):
"""Get/set the Message ID value (UINT64).
Errors:
AssertionError - Thrown if the assigned value (after conversion
to a <long>) is either negative, or greater
than (2^64)-1.
[ TypeError, - Either of these may be thrown if the assigned
ValueError ] value cannot be converted into a <long>.
"""
return( self._messageId )
@messageId.setter
def messageId( self, messageId ):
mi = long( messageId )
assert( 0 <= mi <= _UINT64_MAX ), \
"Assigned value (%d) out of range." % mi
self._messageId = mi
@property
def treeId( self ):
"""Get/set the Tree Connect ID (ULONG).
Errors:
AssertionError - Thrown if the assigned value (after conversion
to a <long>) is either negative or greater
than 0xFFFFFFFF.
[ TypeError, - Either of these may be thrown if the assigned
ValueError ] value cannot be converted into a <long>.
"""
return( self._treeId )
@treeId.setter
def treeId( self, treeId ):
tid = long( treeId )
assert( 0 <= tid <= _ULONG_MAX ), \
"Assigned value (%d) out of range." % tid
self._treeId = tid
@property
def asyncId( self ):
"""Get/set the Async Id (UINT64).
Errors:
AssertionError - Thrown if the assigned value (after conversion
to a <long>) is either negative or greater
than (2^64)-1.
[ TypeError, - Either of these may be thrown if the assigned
ValueError ] value cannot be converted into a <long>.
"""
return( self._asyncId )
@asyncId.setter
  def asyncId( self, asyncId ):
ai = long( asyncId )
assert( 0 <= ai <= _UINT64_MAX ), \
"Assigned value (%d) out of range." % ai
self._asyncId = ai
@property
def sessionId( self ):
"""Get/set the Session Id (UINT64).<|fim▁hole|>
Errors:
AssertionError - Thrown if the assigned value (after conversion
to a <long>) is either negative or greater
than (2^64)-1.
[ TypeError, - Either of these may be thrown if the assigned
ValueError ] value cannot be converted into a <long>.
"""
return( self._sessionId )
@sessionId.setter
def sessionId( self, sessionId ):
si = long( sessionId )
assert( 0 <= si <= _UINT64_MAX ), \
"Assigned value (%d) out of range." % si
self._sessionId = si
@property
def signature( self ):
"""Get/set the packet signature.
Errors:
AssertionError - Thrown if the string representation of the
assigned value is not exactly 16 bytes.
SyntaxError - Thrown if the assigned value is not of type
<str> and cannot be converted to type <str>.
"""
return( self._signature )
@signature.setter
def signature( self, signature ):
sig = str( signature )
assert( 16 == len( sig ) ), "Exactly 16 bytes required."
self._signature = sig
# Flag bitfield properties.
# _flag[S|G]et() generically handles getting and setting of
# individual flag bits.
def _flagGet( self, flag ):
return( bool( flag & self._flags ) )
def _flagSet( self, flag, bitState ):
if( bitState ):
self._flags |= flag
else:
self._flags &= ~flag
@property
def flagReply( self ):
"""Get/set the SMB2_FLAGS_SERVER_TO_REDIR (Reply) bit.
The assigned value is evaluated as a boolean:
True = set the bit; False = clear it.
"""
return( self._flagGet( SMB2_FLAGS_SERVER_TO_REDIR ) )
@flagReply.setter
def flagReply( self, bitState ):
self._flagSet( SMB2_FLAGS_SERVER_TO_REDIR, bitState )
@property
def flagAsync( self ):
"""Get/set the SMB2_FLAGS_ASYNC_COMMAND (Async) bit.
The assigned value is evaluated as a boolean:
True = set the bit; False = clear it.
"""
return( self._flagGet( SMB2_FLAGS_ASYNC_COMMAND ) )
@flagAsync.setter
def flagAsync( self, bitState ):
self._flagSet( SMB2_FLAGS_ASYNC_COMMAND, bitState )
@property
def flagNext( self ):
"""Get/set the SMB2_FLAGS_RELATED_OPERATIONS (Next) bit.
The assigned value is evaluated as a boolean:
True = set the bit; False = clear it.
"""
return( self._flagGet( SMB2_FLAGS_RELATED_OPERATIONS ) )
@flagNext.setter
def flagNext( self, bitState ):
self._flagSet( SMB2_FLAGS_RELATED_OPERATIONS, bitState )
@property
def flagSigned( self ):
"""Get/set the SMB2_FLAGS_SIGNED (Signed) bit.
The assigned value is evaluated as a boolean:
True = set the bit; False = clear it.
"""
return( self._flagGet( SMB2_FLAGS_SIGNED ) )
@flagSigned.setter
def flagSigned( self, bitState ):
self._flagSet( SMB2_FLAGS_SIGNED, bitState )
@property
def flagDFS( self ):
"""Get/set the SMB2_FLAGS_DFS_OPERATIONS (DFS) bit.
The assigned value is evaluated as a boolean:
True = set the bit; False = clear it.
"""
return( self._flagGet( SMB2_FLAGS_DFS_OPERATIONS ) )
@flagDFS.setter
def flagDFS( self, bitState ):
self._flagSet( SMB2_FLAGS_DFS_OPERATIONS, bitState )
@property
def flagReplay( self ):
"""Get/set the SMB2_FLAGS_REPLAY_OPERATION (Replay) bit.
The assigned value is evaluated as a boolean:
True = set the bit; False = clear it.
"""
return( self._flagGet( SMB2_FLAGS_REPLAY_OPERATION ) )
@flagReplay.setter
def flagReplay( self, bitState ):
self._flagSet( SMB2_FLAGS_REPLAY_OPERATION, bitState )
@property
def flagPriority( self ):
"""Get/set the SMBv3.1.1+ Priority subfield.
This value is actually a 3-bit integer (in the range 0..7).
Errors:
ValueError - Thrown if the assigned value is outside of the
valid range.
"""
return( (self._flags & SMB2_FLAGS_PRIORITY_MASK) >> 4 )
@flagPriority.setter
def flagPriority( self, prioVal ):
if( prioVal not in range( 8 ) ):
raise ValueError( "Assigned value (%d) out of range." % prioVal )
self._flags &= ~SMB2_FLAGS_PRIORITY_MASK
self._flags |= (prioVal << 4)
def dump( self, indent=0 ):
# Produce a nicely formatted dump of the SMB2 header.
#
# Input:
# indent - Number of spaces to indent the formatted output.
#
    # Output: A string, presenting the formatted SMB2 header fields.
#
# Notes: If the message is a request and the dialect is at least
# 0x0300, the ChannelSequence (and a Reserved field) will
# replace the Status field (which would otherwise go unused
# in a request). This is a protocol modification introduced
# with the 3.0 dialect.
#
ind = ' ' * indent
cmdName = self.commandName( self._command )
cmdName = "<unknown>" if( not cmdName ) else cmdName
statName = NTStatus( self._status )
statName = "\n" if( statName is None ) else " [%s]\n" % statName.name
# Stuff...
s = ind + "ProtocolId...: %s\n" % hexstr( self._protocolId[:4] )
s += ind + "StructureSize: 0x{0:04X} ({0:d})\n".format( self._headerSize )
s += ind + "CreditCharge.: 0x{0:04X} ({0:d})\n".format( self._creditCharge )
# Status/Reserved1
if( self.flagReply or self._dialect < SMB2_DIALECT_300 ):
s += ind + "Status.......: 0x{0:08X}".format( self._status ) + statName
else:
s += ind + "ChannelSeq...: 0x{0:04X} ({0:d})\n".format( self._channelSeq )
s += ind + "Reserved1....: 0x{0:04X} ({0:d})\n".format( self._reserved1 )
# More stuff...
s += ind + "Command......: 0x{0:02X} ({0:d})".format( self._command ) \
+ " [{0:s}]\n".format( self.commandName( self._command ) )
s += ind + "CreditReqResp: 0x{0:04X} ({0:d})\n".format( self.creditReqResp )
s += ind + "Flags........: 0x{0:08X} ({0:d})\n".format( self._flags )
# Flag subfields.
s += ind + " Response.....: %s\n" % self.flagReply
s += ind + " Async........: %s\n" % self.flagAsync
s += ind + " Related Op...: %s\n" % self.flagNext
s += ind + " Signed.......: %s\n" % self.flagSigned
if( self._dialect >= SMB2_DIALECT_311 ):
s += ind + " Priority.....: {0:d}\n".format( self.flagPriority )
s += ind + " DFS Operation: %s\n" % self.flagDFS
s += ind + " SMB3.x Replay: %s\n" % self.flagReplay
# Yet more stuff...
s += ind + "NextCommand..: 0x{0:08X} ({0:d})\n".format( self._nextCommand )
s += ind + "MessageId....: 0x{0:016X} ({0:d})\n".format( self._messageId )
# AsyncId/Reserved2+TreeId
if( self.flagAsync ):
s += ind + "AsyncId......: 0x{0:016X} ({0:d})\n".format( self._asyncId )
else:
s += ind + "Reserved2....: 0x{0:08X} ({0:d})\n".format( self._reserved2 )
s += ind + "TreeId.......: 0x{0:08X} ({0:d})\n".format( self._treeId )
# SessionId and Signature
s += ind + "SessionId....: 0x{0:016X} ({0:d})\n".format( self._sessionId )
s += ind + "Signature....: ["
tmp = (16 + indent)
s += ('\n' + (' ' * tmp)).join( hexstrchop( self._signature, 32 ) ) + "]\n"
return( s )
def compose( self ):
# Marshall the SMB2 header fields into a stream of bytes.
#
# Output: A string of bytes; the wire format of the SMB2 header.
#
# Notes: It's probably okay if the dialect version isn't
# specified. The default values of <channelSeq> and
# <reserved1> are zero, so the encoded format would be
# zero for either interpretation.
#
    if( self.flagReply or (self._dialect < SMB2_DIALECT_300) ):
# Bytes 8..11 are <status>
if( self.flagAsync ):
# Bytes 32..39 are <async>
msg = self._format_SMB2_StatAsync.pack( self._protocolId,
self._headerSize,
self._creditCharge,
self._status,
self._command,
self._creditReqResp,
self._flags,
self._nextCommand,
self._messageId,
self._asyncId,
self._sessionId,
self._signature )
else:
# Bytes 32..39 are <reserved2>/<treeId>
msg = self._format_SMB2_StatTreeId.pack( self._protocolId,
self._headerSize,
self._creditCharge,
self._status,
self._command,
self._creditReqResp,
self._flags,
self._nextCommand,
self._messageId,
self._reserved2,
self._treeId,
self._sessionId,
self._signature )
else:
# Bytes 8..11 are <channelSeq>/<reserved1>
if( self.flagAsync ):
# Bytes 32..39 are <async>
msg = self._format_SMB2_cSeqAsync.pack( self._protocolId,
self._headerSize,
self._creditCharge,
self._channelSeq,
self._reserved1,
self._command,
self._creditReqResp,
self._flags,
self._nextCommand,
self._messageId,
self._asyncId,
self._sessionId,
self._signature )
else:
# Bytes 32..39 are <reserved2>/<treeId>
msg = self._format_SMB2_cSeqTreeId.pack( self._protocolId,
self._headerSize,
self._creditCharge,
self._channelSeq,
self._reserved1,
self._command,
self._creditReqResp,
self._flags,
self._nextCommand,
self._messageId,
self._reserved2,
self._treeId,
self._sessionId,
self._signature )
return( msg )
# Unit Tests ----------------------------------------------------------------- #
#
def _unit_test():
# Module unit tests.
#
"""
Doctest:
>>> _unit_test()
Success
"""
if( __debug__ ):
# 1.Baseline test.
# Just verify that we can store and retrieve the basic attributes
# of an _SMB2_Header object.
#
hdr = _SMB2_Header( SMB2_COM_LOGOFF, SMB2_DIALECT_302 )
hdr.creditCharge = 213
hdr.channelSeq = 42607
hdr.creditReqResp = 42
hdr.flagReply = False
hdr.flagAsync = False
hdr.flagNext = False
hdr.flagSigned = False
hdr.flagPriority = 5
hdr.flagDFS = True
hdr.flagReplay = False
hdr.nextCommand = 0x87654321
hdr.messageId = _SMB2_Header._format_Q.unpack( "Fooberry" )[0]
hdr.treeId = 0xBEADED
hdr.sessionId = _SMB2_Header._format_Q.unpack( "Icecream" )[0]
hdr.signature = "Reginald".center( 16 )
# Create a header dump, compose a message, then parse the message.
dmp0 = hdr.dump()
msg = hdr.compose()
hdr = _SMB2_Header.parseMsg( msg, SMB2_DIALECT_302 )
# Dump the newly reparsed header, and compare against the original.
dmp1 = hdr.dump()
if( dmp0 != dmp1 ):
print "Failure: Reparsing a composed header resulted in differences."
print "As composed:\n", dmp0
print "As parsed:\n", dmp1
return
# 2.Add additional tests hereafter.
# Bottom line.
print "Success"
# ============================================================================ #
# Reginald fidgeted uneasily in his seat. "I realize", he said, pensively,
# "that I do have unusually large dorsal fins, for a carrot".
# ============================================================================ #<|fim▁end|> | |
<|file_name|>aws_lambda.py<|end_file_name|><|fim▁begin|>import hashlib
import json
import logging
import os
import subprocess
import sys
import time
from collections import defaultdict
from shutil import copy
from shutil import copyfile
from shutil import copystat
from shutil import copytree
from tempfile import mkdtemp
import boto3
import botocore
import yaml
from .helpers import archive
from .helpers import get_environment_variable_value
from .helpers import LambdaContext
from .helpers import mkdir
from .helpers import read
from .helpers import timestamp
ARN_PREFIXES = {
"cn-north-1": "aws-cn",
"cn-northwest-1": "aws-cn",
"us-gov-west-1": "aws-us-gov",
}
log = logging.getLogger(__name__)
def load_source(module_name, module_path):
"""Loads a python module from the path of the corresponding file."""
if sys.version_info[0] == 3 and sys.version_info[1] >= 5:
import importlib.util
spec = importlib.util.spec_from_file_location(module_name, module_path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
elif sys.version_info[0] == 3 and sys.version_info[1] < 5:
import importlib.machinery
loader = importlib.machinery.SourceFileLoader(module_name, module_path)
module = loader.load_module()
return module
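# For example, load_source("service", "/path/to/service.py") returns the
# imported module object; get_callable_handler_function() below then pulls
# the handler callable out of it with getattr().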
def cleanup_old_versions(
src, keep_last_versions, config_file="config.yaml", profile_name=None,
):
"""Deletes old deployed versions of the function in AWS Lambda.
Won't delete $Latest and any aliased version
:param str src:
The path to your Lambda ready project (folder must contain a valid
config.yaml and handler module (e.g.: service.py).
:param int keep_last_versions:
The number of recent versions to keep and not delete
"""
if keep_last_versions <= 0:
print("Won't delete all versions. Please do this manually")
else:
path_to_config_file = os.path.join(src, config_file)
cfg = read_cfg(path_to_config_file, profile_name)
profile_name = cfg.get("profile")
aws_access_key_id = cfg.get("aws_access_key_id")
aws_secret_access_key = cfg.get("aws_secret_access_key")
client = get_client(
"lambda",
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region"),
)
response = client.list_versions_by_function(
FunctionName=cfg.get("function_name"),
)
versions = response.get("Versions")
if len(response.get("Versions")) < keep_last_versions:
print("Nothing to delete. (Too few versions published)")
else:
version_numbers = [
elem.get("Version") for elem in versions[1:-keep_last_versions]
]
for version_number in version_numbers:
try:
client.delete_function(
FunctionName=cfg.get("function_name"),
Qualifier=version_number,
)
except botocore.exceptions.ClientError as e:
print(f"Skipping Version {version_number}: {e}")
def deploy(
src,
requirements=None,
local_package=None,
config_file="config.yaml",
profile_name=None,
preserve_vpc=False,
):
"""Deploys a new function to AWS Lambda.
:param str src:
The path to your Lambda ready project (folder must contain a valid
config.yaml and handler module (e.g.: service.py).
:param str local_package:
        The path to a local package that should be included in the deploy as
well (and/or is not available on PyPi)
"""
# Load and parse the config file.
path_to_config_file = os.path.join(src, config_file)
cfg = read_cfg(path_to_config_file, profile_name)
# Copy all the pip dependencies required to run your code into a temporary
# folder then add the handler file in the root of this directory.
# Zip the contents of this folder into a single file and output to the dist
# directory.
path_to_zip_file = build(
src,
config_file=config_file,
requirements=requirements,
local_package=local_package,
)
existing_config = get_function_config(cfg)
if existing_config:
update_function(
cfg, path_to_zip_file, existing_config, preserve_vpc=preserve_vpc
)
else:
create_function(cfg, path_to_zip_file)
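# For reference, a minimal config.yaml consumed by deploy() might look like
# the sketch below; the keys mirror the cfg.get() calls in this module and
# the values are illustrative, not prescriptive:
#
#   region: us-east-1
#   function_name: my_lambda_function
#   handler: service.handler
#   runtime: python3.8
#   role: lambda_basic_execution
#   timeout: 15
#   memory_size: 512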
def deploy_s3(
src,
requirements=None,
local_package=None,
config_file="config.yaml",
profile_name=None,
preserve_vpc=False,
):
"""Deploys a new function via AWS S3.
:param str src:
The path to your Lambda ready project (folder must contain a valid
config.yaml and handler module (e.g.: service.py).
:param str local_package:
        The path to a local package that should be included in the deploy as
well (and/or is not available on PyPi)
"""
# Load and parse the config file.
path_to_config_file = os.path.join(src, config_file)
cfg = read_cfg(path_to_config_file, profile_name)
# Copy all the pip dependencies required to run your code into a temporary
# folder then add the handler file in the root of this directory.
# Zip the contents of this folder into a single file and output to the dist
# directory.
path_to_zip_file = build(
src,
config_file=config_file,
requirements=requirements,
local_package=local_package,
)
use_s3 = True
s3_file = upload_s3(cfg, path_to_zip_file, use_s3)
existing_config = get_function_config(cfg)
if existing_config:
update_function(
cfg,
path_to_zip_file,
existing_config,
use_s3=use_s3,
s3_file=s3_file,
preserve_vpc=preserve_vpc,
)
else:
create_function(cfg, path_to_zip_file, use_s3=use_s3, s3_file=s3_file)
def upload(
src,
requirements=None,
local_package=None,
config_file="config.yaml",
profile_name=None,
):
"""Uploads a new function to AWS S3.
:param str src:
The path to your Lambda ready project (folder must contain a valid
config.yaml and handler module (e.g.: service.py).
:param str local_package:
        The path to a local package that should be included in the deploy as
well (and/or is not available on PyPi)
"""
# Load and parse the config file.
path_to_config_file = os.path.join(src, config_file)
cfg = read_cfg(path_to_config_file, profile_name)
# Copy all the pip dependencies required to run your code into a temporary
# folder then add the handler file in the root of this directory.
# Zip the contents of this folder into a single file and output to the dist
# directory.
path_to_zip_file = build(
src,
config_file=config_file,
requirements=requirements,
local_package=local_package,
)
upload_s3(cfg, path_to_zip_file)
def invoke(
src,
event_file="event.json",
config_file="config.yaml",
profile_name=None,
verbose=False,
):
"""Simulates a call to your function.
:param str src:
The path to your Lambda ready project (folder must contain a valid
config.yaml and handler module (e.g.: service.py).
    :param str event_file:
An optional argument to override which event file to use.
:param bool verbose:
Whether to print out verbose details.
"""
# Load and parse the config file.
path_to_config_file = os.path.join(src, config_file)
cfg = read_cfg(path_to_config_file, profile_name)
# Set AWS_PROFILE environment variable based on `--profile` option.
if profile_name:
os.environ["AWS_PROFILE"] = profile_name
# Load environment variables from the config file into the actual
# environment.
env_vars = cfg.get("environment_variables")
if env_vars:
for key, value in env_vars.items():
os.environ[key] = get_environment_variable_value(value)
# Load and parse event file.
path_to_event_file = os.path.join(src, event_file)
event = read(path_to_event_file, loader=json.loads)
# Tweak to allow module to import local modules
try:
sys.path.index(src)
except ValueError:
sys.path.append(src)
handler = cfg.get("handler")
# Inspect the handler string (<module>.<function name>) and translate it
# into a function we can execute.
fn = get_callable_handler_function(src, handler)
timeout = cfg.get("timeout")
if timeout:
context = LambdaContext(cfg.get("function_name"), timeout)
else:
context = LambdaContext(cfg.get("function_name"))
start = time.time()
results = fn(event, context)
end = time.time()
print("{0}".format(results))
if verbose:
print(
"\nexecution time: {:.8f}s\nfunction execution "
"timeout: {:2}s".format(end - start, cfg.get("timeout", 15))
)
def init(src, minimal=False):
"""Copies template files to a given directory.
:param str src:
The path to output the template lambda project files.
:param bool minimal:
Minimal possible template files (excludes event.json).
"""
templates_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "project_templates",
)
for filename in os.listdir(templates_path):
if (minimal and filename == "event.json") or filename.endswith(".pyc"):
continue
dest_path = os.path.join(templates_path, filename)
if not os.path.isdir(dest_path):
copy(dest_path, src)
def build(
src,
requirements=None,
local_package=None,
config_file="config.yaml",
profile_name=None,
):
"""Builds the file bundle.
:param str src:
The path to your Lambda ready project (folder must contain a valid
config.yaml and handler module (e.g.: service.py).
:param str local_package:
        The path to a local package that should be included in the deploy as
well (and/or is not available on PyPi)
"""
# Load and parse the config file.
path_to_config_file = os.path.join(src, config_file)
cfg = read_cfg(path_to_config_file, profile_name)
# Get the absolute path to the output directory and create it if it doesn't
# already exist.
dist_directory = cfg.get("dist_directory", "dist")
path_to_dist = os.path.join(src, dist_directory)
mkdir(path_to_dist)
# Combine the name of the Lambda function with the current timestamp to use
# for the output filename.
function_name = cfg.get("function_name")
output_filename = "{0}-{1}.zip".format(timestamp(), function_name)
path_to_temp = mkdtemp(prefix="aws-lambda")
pip_install_to_target(
path_to_temp, requirements=requirements, local_package=local_package,
)
# Hack for Zope.
if "zope" in os.listdir(path_to_temp):
print(
"Zope packages detected; fixing Zope package paths to "
"make them importable.",
)
# Touch.
with open(os.path.join(path_to_temp, "zope/__init__.py"), "wb"):
pass
# Gracefully handle whether ".zip" was included in the filename or not.
output_filename = (
"{0}.zip".format(output_filename)
if not output_filename.endswith(".zip")
else output_filename
)
# Allow definition of source code directories we want to build into our
# zipped package.
build_config = defaultdict(**cfg.get("build", {}))
build_source_directories = build_config.get("source_directories", "")
build_source_directories = (
build_source_directories
if build_source_directories is not None
else ""
)
source_directories = [
d.strip() for d in build_source_directories.split(",")
]
files = []
    for filename in os.listdir(src):
        path = os.path.join(src, filename)
        # Resolve paths against `src` so the file checks do not depend on
        # the current working directory.
        if os.path.isfile(path):
            if filename == ".DS_Store":
                continue
            if filename == config_file:
                continue
            print("Bundling: %r" % filename)
            files.append(path)
        elif os.path.isdir(path) and filename in source_directories:
            print("Bundling directory: %r" % filename)
            files.append(path)
# "cd" into `temp_path` directory.
os.chdir(path_to_temp)
for f in files:
if os.path.isfile(f):
_, filename = os.path.split(f)
# Copy handler file into root of the packages folder.
copyfile(f, os.path.join(path_to_temp, filename))
copystat(f, os.path.join(path_to_temp, filename))
elif os.path.isdir(f):
src_path_length = len(src) + 1
destination_folder = os.path.join(
path_to_temp, f[src_path_length:]
)
copytree(f, destination_folder)
# Zip them together into a single file.
# TODO: Delete temp directory created once the archive has been compiled.
path_to_zip_file = archive("./", path_to_dist, output_filename)
return path_to_zip_file
def get_callable_handler_function(src, handler):
"""Translate a string of the form "module.function" into a callable
function.
:param str src:
The path to your Lambda project containing a valid handler file.
:param str handler:
A dot delimited string representing the `<module>.<function name>`.
"""
# "cd" into `src` directory.
os.chdir(src)
module_name, function_name = handler.split(".")
filename = get_handler_filename(handler)
path_to_module_file = os.path.join(src, filename)
module = load_source(module_name, path_to_module_file)
return getattr(module, function_name)
def get_handler_filename(handler):
"""Shortcut to get the filename from the handler string.
:param str handler:
A dot delimited string representing the `<module>.<function name>`.
"""
module_name, _ = handler.split(".")
return "{0}.py".format(module_name)
def _install_packages(path, packages):
"""Install all packages listed to the target directory.
Ignores any package that includes Python itself and python-lambda as well
since its only needed for deploying and not running the code
:param str path:
Path to copy installed pip packages to.
:param list packages:
A list of packages to be installed via pip.
"""
def _filter_blacklist(package):
blacklist = ["-i", "#", "Python==", "python-lambda=="]
return all(package.startswith(entry) is False for entry in blacklist)
filtered_packages = filter(_filter_blacklist, packages)
for package in filtered_packages:
if package.startswith("-e "):
package = package.replace("-e ", "")
print("Installing {package}".format(package=package))
subprocess.check_call(
[
sys.executable,
"-m",
"pip",
"install",
package,
"-t",
path,
"--ignore-installed",
]
)
print(
"Install directory contents are now: {directory}".format(
directory=os.listdir(path)
)
)
def pip_install_to_target(path, requirements=None, local_package=None):
"""For a given active virtualenv, gather all installed pip packages then
copy (re-install) them to the path provided.
:param str path:
Path to copy installed pip packages to.
:param str requirements:
If set, only the packages in the supplied requirements file are
installed.
If not set then installs all packages found via pip freeze.
:param str local_package:
        The path to a local package that should be included in the deploy as
well (and/or is not available on PyPi)
"""
packages = []
if not requirements:
print("Gathering pip packages")
pkgStr = subprocess.check_output(
[sys.executable, "-m", "pip", "freeze"]
)
packages.extend(pkgStr.decode("utf-8").splitlines())
else:
if os.path.exists(requirements):
print("Gathering requirement packages")
data = read(requirements)
packages.extend(data.splitlines())
if not packages:
print("No dependency packages installed!")
if local_package is not None:
if not isinstance(local_package, (list, tuple)):
local_package = [local_package]
for l_package in local_package:
packages.append(l_package)
_install_packages(path, packages)
def get_role_name(region, account_id, role):
"""Shortcut to insert the `account_id` and `role` into the iam string."""
prefix = ARN_PREFIXES.get(region, "aws")
return "arn:{0}:iam::{1}:role/{2}".format(prefix, account_id, role)
def get_account_id(
profile_name, aws_access_key_id, aws_secret_access_key, region=None,
):
"""Query STS for a users' account_id"""
client = get_client(
"sts", profile_name, aws_access_key_id, aws_secret_access_key, region,
)
return client.get_caller_identity().get("Account")
def get_client(
client,
profile_name,
aws_access_key_id,
aws_secret_access_key,
region=None,
):
"""Shortcut for getting an initialized instance of the boto3 client."""
boto3.setup_default_session(
profile_name=profile_name,
aws_access_key_id=aws_access_key_id,<|fim▁hole|> return boto3.client(client)
def create_function(cfg, path_to_zip_file, use_s3=False, s3_file=None):
"""Register and upload a function to AWS Lambda."""
print("Creating your new Lambda function")
byte_stream = read(path_to_zip_file, binary_file=True)
profile_name = cfg.get("profile")
aws_access_key_id = cfg.get("aws_access_key_id")
aws_secret_access_key = cfg.get("aws_secret_access_key")
account_id = get_account_id(
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region",),
)
role = get_role_name(
cfg.get("region"),
account_id,
cfg.get("role", "lambda_basic_execution"),
)
client = get_client(
"lambda",
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region"),
)
# Do we prefer development variable over config?
buck_name = os.environ.get("S3_BUCKET_NAME") or cfg.get("bucket_name")
func_name = os.environ.get("LAMBDA_FUNCTION_NAME") or cfg.get(
"function_name"
)
print("Creating lambda function with name: {}".format(func_name))
if use_s3:
kwargs = {
"FunctionName": func_name,
"Runtime": cfg.get("runtime", "python2.7"),
"Role": role,
"Handler": cfg.get("handler"),
"Code": {
"S3Bucket": "{}".format(buck_name),
"S3Key": "{}".format(s3_file),
},
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
"VpcConfig": {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
},
"Publish": True,
}
else:
kwargs = {
"FunctionName": func_name,
"Runtime": cfg.get("runtime", "python2.7"),
"Role": role,
"Handler": cfg.get("handler"),
"Code": {"ZipFile": byte_stream},
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
"VpcConfig": {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
},
"Publish": True,
}
if "tags" in cfg:
kwargs.update(
Tags={key: str(value) for key, value in cfg.get("tags").items()}
)
if "environment_variables" in cfg:
kwargs.update(
Environment={
"Variables": {
key: get_environment_variable_value(value)
for key, value in cfg.get("environment_variables").items()
},
},
)
client.create_function(**kwargs)
concurrency = get_concurrency(cfg)
if concurrency > 0:
client.put_function_concurrency(
FunctionName=func_name, ReservedConcurrentExecutions=concurrency
)
def update_function(
cfg,
path_to_zip_file,
existing_cfg,
use_s3=False,
s3_file=None,
preserve_vpc=False,
):
"""Updates the code of an existing Lambda function"""
print("Updating your Lambda function")
byte_stream = read(path_to_zip_file, binary_file=True)
profile_name = cfg.get("profile")
aws_access_key_id = cfg.get("aws_access_key_id")
aws_secret_access_key = cfg.get("aws_secret_access_key")
account_id = get_account_id(
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region",),
)
role = get_role_name(
cfg.get("region"),
account_id,
cfg.get("role", "lambda_basic_execution"),
)
client = get_client(
"lambda",
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region"),
)
# Do we prefer development variable over config?
buck_name = os.environ.get("S3_BUCKET_NAME") or cfg.get("bucket_name")
if use_s3:
client.update_function_code(
FunctionName=cfg.get("function_name"),
S3Bucket="{}".format(buck_name),
S3Key="{}".format(s3_file),
Publish=True,
)
else:
client.update_function_code(
FunctionName=cfg.get("function_name"),
ZipFile=byte_stream,
Publish=True,
)
kwargs = {
"FunctionName": cfg.get("function_name"),
"Role": role,
"Runtime": cfg.get("runtime"),
"Handler": cfg.get("handler"),
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
}
if preserve_vpc:
kwargs["VpcConfig"] = existing_cfg.get("Configuration", {}).get(
"VpcConfig"
)
if kwargs["VpcConfig"] is None:
kwargs["VpcConfig"] = {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
}
else:
del kwargs["VpcConfig"]["VpcId"]
else:
kwargs["VpcConfig"] = {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
}
if "environment_variables" in cfg:
kwargs.update(
Environment={
"Variables": {
key: str(get_environment_variable_value(value))
for key, value in cfg.get("environment_variables").items()
},
},
)
ret = client.update_function_configuration(**kwargs)
concurrency = get_concurrency(cfg)
if concurrency > 0:
client.put_function_concurrency(
FunctionName=cfg.get("function_name"),
ReservedConcurrentExecutions=concurrency,
)
elif "Concurrency" in existing_cfg:
client.delete_function_concurrency(
FunctionName=cfg.get("function_name")
)
if "tags" in cfg:
tags = {key: str(value) for key, value in cfg.get("tags").items()}
if tags != existing_cfg.get("Tags"):
if existing_cfg.get("Tags"):
client.untag_resource(
Resource=ret["FunctionArn"],
TagKeys=list(existing_cfg["Tags"].keys()),
)
client.tag_resource(Resource=ret["FunctionArn"], Tags=tags)
def upload_s3(cfg, path_to_zip_file, *use_s3):
"""Upload a function to AWS S3."""
print("Uploading your new Lambda function")
profile_name = cfg.get("profile")
aws_access_key_id = cfg.get("aws_access_key_id")
aws_secret_access_key = cfg.get("aws_secret_access_key")
client = get_client(
"s3",
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region"),
)
byte_stream = b""
with open(path_to_zip_file, mode="rb") as fh:
byte_stream = fh.read()
s3_key_prefix = cfg.get("s3_key_prefix", "/dist")
checksum = hashlib.new("md5", byte_stream).hexdigest()
timestamp = str(time.time())
filename = "{prefix}{checksum}-{ts}.zip".format(
prefix=s3_key_prefix, checksum=checksum, ts=timestamp,
)
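    # e.g. "/dist9e107d9d372bb6826bd81d3542a419d6-1514764800.0.zip"
    # (hypothetical checksum and timestamp)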
    # Prefer the development environment variable over the config value, if set.
buck_name = os.environ.get("S3_BUCKET_NAME") or cfg.get("bucket_name")
func_name = os.environ.get("LAMBDA_FUNCTION_NAME") or cfg.get(
"function_name"
)
kwargs = {
"Bucket": "{}".format(buck_name),
"Key": "{}".format(filename),
"Body": byte_stream,
}
client.put_object(**kwargs)
print("Finished uploading {} to S3 bucket {}".format(func_name, buck_name))
if use_s3:
return filename
def get_function_config(cfg):
"""Check whether a function exists or not and return its config"""
function_name = cfg.get("function_name")
profile_name = cfg.get("profile")
aws_access_key_id = cfg.get("aws_access_key_id")
aws_secret_access_key = cfg.get("aws_secret_access_key")
client = get_client(
"lambda",
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region"),
)
try:
return client.get_function(FunctionName=function_name)
    except client.exceptions.ResourceNotFoundException as e:
        if "Function not found" in str(e):
            return False
        raise  # propagate unexpected ResourceNotFound errors instead of returning None
def get_concurrency(cfg):
"""Return the Reserved Concurrent Executions if present in the config"""
concurrency = int(cfg.get("concurrency", 0))
return max(0, concurrency)
def read_cfg(path_to_config_file, profile_name):
cfg = read(path_to_config_file, loader=yaml.full_load)
if profile_name is not None:
cfg["profile"] = profile_name
elif "AWS_PROFILE" in os.environ:
cfg["profile"] = os.environ["AWS_PROFILE"]
return cfg<|fim▁end|> | aws_secret_access_key=aws_secret_access_key,
region_name=region,
) |
<|file_name|>conf.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Glance Release Notes documentation build configuration file, created by
# sphinx-quickstart on Tue Nov 3 17:40:50 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.<|fim▁hole|># If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'oslosphinx',
'reno.sphinxext',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'cellar Release Notes'
copyright = u'2016, OpenStack Foundation'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# The full version, including alpha/beta/rc tags.
release = ''
# The short X.Y version.
version = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'GlanceReleaseNotesdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'GlanceReleaseNotes.tex', u'Glance Release Notes Documentation',
u'Glance Developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'glancereleasenotes', u'Glance Release Notes Documentation',
[u'Glance Developers'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'GlanceReleaseNotes', u'Glance Release Notes Documentation',
u'Glance Developers', 'GlanceReleaseNotes',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False<|fim▁end|> | # sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
|
<|file_name|>train_utils.py<|end_file_name|><|fim▁begin|># Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six import string_types
import random
import re
import json
import numpy as np
import traceback
from cognitive import stim_generator as sg
import cognitive.constants as const
_R_MEAN = 123.68
_G_MEAN = 116.78
_B_MEAN = 103.94
def convert_to_grid(xy_coord, prefs):
"""Given a x-y coordinate, return the target activity for a grid of neurons.
Args:
xy_coord : numpy 2-D array (batch_size, 2)
prefs: numpy 2-D array (n_out_pnt, 2). x and y preferences.
Returns:
activity: numpy array (batch_size, GRID_SIZE**2)
"""
sigma2 = 0.02 # 2*sigma-squared
activity = np.exp(-((xy_coord[:, 0:1] - prefs[:, 0])**2 +
(xy_coord[:, 1:2] - prefs[:, 1])**2) / sigma2)
activity = (activity.T / np.sum(activity, axis=1)).T
return activity
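# Usage sketch (hypothetical 7x7 grid of preferred locations):
#
#     xs, ys = np.meshgrid(np.linspace(0, 1, 7), np.linspace(0, 1, 7))
#     prefs = np.stack([xs.ravel(), ys.ravel()], axis=1)    # (49, 2)
#     act = convert_to_grid(np.array([[0.5, 0.5]]), prefs)  # (1, 49)
#     # each row of `act` is a normalized Gaussian bump summing to 1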
def map_sentence2ints(sentence):
"""Map a sentence to a list of words."""
word_list = re.findall(r"[\w']+|[.,!?;]", sentence)
int_list = [const.INPUTVOCABULARY.index(word) for word in word_list]
return np.array(int_list).astype(np.int32)
def preprocess(in_imgs_, vis_type):
"""Pre-process images."""
if (vis_type == 'vgg') or (vis_type == 'vgg_pretrain'):
in_imgs_ -= np.array([_R_MEAN, _G_MEAN, _B_MEAN], dtype=np.float32)
else:
in_imgs_ /= 255.
in_imgs_ -= np.mean(in_imgs_)
return in_imgs_
def tasks_to_rules(tasks):
"""Generate in_rule and seq_length arrays.
Args:
tasks: a list of tg.Task instances or string rules, length is batch_size.
"""
batch_size = len(tasks)
in_rule = np.zeros((const.MAXSEQLENGTH, batch_size), dtype=np.int64)
seq_length = np.zeros((batch_size,), dtype=np.int64)
for i_task, task in enumerate(tasks):
word_list = re.findall(r"[\w']+|[.,!?;]", str(task))
seq_length[i_task] = len(word_list)
for i_word, word in enumerate(word_list):
in_rule[i_word, i_task] = const.INPUTVOCABULARY.index(word)
return in_rule, seq_length
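# Sketch: tasks_to_rules(["point now red", "point now blue"]) would return an
# (MAXSEQLENGTH, 2) int64 array of vocabulary indices padded with zeros, plus
# seq_length == [3, 3] (hypothetical instructions; every word must appear in
# const.INPUTVOCABULARY).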
def set_outputs_from_tasks(n_epoch, tasks, objsets,
out_pnt_xy, out_word,
mask_pnt, mask_word):
j = 0
for epoch_now in range(n_epoch):
for task, objset in zip(tasks, objsets):
target = task(objset, epoch_now)
if target is const.INVALID:
# For invalid target, no loss is used. Everything remains zero.
pass
elif isinstance(target, sg.Loc):
# minimize point loss
out_pnt_xy[j, :] = target.value
mask_pnt[j] = 1.
elif isinstance(target, bool) or isinstance(target, sg.Attribute):
if isinstance(target, bool):
target = 'true' if target else 'false'
else:
target = target.value
# For boolean target, only minimize word loss
out_word[j] = const.OUTPUTVOCABULARY.index(target)
mask_word[j] = 1.
else:
raise TypeError('Unknown target type.')
j += 1
def set_outputs_from_targets(n_epoch, objsets,
out_pnt_xy, out_word,
mask_pnt, mask_word):
j = 0
for epoch_now in range(n_epoch):
for objset in objsets:
target = objset.targets[epoch_now]
if target == 'invalid':
# For invalid target, no loss is used. Everything remains zero.
pass
elif isinstance(target, (list, tuple)):
assert len(target) == 2, "Expected 2-D target. Got " + str(target)
# minimize point loss
out_pnt_xy[j, :] = target
mask_pnt[j] = 1.
elif isinstance(target, string_types):
out_word[j] = const.OUTPUTVOCABULARY.index(target)
mask_word[j] = 1.
else:
raise TypeError('Unknown target type: %s %s' % (type(target), target))
j += 1
def generate_batch(tasks,
n_epoch=30,
img_size=224,
objsets=None,
n_distractor=1,
average_memory_span=2):
"""Generate a batch of trials.
Return numpy arrays to feed the tensorflow placeholders.
Args:
tasks: a list of tg.Task instances, length is batch_size.
n_epoch: int, number of epochs
img_size: int, image size
objsets: None or list of ObjectSet/StaticObjectSet instances
n_distractor: int, number of distractors to add
average_memory_span: int, the average number of epochs by which an object
need to be held in working memory, if needed at all
Returns:
All variables are numpy array of float32
    in_imgs: (n_epoch, batch_size, img_size, img_size, 3), time major
    in_rule: (max_seq_length, batch_size) the rule language input, type int64
seq_length: (batch_size,) the length of each task instruction
out_pnt: (n_epoch*batch_size, n_out_pnt)
out_pnt_xy: (n_epoch*batch_size, 2)
out_word: (n_epoch*batch_size, n_out_word)
mask_pnt: (n_epoch*batch_size)
mask_word: (n_epoch*batch_size)
Raises:
TypeError: when target type is incorrect.
"""
batch_size = len(tasks)
if objsets is None:
objsets = list()
for task in tasks:
objsets.append(
task.generate_objset(n_epoch,
n_distractor=n_distractor,
average_memory_span=average_memory_span))
max_objset_epoch = max([objset.n_epoch for objset in objsets])
assert max_objset_epoch == n_epoch, '%d != %d' % (max_objset_epoch, n_epoch)
in_imgs = sg.render(objsets, img_size)
# The rendered images are batch major
in_imgs = np.reshape(in_imgs, [batch_size, n_epoch, img_size, img_size, 3])
# Swap to time major
in_imgs = np.swapaxes(in_imgs, 0, 1)
# Outputs and masks
out_pnt_xy = np.zeros((n_epoch * batch_size, 2), dtype=np.float32)
out_word = np.zeros((n_epoch * batch_size), dtype=np.int64)
mask_pnt = np.zeros((n_epoch * batch_size), dtype=np.float32)
mask_word = np.zeros((n_epoch * batch_size), dtype=np.float32)
if isinstance(objsets[0], sg.StaticObjectSet):
set_outputs_from_targets(n_epoch, objsets,
out_pnt_xy, out_word,
mask_pnt, mask_word)
else:
set_outputs_from_tasks(n_epoch, tasks, objsets,
out_pnt_xy, out_word,
mask_pnt, mask_word)
# Process outputs
out_pnt = convert_to_grid(out_pnt_xy, const.PREFS)
# Generate rule inputs, padded to maximum number of words in a sentence
in_rule, seq_length = tasks_to_rules(tasks)
return (in_imgs, in_rule, seq_length, out_pnt, out_pnt_xy, out_word, mask_pnt,
mask_word)
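# Usage sketch (hypothetical tasks list):
#
#     feeds = generate_batch(tasks, n_epoch=4, img_size=112)
#     in_imgs = feeds[0]  # (4, len(tasks), 112, 112, 3), time major
#     in_rule = feeds[1]  # (const.MAXSEQLENGTH, len(tasks)) vocabulary indices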
def static_objsets_from_examples(examples):
"""Returns a list of StaticObjectSet objects.
Args:
examples: an iterable of dictionaries decoded from json examples.
"""
static_objsets = []
for e in examples:
static_objs = [o for multi_epoch_obj in e['objects']
for o in sg.static_objects_from_dict(multi_epoch_obj)]
static_objset = sg.StaticObjectSet(n_epoch=e['epochs'],
static_objects=static_objs,
targets=e['answers'])
static_objsets.append(static_objset)
return static_objsets<|fim▁hole|>
def json_to_feeds(json_examples):
if isinstance(json_examples, string_types):
json_examples = [json_examples]
examples = []
families = []
rules = []
for je in json_examples:
try:
e = json.loads(je)
except (ValueError, TypeError):
traceback.print_exc()
raise
rules.append(e['question'])
examples.append(e)
families.append(e['family'])
epochs = examples[0]['epochs']
static_objsets = static_objsets_from_examples(examples)
values = generate_batch(rules, n_epoch=epochs,
img_size=112, objsets=static_objsets,
# not used when objsets are given
n_distractor=0,
# not used when objsets are given
average_memory_span=0)
values = values + (families,)
return values
def generate_feeds(tasks, hparams, dataparams=None):
"""Generate feed dict for placeholders.
Args:
tasks: a list of tg.Task instances, length is batch_size.
hparams: hyperparameters in tf.HParams format.
dataparams: dictionary of parameters for the dataset
Returns:
feed_dict: the tensorflow feed_dict dictionary
"""
if isinstance(hparams.n_epoch, int):
n_epoch = hparams.n_epoch
else:
n_epoch = random.randrange(hparams.n_epoch[0], hparams.n_epoch[1] + 1)
# in_imgs, in_rule, seq_length, out_pnt, out_pnt_xy, out_word, mask_pnt,
# mask_word
return generate_batch(
tasks,
n_epoch=n_epoch,
img_size=112,
n_distractor=dataparams['n_distractor'],
average_memory_span=dataparams['average_memory_span']
)<|fim▁end|> | |
<|file_name|>neutron_port_update.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# This script updates the allowed address pairs in Neutron with the
# 'neutron port-update' command. This is required by Calico in OpenStack,
# otherwise BGP will not be working. We query OpenStack API directly to prevent
# installing any dependencies such as python-neutronclient.
#
# USAGE: script_name arg1 arg2...argN
# arg1 - Calico network, i.e. 192.168.0.0/24
# arg2...argN - VMs MAC addresses
#
# Script exit codes (for Ansible)
# 0 - no update to port [default]
# 1 - error
# 2 - port has been updated
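# Example invocation (hypothetical network and MAC addresses):
#   ./neutron_port_update.py 192.168.0.0/24 fa:16:3e:01:02:03 fa:16:3e:04:05:06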
import json
import os
import requests
import sys
def credentials():
"""Retrieves credentials"""
username = os.environ.get('OS_USERNAME')
password = os.environ.get('OS_PASSWORD')
tenant_name = os.environ.get('OS_TENANT_NAME')
auth_url = os.environ.get('OS_AUTH_URL')
if not all((username, password, tenant_name, auth_url)):
sys.stderr.write("ERROR: Unable to get Keystone credentials\n")
exit(1)
return {
'username': username,
'password': password,
'tenant_name': tenant_name,
'auth_url': auth_url
}
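# credentials() reads the usual OpenStack RC variables, e.g. (hypothetical):
#   export OS_USERNAME=admin OS_PASSWORD=secret OS_TENANT_NAME=admin \
#          OS_AUTH_URL=http://controller:5000/v2.0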
def get_catalog():
"""Get service catalog from Keystone with token and all endpoints"""
creds = credentials()
headers = {'Content-Type': 'application/json'}
payload = {
"auth":
{
"tenantName": creds['tenant_name'],
"passwordCredentials": {
"username": creds['username'],
"password": creds['password']
}
}
}
auth_url = creds['auth_url'] + "/tokens"
r = requests.post(auth_url, headers=headers, data=json.dumps(payload))
parsed_json = json.loads(r.text)
if not parsed_json or 'error' in parsed_json:
sys.stderr.write("ERROR: Unable to get authentication token\n")
exit(1)
return parsed_json
def get_token(catalog):
"""Get Keystone authentication token"""
return catalog['access']['token']['id']
def neutron_public_url(catalog):
"""Get Neutron publicURL"""
for i in catalog['access']['serviceCatalog']:
if i['type'] == 'network':
for endpoint in i['endpoints']:
return endpoint['publicURL']
def list_ports(token, public_url):
"""List Neutron ports"""
headers = {'X-Auth-Token': token}
auth_url = public_url + "v2.0/ports"
r = requests.get(auth_url, headers=headers)
if r.text:
parsed_json = json.loads(r.text)
return parsed_json['ports']
else:
sys.stderr.write("ERROR: Unable to retrieve Neutron ports list\n")
exit(1)
def update_port(token, public_url, port_id, mac_address, calico_network):
"""Update Neutron port with the allowed address pairs"""
headers = {'Content-Type': 'application/json', 'X-Auth-Token': token}
payload = {
"port": {
"allowed_address_pairs": [
{
"ip_address": calico_network,
"mac_address": mac_address
}
]
}
}
auth_url = public_url + "v2.0/ports/" + port_id<|fim▁hole|> if r.status_code != 200 or 'NeutronError' in parsed_json:
sys.stderr.write("ERROR: Unable to update port: %s\n" % parsed_json['NeutronError'])
exit(1)
else:
return r.status_code
if __name__ == "__main__":
if len(sys.argv) < 3:
sys.stderr.write("ERROR: Please run script with the correct arguments\n")
exit(1)
calico_network = sys.argv[1]
vms_mac_addresses = sys.argv[2:]
catalog = get_catalog()
token = get_token(catalog)
public_url = neutron_public_url(catalog)
ports = list_ports(token, public_url)
exit_code = 0 # no update to port
for port in ports:
port_id = port['id']
mac_address = port['mac_address']
if mac_address in vms_mac_addresses and not port['allowed_address_pairs']:
status_code = update_port(token, public_url, port_id, mac_address, calico_network)
if status_code == 200:
exit_code = 2 # port has been updated
exit(exit_code)<|fim▁end|> | r = requests.put(auth_url, headers=headers, data=json.dumps(payload))
parsed_json = json.loads(r.text) |
<|file_name|>type.js<|end_file_name|><|fim▁begin|>module.exports = {
name: 'basis.type',
test: [
require('./type/string.js'),
require('./type/number.js'),
require('./type/int.js'),
require('./type/enum.js'),
require('./type/array.js'),
require('./type/object.js'),
require('./type/date.js'),
{
name: 'definition of new types',
init: function(){
var basis = window.basis.createSandbox();
var type = basis.require('basis.type');
var catchWarnings = basis.require('./helpers/common.js').catchWarnings;
},
test: [
{
name: 'simple case',
test: function(){
var any = function(value){
return value;
};
type.defineType('Any', any);
assert(type.getTypeByName('Any') === any);
}
},
{
name: 'corner case',
test: function(){
var toStringType = type.getTypeByName('toString');
var warned = catchWarnings(function(){
toStringType({});
});
assert(warned);
assert(type.getTypeByNameIfDefined('toString') === undefined);
type.defineType('toString', basis.fn.$self);
var warnedAgain = catchWarnings(function(){
toStringType({});
});<|fim▁hole|> assert(type.getTypeByNameIfDefined('toString') === basis.fn.$self);
}
},
{
name: 'deferred type definition',
test: function(){
var DeferredType = type.getTypeByName('DeferredType');
var warnedBefore = catchWarnings(function(){
assert(DeferredType('234.55') === undefined);
});
assert(warnedBefore);
type.defineType('DeferredType', type.int);
var warnedAfter = catchWarnings(function(){
assert(DeferredType('234.55') === 234);
});
assert(warnedAfter === false);
}
},
{
name: 'deffered type definition with specifying type host',
test: function(){
var typeHost = {};
var HostedType = type.getTypeByName('HostedType', typeHost, 'someType');
var warnedBefore = catchWarnings(function(){
assert(HostedType('234.55') === undefined);
});
assert(warnedBefore);
type.defineType('HostedType', type.number);
var warnedAfter = catchWarnings(function(){
assert(HostedType('234.55') === 234.55);
});
assert(warnedAfter === false);
assert(typeHost.someType === type.number);
}
},
{
name: 'double define',
test: function(){
var DoubleTypeA = type.defineType('DoubleType', type.string);
var DoubleTypeB;
var warned = catchWarnings(function(){
              DoubleTypeB = type.defineType('DoubleType', type.date);
});
assert(warned);
assert(type.getTypeByName('DoubleType') === type.date);
}
},
{
name: 'type definition with non string value',
test: function(){
var warned = catchWarnings(function(){
var Three = type.defineType(3, type.object);
});
assert(warned);
}
},
{
name: 'validation',
test: function(){
var StringType = type.getTypeByName('StringType');
var NumberType = type.getTypeByName('NumberType');
type.defineType('StringType', type.string);
var warned = catchWarnings(function(){
type.validate();
});
assert(warned);
type.defineType('NumberType', type.number);
var warnedAgain = catchWarnings(function(){
type.validate();
});
assert(!warnedAgain);
}
}
]
}
]
};<|fim▁end|> |
assert(!warnedAgain); |
<|file_name|>url_scraper_dict.py<|end_file_name|><|fim▁begin|>UL_CATEGORY_LI = '//ul[@class="category"]/li'
H2_A_TITLELINK = './h2/a[@class="titlelink"]'
SPAN_A_TITLELINK = './span/a[@class="titlelink"]'
DIV_BODYFIELD_P = '//div[contains(@class,"bodyfield")]/p'
CATEGORY_H2_XPATH = [ UL_CATEGORY_LI, H2_A_TITLELINK ]
BODYFIELD_SPAN_XPATH = [ DIV_BODYFIELD_P, SPAN_A_TITLELINK ]
"""Mapping of relative URL (for EOPSS pages) to the xpath needed
to extract documents (1st xpath for section, 2nd xpath for document link)
"""
MASSGOV_DICT = {
'homeland-sec/grants/docs/':
[
UL_CATEGORY_LI,
'./h2/span/a[@class="titlelink"]'
],
'homeland-sec/grants/hs-grant-guidance-and-policies.html':
BODYFIELD_SPAN_XPATH,
'homeland-sec/grants/standard-documents.html':
[
'//div[contains(@class,"bodyfield")]/ul/li',
SPAN_A_TITLELINK
],
'law-enforce/grants/': CATEGORY_H2_XPATH,
'law-enforce/grants/2017-muni-public-safety-staffing-grant.html':
BODYFIELD_SPAN_XPATH,
'law-enforce/grants/le-grants-public-records.html':
BODYFIELD_SPAN_XPATH,
'justice-and-prev/grants/': CATEGORY_H2_XPATH,
'justice-and-prev/grants/bgp/': CATEGORY_H2_XPATH,
'hwy-safety/grants/': CATEGORY_H2_XPATH,
'hwy-safety/grants/ffy-2017-traffic-enforcement-grant-program.html':
BODYFIELD_SPAN_XPATH,
'hwy-safety/grants/ffy2017-hsd-grant-opportunities.html':
BODYFIELD_SPAN_XPATH,
'hwy-safety/grants/ffy-2017-step.html': BODYFIELD_SPAN_XPATH,
'hwy-safety/grants/highway-safety-grants-public-records.html':
BODYFIELD_SPAN_XPATH<|fim▁hole|> }<|fim▁end|> | |
<|file_name|>preferences.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
"""
Preferences Frame
Copyright (c) 2014, 2015 Andrew Hawkins
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
"""
Import Declarations
"""
import os
import pickle
import wx
from forsteri.interface import data as idata
"""
Constant Declarations
"""
"""
Frame Class
"""
class PreferencesFrame(wx.Frame):
"""
"""
def __init__(self, *args, **kwargs):
"""
"""
## Panel
# Initialize the parents constructor.
super(PreferencesFrame, self).__init__(*args, **kwargs)
# Create the master panel.
masterPanel = wx.Panel(self)
# Create the master sizer.
masterSizer = wx.BoxSizer(wx.VERTICAL)
## Reporting
# Create the reporting static box.
reportSB = wx.StaticBox(masterPanel, label="Reporting")
# Create the reporting sizer.
reportSizer = wx.StaticBoxSizer(reportSB, wx.VERTICAL)
# Create the first rows sizer.
row1Sizer = wx.BoxSizer(wx.HORIZONTAL)
# Create the label for the first row.
row1Label = wx.StaticText(masterPanel, label="Forecast Type")
# Create the list of choices for the first row.
choice1 = ["Auto", "MLR", "EMA", "Naive"]
# Create the object for the first row.
self.row1Obj = wx.ComboBox(masterPanel, size=(150, -1),
choices=choice1, style=wx.CB_READONLY)
# Add the contents to the row 1 sizer.
row1Sizer.Add(row1Label, flag=wx.ALIGN_CENTER|wx.RIGHT, border=5)
row1Sizer.Add(self.row1Obj, flag=wx.ALIGN_CENTER)
# Add all rows to the report sizer.
reportSizer.Add(row1Sizer, flag=wx.ALL, border=5)
#
## Finish Buttons
# Create the finish sizer.
finishSizer = wx.BoxSizer(wx.HORIZONTAL)
# Create the buttons.
okButton = wx.Button(masterPanel, id=wx.ID_OK)
cancelButton = wx.Button(masterPanel, id=wx.ID_CANCEL)
        # Set the OK button to be the default button.
okButton.SetDefault()
# Add the buttons to the finish sizer.
finishSizer.AddMany([okButton, (5, 0), cancelButton, (5, 0)])
# Bind button presses to functions.
okButton.Bind(wx.EVT_BUTTON, self.onOK)
cancelButton.Bind(wx.EVT_BUTTON, self.onCancel)
## Panel Operations
# Add everything to the master sizer.
masterSizer.Add(reportSizer, flag=wx.ALL, border=5)
masterSizer.AddSpacer(9)
masterSizer.Add(wx.StaticLine(masterPanel, size=(585, 2)),
flag=wx.ALIGN_CENTER)
masterSizer.AddSpacer(9)
masterSizer.Add(finishSizer,<|fim▁hole|>
        # Load the preferences.
self.loadPref()
# Set the sizer for the master panel.
masterPanel.SetSizer(masterSizer)
# Bind closing the frame to a function.
self.Bind(wx.EVT_CLOSE, self.onClose)
# Set window properties.
self.SetSize((600, 400))
self.SetTitle("Preferences")
self.Centre()
self.Show(True)
"""
Helper Functions
"""
def loadPref(self):
"""
"""
# Load the preferences from the pickle file.
pref = pickle.load(open(os.path.join(idata.DATA, "Forsteri",
"pref.p"), "rb"))
# Set all of the prefernce objects.
self.row1Obj.SetValue(pref["report_type"])
return True
def savePref(self):
"""
"""
# Initialize the preferences dictionary.
pref = {}
# Get all of the preference objects data.
pref["report_type"] = self.row1Obj.GetValue()
# Save the preferences into the pickle file.
pickle.dump(pref, open(os.path.join(idata.DATA, "Forsteri",
"pref.p"), "wb"))
return True
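    # The two helpers are symmetric (sketch): savePref() writes the same
    # {"report_type": ...} dict that loadPref() reads back on the next launch.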
"""
Event Handlers
"""
def onOK(self, event):
"""
"""
# Save the preferences.
self.savePref()
self.Close()
def onCancel(self, event):
"""
"""
self.Close()
def onClose(self, event):
"""
"""
self.Destroy()
def main():
"""
    When the file is called independently, create and display the preferences frame.
"""
app = wx.App()
PreferencesFrame(None, style=wx.DEFAULT_FRAME_STYLE)#^wx.RESIZE_BORDER)
app.MainLoop()
if __name__ == '__main__':
main()<|fim▁end|> | flag=wx.BOTTOM|wx.ALIGN_RIGHT, border=5) |
<|file_name|>pearson-3d-main.cpp<|end_file_name|><|fim▁begin|>#include <algorithm>
#include <map>
#include <math.h>
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <time.h>
#ifndef HUGE_JOB_MODE
#include <fj_tool/fapp.h>
#include <fjcoll.h>
#endif
using namespace std;
#include "pearson-3d.h"
bool EXTEND_MISSION=false;
int T_MAX;
int T_MONITOR;
int mpi_my_rank;
const double PI=3.14159265358979323846;
float frand() {
return rand() / float(RAND_MAX);
}
double wctime() {
struct timeval tv;
gettimeofday(&tv,NULL);
return (double)tv.tv_sec + (double)tv.tv_usec*1e-6;
}
/*
void init() {
for(int ix = navi.lower_x; ix < navi.upper_x; ++ix) {
for(int iy = navi.lower_y; iy < navi.upper_y; ++iy) {
for(int iz = navi.lower_z; iz < navi.upper_z; ++iz) {
double x = double(navi.offset_x + ix)/NX;
double y = double(navi.offset_y + iy)/NY;
double z = double(navi.offset_z + iz)/NZ;
U[ix][iy][iz] = 1.0;
V[ix][iy][iz] = 0.0;
if (z > 0.49 && z < 0.51 && x > 0.4 && x < 0.6) {
U[ix][iy][iz] = 0.5;
V[ix][iy][iz] = 0.25;
}
}
}
}
}*/
double gaussian(double x, double y, double z) {
double d = sqrt(x*x+y*y+z*z);
return 1.0/(1.0+exp( (d-3.0)*3.0 ));
}
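// Despite the name, gaussian() is a smoothed step in the distance from the
// origin: roughly 1.0 inside radius 3, roughly 0.0 outside, with a transition
// width of about 1/3 (e.g. gaussian(0,0,0) ~= 0.9999, gaussian(6,0,0) ~= 1.2e-4).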
typedef pair<int,pair<int,int> > Key;
void init() {
if (NZ<500){
const int NI=4,NJ=7;
double oxs[NI*NJ], pat_x[NI] = {230,80, 40,170};
double oys[NI*NJ], pat_y[NI] = {131,131,131,131};
double ozs[NI*NJ], pat_z[NI] = { 50,80,120,190};
for (int i=0;i<NI;++i) {
for (int j=0;j<NJ;++j) {
        oxs[j*NI+i] = pat_x[i] + 2.0 * j*(2*frand()-1);
        oys[j*NI+i] = pat_y[i] + 2.0 * j*(2*frand()-1);
        ozs[j*NI+i] = pat_z[i] + 2.0 * j*(2*frand()-1);
}
}
for(int ix = navi.lower_x; ix < navi.upper_x; ++ix) {
for(int iy = navi.lower_y; iy < navi.upper_y; ++iy) {
for(int iz = navi.lower_z; iz < navi.upper_z; ++iz) {
U[ix][iy][iz] = 1.0;<|fim▁hole|> g += gaussian(iz-oz, ix-ox ,iy-oy);
}
if (g>=1.0) g=1.0;
U[ix][iy][iz] -= 0.5 *g;
V[ix][iy][iz] += 0.25 *g;
}
}
}
}else{
map<Key ,double> seeds;
for(int ix = navi.lower_x; ix < navi.upper_x; ++ix) {
for(int iy = navi.lower_y; iy < navi.upper_y; ++iy) {
for(int iz = navi.lower_z; iz < navi.upper_z; ++iz) {
Key k (ix/16, pair<int,int>(iy/16, iz/16));
U[ix][iy][iz] = 1.0;
V[ix][iy][iz] = 0.0;
double s = seeds[k];
if (s==0) {
s = frand();
seeds[k]=s;
}
if (s < 0.1 ) {
U[ix][iy][iz] = 0.5;
V[ix][iy][iz] = 0.25;
}
}
}
}
}
}
void write_monitor() {
int global_position[6];
global_position[0] = navi.offset_x + navi.lower_x;
global_position[1] = navi.offset_y + navi.lower_y;
global_position[2] = navi.offset_z + navi.lower_z;
global_position[3] = navi.upper_x - navi.lower_x;
global_position[4] = navi.upper_y - navi.lower_y;
global_position[5] = navi.upper_z - navi.lower_z;
int x_size = navi.upper_x - navi.lower_x;
int y_size = navi.upper_y - navi.lower_y;
int z_size = navi.upper_z - navi.lower_z;
if (navi.offset_x + navi.lower_x == 0) {
char fn[256];
sprintf(fn, "out/monitorX-%06d-%d.txt", navi.time_step, mpi_my_rank);
FILE *fp = fopen(fn,"wb");
fwrite(global_position, sizeof(int), 6, fp);
{
const int x=navi.lower_x + x_size/2;
for(int y = navi.lower_y; y < navi.upper_y; ++y) fwrite(U[x][y]+navi.lower_z, sizeof(double),z_size, fp);
for(int y = navi.lower_y; y < navi.upper_y; ++y) fwrite(V[x][y]+navi.lower_z, sizeof(double),z_size, fp);
}
fclose(fp);
}
if (navi.offset_y + navi.lower_y == 0) {
char fn[256];
sprintf(fn, "out/monitorY-%06d-%d.txt", navi.time_step, mpi_my_rank);
FILE *fp = fopen(fn,"wb");
fwrite(global_position, sizeof(int), 6, fp);
{
const int y=navi.lower_y + y_size/2;
for(int x = navi.lower_x; x < navi.upper_x; ++x) fwrite(U[x][y]+navi.lower_z, sizeof(double),z_size, fp);
for(int x = navi.lower_x; x < navi.upper_x; ++x) fwrite(V[x][y]+navi.lower_z, sizeof(double),z_size, fp);
}
fclose(fp);
}
}
int main (int argc, char **argv) {
MPI_Init(&argc, &argv);
Formura_Init(&navi, MPI_COMM_WORLD);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_my_rank);
srand(time(NULL)+mpi_my_rank*65537);
if (argc <= 1) {
T_MAX=8192;
}else{
sscanf(argv[1], "%d", &T_MAX);
}
if (argc <= 2) {
T_MONITOR=8192;
}else{
sscanf(argv[2], "%d", &T_MONITOR);
}
if (argc >= 4) {
EXTEND_MISSION = true;
}
init();
double t_begin = wctime(), t_end;
int last_monitor_t = -T_MONITOR;
char benchmark_name[256];
sprintf(benchmark_name,"main");
for(;;){
double t = wctime();
bool monitor_flag = navi.time_step >= last_monitor_t + T_MONITOR;
// if(monitor_flag || navi.time_step <= 3 * T_MONITOR ) {
// if(mpi_my_rank==0){
// //printf("marked %d step @ %lf sec\n", navi.time_step, t-t_begin);
// }
// }
if(monitor_flag) {
write_monitor();
if(mpi_my_rank==0){
printf("monitor %d step @ %lf sec\n", navi.time_step, t-t_begin);
}
}
if(monitor_flag) {
last_monitor_t += T_MONITOR;
}
if (navi.time_step >= T_MAX) {
if (EXTEND_MISSION){
T_MAX*=2;
T_MONITOR*=2;
sprintf(benchmark_name,"extend-%d",T_MAX);
#ifndef HUGE_JOB_MODE
start_collection(benchmark_name);
fapp_start(benchmark_name, 0,0);
#endif
}else{
break;
}
}
if (navi.time_step == 0) {
t_begin = wctime();
#ifndef HUGE_JOB_MODE
start_collection(benchmark_name);
fapp_start(benchmark_name, 0,0);
#endif
}
Formura_Forward(&navi); // navi.time_step increases
MPI_Barrier(MPI_COMM_WORLD); // TODO: test the effect of synchronization
if (navi.time_step >= T_MAX) {
t_end = wctime();
#ifndef HUGE_JOB_MODE
stop_collection(benchmark_name);
fapp_stop(benchmark_name, 0,0);
#endif
}
}
//printf("total wct = %lf sec\n",t_end - t_begin);
MPI_Barrier(MPI_COMM_WORLD);
MPI_Finalize();
}<|fim▁end|> | V[ix][iy][iz] = 0.0;
double g=0;
for (int i=0;i<NI*NJ;++i) {
double oz=ozs[i], oy=oys[i],ox=oxs[i]; |
<|file_name|>main.cpp<|end_file_name|><|fim▁begin|>// (C) Copyright 2009, Chong Wang, David Blei and Li Fei-Fei
// written by Chong Wang, [email protected]
// This file is part of slda.
// slda is free software; you can redistribute it and/or modify it under
// the terms of the GNU General Public License as published by the Free
// Software Foundation; either version 2 of the License, or (at your
// option) any later version.
// slda is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
// for more details.
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
// USA
#include <stdio.h>
#include <string.h>
#include "corpus.h"
#include "utils.h"
#include "maslda.h"
void help( void )
{
printf("usage: maslda [est] [data] [answers] [settings] [alpha] [tau] [k] [random/seeded/model_path] [seed] [directory]\n");
printf(" maslda [estinf] [data] [answers] [settings] [alpha] [tau] [k] [random/seeded/model_path] [seed] [directory]\n");
printf(" maslda [inf] [data] [label] [settings] [model] [directory]\n");
}
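// Example invocation for training (hypothetical paths and hyperparameters):
//   ./maslda est data.txt answers.txt settings.txt 0.1 1.0 20 random 12345 ./model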
int main(int argc, char* argv[])
{
if (argc < 2)
{
help();
return 0;
}
if (strcmp(argv[1], "est") == 0)
{
corpus c;
char * data_filename = argv[2];
char * answers_filename = argv[3];
<|fim▁hole|>
if(!setting.read_settings(setting_filename))
return 0;
int labels_file = c.read_data(data_filename, answers_filename, 1, setting.LABELS_TRN_FILE);
double alpha = atof(argv[5]);
printf("alpha %lf\n", alpha);
double tau = atof(argv[6]);
printf("tau %lf\n", tau);
int num_topics = atoi(argv[7]);
printf("number of topics is %d\n", num_topics);
char * init_method = argv[8];
double init_seed = atof(argv[9]);
char * directory = argv[10];
printf("models will be saved in %s\n", directory);
make_directory(directory);
slda model;
model.init(alpha, tau, num_topics, &c, &setting, init_seed, labels_file);
model.v_em(&c, init_method, directory, &setting);
}
if (strcmp(argv[1], "estinf") == 0)
{
corpus c;
char * data_filename = argv[2];
char * answers_filename = argv[3];
settings setting;
char * setting_filename = argv[4];
printf("setting_filename %s\n", setting_filename);
if(!setting.read_settings(setting_filename))
return 0;
int labels_file = c.read_data(data_filename, answers_filename, 2, setting.LABELS_TRN_FILE);
double alpha = atof(argv[5]);
printf("alpha %lf\n", alpha);
double tau = atof(argv[6]);
printf("tau %lf\n", tau);
int num_topics = atoi(argv[7]);
printf("number of topics is %d\n", num_topics);
char * init_method = argv[8];
double init_seed = atof(argv[9]);
char * directory = argv[10];
printf("models will be saved in %s\n", directory);
make_directory(directory);
slda model;
model.init(alpha, tau, num_topics, &c, &setting, init_seed, labels_file);
model.v_em(&c, init_method, directory, &setting);
model.infer_only(&c, directory);
}
if (strcmp(argv[1], "inf") == 0)
{
corpus c;
char * data_filename = argv[2];
char * answers_filename = argv[3];
c.read_data(data_filename, answers_filename, 0, NULL);
settings setting;
char * setting_filename = argv[4];
setting.read_settings(setting_filename);
char * model_filename = argv[5];
char * directory = argv[6];
printf("\nresults will be saved in %s\n", directory);
make_directory(directory);
slda model;
model.load_model(model_filename, &setting);
model.infer_only(&c, directory);
}
return 0;
}<|fim▁end|> | settings setting;
char * setting_filename = argv[4];
printf("setting_filename %s\n", setting_filename);
|
<|file_name|>condvar.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use prelude::v1::*;
use sync::atomic::{AtomicUsize, Ordering};
use sync::{mutex, MutexGuard, PoisonError};
use sys_common::condvar as sys;
use sys_common::mutex as sys_mutex;
use sys_common::poison::{self, LockResult};
use sys::time::SteadyTime;
use time::Duration;
/// A Condition Variable
///
/// Condition variables represent the ability to block a thread such that it
/// consumes no CPU time while waiting for an event to occur. Condition
/// variables are typically associated with a boolean predicate (a condition)
/// and a mutex. The predicate is always verified inside of the mutex before
/// determining that thread must block.
///
/// Functions in this module will block the current **thread** of execution and
/// are bindings to system-provided condition variables where possible. Note
/// that this module places one additional restriction over the system condition
/// variables: each condvar can be used with precisely one mutex at runtime. Any
/// attempt to use multiple mutexes on the same condition variable will result
/// in a runtime panic. If this is not desired, then the unsafe primitives in
/// `sys` do not have this restriction but may result in undefined behavior.
///
/// # Examples
///
/// ```
/// use std::sync::{Arc, Mutex, Condvar};
/// use std::thread;
///
/// let pair = Arc::new((Mutex::new(false), Condvar::new()));
/// let pair2 = pair.clone();
///
/// // Inside of our lock, spawn a new thread, and then wait for it to start
/// thread::spawn(move|| {
/// let &(ref lock, ref cvar) = &*pair2;
/// let mut started = lock.lock().unwrap();
/// *started = true;
/// cvar.notify_one();
/// });
///
/// // wait for the thread to start up
/// let &(ref lock, ref cvar) = &*pair;
/// let mut started = lock.lock().unwrap();
/// while !*started {
/// started = cvar.wait(started).unwrap();
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Condvar { inner: Box<StaticCondvar> }
/// Statically allocated condition variables.
///
/// This structure is identical to `Condvar` except that it is suitable for use
/// in static initializers for other structures.
///
/// # Examples
///
/// ```
/// #![feature(static_condvar)]
///
/// use std::sync::{StaticCondvar, CONDVAR_INIT};
///
/// static CVAR: StaticCondvar = CONDVAR_INIT;
/// ```
#[unstable(feature = "static_condvar",
reason = "may be merged with Condvar in the future")]
pub struct StaticCondvar {
inner: sys::Condvar,
mutex: AtomicUsize,
}
/// Constant initializer for a statically allocated condition variable.
#[unstable(feature = "static_condvar",
reason = "may be merged with Condvar in the future")]
pub const CONDVAR_INIT: StaticCondvar = StaticCondvar::new();
impl Condvar {
/// Creates a new condition variable which is ready to be waited on and
/// notified.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn new() -> Condvar {
Condvar {
inner: box StaticCondvar {
inner: sys::Condvar::new(),
mutex: AtomicUsize::new(0),
}
}<|fim▁hole|> /// Blocks the current thread until this condition variable receives a
/// notification.
///
/// This function will atomically unlock the mutex specified (represented by
/// `mutex_guard`) and block the current thread. This means that any calls
/// to `notify_*()` which happen logically after the mutex is unlocked are
/// candidates to wake this thread up. When this function call returns, the
/// lock specified will have been re-acquired.
///
/// Note that this function is susceptible to spurious wakeups. Condition
/// variables normally have a boolean predicate associated with them, and
/// the predicate must always be checked each time this function returns to
/// protect against spurious wakeups.
///
/// # Failure
///
/// This function will return an error if the mutex being waited on is
/// poisoned when this thread re-acquires the lock. For more information,
/// see information about poisoning on the Mutex type.
///
/// # Panics
///
/// This function will `panic!()` if it is used with more than one mutex
/// over time. Each condition variable is dynamically bound to exactly one
/// mutex to ensure defined behavior across platforms. If this functionality
/// is not desired, then unsafe primitives in `sys` are provided.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn wait<'a, T>(&self, guard: MutexGuard<'a, T>)
-> LockResult<MutexGuard<'a, T>> {
unsafe {
let me: &'static Condvar = &*(self as *const _);
me.inner.wait(guard)
}
}
/// Waits on this condition variable for a notification, timing out after a
/// specified duration.
///
/// The semantics of this function are equivalent to `wait()`
/// except that the thread will be blocked for roughly no longer
/// than `ms` milliseconds. This method should not be used for
/// precise timing due to anomalies such as preemption or platform
/// differences that may not cause the maximum amount of time
/// waited to be precisely `ms`.
///
/// The returned boolean is `false` only if the timeout is known
/// to have elapsed.
///
/// Like `wait`, the lock specified will be re-acquired when this function
/// returns, regardless of whether the timeout elapsed or not.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn wait_timeout_ms<'a, T>(&self, guard: MutexGuard<'a, T>, ms: u32)
-> LockResult<(MutexGuard<'a, T>, bool)> {
unsafe {
let me: &'static Condvar = &*(self as *const _);
me.inner.wait_timeout_ms(guard, ms)
}
}
/// Waits on this condition variable for a notification, timing out after a
/// specified duration.
///
/// The semantics of this function are equivalent to `wait()` except that
/// the thread will be blocked for roughly no longer than `dur`. This
/// method should not be used for precise timing due to anomalies such as
/// preemption or platform differences that may not cause the maximum
/// amount of time waited to be precisely `dur`.
///
/// The returned boolean is `false` only if the timeout is known
/// to have elapsed.
///
/// Like `wait`, the lock specified will be re-acquired when this function
/// returns, regardless of whether the timeout elapsed or not.
#[unstable(feature = "wait_timeout", reason = "waiting for Duration")]
pub fn wait_timeout<'a, T>(&self, guard: MutexGuard<'a, T>,
dur: Duration)
-> LockResult<(MutexGuard<'a, T>, bool)> {
unsafe {
let me: &'static Condvar = &*(self as *const _);
me.inner.wait_timeout(guard, dur)
}
}
/// Waits on this condition variable for a notification, timing out after a
/// specified duration.
///
/// The semantics of this function are equivalent to `wait_timeout` except
/// that the implementation will repeatedly wait while the duration has not
/// passed and the provided function returns `false`.
#[unstable(feature = "wait_timeout_with",
reason = "unsure if this API is broadly needed or what form it should take")]
pub fn wait_timeout_with<'a, T, F>(&self,
guard: MutexGuard<'a, T>,
dur: Duration,
f: F)
-> LockResult<(MutexGuard<'a, T>, bool)>
where F: FnMut(LockResult<&mut T>) -> bool {
unsafe {
let me: &'static Condvar = &*(self as *const _);
me.inner.wait_timeout_with(guard, dur, f)
}
}
/// Wakes up one blocked thread on this condvar.
///
/// If there is a blocked thread on this condition variable, then it will
/// be woken up from its call to `wait` or `wait_timeout`. Calls to
/// `notify_one` are not buffered in any way.
///
/// To wake up all threads, see `notify_all()`.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn notify_one(&self) { unsafe { self.inner.inner.notify_one() } }
/// Wakes up all blocked threads on this condvar.
///
/// This method will ensure that any current waiters on the condition
/// variable are awoken. Calls to `notify_all()` are not buffered in any
/// way.
///
/// To wake up only one thread, see `notify_one()`.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn notify_all(&self) { unsafe { self.inner.inner.notify_all() } }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl Drop for Condvar {
fn drop(&mut self) {
unsafe { self.inner.inner.destroy() }
}
}
impl StaticCondvar {
/// Creates a new condition variable
#[unstable(feature = "static_condvar",
reason = "may be merged with Condvar in the future")]
pub const fn new() -> StaticCondvar {
StaticCondvar {
inner: sys::Condvar::new(),
mutex: AtomicUsize::new(0),
}
}
/// Blocks the current thread until this condition variable receives a
/// notification.
///
/// See `Condvar::wait`.
#[unstable(feature = "static_condvar",
reason = "may be merged with Condvar in the future")]
pub fn wait<'a, T>(&'static self, guard: MutexGuard<'a, T>)
-> LockResult<MutexGuard<'a, T>> {
let poisoned = unsafe {
let lock = mutex::guard_lock(&guard);
self.verify(lock);
self.inner.wait(lock);
mutex::guard_poison(&guard).get()
};
if poisoned {
Err(PoisonError::new(guard))
} else {
Ok(guard)
}
}
/// Waits on this condition variable for a notification, timing out after a
/// specified duration.
///
/// See `Condvar::wait_timeout`.
#[unstable(feature = "static_condvar",
reason = "may be merged with Condvar in the future")]
pub fn wait_timeout_ms<'a, T>(&'static self, guard: MutexGuard<'a, T>, ms: u32)
-> LockResult<(MutexGuard<'a, T>, bool)> {
self.wait_timeout(guard, Duration::from_millis(ms as u64))
}
/// Waits on this condition variable for a notification, timing out after a
/// specified duration.
///
/// See `Condvar::wait_timeout`.
#[unstable(feature = "static_condvar",
reason = "may be merged with Condvar in the future")]
pub fn wait_timeout<'a, T>(&'static self,
guard: MutexGuard<'a, T>,
timeout: Duration)
-> LockResult<(MutexGuard<'a, T>, bool)> {
let (poisoned, success) = unsafe {
let lock = mutex::guard_lock(&guard);
self.verify(lock);
let success = self.inner.wait_timeout(lock, timeout);
(mutex::guard_poison(&guard).get(), success)
};
if poisoned {
Err(PoisonError::new((guard, success)))
} else {
Ok((guard, success))
}
}
/// Waits on this condition variable for a notification, timing out after a
/// specified duration.
///
/// The implementation will repeatedly wait while the duration has not
/// passed and the function returns `false`.
///
/// See `Condvar::wait_timeout_with`.
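    ///
    /// A sketch of the retry loop this enables (hypothetical `CVAR`/`M`
    /// statics guarding a `bool` readiness flag):
    ///
    /// ```ignore
    /// let (guard, no_timeout) = CVAR.wait_timeout_with(M.lock().unwrap(), dur, |state| {
    ///     state.map(|ready| *ready).unwrap_or(false)
    /// }).unwrap();
    /// ```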
#[unstable(feature = "static_condvar",
reason = "may be merged with Condvar in the future")]
pub fn wait_timeout_with<'a, T, F>(&'static self,
guard: MutexGuard<'a, T>,
dur: Duration,
mut f: F)
-> LockResult<(MutexGuard<'a, T>, bool)>
where F: FnMut(LockResult<&mut T>) -> bool {
// This could be made more efficient by pushing the implementation into
// sys::condvar
let start = SteadyTime::now();
let mut guard_result: LockResult<MutexGuard<'a, T>> = Ok(guard);
while !f(guard_result
.as_mut()
.map(|g| &mut **g)
.map_err(|e| PoisonError::new(&mut **e.get_mut()))) {
let now = SteadyTime::now();
let consumed = &now - &start;
let guard = guard_result.unwrap_or_else(|e| e.into_inner());
let (new_guard_result, no_timeout) = if consumed > dur {
(Ok(guard), false)
} else {
match self.wait_timeout(guard, dur - consumed) {
Ok((new_guard, no_timeout)) => (Ok(new_guard), no_timeout),
Err(err) => {
let (new_guard, no_timeout) = err.into_inner();
(Err(PoisonError::new(new_guard)), no_timeout)
}
}
};
guard_result = new_guard_result;
if !no_timeout {
let result = f(guard_result
.as_mut()
.map(|g| &mut **g)
.map_err(|e| PoisonError::new(&mut **e.get_mut())));
return poison::map_result(guard_result, |g| (g, result));
}
}
poison::map_result(guard_result, |g| (g, true))
}
/// Wakes up one blocked thread on this condvar.
///
/// See `Condvar::notify_one`.
#[unstable(feature = "static_condvar",
reason = "may be merged with Condvar in the future")]
pub fn notify_one(&'static self) { unsafe { self.inner.notify_one() } }
/// Wakes up all blocked threads on this condvar.
///
/// See `Condvar::notify_all`.
#[unstable(feature = "static_condvar",
reason = "may be merged with Condvar in the future")]
pub fn notify_all(&'static self) { unsafe { self.inner.notify_all() } }
/// Deallocates all resources associated with this static condvar.
///
/// This method is unsafe to call as there is no guarantee that there are no
/// active users of the condvar, and this also doesn't prevent any future
/// users of the condvar. This method is required to be called to not leak
/// memory on all platforms.
#[unstable(feature = "static_condvar",
reason = "may be merged with Condvar in the future")]
pub unsafe fn destroy(&'static self) {
self.inner.destroy()
}
fn verify(&self, mutex: &sys_mutex::Mutex) {
let addr = mutex as *const _ as usize;
match self.mutex.compare_and_swap(0, addr, Ordering::SeqCst) {
// If we got out 0, then we have successfully bound the mutex to
// this cvar.
0 => {}
// If we get out a value that's the same as `addr`, then someone
// already beat us to the punch.
n if n == addr => {}
// Anything else and we're using more than one mutex on this cvar,
// which is currently disallowed.
_ => panic!("attempted to use a condition variable with two \
mutexes"),
}
}
}
#[cfg(test)]
mod tests {
use prelude::v1::*;
use super::StaticCondvar;
use sync::mpsc::channel;
use sync::{StaticMutex, Condvar, Mutex, Arc};
use sync::atomic::{AtomicUsize, Ordering};
use thread;
use time::Duration;
use u32;
#[test]
fn smoke() {
let c = Condvar::new();
c.notify_one();
c.notify_all();
}
#[test]
fn static_smoke() {
static C: StaticCondvar = StaticCondvar::new();
C.notify_one();
C.notify_all();
unsafe { C.destroy(); }
}
#[test]
fn notify_one() {
static C: StaticCondvar = StaticCondvar::new();
static M: StaticMutex = StaticMutex::new();
let g = M.lock().unwrap();
let _t = thread::spawn(move|| {
let _g = M.lock().unwrap();
C.notify_one();
});
let g = C.wait(g).unwrap();
drop(g);
unsafe { C.destroy(); M.destroy(); }
}
#[test]
fn notify_all() {
const N: usize = 10;
let data = Arc::new((Mutex::new(0), Condvar::new()));
let (tx, rx) = channel();
for _ in 0..N {
let data = data.clone();
let tx = tx.clone();
thread::spawn(move|| {
let &(ref lock, ref cond) = &*data;
let mut cnt = lock.lock().unwrap();
*cnt += 1;
if *cnt == N {
tx.send(()).unwrap();
}
while *cnt != 0 {
cnt = cond.wait(cnt).unwrap();
}
tx.send(()).unwrap();
});
}
drop(tx);
let &(ref lock, ref cond) = &*data;
rx.recv().unwrap();
let mut cnt = lock.lock().unwrap();
*cnt = 0;
cond.notify_all();
drop(cnt);
for _ in 0..N {
rx.recv().unwrap();
}
}
#[test]
fn wait_timeout_ms() {
static C: StaticCondvar = StaticCondvar::new();
static M: StaticMutex = StaticMutex::new();
let g = M.lock().unwrap();
let (g, _no_timeout) = C.wait_timeout_ms(g, 1).unwrap();
// spurious wakeups mean this isn't necessarily true
// assert!(!no_timeout);
let _t = thread::spawn(move || {
let _g = M.lock().unwrap();
C.notify_one();
});
let (g, no_timeout) = C.wait_timeout_ms(g, u32::MAX).unwrap();
assert!(no_timeout);
drop(g);
unsafe { C.destroy(); M.destroy(); }
}
#[test]
fn wait_timeout_with() {
static C: StaticCondvar = StaticCondvar::new();
static M: StaticMutex = StaticMutex::new();
static S: AtomicUsize = AtomicUsize::new(0);
let g = M.lock().unwrap();
let (g, success) = C.wait_timeout_with(g, Duration::new(0, 1000), |_| {
false
}).unwrap();
assert!(!success);
let (tx, rx) = channel();
let _t = thread::spawn(move || {
rx.recv().unwrap();
let g = M.lock().unwrap();
S.store(1, Ordering::SeqCst);
C.notify_one();
drop(g);
rx.recv().unwrap();
let g = M.lock().unwrap();
S.store(2, Ordering::SeqCst);
C.notify_one();
drop(g);
rx.recv().unwrap();
let _g = M.lock().unwrap();
S.store(3, Ordering::SeqCst);
C.notify_one();
});
let mut state = 0;
let day = 24 * 60 * 60;
let (_g, success) = C.wait_timeout_with(g, Duration::new(day, 0), |_| {
assert_eq!(state, S.load(Ordering::SeqCst));
tx.send(()).unwrap();
state += 1;
match state {
1|2 => false,
_ => true,
}
}).unwrap();
assert!(success);
}
#[test]
#[should_panic]
fn two_mutexes() {
static M1: StaticMutex = StaticMutex::new();
static M2: StaticMutex = StaticMutex::new();
static C: StaticCondvar = StaticCondvar::new();
let mut g = M1.lock().unwrap();
let _t = thread::spawn(move|| {
let _g = M1.lock().unwrap();
C.notify_one();
});
g = C.wait(g).unwrap();
drop(g);
let _ = C.wait(M2.lock().unwrap()).unwrap();
}
}<|fim▁end|> | }
|
<|file_name|>types.py<|end_file_name|><|fim▁begin|># Copyright (c) 2011 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2011 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS<|fim▁hole|># SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
"""
Some utility functions to deal with mapping Amazon DynamoDB types to
Python types and vice-versa.
"""
import base64
from decimal import (Decimal, DecimalException, Context,
Clamped, Overflow, Inexact, Underflow, Rounded)
from exceptions import DynamoDBNumberError
DYNAMODB_CONTEXT = Context(
Emin=-128, Emax=126, rounding=None, prec=38,
traps=[Clamped, Overflow, Inexact, Rounded, Underflow])
# python2.6 cannot convert floats directly to
# Decimals. This is taken from:
# http://docs.python.org/release/2.6.7/library/decimal.html#decimal-faq
def float_to_decimal(f):
n, d = f.as_integer_ratio()
numerator, denominator = Decimal(n), Decimal(d)
ctx = DYNAMODB_CONTEXT
result = ctx.divide(numerator, denominator)
while ctx.flags[Inexact]:
ctx.flags[Inexact] = False
ctx.prec *= 2
result = ctx.divide(numerator, denominator)
return result
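# Illustrative aside (not part of the original module): when the integer
# ratio divides exactly, the retry loop above never runs, e.g.
#   float_to_decimal(0.5)  # -> Decimal('0.5'), since 1/2 divides exactly
# Inexact ratios are retried at doubled precision until the division fits.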
def is_num(n):
types = (int, long, float, bool, Decimal)
return isinstance(n, types) or n in types
def is_str(n):
return isinstance(n, basestring) or (isinstance(n, type) and
issubclass(n, basestring))
def is_binary(n):
return isinstance(n, Binary)
def serialize_num(val):
"""Cast a number to a string and perform
validation to ensure no loss of precision.
"""
if isinstance(val, bool):
return str(int(val))
return str(val)
def convert_num(s):
if '.' in s:
n = float(s)
else:
n = int(s)
return n
def convert_binary(n):
return Binary(base64.b64decode(n))
def get_dynamodb_type(val):
"""
Take a scalar Python value and return a string representing
the corresponding Amazon DynamoDB type. If the value passed in is
not a supported type, raise a TypeError.
"""
dynamodb_type = None
if is_num(val):
dynamodb_type = 'N'
elif is_str(val):
dynamodb_type = 'S'
elif isinstance(val, (set, frozenset)):
if False not in map(is_num, val):
dynamodb_type = 'NS'
elif False not in map(is_str, val):
dynamodb_type = 'SS'
elif False not in map(is_binary, val):
dynamodb_type = 'BS'
elif isinstance(val, Binary):
dynamodb_type = 'B'
if dynamodb_type is None:
msg = 'Unsupported type "%s" for value "%s"' % (type(val), val)
raise TypeError(msg)
return dynamodb_type
def dynamize_value(val):
"""
Take a scalar Python value and return a dict consisting
of the Amazon DynamoDB type specification and the value that
needs to be sent to Amazon DynamoDB. If the type of the value
is not supported, raise a TypeError.
"""
dynamodb_type = get_dynamodb_type(val)
if dynamodb_type == 'N':
val = {dynamodb_type: serialize_num(val)}
elif dynamodb_type == 'S':
val = {dynamodb_type: val}
elif dynamodb_type == 'NS':
val = {dynamodb_type: map(serialize_num, val)}
elif dynamodb_type == 'SS':
val = {dynamodb_type: [n for n in val]}
elif dynamodb_type == 'B':
val = {dynamodb_type: val.encode()}
elif dynamodb_type == 'BS':
val = {dynamodb_type: [n.encode() for n in val]}
return val
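# A hedged sketch of the mapping implemented above (values are illustrative):
#
#   dynamize_value(10)               # -> {'N': '10'}
#   dynamize_value('foo')            # -> {'S': 'foo'}
#   dynamize_value(set(['a', 'b']))  # -> {'SS': ['a', 'b']} (order not guaranteed)
#   dynamize_value(Binary('\x01'))   # -> {'B': 'AQ=='}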
class Binary(object):
def __init__(self, value):
if not isinstance(value, basestring):
raise TypeError('Value must be a string of binary data!')
self.value = value
def encode(self):
return base64.b64encode(self.value)
def __eq__(self, other):
if isinstance(other, Binary):
return self.value == other.value
else:
return self.value == other
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return 'Binary(%s)' % self.value
def __str__(self):
return self.value
def __hash__(self):
return hash(self.value)
def item_object_hook(dct):
"""
A custom object hook for use when decoding JSON item bodies.
This hook will transform Amazon DynamoDB JSON responses to something
that maps directly to native Python types.
"""
if len(dct.keys()) > 1:
return dct
if 'S' in dct:
return dct['S']
if 'N' in dct:
return convert_num(dct['N'])
if 'SS' in dct:
return set(dct['SS'])
if 'NS' in dct:
return set(map(convert_num, dct['NS']))
if 'B' in dct:
return convert_binary(dct['B'])
if 'BS' in dct:
return set(map(convert_binary, dct['BS']))
return dct
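# Hedged usage sketch (the response body below is hypothetical): this hook is
# meant to be passed as the `object_hook` argument of a JSON decoder.
#
#   import json
#   body = '{"name": {"S": "foo"}, "count": {"N": "2"}}'
#   json.loads(body, object_hook=item_object_hook)
#   # -> {'name': 'foo', 'count': 2}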
class Dynamizer(object):
"""Control serialization/deserialization of types.
This class controls the encoding of python types to the
format that is expected by the DynamoDB API, as well as
taking DynamoDB types and constructing the appropriate
python types.
If you want to customize this process, you can subclass
this class and override the encoding/decoding of
specific types. For example::
'foo' (Python type)
|
v
encode('foo')
|
v
_encode_s('foo')
|
v
{'S': 'foo'} (Encoding sent to/received from DynamoDB)
|
V
decode({'S': 'foo'})
|
v
_decode_s({'S': 'foo'})
|
v
'foo' (Python type)
"""
def _get_dynamodb_type(self, attr):
return get_dynamodb_type(attr)
def encode(self, attr):
"""
Encodes a python type to the format expected
by DynamoDB.
"""
dynamodb_type = self._get_dynamodb_type(attr)
try:
encoder = getattr(self, '_encode_%s' % dynamodb_type.lower())
except AttributeError:
raise ValueError("Unable to encode dynamodb type: %s" %
dynamodb_type)
return {dynamodb_type: encoder(attr)}
def _encode_n(self, attr):
try:
if isinstance(attr, float) and not hasattr(Decimal, 'from_float'):
# python2.6 does not support creating Decimals directly
# from floats so we have to do this ourself.
n = str(float_to_decimal(attr))
else:
n = str(DYNAMODB_CONTEXT.create_decimal(attr))
if filter(lambda x: x in n, ('Infinity', 'NaN')):
raise TypeError('Infinity and NaN not supported')
return n
except (TypeError, DecimalException), e:
msg = '{0} numeric for `{1}`\n{2}'.format(
e.__class__.__name__, attr, str(e) or '')
raise DynamoDBNumberError(msg)
def _encode_s(self, attr):
if isinstance(attr, unicode):
attr = attr.encode('utf-8')
elif not isinstance(attr, str):
attr = str(attr)
return attr
def _encode_ns(self, attr):
return map(self._encode_n, attr)
def _encode_ss(self, attr):
return [self._encode_s(n) for n in attr]
def _encode_b(self, attr):
return attr.encode()
def _encode_bs(self, attr):
return [self._encode_b(n) for n in attr]
def decode(self, attr):
"""
Takes the format returned by DynamoDB and constructs
the appropriate python type.
"""
if len(attr) > 1 or not attr:
return attr
dynamodb_type = attr.keys()[0]
if dynamodb_type.lower() == dynamodb_type:
# It's not an actual type, just a single character attr that
# overlaps with the DDB types. Return it.
return attr
try:
decoder = getattr(self, '_decode_%s' % dynamodb_type.lower())
except AttributeError:
return attr
return decoder(attr[dynamodb_type])
def _decode_n(self, attr):
return DYNAMODB_CONTEXT.create_decimal(attr)
def _decode_s(self, attr):
return attr
def _decode_ns(self, attr):
return set(map(self._decode_n, attr))
def _decode_ss(self, attr):
return set(map(self._decode_s, attr))
def _decode_b(self, attr):
return convert_binary(attr)
def _decode_bs(self, attr):
return set(map(self._decode_b, attr))
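# A minimal sketch of the subclassing hook described in the Dynamizer
# docstring (UpperCaseDynamizer is hypothetical, not part of boto):
#
#   class UpperCaseDynamizer(Dynamizer):
#       def _decode_s(self, attr):
#           return attr.upper()
#
#   UpperCaseDynamizer().decode({'S': 'foo'})  # -> 'FOO'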
class LossyFloatDynamizer(Dynamizer):
"""Use float/int instead of Decimal for numeric types.
This class is provided for backwards compatibility. Instead of
using Decimals for the 'N', 'NS' types it uses ints/floats.
This class is deprecated and its usage is not encouraged,
as doing so may result in loss of precision. Use the
`Dynamizer` class instead.
"""
def _encode_n(self, attr):
return serialize_num(attr)
def _encode_ns(self, attr):
return [str(i) for i in attr]
def _decode_n(self, attr):
return convert_num(attr)
def _decode_ns(self, attr):
return set(map(self._decode_n, attr))<|fim▁end|> | # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT |
<|file_name|>zhttpto.rs<|end_file_name|><|fim▁begin|>//
// zhttpto.rs
//
// Starting code for PS1
// Running on Rust 0.9
//
// Note that this code has serious security risks! You should not run it
// on any system with access to sensitive files.
//
// University of Virginia - cs4414 Spring 2014
// Weilin Xu and David Evans
// Version 0.3
#[feature(globs)];
use std::io::*;
use std::io::net::ip::{SocketAddr};
use std::{str};
static IP: &'static str = "127.0.0.1";
static PORT: int = 4414;
static mut visitor_count: int = 0;
fn main() {
let addr = from_str::<SocketAddr>(format!("{:s}:{:d}", IP, PORT)).unwrap();
let mut acceptor = net::tcp::TcpListener::bind(addr).listen();
println(format!("Listening on [{:s}] ...", addr.to_str()));
for stream in acceptor.incoming() {
// Spawn a task to handle the connection
unsafe {
visitor_count = visitor_count + 1;
}
do spawn {
let mut stream = stream;
match stream {
Some(ref mut s) => {
match s.peer_name() {
Some(pn) => {println(format!("Received connection from: [{:s}]", pn.to_str()));},
None => ()
}
},
None => ()
}
let mut buf = [0, ..500];
stream.read(buf);
let request_str = str::from_utf8(buf);
println(format!("Received request :\n{:s}", request_str));
let mut index: int = 0;
let mut file_name = ~"";
for splitted in request_str.split(' ') {
if(index == 1) {
file_name.push_str(splitted);
break;
}
index = index + 1;
}
let mut response = ~"";
let homepage = match file_name.len() {
1 => true,
_ => false
};
if(homepage) {
response.push_str("HTTP/1.1 200 OK\r\nContent-Type: text/html; charset=UTF-8\r\n\r\n
<!DOCTYPE html><html><head><title>Hello, Rust!</title>
<style>body { background-color: #111; color: #FFEEAA }
h1 { font-size:2cm; text-align: center; color: black; text-shadow: 0 0 4mm red}
h2 { font-size:2cm; text-align: center; color: black; text-shadow: 0 0 4mm green}
</style></head>
<body>
<h1>Greetings, Krusty!</h1>
</body></html>\r\n");
}
else {
let mut valid = true;
if(file_name.len() < 7) {
valid = false;
}
if(valid) {
let extension = file_name.slice_from(file_name.len() - 5).to_owned();
let html_extension = ~".html";
if(str::eq(&extension, &html_extension)) {
let file_name_abs = file_name.slice_from(1);
let path = Path::new(file_name_abs);
let opened_file: Option<File>;
if path.exists() && path.is_file() {
opened_file = File::open(&path);
}
else {
opened_file = None;
}
match opened_file {
Some(html_file) => {
let mut html_file_mut = html_file;
let msg_bytes: ~[u8] = html_file_mut.read_to_end();
response.push_str(str::from_utf8(msg_bytes));
},
None => {
println("not found!");
valid = false;
}
}
}
else {
valid = false;
}
}
if(!valid) {
response.push_str("HTTP/1.1 200 OK\r\nContent-Type: text/html; charset=UTF-8\r\n\r\n
<doctype !html><html><head><title>HTTP 403 Not Found</title>
</head>
<body><|fim▁hole|> <h1>HTTP 404 Error</h1>
<p>Sorry, the page you requested does not exist. Please check the url.</p>
</body></html>\r\n");
}
}
stream.write(response.as_bytes());
println!("Connection terminates.");
unsafe {
println!("Visitor count: {:d}", visitor_count);
}
}
}
}<|fim▁end|> | |
<|file_name|>viewport_array.py<|end_file_name|><|fim▁begin|>from OpenGLCffi.GLES3 import params
@params(api='gles3', prms=['first', 'count', 'v'])
def glViewportArrayvNV(first, count, v):
pass
<|fim▁hole|>@params(api='gles3', prms=['index', 'x', 'y', 'w', 'h'])
def glViewportIndexedfNV(index, x, y, w, h):
pass
@params(api='gles3', prms=['index', 'v'])
def glViewportIndexedfvNV(index, v):
pass
@params(api='gles3', prms=['first', 'count', 'v'])
def glScissorArrayvNV(first, count, v):
pass
@params(api='gles3', prms=['index', 'left', 'bottom', 'width', 'height'])
def glScissorIndexedNV(index, left, bottom, width, height):
pass
@params(api='gles3', prms=['index', 'v'])
def glScissorIndexedvNV(index, v):
pass
@params(api='gles3', prms=['first', 'count', 'v'])
def glDepthRangeArrayfvNV(first, count, v):
pass
@params(api='gles3', prms=['index', 'n', 'f'])
def glDepthRangeIndexedfNV(index, n, f):
pass
@params(api='gles3', prms=['target', 'index', 'data'])
def glGetFloati_vNV(target, index):
pass
@params(api='gles3', prms=['target', 'index'])
def glEnableiNV(target, index):
pass
@params(api='gles3', prms=['target', 'index'])
def glDisableiNV(target, index):
pass
@params(api='gles3', prms=['target', 'index'])
def glIsEnablediNV(target, index):
pass<|fim▁end|> | |
<|file_name|>MessageDigestHashFunctionTest.java<|end_file_name|><|fim▁begin|>/*
* Copyright (C) 2011 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.hash;
import com.google.common.base.Charsets;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import junit.framework.TestCase;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Arrays;
/**
* Tests for the MessageDigestHashFunction.
*
* @author Kurt Alfred Kluever
*/
public class MessageDigestHashFunctionTest extends TestCase {
private static final ImmutableSet<String> INPUTS = ImmutableSet.of("", "Z", "foobar");
// From "How Provider Implementations Are Requested and Supplied" from
// http://docs.oracle.com/javase/6/docs/technotes/guides/security/crypto/CryptoSpec.html
// - Some providers may choose to also include alias names.
// - For example, the "SHA-1" algorithm might be referred to as "SHA1".
// - The algorithm name is not case-sensitive.
private static final ImmutableMap<String, HashFunction> ALGORITHMS =
new ImmutableMap.Builder<String, HashFunction>()
.put("MD5", Hashing.md5())
.put("SHA", Hashing.sha1()) // Not the official name, but still works
.put("SHA1", Hashing.sha1()) // Not the official name, but still works
.put("sHa-1", Hashing.sha1()) // Not the official name, but still works
.put("SHA-1", Hashing.sha1())
.put("SHA-256", Hashing.sha256())
.put("SHA-384", Hashing.sha384())
.put("SHA-512", Hashing.sha512())
.build();
public void testHashing() {
for (String stringToTest : INPUTS) {
for (String algorithmToTest : ALGORITHMS.keySet()) {
assertMessageDigestHashing(HashTestUtils.ascii(stringToTest), algorithmToTest);
}
}
}
public void testPutAfterHash() {
Hasher sha1 = Hashing.sha1().newHasher();
assertEquals("2fd4e1c67a2d28fced849ee1bb76e7391b93eb12",
sha1.putString("The quick brown fox jumps over the lazy dog", Charsets.UTF_8)
.hash()
.toString());
try {
sha1.putInt(42);
fail();
} catch (IllegalStateException expected) {
}
}
public void testHashTwice() {
Hasher sha1 = Hashing.sha1().newHasher();<|fim▁hole|>
assertEquals("2fd4e1c67a2d28fced849ee1bb76e7391b93eb12",
sha1.putString("The quick brown fox jumps over the lazy dog", Charsets.UTF_8)
.hash()
.toString());
try {
HashCode unused = sha1.hash();
fail();
} catch (IllegalStateException expected) {
}
}
public void testToString() {
assertEquals("Hashing.md5()", Hashing.md5().toString());
assertEquals("Hashing.sha1()", Hashing.sha1().toString());
assertEquals("Hashing.sha256()", Hashing.sha256().toString());
assertEquals("Hashing.sha512()", Hashing.sha512().toString());
}
private static void assertMessageDigestHashing(byte[] input, String algorithmName) {
try {
MessageDigest digest = MessageDigest.getInstance(algorithmName);
assertEquals(
HashCode.fromBytes(digest.digest(input)),
ALGORITHMS.get(algorithmName).hashBytes(input));
for (int bytes = 4; bytes <= digest.getDigestLength(); bytes++) {
assertEquals(
HashCode.fromBytes(Arrays.copyOf(digest.digest(input), bytes)),
new MessageDigestHashFunction(algorithmName, bytes, algorithmName).hashBytes(input));
}
try {
int maxSize = digest.getDigestLength();
new MessageDigestHashFunction(algorithmName, maxSize + 1, algorithmName);
fail();
} catch (IllegalArgumentException expected) {
}
} catch (NoSuchAlgorithmException nsae) {
throw new AssertionError(nsae);
}
}
}<|fim▁end|> | |
<|file_name|>export.filter.func.import.js<|end_file_name|><|fim▁begin|>require("kaoscript/register");
var Type = require("@kaoscript/runtime").Type;
module.exports = function() {
var Shape = require("../export/export.class.default.ks")().Shape;
function foobar() {
if(arguments.length === 1 && Type.isString(arguments[0])) {
let __ks_i = -1;
let x = arguments[++__ks_i];
if(x === void 0 || x === null) {
throw new TypeError("'x' is not nullable");
}
else if(!Type.isString(x)) {
throw new TypeError("'x' is not of type 'String'");
}
return x;
}
else if(arguments.length === 1) {
let __ks_i = -1;
let x = arguments[++__ks_i];
if(x === void 0 || x === null) {
throw new TypeError("'x' is not nullable");
}
else if(!Type.isClassInstance(x, Shape)) {
throw new TypeError("'x' is not of type 'Shape'");
}
return x;
}
else {
throw new SyntaxError("Wrong number of arguments");<|fim▁hole|> return {
foobar: foobar
};
};<|fim▁end|> | }
}; |
<|file_name|>rfc2045.rs<|end_file_name|><|fim▁begin|>//! Module for dealing with RFC2045 style headers.
use super::rfc5322::Rfc5322Parser;
use std::collections::HashMap;
/// Parser over RFC 2045 style headers.
///
/// Things of the style `value; param1=foo; param2="bar"`
pub struct Rfc2045Parser<'s> {
parser: Rfc5322Parser<'s>,
}
impl<'s> Rfc2045Parser<'s> {
/// Create a new parser over `s`
pub fn new(s: &str) -> Rfc2045Parser {
Rfc2045Parser {
parser: Rfc5322Parser::new(s),
}
}
fn consume_token(&mut self) -> Option<String> {
let token = self.parser.consume_while(|c| {
match c {
// Not any tspecials
'(' | ')' | '<' | '>' | '@' | ',' | ';' | ':' | '\\' | '\"' | '/' | '[' | ']'
| '?' | '=' => false,
'!'..='~' => true,
_ => false,
}
});
if !token.is_empty() {
Some(token)
} else {
None
}
}
/// Consume up to all of the input into the value and a hashmap
/// over parameters to values.
pub fn consume_all(&mut self) -> (String, HashMap<String, String>) {
let value = self.parser.consume_while(|c| c != ';');
// Find the parameters
let mut params = HashMap::new();
while !self.parser.eof() {
// Eat the ; and any whitespace
assert_eq!(self.parser.consume_char(), Some(';'));
// RFC ignorant mail systems may append a ';' without a parameter after.
// This violates the RFC but does happen, so deal with it.
if self.parser.eof() {
break;
}
self.parser.consume_linear_whitespace();
let attribute = self.consume_token();
self.parser.consume_linear_whitespace();
assert_eq!(self.parser.consume_char(), Some('='));
self.parser.consume_linear_whitespace();
// Value can be token or quoted-string
let value = if self.parser.peek() == '"' {
self.parser.consume_quoted_string()
} else {
self.consume_token()
};
if let (Some(attrib), Some(val)) = (attribute, value) {
params.insert(attrib, val);
}
}
(value, params)
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::collections::HashMap;
struct ParserTestCase<'s> {
input: &'s str,
output: (&'s str, Vec<(&'s str, &'s str)>),
name: &'s str,
}
#[test]
pub fn test_foo() {
let tests = vec![
ParserTestCase {
input: "foo/bar",
output: ("foo/bar", vec![]),
name: "Basic value",
},
ParserTestCase {
input: "foo/bar; foo=bar",
output: ("foo/bar", vec![("foo", "bar")]),
name: "Basic value with parameter",
},
ParserTestCase {
input: "foo/bar; foo=\"bar\"",
output: ("foo/bar", vec![("foo", "bar")]),
name: "Basic value with quoted parameter",
},
ParserTestCase {
input: "foo/bar; foo=\"bar\"; baz=qux",
output: ("foo/bar", vec![("foo", "bar"), ("baz", "qux")]),
name: "Multiple values",
},
ParserTestCase {
input: "foo/bar; foo = \"bar\"; baz=qux",
output: ("foo/bar", vec![("foo", "bar"), ("baz", "qux")]),
name: "Parameter with space",
},
];
for test in tests.into_iter() {
let (expected_value, expected_param_list) = test.output;
let mut expected_params = HashMap::new();
for &(param_name, param_value) in expected_param_list.iter() {
expected_params.insert(param_name.to_string(), param_value.to_string());
}
let mut parser = Rfc2045Parser::new(test.input);
let (value, parameters) = parser.consume_all();<|fim▁hole|> }
}<|fim▁end|> |
assert!(value == expected_value.to_string(), test.name);
assert!(parameters == expected_params, test.name);
} |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>"""
run tests against a webserver running in the same reactor
NOTE: this test uses port 8888 on localhost
"""
import os
import ujson as json
import cyclone.httpclient
from twisted.internet import defer
from twisted.application import internet
from twisted.trial.unittest import TestCase
from twisted.python import log
from txbitwrap.api import factory as Api
from txbitwrap.machine import set_pnml_path
import txbitwrap.event
IFACE = '127.0.0.1'
PORT = 8888
OPTIONS = {
'listen-ip': IFACE,
'listen-port': PORT,
'machine-path': os.path.abspath(os.path.dirname(__file__) + '/../../schemata'),
'pg-host': '127.0.0.1',
'pg-port': 5432,
'pg-username': 'bitwrap',
'pg-password': 'bitwrap',
'pg-database': 'bitwrap'
}
class ApiTest(TestCase):
""" setup rpc endpoint and invoke ping method """
def setUp(self):
""" start tcp endpoint """
set_pnml_path(OPTIONS['machine-path'])
self.options = OPTIONS
#pylint: disable=no-member
self.service = internet.TCPServer(PORT, Api(self.options), interface=self.options['listen-ip'])
#pylint: enable=no-member
self.service.startService()
@defer.inlineCallbacks
def tearDown(self):
""" stop tcp endpoint """
self.service.stopService()
yield txbitwrap.event.rdq.stop()
@staticmethod
def url(resource):
""" bulid a url using test endpoint """
return 'http://%s:%s/%s' % (IFACE, PORT, resource)
@staticmethod
def client(resource):<|fim▁hole|> """ rpc client """
return cyclone.httpclient.JsonRPC(ApiTest.url(resource))
@staticmethod
def fetch(resource, **kwargs):
""" async request with httpclient"""
return cyclone.httpclient.fetch(ApiTest.url(resource), **kwargs)
@staticmethod
def dispatch(**event):
""" rpc client """
resource = 'dispatch/%s/%s/%s' % (event['schema'], event['oid'], event['action'])
url = ApiTest.url(resource)
if isinstance(event['payload'], str):
data = event['payload']
else:
data = json.dumps(event['payload'])
return cyclone.httpclient.fetch(url, postdata=data)
@staticmethod
def broadcast(**event):
""" rpc client """
resource = 'broadcast/%s/%s' % (event['schema'], event['id'])
url = ApiTest.url(resource)
data = json.dumps(event)
return cyclone.httpclient.fetch(url, postdata=data)<|fim▁end|> | |
<|file_name|>backbone.states.js<|end_file_name|><|fim▁begin|>var State = Backbone.State = function(options) {
options || (options = {});
};
_.extend(State.prototype, Backbone.Events, {
classes: [],
before: null,
on: null,
after: null,
events: {},
triggers: [],
multistate: [],
_isStateDescribedInSet: function( set, state ) {
var isDescribed = false;
_.each( set, function( entry ) {
if( state.match( entry ) ) {
isDescribed = true;
}
});
return isDescribed;
},
isStateDescribedInAllowed: function( state ) {
return this._isStateDescribedInSet( this.multistate.allow, state );
},
isStateDescribedInDisallowed: function( state ) {
return this._isStateDescribedInSet( this.multistate.disallow, state );
},
isStatePermitted: function( state ) {
var allowed = false;
if (this.multistate == "any" || this.multistate == "all") {
return true;
}
if(this.isStateDescribedInAllowed( state )) return true;
if(this.isStateDescribedInDisallowed( state )) return false;
return allowed;
},
});
// Generic State Machine
var StateMachine = Backbone.StateMachine = function(options) {
options || (options = {});
if( options.el ) {
this.setElement( options.el );
}
}
_.extend(StateMachine.prototype, Backbone.Events, {
states: {},
state: null,
el: null,
$el: null,
setElement: function( el ) {
this.el = el;
this.$el = this.el ? $(this.el) : null;
},
get classes( ) {
if(this.$el) { return _.toArray( this.$el.attr('class').split(/\s+/) ); }
else { return null; }
},
set classes( newClasses ) {
if( !this.$el ) return;
if( typeof newClasses === 'string' ) {
this.$el.attr('class', newClasses);
} else {
this.$el.attr('class', newClasses.join(' '));
}
},
createState: function( name, state ) {
state = (state instanceof State) ? state : new State( state );
this.states[name] = state;
return this;
},
// Private method for applying a state
_applyState: function( state ) {
this.events = _.union( this.events, state.events);
this.classes = _.union( this.classes, state.classes);
this.state = state;
},
<|fim▁hole|> this.classes = _.difference( this.classes, state.classes );
this.state = null;
},
//Public method for changing the state
pushState: function( name ) {
var oldState = this.state,
newState = this.states[name];
// Old State
if(oldState) {
this._removeState( oldState );
if(oldState.after) oldState.after( name );
}
// New State
if(newState && newState.before) {
newState.before();
}
this._applyState( newState );
if(this.state && this.state.on) {
this.state.on();
}
this.trigger("stateChanged", { oldState: oldState, newState: newState });
}
});<|fim▁end|> | // Private method for cleaning up the previous state
_removeState: function( state ) {
this.events = _.difference( this.events, state.events ); |
<|file_name|>JavaFxControllerFieldSearcher.java<|end_file_name|><|fim▁begin|>/*
* Copyright 2000-2013 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jetbrains.plugins.javaFX.fxml.refs;
import com.intellij.openapi.application.ApplicationManager;
import com.intellij.openapi.project.Project;
import com.intellij.openapi.util.Computable;
import com.intellij.openapi.vfs.VirtualFile;
import com.intellij.psi.*;
import com.intellij.psi.search.GlobalSearchScope;
import com.intellij.psi.search.LocalSearchScope;
import com.intellij.psi.search.SearchScope;
import com.intellij.psi.search.searches.ReferencesSearch;
import com.intellij.psi.util.PsiUtilCore;
import com.intellij.psi.xml.XmlAttribute;
import com.intellij.psi.xml.XmlAttributeValue;<|fim▁hole|>import com.intellij.util.QueryExecutor;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.plugins.javaFX.fxml.FxmlConstants;
import org.jetbrains.plugins.javaFX.indexing.JavaFxControllerClassIndex;
import java.util.List;
/**
* User: anna
* Date: 3/29/13
*/
public class JavaFxControllerFieldSearcher implements QueryExecutor<PsiReference, ReferencesSearch.SearchParameters>{
@Override
public boolean execute(@NotNull final ReferencesSearch.SearchParameters queryParameters, @NotNull final Processor<PsiReference> consumer) {
final PsiElement elementToSearch = queryParameters.getElementToSearch();
if (elementToSearch instanceof PsiField) {
final PsiField field = (PsiField)elementToSearch;
final PsiClass containingClass = ApplicationManager.getApplication().runReadAction(new Computable<PsiClass>() {
@Override
public PsiClass compute() {
return field.getContainingClass();
}
});
if (containingClass != null) {
final String qualifiedName = ApplicationManager.getApplication().runReadAction(new Computable<String>() {
@Override
public String compute() {
return containingClass.getQualifiedName();
}
});
if (qualifiedName != null) {
Project project = PsiUtilCore.getProjectInReadAction(containingClass);
final List<PsiFile> fxmlWithController =
JavaFxControllerClassIndex.findFxmlWithController(project, qualifiedName);
for (final PsiFile file : fxmlWithController) {
ApplicationManager.getApplication().runReadAction(() -> {
final String fieldName = field.getName();
if (fieldName == null) return;
final VirtualFile virtualFile = file.getViewProvider().getVirtualFile();
final SearchScope searchScope = queryParameters.getEffectiveSearchScope();
boolean contains = searchScope instanceof LocalSearchScope ? ((LocalSearchScope)searchScope).isInScope(virtualFile) :
((GlobalSearchScope)searchScope).contains(virtualFile);
if (contains) {
file.accept(new XmlRecursiveElementVisitor() {
@Override
public void visitXmlAttributeValue(final XmlAttributeValue value) {
final PsiReference reference = value.getReference();
if (reference != null) {
final PsiElement resolve = reference.resolve();
if (resolve instanceof XmlAttributeValue) {
final PsiElement parent = resolve.getParent();
if (parent instanceof XmlAttribute) {
final XmlAttribute attribute = (XmlAttribute)parent;
if (FxmlConstants.FX_ID.equals(attribute.getName()) && fieldName.equals(attribute.getValue())) {
consumer.process(reference);
}
}
}
}
}
});
}
});
}
}
}
}
return true;
}
}<|fim▁end|> | import com.intellij.util.Processor; |
<|file_name|>content.py<|end_file_name|><|fim▁begin|># coding: utf-8
from django.db import models
from django.utils.translation import ugettext_lazy as _, pgettext_lazy
class ContentBase(models.Model):
"""
Base class for models that share content attributes
The attributes added by this mixin are ``title``, ``description``,
``content`` and ``is_visible``.
Attributes:
:is_visible: whether the content should be displayed by normal users
:title: title of the content, at most 192 characters
:description: most content objects have a description, with an unlimited size
:content: actual content of the object, with an unlimited size
"""
# Fields
is_visible = models.BooleanField(default=True, verbose_name=pgettext_lazy('content', "visible"))
title = models.CharField(blank=False, max_length=192, verbose_name=_("title"))
description = models.TextField(blank=True, verbose_name=_("description"))
content = models.TextField(blank=False, verbose_name=_("content"))
# Metadata
class Meta:<|fim▁hole|> abstract = True
def save(self, *args, **kwargs):
""" Save the object to the database """
super(ContentBase, self).save(*args, **kwargs)<|fim▁end|>
<|file_name|>compress.py<|end_file_name|><|fim▁begin|>"""
Demonstrates how the bz2 module may be used to create a compressed object
which represents a bitarray.
"""
import bz2
from bitarray import bitarray
def compress(ba):
"""
Given a bitarray, return an object which represents all information
within the bitarray in a compressed form.
The function `decompress` can be used to restore the bitarray from the
compressed object.
"""
assert isinstance(ba, bitarray)
return ba.length(), bz2.compress(ba.tobytes()), ba.endian()
def decompress(obj):
"""
Given an object (created by `compress`), return a copy of the
original bitarray.<|fim▁hole|> """
n, data, endian = obj
res = bitarray(endian=endian)
res.frombytes(bz2.decompress(data))
del res[n:]
return res
if __name__ == '__main__':
a = bitarray(12345)
a.setall(0)
a[::10] = True
c = compress(a)
print(c)
b = decompress(c)
assert a == b, a.endian() == b.endian()<|fim▁end|> | |
<|file_name|>index.js<|end_file_name|><|fim▁begin|>var prependChar = '#';
var util = require('util');
function convertToLines(str) {
return str.split('\n').map(function(newStr) {
return prependChar + newStr;
});
}
var newConsole = {
log : function log() {
convertToLines(util.format.apply(this, arguments)).forEach(function(line) {
console.log(line);
});<|fim▁hole|> });
}
};
module.exports = newConsole;<|fim▁end|> | },
error : function error() {
convertToLines(util.format.apply(this, arguments)).forEach(function(line) {
console.error(line); |
<|file_name|>net.py<|end_file_name|><|fim▁begin|># Copyright 2018 Flight Lab authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library for network related helpers."""
import socket
def get_ip():
"""Get primary IP (the one with a default route) of local machine.<|fim▁hole|> """
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# doesn't even have to be reachable
s.connect(('10.255.255.255', 1))
return s.getsockname()[0]
except:
return '127.0.0.1'
finally:
s.close()<|fim▁end|> |
This works on both Linux and Windows platforms, and doesn't require working
internet connection. |
<|file_name|>bool.py<|end_file_name|><|fim▁begin|># This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
from ..._input_field import InputField
class BoolInput(InputField):
"""Simple input that controls a boolean variable.
GUI indications
----------------<|fim▁hole|> _true_strings = ("t", "true")
dtype = bool
_type = 'bool'
_default = {}
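    # Hedged examples of the parsing rules implemented below (illustrative;
    # constructor arguments depend on InputField and are elided):
    #   parse("TRUE") -> True    (case-insensitive match against _true_strings)
    #   parse("f")    -> False
    #   parse(None)   -> None    (passed through untouched)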
def parse(self, val):
if val is None:
pass
elif isinstance(val, str):
val = val.lower()
if val in self._true_strings:
val = True
elif val in self._false_strings:
val = False
else:
raise ValueError(f"String '{val}' is not understood by {self.__class__.__name__}")
elif not isinstance(val, bool):
self._raise_type_error(val)
return val<|fim▁end|> | It can be implemented as a switch or a checkbox, for example.
"""
_false_strings = ("f", "false") |
<|file_name|>Server.java<|end_file_name|><|fim▁begin|>/*
Copyright 2011-2012 Frederic Menou and others referred in AUTHORS file.
This file is part of Magrit.
Magrit is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
Magrit is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public
License along with Magrit.
If not, see <http://www.gnu.org/licenses/>.
*/
package org.kercoin.magrit.sshd;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.List;
import org.apache.sshd.SshServer;
import org.apache.sshd.common.NamedFactory;
import org.apache.sshd.common.util.SecurityUtils;
import org.apache.sshd.server.CommandFactory;
import org.apache.sshd.server.ForwardingFilter;
import org.apache.sshd.server.PublickeyAuthenticator;
import org.apache.sshd.server.UserAuth;
import org.apache.sshd.server.auth.UserAuthNone;
import org.apache.sshd.server.auth.UserAuthPublicKey;
import org.apache.sshd.server.keyprovider.PEMGeneratorHostKeyProvider;
import org.apache.sshd.server.keyprovider.SimpleGeneratorHostKeyProvider;
import org.apache.sshd.server.session.ServerSession;
import org.kercoin.magrit.core.Configuration;
import org.kercoin.magrit.core.Context;
import org.kercoin.magrit.core.Configuration.Authentication;
import org.kercoin.magrit.core.services.Service;
import org.kercoin.magrit.core.services.ServiceException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.inject.Inject;
import com.google.inject.Singleton;
@Singleton
public class Server implements Service.UseTCP {
protected final Logger log = LoggerFactory.getLogger(getClass());
private SshServer sshd;
private final int port;
@Inject
public Server(final Context ctx, CommandFactory factory) {
port = ctx.configuration().getSshPort();
sshd = SshServer.setUpDefaultServer();
if (SecurityUtils.isBouncyCastleRegistered()) {
sshd.setKeyPairProvider(new PEMGeneratorHostKeyProvider("key.pem"));
} else {
sshd.setKeyPairProvider(new SimpleGeneratorHostKeyProvider("key.ser"));
}
PublickeyAuthenticator auth = null;
if (ctx.configuration().getAuthentication() == Configuration.Authentication.SSH_PUBLIC_KEYS) {
auth = ctx.getInjector().getInstance(PublickeyAuthenticator.class);
}
setupUserAuth(auth);
sshd.setCommandFactory(factory);
if (!ctx.configuration().isRemoteAllowed()) {
sshd.setSessionFactory(new LocalOnlySessionFactory());
}
sshd.setForwardingFilter(new ForwardingFilter() {
public boolean canForwardAgent(ServerSession session) {
return false;
}
public boolean canForwardX11(ServerSession session) {
return false;
}
public boolean canListen(InetSocketAddress address, ServerSession session) {
return false;
}
public boolean canConnect(InetSocketAddress address, ServerSession session) {
return false;
}
});
}
private void setupUserAuth(PublickeyAuthenticator auth) {
List<NamedFactory<UserAuth>> list = new ArrayList<NamedFactory<UserAuth>>();
if (auth != null) {
list.add(new UserAuthPublicKey.Factory());
sshd.setPublickeyAuthenticator(auth);
} else {
list.add(new UserAuthNone.Factory());
}
sshd.setUserAuthFactories(list);
}
@Override
public void start() throws ServiceException {
sshd.setPort(port);
try {
sshd.start();
} catch (IOException e) {
throw new ServiceException(e);
}
}
@Override<|fim▁hole|>
@Override
public int getTCPPort() {
return port;
}
@Override
public void logConfig(ConfigurationLogger log, Configuration cfg) {
log.logKey("SSHd", cfg.getSshPort());
log.logKey("Listening", cfg.isRemoteAllowed() ? "everybody" : "localhost");
log.logKey("Authent", cfg.getAuthentication().external());
if (cfg.getAuthentication() == Authentication.SSH_PUBLIC_KEYS) {
log.logSubKey("Keys dir", cfg.getPublickeyRepositoryDir());
}
log.logKey("Home dir", cfg.getRepositoriesHomeDir());
log.logKey("Work dir", cfg.getWorkHomeDir());
}
}<|fim▁end|> | public String getName() {
return "SSH Service";
} |
<|file_name|>context_processors.py<|end_file_name|><|fim▁begin|><|fim▁hole|>def site_news(request):
"""
Inserts the currently active news items into the template context.
This ignores MAX_SITE_NEWS_ITEMS.
"""
# Grab all active items in proper date/time range.
items = SiteNewsItem.current_and_active.all()
return {'site_news_items': items}<|fim▁end|> | from django.conf import settings
from site_news.models import SiteNewsItem
|
<|file_name|>file_test.go<|end_file_name|><|fim▁begin|>/*
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"encoding/json"
"io/ioutil"
"os"
"sort"
"strings"
"testing"
"time"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta1"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/validation"
"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet"
"github.com/GoogleCloudPlatform/kubernetes/pkg/types"
)
func ExampleManifestAndPod(id string) (v1beta1.ContainerManifest, api.BoundPod) {
manifest := v1beta1.ContainerManifest{
ID: id,
UUID: types.UID(id),
Containers: []v1beta1.Container{
{
Name: "c" + id,
Image: "foo",
TerminationMessagePath: "/somepath",
},
},
Volumes: []v1beta1.Volume{
{
Name: "host-dir",
Source: v1beta1.VolumeSource{
HostDir: &v1beta1.HostPath{"/dir/path"},
},
},
},
}
expectedPod := api.BoundPod{
ObjectMeta: api.ObjectMeta{
Name: id,
UID: types.UID(id),
},
Spec: api.PodSpec{
Containers: []api.Container{
{
Name: "c" + id,
Image: "foo",
},
},
Volumes: []api.Volume{
{
Name: "host-dir",
Source: api.VolumeSource{
HostPath: &api.HostPath{"/dir/path"},
},
},
},
},
}
return manifest, expectedPod
}
func TestExtractFromNonExistentFile(t *testing.T) {
ch := make(chan interface{}, 1)
c := sourceFile{"/some/fake/file", ch}
err := c.extractFromPath()
if err == nil {
t.Errorf("Expected error")
}
}
func TestUpdateOnNonExistentFile(t *testing.T) {
ch := make(chan interface{})
NewSourceFile("random_non_existent_path", time.Millisecond, ch)
select {
case got := <-ch:
update := got.(kubelet.PodUpdate)
expected := CreatePodUpdate(kubelet.SET, kubelet.FileSource)
if !api.Semantic.DeepDerivative(expected, update) {
t.Fatalf("Expected %#v, Got %#v", expected, update)
}
case <-time.After(time.Second):
t.Errorf("Expected update, timeout instead")
}
}
func writeTestFile(t *testing.T, dir, name string, contents string) *os.File {
file, err := ioutil.TempFile(os.TempDir(), "test_pod_config")
if err != nil {
t.Fatalf("Unable to create test file %#v", err)
}
file.Close()
if err := ioutil.WriteFile(file.Name(), []byte(contents), 0555); err != nil {
t.Fatalf("Unable to write test file %#v", err)
}
return file
}
func TestReadFromFile(t *testing.T) {
file := writeTestFile(t, os.TempDir(), "test_pod_config",
`{
"version": "v1beta1",
"uuid": "12345",
"id": "test",
"containers": [{ "image": "test/image", imagePullPolicy: "PullAlways"}]
}`)
defer os.Remove(file.Name())
ch := make(chan interface{})
NewSourceFile(file.Name(), time.Millisecond, ch)
select {
case got := <-ch:
update := got.(kubelet.PodUpdate)
expected := CreatePodUpdate(kubelet.SET, kubelet.FileSource, api.BoundPod{
ObjectMeta: api.ObjectMeta{
Name: "test",
UID: "12345",
Namespace: "",
SelfLink: "",
},
Spec: api.PodSpec{Containers: []api.Container{{Image: "test/image"}}},
})
// There's no way to provide namespace in ContainerManifest, so
// it will be defaulted.
if !strings.HasPrefix(update.Pods[0].ObjectMeta.Namespace, "file-") {
t.Errorf("Unexpected namespace: %s", update.Pods[0].ObjectMeta.Namespace)
}
update.Pods[0].ObjectMeta.Namespace = ""
// SelfLink depends on namespace.
if !strings.HasPrefix(update.Pods[0].ObjectMeta.SelfLink, "/api/") {
t.Errorf("Unexpected selflink: %s", update.Pods[0].ObjectMeta.SelfLink)
}
update.Pods[0].ObjectMeta.SelfLink = ""
if !api.Semantic.DeepDerivative(expected, update) {
t.Fatalf("Expected %#v, Got %#v", expected, update)
}
case <-time.After(time.Second):
t.Errorf("Expected update, timeout instead")
}
}
func TestReadFromFileWithoutID(t *testing.T) {
file := writeTestFile(t, os.TempDir(), "test_pod_config",
`{
"version": "v1beta1",
"uuid": "12345",
"containers": [{ "image": "test/image", imagePullPolicy: "PullAlways"}]
}`)
defer os.Remove(file.Name())
ch := make(chan interface{})
NewSourceFile(file.Name(), time.Millisecond, ch)
select {
case got := <-ch:
update := got.(kubelet.PodUpdate)
expected := CreatePodUpdate(kubelet.SET, kubelet.FileSource, api.BoundPod{
ObjectMeta: api.ObjectMeta{
Name: "",
UID: "12345",
Namespace: "",
SelfLink: "",
},
Spec: api.PodSpec{Containers: []api.Container{{Image: "test/image"}}},
})
if len(update.Pods[0].ObjectMeta.Name) == 0 {
t.Errorf("Name did not get defaulted")
}
update.Pods[0].ObjectMeta.Name = ""
update.Pods[0].ObjectMeta.Namespace = ""
update.Pods[0].ObjectMeta.SelfLink = ""
if !api.Semantic.DeepDerivative(expected, update) {
t.Fatalf("Expected %#v, Got %#v", expected, update)
}
case <-time.After(time.Second):
t.Errorf("Expected update, timeout instead")
}
}
func TestReadV1Beta2FromFile(t *testing.T) {
file := writeTestFile(t, os.TempDir(), "test_pod_config",
`{
"version": "v1beta2",
"uuid": "12345",
"id": "test",
"containers": [{ "image": "test/image", imagePullPolicy: "PullAlways"}]
}`)
defer os.Remove(file.Name())
ch := make(chan interface{})
NewSourceFile(file.Name(), time.Millisecond, ch)
select {
case got := <-ch:
update := got.(kubelet.PodUpdate)
expected := CreatePodUpdate(kubelet.SET, kubelet.FileSource, api.BoundPod{
ObjectMeta: api.ObjectMeta{
Name: "test",
UID: "12345",
Namespace: "",
SelfLink: "",
},
Spec: api.PodSpec{Containers: []api.Container{{Image: "test/image"}}},
})
update.Pods[0].ObjectMeta.Namespace = ""
update.Pods[0].ObjectMeta.SelfLink = ""
if !api.Semantic.DeepDerivative(expected, update) {
t.Fatalf("Expected %#v, Got %#v", expected, update)
}
case <-time.After(time.Second):
t.Errorf("Expected update, timeout instead")
}
}
func TestReadFromFileWithDefaults(t *testing.T) {
file := writeTestFile(t, os.TempDir(), "test_pod_config",
`{
"version": "v1beta1",
"id": "test",
"containers": [{ "image": "test/image" }]
}`)
defer os.Remove(file.Name())
ch := make(chan interface{})
NewSourceFile(file.Name(), time.Millisecond, ch)
select {
case got := <-ch:
update := got.(kubelet.PodUpdate)
if update.Pods[0].ObjectMeta.UID == "" {
t.Errorf("Unexpected UID: %s", update.Pods[0].ObjectMeta.UID)
}
case <-time.After(time.Second):
t.Errorf("Expected update, timeout instead")
}
}
func TestExtractFromBadDataFile(t *testing.T) {
file := writeTestFile(t, os.TempDir(), "test_pod_config", string([]byte{1, 2, 3}))
defer os.Remove(file.Name())
ch := make(chan interface{}, 1)
c := sourceFile{file.Name(), ch}
err := c.extractFromPath()
if err == nil {
t.Fatalf("Expected error")
}
expectEmptyChannel(t, ch)
}
func TestExtractFromEmptyDir(t *testing.T) {
dirName, err := ioutil.TempDir("", "foo")
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
defer os.RemoveAll(dirName)
ch := make(chan interface{}, 1)
c := sourceFile{dirName, ch}
err = c.extractFromPath()
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
update := (<-ch).(kubelet.PodUpdate)
expected := CreatePodUpdate(kubelet.SET, kubelet.FileSource)
if !api.Semantic.DeepDerivative(expected, update) {
t.Errorf("Expected %#v, Got %#v", expected, update)
}
}<|fim▁hole|> manifest2, expectedPod2 := ExampleManifestAndPod("2")
manifests := []v1beta1.ContainerManifest{manifest, manifest2}
pods := []api.BoundPod{expectedPod, expectedPod2}
files := make([]*os.File, len(manifests))
dirName, err := ioutil.TempDir("", "foo")
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
for i, manifest := range manifests {
data, err := json.Marshal(manifest)
if err != nil {
t.Errorf("Unexpected error: %v", err)
continue
}
file, err := ioutil.TempFile(dirName, manifest.ID)
if err != nil {
t.Errorf("Unexpected error: %v", err)
continue
}
name := file.Name()
if err := file.Close(); err != nil {
t.Errorf("Unexpected error: %v", err)
continue
}
ioutil.WriteFile(name, data, 0755)
files[i] = file
}
ch := make(chan interface{}, 1)
c := sourceFile{dirName, ch}
err = c.extractFromPath()
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
update := (<-ch).(kubelet.PodUpdate)
for i := range update.Pods {
update.Pods[i].Namespace = "foobar"
update.Pods[i].SelfLink = ""
}
expected := CreatePodUpdate(kubelet.SET, kubelet.FileSource, pods...)
for i := range expected.Pods {
expected.Pods[i].Namespace = "foobar"
}
sort.Sort(sortedPods(update.Pods))
sort.Sort(sortedPods(expected.Pods))
if !api.Semantic.DeepDerivative(expected, update) {
t.Fatalf("Expected %#v, Got %#v", expected, update)
}
for i := range update.Pods {
if errs := validation.ValidateBoundPod(&update.Pods[i]); len(errs) != 0 {
t.Errorf("Expected no validation errors on %#v, Got %#v", update.Pods[i], errs)
}
}
}<|fim▁end|> |
func TestExtractFromDir(t *testing.T) {
manifest, expectedPod := ExampleManifestAndPod("1") |
<|file_name|>integration_tests.py<|end_file_name|><|fim▁begin|># Copyright (c) 2013-2016 Cinchapi Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nose.tools import *
import os
import time
from subprocess import *
import signal
from . import test_data
from concourse import Concourse, Tag, Link, Diff, Operator, constants
from concourse.thriftapi.shared.ttypes import Type
from concourse.utils import python_to_thrift
import ujson
from tests import ignore
import socket
class IntegrationBaseTest(object):
"""
Base class for unit tests that use Mockcourse.
"""
port = None
process = None
client = None
expected_network_latency = 0.05
@classmethod
def setup_class(cls):
""" Fixture method to start Mockcourse and connect before the tests start to run.
"""
port = IntegrationBaseTest.get_open_port()
dir = os.path.dirname(os.path.realpath(__file__)) + '/../../mockcourse'
script = dir + '/mockcourse '+str(port)
cls.process = Popen(script, shell=True, preexec_fn=os.setsid)
cls.client = None
tries = 5
while tries > 0 and cls.client is None:
tries -= 1
time.sleep(1) # Wait for Mockcourse to start
try:
cls.client = Concourse.connect(port=port)
except RuntimeError as e:
if tries == 0:
raise e
else:
continue
@classmethod
def teardown_class(cls):
""" Fixture method to kill Mockcourse after all the tests have fun.
"""
os.killpg(cls.process.pid, signal.SIGTERM)
def tearDown(self):
"""" Logout" and clear all the data that the client stored in Mockcourse after each test. This ensures that the
environment for each test is clean and predicatable.
"""
self.client.logout() # Mockcourse logout simply clears the content of the datastore
def get_time_anchor(self):
""" Return a time anchor and sleep for long enough to account for network latency
"""
anchor = test_data.current_time_millis()
time.sleep(self.expected_network_latency)
return anchor
@staticmethod
def get_open_port():
"""Return an open port that is chosen by the OS
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(("localhost", 0))
port = sock.getsockname()[1]
sock.close()
return port
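    # Hedged illustration (not in the original): setup_class wires this helper
    # together roughly as
    #   port = IntegrationBaseTest.get_open_port()
    #   cls.client = Concourse.connect(port=port)
    # Note the socket is closed before Mockcourse binds the port, so a small
    # race window exists if another process claims it first.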
class TestPythonClientDriver(IntegrationBaseTest):
"""
Implementations for standard unit tests that verify the Python client driver
conforms to the Concourse standard
"""
def __do_test_value_round_trip(self, value, ttype):
"""
Do the round_trip test logic for the specified value of the specified type
:param value:
"""
key = test_data.random_string()
record = self.client.add(key=key, value=value)
stored = self.client.get(key=key, record=record)
assert_equal(value, stored)
assert_equal(python_to_thrift(stored).type, ttype)
def test_string_round_trip(self):
self.__do_test_value_round_trip(test_data.random_string(), Type.STRING)
def test_bool_round_trip(self):
self.__do_test_value_round_trip(test_data.random_bool(), Type.BOOLEAN)
def test_tag_round_trip(self):
self.__do_test_value_round_trip(Tag.create(test_data.random_string()), Type.TAG)
def test_link_round_trip(self):
self.__do_test_value_round_trip(Link.to(test_data.random_int()), Type.LINK)
def test_int_round_trip(self):
self.__do_test_value_round_trip(test_data.random_int(), Type.INTEGER)
self.__do_test_value_round_trip(2147483647, Type.INTEGER)
self.__do_test_value_round_trip(-2147483648, Type.INTEGER)
def test_long_round_trip(self):
self.__do_test_value_round_trip(2147483648, Type.LONG)
self.__do_test_value_round_trip(-2147483649, Type.LONG)
self.__do_test_value_round_trip(test_data.random_long(), Type.LONG)
def test_float_round_trip(self):
self.__do_test_value_round_trip(3.4028235E38, Type.DOUBLE)
self.__do_test_value_round_trip(-1.4E-45, Type.DOUBLE)
def test_abort(self):
self.client.stage()
key = test_data.random_string()
value = "some value"
record = 1
self.client.add(key=key, value=value, record=record)
self.client.abort()
assert_is_none(self.client.get(key=key, record=record))
def test_add_key_value(self):
key = test_data.random_string()
value = "static value"
record = self.client.add(key=key, value=value)
assert_is_not_none(record)
stored = self.client.get(key=key, record=record)
assert_equal(stored, value)
def test_add_key_value_record(self):
key = test_data.random_string()
value = "static value"
record = 17
assert_true(self.client.add(key=key, value=value, record=record))
stored = self.client.get(key=key, record=record)
assert_equal(stored, value)
def test_add_key_value_records(self):
key = test_data.random_string()
value = "static value"
records = [1, 2, 3]
result = self.client.add(key=key, value=value, records=records)
assert_true(isinstance(result, dict))
assert_true(result.get(1))
assert_true(result.get(2))
assert_true(result.get(3))
def test_audit_key_record(self):
key = test_data.random_string()
values = ["one", "two", "three"]
record = 1000
for value in values:
self.client.set(key, value, record)
audit = self.client.audit(key, record)
assert_equal(5, len(audit))
expected = 'ADD'
for k, v in audit.items():
assert_true(v.startswith(expected))
expected = 'REMOVE' if expected == 'ADD' else 'ADD'
def test_audit_key_record_start(self):
key = test_data.random_string()
values = ["one", "two", "three"]
record = 1001
for value in values:
self.client.set(key, value, record)
start = self.client.time()
values = [4, 5, 6]
for value in values:
self.client.set(key, value, record)
audit = self.client.audit(key, record, start=start)
assert_equal(6, len(audit))
def test_audit_key_record_start_end(self):
key = test_data.random_string()
values = ["one", "two", "three"]
record = 1002
for value in values:
self.client.set(key, value, record)
start = self.client.time()
values = [4, 5, 6]
for value in values:
self.client.set(key, value, record)
end = self.client.time()
values = [True, False]
for value in values:
self.client.set(key, value, record)
audit = self.client.audit(key, record, start=start, end=end)
assert_equal(6, len(audit))
def test_audit_key_record_startstr(self):
key = test_data.random_string()
values = ["one", "two", "three"]
record = 1001
for value in values:
self.client.set(key, value, record)
anchor = self.get_time_anchor()
values = [4, 5, 6]
for value in values:
self.client.set(key, value, record)
start = test_data.get_elapsed_millis_string(anchor)
audit = self.client.audit(key, record, start=start)
assert_equal(6, len(audit))
def test_audit_key_record_startstr_endstr(self):
key = test_data.random_string()
values = ["one", "two", "three"]
record = 1002
for value in values:
self.client.set(key, value, record)
start_anchor = self.get_time_anchor()
values = [4, 5, 6]
for value in values:
self.client.set(key, value, record)
end_anchor = self.get_time_anchor()
values = [True, False]
for value in values:
self.client.set(key, value, record)
start = test_data.get_elapsed_millis_string(start_anchor)
end = test_data.get_elapsed_millis_string(end_anchor)
audit = self.client.audit(key, record, start=start, end=end)
assert_equal(6, len(audit))
def test_audit_record(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
key3 = test_data.random_string()
value = "foo"
record = 1002
self.client.add(key1, value, record)
self.client.add(key2, value, record)
self.client.add(key3, value, record)
audit = self.client.audit(record)
assert_equal(3, len(audit))
def test_audit_record_start(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
key3 = test_data.random_string()
value = "bar"
record = 344
self.client.add(key1, value, record)
self.client.add(key2, value, record)
self.client.add(key3, value, record)
start = self.client.time()
self.client.remove(key1, value, record)
self.client.remove(key2, value, record)
self.client.remove(key3, value, record)
audit = self.client.audit(record, start=start)
assert_equal(3, len(audit))
def test_audit_record_start_end(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
key3 = test_data.random_string()
value = "bar"
record = 344
self.client.add(key1, value, record)
self.client.add(key2, value, record)
self.client.add(key3, value, record)
start = self.client.time()
self.client.remove(key1, value, record)
self.client.remove(key2, value, record)
self.client.remove(key3, value, record)
end = self.client.time()
self.client.add(key1, value, record)
self.client.add(key2, value, record)
self.client.add(key3, value, record)
audit = self.client.audit(record, start=start, end=end)
assert_equal(3, len(audit))
def test_audit_record_startstr(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
key3 = test_data.random_string()
value = "bar"
record = 344
self.client.add(key1, value, record)
self.client.add(key2, value, record)
self.client.add(key3, value, record)
anchor = self.get_time_anchor()
self.client.remove(key1, value, record)
self.client.remove(key2, value, record)
self.client.remove(key3, value, record)
start = test_data.get_elapsed_millis_string(anchor)
audit = self.client.audit(record, start=start)
assert_equal(3, len(audit))
def test_audit_record_startstr_endstr(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
key3 = test_data.random_string()
value = "bar"
record = 344
self.client.add(key1, value, record)
self.client.add(key2, value, record)
self.client.add(key3, value, record)
start_anchor = self.get_time_anchor()
self.client.remove(key1, value, record)
self.client.remove(key2, value, record)
self.client.remove(key3, value, record)
end_anchor = self.get_time_anchor()
self.client.add(key1, value, record)
self.client.add(key2, value, record)
self.client.add(key3, value, record)
start = test_data.get_elapsed_millis_string(start_anchor)
end = test_data.get_elapsed_millis_string(end_anchor)
audit = self.client.audit(record, start=start, end=end)
assert_equal(3, len(audit))
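# browse() returns an index view: for a single key, a mapping from each stored
# value to the records containing it; for multiple keys, a mapping from each
# key to such a value -> records index.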
def test_browse_key(self):
key = test_data.random_string()
value = 10
self.client.add(key, value, [1, 2, 3])
value = test_data.random_string()
self.client.add(key, value, [10, 20, 30])
data = self.client.browse(key)
assert_equal([1, 2, 3], data.get(10))
assert_equal([10, 20, 30], data.get(value))
def test_browse_keys(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
key3 = test_data.random_string()
value1 = "A"
value2 = "B"
value3 = "C"
record1 = 1
record2 = 2
record3 = 3
self.client.add(key1, value1, record1)
self.client.add(key2, value2, record2)
self.client.add(key3, value3, record3)
data = self.client.browse([key1, key2, key3])
assert_equal({value1: [record1]}, data.get(key1))
assert_equal({value2: [record2]}, data.get(key2))
assert_equal({value3: [record3]}, data.get(key3))
def test_browse_keys_time(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
key3 = test_data.random_string()
value1 = "A"
value2 = "B"
value3 = "C"
record1 = 1
record2 = 2
record3 = 3
self.client.add(key1, value1, record1)
self.client.add(key2, value2, record2)
self.client.add(key3, value3, record3)
time = self.client.time()
self.client.add(key1, "Foo")
self.client.add(key2, "Foo")
self.client.add(key3, "Foo")
data = self.client.browse([key1, key2, key3], time=time)
assert_equal({value1: [record1]}, data.get(key1))
assert_equal({value2: [record2]}, data.get(key2))
assert_equal({value3: [record3]}, data.get(key3))
def test_browse_key_timestr(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
key3 = test_data.random_string()
value1 = "A"
value2 = "B"
value3 = "C"
record1 = 1
record2 = 2
record3 = 3
self.client.add(key1, value1, record1)
self.client.add(key2, value2, record2)
self.client.add(key3, value3, record3)
ts = test_data.get_elapsed_millis_string(self.get_time_anchor())
data = self.client.browse([key1, key2, key3], time=ts)
assert_equal({value1: [record1]}, data.get(key1))
assert_equal({value2: [record2]}, data.get(key2))
assert_equal({value3: [record3]}, data.get(key3))
@ignore
def test_browse_keys_timestr(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
key3 = test_data.random_string()
value1 = "A"
value2 = "B"
value3 = "C"
record1 = 1
record2 = 2
record3 = 3
self.client.add(key1, value1, record1)
self.client.add(key2, value2, record2)
self.client.add(key3, value3, record3)
anchor = self.get_time_anchor()
self.client.add(key1, "D", record1)
ts = test_data.get_elapsed_millis_string(anchor)
data = self.client.browse([key1, key2, key3], time=ts)
assert_equal({value1: [record1]}, data.get(key1))
assert_equal({value2: [record2]}, data.get(key2))
assert_equal({value3: [record3]}, data.get(key3))
def test_browse_key_time(self):
key = test_data.random_string()
value = 10
self.client.add(key, value, [1, 2, 3])
value = test_data.random_string()
self.client.add(key, value, [10, 20, 30])
timestamp = self.client.time()
self.client.add(key=key, value=True)
data = self.client.browse(key, timestamp)
assert_equal([1, 2, 3], data.get(10))
assert_equal([10, 20, 30], data.get(value))
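# chronologize() returns an ordered mapping from change timestamp to the
# snapshot of values held after that change; note that the final empty
# snapshot (after every value is removed) is not included.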
def test_chronologize_key_record(self):
key = test_data.random_string()
record = test_data.random_long()
self.client.add(key, 1, record)
self.client.add(key, 2, record)
self.client.add(key, 3, record)
self.client.remove(key, 1, record)
self.client.remove(key, 2, record)
self.client.remove(key, 3, record)
data = self.client.chronologize(key, record)
assert_equal([[1], [1, 2], [1, 2, 3], [2, 3], [3]], list(data.values()))
def test_chronologize_key_record_start(self):
key = test_data.random_string()
record = test_data.random_long()
self.client.add(key, 1, record)
self.client.add(key, 2, record)
self.client.add(key, 3, record)
start = self.client.time()
self.client.remove(key, 1, record)
self.client.remove(key, 2, record)
self.client.remove(key, 3, record)
data = self.client.chronologize(key, record, time=start)
assert_equal([[2, 3], [3]], list(data.values()))
def test_chronologize_key_record_start_end(self):
key = test_data.random_string()
record = test_data.random_long()
self.client.add(key, 1, record)
self.client.add(key, 2, record)
self.client.add(key, 3, record)
start = self.client.time()
self.client.remove(key, 1, record)
end = self.client.time()
self.client.remove(key, 2, record)
self.client.remove(key, 3, record)
data = self.client.chronologize(key, record, timestamp=start, end=end)
assert_equal([[2, 3]], list(data.values()))
def test_chronologize_key_record_startstr(self):
key = test_data.random_string()
record = test_data.random_long()
self.client.add(key, 1, record)
self.client.add(key, 2, record)
self.client.add(key, 3, record)
anchor = self.get_time_anchor()
self.client.remove(key, 1, record)
self.client.remove(key, 2, record)
self.client.remove(key, 3, record)
start = test_data.get_elapsed_millis_string(anchor)
data = self.client.chronologize(key, record, time=start)
assert_equal([[2, 3], [3]], list(data.values()))
def test_chronologize_key_record_startstr_endstr(self):
key = test_data.random_string()
record = test_data.random_long()
self.client.add(key, 1, record)
self.client.add(key, 2, record)
self.client.add(key, 3, record)
start_anchor = self.get_time_anchor()
self.client.remove(key, 1, record)
end_anchor = self.get_time_anchor()
self.client.remove(key, 2, record)
self.client.remove(key, 3, record)
start = test_data.get_elapsed_millis_string(start_anchor)
end = test_data.get_elapsed_millis_string(end_anchor)
data = self.client.chronologize(key, record, timestamp=start, end=end)
assert_equal([[2, 3]], list(data.values()))
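# clear() deletes every value from the targeted keys/records, so a subsequent
# select() returns an empty list (single key/record) or an empty dict (plural).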
def test_clear_key_record(self):
key = test_data.random_string()
record = test_data.random_long()
self.client.add(key, 1, record)
self.client.add(key, 2, record)
self.client.add(key, 3, record)
self.client.clear(key=key, record=record)
data = self.client.select(key=key, record=record)
assert_equal([], data)
def test_clear_key_records(self):
key = test_data.random_string()
records = [1, 2, 3]
self.client.add(key, 1, records)
self.client.add(key, 2, records)
self.client.add(key, 3, records)
self.client.clear(key=key, records=records)
data = self.client.select(key=key, records=records)
assert_equal({}, data)
def test_clear_keys_record(self):
key1 = test_data.random_string(6)
key2 = test_data.random_string(7)
key3 = test_data.random_string(8)
record = test_data.random_long()
self.client.add(key1, 1, record)
self.client.add(key2, 2, record)
self.client.add(key3, 3, record)
self.client.clear(keys=[key1, key2, key3], record=record)
data = self.client.select(keys=[key1, key2, key3], record=record)
assert_equal({}, data)
def test_clear_keys_records(self):
data = {
'a': 'A',
'b': 'B',
'c': ['C', True],
'd': 'D'
}
records = [1, 2, 3]
self.client.insert(data=data, records=records)
self.client.clear(keys=['a', 'b', 'c'], records=records)
data = self.client.get(key='d', records=records)
assert_equal({
1: 'D',
2: 'D',
3: 'D'
}, data)
def test_clear_record(self):
data = {
'a': 'A',
'b': 'B',
'c': ['C', True]
}
record = next(iter(self.client.insert(data)))
self.client.clear(record=record)
data = self.client.select(record=record)
assert_equal({}, data)
def test_clear_records(self):
data = {
'a': 'A',
'b': 'B',
'c': ['C', True],
'd': 'D'
}
records = [1, 2, 3]
self.client.insert(data=data, records=records)
self.client.clear(records=records)
data = self.client.select(records=records)
assert_equal({1: {}, 2: {}, 3: {}}, data)
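# stage() opens a transaction and commit() makes the staged writes durable;
# see test_stage below for the abort() path.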
def test_commit(self):
self.client.stage()
record = self.client.add("name", "jeff nelson")
self.client.commit()
assert_equal(['name'], list(self.client.describe(record)))
def test_describe_record(self):
self.client.set('name', 'tom brady', 1)
self.client.set('age', 100, 1)
self.client.set('team', 'new england patriots', 1)
keys = self.client.describe(1)
assert_equals(['age', 'name', 'team'], keys)
def test_describe_record_time(self):
self.client.set('name', 'tom brady', 1)
self.client.set('age', 100, 1)
self.client.set('team', 'new england patriots', 1)
timestamp = self.client.time()
self.client.clear('name', 1)
keys = self.client.describe(1, time=timestamp)
assert_equals(['age', 'name', 'team'], keys)
def test_describe_record_timestr(self):
self.client.set('name', 'tom brady', 1)
self.client.set('age', 100, 1)
self.client.set('team', 'new england patriots', 1)
anchor = self.get_time_anchor()
self.client.clear('name', 1)
timestamp = test_data.get_elapsed_millis_string(anchor)
keys = self.client.describe(1, time=timestamp)
assert_equals(['age', 'name', 'team'], keys)
def test_describe_records(self):
records = [1, 2, 3]
self.client.set('name', 'tom brady', records)
self.client.set('age', 100, records)
self.client.set('team', 'new england patriots', records)
keys = self.client.describe(records)
assert_equals(['age', 'name', 'team'], keys[1])
assert_equals(['age', 'name', 'team'], keys[2])
assert_equals(['age', 'name', 'team'], keys[3])
def test_describe_records_time(self):
records = [1, 2, 3]
self.client.set('name', 'tom brady', records)
self.client.set('age', 100, records)
self.client.set('team', 'new england patriots', records)
timestamp = self.client.time()
self.client.clear(records=records)
keys = self.client.describe(records, timestamp=timestamp)
assert_equals(['age', 'name', 'team'], keys[1])
assert_equals(['age', 'name', 'team'], keys[2])
assert_equals(['age', 'name', 'team'], keys[3])
def test_describe_records_timestr(self):
records = [1, 2, 3]
self.client.set('name', 'tom brady', records)
self.client.set('age', 100, records)
self.client.set('team', 'new england patriots', records)
anchor = self.get_time_anchor()
self.client.clear(records=records)
timestamp = test_data.get_elapsed_millis_string(anchor)
keys = self.client.describe(records, timestamp=timestamp)
assert_equals(['age', 'name', 'team'], keys[1])
assert_equals(['age', 'name', 'team'], keys[2])
assert_equals(['age', 'name', 'team'], keys[3])
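# diff() reports changes between two timestamps as a dict keyed by Diff.ADDED
# and Diff.REMOVED; when end is omitted, the present is used as the end bound,
# as the start-only tests below demonstrate.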
def test_diff_key_record_start(self):
key = test_data.random_string()
record = test_data.random_long()
self.client.add(key, 1, record)
start = self.client.time()
self.client.add(key, 2, record)
self.client.remove(key, 1, record)
diff = self.client.diff(key, record, start)
assert_equal([2], diff.get(Diff.ADDED))
assert_equal([1], diff.get(Diff.REMOVED))
def test_diff_key_record_startstr(self):
key = test_data.random_string()
record = test_data.random_long()
self.client.add(key, 1, record)
anchor = self.get_time_anchor()
self.client.add(key, 2, record)
self.client.remove(key, 1, record)
start = test_data.get_elapsed_millis_string(anchor)
diff = self.client.diff(key, record, start)
assert_equal([2], diff.get(Diff.ADDED))
assert_equal([1], diff.get(Diff.REMOVED))
def test_diff_key_record_start_end(self):
key = test_data.random_string()
record = test_data.random_long()
self.client.add(key, 1, record)
start = self.client.time()
self.client.add(key, 2, record)
self.client.remove(key, 1, record)
end = self.client.time()
self.client.set(key, 3, record)
diff = self.client.diff(key, record, start, end)
assert_equal([2], diff.get(Diff.ADDED))
assert_equal([1], diff.get(Diff.REMOVED))
def test_diff_key_record_startstr_endstr(self):
key = test_data.random_string()
record = test_data.random_long()
self.client.add(key, 1, record)
start_anchor = self.get_time_anchor()
self.client.add(key, 2, record)
self.client.remove(key, 1, record)
end_anchor = self.get_time_anchor()
self.client.set(key, 3, record)
start = test_data.get_elapsed_millis_string(start_anchor)
end = test_data.get_elapsed_millis_string(end_anchor)
diff = self.client.diff(key, record, start, end)
assert_equal([2], diff.get(Diff.ADDED))
assert_equal([1], diff.get(Diff.REMOVED))
def test_diff_key_start(self):
key = test_data.random_string()
self.client.add(key=key, value=1, record=1)
start = self.client.time()
self.client.add(key=key, value=2, record=1)
self.client.add(key=key, value=1, record=2)
self.client.add(key=key, value=3, record=3)
self.client.remove(key=key, value=1, record=2)
diff = self.client.diff(key=key, start=start)
assert_equal(2, len(diff.keys()))
diff2 = diff.get(2)
diff3 = diff.get(3)
assert_equal([1], diff2.get(Diff.ADDED))
assert_equal([3], diff3.get(Diff.ADDED))
assert_is_none(diff2.get(Diff.REMOVED))
assert_is_none(diff3.get(Diff.REMOVED))
def test_diff_key_startstr(self):
key = test_data.random_string()
self.client.add(key=key, value=1, record=1)
anchor = self.get_time_anchor()
self.client.add(key=key, value=2, record=1)
self.client.add(key=key, value=1, record=2)
self.client.add(key=key, value=3, record=3)
self.client.remove(key=key, value=1, record=2)
start = test_data.get_elapsed_millis_string(anchor)
diff = self.client.diff(key=key, start=start)
assert_equal(2, len(diff.keys()))
diff2 = diff.get(2)
diff3 = diff.get(3)
assert_equal([1], diff2.get(Diff.ADDED))
assert_equal([3], diff3.get(Diff.ADDED))
assert_is_none(diff2.get(Diff.REMOVED))
assert_is_none(diff3.get(Diff.REMOVED))
def test_diff_key_start_end(self):
key = test_data.random_string()
self.client.add(key=key, value=1, record=1)
start = self.client.time()
self.client.add(key=key, value=2, record=1)
self.client.add(key=key, value=1, record=2)
self.client.add(key=key, value=3, record=3)
self.client.remove(key=key, value=1, record=2)
end = self.client.time()
self.client.add(key=key, value=4, record=1)
diff = self.client.diff(key=key, start=start, end=end)
assert_equal(2, len(diff.keys()))
diff2 = diff.get(2)
diff3 = diff.get(3)
assert_equal([1], diff2.get(Diff.ADDED))
assert_equal([3], diff3.get(Diff.ADDED))
assert_is_none(diff2.get(Diff.REMOVED))
assert_is_none(diff3.get(Diff.REMOVED))
def test_diff_key_startstr_endstr(self):
key = test_data.random_string()
self.client.add(key=key, value=1, record=1)
start_anchor = self.get_time_anchor()
self.client.add(key=key, value=2, record=1)
self.client.add(key=key, value=1, record=2)
self.client.add(key=key, value=3, record=3)
self.client.remove(key=key, value=1, record=2)
end_anchor = self.get_time_anchor()
self.client.add(key=key, value=4, record=1)
start = test_data.get_elapsed_millis_string(start_anchor)
end = test_data.get_elapsed_millis_string(end_anchor)
diff = self.client.diff(key=key, start=start, end=end)
assert_equal(2, len(diff.keys()))
diff2 = diff.get(2)
diff3 = diff.get(3)
assert_equal([1], diff2.get(Diff.ADDED))
assert_equal([3], diff3.get(Diff.ADDED))
assert_is_none(diff2.get(Diff.REMOVED))
assert_is_none(diff3.get(Diff.REMOVED))
def test_diff_record_start(self):
self.client.add(key="foo", value=1, record=1)
start = self.client.time()
self.client.set(key="foo", value=2, record=1)
self.client.add(key="bar", value=True, record=1)
diff = self.client.diff(record=1, time=start)
assert_equal([1], diff.get('foo').get(Diff.REMOVED))
assert_equal([2], diff.get('foo').get(Diff.ADDED))
assert_equal([True], diff.get('bar').get(Diff.ADDED))
def test_diff_record_startstr(self):
self.client.add(key="foo", value=1, record=1)
anchor = self.get_time_anchor()
self.client.set(key="foo", value=2, record=1)
self.client.add(key="bar", value=True, record=1)
start = test_data.get_elapsed_millis_string(anchor)
diff = self.client.diff(record=1, time=start)
assert_equal([1], diff.get('foo').get(Diff.REMOVED))
assert_equal([2], diff.get('foo').get(Diff.ADDED))
assert_equal([True], diff.get('bar').get(Diff.ADDED))
def test_diff_record_start_end(self):
self.client.add(key="foo", value=1, record=1)
start = self.client.time()
self.client.set(key="foo", value=2, record=1)
self.client.add(key="bar", value=True, record=1)
end = self.client.time()
self.client.set(key="car", value=100, record=1)
diff = self.client.diff(record=1, time=start, end=end)
assert_equal([1], diff.get('foo').get(Diff.REMOVED))
assert_equal([2], diff.get('foo').get(Diff.ADDED))
assert_equal([True], diff.get('bar').get(Diff.ADDED))
def test_diff_record_startstr_endstr(self):
self.client.add(key="foo", value=1, record=1)
start_anchor = self.get_time_anchor()
self.client.set(key="foo", value=2, record=1)
self.client.add(key="bar", value=True, record=1)
end_anchor = self.get_time_anchor()
self.client.set(key="car", value=100, record=1)
start = test_data.get_elapsed_millis_string(start_anchor)
end = test_data.get_elapsed_millis_string(end_anchor)
diff = self.client.diff(record=1, time=start, end=end)
assert_equal([1], diff.get('foo').get(Diff.REMOVED))
assert_equal([2], diff.get('foo').get(Diff.ADDED))
assert_equal([True], diff.get('bar').get(Diff.ADDED))
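# find() accepts either a raw CCL string or key/operator/value arguments; per
# the assertions below, BETWEEN includes the low bound and excludes the high
# bound, and operators may be passed as enums or as shorthand strings.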
def test_find_ccl(self):
key = test_data.random_string()
for n in range(0, 10):
self.client.add(key=key, value=n, record=n)
records = list(self.client.find(key+' > 3'))
assert_equal(list(range(4, 10)), records)
@raises(Exception)
def test_find_ccl_handle_parse_exception(self):
self.client.find(ccl="throw parse exception")
def test_find_key_operator_value(self):
key = test_data.random_string()
for n in range(0, 10):
self.client.add(key=key, value=n, record=n)
records = list(self.client.find(key=key, operator=Operator.GREATER_THAN, value=3))
assert_equal(list(range(4, 10)), records)
def test_find_key_operator_values(self):
key = test_data.random_string()
for n in range(0, 10):
self.client.add(key=key, value=n, record=n)
records = list(self.client.find(key=key, operator=Operator.BETWEEN, values=[3, 6]))
assert_equal([3, 4, 5], records)
def test_find_key_operator_value_time(self):
key = test_data.random_string()
for n in range(0, 10):
self.client.add(key=key, value=n, record=n)
ts = self.client.time()
for n in range(0, 10):
self.client.add(key=key, value=n, record=n+1)
records = list(self.client.find(key=key, operator=Operator.GREATER_THAN, value=3, time=ts))
assert_equal(list(range(4, 10)), records)
def test_find_key_operator_value_timestr(self):
key = test_data.random_string()
for n in range(0, 10):
self.client.add(key=key, value=n, record=n)
anchor = self.get_time_anchor()
for n in range(0, 10):
self.client.add(key=key, value=n, record=n+1)
ts = test_data.get_elapsed_millis_string(anchor)
records = list(self.client.find(key=key, operator=Operator.GREATER_THAN, value=3, time=ts))
assert_equal(list(range(4, 10)), records)
def test_find_key_operator_values_time(self):
key = test_data.random_string()
for n in range(0, 10):
self.client.add(key=key, value=n, record=n)
ts = self.client.time()
for n in range(0, 10):
self.client.add(key=key, value=n, record=n+1)
records = list(self.client.find(key=key, operator=Operator.BETWEEN, values=[3, 6], time=ts))
assert_equal([3, 4, 5], records)
def test_find_key_operator_values_timestr(self):
key = test_data.random_string()
for n in range(0, 10):
self.client.add(key=key, value=n, record=n)
anchor = self.get_time_anchor()
for n in range(0, 10):
self.client.add(key=key, value=n, record=n+1)
ts = test_data.get_elapsed_millis_string(anchor)
records = list(self.client.find(key=key, operator=Operator.BETWEEN, values=[3, 6], time=ts))
assert_equal([3, 4, 5], records)
def test_find_key_operatorstr_values_time(self):
key = test_data.random_string()
for n in range(0, 10):
self.client.add(key=key, value=n, record=n)
ts = self.client.time()
for n in range(0, 10):
self.client.add(key=key, value=n, record=n+1)
records = list(self.client.find(key=key, operator="bw", values=[3, 6], time=ts))
assert_equal([3, 4, 5], records)
def test_find_key_operatorstr_values(self):
key = test_data.random_string()
for n in range(0, 10):
self.client.add(key=key, value=n, record=n)
records = list(self.client.find(key=key, operator="bw", values=[3, 6]))
assert_equal([3, 4, 5], records)
def test_find_key_operatorstr_values_timestr(self):
key = test_data.random_string()
for n in range(0, 10):
self.client.add(key=key, value=n, record=n)
anchor = self.get_time_anchor()
for n in range(0, 10):
self.client.add(key=key, value=n, record=n+1)
ts = test_data.get_elapsed_millis_string(anchor)
records = list(self.client.find(key=key, operator="bw", values=[3, 6], time=ts))
assert_equal([3, 4, 5], records)
def test_find_key_operatorstr_value(self):
key = test_data.random_string()
for n in range(0, 10):
self.client.add(key=key, value=n, record=n)
records = list(self.client.find(key=key, operator="gt", value=3))
assert_equal(list(range(4, 10)), records)
def test_find_key_operatorstr_value_time(self):
key = test_data.random_string()
for n in range(0, 10):
self.client.add(key=key, value=n, record=n)
ts = self.client.time()
for n in range(0, 10):
self.client.add(key=key, value=n, record=n+1)
records = list(self.client.find(key=key, operator="gt", value=3, time=ts))
assert_equal(list(range(4, 10)), records)
def test_find_key_operatorstr_value_timestr(self):
key = test_data.random_string()
for n in range(0, 10):
self.client.add(key=key, value=n, record=n)
anchor = self.get_time_anchor()
for n in range(0, 10):
self.client.add(key=key, value=n, record=n+1)
ts = test_data.get_elapsed_millis_string(anchor)
records = list(self.client.find(key=key, operator="gt", value=3, time=ts))
assert_equal(list(range(4, 10)), records)
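# get() returns only the most recently added value for each key/record, in
# contrast to select(), which returns every stored value.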
def test_get_ccl(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
record1 = test_data.random_long()
record2 = test_data.random_long()
self.client.add(key=key1, value=1, records=[record1, record2])
self.client.add(key=key1, value=2, records=[record1, record2])
self.client.add(key=key1, value=3, records=[record1, record2])
self.client.add(key=key2, value=10, records=[record1, record2])
ccl = key2 + ' = 10'
data = self.client.get(ccl=ccl)
expected = {
key1: 3,
key2: 10
}
assert_equal(data.get(record1), expected)
assert_equal(data.get(record2), expected)
def test_get_ccl_time(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
record1 = test_data.random_long()
record2 = test_data.random_long()
self.client.add(key=key1, value=1, records=[record1, record2])
self.client.add(key=key1, value=2, records=[record1, record2])
self.client.add(key=key1, value=3, records=[record1, record2])
self.client.add(key=key2, value=10, records=[record1, record2])
ts = self.client.time()
self.client.set(key=key2, value=11, records=[record1, record2])
ccl = key2 + ' > 10'
data = self.client.get(ccl=ccl, time=ts)
expected = {
key1: 3,
key2: 10
}
assert_equal(data.get(record1), expected)
assert_equal(data.get(record2), expected)
def test_get_ccl_timestr(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
record1 = test_data.random_long()
record2 = test_data.random_long()
self.client.add(key=key1, value=1, records=[record1, record2])
self.client.add(key=key1, value=2, records=[record1, record2])
self.client.add(key=key1, value=3, records=[record1, record2])
self.client.add(key=key2, value=10, records=[record1, record2])
anchor = self.get_time_anchor()
self.client.set(key=key2, value=11, records=[record1, record2])
ccl = key2 + ' > 10'
ts = test_data.get_elapsed_millis_string(anchor)
data = self.client.get(ccl=ccl, time=ts)
expected = {
key1: 3,
key2: 10
}
assert_equal(data.get(record1), expected)
assert_equal(data.get(record2), expected)
def test_get_key_ccl(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
record1 = test_data.random_long()
record2 = test_data.random_long()
self.client.add(key=key1, value=1, records=[record1, record2])
self.client.add(key=key1, value=2, records=[record1, record2])
self.client.add(key=key1, value=3, records=[record1, record2])
self.client.add(key=key2, value=10, records=[record1, record2])
self.client.add(key=key1, value=4, record=record2)
ccl = key2 + ' = 10'
data = self.client.get(key=key1, ccl=ccl)
expected = {
record1: 3,
record2: 4
}
assert_equal(expected, data)
def test_get_keys_ccl(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
record1 = test_data.random_long()
record2 = test_data.random_long()
self.client.add(key=key1, value=1, records=[record1, record2])
self.client.add(key=key1, value=2, records=[record1, record2])
self.client.add(key=key1, value=3, records=[record1, record2])
self.client.add(key=key2, value=10, records=[record1, record2])
self.client.add(key=key1, value=4, record=record2)
ccl = key2 + ' = 10'
data = self.client.get(keys=[key1, key2], ccl=ccl)
expected = {
record1: {key1: 3, key2: 10},
record2: {key1: 4, key2: 10},
}
assert_equal(expected, data)
def test_get_key_ccl_time(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
record1 = test_data.random_long()
record2 = test_data.random_long()
self.client.add(key=key1, value=1, records=[record1, record2])
self.client.add(key=key1, value=2, records=[record1, record2])
self.client.add(key=key1, value=3, records=[record1, record2])
self.client.add(key=key2, value=10, records=[record1, record2])
self.client.add(key=key1, value=4, record=record2)
ts = self.client.time()
ccl = key2 + ' = 10'
self.client.set(key=key1, value=100, record=[record2, record1])
data = self.client.get(key=key1, ccl=ccl, time=ts)
expected = {
record1: 3,
record2: 4
}
assert_equal(expected, data)
def test_get_keys_ccl_time(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
record1 = test_data.random_long()
record2 = test_data.random_long()
self.client.add(key=key1, value=1, records=[record1, record2])
self.client.add(key=key1, value=2, records=[record1, record2])
self.client.add(key=key1, value=3, records=[record1, record2])
self.client.add(key=key2, value=10, records=[record1, record2])
self.client.add(key=key1, value=4, record=record2)
ts = self.client.time()
ccl = key2 + ' = 10'
self.client.set(key=key1, value=100, record=[record2, record1])
data = self.client.get(key=[key1, key2], ccl=ccl, time=ts)
expected = {
record1: {key1: 3, key2: 10},
record2: {key1: 4, key2: 10},
}
assert_equal(expected, data)
def test_get_key_ccl_timestr(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
record1 = test_data.random_long()
record2 = test_data.random_long()
self.client.add(key=key1, value=1, records=[record1, record2])
self.client.add(key=key1, value=2, records=[record1, record2])
self.client.add(key=key1, value=3, records=[record1, record2])
self.client.add(key=key2, value=10, records=[record1, record2])
self.client.add(key=key1, value=4, record=record2)
anchor = self.get_time_anchor()
ccl = key2 + ' = 10'
self.client.set(key=key1, value=100, record=[record2, record1])
ts = test_data.get_elapsed_millis_string(anchor)
data = self.client.get(key=key1, ccl=ccl, time=ts)
expected = {
record1: 3,
record2: 4
}
assert_equal(expected, data)
def test_get_keys_ccl_timestr(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
record1 = test_data.random_long()
record2 = test_data.random_long()
self.client.add(key=key1, value=1, records=[record1, record2])
self.client.add(key=key1, value=2, records=[record1, record2])
self.client.add(key=key1, value=3, records=[record1, record2])
self.client.add(key=key2, value=10, records=[record1, record2])
self.client.add(key=key1, value=4, record=record2)
anchor = self.get_time_anchor()
ccl = key2 + ' = 10'
self.client.set(key=key1, value=100, record=[record2, record1])
ts = test_data.get_elapsed_millis_string(anchor)
data = self.client.get(key=[key1, key2], ccl=ccl, time=ts)
expected = {
record1: {key1: 3, key2: 10},
record2: {key1: 4, key2: 10},
}
assert_equal(expected, data)
def test_get_key_record(self):
self.client.add('foo', 1, 1)
self.client.add('foo', 2, 1)
self.client.add('foo', 3, 1)
assert_equal(3, self.client.get(key='foo', record=1))
def test_get_key_record_time(self):
self.client.add('foo', 1, 1)
self.client.add('foo', 2, 1)
self.client.add('foo', 3, 1)
ts = self.client.time()
self.client.add('foo', 4, 1)
assert_equal(3, self.client.get(key='foo', record=1, time=ts))
def test_get_key_record_timestr(self):
self.client.add('foo', 1, 1)
self.client.add('foo', 2, 1)
self.client.add('foo', 3, 1)
anchor = self.get_time_anchor()
self.client.add('foo', 4, 1)
ts = test_data.get_elapsed_millis_string(anchor)
assert_equal(3, self.client.get(key='foo', record=1, time=ts))
def test_get_key_records(self):
self.client.add('foo', 1, [1, 2, 3])
self.client.add('foo', 2, [1, 2, 3])
self.client.add('foo', 3, [1, 2, 3])
assert_equal({
1: 3,
2: 3,
3: 3
}, self.client.get(key='foo', record=[1, 2, 3]))
def test_get_key_records_time(self):
self.client.add('foo', 1, [1, 2, 3])
self.client.add('foo', 2, [1, 2, 3])
self.client.add('foo', 3, [1, 2, 3])
ts = self.client.time()
self.client.add('foo', 4, [1, 2, 3])
assert_equal({
1: 3,
2: 3,
3: 3
}, self.client.get(key='foo', record=[1, 2, 3], time=ts))
def test_get_key_records_timestr(self):
self.client.add('foo', 1, [1, 2, 3])
self.client.add('foo', 2, [1, 2, 3])
self.client.add('foo', 3, [1, 2, 3])
anchor = self.get_time_anchor()
self.client.add('foo', 4, [1, 2, 3])
ts = test_data.get_elapsed_millis_string(anchor)
assert_equal({
1: 3,
2: 3,
3: 3
}, self.client.get(key='foo', record=[1, 2, 3], time=ts))
def test_get_keys_record(self):
self.client.add('foo', 1, 1)
self.client.add('foo', 2, 1)
self.client.add('bar', 1, 1)
self.client.add('bar', 2, 1)
data = self.client.get(keys=['foo', 'bar'], record=1)
expected = {
'foo': 2,
'bar': 2
}
assert_equal(expected, data)
def test_get_keys_record_time(self):
self.client.add('foo', 1, 1)
self.client.add('foo', 2, 1)
self.client.add('bar', 1, 1)
self.client.add('bar', 2, 1)
ts = self.client.time()
self.client.add('foo', 3, 1)
self.client.add('bar', 3, 1)
data = self.client.get(keys=['foo', 'bar'], record=1, time=ts)
expected = {
'foo': 2,
'bar': 2
}
assert_equal(expected, data)
def test_get_keys_record_timestr(self):
self.client.add('foo', 1, 1)
self.client.add('foo', 2, 1)
self.client.add('bar', 1, 1)
self.client.add('bar', 2, 1)
anchor = self.get_time_anchor()
self.client.add('foo', 3, 1)
self.client.add('bar', 3, 1)
ts = test_data.get_elapsed_millis_string(anchor)
data = self.client.get(keys=['foo', 'bar'], record=1, time=ts)
expected = {
'foo': 2,
'bar': 2
}
assert_equal(expected, data)
def test_get_keys_records_time(self):
self.client.add('foo', 1, [1, 2])
self.client.add('foo', 2, [1, 2])
self.client.add('bar', 1, [1, 2])
self.client.add('bar', 2, [1, 2])
ts = self.client.time()
self.client.add('foo', 3, [1, 2])
self.client.add('bar', 3, [1, 2])
data = self.client.get(keys=['foo', 'bar'], records=[1, 2], time=ts)
expected = {
'foo': 2,
'bar': 2
}
assert_equal({
1: expected,
2: expected
}, data)
def test_get_keys_records_timestr(self):
self.client.add('foo', 1, [1, 2])
self.client.add('foo', 2, [1, 2])
self.client.add('bar', 1, [1, 2])
self.client.add('bar', 2, [1, 2])
anchor = self.get_time_anchor()
self.client.add('foo', 3, [1, 2])
self.client.add('bar', 3, [1, 2])
ts = test_data.get_elapsed_millis_string(anchor)
data = self.client.get(keys=['foo', 'bar'], records=[1, 2], time=ts)
expected = {
'foo': 2,
'bar': 2
}
assert_equal({
1: expected,
2: expected
}, data)
def test_get_keys_records(self):
self.client.add('foo', 1, [1, 2])
self.client.add('foo', 2, [1, 2])
self.client.add('bar', 1, [1, 2])
self.client.add('bar', 2, [1, 2])
data = self.client.get(keys=['foo', 'bar'], records=[1, 2])
expected = {
'foo': 2,
'bar': 2
}
assert_equal({
1: expected,
2: expected
}, data)
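# insert() accepts a dict, a list of dicts, or an equivalent JSON string; it
# returns the ids of newly created records, or per-record booleans when
# explicit records are supplied.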
def test_insert_dict(self):
data = {
'string': 'a',
'int': 1,
'double': 3.14,
'bool': True,
'multi': ['a', 1, 3.14, True]
}
record = self.client.insert(data=data)[0]
assert_equal('a', self.client.get(key='string', record=record))
assert_equal(1, self.client.get(key='int', record=record))
assert_equal(3.14, self.client.get(key='double', record=record))
assert_equal(True, self.client.get(key='bool', record=record))
assert_equal(['a', 1, 3.14, True], self.client.select(key='multi', record=record))
def test_insert_dicts(self):
data = [
{
'foo': 1
},
{
'foo': 2
},
{
'foo': 3
}
]
records = self.client.insert(data=data)
assert_equal(len(data), len(records))
def test_insert_json(self):
data = {
'string': 'a',
'int': 1,
'double': 3.14,
'bool': True,
'multi': ['a', 1, 3.14, True]
}
data = ujson.dumps(data)
record = self.client.insert(data=data)[0]
assert_equal('a', self.client.get(key='string', record=record))
assert_equal(1, self.client.get(key='int', record=record))
assert_equal(3.14, self.client.get(key='double', record=record))
assert_equal(True, self.client.get(key='bool', record=record))
assert_equal(['a', 1, 3.14, True], self.client.select(key='multi', record=record))
def test_insert_json_list(self):
data = [
{
'foo': 1
},
{
'foo': 2
},
{
'foo': 3
}
]
count = len(data)
data = ujson.dumps(data)
records = self.client.insert(data=data)
assert_equal(count, len(records))
def test_insert_dict_record(self):
record = test_data.random_long()
data = {
'string': 'a',
'int': 1,
'double': 3.14,
'bool': True,
'multi': ['a', 1, 3.14, True]
}
result = self.client.insert(data=data, record=record)
assert_true(result)
assert_equal('a', self.client.get(key='string', record=record))
assert_equal(1, self.client.get(key='int', record=record))
assert_equal(3.14, self.client.get(key='double', record=record))
assert_equal(True, self.client.get(key='bool', record=record))
assert_equal(['a', 1, 3.14, True], self.client.select(key='multi', record=record))
def test_insert_json_record(self):
record = test_data.random_long()
data = {
'string': 'a',
'int': 1,
'double': 3.14,
'bool': True,
'multi': ['a', 1, 3.14, True]
}
data = ujson.dumps(data)
result = self.client.insert(data=data, record=record)
assert_true(result)
assert_equal('a', self.client.get(key='string', record=record))
assert_equal(1, self.client.get(key='int', record=record))
assert_equal(3.14, self.client.get(key='double', record=record))
assert_equal(True, self.client.get(key='bool', record=record))
assert_equal(['a', 1, 3.14, True], self.client.select(key='multi', record=record))
def test_insert_dict_records(self):
record1 = test_data.random_long()
record2 = test_data.random_long()
record3 = test_data.random_long()
data = {
'string': 'a',
'int': 1,
'double': 3.14,
'bool': True,
'multi': ['a', 1, 3.14, True]
}
result = self.client.insert(data=data, records=[record1, record2, record3])
assert_equal({
record1: True,
record2: True,
record3: True
}, result)
def test_insert_json_records(self):
record1 = test_data.random_long()
record2 = test_data.random_long()
record3 = test_data.random_long()
data = {
'string': 'a',
'int': 1,
'double': 3.14,
'bool': True,
'multi': ['a', 1, 3.14, True]
}
data = ujson.dumps(data)
result = self.client.insert(data=data, records=[record1, record2, record3])
assert_equal({
record1: True,
record2: True,
record3: True
}, result)
def test_inventory(self):
records = [1, 2, 3, 4, 5, 6, 7]
self.client.add(key='foo', value=17, records=records)
assert_equal(records, self.client.inventory())
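# jsonify() dumps the requested records to a JSON string; with id=True each
# object also carries its record id under the reserved identifier key.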
def test_jsonify_records(self):
record1 = 1
record2 = 2
data = {
'int': 1,
'multi': [1, 2, 3, 4]
}
self.client.insert(data=data, records=[record1, record2])
dump = self.client.jsonify(records=[record1, record2])
data = {
'int': [1],
'multi': [1, 2, 3, 4]
}
assert_equal([data, data], ujson.loads(dump))
def test_jsonify_records_identifier(self):
record1 = 1
record2 = 2
data = {
'int': 1,
'multi': [1, 2, 3, 4]
}
self.client.insert(data=data, records=[record1, record2])
dump = self.client.jsonify(records=[record1, record2], id=True)
data1 = {
'int': [1],
'multi': [1, 2, 3, 4],
constants.JSON_RESERVED_IDENTIFIER_NAME: 1
}
data2 = {
'int': [1],
'multi': [1, 2, 3, 4],
constants.JSON_RESERVED_IDENTIFIER_NAME: 2
}
assert_equal([data1, data2], ujson.loads(dump))
def test_jsonify_records_time(self):
record1 = 1
record2 = 2
data = {
'int': 1,
'multi': [1, 2, 3, 4]
}
self.client.insert(data=data, records=[record1, record2])
ts = self.client.time()
self.client.add('foo', 10, [record1, record2])
dump = self.client.jsonify(records=[record1, record2], time=ts)
data = {
'int': [1],
'multi': [1, 2, 3, 4]
}
assert_equal([data, data], ujson.loads(dump))
@ignore
def test_jsonify_records_timestr(self):
record1 = 1
record2 = 2
data = {
'int': 1,
'multi': [1, 2, 3, 4]
}
self.client.insert(data=data, records=[record1, record2])
anchor = self.get_time_anchor()
self.client.add('foo', 10, [record1, record2])
ts = test_data.get_elapsed_millis_string(anchor)
dump = self.client.jsonify(records=[record1, record2], time=ts)
data = {
'int': [1],
'multi': [1, 2, 3, 4]
}
assert_equal([data, data], ujson.loads(dump))
def test_jsonify_records_identifier_time(self):
record1 = 1
record2 = 2
data = {
'int': 1,
'multi': [1, 2, 3, 4]
}
self.client.insert(data=data, records=[record1, record2])
ts = self.client.time()
self.client.add(key='foo', value=True, records=[record1, record2])
dump = self.client.jsonify(records=[record1, record2], id=True, time=ts)
data1 = {
'int': [1],
'multi': [1, 2, 3, 4],
constants.JSON_RESERVED_IDENTIFIER_NAME: 1
}
data2 = {
'int': [1],
'multi': [1, 2, 3, 4],
constants.JSON_RESERVED_IDENTIFIER_NAME: 2
}
assert_equal([data1, data2], ujson.loads(dump))
def test_jsonify_records_identifier_timestr(self):
record1 = 1
record2 = 2
data = {
'int': 1,
'multi': [1, 2, 3, 4]
}
self.client.insert(data=data, records=[record1, record2])
anchor = self.get_time_anchor()
self.client.add(key='foo', value=True, records=[record1, record2])
ts = test_data.get_elapsed_millis_string(anchor)
dump = self.client.jsonify(records=[record1, record2], id=True, time=ts)
data1 = {
'int': [1],
'multi': [1, 2, 3, 4],
constants.JSON_RESERVED_IDENTIFIER_NAME: 1
}
data2 = {
'int': [1],
'multi': [1, 2, 3, 4],
constants.JSON_RESERVED_IDENTIFIER_NAME: 2
}
assert_equal([data1, data2], ujson.loads(dump))
def test_ping_record(self):
record = 1
assert_false(self.client.ping(record))
self.client.add(key='foo', value=1, record=record)
assert_true(self.client.ping(record))
self.client.clear(key='foo', record=record)
assert_false(self.client.ping(record))
def test_ping_records(self):
self.client.add(key='foo', value=1, records=[1, 2])
data = self.client.ping([1, 2, 3])
assert_equal({
1: True,
2: True,
3: False
}, data)
def test_remove_key_value_record(self):
key = 'foo'
value = 1
record = 1
assert_false(self.client.remove(key, value, record))
self.client.add(key, value, record)
assert_true(self.client.remove(key=key, record=record, value=value))
def test_remove_key_value_records(self):
key = 'foo'
value = 1
self.client.add(key, value, records=[1, 2])
data = self.client.remove(key, value, records=[1, 2, 3])
assert_equal({
1: True,
2: True,
3: False
}, data)
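# revert() restores the given key(s) in the given record(s) to their state at
# the supplied timestamp.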
def test_revert_key_records_time(self):
data1 = {
'one': 1,
'two': 2,
'three': 3
}
data2 = {
'one': True,
'two': True,
'three': True
}
self.client.insert(data=data1, records=[1, 2, 3])
ts = self.client.time()
self.client.insert(data=data2, records=[1, 2, 3])
self.client.revert(key='one', records=[1, 2, 3], time=ts)
data = self.client.select(key='one', record=[1, 2, 3])
assert_equal({
1: [1],
2: [1],
3: [1]
}, data)
def test_revert_key_records_timestr(self):
data1 = {
'one': 1,
'two': 2,
'three': 3
}
data2 = {
'one': True,
'two': True,
'three': True
}
self.client.insert(data=data1, records=[1, 2, 3])
anchor = self.get_time_anchor()
self.client.insert(data=data2, records=[1, 2, 3])
ts = test_data.get_elapsed_millis_string(anchor)
self.client.revert(key='one', records=[1, 2, 3], time=ts)
data = self.client.select(key='one', record=[1, 2, 3])
assert_equal({
1: [1],
2: [1],
3: [1]
}, data)
def test_revert_keys_records_time(self):
data1 = {
'one': 1,
'two': 2,
'three': 3
}
data2 = {
'one': True,
'two': True,
'three': True
}
self.client.insert(data=data1, records=[1, 2, 3])
ts = self.client.time()
self.client.insert(data=data2, records=[1, 2, 3])
self.client.revert(keys=['one', 'two', 'three'], records=[1, 2, 3], time=ts)
data = self.client.select(key=['one', 'two', 'three'], record=[1, 2, 3])
data3 = {
'one': [1],
'two': [2],
'three': [3]
}
assert_equal({
1: data3,
2: data3,
3: data3
}, data)
def test_revert_keys_records_timestr(self):
data1 = {
'one': 1,
'two': 2,
'three': 3
}
data2 = {
'one': True,
'two': True,
'three': True
}
self.client.insert(data=data1, records=[1, 2, 3])
anchor = self.get_time_anchor()
self.client.insert(data=data2, records=[1, 2, 3])
ts = test_data.get_elapsed_millis_string(anchor)
self.client.revert(keys=['one', 'two', 'three'], records=[1, 2, 3], time=ts)
data = self.client.select(key=['one', 'two', 'three'], record=[1, 2, 3])
data3 = {
'one': [1],
'two': [2],
'three': [3]
}
assert_equal({
1: data3,
2: data3,
3: data3
}, data)
def test_revert_keys_record_time(self):
data1 = {
'one': 1,
'two': 2,
'three': 3
}
data2 = {
'one': True,
'two': True,
'three': True
}
self.client.insert(data=data1, records=[1, 2, 3])
ts = self.client.time()
self.client.insert(data=data2, records=[1, 2, 3])
self.client.revert(key=['one', 'two', 'three'], records=1, time=ts)
data = self.client.select(key=['one', 'two', 'three'], record=1)
assert_equal({
'one': [1],
'two': [2],
'three': [3]
}, data)
def test_revert_keys_record_timestr(self):
data1 = {
'one': 1,
'two': 2,
'three': 3
}
data2 = {
'one': True,
'two': True,
'three': True
}
self.client.insert(data=data1, records=[1, 2, 3])
anchor = self.get_time_anchor()
self.client.insert(data=data2, records=[1, 2, 3])
ts = test_data.get_elapsed_millis_string(anchor)
self.client.revert(key=['one', 'two', 'three'], records=1, time=ts)
data = self.client.select(key=['one', 'two', 'three'], record=1)
assert_equal({
'one': [1],
'two': [2],
'three': [3]
}, data)
def test_revert_key_record_time(self):
data1 = {
'one': 1,
'two': 2,
'three': 3
}
data2 = {
'one': True,
'two': True,
'three': True
}
self.client.insert(data=data1, records=[1, 2, 3])
ts = self.client.time()
self.client.insert(data=data2, records=[1, 2, 3])
self.client.revert(key='one', records=1, time=ts)
data = self.client.select(key='one', record=1)
assert_equal([1], data)
def test_revert_key_record_timestr(self):
data1 = {
'one': 1,
'two': 2,
'three': 3
}
data2 = {
'one': True,
'two': True,
'three': True
}
self.client.insert(data=data1, records=[1, 2, 3])
anchor = self.get_time_anchor()
self.client.insert(data=data2, records=[1, 2, 3])
ts = test_data.get_elapsed_millis_string(anchor)
self.client.revert(key='one', records=1, time=ts)
data = self.client.select(key='one', record=1)
assert_equal([1], data)
def test_search(self):
self.client.add(key="name", value="jeff", record=1)
self.client.add(key="name", value="jeffery", record=2)
self.client.add(key="name", value="jeremy", record=3)
self.client.add(key="name", value="ben jefferson", record=4)
records = self.client.search(key="name", query="jef")
assert_equal([1, 2, 4], records)
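# select() with a CCL criteria returns a mapping from each matching record to
# its key -> values data, with every stored value included.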
def test_select_ccl(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
record1 = test_data.random_long()
record2 = test_data.random_long()
self.client.add(key=key1, value=1, records=[record1, record2])
self.client.add(key=key1, value=2, records=[record1, record2])
self.client.add(key=key1, value=3, records=[record1, record2])
self.client.add(key=key2, value=10, records=[record1, record2])
ccl = key2 + ' = 10'
data = self.client.select(ccl=ccl)
expected = {
key1: [1, 2, 3],
key2: [10]
}
assert_equal(data.get(record1), expected)
assert_equal(data.get(record2), expected)
def test_select_ccl_time(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
record1 = test_data.random_long()
record2 = test_data.random_long()
self.client.add(key=key1, value=1, records=[record1, record2])
self.client.add(key=key1, value=2, records=[record1, record2])
self.client.add(key=key1, value=3, records=[record1, record2])
self.client.add(key=key2, value=10, records=[record1, record2])
ts = self.client.time()
self.client.set(key=key2, value=11, records=[record1, record2])
ccl = key2 + ' > 10'
data = self.client.select(ccl=ccl, time=ts)
expected = {
key1: [1, 2, 3],
key2: [10]
}
assert_equal(data.get(record1), expected)
assert_equal(data.get(record2), expected)
def test_select_ccl_timestr(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
record1 = test_data.random_long()
record2 = test_data.random_long()
self.client.add(key=key1, value=1, records=[record1, record2])
self.client.add(key=key1, value=2, records=[record1, record2])
self.client.add(key=key1, value=3, records=[record1, record2])
self.client.add(key=key2, value=10, records=[record1, record2])
anchor = self.get_time_anchor()
self.client.set(key=key2, value=11, records=[record1, record2])
ccl = key2 + ' > 10'
ts = test_data.get_elapsed_millis_string(anchor)
data = self.client.select(ccl=ccl, time=ts)
expected = {
key1: [1, 2, 3],
key2: [10]
}
assert_equal(data.get(record1), expected)
assert_equal(data.get(record2), expected)
def test_select_key_ccl(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
record1 = test_data.random_long()
record2 = test_data.random_long()
self.client.add(key=key1, value=1, records=[record1, record2])
self.client.add(key=key1, value=2, records=[record1, record2])
self.client.add(key=key1, value=3, records=[record1, record2])
self.client.add(key=key2, value=10, records=[record1, record2])
self.client.add(key=key1, value=4, record=record2)
ccl = key2 + ' = 10'
data = self.client.select(key=key1, ccl=ccl)
expected = {
record1: [1, 2, 3],
record2: [1, 2, 3, 4]
}
assert_equal(expected, data)
def test_select_keys_ccl(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
record1 = test_data.random_long()
record2 = test_data.random_long()
self.client.add(key=key1, value=1, records=[record1, record2])
self.client.add(key=key1, value=2, records=[record1, record2])
self.client.add(key=key1, value=3, records=[record1, record2])
self.client.add(key=key2, value=10, records=[record1, record2])
self.client.add(key=key1, value=4, record=record2)
ccl = key2 + ' = 10'
data = self.client.select(keys=[key1, key2], ccl=ccl)
expected = {
record1: {key1: [1, 2, 3], key2: [10]},
record2: {key1: [1, 2, 3, 4], key2: [10]},
}
assert_equal(expected, data)
def test_select_key_ccl_time(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
record1 = test_data.random_long()
record2 = test_data.random_long()
self.client.add(key=key1, value=1, records=[record1, record2])
self.client.add(key=key1, value=2, records=[record1, record2])
self.client.add(key=key1, value=3, records=[record1, record2])
self.client.add(key=key2, value=10, records=[record1, record2])
self.client.add(key=key1, value=4, record=record2)
ts = self.client.time()
ccl = key2 + ' = 10'
self.client.set(key=key1, value=100, record=[record2, record1])
data = self.client.select(key=key1, ccl=ccl, time=ts)
expected = {
record1: [1, 2, 3],
record2: [1, 2, 3, 4]
}
assert_equal(expected, data)
def test_select_keys_ccl_time(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
record1 = test_data.random_long()
record2 = test_data.random_long()
self.client.add(key=key1, value=1, records=[record1, record2])
self.client.add(key=key1, value=2, records=[record1, record2])
self.client.add(key=key1, value=3, records=[record1, record2])
self.client.add(key=key2, value=10, records=[record1, record2])
self.client.add(key=key1, value=4, record=record2)
ts = self.client.time()
ccl = key2 + ' = 10'
self.client.set(key=key1, value=100, record=[record2, record1])
data = self.client.select(key=[key1, key2], ccl=ccl, time=ts)
expected = {
record1: {key1: [1, 2, 3], key2: [10]},
record2: {key1: [1, 2, 3, 4], key2: [10]},
}
assert_equal(expected, data)
def test_select_key_ccl_timestr(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
record1 = test_data.random_long()
record2 = test_data.random_long()
self.client.add(key=key1, value=1, records=[record1, record2])
self.client.add(key=key1, value=2, records=[record1, record2])
self.client.add(key=key1, value=3, records=[record1, record2])
self.client.add(key=key2, value=10, records=[record1, record2])
self.client.add(key=key1, value=4, record=record2)
anchor = self.get_time_anchor()
ccl = key2 + ' = 10'
self.client.set(key=key1, value=100, record=[record2, record1])
ts = test_data.get_elapsed_millis_string(anchor)
data = self.client.select(key=key1, ccl=ccl, time=ts)
expected = {
record1: [1, 2, 3],
record2: [1, 2, 3, 4]
}
assert_equal(expected, data)
def test_select_keys_ccl_timestr(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
record1 = test_data.random_long()
record2 = test_data.random_long()
self.client.add(key=key1, value=1, records=[record1, record2])
self.client.add(key=key1, value=2, records=[record1, record2])
self.client.add(key=key1, value=3, records=[record1, record2])
self.client.add(key=key2, value=10, records=[record1, record2])
self.client.add(key=key1, value=4, record=record2)
anchor = self.get_time_anchor()
ccl = key2 + ' = 10'
self.client.set(key=key1, value=100, record=[record2, record1])
ts = test_data.get_elapsed_millis_string(anchor)
data = self.client.select(key=[key1, key2], ccl=ccl, time=ts)
expected = {
record1: {key1: [1, 2, 3], key2: [10]},
record2: {key1: [1, 2, 3, 4], key2: [10]},
}
assert_equal(expected, data)
def test_select_key_record(self):
self.client.add('foo', 1, 1)
self.client.add('foo', 2, 1)
self.client.add('foo', 3, 1)
assert_equal([1, 2, 3], self.client.select(key='foo', record=1))
def test_select_key_record_time(self):
self.client.add('foo', 1, 1)
self.client.add('foo', 2, 1)
self.client.add('foo', 3, 1)
ts = self.client.time()
self.client.add('foo', 4, 1)
assert_equal([1, 2, 3], self.client.select(key='foo', record=1, time=ts))
def test_select_key_record_timestr(self):
self.client.add('foo', 1, 1)
self.client.add('foo', 2, 1)
self.client.add('foo', 3, 1)
anchor = self.get_time_anchor()
self.client.add('foo', 4, 1)
ts = test_data.get_elapsed_millis_string(anchor)
assert_equal([1, 2, 3], self.client.select(key='foo', record=1, time=ts))
def test_select_key_records(self):
self.client.add('foo', 1, [1, 2, 3])
self.client.add('foo', 2, [1, 2, 3])
self.client.add('foo', 3, [1, 2, 3])
assert_equal({
1: [1, 2, 3],
2: [1, 2, 3],
3: [1, 2, 3]
}, self.client.select(key='foo', record=[1, 2, 3]))
def test_select_key_records_time(self):
self.client.add('foo', 1, [1, 2, 3])
self.client.add('foo', 2, [1, 2, 3])
self.client.add('foo', 3, [1, 2, 3])
ts = self.client.time()
self.client.add('foo', 4, [1, 2, 3])
assert_equal({
1: [1, 2, 3],
2: [1, 2, 3],
3: [1, 2, 3]
}, self.client.select(key='foo', record=[1, 2, 3], time=ts))
def test_select_key_records_timestr(self):
self.client.add('foo', 1, [1, 2, 3])
self.client.add('foo', 2, [1, 2, 3])
self.client.add('foo', 3, [1, 2, 3])
anchor = self.get_time_anchor()
self.client.add('foo', 4, [1, 2, 3])
ts = test_data.get_elapsed_millis_string(anchor)
assert_equal({
1: [1, 2, 3],
2: [1, 2, 3],
3: [1, 2, 3]
}, self.client.select(key='foo', record=[1, 2, 3], time=ts))
def test_select_keys_record(self):
self.client.add('foo', 1, 1)
self.client.add('foo', 2, 1)
self.client.add('bar', 1, 1)
self.client.add('bar', 2, 1)
data = self.client.select(keys=['foo', 'bar'], record=1)
expected = {
'foo': [1, 2],
'bar': [1, 2]
}
assert_equal(expected, data)
def test_select_keys_record_time(self):
self.client.add('foo', 1, 1)
self.client.add('foo', 2, 1)
self.client.add('bar', 1, 1)
self.client.add('bar', 2, 1)
ts = self.client.time()
self.client.add('foo', 3, 1)
self.client.add('bar', 3, 1)
data = self.client.select(keys=['foo', 'bar'], record=1, time=ts)
expected = {
'foo': [1, 2],
'bar': [1, 2]
}
assert_equal(expected, data)
def test_select_keys_record_timestr(self):
self.client.add('foo', 1, 1)
self.client.add('foo', 2, 1)
self.client.add('bar', 1, 1)
self.client.add('bar', 2, 1)
anchor = self.get_time_anchor()
self.client.add('foo', 3, 1)
self.client.add('bar', 3, 1)
ts = test_data.get_elapsed_millis_string(anchor)
data = self.client.select(keys=['foo', 'bar'], record=1, time=ts)
expected = {
'foo': [1, 2],
'bar': [1, 2]
}
assert_equal(expected, data)
def test_select_keys_records_time(self):
self.client.add('foo', 1, [1, 2])
self.client.add('foo', 2, [1, 2])
self.client.add('bar', 1, [1, 2])
self.client.add('bar', 2, [1, 2])
ts = self.client.time()
self.client.add('foo', 3, [1, 2])
self.client.add('bar', 3, [1, 2])
data = self.client.select(keys=['foo', 'bar'], records=[1, 2], time=ts)
expected = {
'foo': [1, 2],
'bar': [1, 2]
}
assert_equal({
1: expected,
2: expected
}, data)
def test_select_keys_records_timestr(self):
self.client.add('foo', 1, [1, 2])
self.client.add('foo', 2, [1, 2])
self.client.add('bar', 1, [1, 2])
self.client.add('bar', 2, [1, 2])
anchor = self.get_time_anchor()
self.client.add('foo', 3, [1, 2])
self.client.add('bar', 3, [1, 2])
ts = test_data.get_elapsed_millis_string(anchor)
data = self.client.select(keys=['foo', 'bar'], records=[1, 2], time=ts)
expected = {
'foo': [1, 2],
'bar': [1, 2]
}
assert_equal({
1: expected,
2: expected
}, data)
def test_select_keys_records(self):
self.client.add('foo', 1, [1, 2])
self.client.add('foo', 2, [1, 2])
self.client.add('bar', 1, [1, 2])
self.client.add('bar', 2, [1, 2])
data = self.client.select(keys=['foo', 'bar'], records=[1, 2])
expected = {
'foo': [1, 2],
'bar': [1, 2]
}
assert_equal({
1: expected,
2: expected
}, data)
def test_select_record(self):
self.client.add('foo', 1, [1, 2])
self.client.add('foo', 2, [1, 2])
self.client.add('bar', 1, [1, 2])
self.client.add('bar', 2, [1, 2])
data = self.client.select(record=1)
expected = {
'foo': [1, 2],
'bar': [1, 2]
}
assert_equal(expected, data)
def test_select_record_time(self):
self.client.add('foo', 1, [1, 2])
self.client.add('foo', 2, [1, 2])
self.client.add('bar', 1, [1, 2])
self.client.add('bar', 2, [1, 2])
ts = self.client.time()
self.client.add('foo', 3, [1, 2])
self.client.add('bar', 3, [1, 2])
data = self.client.select(record=2, time=ts)
expected = {
'foo': [1, 2],
'bar': [1, 2]
}
assert_equal(expected, data)
def test_select_record_timestr(self):
self.client.add('foo', 1, [1, 2])
self.client.add('foo', 2, [1, 2])
self.client.add('bar', 1, [1, 2])
self.client.add('bar', 2, [1, 2])
anchor = self.get_time_anchor()
self.client.add('foo', 3, [1, 2])
self.client.add('bar', 3, [1, 2])
ts = test_data.get_elapsed_millis_string(anchor)
data = self.client.select(record=2, time=ts)
expected = {
'foo': [1, 2],
'bar': [1, 2]
}
assert_equal(expected, data)
def test_select_records(self):
self.client.add('foo', 1, [1, 2])
self.client.add('foo', 2, [1, 2])
self.client.add('bar', 1, [1, 2])
self.client.add('bar', 2, [1, 2])
data = self.client.select(records=[1, 2])
expected = {
'foo': [1, 2],
'bar': [1, 2]
}
assert_equal({
1: expected,
2: expected
}, data)
def test_select_records_time(self):
self.client.add('foo', 1, [1, 2])
self.client.add('foo', 2, [1, 2])
self.client.add('bar', 1, [1, 2])
self.client.add('bar', 2, [1, 2])
ts = self.client.time()
self.client.add('foo', 3, [1, 2])
self.client.add('bar', 3, [1, 2])
data = self.client.select(records=[1, 2], time=ts)
expected = {
'foo': [1, 2],
'bar': [1, 2]
}
assert_equal({
1: expected,
2: expected
}, data)
def test_select_records_timestr(self):
self.client.add('foo', 1, [1, 2])
self.client.add('foo', 2, [1, 2])
self.client.add('bar', 1, [1, 2])
self.client.add('bar', 2, [1, 2])
anchor = self.get_time_anchor()
self.client.add('foo', 3, [1, 2])
self.client.add('bar', 3, [1, 2])
ts = test_data.get_elapsed_millis_string(anchor)
data = self.client.select(records=[1, 2], time=ts)
expected = {
'foo': [1, 2],
'bar': [1, 2]
}
assert_equal({
1: expected,
2: expected
}, data)
def test_set_key_value(self):
key = "foo"
value = 1
record = self.client.set(key=key, value=value)
data = self.client.select(record=record)
assert_equal({
'foo': [1]
}, data)
def test_set_key_value_record(self):
key = "foo"
value = 1
record = 1
self.client.add(key=key, value=2, record=record)
self.client.add(key=key, value=2, record=record)
self.client.set(key=key, value=value, record=record)
data = self.client.select(record=record)
assert_equal({
'foo': [1]
}, data)
def test_set_key_value_records(self):
key = "foo"
value = 1
records = [1, 2, 3]
self.client.add(key=key, value=2, record=records)
self.client.add(key=key, value=2, record=records)
self.client.set(key=key, value=value, record=records)
data = self.client.select(record=records)
expected = {
'foo': [1]
}
assert_equal({
1: expected,
2: expected,
3: expected
}, data)
def test_stage(self):
assert_is_none(self.client.transaction)
self.client.stage()
assert_is_not_none(self.client.transaction)
self.client.abort()
def test_time(self):
assert_true(isinstance(self.client.time(), int))
def test_time_phrase(self):
assert_true(isinstance(self.client.time("3 seconds ago"), int))
def test_verify_and_swap(self):
self.client.add("foo", 2, 2)
assert_false(self.client.verify_and_swap(key='foo', expected=1, record=2, replacement=3))
assert_true(self.client.verify_and_swap(key='foo', expected=2, record=2, replacement=3))
assert_equal(3, self.client.get(key='foo', record=2))
def test_verify_or_set(self):
self.client.add("foo", 2, 2)
self.client.verify_or_set(key='foo', value=3, record=2)
assert_equal(3, self.client.get(key='foo', record=2))
def test_verify_key_value_record(self):
self.client.add('name', 'jeff', 1)
self.client.add('name', 'jeffery', 1)
self.client.add('name', 'bob', 1)
assert_true(self.client.verify('name', 'jeff', 1))
self.client.remove('name', 'jeff', 1)
assert_false(self.client.verify('name', 'jeff', 1))
def test_verify_key_value_record_time(self):
self.client.add('name', 'jeff', 1)
self.client.add('name', 'jeffery', 1)
self.client.add('name', 'bob', 1)
ts = self.client.time()
self.client.remove('name', 'jeff', 1)
assert_true(self.client.verify('name', 'jeff', 1, time=ts))
def test_verify_key_value_record_timestr(self):
self.client.add('name', 'jeff', 1)
self.client.add('name', 'jeffery', 1)
self.client.add('name', 'bob', 1)
anchor = self.get_time_anchor()
self.client.remove('name', 'jeff', 1)
ts = test_data.get_elapsed_millis_string(anchor)
assert_true(self.client.verify('name', 'jeff', 1, time=ts))
def test_link_key_source_destination(self):
assert_true(self.client.link(key='friends', source=1, destination=2))
assert_equal(Link.to(2), self.client.get('friends', record=1))
def test_link_key_source_destinations(self):
assert_equal({
2: True,
3: True,
4: True
}, self.client.link(key='friends', source=1, destination=[2, 3, 4]))
def test_unlink_key_source_destination(self):
assert_true(self.client.link(key='friends', source=1, destination=2))
assert_true(self.client.unlink(key='friends', source=1, destination=2))
def test_unlink_key_source_destinations(self):
assert_true(self.client.link(key='friends', source=1, destination=2))
assert_equal({
2: True,
3: False
}, self.client.unlink(key='friends', source=1, destination=[2, 3]))
def test_find_or_add_key_value(self):
record = self.client.find_or_add("age", 23)
assert_equal(23, self.client.get("age", record))
def test_find_or_insert_ccl_json(self):
data = {
'name': 'jeff nelson'
}
data = ujson.dumps(data)
record = self.client.find_or_insert(criteria="age > 10", data=data)
assert_equal('jeff nelson', self.client.get("name", record))
def test_find_or_insert_ccl_dict(self):
data = {
'name': 'jeff nelson'
}
record = self.client.find_or_insert(criteria="age > 10", data=data)
assert_equal('jeff nelson', self.client.get("name", record))
def test_insert_dict_with_link(self):
data = {
'foo': Link.to(1)
}
record = self.client.insert(data=data)[0]
assert_equal(Link.to(1), self.client.get(key='foo', record=record))
def test_insert_dict_with_resolvable_link(self):
record1 = self.client.add('foo', 1)
record2 = self.client.insert(data={
'foo': Link.to_where('foo = 1')
})[0]
assert_equal(Link.to(record1), self.client.get(key='foo', record=record2))<|fim▁end|> | }
assert_equal(data.get(record1), expected)
assert_equal(data.get(record2), expected) |
<|file_name|>sinusoid_regression.py<|end_file_name|><|fim▁begin|>'''
This file loads in data from /data/interim/interim_data.hdf and then
applies a sinusoidal regression model to the temperature data to remove
the yearly predictable variation.
NOTE: This file is intended to be executed by make from the top
level of the project directory hierarchy. We rely on os.getcwd()
and it will not work if run directly as a script from this directory.
'''
import numpy as np
import pandas as pd
import multiprocessing
import datetime
from scipy.optimize import least_squares
from src.conf import HDF_INTERIM_FILE, LOCATIONS_KEY, TEMPERATURE_TS_ROOT
f_yr = 0.00273515063053 # Frequency of yearly variation in 1/days
# Error function for sinusoidal regression
def err_lstsqr(theta, f, x, t): # No frequency optimization
a, phi = theta
s = a * np.sin(2 * np.pi * f * t + phi)
return x - s
def sinregress_to_hdf(key: str):<|fim▁hole|> given by hdf_path. We then regress the T series on a sinusoid with
a period of 1 yr. We subtract this from T and store it back in
a 'T_sinregress' column. This series will be centered as well.
'''
with pd.HDFStore(ghdf_path, mode='r') as hdf:
D = hdf[key] # Read D from disk
t = D.index
T = D['T'].values
unix_birth = datetime.datetime(1970, 1, 1)
def time_in_days(t): # Convert time to raw numbers
# 86400 = datetime.timedelta(days=1).total_seconds()
return (t - unix_birth).total_seconds() / 86400
t_days = np.fromiter(map(time_in_days, t), np.float64)
res = least_squares(err_lstsqr, (1., 0.), method='lm', verbose=0,
kwargs={'f': f_yr, 'x': T, 't': t_days})
a, phi = res.x
print('a:', a, 'phi:', phi) # Watch for odd outliers
T_hat = a * np.sin(2 * np.pi * f_yr * t_days + phi)
T_sinregress = T - T_hat
T_sinregress = T_sinregress - np.mean(T_sinregress)
D['T_sinregress'] = T_sinregress
with ghdf_lock:
D.to_hdf(ghdf_path, key=key)
return
def init_lock(lock: multiprocessing.Lock, hdf_path: str):
'''
Initializes the global lock used by interpolate_to_hdf
'''
global ghdf_lock
global ghdf_path
ghdf_lock = lock
ghdf_path = hdf_path
return
def main():
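    # Fan the per-station regressions out across a small process pool; HDF5
    # writes are serialized through the shared lock handed to each worker
    # via init_lock().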
hdf_path = HDF_INTERIM_FILE
# Get location data
D_loc = pd.read_hdf(hdf_path, key=LOCATIONS_KEY)
# We have a cpu bound task
hdf_lock = multiprocessing.Lock()
pool = multiprocessing.Pool(processes=5,
initializer=init_lock,
initargs=(hdf_lock,
hdf_path),
maxtasksperchild=4)
hdf_group = '/' + TEMPERATURE_TS_ROOT + '/wban_'
pool.map(sinregress_to_hdf, (hdf_group + row['WBAN'] + '/D' for i, row in
D_loc.iterrows()))
pool.close()
pool.join()
return
if __name__ == '__main__':
main()<|fim▁end|> | '''
Loads in a pandas dataframe from the key location in the hdf store |
<|file_name|>en_tune.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
import os,sys,time
import numpy as np
import bitarray
import tables as tb
import logging
import yaml
import matplotlib.pyplot as plt
import monopix_daq.scan_base as scan_base
import monopix_daq.analysis.interpreter as interpreter
local_configuration={"exp_time": 1.0,
"cnt_th": 1,
"n_pix": 512,
"th_start": 0.85,
"th_stop": 0.5,
"th_step":[-0.01,-0.002,-0.0005]
}
class EnTune(scan_base.ScanBase):
scan_id = "en_tune"
def scan(self,**kwargs):
th=kwargs.pop("th_start",0.85)
th_stop=kwargs.pop("th_stop",0.5)
th_step=kwargs.pop("th_step",[-0.01,-0.002,-0.0005])
cnt_th=kwargs.pop("cnt_th",1)
exp_time=kwargs.pop("exp_time",1.0)
n_pix=kwargs.pop("n_pix",512)
####################
## create a table for scan_params
param_dtype=[("scan_param_id","<i4"),("th","<f2")]
description=np.zeros((1,),dtype=param_dtype).dtype
self.scan_param_table = self.h5_file.create_table(self.h5_file.root,
name='scan_parameters', title='scan_parameters',<|fim▁hole|> en_org=np.copy(self.dut.PIXEL_CONF["PREAMP_EN"][:,:])
th_step_i=0
fig,ax=plt.subplots(2,2)
plt.ion()
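        # Sweep TH downward from th_start; once hits appear, back off and
        # refine through the th_step sizes, masking any pixel that fires more
        # than cnt_th times, until at most n_pix pixels remain enabled.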
while th > th_stop or th_step_i==len(th_step):
self.monopix.set_th(th)
en=np.copy(self.dut.PIXEL_CONF["PREAMP_EN"][:,:])
self.monopix.set_monoread()
with self.readout(scan_param_id=scan_param_id,fill_buffer=True,clear_buffer=True,
readout_interval=0.005):
time.sleep(exp_time)
self.monopix.stop_monoread()
scan_param_id=scan_param_id+1
##########################
### get data from buffer
buf = self.fifo_readout.data
if len(buf)==0:
self.logger.info("en_tune:th=%.4f pix=%d, no data"%(th,len(np.argwhere(en))))
th=th+th_step[th_step_i]
continue
elif th_step_i!=(len(th_step)-1):
self.logger.info("en_tune:th=%.4f step=%.4f "%(th,th_step[th_step_i]))
th=th-th_step[th_step_i]
th_step_i=th_step_i+1
continue
data = np.concatenate([buf.popleft()[0] for i in range(len(buf))])
img=interpreter.raw2img(data,delete_noise=False)
##########################
## showing status
self.logger.info("en_tune:==== %.4f===data %d=====cnt %d======en %d====="%(
th,len(data),np.sum(img), len(en[en])))
ax[0,0].cla()
            ax[0,0].imshow(np.transpose(img),vmax=min(np.max(img),100),origin="lower",aspect="auto")
ax[0,0].set_title("th=%.4f"%th)
ax[1,0].cla()
            ax[1,0].imshow(np.transpose(self.monopix.get_tdac_memory()),vmax=16,vmin=0,origin="lower",aspect="auto")
ax[0,1].cla()
            ax[0,1].imshow(np.transpose(en),vmax=1,vmin=0,origin="lower",aspect="auto")
            ax[0,1].set_title("en=%d"%len(np.argwhere(en)))
fig.tight_layout()
fig.savefig(os.path.join(self.working_dir,"last_scan.png"),format="png")
plt.pause(0.003)
##########################
### find noisy
arg=np.argwhere(img>cnt_th)
s="en_tune:noisy pixel %d"%len(arg)
for a in arg:
s="[%d,%d]=%d"%(a[0],a[1],img[a[0],a[1]]),
self.logger.info(s)
self.logger.info("en_tune:th=%.4f en=%d"%(th,len(np.argwhere(en))))
en=np.bitwise_and(en,img<=cnt_th)
if n_pix >= len(np.argwhere(en)):
self.monopix.set_th(th-th_step[th_step_i])
break
else:
th=th+th_step[th_step_i]
self.monopix.set_preamp_en(en)
self.logger.info("en_tune:th=%.4f en=%d"%(
self.dut.SET_VALUE["TH"],
len(np.argwhere(self.dut.PIXEL_CONF["PREAMP_EN"][:,:]))
))
def analyze(self):
pass
def plot(self):
fraw = self.output_filename +'.h5'
fpdf = self.output_filename +'.pdf'
import monopix_daq.analysis.plotting_base as plotting_base
with plotting_base.PlottingBase(fpdf,save_png=True) as plotting:
with tb.open_file(fraw) as f:
firmware=yaml.load(f.root.meta_data.attrs.firmware)
## DAC Configuration page
dat=yaml.load(f.root.meta_data.attrs.dac_status)
dat.update(yaml.load(f.root.meta_data.attrs.power_status))
plotting.table_1value(dat,page_title="Chip configuration")
## Pixel Configuration page (before tuning)
dat=yaml.load(f.root.meta_data.attrs.pixel_conf_before)
plotting.plot_2d_pixel_4(
[dat["PREAMP_EN"],dat["INJECT_EN"],dat["MONITOR_EN"],dat["TRIM_EN"]],
                    page_title="Pixel configuration before tuning",
title=["Preamp","Inj","Mon","TDAC"],
z_min=[0,0,0,0], z_max=[1,1,1,15])
## Preamp Configuration
dat=yaml.load(f.root.meta_data.attrs.pixel_conf)
plotting.plot_2d_pixel_hist(np.array(dat["PREAMP_EN"]),
title="Enabled preamp",
z_max=1)
if __name__ == "__main__":
from monopix_daq import monopix
import argparse
parser = argparse.ArgumentParser(usage="python en_tune.py",
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("--config_file", type=str, default=None)
parser.add_argument('-e',"--exp_time", type=float, default=local_configuration["exp_time"])
parser.add_argument('-npix',"--n_pix", type=float, default=local_configuration["n_pix"])
parser.add_argument('-t',"--th_start", type=float, default=local_configuration["th_start"])
parser.add_argument("-f","--flavor", type=str, default="28:32")
parser.add_argument("--tdac", type=int, default=None)
parser.add_argument("--LSBdacL", type=int, default=None)
parser.add_argument("-p","--power_reset", action='store_const', const=1, default=0) ## defualt=True: skip power reset
parser.add_argument("-fout","--output_file", type=str, default=None)
args=parser.parse_args()
local_configuration["exp_time"]=args.exp_time
local_configuration["n_pix"]=args.n_pix
local_configuration["th_start"]=args.th_start
m=monopix.Monopix(no_power_reset=not bool(args.power_reset))
scan = EnTune(m, fout=args.output_file, online_monitor_addr="tcp://127.0.0.1:6500")
if args.config_file is not None:
m.load_config(args.config_file)
if args.flavor is not None:
m.set_preamp_en("none")
if args.flavor=="all":
collist=np.arange(0,36,1)
else:
tmp=args.flavor.split(":")
collist=np.arange(int(tmp[0]),int(tmp[1]),1)
en=np.copy(m.dut.PIXEL_CONF["PREAMP_EN"][:,:])
for c in collist:
en[c,:]=True
m.set_preamp_en(en)
if args.tdac is not None:
m.set_tdac(args.tdac)
if args.LSBdacL is not None:
m.set_global(LSBdacL=args.LSBdacL)
scan.start(**local_configuration)
#scan.analyze()
scan.plot()<|fim▁end|> | description=description, filters=self.filter_tables)
scan_param_id=0 |
<|file_name|>taylor.py<|end_file_name|><|fim▁begin|>''' A taylor series visualization graph. This example demonstrates
the ability of Bokeh for inputted expressions to reflect on a chart.
'''
import numpy as np
import sympy as sy
from bokeh.core.properties import value
from bokeh.io import curdoc
from bokeh.layouts import column
from bokeh.models import (ColumnDataSource, Legend, LegendItem,
PreText, Slider, TextInput)
from bokeh.plotting import figure
xs = sy.Symbol('x')
expr = sy.exp(-xs)*sy.sin(xs)
def taylor(fx, xs, order, x_range=(0, 1), n=200):
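    # Sample f and its order-n Taylor polynomial (expanded about 0) on a
    # shared grid; a constant polynomial is broadcast to match x's shape.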
x0, x1 = x_range
x = np.linspace(float(x0), float(x1), n)
fy = sy.lambdify(xs, fx, modules=['numpy'])(x)
tx = fx.series(xs, n=order).removeO()
if tx.is_Number:
ty = np.zeros_like(x)
ty.fill(float(tx))
else:
ty = sy.lambdify(xs, tx, modules=['numpy'])(x)
return x, fy, ty
source = ColumnDataSource(data=dict(x=[], fy=[], ty=[]))
p = figure(x_range=(-7,7), y_range=(-100, 200), width=800, height=400)
line_f = p.line(x="x", y="fy", line_color="navy", line_width=2, source=source)
line_t = p.line(x="x", y="ty", line_color="firebrick", line_width=2, source=source)
p.background_fill_color = "lightgrey"
legend = Legend(location="top_right")
legend.items = [<|fim▁hole|>
def update():
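    # Re-parse the expression box, surfacing parse errors in the PreText
    # widget, then push the recomputed curves into the shared ColumnDataSource.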
try:
expr = sy.sympify(text.value, dict(x=xs))
except Exception as exception:
errbox.text = str(exception)
else:
errbox.text = ""
x, fy, ty = taylor(expr, xs, slider.value, (-2*sy.pi, 2*sy.pi), 200)
p.title.text = "Taylor (n=%d) expansion comparison for: %s" % (slider.value, expr)
legend.items[0].label = value(f"{expr}")
legend.items[1].label = value(f"taylor({expr})")
source.data = dict(x=x, fy=fy, ty=ty)
slider = Slider(start=1, end=20, value=1, step=1, title="Order")
slider.on_change('value', lambda attr, old, new: update())
text = TextInput(value=str(expr), title="Expression:")
text.on_change('value', lambda attr, old, new: update())
errbox = PreText()
update()
inputs = column(text, slider, errbox, width=400)
curdoc().add_root(column(inputs, p))<|fim▁end|> | LegendItem(label=value(f"{expr}"), renderers=[line_f]),
LegendItem(label=value(f"taylor({expr})"), renderers=[line_t]),
]
p.add_layout(legend) |
<|file_name|>importfavs.ts<|end_file_name|><|fim▁begin|>import { RootState } from '../reducers';<|fim▁hole|><|fim▁end|> |
export const importfavsSelector = (s: RootState) => s.importfavs; |
<|file_name|>download.py<|end_file_name|><|fim▁begin|><|fim▁hole|>"""
Download NLTK data
"""
__author__ = "Manan Kalra"
__email__ = "[email protected]"
import nltk
nltk.download()<|fim▁end|> | #!/usr/bin/env python
|
<|file_name|>GetStockHistoryInfoTask.py<|end_file_name|><|fim▁begin|># coding=utf-8
import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.getcwd(), "..")))
from db.MysqlUtil import initMysql, execute, select, batchInsert, disconnect
from common.JsonHelper import loadJsonConfig
from api.tushareApi import getSimpleHistoryData
from datetime import datetime, timedelta
from common.LoggerHelper import writeErrorLog, writeWarningLog, writeInfoLog, writeDebugLog, writeLog, writeExceptionLog
from wechat.weChatSender import sendMessageToMySelf
from common.HttpHelper import httpGet
from common.FileHelper import saveFile
import time
import json
# Fetch historical quote data (forward-adjusted) from 10jqka/THS ("Tonghuashun")
def updateStockHistoryInfoByTHS(stockList):
for stock in stockList:
code = stock[0]
i = 2010
thisYear = datetime.now().year
while (i <= thisYear):
# time.sleep(1)
infos = getStockInfos(code, i)
if infos is None:
                i = i + 1  # advance to the next year on a hard failure instead of retrying forever
                continue
for date in infos:
open = infos.get(date).get('open')
close = infos.get(date).get('close')
high = infos.get(date).get('high')
low = infos.get(date).get('low')
volume = infos.get(date).get('volume')
amount = infos.get(date).get('amount')
checkExistSql = unicode("select count(*) from s_stock where code='{0}' and date='{1}'").format(code,
date)
count = select(checkExistSql, False)[0]
if count > 0:
updateSql = unicode(
"update s_stock set volume={2},highPrice={3},lowPrice={4},openPrice={5},closePrice={6},amount='{7}' where code='{0}' and date='{1}'").format(
code, date, volume, high, low, open, close, amount)
execute(updateSql)
print code, date, updateSql
else:
insertSql = unicode(
"insert into s_stock(code,date,timestamp,volume,highPrice,lowPrice,openPrice,closePrice,amount) VALUES ('{0}','{1}',{2},{3},{4},{5},{6},{7},'{8}')").format(
code, date, int(time.mktime(time.strptime(date, '%Y-%m-%d'))), volume, high, low, open, close,
amount)
execute(insertSql)
print code, date, insertSql
i = i + 1
# Parse one year of 10jqka quote data (forward-adjusted)
def getStockInfos(code, year):
try:
url = "http://d.10jqka.com.cn/v2/line/hs_{0}/01/{1}.js".format(code, year)
res = httpGet(url).decode("utf-8")
index = res.find("(")
if (index < 0):
writeErrorLog(unicode("解析行情失败: code:{0}, year:{1}, res:{2}").format(code, year, res))
return []
res = res[index + 1:-1]
writeLog(unicode("获取股票历史行情: code: {0}, year:{1}").format(code, year))
jo = json.loads(res)
dataInfo = jo['data'].split(';')
result = {}
for item in dataInfo:
infos = item.split(',')
dic = {}
dic['open'] = infos[1]
dic['high'] = infos[2]
dic['low'] = infos[3]
dic['close'] = infos[4]
dic['volume'] = infos[5]
dic['amount'] = "{0}亿".format(round(float(infos[6]) / 100000000, 1))
result[datetime.strptime(infos[0], '%Y%m%d').strftime('%Y-%m-%d')] = dic
return result
except Exception, e:
writeErrorLog(unicode("解析行情失败: code:{0}, year:{1}, e:{2}").format(code, year, str(e)))
if "404" in str(e):
return []
else:
return None
def getStockHistoryInfoFromDb():
sql = unicode("SELECT code,count(*) from s_stock GROUP by code HAVING count(*)<20")
data = select(sql)
updateStockHistoryInfoByTHS(data)
def getStockHistoryInfoFromConfig():
stockList = loadJsonConfig(os.path.abspath(os.path.join(os.getcwd(), "../config/newStockList.json")))
updateStockHistoryInfoByTHS(stockList)
def updateAllStockHistoryInfo():
sql = unicode("select code,name from s_stock_info order by code asc")
data = select(sql)
updateStockHistoryInfoByTHS(data)
def updateStockOtherInfo():
sql = unicode("select code,name from s_stock_info order by code asc")
stockList = select(sql)
for stock in stockList:
code = stock[0]
if int(code) < 601126:
continue
selectInfoSql = unicode("select date,closePrice from s_stock where code='{0}' order by date asc").format(code)
data = select(selectInfoSql)
writeLog(unicode("更新股票其他指标数据: code: {0}").format(code))
        updateStockBias(code, data, 6)
        updateStockBias(code, data, 12)
        updateStockBias(code, data, 24)
updateStockMA(code, data, 5)
updateStockMA(code, data, 10)
updateStockMA(code, data, 20)
updateStockMA(code, data, 30)
updateStockMA(code, data, 60)
updateStockMA(code, data, 120)
updateStockMA(code, data, 250)
updateStockChangePercent(code, data)
def updateStockChangePercent(code, data):
for i in range(1, len(data)):
try:
changeAmount = data[i][1] - data[i - 1][1]
changePercent = round(changeAmount * 100 / data[i - 1][1], 2)
updateSql = unicode(
"update s_stock set changePercent={0},changeAmount={1} where code='{2}' and date='{3}'").format(
changePercent, changeAmount, code, data[i][0])
execute(updateSql)
except Exception, e:
writeErrorLog(
unicode("更新涨幅数据失败: code:{0}, i:{1}, date:{2}, closePrice:{3}").format(code, i, data[i][0], data[i][1]))
def updateStockMA(code, data, n):
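    # n-day simple moving average of the close price, written back into the
    # MA{n} column for every trading day that has a full window.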
for i in range(n - 1, len(data)):
j = i
sum = 0
while (i - j < n):
sum = sum + data[j][1]
j = j - 1
avg = round(sum / n, 2)
sql = unicode("update s_stock set MA{0}={1} where code='{2}' and date='{3}'").format(n, avg, code, data[i][0])
execute(sql)
def updateStockBias(code, data, n):
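    # BIAS{1,2,3} = percentage deviation of the close from its 6/12/24-day
    # moving average: (close - MA) * 100 / MA.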
for i in range(n - 1, len(data)):
j = i
sum = 0
while (i - j < n):
sum = sum + data[j][1]
j = j - 1
avg = round(sum / n, 2)
todayClosePrice = float(data[i][1])
bias = 0 if avg == 0 else round((todayClosePrice - avg) * 100 / avg, 2)
number = 1 if n == 6 else (2 if n == 12 else 3)
sql = unicode("update s_stock set BIAS{0}={1} where code='{2}' and date='{3}'").format(number, bias, code,
data[i][0])
execute(sql)
def main(argv):
try:
reload(sys)
sys.setdefaultencoding('utf-8')
# sendMessageToMySelf(unicode("开始查询股票历史行情数据"))
begin = datetime.now()
initMysql()
# getStockHistoryInfoFromDb()
# getStockHistoryInfoFromConfig()
updateStockOtherInfo()
disconnect()
end = datetime.now()<|fim▁hole|>
message = unicode("查询股票历史行情数据的任务执行完毕,当前时间:{0},执行用时:{1}").format(datetime.now(), end - begin)
writeLog(message)
sendMessageToMySelf(message)
except:
writeExceptionLog('RealTimeRemindTask Error.')
if __name__ == '__main__':
main(sys.argv)<|fim▁end|> | |
<|file_name|>index.js<|end_file_name|><|fim▁begin|>'use strict';
const fs = require('fs');
const path = require('path');
const Router = require('koa-router');
const apiRouter = require('./api');
const ask = require('../lib/ask');
const config = require('config');
const indexFilePath = path.resolve(__dirname, '..', 'views', 'index.html');
const router = new Router();
const apiAddress = config.port ?
`//${config.path}:${config.port}/api` : `//${config.path}/api`;
router.use('/api', apiRouter.routes(), apiRouter.allowedMethods());
router.get('/*', index);
const appScripts = getStartScripts();
let indexFileContents = null;
async function index (ctx) {
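  // Render views/index.html once by evaluating it as a template literal
  // against the passed-in state, then serve the cached string afterwards.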
if (indexFileContents === null) {
let indexFile = await ask(fs, 'readFile', indexFilePath);
indexFileContents = new Function ('state', `return \`${indexFile}\``)({
apiAddress,
appScripts
});
}
ctx.body = indexFileContents;
}
function getStartScripts() {
if (process.env.NODE_ENV === 'production') {
return `<script type="text/javascript" src="/js/app.bundle.js"></script>`;
} else {
return `<script type="text/javascript" src="/js/simple-require.js"></script>
<script type="text/javascript" src="/js/index.js"></script>`;
}
<|fim▁hole|><|fim▁end|> | }
module.exports = router; |
<|file_name|>rosalind_gs.py<|end_file_name|><|fim▁begin|>import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../tools'))
import files
import graphs
def main(argv):
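    # Rosalind GS: for each directed graph, report a "general sink"
    # (a vertex from which every other vertex is reachable).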
k, Gs = files.read_graphs(argv[0])
sinks = []
for G in Gs:
n, m = G[:2]
edges = G[2]
        nodes = range(1, n + 1)
sinks.append(graphs.general_sink(nodes, edges))
print ' '.join(str(sink) for sink in sinks)<|fim▁hole|>if __name__ == "__main__":
sys.setrecursionlimit(1048576)
main(sys.argv[1:])<|fim▁end|> | |
<|file_name|>async-auto-completer-test.js<|end_file_name|><|fim▁begin|>#!/usr/bin/env node
/*
The Cedric's Swiss Knife (CSK) - CSK terminal toolbox test suite
Copyright (c) 2009 - 2015 Cédric Ronvel
The MIT License (MIT)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
"use strict" ;
/* jshint unused:false */
var fs = require( 'fs' ) ;
var termkit = require( '../lib/termkit.js' ) ;
termkit.getDetectedTerminal( function( error , term ) {
var autoCompleter = function autoCompleter( inputString , callback )
{
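		// Complete against the files of this test's directory; the trailing
		// `true` asks termkit.autoComplete() to return the alternative
		// completions when the prefix is ambiguous, which feeds the menu.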
fs.readdir( __dirname , function( error , files ) {
//console.log( files ) ;
callback( undefined , termkit.autoComplete( files , inputString , true ) ) ;
} ) ;
} ;
function question()
{
term( 'Choose a file: ' ) ;
term.inputField( { autoComplete: autoCompleter , autoCompleteMenu: true } , function( error , input ) {
if ( error )
{<|fim▁hole|> term.red.bold( "\nAn error occurs: " + error + "\n" ) ;
question() ;
}
else
{
term.green( "\nYour file is '%s'\n" , input ) ;
terminate() ;
}
} ) ;
}
function terminate()
{
term.grabInput( false ) ;
// Add a 100ms delay, so the terminal will be ready when the process effectively exit, preventing bad escape sequences drop
setTimeout( function() { process.exit() ; } , 100 ) ;
}
term.bold.cyan( 'Async auto-completer, type something and hit the ENTER key...\n' ) ;
question() ;
} ) ;<|fim▁end|> | |
<|file_name|>34.py<|end_file_name|><|fim▁begin|>import json
with open("birthdays.json", "r") as damnJson:
birthDays = json.load(damnJson)
print("We know the birth days of: ")
for i in birthDays:
print(i)
print("\nWould you like to add or retrieve a birth day?")
lol = input().strip().lower()
if lol == "add":
person = input("Who's the lucky one? ")
date = input("What's his birth day? ")
birthDays[person] = date
with open("birthdays.json", "w") as damnJson:
json.dump(birthDays, damnJson)
print("\nk thx\n")
elif lol == "retrieve":
print("\nWho would you like to know the birth date of? ")
person = input()
print(birthDays[person])
<|fim▁hole|>else:
print("fk u m8")<|fim▁end|> | |
<|file_name|>display.py<|end_file_name|><|fim▁begin|>def __load():
import imp, os, sys
ext = 'pygame/display.so'
for path in sys.path:
if not path.endswith('lib-dynload'):
continue
ext_path = os.path.join(path, ext)
if os.path.exists(ext_path):
mod = imp.load_dynamic(__name__, ext_path)
break<|fim▁hole|>del __load<|fim▁end|> | else:
raise ImportError(repr(ext) + " not found")
__load() |
<|file_name|>0073_remove_product_order_index.py<|end_file_name|><|fim▁begin|># Generated by Django 2.2.24 on 2021-10-02 14:40
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('products', '0072_product_order_index'),
]<|fim▁hole|> model_name='product',
name='order_index',
),
]<|fim▁end|> |
operations = [
migrations.RemoveField( |
<|file_name|>Flyby.py<|end_file_name|><|fim▁begin|>import Gears as gears
from .. import *
try:
from OpenGL.GL import *
from OpenGL.GLU import *
except:
print ('ERROR: PyOpenGL not installed properly.')
import random
def box() :
glBegin(GL_QUADS)
glColor3f(0.0,1.0,0.0)
glVertex3f(1.0, 1.0,-1.0)
glVertex3f(-1.0, 1.0,-1.0)
glVertex3f(-1.0, 1.0, 1.0)
glVertex3f(1.0, 1.0, 1.0)
glColor3f(1.0,0.5,0.0)
glVertex3f(1.0,-1.0, 1.0)
glVertex3f(-1.0,-1.0, 1.0)
glVertex3f(-1.0,-1.0,-1.0)
glVertex3f(1.0,-1.0,-1.0)
glColor3f(1.0,0.0,0.0)
glVertex3f(1.0, 1.0, 1.0)
glVertex3f(-1.0, 1.0, 1.0)
glVertex3f(-1.0,-1.0, 1.0)
glVertex3f(1.0,-1.0, 1.0)
glColor3f(1.0,1.0,0.0)
glVertex3f(1.0,-1.0,-1.0)
glVertex3f(-1.0,-1.0,-1.0)
glVertex3f(-1.0, 1.0,-1.0)
glVertex3f(1.0, 1.0,-1.0)
glColor3f(0.0,0.0,1.0)
glVertex3f(-1.0, 1.0, 1.0)
glVertex3f(-1.0, 1.0,-1.0)
glVertex3f(-1.0,-1.0,-1.0)
glVertex3f(-1.0,-1.0, 1.0)
glColor3f(1.0,0.0,1.0)
glVertex3f(1.0, 1.0,-1.0)
glVertex3f(1.0, 1.0, 1.0)
glVertex3f(1.0,-1.0, 1.0)
glVertex3f(1.0,-1.0,-1.0)
glEnd()
class Flyby() :
args = None
def __init__(self, **args):
self.args = args
def apply(self, stimulus) :
self.applyWithArgs(stimulus, **self.args)
def applyWithArgs(
self,
stimulus,
) :
stimulus.enableColorMode()
stimulus.setForwardRenderingCallback(self.render)
stimulus.registerCallback(gears.StimulusStartEvent.typeId, self.start)
stimulus.registerCallback(gears.StimulusEndEvent.typeId, self.finish)
def start( self, event ):
print('hello start flyby')<|fim▁hole|> self.glist = glGenLists(1)
glNewList(self.glist, GL_COMPILE)
for i in range(0, 400) :
glPushMatrix()
glTranslated(
random.uniform( a = -20, b = 20),
random.uniform( a = -20, b = 20),
random.uniform( a = -20, b = 20),
)
box()
glPopMatrix()
glEndList()
def finish( self, event ):
glDeleteLists(self.glist, 1)
def render(self, iFrame):
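        # Step the camera forward along -z each frame and draw the cached
        # display list twice, 40 units apart, so the box field looks endless.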
glEnable(GL_DEPTH_TEST)
glDepthMask(GL_TRUE);
glClearColor(0.0, 0.0, 0.0, 1.0 )
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(45, 1, 0.1, 1000)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
gluLookAt(0, 0, 20- iFrame * 0.1, 0, 0, 19 - iFrame * 0.1, 0, 1, 0)
glTranslated(0, 0, -40 * (iFrame // 400))
glCallList(self.glist)
glTranslated(0, 0, -40)
glCallList(self.glist)
glDisable(GL_DEPTH_TEST)
glDepthMask(GL_FALSE);<|fim▁end|> | |
<|file_name|>hyperstart.go<|end_file_name|><|fim▁begin|>package libhyperstart
import (
"io"
"syscall"
hyperstartapi "github.com/hyperhq/runv/hyperstart/api/json"
)
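// Hyperstart abstracts the hyperstart agent API used to drive a sandbox VM:
// sandbox lifecycle, container/process management, their I/O streams, and
// guest file/network helpers.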
type Hyperstart interface {
Close()
LastStreamSeq() uint64<|fim▁hole|> ProcessAsyncEvents() (<-chan hyperstartapi.ProcessAsyncEvent, error)
NewContainer(c *hyperstartapi.Container) (io.WriteCloser, io.ReadCloser, io.ReadCloser, error)
RestoreContainer(c *hyperstartapi.Container) (io.WriteCloser, io.ReadCloser, io.ReadCloser, error)
AddProcess(container string, p *hyperstartapi.Process) (io.WriteCloser, io.ReadCloser, io.ReadCloser, error)
SignalProcess(container, process string, signal syscall.Signal) error
TtyWinResize(container, process string, row, col uint16) error
StartSandbox(pod *hyperstartapi.Pod) error
DestroySandbox() error
WriteFile(container, path string, data []byte) error
ReadFile(container, path string) ([]byte, error)
AddRoute(r []hyperstartapi.Route) error
UpdateInterface(dev, ip, mask string) error
OnlineCpuMem() error
}<|fim▁end|> |
APIVersion() (uint32, error) |
<|file_name|>tests.py<|end_file_name|><|fim▁begin|>from django.utils import unittest
from restclients.test.uwnetid.subscription import EmailForwardingTest
from restclients.test.util.date_formator import formatorTest
from restclients.test.hfs.idcard import HfsTest
from restclients.test.library.mylibinfo import MyLibInfoTest
from restclients.test.digitlib.curric import DigitLibTest
from restclients.test.sws.compatible import SWSTest
from restclients.test.sws.financial import SWSFinance
from restclients.test.sws.notice import SWSNotice
from restclients.test.sws.term import SWSTestTerm
from restclients.test.sws.err404.dao import SWSTestDAO404
from restclients.test.sws.err500.dao import SWSTestDAO500
from restclients.test.sws.invalid_dao import SWSTestInvalidDAO
from restclients.test.sws.file_implementation.dao import SWSTestFileDAO
from restclients.test.sws.schedule_data import SWSTestScheduleData
from restclients.test.sws.enrollment import SWSTestEnrollments
from restclients.test.sws.section import SWSTestSectionData
from restclients.test.sws.section_status import SWSTestSectionStatusData
from restclients.test.sws.independent_study import SWSIndependentStudy
from restclients.test.sws.instructor_no_regid import SWSMissingRegid
from restclients.test.sws.registrations import SWSTestRegistrations
from restclients.test.sws.campus import SWSTestCampus
from restclients.test.sws.college import SWSTestCollege
from restclients.test.sws.department import SWSTestDepartment
from restclients.test.sws.curriculum import SWSTestCurriculum
from restclients.test.sws.graderoster import SWSTestGradeRoster<|fim▁hole|>
from restclients.test.sws.dates import SWSTestDates
from restclients.test.pws.person import PWSTestPersonData
from restclients.test.pws.entity import PWSTestEntityData
from restclients.test.pws.idcard import TestIdCardPhoto
from restclients.test.pws.err404.dao import PWSTestDAO404
from restclients.test.pws.err404.pws import PWSTest404
from restclients.test.pws.err500.dao import PWSTestDAO500
from restclients.test.pws.err500.pws import PWSTest500
from restclients.test.pws.invalid_dao import PWSTestInvalidDAO
from restclients.test.pws.file_implementation.dao import PWSTestFileDAO
from restclients.test.gws.group import GWSGroupBasics
from restclients.test.gws.course_group import GWSCourseGroupBasics
from restclients.test.gws.search import GWSGroupSearch
from restclients.test.cache.none import NoCacheTest
from restclients.test.cache.time import TimeCacheTest
from restclients.test.cache.etag import ETagCacheTest
from restclients.test.book.by_schedule import BookstoreScheduleTest
from restclients.test.amazon_sqs.queues import SQSQueue
from restclients.test.sms.send import SMS
from restclients.test.sms.invalid_phone_number import SMSInvalidNumbers
from restclients.test.nws.subscription import NWSTestSubscription
from restclients.test.nws.channel import NWSTestChannel
from restclients.test.nws.endpoint import NWSTestEndpoint
from restclients.test.nws.message import NWSTestMessage
from restclients.test.nws.person import NWSTestPerson
from restclients.test.canvas.enrollments import CanvasTestEnrollment
from restclients.test.canvas.accounts import CanvasTestAccounts
from restclients.test.canvas.admins import CanvasTestAdmins
from restclients.test.canvas.roles import CanvasTestRoles
from restclients.test.canvas.courses import CanvasTestCourses
from restclients.test.canvas.sections import CanvasTestSections
from restclients.test.canvas.bad_sis_ids import CanvasBadSISIDs
from restclients.test.canvas.terms import CanvasTestTerms
from restclients.test.canvas.users import CanvasTestUsers
from restclients.test.canvas.submissions import CanvasTestSubmissions
from restclients.test.canvas.assignments import CanvasTestAssignments
from restclients.test.canvas.quizzes import CanvasTestQuizzes
from restclients.test.catalyst.gradebook import CatalystTestGradebook
from restclients.test.trumba.accounts import TrumbaTestAccounts
from restclients.test.trumba.calendar import TestCalendarParse
from restclients.test.trumba.calendars import TrumbaTestCalendars
from restclients.test.gws.trumba_group import TestGwsTrumbaGroup
from restclients.test.r25.events import R25TestEvents
from restclients.test.r25.spaces import R25TestSpaces
from restclients.test.myplan import MyPlanTestData
from restclients.test.thread import ThreadsTest
from restclients.test.view import ViewTest
from restclients.test.dao_implementation.mock import TestMock
from restclients.test.irws import IRWSTest
from restclients.test.iasystem.evaluation import IASystemTest<|fim▁end|> | |
<|file_name|>schema.d.ts<|end_file_name|><|fim▁begin|>/**
* @license
* Copyright Google Inc. All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
/**
* Options for Bazel Builder
*/
export interface Schema {
/**
* Common commands supported by Bazel.
*/
bazelCommand: BazelCommand;
/**
* Target to be executed under Bazel.
*/
targetLabel: string;
/**<|fim▁hole|> watch?: boolean;
}
/**
* Common commands supported by Bazel.
*/
export enum BazelCommand {
Build = 'build',
Run = 'run',
Test = 'test',
}<|fim▁end|> | * If true, watch the filesystem using ibazel.
*/ |
<|file_name|>test_person.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE<|fim▁hole|>#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import d1_test.d1_test_case
import d1_test.instance_generator.person
# ===============================================================================
@d1_test.d1_test_case.reproducible_random_decorator("TestPerson")
class TestPerson(d1_test.d1_test_case.D1TestCase):
def test_1000(self):
"""generate()"""
person_list = [
d1_test.instance_generator.person.generate().toxml("utf-8")
for _ in range(3)
]
self.sample.assert_equals(person_list, "inst_gen_person")<|fim▁end|> | |
<|file_name|>visit.rs<|end_file_name|><|fim▁begin|>use rustc_middle::thir::{self, *};
use rustc_middle::ty::Const;
pub trait Visitor<'a, 'tcx: 'a>: Sized {
fn thir(&self) -> &'a Thir<'tcx>;
fn visit_expr(&mut self, expr: &Expr<'tcx>) {
walk_expr(self, expr);
}
fn visit_stmt(&mut self, stmt: &Stmt<'tcx>) {
walk_stmt(self, stmt);
}
fn visit_block(&mut self, block: &Block) {
walk_block(self, block);
}
fn visit_arm(&mut self, arm: &Arm<'tcx>) {
walk_arm(self, arm);
}
fn visit_pat(&mut self, pat: &Pat<'tcx>) {
walk_pat(self, pat);
}
fn visit_const(&mut self, _cnst: &'tcx Const<'tcx>) {}
}
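// The walk_* free functions implement the default recursion: child nodes are
// stored as arena indices (ExprId/StmtId/ArmId) and must be resolved through
// the owning Thir before being visited.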
pub fn walk_expr<'a, 'tcx: 'a, V: Visitor<'a, 'tcx>>(visitor: &mut V, expr: &Expr<'tcx>) {
use ExprKind::*;
match expr.kind {
Scope { value, region_scope: _, lint_level: _ } => {
visitor.visit_expr(&visitor.thir()[value])
}
Box { value } => visitor.visit_expr(&visitor.thir()[value]),
If { cond, then, else_opt, if_then_scope: _ } => {
visitor.visit_expr(&visitor.thir()[cond]);
visitor.visit_expr(&visitor.thir()[then]);
if let Some(else_expr) = else_opt {
visitor.visit_expr(&visitor.thir()[else_expr]);
}
}
Call { fun, ref args, ty: _, from_hir_call: _, fn_span: _ } => {
visitor.visit_expr(&visitor.thir()[fun]);
for &arg in &**args {
visitor.visit_expr(&visitor.thir()[arg]);
}
}
Deref { arg } => visitor.visit_expr(&visitor.thir()[arg]),
Binary { lhs, rhs, op: _ } | LogicalOp { lhs, rhs, op: _ } => {
visitor.visit_expr(&visitor.thir()[lhs]);
visitor.visit_expr(&visitor.thir()[rhs]);
}
Unary { arg, op: _ } => visitor.visit_expr(&visitor.thir()[arg]),
Cast { source } => visitor.visit_expr(&visitor.thir()[source]),
Use { source } => visitor.visit_expr(&visitor.thir()[source]),
NeverToAny { source } => visitor.visit_expr(&visitor.thir()[source]),
Pointer { source, cast: _ } => visitor.visit_expr(&visitor.thir()[source]),
Let { expr, .. } => {
visitor.visit_expr(&visitor.thir()[expr]);
}
Loop { body } => visitor.visit_expr(&visitor.thir()[body]),
Match { scrutinee, ref arms } => {
visitor.visit_expr(&visitor.thir()[scrutinee]);
for &arm in &**arms {
visitor.visit_arm(&visitor.thir()[arm]);
}
}
Block { ref body } => visitor.visit_block(body),
Assign { lhs, rhs } | AssignOp { lhs, rhs, op: _ } => {
visitor.visit_expr(&visitor.thir()[lhs]);
visitor.visit_expr(&visitor.thir()[rhs]);
}
Field { lhs, name: _ } => visitor.visit_expr(&visitor.thir()[lhs]),
Index { lhs, index } => {
visitor.visit_expr(&visitor.thir()[lhs]);
visitor.visit_expr(&visitor.thir()[index]);
}
VarRef { id: _ } | UpvarRef { closure_def_id: _, var_hir_id: _ } => {}
Borrow { arg, borrow_kind: _ } => visitor.visit_expr(&visitor.thir()[arg]),
AddressOf { arg, mutability: _ } => visitor.visit_expr(&visitor.thir()[arg]),
Break { value, label: _ } => {
if let Some(value) = value {
visitor.visit_expr(&visitor.thir()[value])
}
}
Continue { label: _ } => {}
Return { value } => {
if let Some(value) = value {
visitor.visit_expr(&visitor.thir()[value])
}
}
ConstBlock { value } => visitor.visit_const(value),
Repeat { value, count } => {
visitor.visit_expr(&visitor.thir()[value]);
visitor.visit_const(count);
}
Array { ref fields } | Tuple { ref fields } => {
for &field in &**fields {
visitor.visit_expr(&visitor.thir()[field]);
}
}
Adt(box thir::Adt {
ref fields,
ref base,
adt_def: _,
variant_index: _,
substs: _,
user_ty: _,
}) => {
for field in &**fields {
visitor.visit_expr(&visitor.thir()[field.expr]);
}
if let Some(base) = base {
visitor.visit_expr(&visitor.thir()[base.base]);
}
}
PlaceTypeAscription { source, user_ty: _ } | ValueTypeAscription { source, user_ty: _ } => {
visitor.visit_expr(&visitor.thir()[source])
}
Closure { closure_id: _, substs: _, upvars: _, movability: _, fake_reads: _ } => {}
Literal { literal, user_ty: _, const_id: _ } => visitor.visit_const(literal),
StaticRef { literal, def_id: _ } => visitor.visit_const(literal),
InlineAsm { ref operands, template: _, options: _, line_spans: _ } => {
for op in &**operands {
use InlineAsmOperand::*;
match op {
In { expr, reg: _ }
| Out { expr: Some(expr), reg: _, late: _ }
| InOut { expr, reg: _, late: _ }
| SymFn { expr } => visitor.visit_expr(&visitor.thir()[*expr]),
SplitInOut { in_expr, out_expr, reg: _, late: _ } => {
visitor.visit_expr(&visitor.thir()[*in_expr]);
if let Some(out_expr) = out_expr {
visitor.visit_expr(&visitor.thir()[*out_expr]);
}
}
Out { expr: None, reg: _, late: _ }
| Const { value: _, span: _ }
| SymStatic { def_id: _ } => {}
}
}
}
ThreadLocalRef(_) => {}
LlvmInlineAsm { ref outputs, ref inputs, asm: _ } => {
for &out_expr in &**outputs {
visitor.visit_expr(&visitor.thir()[out_expr]);
}
for &in_expr in &**inputs {
visitor.visit_expr(&visitor.thir()[in_expr]);
}
}
Yield { value } => visitor.visit_expr(&visitor.thir()[value]),
}
}
pub fn walk_stmt<'a, 'tcx: 'a, V: Visitor<'a, 'tcx>>(visitor: &mut V, stmt: &Stmt<'tcx>) {
match &stmt.kind {
StmtKind::Expr { expr, scope: _ } => visitor.visit_expr(&visitor.thir()[*expr]),
StmtKind::Let {
initializer,
remainder_scope: _,
init_scope: _,
ref pattern,
lint_level: _,
} => {
if let Some(init) = initializer {
visitor.visit_expr(&visitor.thir()[*init]);
}
visitor.visit_pat(pattern);
}
}
}
pub fn walk_block<'a, 'tcx: 'a, V: Visitor<'a, 'tcx>>(visitor: &mut V, block: &Block) {
for &stmt in &*block.stmts {
visitor.visit_stmt(&visitor.thir()[stmt]);
}
if let Some(expr) = block.expr {
visitor.visit_expr(&visitor.thir()[expr]);
}
}
pub fn walk_arm<'a, 'tcx: 'a, V: Visitor<'a, 'tcx>>(visitor: &mut V, arm: &Arm<'tcx>) {
match arm.guard {
Some(Guard::If(expr)) => visitor.visit_expr(&visitor.thir()[expr]),
Some(Guard::IfLet(ref pat, expr)) => {
visitor.visit_pat(pat);
visitor.visit_expr(&visitor.thir()[expr]);
}
None => {}
}
visitor.visit_pat(&arm.pattern);
visitor.visit_expr(&visitor.thir()[arm.body]);
}
pub fn walk_pat<'a, 'tcx: 'a, V: Visitor<'a, 'tcx>>(visitor: &mut V, pat: &Pat<'tcx>) {
use PatKind::*;
match pat.kind.as_ref() {
AscribeUserType { subpattern, ascription: _ }
| Deref { subpattern }
| Binding {
subpattern: Some(subpattern),
mutability: _,<|fim▁hole|> mode: _,
var: _,
ty: _,
is_primary: _,
name: _,
} => visitor.visit_pat(&subpattern),
Binding { .. } | Wild => {}
Variant { subpatterns, adt_def: _, substs: _, variant_index: _ } | Leaf { subpatterns } => {
for subpattern in subpatterns {
visitor.visit_pat(&subpattern.pattern);
}
}
Constant { value } => visitor.visit_const(value),
Range(range) => {
visitor.visit_const(range.lo);
visitor.visit_const(range.hi);
}
Slice { prefix, slice, suffix } | Array { prefix, slice, suffix } => {
for subpattern in prefix {
visitor.visit_pat(&subpattern);
}
if let Some(pat) = slice {
visitor.visit_pat(pat);
}
for subpattern in suffix {
visitor.visit_pat(&subpattern);
}
}
Or { pats } => {
for pat in pats {
visitor.visit_pat(&pat);
}
}
};
}<|fim▁end|> | |
<|file_name|>1763.js<|end_file_name|><|fim▁begin|>var __v=[
{
"Id": 3568,
"Panel": 1763,
"Name": "紋理動畫",<|fim▁hole|> "Sort": 0,
"Str": ""
},
{
"Id": 3569,
"Panel": 1763,
"Name": "相關API",
"Sort": 0,
"Str": ""
},
{
"Id": 3570,
"Panel": 1763,
"Name": "Example",
"Sort": 0,
"Str": ""
}
]<|fim▁end|> | |
<|file_name|>util.js<|end_file_name|><|fim▁begin|>import { expect } from 'chai'
import browser from '../../src/util/browser'<|fim▁hole|>describe('util (node)', () => {
describe('browser', () => {
it('is false', () => {
expect(browser).to.be.false
})
})
})<|fim▁end|> | |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># mailstat.utils
# Utilities and functions for the mailstat package
#
# Author: Benjamin Bengfort <[email protected]>
# Created: Sun Dec 29 17:27:38 2013 -0600
#
# Copyright (C) 2013 Bengfort.com
# For license information, see LICENSE.txt<|fim▁hole|>#
# ID: __init__.py [] [email protected] $
"""
Utilities and functions for the mailstat package
"""
##########################################################################
## Imports
##########################################################################<|fim▁end|> | |
<|file_name|>nested.rs<|end_file_name|><|fim▁begin|>// edition:2018
// compile-flags: --crate-version 1.0.0
// @is nested.json "$.crate_version" \"1.0.0\"
// @is - "$.index[*][?(@.name=='nested')].kind" \"module\"
// @is - "$.index[*][?(@.name=='nested')].inner.is_crate" true
// @count - "$.index[*][?(@.name=='nested')].inner.items[*]" 1
// @is nested.json "$.index[*][?(@.name=='l1')].kind" \"module\"
// @is - "$.index[*][?(@.name=='l1')].inner.is_crate" false
// @count - "$.index[*][?(@.name=='l1')].inner.items[*]" 2
pub mod l1 {
// @is nested.json "$.index[*][?(@.name=='l3')].kind" \"module\"
// @is - "$.index[*][?(@.name=='l3')].inner.is_crate" false
// @count - "$.index[*][?(@.name=='l3')].inner.items[*]" 1
// @set l3_id = - "$.index[*][?(@.name=='l3')].id"
// @has - "$.index[*][?(@.name=='l1')].inner.items[*]" $l3_id
pub mod l3 {
// @is nested.json "$.index[*][?(@.name=='L4')].kind" \"struct\"
// @is - "$.index[*][?(@.name=='L4')].inner.struct_type" \"unit\"
// @set l4_id = - "$.index[*][?(@.name=='L4')].id"
// @has - "$.index[*][?(@.name=='l3')].inner.items[*]" $l4_id
pub struct L4;
}
// @is nested.json "$.index[*][?(@.inner.source=='l3::L4')].kind" \"import\"
// @is - "$.index[*][?(@.inner.source=='l3::L4')].inner.glob" false
pub use l3::L4;<|fim▁hole|><|fim▁end|> | } |
<|file_name|>Lambda.java<|end_file_name|><|fim▁begin|>package com.jason.showcase.lambdas;
/**
* Created by Qinjianf on 2016/7/19.
*/
public class Lambda {
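    // Action is assumed to be a functional interface with a single
    // run(String) method, so the System.out::println method reference
    // below satisfies it.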
public void execute(Action action) {<|fim▁hole|> action.run("Hello Lambda!");
}
public void test() {
execute(System.out::println);
}
public static void main(String[] args) {
new Lambda().test();
}
}<|fim▁end|> |