code | repo_name | path | language | license | size
---|---|---|---|---|---|
# Webhooks for external integrations.
from typing import Optional
from django.http import HttpRequest, HttpResponse
from zerver.decorator import api_key_only_webhook_view
from zerver.lib.actions import check_send_private_message
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.models import UserProfile, get_user
@api_key_only_webhook_view('Yo', notify_bot_owner_on_invalid_json=False)
@has_request_variables
def api_yo_app_webhook(request: HttpRequest, user_profile: UserProfile,
email: str = REQ(default=""),
username: str = REQ(default='Yo Bot'),
topic: Optional[str] = REQ(default=None),
user_ip: Optional[str] = REQ(default=None)) -> HttpResponse:
body = f'Yo from {username}'
receiving_user = get_user(email, user_profile.realm)
check_send_private_message(user_profile, request.client, receiving_user, body)
return json_success()
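# Usage sketch (hypothetical values; the api_key, email and username below are
# illustrative, not taken from the Yo API docs): a callback such as
#   POST /api/v1/external/yo?api_key=<bot_key>&email=user@example.com&username=Alice
# would make the view above send the private message "Yo from Alice" to the
# realm user whose address is user@example.com.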
| brainwane/zulip | zerver/webhooks/yo/view.py | Python | apache-2.0 | 1,015 |
"""
This file is part of GASATaD.
GASATaD is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
GASATaD is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GASATaD. If not, see <http://www.gnu.org/licenses/>.
"""
import wx
import wx.lib.agw.floatspin as fs
import os
class AddColumnInterface(wx.Dialog):
spinControlList = []
textControlList = []
sizerList = []
# Lists where the application saves the BitmapButtons
plusList = []
minusList = []
paarList = []
factorsAndValues = {}
def __init__(self, parent, listOfVariables, namesColumns, minimum, maximum):
wx.Dialog.__init__(self, parent, id=wx.ID_ANY, title="Add column", pos=wx.DefaultPosition, size=wx.DefaultSize,
style=wx.DEFAULT_FRAME_STYLE | wx.TAB_TRAVERSAL)
self.SetSizeHints(wx.DefaultSize, wx.DefaultSize)
mainSizer = wx.BoxSizer(wx.VERTICAL)
gbSizer1 = wx.GridBagSizer(0, 0)
gbSizer1.SetFlexibleDirection(wx.BOTH)
gbSizer1.SetNonFlexibleGrowMode(wx.FLEX_GROWMODE_SPECIFIED)
mainSizer.Add(gbSizer1)
sbSizer1 = wx.StaticBoxSizer(wx.StaticBox(self, wx.ID_ANY, u"Source column"), wx.VERTICAL)
fgVarSizer = wx.FlexGridSizer(0, 2, 0, 0)
fgVarSizer.SetFlexibleDirection(wx.BOTH)
fgVarSizer.SetNonFlexibleGrowMode(wx.FLEX_GROWMODE_SPECIFIED)
# Here the RadioButtons are created
for var in listOfVariables:
self.radioBtn = wx.RadioButton(self, wx.ID_ANY, var, wx.DefaultPosition, wx.DefaultSize, 0)
fgVarSizer.Add(self.radioBtn, 0, wx.ALL, 1)
self.Bind(wx.EVT_RADIOBUTTON, self.updateSelectedRadioButton, self.radioBtn)
sbSizer1.Add(fgVarSizer, 1, wx.EXPAND, 5)
gbSizer1.Add(sbSizer1, wx.GBPosition(0, 0), wx.GBSpan(1, 1), wx.ALIGN_RIGHT | wx.EXPAND | wx.ALL, 10)
gbSizer2 = wx.GridBagSizer(0, 0)
gbSizer2.SetFlexibleDirection(wx.BOTH)
gbSizer2.SetNonFlexibleGrowMode(wx.FLEX_GROWMODE_SPECIFIED)
bSizer2 = wx.BoxSizer(wx.HORIZONTAL)
self.staticTextCol = wx.StaticText(self, wx.ID_ANY, u"New column name:", wx.DefaultPosition, wx.DefaultSize, 0)
self.staticTextCol.Wrap(-1)
bSizer2.Add(self.staticTextCol, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
self.textCtrlCol = wx.TextCtrl(self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size(300,-1), 0)
bSizer2.Add(self.textCtrlCol, 0, wx.ALL | wx.EXPAND, 5)
gbSizer2.Add(bSizer2, wx.GBPosition(0, 0), wx.GBSpan(1, 1), wx.ALL | wx.EXPAND, 5)
self.fgSizer9 = wx.FlexGridSizer(0, 1, 0, 0)
self.fgSizer9.SetFlexibleDirection(wx.BOTH)
self.fgSizer9.SetNonFlexibleGrowMode(wx.FLEX_GROWMODE_SPECIFIED)
self.fgSizer10 = wx.FlexGridSizer(0, 8, 0, 0)
self.fgSizer10.SetFlexibleDirection(wx.BOTH)
self.fgSizer10.SetNonFlexibleGrowMode(wx.FLEX_GROWMODE_SPECIFIED)
self.fgSizer9.Add(self.fgSizer10, 0, wx.ALL, 5)
self.m_staticText9 = wx.StaticText(self, wx.ID_ANY, u"From:", wx.DefaultPosition, wx.DefaultSize, 0)
self.m_staticText9.Wrap(-1)
self.fgSizer10.Add(self.m_staticText9, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
self.floatSpinCtrl_1 = fs.FloatSpin(self, wx.ID_ANY, pos=wx.DefaultPosition, size=(140,-1), min_val=None, max_val=None,
increment=0.001, value=0.000, agwStyle=fs.FS_LEFT)
self.floatSpinCtrl_1.SetFormat("%f")
self.floatSpinCtrl_1.SetDigits(3)
self.fgSizer10.Add(self.floatSpinCtrl_1, 0, wx.ALL, 5)
self.m_staticText10 = wx.StaticText(self, wx.ID_ANY, u"to:", wx.DefaultPosition, wx.DefaultSize, 0)
self.m_staticText10.Wrap(-1)
self.fgSizer10.Add(self.m_staticText10, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
self.floatSpinCtrl_2 = fs.FloatSpin(self, wx.ID_ANY, pos=wx.DefaultPosition, size=(140,-1), min_val=None, max_val=None,
increment=0.001, value=0.000, agwStyle=fs.FS_LEFT)
self.floatSpinCtrl_2.SetFormat("%f")
self.floatSpinCtrl_2.SetDigits(3)
self.fgSizer10.Add(self.floatSpinCtrl_2, 0, wx.ALL, 5)
self.staticTextNameVar = wx.StaticText(self, wx.ID_ANY, u"name:", wx.DefaultPosition, wx.DefaultSize, 0)
self.staticTextNameVar.Wrap(-1)
self.fgSizer10.Add(self.staticTextNameVar, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
self.m_textCtrl1 = wx.TextCtrl(self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0,
name="Factor1")
self.fgSizer10.Add(self.m_textCtrl1, 0, wx.ALL | wx.EXPAND, 5)
self.plusBmp = wx.Image(str(os.path.dirname(__file__)) + "/icons/mas.png", wx.BITMAP_TYPE_ANY).ConvertToBitmap()
self.minusBmp = wx.Image(str(os.path.dirname(__file__)) + "/icons/menos.png",
wx.BITMAP_TYPE_ANY).ConvertToBitmap()
self.m_bpButton2 = wx.BitmapButton(self, wx.ID_ANY, self.plusBmp, wx.DefaultPosition, wx.Size(-1, -1),
wx.BU_AUTODRAW)
self.fgSizer10.Add(self.m_bpButton2, 0, wx.ALL, 5)
self.m_bpButton3 = wx.BitmapButton(self, wx.ID_ANY, self.minusBmp, wx.DefaultPosition, wx.DefaultSize,
wx.BU_AUTODRAW)
self.m_bpButton3.Hide()
# List to control the plus and minus buttons
self.plusList.append(self.m_bpButton2)
self.minusList.append(self.m_bpButton3)
self.fgSizer10.Add(self.m_bpButton3, 0, wx.ALL, 5)
gbSizer2.Add(self.fgSizer9, wx.GBPosition(1, 0), wx.GBSpan(1, 1), wx.EXPAND, 5)
fgSizer4 = wx.FlexGridSizer(0, 2, 0, 0)
fgSizer4.AddGrowableCol(1)
fgSizer4.SetFlexibleDirection(wx.HORIZONTAL)
fgSizer4.SetNonFlexibleGrowMode(wx.FLEX_GROWMODE_SPECIFIED)
self.staticTextDefaultValue = wx.StaticText(self, wx.ID_ANY, u"Default value:", wx.DefaultPosition,
wx.DefaultSize, 0)
self.staticTextDefaultValue.Wrap(-1)
fgSizer4.Add(self.staticTextDefaultValue, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
self.textCtrlDefaultVal = wx.TextCtrl(self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0,
ValidatorForFactors(self.spinControlList, self.textControlList,
self.textCtrlCol, namesColumns))
fgSizer4.Add(self.textCtrlDefaultVal, 0, wx.ALL | wx.EXPAND, 5)
gbSizer2.Add(fgSizer4, wx.GBPosition(2, 0), wx.GBSpan(1, 1), wx.ALL | wx.EXPAND, 5)
okay = wx.Button(self, wx.ID_OK)
cancel = wx.Button(self, wx.ID_CANCEL)
btns = wx.StdDialogButtonSizer()
btns.AddButton(okay)
btns.AddButton(cancel)
btns.Realize()
mainSizer.Add(btns, 0, wx.BOTTOM | wx.ALIGN_RIGHT, 10)
gbSizer1.Add(gbSizer2, wx.GBPosition(0, 1), wx.GBSpan(2, 1), wx.EXPAND | wx.LEFT, 5)
self.SetSizer(mainSizer)
self.Layout()
self.Fit()
self.Centre(wx.BOTH)
# List to control sizers
self.sizerList.append(self.fgSizer10)
# Add the spin controls to a list so all of them are tracked and accessible
self.spinControlList.append(self.floatSpinCtrl_1)
self.spinControlList.append(self.floatSpinCtrl_2)
# Text field where the factor name is entered
self.textControlList.append(self.m_textCtrl1)
# Variable to save the selected Radiobutton
self.selectedRadioButton = listOfVariables[0]
# We need an index to keep track of the values of the different numeric spin controls so we can tell them apart
self.indiceSpinCtrl = 1
# Bindings
self.Bind(wx.EVT_BUTTON, self.createNewFactor, self.m_bpButton2)
self.Bind(wx.EVT_BUTTON, self.deleteFactor, self.m_bpButton3)
self.Fit()
self.Show(True)
def updateSelectedRadioButton(self, event):
radioButton = event.GetEventObject()
self.selectedRadioButton = radioButton.GetLabelText()
def createNewFactor(self, event):
# Increase the index so the different spin controls can be distinguished
if self.indiceSpinCtrl == 5:
self.showWarningTooManyIntervals()
else:
self.indiceSpinCtrl += 1
fgSizer = wx.FlexGridSizer(0, 8, 0, 0)
fgSizer.SetFlexibleDirection(wx.BOTH)
fgSizer.SetNonFlexibleGrowMode(wx.FLEX_GROWMODE_SPECIFIED)
self.fgSizer9.Add(fgSizer, 0, wx.ALL, 5)
self.m_staticText9 = wx.StaticText(self, wx.ID_ANY, u"From:", wx.DefaultPosition, wx.DefaultSize, 0)
self.m_staticText9.Wrap(-1)
fgSizer.Add(self.m_staticText9, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
self.floatSpinCtrl_3 = fs.FloatSpin(self, wx.ID_ANY, pos=wx.DefaultPosition, min_val=None, max_val=None,
increment=0.001, size=(140,-1),
value=0.000, agwStyle=fs.FS_LEFT)
self.floatSpinCtrl_3.SetFormat("%f")
self.floatSpinCtrl_3.SetDigits(3)
fgSizer.Add(self.floatSpinCtrl_3, 0, wx.ALL, 5)
self.m_staticText10 = wx.StaticText(self, wx.ID_ANY, u"to:", wx.DefaultPosition, wx.DefaultSize, 0)
self.m_staticText10.Wrap(-1)
fgSizer.Add(self.m_staticText10, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
self.floatSpinCtrl_4 = fs.FloatSpin(self, wx.ID_ANY, pos=wx.DefaultPosition, min_val=None, max_val=None,
increment=0.001, size=(140,-1),
value=0.000, agwStyle=fs.FS_LEFT)
self.floatSpinCtrl_4.SetFormat("%f")
self.floatSpinCtrl_4.SetDigits(3)
fgSizer.Add(self.floatSpinCtrl_4, 0, wx.ALL, 5)
self.staticTextNameVar = wx.StaticText(self, wx.ID_ANY, u"name:", wx.DefaultPosition, wx.DefaultSize, 0)
self.staticTextNameVar.Wrap(-1)
fgSizer.Add(self.staticTextNameVar, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
self.m_textCtrl1 = wx.TextCtrl(self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0)
fgSizer.Add(self.m_textCtrl1, 0, wx.ALL | wx.EXPAND, 5)
self.m_bpButton2 = wx.BitmapButton(self, wx.ID_ANY, self.plusBmp, wx.DefaultPosition, wx.Size(-1, -1),
wx.BU_AUTODRAW)
fgSizer.Add(self.m_bpButton2, 0, wx.ALL, 5)
self.m_bpButton3 = wx.BitmapButton(self, wx.ID_ANY, self.minusBmp, wx.DefaultPosition, wx.DefaultSize,
wx.BU_AUTODRAW)
fgSizer.Add(self.m_bpButton3, 0, wx.ALL, 5)
# Disable the last bitmap buttons before adding the newly created ones
self.plusList[-1].Enable(False)
self.minusList[-1].Enable(False)
# Append the new ones
self.plusList.append(self.m_bpButton2)
self.minusList.append(self.m_bpButton3)
self.Bind(wx.EVT_BUTTON, self.createNewFactor, self.m_bpButton2)
self.Bind(wx.EVT_BUTTON, self.deleteFactor, self.m_bpButton3)
# Add the sizer to the list of sizers
self.sizerList.append(fgSizer)
# Add the spin controls to a list so all created ones are tracked and accessible
self.spinControlList.append(self.floatSpinCtrl_3)
self.spinControlList.append(self.floatSpinCtrl_4)
# Add the text box where the factor's name is entered
self.textControlList.append(self.m_textCtrl1)
self.Fit()
self.Layout()
def deleteFactor(self, event):
if self.indiceSpinCtrl == 1:
self.showWarningNonDeleted()
else:
self.indiceSpinCtrl -= 1
self.sizerList.pop().Clear(True)
self.fgSizer9.Layout()
del self.spinControlList[(len(self.spinControlList) - 2):]
self.textControlList.pop()
# Remove the two bitmap buttons from the list
self.plusList.pop()
self.minusList.pop()
# Re-enable the previous two
self.plusList[-1].Enable()
self.minusList[-1].Enable()
self.Layout()
self.Fit()
'''
This function creates a dictionary where the key is a name and the value is a list of two numbers (example: "Old": (75, 90)).
The name is the label assigned to each row whose value (in the column previously selected in the interface)
falls inside the interval defined by the two values passed as a list.
'''
def createVarWithInterval(self):
self.firstValue = -1
self.secondValue = -1
self.factorsAndValues.clear()
for i in range(len(self.spinControlList)):
# Pair the start and end values of each label so overlaps can be checked later
if i % 2 == 0:
self.firstValue = self.spinControlList[i].GetValue()
self.secondValue = self.spinControlList[i + 1].GetValue()
self.paarList.append([self.firstValue, self.secondValue])
index = 0
for textControl in self.textControlList:
'''
A dictionary is built whose key is the factor name and whose value is the pair
formed by the lower and upper limits of the interval
'''
self.factorsAndValues[textControl.GetValue()] = self.paarList[index]
index += 1
del self.spinControlList[:]
del self.textControlList[:]
del self.paarList[:]
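# Illustration (hypothetical values): two interval rows, 0-50 named "Low" and
# 51-100 named "High", leave factorsAndValues as
# {"Low": [0.0, 50.0], "High": [51.0, 100.0]}.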
def returnFactors(self):
self.createVarWithInterval()
nameOfFactor = self.textCtrlCol.GetValue()
nameRestValues = self.textCtrlDefaultVal.GetValue()
return self.factorsAndValues, self.selectedRadioButton, nameRestValues, nameOfFactor
def showWarningTooManyIntervals(self):
dlg = wx.MessageDialog(self, "You can not create more Intervals", "ERROR", wx.OK | wx.ICON_EXCLAMATION)
if dlg.ShowModal() == wx.ID_OK:
dlg.Destroy()
def showWarningNonDeleted(self):
dlg = wx.MessageDialog(self, "The interval could not be deleted", "ERROR", wx.OK | wx.ICON_EXCLAMATION)
if dlg.ShowModal() == wx.ID_OK:
dlg.Destroy()
class ValidatorForFactors(wx.Validator):
def __init__(self, spinControlList, textControlList, textCtrlNameCol, listNamesCol):
wx.Validator.__init__(self)
self.spinControlList = spinControlList
self.textControlList = textControlList
self.textCtrlNameCol = textCtrlNameCol
self.listNamesCol = listNamesCol
def Clone(self):
return ValidatorForFactors(self.spinControlList, self.textControlList, self.textCtrlNameCol, self.listNamesCol)
def Validate(self, win):
textCtrl = self.GetWindow()
text = textCtrl.GetValue()
existOverlap = False
erroneousInterval = False
emptyField = False
repeatedName = False
sortedPaarList = []
# Check if the column name is already in use
if self.textCtrlNameCol.GetValue() in self.listNamesCol:
repeatedName = True
for i in range(len(self.spinControlList)):
# Pair the start and end values of each label so overlaps can be checked later
if i % 2 == 0:
firstValue = self.spinControlList[i].GetValue()
secondValue = self.spinControlList[i + 1].GetValue()
sortedPaarList.append([firstValue, secondValue])
# Build a sorted list to check whether the intervals overlap
sortedPaarList.sort()
if len(sortedPaarList) != 1:
for i in range(len(sortedPaarList) - 1):
lowLimit, hightLimit = sortedPaarList[i]
lowLimit2, hightLimit2 = sortedPaarList[i + 1]
if hightLimit >= lowLimit2:
existOverlap = True
if (lowLimit > hightLimit) or (lowLimit2 > hightLimit2):
erroneousInterval = True
else:
lowLimit, hightLimit = sortedPaarList[0]
if (lowLimit > hightLimit):
erroneousInterval = True
for textCtrl in self.textControlList:
if not textCtrl.GetValue():
emptyField = True
if not self.textCtrlNameCol.GetValue():
emptyField = True
if not text:
emptyField = True
if repeatedName:
wx.MessageBox("This name already exists! Please, change it", "Repeated Column Name")
emptyField = False
existOverlap = False
erroneousInterval = False
repeatedName = False
sortedPaarList = []
return False
elif emptyField:
wx.MessageBox("You must fill in all the fields", "Empty Field")
emptyField = False
existOverlap = False
erroneousInterval = False
repeatedName = False
sortedPaarList = []
return False
elif existOverlap:
wx.MessageBox("OverLap in one of the SpinControls", "OverLap")
emptyField = False
existOverlap = False
erroneousInterval = False
repeatedName = False
sortedPaarList = []
return False
elif erroneousInterval:
wx.MessageBox("The value of the FROM item have to be lower or equal than the TO item", "ERROR")
emptyField = False
existOverlap = False
erroneousInterval = False
repeatedName = False
sortedPaarList = []
return False
else:
return True
def TransferToWindow(self):
return True
def TransferFromWindow(self):
return True
| milegroup/gasatad | AddColumnInterface.py | Python | gpl-3.0 | 18,784 |
"""
calculate a p-value of a region using the Stouffer-Liptak method or the
z-score method.
"""
from __future__ import print_function
import argparse
import sys
import numpy as np
import toolshed as ts
from collections import defaultdict
from interlap import InterLap
from _common import bediter, get_col_num
from itertools import chain, groupby
from operator import itemgetter
from slk import gen_sigma_matrix
from acf import acf
from stouffer_liptak import stouffer_liptak, z_score_combine
from scipy.stats import norm
from numpy.linalg import cholesky as chol
qnorm = norm.ppf
pnorm = norm.cdf
def gen_correlated(sigma, n, observed=None):
"""
generate autocorrelated data according to the matrix
sigma. if observed is None, then data will be sampled from
the uniform distribution. Otherwise, it will be sampled
from observed, which should contain *all* observed
p-values.
"""
C = np.matrix(chol(sigma))
if observed is None:
X = np.random.uniform(0, 1, size=(n, sigma.shape[0]))
else:
assert n * sigma.shape[0] < observed.shape[0]
idxs = np.random.random_integers(0, len(observed) - 1,
size=sigma.shape[0] * n)
X = observed[idxs].reshape((n, sigma.shape[0]))
Q = np.matrix(qnorm(X))
for row in np.array(1 - norm.sf((Q * C).T)).T:
yield row
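# Usage sketch (hypothetical sigma): each yielded row is a vector of
# sigma.shape[0] correlated p-values in (0, 1).
#
#   sigma = np.array([[1.0, 0.5, 0.25],
#                     [0.5, 1.0, 0.5],
#                     [0.25, 0.5, 1.0]])
#   for row in gen_correlated(sigma, 2):
#       print(row)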
def sl_sim(sigma, ps, nsims, sample_distribution=None):
N = 0
print("nsims:", nsims, file=sys.stderr)
w0 = stouffer_liptak(ps, sigma)["p"]
# TODO parallelize here.
for i in range(10):
for prow in gen_correlated(sigma, nsims // 10, sample_distribution):
s = stouffer_liptak(prow, sigma)
if not s["OK"]: 1/0
if s["p"] <= w0: N += 1
return N / float(nsims)
def run(args):
col_num = get_col_num(args.c)
# order in results is slk, uniform, sample
for region_line, slk, slk_sidak, sim_p in region_p(args.pvals, args.regions,
col_num, args.step, z=True):
#if sim_p != "NA":
# sim_p = "%.4g" % (sim_p)
print( "%s\t%.4g\t%.4g" % (region_line, slk, slk_sidak))
def _gen_acf(region_info, fpvals, col_num, step):
# calculate the ACF as far out as needed...
# keys of region_info are (chrom, start, end)
max_len = max(int(r[2]) - int(r[1]) for r in region_info)
print("# calculating ACF out to: %i" % max_len, file=sys.stderr)
lags = list(range(1, max_len, step))
if len(lags) == 0:
lags.append(max_len)
if lags[-1] < max_len: lags.append(lags[-1] + step)
if len(lags) > 20:
repr_lags = "[" + ", ".join(map(str, lags[1:4])) + \
" ... " + \
", ".join(map(str, lags[-5:])) + "]"
else:
repr_lags = str(lags)
print("# with %-2i lags: %s" \
% (len(lags), repr_lags), file=sys.stderr)
if len(lags) > 100:
print("# !! this could take a looong time", file=sys.stderr)
print("# !!!! consider using a larger step size (-s)", file=sys.stderr)
acfs = acf(fpvals, lags, col_num, simple=True)
print("# Done with one-time ACF calculation", file=sys.stderr)
return acfs
def get_total_coverage(fpvals, col_num, step, out_val):
"""
Calculate total bases of coverage in `fpvals`.
Used for the sidak correction
"""
total_coverage = 0
for key, chrom_iter in groupby(bediter(fpvals, col_num),
itemgetter('chrom')):
bases = set([])
for feat in chrom_iter:
s, e = feat['start'], feat['end']
if s == e: e += 1
#e = max(e, s + step)
bases.update(range(s, e))
total_coverage += len(bases)
out_val.value = total_coverage
def _get_total_coverage(fpvals, col_num, step):
from multiprocessing import Process, Value
val = Value('f')
p = Process(target=get_total_coverage, args=(fpvals, col_num, step, val))
p.start()
return p, val
def sidak(p, region_length, total_coverage, message=[False]):
"""
see: https://github.com/brentp/combined-pvalues/issues/2
"""
if region_length == 0:
region_length = 1
if not message[0]:
message[0] = True
sys.stderr.write(""""warning: 0-length region found.
does input have 0-length intervals? using length of 1 and not reporting
further 0-length intervals""")
# use 1.1 as heuristic to account for limit in available regions
# of a given size as the region_length increases
# TODO: base that on the actual number of regions of this length
# that could be seen based on the distance constraint.
k = total_coverage / (np.float64(region_length)**1.0)
if k < 1: k = total_coverage
p_sidak = 1 - (1 - p)**k
if p_sidak == 0:
assert p < 1e-16, (p, k, total_coverage, region_length)
p_sidak = (1 - (1 - 1e-16)**k) / (p / 1e-16)
p_sidak = min(p_sidak, p * k)
# print "bonferroni:", min(p * k, 1)
return min(p_sidak, 1)
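# Worked example (hypothetical numbers): with p=0.01, region_length=1000 and
# total_coverage=1e6, k = 1e6 / 1000 = 1000 and
# p_sidak = 1 - (1 - 0.01)**1000 ~= 0.99996, which is below both p * k = 10
# and the final cap of 1.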
def _get_ps_in_regions(tree, fpvals, col_num):
"""
find the pvalues associated with each region
"""
region_info = defaultdict(list)
for row in bediter(fpvals, col_num):
for region in tree[row['chrom']].find((row['start'], row['end'])):
region_len = max(1, region[1] - region[0])
region_tup = tuple(region[-1])
region_info[region_tup].append(row)
assert sum(len(v) for v in tree.values()) >= len(region_info)
if sum(len(v) for v in tree.values()) > len(region_info):
sys.stderr.write("# note: not all regions contained measurements\n")
return region_info
def read_regions(fregions):
tree = defaultdict(InterLap)
for i, toks in enumerate(ts.reader(fregions, header=False)):
if i == 0 and not (toks[1] + toks[2]).isdigit(): continue
tree[toks[0]].add((int(toks[1]), int(toks[2]), toks))
sys.stderr.write("# read %i regions from %s\n" \
% (sum(len(v) for v in tree.values()), fregions))
return tree
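# Example (hypothetical BED line): a row "chr1 100 200 ..." is stored as
# (100, 200, toks) under tree["chr1"], so the per-chromosome InterLap can be
# queried for overlaps in _get_ps_in_regions.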
def region_p(fpvals, fregions, col_num, step, z=True):
# just use 2 for col_num, but don't need the p from regions.
tree = read_regions(fregions)
process, total_coverage_sync = _get_total_coverage(fpvals, col_num, step)
region_info = _get_ps_in_regions(tree, fpvals, col_num)
acfs = _gen_acf(region_info, (fpvals,), col_num, step)
process.join()
total_coverage = total_coverage_sync.value
# regions first and then create ACF for the longest one.
print("%i bases used as coverage for sidak correction" % \
(total_coverage), file=sys.stderr)
sample_distribution = np.array([b["p"] for b in bediter(fpvals,
col_num)])
combine = z_score_combine if z else stouffer_liptak
for region, prows in region_info.items():
# gen_sigma expects a list of bed dicts.
sigma = gen_sigma_matrix(prows, acfs)
ps = np.array([prow["p"] for prow in prows])
if ps.shape[0] == 0:
print("bad region", region, file=sys.stderr)
continue
# calculate the SLK for the region.
region_slk = combine(ps, sigma)
if not region_slk["OK"]:
print("problem with:", region_slk, ps, file=sys.stderr)
slk_p = region_slk["p"]
sidak_slk_p = sidak(slk_p, int(region[2]) - int(region[1]), total_coverage)
result = ["\t".join(region), slk_p, sidak_slk_p, "NA"]
yield result
def main():
p = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
p.add_argument("-p", dest="pvals", help="BED containing all the p values"
" used to generate `regions`")
p.add_argument("-r", dest="regions", help="BED containing all the regions")
p.add_argument("-s", "--step", dest="step", type=int, default=50,
help="step size for acf calculation. should be the same "
" value as the step sent to -d arg for acf")
p.add_argument("-c", dest="c", help="column number containing the p-value"
" of interest", type=str, default=-1)
p.add_argument("-z", dest="z", help="use z-score correction",
action="store_true")
args = p.parse_args()
if not (args.regions and args.pvals):
import sys
sys.exit(not p.print_help())
header = next(ts.nopen(args.regions))
if header.startswith("#") or (not header.split("\t")[2].isdigit()):
print("%s\tslk_p\tslk_sidak_p" % (header.rstrip("\r\n"),))
header = ts.header(args.pvals)
if args.c in header:
args.c = header.index(args.c) + 1
else:
args.c = int(args.c)
return run(args)
if __name__ == "__main__":
import doctest
if doctest.testmod(optionflags=doctest.ELLIPSIS |\
doctest.NORMALIZE_WHITESPACE).failed == 0:
main()
| brentp/combined-pvalues | cpv/region_p.py | Python | mit | 8,973 |
""" @package antlr3.dottreegenerator
@brief ANTLR3 runtime package, tree module
This module contains all support classes for AST construction and tree parsers.
"""
# begin[licence]
#
# [The "BSD licence"]
# Copyright (c) 2005-2008 Terence Parr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# end[licence]
# lots of docstrings are missing, don't complain for now...
# pylint: disable-msg=C0111
from treewizard import TreeWizard
try:
from antlr3.dottreegen import toDOT
except ImportError as exc:
def toDOT(*args, **kwargs):
raise exc
| ychen820/microblog | y/google-cloud-sdk/platform/google_appengine/lib/antlr3/antlr3/extras.py | Python | bsd-3-clause | 1,913 |
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Installs an Android application, possibly in an incremental way."""
import collections
import hashlib
import logging
import os
import re
import shutil
import subprocess
import sys
import tempfile
import time
import zipfile
from third_party.py import gflags
from third_party.py.concurrent import futures
gflags.DEFINE_string("split_main_apk", None, "The main APK for split install")
gflags.DEFINE_multistring("split_apk", [], "Split APKs to install")
gflags.DEFINE_string("dexmanifest", None, "The .dex manifest")
gflags.DEFINE_multistring("native_lib", None, "Native libraries to install")
gflags.DEFINE_string("resource_apk", None, "The resource .apk")
gflags.DEFINE_string("apk", None, "The app .apk. If not specified, "
"do incremental deployment")
gflags.DEFINE_string("adb", None, "ADB to use")
gflags.DEFINE_string("stub_datafile", None, "The stub data file")
gflags.DEFINE_string("output_marker", None, "The output marker file")
gflags.DEFINE_multistring("extra_adb_arg", [], "Extra arguments to adb")
gflags.DEFINE_string("execroot", ".", "The exec root")
gflags.DEFINE_integer("adb_jobs", 2,
"The number of instances of adb to use in parallel to "
"update files on the device",
lower_bound=1)
gflags.DEFINE_enum("start", "no", ["no", "cold", "warm"], "Whether/how to "
"start the app after installing it. 'cold' and 'warm' will "
"both cause the app to be started, 'warm' will start it "
"with previously saved application state.")
gflags.DEFINE_boolean("start_app", False, "Deprecated, use 'start'.")
gflags.DEFINE_string("user_home_dir", None, "Path to the user's home directory")
gflags.DEFINE_string("flagfile", None,
"Path to a file to read additional flags from")
gflags.DEFINE_string("verbosity", None, "Logging verbosity")
FLAGS = gflags.FLAGS
DEVICE_DIRECTORY = "/data/local/tmp/incrementaldeployment"
# Some devices support ABIs other than those reported by getprop. In this case,
# if the most specific ABI is not available in the .apk, we push the more
# general ones.
COMPATIBLE_ABIS = {
"armeabi-v7a": ["armeabi"],
"arm64-v8a": ["armeabi-v7a", "armeabi"]
}
class AdbError(Exception):
"""An exception class signaling an error in an adb invocation."""
def __init__(self, args, returncode, stdout, stderr):
self.args = args
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
details = "\n".join([
"adb command: %s" % args,
"return code: %s" % returncode,
"stdout: %s" % stdout,
"stderr: %s" % stderr,
])
super(AdbError, self).__init__(details)
class DeviceNotFoundError(Exception):
"""Raised when the device could not be found."""
class MultipleDevicesError(Exception):
"""Raised when > 1 device is attached and no device serial was given."""
@staticmethod
def CheckError(s):
return re.search("more than one (device and emulator|device|emulator)", s)
class DeviceUnauthorizedError(Exception):
"""Raised when the local machine is not authorized to the device."""
class TimestampException(Exception):
"""Raised when there is a problem with timestamp reading/writing."""
class Adb(object):
"""A class to handle interaction with adb."""
def __init__(self, adb_path, temp_dir, adb_jobs, user_home_dir):
self._adb_path = adb_path
self._temp_dir = temp_dir
self._user_home_dir = user_home_dir
self._file_counter = 1
self._executor = futures.ThreadPoolExecutor(max_workers=adb_jobs)
def _Exec(self, adb_args):
"""Executes the given adb command + args."""
args = [self._adb_path] + FLAGS.extra_adb_arg + adb_args
# TODO(ahumesky): Because multiple instances of adb are executed in
# parallel, these debug logging lines will get interleaved.
logging.debug("Executing: %s", " ".join(args))
# adb sometimes requires the user's home directory to access things in
# $HOME/.android (e.g. keys to authorize with the device). To avoid any
# potential problems with python picking up things in the user's home
# directory, HOME is not set in the environment around python and is instead
# passed explicitly as a flag.
env = {}
if self._user_home_dir:
env["HOME"] = self._user_home_dir
adb = subprocess.Popen(
args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env)
stdout, stderr = adb.communicate()
stdout = stdout.strip()
stderr = stderr.strip()
logging.debug("adb ret: %s", adb.returncode)
logging.debug("adb out: %s", stdout)
logging.debug("adb err: %s", stderr)
# Check these first so that the more specific error gets raised instead of
# the more generic AdbError.
if "device not found" in stderr:
raise DeviceNotFoundError()
elif "device unauthorized" in stderr:
raise DeviceUnauthorizedError()
elif MultipleDevicesError.CheckError(stderr):
# The error messages are from adb's transport.c, but something adds
# "error: " to the beginning, so take it off so that we don't end up
# printing "Error: error: ..."
raise MultipleDevicesError(re.sub("^error: ", "", stderr))
if adb.returncode != 0:
raise AdbError(args, adb.returncode, stdout, stderr)
return adb.returncode, stdout, stderr, args
def _ExecParallel(self, adb_args):
return self._executor.submit(self._Exec, adb_args)
def _CreateLocalFile(self):
"""Returns a path to a temporary local file in the temp directory."""
local = os.path.join(self._temp_dir, "adbfile_%d" % self._file_counter)
self._file_counter += 1
return local
def GetInstallTime(self, package):
"""Get the installation time of a package."""
_, stdout, _, _ = self._Shell("dumpsys package %s" % package)
match = re.search("firstInstallTime=(.*)$", stdout, re.MULTILINE)
if match:
return match.group(1)
else:
raise TimestampException(
"Package '%s' is not installed on the device. At least one "
"non-incremental 'mobile-install' must precede incremental "
"installs." % package)
def GetAbi(self):
"""Returns the ABI the device supports."""
_, stdout, _, _ = self._Shell("getprop ro.product.cpu.abi")
return stdout
def Push(self, local, remote):
"""Invoke 'adb push' in parallel."""
return self._ExecParallel(["push", local, remote])
def PushString(self, contents, remote):
"""Push a given string to a given path on the device in parallel."""
local = self._CreateLocalFile()
with file(local, "w") as f:
f.write(contents)
return self.Push(local, remote)
def Pull(self, remote):
"""Invoke 'adb pull'.
Args:
remote: The path to the remote file to pull.
Returns:
The contents of a file or None if the file didn't exist.
"""
local = self._CreateLocalFile()
try:
self._Exec(["pull", remote, local])
with file(local) as f:
return f.read()
except (AdbError, IOError):
return None
def InstallMultiple(self, apk, pkg=None):
"""Invoke 'adb install-multiple'."""
pkg_args = ["-p", pkg] if pkg else []
ret, stdout, stderr, args = self._Exec(
["install-multiple", "-r"] + pkg_args + [apk])
if "Success" not in stderr and "Success" not in stdout:
raise AdbError(args, ret, stdout, stderr)
def Install(self, apk):
"""Invoke 'adb install'."""
ret, stdout, stderr, args = self._Exec(["install", "-r", apk])
# adb install could fail with a message on stdout like this:
#
# pkg: /data/local/tmp/Gmail_dev_sharded_incremental.apk
# Failure [INSTALL_PARSE_FAILED_INCONSISTENT_CERTIFICATES]
#
# and yet it will still have a return code of 0. At least for the install
# command, it will print "Success" if it succeeded, so check for that in
# standard out instead of relying on the return code.
if "Success" not in stderr and "Success" not in stdout:
raise AdbError(args, ret, stdout, stderr)
def Uninstall(self, pkg):
"""Invoke 'adb uninstall'."""
self._Exec(["uninstall", pkg])
# No error checking. If this fails, we assume that the app was not installed
# in the first place.
def Delete(self, remote):
"""Delete the given file (or directory) on the device."""
self.DeleteMultiple([remote])
def DeleteMultiple(self, remote_files):
"""Delete the given files (or directories) on the device."""
files_str = " ".join(remote_files)
if files_str:
self._Shell("rm -fr %s" % files_str)
def Mkdir(self, d):
"""Invokes mkdir with the specified directory on the device."""
self._Shell("mkdir -p %s" % d)
def StopApp(self, package):
"""Force stops the app with the given package."""
self._Shell("am force-stop %s" % package)
def StopAppAndSaveState(self, package):
"""Stops the app with the given package, saving state for the next run."""
# 'am kill' will only kill processes in the background, so we must make sure
# our process is in the background first. We accomplish this by bringing up
# the app switcher.
self._Shell("input keyevent KEYCODE_APP_SWITCH")
self._Shell("am kill %s" % package)
def StartApp(self, package):
"""Starts the app with the given package."""
self._Shell("monkey -p %s -c android.intent.category.LAUNCHER 1" % package)
def _Shell(self, cmd):
"""Invoke 'adb shell'."""
return self._Exec(["shell", cmd])
ManifestEntry = collections.namedtuple(
"ManifestEntry", ["input_file", "zippath", "installpath", "sha256"])
def ParseManifest(contents):
"""Parses a dexmanifest file.
Args:
contents: the contents of the manifest file to be parsed.
Returns:
A dict of install path -> ManifestEntry.
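Example (hypothetical line): the manifest line
"out/dexes.zip classes.dex dex1 abc123" parses to
{"dex1": ManifestEntry("out/dexes.zip", "classes.dex", "dex1", "abc123")}.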
"""
result = {}
for l in contents.split("\n"):
entry = ManifestEntry(*(l.strip().split(" ")))
result[entry.installpath] = entry
return result
def GetAppPackage(stub_datafile):
"""Returns the app package specified in a stub data file."""
with file(stub_datafile) as f:
return f.readlines()[1].strip()
def UploadDexes(adb, execroot, app_dir, temp_dir, dexmanifest, full_install):
"""Uploads dexes to the device so that the state.
Does the minimum amount of work necessary to make the state of the device
consistent with what was built.
Args:
adb: the Adb instance representing the device to install to
execroot: the execroot
app_dir: the directory things should be installed under on the device
temp_dir: a local temporary directory
dexmanifest: contents of the dex manifest
full_install: whether to do a full install
Returns:
None.
"""
# Fetch the manifest on the device
dex_dir = os.path.join(app_dir, "dex")
adb.Mkdir(dex_dir)
old_manifest = None
if not full_install:
logging.info("Fetching dex manifest from device...")
old_manifest_contents = adb.Pull("%s/manifest" % dex_dir)
if old_manifest_contents:
old_manifest = ParseManifest(old_manifest_contents)
else:
logging.info("Dex manifest not found on device")
if old_manifest is None:
# If the manifest is not found, maybe a previous installation attempt
# was interrupted. Wipe the slate clean. Do this also in case we do a full
# installation.
old_manifest = {}
adb.Delete("%s/*" % dex_dir)
new_manifest = ParseManifest(dexmanifest)
dexes_to_delete = set(old_manifest) - set(new_manifest)
# Figure out which dexes to upload: those that are present in the new manifest
# but not in the old one and those whose checksum was changed
common_dexes = set(new_manifest).intersection(old_manifest)
dexes_to_upload = set(d for d in common_dexes
if new_manifest[d].sha256 != old_manifest[d].sha256)
dexes_to_upload.update(set(new_manifest) - set(old_manifest))
if not dexes_to_delete and not dexes_to_upload:
# If we have nothing to do, don't bother removing and rewriting the manifest
logging.info("Application dexes up-to-date")
return
# Delete the manifest so that we know how to get back to a consistent state
# if we are interrupted.
adb.Delete("%s/manifest" % dex_dir)
# Tuple of (local, remote) files to push to the device.
files_to_push = []
# Sort dexes to be uploaded by the zip file they are in so that we need to
# open each zip only once.
dexzips_in_upload = set(new_manifest[d].input_file for d in dexes_to_upload
if new_manifest[d].zippath != "-")
for i, dexzip_name in enumerate(dexzips_in_upload):
zip_dexes = [
d for d in dexes_to_upload if new_manifest[d].input_file == dexzip_name]
dexzip_tempdir = os.path.join(temp_dir, "dex", str(i))
with zipfile.ZipFile(os.path.join(execroot, dexzip_name)) as dexzip:
for dex in zip_dexes:
zippath = new_manifest[dex].zippath
dexzip.extract(zippath, dexzip_tempdir)
files_to_push.append(
(os.path.join(dexzip_tempdir, zippath), "%s/%s" % (dex_dir, dex)))
# Now gather all the dexes that are not within a .zip file.
dexes_to_upload = set(
d for d in dexes_to_upload if new_manifest[d].zippath == "-")
for dex in dexes_to_upload:
files_to_push.append(
(new_manifest[dex].input_file, "%s/%s" % (dex_dir, dex)))
num_files = len(dexes_to_delete) + len(files_to_push)
logging.info("Updating %d dex%s...", num_files, "es" if num_files > 1 else "")
# Delete the dexes that are not in the new manifest
adb.DeleteMultiple(os.path.join(dex_dir, dex) for dex in dexes_to_delete)
# Upload all the files.
upload_walltime_start = time.time()
fs = [adb.Push(local, remote) for local, remote in files_to_push]
done, not_done = futures.wait(fs, return_when=futures.FIRST_EXCEPTION)
upload_walltime = time.time() - upload_walltime_start
logging.debug("Dex upload walltime: %s seconds", upload_walltime)
# If there is anything in not_done, then some adb call failed and we
# can cancel the rest.
if not_done:
for f in not_done:
f.cancel()
# If any adb call resulted in an exception, re-raise it.
for f in done:
f.result()
# If no dex upload failed, upload the manifest. If any upload failed, the
# exception should have been re-raised above.
# Call result() to raise the exception if there was one.
adb.PushString(dexmanifest, "%s/manifest" % dex_dir).result()
def Checksum(filename):
"""Compute the SHA-256 checksum of a file."""
h = hashlib.sha256()
with file(filename, "r") as f:
while True:
data = f.read(65536)
if not data:
break
h.update(data)
return h.hexdigest()
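# Example (hypothetical path): Checksum("out/app.apk") returns the hex-encoded
# SHA-256 digest of the file, computed in 64 KiB chunks.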
def UploadResources(adb, resource_apk, app_dir):
"""Uploads resources to the device.
Args:
adb: The Adb instance representing the device to install to.
resource_apk: Path to the resource apk.
app_dir: The directory things should be installed under on the device.
Returns:
None.
"""
# Compute the checksum of the new resources file
new_checksum = Checksum(resource_apk)
# Fetch the checksum of the resources file on the device, if it exists
device_checksum_file = "%s/%s" % (app_dir, "resources_checksum")
old_checksum = adb.Pull(device_checksum_file)
if old_checksum == new_checksum:
logging.info("Application resources up-to-date")
return
logging.info("Updating application resources...")
# Remove the checksum file on the device so that if the transfer is
# interrupted, we know how to get the device back to a consistent state.
adb.Delete(device_checksum_file)
adb.Push(resource_apk, "%s/%s" % (app_dir, "resources.ap_")).result()
# Write the new checksum to the device.
adb.PushString(new_checksum, device_checksum_file).result()
def ConvertNativeLibs(args):
"""Converts the --native_libs command line argument to an arch -> libs map."""
native_libs = {}
if args is not None:
for native_lib in args:
abi, path = native_lib.split(":")
if abi not in native_libs:
native_libs[abi] = set()
native_libs[abi].add(path)
return native_libs
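# Example (hypothetical flag values): ["armeabi:libs/a.so", "armeabi:libs/b.so"]
# becomes {"armeabi": {"libs/a.so", "libs/b.so"}}.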
def FindAbi(device_abi, app_abis):
"""Selects which ABI native libs should be installed for."""
if device_abi in app_abis:
return device_abi
if device_abi in COMPATIBLE_ABIS:
for abi in COMPATIBLE_ABIS[device_abi]:
if abi in app_abis:
logging.warn("App does not have native libs for ABI '%s'. Using ABI "
"'%s'.", device_abi, abi)
return abi
logging.warn("No native libs for device ABI '%s'. App has native libs for "
"ABIs: %s", device_abi, ", ".join(app_abis))
return None
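# Example (hypothetical inputs): FindAbi("arm64-v8a", ["armeabi-v7a"]) falls
# back to "armeabi-v7a" via COMPATIBLE_ABIS, while FindAbi("x86", ["armeabi"])
# has no compatible entry and returns None.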
def UploadNativeLibs(adb, native_lib_args, app_dir, full_install):
"""Uploads native libraries to the device."""
native_libs = ConvertNativeLibs(native_lib_args)
libs = set()
if native_libs:
abi = FindAbi(adb.GetAbi(), native_libs.keys())
if abi:
libs = native_libs[abi]
basename_to_path = {}
install_checksums = {}
for lib in sorted(libs):
install_checksums[os.path.basename(lib)] = Checksum(lib)
basename_to_path[os.path.basename(lib)] = lib
device_manifest = None
if not full_install:
device_manifest = adb.Pull("%s/native/native_manifest" % app_dir)
device_checksums = {}
if device_manifest is None:
# If we couldn't fetch the device manifest or if this is a non-incremental
# install, wipe the slate clean
adb.Delete("%s/native" % app_dir)
else:
# Otherwise, parse the manifest. Note that this branch is also taken if the
# manifest is empty.
for manifest_line in device_manifest.split("\n"):
if manifest_line:
name, checksum = manifest_line.split(" ")
device_checksums[name] = checksum
libs_to_delete = set(device_checksums) - set(install_checksums)
libs_to_upload = set(install_checksums) - set(device_checksums)
common_libs = set(install_checksums).intersection(set(device_checksums))
libs_to_upload.update([l for l in common_libs
if install_checksums[l] != device_checksums[l]])
libs_to_push = [(basename_to_path[lib], "%s/native/%s" % (app_dir, lib))
for lib in libs_to_upload]
if not libs_to_delete and not libs_to_push and device_manifest is not None:
logging.info("Native libs up-to-date")
return
num_files = len(libs_to_delete) + len(libs_to_push)
logging.info("Updating %d native lib%s...",
num_files, "s" if num_files != 1 else "")
adb.Delete("%s/native/native_manifest" % app_dir)
if libs_to_delete:
adb.DeleteMultiple([
"%s/native/%s" % (app_dir, lib) for lib in libs_to_delete])
upload_walltime_start = time.time()
fs = [adb.Push(local, remote) for local, remote in libs_to_push]
done, not_done = futures.wait(fs, return_when=futures.FIRST_EXCEPTION)
upload_walltime = time.time() - upload_walltime_start
logging.debug("Native library upload walltime: %s seconds", upload_walltime)
# If there is anything in not_done, then some adb call failed and we
# can cancel the rest.
if not_done:
for f in not_done:
f.cancel()
# If any adb call resulted in an exception, re-raise it.
for f in done:
f.result()
install_manifest = [
name + " " + checksum for name, checksum in install_checksums.iteritems()]
adb.PushString("\n".join(install_manifest),
"%s/native/native_manifest" % app_dir).result()
def VerifyInstallTimestamp(adb, app_package):
"""Verifies that the app is unchanged since the last mobile-install."""
expected_timestamp = adb.Pull("%s/%s/install_timestamp" % (
DEVICE_DIRECTORY, app_package))
if not expected_timestamp:
raise TimestampException(
"Cannot verify last mobile install. At least one non-incremental "
"'mobile-install' must precede incremental installs")
actual_timestamp = adb.GetInstallTime(app_package)
if actual_timestamp != expected_timestamp:
raise TimestampException("Installed app '%s' has an unexpected timestamp. "
"Did you last install the app in a way other than "
"'mobile-install'?" % app_package)
def SplitIncrementalInstall(adb, app_package, execroot, split_main_apk,
split_apks):
"""Does incremental installation using split packages."""
app_dir = os.path.join(DEVICE_DIRECTORY, app_package)
device_manifest_path = "%s/split_manifest" % app_dir
device_manifest = adb.Pull(device_manifest_path)
expected_timestamp = adb.Pull("%s/install_timestamp" % app_dir)
actual_timestamp = adb.GetInstallTime(app_package)
device_checksums = {}
if device_manifest is not None:
for manifest_line in device_manifest.split("\n"):
if manifest_line:
name, checksum = manifest_line.split(" ")
device_checksums[name] = checksum
install_checksums = {}
install_checksums["__MAIN__"] = Checksum(
os.path.join(execroot, split_main_apk))
for apk in split_apks:
install_checksums[apk] = Checksum(os.path.join(execroot, apk))
reinstall_main = False
if (device_manifest is None or actual_timestamp is None or
actual_timestamp != expected_timestamp or
install_checksums["__MAIN__"] != device_checksums["__MAIN__"] or
set(device_checksums.keys()) != set(install_checksums.keys())):
# The main app is not up to date or not present or something happened
# with the on-device manifest. Start from scratch. Notably, we cannot
# uninstall a split package, so if the set of packages changes, we also
# need to do a full reinstall.
reinstall_main = True
device_checksums = {}
apks_to_update = [
apk for apk in split_apks if
apk not in device_checksums or
device_checksums[apk] != install_checksums[apk]]
if not apks_to_update and not reinstall_main:
# Nothing to do
return
# Delete the device manifest so that if something goes wrong, we do a full
# reinstall next time
adb.Delete(device_manifest_path)
if reinstall_main:
logging.info("Installing main APK...")
adb.Uninstall(app_package)
adb.InstallMultiple(os.path.join(execroot, split_main_apk))
adb.PushString(
adb.GetInstallTime(app_package),
"%s/install_timestamp" % app_dir).result()
logging.info("Reinstalling %s APKs...", len(apks_to_update))
for apk in apks_to_update:
adb.InstallMultiple(os.path.join(execroot, apk), app_package)
install_manifest = [
name + " " + checksum for name, checksum in install_checksums.iteritems()]
adb.PushString("\n".join(install_manifest),
"%s/split_manifest" % app_dir).result()
def IncrementalInstall(adb_path, execroot, stub_datafile, output_marker,
adb_jobs, start_type, dexmanifest=None, apk=None,
native_libs=None, resource_apk=None,
split_main_apk=None, split_apks=None,
user_home_dir=None):
"""Performs an incremental install.
Args:
adb_path: Path to the adb executable.
execroot: Exec root.
stub_datafile: The stub datafile containing the app's package name.
output_marker: Path to the output marker file.
adb_jobs: The number of instances of adb to use in parallel.
start_type: A string describing whether/how to start the app after
installing it. Can be 'no', 'cold', or 'warm'.
dexmanifest: Path to the .dex manifest file.
apk: Path to the .apk file. May be None to perform an incremental install.
native_libs: Native libraries to install.
resource_apk: Path to the apk containing the app's resources.
split_main_apk: the split main .apk if split installation is desired.
split_apks: the list of split .apks to be installed.
user_home_dir: Path to the user's home directory.
"""
temp_dir = tempfile.mkdtemp()
try:
adb = Adb(adb_path, temp_dir, adb_jobs, user_home_dir)
app_package = GetAppPackage(os.path.join(execroot, stub_datafile))
app_dir = os.path.join(DEVICE_DIRECTORY, app_package)
if split_main_apk:
SplitIncrementalInstall(adb, app_package, execroot, split_main_apk,
split_apks)
else:
if not apk:
VerifyInstallTimestamp(adb, app_package)
with file(os.path.join(execroot, dexmanifest)) as f:
dexmanifest = f.read()
UploadDexes(adb, execroot, app_dir, temp_dir, dexmanifest, bool(apk))
# TODO(ahumesky): UploadDexes waits for all the dexes to be uploaded, and
# then UploadResources is called. We could instead enqueue everything
# onto the threadpool so that uploading resources happens sooner.
UploadResources(adb, os.path.join(execroot, resource_apk), app_dir)
UploadNativeLibs(adb, native_libs, app_dir, bool(apk))
if apk:
apk_path = os.path.join(execroot, apk)
adb.Install(apk_path)
future = adb.PushString(
adb.GetInstallTime(app_package),
"%s/%s/install_timestamp" % (DEVICE_DIRECTORY, app_package))
future.result()
else:
if start_type == "warm":
adb.StopAppAndSaveState(app_package)
else:
adb.StopApp(app_package)
if start_type in ["cold", "warm"]:
logging.info("Starting application %s", app_package)
adb.StartApp(app_package)
with file(output_marker, "w") as _:
pass
except DeviceNotFoundError:
sys.exit("Error: Device not found")
except DeviceUnauthorizedError:
sys.exit("Error: Device unauthorized. Please check the confirmation "
"dialog on your device.")
except MultipleDevicesError as e:
sys.exit(
"Error: " + e.message + "\nTry specifying a device serial with " +
"\"blaze mobile-install --adb_arg=-s --adb_arg=$ANDROID_SERIAL\"")
except TimestampException as e:
sys.exit("Error:\n%s" % e.message)
except AdbError as e:
sys.exit("Error:\n%s" % e.message)
finally:
shutil.rmtree(temp_dir, True)
def main():
if FLAGS.verbosity == "1":
level = logging.DEBUG
fmt = "%(levelname)-5s %(asctime)s %(module)s:%(lineno)3d] %(message)s"
else:
level = logging.INFO
fmt = "%(message)s"
logging.basicConfig(stream=sys.stdout, level=level, format=fmt)
start_type = FLAGS.start
if FLAGS.start_app and start_type == "no":
start_type = "cold"
IncrementalInstall(
adb_path=FLAGS.adb,
adb_jobs=FLAGS.adb_jobs,
execroot=FLAGS.execroot,
stub_datafile=FLAGS.stub_datafile,
output_marker=FLAGS.output_marker,
start_type=start_type,
native_libs=FLAGS.native_lib,
split_main_apk=FLAGS.split_main_apk,
split_apks=FLAGS.split_apk,
dexmanifest=FLAGS.dexmanifest,
apk=FLAGS.apk,
resource_apk=FLAGS.resource_apk,
user_home_dir=FLAGS.user_home_dir)
if __name__ == "__main__":
FLAGS(sys.argv)
# process any additional flags in --flagfile
if FLAGS.flagfile:
with open(FLAGS.flagfile) as flagsfile:
FLAGS.Reset()
FLAGS(sys.argv + [line.strip() for line in flagsfile.readlines()])
main()
| dhootha/bazel | tools/android/incremental_install.py | Python | apache-2.0 | 27,838 |
from django.contrib.gis.db import models
from django.contrib import admin
# Create your models here.
class Position(models.Model):
point = models.PointField()
created_date = models.DateTimeField(auto_now=False)
class Location(models.Model):
current_position = models.ForeignKey(to=Position)
last_change = models.DateTimeField(auto_now=False)
class Meta:
abstract = True
admin.site.register(Position)
| JuloWaks/pyar_map | members_map/django_located/models.py | Python | mit | 436 |
from runtests.mpi import MPITest
from nbodykit.lab import *
from nbodykit import setup_logging
from numpy.testing import assert_allclose, assert_array_equal
import os
setup_logging("debug")
# The test result data (threeptcf_sim_result.dat) is computed with
# Daniel Eisenstein's
# C++ implementation on the same input data set for poles up to l=11;
# we should agree with it to high precision.
#
# If we need to reproduce these files:
# Nick Hand sent the code and instructions to Yu Feng on Aug-20-2018.
data_dir = os.path.join(os.path.split(os.path.abspath(__file__))[0], 'data')
@MPITest([4])
def test_sim_threeptcf(comm):
import tempfile
BoxSize = 400.0
# load the test data
filename = os.path.join(data_dir, 'threeptcf_sim_data.dat')
cat = CSVCatalog(filename, names=['x', 'y', 'z', 'w'], comm=comm)
cat['Position'] = transform.StackColumns(cat['x'], cat['y'], cat['z'])
cat['Position'] *= BoxSize
# r binning
nbins = 8
edges = numpy.linspace(0, 200.0, nbins+1)
# run the algorithm
ells = list(range(0, 11))
r = SimulationBox3PCF(cat, ells, edges, BoxSize=BoxSize, weight='w')
# load the result from file
truth = numpy.empty((8,8,11))
with open(os.path.join(data_dir, 'threeptcf_sim_result.dat'), 'r') as ff:
for line in ff:
fields = line.split()
i, j = int(fields[0]), int(fields[1])
truth[i,j,:] = list(map(float, fields[2:]))
truth[j,i,:] = truth[i,j,:]
# test equality
for i, ell in enumerate(ells):
x = r.poles['corr_%d' %ell]
assert_allclose(x * (4*numpy.pi)**2 / (2*ell+1), truth[...,i], rtol=1e-3, err_msg='mismatch for ell=%d' %ell)
# save to temp file
filename = 'test-threept-cf.json'
r.save(filename)
r2 = SimulationBox3PCF.load(filename, comm=comm)
assert_array_equal(r.poles.data, r2.poles.data)
if comm.rank == 0:
os.remove(filename)
@MPITest([4])
def test_survey_threeptcf(comm):
import tempfile
BoxSize = 400.0
cosmo = cosmology.Planck15
# load the test data
filename = os.path.join(data_dir, 'threeptcf_sim_data.dat')
cat = CSVCatalog(filename, names=['x', 'y', 'z', 'w'], comm=comm)
cat['Position'] = transform.StackColumns(cat['x'], cat['y'], cat['z'])
cat['Position'] *= BoxSize
# place observer at center of box
cat['Position'] -= 0.5*BoxSize
# transform to RA/DEC/Z
# NOTE: we update Position here to get exact same Position as SurveyData3PCF
cat['RA'], cat['DEC'], cat['Z'] = transform.CartesianToSky(cat['Position'], cosmo)
cat['Position'] = transform.SkyToCartesian(cat['RA'], cat['DEC'], cat['Z'], cosmo)
# r binning
nbins = 8
edges = numpy.linspace(0, 200.0, nbins+1)
# run the reference (non-periodic simulation box)
ells = list(range(0, 11))
ref = SimulationBox3PCF(cat, ells, edges, BoxSize=BoxSize, weight='w', periodic=False)
# run the algorithm to test
r = SurveyData3PCF(cat, ells, edges, cosmo, weight='w', ra='RA', dec='DEC', redshift='Z')
# test equality between survey result and reference
for i, ell in enumerate(ells):
a = r.poles['corr_%d' %ell]
b = ref.poles['corr_%d' %ell]
assert_allclose(a, b, rtol=1e-5, err_msg='mismatch for ell=%d' %ell)
# save to temp file
filename = 'test-threept-cf.json'
r.save(filename)
r2 = SurveyData3PCF.load(filename, comm=comm)
assert_array_equal(r.poles.data, r2.poles.data)
if comm.rank == 0:
os.remove(filename)
@MPITest([1])
def test_sim_threeptcf_pedantic(comm):
BoxSize = 400.0
# load the test data
filename = os.path.join(data_dir, 'threeptcf_sim_data.dat')
cat = CSVCatalog(filename, names=['x', 'y', 'z', 'w'], comm=comm)
cat['Position'] = transform.StackColumns(cat['x'], cat['y'], cat['z'])
cat['Position'] *= BoxSize
cat = cat[::20]
# r binning
nbins = 8
edges = numpy.linspace(0, 200.0, nbins+1)
# run the algorithm
ells = [0, 2, 4, 8]
r = SimulationBox3PCF(cat, ells, edges, BoxSize=BoxSize, weight='w')
p_fast = r.run()
p_pedantic = r.run(pedantic=True)
# test equality
for i, ell in enumerate(sorted(ells)):
x1 = p_fast['corr_%d' %ell]
x2 = p_pedantic['corr_%d' %ell]
assert_allclose(x1, x2)
@MPITest([1])
def test_sim_threeptcf_shuffled(comm):
BoxSize = 400.0
# load the test data
filename = os.path.join(data_dir, 'threeptcf_sim_data.dat')
cat = CSVCatalog(filename, names=['x', 'y', 'z', 'w'], comm=comm)
cat['Position'] = transform.StackColumns(cat['x'], cat['y'], cat['z'])
cat['Position'] *= BoxSize
cat = cat
# r binning
nbins = 8
edges = numpy.linspace(0, 200.0, nbins+1)
# run the algorithm
ells = list(range(0, 2))[::-1]
r = SimulationBox3PCF(cat, ells, edges, BoxSize=BoxSize, weight='w')
# load the result from file
truth = numpy.empty((8,8,11))
with open(os.path.join(data_dir, 'threeptcf_sim_result.dat'), 'r') as ff:
for line in ff:
fields = line.split()
i, j = int(fields[0]), int(fields[1])
truth[i,j,:] = list(map(float, fields[2:]))
truth[j,i,:] = truth[i,j,:]
# test equality
for i, ell in enumerate(sorted(ells)):
x = r.poles['corr_%d' %ell]
assert_allclose(x * (4*numpy.pi)**2 / (2*ell+1), truth[...,i], rtol=1e-3, err_msg='mismatch for ell=%d' %ell)
| bccp/nbodykit | nbodykit/algorithms/tests/test_threeptcf.py | Python | gpl-3.0 | 5,475 |
#!/usr/bin/env python
# A small Python utility for synchronizing a local folder with a remote web
# repository.
# The MIT License (MIT)
#
# Copyright (c) 2014-8 Roberto Reale
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import requests
import sys
import urllib.parse
import yaml
from alphabet import alphabet
class RequestsHandler(object):
""" HTTP/S requests handler """
def get(self, url):
response = requests.get(url, verify=self.verify_cert, proxies=self.proxies)
if response:
return response.content
else:
return None
def __init__(self, verify_cert=True, proxies=None):
self.verify_cert = verify_cert
self.proxies = proxies
class RemoteRepo(object):
""" The remote repository """
def get_file(self, url, ignore_base_url=False):
if ignore_base_url:
full_url = url
else:
full_url = urllib.parse.urljoin(self.base_url, url)
return self.requests_handler.get(full_url)
def __init__(self, base_url, requests_handler):
self.base_url = base_url
self.requests_handler = requests_handler
class RemoteDigestList(RemoteRepo):
""" The remote digest list """
def _retrieve_digest_list(self, list_url):
response = self.get_file(list_url, ignore_base_url=True)
if response:
return response.decode().split("\n")
else:
return None
def __init__(self, list_url, requests_handler, ignore_path=False):
RemoteRepo.__init__(self, None, requests_handler) # base_url = None
digest_list = self._retrieve_digest_list(list_url)
if digest_list:
self.digest_list = digest_list
self.ignore_path = ignore_path
self.current = 0
self.high = len(self.digest_list)
else:
# TODO add error handling
pass
def __iter__(self):
return self
def __next__(self):
if self.current >= self.high:
raise StopIteration
else:
self.current += 1
digest_item = self.digest_list[self.current - 1]
try:
[digest, full_path] = digest_item.split()
except ValueError:
return self.__next__() # skip line; TODO warn user
if self.ignore_path:
path = os.path.basename(full_path)
else:
path = full_path
return [path, digest]
class LocalCopy(object):
""" The local working copy """
    def check_file(self, path, digest):
        """ verifies a file's digest """
        with open(path, "rb") as f:
            contents = alphabet(f.read())
        return contents.check(digest)
def compare(self):
local_files = []
for [path, digest] in self.remote_digest_list:
full_path = os.path.join(self.base_path, path)
if not os.path.exists(full_path):
self.to_get.append(path)
elif not self.check_file(full_path, digest):
self.to_get.append(path)
else:
print(("file %s is ok" % path))
def write_file(self, name, contents):
print(("writing file %s" % name))
with open (os.path.join(self.base_path, name), "wb+") as f:
f.write(contents)
def sync(self):
for file in self.to_get:
print(("getting file %s" % file))
self.write_file(file, self.remote.get_file(file))
    def __init__(self, base_path, remote, remote_digest_list):
        self.base_path = base_path
        self.remote = remote
        self.remote_digest_list = remote_digest_list
        self.to_get = []  # per-instance list of files to fetch (was a shared class attribute)
class Wsync(object):
""" Wsync object """
def sync(self):
requests_handlers = RequestsHandler(self.verify_cert, self.proxies)
if not requests_handlers:
print("Cannot instantiate requests handler", file=sys.stderr)
sys.exit(1)
remote_repo = RemoteRepo(self.remote_repo_url, requests_handlers)
remote_digest_list = RemoteDigestList(self.digest_list_url,
requests_handlers,
ignore_path=True)
local = LocalCopy(self.local_copy_path, remote_repo, remote_digest_list)
local.compare()
local.sync()
def set_local_copy_path(self, value):
self.local_copy_path = value
def set_digest_list_url(self, value):
self.digest_list_url = value
def set_remote_repo_url(self, value):
self.remote_repo_url = value
def set_verify_cert(self, value):
self.verify_cert = value
def add_proxy(self, schema, url):
self.proxies[schema] = url
    def set_proxies(self, proxies):
        # accept either a mapping or an iterable of (schema, url) pairs
        items = proxies.items() if isinstance(proxies, dict) else proxies
        for schema, url in items:
            self.proxies[schema] = url
def __init__(self, digest_list_url=None, remote_repo_url=None, local_copy_path=None):
self.digest_list_url = digest_list_url
self.remote_repo_url = remote_repo_url
if local_copy_path is None:
local_copy_path = os.getcwd()
self.local_copy_path = local_copy_path
self.verify_cert = False
self.proxies = dict()
class WsyncByConfigFile(Wsync):
""" Wsync object, defined by config file """
def search_config(self, config_file):
if not config_file:
return None
# first try user-defined
user_home = os.path.expanduser("~")
path = os.path.join(user_home, ".wsync", config_file)
if os.path.exists(path):
return path
# then try system-wide
path = os.path.join("/etc/wsync", config_file)
if os.path.exists(path):
return path
def load_config(self, config_file):
path = self.search_config(config_file)
if not path:
return False
        with open(path, "r") as f:
            config = yaml.safe_load(f)
for key, value in config.items():
if key == "digest_list_url":
self.set_digest_list_url(value)
elif key == "remote_repo_url":
self.set_remote_repo_url(value)
elif key == "local_copy_path":
self.set_local_copy_path(value)
elif key == "verify_cert":
self.set_verify_cert(value)
elif key == "http_proxy":
self.add_proxy("http", value)
elif key == "https_proxy":
self.add_proxy("https", value)
return True
def __init__(self):
Wsync.__init__(self)
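if __name__ == "__main__":
    # Minimal usage sketch, not part of the original module; the URLs and the
    # local path below are hypothetical placeholders.
    ws = Wsync(
        digest_list_url="https://example.com/repo/DIGESTS",
        remote_repo_url="https://example.com/repo/",
        local_copy_path="./mirror",
    )
    ws.sync()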
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| roberto-reale/wsync | wsync/wsync.py | Python | mit | 7,836 |
__all__ = ['bob','fpmatch','utils']
from bob import Rule | robochat/buildbit | buildbit/__init__.py | Python | gpl-3.0 | 58 |
#!/usr/bin/env python
"""Trivial example program from the Spyse tutorial"""
from spyse.app.app import App
App()
| davidko/evolspyse | tut/trivial.py | Python | lgpl-2.1 | 116 |
# Test AXScripting the best we can in an automated fashion...
import win32api, os, sys
import win32com.axscript
import win32com.axscript.client
import unittest
import win32com.test.util
verbose = "-v" in sys.argv
class AXScript(win32com.test.util.TestCase):
def setUp(self):
file = win32api.GetFullPathName(os.path.join(win32com.axscript.client.__path__[0], "pyscript.py"))
from win32com.test.util import RegisterPythonServer
self.verbose = verbose
RegisterPythonServer(file, 'python', verbose=self.verbose)
def testHost(self):
file = win32api.GetFullPathName(os.path.join(win32com.axscript.__path__[0], "test\\testHost.py"))
cmd = '%s "%s"' % (win32api.GetModuleFileName(0), file)
if verbose:
print("Testing Python Scripting host")
win32com.test.util.ExecuteShellCommand(cmd, self)
def testCScript(self):
file = win32api.GetFullPathName(os.path.join(win32com.axscript.__path__[0], "Demos\\Client\\wsh\\test.pys"))
cmd = 'cscript.exe "%s"' % (file)
if verbose:
print("Testing Windows Scripting host with Python script")
win32com.test.util.ExecuteShellCommand(cmd, self)
if __name__=='__main__':
win32com.test.util.testmain()
| sserrot/champion_relationships | venv/Lib/site-packages/win32com/test/testAXScript.py | Python | mit | 1,268 |
import socket
import threading
import time
import struct
import Queue
queue = Queue.Queue()
def udp_sender(ip,port):
'''
    send a UDP probe packet
'''
try:
ADDR = (ip,port)
sock_udp = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
sock_udp.sendto("abcd...",ADDR)
sock_udp.close()
except:
pass
def icmp_receiver(ip,port):
'''
    receive an ICMP packet to determine the UDP port state
'''
icmp = socket.getprotobyname("icmp")
try:
sock_icmp = socket.socket(socket.AF_INET, socket.SOCK_RAW, icmp)
except socket.error, (errno, msg):
if errno == 1:
# Operation not permitted
msg = msg + (
" - Note that ICMP messages can only be sent from processes"
" running as root."
)
raise socket.error(msg)
raise # raise the original error
sock_icmp.settimeout(3)
try:
recPacket,addr = sock_icmp.recvfrom(64)
except:
queue.put(True)
return
icmpHeader = recPacket[20:28]
icmpPort = int(recPacket.encode('hex')[100:104],16)
head_type, code, checksum, packetID, sequence = struct.unpack(
"bbHHh", icmpHeader
)
sock_icmp.close()
if code == 3 and icmpPort == port and addr[0] == ip:
queue.put(False)
return
def check_udp(ip,port):
'''
    Check the UDP port state:
    if the port is open, return True;
    if the port is down, return False.
'''
thread_udp = threading.Thread(target=udp_sender,args=(ip,port))
thread_icmp = threading.Thread(target=icmp_receiver,args=(ip,port))
thread_udp.daemon= True
thread_icmp.daemon = True
thread_icmp.start()
time.sleep(0.1)
thread_udp.start()
thread_icmp.join()
thread_udp.join()
return queue.get()
def check_tcp(host,port):
'''
    Check the TCP port state:
    if the port is open, return True;
    if the port is down, return False.
'''
sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
try:
sock.connect((host,port))
sock.close()
return True
except Exception:
return False
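if __name__ == '__main__':
    # Minimal usage sketch with hypothetical host/ports. Note that check_udp
    # opens a raw ICMP socket, so it must be run as root.
    print check_tcp('127.0.0.1', 22)
    print check_udp('127.0.0.1', 53)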
| spunkzwy/learn_python | monit/monit_port.py | Python | gpl-3.0 | 2,261 |
from .Drive import Drive
from .Item import Item | stanionascu/py1drive | py1drive/OneDrive/resources/__init__.py | Python | mit | 47 |
# Lint as: python2, python3
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from models.keras import model
class ModelTest(tf.test.TestCase):
def testBuildKerasModel(self):
built_model = model._build_keras_model(
        hidden_units=[1, 1], learning_rate=0.1)  # pylint: disable=protected-access
self.assertEqual(len(built_model.layers), 10)
built_model = model._build_keras_model(hidden_units=[1], learning_rate=0.1) # pylint: disable=protected-access
self.assertEqual(len(built_model.layers), 9)
if __name__ == '__main__':
tf.test.main()
| GoogleCloudPlatform/mlops-on-gcp | immersion/guided_projects/guided_project_2_solution/models/keras/model_test.py | Python | apache-2.0 | 1,262 |
# -*- coding: utf8 -*-
SQL = (
('list_fonds_report1', """
select
F.FKOD,F.FNAME, (F.A16+if(F.A22,A22,0)) as A16
FROM
`af3_fond` F
WHERE
FNAME like ('%%%(qr)s%%') or A1 like ('%%%(qr)s%%')
ORDER BY FKOD;"""),
)
FOUND_ROWS = True
ROOT = "fonds"
ROOT_PREFIX = None
ROOT_POSTFIX= None
XSL_TEMPLATE = "data/af-web.xsl"
EVENT = None
WHERE = ()
PARAM = ("qr",)
TITLE="Поиск фондов"  # "Fund search"
MESSAGE="Нет результатов по вашему запросу, вернитесь назад"  # "No results for your query, go back"
ORDER = None
| ffsdmad/af-web | cgi-bin/plugins2/report/fond_search_report1.py | Python | gpl-3.0 | 547 |
import sys, inspect
import os
import unittest
from pytest import raises
import numpy as np
from cs207rbtree import RedBlackTree #import RBTree
from TimeseriesDB.generate_SMTimeseries import generate_time_series #script to generate time series
from Similarity.pick_vantage_points import pick_vantage_points #script to pick vantage pts
from Similarity.find_most_similar import find_most_similiar, sanity_check
from timeseries.SMTimeSeries import SMTimeSeries as ts #use SMTimeseries
from timeseries.ArrayTimeSeries import ArrayTimeSeries as arrayts
from Similarity import distances
global PATH
PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))+'/Similarity/'
class DataBase_tests(unittest.TestCase):
def setUp(self):
if os.path.isfile("/tmp/test.dbdb"):
os.remove("/tmp/test.dbdb")
self.db1 = RedBlackTree.connect("/tmp/test.dbdb")
self.db1.set(2,'database1')
self.db1.set(1,'database2')
self.db1.set(0.5,'database3')
self.db1.set(3.2222,'database4')
self.db1.set(4,'database5')
self.db1.set(5,'database6')
self.db1.set(200,'database7')
self.db1.commit()
self.db1.close()
self.sm = generate_time_series()
self.vp = np.array(pick_vantage_points(20,self.sm))
def tearDown(self):
del self.db1
del self.sm
del self.vp
def test_nodes_less_than(self):
"""tests the function that finds nodes than a certain value"""
db1 = RedBlackTree.connect("/tmp/test.dbdb")
assert (db1.get(3.2222) == 'database4')
assert (db1.get_nodes_less_than(2) == ['database3', 'database2', 'database1'])
assert (db1.get_nodes_less_than(4.5) == ['database3', 'database2', 'database1', 'database4', 'database5'])
assert (db1.get_nodes_less_than(0.5) == ['database3'])
assert (db1.get_nodes_less_than(0) == [])
db1.close()
def test_generate_time_series(self):
"""tests that the generate_time_series """
for i in range(1000):
ts1 = ts.from_db(self.sm,i)
def test_pick_vantage_points(self):
for i in self.vp:
db = RedBlackTree.connect(PATH+"VantagePointDatabases/"+str(i)+".dbdb")
db.close()
def test_find_most_similiar(self):
filename = 200
n = 20
ans = find_most_similiar(filename, n, self.vp, self.sm)
ans2 = sanity_check(filename,n,self.sm)
assert np.array_equal(ans,ans2)
filename = 932
n = 3
ans = find_most_similiar(filename, n, self.vp, self.sm)
ans2 = sanity_check(filename,n, self.sm)
assert np.array_equal(ans,ans2)
filename = 32
n = 5
ans = find_most_similiar(filename, n, self.vp, self.sm)
ans2 = sanity_check(filename,n, self.sm)
assert np.array_equal(ans,ans2)
#check to make sure we caught exception if an invalid id is passed
filename = 100000
n = 5
with raises(KeyError):
ans = find_most_similiar(filename, n, self.vp, self.sm)
def test_find_most_similiar_newTS(self):
filename = distances.tsmaker(100, 100, 1000)
n = 20
ans = find_most_similiar(filename, n, self.vp, self.sm)
ans2 = sanity_check(filename,n,self.sm)
assert np.array_equal(ans,ans2)
filename = distances.tsmaker(100, 100, 1000)
n = 3
ans = find_most_similiar(filename, n, self.vp, self.sm)
ans2 = sanity_check(filename,n, self.sm)
assert np.array_equal(ans,ans2)
if __name__=='__main__':
try: # pragma: no cover
unittest.main() # pragma: no cover
except SystemExit as inst: # pragma: no cover
if inst.args[0] is True: # pragma: no cover
raise # pragma: no cover | slac207/cs207project | tests/test_database.py | Python | mit | 4,014 |
"""add_modified_at_to_users_and_kernels
Revision ID: e35332f8d23d
Revises: da24ff520049
Create Date: 2020-07-01 14:02:11.022032
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from sqlalchemy.sql.expression import bindparam
from ai.backend.manager.models.base import convention, IDColumn
# revision identifiers, used by Alembic.
revision = 'e35332f8d23d'
down_revision = 'da24ff520049'
branch_labels = None
depends_on = None
def upgrade():
metadata = sa.MetaData(naming_convention=convention)
# partial table to be preserved and referred
users = sa.Table(
'users', metadata,
IDColumn('uuid'),
sa.Column('created_at', sa.DateTime(timezone=True),
server_default=sa.func.now()),
sa.Column('modified_at', sa.DateTime(timezone=True),
server_default=sa.func.now(), onupdate=sa.func.current_timestamp()),
)
keypairs = sa.Table(
'keypairs', metadata,
sa.Column('access_key', sa.String(length=20), primary_key=True),
sa.Column('created_at', sa.DateTime(timezone=True),
server_default=sa.func.now()),
sa.Column('modified_at', sa.DateTime(timezone=True),
server_default=sa.func.now(), onupdate=sa.func.current_timestamp()),
)
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('keypairs', sa.Column('modified_at', sa.DateTime(timezone=True),
server_default=sa.text('now()'), nullable=True))
op.add_column('users', sa.Column('modified_at', sa.DateTime(timezone=True),
server_default=sa.text('now()'), nullable=True))
# ### end Alembic commands ###
conn = op.get_bind()
# Set user's modified_at with the value of created_at.
query = sa.select([users.c.uuid, users.c.created_at]).select_from(users)
updates = []
for row in conn.execute(query).fetchall():
updates.append({'b_uuid': row['uuid'], 'modified_at': row['created_at']})
if updates:
query = (sa.update(users)
.values(modified_at=bindparam('modified_at'))
.where(users.c.uuid == bindparam('b_uuid')))
conn.execute(query, updates)
# Set keypairs's modified_at with the value of created_at.
query = sa.select([keypairs.c.access_key, keypairs.c.created_at]).select_from(keypairs)
updates = []
for row in conn.execute(query).fetchall():
updates.append({'b_access_key': row['access_key'], 'modified_at': row['created_at']})
if updates:
query = (sa.update(keypairs)
.values(modified_at=bindparam('modified_at'))
.where(keypairs.c.access_key == bindparam('b_access_key')))
conn.execute(query, updates)
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('users', 'modified_at')
op.drop_column('keypairs', 'modified_at')
# ### end Alembic commands ###
| lablup/backend.ai-manager | src/ai/backend/manager/models/alembic/versions/e35332f8d23d_add_modified_at_to_users_and_kernels.py | Python | lgpl-3.0 | 3,049 |
#!/usr/bin/env python
import jsk_arc2017_common
graspability = jsk_arc2017_common.get_object_graspability()
for obj_id, obj in enumerate(graspability):
print('{:02}: {}'.format(obj_id+1, obj))
for style in graspability[obj]:
print(' {}: {}'.format(style, graspability[obj][style]))
| start-jsk/jsk_apc | jsk_arc2017_common/scripts/list_graspability.py | Python | bsd-3-clause | 303 |
# pattern seems to be multiplying every pair of digits from different numbers and adding them up
from itertools import product
def test_it(a, b):
return sum(int(d1)*int(d2) for d1,d2 in product(str(a), str(b)))
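# worked example: test_it(12, 34) == 1*3 + 1*4 + 2*3 + 2*4 == 21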
| SelvorWhim/competitive | Codewars/ThinkingTestingAB.py | Python | unlicense | 217 |
# Volatility
# Copyright (C) 2007-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: Andrew Case
@license: GNU General Public License 2.0
@contact: [email protected]
@organization:
"""
import volatility.commands as commands
import volatility.utils as utils
import volatility.debug as debug
import volatility.obj as obj
MAX_STRING_LENGTH = 256
nsecs_per = 1000000000
class vol_timespec:
def __init__(self, secs, nsecs):
self.tv_sec = secs
self.tv_nsec = nsecs
def set_plugin_members(obj_ref):
obj_ref.addr_space = utils.load_as(obj_ref._config)
if not obj_ref.is_valid_profile(obj_ref.addr_space.profile):
debug.error("This command does not support the selected profile.")
class AbstractLinuxCommand(commands.Command):
def __init__(self, *args, **kwargs):
self.addr_space = None
self.known_addrs = {}
commands.Command.__init__(self, *args, **kwargs)
@property
def profile(self):
if self.addr_space:
return self.addr_space.profile
return None
def execute(self, *args, **kwargs):
commands.Command.execute(self, *args, **kwargs)
@staticmethod
def is_valid_profile(profile):
return profile.metadata.get('os', 'Unknown').lower() == 'linux'
def is_known_address(self, addr, modules):
text = self.profile.get_symbol("_text")
etext = self.profile.get_symbol("_etext")
return (self.addr_space.address_compare(addr, text) != -1 and self.addr_space.address_compare(addr, etext) == -1) or self.address_in_module(addr, modules)
def address_in_module(self, addr, modules):
for (_, start, end) in modules:
if self.addr_space.address_compare(addr, start) != -1 and self.addr_space.address_compare(addr, end) == -1:
return True
return False
def verify_ops(self, ops, op_members, modules):
for check in op_members:
addr = ops.m(check)
if addr and addr != 0:
if addr in self.known_addrs:
known = self.known_addrs[addr]
else:
known = self.is_known_address(addr, modules)
self.known_addrs[addr] = known
if known == 0:
yield (check, addr)
class AbstractLinuxIntelCommand(AbstractLinuxCommand):
@staticmethod
def is_valid_profile(profile):
return AbstractLinuxCommand.is_valid_profile(profile) \
and (profile.metadata.get('arch').lower() == 'x86' \
or profile.metadata.get('arch').lower() == 'x64')
class AbstractLinuxARMCommand(AbstractLinuxCommand):
@staticmethod
def is_valid_profile(profile):
return AbstractLinuxCommand.is_valid_profile(profile) \
and (profile.metadata.get('arch').lower() == 'arm')
def walk_internal_list(struct_name, list_member, list_start, addr_space = None):
if not addr_space:
addr_space = list_start.obj_vm
while list_start:
list_struct = obj.Object(struct_name, vm = addr_space, offset = list_start.v())
yield list_struct
list_start = getattr(list_struct, list_member)
# based on __d_path
def do_get_path(rdentry, rmnt, dentry, vfsmnt):
ret_path = []
inode = dentry.d_inode
if not rdentry.is_valid() or not dentry.is_valid():
return []
while (dentry != rdentry or vfsmnt != rmnt) and dentry.d_name.name.is_valid():
dname = dentry.d_name.name.dereference_as("String", length = MAX_STRING_LENGTH)
ret_path.append(dname.strip('/'))
if dentry == vfsmnt.mnt_root or dentry == dentry.d_parent:
if vfsmnt.mnt_parent == vfsmnt.v():
break
dentry = vfsmnt.mnt_mountpoint
vfsmnt = vfsmnt.mnt_parent
continue
parent = dentry.d_parent
dentry = parent
ret_path.reverse()
if ret_path == []:
return []
ret_val = '/'.join([str(p) for p in ret_path if p != ""])
if ret_val.startswith(("socket:", "pipe:")):
if ret_val.find("]") == -1:
ret_val = ret_val[:-1] + ":[{0}]".format(inode.i_ino)
else:
ret_val = ret_val.replace("/", "")
elif ret_val != "inotify":
ret_val = '/' + ret_val
return ret_val
def get_path(task, filp):
rdentry = task.fs.get_root_dentry()
rmnt = task.fs.get_root_mnt()
dentry = filp.dentry
vfsmnt = filp.vfsmnt
return do_get_path(rdentry, rmnt, dentry, vfsmnt)
| andante20/volatility | volatility/plugins/linux/common.py | Python | gpl-2.0 | 5,223 |
# -*- encoding: utf-8 -*-
#
# Copyright 2014-2015 Openstack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import numpy
import pandas
import six
from gnocchi import aggregates
from oslo_utils import strutils
from pytimeparse import timeparse
class MovingAverage(aggregates.CustomAggregator):
@staticmethod
def check_window_valid(window):
"""Takes in the window parameter string, reformats as a float."""
if window is None:
msg = 'Moving aggregate must have window specified.'
raise aggregates.CustomAggFailure(msg)
try:
return float(timeparse.timeparse(six.text_type(window)))
except Exception:
raise aggregates.CustomAggFailure('Invalid value for window')
@staticmethod
def retrieve_data(storage_obj, metric, start, stop, window):
"""Retrieves finest-res data available from storage."""
all_data = storage_obj.get_measures(metric, start, stop)
try:
min_grain = min(set([row[1] for row in all_data if row[1] == 0
or window % row[1] == 0]))
except Exception:
msg = ("No data available that is either full-res or "
"of a granularity that factors into the window size "
"you specified.")
raise aggregates.CustomAggFailure(msg)
return min_grain, pandas.Series([r[2] for r in all_data
if r[1] == min_grain],
[r[0] for r in all_data
if r[1] == min_grain])
@staticmethod
def aggregate_data(data, func, window, min_grain, center=False,
min_size=1):
"""Calculates moving func of data with sampling width of window.
:param data: Series of timestamp, value pairs
:param func: the function to use when aggregating
:param window: (float) range of data to use in each aggregation.
:param min_grain: granularity of the data being passed in.
:param center: whether to index the aggregated values by the first
timestamp of the values picked up by the window or by the central
timestamp.
:param min_size: if the number of points in the window is less than
min_size, the aggregate is not computed and nan is returned for
that iteration.
"""
if center:
center = strutils.bool_from_string(center)
def moving_window(x):
msec = datetime.timedelta(milliseconds=1)
zero = datetime.timedelta(seconds=0)
half_span = datetime.timedelta(seconds=window / 2)
start = data.index[0]
stop = data.index[-1] + datetime.timedelta(seconds=min_grain)
# min_grain addition necessary since each bin of rolled-up data
# is indexed by leftmost timestamp of bin.
left = half_span if center else zero
right = 2 * half_span - left - msec
# msec subtraction is so we don't include right endpoint in slice.
if x - left >= start and x + right <= stop:
dslice = data[x - left: x + right]
if center and dslice.size % 2 == 0:
return func([func(data[x - msec - left: x - msec + right]),
func(data[x + msec - left: x + msec + right])
])
# (NOTE) atmalagon: the msec shift here is so that we have two
# consecutive windows; one centered at time x - msec,
# and one centered at time x + msec. We then average the
# aggregates from the two windows; this result is centered
# at time x. Doing this double average is a way to return a
# centered average indexed by a timestamp that existed in
# the input data (which wouldn't be the case for an even number
# of points if we did only one centered average).
else:
return numpy.nan
if dslice.size < min_size:
return numpy.nan
return func(dslice)
try:
result = pandas.Series(data.index).apply(moving_window)
# change from integer index to timestamp index
result.index = data.index
return [(t, window, r) for t, r
in six.iteritems(result[~result.isnull()])]
except Exception as e:
raise aggregates.CustomAggFailure(str(e))
def compute(self, storage_obj, metric, start, stop, window=None,
center=False):
"""Returns list of (timestamp, window, aggregated value) tuples.
:param storage_obj: a call is placed to the storage object to retrieve
the stored data.
:param metric: the metric
:param start: start timestamp
:param stop: stop timestamp
:param window: format string specifying the size over which to
aggregate the retrieved data
:param center: how to index the aggregated data (central timestamp or
leftmost timestamp)
"""
window = self.check_window_valid(window)
min_grain, data = self.retrieve_data(storage_obj, metric, start,
stop, window)
return self.aggregate_data(data, numpy.mean, window, min_grain, center,
min_size=1)
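if __name__ == "__main__":
    # Minimal sketch on synthetic data, not part of the module: a moving mean
    # with a 3-second window over a 10-point, 1-second-granularity series.
    index = pandas.date_range("2015-01-01", periods=10, freq="S")
    series = pandas.Series(range(10), index=index)
    print(MovingAverage.aggregate_data(series, numpy.mean,
                                       window=3.0, min_grain=1.0))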
| idegtiarov/gnocchi-rep | gnocchi/aggregates/moving_stats.py | Python | apache-2.0 | 6,068 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tensor utility functions.
@@assert_same_float_dtype
@@assert_scalar_int
@@convert_to_tensor_or_sparse_tensor
@@local_variable
@@reduce_sum_n
@@with_shape
@@with_same_shape
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
__all__ = [
'assert_same_float_dtype', 'assert_scalar_int',
'convert_to_tensor_or_sparse_tensor', 'local_variable', 'reduce_sum_n',
'with_shape', 'with_same_shape',
]
def _assert_same_base_type(items, expected_type=None):
"""Asserts all items are of the same base type.
Args:
items: List of graph items (e.g., `Variable`, `Tensor`, `SparseTensor`,
`Operation`, or `IndexedSlices`). Can include `None` elements, which
will be ignored.
expected_type: Expected type. If not specified, assert all items are
of the same base type.
Returns:
Validated type, or none if neither expected_type nor items provided.
Raises:
ValueError: If any types do not match.
"""
original_item_str = None
for item in items:
if item is not None:
item_type = item.dtype.base_dtype
if not expected_type:
expected_type = item_type
original_item_str = item.name if hasattr(item, 'name') else str(item)
elif expected_type != item_type:
raise ValueError('%s, type=%s, must be of the same type (%s)%s.' % (
item.name if hasattr(item, 'name') else str(item),
item_type, expected_type,
(' as %s' % original_item_str) if original_item_str else ''))
return expected_type
def assert_same_float_dtype(tensors=None, dtype=None):
"""Validate and return float type based on `tensors` and `dtype`.
For ops such as matrix multiplication, inputs and weights must be of the
same float type. This function validates that all `tensors` are the same type,
validates that type is `dtype` (if supplied), and returns the type. Type must
be `dtypes.float32` or `dtypes.float64`. If neither `tensors` nor
`dtype` is supplied, default to `dtypes.float32`.
Args:
tensors: Tensors of input values. Can include `None` elements, which will be
ignored.
dtype: Expected type.
Returns:
Validated type.
Raises:
ValueError: if neither `tensors` nor `dtype` is supplied, or result is not
float.
"""
if tensors:
dtype = _assert_same_base_type(tensors, dtype)
if not dtype:
dtype = dtypes.float32
elif not dtype.is_floating:
raise ValueError('Expected float, got %s.' % dtype)
return dtype
def assert_scalar_int(tensor):
"""Assert `tensor` is 0-D, of type `tf.int32` or `tf.int64`.
Args:
tensor: Tensor to test.
Returns:
`tensor`, for chaining.
Raises:
ValueError: if `tensor` is not 0-D, of type `tf.int32` or `tf.int64`.
"""
data_type = tensor.dtype
if data_type.base_dtype not in [dtypes.int32, dtypes.int64]:
raise ValueError('Unexpected type %s for %s.' % (data_type, tensor.name))
shape = tensor.get_shape()
if shape.ndims != 0:
raise ValueError('Unexpected shape %s for %s.' % (shape, tensor.name))
return tensor
# TODO(ptucker): Move to tf.variables?
def local_variable(initial_value, validate_shape=True, name=None):
"""Create variable and add it to `GraphKeys.LOCAL_VARIABLES` collection.
Args:
initial_value: See variables.Variable.__init__.
validate_shape: See variables.Variable.__init__.
name: See variables.Variable.__init__.
Returns:
New variable.
"""
return variables.Variable(
initial_value, trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
validate_shape=validate_shape, name=name)
def reduce_sum_n(tensors, name=None):
"""Reduce tensors to a scalar sum.
This reduces each tensor in `tensors` to a scalar via `tf.reduce_sum`, then
adds them via `tf.add_n`.
Args:
tensors: List of tensors, all of the same numeric type.
name: Tensor name, and scope for all other ops.
Returns:
    Total sum tensor.
  Raises:
    ValueError: if `tensors` is missing or empty.
"""
if not tensors:
raise ValueError('No tensors provided.')
tensors = [math_ops.reduce_sum(t, name='%s/sum' % t.op.name) for t in tensors]
if len(tensors) == 1:
return tensors[0]
with ops.op_scope(tensors, name, 'reduce_sum_n') as scope:
return math_ops.add_n(tensors, name=scope)
def _all_equal(tensor0, tensor1):
with ops.op_scope([tensor0, tensor1], 'all_equal') as scope:
return math_ops.reduce_all(
math_ops.equal(tensor0, tensor1, name='equal'), name=scope)
def _is_rank(expected_rank, actual_tensor):
"""Returns whether actual_tensor's rank is expected_rank.
Args:
expected_rank: Integer defining the expected rank, or tensor of same.
actual_tensor: Tensor to test.
Returns:
New tensor.
"""
with ops.op_scope([actual_tensor], 'is_rank') as scope:
expected = ops.convert_to_tensor(expected_rank, name='expected')
actual = array_ops.rank(actual_tensor, name='actual')
return math_ops.equal(expected, actual, name=scope)
def _is_shape(expected_shape, actual_tensor, actual_shape=None):
"""Returns whether actual_tensor's shape is expected_shape.
Args:
expected_shape: Integer list defining the expected shape, or tensor of same.
actual_tensor: Tensor to test.
actual_shape: Shape of actual_tensor, if we already have it.
Returns:
New tensor.
"""
with ops.op_scope([actual_tensor], 'is_shape') as scope:
is_rank = _is_rank(array_ops.size(expected_shape), actual_tensor)
if actual_shape is None:
actual_shape = array_ops.shape(actual_tensor, name='actual')
shape_equal = _all_equal(
ops.convert_to_tensor(expected_shape, name='expected'),
actual_shape)
return math_ops.logical_and(is_rank, shape_equal, name=scope)
def _assert_shape_op(expected_shape, actual_tensor):
"""Asserts actual_tensor's shape is expected_shape.
Args:
expected_shape: List of integers defining the expected shape, or tensor of
same.
actual_tensor: Tensor to test.
Returns:
New assert tensor.
"""
with ops.op_scope([actual_tensor], 'assert_shape') as scope:
actual_shape = array_ops.shape(actual_tensor, name='actual')
is_shape = _is_shape(expected_shape, actual_tensor, actual_shape)
return logging_ops.Assert(
is_shape, [
'Wrong shape for %s [expected] [actual].' % actual_tensor.name,
expected_shape,
actual_shape
], name=scope)
def with_same_shape(expected_tensor, tensor):
"""Assert tensors are the same shape, from the same graph.
Args:
expected_tensor: Tensor with expected shape.
tensor: Tensor of actual values.
Returns:
Tuple of (actual_tensor, label_tensor), possibly with assert ops added.
"""
with ops.op_scope([expected_tensor, tensor], '%s/' % tensor.op.name):
tensor_shape = expected_tensor.get_shape()
expected_shape = (
tensor_shape.as_list() if tensor_shape.is_fully_defined()
else array_ops.shape(expected_tensor, name='expected_shape'))
return with_shape(expected_shape, tensor)
def _is_tensor(t):
return isinstance(t, (ops.Tensor, ops.SparseTensor, variables.Variable))
def with_shape(expected_shape, tensor):
"""Asserts tensor has expected shape.
If tensor shape and expected_shape, are fully defined, assert they match.
Otherwise, add assert op that will validate the shape when tensor is
evaluated, and set shape on tensor.
Args:
expected_shape: Expected shape to assert, as a 1D array of ints, or tensor
of same.
tensor: Tensor whose shape we're validating.
Returns:
tensor, perhaps with a dependent assert operation.
Raises:
ValueError: if tensor has an invalid shape.
"""
if isinstance(tensor, ops.SparseTensor):
raise ValueError('SparseTensor not supported.')
# Shape type must be 1D int32.
if _is_tensor(expected_shape):
if expected_shape.dtype.base_dtype != dtypes.int32:
raise ValueError(
'Invalid dtype %s for shape %s expected of tensor %s.' % (
expected_shape.dtype, expected_shape, tensor.name))
if isinstance(expected_shape, (list, tuple)):
if not expected_shape:
expected_shape = np.asarray([], dtype=np.int32)
else:
np_expected_shape = np.asarray(expected_shape)
expected_shape = (
np.asarray(expected_shape, dtype=np.int32)
if np_expected_shape.dtype == np.int64 else np_expected_shape)
if isinstance(expected_shape, np.ndarray):
if expected_shape.ndim > 1:
raise ValueError(
'Invalid rank %s for shape %s expected of tensor %s.' % (
expected_shape.ndim, expected_shape, tensor.name))
if expected_shape.dtype != np.int32:
raise ValueError(
'Invalid dtype %s for shape %s expected of tensor %s.' % (
expected_shape.dtype, expected_shape, tensor.name))
actual_shape = tensor.get_shape()
if not actual_shape.is_fully_defined() or _is_tensor(expected_shape):
with ops.op_scope([tensor], '%s/' % tensor.op.name):
if not _is_tensor(expected_shape) and (len(expected_shape) < 1):
# TODO(irving): Remove scalar special case
return array_ops.reshape(tensor, [])
with ops.control_dependencies([_assert_shape_op(expected_shape, tensor)]):
result = array_ops.identity(tensor)
if not _is_tensor(expected_shape):
result.set_shape(expected_shape)
return result
if (not _is_tensor(expected_shape) and
not actual_shape.is_compatible_with(expected_shape)):
if (len(expected_shape) < 1) and actual_shape.is_compatible_with([1]):
# TODO(irving): Remove scalar special case.
with ops.op_scope([tensor], '%s/' % tensor.op.name):
return array_ops.reshape(tensor, [])
raise ValueError('Invalid shape for tensor %s, expected %s, got %s.' % (
tensor.name, expected_shape, actual_shape))
return tensor
def convert_to_tensor_or_sparse_tensor(
value, dtype=None, name=None, as_ref=False):
"""Converts value to a `SparseTensor` or `Tensor`.
Args:
value: A `SparseTensor`, `SparseTensorValue`, or an object whose type has a
registered `Tensor` conversion function.
dtype: Optional element type for the returned tensor. If missing, the
type is inferred from the type of `value`.
name: Optional name to use if a new `Tensor` is created.
as_ref: True if we want the result as a ref tensor. Only used if a new
`Tensor` is created.
Returns:
A `SparseTensor` or `Tensor` based on `value`.
Raises:
RuntimeError: If result type is incompatible with `dtype`.
"""
if dtype is not None:
dtype = dtypes.as_dtype(dtype)
if isinstance(value, ops.SparseTensorValue):
value = ops.SparseTensor.from_value(value)
if isinstance(value, ops.SparseTensor):
if dtype and not dtype.is_compatible_with(value.dtype):
raise RuntimeError(
'Sparse dtype: requested = %s, actual = %s' % (
dtype.name, value.dtype.name))
return value
return ops.convert_to_tensor(value, dtype=dtype, name=name, as_ref=as_ref)
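if __name__ == '__main__':
  # Minimal graph-mode sketch (era-appropriate TF API; the values are made
  # up): validate a partially-defined placeholder shape at run time.
  import tensorflow as tf
  x = tf.placeholder(dtypes.float32, shape=[None, 3])
  y = with_shape([2, 3], x)
  with tf.Session() as sess:
    print(sess.run(y, feed_dict={x: [[1., 2., 3.], [4., 5., 6.]]}))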
| shishaochen/TensorFlow-0.8-Win | tensorflow/contrib/framework/python/framework/tensor_util.py | Python | apache-2.0 | 12,161 |
#encoding=utf8
import re
import zipfile
import HTMLParser
class GetContent(HTMLParser.HTMLParser):
def __init__(self):
HTMLParser.HTMLParser.__init__(self)
self.content = ""
def handle_data(self, data):
self.content += data
re_digits = re.compile(r'(\d+)')
def embedded_numbers(s):
pieces = re_digits.split(s)
pieces[1::2] = map(int, pieces[1::2])
return pieces
def sort_with_embedded_numbers(zipinfo_list):
aux = [(embedded_numbers(zipinfo.filename), zipinfo)
for zipinfo in zipinfo_list]
aux.sort()
return [zipinfo for _, zipinfo in aux]
def dump(filename):
fh = zipfile.ZipFile(filename)
html_list = [
zip_info for zip_info in fh.filelist
if zip_info.filename.endswith("html")]
html_list = sort_with_embedded_numbers(html_list)
content_obj = GetContent()
for html in html_list:
content_obj.feed(fh.read(html))
return content_obj.content
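if __name__ == "__main__":
    # Minimal CLI sketch, not part of the original module: dump the text of
    # the .epub file given as the first argument.
    import sys
    print dump(sys.argv[1])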
| tshirtman/kpritz | epub2txt.py | Python | gpl-3.0 | 966 |
#!/usr/bin/python
# Copyright (C) 2010, Nokia ([email protected])
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.
#
from common.utils.system import TrackerSystemAbstraction
import shutil
import unittest2 as ut
import os
from common.utils import configuration as cfg
from common.utils.helpers import log
import time
TEST_FILE_JPEG = "writeback-test-1.jpeg"
TEST_FILE_TIFF = "writeback-test-2.tif"
TEST_FILE_PNG = "writeback-test-4.png"
WRITEBACK_TMP_DIR = os.path.join (cfg.TEST_MONITORED_TMP_DIR, "writeback")
CONF_OPTIONS = [
(cfg.DCONF_MINER_SCHEMA, "index-recursive-directories", [WRITEBACK_TMP_DIR]),
(cfg.DCONF_MINER_SCHEMA, "index-single-directories", "[]"),
(cfg.DCONF_MINER_SCHEMA, "index-optical-discs", "false"),
(cfg.DCONF_MINER_SCHEMA, "index-removable-devices", "false")
]
def uri (filename):
return "file://" + os.path.join (WRITEBACK_TMP_DIR, filename)
class CommonTrackerWritebackTest (ut.TestCase):
"""
Superclass to share methods. Shouldn't be run by itself.
Start all processes including writeback, miner pointing to HOME/test-writeback-monitored
"""
@classmethod
def __prepare_directories (self):
#
# ~/test-writeback-monitored/
#
for d in ["test-writeback-monitored"]:
directory = os.path.join (WRITEBACK_TMP_DIR, d)
if (os.path.exists (directory)):
shutil.rmtree (directory)
os.makedirs (directory)
if (os.path.exists (os.getcwd() + "/test-writeback-data")):
# Use local directory if available
datadir = os.getcwd() + "/test-writeback-data"
else:
datadir = os.path.join (cfg.DATADIR, "tracker-tests",
"test-writeback-data")
for testfile in [TEST_FILE_JPEG, TEST_FILE_PNG,TEST_FILE_TIFF]:
origin = os.path.join (datadir, testfile)
log ("Copying %s -> %s" % (origin, WRITEBACK_TMP_DIR))
shutil.copy (origin, WRITEBACK_TMP_DIR)
time.sleep (2)
@classmethod
def setUpClass (self):
#print "Starting the daemon in test mode"
self.__prepare_directories ()
self.system = TrackerSystemAbstraction ()
self.system.tracker_writeback_testing_start (CONF_OPTIONS)
# Returns when ready
log ("Ready to go!")
@classmethod
def tearDownClass (self):
#print "Stopping the daemon in test mode (Doing nothing now)"
self.system.tracker_writeback_testing_stop ()
def get_test_filename_jpeg (self):
return uri (TEST_FILE_JPEG)
def get_test_filename_tiff (self):
return uri (TEST_FILE_TIFF)
def get_test_filename_png (self):
return uri (TEST_FILE_PNG)
| Pelagicore/tracker-ivi | tests/functional-tests/common/utils/writebacktest.py | Python | gpl-2.0 | 3,490 |
import os
import subprocess
import shlex
def get_ctags_version(executable=None):
"""
Return the text output from the --version option to ctags or None if ctags
executable cannot be found. Use executable for custom ctags builds and/or
path.
"""
args = shlex.split("ctags --version")
try:
p = subprocess.Popen(args, 0, shell=False, stdout=subprocess.PIPE, executable=executable)
version = p.communicate()[0]
except:
version = None
return version
class Tag(object):
"""
Represents a ctags "tag" found in some source code.
"""
def __init__(self, name):
self.name = name
self.file = None
self.ex_command = None
self.kind = None
self.fields = {}
class Kind(object):
"""
Represents a ctags "kind" found in some source code such as "member" or
"class".
"""
def __init__(self, name):
self.name = name
self.language = None
def group_name(self):
"""
Return the kind name as a group name. For example, 'variable' would
be 'Variables'. Pluralization is complex but this method is not. It
works more often than not.
"""
group = self.name
if self.name[-1] == 's':
group += 'es'
elif self.name[-1] == 'y':
group = self.name[0:-1] + 'ies'
else:
group += 's'
return group.capitalize()
def icon_name(self):
"""
Return the icon name in the form of 'source-<kind>'.
"""
return 'source-' + self.name
class Parser(object):
"""
Ctags Parser
Parses the output of a ctags command into a list of tags and a dictionary
of kinds.
"""
def has_kind(self, kind_name):
"""
Return true if kind_name is found in the list of kinds.
"""
if kind_name in self.kinds:
return True
else:
return False
def __init__(self):
self.tags = []
self.kinds = {}
self.tree = {}
def parse(self, command, executable=None):
"""
Parse ctags tags from the output of a ctags command. For example:
ctags -n --fields=fiKmnsSzt -f - some_file.php
"""
#args = [arg.replace('%20', ' ') for arg in shlex.split(command)]
args = shlex.split(command)
p = subprocess.Popen(args, 0, shell=False, stdout=subprocess.PIPE, executable=executable)
        self._parse_text(p.communicate()[0])
def _parse_text(self, text):
"""
Parses ctags text which may have come from a TAG file or from raw output
from a ctags command.
"""
for line in text.splitlines():
            kind = None
for i, field in enumerate(line.split("\t")):
if i == 0: tag = Tag(field)
elif i == 1: tag.file = field
elif i == 2: tag.ex_command = field
elif i > 2:
if ":" in field:
key, value = field.split(":")[0:2]
tag.fields[key] = value
if key == 'kind':
kind = Kind(value)
if not kind in self.kinds:
self.kinds[value] = kind
if kind is not None:
if 'language' in tag.fields:
kind.language = tag.fields['language']
tag.kind = kind
self.tags.append(tag)
"""
def get_tree(self):
tree = {}
for tag in self.tags:
if 'class' in tag.fields:
parent = tag.fields['class']
if "." in parent:
parents = parent.split(".")
node = tree
for p in parents:
if not p in node:
node[p] = {'tag':None, 'children':{}}
node = node[p]
print node
node['tag'] = tag
else:
if not parent in self.tree:
tree[parent] = {'tag':None, 'children':{}}
tree[parent]['children'][tag.name] = {'tag':tag, 'children':{}}
else:
if tag.name in self.tree:
tree[tag.name]['tag'] = tag
else:
tree[tag.name] = {'tag': tag, 'children':{}}
return tree
"""
| Bruno-sm/fguess | training_files/Python/ctags.py | Python | gpl-3.0 | 4,738 |
# REQUIRES: lit-max-individual-test-time
# Check that the per test timeout is enforced when running GTest tests.
#
# RUN: not %{lit} -j 1 -v %{inputs}/googletest-timeout --timeout=1 > %t.cmd.out
# RUN: FileCheck < %t.cmd.out %s
# Check that the per test timeout is enforced when running GTest tests via
# the configuration file
#
# RUN: not %{lit} -j 1 -v %{inputs}/googletest-timeout \
# RUN: --param set_timeout=1 > %t.cfgset.out 2> %t.cfgset.err
# RUN: FileCheck < %t.cfgset.out %s
# CHECK: -- Testing:
# CHECK: PASS: googletest-timeout :: {{[Dd]ummy[Ss]ub[Dd]ir}}/OneTest.py/FirstTest.subTestA
# CHECK: TIMEOUT: googletest-timeout :: {{[Dd]ummy[Ss]ub[Dd]ir}}/OneTest.py/FirstTest.subTestB
# CHECK: TIMEOUT: googletest-timeout :: {{[Dd]ummy[Ss]ub[Dd]ir}}/OneTest.py/FirstTest.subTestC
# CHECK: Expected Passes : 1
# CHECK: Individual Timeouts: 2
# Test per test timeout via a config file and on the command line.
# The value set on the command line should override the config file.
# RUN: not %{lit} -j 1 -v %{inputs}/googletest-timeout \
# RUN: --param set_timeout=1 --timeout=2 > %t.cmdover.out 2> %t.cmdover.err
# RUN: FileCheck < %t.cmdover.out %s
# RUN: FileCheck --check-prefix=CHECK-CMDLINE-OVERRIDE-ERR < %t.cmdover.err %s
# CHECK-CMDLINE-OVERRIDE-ERR: Forcing timeout to be 2 seconds
| GPUOpen-Drivers/llvm | utils/lit/tests/googletest-timeout.py | Python | apache-2.0 | 1,304 |
#!/usr/bin/env python
# Analyse texture
#
# Copyright (c) 2017 Polytechnique Montreal <www.neuro.polymtl.ca>
# Author: Charley
# Modified: 2017-06-22
#
# About the license: see the file LICENSE.TXT
import os
import sys
import itertools
import numpy as np
from skimage.feature import greycomatrix, greycoprops
from spinalcordtoolbox.image import Image, add_suffix, zeros_like, concat_data
from spinalcordtoolbox.utils.shell import SCTArgumentParser, Metavar, ActionCreateFolder
from spinalcordtoolbox.utils.sys import init_sct, printv, sct_progress_bar, set_loglevel
from spinalcordtoolbox.utils.fs import tmp_create, extract_fname, copy, rmtree
def get_parser():
parser = SCTArgumentParser(
description='Extraction of grey level co-occurence matrix (GLCM) texture features from an image within a given '
'mask. The textures features are those defined in the sckit-image implementation: '
'http://scikit-image.org/docs/dev/api/skimage.feature.html#greycoprops. This function outputs '
'one nifti file per texture metric (' + ParamGLCM().feature + ') and per orientation called '
'fnameInput_feature_distance_angle.nii.gz. Also, a file averaging each metric across the angles, '
'called fnameInput_feature_distance_mean.nii.gz, is output.'
)
mandatoryArguments = parser.add_argument_group("\nMANDATORY ARGUMENTS")
mandatoryArguments.add_argument(
"-i",
required=True,
help='Image to analyze. Example: t2.nii.gz',
metavar=Metavar.file,
)
mandatoryArguments.add_argument(
"-m",
required=True,
metavar=Metavar.file,
        help='Image mask. Example: t2_seg.nii.gz',
)
    optional = parser.add_argument_group("\nOPTIONAL ARGUMENTS")
optional.add_argument(
"-h",
"--help",
action="help",
help="Show this help message and exit")
optional.add_argument(
"-feature",
metavar=Metavar.str,
help='List of GLCM texture features (separate arguments with ",").',
required=False,
default=ParamGLCM().feature)
optional.add_argument(
"-distance",
metavar=Metavar.int,
help='Distance offset for GLCM computation, in pixel (suggested distance values between 1 and 5). Example: 1',
required=False,
default=ParamGLCM().distance)
optional.add_argument(
"-angle",
metavar=Metavar.list,
        help='List of angles for GLCM computation, separate arguments with ",", in degrees (suggested angle values '
'between 0 and 179). Example: 0,90',
required=False,
default=ParamGLCM().angle)
optional.add_argument(
"-dim",
help="Compute the texture on the axial (ax), sagittal (sag) or coronal (cor) slices.",
required=False,
choices=('ax', 'sag', 'cor'),
default=Param().dim)
optional.add_argument(
"-ofolder",
metavar=Metavar.folder,
help='Output folder. Example: /my_texture/',
action=ActionCreateFolder,
required=False,
default=Param().path_results)
optional.add_argument(
"-igt",
metavar=Metavar.str,
help="File name of ground-truth texture metrics.",
required=False)
optional.add_argument(
"-r",
help="Remove temporary files.",
required=False,
type=int,
choices=(0, 1),
default=int(Param().rm_tmp))
optional.add_argument(
'-v',
metavar=Metavar.int,
type=int,
choices=[0, 1, 2],
default=1,
# Values [0, 1, 2] map to logging levels [WARNING, INFO, DEBUG], but are also used as "if verbose == #" in API
help="Verbosity. 0: Display only errors/warnings, 1: Errors/warnings + info messages, 2: Debug mode")
return parser
class ExtractGLCM:
def __init__(self, param=None, param_glcm=None):
self.param = param if param is not None else Param()
self.param_glcm = param_glcm if param_glcm is not None else ParamGLCM()
# create tmp directory
self.tmp_dir = tmp_create() # path to tmp directory
if self.param.dim == 'ax':
self.orientation_extraction = 'RPI'
elif self.param.dim == 'sag':
self.orientation_extraction = 'IPR'
else:
self.orientation_extraction = 'IRP'
# metric_lst=['property_distance_angle']
self.metric_lst = []
for m in list(itertools.product(self.param_glcm.feature.split(','), self.param_glcm.angle.split(','))):
text_name = m[0] if m[0].upper() != 'asm'.upper() else m[0].upper()
self.metric_lst.append(text_name + '_' + str(self.param_glcm.distance) + '_' + str(m[1]))
# dct_im_seg{'im': list_of_axial_slice, 'seg': list_of_axial_masked_slice}
self.dct_im_seg = {'im': None, 'seg': None}
# to re-orient the data at the end if needed
self.orientation_im = Image(self.param.fname_im).orientation
self.fname_metric_lst = {}
def extract(self):
self.ifolder2tmp()
# fill self.dct_metric --> for each key_metric: create an Image with zero values
# self.init_metric_im()
# fill self.dct_im_seg --> extract axial slices from self.param.fname_im and self.param.fname_seg
self.extract_slices()
# compute texture
self.compute_texture()
# reorient data
if self.orientation_im != self.orientation_extraction:
self.reorient_data()
# mean across angles
self.mean_angle()
# save results to ofolder
self.tmp2ofolder()
return [os.path.join(self.param.path_results, self.fname_metric_lst[f]) for f in self.fname_metric_lst]
def tmp2ofolder(self):
os.chdir(self.curdir) # go back to original directory
printv('\nSave resulting files...', self.param.verbose, 'normal')
for f in self.fname_metric_lst: # Copy from tmp folder to ofolder
copy(os.path.join(self.tmp_dir, self.fname_metric_lst[f]),
os.path.join(self.param.path_results, self.fname_metric_lst[f]))
def ifolder2tmp(self):
self.curdir = os.getcwd()
# copy input image
if self.param.fname_im is not None:
copy(self.param.fname_im, self.tmp_dir)
self.param.fname_im = ''.join(extract_fname(self.param.fname_im)[1:])
else:
printv('ERROR: No input image', self.param.verbose, 'error')
# copy masked image
if self.param.fname_seg is not None:
copy(self.param.fname_seg, self.tmp_dir)
self.param.fname_seg = ''.join(extract_fname(self.param.fname_seg)[1:])
else:
printv('ERROR: No mask image', self.param.verbose, 'error')
os.chdir(self.tmp_dir) # go to tmp directory
def mean_angle(self):
im_metric_lst = [self.fname_metric_lst[f].split('_' + str(self.param_glcm.distance) + '_')[0] + '_' for f in self.fname_metric_lst]
im_metric_lst = list(set(im_metric_lst))
printv('\nMean across angles...', self.param.verbose, 'normal')
extension = extract_fname(self.param.fname_im)[2]
for im_m in im_metric_lst: # Loop across GLCM texture properties
# List images to mean
fname_mean_list = [im_m + str(self.param_glcm.distance) + '_' + a + extension
for a in self.param_glcm.angle.split(',')]
im_mean_list = [Image(fname) for fname in fname_mean_list]
# Average across angles and save it as wrk_folder/fnameIn_feature_distance_mean.extension
fname_out = im_m + str(self.param_glcm.distance) + '_mean' + extension
dim_idx = 3 # img is [x, y, z, angle] so specify 4th dimension (angle)
img = concat_data(im_mean_list, dim_idx).save(fname_out, mutable=True)
if len(np.shape(img.data)) < 4: # in case input volume is 3d and dim=t
img.data = img.data[..., np.newaxis]
img.data = np.mean(img.data, dim_idx)
img.save()
self.fname_metric_lst[im_m + str(self.param_glcm.distance) + '_mean'] = fname_out
def extract_slices(self):
# open image and re-orient it to RPI if needed
im, seg = Image(self.param.fname_im), Image(self.param.fname_seg)
if self.orientation_im != self.orientation_extraction:
im.change_orientation(self.orientation_extraction)
seg.change_orientation(self.orientation_extraction)
# extract axial slices in self.dct_im_seg
self.dct_im_seg['im'], self.dct_im_seg['seg'] = [im.data[:, :, z] for z in range(im.dim[2])], [seg.data[:, :, z] for z in range(im.dim[2])]
def compute_texture(self):
offset = int(self.param_glcm.distance)
printv('\nCompute texture metrics...', self.param.verbose, 'normal')
# open image and re-orient it to RPI if needed
im_tmp = Image(self.param.fname_im)
if self.orientation_im != self.orientation_extraction:
im_tmp.change_orientation(self.orientation_extraction)
dct_metric = {}
for m in self.metric_lst:
im_2save = zeros_like(im_tmp, dtype='float64')
dct_metric[m] = im_2save
# dct_metric[m] = Image(self.fname_metric_lst[m])
with sct_progress_bar() as pbar:
for im_z, seg_z, zz in zip(self.dct_im_seg['im'], self.dct_im_seg['seg'], range(len(self.dct_im_seg['im']))):
for xx in range(im_z.shape[0]):
for yy in range(im_z.shape[1]):
if not seg_z[xx, yy]:
continue
if xx < offset or yy < offset:
continue
if xx > (im_z.shape[0] - offset - 1) or yy > (im_z.shape[1] - offset - 1):
continue # to check if the whole glcm_window is in the axial_slice
if False in np.unique(seg_z[xx - offset: xx + offset + 1, yy - offset: yy + offset + 1]):
continue # to check if the whole glcm_window is in the mask of the axial_slice
glcm_window = im_z[xx - offset: xx + offset + 1, yy - offset: yy + offset + 1]
glcm_window = glcm_window.astype(np.uint8)
dct_glcm = {}
for a in self.param_glcm.angle.split(','): # compute the GLCM for self.param_glcm.distance and for each self.param_glcm.angle
dct_glcm[a] = greycomatrix(glcm_window,
[self.param_glcm.distance], [np.radians(int(a))],
symmetric=self.param_glcm.symmetric,
normed=self.param_glcm.normed)
for m in self.metric_lst: # compute the GLCM property (m.split('_')[0]) of the voxel xx,yy,zz
dct_metric[m].data[xx, yy, zz] = greycoprops(dct_glcm[m.split('_')[2]], m.split('_')[0])[0][0]
pbar.set_postfix(pos="{}/{}".format(zz, len(self.dct_im_seg["im"])))
pbar.update(1)
for m in self.metric_lst:
fname_out = add_suffix("".join(extract_fname(self.param.fname_im)[1:]), '_' + m)
dct_metric[m].save(fname_out)
self.fname_metric_lst[m] = fname_out
def reorient_data(self):
for f in self.fname_metric_lst:
os.rename(self.fname_metric_lst[f], add_suffix("".join(extract_fname(self.param.fname_im)[1:]), '_2reorient'))
im = Image(add_suffix("".join(extract_fname(self.param.fname_im)[1:]), '_2reorient')) \
.change_orientation(self.orientation_im) \
.save(self.fname_metric_lst[f])
class Param:
def __init__(self):
self.fname_im = None
self.fname_seg = None
self.path_results = './texture/'
self.verbose = 1
self.dim = 'ax'
self.rm_tmp = True
class ParamGLCM(object):
def __init__(self, symmetric=True, normed=True, feature='contrast,dissimilarity,homogeneity,energy,correlation,ASM', distance=1, angle='0,45,90,135'):
        self.symmetric = symmetric  # If True, the output matrix P[:, :, d, theta] is symmetric.
        self.normed = normed  # If True, normalize each matrix P[:, :, d, theta] by dividing by the total number of accumulated co-occurrences for the given offset. The elements of the resulting matrix sum to 1.
        self.feature = feature  # The property formulae are detailed here: http://scikit-image.org/docs/dev/api/skimage.feature.html#greycoprops
        self.distance = distance  # Size of the window: distance = 1 --> a reference pixel and its immediate neighbor
        self.angle = angle  # Rotation angles for co-occurrence matrix
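# Illustrative sketch (not executed by this script; the toy array and the
# 'levels' argument are assumptions): with distance=1 and angle=0,
# scikit-image's greycomatrix pairs each pixel with its right-hand
# neighbour, which is exactly what the per-voxel loop in compute_texture
# relies on.
#
#   import numpy as np
#   from skimage.feature import greycomatrix
#   toy = np.array([[0, 1], [1, 1]], dtype=np.uint8)
#   glcm = greycomatrix(toy, distances=[1], angles=[0], levels=2,
#                       symmetric=True, normed=True)
#   # glcm[i, j, 0, 0] holds the normalised count of horizontal i -> j pairs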
def main(argv=None):
"""
Main function
:param argv:
:return:
"""
parser = get_parser()
arguments = parser.parse_args(argv)
verbose = arguments.v
set_loglevel(verbose=verbose)
# create param object
param = Param()
param_glcm = ParamGLCM()
    # set param arguments as inputted by user
param.fname_im = arguments.i
param.fname_seg = arguments.m
if arguments.ofolder is not None:
param.path_results = arguments.ofolder
if not os.path.isdir(param.path_results) and os.path.exists(param.path_results):
printv("ERROR output directory %s is not a valid directory" % param.path_results, 1, 'error')
if not os.path.exists(param.path_results):
os.makedirs(param.path_results)
if arguments.feature is not None:
param_glcm.feature = arguments.feature
if arguments.distance is not None:
param_glcm.distance = arguments.distance
if arguments.angle is not None:
param_glcm.angle = arguments.angle
if arguments.dim is not None:
param.dim = arguments.dim
if arguments.r is not None:
param.rm_tmp = bool(arguments.r)
# create the GLCM constructor
glcm = ExtractGLCM(param=param, param_glcm=param_glcm)
# run the extraction
fname_out_lst = glcm.extract()
# remove tmp_dir
if param.rm_tmp:
rmtree(glcm.tmp_dir)
printv('\nDone! To view results, type:', param.verbose)
    printv('fsleyes ' + arguments.i + ' ' + ' -cm red-yellow -a 70.0 '.join(fname_out_lst) + ' -cm red-yellow -a 70.0 & \n', param.verbose, 'info')
if __name__ == "__main__":
init_sct()
main(sys.argv[1:])
| neuropoly/spinalcordtoolbox | spinalcordtoolbox/scripts/sct_analyze_texture.py | Python | mit | 14,764 |
"""
This file implements the lowering for `dict()`
"""
from numba.targets.imputils import lower_builtin
@lower_builtin(dict)
def impl_dict(context, builder, sig, args):
"""
The `dict()` implementation simply forwards the work to `Dict.empty()`.
"""
from numba.typed import Dict
dicttype = sig.return_type
kt, vt = dicttype.key_type, dicttype.value_type
def call_ctor():
return Dict.empty(kt, vt)
return context.compile_internal(builder, call_ctor, sig, args)
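# Hedged usage sketch (the function below is illustrative, not part of this
# file; it assumes numba's type inference can resolve the key/value types
# from later use): inside a jitted function, a bare dict() call is lowered
# through impl_dict above, so it behaves like Dict.empty().
#
#   from numba import njit
#
#   @njit
#   def build():
#       d = dict()      # dispatched to impl_dict -> Dict.empty(kt, vt)
#       d[1] = 2.0      # key/value types resolved as int64 / float64
#       return d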
| jriehl/numba | numba/targets/dictimpl.py | Python | bsd-2-clause | 504 |
from Tkinter import *
from minimop.conf import Mop
import math
import paho.mqtt.client as mqtt
import json
master = Tk()
width = 1000
height = 500
radar_values = [181]
step = 0
stepsize = 5
w = Canvas(master ,width=width, height=height)
w.pack()
botmid_x = width/2
botmid_y = 500
def on_connect(client, userdata, flags, rc):
    print("Connected with result code " + str(rc))
    client.subscribe(Mop.mqtt_radar)
def on_message(client, userdata, msg):
    print(msg.topic + " " + str(msg.payload))
    info = json.loads(msg.payload)
    draw_to(info[0], info[1], "green")
def draw_to(angle, dist, color):
# lighten old
# w.create_rectangle(0, 0, width, height, fill="white", stipple="gray50")
# w.create_rectangle(0, 0, width, height, fill="white", stipple="gray25")
length = dist
angle += 180
# find the end point
endy = length * math.sin(math.radians(angle)) + botmid_y
endx = length * math.cos(math.radians(angle)) + botmid_x
w.create_line(botmid_x, botmid_y, endx, endy, fill=color)
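# Worked example (for illustration): the +180 offset in draw_to mirrors the
# sweep around the bottom-centre anchor (botmid_x, botmid_y), so an input of
# angle=0, dist=100 gives endx = 100*cos(180 deg) + botmid_x (100 px to the
# left) and endy = botmid_y (no vertical offset).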
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
client.connect(Mop.mqtt_server, Mop.mqtt_port, 60)
client.loop_start()  # run the MQTT network loop in a background thread so callbacks fire alongside Tk's mainloop
master.mainloop()
| eXpire163/minimop | minimop/run/runRadarUI.py | Python | mit | 1,222 |
#!/usr/bin/env python
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# ansible-vault is a script that encrypts/decrypts YAML files. See
# http://docs.ansible.com/playbooks_vault.html for more details.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
PASSWORD = 'test-vault-password'
def main(args):
print(PASSWORD)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[:]))
| tsdmgz/ansible | test/integration/targets/vault/password-script.py | Python | gpl-3.0 | 1,024 |
from __future__ import (absolute_import, division, print_function)
from mantid.api import FileProperty, WorkspaceProperty, PythonAlgorithm, AlgorithmFactory, FileAction
from mantid.kernel import Direction
from mantid.simpleapi import CreateWorkspace, AddSampleLogMultiple
import numpy
import h5py
class LoadLamp(PythonAlgorithm):
def name(self):
return 'LoadLamp'
def category(self):
return 'DataHandling\\Nexus'
def summary(self):
return 'Loads HDF files exported from LAMP program at the ILL'
def PyInit(self):
self.declareProperty(FileProperty(name="InputFile", defaultValue="", action=FileAction.Load, extensions=["hdf"]))
self.declareProperty(WorkspaceProperty(name="OutputWorkspace", defaultValue="", direction=Direction.Output))
def PyExec(self):
input_file = self.getProperty("InputFile").value
output_ws = self.getPropertyValue("OutputWorkspace")
logs = ''
with h5py.File(input_file, 'r') as hf:
data = numpy.array(hf.get('entry1/data1/DATA'), dtype='float')
if data.ndim > 2:
raise RuntimeError('Data with more than 2 dimensions are not supported.')
errors = numpy.array(hf.get('entry1/data1/errors'), dtype='float')
x = numpy.array(hf.get('entry1/data1/X'), dtype='float')
if "entry1/data1/PARAMETERS" in hf:
logs = str(hf.get('entry1/data1/PARAMETERS')[0])
y = numpy.array([0])
nspec = 1
if data.ndim == 2:
y = numpy.array(hf.get('entry1/data1/Y'), dtype='float')
nspec = data.shape[0]
if x.ndim == 1:
x = numpy.tile(x, nspec)
CreateWorkspace(DataX=x, DataY=data, DataE=errors, NSpec=nspec, VerticalAxisUnit='Label',
VerticalAxisValues=y, OutputWorkspace=output_ws)
if logs:
log_names = []
log_values = []
for log in logs.split('\n'):
split = log.strip().split('=')
if len(split) == 2:
name = split[0]
value = split[1]
if name and value:
log_names.append(name)
log_values.append(value)
if log_names:
try:
AddSampleLogMultiple(Workspace=output_ws, LogNames=log_names, LogValues=log_values)
except RuntimeError as e:
self.log().warning('Unable to set the sample logs, reason: '+str(e))
self.setProperty('OutputWorkspace', output_ws)
AlgorithmFactory.subscribe(LoadLamp)
| ScreamingUdder/mantid | Framework/PythonInterface/plugins/algorithms/LoadLamp.py | Python | gpl-3.0 | 2,688 |
import logging
log = logging.getLogger(__name__)
version = __version__ = "1.0"
| kurgm/gwv | gwv/__init__.py | Python | mit | 81 |
'''
New Integration Test for Ceph Pool Capacity.
@author: Legion
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import time
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
pool_cap = test_stub.PoolCapacity()
def test():
pool_cap.create_vm()
test_obj_dict.add_vm(pool_cap.vm)
time.sleep(200)
pool_cap.get_ceph_pool('Root')
used1 = pool_cap.pool.usedCapacity
avail1 = pool_cap.pool.availableCapacity
pool_name = pool_cap.pool.poolName
pool_cap.check_pool_cap([used1, avail1], pool_name=pool_name)
pool_cap.vm.destroy()
test_obj_dict.rm_vm(pool_cap.vm)
time.sleep(200)
pool_cap.get_ceph_pool('Root')
used2 = pool_cap.pool.usedCapacity
avail2 = pool_cap.pool.availableCapacity
pool_cap.check_pool_cap([used2, avail2], pool_name=pool_name)
test_util.test_pass('Ceph Image Pool Capacity Test Success')
#Will be called only if exception happens in test().
def error_cleanup():
global test_obj_dict
test_lib.lib_error_cleanup(test_obj_dict)
| zstackorg/zstack-woodpecker | integrationtest/vm/virtualrouter/ceph_pool_capacity/test_ceph_root_pool_cap.py | Python | apache-2.0 | 1,147 |
from twisted.internet import threads
from config import config
from enigma import eDBoxLCD, eTimer, iPlayableService, pNavigation
import NavigationInstance
from Tools.Directories import fileExists
from Components.ParentalControl import parentalControl
from Components.ServiceEventTracker import ServiceEventTracker
from Components.SystemInfo import SystemInfo
from boxbranding import getBoxType, getMachineBuild
import Components.RecordingConfig
POLLTIME = 5 # seconds
def SymbolsCheck(session, **kwargs):
global symbolspoller, POLLTIME
if getBoxType() in ('ixussone', 'ixusszero', 'mbmicro', 'e4hd', 'e4hdhybrid', 'dm7020hd', 'dm7020hdv2') or getMachineBuild() in ('dags7362' , 'dags73625', 'dags5'):
POLLTIME = 1
symbolspoller = SymbolsCheckPoller(session)
symbolspoller.start()
class SymbolsCheckPoller:
def __init__(self, session):
self.session = session
self.blink = False
self.led = "0"
self.timer = eTimer()
self.onClose = []
self.__event_tracker = ServiceEventTracker(screen=self,eventmap=
{
iPlayableService.evUpdatedInfo: self.__evUpdatedInfo,
})
def __onClose(self):
pass
def start(self):
if self.symbolscheck not in self.timer.callback:
self.timer.callback.append(self.symbolscheck)
self.timer.startLongTimer(0)
def stop(self):
if self.symbolscheck in self.timer.callback:
self.timer.callback.remove(self.symbolscheck)
self.timer.stop()
def symbolscheck(self):
threads.deferToThread(self.JobTask)
self.timer.startLongTimer(POLLTIME)
def JobTask(self):
self.Recording()
self.PlaySymbol()
self.timer.startLongTimer(POLLTIME)
def __evUpdatedInfo(self):
self.service = self.session.nav.getCurrentService()
self.Subtitle()
self.ParentalControl()
self.PlaySymbol()
del self.service
def Recording(self):
if fileExists("/proc/stb/lcd/symbol_circle"):
recordings = len(NavigationInstance.instance.getRecordings(False,Components.RecordingConfig.recType(config.recording.show_rec_symbol_for_rec_types.getValue())))
if recordings > 0:
open("/proc/stb/lcd/symbol_circle", "w").write("3")
else:
open("/proc/stb/lcd/symbol_circle", "w").write("0")
elif getBoxType() in ('mixosf5', 'mixoslumi', 'mixosf7', 'gi9196m', 'sf3038'):
recordings = len(NavigationInstance.instance.getRecordings(False,Components.RecordingConfig.recType(config.recording.show_rec_symbol_for_rec_types.getValue())))
if recordings > 0:
open("/proc/stb/lcd/symbol_recording", "w").write("1")
else:
open("/proc/stb/lcd/symbol_recording", "w").write("0")
elif getBoxType() in ('ixussone', 'ixusszero'):
recordings = len(NavigationInstance.instance.getRecordings(False,Components.RecordingConfig.recType(config.recording.show_rec_symbol_for_rec_types.getValue())))
self.blink = not self.blink
if recordings > 0:
if self.blink:
open("/proc/stb/lcd/powerled", "w").write("1")
self.led = "1"
else:
open("/proc/stb/lcd/powerled", "w").write("0")
self.led = "0"
elif self.led == "1":
open("/proc/stb/lcd/powerled", "w").write("0")
elif getBoxType() in ('mbmicro', 'e4hd', 'e4hdhybrid'):
recordings = len(NavigationInstance.instance.getRecordings(False,Components.RecordingConfig.recType(config.recording.show_rec_symbol_for_rec_types.getValue())))
self.blink = not self.blink
if recordings > 0:
if self.blink:
open("/proc/stb/lcd/powerled", "w").write("0")
self.led = "1"
else:
open("/proc/stb/lcd/powerled", "w").write("1")
self.led = "0"
elif self.led == "1":
open("/proc/stb/lcd/powerled", "w").write("1")
elif getBoxType() in ('dm7020hd', 'dm7020hdv2'):
recordings = len(NavigationInstance.instance.getRecordings(False,Components.RecordingConfig.recType(config.recording.show_rec_symbol_for_rec_types.getValue())))
self.blink = not self.blink
if recordings > 0:
if self.blink:
open("/proc/stb/fp/led_set", "w").write("0x00000000")
self.led = "1"
else:
open("/proc/stb/fp/led_set", "w").write("0xffffffff")
self.led = "0"
else:
open("/proc/stb/fp/led_set", "w").write("0xffffffff")
elif getMachineBuild() in ('dags7362' , 'dags73625', 'dags5'):
recordings = len(NavigationInstance.instance.getRecordings(False,Components.RecordingConfig.recType(config.recording.show_rec_symbol_for_rec_types.getValue())))
self.blink = not self.blink
if recordings > 0:
if self.blink:
open("/proc/stb/lcd/symbol_rec", "w").write("1")
self.led = "1"
else:
open("/proc/stb/lcd/symbol_rec", "w").write("0")
self.led = "0"
elif self.led == "1":
open("/proc/stb/lcd/symbol_rec", "w").write("0")
else:
if not fileExists("/proc/stb/lcd/symbol_recording") or not fileExists("/proc/stb/lcd/symbol_record_1") or not fileExists("/proc/stb/lcd/symbol_record_2"):
return
recordings = len(NavigationInstance.instance.getRecordings(False,Components.RecordingConfig.recType(config.recording.show_rec_symbol_for_rec_types.getValue())))
if recordings > 0:
open("/proc/stb/lcd/symbol_recording", "w").write("1")
if recordings == 1:
open("/proc/stb/lcd/symbol_record_1", "w").write("1")
open("/proc/stb/lcd/symbol_record_2", "w").write("0")
elif recordings >= 2:
open("/proc/stb/lcd/symbol_record_1", "w").write("1")
open("/proc/stb/lcd/symbol_record_2", "w").write("1")
else:
open("/proc/stb/lcd/symbol_recording", "w").write("0")
open("/proc/stb/lcd/symbol_record_1", "w").write("0")
open("/proc/stb/lcd/symbol_record_2", "w").write("0")
def Subtitle(self):
if not fileExists("/proc/stb/lcd/symbol_smartcard"):
return
subtitle = self.service and self.service.subtitle()
subtitlelist = subtitle and subtitle.getSubtitleList()
if subtitlelist:
subtitles = len(subtitlelist)
if subtitles > 0:
open("/proc/stb/lcd/symbol_smartcard", "w").write("1")
else:
open("/proc/stb/lcd/symbol_smartcard", "w").write("0")
else:
open("/proc/stb/lcd/symbol_smartcard", "w").write("0")
def ParentalControl(self):
if not fileExists("/proc/stb/lcd/symbol_parent_rating"):
return
service = self.session.nav.getCurrentlyPlayingServiceReference()
if service:
if parentalControl.getProtectionLevel(service.toCompareString()) == -1:
open("/proc/stb/lcd/symbol_parent_rating", "w").write("0")
else:
open("/proc/stb/lcd/symbol_parent_rating", "w").write("1")
else:
open("/proc/stb/lcd/symbol_parent_rating", "w").write("0")
def PlaySymbol(self):
if not fileExists("/proc/stb/lcd/symbol_play "):
return
if SystemInfo["SeekStatePlay"]:
open("/proc/stb/lcd/symbol_play ", "w").write("1")
else:
open("/proc/stb/lcd/symbol_play ", "w").write("0")
| popazerty/openhdf-enigma2 | lib/python/Components/VfdSymbols.py | Python | gpl-2.0 | 6,706 |
from django.apps import AppConfig
class UsersConfig(AppConfig):
name = 'datelikeus.users'
verbose_name = "Users"
def ready(self):
"""Override this to put in:
Users system checks
Users signal registration
"""
pass
| jeffminsungkim/datelikeus | src/datelikeus/users/apps.py | Python | mit | 276 |
class GNIRS_SPECT(DataClassification):
name="GNIRS_SPECT"
usage = "Applies to any SPECT dataset from the GNIRS instrument."
parent = "GNIRS"
requirement = ISCLASS('GNIRS') & PHU(ACQMIR='Out')
newtypes.append(GNIRS_SPECT())
| pyrrho314/recipesystem | trunk/dontload-astrodata_Gemini/ADCONFIG_Gemini/classifications/types/GNIRS/gemdtype.GNIRS_SPECT.py | Python | mpl-2.0 | 241 |
#!/usr/bin/env python
"""
8. Optional bonus question--use a queue to get the output data back from the child processes
in question #7. Print this output data to the screen in the main process.
(applied_python)[chudgins@ip-172-30-0-251 class8]$ ./class8_ex8.py
...
Final elapsed time: 0:00:13.378109
"""
__author__ = 'Chip Hudgins'
__email__ = '[email protected]'
import django
from net_system.models import NetworkDevice, Credentials
from netmiko import ConnectHandler
from datetime import datetime
from multiprocessing import Process, Queue
def show_version(a_device, output_q):
output_dict = {}
remote_conn = ConnectHandler(device_type=a_device.device_type,
ip=a_device.ip_address,
username=a_device.credentials.username,
password=a_device.credentials.password,
port=a_device.port, secret='')
output = '#' * 80
output += remote_conn.send_command_expect("show version")
output += '#' * 80
output_dict[a_device.device_name] = output
output_q.put(output_dict)
def main():
django.setup()
start_time = datetime.now()
net_devices = NetworkDevice.objects.all()
output_q = Queue(maxsize=20)
procs = []
for a_device in net_devices:
my_proc = Process(target=show_version, args=(a_device, output_q))
my_proc.start()
procs.append(my_proc)
for a_proc in procs:
a_proc.join()
while not output_q.empty():
my_dict = output_q.get()
for k, val in my_dict.iteritems():
print k
print val
elapsed_time = datetime.now() - start_time
print "Final elapsed time: {}".format(elapsed_time)
if __name__ == '__main__':
        main()
| mudzi42/pynet_class | class8/class8_ex8.py | Python | apache-2.0 | 1,799 |
#
# Copyright (c) 2013-2016 Quarkslab.
# This file is part of IRMA project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the top-level directory
# of this distribution and at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# No part of the project, including this file, may be copied,
# modified, propagated, or distributed except according to the
# terms contained in the LICENSE file.
import logging
import unittest
from irma.common.exceptions import IrmaDatabaseError, IrmaValueError
from irma.database.nosqlhandler import NoSQLDatabase
from irma.database.nosqlobjects import NoSQLDatabaseObject
from datetime import datetime
from bson import ObjectId
# Test config
test_db_uri = "mongodb://localhost"
test_db_name = "unitest"
test_db_collection = "testobject"
# test object
class TestObject(NoSQLDatabaseObject):
_uri = test_db_uri
_dbname = test_db_name
_collection = test_db_collection
def __init__(self,
dbname=None,
id=None,
save=True):
if dbname is not None:
self._dbname = dbname
self.user = "test"
self.date = datetime.now()
self.dict = {}
self.list = []
super(TestObject, self).__init__(id=id, save=save)
@classmethod
def has_lock_timed_out(cls, id):
return super(TestObject, cls).has_lock_timed_out(id)
@classmethod
def is_lock_free(cls, id):
return super(TestObject, cls).is_lock_free(id)
# =================
# Logging options
# =================
def enable_logging(level=logging.INFO,
handler=None,
formatter=None):
global log
log = logging.getLogger()
if formatter is None:
formatter = logging.Formatter("%(asctime)s [%(name)s] " +
"%(levelname)s: %(message)s")
if handler is None:
handler = logging.StreamHandler()
handler.setFormatter(formatter)
log.addHandler(handler)
log.setLevel(level)
# ============
# Test cases
# ============
class DbTestCase(unittest.TestCase):
def setUp(self):
self.db = NoSQLDatabase(test_db_name, test_db_uri)
if self.db.db_instance() is None:
self.db._connect()
dbh = self.db.db_instance()
database = dbh[test_db_name]
self.collection = database[test_db_collection]
self.collection.remove()
def tearDown(self):
self.db._disconnect()
class CheckSingleton(DbTestCase):
def test_singleton(self):
self.assertEqual(id(NoSQLDatabase(test_db_name, test_db_uri)),
id(NoSQLDatabase(test_db_name, test_db_uri)))
class TestNoSQLDatabaseObject(DbTestCase):
def test_virtualconstructor(self):
with self.assertRaises(IrmaValueError):
NoSQLDatabaseObject()
def test_add_testobject(self):
t1 = TestObject()
self.assertIsNotNone(t1.id)
self.assertEqual(self.collection.count(), 1)
def test_id_type_testobject(self):
t1 = TestObject()
self.assertEqual(type(t1._id), ObjectId)
self.assertEqual(type(t1.id), str)
def test_add_two_testobjects(self):
t1 = TestObject()
t2 = TestObject()
self.assertNotEquals(t1.id, t2.id)
self.assertEqual(self.collection.count(), 2)
def test_check_type(self):
t1 = TestObject()
t2 = TestObject(id=t1.id)
self.assertEqual(self.collection.count(), 1)
self.assertEqual(type(t2.id), str)
self.assertEqual(type(t2.list), list)
self.assertEqual(type(t2.dict), dict)
self.assertEqual(type(t2.user), unicode)
self.assertEqual(type(t2.date), datetime)
def test_check_invalid_id(self):
with self.assertRaises(IrmaDatabaseError):
TestObject(id="coin")
def test_check_not_existing_id(self):
with self.assertRaises(IrmaDatabaseError):
TestObject(id="0")
def test_check_insertion_with_fixed_id(self):
t = TestObject()
t.user = "coin"
t.list.append(1)
t.list.append(2)
t.list.append(3)
fixed_id = ObjectId()
t.id = str(fixed_id)
t.update()
t2 = TestObject(id=str(fixed_id))
self.assertEqual(t2.id, str(fixed_id))
self.assertEqual(t2.user, "coin")
self.assertEqual(t2.list, [1, 2, 3])
def test_init_id(self):
fixed_id = str(ObjectId())
with self.assertRaises(IrmaDatabaseError):
TestObject(id=fixed_id)
t = TestObject.init_id(fixed_id)
t.user = "coin"
t.list.append(1)
t.list.append(2)
t.list.append(3)
t.update()
self.assertEqual(t.id, str(fixed_id))
t1 = TestObject.init_id(fixed_id)
self.assertEqual(t1.id, str(fixed_id))
self.assertEqual(t1.user, "coin")
self.assertEqual(t1.list, [1, 2, 3])
def test_update(self):
t = TestObject()
t.user = "coin"
t.list.append(1)
t.list.append(2)
t.list.append(3)
t.update()
self.assertEqual(t.list, [1, 2, 3])
t.update({'user': "bla"})
t1 = TestObject(id=t.id)
self.assertEqual(t1.user, "bla")
def test_remove(self):
t1 = TestObject()
self.assertEqual(self.collection.count(), 1)
t1.remove()
self.assertEqual(self.collection.count(), 0)
def test_get_temp_instance(self):
with self.assertRaises(NotImplementedError):
NoSQLDatabaseObject.get_temp_instance(0)
t1 = TestObject()
temp_id = t1.id
self.assertIsNotNone(TestObject.get_temp_instance(temp_id))
t1.remove()
with self.assertRaises(IrmaDatabaseError):
TestObject.get_temp_instance(temp_id)
def test_find(self):
with self.assertRaises(NotImplementedError):
NoSQLDatabaseObject.find()
self.assertEqual(TestObject.find().count(), 0)
t1 = TestObject()
self.assertEqual(TestObject.find().count(), 1)
def test_instance_to_str(self):
t1 = TestObject()
self.assertIsInstance(t1.__repr__(), str)
self.assertIsInstance(t1.__str__(), str)
if __name__ == '__main__':
enable_logging()
unittest.main()
| hirokihamasaki/irma | common/tests/test_nosqldatabase.py | Python | apache-2.0 | 6,422 |
from django.db.models import Q
from django.db.utils import IntegrityError
from django.test import TestCase, skipIfDBFeature
from django.forms.models import modelform_factory
from .models import (
Address, Place, Restaurant, Link, CharLink, TextLink,
Person, Contact, Note, Organization, OddRelation1, OddRelation2, Company,
Developer, Team, Guild, Tag, Board, HasLinkThing, A, B, C, D)
class GenericRelationTests(TestCase):
def test_inherited_models_content_type(self):
"""
Test that GenericRelations on inherited classes use the correct content
type.
"""
p = Place.objects.create(name="South Park")
r = Restaurant.objects.create(name="Chubby's")
l1 = Link.objects.create(content_object=p)
l2 = Link.objects.create(content_object=r)
self.assertEqual(list(p.links.all()), [l1])
self.assertEqual(list(r.links.all()), [l2])
def test_reverse_relation_pk(self):
"""
Test that the correct column name is used for the primary key on the
originating model of a query. See #12664.
"""
p = Person.objects.create(account=23, name='Chef')
Address.objects.create(street='123 Anywhere Place',
city='Conifer', state='CO',
zipcode='80433', content_object=p)
qs = Person.objects.filter(addresses__zipcode='80433')
self.assertEqual(1, qs.count())
self.assertEqual('Chef', qs[0].name)
def test_charlink_delete(self):
oddrel = OddRelation1.objects.create(name='clink')
CharLink.objects.create(content_object=oddrel)
oddrel.delete()
def test_textlink_delete(self):
oddrel = OddRelation2.objects.create(name='tlink')
TextLink.objects.create(content_object=oddrel)
oddrel.delete()
def test_q_object_or(self):
"""
Tests that SQL query parameters for generic relations are properly
grouped when OR is used.
Test for bug http://code.djangoproject.com/ticket/11535
In this bug the first query (below) works while the second, with the
query parameters the same but in reverse order, does not.
The issue is that the generic relation conditions do not get properly
grouped in parentheses.
"""
note_contact = Contact.objects.create()
org_contact = Contact.objects.create()
Note.objects.create(note='note', content_object=note_contact)
org = Organization.objects.create(name='org name')
org.contacts.add(org_contact)
# search with a non-matching note and a matching org name
qs = Contact.objects.filter(Q(notes__note__icontains=r'other note') |
Q(organizations__name__icontains=r'org name'))
self.assertTrue(org_contact in qs)
# search again, with the same query parameters, in reverse order
qs = Contact.objects.filter(
Q(organizations__name__icontains=r'org name') |
Q(notes__note__icontains=r'other note'))
self.assertTrue(org_contact in qs)
def test_join_reuse(self):
qs = Person.objects.filter(
addresses__street='foo'
).filter(
addresses__street='bar'
)
self.assertEqual(str(qs.query).count('JOIN'), 2)
def test_generic_relation_ordering(self):
"""
Test that ordering over a generic relation does not include extraneous
duplicate results, nor excludes rows not participating in the relation.
"""
p1 = Place.objects.create(name="South Park")
p2 = Place.objects.create(name="The City")
c = Company.objects.create(name="Chubby's Intl.")
Link.objects.create(content_object=p1)
Link.objects.create(content_object=c)
places = list(Place.objects.order_by('links__id'))
def count_places(place):
return len([p for p in places if p.id == place.id])
self.assertEqual(len(places), 2)
self.assertEqual(count_places(p1), 1)
self.assertEqual(count_places(p2), 1)
def test_target_model_is_unsaved(self):
"""Test related to #13085"""
# Fails with another, ORM-level error
dev1 = Developer(name='Joe')
note = Note(note='Deserves promotion', content_object=dev1)
self.assertRaises(IntegrityError, note.save)
def test_target_model_len_zero(self):
"""Test for #13085 -- __len__() returns 0"""
team1 = Team.objects.create(name='Backend devs')
try:
note = Note(note='Deserve a bonus', content_object=team1)
except Exception as e:
if (issubclass(type(e), Exception) and
str(e) == 'Impossible arguments to GFK.get_content_type!'):
self.fail("Saving model with GenericForeignKey to model instance whose "
"__len__ method returns 0 shouldn't fail.")
raise e
note.save()
def test_target_model_nonzero_false(self):
"""Test related to #13085"""
# __nonzero__() returns False -- This actually doesn't currently fail.
# This test validates that
g1 = Guild.objects.create(name='First guild')
note = Note(note='Note for guild', content_object=g1)
note.save()
@skipIfDBFeature('interprets_empty_strings_as_nulls')
def test_gfk_to_model_with_empty_pk(self):
"""Test related to #13085"""
# Saving model with GenericForeignKey to model instance with an
# empty CharField PK
b1 = Board.objects.create(name='')
tag = Tag(label='VP', content_object=b1)
tag.save()
def test_ticket_20378(self):
hs1 = HasLinkThing.objects.create()
hs2 = HasLinkThing.objects.create()
l1 = Link.objects.create(content_object=hs1)
l2 = Link.objects.create(content_object=hs2)
self.assertQuerysetEqual(
HasLinkThing.objects.filter(links=l1),
[hs1], lambda x: x)
self.assertQuerysetEqual(
HasLinkThing.objects.filter(links=l2),
[hs2], lambda x: x)
self.assertQuerysetEqual(
HasLinkThing.objects.exclude(links=l2),
[hs1], lambda x: x)
self.assertQuerysetEqual(
HasLinkThing.objects.exclude(links=l1),
[hs2], lambda x: x)
def test_ticket_20564(self):
b1 = B.objects.create()
b2 = B.objects.create()
b3 = B.objects.create()
c1 = C.objects.create(b=b1)
c2 = C.objects.create(b=b2)
c3 = C.objects.create(b=b3)
A.objects.create(flag=None, content_object=b1)
A.objects.create(flag=True, content_object=b2)
self.assertQuerysetEqual(
C.objects.filter(b__a__flag=None),
[c1, c3], lambda x: x
)
self.assertQuerysetEqual(
C.objects.exclude(b__a__flag=None),
[c2], lambda x: x
)
def test_ticket_20564_nullable_fk(self):
b1 = B.objects.create()
b2 = B.objects.create()
b3 = B.objects.create()
d1 = D.objects.create(b=b1)
d2 = D.objects.create(b=b2)
d3 = D.objects.create(b=b3)
d4 = D.objects.create()
A.objects.create(flag=None, content_object=b1)
A.objects.create(flag=True, content_object=b1)
A.objects.create(flag=True, content_object=b2)
self.assertQuerysetEqual(
D.objects.exclude(b__a__flag=None),
[d2], lambda x: x
)
self.assertQuerysetEqual(
D.objects.filter(b__a__flag=None),
[d1, d3, d4], lambda x: x
)
self.assertQuerysetEqual(
B.objects.filter(a__flag=None),
[b1, b3], lambda x: x
)
self.assertQuerysetEqual(
B.objects.exclude(a__flag=None),
[b2], lambda x: x
)
def test_extra_join_condition(self):
# A crude check that content_type_id is taken in account in the
# join/subquery condition.
self.assertIn("content_type_id", str(B.objects.exclude(a__flag=None).query).lower())
# No need for any joins - the join from inner query can be trimmed in
# this case (but not in the above case as no a objects at all for given
# B would then fail).
self.assertNotIn(" join ", str(B.objects.exclude(a__flag=True).query).lower())
self.assertIn("content_type_id", str(B.objects.exclude(a__flag=True).query).lower())
def test_editable_generic_rel(self):
GenericRelationForm = modelform_factory(HasLinkThing, fields='__all__')
form = GenericRelationForm()
self.assertIn('links', form.fields)
form = GenericRelationForm({'links': None})
self.assertTrue(form.is_valid())
form.save()
links = HasLinkThing._meta.get_field_by_name('links')[0].field
self.assertEqual(links.save_form_data_calls, 1)
| dex4er/django | tests/generic_relations_regress/tests.py | Python | bsd-3-clause | 9,000 |
# coding=utf-8
# This file is part of SickRage.
#
# URL: https://sickrage.github.io
# Git: https://github.com/SickRage/SickRage.git
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from datetime import datetime
from feedparser.util import FeedParserDict
from hachoir_parser import createParser
import sickbeard
from sickbeard import logger
from sickbeard.classes import Proper, TorrentSearchResult
from sickbeard.common import Quality
from sickbeard.db import DBConnection
from sickrage.helper.common import try_int
from sickrage.helper.exceptions import ex
from sickrage.providers.GenericProvider import GenericProvider
from sickrage.show.Show import Show
class TorrentProvider(GenericProvider):
def __init__(self, name):
GenericProvider.__init__(self, name)
self.ratio = None
self.provider_type = GenericProvider.TORRENT
def find_propers(self, search_date=None):
results = []
db = DBConnection()
placeholder = ','.join([str(x) for x in Quality.DOWNLOADED + Quality.SNATCHED + Quality.SNATCHED_BEST])
sql_results = db.select(
'SELECT s.show_name, e.showid, e.season, e.episode, e.status, e.airdate'
' FROM tv_episodes AS e'
' INNER JOIN tv_shows AS s ON (e.showid = s.indexer_id)'
' WHERE e.airdate >= ' + str(search_date.toordinal()) +
' AND e.status IN (' + placeholder + ') and e.is_proper = 0'
)
for result in sql_results or []:
show = Show.find(sickbeard.showList, int(result[b'showid']))
if show:
episode = show.getEpisode(result[b'season'], result[b'episode'])
for term in self.proper_strings:
search_strings = self._get_episode_search_strings(episode, add_string=term)
for item in self.search(search_strings[0]):
title, url = self._get_title_and_url(item)
results.append(Proper(title, url, datetime.today(), show))
return results
def is_active(self):
return bool(sickbeard.USE_TORRENTS) and self.is_enabled()
@property
def _custom_trackers(self):
if not (sickbeard.TRACKERS_LIST and self.public):
return ''
return '&tr=' + '&tr='.join({x.strip() for x in sickbeard.TRACKERS_LIST.split(',') if x.strip()})
def _get_result(self, episodes):
return TorrentSearchResult(episodes)
def _get_size(self, item):
if isinstance(item, dict):
size = item.get('size', -1)
elif isinstance(item, (list, tuple)) and len(item) > 2:
size = item[2]
else:
size = -1
# Make sure we didn't select seeds/leechers by accident
if not size or size < 1024 * 1024:
size = -1
return try_int(size, -1)
def _get_storage_dir(self):
return sickbeard.TORRENT_DIR
def _get_title_and_url(self, item):
if isinstance(item, (dict, FeedParserDict)):
download_url = item.get('url', '')
title = item.get('title', '')
if not download_url:
download_url = item.get('link', '')
elif isinstance(item, (list, tuple)) and len(item) > 1:
download_url = item[1]
title = item[0]
else:
download_url = ''
title = ''
if title.endswith('DIAMOND'):
logger.log('Skipping DIAMOND release for mass fake releases.')
download_url = title = 'FAKERELEASE'
if download_url:
download_url = download_url.replace('&', '&')
if title:
title = title.replace(' ', '.')
return title, download_url
def _verify_download(self, file_name=None):
try:
parser = createParser(file_name)
if parser:
# pylint: disable=protected-access
# Access to a protected member of a client class
mime_type = parser._getMimeType()
try:
parser.stream._input.close()
except Exception:
pass
if mime_type == 'application/x-bittorrent':
return True
except Exception as e:
logger.log('Failed to validate torrent file: {0}'.format(ex(e)), logger.DEBUG)
logger.log('Result is not a valid torrent file', logger.DEBUG)
return False
def seed_ratio(self):
return self.ratio
| Maximilian-Reuter/SickRage-1 | sickrage/providers/torrent/TorrentProvider.py | Python | gpl-3.0 | 5,136 |
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from scipy import *
from scipy.signal import *
import cStringIO
from PIL import Image
import os
import sys
import util.tools
# Represents a dummy network for testing.
class DummyNetwork:
# Network distortion types.
OneToOne = 1
HFlip = 2
VFlip = 3
Flip = 4
def __init__( self, network_type = OneToOne ):
self.network_type = network_type
self.x_in = 0.0
self.y_in = 0.0
def reinitialize( self ):
pass
def setValue( self, name, value ):
if name == 'X':
self.x_in = float(value)
elif name == 'Y':
self.y_in = float(value)
def update( self ):
pass
def getValue( self, name ):
if name == 'XOUT':
ret_val = self.x_in
if self.network_type == DummyNetwork.HFlip:
ret_val = -ret_val
elif name == 'YOUT':
ret_val = self.y_in
if self.network_type == DummyNetwork.VFlip:
ret_val = -ret_val
if self.network_type == DummyNetwork.Flip:
return -ret_val
else:
return ret_val
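# Quick sketch of the mapping (illustrative only):
#   net = DummyNetwork(DummyNetwork.HFlip)
#   net.setValue('X', 0.5); net.setValue('Y', -0.25)
#   net.update()
#   net.getValue('XOUT')  # -> -0.5  (horizontally flipped)
#   net.getValue('YOUT')  # -> -0.25 (unchanged)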
# Represents one element in a population.
class PopulationItem:
def __init__( self ):
self.network = None
self.distorted_image = None
self.icon = None
self.entropy = None
def update_distortion( self, image, network = None ):
if network != None:
self.network = network
if (image != None) and (self.network != None):
self.distorted_image = self.distort( image, self.network )
def update_icon( self ):
if (self.distorted_image != None):
self.icon = QIcon(QPixmap(self.distorted_image))
#@profile
def distort( self, image_map, network ):
# Convert whatever's sent in to the appropriate image format.
image = QImage( image_map )
image.convertToFormat( QImage.Format_RGB32 )
# Extract channel data from the image.
buffer = QBuffer( )
buffer.open( QBuffer.ReadWrite )
image.save( buffer, 'PNG' )
strio = cStringIO.StringIO()
strio.write( buffer.data( ) )
buffer.close( )
strio.seek( 0 )
pil_image = Image.open( strio )
pil_image.load()
image_chan_i = pil_image.split()
image_chan = [
image_chan_i[0].load(),
image_chan_i[1].load(),
image_chan_i[2].load()]
# Create a new image of the same size.
distorted_image = QImage( image.width(), image.height(), QImage.Format_RGB32 )
# Determine normalized coordinates.
x_norm = image.width() / 2.0
y_norm = image.height() / 2.0
# For each pixel, run the network with the destination point to determine
# which pixels to blend.
for y in xrange(image.height()):
for x in xrange(image.width()):
y_norm_in = (y - y_norm) / y_norm
x_norm_in = (x - x_norm) / x_norm
# Evaluate network.
network.reinitialize( )
network.setValue( 'X', x_norm_in )
network.setValue( 'Y', y_norm_in )
network.setValue( 'Bias', .5 )
network.update( )
x_norm_out = network.getValue( 'XOUT' )
y_norm_out = network.getValue( 'YOUT' )
# Determine pixel coordinates and clamp to the image boundaries.
y_out = y_norm_out * y_norm + y_norm
x_out = x_norm_out * x_norm + x_norm
if y_out < 0.0:
y_out = 0.0
elif y_out > image.height() - 1:
y_out = image.height() - 1
if x_out < 0.0:
x_out = 0.0
elif x_out > image.width() - 1:
x_out = image.width() - 1
# Determine row and column pixels and weights.
x_o1 = int(x_out)
x_o2 = x_o1 + 1 if x_o1 < image.width() - 1 else x_o1
y_o1 = int(y_out)
y_o2 = y_o1 + 1 if y_o1 < image.height() - 1 else y_o1
x_w = x_out - x_o1
y_w = y_out - y_o1
# Combine pixels.
p = [0, 0, 0]
for i in xrange(3):
p1 = int(round(
image_chan[i][x_o1, y_o1]*(1-x_w) +
image_chan[i][x_o2, y_o1]*(x_w) ))
p2 = int(round(
image_chan[i][x_o1, y_o2]*(1-x_w) +
image_chan[i][x_o2, y_o2]*(x_w) ))
p[i] = p1*(1-y_w) + p2*(y_w)
# Set value.
distorted_image.setPixel(
x, y, qRgb(p[0], p[1], p[2]) )
distorted_pil_image = util.tools.convertQ2PIL( distorted_image )
self.entropy = self.calculate_entropy( distorted_pil_image )
return distorted_image
def get_distorted_image( self ):
return QVariant() if self.distorted_image == None else self.distorted_image
def get_icon( self ):
return QVariant() if self.icon == None else self.icon
# Determines the perceived brightness of each pixel.
# This is based on http://www.nbdtech.com/Blog/archive/2008/04/27/Calculating-the-Perceived-Brightness-of-a-Color.aspx
# and http://alienryderflex.com/hsp.html
# which apparently gives a better brightness indicator than the W3C method.
#
# @param image Expects a PIL image.
# @return Returns the normalized brightness matrix.
def calculate_entropy( self, image ):
image_array = util.tools.PIL2array( image )
image_array = sqrt(inner((image_array/255.0)**2, [0.241, 0.691, 0.068]))
return (image_array - image_array.mean()) / image_array.std()
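    # Worked example of the brightness weights above (for illustration): a
    # pure-red pixel (255, 0, 0) scores sqrt(0.241) ~= 0.491 while pure green
    # (0, 255, 0) scores sqrt(0.691) ~= 0.831, i.e. green reads brighter than
    # red at equal channel intensity, as the linked articles describe.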
# A simple class for handling a population model.
class PopulationModel(QAbstractListModel):
def __init__( self, population_size, parent = None ):
super(PopulationModel, self).__init__( parent )
self.image_number = 0
self.population_size = population_size
self.original_image = None
self.population = [PopulationItem() for x in xrange(self.population_size)]
def set_original_image( self, image ):
self.original_image = image
for i in xrange(self.population_size):
self.update_item( i )
self.update_icon( i )
def update_item( self, index, network = None ):
self.population[index].update_distortion( self.original_image, network )
def update_icon( self, index ):
self.population[index].update_icon()
    def image_entropy( self, index ):
return self.population[index].entropy
def distort( self, index, image_map ):
return self.population[index].distort(
image_map, self.population[index].network )
def rowCount( self, parent = QModelIndex() ):
return len(self.population)
def correlate_image( self, image_entropy_1, image_entropy_2 ):
return fftconvolve(image_entropy_1, image_entropy_2[::-1,::-1], mode='same').max()
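    # Note on the reversed kernel: fftconvolve(a, b[::-1, ::-1]) is the 2-D
    # cross-correlation of a and b, so .max() scores the best alignment of
    # the two entropy maps.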
def data( self, index, role ):
if (not index.isValid()) or (index.row() >= self.population_size):
return None
if role == Qt.DecorationRole:
return self.population[index.row()].get_icon( )
elif role == Qt.SizeHintRole:
return QSize(120, 120)
else:
return QVariant()
| anthonytw/neat-deform | src/gui/PopulationModel.py | Python | gpl-3.0 | 7,546 |
import hashlib
from shiftmedia import exceptions as x
from shiftmedia import utils
class PathBuilder:
def __init__(self, secret_key):
"""
Path builder constructor
Initializes path builder service.
:param secret_key: string - secret key from config
"""
self.secret_key = secret_key
def generate_signature(self, id, filename):
"""
Generate signature
Accepts storage id and a filename to generate hash signature.
Signatures are used to prevent brute force attack against
resizing service.
:param id: string - storage id
:param filename: - string, resize filename
:return: string - signature
"""
sign_me = bytes(id + filename + self.secret_key, 'utf-8')
signature = hashlib.md5()
signature.update(sign_me)
return signature.hexdigest()
def validate_signature(self, id, filename):
"""
Validate signature
Accepts storage id and a filename and validates hash signature.
Signatures are used to prevent brute force attack against
resizing service.
:param id: string - storage id
:param filename: - string, resize filename
:return:
"""
parts = filename.split('-')
if len(parts) != 5:
return False
extension = parts[4][parts[4].index('.'):]
non_signed_filename = '-'.join(parts[:4]) + extension
signature = parts[4].replace(extension, '')
return signature == self.generate_signature(id, non_signed_filename)
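    # Minimal round-trip sketch (illustrative values, not used elsewhere in
    # this module):
    #   pb = PathBuilder(secret_key='secret')
    #   fname = '100x200-fit-65-upscale.jpg'
    #   sig = pb.generate_signature('some-storage-id', fname)
    #   signed = '100x200-fit-65-upscale-' + sig + '.jpg'
    #   pb.validate_signature('some-storage-id', signed)  # -> True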
def get_auto_crop_filename(
self,
id,
size,
factor,
output_format=None,
upscale=True,
quality=65
):
"""
Get auto crop filename
Encodes parameters for automatic cropping/resizing into a filename.
Resulting filename will contain hash signature.
:param id: string - storage id (used to generate signature)
:param size: string - width x height
:param factor: string - crop factor, fit/fill
:param output_format: string - output format
:param upscale: bool - enlarge smaller original
:param quality: string - differs per format. i.e. 0-100 for jpg
:return: string - signed filename
"""
# validate size
err = False
dimensions = size.lower().split('x')
if len(dimensions) != 2:
err = True
for dimension in dimensions:
if not dimension.isdigit() or int(dimension) <= 0:
err = True
if err:
err = 'Invalid size provided must be in 100x200 format'
raise x.InvalidArgumentException(err)
# validate factor
if factor not in ['fit', 'fill']:
err = 'Auto crop factor must be either fit or fill'
raise x.InvalidArgumentException(err)
# validate quality
if not str(quality).isdigit():
err = 'Quality must be numeric'
raise x.InvalidArgumentException(err)
# guess format from original if not specified
if not output_format:
parts = id.split('-')
output_format= parts[5][parts[5].index('.') + 1:]
# prepare upscale
upscale = 'upscale' if bool(upscale) else 'noupscale'
        # create filename
schema = '{size}-{factor}-{quality}-{upscale}.{format}'
signed_schema = '{size}-{factor}-{quality}-{upscale}-{sig}.{format}'
params = dict(
size=size,
factor=factor,
quality=quality,
upscale=upscale,
format=output_format
)
        unsigned_filename = schema.format(**params)
        # sign
        params['sig'] = self.generate_signature(id, unsigned_filename)
signed_filename = signed_schema.format(**params)
return signed_filename
def get_manual_crop_filename(
self,
id,
sample_size,
target_size,
output_format=None,
upscale=True,
quality=65
):
"""
Get manual crop filename
Encodes parameters for automatic cropping/resizing into a filename.
Resulting filename will contain hash signature.
:param id: string - storage id (used to generate signature)
:param target_size: string - width x height
:param sample_size: string - width x height, must be proportional
:param output_format: string - output format
:param upscale: bool - enlarge smaller original
:param quality: string - differs per format. i.e. 0-100 for jpg
:return: string - signed filename
"""
# validate sample size
err = False
sample_dimensions = sample_size.lower().split('x')
if len(sample_dimensions) != 2:
err = True
for dimension in sample_dimensions:
if not dimension.isdigit() or int(dimension) <= 0:
err = True
if err:
err = 'Invalid sample size provided must be in 100x200 format'
raise x.InvalidArgumentException(err)
# validate target size
err = False
target_dimensions = target_size.lower().split('x')
if len(target_dimensions) != 2:
err = True
for dimension in target_dimensions:
if not dimension.isdigit() or int(dimension) <= 0:
err = True
if err:
err = 'Invalid target size provided must be in 100x200 format'
raise x.InvalidArgumentException(err)
# validate sample and target sizes being proportional
sw = int(sample_dimensions[0])
sh = int(sample_dimensions[1])
tw = int(target_dimensions[0])
th = int(target_dimensions[1])
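        # note: the check below uses exact float equality, so the sample and
        # target sizes must be precisely proportional; near-proportional
        # sizes are rejected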
if (sw/sh) != (tw/th):
err = 'Sample size and target size must be proportional'
raise x.InvalidArgumentException(err)
# validate quality
if not str(quality).isdigit():
err = 'Quality must be numeric'
raise x.InvalidArgumentException(err)
# guess format from original if not specified
if not output_format:
parts = id.split('-')
output_format= parts[5][parts[5].index('.') + 1:]
# prepare upscale
upscale = 'upscale' if bool(upscale) else 'noupscale'
# initial filename
schema = '{target}-{sample}-{quality}-{upscale}.{format}'
signed_schema = '{target}-{sample}-{quality}-{upscale}-{sig}.{format}'
params = dict(
sample=sample_size,
target=target_size,
quality=quality,
upscale=upscale,
format=output_format
)
        unsigned_filename = schema.format(**params)
        # sign
        params['sig'] = self.generate_signature(id, unsigned_filename)
signed_filename = signed_schema.format(**params)
return signed_filename
def filename_to_resize_params(self, id, filename):
"""
Filename to parameters
Parses resize filename to a set of usable parameters. Will perform
filename signature checking and throw an exception if requested
resize filename is malformed.
:param id: string - unique storage id
:param filename: string - resize filename
:return: dict of parameters
"""
# validate signature
if not self.validate_signature(id, filename):
err = 'Unable to parse filename: bad signature'
raise x.InvalidArgumentException(err)
# get parts
parts = filename.split('-')
        target_size, sample_size, quality, upscale, rest = parts
target_format = rest[rest.index('.') + 1:]
# detect manual/auto
if sample_size in ['fill', 'fit']:
resize = 'auto'
else:
err = False
sample_size = sample_size.split('x')
for dimension in sample_size:
if not dimension.isdigit() or int(dimension) <= 0:
err = True
break
if err or len(sample_size) != 2:
err = 'Unable to parse filename: bad sample size or crop factor'
raise x.InvalidArgumentException(err)
else:
resize = 'manual'
# validate size
err = False
target_size = target_size.split('x')
for dimension in target_size:
if not dimension.isdigit() or int(dimension) <= 0:
err = True
break
if err or len(target_size) != 2:
err = 'Unable to parse filename: bad target size'
raise x.InvalidArgumentException(err)
# validate quality
if not str(quality).isdigit():
err = 'Quality must be numeric'
raise x.InvalidArgumentException(err)
# prepare upscale
upscale = True if upscale == 'upscale' else False
# prepare result
result = dict(
id=id,
resize_mode=resize,
target_size='x'.join(target_size),
output_format=target_format,
quality=int(quality),
filename=filename,
upscale=upscale
)
if resize == 'auto':
result['factor'] = sample_size
if resize == 'manual':
result['sample_size'] = 'x'.join(sample_size)
return result
| projectshift/shift-media | shiftmedia/paths.py | Python | mit | 9,544 |
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for classes and methods relating to user rights."""
from core.domain import collection_services
from core.domain import exp_domain
from core.domain import exp_services
from core.domain import rights_manager
from core.tests import test_utils
import feconf
class ExplorationRightsTests(test_utils.GenericTestBase):
"""Test that rights for actions on explorations work as expected."""
EXP_ID = 'exp_id'
def setUp(self):
super(ExplorationRightsTests, self).setUp()
        self.signup('[email protected]', 'A')
        self.signup('[email protected]', 'B')
        self.signup('[email protected]', 'C')
        self.signup('[email protected]', 'D')
        self.signup('[email protected]', 'E')
self.signup(self.ADMIN_EMAIL, username=self.ADMIN_USERNAME)
        self.user_id_a = self.get_user_id_from_email('[email protected]')
        self.user_id_b = self.get_user_id_from_email('[email protected]')
        self.user_id_c = self.get_user_id_from_email('[email protected]')
        self.user_id_d = self.get_user_id_from_email('[email protected]')
        self.user_id_e = self.get_user_id_from_email('[email protected]')
self.user_id_admin = self.get_user_id_from_email(self.ADMIN_EMAIL)
self.set_admins([self.ADMIN_USERNAME])
def test_get_exploration_rights_for_nonexistent_exploration(self):
non_exp_id = 'this_exp_does_not_exist_id'
with self.assertRaisesRegexp(
Exception,
'Entity for class ExplorationRightsModel with id '
'this_exp_does_not_exist_id not found'
):
rights_manager.get_exploration_rights(non_exp_id)
self.assertIsNone(
rights_manager.get_exploration_rights(non_exp_id, strict=False))
def test_demo_exploration(self):
exp_services.load_demo('1')
rights_manager.release_ownership_of_exploration(
feconf.SYSTEM_COMMITTER_ID, '1')
self.assertTrue(
rights_manager.Actor(self.user_id_a).can_play(
feconf.ACTIVITY_TYPE_EXPLORATION, '1'))
self.assertTrue(
rights_manager.Actor(self.user_id_a).can_view(
feconf.ACTIVITY_TYPE_EXPLORATION, '1'))
self.assertTrue(
rights_manager.Actor(self.user_id_a).can_edit(
feconf.ACTIVITY_TYPE_EXPLORATION, '1'))
self.assertFalse(
rights_manager.Actor(self.user_id_a).can_delete(
feconf.ACTIVITY_TYPE_EXPLORATION, '1'))
self.assertTrue(
rights_manager.Actor(self.user_id_admin).can_play(
feconf.ACTIVITY_TYPE_EXPLORATION, '1'))
self.assertTrue(
rights_manager.Actor(self.user_id_admin).can_view(
feconf.ACTIVITY_TYPE_EXPLORATION, '1'))
self.assertTrue(
rights_manager.Actor(self.user_id_admin).can_edit(
feconf.ACTIVITY_TYPE_EXPLORATION, '1'))
self.assertTrue(
rights_manager.Actor(self.user_id_admin).can_delete(
feconf.ACTIVITY_TYPE_EXPLORATION, '1'))
def test_non_splash_page_demo_exploration(self):
# Note: there is no difference between permissions for demo
# explorations, whether or not they are on the splash page.
exp_services.load_demo('3')
rights_manager.release_ownership_of_exploration(
feconf.SYSTEM_COMMITTER_ID, '3')
self.assertTrue(
rights_manager.Actor(self.user_id_a).can_play(
feconf.ACTIVITY_TYPE_EXPLORATION, '3'))
self.assertTrue(
rights_manager.Actor(self.user_id_a).can_view(
feconf.ACTIVITY_TYPE_EXPLORATION, '3'))
self.assertTrue(rights_manager.Actor(
self.user_id_a).can_edit(
feconf.ACTIVITY_TYPE_EXPLORATION, '3'))
self.assertFalse(rights_manager.Actor(
self.user_id_a).can_delete(
feconf.ACTIVITY_TYPE_EXPLORATION, '3'))
self.assertTrue(
rights_manager.Actor(self.user_id_admin).can_play(
feconf.ACTIVITY_TYPE_EXPLORATION, '3'))
self.assertTrue(
rights_manager.Actor(self.user_id_admin).can_view(
feconf.ACTIVITY_TYPE_EXPLORATION, '3'))
self.assertTrue(rights_manager.Actor(
self.user_id_admin).can_edit(
feconf.ACTIVITY_TYPE_EXPLORATION, '3'))
self.assertTrue(
rights_manager.Actor(self.user_id_admin).can_delete(
feconf.ACTIVITY_TYPE_EXPLORATION, '3'))
def test_ownership_of_exploration(self):
exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID)
exp_services.save_new_exploration(self.user_id_a, exp)
rights_manager.assign_role_for_exploration(
self.user_id_a, self.EXP_ID, self.user_id_b,
rights_manager.ROLE_EDITOR)
self.assertTrue(
rights_manager.Actor(self.user_id_a).is_owner(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
self.assertFalse(
rights_manager.Actor(self.user_id_b).is_owner(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
self.assertFalse(
rights_manager.Actor(self.user_id_admin).is_owner(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
def test_newly_created_exploration(self):
exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID)
exp_services.save_new_exploration(self.user_id_a, exp)
self.assertTrue(
rights_manager.Actor(self.user_id_a).can_play(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
self.assertTrue(
rights_manager.Actor(self.user_id_a).can_view(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
self.assertTrue(
rights_manager.Actor(self.user_id_a).can_edit(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
self.assertTrue(
rights_manager.Actor(self.user_id_a).can_delete(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
self.assertTrue(
rights_manager.Actor(self.user_id_admin).can_play(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
self.assertTrue(
rights_manager.Actor(self.user_id_admin).can_view(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
self.assertFalse(
rights_manager.Actor(self.user_id_admin).can_edit(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
self.assertFalse(
rights_manager.Actor(self.user_id_admin).can_delete(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_play(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_view(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_edit(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_delete(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
def test_inviting_collaborator_to_exploration(self):
exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID)
exp_services.save_new_exploration(self.user_id_a, exp)
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_play(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_view(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_edit(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_delete(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
rights_manager.assign_role_for_exploration(
self.user_id_a, self.EXP_ID, self.user_id_b,
rights_manager.ROLE_EDITOR)
self.assertTrue(
rights_manager.Actor(self.user_id_b).can_play(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
self.assertTrue(
rights_manager.Actor(self.user_id_b).can_view(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
self.assertTrue(
rights_manager.Actor(self.user_id_b).can_edit(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_delete(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
def test_inviting_playtester_to_exploration(self):
exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID)
exp_services.save_new_exploration(self.user_id_a, exp)
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_play(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_view(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_edit(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_delete(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
rights_manager.assign_role_for_exploration(
self.user_id_a, self.EXP_ID, self.user_id_b,
rights_manager.ROLE_VIEWER)
self.assertTrue(
rights_manager.Actor(self.user_id_b).can_play(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
self.assertTrue(
rights_manager.Actor(self.user_id_b).can_view(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_edit(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_delete(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
def test_setting_rights_of_exploration(self):
exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID)
exp_services.save_new_exploration(self.user_id_a, exp)
rights_manager.assign_role_for_exploration(
self.user_id_a, self.EXP_ID, self.user_id_b,
rights_manager.ROLE_VIEWER)
with self.assertRaisesRegexp(Exception, 'Could not assign new role.'):
rights_manager.assign_role_for_exploration(
self.user_id_b, self.EXP_ID, self.user_id_c,
rights_manager.ROLE_VIEWER)
rights_manager.assign_role_for_exploration(
self.user_id_a, self.EXP_ID, self.user_id_b,
rights_manager.ROLE_EDITOR)
with self.assertRaisesRegexp(Exception, 'Could not assign new role.'):
rights_manager.assign_role_for_exploration(
self.user_id_b, self.EXP_ID, self.user_id_c,
rights_manager.ROLE_VIEWER)
rights_manager.assign_role_for_exploration(
self.user_id_a, self.EXP_ID, self.user_id_b,
rights_manager.ROLE_OWNER)
rights_manager.assign_role_for_exploration(
self.user_id_b, self.EXP_ID, self.user_id_c,
rights_manager.ROLE_OWNER)
rights_manager.assign_role_for_exploration(
self.user_id_b, self.EXP_ID, self.user_id_d,
rights_manager.ROLE_EDITOR)
rights_manager.assign_role_for_exploration(
self.user_id_b, self.EXP_ID, self.user_id_e,
rights_manager.ROLE_VIEWER)
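        # Descriptive note: the sequence above exercises the role-assignment
        # rule that only owners may assign roles; viewers and editors cannot,
        # and a delegated owner (user B) can then assign roles in turn.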
def test_publishing_and_unpublishing_exploration(self):
exp = exp_domain.Exploration.create_default_exploration(
self.EXP_ID, title='A title', category='A category')
exp_services.save_new_exploration(self.user_id_a, exp)
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_play(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_view(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
rights_manager.publish_exploration(self.user_id_a, self.EXP_ID)
self.assertTrue(
rights_manager.Actor(self.user_id_b).can_play(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
self.assertTrue(
rights_manager.Actor(self.user_id_b).can_view(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
self.assertFalse(
rights_manager.Actor(self.user_id_a).can_unpublish(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
rights_manager.unpublish_exploration(self.user_id_admin, self.EXP_ID)
self.assertTrue(
rights_manager.Actor(self.user_id_a).can_play(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
self.assertTrue(
rights_manager.Actor(self.user_id_a).can_view(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_play(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_view(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
def test_can_only_delete_unpublished_explorations(self):
exp = exp_domain.Exploration.create_default_exploration(
self.EXP_ID, title='A title', category='A category')
exp_services.save_new_exploration(self.user_id_a, exp)
self.assertTrue(
rights_manager.Actor(self.user_id_a).can_delete(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
rights_manager.publish_exploration(self.user_id_a, self.EXP_ID)
self.assertFalse(
rights_manager.Actor(self.user_id_a).can_delete(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
rights_manager.unpublish_exploration(self.user_id_admin, self.EXP_ID)
self.assertTrue(
rights_manager.Actor(self.user_id_a).can_delete(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
def test_can_publicize_exploration(self):
exp = exp_domain.Exploration.create_default_exploration(
self.EXP_ID, title='A title', category='A category')
exp_services.save_new_exploration(self.user_id_a, exp)
rights_manager.publish_exploration(self.user_id_a, self.EXP_ID)
self.assertFalse(
rights_manager.Actor(self.user_id_a).can_publicize(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
self.assertTrue(
rights_manager.Actor(self.user_id_admin).can_publicize(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
def test_changing_viewability_of_exploration(self):
exp = exp_domain.Exploration.create_default_exploration(
self.EXP_ID, title='A title', category='A category')
exp_services.save_new_exploration(self.user_id_a, exp)
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_view(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
self.assertTrue(rights_manager.Actor(
self.user_id_a).can_change_private_viewability(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
self.assertFalse(rights_manager.Actor(
self.user_id_b).can_change_private_viewability(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
self.assertTrue(rights_manager.Actor(
self.user_id_admin).can_change_private_viewability(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
with self.assertRaisesRegexp(Exception, 'already the current value'):
rights_manager.set_private_viewability_of_exploration(
self.user_id_a, self.EXP_ID, False)
with self.assertRaisesRegexp(Exception, 'cannot be changed'):
rights_manager.set_private_viewability_of_exploration(
self.user_id_b, self.EXP_ID, True)
rights_manager.set_private_viewability_of_exploration(
self.user_id_a, self.EXP_ID, True)
self.assertTrue(
rights_manager.Actor(self.user_id_a).can_view(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
self.assertTrue(
rights_manager.Actor(self.user_id_b).can_view(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
rights_manager.set_private_viewability_of_exploration(
self.user_id_a, self.EXP_ID, False)
self.assertTrue(
rights_manager.Actor(self.user_id_a).can_view(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_view(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
rights_manager.publish_exploration(self.user_id_a, self.EXP_ID)
self.assertFalse(rights_manager.Actor(
self.user_id_a).can_change_private_viewability(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
rights_manager.unpublish_exploration(self.user_id_admin, self.EXP_ID)
self.assertTrue(rights_manager.Actor(
self.user_id_a).can_change_private_viewability(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
self.assertFalse(rights_manager.Actor(
self.user_id_b).can_change_private_viewability(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
self.assertTrue(rights_manager.Actor(
self.user_id_admin).can_change_private_viewability(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
class CollectionRightsTests(test_utils.GenericTestBase):
"""Test that rights for actions on collections work as expected."""
COLLECTION_ID = 'collection_id'
EXP_ID_FOR_COLLECTION = 'exp_id_for_collection'
def setUp(self):
super(CollectionRightsTests, self).setUp()
self.signup('[email protected]', 'A')
self.signup('[email protected]', 'B')
self.signup('[email protected]', 'C')
self.signup('[email protected]', 'D')
self.signup('[email protected]', 'E')
self.signup(self.ADMIN_EMAIL, username=self.ADMIN_USERNAME)
self.user_id_a = self.get_user_id_from_email('[email protected]')
self.user_id_b = self.get_user_id_from_email('[email protected]')
self.user_id_c = self.get_user_id_from_email('[email protected]')
self.user_id_d = self.get_user_id_from_email('[email protected]')
self.user_id_e = self.get_user_id_from_email('[email protected]')
self.user_id_admin = self.get_user_id_from_email(self.ADMIN_EMAIL)
self.set_admins([self.ADMIN_USERNAME])
def test_get_collection_rights_for_nonexistent_collection(self):
non_col_id = 'this_collection_does_not_exist_id'
with self.assertRaisesRegexp(
Exception,
'Entity for class CollectionRightsModel with id '
'this_collection_does_not_exist_id not found'
):
rights_manager.get_collection_rights(non_col_id)
self.assertIsNone(
rights_manager.get_collection_rights(non_col_id, strict=False))
def test_demo_collection(self):
collection_services.load_demo('0')
rights_manager.release_ownership_of_collection(
feconf.SYSTEM_COMMITTER_ID, '0')
self.assertTrue(
rights_manager.Actor(self.user_id_a).can_play(
feconf.ACTIVITY_TYPE_COLLECTION, '0'))
self.assertTrue(
rights_manager.Actor(self.user_id_a).can_view(
feconf.ACTIVITY_TYPE_COLLECTION, '0'))
self.assertTrue(
rights_manager.Actor(self.user_id_a).can_edit(
feconf.ACTIVITY_TYPE_COLLECTION, '0'))
self.assertFalse(
rights_manager.Actor(self.user_id_a).can_delete(
feconf.ACTIVITY_TYPE_COLLECTION, '0'))
self.assertTrue(
rights_manager.Actor(self.user_id_admin).can_play(
feconf.ACTIVITY_TYPE_COLLECTION, '0'))
self.assertTrue(
rights_manager.Actor(self.user_id_admin).can_view(
feconf.ACTIVITY_TYPE_COLLECTION, '0'))
self.assertTrue(
rights_manager.Actor(self.user_id_admin).can_edit(
feconf.ACTIVITY_TYPE_COLLECTION, '0'))
self.assertTrue(
rights_manager.Actor(self.user_id_admin).can_delete(
feconf.ACTIVITY_TYPE_COLLECTION, '0'))
def test_ownership_of_collection(self):
self.save_new_default_collection(self.COLLECTION_ID, self.user_id_a)
rights_manager.assign_role_for_collection(
self.user_id_a, self.COLLECTION_ID, self.user_id_b,
rights_manager.ROLE_EDITOR)
self.assertListEqual(['A'],
rights_manager.get_collection_owner_names(
self.COLLECTION_ID))
self.assertTrue(
rights_manager.Actor(self.user_id_a).is_owner(
feconf.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
self.assertFalse(
rights_manager.Actor(self.user_id_b).is_owner(
feconf.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
self.assertFalse(
rights_manager.Actor(self.user_id_admin).is_owner(
feconf.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
def test_newly_created_collection(self):
self.save_new_default_collection(self.COLLECTION_ID, self.user_id_a)
self.assertListEqual(['A'],
rights_manager.get_collection_owner_names(
self.COLLECTION_ID))
self.assertTrue(
rights_manager.Actor(self.user_id_a).can_play(
feconf.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
self.assertTrue(
rights_manager.Actor(self.user_id_a).can_view(
feconf.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
self.assertTrue(
rights_manager.Actor(self.user_id_a).can_edit(
feconf.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
self.assertTrue(
rights_manager.Actor(self.user_id_a).can_delete(
feconf.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
self.assertTrue(
rights_manager.Actor(self.user_id_admin).can_play(
feconf.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
self.assertTrue(
rights_manager.Actor(self.user_id_admin).can_view(
feconf.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
self.assertFalse(
rights_manager.Actor(self.user_id_admin).can_edit(
feconf.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
self.assertFalse(
rights_manager.Actor(self.user_id_admin).can_delete(
feconf.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_play(
feconf.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_view(
feconf.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_edit(
feconf.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_delete(
feconf.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
def test_inviting_collaborator_to_collection(self):
self.save_new_valid_collection(
self.COLLECTION_ID, self.user_id_a,
exploration_id=self.EXP_ID_FOR_COLLECTION)
# Verify initial editor permissions for the collection.
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_play(
feconf.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_view(
feconf.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_edit(
feconf.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_delete(
feconf.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
# Verify initial editor permissions for the exploration within the
# collection.
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_play(
feconf.ACTIVITY_TYPE_EXPLORATION,
self.EXP_ID_FOR_COLLECTION))
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_view(
feconf.ACTIVITY_TYPE_EXPLORATION,
self.EXP_ID_FOR_COLLECTION))
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_edit(
feconf.ACTIVITY_TYPE_EXPLORATION,
self.EXP_ID_FOR_COLLECTION))
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_delete(
feconf.ACTIVITY_TYPE_EXPLORATION,
self.EXP_ID_FOR_COLLECTION))
# User A adds user B to the collection as an editor.
rights_manager.assign_role_for_collection(
self.user_id_a, self.COLLECTION_ID, self.user_id_b,
rights_manager.ROLE_EDITOR)
# Ensure User A is the only user in the owner names list.
self.assertListEqual(['A'],
rights_manager.get_collection_owner_names(
self.COLLECTION_ID))
# Ensure User B is now an editor of the collection.
self.assertTrue(
rights_manager.Actor(self.user_id_b).can_play(
feconf.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
self.assertTrue(
rights_manager.Actor(self.user_id_b).can_view(
feconf.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
self.assertTrue(
rights_manager.Actor(self.user_id_b).can_edit(
feconf.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_delete(
feconf.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
# Ensure User B is not an editor of the exploration within the
# collection.
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_play(
feconf.ACTIVITY_TYPE_EXPLORATION,
self.EXP_ID_FOR_COLLECTION))
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_view(
feconf.ACTIVITY_TYPE_EXPLORATION,
self.EXP_ID_FOR_COLLECTION))
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_edit(
feconf.ACTIVITY_TYPE_EXPLORATION,
self.EXP_ID_FOR_COLLECTION))
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_delete(
feconf.ACTIVITY_TYPE_EXPLORATION,
self.EXP_ID_FOR_COLLECTION))
def test_inviting_playtester_to_collection(self):
self.save_new_valid_collection(
self.COLLECTION_ID, self.user_id_a,
exploration_id=self.EXP_ID_FOR_COLLECTION)
# Verify initial viewer permissions for the collection.
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_play(
feconf.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_view(
feconf.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_edit(
feconf.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_delete(
feconf.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
# Verify initial viewer permissions for the exploration within the
# collection.
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_play(
feconf.ACTIVITY_TYPE_EXPLORATION,
self.EXP_ID_FOR_COLLECTION))
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_view(
feconf.ACTIVITY_TYPE_EXPLORATION,
self.EXP_ID_FOR_COLLECTION))
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_edit(
feconf.ACTIVITY_TYPE_EXPLORATION,
self.EXP_ID_FOR_COLLECTION))
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_delete(
feconf.ACTIVITY_TYPE_EXPLORATION,
self.EXP_ID_FOR_COLLECTION))
# User A adds user B to the collection as a viewer.
rights_manager.assign_role_for_collection(
self.user_id_a, self.COLLECTION_ID, self.user_id_b,
rights_manager.ROLE_VIEWER)
# Ensure User B is now a viewer of the collection.
self.assertTrue(
rights_manager.Actor(self.user_id_b).can_play(
feconf.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
self.assertTrue(
rights_manager.Actor(self.user_id_b).can_view(
feconf.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_edit(
feconf.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_delete(
feconf.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
# Ensure User B cannot view the exploration just because he/she has
# access to the collection containing it.
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_play(
feconf.ACTIVITY_TYPE_EXPLORATION,
self.EXP_ID_FOR_COLLECTION))
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_view(
feconf.ACTIVITY_TYPE_EXPLORATION,
self.EXP_ID_FOR_COLLECTION))
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_edit(
feconf.ACTIVITY_TYPE_EXPLORATION,
self.EXP_ID_FOR_COLLECTION))
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_delete(
feconf.ACTIVITY_TYPE_EXPLORATION,
self.EXP_ID_FOR_COLLECTION))
def test_setting_rights_of_collection(self):
self.save_new_default_collection(self.COLLECTION_ID, self.user_id_a)
rights_manager.assign_role_for_collection(
self.user_id_a, self.COLLECTION_ID, self.user_id_b,
rights_manager.ROLE_VIEWER)
with self.assertRaisesRegexp(Exception, 'Could not assign new role.'):
rights_manager.assign_role_for_collection(
self.user_id_b, self.COLLECTION_ID, self.user_id_c,
rights_manager.ROLE_VIEWER)
rights_manager.assign_role_for_collection(
self.user_id_a, self.COLLECTION_ID, self.user_id_b,
rights_manager.ROLE_EDITOR)
with self.assertRaisesRegexp(Exception, 'Could not assign new role.'):
rights_manager.assign_role_for_collection(
self.user_id_b, self.COLLECTION_ID, self.user_id_c,
rights_manager.ROLE_VIEWER)
rights_manager.assign_role_for_collection(
self.user_id_a, self.COLLECTION_ID, self.user_id_b,
rights_manager.ROLE_OWNER)
rights_manager.assign_role_for_collection(
self.user_id_b, self.COLLECTION_ID, self.user_id_c,
rights_manager.ROLE_OWNER)
rights_manager.assign_role_for_collection(
self.user_id_b, self.COLLECTION_ID, self.user_id_d,
rights_manager.ROLE_EDITOR)
rights_manager.assign_role_for_collection(
self.user_id_b, self.COLLECTION_ID, self.user_id_e,
rights_manager.ROLE_VIEWER)
def test_publishing_and_unpublishing_collection(self):
self.save_new_default_collection(self.COLLECTION_ID, self.user_id_a)
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_play(
feconf.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_view(
feconf.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
rights_manager.publish_collection(self.user_id_a, self.COLLECTION_ID)
self.assertTrue(
rights_manager.Actor(self.user_id_b).can_play(
feconf.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
self.assertTrue(
rights_manager.Actor(self.user_id_b).can_view(
feconf.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
self.assertFalse(
rights_manager.Actor(self.user_id_a).can_unpublish(
feconf.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
rights_manager.unpublish_collection(
self.user_id_admin, self.COLLECTION_ID)
self.assertTrue(
rights_manager.Actor(self.user_id_a).can_play(
feconf.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
self.assertTrue(
rights_manager.Actor(self.user_id_a).can_view(
feconf.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_play(
feconf.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_view(
feconf.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
def test_can_only_delete_unpublished_collections(self):
self.save_new_default_collection(self.COLLECTION_ID, self.user_id_a)
self.assertTrue(
rights_manager.Actor(self.user_id_a).can_delete(
feconf.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
rights_manager.publish_collection(self.user_id_a, self.COLLECTION_ID)
self.assertFalse(
rights_manager.Actor(self.user_id_a).can_delete(
feconf.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
rights_manager.unpublish_collection(
self.user_id_admin, self.COLLECTION_ID)
self.assertTrue(
rights_manager.Actor(self.user_id_a).can_delete(
feconf.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
def test_can_publicize_collection(self):
self.save_new_default_collection(self.COLLECTION_ID, self.user_id_a)
rights_manager.publish_collection(self.user_id_a, self.COLLECTION_ID)
self.assertFalse(
rights_manager.Actor(self.user_id_a).can_publicize(
feconf.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
self.assertTrue(
rights_manager.Actor(self.user_id_admin).can_publicize(
feconf.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
| amgowano/oppia | core/domain/rights_manager_test.py | Python | apache-2.0 | 36,355 |
from smtplib import SMTPException
import logging
from django.core.mail import send_mail
from django.conf import settings
logger = logging.getLogger(__name__)
def send_email(user_objects_to_send_to, subject, message):
if not settings.DEVILRY_SEND_EMAIL_TO_USERS:
return
message += "\n\n--\n"
message += settings.DEVILRY_EMAIL_SIGNATURE
emails = []
for u in user_objects_to_send_to:
        if u.email is None or u.email.strip() == '':
errmsg = "User {0} has no email address.".format(u.username)
logger.error(errmsg)
else:
emails.append(u.email)
subject = settings.EMAIL_SUBJECT_PREFIX + subject
try:
send_mail(subject, message, settings.DEVILRY_EMAIL_DEFAULT_FROM,
emails, fail_silently=False)
    except SMTPException as e:
        errormsg = ('SMTPException when sending email to users {users} on addresses {emails}. '
                    'Exception: {exception}'.format(users=','.join([u.username for u in user_objects_to_send_to]),
                                                    emails=','.join(emails),
                                                    exception=e))
        logger.error(errormsg)
else:
if settings.DEBUG:
logger.debug('Email sent to: {emails}\nSubject: {subject}\n'
'Body:\n{message}'.format(emails = ','.join(emails),
subject = subject,
message = message))
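# Example usage (illustrative sketch, not part of the original module; assumes
# Django settings are configured and `users` is an iterable of user objects
# with `username` and `email` attributes):
#
#     send_email(users, 'Assignment published',
#                'Your assignment is now open for deliveries.')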
| vegarang/devilry-django | devilry/utils/devilry_email.py | Python | bsd-3-clause | 1,476 |
import datetime
import pytest
from pretix.base.models import Event, EventPermission, Organizer, User
@pytest.fixture
def env(client):
orga = Organizer.objects.create(name='CCC', slug='ccc')
event = Event.objects.create(
organizer=orga, name='30C3', slug='30c3',
date_from=datetime.datetime(2013, 12, 26, tzinfo=datetime.timezone.utc),
plugins='pretix.plugins.stripe',
live=True
)
event.settings.set('attendee_names_asked', False)
event.settings.set('payment_stripe__enabled', True)
user = User.objects.create_user('[email protected]', 'dummy')
EventPermission.objects.create(user=user, event=event, can_change_settings=True)
client.force_login(user)
return client, event
@pytest.mark.django_db
def test_settings(env):
client, event = env
response = client.get('/control/event/%s/%s/settings/payment' % (event.organizer.slug, event.slug), follow=True)
assert response.status_code == 200
assert 'stripe__enabled' in response.rendered_content
| Flamacue/pretix | src/tests/plugins/stripe/test_settings.py | Python | apache-2.0 | 1,030 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Event.in_menus'
db.add_column(u'calendar_event', 'in_menus',
self.gf('mezzanine.pages.fields.MenusField')(default=(1, 2, 3, 4, 5), max_length=100, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Event.in_menus'
db.delete_column(u'calendar_event', 'in_menus')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'calendar.dummytable': {
'Meta': {'object_name': 'DummyTable'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'calendar.event': {
'Meta': {'object_name': 'Event'},
'_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'events'", 'to': u"orm['sfpirgapp.Category']"}),
'content': ('mezzanine.core.fields.RichTextField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'featured_image': ('mezzanine.core.fields.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_menus': ('mezzanine.pages.fields.MenusField', [], {'default': '(1, 2, 3, 4, 5)', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'in_sitemap': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'keywords': ('mezzanine.generic.fields.KeywordsField', [], {'object_id_field': "'object_pk'", 'to': u"orm['generic.AssignedKeyword']", 'frozen_by_south': 'True'}),
'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'theme_color': ('django.db.models.fields.CharField', [], {'default': "'grey'", 'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['calendar.EventType']", 'null': 'True', 'blank': 'True'}),
'zip_import': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'calendar.eventimage': {
'Meta': {'ordering': "('_order',)", 'object_name': 'EventImage'},
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'blank': 'True'}),
'file': ('mezzanine.core.fields.FileField', [], {'max_length': '200'}),
'gallery': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'images'", 'to': u"orm['calendar.Event']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'calendar.eventtype': {
'Meta': {'ordering': "['name']", 'object_name': 'EventType'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'generic.assignedkeyword': {
'Meta': {'ordering': "('_order',)", 'object_name': 'AssignedKeyword'},
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keyword': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'assignments'", 'to': u"orm['generic.Keyword']"}),
'object_pk': ('django.db.models.fields.IntegerField', [], {})
},
u'generic.keyword': {
'Meta': {'object_name': 'Keyword'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
u'sfpirgapp.category': {
'Meta': {'ordering': "('_order',)", 'object_name': 'Category'},
'_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'content': ('mezzanine.core.fields.RichTextField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'featured_image': ('sfpirgapp.fields.MyImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_sitemap': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'keywords': ('mezzanine.generic.fields.KeywordsField', [], {'object_id_field': "'object_pk'", 'to': u"orm['generic.AssignedKeyword']", 'frozen_by_south': 'True'}),
'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'theme_color': ('django.db.models.fields.CharField', [], {'default': "'grey'", 'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'titles': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'categorys'", 'to': u"orm['auth.User']"})
},
u'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['calendar'] | orlenko/sfpirg | mezzanine/calendar/migrations/0005_auto__add_field_event_in_menus.py | Python | bsd-2-clause | 11,513 |
# Copyright (c) 2013 Nicolas Dandrimont <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import sys
from functools import wraps
from rply import ParserGenerator
from hy.models.complex import HyComplex
from hy.models.cons import HyCons
from hy.models.dict import HyDict
from hy.models.expression import HyExpression
from hy.models.float import HyFloat
from hy.models.integer import HyInteger
from hy.models.keyword import HyKeyword
from hy.models.list import HyList
from hy.models.set import HySet
from hy.models.string import HyString
from hy.models.symbol import HySymbol
from .lexer import lexer
from .exceptions import LexException, PrematureEndOfInput
pg = ParserGenerator(
[rule.name for rule in lexer.rules] + ['$end'],
cache_id="hy_parser"
)
def set_boundaries(fun):
@wraps(fun)
def wrapped(p):
start = p[0].source_pos
end = p[-1].source_pos
ret = fun(p)
ret.start_line = start.lineno
ret.start_column = start.colno
if start is not end:
ret.end_line = end.lineno
ret.end_column = end.colno
else:
ret.end_line = start.lineno
ret.end_column = start.colno + len(p[0].value)
return ret
return wrapped
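# Illustrative effect of the decorator above: after parsing "[1 2]", the
# resulting HyList carries start/end line and column attributes spanning the
# brackets, which later stages can use for error reporting.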
def set_quote_boundaries(fun):
@wraps(fun)
def wrapped(p):
start = p[0].source_pos
ret = fun(p)
ret.start_line = start.lineno
ret.start_column = start.colno
ret.end_line = p[-1].end_line
ret.end_column = p[-1].end_column
return ret
return wrapped
@pg.production("main : HASHBANG real_main")
def main_hashbang(p):
return p[1]
@pg.production("main : real_main")
def main(p):
return p[0]
@pg.production("real_main : list_contents")
def real_main(p):
return p[0]
@pg.production("real_main : $end")
def real_main_empty(p):
return []
def reject_spurious_dots(*items):
"Reject the spurious dots from items"
    for item in items:
        for tok in item:
if tok == "." and type(tok) == HySymbol:
raise LexException("Malformed dotted list",
tok.start_line, tok.start_column)
@pg.production("paren : LPAREN list_contents RPAREN")
@set_boundaries
def paren(p):
cont = p[1]
# Dotted lists are expressions of the form
# (a b c . d)
# that evaluate to nested cons cells of the form
# (a . (b . (c . d)))
if len(cont) >= 3 and isinstance(cont[-2], HySymbol) and cont[-2] == ".":
reject_spurious_dots(cont[:-2], cont[-1:])
if len(cont) == 3:
# Two-item dotted list: return the cons cell directly
return HyCons(cont[0], cont[2])
else:
# Return a nested cons cell
return HyCons(cont[0], paren([p[0], cont[1:], p[2]]))
# Warn preemptively on a malformed dotted list.
# Only check for dots after the first item to allow for a potential
# attribute accessor shorthand
reject_spurious_dots(cont[1:])
return HyExpression(p[1])
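# Illustrative results of the dotted-list handling above: "(a . b)" parses to
# HyCons(a, b), "(a b . c)" to HyCons(a, HyCons(b, c)), while a proper list
# "(a b c)" remains a flat HyExpression of three symbols.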
@pg.production("paren : LPAREN RPAREN")
@set_boundaries
def empty_paren(p):
return HyExpression([])
@pg.production("list_contents : term list_contents")
def list_contents(p):
return [p[0]] + p[1]
@pg.production("list_contents : term")
def list_contents_single(p):
return [p[0]]
@pg.production("term : identifier")
@pg.production("term : paren")
@pg.production("term : dict")
@pg.production("term : list")
@pg.production("term : set")
@pg.production("term : string")
def term(p):
return p[0]
@pg.production("term : QUOTE term")
@set_quote_boundaries
def term_quote(p):
return HyExpression([HySymbol("quote"), p[1]])
@pg.production("term : QUASIQUOTE term")
@set_quote_boundaries
def term_quasiquote(p):
return HyExpression([HySymbol("quasiquote"), p[1]])
@pg.production("term : UNQUOTE term")
@set_quote_boundaries
def term_unquote(p):
return HyExpression([HySymbol("unquote"), p[1]])
@pg.production("term : UNQUOTESPLICE term")
@set_quote_boundaries
def term_unquote_splice(p):
return HyExpression([HySymbol("unquote_splice"), p[1]])
@pg.production("term : HASHREADER term")
@set_quote_boundaries
def hash_reader(p):
st = p[0].getstr()[1]
str_object = HyString(st)
expr = p[1]
return HyExpression([HySymbol("dispatch_reader_macro"), str_object, expr])
@pg.production("set : HLCURLY list_contents RCURLY")
@set_boundaries
def t_set(p):
return HySet(p[1])
@pg.production("set : HLCURLY RCURLY")
@set_boundaries
def empty_set(p):
return HySet([])
@pg.production("dict : LCURLY list_contents RCURLY")
@set_boundaries
def t_dict(p):
return HyDict(p[1])
@pg.production("dict : LCURLY RCURLY")
@set_boundaries
def empty_dict(p):
return HyDict([])
@pg.production("list : LBRACKET list_contents RBRACKET")
@set_boundaries
def t_list(p):
return HyList(p[1])
@pg.production("list : LBRACKET RBRACKET")
@set_boundaries
def t_empty_list(p):
return HyList([])
if sys.version_info[0] >= 3:
def uni_hystring(s):
return HyString(eval(s))
else:
def uni_hystring(s):
return HyString(eval('u'+s))
@pg.production("string : STRING")
@set_boundaries
def t_string(p):
# remove trailing quote
s = p[0].value[:-1]
# get the header
header, s = s.split('"', 1)
# remove unicode marker
header = header.replace("u", "")
# build python string
s = header + '"""' + s + '"""'
return uni_hystring(s)
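# Illustrative walk-through of the steps above: a STRING token '"hi"' is
# rebuilt as '"""hi"""' (with a u prefix on Python 2) and evaluated, so
# escape sequences in the source behave like Python string escapes.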
@pg.production("string : PARTIAL_STRING")
def t_partial_string(p):
# Any unterminated string requires more input
raise PrematureEndOfInput("Premature end of input")
@pg.production("identifier : IDENTIFIER")
@set_boundaries
def t_identifier(p):
obj = p[0].value
try:
return HyInteger(obj)
except ValueError:
pass
try:
return HyFloat(obj)
except ValueError:
pass
if obj != 'j':
try:
return HyComplex(obj)
except ValueError:
pass
table = {
"true": "True",
"false": "False",
"nil": "None",
"null": "None",
}
if obj in table:
return HySymbol(table[obj])
if obj.startswith(":"):
return HyKeyword(obj)
def mangle(p):
if p.startswith("*") and p.endswith("*") and p not in ("*", "**"):
p = p[1:-1].upper()
if "-" in p and p != "-":
p = p.replace("-", "_")
if p.endswith("?") and p != "?":
p = "is_%s" % (p[:-1])
return p
obj = ".".join([mangle(part) for part in obj.split(".")])
return HySymbol(obj)
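# Mangling examples (illustrative): "*foo*" -> "FOO", "foo-bar" -> "foo_bar",
# "foo?" -> "is_foo"; dotted names are mangled per component, e.g.
# "module.some-fn?" -> "module.is_some_fn".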
@pg.error
def error_handler(token):
tokentype = token.gettokentype()
if tokentype == '$end':
raise PrematureEndOfInput("Premature end of input")
else:
raise LexException(
"Ran into a %s where it wasn't expected." % tokentype,
token.source_pos.lineno, token.source_pos.colno)
parser = pg.build()
| zackmdavis/hy | hy/lex/parser.py | Python | mit | 8,013 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
from libcloud.utils.py3 import httplib
from libcloud.common.types import InvalidCredsError, LibcloudError
from libcloud.dns.types import RecordType, ZoneDoesNotExistError
from libcloud.dns.types import RecordDoesNotExistError
from libcloud.dns.drivers.zerigo import ZerigoDNSDriver, ZerigoError
from libcloud.test import MockHttp
from libcloud.test.file_fixtures import DNSFileFixtures
from libcloud.test.secrets import DNS_PARAMS_ZERIGO
class ZerigoTests(unittest.TestCase):
def setUp(self):
ZerigoDNSDriver.connectionCls.conn_class = ZerigoMockHttp
ZerigoMockHttp.type = None
self.driver = ZerigoDNSDriver(*DNS_PARAMS_ZERIGO)
def test_invalid_credentials(self):
ZerigoMockHttp.type = 'INVALID_CREDS'
try:
list(self.driver.list_zones())
except InvalidCredsError:
pass
else:
self.fail('Exception was not thrown')
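    # Note: the try/except/else pattern used throughout this class simply
    # fails the test when the expected exception is not raised; it predates
    # the assertRaises context manager idiom.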
def test_list_record_types(self):
record_types = self.driver.list_record_types()
self.assertEqual(len(record_types), 13)
self.assertTrue(RecordType.A in record_types)
def test_list_zones_success(self):
zones = self.driver.list_zones()
self.assertEqual(len(zones), 1)
self.assertEqual(zones[0].domain, 'example.com')
self.assertEqual(zones[0].type, 'master')
self.assertEqual(zones[0].extra['notes'], 'test foo bar')
def test_list_zones_no_results(self):
ZerigoMockHttp.type = 'NO_RESULTS'
zones = self.driver.list_zones()
self.assertEqual(len(zones), 0)
def test_list_records_success(self):
zone = self.driver.list_zones()[0]
records = list(self.driver.list_records(zone=zone))
self.assertEqual(len(records), 4)
self.assertEqual(records[0].name, 'www')
self.assertEqual(records[0].type, RecordType.A)
self.assertEqual(records[0].data, '172.16.16.1')
self.assertEqual(records[0].extra['fqdn'], 'www.example.com')
self.assertIsNone(records[0].extra['notes'])
self.assertIsNone(records[0].extra['priority'])
self.assertEqual(records[1].name, 'test')
self.assertEqual(records[1].extra['ttl'], 3600)
def test_record_with_empty_name(self):
zone = self.driver.list_zones()[0]
record1 = list(self.driver.list_records(zone=zone))[-1]
record2 = list(self.driver.list_records(zone=zone))[-2]
self.assertIsNone(record1.name)
self.assertIsNone(record2.name)
def test_list_records_no_results(self):
zone = self.driver.list_zones()[0]
ZerigoMockHttp.type = 'NO_RESULTS'
records = list(self.driver.list_records(zone=zone))
self.assertEqual(len(records), 0)
def test_list_records_zone_does_not_exist(self):
zone = self.driver.list_zones()[0]
ZerigoMockHttp.type = 'ZONE_DOES_NOT_EXIST'
try:
list(self.driver.list_records(zone=zone))
except ZoneDoesNotExistError:
e = sys.exc_info()[1]
self.assertEqual(e.zone_id, zone.id)
else:
self.fail('Exception was not thrown')
def test_get_zone_success(self):
zone = self.driver.get_zone(zone_id=12345678)
self.assertEqual(zone.id, '12345678')
self.assertEqual(zone.domain, 'example.com')
self.assertEqual(zone.extra['hostmaster'], '[email protected]')
self.assertEqual(zone.type, 'master')
def test_get_zone_does_not_exist(self):
ZerigoMockHttp.type = 'DOES_NOT_EXIST'
try:
self.driver.get_zone(zone_id='4444')
except ZoneDoesNotExistError:
e = sys.exc_info()[1]
self.assertEqual(e.zone_id, '4444')
else:
self.fail('Exception was not thrown')
def test_get_record_success(self):
record = self.driver.get_record(zone_id='12345678',
record_id='23456789')
self.assertEqual(record.id, '23456789')
self.assertEqual(record.name, 'www')
self.assertEqual(record.type, RecordType.A)
def test_get_record_zone_does_not_exist(self):
ZerigoMockHttp.type = 'ZONE_DOES_NOT_EXIST'
try:
self.driver.get_record(zone_id='444', record_id='28536')
except ZoneDoesNotExistError:
pass
else:
self.fail('Exception was not thrown')
def test_get_record_record_does_not_exist(self):
ZerigoMockHttp.type = 'RECORD_DOES_NOT_EXIST'
try:
self.driver.get_record(zone_id='12345678',
record_id='28536')
except RecordDoesNotExistError:
pass
else:
self.fail('Exception was not thrown')
def test_create_zone_success(self):
ZerigoMockHttp.type = 'CREATE_ZONE'
zone = self.driver.create_zone(domain='foo.bar.com', type='master',
ttl=None, extra=None)
self.assertEqual(zone.id, '12345679')
self.assertEqual(zone.domain, 'foo.bar.com')
    def test_create_zone_validation_error(self):
ZerigoMockHttp.type = 'CREATE_ZONE_VALIDATION_ERROR'
try:
self.driver.create_zone(domain='foo.bar.com', type='master',
ttl=10, extra=None)
except ZerigoError:
e = sys.exc_info()[1]
self.assertEqual(len(e.errors), 2)
else:
self.fail('Exception was not thrown')
def test_update_zone_success(self):
zone = self.driver.list_zones()[0]
updated_zone = self.driver.update_zone(zone=zone,
ttl=10,
extra={'notes':
'bar foo'})
self.assertEqual(zone.extra['notes'], 'test foo bar')
self.assertEqual(updated_zone.id, zone.id)
self.assertEqual(updated_zone.domain, 'example.com')
self.assertEqual(updated_zone.type, zone.type)
self.assertEqual(updated_zone.ttl, 10)
self.assertEqual(updated_zone.extra['notes'], 'bar foo')
def test_update_zone_domain_cannot_be_changed(self):
zone = self.driver.list_zones()[0]
try:
self.driver.update_zone(zone=zone, domain='libcloud.org')
except LibcloudError:
pass
else:
self.fail('Exception was not thrown')
def test_create_record_success(self):
zone = self.driver.list_zones()[0]
ZerigoMockHttp.type = 'CREATE_RECORD'
record = self.driver.create_record(name='www', zone=zone,
type=RecordType.A, data='127.0.0.1')
self.assertEqual(record.id, '23456780')
self.assertEqual(record.name, 'www')
self.assertEqual(record.zone, zone)
self.assertEqual(record.type, RecordType.A)
self.assertEqual(record.data, '127.0.0.1')
def test_update_record_success(self):
zone = self.driver.list_zones()[0]
record = self.driver.list_records(zone=zone)[0]
updated_record = self.driver.update_record(record=record, name='www',
type=RecordType.AAAA,
data='::1')
self.assertEqual(record.data, '172.16.16.1')
self.assertEqual(updated_record.id, record.id)
self.assertEqual(updated_record.name, 'www')
self.assertEqual(updated_record.zone, record.zone)
self.assertEqual(updated_record.type, RecordType.AAAA)
self.assertEqual(updated_record.data, '::1')
def test_delete_zone_success(self):
zone = self.driver.list_zones()[0]
status = self.driver.delete_zone(zone=zone)
self.assertTrue(status)
def test_delete_zone_does_not_exist(self):
zone = self.driver.list_zones()[0]
ZerigoMockHttp.type = 'ZONE_DOES_NOT_EXIST'
try:
self.driver.delete_zone(zone=zone)
except ZoneDoesNotExistError:
e = sys.exc_info()[1]
self.assertEqual(e.zone_id, zone.id)
else:
self.fail('Exception was not thrown')
def test_delete_record_success(self):
zone = self.driver.list_zones()[0]
record = self.driver.list_records(zone=zone)[0]
status = self.driver.delete_record(record=record)
self.assertTrue(status)
def test_delete_record_does_not_exist(self):
zone = self.driver.list_zones()[0]
record = self.driver.list_records(zone=zone)[0]
ZerigoMockHttp.type = 'RECORD_DOES_NOT_EXIST'
try:
self.driver.delete_record(record=record)
except RecordDoesNotExistError:
e = sys.exc_info()[1]
self.assertEqual(e.record_id, record.id)
else:
self.fail('Exception was not thrown')
class ZerigoMockHttp(MockHttp):
fixtures = DNSFileFixtures('zerigo')
def _api_1_1_zones_xml_INVALID_CREDS(self, method, url, body, headers):
body = 'HTTP Basic: Access denied.\n'
return (httplib.UNAUTHORIZED, body, {}, httplib.responses[httplib.OK])
def _api_1_1_zones_xml(self, method, url, body, headers):
body = self.fixtures.load('list_zones.xml')
return (httplib.OK, body, {'x-query-count': '1'},
httplib.responses[httplib.OK])
def _api_1_1_zones_xml_NO_RESULTS(self, method, url, body, headers):
body = self.fixtures.load('list_zones_no_results.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _api_1_1_zones_12345678_hosts_xml(self, method, url, body, headers):
body = self.fixtures.load('list_records.xml')
return (httplib.OK, body, {'x-query-count': '1'},
httplib.responses[httplib.OK])
def _api_1_1_zones_12345678_hosts_xml_NO_RESULTS(self, method, url, body,
headers):
body = self.fixtures.load('list_records_no_results.xml')
return (httplib.OK, body, {'x-query-count': '0'},
httplib.responses[httplib.OK])
def _api_1_1_zones_12345678_hosts_xml_ZONE_DOES_NOT_EXIST(self, method,
url, body,
headers):
body = ''
return (httplib.NOT_FOUND, body, {}, httplib.responses[httplib.OK])
def _api_1_1_zones_12345678_xml(self, method, url, body, headers):
body = self.fixtures.load('get_zone.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _api_1_1_zones_4444_xml_DOES_NOT_EXIST(self, method, url, body,
headers):
body = ''
return (httplib.NOT_FOUND, body, {}, httplib.responses[httplib.OK])
def _api_1_1_hosts_23456789_xml(self, method, url, body, headers):
body = self.fixtures.load('get_record.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _api_1_1_zones_444_xml_ZONE_DOES_NOT_EXIST(self, method, url, body,
headers):
body = ''
return (httplib.NOT_FOUND, body, {}, httplib.responses[httplib.OK])
def _api_1_1_zones_12345678_xml_RECORD_DOES_NOT_EXIST(self, method, url,
body, headers):
body = self.fixtures.load('get_zone.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _api_1_1_hosts_28536_xml_RECORD_DOES_NOT_EXIST(self, method, url, body,
headers):
body = ''
return (httplib.NOT_FOUND, body, {}, httplib.responses[httplib.OK])
def _api_1_1_zones_xml_CREATE_ZONE(self, method, url, body, headers):
body = self.fixtures.load('create_zone.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _api_1_1_zones_xml_CREATE_ZONE_VALIDATION_ERROR(self, method, url,
body, headers):
body = self.fixtures.load('create_zone_validation_error.xml')
return (httplib.UNPROCESSABLE_ENTITY, body, {},
httplib.responses[httplib.OK])
def _api_1_1_zones_12345678_hosts_xml_CREATE_RECORD(self, method, url,
body, headers):
body = self.fixtures.load('create_record.xml')
return (httplib.CREATED, body, {}, httplib.responses[httplib.OK])
def _api_1_1_zones_12345678_xml_ZONE_DOES_NOT_EXIST(self, method, url,
body, headers):
body = ''
return (httplib.NOT_FOUND, body, {}, httplib.responses[httplib.OK])
def _api_1_1_hosts_23456789_xml_RECORD_DOES_NOT_EXIST(self, method, url,
body, headers):
body = ''
return (httplib.NOT_FOUND, body, {}, httplib.responses[httplib.OK])
"""
def (self, method, url, body, headers):
body = self.fixtures.load('.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def (self, method, url, body, headers):
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def (self, method, url, body, headers):
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def (self, method, url, body, headers):
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def (self, method, url, body, headers):
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def (self, method, url, body, headers):
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
"""
if __name__ == '__main__':
sys.exit(unittest.main())
| Scalr/libcloud | libcloud/test/dns/test_zerigo.py | Python | apache-2.0 | 14,732 |
# -*- coding: utf-8 -*-
"""
Dashboard stuff for admin_tools
"""
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from admin_tools.dashboard import modules, Dashboard, AppIndexDashboard
from admin_tools.utils import get_admin_site_name
class CustomIndexDashboard(Dashboard):
"""
Custom index dashboard for project.
"""
def init_with_context(self, context):
site_name = get_admin_site_name(context)
# append a link list module for "quick links"
self.children.append(modules.LinkList(
_('Quick links'),
layout='inline',
draggable=False,
deletable=False,
collapsible=False,
children=[
[_('Return to site'), '/'],
[_('Change password'),
reverse('%s:password_change' % site_name)],
[_('Log out'), reverse('%s:logout' % site_name)],
]
))
# append an app list module for "Applications"
self.children.append(modules.AppList(
_('Applications'),
exclude=('django.contrib.*',),
))
# append an app list module for "Administration"
self.children.append(modules.AppList(
_('Administration'),
models=('django.contrib.*',),
))
# append a recent actions module
self.children.append(modules.RecentActions(_('Recent Actions'), 5))
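        # Descriptive note: admin_tools renders these modules in the order
        # they are appended; the second argument to RecentActions limits the
        # log to the five most recent entries.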
## append a feed module
#self.children.append(modules.Feed(
#_('Latest Django News'),
#feed_url='http://www.djangoproject.com/rss/weblog/',
#limit=5
#))
## append another link list module for "support".
#self.children.append(modules.LinkList(
#_('Support'),
#children=[
#{
#'title': _('Django documentation'),
#'url': 'http://docs.djangoproject.com/',
#'external': True,
#},
#{
#'title': _('Django "django-users" mailing list'),
#'url': 'http://groups.google.com/group/django-users',
#'external': True,
#},
#{
#'title': _('Django irc channel'),
#'url': 'irc://irc.freenode.net/django',
#'external': True,
#},
#]
#))
class CustomAppIndexDashboard(AppIndexDashboard):
"""
Custom app index dashboard for project.
"""
    # we disable the title because it's redundant with the model list module
title = ''
def __init__(self, *args, **kwargs):
AppIndexDashboard.__init__(self, *args, **kwargs)
# append a model list module and a recent actions module
self.children += [
modules.ModelList(self.app_title, self.models),
modules.RecentActions(
_('Recent Actions'),
include_list=self.get_app_content_types(),
limit=5
)
]
def init_with_context(self, context):
"""
Use this method if you need to access the request context.
"""
return super(CustomAppIndexDashboard, self).init_with_context(context)
| emencia/emencia_paste_djangocms_3 | emencia_paste_djangocms_3/django_buildout/project/mods_available/admin_tools/dashboard.py | Python | mit | 3,336 |
from setuptools import setup, find_packages
import os
DESCRIPTION = """
MEANS: python package for Moment Expansion Approximation, iNference and Simulation
A free, user-friendly tool implementing an efficient moment expansion approximation with parametric closures
that integrates well with the IPython interactive environment.
Our package enables the analysis of complex stochastic systems without any constraints
on the number of species and moments studied and the type of rate laws in the system.
In addition to the approximation method, our package provides numerous tools to help
non-expert users in stochastic analysis.
"""
# -- Assimulo dependency checks ---------------------------------------------------------------------
# numpy and cython are required for assimulo installation, yet their setup.py is badly done
# so the setup fails. Add these checks so our error message is displayed instead
# can be removed once assimulo ups their game.
try:
import cython
except ImportError:
raise ImportError('Please install cython first `pip install cython`')
try:
import numpy
except ImportError:
raise ImportError('Please install numpy first `pip install numpy`')
# ---------------------------------------------------------------------------------------------------------
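# With the guards above in place, the assumed install flow is:
#
#     pip install cython numpy
#     pip install means
#
# so that Assimulo's own setup.py finds both build dependencies already
# present.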
setup(
name='means',
version='1.0.0',
description='Moment Expansion Approximation method implementation with simulation and inference packages',
long_description=DESCRIPTION,
author='Sisi Fan, Quentin Geissmann, Eszter Lakatos, Saulius Lukauskas, '
'Angelique Ale, Ann C. Babtie, Paul D.W. Kirk, Michael P.H. Stumpf',
author_email='[email protected],[email protected]',
url='https://github.com/theosysbio/means',
license='MIT',
classifiers=[
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Topic :: Scientific/Engineering'
],
keywords=['moment expansion', 'approximation', 'simulation', 'inference'],
packages=find_packages(),
setup_requires=[
'numpy>=1.6.1', # Numpy has to be installed before others
],
install_requires=[
"numpy>=1.6.1",
"sympy>=0.7.5",
"matplotlib>=1.1.0",
"scipy>=0.10.1",
"PyYAML>=3.10",
"Assimulo>=2.5.1",
],
tests_require=['nose'],
test_suite='nose.collector'
)
| lukauskas/means | src/setup.py | Python | mit | 2,401 |
##########################################################################
#
# Copyright (c) 2008-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import os.path
import IECore
import IECoreGL
IECoreGL.init( False )
class TestSelection( unittest.TestCase ) :
def testSelect( self ) :
r = IECoreGL.Renderer()
r.setOption( "gl:mode", IECore.StringData( "deferred" ) )
r.setOption( "gl:searchPath:shader", IECore.StringData( os.path.dirname( __file__ ) + "/shaders" ) )
with IECore.WorldBlock( r ) :
r.concatTransform( IECore.M44f.createTranslated( IECore.V3f( 0, 0, -5 ) ) )
r.setAttribute( "name", IECore.StringData( "one" ) )
r.shader( "surface", "color", { "colorValue" : IECore.Color3fData( IECore.Color3f( 1, 0, 0 ) ) } )
r.geometry( "sphere", {}, {} )
r.concatTransform( IECore.M44f.createTranslated( IECore.V3f( -1, 0, 0 ) ) )
r.setAttribute( "name", IECore.StringData( "two" ) )
r.geometry( "sphere", {}, {} )
r.concatTransform( IECore.M44f.createTranslated( IECore.V3f( 2, 0, 0 ) ) )
r.setAttribute( "name", IECore.StringData( "three" ) )
r.geometry( "sphere", {}, {} )
s = r.scene()
s.setCamera( IECoreGL.PerspectiveCamera() )
ss = s.select( IECoreGL.Selector.Mode.GLSelect, IECore.Box2f( IECore.V2f( 0 ), IECore.V2f( 1 ) ) )
names = [ x.name.value() for x in ss ]
self.assertEqual( len( names ), 3 )
self.assert_( "one" in names )
self.assert_( "two" in names )
self.assert_( "three" in names )
def testRegionSelect( self ) :
r = IECoreGL.Renderer()
r.setOption( "gl:mode", IECore.StringData( "deferred" ) )
r.setOption( "gl:searchPath:shader", IECore.StringData( os.path.dirname( __file__ ) + "/shaders" ) )
with IECore.WorldBlock( r ) :
r.concatTransform( IECore.M44f.createTranslated( IECore.V3f( 0, 0, -5 ) ) )
r.shader( "surface", "color", { "colorValue" : IECore.Color3fData( IECore.Color3f( 1, 0, 0 ) ) } )
r.concatTransform( IECore.M44f.createTranslated( IECore.V3f( -2, -2, 0 ) ) )
r.setAttribute( "name", IECore.StringData( "red" ) )
r.geometry( "sphere", {}, {} )
r.shader( "surface", "color", { "colorValue" : IECore.Color3fData( IECore.Color3f( 0, 1, 0 ) ) } )
r.concatTransform( IECore.M44f.createTranslated( IECore.V3f( 0, 4, 0 ) ) )
r.setAttribute( "name", IECore.StringData( "green" ) )
r.geometry( "sphere", {}, {} )
r.shader( "surface", "color", { "colorValue" : IECore.Color3fData( IECore.Color3f( 0, 0, 1 ) ) } )
r.concatTransform( IECore.M44f.createTranslated( IECore.V3f( 4, 0, 0 ) ) )
r.setAttribute( "name", IECore.StringData( "blue" ) )
r.geometry( "sphere", {}, {} )
r.shader( "surface", "color", { "colorValue" : IECore.Color3fData( IECore.Color3f( 1, 1, 1 ) ) } )
r.concatTransform( IECore.M44f.createTranslated( IECore.V3f( 0, -4, 0 ) ) )
r.setAttribute( "name", IECore.StringData( "white" ) )
r.geometry( "sphere", {}, {} )
s = r.scene()
s.setCamera( IECoreGL.PerspectiveCamera() )
ss = s.select( IECoreGL.Selector.Mode.GLSelect, IECore.Box2f( IECore.V2f( 0, 0.5 ), IECore.V2f( 0.5, 1 ) ) )
self.assertEqual( len( ss ), 1 )
self.assertEqual( ss[0].name.value(), "red" )
ss = s.select( IECoreGL.Selector.Mode.GLSelect, IECore.Box2f( IECore.V2f( 0 ), IECore.V2f( 0.5 ) ) )
self.assertEqual( len( ss ), 1 )
self.assertEqual( ss[0].name.value(), "green" )
ss = s.select( IECoreGL.Selector.Mode.GLSelect, IECore.Box2f( IECore.V2f( 0.5, 0 ), IECore.V2f( 1, 0.5 ) ) )
self.assertEqual( len( ss ), 1 )
self.assertEqual( ss[0].name.value(), "blue" )
ss = s.select( IECoreGL.Selector.Mode.GLSelect, IECore.Box2f( IECore.V2f( 0.5 ), IECore.V2f( 1 ) ) )
self.assertEqual( len( ss ), 1 )
self.assertEqual( ss[0].name.value(), "white" )
def testIDSelect( self ) :
r = IECoreGL.Renderer()
r.setOption( "gl:mode", IECore.StringData( "deferred" ) )
with IECore.WorldBlock( r ) :
r.concatTransform( IECore.M44f.createTranslated( IECore.V3f( 0, 0, -5 ) ) )
r.concatTransform( IECore.M44f.createTranslated( IECore.V3f( -1, 0, 0 ) ) )
r.setAttribute( "name", IECore.StringData( "frontLeft" ) )
r.geometry( "sphere", {}, {} )
r.concatTransform( IECore.M44f.createTranslated( IECore.V3f( 0, 0, -1 ) ) )
r.setAttribute( "name", IECore.StringData( "backLeft" ) )
r.geometry( "sphere", {}, {} )
r.concatTransform( IECore.M44f.createTranslated( IECore.V3f( 2, 0, 1 ) ) )
r.setAttribute( "name", IECore.StringData( "frontRight" ) )
r.geometry( "sphere", {}, {} )
r.concatTransform( IECore.M44f.createTranslated( IECore.V3f( 0, 0, -1 ) ) )
r.setAttribute( "name", IECore.StringData( "backRight" ) )
r.geometry( "sphere", {}, {} )
s = r.scene()
s.setCamera( IECoreGL.OrthographicCamera() )
ss = s.select( IECoreGL.Selector.Mode.IDRender, IECore.Box2f( IECore.V2f( 0.25, 0.5 ), IECore.V2f( 0.26, 0.51 ) ) )
self.assertEqual( len( ss ), 1 )
self.assertEqual( ss[0].name.value(), "frontLeft" )
ss = s.select( IECoreGL.Selector.Mode.IDRender, IECore.Box2f( IECore.V2f( 0.75, 0.5 ), IECore.V2f( 0.76, 0.51 ) ) )
self.assertEqual( len( ss ), 1 )
self.assertEqual( ss[0].name.value(), "frontRight" )
def testIDSelectDepths( self ) :
r = IECoreGL.Renderer()
r.setOption( "gl:mode", IECore.StringData( "deferred" ) )
with IECore.WorldBlock( r ) :
r.concatTransform( IECore.M44f.createTranslated( IECore.V3f( 0, 0, -5 ) ) )
r.setAttribute( "name", IECore.StringData( "ball" ) )
r.geometry( "sphere", {}, {} )
scene = r.scene()
scene.setCamera( IECoreGL.OrthographicCamera() )
s1 = scene.select( IECoreGL.Selector.Mode.GLSelect, IECore.Box2f( IECore.V2f( 0 ), IECore.V2f( 1 ) ) )
self.assertEqual( len( s1 ), 1 )
self.assertEqual( s1[0].name.value(), "ball" )
s2 = scene.select( IECoreGL.Selector.Mode.IDRender, IECore.Box2f( IECore.V2f( 0 ), IECore.V2f( 1 ) ) )
self.assertEqual( len( s2 ), 1 )
self.assertEqual( s2[0].name.value(), "ball" )
self.assertAlmostEqual( s1[0].depthMin, s2[0].depthMin, 5 )
def testOcclusionQuerySelect( self ) :
r = IECoreGL.Renderer()
r.setOption( "gl:mode", IECore.StringData( "deferred" ) )
with IECore.WorldBlock( r ) :
r.concatTransform( IECore.M44f.createTranslated( IECore.V3f( 0, 0, -5 ) ) )
r.concatTransform( IECore.M44f.createTranslated( IECore.V3f( -1, 0, 0 ) ) )
r.setAttribute( "name", IECore.StringData( "frontLeft" ) )
r.geometry( "sphere", {}, {} )
r.concatTransform( IECore.M44f.createTranslated( IECore.V3f( 0, 0, -1 ) ) )
r.setAttribute( "name", IECore.StringData( "backLeft" ) )
r.geometry( "sphere", {}, {} )
r.concatTransform( IECore.M44f.createTranslated( IECore.V3f( 2, 0, 1 ) ) )
r.setAttribute( "name", IECore.StringData( "frontRight" ) )
r.geometry( "sphere", {}, {} )
r.concatTransform( IECore.M44f.createTranslated( IECore.V3f( 0, 0, -1 ) ) )
r.setAttribute( "name", IECore.StringData( "backRight" ) )
r.geometry( "sphere", {}, {} )
s = r.scene()
s.setCamera( IECoreGL.OrthographicCamera() )
ss = s.select( IECoreGL.Selector.Mode.OcclusionQuery, IECore.Box2f( IECore.V2f( 0, 0 ), IECore.V2f( 0.25, 1 ) ) )
self.assertEqual( len( ss ), 2 )
self.assertEqual( set( [ x.name.value() for x in ss ] ), set( ( "frontLeft", "backLeft" ) ) )
ss = s.select( IECoreGL.Selector.Mode.OcclusionQuery, IECore.Box2f( IECore.V2f( 0.75, 0 ), IECore.V2f( 1, 1 ) ) )
self.assertEqual( len( ss ), 2 )
self.assertEqual( set( [ x.name.value() for x in ss ] ), set( ( "frontRight", "backRight" ) ) )
def testIDSelectWithAdditionalDisplayStyles( self ) :
r = IECoreGL.Renderer()
r.setOption( "gl:mode", IECore.StringData( "deferred" ) )
with IECore.WorldBlock( r ) :
r.setAttribute( "gl:primitive:wireframe", IECore.BoolData( True ) )
r.setAttribute( "gl:primitive:bound", IECore.BoolData( True ) )
r.setAttribute( "gl:primitive:outline", IECore.BoolData( True ) )
r.setAttribute( "gl:primitive:points", IECore.BoolData( True ) )
r.concatTransform( IECore.M44f.createTranslated( IECore.V3f( 0, 0, -5 ) ) )
r.concatTransform( IECore.M44f.createTranslated( IECore.V3f( -1, 0, 0 ) ) )
r.setAttribute( "name", IECore.StringData( "frontLeft" ) )
r.geometry( "sphere", {}, {} )
r.concatTransform( IECore.M44f.createTranslated( IECore.V3f( 0, 0, -1 ) ) )
r.setAttribute( "name", IECore.StringData( "backLeft" ) )
r.geometry( "sphere", {}, {} )
r.concatTransform( IECore.M44f.createTranslated( IECore.V3f( 2, 0, 1 ) ) )
r.setAttribute( "name", IECore.StringData( "frontRight" ) )
r.geometry( "sphere", {}, {} )
r.concatTransform( IECore.M44f.createTranslated( IECore.V3f( 0, 0, -1 ) ) )
r.setAttribute( "name", IECore.StringData( "backRight" ) )
r.geometry( "sphere", {}, {} )
s = r.scene()
s.setCamera( IECoreGL.OrthographicCamera() )
ss = s.select( IECoreGL.Selector.Mode.IDRender, IECore.Box2f( IECore.V2f( 0.25, 0.5 ), IECore.V2f( 0.26, 0.51 ) ) )
self.assertEqual( len( ss ), 1 )
self.assertEqual( ss[0].name.value(), "frontLeft" )
ss = s.select( IECoreGL.Selector.Mode.IDRender, IECore.Box2f( IECore.V2f( 0.75, 0.5 ), IECore.V2f( 0.76, 0.51 ) ) )
self.assertEqual( len( ss ), 1 )
self.assertEqual( ss[0].name.value(), "frontRight" )
def testPointsPrimitiveSelect( self ) :
r = IECoreGL.Renderer()
r.setOption( "gl:mode", IECore.StringData( "deferred" ) )
r.setOption( "gl:searchPath:shader", IECore.StringData( os.path.dirname( __file__ ) + "/shaders" ) )
with IECore.WorldBlock( r ) :
r.concatTransform( IECore.M44f.createTranslated( IECore.V3f( 0, 0, -5 ) ) )
r.setAttribute( "name", IECore.StringData( "pointsNeedSelectingToo" ) )
r.points( 1, { "P" : IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Vertex, IECore.V3fVectorData( [ IECore.V3f( 0 ) ] ) ) } )
s = r.scene()
s.setCamera( IECoreGL.PerspectiveCamera() )
for mode in ( IECoreGL.Selector.Mode.GLSelect, IECoreGL.Selector.Mode.OcclusionQuery, IECoreGL.Selector.Mode.IDRender ) :
ss = s.select( mode, IECore.Box2f( IECore.V2f( 0 ), IECore.V2f( 1 ) ) )
names = [ x.name.value() for x in ss ]
self.assertEqual( len( names ), 1 )
self.assertEqual( names[0], "pointsNeedSelectingToo" )
def testContextManager( self ) :
r = IECoreGL.Renderer()
r.setOption( "gl:mode", IECore.StringData( "deferred" ) )
r.setOption( "gl:searchPath:shader", IECore.StringData( os.path.dirname( __file__ ) + "/shaders" ) )
with IECore.WorldBlock( r ) :
r.concatTransform( IECore.M44f.createTranslated( IECore.V3f( 0, 0, -5 ) ) )
r.setAttribute( "name", IECore.StringData( "one" ) )
r.shader( "surface", "color", { "colorValue" : IECore.Color3fData( IECore.Color3f( 1, 0, 0 ) ) } )
r.geometry( "sphere", {}, {} )
r.concatTransform( IECore.M44f.createTranslated( IECore.V3f( -1, 0, 0 ) ) )
r.setAttribute( "name", IECore.StringData( "two" ) )
r.geometry( "sphere", {}, {} )
scene = r.scene()
scene.setCamera( None )
IECoreGL.PerspectiveCamera().render( IECoreGL.State.defaultState() )
hits = []
with IECoreGL.Selector( IECore.Box2f( IECore.V2f( 0 ), IECore.V2f( 1 ) ), IECoreGL.Selector.Mode.IDRender, hits ) as selector :
IECoreGL.State.bindBaseState()
selector.baseState().bind()
scene.root().render( selector.baseState() )
names = [ x.name.value() for x in hits ]
self.assertEqual( len( names ), 2 )
self.assert_( "one" in names )
self.assert_( "two" in names )
if __name__ == "__main__":
unittest.main()
| dneg/cortex | test/IECoreGL/Selection.py | Python | bsd-3-clause | 13,164 |
# -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in root directory
##############################################################################
from . import stock
from . import stock_quant
| slevenhagen/odoomrp-wip-npg | stock_picking_wave_package_info/models/__init__.py | Python | agpl-3.0 | 307 |
import unittest
from funcytaskengine.event_fulfillment.return_values import EmptyValues, ValuesContainer
from funcytaskengine.transition_conditions.transformations import DictExtractFields
class DictExtractFieldsTestCase(unittest.TestCase):
def test_no_values(self):
extractor = DictExtractFields(type=None, fields=['test'])
self.assertEqual(
EmptyValues().values(),
extractor.apply(EmptyValues()).values(),
)
def test_single_value(self):
extractor = DictExtractFields(type=None, fields=['test'])
values = ValuesContainer(
{
'hi': 'hello',
'test': 'ok',
},
)
self.assertEqual(
(
{
'test': 'ok',
},
),
extractor.apply(values).values()
)
def test_multiple_values(self):
extractor = DictExtractFields(type=None, fields=[
'test',
'hi',
])
values = ValuesContainer((
{
'hi': 'hi',
'test': 'ok',
},
{
'hi': 'hi2',
'test': 'test2',
},
))
self.assertEqual((
{
'test': 'ok',
'hi': 'hi',
},
{
'hi': 'hi2',
'test': 'test2',
}
),
extractor.apply(values).values()
)
| dm03514/func-y-task-engine | tests/unit/transition_conditions/transformations/test_dict_extract_fields.py | Python | gpl-3.0 | 1,539 |
#!/usr/bin/env python
import os, random, glob, shutil, itertools
from PIL import Image
from joblib import Parallel, delayed
IN_SUFFIX = '_in.png'
OUT_SUFFIX = '_out.png'
TRAIN_SIZE = 0.7
VAL_SIZE = 0.2
RESIZE_TO_X = 500
RESIZE_TO_Y_FACTOR = 1.32
RESIZE_TO = (RESIZE_TO_X, int(RESIZE_TO_X * RESIZE_TO_Y_FACTOR))
WINDOW_SIZE = RESIZE_TO
STRIDE = (50, 50)
OUT_SCALE_TO = (256, 256)
OUT_MODE = 'RGB'
ROTATIONS = (0, 90, 180, 270)
SCALES = [(1, 1), (1, 1.2), (1.2, 1)]
def augment_image_deterministic(fname, out_dir,
window_size=WINDOW_SIZE, stride=STRIDE, scales=SCALES,
out_scale_to=OUT_SCALE_TO, rotations=ROTATIONS):
out_i = 0
base_fname = os.path.splitext(os.path.basename(fname))[0]
if '_' in base_fname:
base_id, kind = base_fname.rsplit('_', 1)
else:
base_id = base_fname
kind = None
src_img = Image.open(fname).resize(RESIZE_TO) #.convert('RGB').convert(OUT_MODE)
out_fname = os.path.join(out_dir,
'_'.join((base_id, str(out_i)) + (() if kind is None else (kind,))) + '.png')
src_img.save(out_fname)
return [(1, 1, 0, 0, 0, out_fname)]
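    # NOTE: the early return above disables the sliding-window / scale /
    # rotation augmentation below; that code appears to be kept for reference.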
#alpha = src_img.convert('RGBA').split()[-1]
#bg = Image.new("RGBA", src_img.size, (255, 255, 255, 255))
#bg.paste(src_img, mask=alpha)
#src_img = bg.convert(OUT_MODE)
offset_gen = itertools.product(range(0,
src_img.size[0] - window_size[0],
stride[0]),
range(0,
src_img.size[1] - window_size[1],
stride[1]))
result = []
for scale_x, scale_y in scales:
scaled_image = src_img.resize((int(src_img.size[0] * scale_x),
int(src_img.size[1] * scale_y)),
resample=Image.BILINEAR)
for x_off, y_off in offset_gen:
new_image = scaled_image.crop((x_off,
y_off,
x_off + window_size[0],
y_off + window_size[1]))
new_image.thumbnail(out_scale_to)
for angle in rotations:
out_fname = os.path.join(out_dir,
'_'.join((base_id, str(out_i)) + (() if kind is None else (kind,))) + '.png')
new_image.rotate(angle).save(out_fname)
out_i += 1
result.append((scale_x, scale_y, x_off, y_off, angle, out_fname))
return result
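# Illustrative sketch (not part of the original script): with the early return
# above, a call only writes the resized image and returns one metadata tuple
# (scale_x, scale_y, x_off, y_off, angle, out_fname), e.g.
#   augment_image_deterministic('img_in.png', '/tmp/out')
#   -> [(1, 1, 0, 0, 0, '/tmp/out/img_0_in.png')]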
def copy_one_augmented(src_dir, prefix, target_dir):
augment_image_deterministic(os.path.join(src_dir,
prefix + IN_SUFFIX),
target_dir)
augment_image_deterministic(os.path.join(src_dir,
prefix + OUT_SUFFIX),
target_dir)
def copy_all_augmented(src_dir, prefixes, target_dir, n_jobs=-1):
if not os.path.exists(target_dir):
os.makedirs(target_dir)
Parallel(n_jobs=n_jobs)(delayed(copy_one_augmented)(src_dir, prefix, target_dir)
for prefix in prefixes)
def copy_all_svg(svg_dir, prefixes, target_dir):
if not os.path.exists(target_dir):
os.makedirs(target_dir)
for prefix in prefixes:
src_file = os.path.join(svg_dir, prefix + '.svg')
if os.path.exists(src_file):
shutil.copy2(src_file,
os.path.join(target_dir, prefix + '.svg'))
def shuffle_split(in_dir='/notebook/data/4_inout_pairs/', out_dir='/notebook/data/5_ready/', svg_dir='/notebook/data/3_prepared_images/', train_size=TRAIN_SIZE, val_size=VAL_SIZE):
all_sample_names = list(set(os.path.basename(fname)[:-len(IN_SUFFIX)]
for fname in glob.glob(os.path.join(in_dir, '*' + IN_SUFFIX))))
random.shuffle(all_sample_names)
total_samples = len(all_sample_names)
train_number = int(total_samples * train_size)
val_number = int(total_samples * val_size)
train_prefixes = all_sample_names[:train_number]
val_prefixes = all_sample_names[train_number:train_number+val_number]
test_prefixes = all_sample_names[train_number+val_number:]
copy_all_augmented(in_dir,
train_prefixes,
os.path.join(out_dir, 'train'))
copy_all_augmented(in_dir,
val_prefixes,
os.path.join(out_dir, 'val'))
copy_all_svg(svg_dir,
test_prefixes,
os.path.join(out_dir, 'test'))
if __name__ == '__main__':
shuffle_split()
| windj007/tablex-dataset | train_test_augment.py | Python | apache-2.0 | 4,822 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Bottle is a fast and simple micro-framework for small web applications. It
offers request dispatching (Routes) with url parameter support, templates,
a built-in HTTP Server and adapters for many third party WSGI/HTTP-server and
template engines - all in a single file and with no dependencies other than the
Python Standard Library.
Homepage and documentation: http://bottlepy.org/
Copyright (c) 2012, Marcel Hellkamp.
License: MIT (see LICENSE for details)
"""
from __future__ import with_statement
__author__ = 'Marcel Hellkamp'
__version__ = '0.12-dev'
__license__ = 'MIT'
# The gevent server adapter needs to patch some modules before they are imported
# This is why we parse the commandline parameters here but handle them later
if __name__ == '__main__':
from optparse import OptionParser
_cmd_parser = OptionParser(usage="usage: %prog [options] package.module:app")
_opt = _cmd_parser.add_option
_opt("--version", action="store_true", help="show version number.")
_opt("-b", "--bind", metavar="ADDRESS", help="bind socket to ADDRESS.")
_opt("-s", "--server", default='wsgiref', help="use SERVER as backend.")
_opt("-p", "--plugin", action="append", help="install additional plugin/s.")
_opt("--debug", action="store_true", help="start server in debug mode.")
_opt("--reload", action="store_true", help="auto-reload on file changes.")
_cmd_options, _cmd_args = _cmd_parser.parse_args()
if _cmd_options.server and _cmd_options.server.startswith('gevent'):
import gevent.monkey; gevent.monkey.patch_all()
import base64, cgi, email.utils, functools, hmac, imp, itertools, mimetypes,\
os, re, subprocess, sys, tempfile, threading, time, warnings
from datetime import date as datedate, datetime, timedelta
from tempfile import TemporaryFile
from traceback import format_exc, print_exc
from inspect import getargspec
try: from simplejson import dumps as json_dumps, loads as json_lds
except ImportError: # pragma: no cover
try: from json import dumps as json_dumps, loads as json_lds
except ImportError:
try: from django.utils.simplejson import dumps as json_dumps, loads as json_lds
except ImportError:
def json_dumps(data):
raise ImportError("JSON support requires Python 2.6 or simplejson.")
json_lds = json_dumps
# We now try to fix 2.5/2.6/3.1/3.2 incompatibilities.
# It ain't pretty but it works... Sorry for the mess.
py = sys.version_info
py3k = py >= (3, 0, 0)
py25 = py < (2, 6, 0)
py31 = (3, 1, 0) <= py < (3, 2, 0)
# Workaround for the missing "as" keyword in py3k.
def _e(): return sys.exc_info()[1]
# Workaround for the "print is a keyword/function" Python 2/3 dilemma
# and a fallback for mod_wsgi (resticts stdout/err attribute access)
try:
_stdout, _stderr = sys.stdout.write, sys.stderr.write
except IOError:
_stdout = lambda x: sys.stdout.write(x)
_stderr = lambda x: sys.stderr.write(x)
# Lots of stdlib and builtin differences.
if py3k:
import http.client as httplib
import _thread as thread
from urllib.parse import urljoin, SplitResult as UrlSplitResult
from urllib.parse import urlencode, quote as urlquote, unquote as urlunquote
urlunquote = functools.partial(urlunquote, encoding='latin1')
from http.cookies import SimpleCookie
from collections import MutableMapping as DictMixin
import pickle
from io import BytesIO
from configparser import ConfigParser
basestring = str
unicode = str
json_loads = lambda s: json_lds(touni(s))
callable = lambda x: hasattr(x, '__call__')
imap = map
def _raise(*a): raise a[0](a[1]).with_traceback(a[2])
else: # 2.x
import httplib
import thread
from urlparse import urljoin, SplitResult as UrlSplitResult
from urllib import urlencode, quote as urlquote, unquote as urlunquote
from Cookie import SimpleCookie
from itertools import imap
import cPickle as pickle
from StringIO import StringIO as BytesIO
from ConfigParser import SafeConfigParser as ConfigParser
if py25:
msg = "Python 2.5 support may be dropped in future versions of Bottle."
warnings.warn(msg, DeprecationWarning)
from UserDict import DictMixin
def next(it): return it.next()
bytes = str
else: # 2.6, 2.7
from collections import MutableMapping as DictMixin
json_loads = json_lds
eval(compile('def _raise(*a): raise a[0], a[1], a[2]', '<py3fix>', 'exec'))
# Some helpers for string/byte handling
def tob(s, enc='utf8'):
return s.encode(enc) if isinstance(s, unicode) else bytes(s)
def touni(s, enc='utf8', err='strict'):
return s.decode(enc, err) if isinstance(s, bytes) else unicode(s)
tonat = touni if py3k else tob
# 3.2 fixes cgi.FieldStorage to accept bytes (which makes a lot of sense).
# 3.1 needs a workaround.
if py31:
from io import TextIOWrapper
class NCTextIOWrapper(TextIOWrapper):
def close(self): pass # Keep wrapped buffer open.
# A bug in functools causes it to break if the wrapper is an instance method
def update_wrapper(wrapper, wrapped, *a, **ka):
try: functools.update_wrapper(wrapper, wrapped, *a, **ka)
except AttributeError: pass
# These helpers are used at module level and need to be defined first.
# And yes, I know PEP-8, but sometimes a lower-case classname makes more sense.
def depr(message, hard=False):
warnings.warn(message, DeprecationWarning, stacklevel=3)
def makelist(data): # This is just too handy
if isinstance(data, (tuple, list, set, dict)): return list(data)
elif data: return [data]
else: return []
class DictProperty(object):
''' Property that maps to a key in a local dict-like attribute. '''
def __init__(self, attr, key=None, read_only=False):
self.attr, self.key, self.read_only = attr, key, read_only
def __call__(self, func):
functools.update_wrapper(self, func, updated=[])
self.getter, self.key = func, self.key or func.__name__
return self
def __get__(self, obj, cls):
if obj is None: return self
key, storage = self.key, getattr(obj, self.attr)
if key not in storage: storage[key] = self.getter(obj)
return storage[key]
def __set__(self, obj, value):
if self.read_only: raise AttributeError("Read-Only property.")
getattr(obj, self.attr)[self.key] = value
def __delete__(self, obj):
if self.read_only: raise AttributeError("Read-Only property.")
del getattr(obj, self.attr)[self.key]
class cached_property(object):
''' A property that is only computed once per instance and then replaces
itself with an ordinary attribute. Deleting the attribute resets the
property. '''
def __init__(self, func):
self.__doc__ = getattr(func, '__doc__')
self.func = func
def __get__(self, obj, cls):
if obj is None: return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
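# Illustrative usage of cached_property (not part of the original module):
#   class Page(object):
#       @cached_property
#       def content(self):
#           return expensive_render()  # computed once, then cached on the instance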
class lazy_attribute(object):
''' A property that caches itself to the class object. '''
def __init__(self, func):
functools.update_wrapper(self, func, updated=[])
self.getter = func
def __get__(self, obj, cls):
value = self.getter(cls)
setattr(cls, self.__name__, value)
return value
###############################################################################
# Exceptions and Events ########################################################
###############################################################################
class BottleException(Exception):
""" A base class for exceptions used by bottle. """
pass
###############################################################################
# Routing ######################################################################
###############################################################################
class RouteError(BottleException):
""" This is a base class for all routing related exceptions """
class RouteReset(BottleException):
""" If raised by a plugin or request handler, the route is reset and all
plugins are re-applied. """
class RouterUnknownModeError(RouteError): pass
class RouteSyntaxError(RouteError):
""" The route parser found something not supported by this router """
class RouteBuildError(RouteError):
""" The route could not been built """
def _re_flatten(p):
''' Turn all capturing groups in a regular expression pattern into
non-capturing groups. '''
if '(' not in p: return p
return re.sub(r'(\\*)(\(\?P<[^>]+>|\((?!\?))',
lambda m: m.group(0) if len(m.group(1)) % 2 else m.group(1) + '(?:', p)
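# For example (illustrative): _re_flatten(r'(\d+)/(?P<x>\w+)') -> r'(?:\d+)/(?:\w+)'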
class Router(object):
''' A Router is an ordered collection of route->target pairs. It is used to
efficiently match WSGI requests against a number of routes and return
the first target that satisfies the request. The target may be anything,
usually a string, ID or callable object. A route consists of a path-rule
and a HTTP method.
The path-rule is either a static path (e.g. `/contact`) or a dynamic
path that contains wildcards (e.g. `/wiki/<page>`). The wildcard syntax
and details on the matching order are described in docs:`routing`.
'''
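    # Illustrative sketch (not in the original source): matching a dynamic rule.
    #   router = Router()
    #   router.add('/wiki/<page>', 'GET', target='wiki-handler')
    #   router.match({'REQUEST_METHOD': 'GET', 'PATH_INFO': '/wiki/Home'})
    #   -> ('wiki-handler', {'page': 'Home'})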
default_pattern = '[^/]+'
default_filter = 're'
#: The current CPython regexp implementation does not allow more
#: than 99 matching groups per regular expression.
_MAX_GROUPS_PER_PATTERN = 99
def __init__(self, strict=False):
self.rules = [] # All rules in order
self._groups = {} # index of regexes to find them in dyna_routes
self.builder = {} # Data structure for the url builder
self.static = {} # Search structure for static routes
self.dyna_routes = {}
self.dyna_regexes = {} # Search structure for dynamic routes
#: If true, static routes are no longer checked first.
self.strict_order = strict
self.filters = {
're': lambda conf:
(_re_flatten(conf or self.default_pattern), None, None),
'int': lambda conf: (r'-?\d+', int, lambda x: str(int(x))),
'float': lambda conf: (r'-?[\d.]+', float, lambda x: str(float(x))),
'path': lambda conf: (r'.+?', None, None)}
def add_filter(self, name, func):
''' Add a filter. The provided function is called with the configuration
string as parameter and must return a (regexp, to_python, to_url) tuple.
The first element is a string, the last two are callables or None. '''
self.filters[name] = func
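    # Illustrative sketch (not in the original source): a custom wildcard filter
    # that matches hexadecimal ids and performs no conversion.
    #   router.add_filter('hex', lambda conf: (r'[0-9a-fA-F]+', None, None))
    # After this, a rule such as '/obj/<id:hex>' matches '/obj/1a2b'.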
rule_syntax = re.compile('(\\\\*)'\
'(?:(?::([a-zA-Z_][a-zA-Z_0-9]*)?()(?:#(.*?)#)?)'\
'|(?:<([a-zA-Z_][a-zA-Z_0-9]*)?(?::([a-zA-Z_]*)'\
'(?::((?:\\\\.|[^\\\\>]+)+)?)?)?>))')
def _itertokens(self, rule):
offset, prefix = 0, ''
for match in self.rule_syntax.finditer(rule):
prefix += rule[offset:match.start()]
g = match.groups()
if len(g[0])%2: # Escaped wildcard
prefix += match.group(0)[len(g[0]):]
offset = match.end()
continue
if prefix:
yield prefix, None, None
name, filtr, conf = g[4:7] if g[2] is None else g[1:4]
yield name, filtr or 'default', conf or None
offset, prefix = match.end(), ''
if offset <= len(rule) or prefix:
yield prefix+rule[offset:], None, None
def add(self, rule, method, target, name=None):
''' Add a new rule or replace the target for an existing rule. '''
anons = 0 # Number of anonymous wildcards found
keys = [] # Names of keys
pattern = '' # Regular expression pattern with named groups
filters = [] # Lists of wildcard input filters
builder = [] # Data structure for the URL builder
is_static = True
for key, mode, conf in self._itertokens(rule):
if mode:
is_static = False
if mode == 'default': mode = self.default_filter
mask, in_filter, out_filter = self.filters[mode](conf)
if not key:
pattern += '(?:%s)' % mask
key = 'anon%d' % anons
anons += 1
else:
pattern += '(?P<%s>%s)' % (key, mask)
keys.append(key)
if in_filter: filters.append((key, in_filter))
builder.append((key, out_filter or str))
elif key:
pattern += re.escape(key)
builder.append((None, key))
self.builder[rule] = builder
if name: self.builder[name] = builder
if is_static and not self.strict_order:
self.static.setdefault(method, {})
self.static[method][self.build(rule)] = (target, None)
return
try:
re_pattern = re.compile('^(%s)$' % pattern)
re_match = re_pattern.match
except re.error:
raise RouteSyntaxError("Could not add Route: %s (%s)" % (rule, _e()))
if filters:
def getargs(path):
url_args = re_match(path).groupdict()
for name, wildcard_filter in filters:
try:
url_args[name] = wildcard_filter(url_args[name])
except ValueError:
raise HTTPError(400, 'Path has wrong format.')
return url_args
elif re_pattern.groupindex:
def getargs(path):
return re_match(path).groupdict()
else:
getargs = None
flatpat = _re_flatten(pattern)
whole_rule = (rule, flatpat, target, getargs)
if (flatpat, method) in self._groups:
if DEBUG:
msg = 'Route <%s %s> overwrites a previously defined route'
warnings.warn(msg % (method, rule), RuntimeWarning)
self.dyna_routes[method][self._groups[flatpat, method]] = whole_rule
else:
self.dyna_routes.setdefault(method, []).append(whole_rule)
self._groups[flatpat, method] = len(self.dyna_routes[method]) - 1
self._compile(method)
def _compile(self, method):
all_rules = self.dyna_routes[method]
comborules = self.dyna_regexes[method] = []
maxgroups = self._MAX_GROUPS_PER_PATTERN
for x in range(0, len(all_rules), maxgroups):
some = all_rules[x:x+maxgroups]
combined = (flatpat for (_, flatpat, _, _) in some)
combined = '|'.join('(^%s$)' % flatpat for flatpat in combined)
combined = re.compile(combined).match
rules = [(target, getargs) for (_, _, target, getargs) in some]
comborules.append((combined, rules))
def build(self, _name, *anons, **query):
        ''' Build a URL by filling the wildcards in a rule. '''
builder = self.builder.get(_name)
if not builder: raise RouteBuildError("No route with that name.", _name)
try:
for i, value in enumerate(anons): query['anon%d'%i] = value
url = ''.join([f(query.pop(n)) if n else f for (n,f) in builder])
return url if not query else url+'?'+urlencode(query)
except KeyError:
raise RouteBuildError('Missing URL argument: %r' % _e().args[0])
def match(self, environ):
        ''' Return a (target, url_args) tuple or raise HTTPError(400/404/405). '''
verb = environ['REQUEST_METHOD'].upper()
path = environ['PATH_INFO'] or '/'
target = None
methods = [verb, 'GET', 'ANY'] if verb == 'HEAD' else [verb, 'ANY']
for method in methods:
if method in self.static and path in self.static[method]:
target, getargs = self.static[method][path]
return target, getargs(path) if getargs else {}
elif method in self.dyna_regexes:
for combined, rules in self.dyna_regexes[method]:
match = combined(path)
if match:
target, getargs = rules[match.lastindex - 1]
return target, getargs(path) if getargs else {}
# No matching route found. Collect alternative methods for 405 response
allowed = set([])
nocheck = set(methods)
for method in set(self.static) - nocheck:
if path in self.static[method]:
allowed.add(verb)
for method in set(self.dyna_regexes) - allowed - nocheck:
for combined, rules in self.dyna_regexes[method]:
match = combined(path)
if match:
allowed.add(method)
if allowed:
allow_header = ",".join(sorted(allowed))
raise HTTPError(405, "Method not allowed.", Allow=allow_header)
# No matching route and no alternative method found. We give up
raise HTTPError(404, "Not found: " + repr(path))
class Route(object):
''' This class wraps a route callback along with route specific metadata and
configuration and applies Plugins on demand. It is also responsible for
        turning a URL path rule into a regular expression usable by the Router.
'''
def __init__(self, app, rule, method, callback, name=None,
plugins=None, skiplist=None, **config):
#: The application this route is installed to.
self.app = app
#: The path-rule string (e.g. ``/wiki/:page``).
self.rule = rule
#: The HTTP method as a string (e.g. ``GET``).
self.method = method
#: The original callback with no plugins applied. Useful for introspection.
self.callback = callback
#: The name of the route (if specified) or ``None``.
self.name = name or None
#: A list of route-specific plugins (see :meth:`Bottle.route`).
self.plugins = plugins or []
#: A list of plugins to not apply to this route (see :meth:`Bottle.route`).
self.skiplist = skiplist or []
#: Additional keyword arguments passed to the :meth:`Bottle.route`
#: decorator are stored in this dictionary. Used for route-specific
#: plugin configuration and meta-data.
self.config = ConfigDict().load_dict(config)
def __call__(self, *a, **ka):
depr("Some APIs changed to return Route() instances instead of"\
" callables. Make sure to use the Route.call method and not to"\
" call Route instances directly.")
return self.call(*a, **ka)
@cached_property
def call(self):
''' The route callback with all plugins applied. This property is
created on demand and then cached to speed up subsequent requests.'''
return self._make_callback()
def reset(self):
''' Forget any cached values. The next time :attr:`call` is accessed,
all plugins are re-applied. '''
self.__dict__.pop('call', None)
def prepare(self):
''' Do all on-demand work immediately (useful for debugging).'''
self.call
@property
def _context(self):
depr('Switch to Plugin API v2 and access the Route object directly.')
return dict(rule=self.rule, method=self.method, callback=self.callback,
name=self.name, app=self.app, config=self.config,
apply=self.plugins, skip=self.skiplist)
def all_plugins(self):
''' Yield all Plugins affecting this route. '''
unique = set()
for p in reversed(self.app.plugins + self.plugins):
if True in self.skiplist: break
name = getattr(p, 'name', False)
if name and (name in self.skiplist or name in unique): continue
if p in self.skiplist or type(p) in self.skiplist: continue
if name: unique.add(name)
yield p
def _make_callback(self):
callback = self.callback
for plugin in self.all_plugins():
try:
if hasattr(plugin, 'apply'):
api = getattr(plugin, 'api', 1)
context = self if api > 1 else self._context
callback = plugin.apply(callback, context)
else:
callback = plugin(callback)
except RouteReset: # Try again with changed configuration.
return self._make_callback()
if not callback is self.callback:
update_wrapper(callback, self.callback)
return callback
def get_undecorated_callback(self):
''' Return the callback. If the callback is a decorated function, try to
recover the original function. '''
func = self.callback
func = getattr(func, '__func__' if py3k else 'im_func', func)
closure_attr = '__closure__' if py3k else 'func_closure'
while hasattr(func, closure_attr) and getattr(func, closure_attr):
func = getattr(func, closure_attr)[0].cell_contents
return func
def get_callback_args(self):
''' Return a list of argument names the callback (most likely) accepts
as keyword arguments. If the callback is a decorated function, try
to recover the original function before inspection. '''
return getargspec(self.get_undecorated_callback())[0]
    def get_config(self, key, default=None):
        ''' Lookup a config field and return its value, first checking the
            route.config, then route.app.config.'''
        for conf in (self.config, self.app.config):
            if key in conf: return conf[key]
        return default
def __repr__(self):
cb = self.get_undecorated_callback()
return '<%s %r %r>' % (self.method, self.rule, cb)
###############################################################################
# Application Object ###########################################################
###############################################################################
class Bottle(object):
""" Each Bottle object represents a single, distinct web application and
consists of routes, callbacks, plugins, resources and configuration.
Instances are callable WSGI applications.
:param catchall: If true (default), handle all exceptions. Turn off to
let debugging middleware handle exceptions.
"""
def __init__(self, catchall=True, autojson=True):
#: A :class:`ConfigDict` for app specific configuration.
self.config = ConfigDict()
self.config._on_change = functools.partial(self.trigger_hook, 'config')
self.config.meta_set('autojson', 'validate', bool)
self.config.meta_set('catchall', 'validate', bool)
self.config['catchall'] = catchall
self.config['autojson'] = autojson
#: A :class:`ResourceManager` for application files
self.resources = ResourceManager()
self.routes = [] # List of installed :class:`Route` instances.
self.router = Router() # Maps requests to :class:`Route` instances.
self.error_handler = {}
# Core plugins
self.plugins = [] # List of installed plugins.
if self.config['autojson']:
self.install(JSONPlugin())
self.install(TemplatePlugin())
#: If true, most exceptions are caught and returned as :exc:`HTTPError`
catchall = DictProperty('config', 'catchall')
__hook_names = 'before_request', 'after_request', 'app_reset', 'config'
__hook_reversed = 'after_request'
@cached_property
def _hooks(self):
return dict((name, []) for name in self.__hook_names)
def add_hook(self, name, func):
''' Attach a callback to a hook. Three hooks are currently implemented:
before_request
Executed once before each request. The request context is
available, but no routing has happened yet.
after_request
Executed once after each request regardless of its outcome.
app_reset
Called whenever :meth:`Bottle.reset` is called.
'''
if name in self.__hook_reversed:
self._hooks[name].insert(0, func)
else:
self._hooks[name].append(func)
def remove_hook(self, name, func):
''' Remove a callback from a hook. '''
if name in self._hooks and func in self._hooks[name]:
self._hooks[name].remove(func)
return True
def trigger_hook(self, __name, *args, **kwargs):
''' Trigger a hook and return a list of results. '''
return [hook(*args, **kwargs) for hook in self._hooks[__name][:]]
def hook(self, name):
""" Return a decorator that attaches a callback to a hook. See
:meth:`add_hook` for details."""
def decorator(func):
self.add_hook(name, func)
return func
return decorator
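    # Illustrative sketch (not in the original source): attaching a hook.
    #   app = Bottle()
    #   @app.hook('before_request')
    #   def validate():
    #       ...  # runs once before each request, before routing happens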
def mount(self, prefix, app, **options):
''' Mount an application (:class:`Bottle` or plain WSGI) to a specific
URL prefix. Example::
root_app.mount('/admin/', admin_app)
:param prefix: path prefix or `mount-point`. If it ends in a slash,
that slash is mandatory.
:param app: an instance of :class:`Bottle` or a WSGI application.
All other parameters are passed to the underlying :meth:`route` call.
'''
if isinstance(app, basestring):
depr('Parameter order of Bottle.mount() changed.', True) # 0.10
segments = [p for p in prefix.split('/') if p]
if not segments: raise ValueError('Empty path prefix.')
path_depth = len(segments)
def mountpoint_wrapper():
try:
request.path_shift(path_depth)
rs = HTTPResponse([])
def start_response(status, headerlist, exc_info=None):
if exc_info:
try:
_raise(*exc_info)
finally:
exc_info = None
rs.status = status
for name, value in headerlist: rs.add_header(name, value)
return rs.body.append
body = app(request.environ, start_response)
if body and rs.body: body = itertools.chain(rs.body, body)
rs.body = body or rs.body
return rs
finally:
request.path_shift(-path_depth)
options.setdefault('skip', True)
options.setdefault('method', 'ANY')
options.setdefault('mountpoint', {'prefix': prefix, 'target': app})
options['callback'] = mountpoint_wrapper
self.route('/%s/<:re:.*>' % '/'.join(segments), **options)
if not prefix.endswith('/'):
self.route('/' + '/'.join(segments), **options)
def merge(self, routes):
''' Merge the routes of another :class:`Bottle` application or a list of
:class:`Route` objects into this application. The routes keep their
'owner', meaning that the :data:`Route.app` attribute is not
changed. '''
if isinstance(routes, Bottle):
routes = routes.routes
for route in routes:
self.add_route(route)
def install(self, plugin):
''' Add a plugin to the list of plugins and prepare it for being
applied to all routes of this application. A plugin may be a simple
decorator or an object that implements the :class:`Plugin` API.
'''
if hasattr(plugin, 'setup'): plugin.setup(self)
if not callable(plugin) and not hasattr(plugin, 'apply'):
raise TypeError("Plugins must be callable or implement .apply()")
self.plugins.append(plugin)
self.reset()
return plugin
def uninstall(self, plugin):
''' Uninstall plugins. Pass an instance to remove a specific plugin, a type
object to remove all plugins that match that type, a string to remove
all plugins with a matching ``name`` attribute or ``True`` to remove all
plugins. Return the list of removed plugins. '''
removed, remove = [], plugin
for i, plugin in list(enumerate(self.plugins))[::-1]:
if remove is True or remove is plugin or remove is type(plugin) \
or getattr(plugin, 'name', True) == remove:
removed.append(plugin)
del self.plugins[i]
if hasattr(plugin, 'close'): plugin.close()
if removed: self.reset()
return removed
def reset(self, route=None):
''' Reset all routes (force plugins to be re-applied) and clear all
caches. If an ID or route object is given, only that specific route
is affected. '''
if route is None: routes = self.routes
elif isinstance(route, Route): routes = [route]
else: routes = [self.routes[route]]
for route in routes: route.reset()
if DEBUG:
for route in routes: route.prepare()
self.trigger_hook('app_reset')
def close(self):
''' Close the application and all installed plugins. '''
for plugin in self.plugins:
if hasattr(plugin, 'close'): plugin.close()
self.stopped = True
def run(self, **kwargs):
''' Calls :func:`run` with the same parameters. '''
run(self, **kwargs)
def match(self, environ):
""" Search for a matching route and return a (:class:`Route` , urlargs)
tuple. The second value is a dictionary with parameters extracted
from the URL. Raise :exc:`HTTPError` (404/405) on a non-match."""
return self.router.match(environ)
def get_url(self, routename, **kargs):
""" Return a string that matches a named route """
scriptname = request.environ.get('SCRIPT_NAME', '').strip('/') + '/'
location = self.router.build(routename, **kargs).lstrip('/')
return urljoin(urljoin('/', scriptname), location)
def add_route(self, route):
''' Add a route object, but do not change the :data:`Route.app`
attribute.'''
self.routes.append(route)
self.router.add(route.rule, route.method, route, name=route.name)
if DEBUG: route.prepare()
def route(self, path=None, method='GET', callback=None, name=None,
apply=None, skip=None, **config):
""" A decorator to bind a function to a request URL. Example::
@app.route('/hello/:name')
def hello(name):
return 'Hello %s' % name
The ``:name`` part is a wildcard. See :class:`Router` for syntax
details.
:param path: Request path or a list of paths to listen to. If no
path is specified, it is automatically generated from the
signature of the function.
:param method: HTTP method (`GET`, `POST`, `PUT`, ...) or a list of
methods to listen to. (default: `GET`)
:param callback: An optional shortcut to avoid the decorator
syntax. ``route(..., callback=func)`` equals ``route(...)(func)``
:param name: The name for this route. (default: None)
:param apply: A decorator or plugin or a list of plugins. These are
applied to the route callback in addition to installed plugins.
:param skip: A list of plugins, plugin classes or names. Matching
plugins are not installed to this route. ``True`` skips all.
Any additional keyword arguments are stored as route-specific
configuration and passed to plugins (see :meth:`Plugin.apply`).
"""
if callable(path): path, callback = None, path
plugins = makelist(apply)
skiplist = makelist(skip)
def decorator(callback):
# TODO: Documentation and tests
if isinstance(callback, basestring): callback = load(callback)
for rule in makelist(path) or yieldroutes(callback):
for verb in makelist(method):
verb = verb.upper()
route = Route(self, rule, verb, callback, name=name,
plugins=plugins, skiplist=skiplist, **config)
self.add_route(route)
return callback
return decorator(callback) if callback else decorator
def get(self, path=None, method='GET', **options):
""" Equals :meth:`route`. """
return self.route(path, method, **options)
def post(self, path=None, method='POST', **options):
""" Equals :meth:`route` with a ``POST`` method parameter. """
return self.route(path, method, **options)
def put(self, path=None, method='PUT', **options):
""" Equals :meth:`route` with a ``PUT`` method parameter. """
return self.route(path, method, **options)
def delete(self, path=None, method='DELETE', **options):
""" Equals :meth:`route` with a ``DELETE`` method parameter. """
return self.route(path, method, **options)
    def error(self, code=500):
        """ Decorator: Register an output handler for an HTTP error code. """
def wrapper(handler):
self.error_handler[int(code)] = handler
return handler
return wrapper
def handle(self, path, method='GET'):
""" (deprecated) Execute the first matching route callback and return
the result. :exc:`HTTPResponse` exceptions are caught and returned.
If :attr:`Bottle.catchall` is true, other exceptions are caught as
well and returned as :exc:`HTTPError` instances (500).
"""
depr("This method will change semantics in 0.10. Try to avoid it.")
if isinstance(path, dict):
return self._handle(path)
return self._handle({'PATH_INFO': path, 'REQUEST_METHOD': method.upper()})
def default_error_handler(self, res):
return tob(template(ERROR_PAGE_TEMPLATE, e=res))
def _handle(self, environ):
try:
environ['bottle.app'] = self
request.bind(environ)
response.bind()
try:
self.trigger_hook('before_request')
route, args = self.router.match(environ)
environ['route.handle'] = route
environ['bottle.route'] = route
environ['route.url_args'] = args
return route.call(**args)
finally:
self.trigger_hook('after_request')
except HTTPResponse:
return _e()
except RouteReset:
route.reset()
return self._handle(environ)
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception:
if not self.catchall: raise
stacktrace = format_exc()
environ['wsgi.errors'].write(stacktrace)
return HTTPError(500, "Internal Server Error", _e(), stacktrace)
def _cast(self, out, peek=None):
""" Try to convert the parameter into something WSGI compatible and set
correct HTTP headers when possible.
Support: False, str, unicode, dict, HTTPResponse, HTTPError, file-like,
iterable of strings and iterable of unicodes
"""
# Empty output is done here
if not out:
if 'Content-Length' not in response:
response['Content-Length'] = 0
return []
# Join lists of byte or unicode strings. Mixed lists are NOT supported
if isinstance(out, (tuple, list))\
and isinstance(out[0], (bytes, unicode)):
out = out[0][0:0].join(out) # b'abc'[0:0] -> b''
# Encode unicode strings
if isinstance(out, unicode):
out = out.encode(response.charset)
# Byte Strings are just returned
if isinstance(out, bytes):
if 'Content-Length' not in response:
response['Content-Length'] = len(out)
return [out]
# HTTPError or HTTPException (recursive, because they may wrap anything)
# TODO: Handle these explicitly in handle() or make them iterable.
if isinstance(out, HTTPError):
out.apply(response)
out = self.error_handler.get(out.status_code, self.default_error_handler)(out)
return self._cast(out)
if isinstance(out, HTTPResponse):
out.apply(response)
return self._cast(out.body)
# File-like objects.
if hasattr(out, 'read'):
if 'wsgi.file_wrapper' in request.environ:
return request.environ['wsgi.file_wrapper'](out)
elif hasattr(out, 'close') or not hasattr(out, '__iter__'):
return WSGIFileWrapper(out)
# Handle Iterables. We peek into them to detect their inner type.
try:
iout = iter(out)
first = next(iout)
while not first:
first = next(iout)
except StopIteration:
return self._cast('')
except HTTPResponse:
first = _e()
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception:
if not self.catchall: raise
first = HTTPError(500, 'Unhandled exception', _e(), format_exc())
# These are the inner types allowed in iterator or generator objects.
if isinstance(first, HTTPResponse):
return self._cast(first)
elif isinstance(first, bytes):
new_iter = itertools.chain([first], iout)
elif isinstance(first, unicode):
encoder = lambda x: x.encode(response.charset)
new_iter = imap(encoder, itertools.chain([first], iout))
else:
msg = 'Unsupported response type: %s' % type(first)
return self._cast(HTTPError(500, msg))
if hasattr(out, 'close'):
new_iter = _closeiter(new_iter, out.close)
return new_iter
def wsgi(self, environ, start_response):
""" The bottle WSGI-interface. """
try:
out = self._cast(self._handle(environ))
# rfc2616 section 4.3
if response._status_code in (100, 101, 204, 304)\
or environ['REQUEST_METHOD'] == 'HEAD':
if hasattr(out, 'close'): out.close()
out = []
start_response(response._status_line, response.headerlist)
return out
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception:
if not self.catchall: raise
err = '<h1>Critical error while processing request: %s</h1>' \
% html_escape(environ.get('PATH_INFO', '/'))
if DEBUG:
err += '<h2>Error:</h2>\n<pre>\n%s\n</pre>\n' \
'<h2>Traceback:</h2>\n<pre>\n%s\n</pre>\n' \
% (html_escape(repr(_e())), html_escape(format_exc()))
environ['wsgi.errors'].write(err)
headers = [('Content-Type', 'text/html; charset=UTF-8')]
start_response('500 INTERNAL SERVER ERROR', headers, sys.exc_info())
return [tob(err)]
def __call__(self, environ, start_response):
''' Each instance of :class:'Bottle' is a WSGI application. '''
return self.wsgi(environ, start_response)
###############################################################################
# HTTP and WSGI Tools ##########################################################
###############################################################################
class BaseRequest(object):
""" A wrapper for WSGI environment dictionaries that adds a lot of
convenient access methods and properties. Most of them are read-only.
Adding new attributes to a request actually adds them to the environ
dictionary (as 'bottle.request.ext.<name>'). This is the recommended
way to store and access request-specific data.
"""
__slots__ = ('environ')
#: Maximum size of memory buffer for :attr:`body` in bytes.
MEMFILE_MAX = 102400
    #: Maximum number of GET or POST parameters per request
MAX_PARAMS = 100
def __init__(self, environ=None):
""" Wrap a WSGI environ dictionary. """
#: The wrapped WSGI environ dictionary. This is the only real attribute.
#: All other attributes actually are read-only properties.
self.environ = {} if environ is None else environ
self.environ['bottle.request'] = self
@DictProperty('environ', 'bottle.app', read_only=True)
def app(self):
''' Bottle application handling this request. '''
raise RuntimeError('This request is not connected to an application.')
@DictProperty('environ', 'bottle.route', read_only=True)
def route(self):
""" The bottle :class:`Route` object that matches this request. """
raise RuntimeError('This request is not connected to a route.')
@DictProperty('environ', 'route.url_args', read_only=True)
def url_args(self):
""" The arguments extracted from the URL. """
raise RuntimeError('This request is not connected to a route.')
@property
def path(self):
''' The value of ``PATH_INFO`` with exactly one prefixed slash (to fix
broken clients and avoid the "empty path" edge case). '''
return '/' + self.environ.get('PATH_INFO','').lstrip('/')
@property
def method(self):
''' The ``REQUEST_METHOD`` value as an uppercase string. '''
return self.environ.get('REQUEST_METHOD', 'GET').upper()
@DictProperty('environ', 'bottle.request.headers', read_only=True)
def headers(self):
''' A :class:`WSGIHeaderDict` that provides case-insensitive access to
HTTP request headers. '''
return WSGIHeaderDict(self.environ)
def get_header(self, name, default=None):
''' Return the value of a request header, or a given default value. '''
return self.headers.get(name, default)
@DictProperty('environ', 'bottle.request.cookies', read_only=True)
def cookies(self):
""" Cookies parsed into a :class:`FormsDict`. Signed cookies are NOT
decoded. Use :meth:`get_cookie` if you expect signed cookies. """
cookies = SimpleCookie(self.environ.get('HTTP_COOKIE','')).values()
if len(cookies) > self.MAX_PARAMS:
raise HTTPError(413, 'Too many cookies')
return FormsDict((c.key, c.value) for c in cookies)
def get_cookie(self, key, default=None, secret=None):
""" Return the content of a cookie. To read a `Signed Cookie`, the
`secret` must match the one used to create the cookie (see
:meth:`BaseResponse.set_cookie`). If anything goes wrong (missing
cookie or wrong signature), return a default value. """
value = self.cookies.get(key)
if secret and value:
dec = cookie_decode(value, secret) # (key, value) tuple or None
return dec[1] if dec and dec[0] == key else default
return value or default
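    # Illustrative sketch (not in the original source): reading a signed cookie
    # that was created with response.set_cookie('user', 'bob', secret='s3cret'):
    #   request.get_cookie('user', secret='s3cret')
    #   -> 'bob' (or the default, None, if the signature does not verify)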
@DictProperty('environ', 'bottle.request.query', read_only=True)
def query(self):
''' The :attr:`query_string` parsed into a :class:`FormsDict`. These
values are sometimes called "URL arguments" or "GET parameters", but
not to be confused with "URL wildcards" as they are provided by the
:class:`Router`. '''
get = self.environ['bottle.get'] = FormsDict()
pairs = _parse_qsl(self.environ.get('QUERY_STRING', ''))
if len(pairs) > self.MAX_PARAMS:
raise HTTPError(413, 'Too many parameters')
for key, value in pairs:
get[key] = value
return get
@DictProperty('environ', 'bottle.request.forms', read_only=True)
def forms(self):
""" Form values parsed from an `url-encoded` or `multipart/form-data`
encoded POST or PUT request body. The result is returned as a
:class:`FormsDict`. All keys and values are strings. File uploads
are stored separately in :attr:`files`. """
forms = FormsDict()
for name, item in self.POST.allitems():
if not isinstance(item, FileUpload):
forms[name] = item
return forms
@DictProperty('environ', 'bottle.request.params', read_only=True)
def params(self):
""" A :class:`FormsDict` with the combined values of :attr:`query` and
:attr:`forms`. File uploads are stored in :attr:`files`. """
params = FormsDict()
for key, value in self.query.allitems():
params[key] = value
for key, value in self.forms.allitems():
params[key] = value
return params
@DictProperty('environ', 'bottle.request.files', read_only=True)
def files(self):
""" File uploads parsed from `multipart/form-data` encoded POST or PUT
request body. The values are instances of :class:`FileUpload`.
"""
files = FormsDict()
for name, item in self.POST.allitems():
if isinstance(item, FileUpload):
files[name] = item
return files
@DictProperty('environ', 'bottle.request.json', read_only=True)
def json(self):
''' If the ``Content-Type`` header is ``application/json``, this
property holds the parsed content of the request body. Only requests
smaller than :attr:`MEMFILE_MAX` are processed to avoid memory
exhaustion. '''
if 'application/json' in self.environ.get('CONTENT_TYPE', ''):
return json_loads(self._get_body_string())
return None
@DictProperty('environ', 'bottle.request.body', read_only=True)
def _body(self):
maxread = max(0, self.content_length)
stream = self.environ['wsgi.input']
body = BytesIO() if maxread < self.MEMFILE_MAX else TemporaryFile(mode='w+b')
while maxread > 0:
part = stream.read(min(maxread, self.MEMFILE_MAX))
if not part: break
body.write(part)
maxread -= len(part)
self.environ['wsgi.input'] = body
body.seek(0)
return body
    def _get_body_string(self):
        ''' Read body until content-length or MEMFILE_MAX into a string. Raise
            HTTPError(413) on requests that are too large. '''
        clen = self.content_length
        if clen > self.MEMFILE_MAX:
            raise HTTPError(413, 'Request too large')
        if clen < 0: clen = self.MEMFILE_MAX + 1
        data = self.body.read(clen)
        if len(data) > self.MEMFILE_MAX: # Fail fast
            raise HTTPError(413, 'Request too large')
        return data
@property
def body(self):
""" The HTTP request body as a seek-able file-like object. Depending on
:attr:`MEMFILE_MAX`, this is either a temporary file or a
:class:`io.BytesIO` instance. Accessing this property for the first
time reads and replaces the ``wsgi.input`` environ variable.
Subsequent accesses just do a `seek(0)` on the file object. """
self._body.seek(0)
return self._body
#: An alias for :attr:`query`.
GET = query
@DictProperty('environ', 'bottle.request.post', read_only=True)
def POST(self):
""" The values of :attr:`forms` and :attr:`files` combined into a single
:class:`FormsDict`. Values are either strings (form values) or
instances of :class:`cgi.FieldStorage` (file uploads).
"""
post = FormsDict()
# We default to application/x-www-form-urlencoded for everything that
# is not multipart and take the fast path (also: 3.1 workaround)
if not self.content_type.startswith('multipart/'):
pairs = _parse_qsl(tonat(self._get_body_string(), 'latin1'))
if len(pairs) > self.MAX_PARAMS:
raise HTTPError(413, 'Too many parameters')
for key, value in pairs:
post[key] = value
return post
safe_env = {'QUERY_STRING':''} # Build a safe environment for cgi
for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'):
if key in self.environ: safe_env[key] = self.environ[key]
args = dict(fp=self.body, environ=safe_env, keep_blank_values=True)
if py31:
args['fp'] = NCTextIOWrapper(args['fp'], encoding='latin1',
newline='\n')
elif py3k:
args['encoding'] = 'latin1'
data = cgi.FieldStorage(**args)
data = data.list or []
if len(data) > self.MAX_PARAMS:
raise HTTPError(413, 'Too many parameters')
for item in data:
if item.filename:
post[item.name] = FileUpload(item.file, item.name,
item.filename, item.headers)
else:
post[item.name] = item.value
return post
@property
def COOKIES(self):
''' Alias for :attr:`cookies` (deprecated). '''
depr('BaseRequest.COOKIES was renamed to BaseRequest.cookies (lowercase).')
return self.cookies
@property
def url(self):
""" The full request URI including hostname and scheme. If your app
lives behind a reverse proxy or load balancer and you get confusing
results, make sure that the ``X-Forwarded-Host`` header is set
correctly. """
return self.urlparts.geturl()
@DictProperty('environ', 'bottle.request.urlparts', read_only=True)
def urlparts(self):
''' The :attr:`url` string as an :class:`urlparse.SplitResult` tuple.
The tuple contains (scheme, host, path, query_string and fragment),
but the fragment is always empty because it is not visible to the
server. '''
env = self.environ
http = env.get('HTTP_X_FORWARDED_PROTO') or env.get('wsgi.url_scheme', 'http')
host = env.get('HTTP_X_FORWARDED_HOST') or env.get('HTTP_HOST')
if not host:
# HTTP 1.1 requires a Host-header. This is for HTTP/1.0 clients.
host = env.get('SERVER_NAME', '127.0.0.1')
port = env.get('SERVER_PORT')
if port and port != ('80' if http == 'http' else '443'):
host += ':' + port
path = urlquote(self.fullpath)
return UrlSplitResult(http, host, path, env.get('QUERY_STRING'), '')
@property
def fullpath(self):
""" Request path including :attr:`script_name` (if present). """
return urljoin(self.script_name, self.path.lstrip('/'))
@property
def query_string(self):
""" The raw :attr:`query` part of the URL (everything in between ``?``
and ``#``) as a string. """
return self.environ.get('QUERY_STRING', '')
@property
def script_name(self):
''' The initial portion of the URL's `path` that was removed by a higher
level (server or routing middleware) before the application was
            called. This script path is returned with leading and trailing
slashes. '''
script_name = self.environ.get('SCRIPT_NAME', '').strip('/')
return '/' + script_name + '/' if script_name else '/'
def path_shift(self, shift=1):
''' Shift path segments from :attr:`path` to :attr:`script_name` and
vice versa.
:param shift: The number of path segments to shift. May be negative
to change the shift direction. (default: 1)
'''
script = self.environ.get('SCRIPT_NAME','/')
self['SCRIPT_NAME'], self['PATH_INFO'] = path_shift(script, self.path, shift)
@property
def content_length(self):
        ''' The request body length as an integer. The client is responsible
            for setting this header. Otherwise, the real length of the body is unknown
and -1 is returned. In this case, :attr:`body` will be empty. '''
return int(self.environ.get('CONTENT_LENGTH') or -1)
@property
def content_type(self):
''' The Content-Type header as a lowercase-string (default: empty). '''
return self.environ.get('CONTENT_TYPE', '').lower()
@property
def is_xhr(self):
''' True if the request was triggered by a XMLHttpRequest. This only
works with JavaScript libraries that support the `X-Requested-With`
header (most of the popular libraries do). '''
requested_with = self.environ.get('HTTP_X_REQUESTED_WITH','')
return requested_with.lower() == 'xmlhttprequest'
@property
def is_ajax(self):
''' Alias for :attr:`is_xhr`. "Ajax" is not the right term. '''
return self.is_xhr
@property
def auth(self):
""" HTTP authentication data as a (user, password) tuple. This
implementation currently supports basic (not digest) authentication
only. If the authentication happened at a higher level (e.g. in the
front web-server or a middleware), the password field is None, but
the user field is looked up from the ``REMOTE_USER`` environ
variable. On any errors, None is returned. """
basic = parse_auth(self.environ.get('HTTP_AUTHORIZATION',''))
if basic: return basic
ruser = self.environ.get('REMOTE_USER')
if ruser: return (ruser, None)
return None
@property
def remote_route(self):
""" A list of all IPs that were involved in this request, starting with
            the client IP and followed by zero or more proxies. This only
            works if all proxies support the ``X-Forwarded-For`` header. Note
that this information can be forged by malicious clients. """
proxy = self.environ.get('HTTP_X_FORWARDED_FOR')
if proxy: return [ip.strip() for ip in proxy.split(',')]
remote = self.environ.get('REMOTE_ADDR')
return [remote] if remote else []
@property
def remote_addr(self):
""" The client IP as a string. Note that this information can be forged
by malicious clients. """
route = self.remote_route
return route[0] if route else None
def copy(self):
""" Return a new :class:`Request` with a shallow :attr:`environ` copy. """
return Request(self.environ.copy())
def get(self, value, default=None): return self.environ.get(value, default)
def __getitem__(self, key): return self.environ[key]
def __delitem__(self, key): self[key] = ""; del(self.environ[key])
def __iter__(self): return iter(self.environ)
def __len__(self): return len(self.environ)
def keys(self): return self.environ.keys()
def __setitem__(self, key, value):
""" Change an environ value and clear all caches that depend on it. """
if self.environ.get('bottle.request.readonly'):
raise KeyError('The environ dictionary is read-only.')
self.environ[key] = value
todelete = ()
if key == 'wsgi.input':
todelete = ('body', 'forms', 'files', 'params', 'post', 'json')
elif key == 'QUERY_STRING':
todelete = ('query', 'params')
elif key.startswith('HTTP_'):
todelete = ('headers', 'cookies')
for key in todelete:
self.environ.pop('bottle.request.'+key, None)
def __repr__(self):
return '<%s: %s %s>' % (self.__class__.__name__, self.method, self.url)
def __getattr__(self, name):
''' Search in self.environ for additional user defined attributes. '''
try:
var = self.environ['bottle.request.ext.%s'%name]
return var.__get__(self) if hasattr(var, '__get__') else var
except KeyError:
raise AttributeError('Attribute %r not defined.' % name)
def __setattr__(self, name, value):
if name == 'environ': return object.__setattr__(self, name, value)
self.environ['bottle.request.ext.%s'%name] = value
def _hkey(s):
return s.title().replace('_','-')
class HeaderProperty(object):
def __init__(self, name, reader=None, writer=str, default=''):
self.name, self.default = name, default
self.reader, self.writer = reader, writer
self.__doc__ = 'Current value of the %r header.' % name.title()
def __get__(self, obj, cls):
if obj is None: return self
value = obj.headers.get(self.name, self.default)
return self.reader(value) if self.reader else value
def __set__(self, obj, value):
obj.headers[self.name] = self.writer(value)
def __delete__(self, obj):
del obj.headers[self.name]
class BaseResponse(object):
""" Storage class for a response body as well as headers and cookies.
This class does support dict-like case-insensitive item-access to
headers, but is NOT a dict. Most notably, iterating over a response
yields parts of the body and not the headers.
:param body: The response body as one of the supported types.
:param status: Either an HTTP status code (e.g. 200) or a status line
including the reason phrase (e.g. '200 OK').
:param headers: A dictionary or a list of name-value pairs.
Additional keyword arguments are added to the list of headers.
Underscores in the header name are replaced with dashes.
"""
default_status = 200
default_content_type = 'text/html; charset=UTF-8'
# Header blacklist for specific response codes
# (rfc2616 section 10.2.3 and 10.3.5)
bad_headers = {
204: set(('Content-Type',)),
304: set(('Allow', 'Content-Encoding', 'Content-Language',
'Content-Length', 'Content-Range', 'Content-Type',
'Content-Md5', 'Last-Modified'))}
def __init__(self, body='', status=None, headers=None, **more_headers):
self._cookies = None
self._headers = {}
self.body = body
self.status = status or self.default_status
if headers:
if isinstance(headers, dict):
headers = headers.items()
for name, value in headers:
self.add_header(name, value)
if more_headers:
for name, value in more_headers.items():
self.add_header(name, value)
def copy(self, cls=None):
''' Returns a copy of self. '''
cls = cls or BaseResponse
assert issubclass(cls, BaseResponse)
copy = cls()
copy.status = self.status
copy._headers = dict((k, v[:]) for (k, v) in self._headers.items())
copy.COOKIES.load(self.COOKIES.output())
return copy
def __iter__(self):
return iter(self.body)
def close(self):
if hasattr(self.body, 'close'):
self.body.close()
@property
def status_line(self):
''' The HTTP status line as a string (e.g. ``404 Not Found``).'''
return self._status_line
@property
def status_code(self):
''' The HTTP status code as an integer (e.g. 404).'''
return self._status_code
def _set_status(self, status):
if isinstance(status, int):
code, status = status, _HTTP_STATUS_LINES.get(status)
elif ' ' in status:
status = status.strip()
code = int(status.split()[0])
else:
raise ValueError('String status line without a reason phrase.')
if not 100 <= code <= 999: raise ValueError('Status code out of range.')
self._status_code = code
self._status_line = str(status or ('%d Unknown' % code))
def _get_status(self):
return self._status_line
status = property(_get_status, _set_status, None,
''' A writeable property to change the HTTP response status. It accepts
either a numeric code (100-999) or a string with a custom reason
phrase (e.g. "404 Brain not found"). Both :data:`status_line` and
:data:`status_code` are updated accordingly. The return value is
always a status string. ''')
del _get_status, _set_status
@property
def headers(self):
''' An instance of :class:`HeaderDict`, a case-insensitive dict-like
view on the response headers. '''
hdict = HeaderDict()
hdict.dict = self._headers
return hdict
def __contains__(self, name): return _hkey(name) in self._headers
def __delitem__(self, name): del self._headers[_hkey(name)]
def __getitem__(self, name): return self._headers[_hkey(name)][-1]
def __setitem__(self, name, value): self._headers[_hkey(name)] = [str(value)]
def get_header(self, name, default=None):
''' Return the value of a previously defined header. If there is no
header with that name, return a default value. '''
return self._headers.get(_hkey(name), [default])[-1]
def set_header(self, name, value):
''' Create a new response header, replacing any previously defined
headers with the same name. '''
self._headers[_hkey(name)] = [str(value)]
def add_header(self, name, value):
''' Add an additional response header, not removing duplicates. '''
self._headers.setdefault(_hkey(name), []).append(str(value))
def iter_headers(self):
''' Yield (header, value) tuples, skipping headers that are not
allowed with the current response status code. '''
return self.headerlist
def wsgiheader(self):
depr('The wsgiheader method is deprecated. See headerlist.') #0.10
return self.headerlist
@property
def headerlist(self):
''' WSGI conform list of (header, value) tuples. '''
out = []
headers = list(self._headers.items())
if 'Content-Type' not in self._headers:
headers.append(('Content-Type', [self.default_content_type]))
if self._status_code in self.bad_headers:
bad_headers = self.bad_headers[self._status_code]
headers = [h for h in headers if h[0] not in bad_headers]
out += [(name, val) for name, vals in headers for val in vals]
if self._cookies:
for c in self._cookies.values():
out.append(('Set-Cookie', c.OutputString()))
return out
content_type = HeaderProperty('Content-Type')
content_length = HeaderProperty('Content-Length', reader=int)
expires = HeaderProperty('Expires',
reader=lambda x: datetime.utcfromtimestamp(parse_date(x)),
writer=lambda x: http_date(x))
@property
def charset(self, default='UTF-8'):
""" Return the charset specified in the content-type header (default: utf8). """
if 'charset=' in self.content_type:
return self.content_type.split('charset=')[-1].split(';')[0].strip()
return default
@property
def COOKIES(self):
""" A dict-like SimpleCookie instance. This should not be used directly.
See :meth:`set_cookie`. """
depr('The COOKIES dict is deprecated. Use `set_cookie()` instead.') # 0.10
if not self._cookies:
self._cookies = SimpleCookie()
return self._cookies
def set_cookie(self, name, value, secret=None, **options):
''' Create a new cookie or replace an old one. If the `secret` parameter is
set, create a `Signed Cookie` (described below).
:param name: the name of the cookie.
:param value: the value of the cookie.
:param secret: a signature key required for signed cookies.
Additionally, this method accepts all RFC 2109 attributes that are
supported by :class:`cookie.Morsel`, including:
:param max_age: maximum age in seconds. (default: None)
:param expires: a datetime object or UNIX timestamp. (default: None)
:param domain: the domain that is allowed to read the cookie.
(default: current domain)
:param path: limits the cookie to a given path (default: current path)
:param secure: limit the cookie to HTTPS connections (default: off).
            :param httponly: prevents client-side javascript from reading this cookie
(default: off, requires Python 2.6 or newer).
If neither `expires` nor `max_age` is set (default), the cookie will
expire at the end of the browser session (as soon as the browser
window is closed).
Signed cookies may store any pickle-able object and are
cryptographically signed to prevent manipulation. Keep in mind that
cookies are limited to 4kb in most browsers.
Warning: Signed cookies are not encrypted (the client can still see
the content) and not copy-protected (the client can restore an old
cookie). The main intention is to make pickling and unpickling
            safe, not to store secret information at client side.
'''
if not self._cookies:
self._cookies = SimpleCookie()
if secret:
value = touni(cookie_encode((name, value), secret))
elif not isinstance(value, basestring):
raise TypeError('Secret key missing for non-string Cookie.')
        if len(value) > 4096: raise ValueError('Cookie value too long.')
self._cookies[name] = value
for key, value in options.items():
if key == 'max_age':
if isinstance(value, timedelta):
value = value.seconds + value.days * 24 * 3600
if key == 'expires':
if isinstance(value, (datedate, datetime)):
value = value.timetuple()
elif isinstance(value, (int, float)):
value = time.gmtime(value)
value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value)
self._cookies[name][key.replace('_', '-')] = value
def delete_cookie(self, key, **kwargs):
''' Delete a cookie. Be sure to use the same `domain` and `path`
settings as used to create the cookie. '''
kwargs['max_age'] = -1
kwargs['expires'] = 0
self.set_cookie(key, '', **kwargs)
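    # Illustrative sketch (not part of the original source): a signed-cookie
    # round trip. The cookie name and secret are hypothetical; get_cookie is
    # the matching BaseRequest method defined earlier in this file.
    #
    #     response.set_cookie('account', {'id': 42}, secret='s3cret', max_age=3600)
    #     ...
    #     account = request.get_cookie('account', secret='s3cret')  # None if forged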
def __repr__(self):
out = ''
for name, value in self.headerlist:
out += '%s: %s\n' % (name.title(), value.strip())
return out
#: Thread-local storage for :class:`LocalRequest` and :class:`LocalResponse`
#: attributes.
_lctx = threading.local()
def local_property(name):
def fget(self):
try:
return getattr(_lctx, name)
except AttributeError:
raise RuntimeError("Request context not initialized.")
def fset(self, value): setattr(_lctx, name, value)
def fdel(self): delattr(_lctx, name)
return property(fget, fset, fdel,
'Thread-local property stored in :data:`_lctx.%s`' % name)
class LocalRequest(BaseRequest):
''' A thread-local subclass of :class:`BaseRequest` with a different
        set of attributes for each thread. There is usually only one global
instance of this class (:data:`request`). If accessed during a
request/response cycle, this instance always refers to the *current*
request (even on a multithreaded server). '''
bind = BaseRequest.__init__
environ = local_property('request_environ')
class LocalResponse(BaseResponse):
''' A thread-local subclass of :class:`BaseResponse` with a different
        set of attributes for each thread. There is usually only one global
instance of this class (:data:`response`). Its attributes are used
to build the HTTP response at the end of the request/response cycle.
'''
bind = BaseResponse.__init__
_status_line = local_property('response_status_line')
_status_code = local_property('response_status_code')
_cookies = local_property('response_cookies')
_headers = local_property('response_headers')
body = local_property('response_body')
Request = BaseRequest
Response = BaseResponse
class HTTPResponse(Response, BottleException):
def __init__(self, body='', status=None, headers=None,
header=None, **more_headers):
if header or 'output' in more_headers:
depr('Call signature changed (for the better). See BaseResponse')
if header: more_headers.update(header)
if 'output' in more_headers: body = more_headers.pop('output')
super(HTTPResponse, self).__init__(body, status, headers, **more_headers)
def apply(self, response):
response._status_code = self._status_code
response._status_line = self._status_line
response._headers = self._headers
response._cookies = self._cookies
response.body = self.body
def _output(self, value=None):
depr('Use HTTPResponse.body instead of HTTPResponse.output')
if value is None: return self.body
self.body = value
output = property(_output, _output, doc='Alias for .body')
class HTTPError(HTTPResponse):
default_status = 500
def __init__(self, status=None, body=None, exception=None, traceback=None,
**options):
self.exception = exception
self.traceback = traceback
super(HTTPError, self).__init__(body, status, **options)
###############################################################################
# Plugins ######################################################################
###############################################################################
class PluginError(BottleException): pass
class JSONPlugin(object):
name = 'json'
api = 2
def __init__(self, json_dumps=json_dumps):
self.json_dumps = json_dumps
def apply(self, callback, route):
dumps = self.json_dumps
if not dumps: return callback
def wrapper(*a, **ka):
try:
rv = callback(*a, **ka)
except HTTPError:
rv = _e()
if isinstance(rv, dict):
#Attempt to serialize, raises exception on failure
json_response = dumps(rv)
                #Set content type only if serialization successful
response.content_type = 'application/json'
return json_response
elif isinstance(rv, HTTPResponse) and isinstance(rv.body, dict):
rv.body = dumps(rv.body)
rv.content_type = 'application/json'
return rv
return wrapper
class TemplatePlugin(object):
''' This plugin applies the :func:`view` decorator to all routes with a
`template` config parameter. If the parameter is a tuple, the second
element must be a dict with additional options (e.g. `template_engine`)
or default variables for the template. '''
name = 'template'
api = 2
def apply(self, callback, route):
conf = route.config.get('template')
if isinstance(conf, (tuple, list)) and len(conf) == 2:
return view(conf[0], **conf[1])(callback)
elif isinstance(conf, str):
return view(conf)(callback)
else:
return callback
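# Illustrative sketch (not part of the original source): a route with a
# `template` config parameter picked up by TemplatePlugin. The template name
# is hypothetical.
#
#     @route('/hello/<name>', template='hello_template')
#     def hello(name):
#         return dict(name=name)   # rendered with hello_template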
#: Not a plugin, but part of the plugin API. TODO: Find a better place.
class _ImportRedirect(object):
def __init__(self, name, impmask):
''' Create a virtual package that redirects imports (see PEP 302). '''
self.name = name
self.impmask = impmask
self.module = sys.modules.setdefault(name, imp.new_module(name))
self.module.__dict__.update({'__file__': __file__, '__path__': [],
'__all__': [], '__loader__': self})
sys.meta_path.append(self)
def find_module(self, fullname, path=None):
if '.' not in fullname: return
packname, modname = fullname.rsplit('.', 1)
if packname != self.name: return
return self
def load_module(self, fullname):
if fullname in sys.modules: return sys.modules[fullname]
packname, modname = fullname.rsplit('.', 1)
realname = self.impmask % modname
__import__(realname)
module = sys.modules[fullname] = sys.modules[realname]
setattr(self.module, modname, module)
module.__loader__ = self
return module
###############################################################################
# Common Utilities #############################################################
###############################################################################
class MultiDict(DictMixin):
""" This dict stores multiple values per key, but behaves exactly like a
normal dict in that it returns only the newest value for any given key.
There are special methods available to access the full list of values.
"""
def __init__(self, *a, **k):
self.dict = dict((k, [v]) for (k, v) in dict(*a, **k).items())
def __len__(self): return len(self.dict)
def __iter__(self): return iter(self.dict)
def __contains__(self, key): return key in self.dict
def __delitem__(self, key): del self.dict[key]
def __getitem__(self, key): return self.dict[key][-1]
def __setitem__(self, key, value): self.append(key, value)
def keys(self): return self.dict.keys()
if py3k:
def values(self): return (v[-1] for v in self.dict.values())
def items(self): return ((k, v[-1]) for k, v in self.dict.items())
def allitems(self):
return ((k, v) for k, vl in self.dict.items() for v in vl)
iterkeys = keys
itervalues = values
iteritems = items
iterallitems = allitems
else:
def values(self): return [v[-1] for v in self.dict.values()]
def items(self): return [(k, v[-1]) for k, v in self.dict.items()]
def iterkeys(self): return self.dict.iterkeys()
def itervalues(self): return (v[-1] for v in self.dict.itervalues())
def iteritems(self):
return ((k, v[-1]) for k, v in self.dict.iteritems())
def iterallitems(self):
return ((k, v) for k, vl in self.dict.iteritems() for v in vl)
def allitems(self):
return [(k, v) for k, vl in self.dict.iteritems() for v in vl]
def get(self, key, default=None, index=-1, type=None):
''' Return the most recent value for a key.
:param default: The default value to be returned if the key is not
present or the type conversion fails.
:param index: An index for the list of available values.
:param type: If defined, this callable is used to cast the value
                into a specific type. Exceptions are suppressed and result in
                the default value being returned.
'''
try:
val = self.dict[key][index]
return type(val) if type else val
except Exception:
pass
return default
def append(self, key, value):
''' Add a new value to the list of values for this key. '''
self.dict.setdefault(key, []).append(value)
def replace(self, key, value):
''' Replace the list of values with a single value. '''
self.dict[key] = [value]
def getall(self, key):
''' Return a (possibly empty) list of values for a key. '''
return self.dict.get(key) or []
#: Aliases for WTForms to mimic other multi-dict APIs (Django)
getone = get
getlist = getall
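# Illustrative sketch (not part of the original source): MultiDict remembers
# every value, while plain item access returns only the newest one.
#
#     md = MultiDict(a=1)
#     md['a'] = 2
#     md['a']                          # -> 2
#     md.getall('a')                   # -> [1, 2]
#     md.get('a', type=str, index=0)   # -> '1'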
class FormsDict(MultiDict):
''' This :class:`MultiDict` subclass is used to store request form data.
        In addition to the normal dict-like item access methods (which return
unmodified data as native strings), this container also supports
attribute-like access to its values. Attributes are automatically de-
or recoded to match :attr:`input_encoding` (default: 'utf8'). Missing
attributes default to an empty string. '''
#: Encoding used for attribute values.
input_encoding = 'utf8'
#: If true (default), unicode strings are first encoded with `latin1`
#: and then decoded to match :attr:`input_encoding`.
recode_unicode = True
def _fix(self, s, encoding=None):
if isinstance(s, unicode) and self.recode_unicode: # Python 3 WSGI
s = s.encode('latin1')
if isinstance(s, bytes): # Python 2 WSGI
return s.decode(encoding or self.input_encoding)
return s
def decode(self, encoding=None):
''' Returns a copy with all keys and values de- or recoded to match
:attr:`input_encoding`. Some libraries (e.g. WTForms) want a
unicode dictionary. '''
copy = FormsDict()
enc = copy.input_encoding = encoding or self.input_encoding
copy.recode_unicode = False
for key, value in self.allitems():
copy.append(self._fix(key, enc), self._fix(value, enc))
return copy
def getunicode(self, name, default=None, encoding=None):
''' Return the value as a unicode string, or the default. '''
try:
return self._fix(self[name], encoding)
except (UnicodeError, KeyError):
return default
def __getattr__(self, name, default=unicode()):
# Without this guard, pickle generates a cryptic TypeError:
if name.startswith('__') and name.endswith('__'):
return super(FormsDict, self).__getattr__(name)
return self.getunicode(name, default=default)
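# Illustrative sketch (not part of the original source): attribute access on a
# FormsDict decodes values and never raises for missing keys.
#
#     fd = FormsDict()
#     fd['name'] = 'Bob'
#     fd.name            # -> u'Bob' (decoded via _fix)
#     fd.missing         # -> u'' (default, no KeyError)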
class HeaderDict(MultiDict):
""" A case-insensitive version of :class:`MultiDict` that defaults to
replace the old value instead of appending it. """
def __init__(self, *a, **ka):
self.dict = {}
if a or ka: self.update(*a, **ka)
def __contains__(self, key): return _hkey(key) in self.dict
def __delitem__(self, key): del self.dict[_hkey(key)]
def __getitem__(self, key): return self.dict[_hkey(key)][-1]
def __setitem__(self, key, value): self.dict[_hkey(key)] = [str(value)]
def append(self, key, value):
self.dict.setdefault(_hkey(key), []).append(str(value))
def replace(self, key, value): self.dict[_hkey(key)] = [str(value)]
def getall(self, key): return self.dict.get(_hkey(key)) or []
def get(self, key, default=None, index=-1):
return MultiDict.get(self, _hkey(key), default, index)
def filter(self, names):
for name in [_hkey(n) for n in names]:
if name in self.dict:
del self.dict[name]
class WSGIHeaderDict(DictMixin):
''' This dict-like class wraps a WSGI environ dict and provides convenient
access to HTTP_* fields. Keys and values are native strings
(2.x bytes or 3.x unicode) and keys are case-insensitive. If the WSGI
environment contains non-native string values, these are de- or encoded
using a lossless 'latin1' character set.
The API will remain stable even on changes to the relevant PEPs.
Currently PEP 333, 444 and 3333 are supported. (PEP 444 is the only one
that uses non-native strings.)
'''
#: List of keys that do not have a ``HTTP_`` prefix.
cgikeys = ('CONTENT_TYPE', 'CONTENT_LENGTH')
def __init__(self, environ):
self.environ = environ
def _ekey(self, key):
''' Translate header field name to CGI/WSGI environ key. '''
key = key.replace('-','_').upper()
if key in self.cgikeys:
return key
return 'HTTP_' + key
def raw(self, key, default=None):
''' Return the header value as is (may be bytes or unicode). '''
return self.environ.get(self._ekey(key), default)
def __getitem__(self, key):
return tonat(self.environ[self._ekey(key)], 'latin1')
def __setitem__(self, key, value):
raise TypeError("%s is read-only." % self.__class__)
def __delitem__(self, key):
raise TypeError("%s is read-only." % self.__class__)
def __iter__(self):
for key in self.environ:
if key[:5] == 'HTTP_':
yield key[5:].replace('_', '-').title()
elif key in self.cgikeys:
yield key.replace('_', '-').title()
def keys(self): return [x for x in self]
def __len__(self): return len(self.keys())
def __contains__(self, key): return self._ekey(key) in self.environ
class ConfigDict(dict):
''' A dict-like configuration storage with additional support for
namespaces, validators, meta-data, on_change listeners and more.
'''
__slots__ = ('_meta', '_on_change')
def __init__(self, *a, **ka):
self._meta = {}
self._on_change = lambda name, value: None
if a or ka:
            depr('Constructor no longer accepts parameters.')
self.update(*a, **ka)
def load_config(self, filename):
''' Load values from an *.ini style config file.
If the config file contains sections, their names are used as
namespaces for the values within. The two special sections
``DEFAULT`` and ``bottle`` refer to the root namespace (no prefix).
'''
conf = ConfigParser()
conf.read(filename)
for section in conf.sections():
for key, value in conf.items(section):
if section not in ('DEFAULT', 'bottle'):
key = section + '.' + key
self[key] = value
return self
def load_dict(self, source, namespace=''):
''' Load values from a dictionary structure. Nesting can be used to
represent namespaces.
>>> c.load_dict({'some': {'namespace': {'key': 'value'} } })
{'some.namespace.key': 'value'}
'''
for key, value in source.items():
if isinstance(key, str):
nskey = (namespace + '.' + key).strip('.')
if isinstance(value, dict):
self.load_dict(value, namespace=nskey)
else:
self[nskey] = value
else:
raise TypeError('Key has type %r (not a string)' % type(key))
return self
def update(self, *a, **ka):
''' If the first parameter is a string, all keys are prefixed with this
namespace. Apart from that it works just as the usual dict.update().
Example: ``update('some.namespace', key='value')`` '''
prefix = ''
if a and isinstance(a[0], str):
prefix = a[0].strip('.') + '.'
a = a[1:]
for key, value in dict(*a, **ka).items():
self[prefix+key] = value
    def setdefault(self, key, value):
        if key not in self:
            self[key] = value
        return self[key]  # keep the dict.setdefault contract of returning the value
def __setitem__(self, key, value):
if not isinstance(key, str):
raise TypeError('Key has type %r (not a string)' % type(key))
value = self.meta_get(key, 'filter', lambda x: x)(value)
if key in self and self[key] is value:
return
self._on_change(key, value)
dict.__setitem__(self, key, value)
def __delitem__(self, key):
self._on_change(key, None)
dict.__delitem__(self, key)
def meta_get(self, key, metafield, default=None):
''' Return the value of a meta field for a key. '''
return self._meta.get(key, {}).get(metafield, default)
def meta_set(self, key, metafield, value):
''' Set the meta field for a key to a new value. This triggers the
on-change handler for existing keys. '''
self._meta.setdefault(key, {})[metafield] = value
if key in self:
self[key] = self[key]
def meta_list(self, key):
''' Return an iterable of meta field names defined for a key. '''
return self._meta.get(key, {}).keys()
# Deprecated ConfigDict features
def __getattr__(self, key):
depr('Attribute access is deprecated.') #0.12
if key not in self and key[0].isupper():
self[key] = ConfigDict()
return self.get(key)
def __setattr__(self, key, value):
if key in self.__slots__:
return dict.__setattr__(self, key, value)
depr('Attribute assignment is deprecated.') #0.12
if hasattr(dict, key):
raise AttributeError('Read-only attribute.')
if key in self and self[key] and isinstance(self[key], ConfigDict):
raise AttributeError('Non-empty namespace attribute.')
self[key] = value
def __delattr__(self, key):
if key in self: del self[key]
def __call__(self, *a, **ka):
        depr('Calling ConfigDict is deprecated. Use the update() method.') #0.12
self.update(*a, **ka)
return self
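# Illustrative sketch (not part of the original source): nested dicts flatten
# into dotted namespace keys.
#
#     c = ConfigDict()
#     c.load_dict({'db': {'host': 'localhost', 'port': 5432}})
#     c['db.host']                 # -> 'localhost'
#     c.update('db', user='bob')   # same as c['db.user'] = 'bob'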
class AppStack(list):
""" A stack-like list. Calling it returns the head of the stack. """
def __call__(self):
""" Return the current default application. """
return self[-1]
def push(self, value=None):
""" Add a new :class:`Bottle` instance to the stack """
if not isinstance(value, Bottle):
value = Bottle()
self.append(value)
return value
class WSGIFileWrapper(object):
def __init__(self, fp, buffer_size=1024*64):
self.fp, self.buffer_size = fp, buffer_size
for attr in ('fileno', 'close', 'read', 'readlines', 'tell', 'seek'):
if hasattr(fp, attr): setattr(self, attr, getattr(fp, attr))
def __iter__(self):
buff, read = self.buffer_size, self.read
while True:
part = read(buff)
if not part: return
yield part
class _closeiter(object):
''' This only exists to be able to attach a .close method to iterators that
do not support attribute assignment (most of itertools). '''
def __init__(self, iterator, close=None):
self.iterator = iterator
self.close_callbacks = makelist(close)
def __iter__(self):
return iter(self.iterator)
def close(self):
for func in self.close_callbacks:
func()
class ResourceManager(object):
''' This class manages a list of search paths and helps to find and open
application-bound resources (files).
:param base: default value for :meth:`add_path` calls.
:param opener: callable used to open resources.
:param cachemode: controls which lookups are cached. One of 'all',
'found' or 'none'.
'''
def __init__(self, base='./', opener=open, cachemode='all'):
        self.opener = opener  # honor the `opener` argument (was hardcoded to `open`)
self.base = base
self.cachemode = cachemode
#: A list of search paths. See :meth:`add_path` for details.
self.path = []
#: A cache for resolved paths. ``res.cache.clear()`` clears the cache.
self.cache = {}
def add_path(self, path, base=None, index=None, create=False):
''' Add a new path to the list of search paths. Return False if the
path does not exist.
:param path: The new search path. Relative paths are turned into
an absolute and normalized form. If the path looks like a file
(not ending in `/`), the filename is stripped off.
:param base: Path used to absolutize relative search paths.
Defaults to :attr:`base` which defaults to ``os.getcwd()``.
:param index: Position within the list of search paths. Defaults
to last index (appends to the list).
The `base` parameter makes it easy to reference files installed
along with a python module or package::
res.add_path('./resources/', __file__)
'''
base = os.path.abspath(os.path.dirname(base or self.base))
path = os.path.abspath(os.path.join(base, os.path.dirname(path)))
path += os.sep
if path in self.path:
self.path.remove(path)
if create and not os.path.isdir(path):
os.makedirs(path)
if index is None:
self.path.append(path)
else:
self.path.insert(index, path)
self.cache.clear()
return os.path.exists(path)
def __iter__(self):
''' Iterate over all existing files in all registered paths. '''
search = self.path[:]
while search:
path = search.pop()
if not os.path.isdir(path): continue
for name in os.listdir(path):
full = os.path.join(path, name)
if os.path.isdir(full): search.append(full)
else: yield full
def lookup(self, name):
''' Search for a resource and return an absolute file path, or `None`.
The :attr:`path` list is searched in order. The first match is
            returned. Symlinks are followed. The result is cached to speed up
future lookups. '''
if name not in self.cache or DEBUG:
for path in self.path:
fpath = os.path.join(path, name)
if os.path.isfile(fpath):
if self.cachemode in ('all', 'found'):
self.cache[name] = fpath
return fpath
if self.cachemode == 'all':
self.cache[name] = None
return self.cache[name]
def open(self, name, mode='r', *args, **kwargs):
''' Find a resource and return a file object, or raise IOError. '''
fname = self.lookup(name)
if not fname: raise IOError("Resource %r not found." % name)
return self.opener(fname, mode=mode, *args, **kwargs)
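# Illustrative sketch (not part of the original source): registering a search
# path relative to the current module and opening a bundled resource. The
# './resources/' directory and 'defaults.ini' name are hypothetical.
#
#     rm = ResourceManager()
#     rm.add_path('./resources/', base=__file__)
#     with rm.open('defaults.ini') as fp:
#         defaults = fp.read()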
class FileUpload(object):
def __init__(self, fileobj, name, filename, headers=None):
''' Wrapper for file uploads. '''
#: Open file(-like) object (BytesIO buffer or temporary file)
self.file = fileobj
#: Name of the upload form field
self.name = name
#: Raw filename as sent by the client (may contain unsafe characters)
self.raw_filename = filename
#: A :class:`HeaderDict` with additional headers (e.g. content-type)
self.headers = HeaderDict(headers) if headers else HeaderDict()
content_type = HeaderProperty('Content-Type')
content_length = HeaderProperty('Content-Length', reader=int, default=-1)
@cached_property
def filename(self):
''' Name of the file on the client file system, but normalized to ensure
file system compatibility (lowercase, no whitespace, no path
separators, no unsafe characters, ASCII only). An empty filename
is returned as 'empty'.
'''
from unicodedata import normalize #TODO: Module level import?
fname = self.raw_filename
if isinstance(fname, unicode):
fname = normalize('NFKD', fname).encode('ASCII', 'ignore')
fname = fname.decode('ASCII', 'ignore')
fname = os.path.basename(fname.replace('\\', os.path.sep))
fname = re.sub(r'[^a-zA-Z0-9-_.\s]', '', fname).strip().lower()
fname = re.sub(r'[-\s]+', '-', fname.strip('.').strip())
return fname or 'empty'
def _copy_file(self, fp, chunk_size=2**16):
read, write, offset = self.file.read, fp.write, self.file.tell()
while 1:
buf = read(chunk_size)
if not buf: break
write(buf)
self.file.seek(offset)
def save(self, destination, overwrite=False, chunk_size=2**16):
''' Save file to disk or copy its content to an open file(-like) object.
If *destination* is a directory, :attr:`filename` is added to the
path. Existing files are not overwritten by default (IOError).
:param destination: File path, directory or file(-like) object.
:param overwrite: If True, replace existing files. (default: False)
:param chunk_size: Bytes to read at a time. (default: 64kb)
'''
if isinstance(destination, basestring): # Except file-likes here
if os.path.isdir(destination):
destination = os.path.join(destination, self.filename)
if not overwrite and os.path.exists(destination):
raise IOError('File exists.')
with open(destination, 'wb') as fp:
self._copy_file(fp, chunk_size)
else:
self._copy_file(destination, chunk_size)
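# Illustrative sketch (not part of the original source): saving an upload from
# a form field. The field name 'data' and target directory are hypothetical;
# the directory must already exist.
#
#     @route('/upload', method='POST')
#     def do_upload():
#         upload = request.files.get('data')
#         upload.save('/tmp/uploads')          # directory: filename is appended
#         return 'saved as %s' % upload.filename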
###############################################################################
# Application Helper ###########################################################
###############################################################################
def abort(code=500, text='Unknown Error: Application stopped.'):
""" Aborts execution and causes a HTTP error. """
raise HTTPError(code, text)
def redirect(url, code=None):
""" Aborts execution and causes a 303 or 302 redirect, depending on
the HTTP protocol version. """
if not code:
code = 303 if request.get('SERVER_PROTOCOL') == "HTTP/1.1" else 302
res = response.copy(cls=HTTPResponse)
res.status = code
res.body = ""
res.set_header('Location', urljoin(request.url, url))
raise res
def _file_iter_range(fp, offset, bytes, maxread=1024*1024):
''' Yield chunks from a range in a file. No chunk is bigger than maxread.'''
fp.seek(offset)
while bytes > 0:
part = fp.read(min(bytes, maxread))
if not part: break
bytes -= len(part)
yield part
def static_file(filename, root, mimetype='auto', download=False):
""" Open a file in a safe way and return :exc:`HTTPResponse` with status
code 200, 305, 401 or 404. Set Content-Type, Content-Encoding,
Content-Length and Last-Modified header. Obey If-Modified-Since header
and HEAD requests.
"""
root = os.path.abspath(root) + os.sep
filename = os.path.abspath(os.path.join(root, filename.strip('/\\')))
headers = dict()
if not filename.startswith(root):
return HTTPError(403, "Access denied.")
if not os.path.exists(filename) or not os.path.isfile(filename):
return HTTPError(404, "File does not exist.")
if not os.access(filename, os.R_OK):
return HTTPError(403, "You do not have permission to access this file.")
if mimetype == 'auto':
mimetype, encoding = mimetypes.guess_type(filename)
if mimetype: headers['Content-Type'] = mimetype
if encoding: headers['Content-Encoding'] = encoding
elif mimetype:
headers['Content-Type'] = mimetype
if download:
download = os.path.basename(filename if download == True else download)
headers['Content-Disposition'] = 'attachment; filename="%s"' % download
stats = os.stat(filename)
headers['Content-Length'] = clen = stats.st_size
lm = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(stats.st_mtime))
headers['Last-Modified'] = lm
ims = request.environ.get('HTTP_IF_MODIFIED_SINCE')
if ims:
ims = parse_date(ims.split(";")[0].strip())
if ims is not None and ims >= int(stats.st_mtime):
headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
return HTTPResponse(status=304, **headers)
body = '' if request.method == 'HEAD' else open(filename, 'rb')
headers["Accept-Ranges"] = "bytes"
    if 'HTTP_RANGE' in request.environ:
        ranges = list(parse_range_header(request.environ['HTTP_RANGE'], clen))
if not ranges:
return HTTPError(416, "Requested Range Not Satisfiable")
offset, end = ranges[0]
headers["Content-Range"] = "bytes %d-%d/%d" % (offset, end-1, clen)
headers["Content-Length"] = str(end-offset)
if body: body = _file_iter_range(body, offset, end-offset)
return HTTPResponse(body, status=206, **headers)
return HTTPResponse(body, **headers)
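# Illustrative sketch (not part of the original source): serving files below a
# fixed root directory. The '/static' prefix and root path are hypothetical.
#
#     @route('/static/<filepath:path>')
#     def serve_static(filepath):
#         return static_file(filepath, root='/var/www/static')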
###############################################################################
# HTTP Utilities and MISC (TODO) ###############################################
###############################################################################
def debug(mode=True):
""" Change the debug level.
There is only one debug level supported at the moment."""
global DEBUG
if mode: warnings.simplefilter('default')
DEBUG = bool(mode)
def http_date(value):
if isinstance(value, (datedate, datetime)):
value = value.utctimetuple()
elif isinstance(value, (int, float)):
value = time.gmtime(value)
if not isinstance(value, basestring):
value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value)
return value
def parse_date(ims):
""" Parse rfc1123, rfc850 and asctime timestamps and return UTC epoch. """
try:
ts = email.utils.parsedate_tz(ims)
return time.mktime(ts[:8] + (0,)) - (ts[9] or 0) - time.timezone
except (TypeError, ValueError, IndexError, OverflowError):
return None
def parse_auth(header):
""" Parse rfc2617 HTTP authentication header string (basic) and return (user,pass) tuple or None"""
try:
method, data = header.split(None, 1)
if method.lower() == 'basic':
user, pwd = touni(base64.b64decode(tob(data))).split(':',1)
return user, pwd
except (KeyError, ValueError):
return None
def parse_range_header(header, maxlen=0):
    ''' Yield (start, end) ranges parsed from an HTTP Range header. Skip
unsatisfiable ranges. The end index is non-inclusive.'''
if not header or header[:6] != 'bytes=': return
ranges = [r.split('-', 1) for r in header[6:].split(',') if '-' in r]
for start, end in ranges:
try:
if not start: # bytes=-100 -> last 100 bytes
start, end = max(0, maxlen-int(end)), maxlen
            elif not end:   # bytes=100-    -> all but the first 100 bytes
start, end = int(start), maxlen
else: # bytes=100-200 -> bytes 100-200 (inclusive)
start, end = int(start), min(int(end)+1, maxlen)
if 0 <= start < end <= maxlen:
yield start, end
except ValueError:
pass
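# Illustrative sketch (not part of the original source): parsing a Range
# header against a 1000-byte resource.
#
#     list(parse_range_header('bytes=0-499,900-', 1000))
#     # -> [(0, 500), (900, 1000)]  (end indices are non-inclusive)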
def _parse_qsl(qs):
r = []
for pair in qs.replace(';','&').split('&'):
if not pair: continue
nv = pair.split('=', 1)
if len(nv) != 2: nv.append('')
key = urlunquote(nv[0].replace('+', ' '))
value = urlunquote(nv[1].replace('+', ' '))
r.append((key, value))
return r
def _lscmp(a, b):
''' Compares two strings in a cryptographically safe way:
Runtime is not affected by length of common prefix. '''
return not sum(0 if x==y else 1 for x, y in zip(a, b)) and len(a) == len(b)
def cookie_encode(data, key):
''' Encode and sign a pickle-able object. Return a (byte) string '''
msg = base64.b64encode(pickle.dumps(data, -1))
sig = base64.b64encode(hmac.new(tob(key), msg).digest())
return tob('!') + sig + tob('?') + msg
def cookie_decode(data, key):
''' Verify and decode an encoded string. Return an object or None.'''
data = tob(data)
if cookie_is_encoded(data):
sig, msg = data.split(tob('?'), 1)
if _lscmp(sig[1:], base64.b64encode(hmac.new(tob(key), msg).digest())):
return pickle.loads(base64.b64decode(msg))
return None
def cookie_is_encoded(data):
    ''' Return True if the argument looks like an encoded cookie.'''
return bool(data.startswith(tob('!')) and tob('?') in data)
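# Illustrative sketch (not part of the original source): a sign/verify round
# trip with the helpers above.
#
#     blob = cookie_encode(('session', {'uid': 42}), 'secret-key')
#     cookie_is_encoded(blob)              # -> True
#     cookie_decode(blob, 'secret-key')    # -> ('session', {'uid': 42})
#     cookie_decode(blob, 'wrong-key')     # -> None (signature mismatch)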
def html_escape(string):
    ''' Escape HTML special characters ``&<>`` and quotes ``'"``. '''
    return string.replace('&','&amp;').replace('<','&lt;').replace('>','&gt;')\
                 .replace('"','&quot;').replace("'",'&#039;')
def html_quote(string):
    ''' Escape and quote a string to be used as an HTTP attribute.'''
    return '"%s"' % html_escape(string).replace('\n','&#10;')\
                    .replace('\r','&#13;').replace('\t','&#9;')
def yieldroutes(func):
""" Return a generator for routes that match the signature (name, args)
of the func parameter. This may yield more than one route if the function
takes optional keyword arguments. The output is best described by example::
a() -> '/a'
b(x, y) -> '/b/<x>/<y>'
c(x, y=5) -> '/c/<x>' and '/c/<x>/<y>'
d(x=5, y=6) -> '/d' and '/d/<x>' and '/d/<x>/<y>'
"""
path = '/' + func.__name__.replace('__','/').lstrip('/')
spec = getargspec(func)
argc = len(spec[0]) - len(spec[3] or [])
path += ('/<%s>' * argc) % tuple(spec[0][:argc])
yield path
for arg in spec[0][argc:]:
path += '/<%s>' % arg
yield path
def path_shift(script_name, path_info, shift=1):
''' Shift path fragments from PATH_INFO to SCRIPT_NAME and vice versa.
:return: The modified paths.
:param script_name: The SCRIPT_NAME path.
        :param path_info: The PATH_INFO path.
:param shift: The number of path fragments to shift. May be negative to
change the shift direction. (default: 1)
'''
if shift == 0: return script_name, path_info
pathlist = path_info.strip('/').split('/')
scriptlist = script_name.strip('/').split('/')
if pathlist and pathlist[0] == '': pathlist = []
if scriptlist and scriptlist[0] == '': scriptlist = []
if shift > 0 and shift <= len(pathlist):
moved = pathlist[:shift]
scriptlist = scriptlist + moved
pathlist = pathlist[shift:]
elif shift < 0 and shift >= -len(scriptlist):
moved = scriptlist[shift:]
pathlist = moved + pathlist
scriptlist = scriptlist[:shift]
else:
empty = 'SCRIPT_NAME' if shift < 0 else 'PATH_INFO'
raise AssertionError("Cannot shift. Nothing left from %s" % empty)
new_script_name = '/' + '/'.join(scriptlist)
new_path_info = '/' + '/'.join(pathlist)
if path_info.endswith('/') and pathlist: new_path_info += '/'
return new_script_name, new_path_info
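# Illustrative sketch (not part of the original source): shifting one segment
# in each direction.
#
#     path_shift('/prefix', '/a/b/c', 1)      # -> ('/prefix/a', '/b/c')
#     path_shift('/prefix/a', '/b/c', -1)     # -> ('/prefix', '/a/b/c')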
def auth_basic(check, realm="private", text="Access denied"):
''' Callback decorator to require HTTP auth (basic).
TODO: Add route(check_auth=...) parameter. '''
def decorator(func):
def wrapper(*a, **ka):
user, password = request.auth or (None, None)
if user is None or not check(user, password):
err = HTTPError(401, text)
err.add_header('WWW-Authenticate', 'Basic realm="%s"' % realm)
return err
return func(*a, **ka)
return wrapper
return decorator
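# Illustrative sketch (not part of the original source): protecting a route
# with basic auth. The credential check is a hypothetical placeholder.
#
#     def check(user, password):
#         return user == 'admin' and password == 's3cret'
#
#     @route('/admin')
#     @auth_basic(check)
#     def admin_page():
#         return 'hello, admin'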
# Shortcuts for common Bottle methods.
# They all refer to the current default application.
def make_default_app_wrapper(name):
''' Return a callable that relays calls to the current default app. '''
@functools.wraps(getattr(Bottle, name))
def wrapper(*a, **ka):
return getattr(app(), name)(*a, **ka)
return wrapper
route = make_default_app_wrapper('route')
get = make_default_app_wrapper('get')
post = make_default_app_wrapper('post')
put = make_default_app_wrapper('put')
delete = make_default_app_wrapper('delete')
error = make_default_app_wrapper('error')
mount = make_default_app_wrapper('mount')
hook = make_default_app_wrapper('hook')
install = make_default_app_wrapper('install')
uninstall = make_default_app_wrapper('uninstall')
url = make_default_app_wrapper('get_url')
###############################################################################
# Server Adapter ###############################################################
###############################################################################
class ServerAdapter(object):
quiet = False
def __init__(self, host='127.0.0.1', port=8080, **options):
self.options = options
self.host = host
self.port = int(port)
def run(self, handler): # pragma: no cover
pass
def __repr__(self):
args = ', '.join(['%s=%s'%(k,repr(v)) for k, v in self.options.items()])
return "%s(%s)" % (self.__class__.__name__, args)
class CGIServer(ServerAdapter):
quiet = True
def run(self, handler): # pragma: no cover
from wsgiref.handlers import CGIHandler
def fixed_environ(environ, start_response):
environ.setdefault('PATH_INFO', '')
return handler(environ, start_response)
CGIHandler().run(fixed_environ)
class FlupFCGIServer(ServerAdapter):
def run(self, handler): # pragma: no cover
import flup.server.fcgi
self.options.setdefault('bindAddress', (self.host, self.port))
flup.server.fcgi.WSGIServer(handler, **self.options).run()
class WSGIRefServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from wsgiref.simple_server import make_server, WSGIRequestHandler
if self.quiet:
class QuietHandler(WSGIRequestHandler):
def log_request(*args, **kw): pass
self.options['handler_class'] = QuietHandler
srv = make_server(self.host, self.port, handler, **self.options)
srv.serve_forever()
class CherryPyServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from cherrypy import wsgiserver
server = wsgiserver.CherryPyWSGIServer((self.host, self.port), handler)
try:
server.start()
finally:
server.stop()
class WaitressServer(ServerAdapter):
def run(self, handler):
from waitress import serve
serve(handler, host=self.host, port=self.port)
class PasteServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from paste import httpserver
from paste.translogger import TransLogger
handler = TransLogger(handler, setup_console_handler=(not self.quiet))
httpserver.serve(handler, host=self.host, port=str(self.port),
**self.options)
class MeinheldServer(ServerAdapter):
def run(self, handler):
from meinheld import server
server.listen((self.host, self.port))
server.run(handler)
class FapwsServer(ServerAdapter):
""" Extremely fast webserver using libev. See http://www.fapws.org/ """
def run(self, handler): # pragma: no cover
import fapws._evwsgi as evwsgi
from fapws import base, config
port = self.port
if float(config.SERVER_IDENT[-2:]) > 0.4:
# fapws3 silently changed its API in 0.5
port = str(port)
evwsgi.start(self.host, port)
# fapws3 never releases the GIL. Complain upstream. I tried. No luck.
if 'BOTTLE_CHILD' in os.environ and not self.quiet:
_stderr("WARNING: Auto-reloading does not work with Fapws3.\n")
_stderr(" (Fapws3 breaks python thread support)\n")
evwsgi.set_base_module(base)
def app(environ, start_response):
environ['wsgi.multiprocess'] = False
return handler(environ, start_response)
evwsgi.wsgi_cb(('', app))
evwsgi.run()
class TornadoServer(ServerAdapter):
""" The super hyped asynchronous server by facebook. Untested. """
def run(self, handler): # pragma: no cover
import tornado.wsgi, tornado.httpserver, tornado.ioloop
container = tornado.wsgi.WSGIContainer(handler)
server = tornado.httpserver.HTTPServer(container)
server.listen(port=self.port,address=self.host)
tornado.ioloop.IOLoop.instance().start()
class AppEngineServer(ServerAdapter):
""" Adapter for Google App Engine. """
quiet = True
def run(self, handler):
from google.appengine.ext.webapp import util
# A main() function in the handler script enables 'App Caching'.
        # Let's make sure it is there. This _really_ improves performance.
module = sys.modules.get('__main__')
if module and not hasattr(module, 'main'):
module.main = lambda: util.run_wsgi_app(handler)
util.run_wsgi_app(handler)
class TwistedServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from twisted.web import server, wsgi
from twisted.python.threadpool import ThreadPool
from twisted.internet import reactor
thread_pool = ThreadPool()
thread_pool.start()
reactor.addSystemEventTrigger('after', 'shutdown', thread_pool.stop)
factory = server.Site(wsgi.WSGIResource(reactor, thread_pool, handler))
reactor.listenTCP(self.port, factory, interface=self.host)
reactor.run()
class DieselServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from diesel.protocols.wsgi import WSGIApplication
app = WSGIApplication(handler, port=self.port)
app.run()
class GeventServer(ServerAdapter):
""" Untested. Options:
* `fast` (default: False) uses libevent's http server, but has some
issues: No streaming, no pipelining, no SSL.
* See gevent.wsgi.WSGIServer() documentation for more options.
"""
def run(self, handler):
from gevent import wsgi, pywsgi, local
if not isinstance(_lctx, local.local):
msg = "Bottle requires gevent.monkey.patch_all() (before import)"
raise RuntimeError(msg)
if not self.options.pop('fast', None): wsgi = pywsgi
self.options['log'] = None if self.quiet else 'default'
address = (self.host, self.port)
wsgi.WSGIServer(address, handler, **self.options).serve_forever()
class GunicornServer(ServerAdapter):
""" Untested. See http://gunicorn.org/configure.html for options. """
def run(self, handler):
from gunicorn.app.base import Application
config = {'bind': "%s:%d" % (self.host, int(self.port))}
config.update(self.options)
class GunicornApplication(Application):
def init(self, parser, opts, args):
return config
def load(self):
return handler
GunicornApplication().run()
class EventletServer(ServerAdapter):
""" Untested """
def run(self, handler):
from eventlet import wsgi, listen
try:
wsgi.server(listen((self.host, self.port)), handler,
log_output=(not self.quiet))
except TypeError:
# Fallback, if we have old version of eventlet
wsgi.server(listen((self.host, self.port)), handler)
class RocketServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from rocket import Rocket
server = Rocket((self.host, self.port), 'wsgi', { 'wsgi_app' : handler })
server.start()
class BjoernServer(ServerAdapter):
""" Fast server written in C: https://github.com/jonashaag/bjoern """
def run(self, handler):
from bjoern import run
run(handler, self.host, self.port)
class AutoServer(ServerAdapter):
""" Untested. """
adapters = [WaitressServer, PasteServer, TwistedServer, CherryPyServer, WSGIRefServer]
def run(self, handler):
for sa in self.adapters:
try:
return sa(self.host, self.port, **self.options).run(handler)
except ImportError:
pass
server_names = {
'cgi': CGIServer,
'flup': FlupFCGIServer,
'wsgiref': WSGIRefServer,
'waitress': WaitressServer,
'cherrypy': CherryPyServer,
'paste': PasteServer,
'fapws3': FapwsServer,
'tornado': TornadoServer,
'gae': AppEngineServer,
'twisted': TwistedServer,
'diesel': DieselServer,
'meinheld': MeinheldServer,
'gunicorn': GunicornServer,
'eventlet': EventletServer,
'gevent': GeventServer,
'rocket': RocketServer,
'bjoern' : BjoernServer,
'auto': AutoServer,
}
###############################################################################
# Application Control ##########################################################
###############################################################################
def load(target, **namespace):
""" Import a module or fetch an object from a module.
* ``package.module`` returns `module` as a module object.
* ``pack.mod:name`` returns the module variable `name` from `pack.mod`.
* ``pack.mod:func()`` calls `pack.mod.func()` and returns the result.
The last form accepts not only function calls, but any type of
expression. Keyword arguments passed to this function are available as
        local variables. Example: ``load('re:compile(x)', x='[a-z]')``
"""
module, target = target.split(":", 1) if ':' in target else (target, None)
if module not in sys.modules: __import__(module)
if not target: return sys.modules[module]
if target.isalnum(): return getattr(sys.modules[module], target)
package_name = module.split('.')[0]
namespace[package_name] = sys.modules[package_name]
return eval('%s.%s' % (module, target), namespace)
def load_app(target):
""" Load a bottle application from a module and make sure that the import
does not affect the current default application, but returns a separate
application object. See :func:`load` for the target parameter. """
global NORUN; NORUN, nr_old = True, NORUN
try:
tmp = default_app.push() # Create a new "default application"
rv = load(target) # Import the target module
return rv if callable(rv) else tmp
finally:
default_app.remove(tmp) # Remove the temporary added default application
NORUN = nr_old
_debug = debug
def run(app=None, server='wsgiref', host='127.0.0.1', port=8080,
interval=1, reloader=False, quiet=False, plugins=None,
debug=None, **kargs):
""" Start a server instance. This method blocks until the server terminates.
:param app: WSGI application or target string supported by
:func:`load_app`. (default: :func:`default_app`)
:param server: Server adapter to use. See :data:`server_names` keys
for valid names or pass a :class:`ServerAdapter` subclass.
(default: `wsgiref`)
        :param host: Server address to bind to. Pass ``0.0.0.0`` to listen on
all interfaces including the external one. (default: 127.0.0.1)
:param port: Server port to bind to. Values below 1024 require root
privileges. (default: 8080)
:param reloader: Start auto-reloading server? (default: False)
:param interval: Auto-reloader interval in seconds (default: 1)
:param quiet: Suppress output to stdout and stderr? (default: False)
:param options: Options passed to the server adapter.
"""
if NORUN: return
if reloader and not os.environ.get('BOTTLE_CHILD'):
try:
lockfile = None
fd, lockfile = tempfile.mkstemp(prefix='bottle.', suffix='.lock')
os.close(fd) # We only need this file to exist. We never write to it
while os.path.exists(lockfile):
args = [sys.executable] + sys.argv
environ = os.environ.copy()
environ['BOTTLE_CHILD'] = 'true'
environ['BOTTLE_LOCKFILE'] = lockfile
p = subprocess.Popen(args, env=environ)
while p.poll() is None: # Busy wait...
os.utime(lockfile, None) # I am alive!
time.sleep(interval)
if p.poll() != 3:
if os.path.exists(lockfile): os.unlink(lockfile)
sys.exit(p.poll())
except KeyboardInterrupt:
pass
finally:
if os.path.exists(lockfile):
os.unlink(lockfile)
return
try:
if debug is not None: _debug(debug)
app = app or default_app()
if isinstance(app, basestring):
app = load_app(app)
if not callable(app):
raise ValueError("Application is not callable: %r" % app)
for plugin in plugins or []:
app.install(plugin)
if server in server_names:
server = server_names.get(server)
if isinstance(server, basestring):
server = load(server)
if isinstance(server, type):
server = server(host=host, port=port, **kargs)
if not isinstance(server, ServerAdapter):
raise ValueError("Unknown or unsupported server: %r" % server)
server.quiet = server.quiet or quiet
if not server.quiet:
_stderr("Bottle v%s server starting up (using %s)...\n" % (__version__, repr(server)))
_stderr("Listening on http://%s:%d/\n" % (server.host, server.port))
_stderr("Hit Ctrl-C to quit.\n\n")
if reloader:
lockfile = os.environ.get('BOTTLE_LOCKFILE')
bgcheck = FileCheckerThread(lockfile, interval)
with bgcheck:
server.run(app)
if bgcheck.status == 'reload':
sys.exit(3)
else:
server.run(app)
except KeyboardInterrupt:
pass
except (SystemExit, MemoryError):
raise
except:
if not reloader: raise
if not getattr(server, 'quiet', quiet):
print_exc()
time.sleep(interval)
sys.exit(3)
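# Minimal usage sketch (illustrative):
#
#   app = Bottle()
#
#   @app.route('/hello')
#   def hello():
#       return 'Hello World!'
#
#   run(app, host='localhost', port=8080)  # blocks until Ctrl-C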
class FileCheckerThread(threading.Thread):
    ''' Interrupt the main thread as soon as a changed module file is detected,
        the lockfile gets deleted or gets too old. '''
def __init__(self, lockfile, interval):
threading.Thread.__init__(self)
self.lockfile, self.interval = lockfile, interval
#: Is one of 'reload', 'error' or 'exit'
self.status = None
def run(self):
exists = os.path.exists
mtime = lambda path: os.stat(path).st_mtime
files = dict()
for module in list(sys.modules.values()):
path = getattr(module, '__file__', '')
if path[-4:] in ('.pyo', '.pyc'): path = path[:-1]
if path and exists(path): files[path] = mtime(path)
while not self.status:
if not exists(self.lockfile)\
or mtime(self.lockfile) < time.time() - self.interval - 5:
self.status = 'error'
thread.interrupt_main()
for path, lmtime in list(files.items()):
if not exists(path) or mtime(path) > lmtime:
self.status = 'reload'
thread.interrupt_main()
break
time.sleep(self.interval)
def __enter__(self):
self.start()
def __exit__(self, exc_type, exc_val, exc_tb):
if not self.status: self.status = 'exit' # silent exit
self.join()
return exc_type is not None and issubclass(exc_type, KeyboardInterrupt)
###############################################################################
# Template Adapters ############################################################
###############################################################################
class TemplateError(HTTPError):
def __init__(self, message):
HTTPError.__init__(self, 500, message)
class BaseTemplate(object):
""" Base class and minimal API for template adapters """
extensions = ['tpl','html','thtml','stpl']
settings = {} #used in prepare()
defaults = {} #used in render()
def __init__(self, source=None, name=None, lookup=[], encoding='utf8', **settings):
""" Create a new template.
If the source parameter (str or buffer) is missing, the name argument
is used to guess a template filename. Subclasses can assume that
self.source and/or self.filename are set. Both are strings.
The lookup, encoding and settings parameters are stored as instance
variables.
The lookup parameter stores a list containing directory paths.
The encoding parameter should be used to decode byte strings or files.
The settings parameter contains a dict for engine-specific settings.
"""
self.name = name
self.source = source.read() if hasattr(source, 'read') else source
self.filename = source.filename if hasattr(source, 'filename') else None
self.lookup = [os.path.abspath(x) for x in lookup]
self.encoding = encoding
self.settings = self.settings.copy() # Copy from class variable
self.settings.update(settings) # Apply
if not self.source and self.name:
self.filename = self.search(self.name, self.lookup)
if not self.filename:
raise TemplateError('Template %s not found.' % repr(name))
if not self.source and not self.filename:
raise TemplateError('No template specified.')
self.prepare(**self.settings)
@classmethod
def search(cls, name, lookup=[]):
""" Search name in all directories specified in lookup.
First without, then with common extensions. Return first hit. """
if not lookup:
depr('The template lookup path list should not be empty.')
lookup = ['.']
if os.path.isabs(name) and os.path.isfile(name):
depr('Absolute template path names are deprecated.')
return os.path.abspath(name)
for spath in lookup:
spath = os.path.abspath(spath) + os.sep
fname = os.path.abspath(os.path.join(spath, name))
if not fname.startswith(spath): continue
if os.path.isfile(fname): return fname
for ext in cls.extensions:
if os.path.isfile('%s.%s' % (fname, ext)):
return '%s.%s' % (fname, ext)
@classmethod
def global_config(cls, key, *args):
''' This reads or sets the global settings stored in class.settings. '''
if args:
cls.settings = cls.settings.copy() # Make settings local to class
cls.settings[key] = args[0]
else:
return cls.settings[key]
def prepare(self, **options):
""" Run preparations (parsing, caching, ...).
It should be possible to call this again to refresh a template or to
update settings.
"""
raise NotImplementedError
def render(self, *args, **kwargs):
""" Render the template with the specified local variables and return
a single byte or unicode string. If it is a byte string, the encoding
must match self.encoding. This method must be thread-safe!
Local variables may be provided in dictionaries (args)
or directly, as keywords (kwargs).
"""
raise NotImplementedError
class MakoTemplate(BaseTemplate):
def prepare(self, **options):
from mako.template import Template
from mako.lookup import TemplateLookup
options.update({'input_encoding':self.encoding})
options.setdefault('format_exceptions', bool(DEBUG))
lookup = TemplateLookup(directories=self.lookup, **options)
if self.source:
self.tpl = Template(self.source, lookup=lookup, **options)
else:
self.tpl = Template(uri=self.name, filename=self.filename, lookup=lookup, **options)
def render(self, *args, **kwargs):
for dictarg in args: kwargs.update(dictarg)
_defaults = self.defaults.copy()
_defaults.update(kwargs)
return self.tpl.render(**_defaults)
class CheetahTemplate(BaseTemplate):
def prepare(self, **options):
from Cheetah.Template import Template
self.context = threading.local()
self.context.vars = {}
options['searchList'] = [self.context.vars]
if self.source:
self.tpl = Template(source=self.source, **options)
else:
self.tpl = Template(file=self.filename, **options)
def render(self, *args, **kwargs):
for dictarg in args: kwargs.update(dictarg)
self.context.vars.update(self.defaults)
self.context.vars.update(kwargs)
out = str(self.tpl)
self.context.vars.clear()
return out
class Jinja2Template(BaseTemplate):
def prepare(self, filters=None, tests=None, **kwargs):
from jinja2 import Environment, FunctionLoader
if 'prefix' in kwargs: # TODO: to be removed after a while
raise RuntimeError('The keyword argument `prefix` has been removed. '
'Use the full jinja2 environment name line_statement_prefix instead.')
self.env = Environment(loader=FunctionLoader(self.loader), **kwargs)
if filters: self.env.filters.update(filters)
if tests: self.env.tests.update(tests)
if self.source:
self.tpl = self.env.from_string(self.source)
else:
self.tpl = self.env.get_template(self.filename)
def render(self, *args, **kwargs):
for dictarg in args: kwargs.update(dictarg)
_defaults = self.defaults.copy()
_defaults.update(kwargs)
return self.tpl.render(**_defaults)
def loader(self, name):
fname = self.search(name, self.lookup)
if not fname: return
with open(fname, "rb") as f:
return f.read().decode(self.encoding)
class SimpleTemplate(BaseTemplate):
def prepare(self, escape_func=html_escape, noescape=False, syntax=None, **ka):
self.cache = {}
enc = self.encoding
self._str = lambda x: touni(x, enc)
self._escape = lambda x: escape_func(touni(x, enc))
self.syntax = syntax
if noescape:
self._str, self._escape = self._escape, self._str
@cached_property
def co(self):
return compile(self.code, self.filename or '<string>', 'exec')
@cached_property
def code(self):
source = self.source or open(self.filename, 'rb').read()
try:
source, encoding = touni(source), 'utf8'
except UnicodeError:
depr('Template encodings other than utf8 are no longer supported.')
source, encoding = touni(source, 'latin1'), 'latin1'
parser = StplParser(source, encoding=encoding, syntax=self.syntax)
code = parser.translate()
self.encoding = parser.encoding
return code
def _rebase(self, _env, _name, **kwargs):
_env['_rebase'] = (_name, kwargs)
def _include(self, _env, _name, **kwargs):
env = _env.copy()
env.update(kwargs)
if _name not in self.cache:
self.cache[_name] = self.__class__(name=_name, lookup=self.lookup)
return self.cache[_name].execute(env['_stdout'], env)
def execute(self, _stdout, kwargs):
env = self.defaults.copy()
env.update(kwargs)
env.update({'_stdout': _stdout, '_printlist': _stdout.extend,
'include': functools.partial(self._include, env),
'rebase': functools.partial(self._rebase, env), '_rebase': None,
'_str': self._str, '_escape': self._escape, 'get': env.get,
'setdefault': env.setdefault, 'defined': env.__contains__ })
eval(self.co, env)
if env.get('_rebase'):
subtpl, rargs = env.pop('_rebase')
rargs['body'] = ''.join(_stdout) #copy stdout
del _stdout[:] # clear stdout
return self._include(env, subtpl, **rargs)
return env
def render(self, *args, **kwargs):
""" Render the template using keyword arguments as local variables. """
env = {}; stdout = []
for dictarg in args: env.update(dictarg)
env.update(kwargs)
self.execute(stdout, env)
return ''.join(stdout)
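# Usage sketch (illustrative): SimpleTemplate used directly.
#
#   tpl = SimpleTemplate('Hello {{name}}!')
#   tpl.render(name='World')       # -> 'Hello World!'
#   tpl.render({'name': 'World'})  # positional dicts work too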
class StplSyntaxError(TemplateError): pass
class StplParser(object):
''' Parser for stpl templates. '''
_re_cache = {} #: Cache for compiled re patterns
# This huge pile of voodoo magic splits python code into 8 different tokens.
# 1: All kinds of python strings (trust me, it works)
_re_tok = '((?m)[urbURB]?(?:\'\'(?!\')|""(?!")|\'{6}|"{6}' \
'|\'(?:[^\\\\\']|\\\\.)+?\'|"(?:[^\\\\"]|\\\\.)+?"' \
'|\'{3}(?:[^\\\\]|\\\\.|\\n)+?\'{3}' \
'|"{3}(?:[^\\\\]|\\\\.|\\n)+?"{3}))'
_re_inl = _re_tok.replace('|\\n','') # We re-use this string pattern later
# 2: Comments (until end of line, but not the newline itself)
_re_tok += '|(#.*)'
# 3,4: Keywords that start or continue a python block (only start of line)
_re_tok += '|^([ \\t]*(?:if|for|while|with|try|def|class)\\b)' \
'|^([ \\t]*(?:elif|else|except|finally)\\b)'
# 5: Our special 'end' keyword (but only if it stands alone)
_re_tok += '|((?:^|;)[ \\t]*end[ \\t]*(?=(?:%(block_close)s[ \\t]*)?\\r?$|;|#))'
# 6: A customizable end-of-code-block template token (only end of line)
_re_tok += '|(%(block_close)s[ \\t]*(?=$))'
# 7: And finally, a single newline. The 8th token is 'everything else'
_re_tok += '|(\\r?\\n)'
# Match the start tokens of code areas in a template
_re_split = '(?m)^[ \t]*(?:(%(line_start)s)|(%(block_start)s))'
# Match inline statements (may contain python strings)
_re_inl = '%%(inline_start)s((?:%s|[^\'"\n]*?)+)%%(inline_end)s' % _re_inl
default_syntax = '<% %> % {{ }}'
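    # Custom syntax sketch (illustrative): the five whitespace-separated
    # tokens are block_start, block_close, line_start, inline_start and
    # inline_end, e.g.
    #   SimpleTemplate('<h1><?= title ?></h1>', syntax='<? ?> ? <?= ?>')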
def __init__(self, source, syntax=None, encoding='utf8'):
self.source, self.encoding = touni(source, encoding), encoding
self.set_syntax(syntax or self.default_syntax)
self.code_buffer, self.text_buffer = [], []
self.lineno, self.offset = 1, 0
self.indent, self.indent_mod = 0, 0
def get_syntax(self):
''' Tokens as a space separated string (default: <% %> % {{ }}) '''
return self._syntax
def set_syntax(self, syntax):
self._syntax = syntax
self._tokens = syntax.split()
        if syntax not in self._re_cache:
names = 'block_start block_close line_start inline_start inline_end'
etokens = map(re.escape, self._tokens)
pattern_vars = dict(zip(names.split(), etokens))
patterns = (self._re_split, self._re_tok, self._re_inl)
patterns = [re.compile(p%pattern_vars) for p in patterns]
self._re_cache[syntax] = patterns
self.re_split, self.re_tok, self.re_inl = self._re_cache[syntax]
syntax = property(get_syntax, set_syntax)
def translate(self):
if self.offset: raise RuntimeError('Parser is a one time instance.')
while True:
m = self.re_split.search(self.source[self.offset:])
if m:
text = self.source[self.offset:self.offset+m.start()]
self.text_buffer.append(text)
self.offset += m.end()
if self.source[self.offset] == '%': # Escaped code stuff
line, sep, _ = self.source[self.offset+1:].partition('\n')
self.text_buffer.append(m.group(0)+line+sep)
self.offset += len(line+sep)+1
continue
self.flush_text()
self.read_code(multiline=bool(m.group(2)))
else: break
self.text_buffer.append(self.source[self.offset:])
self.flush_text()
return ''.join(self.code_buffer)
def read_code(self, multiline):
code_line, comment, start_line = '', '', self.lineno
while True:
m = self.re_tok.search(self.source[self.offset:])
if not m:
code_line += self.source[self.offset:]
self.offset = len(self.source)
self.write_code(code_line.strip(), comment)
return
code_line += self.source[self.offset:self.offset+m.start()]
self.offset += m.end()
_str, _com, _blk1, _blk2, _end, _cend, _nl = m.groups()
if _str: # Python string
code_line += _str
elif _com: # Python comment (up to EOL)
comment = _com
if multiline and _com.strip().endswith(self._tokens[1]):
multiline = False # Allow end-of-block in comments
elif _blk1: # Start-block keyword (if/for/while/def/try/...)
code_line, self.indent_mod = _blk1, -1
self.indent += 1
elif _blk2: # Continue-block keyword (else/elif/except/...)
code_line, self.indent_mod = _blk2, -1
elif _end: # The non-standard 'end'-keyword (ends a block)
self.indent -= 1
elif _cend: # The end-code-block template token (usually '%>')
if multiline: multiline = False
else: code_line += _cend
else: # \n
self.write_code(code_line.strip(), comment)
self.lineno += 1
code_line, comment, self.indent_mod = '', '', 0
if not multiline:
break
def flush_text(self):
text = ''.join(self.text_buffer)
del self.text_buffer[:]
if not text: return
parts, pos, nl = [], 0, '\\\n'+' '*self.indent
for m in self.re_inl.finditer(text):
prefix, pos = text[pos:m.start()], m.end()
if prefix:
parts.append(nl.join(map(repr, prefix.splitlines(True))))
if prefix.endswith('\n'): parts[-1] += nl
parts.append(self.process_inline(m.group(1).strip()))
if pos < len(text):
prefix = text[pos:]
lines = prefix.splitlines(True)
if lines[-1].endswith('\\\\\n'): lines[-1] = lines[-1][:-3]
parts.append(nl.join(map(repr, lines)))
code = '_printlist((%s,))' % ', '.join(parts)
self.lineno += code.count('\n')+1
self.write_code(code)
def process_inline(self, chunk):
if chunk[0] == '!': return '_str(%s)' % chunk[1:]
return '_escape(%s)' % chunk
def write_code(self, line, comment=''):
        line, comment = self.fix_backward_compatibility(line, comment)
code = ' ' * (self.indent+self.indent_mod)
code += line.lstrip() + comment + '\n'
self.code_buffer.append(code)
    def fix_backward_compatibility(self, line, comment):
parts = line.strip().split(None, 2)
if parts and parts[0] in ('include', 'rebase'):
depr('The include and rebase keywords are functions now.')
if len(parts) == 1: return "_printlist([body])", comment
elif len(parts) == 2: return "_=%s(%r)" % tuple(parts), comment
else: return "_=%s(%r, %s)" % tuple(parts), comment
if self.lineno <= 2 and not line.strip() and 'coding' in comment:
m = re.match(r"#.*coding[:=]\s*([-\w.]+)", comment)
if m:
depr('PEP263 encoding strings in templates are deprecated.')
enc = m.group(1)
self.source = self.source.encode(self.encoding).decode(enc)
self.encoding = enc
return line, comment.replace('coding','coding*')
return line, comment
def template(*args, **kwargs):
'''
Get a rendered template as a string iterator.
You can use a name, a filename or a template string as first parameter.
Template rendering arguments can be passed as dictionaries
or directly (as keyword arguments).
'''
tpl = args[0] if args else None
adapter = kwargs.pop('template_adapter', SimpleTemplate)
lookup = kwargs.pop('template_lookup', TEMPLATE_PATH)
tplid = (id(lookup), tpl)
if tplid not in TEMPLATES or DEBUG:
settings = kwargs.pop('template_settings', {})
if isinstance(tpl, adapter):
TEMPLATES[tplid] = tpl
if settings: TEMPLATES[tplid].prepare(**settings)
elif "\n" in tpl or "{" in tpl or "%" in tpl or '$' in tpl:
TEMPLATES[tplid] = adapter(source=tpl, lookup=lookup, **settings)
else:
TEMPLATES[tplid] = adapter(name=tpl, lookup=lookup, **settings)
if not TEMPLATES[tplid]:
abort(500, 'Template (%s) not found' % tpl)
for dictarg in args[1:]: kwargs.update(dictarg)
return TEMPLATES[tplid].render(kwargs)
mako_template = functools.partial(template, template_adapter=MakoTemplate)
cheetah_template = functools.partial(template, template_adapter=CheetahTemplate)
jinja2_template = functools.partial(template, template_adapter=Jinja2Template)
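# Usage sketch for template() and the partials above (illustrative):
#
#   template('hello_world', name='World')      # looked up as e.g. hello_world.tpl
#   template('Hello {{name}}!', name='World')  # inline source string
#   mako_template('hello', name='World')       # same call, Mako backend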
def view(tpl_name, **defaults):
''' Decorator: renders a template for a handler.
        The handler can control its behavior as follows:
- return a dict of template vars to fill out the template
- return something other than a dict and the view decorator will not
process the template, but return the handler result as is.
This includes returning a HTTPResponse(dict) to get,
for instance, JSON with autojson or other castfilters.
'''
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
if isinstance(result, (dict, DictMixin)):
tplvars = defaults.copy()
tplvars.update(result)
return template(tpl_name, **tplvars)
elif result is None:
return template(tpl_name, defaults)
return result
return wrapper
return decorator
mako_view = functools.partial(view, template_adapter=MakoTemplate)
cheetah_view = functools.partial(view, template_adapter=CheetahTemplate)
jinja2_view = functools.partial(view, template_adapter=Jinja2Template)
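# Usage sketch for @view (illustrative):
#
#   @route('/hello/<name>')
#   @view('hello_template')
#   def hello(name):
#       return dict(name=name)  # dict return values fill the template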
###############################################################################
# Constants and Globals ########################################################
###############################################################################
TEMPLATE_PATH = ['./', './views/']
TEMPLATES = {}
DEBUG = False
NORUN = False # If set, run() does nothing. Used by load_app()
#: A dict to map HTTP status codes (e.g. 404) to phrases (e.g. 'Not Found')
HTTP_CODES = httplib.responses
HTTP_CODES[418] = "I'm a teapot" # RFC 2324
HTTP_CODES[428] = "Precondition Required"
HTTP_CODES[429] = "Too Many Requests"
HTTP_CODES[431] = "Request Header Fields Too Large"
HTTP_CODES[511] = "Network Authentication Required"
_HTTP_STATUS_LINES = dict((k, '%d %s'%(k,v)) for (k,v) in HTTP_CODES.items())
#: The default template used for error pages. Override with @error()
ERROR_PAGE_TEMPLATE = """
%%try:
%%from %s import DEBUG, HTTP_CODES, request, touni
<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
<html>
<head>
<title>Error: {{e.status}}</title>
<style type="text/css">
html {background-color: #eee; font-family: sans;}
body {background-color: #fff; border: 1px solid #ddd;
padding: 15px; margin: 15px;}
pre {background-color: #eee; border: 1px solid #ddd; padding: 5px;}
</style>
</head>
<body>
<h1>Error: {{e.status}}</h1>
<p>Sorry, the requested URL <tt>{{repr(request.url)}}</tt>
caused an error:</p>
<pre>{{e.body}}</pre>
%%if DEBUG and e.exception:
<h2>Exception:</h2>
<pre>{{repr(e.exception)}}</pre>
%%end
%%if DEBUG and e.traceback:
<h2>Traceback:</h2>
<pre>{{e.traceback}}</pre>
%%end
</body>
</html>
%%except ImportError:
<b>ImportError:</b> Could not generate the error page. Please add bottle to
the import path.
%%end
""" % __name__
#: A thread-safe instance of :class:`LocalRequest`. If accessed from within a
#: request callback, this instance always refers to the *current* request
#: (even on a multithreaded server).
request = LocalRequest()
#: A thread-safe instance of :class:`LocalResponse`. It is used to change the
#: HTTP response for the *current* request.
response = LocalResponse()
#: A thread-safe namespace. Not used by Bottle.
local = threading.local()
# Initialize app stack (create first empty Bottle app)
# BC: 0.6.4 and needed for run()
app = default_app = AppStack()
app.push()
#: A virtual package that redirects import statements.
#: Example: ``import bottle.ext.sqlite`` actually imports `bottle_sqlite`.
ext = _ImportRedirect('bottle.ext' if __name__ == '__main__' else __name__+".ext", 'bottle_%s').module
if __name__ == '__main__':
opt, args, parser = _cmd_options, _cmd_args, _cmd_parser
if opt.version:
_stdout('Bottle %s\n'%__version__)
sys.exit(0)
if not args:
parser.print_help()
_stderr('\nError: No application specified.\n')
sys.exit(1)
sys.path.insert(0, '.')
sys.modules.setdefault('bottle', sys.modules['__main__'])
host, port = (opt.bind or 'localhost'), 8080
if ':' in host:
host, port = host.rsplit(':', 1)
run(args[0], host=host, port=port, server=opt.server,
reloader=opt.reload, plugins=opt.plugin, debug=opt.debug)
# THE END
| Benoss/EazyReport | libs/bottle.py | Python | gpl-3.0 | 142,001 |
import os
import os.path
import shutil
import socket
import subprocess
import sys
import tempfile
import threading
import time
import random
try:
import pytest
except ImportError:
    print >> sys.stderr, "Integ tests require pytest!"
sys.exit(1)
def pytest_funcarg__servers(request):
"Returns a new APIHandler with a set manager"
# Create tmpdir and delete after
tmpdir = tempfile.mkdtemp()
port = random.randint(2000, 60000)
# Write the configuration
config_path = os.path.join(tmpdir, "config.cfg")
conf = """[hlld]
data_dir = %(dir)s
port = %(port)d
""" % {"dir": tmpdir, "port": port}
open(config_path, "w").write(conf)
# Start the process
proc = subprocess.Popen("./hlld -f %s" % config_path, shell=True)
proc.poll()
assert proc.returncode is None
# Define a cleanup handler
def cleanup():
try:
subprocess.Popen("kill -9 %s" % proc.pid, shell=True)
time.sleep(1)
shutil.rmtree(tmpdir)
except:
pass
request.addfinalizer(cleanup)
# Make a connection to the server
connected = False
for x in xrange(3):
try:
conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
conn.settimeout(1)
conn.connect(("localhost", port))
connected = True
break
except Exception, e:
print e
time.sleep(1)
# Die now
if not connected:
raise EnvironmentError("Failed to connect!")
# Make a second connection
conn2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
conn2.settimeout(1)
conn2.connect(("localhost", port))
# Return the connection
return conn, conn2
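# Wire protocol sketch, as exercised by the tests below (commands and
# responses are newline-terminated ASCII):
#   create foo -> "Done\n"
#   set foo key -> "Done\n"
#   list -> "START\n", one line per set, "END\n"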
class TestInteg(object):
def test_list_empty(self, servers):
"Tests doing a list on a fresh server"
server, _ = servers
fh = server.makefile()
server.sendall("list\n")
assert fh.readline() == "START\n"
assert fh.readline() == "END\n"
def test_create(self, servers):
"Tests creating a set"
server, _ = servers
fh = server.makefile()
server.sendall("create foobar\n")
assert fh.readline() == "Done\n"
server.sendall("list\n")
assert fh.readline() == "START\n"
assert "foobar" in fh.readline()
assert fh.readline() == "END\n"
def test_list_prefix(self, servers):
"Tests creating a set"
server, _ = servers
fh = server.makefile()
server.sendall("create foobar\n")
assert fh.readline() == "Done\n"
server.sendall("create foobaz\n")
assert fh.readline() == "Done\n"
server.sendall("create test\n")
assert fh.readline() == "Done\n"
time.sleep(2)
server.sendall("list foo\n")
assert fh.readline() == "START\n"
assert "foobar" in fh.readline()
assert "foobaz" in fh.readline()
assert fh.readline() == "END\n"
def test_create_bad(self, servers):
"Tests creating a set"
server, _ = servers
fh = server.makefile()
server.sendall("create " + ("foo"*100) + "\n")
assert fh.readline() == "Client Error: Bad set name\n"
def test_doublecreate(self, servers):
"Tests creating a set twice"
server, _ = servers
fh = server.makefile()
server.sendall("create foobar\n")
assert fh.readline() == "Done\n"
server.sendall("create foobar\n")
assert fh.readline() == "Exists\n"
def test_drop(self, servers):
"Tests dropping a set"
server, _ = servers
fh = server.makefile()
server.sendall("create foobar\n")
assert fh.readline() == "Done\n"
server.sendall("list\n")
assert fh.readline() == "START\n"
assert "foobar" in fh.readline()
assert fh.readline() == "END\n"
server.sendall("drop foobar\n")
assert fh.readline() == "Done\n"
server.sendall("list\n")
assert fh.readline() == "START\n"
assert fh.readline() == "END\n"
def test_close(self, servers):
"Tests closing a set"
server, _ = servers
fh = server.makefile()
server.sendall("create foobar\n")
assert fh.readline() == "Done\n"
server.sendall("list\n")
assert fh.readline() == "START\n"
assert "foobar" in fh.readline()
assert fh.readline() == "END\n"
server.sendall("close foobar\n")
assert fh.readline() == "Done\n"
server.sendall("list\n")
assert fh.readline() == "START\n"
assert "foobar" in fh.readline()
assert fh.readline() == "END\n"
def test_clear(self, servers):
"Tests clearing a set"
server, _ = servers
fh = server.makefile()
server.sendall("create cleartest\n")
assert fh.readline() == "Done\n"
server.sendall("list\n")
assert fh.readline() == "START\n"
assert "cleartest" in fh.readline()
assert fh.readline() == "END\n"
server.sendall("clear cleartest\n")
assert fh.readline() == "Set is not proxied. Close it first.\n"
server.sendall("list\n")
assert fh.readline() == "START\n"
assert "cleartest" in fh.readline()
assert fh.readline() == "END\n"
server.sendall("close cleartest\n")
assert fh.readline() == "Done\n"
server.sendall("clear cleartest\n")
assert fh.readline() == "Done\n"
server.sendall("list\n")
assert fh.readline() == "START\n"
assert fh.readline() == "END\n"
# Load + Drop the set
time.sleep(3) # Required for vacuuming
server.sendall("create cleartest\n")
assert fh.readline() == "Done\n"
server.sendall("drop cleartest\n")
assert fh.readline() == "Done\n"
def test_set(self, servers):
"Tests setting a value"
server, _ = servers
fh = server.makefile()
server.sendall("create foobar\n")
assert fh.readline() == "Done\n"
server.sendall("set foobar test\n")
assert fh.readline() == "Done\n"
def test_bulk(self, servers):
"Tests setting bulk values"
server, _ = servers
fh = server.makefile()
server.sendall("create foobar\n")
assert fh.readline() == "Done\n"
server.sendall("bulk foobar test blah\n")
assert fh.readline() == "Done\n"
def test_aliases(self, servers):
"Tests aliases"
server, _ = servers
fh = server.makefile()
server.sendall("create foobar\n")
assert fh.readline() == "Done\n"
server.sendall("b foobar test test1 test2\n")
assert fh.readline() == "Done\n"
server.sendall("s foobar test\n")
assert fh.readline() == "Done\n"
def test_concurrent_drop(self, servers):
"Tests setting values and do a concurrent drop on the DB"
server, server2 = servers
fh = server.makefile()
def loopset():
for x in xrange(10000):
server.sendall("set pingpong test%d\n" % x)
resp = fh.readline()
if resp != "Done\n":
assert resp == "Set does not exist\n" and x > 100
return
else:
assert resp == "Done\n"
assert False
def drop():
time.sleep(0.2)
server2.sendall("drop pingpong\n")
server.sendall("create pingpong\n")
fh.readline() == "Done\n"
t = threading.Thread(target=drop)
t.start()
loopset()
def test_concurrent_close(self, servers):
"Tests setting values and do a concurrent close on the DB"
server, server2 = servers
fh = server.makefile()
def loopset():
for x in xrange(100000):
server.sendall("set pingpong test%d\n" % x)
resp = fh.readline()
assert resp == "Done\n"
def close():
time.sleep(0.1)
server2.sendall("close pingpong\n")
server.sendall("create pingpong\n")
fh.readline() == "Done\n"
t = threading.Thread(target=close)
t.start()
loopset()
def test_concurrent_flush(self, servers):
"Tests setting values and do a concurrent flush"
server, server2 = servers
fh = server.makefile()
def loopset():
for x in xrange(10000):
server.sendall("set pingpong test%d\n" % x)
resp = fh.readline()
assert resp == "Done\n"
def flush():
for x in xrange(3):
time.sleep(0.1)
server2.sendall("flush pingpong\n")
server.sendall("create pingpong\n")
fh.readline() == "Done\n"
t = threading.Thread(target=flush)
t.start()
loopset()
def test_concurrent_create(self, servers):
"Tests creating a set with concurrent sets"
server, server2 = servers
fh = server.makefile()
def loopset():
for x in xrange(1000):
server.sendall("set pingpong test%d\n" % x)
resp = fh.readline()
assert resp == "Done\n"
for r in xrange(3):
for x in xrange(1000):
server.sendall("set pingpong%d test%d\n" % (r, x))
resp = fh.readline()
assert resp == "Done\n"
def create():
for x in xrange(10):
server2.sendall("create pingpong%d\n" % x)
server.sendall("create pingpong\n")
fh.readline() == "Done\n"
t = threading.Thread(target=create)
t.start()
loopset()
def test_create_in_memory(self, servers):
"Tests creating a set in_memory, tries flush"
server, _ = servers
fh = server.makefile()
server.sendall("create foobar in_memory=1\n")
assert fh.readline() == "Done\n"
server.sendall("list\n")
assert fh.readline() == "START\n"
assert "foobar" in fh.readline()
assert fh.readline() == "END\n"
server.sendall("flush foobar\n")
assert fh.readline() == "Done\n"
def test_set_check_in_memory(self, servers):
"Tests setting and checking many values"
server, _ = servers
fh = server.makefile()
server.sendall("create foobar in_memory=1\n")
assert fh.readline() == "Done\n"
for x in xrange(1000):
server.sendall("set foobar test%d\n" % x)
assert fh.readline() == "Done\n"
def test_drop_in_memory(self, servers):
"Tests dropping a set"
server, _ = servers
fh = server.makefile()
server.sendall("create foobar in_memory=1\n")
assert fh.readline() == "Done\n"
server.sendall("drop foobar\n")
assert fh.readline() == "Done\n"
server.sendall("list\n")
assert fh.readline() == "START\n"
assert fh.readline() == "END\n"
def test_in_progress_drop(self, servers):
"Tests creating/dropping a set and getting the 'Delete in progress'"
server, _ = servers
fh = server.makefile()
for x in xrange(10):
# Create and drop should cause the vacuum to fall behind
server.sendall("create drop_in_prog\n")
assert fh.readline() == "Done\n"
server.sendall("drop drop_in_prog\n")
assert fh.readline() == "Done\n"
# Create after drop should fail
server.sendall("create drop_in_prog\n")
resp = fh.readline()
if resp == "Delete in progress\n":
return
elif resp == "Done\n":
server.sendall("drop drop_in_prog\n")
fh.readline()
assert False, "Failed to do a concurrent create"
def test_create_huge_prefix(self, servers):
"Tests creating a set"
server, _ = servers
fh = server.makefile()
server.sendall("create filter:test:very:long:common:prefix:1\n")
server.sendall("create filter:test:very:long:common:prefix:2\n")
server.sendall("create filter:test:very:long:sub:prefix:1\n")
assert fh.readline() == "Done\n"
assert fh.readline() == "Done\n"
assert fh.readline() == "Done\n"
time.sleep(2)
server.sendall("list filter:test\n")
assert fh.readline() == "START\n"
assert "filter:test:very:long:common:prefix:1" in fh.readline()
assert "filter:test:very:long:common:prefix:2" in fh.readline()
assert "filter:test:very:long:sub:prefix:1" in fh.readline()
assert fh.readline() == "END\n"
if __name__ == "__main__":
sys.exit(pytest.main(args="-k TestInteg."))
| armon/hlld | integ/test_integ.py | Python | bsd-3-clause | 12,915 |
from django.conf.urls.defaults import *
urlpatterns = patterns('',
url(r'^static/(?P<path>.*)$', 'staticfiles.views.serve'),
)
| zodman/django-staticfiles | staticfiles/tests/urls/default.py | Python | bsd-3-clause | 132 |
"""Constants for the Risco integration."""
from homeassistant.const import (
CONF_SCAN_INTERVAL,
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME,
)
DOMAIN = "risco"
RISCO_EVENT = "risco_event"
DATA_COORDINATOR = "risco"
EVENTS_COORDINATOR = "risco_events"
DEFAULT_SCAN_INTERVAL = 30
CONF_CODE_ARM_REQUIRED = "code_arm_required"
CONF_CODE_DISARM_REQUIRED = "code_disarm_required"
CONF_RISCO_STATES_TO_HA = "risco_states_to_ha"
CONF_HA_STATES_TO_RISCO = "ha_states_to_risco"
RISCO_GROUPS = ["A", "B", "C", "D"]
RISCO_ARM = "arm"
RISCO_PARTIAL_ARM = "partial_arm"
RISCO_STATES = [RISCO_ARM, RISCO_PARTIAL_ARM, *RISCO_GROUPS]
DEFAULT_RISCO_GROUPS_TO_HA = {group: STATE_ALARM_ARMED_HOME for group in RISCO_GROUPS}
DEFAULT_RISCO_STATES_TO_HA = {
RISCO_ARM: STATE_ALARM_ARMED_AWAY,
RISCO_PARTIAL_ARM: STATE_ALARM_ARMED_HOME,
**DEFAULT_RISCO_GROUPS_TO_HA,
}
DEFAULT_HA_STATES_TO_RISCO = {
STATE_ALARM_ARMED_AWAY: RISCO_ARM,
STATE_ALARM_ARMED_HOME: RISCO_PARTIAL_ARM,
}
DEFAULT_OPTIONS = {
CONF_SCAN_INTERVAL: DEFAULT_SCAN_INTERVAL,
CONF_CODE_ARM_REQUIRED: False,
CONF_CODE_DISARM_REQUIRED: False,
CONF_RISCO_STATES_TO_HA: DEFAULT_RISCO_STATES_TO_HA,
CONF_HA_STATES_TO_RISCO: DEFAULT_HA_STATES_TO_RISCO,
}
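# For reference (derived from the constants above, assuming the usual HA state
# strings 'armed_away' and 'armed_home'), DEFAULT_RISCO_STATES_TO_HA resolves to:
# {'arm': 'armed_away', 'partial_arm': 'armed_home', 'A': 'armed_home',
#  'B': 'armed_home', 'C': 'armed_home', 'D': 'armed_home'}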
| lukas-hetzenecker/home-assistant | homeassistant/components/risco/const.py | Python | apache-2.0 | 1,258 |
# -*- coding: utf-8 -*-
## This file is part of Invenio.
## Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
# pylint: disable=C0301
__revision__ = "$Id$"
import time
import cgi
import string
import re
import locale
from urllib import quote, urlencode
from xml.sax.saxutils import escape as xml_escape
from invenio.config import \
CFG_WEBSEARCH_LIGHTSEARCH_PATTERN_BOX_WIDTH, \
CFG_WEBSEARCH_SIMPLESEARCH_PATTERN_BOX_WIDTH, \
CFG_WEBSEARCH_ADVANCEDSEARCH_PATTERN_BOX_WIDTH, \
CFG_WEBSEARCH_AUTHOR_ET_AL_THRESHOLD, \
CFG_WEBSEARCH_USE_ALEPH_SYSNOS, \
CFG_WEBSEARCH_SPLIT_BY_COLLECTION, \
CFG_WEBSEARCH_DEF_RECORDS_IN_GROUPS, \
CFG_BIBRANK_SHOW_READING_STATS, \
CFG_BIBRANK_SHOW_DOWNLOAD_STATS, \
CFG_BIBRANK_SHOW_DOWNLOAD_GRAPHS, \
CFG_BIBRANK_SHOW_CITATION_LINKS, \
CFG_BIBRANK_SHOW_CITATION_STATS, \
CFG_BIBRANK_SHOW_CITATION_GRAPHS, \
CFG_WEBSEARCH_RSS_TTL, \
CFG_SITE_LANG, \
CFG_SITE_NAME, \
CFG_SITE_NAME_INTL, \
CFG_VERSION, \
CFG_SITE_URL, \
CFG_SITE_SUPPORT_EMAIL, \
CFG_SITE_ADMIN_EMAIL, \
CFG_CERN_SITE, \
CFG_INSPIRE_SITE, \
CFG_WEBSEARCH_DEFAULT_SEARCH_INTERFACE, \
CFG_WEBSEARCH_ENABLED_SEARCH_INTERFACES, \
CFG_WEBSEARCH_MAX_RECORDS_IN_GROUPS, \
CFG_BIBINDEX_CHARS_PUNCTUATION, \
CFG_WEBCOMMENT_ALLOW_COMMENTS, \
CFG_WEBCOMMENT_ALLOW_REVIEWS, \
CFG_WEBSEARCH_WILDCARD_LIMIT, \
CFG_WEBSEARCH_SHOW_COMMENT_COUNT, \
CFG_WEBSEARCH_SHOW_REVIEW_COUNT, \
CFG_SITE_RECORD, \
CFG_WEBSEARCH_PREV_NEXT_HIT_LIMIT
from invenio.search_engine_config import CFG_WEBSEARCH_RESULTS_OVERVIEW_MAX_COLLS_TO_PRINT
from invenio.dbquery import run_sql
from invenio.messages import gettext_set_language
from invenio.urlutils import make_canonical_urlargd, drop_default_urlargd, create_html_link, create_url
from invenio.htmlutils import nmtoken_from_string
from invenio.webinterface_handler import wash_urlargd
from invenio.bibrank_citation_searcher import get_cited_by_count
from invenio.webuser import session_param_get
from invenio.intbitset import intbitset
from invenio.websearch_external_collections import external_collection_get_state, get_external_collection_engine
from invenio.websearch_external_collections_utils import get_collection_id
from invenio.websearch_external_collections_config import CFG_EXTERNAL_COLLECTION_MAXRESULTS
from invenio.search_engine_utils import get_fieldvalues
_RE_PUNCTUATION = re.compile(CFG_BIBINDEX_CHARS_PUNCTUATION)
_RE_SPACES = re.compile(r"\s+")
class Template:
# This dictionary maps Invenio language code to locale codes (ISO 639)
tmpl_localemap = {
'bg': 'bg_BG',
'ar': 'ar_AR',
'ca': 'ca_ES',
'de': 'de_DE',
'el': 'el_GR',
'en': 'en_US',
'es': 'es_ES',
'pt': 'pt_BR',
'fa': 'fa_IR',
'fr': 'fr_FR',
'it': 'it_IT',
'ka': 'ka_GE',
'lt': 'lt_LT',
'ro': 'ro_RO',
'ru': 'ru_RU',
'rw': 'rw_RW',
'sk': 'sk_SK',
'cs': 'cs_CZ',
'no': 'no_NO',
'sv': 'sv_SE',
'uk': 'uk_UA',
'ja': 'ja_JA',
'pl': 'pl_PL',
'hr': 'hr_HR',
'zh_CN': 'zh_CN',
'zh_TW': 'zh_TW',
'hu': 'hu_HU',
'af': 'af_ZA',
'gl': 'gl_ES'
}
tmpl_default_locale = "en_US" # which locale to use by default, useful in case of failure
# Type of the allowed parameters for the web interface for search results
search_results_default_urlargd = {
'cc': (str, CFG_SITE_NAME),
'c': (list, []),
'p': (str, ""), 'f': (str, ""),
'rg': (int, CFG_WEBSEARCH_DEF_RECORDS_IN_GROUPS),
'sf': (str, ""),
'so': (str, "d"),
'sp': (str, ""),
'rm': (str, ""),
'of': (str, "hb"),
'ot': (list, []),
'em': (str,""),
'aas': (int, CFG_WEBSEARCH_DEFAULT_SEARCH_INTERFACE),
'as': (int, CFG_WEBSEARCH_DEFAULT_SEARCH_INTERFACE),
'p1': (str, ""), 'f1': (str, ""), 'm1': (str, ""), 'op1':(str, ""),
'p2': (str, ""), 'f2': (str, ""), 'm2': (str, ""), 'op2':(str, ""),
'p3': (str, ""), 'f3': (str, ""), 'm3': (str, ""),
'sc': (int, 0),
'jrec': (int, 0),
'recid': (int, -1), 'recidb': (int, -1), 'sysno': (str, ""),
'id': (int, -1), 'idb': (int, -1), 'sysnb': (str, ""),
'action': (str, "search"),
'action_search': (str, ""),
'action_browse': (str, ""),
'd1': (str, ""),
'd1y': (int, 0), 'd1m': (int, 0), 'd1d': (int, 0),
'd2': (str, ""),
'd2y': (int, 0), 'd2m': (int, 0), 'd2d': (int, 0),
'dt': (str, ""),
'ap': (int, 1),
'verbose': (int, 0),
'ec': (list, []),
'wl': (int, CFG_WEBSEARCH_WILDCARD_LIMIT),
}
# ...and for search interfaces
search_interface_default_urlargd = {
'aas': (int, CFG_WEBSEARCH_DEFAULT_SEARCH_INTERFACE),
'as': (int, CFG_WEBSEARCH_DEFAULT_SEARCH_INTERFACE),
'verbose': (int, 0),
'em' : (str, "")}
# ...and for RSS feeds
rss_default_urlargd = {'c' : (list, []),
'cc' : (str, ""),
'p' : (str, ""),
'f' : (str, ""),
'p1' : (str, ""),
'f1' : (str, ""),
'm1' : (str, ""),
'op1': (str, ""),
'p2' : (str, ""),
'f2' : (str, ""),
'm2' : (str, ""),
'op2': (str, ""),
'p3' : (str, ""),
'f3' : (str, ""),
'm3' : (str, ""),
'wl' : (int, CFG_WEBSEARCH_WILDCARD_LIMIT)}
tmpl_openurl_accepted_args = {
'id' : (list, []),
'genre' : (str, ''),
'aulast' : (str, ''),
'aufirst' : (str, ''),
'auinit' : (str, ''),
'auinit1' : (str, ''),
'auinitm' : (str, ''),
'issn' : (str, ''),
'eissn' : (str, ''),
'coden' : (str, ''),
'isbn' : (str, ''),
'sici' : (str, ''),
'bici' : (str, ''),
'title' : (str, ''),
'stitle' : (str, ''),
'atitle' : (str, ''),
'volume' : (str, ''),
'part' : (str, ''),
'issue' : (str, ''),
'spage' : (str, ''),
'epage' : (str, ''),
'pages' : (str, ''),
'artnum' : (str, ''),
'date' : (str, ''),
'ssn' : (str, ''),
'quarter' : (str, ''),
'url_ver' : (str, ''),
'ctx_ver' : (str, ''),
'rft_val_fmt' : (str, ''),
'rft_id' : (list, []),
'rft.atitle' : (str, ''),
'rft.title' : (str, ''),
'rft.jtitle' : (str, ''),
'rft.stitle' : (str, ''),
'rft.date' : (str, ''),
'rft.volume' : (str, ''),
'rft.issue' : (str, ''),
'rft.spage' : (str, ''),
'rft.epage' : (str, ''),
'rft.pages' : (str, ''),
'rft.artnumber' : (str, ''),
'rft.issn' : (str, ''),
'rft.eissn' : (str, ''),
'rft.aulast' : (str, ''),
'rft.aufirst' : (str, ''),
'rft.auinit' : (str, ''),
'rft.auinit1' : (str, ''),
'rft.auinitm' : (str, ''),
'rft.ausuffix' : (str, ''),
'rft.au' : (list, []),
'rft.aucorp' : (str, ''),
'rft.isbn' : (str, ''),
'rft.coden' : (str, ''),
'rft.sici' : (str, ''),
'rft.genre' : (str, 'unknown'),
'rft.chron' : (str, ''),
'rft.ssn' : (str, ''),
'rft.quarter' : (int, ''),
'rft.part' : (str, ''),
'rft.btitle' : (str, ''),
'rft.place' : (str, ''),
'rft.pub' : (str, ''),
'rft.edition' : (str, ''),
'rft.tpages' : (str, ''),
'rft.series' : (str, ''),
}
tmpl_opensearch_rss_url_syntax = "%(CFG_SITE_URL)s/rss?p={searchTerms}&jrec={startIndex}&rg={count}&ln={language}" % {'CFG_SITE_URL': CFG_SITE_URL}
tmpl_opensearch_html_url_syntax = "%(CFG_SITE_URL)s/search?p={searchTerms}&jrec={startIndex}&rg={count}&ln={language}" % {'CFG_SITE_URL': CFG_SITE_URL}
def tmpl_openurl2invenio(self, openurl_data):
""" Return an Invenio url corresponding to a search with the data
included in the openurl form map.
"""
def isbn_to_isbn13_isbn10(isbn):
isbn = isbn.replace(' ', '').replace('-', '')
if len(isbn) == 10 and isbn.isdigit():
## We already have isbn10
return ('', isbn)
            if len(isbn) != 13 or not isbn.isdigit():
                return ('', '')
isbn13, isbn10 = isbn, isbn[3:-1]
checksum = 0
weight = 10
for char in isbn10:
checksum += int(char) * weight
weight -= 1
checksum = 11 - (checksum % 11)
            if checksum == 10:
                isbn10 += 'X'
            elif checksum == 11:
                isbn10 += '0'
            else:
                isbn10 += str(checksum)
return (isbn13, isbn10)
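        # Worked example (illustrative): for ISBN-13 '9780306406157' the
        # ISBN-10 stem is isbn[3:-1] = '030640615'; the weighted sum is
        # 0*10 + 3*9 + 0*8 + 6*7 + 4*6 + 0*5 + 6*4 + 1*3 + 5*2 = 130,
        # 130 % 11 = 9 and 11 - 9 = 2, so the function returns
        # ('9780306406157', '0306406152').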
from invenio.search_engine import perform_request_search
doi = ''
pmid = ''
bibcode = ''
oai = ''
issn = ''
isbn = ''
for elem in openurl_data['id']:
if elem.startswith('doi:'):
doi = elem[len('doi:'):]
elif elem.startswith('pmid:'):
pmid = elem[len('pmid:'):]
elif elem.startswith('bibcode:'):
bibcode = elem[len('bibcode:'):]
elif elem.startswith('oai:'):
oai = elem[len('oai:'):]
for elem in openurl_data['rft_id']:
if elem.startswith('info:doi/'):
doi = elem[len('info:doi/'):]
elif elem.startswith('info:pmid/'):
pmid = elem[len('info:pmid/'):]
elif elem.startswith('info:bibcode/'):
bibcode = elem[len('info:bibcode/'):]
elif elem.startswith('info:oai/'):
                oai = elem[len('info:oai/'):]
elif elem.startswith('urn:ISBN:'):
isbn = elem[len('urn:ISBN:'):]
elif elem.startswith('urn:ISSN:'):
issn = elem[len('urn:ISSN:'):]
## Building author query
aulast = openurl_data['rft.aulast'] or openurl_data['aulast']
aufirst = openurl_data['rft.aufirst'] or openurl_data['aufirst']
auinit = openurl_data['rft.auinit'] or \
openurl_data['auinit'] or \
openurl_data['rft.auinit1'] + ' ' + openurl_data['rft.auinitm'] or \
openurl_data['auinit1'] + ' ' + openurl_data['auinitm'] or aufirst[:1]
auinit = auinit.upper()
if aulast and aufirst:
author_query = 'author:"%s, %s" or author:"%s, %s"' % (aulast, aufirst, aulast, auinit)
elif aulast and auinit:
author_query = 'author:"%s, %s"' % (aulast, auinit)
else:
author_query = ''
## Building title query
title = openurl_data['rft.atitle'] or \
openurl_data['atitle'] or \
openurl_data['rft.btitle'] or \
openurl_data['rft.title'] or \
openurl_data['title']
if title:
title_query = 'title:"%s"' % title
title_query_cleaned = 'title:"%s"' % _RE_SPACES.sub(' ', _RE_PUNCTUATION.sub(' ', title))
else:
title_query = ''
## Building journal query
jtitle = openurl_data['rft.stitle'] or \
openurl_data['stitle'] or \
openurl_data['rft.jtitle'] or \
openurl_data['title']
if jtitle:
journal_query = 'journal:"%s"' % jtitle
else:
journal_query = ''
## Building isbn query
isbn = isbn or openurl_data['rft.isbn'] or \
openurl_data['isbn']
isbn13, isbn10 = isbn_to_isbn13_isbn10(isbn)
if isbn13:
isbn_query = 'isbn:"%s" or isbn:"%s"' % (isbn13, isbn10)
elif isbn10:
isbn_query = 'isbn:"%s"' % isbn10
else:
isbn_query = ''
## Building issn query
issn = issn or openurl_data['rft.eissn'] or \
openurl_data['eissn'] or \
openurl_data['rft.issn'] or \
openurl_data['issn']
if issn:
issn_query = 'issn:"%s"' % issn
else:
issn_query = ''
## Building coden query
coden = openurl_data['rft.coden'] or openurl_data['coden']
if coden:
coden_query = 'coden:"%s"' % coden
else:
coden_query = ''
## Building doi query
        if False: #doi: #FIXME Temporarily disabled until the doi field is properly set up
doi_query = 'doi:"%s"' % doi
else:
doi_query = ''
## Trying possible searches
if doi_query:
if perform_request_search(p=doi_query):
return '%s/search?%s' % (CFG_SITE_URL, urlencode({
'p' : doi_query,
'sc' : CFG_WEBSEARCH_SPLIT_BY_COLLECTION,
'of' : 'hd'}))
if isbn_query:
if perform_request_search(p=isbn_query):
return '%s/search?%s' % (CFG_SITE_URL, urlencode({
'p' : isbn_query,
'sc' : CFG_WEBSEARCH_SPLIT_BY_COLLECTION,
'of' : 'hd'}))
if coden_query:
if perform_request_search(p=coden_query):
return '%s/search?%s' % (CFG_SITE_URL, urlencode({
'p' : coden_query,
'sc' : CFG_WEBSEARCH_SPLIT_BY_COLLECTION,
'of' : 'hd'}))
if author_query and title_query:
if perform_request_search(p='%s and %s' % (title_query, author_query)):
return '%s/search?%s' % (CFG_SITE_URL, urlencode({
'p' : '%s and %s' % (title_query, author_query),
'sc' : CFG_WEBSEARCH_SPLIT_BY_COLLECTION,
'of' : 'hd'}))
if title_query:
result = len(perform_request_search(p=title_query))
if result == 1:
return '%s/search?%s' % (CFG_SITE_URL, urlencode({
'p' : title_query,
'sc' : CFG_WEBSEARCH_SPLIT_BY_COLLECTION,
'of' : 'hd'}))
elif result > 1:
return '%s/search?%s' % (CFG_SITE_URL, urlencode({
'p' : title_query,
'sc' : CFG_WEBSEARCH_SPLIT_BY_COLLECTION,
'of' : 'hb'}))
## Nothing worked, let's return a search that the user can improve
if author_query and title_query:
return '%s/search%s' % (CFG_SITE_URL, make_canonical_urlargd({
'p' : '%s and %s' % (title_query_cleaned, author_query),
'sc' : CFG_WEBSEARCH_SPLIT_BY_COLLECTION,
'of' : 'hb'}, {}))
elif title_query:
return '%s/search%s' % (CFG_SITE_URL, make_canonical_urlargd({
'p' : title_query_cleaned,
'sc' : CFG_WEBSEARCH_SPLIT_BY_COLLECTION,
'of' : 'hb'}, {}))
else:
            ## Mmh. Too little information provided.
return '%s/search%s' % (CFG_SITE_URL, make_canonical_urlargd({
'p' : 'recid:-1',
'sc' : CFG_WEBSEARCH_SPLIT_BY_COLLECTION,
'of' : 'hb'}, {}))
def tmpl_opensearch_description(self, ln):
""" Returns the OpenSearch description file of this site.
"""
_ = gettext_set_language(ln)
return """<OpenSearchDescription xmlns="http://a9.com/-/spec/opensearch/1.1/"
xmlns:moz="http://www.mozilla.org/2006/browser/search/">
<ShortName>%(short_name)s</ShortName>
<LongName>%(long_name)s</LongName>
<Description>%(description)s</Description>
<InputEncoding>UTF-8</InputEncoding>
<OutputEncoding>UTF-8</OutputEncoding>
<Language>*</Language>
<Contact>%(CFG_SITE_ADMIN_EMAIL)s</Contact>
<Query role="example" searchTerms="a" />
<Developer>Powered by Invenio</Developer>
<Url type="text/html" indexOffset="1" rel="results" template="%(html_search_syntax)s" />
<Url type="application/rss+xml" indexOffset="1" rel="results" template="%(rss_search_syntax)s" />
<Url type="application/opensearchdescription+xml" rel="self" template="%(CFG_SITE_URL)s/opensearchdescription" />
<moz:SearchForm>%(CFG_SITE_URL)s</moz:SearchForm>
</OpenSearchDescription>""" % \
{'CFG_SITE_URL': CFG_SITE_URL,
'short_name': CFG_SITE_NAME_INTL.get(ln, CFG_SITE_NAME)[:16],
'long_name': CFG_SITE_NAME_INTL.get(ln, CFG_SITE_NAME),
'description': (_("Search on %(x_CFG_SITE_NAME_INTL)s") % \
{'x_CFG_SITE_NAME_INTL': CFG_SITE_NAME_INTL.get(ln, CFG_SITE_NAME)})[:1024],
'CFG_SITE_ADMIN_EMAIL': CFG_SITE_ADMIN_EMAIL,
'rss_search_syntax': self.tmpl_opensearch_rss_url_syntax,
'html_search_syntax': self.tmpl_opensearch_html_url_syntax
}
def build_search_url(self, known_parameters={}, **kargs):
""" Helper for generating a canonical search
url. 'known_parameters' is the list of query parameters you
inherit from your current query. You can then pass keyword
arguments to modify this query.
build_search_url(known_parameters, of="xm")
The generated URL is absolute.
"""
parameters = {}
parameters.update(known_parameters)
parameters.update(kargs)
# Now, we only have the arguments which have _not_ their default value
parameters = drop_default_urlargd(parameters, self.search_results_default_urlargd)
# Treat `as' argument specially:
if parameters.has_key('aas'):
parameters['as'] = parameters['aas']
del parameters['aas']
# Asking for a recid? Return a /CFG_SITE_RECORD/<recid> URL
if 'recid' in parameters:
target = "%s/%s/%s" % (CFG_SITE_URL, CFG_SITE_RECORD, parameters['recid'])
del parameters['recid']
target += make_canonical_urlargd(parameters, self.search_results_default_urlargd)
return target
return "%s/search%s" % (CFG_SITE_URL, make_canonical_urlargd(parameters, self.search_results_default_urlargd))
def build_search_interface_url(self, known_parameters={}, **kargs):
""" Helper for generating a canonical search interface URL."""
parameters = {}
parameters.update(known_parameters)
parameters.update(kargs)
c = parameters['c']
del parameters['c']
# Now, we only have the arguments which have _not_ their default value
parameters = drop_default_urlargd(parameters, self.search_results_default_urlargd)
# Treat `as' argument specially:
if parameters.has_key('aas'):
parameters['as'] = parameters['aas']
del parameters['aas']
if c and c != CFG_SITE_NAME:
base = CFG_SITE_URL + '/collection/' + quote(c)
else:
base = CFG_SITE_URL
return create_url(base, parameters)
def build_rss_url(self, known_parameters, **kargs):
"""Helper for generating a canonical RSS URL"""
parameters = {}
parameters.update(known_parameters)
parameters.update(kargs)
# Keep only interesting parameters
argd = wash_urlargd(parameters, self.rss_default_urlargd)
if argd:
# Handle 'c' differently since it is a list
c = argd.get('c', [])
del argd['c']
# Create query, and drop empty params
args = make_canonical_urlargd(argd, self.rss_default_urlargd)
if c != []:
# Add collections
c = [quote(coll) for coll in c]
if args == '':
args += '?'
else:
args += '&'
args += 'c=' + '&c='.join(c)
return CFG_SITE_URL + '/rss' + args
def tmpl_record_page_header_content(self, req, recid, ln):
"""
Provide extra information in the header of /CFG_SITE_RECORD pages
Return (title, description, keywords), not escaped for HTML
"""
_ = gettext_set_language(ln)
title = get_fieldvalues(recid, "245__a") or \
get_fieldvalues(recid, "111__a")
if title:
title = title[0]
else:
title = _("Record") + ' #%d' % recid
keywords = ', '.join(get_fieldvalues(recid, "6531_a"))
description = ' '.join(get_fieldvalues(recid, "520__a"))
description += "\n"
description += '; '.join(get_fieldvalues(recid, "100__a") + get_fieldvalues(recid, "700__a"))
return (title, description, keywords)
def tmpl_exact_author_browse_help_link(self, p, p1, p2, p3, f, f1, f2, f3, rm, cc, ln, jrec, rg, aas, action, link_name):
"""
Creates the 'exact author' help link for browsing.
"""
_ = gettext_set_language(ln)
url = create_html_link(self.build_search_url(p=p,
p1=p1,
p2=p2,
p3=p3,
f=f,
f1=f1,
f2=f2,
f3=f3,
rm=rm,
cc=cc,
ln=ln,
jrec=jrec,
rg=rg,
aas=aas,
action=action),
{}, _(link_name), {'class': 'nearestterms'})
return "Did you mean to browse in %s index?" % url
def tmpl_navtrail_links(self, aas, ln, dads):
"""
        Creates the navigation bar at the top of each search page (*Home > Root collection > subcollection > ...*)
Parameters:
- 'aas' *int* - Should we display an advanced search box?
- 'ln' *string* - The language to display
        - 'dads' *list* - A list of parent links, each one being a dictionary of ('name', 'longname')
"""
out = []
for url, name in dads:
args = {'c': url, 'as': aas, 'ln': ln}
out.append(create_html_link(self.build_search_interface_url(**args), {}, cgi.escape(name), {'class': 'navtrail'}))
return ' > '.join(out)
def tmpl_webcoll_body(self, ln, collection, te_portalbox,
searchfor, np_portalbox, narrowsearch,
focuson, instantbrowse, ne_portalbox, show_body=True):
""" Creates the body of the main search page.
Parameters:
- 'ln' *string* - language of the page being generated
- 'collection' - collection id of the page being generated
- 'te_portalbox' *string* - The HTML code for the portalbox on top of search
- 'searchfor' *string* - The HTML code for the search for box
- 'np_portalbox' *string* - The HTML code for the portalbox on bottom of search
- 'narrowsearch' *string* - The HTML code for the search categories (left bottom of page)
- 'focuson' *string* - The HTML code for the "focuson" categories (right bottom of page)
- 'ne_portalbox' *string* - The HTML code for the bottom of the page
"""
if not narrowsearch:
narrowsearch = instantbrowse
body = '''
<form name="search" action="%(siteurl)s/search" method="get">
%(searchfor)s
%(np_portalbox)s''' % {
'siteurl' : CFG_SITE_URL,
'searchfor' : searchfor,
'np_portalbox' : np_portalbox
}
if show_body:
body += '''
<table cellspacing="0" cellpadding="0" border="0" class="narrowandfocusonsearchbox">
<tr>
<td valign="top">%(narrowsearch)s</td>
''' % { 'narrowsearch' : narrowsearch }
if focuson:
body += """<td valign="top">""" + focuson + """</td>"""
body += """</tr></table>"""
elif focuson:
body += focuson
body += """%(ne_portalbox)s
</form>""" % {'ne_portalbox' : ne_portalbox}
return body
def tmpl_portalbox(self, title, body):
"""Creates portalboxes based on the parameters
Parameters:
- 'title' *string* - The title of the box
- 'body' *string* - The HTML code for the body of the box
"""
out = """<div class="portalbox">
<div class="portalboxheader">%(title)s</div>
<div class="portalboxbody">%(body)s</div>
</div>""" % {'title' : cgi.escape(title), 'body' : body}
return out
def tmpl_searchfor_light(self, ln, collection_id, collection_name, record_count,
example_search_queries): # EXPERIMENTAL
"""Produces light *Search for* box for the current collection.
Parameters:
        - 'ln' - *str* The language to display
- 'collection_id' - *str* The collection id
- 'collection_name' - *str* The collection name in current language
- 'example_search_queries' - *list* List of search queries given as example for this collection
"""
# load the right message language
_ = gettext_set_language(ln)
out = '''
<!--create_searchfor_light()-->
'''
argd = drop_default_urlargd({'ln': ln, 'sc': CFG_WEBSEARCH_SPLIT_BY_COLLECTION},
self.search_results_default_urlargd)
# Only add non-default hidden values
for field, value in argd.items():
out += self.tmpl_input_hidden(field, value)
header = _("Search %s records for:") % \
self.tmpl_nbrecs_info(record_count, "", "")
asearchurl = self.build_search_interface_url(c=collection_id,
aas=max(CFG_WEBSEARCH_ENABLED_SEARCH_INTERFACES),
ln=ln)
# Build example of queries for this collection
example_search_queries_links = [create_html_link(self.build_search_url(p=example_query,
ln=ln,
aas= -1,
c=collection_id),
{},
cgi.escape(example_query),
{'class': 'examplequery'}) \
for example_query in example_search_queries]
example_query_html = ''
if len(example_search_queries) > 0:
example_query_link = example_search_queries_links[0]
# offers more examples if possible
more = ''
if len(example_search_queries_links) > 1:
more = '''
<script type="text/javascript">
function toggle_more_example_queries_visibility(){
var more = document.getElementById('more_example_queries');
var link = document.getElementById('link_example_queries');
var sep = document.getElementById('more_example_sep');
if (more.style.display=='none'){
more.style.display = '';
link.innerHTML = "%(show_less)s"
link.style.color = "rgb(204,0,0)";
sep.style.display = 'none';
} else {
more.style.display = 'none';
link.innerHTML = "%(show_more)s"
link.style.color = "rgb(0,0,204)";
sep.style.display = '';
}
return false;
}
</script>
<span id="more_example_queries" style="display:none;text-align:right"><br/>%(more_example_queries)s<br/></span>
<a id="link_example_queries" href="#" onclick="toggle_more_example_queries_visibility()" style="display:none"></a>
<script type="text/javascript">
var link = document.getElementById('link_example_queries');
var sep = document.getElementById('more_example_sep');
link.style.display = '';
link.innerHTML = "%(show_more)s";
sep.style.display = '';
</script>
''' % {'more_example_queries': '<br/>'.join(example_search_queries_links[1:]),
'show_less':_("less"),
'show_more':_("more")}
example_query_html += '''<p style="text-align:right;margin:0px;">
%(example)s<span id="more_example_sep" style="display:none;"> :: </span>%(more)s
</p>
''' % {'example': _("Example: %(x_sample_search_query)s") % \
{'x_sample_search_query': example_query_link},
'more': more}
# display options to search in current collection or everywhere
search_in = ''
if collection_name != CFG_SITE_NAME_INTL.get(ln, CFG_SITE_NAME):
search_in += '''
<input type="radio" name="cc" value="%(collection_id)s" id="searchCollection" checked="checked"/>
<label for="searchCollection">%(search_in_collection_name)s</label>
<input type="radio" name="cc" value="%(root_collection_name)s" id="searchEverywhere" />
<label for="searchEverywhere">%(search_everywhere)s</label>
''' % {'search_in_collection_name': _("Search in %(x_collection_name)s") % \
{'x_collection_name': collection_name},
'collection_id': collection_id,
'root_collection_name': CFG_SITE_NAME,
'search_everywhere': _("Search everywhere")}
# print commentary start:
out += '''
<table class="searchbox lightsearch">
<tbody>
<tr valign="baseline">
<td class="searchboxbody" align="right"><input type="text" name="p" size="%(sizepattern)d" value="" class="lightsearchfield"/><br/>
<small><small>%(example_query_html)s</small></small>
</td>
<td class="searchboxbody" align="left">
<input class="formbutton" type="submit" name="action_search" value="%(msg_search)s" />
</td>
<td class="searchboxbody" align="left" rowspan="2" valign="top">
<small><small>
<a href="%(siteurl)s/help/search-tips%(langlink)s">%(msg_search_tips)s</a><br/>
%(asearch)s
</small></small>
</td>
</tr></table>
<!--<tr valign="baseline">
<td class="searchboxbody" colspan="2" align="left">
<small>
--><small>%(search_in)s</small><!--
</small>
</td>
</tr>
</tbody>
</table>-->
<!--/create_searchfor_light()-->
''' % {'ln' : ln,
'sizepattern' : CFG_WEBSEARCH_LIGHTSEARCH_PATTERN_BOX_WIDTH,
'langlink': ln != CFG_SITE_LANG and '?ln=' + ln or '',
'siteurl' : CFG_SITE_URL,
'asearch' : create_html_link(asearchurl, {}, _('Advanced Search')),
'header' : header,
'msg_search' : _('Search'),
'msg_browse' : _('Browse'),
'msg_search_tips' : _('Search Tips'),
'search_in': search_in,
'example_query_html': example_query_html}
return out
def tmpl_searchfor_simple(self, ln, collection_id, collection_name, record_count, middle_option):
"""Produces simple *Search for* box for the current collection.
Parameters:
        - 'ln' - *str* The language to display
- 'collection_id' - *str* The collection id
- 'collection_name' - *str* The collection name in current language
- 'record_count' - *str* Number of records in this collection
- 'middle_option' *string* - HTML code for the options (any field, specific fields ...)
"""
# load the right message language
_ = gettext_set_language(ln)
out = '''
<!--create_searchfor_simple()-->
'''
argd = drop_default_urlargd({'ln': ln, 'cc': collection_id, 'sc': CFG_WEBSEARCH_SPLIT_BY_COLLECTION},
self.search_results_default_urlargd)
# Only add non-default hidden values
for field, value in argd.items():
out += self.tmpl_input_hidden(field, value)
header = _("Search %s records for:") % \
self.tmpl_nbrecs_info(record_count, "", "")
asearchurl = self.build_search_interface_url(c=collection_id,
aas=max(CFG_WEBSEARCH_ENABLED_SEARCH_INTERFACES),
ln=ln)
# print commentary start:
out += '''
<table class="searchbox simplesearch">
<thead>
<tr align="left">
<th colspan="3" class="searchboxheader">%(header)s</th>
</tr>
</thead>
<tbody>
<tr valign="baseline">
<td class="searchboxbody" align="left"><input type="text" name="p" size="%(sizepattern)d" value="" class="simplesearchfield"/></td>
<td class="searchboxbody" align="left">%(middle_option)s</td>
<td class="searchboxbody" align="left">
<input class="formbutton" type="submit" name="action_search" value="%(msg_search)s" />
<input class="formbutton" type="submit" name="action_browse" value="%(msg_browse)s" /></td>
</tr>
<tr valign="baseline">
<td class="searchboxbody" colspan="3" align="right">
<small>
<a href="%(siteurl)s/help/search-tips%(langlink)s">%(msg_search_tips)s</a> ::
%(asearch)s
</small>
</td>
</tr>
</tbody>
</table>
<!--/create_searchfor_simple()-->
''' % {'ln' : ln,
'sizepattern' : CFG_WEBSEARCH_SIMPLESEARCH_PATTERN_BOX_WIDTH,
'langlink': ln != CFG_SITE_LANG and '?ln=' + ln or '',
'siteurl' : CFG_SITE_URL,
'asearch' : create_html_link(asearchurl, {}, _('Advanced Search')),
'header' : header,
'middle_option' : middle_option,
'msg_search' : _('Search'),
'msg_browse' : _('Browse'),
'msg_search_tips' : _('Search Tips')}
return out
def tmpl_searchfor_advanced(self,
ln, # current language
collection_id,
collection_name,
record_count,
middle_option_1, middle_option_2, middle_option_3,
searchoptions,
sortoptions,
rankoptions,
displayoptions,
formatoptions
):
"""
Produces advanced *Search for* box for the current collection.
Parameters:
- 'ln' *string* - The language to display
- 'middle_option_1' *string* - HTML code for the first row of options (any field, specific fields ...)
- 'middle_option_2' *string* - HTML code for the second row of options (any field, specific fields ...)
- 'middle_option_3' *string* - HTML code for the third row of options (any field, specific fields ...)
- 'searchoptions' *string* - HTML code for the search options
- 'sortoptions' *string* - HTML code for the sort options
- 'rankoptions' *string* - HTML code for the rank options
- 'displayoptions' *string* - HTML code for the display options
- 'formatoptions' *string* - HTML code for the format options
"""
# load the right message language
_ = gettext_set_language(ln)
out = '''
<!--create_searchfor_advanced()-->
'''
argd = drop_default_urlargd({'ln': ln, 'aas': 1, 'cc': collection_id, 'sc': CFG_WEBSEARCH_SPLIT_BY_COLLECTION},
self.search_results_default_urlargd)
# Only add non-default hidden values
for field, value in argd.items():
out += self.tmpl_input_hidden(field, value)
header = _("Search %s records for") % \
self.tmpl_nbrecs_info(record_count, "", "")
header += ':'
ssearchurl = self.build_search_interface_url(c=collection_id, aas=min(CFG_WEBSEARCH_ENABLED_SEARCH_INTERFACES), ln=ln)
out += '''
<table class="searchbox advancedsearch">
<thead>
<tr>
<th class="searchboxheader" colspan="3">%(header)s</th>
</tr>
</thead>
<tbody>
<tr valign="bottom">
<td class="searchboxbody" style="white-space: nowrap;">
%(matchbox_m1)s<input type="text" name="p1" size="%(sizepattern)d" value="" class="advancedsearchfield"/>
</td>
<td class="searchboxbody" style="white-space: nowrap;">%(middle_option_1)s</td>
<td class="searchboxbody">%(andornot_op1)s</td>
</tr>
<tr valign="bottom">
<td class="searchboxbody" style="white-space: nowrap;">
%(matchbox_m2)s<input type="text" name="p2" size="%(sizepattern)d" value="" class="advancedsearchfield"/>
</td>
<td class="searchboxbody">%(middle_option_2)s</td>
<td class="searchboxbody">%(andornot_op2)s</td>
</tr>
<tr valign="bottom">
<td class="searchboxbody" style="white-space: nowrap;">
%(matchbox_m3)s<input type="text" name="p3" size="%(sizepattern)d" value="" class="advancedsearchfield"/>
</td>
<td class="searchboxbody">%(middle_option_3)s</td>
<td class="searchboxbody" style="white-space: nowrap;">
<input class="formbutton" type="submit" name="action_search" value="%(msg_search)s" />
<input class="formbutton" type="submit" name="action_browse" value="%(msg_browse)s" /></td>
</tr>
<tr valign="bottom">
<td colspan="3" class="searchboxbody" align="right">
<small>
<a href="%(siteurl)s/help/search-tips%(langlink)s">%(msg_search_tips)s</a> ::
%(ssearch)s
</small>
</td>
</tr>
</tbody>
</table>
<!-- @todo - more imports -->
''' % {'ln' : ln,
'sizepattern' : CFG_WEBSEARCH_ADVANCEDSEARCH_PATTERN_BOX_WIDTH,
'langlink': ln != CFG_SITE_LANG and '?ln=' + ln or '',
'siteurl' : CFG_SITE_URL,
'ssearch' : create_html_link(ssearchurl, {}, _("Simple Search")),
'header' : header,
'matchbox_m1' : self.tmpl_matchtype_box('m1', ln=ln),
'middle_option_1' : middle_option_1,
'andornot_op1' : self.tmpl_andornot_box('op1', ln=ln),
'matchbox_m2' : self.tmpl_matchtype_box('m2', ln=ln),
'middle_option_2' : middle_option_2,
'andornot_op2' : self.tmpl_andornot_box('op2', ln=ln),
'matchbox_m3' : self.tmpl_matchtype_box('m3', ln=ln),
'middle_option_3' : middle_option_3,
'msg_search' : _("Search"),
'msg_browse' : _("Browse"),
'msg_search_tips' : _("Search Tips")}
if (searchoptions):
out += """<table class="searchbox">
<thead>
<tr>
<th class="searchboxheader">
%(searchheader)s
</th>
</tr>
</thead>
<tbody>
<tr valign="bottom">
<td class="searchboxbody">%(searchoptions)s</td>
</tr>
</tbody>
</table>""" % {
'searchheader' : _("Search options:"),
'searchoptions' : searchoptions
}
out += """<table class="searchbox">
<thead>
<tr>
<th class="searchboxheader">
%(added)s
</th>
<th class="searchboxheader">
%(until)s
</th>
</tr>
</thead>
<tbody>
<tr valign="bottom">
<td class="searchboxbody">%(added_or_modified)s %(date_added)s</td>
<td class="searchboxbody">%(date_until)s</td>
</tr>
</tbody>
</table>
<table class="searchbox">
<thead>
<tr>
<th class="searchboxheader">
%(msg_sort)s
</th>
<th class="searchboxheader">
%(msg_display)s
</th>
<th class="searchboxheader">
%(msg_format)s
</th>
</tr>
</thead>
<tbody>
<tr valign="bottom">
<td class="searchboxbody">%(sortoptions)s %(rankoptions)s</td>
<td class="searchboxbody">%(displayoptions)s</td>
<td class="searchboxbody">%(formatoptions)s</td>
</tr>
</tbody>
</table>
<!--/create_searchfor_advanced()-->
""" % {
'added' : _("Added/modified since:"),
'until' : _("until:"),
'added_or_modified': self.tmpl_inputdatetype(ln=ln),
'date_added' : self.tmpl_inputdate("d1", ln=ln),
'date_until' : self.tmpl_inputdate("d2", ln=ln),
'msg_sort' : _("Sort by:"),
'msg_display' : _("Display results:"),
'msg_format' : _("Output format:"),
'sortoptions' : sortoptions,
'rankoptions' : rankoptions,
'displayoptions' : displayoptions,
'formatoptions' : formatoptions
}
return out
def tmpl_matchtype_box(self, name='m', value='', ln='en'):
"""Returns HTML code for the 'match type' selection box.
Parameters:
- 'name' *string* - The name of the produced select
- 'value' *string* - The selected value (if any value is already selected)
- 'ln' *string* - the language to display
"""
# load the right message language
_ = gettext_set_language(ln)
out = """
<select name="%(name)s">
<option value="a"%(sela)s>%(opta)s</option>
<option value="o"%(selo)s>%(opto)s</option>
<option value="e"%(sele)s>%(opte)s</option>
<option value="p"%(selp)s>%(optp)s</option>
<option value="r"%(selr)s>%(optr)s</option>
</select>
""" % {'name' : name,
'sela' : self.tmpl_is_selected('a', value),
'opta' : _("All of the words:"),
'selo' : self.tmpl_is_selected('o', value),
'opto' : _("Any of the words:"),
'sele' : self.tmpl_is_selected('e', value),
'opte' : _("Exact phrase:"),
'selp' : self.tmpl_is_selected('p', value),
'optp' : _("Partial phrase:"),
'selr' : self.tmpl_is_selected('r', value),
'optr' : _("Regular expression:")
}
return out
def tmpl_is_selected(self, var, fld):
"""
Checks if *var* and *fld* are equal, and if yes, returns ' selected="selected"'. Useful for select boxes.
Parameters:
- 'var' *string* - First value to compare
- 'fld' *string* - Second value to compare
"""
if var == fld:
return ' selected="selected"'
else:
return ""
def tmpl_andornot_box(self, name='op', value='', ln='en'):
"""
Returns HTML code for the AND/OR/NOT selection box.
Parameters:
- 'name' *string* - The name of the produced select
- 'value' *string* - The selected value (if any value is already selected)
- 'ln' *string* - the language to display
"""
# load the right message language
_ = gettext_set_language(ln)
out = """
<select name="%(name)s">
<option value="a"%(sela)s>%(opta)s</option>
<option value="o"%(selo)s>%(opto)s</option>
<option value="n"%(seln)s>%(optn)s</option>
</select>
""" % {'name' : name,
'sela' : self.tmpl_is_selected('a', value), 'opta' : _("AND"),
'selo' : self.tmpl_is_selected('o', value), 'opto' : _("OR"),
'seln' : self.tmpl_is_selected('n', value), 'optn' : _("AND NOT")
}
return out
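# Usage sketch (illustrative): self.tmpl_andornot_box(name='op1', value='o', ln='en')
# renders roughly as
#   <select name="op1">
#     <option value="a">AND</option>
#     <option value="o" selected="selected">OR</option>
#     <option value="n">AND NOT</option>
#   </select>
# (option labels depend on the active language catalogue).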
def tmpl_inputdate(self, name, ln, sy=0, sm=0, sd=0):
"""
Produces *From Date*, *Until Date* kind of selection box. Suitable for search options.
Parameters:
- 'name' *string* - The base name of the produced selects
- 'ln' *string* - the language to display
"""
# load the right message language
_ = gettext_set_language(ln)
box = """
<select name="%(name)sd">
<option value=""%(sel)s>%(any)s</option>
""" % {
'name' : name,
'any' : _("any day"),
'sel' : self.tmpl_is_selected(sd, 0)
}
for day in range(1, 32):
box += """<option value="%02d"%s>%02d</option>""" % (day, self.tmpl_is_selected(sd, day), day)
box += """</select>"""
# month
box += """
<select name="%(name)sm">
<option value=""%(sel)s>%(any)s</option>
""" % {
'name' : name,
'any' : _("any month"),
'sel' : self.tmpl_is_selected(sm, 0)
}
# trailing space in May distinguishes short/long form of the month name
for mm, month in [(1, _("January")), (2, _("February")), (3, _("March")), (4, _("April")), \
(5, _("May ")), (6, _("June")), (7, _("July")), (8, _("August")), \
(9, _("September")), (10, _("October")), (11, _("November")), (12, _("December"))]:
box += """<option value="%02d"%s>%s</option>""" % (mm, self.tmpl_is_selected(sm, mm), month.strip())
box += """</select>"""
# year
box += """
<select name="%(name)sy">
<option value=""%(sel)s>%(any)s</option>
""" % {
'name' : name,
'any' : _("any year"),
'sel' : self.tmpl_is_selected(sy, 0)
}
this_year = int(time.strftime("%Y", time.localtime()))
for year in range(this_year - 20, this_year + 1):
box += """<option value="%d"%s>%d</option>""" % (year, self.tmpl_is_selected(sy, year), year)
box += """</select>"""
return box
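# Usage sketch (illustrative): self.tmpl_inputdate("d1", ln='en') emits three
# selects named "d1d" (day), "d1m" (month) and "d1y" (year), the year range
# spanning the last 20 years up to the current one; passing sy/sm/sd
# preselects a concrete date.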
def tmpl_inputdatetype(self, dt='', ln=CFG_SITE_LANG):
"""
Produces input date type selection box to choose
added-or-modified date search option.
Parameters:
- 'dt' *string - date type (c=created, m=modified)
- 'ln' *string* - the language to display
"""
# load the right message language
_ = gettext_set_language(ln)
box = """<select name="dt">
<option value="">%(added)s </option>
<option value="m"%(sel)s>%(modified)s </option>
</select>
""" % { 'added': _("Added since:"),
'modified': _("Modified since:"),
'sel': self.tmpl_is_selected(dt, 'm'),
}
return box
def tmpl_narrowsearch(self, aas, ln, type, father,
has_grandchildren, sons, display_grandsons,
grandsons):
"""
Creates a list of collection descendants of type *type* under the appropriate title.
If aas==1, then links to Advanced Search interfaces; otherwise Simple Search.
Suitable for 'Narrow search' and 'Focus on' boxes.
Parameters:
- 'aas' *bool* - Should we display an advanced search box?
- 'ln' *string* - The language to display
- 'type' *string* - The type of the produced box (virtual collections or normal collections)
- 'father' *collection* - The current collection
- 'has_grandchildren' *bool* - If the current collection has grand children
- 'sons' *list* - The list of the sub-collections (first level)
- 'display_grandsons' *bool* - If the grand children collections should be displayed (2 level deep display)
- 'grandsons' *list* - The list of sub-collections (second level)
"""
# load the right message language
_ = gettext_set_language(ln)
title = {'r': _("Narrow by collection:"),
'v': _("Focus on:")}[type]
if has_grandchildren:
style_prolog = "<strong>"
style_epilog = "</strong>"
else:
style_prolog = ""
style_epilog = ""
out = """<table class="%(narrowsearchbox)s">
<thead>
<tr>
<th colspan="2" align="left" class="%(narrowsearchbox)sheader">
%(title)s
</th>
</tr>
</thead>
<tbody>""" % {'title' : title,
'narrowsearchbox': {'r': 'narrowsearchbox',
'v': 'focusonsearchbox'}[type]}
# iterate through sons:
i = 0
for son in sons:
out += """<tr><td class="%(narrowsearchbox)sbody" valign="top">""" % \
{ 'narrowsearchbox': {'r': 'narrowsearchbox',
'v': 'focusonsearchbox'}[type]}
if type == 'r':
if son.restricted_p() and son.restricted_p() != father.restricted_p():
out += """<input type="checkbox" name="c" value="%(name)s" /></td>""" % {'name' : cgi.escape(son.name) }
# hosted collections are checked by default only when configured so
elif str(son.dbquery).startswith("hostedcollection:"):
external_collection_engine = get_external_collection_engine(str(son.name))
if external_collection_engine and external_collection_engine.selected_by_default:
out += """<input type="checkbox" name="c" value="%(name)s" checked="checked" /></td>""" % {'name' : cgi.escape(son.name) }
elif external_collection_engine and not external_collection_engine.selected_by_default:
out += """<input type="checkbox" name="c" value="%(name)s" /></td>""" % {'name' : cgi.escape(son.name) }
else:
# strangely, the external collection engine was never found. In that case,
# why was the hosted collection here in the first place?
out += """<input type="checkbox" name="c" value="%(name)s" /></td>""" % {'name' : cgi.escape(son.name) }
else:
out += """<input type="checkbox" name="c" value="%(name)s" checked="checked" /></td>""" % {'name' : cgi.escape(son.name) }
else:
out += '</td>'
out += """<td valign="top">%(link)s%(recs)s """ % {
'link': create_html_link(self.build_search_interface_url(c=son.name, ln=ln, aas=aas),
{}, style_prolog + cgi.escape(son.get_name(ln)) + style_epilog),
'recs' : self.tmpl_nbrecs_info(son.nbrecs, ln=ln)}
# the following prints the "external collection" arrow just after the name and
# number of records of the hosted collection
# 1) we might want to make the arrow work as an anchor to the hosted collection as well.
# That would probably require a new separate function under invenio.urlutils
# 2) we might want to place the arrow between the name and the number of records of the hosted collection
# That would require to edit/separate the above out += ...
if type == 'r':
if str(son.dbquery).startswith("hostedcollection:"):
out += """<img src="%(siteurl)s/img/external-icon-light-8x8.gif" border="0" alt="%(name)s"/>""" % \
{ 'siteurl' : CFG_SITE_URL, 'name' : cgi.escape(son.name), }
if son.restricted_p():
out += """ <small class="warning">[%(msg)s]</small> """ % { 'msg' : _("restricted") }
if display_grandsons and len(grandsons[i]):
# iterate through grandsons:
out += """<br />"""
for grandson in grandsons[i]:
out += """ <small>%(link)s%(nbrec)s</small> """ % {
'link': create_html_link(self.build_search_interface_url(c=grandson.name, ln=ln, aas=aas),
{},
cgi.escape(grandson.get_name(ln))),
'nbrec' : self.tmpl_nbrecs_info(grandson.nbrecs, ln=ln)}
# the following prints the "external collection" arrow just after the name and
# number of records of the hosted collection
# Some related comments have been made just above
if type == 'r':
if str(grandson.dbquery).startswith("hostedcollection:"):
out += """<img src="%(siteurl)s/img/external-icon-light-8x8.gif" border="0" alt="%(name)s"/>""" % \
{ 'siteurl' : CFG_SITE_URL, 'name' : cgi.escape(grandson.name), }
out += """</td></tr>"""
i += 1
out += "</tbody></table>"
return out
def tmpl_searchalso(self, ln, engines_list, collection_id):
_ = gettext_set_language(ln)
box_name = _("Search also:")
html = """<table cellspacing="0" cellpadding="0" border="0">
<tr><td valign="top"><table class="searchalsosearchbox">
<thead><tr><th colspan="2" align="left" class="searchalsosearchboxheader">%(box_name)s
</th></tr></thead><tbody>
""" % locals()
for engine in engines_list:
internal_name = engine.name
name = _(internal_name)
base_url = engine.base_url
if external_collection_get_state(engine, collection_id) == 3:
checked = ' checked="checked"'
else:
checked = ''
html += """<tr><td class="searchalsosearchboxbody" valign="top">
<input type="checkbox" name="ec" id="%(id)s" value="%(internal_name)s" %(checked)s /></td>
<td valign="top" class="searchalsosearchboxbody">
<div style="white-space: nowrap"><label for="%(id)s">%(name)s</label>
<a href="%(base_url)s">
<img src="%(siteurl)s/img/external-icon-light-8x8.gif" border="0" alt="%(name)s"/></a>
</div></td></tr>""" % \
{ 'checked': checked,
'base_url': base_url,
'internal_name': internal_name,
'name': cgi.escape(name),
'id': "extSearch" + nmtoken_from_string(name),
'siteurl': CFG_SITE_URL, }
html += """</tbody></table></td></tr></table>"""
return html
def tmpl_nbrecs_info(self, number, prolog=None, epilog=None, ln=CFG_SITE_LANG):
"""
Return information on the number of records.
Parameters:
- 'number' *string* - The number of records
- 'prolog' *string* (optional) - An HTML code to prefix the number (if **None**, will be
'<small class="nbdoccoll">(')
- 'epilog' *string* (optional) - An HTML code to append to the number (if **None**, will be
')</small>')
"""
if number is None:
number = 0
if prolog is None:
prolog = ''' <small class="nbdoccoll">('''
if epilog is None:
epilog = ''')</small>'''
return prolog + self.tmpl_nice_number(number, ln) + epilog
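# Usage sketch (illustrative): with the default prolog/epilog,
#   self.tmpl_nbrecs_info(1234)  ->  ' <small class="nbdoccoll">(1,234)</small>'
# while a None count is rendered as 0.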
def tmpl_box_restricted_content(self, ln):
"""
Displays a box containing a *restricted content* message
Parameters:
- 'ln' *string* - The language to display
"""
# load the right message language
_ = gettext_set_language(ln)
return _("This collection is restricted. If you are authorized to access it, please click on the Search button.")
def tmpl_box_hosted_collection(self, ln):
"""
Displays a box containing a *hosted collection* message
Parameters:
- 'ln' *string* - The language to display
"""
# load the right message language
_ = gettext_set_language(ln)
return _("This is a hosted external collection. Please click on the Search button to see its content.")
def tmpl_box_no_records(self, ln):
"""
Displays a box containing a *no content* message
Parameters:
- 'ln' *string* - The language to display
"""
# load the right message language
_ = gettext_set_language(ln)
return _("This collection does not contain any document yet.")
def tmpl_instant_browse(self, aas, ln, recids, more_link=None, grid_layout=False):
"""
Formats a list of records (given in the recids list) from the database.
Parameters:
- 'aas' *int* - Advanced Search interface or not (0 or 1)
- 'ln' *string* - The language to display
- 'recids' *list* - the list of records from the database
- 'more_link' *string* - the "More..." link for the record. If not given, will not be displayed
"""
# load the right message language
_ = gettext_set_language(ln)
body = '''<table class="latestadditionsbox">'''
if grid_layout:
body += '<tr><td><div>'
for recid in recids:
if grid_layout:
body += '''
<abbr class="unapi-id" title="%(recid)s"></abbr>
%(body)s
''' % {
'recid': recid['id'],
'body': recid['body']}
else:
body += '''
<tr>
<td class="latestadditionsboxtimebody">%(date)s</td>
<td class="latestadditionsboxrecordbody">
<abbr class="unapi-id" title="%(recid)s"></abbr>
%(body)s
</td>
</tr>''' % {
'recid': recid['id'],
'date': recid['date'],
'body': recid['body']
}
if grid_layout:
body += '''<div style="clear:both"></div>'''
body += '''</div></td></tr>'''
body += "</table>"
if more_link:
body += '<div align="right"><small>' + \
create_html_link(more_link, {}, '[>> %s]' % _("more")) + \
'</small></div>'
return '''
<table class="narrowsearchbox">
<thead>
<tr>
<th class="narrowsearchboxheader">%(header)s</th>
</tr>
</thead>
<tbody>
<tr>
<td class="narrowsearchboxbody">%(body)s</td>
</tr>
</tbody>
</table>''' % {'header' : _("Latest additions:"),
'body' : body,
}
def tmpl_searchwithin_select(self, ln, fieldname, selected, values):
"""
Produces 'search within' selection box for the current collection.
Parameters:
- 'ln' *string* - The language to display
- 'fieldname' *string* - the name of the select box produced
- 'selected' *string* - which of the values is selected
- 'values' *list* - the list of values in the select
"""
out = '<select name="%(fieldname)s">' % {'fieldname': fieldname}
if values:
for pair in values:
out += """<option value="%(value)s"%(selected)s>%(text)s</option>""" % {
'value' : cgi.escape(pair['value']),
'selected' : self.tmpl_is_selected(pair['value'], selected),
'text' : cgi.escape(pair['text'])
}
out += """</select>"""
return out
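# Usage sketch (illustrative values):
#   self.tmpl_searchwithin_select(ln='en', fieldname='f', selected='title',
#                                 values=[{'value': '', 'text': 'any field'},
#                                         {'value': 'title', 'text': 'title'}])
# renders a <select name="f"> whose 'title' option carries selected="selected".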
def tmpl_select(self, fieldname, values, selected=None, css_class=''):
"""
Produces a generic select box
Parameters:
- 'css_class' *string* - optional, a css class to display this select with
- 'fieldname' *list* - the name of the select box produced
- 'selected' *string* - which of the values is selected
- 'values' *list* - the list of values in the select
"""
if css_class != '':
class_field = ' class="%s"' % css_class
else:
class_field = ''
out = '<select name="%(fieldname)s"%(class)s>' % {
'fieldname' : fieldname,
'class' : class_field
}
for pair in values:
if pair.get('selected', False) or pair['value'] == selected:
flag = ' selected="selected"'
else:
flag = ''
out += '<option value="%(value)s"%(selected)s>%(text)s</option>' % {
'value' : cgi.escape(str(pair['value'])),
'selected' : flag,
'text' : cgi.escape(pair['text'])
}
out += """</select>"""
return out
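# Usage sketch (illustrative): an entry is preselected either via the
# 'selected' argument or via a per-entry 'selected' key, e.g.
#   self.tmpl_select(fieldname='so',
#                    values=[{'value': 'a', 'text': 'asc.'},
#                            {'value': 'd', 'text': 'desc.'}],
#                    selected='d')
# yields a <select name="so"> with the "desc." option selected.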
def tmpl_record_links(self, recid, ln, sf='', so='d', sp='', rm=''):
"""
Displays the *More info* and *Find similar* links for a record
Parameters:
- 'ln' *string* - The language to display
- 'recid' *string* - the id of the displayed record
"""
# load the right message language
_ = gettext_set_language(ln)
out = '''<br /><span class="moreinfo">%(detailed)s - %(similar)s</span>''' % {
'detailed': create_html_link(self.build_search_url(recid=recid, ln=ln),
{},
_("Detailed record"), {'class': "moreinfo"}),
'similar': create_html_link(self.build_search_url(p="recid:%d" % recid, rm='wrd', ln=ln),
{},
_("Similar records"),
{'class': "moreinfo"})}
if CFG_BIBRANK_SHOW_CITATION_LINKS:
num_timescited = get_cited_by_count(recid)
if num_timescited:
out += '''<span class="moreinfo"> - %s </span>''' % \
create_html_link(self.build_search_url(p='refersto:recid:%d' % recid,
sf=sf,
so=so,
sp=sp,
rm=rm,
ln=ln),
{}, _("Cited by %i records") % num_timescited, {'class': "moreinfo"})
return out
def tmpl_record_body(self, titles, authors, dates, rns, abstracts, urls_u, urls_z, ln):
"""
Displays the "HTML basic" format of a record
Parameters:
- 'titles' *list* - the titles of the record (as strings)
- 'authors' *list* - the authors (as strings)
- 'dates' *list* - the dates of publication
- 'rns' *list* - the report numbers of the record (shown as quicknotes)
- 'abstracts' *list* - the abstracts for the record
- 'urls_u' *list* - URLs to the original versions of the record
- 'urls_z' *list* - Not used
"""
out = ""
for title in titles:
out += "<strong>%(title)s</strong> " % {
'title' : cgi.escape(title)
}
if authors:
out += " / "
for author in authors[:CFG_WEBSEARCH_AUTHOR_ET_AL_THRESHOLD]:
out += '%s ' % \
create_html_link(self.build_search_url(p=author, f='author', ln=ln),
{}, cgi.escape(author))
if len(authors) > CFG_WEBSEARCH_AUTHOR_ET_AL_THRESHOLD:
out += "<em>et al</em>"
for date in dates:
out += " %s." % cgi.escape(date)
for rn in rns:
out += """ <small class="quicknote">[%(rn)s]</small>""" % {'rn' : cgi.escape(rn)}
for abstract in abstracts:
out += "<br /><small>%(abstract)s [...]</small>" % {'abstract' : cgi.escape(abstract[:1 + string.find(abstract, '.')]) }
for idx in range(0, len(urls_u)):
out += """<br /><small class="note"><a class="note" href="%(url)s">%(name)s</a></small>""" % {
'url' : urls_u[idx],
'name' : urls_u[idx]
}
return out
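# Usage sketch (illustrative values): for titles=['On Quarks'],
# authors=['Ellis, J.'], dates=['2004'] and empty rns/abstracts/urls the
# output is roughly
#   <strong>On Quarks</strong>  / <a href="...">Ellis, J.</a>  2004.
# with each author rendered as a link to the corresponding author search.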
def tmpl_search_in_bibwords(self, p, f, ln, nearest_box):
"""
Displays the *Words like current ones* links for a search
Parameters:
- 'p' *string* - Current search words
- 'f' *string* - the fields in which the search was done
- 'nearest_box' *string* - the HTML code for the "nearest_terms" box - most probably from a create_nearest_terms_box call
"""
# load the right message language
_ = gettext_set_language(ln)
out = '<p>'
if f:
out += _("Words nearest to %(x_word)s inside %(x_field)s in any collection are:") % {'x_word': '<em>' + cgi.escape(p) + '</em>',
'x_field': '<em>' + cgi.escape(f) + '</em>'}
else:
out += _("Words nearest to %(x_word)s in any collection are:") % {'x_word': '<em>' + cgi.escape(p) + '</em>'}
out += '<br />' + nearest_box + '</p>'
return out
def tmpl_nearest_term_box(self, p, ln, f, terminfo, intro):
"""
Displays the *Nearest search terms* box
Parameters:
- 'p' *string* - Current search words
- 'f' *string* - the field in which the search was done (if any)
- 'ln' *string* - The language to display
- 'terminfo': tuple (term, hits, argd) for each near term
- 'intro' *string* - the intro HTML to prefix the box with
"""
out = '''<table class="nearesttermsbox" cellpadding="0" cellspacing="0" border="0">'''
for term, hits, argd in terminfo:
if hits:
hitsinfo = str(hits)
else:
hitsinfo = '-'
argd['f'] = f
argd['p'] = term
term = cgi.escape(term)
# FIXME this is hack to get correct links to nearest terms
from flask import has_request_context, request
if has_request_context() and request.values.get('of', '') != argd.get('of', ''):
if 'of' in request.values:
argd['of'] = request.values.get('of')
else:
del argd['of']
if term == p: # print search word for orientation:
nearesttermsboxbody_class = "nearesttermsboxbodyselected"
if hits > 0:
term = create_html_link(self.build_search_url(argd), {},
term, {'class': "nearesttermsselected"})
else:
nearesttermsboxbody_class = "nearesttermsboxbody"
term = create_html_link(self.build_search_url(argd), {},
term, {'class': "nearestterms"})
out += '''\
<tr>
<td class="%(nearesttermsboxbody_class)s" align="right">%(hits)s</td>
<td class="%(nearesttermsboxbody_class)s" width="15"> </td>
<td class="%(nearesttermsboxbody_class)s" align="left">%(term)s</td>
</tr>
''' % {'hits': hitsinfo,
'nearesttermsboxbody_class': nearesttermsboxbody_class,
'term': term}
out += "</table>"
return intro + "<blockquote>" + out + "</blockquote>"
def tmpl_browse_pattern(self, f, fn, ln, browsed_phrases_in_colls, colls, rg):
"""
Displays the list of browsed phrases together with their hit counts.
Parameters:
- 'f' *string* - field (*not* i18nized)
- 'fn' *string* - field name (i18nized)
- 'ln' *string* - The language to display
- 'browsed_phrases_in_colls' *array* - the phrases to display
- 'colls' *array* - the list of collection parameters of the search (c's)
- 'rg' *int* - the number of results per page
"""
# load the right message language
_ = gettext_set_language(ln)
out = """<table class="searchresultsbox">
<thead>
<tr>
<th class="searchresultsboxheader" style="text-align: right;" width="15">
%(hits)s
</th>
<th class="searchresultsboxheader" width="15">
</th>
<th class="searchresultsboxheader" style="text-align: left;">
%(fn)s
</th>
</tr>
</thead>
<tbody>""" % {
'hits' : _("Hits"),
'fn' : cgi.escape(fn)
}
if len(browsed_phrases_in_colls) == 1:
# one hit only found:
phrase, nbhits = browsed_phrases_in_colls[0][0], browsed_phrases_in_colls[0][1]
query = {'c': colls,
'ln': ln,
'p': '"%s"' % phrase.replace('"', '\\"'),
'f': f,
'rg' : rg}
out += """<tr>
<td class="searchresultsboxbody" style="text-align: right;">
%(nbhits)s
</td>
<td class="searchresultsboxbody" width="15">
</td>
<td class="searchresultsboxbody" style="text-align: left;">
%(link)s
</td>
</tr>""" % {'nbhits': nbhits,
'link': create_html_link(self.build_search_url(query),
{}, cgi.escape(phrase))}
elif len(browsed_phrases_in_colls) > 1:
# first display all hits but the last one:
for phrase, nbhits in browsed_phrases_in_colls[:-1]:
query = {'c': colls,
'ln': ln,
'p': '"%s"' % phrase.replace('"', '\\"'),
'f': f,
'rg' : rg}
out += """<tr>
<td class="searchresultsboxbody" style="text-align: right;">
%(nbhits)s
</td>
<td class="searchresultsboxbody" width="15">
</td>
<td class="searchresultsboxbody" style="text-align: left;">
%(link)s
</td>
</tr>""" % {'nbhits' : nbhits,
'link': create_html_link(self.build_search_url(query),
{},
cgi.escape(phrase))}
# now display last hit as "previous term":
phrase, nbhits = browsed_phrases_in_colls[0]
query_previous = {'c': colls,
'ln': ln,
'p': '"%s"' % phrase.replace('"', '\\"'),
'f': f,
'rg' : rg}
# now display last hit as "next term":
phrase, nbhits = browsed_phrases_in_colls[-1]
query_next = {'c': colls,
'ln': ln,
'p': '"%s"' % phrase.replace('"', '\\"'),
'f': f,
'rg' : rg}
out += """<tr><td colspan="2" class="normal">
</td>
<td class="normal">
%(link_previous)s
<img src="%(siteurl)s/img/sp.gif" alt="" border="0" />
<img src="%(siteurl)s/img/sn.gif" alt="" border="0" />
%(link_next)s
</td>
</tr>""" % {'link_previous': create_html_link(self.build_search_url(query_previous, action='browse'), {}, _("Previous")),
'link_next': create_html_link(self.build_search_url(query_next, action='browse'),
{}, _("next")),
'siteurl' : CFG_SITE_URL}
out += """</tbody>
</table>"""
return out
def tmpl_search_box(self, ln, aas, cc, cc_intl, ot, sp,
action, fieldslist, f1, f2, f3, m1, m2, m3,
p1, p2, p3, op1, op2, rm, p, f, coll_selects,
d1y, d2y, d1m, d2m, d1d, d2d, dt, sort_fields,
sf, so, ranks, sc, rg, formats, of, pl, jrec, ec,
show_colls=True, show_title=True):
"""
Displays the main search box (light, simple or advanced interface, depending on 'aas').
Parameters:
- 'ln' *string* - The language to display
- 'aas' *int* - Which search interface to display: -1 (light), 0 (simple) or 1 (advanced)
- 'cc_intl' *string* - the i18nized current collection name, used for display
- 'cc' *string* - the internal current collection name
- 'ot', 'sp' *string* - hidden values
- 'action' *string* - the action demanded by the user
- 'fieldslist' *list* - the list of all fields available, for use in select within boxes in advanced search
- 'p, f, f1, f2, f3, m1, m2, m3, p1, p2, p3, op1, op2, rm' *strings* - the search parameters
- 'coll_selects' *array* - a list of lists, each containing the collections selects to display
- 'd1y, d2y, d1m, d2m, d1d, d2d' *int* - the search between dates
- 'dt' *string* - the dates' types (creation dates, modification dates)
- 'sort_fields' *array* - the select information for the sort fields
- 'sf' *string* - the currently selected sort field
- 'so' *string* - the currently selected sort order ("a" or "d")
- 'ranks' *array* - ranking methods
- 'rm' *string* - selected ranking method
- 'sc' *string* - split by collection or not
- 'rg' *string* - selected results/page
- 'formats' *array* - available output formats
- 'of' *string* - the selected output format
- 'pl' *string* - `limit to' search pattern
- 'show_colls' *bool* - propose the collection selection box?
- 'show_title' *bool* - show cc_intl in the page title?
"""
# load the right message language
_ = gettext_set_language(ln)
# These are hidden fields the user does not manipulate
# directly
if aas == -1:
argd = drop_default_urlargd({
'ln': ln, 'aas': aas,
'ot': ot, 'sp': sp, 'ec': ec,
}, self.search_results_default_urlargd)
else:
argd = drop_default_urlargd({
'cc': cc, 'ln': ln, 'aas': aas,
'ot': ot, 'sp': sp, 'ec': ec,
}, self.search_results_default_urlargd)
out = ""
if show_title:
# display cc name if asked for
out += '''
<h1 class="headline">%(ccname)s</h1>''' % {'ccname' : cgi.escape(cc_intl), }
out += '''
<form name="search" action="%(siteurl)s/search" method="get">
''' % {'siteurl' : CFG_SITE_URL}
# Only add non-default hidden values
for field, value in argd.items():
out += self.tmpl_input_hidden(field, value)
leadingtext = _("Search")
if action == 'browse':
leadingtext = _("Browse")
if aas == 1:
# print Advanced Search form:
# define search box elements:
out += '''
<table class="searchbox advancedsearch">
<thead>
<tr>
<th colspan="3" class="searchboxheader">
%(leading)s:
</th>
</tr>
</thead>
<tbody>
<tr valign="top" style="white-space:nowrap;">
<td class="searchboxbody">%(matchbox1)s
<input type="text" name="p1" size="%(sizepattern)d" value="%(p1)s" class="advancedsearchfield"/>
</td>
<td class="searchboxbody">%(searchwithin1)s</td>
<td class="searchboxbody">%(andornot1)s</td>
</tr>
<tr valign="top">
<td class="searchboxbody">%(matchbox2)s
<input type="text" name="p2" size="%(sizepattern)d" value="%(p2)s" class="advancedsearchfield"/>
</td>
<td class="searchboxbody">%(searchwithin2)s</td>
<td class="searchboxbody">%(andornot2)s</td>
</tr>
<tr valign="top">
<td class="searchboxbody">%(matchbox3)s
<input type="text" name="p3" size="%(sizepattern)d" value="%(p3)s" class="advancedsearchfield"/>
</td>
<td class="searchboxbody">%(searchwithin3)s</td>
<td class="searchboxbody" style="white-space:nowrap;">
<input class="formbutton" type="submit" name="action_search" value="%(search)s" />
<input class="formbutton" type="submit" name="action_browse" value="%(browse)s" />
</td>
</tr>
<tr valign="bottom">
<td colspan="3" align="right" class="searchboxbody">
<small>
<a href="%(siteurl)s/help/search-tips%(langlink)s">%(search_tips)s</a> ::
%(simple_search)s
</small>
</td>
</tr>
</tbody>
</table>
''' % {
'simple_search': create_html_link(self.build_search_url(p=p1, f=f1, rm=rm, cc=cc, ln=ln, jrec=jrec, rg=rg),
{}, _("Simple Search")),
'leading' : leadingtext,
'sizepattern' : CFG_WEBSEARCH_ADVANCEDSEARCH_PATTERN_BOX_WIDTH,
'matchbox1' : self.tmpl_matchtype_box('m1', m1, ln=ln),
'p1' : cgi.escape(p1, 1),
'searchwithin1' : self.tmpl_searchwithin_select(
ln=ln,
fieldname='f1',
selected=f1,
values=self._add_mark_to_field(value=f1, fields=fieldslist, ln=ln)
),
'andornot1' : self.tmpl_andornot_box(
name='op1',
value=op1,
ln=ln
),
'matchbox2' : self.tmpl_matchtype_box('m2', m2, ln=ln),
'p2' : cgi.escape(p2, 1),
'searchwithin2' : self.tmpl_searchwithin_select(
ln=ln,
fieldname='f2',
selected=f2,
values=self._add_mark_to_field(value=f2, fields=fieldslist, ln=ln)
),
'andornot2' : self.tmpl_andornot_box(
name='op2',
value=op2,
ln=ln
),
'matchbox3' : self.tmpl_matchtype_box('m3', m3, ln=ln),
'p3' : cgi.escape(p3, 1),
'searchwithin3' : self.tmpl_searchwithin_select(
ln=ln,
fieldname='f3',
selected=f3,
values=self._add_mark_to_field(value=f3, fields=fieldslist, ln=ln)
),
'search' : _("Search"),
'browse' : _("Browse"),
'siteurl' : CFG_SITE_URL,
'ln' : ln,
'langlink': ln != CFG_SITE_LANG and '?ln=' + ln or '',
'search_tips': _("Search Tips")
}
elif aas == 0:
# print Simple Search form:
out += '''
<table class="searchbox simplesearch">
<thead>
<tr>
<th colspan="3" class="searchboxheader">
%(leading)s:
</th>
</tr>
</thead>
<tbody>
<tr valign="top">
<td class="searchboxbody"><input type="text" name="p" size="%(sizepattern)d" value="%(p)s" class="simplesearchfield"/></td>
<td class="searchboxbody">%(searchwithin)s</td>
<td class="searchboxbody">
<input class="formbutton" type="submit" name="action_search" value="%(search)s" />
<input class="formbutton" type="submit" name="action_browse" value="%(browse)s" />
</td>
</tr>
<tr valign="bottom">
<td colspan="3" align="right" class="searchboxbody">
<small>
<a href="%(siteurl)s/help/search-tips%(langlink)s">%(search_tips)s</a> ::
%(advanced_search)s
</small>
</td>
</tr>
</tbody>
</table>
''' % {
'advanced_search': create_html_link(self.build_search_url(p1=p,
f1=f,
rm=rm,
aas=max(CFG_WEBSEARCH_ENABLED_SEARCH_INTERFACES),
cc=cc,
jrec=jrec,
ln=ln,
rg=rg),
{}, _("Advanced Search")),
'leading' : leadingtext,
'sizepattern' : CFG_WEBSEARCH_SIMPLESEARCH_PATTERN_BOX_WIDTH,
'p' : cgi.escape(p, 1),
'searchwithin' : self.tmpl_searchwithin_select(
ln=ln,
fieldname='f',
selected=f,
values=self._add_mark_to_field(value=f, fields=fieldslist, ln=ln)
),
'search' : _("Search"),
'browse' : _("Browse"),
'siteurl' : CFG_SITE_URL,
'ln' : ln,
'langlink': ln != CFG_SITE_LANG and '?ln=' + ln or '',
'search_tips': _("Search Tips")
}
else:
# EXPERIMENTAL
# print light search form:
search_in = ''
if cc_intl != CFG_SITE_NAME_INTL.get(ln, CFG_SITE_NAME):
search_in = '''
<input type="radio" name="cc" value="%(collection_id)s" id="searchCollection" checked="checked"/>
<label for="searchCollection">%(search_in_collection_name)s</label>
<input type="radio" name="cc" value="%(root_collection_name)s" id="searchEverywhere" />
<label for="searchEverywhere">%(search_everywhere)s</label>
''' % {'search_in_collection_name': _("Search in %(x_collection_name)s") % \
{'x_collection_name': cgi.escape(cc_intl)},
'collection_id': cc,
'root_collection_name': CFG_SITE_NAME,
'search_everywhere': _("Search everywhere")}
out += '''
<table class="searchbox lightsearch">
<tr valign="top">
<td class="searchboxbody"><input type="text" name="p" size="%(sizepattern)d" value="%(p)s" class="lightsearchfield"/></td>
<td class="searchboxbody">
<input class="formbutton" type="submit" name="action_search" value="%(search)s" />
</td>
<td class="searchboxbody" align="left" rowspan="2" valign="top">
<small><small>
<a href="%(siteurl)s/help/search-tips%(langlink)s">%(search_tips)s</a><br/>
%(advanced_search)s
</small></small>
</td>
</tr>
</table>
<small>%(search_in)s</small>
''' % {
'sizepattern' : CFG_WEBSEARCH_LIGHTSEARCH_PATTERN_BOX_WIDTH,
'advanced_search': create_html_link(self.build_search_url(p1=p,
f1=f,
rm=rm,
aas=max(CFG_WEBSEARCH_ENABLED_SEARCH_INTERFACES),
cc=cc,
jrec=jrec,
ln=ln,
rg=rg),
{}, _("Advanced Search")),
'leading' : leadingtext,
'p' : cgi.escape(p, 1),
'searchwithin' : self.tmpl_searchwithin_select(
ln=ln,
fieldname='f',
selected=f,
values=self._add_mark_to_field(value=f, fields=fieldslist, ln=ln)
),
'search' : _("Search"),
'browse' : _("Browse"),
'siteurl' : CFG_SITE_URL,
'ln' : ln,
'langlink': ln != CFG_SITE_LANG and '?ln=' + ln or '',
'search_tips': _("Search Tips"),
'search_in': search_in
}
## secondly, print Collection(s) box:
if show_colls and aas > -1:
# display collections only if there is more than one
selects = ''
for sel in coll_selects:
selects += self.tmpl_select(fieldname='c', values=sel)
out += """
<table class="searchbox">
<thead>
<tr>
<th colspan="3" class="searchboxheader">
%(leading)s %(msg_coll)s:
</th>
</tr>
</thead>
<tbody>
<tr valign="bottom">
<td valign="top" class="searchboxbody">
%(colls)s
</td>
</tr>
</tbody>
</table>
""" % {
'leading' : leadingtext,
'msg_coll' : _("collections"),
'colls' : selects,
}
## thirdly, print search limits, if applicable:
if action != _("Browse") and pl:
out += """<table class="searchbox">
<thead>
<tr>
<th class="searchboxheader">
%(limitto)s
</th>
</tr>
</thead>
<tbody>
<tr valign="bottom">
<td class="searchboxbody">
<input type="text" name="pl" size="%(sizepattern)d" value="%(pl)s" />
</td>
</tr>
</tbody>
</table>""" % {
'limitto' : _("Limit to:"),
'sizepattern' : CFG_WEBSEARCH_ADVANCEDSEARCH_PATTERN_BOX_WIDTH,
'pl' : cgi.escape(pl, 1),
}
## fourthly, print from/until date boxes, if applicable:
if action == _("Browse") or (d1y == 0 and d1m == 0 and d1d == 0 and d2y == 0 and d2m == 0 and d2d == 0):
pass # do not need it
else:
cell_6_a = self.tmpl_inputdatetype(dt, ln) + self.tmpl_inputdate("d1", ln, d1y, d1m, d1d)
cell_6_b = self.tmpl_inputdate("d2", ln, d2y, d2m, d2d)
out += """<table class="searchbox">
<thead>
<tr>
<th class="searchboxheader">
%(added)s
</th>
<th class="searchboxheader">
%(until)s
</th>
</tr>
</thead>
<tbody>
<tr valign="bottom">
<td class="searchboxbody">%(added_or_modified)s %(date1)s</td>
<td class="searchboxbody">%(date2)s</td>
</tr>
</tbody>
</table>""" % {
'added' : _("Added/modified since:"),
'until' : _("until:"),
'added_or_modified': self.tmpl_inputdatetype(dt, ln),
'date1' : self.tmpl_inputdate("d1", ln, d1y, d1m, d1d),
'date2' : self.tmpl_inputdate("d2", ln, d2y, d2m, d2d),
}
## fifthly, print Display results box, including sort/rank, formats, etc:
if action != _("Browse") and aas > -1:
rgs = []
for i in [10, 25, 50, 100, 250, 500]:
if i <= CFG_WEBSEARCH_MAX_RECORDS_IN_GROUPS:
rgs.append({ 'value' : i, 'text' : "%d %s" % (i, _("results"))})
# enrich sort fields list if we are sorting by some MARC tag:
sort_fields = self._add_mark_to_field(value=sf, fields=sort_fields, ln=ln)
# create sort by HTML box:
out += """<table class="searchbox">
<thead>
<tr>
<th class="searchboxheader">
%(sort_by)s
</th>
<th class="searchboxheader">
%(display_res)s
</th>
<th class="searchboxheader">
%(out_format)s
</th>
</tr>
</thead>
<tbody>
<tr valign="bottom">
<td class="searchboxbody">
%(select_sf)s %(select_so)s %(select_rm)s
</td>
<td class="searchboxbody">
%(select_rg)s %(select_sc)s
</td>
<td class="searchboxbody">%(select_of)s</td>
</tr>
</tbody>
</table>""" % {
'sort_by' : _("Sort by:"),
'display_res' : _("Display results:"),
'out_format' : _("Output format:"),
'select_sf' : self.tmpl_select(fieldname='sf', values=sort_fields, selected=sf, css_class='address'),
'select_so' : self.tmpl_select(fieldname='so', values=[{
'value' : 'a',
'text' : _("asc.")
}, {
'value' : 'd',
'text' : _("desc.")
}], selected=so, css_class='address'),
'select_rm' : self.tmpl_select(fieldname='rm', values=ranks, selected=rm, css_class='address'),
'select_rg' : self.tmpl_select(fieldname='rg', values=rgs, selected=rg, css_class='address'),
'select_sc' : self.tmpl_select(fieldname='sc', values=[{
'value' : 0,
'text' : _("single list")
}, {
'value' : 1,
'text' : _("split by collection")
}], selected=sc, css_class='address'),
'select_of' : self.tmpl_select(
fieldname='of',
selected=of,
values=self._add_mark_to_field(value=of, fields=formats, chars=3, ln=ln),
css_class='address'),
}
## last but not least, print end of search box:
out += """</form>"""
return out
def tmpl_input_hidden(self, name, value):
"Produces the HTML code for a hidden field "
if isinstance(value, list):
list_input = [self.tmpl_input_hidden(name, val) for val in value]
return "\n".join(list_input)
# Treat 'as'/'aas' arguments specially: 'aas' is emitted as 'as'.
if name == 'aas':
name = 'as'
return """<input type="hidden" name="%(name)s" value="%(value)s" />""" % {
'name' : cgi.escape(str(name), 1),
'value' : cgi.escape(str(value), 1),
}
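# Usage sketch (illustrative):
#   self.tmpl_input_hidden('ot', ['marc', 'xml'])   # -> two hidden inputs
#   self.tmpl_input_hidden('aas', 1)
# the latter is emitted as <input type="hidden" name="as" value="1" />
# because of the 'aas' -> 'as' renaming above.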
def _add_mark_to_field(self, value, fields, ln, chars=1):
"""Adds the current value as a MARC tag in the fields array
Useful for advanced search"""
# load the right message language
_ = gettext_set_language(ln)
out = fields
if value and str(value[0:chars]).isdigit():
out.append({'value' : value,
'text' : str(value) + " " + _("MARC tag")
})
return out
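# Usage sketch (illustrative): sorting by a raw MARC tag such as
# value='773__p' appends {'value': '773__p', 'text': '773__p MARC tag'}
# because the leading character is a digit; note that the passed-in
# fields list itself is extended in place.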
def tmpl_search_pagestart(self, ln) :
"page start for search page. Will display after the page header"
return """<div class="pagebody"><div class="pagebodystripemiddle">"""
def tmpl_search_pageend(self, ln) :
"page end for search page. Will display just before the page footer"
return """</div></div>"""
def tmpl_print_search_info(self, ln, middle_only,
collection, collection_name, collection_id,
aas, sf, so, rm, rg, nb_found, of, ot, p, f, f1,
f2, f3, m1, m2, m3, op1, op2, p1, p2,
p3, d1y, d1m, d1d, d2y, d2m, d2d, dt,
all_fieldcodes, cpu_time, pl_in_url,
jrec, sc, sp):
"""Prints stripe with the information on 'collection' and 'nb_found' results and CPU time.
Also, prints navigation links (beg/next/prev/end) inside the results set.
If middle_only is set to 1, it will only print the middle box information (beg/next/prev/end) links.
This is suitable for displaying navigation links at the bottom of the search results page.
Parameters:
- 'ln' *string* - The language to display
- 'middle_only' *bool* - Only display parts of the interface
- 'collection' *string* - the collection name
- 'collection_name' *string* - the i18nized current collection name
- 'aas' *bool* - if we display the advanced search interface
- 'sf' *string* - the currently selected sort format
- 'so' *string* - the currently selected sort order ("a" or "d")
- 'rm' *string* - selected ranking method
- 'rg' *int* - selected results/page
- 'nb_found' *int* - number of results found
- 'of' *string* - the selected output format
- 'ot' *string* - hidden values
- 'p' *string* - Current search words
- 'f' *string* - the fields in which the search was done
- 'f1, f2, f3, m1, m2, m3, p1, p2, p3, op1, op2' *strings* - the search parameters
- 'jrec' *int* - number of first record on this page
- 'd1y, d2y, d1m, d2m, d1d, d2d' *int* - the search between dates
- 'dt' *string* the dates' type (creation date, modification date)
- 'all_fieldcodes' *array* - all the available fields
- 'cpu_time' *float* - the time of the query in seconds
"""
# load the right message language
_ = gettext_set_language(ln)
out = ""
# left table cells: print collection name
if not middle_only:
out += '''
<a name="%(collection_id)s"></a>
<form action="%(siteurl)s/search" method="get">
<table class="searchresultsbox"><tr><td class="searchresultsboxheader" align="left">
<strong><big>%(collection_link)s</big></strong></td>
''' % {
'collection_id': collection_id,
'siteurl' : CFG_SITE_URL,
'collection_link': create_html_link(self.build_search_interface_url(c=collection, aas=aas, ln=ln),
{}, cgi.escape(collection_name))
}
else:
out += """
<div style="clear:both"></div>
<form action="%(siteurl)s/search" method="get"><div align="center">
""" % { 'siteurl' : CFG_SITE_URL }
# middle table cell: print beg/next/prev/end arrows:
if not middle_only:
out += """<td class="searchresultsboxheader" align="center">
%(recs_found)s """ % {
'recs_found' : _("%s records found") % ('<strong>' + self.tmpl_nice_number(nb_found, ln) + '</strong>')
}
else:
out += "<small>"
if nb_found > rg:
out += "" + cgi.escape(collection_name) + " : " + _("%s records found") % ('<strong>' + self.tmpl_nice_number(nb_found, ln) + '</strong>') + " "
if nb_found > rg: # navig.arrows are needed, since we have many hits
query = {'p': p, 'f': f,
'cc': collection,
'sf': sf, 'so': so,
'sp': sp, 'rm': rm,
'of': of, 'ot': ot,
'aas': aas, 'ln': ln,
'p1': p1, 'p2': p2, 'p3': p3,
'f1': f1, 'f2': f2, 'f3': f3,
'm1': m1, 'm2': m2, 'm3': m3,
'op1': op1, 'op2': op2,
'sc': 0,
'd1y': d1y, 'd1m': d1m, 'd1d': d1d,
'd2y': d2y, 'd2m': d2m, 'd2d': d2d,
'dt': dt,
}
# @todo here
def img(gif, txt):
return '<img src="%(siteurl)s/img/%(gif)s.gif" alt="%(txt)s" border="0" />' % {
'txt': txt, 'gif': gif, 'siteurl': CFG_SITE_URL}
if jrec - rg > 1:
out += create_html_link(self.build_search_url(query, jrec=1, rg=rg),
{}, img('sb', _("begin")),
{'class': 'img'})
if jrec > 1:
out += create_html_link(self.build_search_url(query, jrec=max(jrec - rg, 1), rg=rg),
{}, img('sp', _("previous")),
{'class': 'img'})
if jrec + rg - 1 < nb_found:
out += "%d - %d" % (jrec, jrec + rg - 1)
else:
out += "%d - %d" % (jrec, nb_found)
if nb_found >= jrec + rg:
out += create_html_link(self.build_search_url(query,
jrec=jrec + rg,
rg=rg),
{}, img('sn', _("next")),
{'class':'img'})
if nb_found >= jrec + rg + rg:
out += create_html_link(self.build_search_url(query,
jrec=nb_found - rg + 1,
rg=rg),
{}, img('se', _("end")),
{'class': 'img'})
# still in the navigation part
cc = collection
sc = 0
for var in ['p', 'cc', 'f', 'sf', 'so', 'of', 'rg', 'aas', 'ln', 'p1', 'p2', 'p3', 'f1', 'f2', 'f3', 'm1', 'm2', 'm3', 'op1', 'op2', 'sc', 'd1y', 'd1m', 'd1d', 'd2y', 'd2m', 'd2d', 'dt']:
out += self.tmpl_input_hidden(name=var, value=vars()[var])
for var in ['ot', 'sp', 'rm']:
if vars()[var]:
out += self.tmpl_input_hidden(name=var, value=vars()[var])
if pl_in_url:
fieldargs = cgi.parse_qs(pl_in_url)
for fieldcode in all_fieldcodes:
# get_fieldcodes():
if fieldargs.has_key(fieldcode):
for val in fieldargs[fieldcode]:
out += self.tmpl_input_hidden(name=fieldcode, value=val)
out += """ %(jump)s <input type="text" name="jrec" size="4" value="%(jrec)d" />""" % {
'jump' : _("jump to record:"),
'jrec' : jrec,
}
if not middle_only:
out += "</td>"
else:
out += "</small>"
# right table cell: cpu time info
if not middle_only:
if cpu_time > -1:
out += """<td class="searchresultsboxheader" align="right"><small>%(time)s</small> </td>""" % {
'time' : _("Search took %s seconds.") % ('%.2f' % cpu_time),
}
out += "</tr></table>"
else:
out += "</div>"
out += "</form>"
return out
def tmpl_print_hosted_search_info(self, ln, middle_only,
collection, collection_name, collection_id,
aas, sf, so, rm, rg, nb_found, of, ot, p, f, f1,
f2, f3, m1, m2, m3, op1, op2, p1, p2,
p3, d1y, d1m, d1d, d2y, d2m, d2d, dt,
all_fieldcodes, cpu_time, pl_in_url,
jrec, sc, sp):
"""Prints stripe with the information on 'collection' and 'nb_found' results and CPU time.
Also, prints navigation links (beg/next/prev/end) inside the results set.
If middle_only is set to 1, it will only print the middle box information (beg/next/prev/end) links.
This is suitable for displaying navigation links at the bottom of the search results page.
Parameters:
- 'ln' *string* - The language to display
- 'middle_only' *bool* - Only display parts of the interface
- 'collection' *string* - the collection name
- 'collection_name' *string* - the i18nized current collection name
- 'aas' *bool* - if we display the advanced search interface
- 'sf' *string* - the currently selected sort format
- 'so' *string* - the currently selected sort order ("a" or "d")
- 'rm' *string* - selected ranking method
- 'rg' *int* - selected results/page
- 'nb_found' *int* - number of results found
- 'of' *string* - the selected output format
- 'ot' *string* - hidden values
- 'p' *string* - Current search words
- 'f' *string* - the fields in which the search was done
- 'f1, f2, f3, m1, m2, m3, p1, p2, p3, op1, op2' *strings* - the search parameters
- 'jrec' *int* - number of first record on this page
- 'd1y, d2y, d1m, d2m, d1d, d2d' *int* - the search between dates
- 'dt' *string* the dates' type (creation date, modification date)
- 'all_fieldcodes' *array* - all the available fields
- 'cpu_time' *float* - the time of the query in seconds
"""
# load the right message language
_ = gettext_set_language(ln)
out = ""
# left table cells: print collection name
if not middle_only:
out += '''
<a name="%(collection_id)s"></a>
<form action="%(siteurl)s/search" method="get">
<table class="searchresultsbox"><tr><td class="searchresultsboxheader" align="left">
<strong><big>%(collection_link)s</big></strong></td>
''' % {
'collection_id': collection_id,
'siteurl' : CFG_SITE_URL,
'collection_link': create_html_link(self.build_search_interface_url(c=collection, aas=aas, ln=ln),
{}, cgi.escape(collection_name))
}
else:
out += """
<form action="%(siteurl)s/search" method="get"><div align="center">
""" % { 'siteurl' : CFG_SITE_URL }
# middle table cell: print beg/next/prev/end arrows:
if not middle_only:
# in case we have a hosted collection that timed out, do not print its number of records, as it is yet unknown
if nb_found != -963:
out += """<td class="searchresultsboxheader" align="center">
%(recs_found)s """ % {
'recs_found' : _("%s records found") % ('<strong>' + self.tmpl_nice_number(nb_found, ln) + '</strong>')
}
else:
out += "<small>"
# we do not care about timed out hosted collections here, because the number of records found will never be bigger
# than rg anyway, since it's negative
if nb_found > rg:
out += "" + cgi.escape(collection_name) + " : " + _("%s records found") % ('<strong>' + self.tmpl_nice_number(nb_found, ln) + '</strong>') + " "
if nb_found > rg: # navig.arrows are needed, since we have many hits
query = {'p': p, 'f': f,
'cc': collection,
'sf': sf, 'so': so,
'sp': sp, 'rm': rm,
'of': of, 'ot': ot,
'aas': aas, 'ln': ln,
'p1': p1, 'p2': p2, 'p3': p3,
'f1': f1, 'f2': f2, 'f3': f3,
'm1': m1, 'm2': m2, 'm3': m3,
'op1': op1, 'op2': op2,
'sc': 0,
'd1y': d1y, 'd1m': d1m, 'd1d': d1d,
'd2y': d2y, 'd2m': d2m, 'd2d': d2d,
'dt': dt,
}
# @todo here
def img(gif, txt):
return '<img src="%(siteurl)s/img/%(gif)s.gif" alt="%(txt)s" border="0" />' % {
'txt': txt, 'gif': gif, 'siteurl': CFG_SITE_URL}
if jrec - rg > 1:
out += create_html_link(self.build_search_url(query, jrec=1, rg=rg),
{}, img('sb', _("begin")),
{'class': 'img'})
if jrec > 1:
out += create_html_link(self.build_search_url(query, jrec=max(jrec - rg, 1), rg=rg),
{}, img('sp', _("previous")),
{'class': 'img'})
if jrec + rg - 1 < nb_found:
out += "%d - %d" % (jrec, jrec + rg - 1)
else:
out += "%d - %d" % (jrec, nb_found)
if nb_found >= jrec + rg:
out += create_html_link(self.build_search_url(query,
jrec=jrec + rg,
rg=rg),
{}, img('sn', _("next")),
{'class':'img'})
if nb_found >= jrec + rg + rg:
out += create_html_link(self.build_search_url(query,
jrec=nb_found - rg + 1,
rg=rg),
{}, img('se', _("end")),
{'class': 'img'})
# still in the navigation part
cc = collection
sc = 0
for var in ['p', 'cc', 'f', 'sf', 'so', 'of', 'rg', 'aas', 'ln', 'p1', 'p2', 'p3', 'f1', 'f2', 'f3', 'm1', 'm2', 'm3', 'op1', 'op2', 'sc', 'd1y', 'd1m', 'd1d', 'd2y', 'd2m', 'd2d', 'dt']:
out += self.tmpl_input_hidden(name=var, value=vars()[var])
for var in ['ot', 'sp', 'rm']:
if vars()[var]:
out += self.tmpl_input_hidden(name=var, value=vars()[var])
if pl_in_url:
fieldargs = cgi.parse_qs(pl_in_url)
for fieldcode in all_fieldcodes:
# get_fieldcodes():
if fieldargs.has_key(fieldcode):
for val in fieldargs[fieldcode]:
out += self.tmpl_input_hidden(name=fieldcode, value=val)
out += """ %(jump)s <input type="text" name="jrec" size="4" value="%(jrec)d" />""" % {
'jump' : _("jump to record:"),
'jrec' : jrec,
}
if not middle_only:
out += "</td>"
else:
out += "</small>"
# right table cell: cpu time info
if not middle_only:
if cpu_time > -1:
out += """<td class="searchresultsboxheader" align="right"><small>%(time)s</small> </td>""" % {
'time' : _("Search took %s seconds.") % ('%.2f' % cpu_time),
}
out += "</tr></table>"
else:
out += "</div>"
out += "</form>"
return out
def tmpl_nice_number(self, number, ln=CFG_SITE_LANG, thousands_separator=',', max_ndigits_after_dot=None):
"""
Return nicely printed number NUMBER in language LN using
given THOUSANDS_SEPARATOR character.
If max_ndigits_after_dot is specified and the number is float, the
number is rounded by taking into consideration up to max_ndigits_after_dot
digits after the dot.
This version does not pay attention to locale. See
tmpl_nice_number_via_locale().
"""
if type(number) is float:
if max_ndigits_after_dot is not None:
number = round(number, max_ndigits_after_dot)
int_part, frac_part = str(number).split('.')
return '%s.%s' % (self.tmpl_nice_number(int(int_part), ln, thousands_separator), frac_part)
else:
chars_in = list(str(number))
number = len(chars_in)
chars_out = []
for i in range(0, number):
if i % 3 == 0 and i != 0:
chars_out.append(thousands_separator)
chars_out.append(chars_in[number - i - 1])
chars_out.reverse()
return ''.join(chars_out)
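# Usage sketch (illustrative):
#   self.tmpl_nice_number(1234567)                             ->  '1,234,567'
#   self.tmpl_nice_number(1234.5678, max_ndigits_after_dot=2)  ->  '1,234.57'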
def tmpl_nice_number_via_locale(self, number, ln=CFG_SITE_LANG):
"""
Return nicely printed number NUMBER in language LN using the locale.
See also version tmpl_nice_number().
"""
if number is None:
return None
# Temporarily switch the numeric locale to the requested one, and format the number
# In case the system has no locale definition, use the vanilla form
ol = locale.getlocale(locale.LC_NUMERIC)
try:
locale.setlocale(locale.LC_NUMERIC, self.tmpl_localemap.get(ln, self.tmpl_default_locale))
except locale.Error:
return str(number)
try:
number = locale.format('%d', number, True)
except TypeError:
return str(number)
locale.setlocale(locale.LC_NUMERIC, ol)
return number
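# Behaviour sketch (illustrative): when the mapped locale is not installed
# on the system the method falls back to plain str(), so e.g. a call with an
# unknown 'ln' may simply return '1234567' without thousands separators.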
def tmpl_record_format_htmlbrief_header(self, ln):
"""Returns the header of the search results list when output
is html brief. Note that this function is called for each collection
results when 'split by collection' is enabled.
See also: tmpl_record_format_htmlbrief_footer,
tmpl_record_format_htmlbrief_body
Parameters:
- 'ln' *string* - The language to display
"""
# load the right message language
_ = gettext_set_language(ln)
out = """
<form action="%(siteurl)s/yourbaskets/add" method="post">
<table>
""" % {
'siteurl' : CFG_SITE_URL,
}
return out
def tmpl_record_format_htmlbrief_footer(self, ln, display_add_to_basket=True):
"""Returns the footer of the search results list when output
is html brief. Note that this function is called for each collection
results when 'split by collection' is enabled.
See also: tmpl_record_format_htmlbrief_header(..),
tmpl_record_format_htmlbrief_body(..)
Parameters:
- 'ln' *string* - The language to display
- 'display_add_to_basket' *bool* - whether to display Add-to-basket button
"""
# load the right message language
_ = gettext_set_language(ln)
out = """</table>
<br />
<input type="hidden" name="colid" value="0" />
%(add_to_basket)s
</form>""" % {
'add_to_basket': display_add_to_basket and """<input class="formbutton" type="submit" name="action" value="%s" />""" % _("Add to basket") or "",
}
return out
def tmpl_record_format_htmlbrief_body(self, ln, recid,
row_number, relevance,
record, relevances_prologue,
relevances_epilogue,
display_add_to_basket=True):
"""Returns the html brief format of one record. Used in the
search results list for each record.
See also: tmpl_record_format_htmlbrief_header(..),
tmpl_record_format_htmlbrief_footer(..)
Parameters:
- 'ln' *string* - The language to display
- 'row_number' *int* - The position of this record in the list
- 'recid' *int* - The recID
- 'relevance' *string* - The relevance of the record
- 'record' *string* - The formatted record
- 'relevances_prologue' *string* - HTML code to prepend the relevance indicator
- 'relevances_epilogue' *string* - HTML code to append to the relevance indicator (used mostly for formatting)
"""
# load the right message language
_ = gettext_set_language(ln)
checkbox_for_baskets = """<input name="recid" type="checkbox" value="%(recid)s" />""" % \
{'recid': recid, }
if not display_add_to_basket:
checkbox_for_baskets = ''
out = """
<tr><td valign="top" align="right" style="white-space: nowrap;">
%(checkbox_for_baskets)s
<abbr class="unapi-id" title="%(recid)s"></abbr>
%(number)s.
""" % {'recid': recid,
'number': row_number,
'checkbox_for_baskets': checkbox_for_baskets}
if relevance:
out += """<br /><div class="rankscoreinfo"><a title="rank score">%(prologue)s%(relevance)s%(epilogue)s</a></div>""" % {
'prologue' : relevances_prologue,
'epilogue' : relevances_epilogue,
'relevance' : relevance
}
out += """</td><td valign="top">%s</td></tr>""" % record
return out
def tmpl_print_results_overview(self, ln, results_final_nb_total, cpu_time, results_final_nb, colls, ec, hosted_colls_potential_results_p=False):
"""Prints results overview box with links to particular collections below.
Parameters:
- 'ln' *string* - The language to display
- 'results_final_nb_total' *int* - The total number of hits for the query
- 'colls' *array* - The collections with hits, in the format:
- 'coll[code]' *string* - The code of the collection (canonical name)
- 'coll[name]' *string* - The display name of the collection
- 'results_final_nb' *array* - The number of hits, indexed by the collection codes:
- 'cpu_time' *string* - The time the query took
- 'ec' *array* - selected external collections
- 'hosted_colls_potential_results_p' *boolean* - check if there are any hosted collections searches
that timed out during the pre-search
"""
if len(colls) == 1 and not ec:
# if one collection only and no external collections, print nothing:
return ""
# load the right message language
_ = gettext_set_language(ln)
# first find total number of hits:
# if there were no hosted collections that timed out during the pre-search print out the exact number of records found
if not hosted_colls_potential_results_p:
out = """<table class="searchresultsbox">
<thead><tr><th class="searchresultsboxheader">%(founds)s</th></tr></thead>
<tbody><tr><td class="searchresultsboxbody"> """ % {
'founds' : _("%(x_fmt_open)sResults overview:%(x_fmt_close)s Found %(x_nb_records)s records in %(x_nb_seconds)s seconds.") % \
{'x_fmt_open': '<strong>',
'x_fmt_close': '</strong>',
'x_nb_records': '<strong>' + self.tmpl_nice_number(results_final_nb_total, ln) + '</strong>',
'x_nb_seconds': '%.2f' % cpu_time}
}
# if there were (only) hosted_collections that timed out during the pre-search print out a fuzzier message
else:
if results_final_nb_total == 0:
out = """<table class="searchresultsbox">
<thead><tr><th class="searchresultsboxheader">%(founds)s</th></tr></thead>
<tbody><tr><td class="searchresultsboxbody"> """ % {
'founds' : _("%(x_fmt_open)sResults overview%(x_fmt_close)s") % \
{'x_fmt_open': '<strong>',
'x_fmt_close': '</strong>'}
}
elif results_final_nb_total > 0:
out = """<table class="searchresultsbox">
<thead><tr><th class="searchresultsboxheader">%(founds)s</th></tr></thead>
<tbody><tr><td class="searchresultsboxbody"> """ % {
'founds' : _("%(x_fmt_open)sResults overview:%(x_fmt_close)s Found at least %(x_nb_records)s records in %(x_nb_seconds)s seconds.") % \
{'x_fmt_open': '<strong>',
'x_fmt_close': '</strong>',
'x_nb_records': '<strong>' + self.tmpl_nice_number(results_final_nb_total, ln) + '</strong>',
'x_nb_seconds': '%.2f' % cpu_time}
}
# then print hits per collection:
out += """<script type="text/javascript">
$(document).ready(function() {
$('a.morecolls').click(function() {
$('.morecollslist').show();
$(this).hide();
$('.lesscolls').show();
return false;
});
$('a.lesscolls').click(function() {
$('.morecollslist').hide();
$(this).hide();
$('.morecolls').show();
return false;
});
});
</script>"""
count = 0
for coll in colls:
if results_final_nb.has_key(coll['code']) and results_final_nb[coll['code']] > 0:
count += 1
out += """
<span %(collclass)s><strong><a href="#%(coll)s">%(coll_name)s</a></strong>, <a href="#%(coll)s">%(number)s</a><br /></span>""" % \
{'collclass' : count > CFG_WEBSEARCH_RESULTS_OVERVIEW_MAX_COLLS_TO_PRINT and 'class="morecollslist" style="display:none"' or '',
'coll' : coll['id'],
'coll_name' : cgi.escape(coll['name']),
'number' : _("%s records found") % \
('<strong>' + self.tmpl_nice_number(results_final_nb[coll['code']], ln) + '</strong>')}
# the following is used for hosted collections that have timed out,
# i.e. for which we don't know the exact number of results yet.
elif results_final_nb.has_key(coll['code']) and results_final_nb[coll['code']] == -963:
count += 1
out += """
<span %(collclass)s><strong><a href="#%(coll)s">%(coll_name)s</a></strong><br /></span>""" % \
{'collclass' : count > CFG_WEBSEARCH_RESULTS_OVERVIEW_MAX_COLLS_TO_PRINT and 'class="morecollslist" style="display:none"' or '',
'coll' : coll['id'],
'coll_name' : cgi.escape(coll['name']),
'number' : _("%s records found") % \
('<strong>' + self.tmpl_nice_number(results_final_nb[coll['code']], ln) + '</strong>')}
if count > CFG_WEBSEARCH_RESULTS_OVERVIEW_MAX_COLLS_TO_PRINT:
out += """<a class="lesscolls" style="display:none; color:red; font-size:small" href="#"><i>%s</i></a>""" % _("Show less collections")
out += """<a class="morecolls" style="color:red; font-size:small" href="#"><i>%s</i></a>""" % _("Show all collections")
out += "</td></tr></tbody></table>"
return out
def tmpl_print_hosted_results(self, url_and_engine, ln, of=None, req=None, limit=CFG_EXTERNAL_COLLECTION_MAXRESULTS, display_body=True, display_add_to_basket = True):
"""Print results of a given search engine.
"""
if display_body:
_ = gettext_set_language(ln)
#url = url_and_engine[0]
engine = url_and_engine[1]
#name = _(engine.name)
db_id = get_collection_id(engine.name)
#base_url = engine.base_url
out = ""
results = engine.parser.parse_and_get_results(None, of=of, req=req, limit=limit, parseonly=True)
if len(results) != 0:
if of == 'hb':
out += """
<form action="%(siteurl)s/yourbaskets/add" method="post">
<input type="hidden" name="colid" value="%(col_db_id)s" />
<table>
""" % {
'siteurl' : CFG_SITE_URL,
'col_db_id' : db_id,
}
else:
if of == 'hb':
out += """
<table>
"""
for result in results:
out += result.html.replace('>Detailed record<', '>External record<').replace('>Similar records<', '>Similar external records<')
if len(results) != 0:
if of == 'hb':
out += """</table>
<br />"""
if display_add_to_basket:
out += """<input class="formbutton" type="submit" name="action" value="%(basket)s" />
""" % {'basket' : _("Add to basket")}
out += """</form>"""
else:
if of == 'hb':
out += """
</table>
"""
            # we have already checked above whether there are results, so this guard may be redundant:
if not results:
if of.startswith("h"):
out = _('No results found...') + '<br />'
return out
else:
return ""
def tmpl_print_searchresultbox(self, header, body):
"""print a nicely formatted box for search results """
#_ = gettext_set_language(ln)
# first find total number of hits:
out = '<table class="searchresultsbox"><thead><tr><th class="searchresultsboxheader">' + header + '</th></tr></thead><tbody><tr><td class="searchresultsboxbody">' + body + '</td></tr></tbody></table>'
return out
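    # Minimal usage sketch (header/body are pre-escaped HTML snippets):
    #   self.tmpl_print_searchresultbox('<strong>Papers</strong>', papers_html)
    # renders a one-cell table styled with the searchresultsbox* CSS classes.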
def tmpl_search_no_boolean_hits(self, ln, nearestterms):
"""No hits found, proposes alternative boolean queries
Parameters:
- 'ln' *string* - The language to display
- 'nearestterms' *array* - Parts of the interface to display, in the format:
- 'nearestterms[nbhits]' *int* - The resulting number of hits
- 'nearestterms[url_args]' *string* - The search parameters
- 'nearestterms[p]' *string* - The search terms
"""
# load the right message language
_ = gettext_set_language(ln)
out = _("Boolean query returned no hits. Please combine your search terms differently.")
out += '''<blockquote><table class="nearesttermsbox" cellpadding="0" cellspacing="0" border="0">'''
for term, hits, argd in nearestterms:
out += '''\
<tr>
<td class="nearesttermsboxbody" align="right">%(hits)s</td>
<td class="nearesttermsboxbody" width="15"> </td>
<td class="nearesttermsboxbody" align="left">
%(link)s
</td>
</tr>''' % {'hits' : hits,
'link': create_html_link(self.build_search_url(argd),
{}, cgi.escape(term),
{'class': "nearestterms"})}
out += """</table></blockquote>"""
return out
def tmpl_similar_author_names(self, authors, ln):
"""No hits found, proposes alternative boolean queries
Parameters:
- 'authors': a list of (name, hits) tuples
- 'ln' *string* - The language to display
"""
# load the right message language
_ = gettext_set_language(ln)
out = '''<a name="googlebox"></a>
<table class="googlebox"><tr><th colspan="2" class="googleboxheader">%(similar)s</th></tr>''' % {
'similar' : _("See also: similar author names")
}
for author, hits in authors:
out += '''\
<tr>
<td class="googleboxbody">%(nb)d</td>
<td class="googleboxbody">%(link)s</td>
</tr>''' % {'link': create_html_link(
self.build_search_url(p=author,
f='author',
ln=ln),
{}, cgi.escape(author), {'class':"google"}),
'nb' : hits}
out += """</table>"""
return out
def tmpl_print_record_detailed(self, recID, ln):
"""Displays a detailed on-the-fly record
Parameters:
- 'ln' *string* - The language to display
- 'recID' *int* - The record id
"""
# okay, need to construct a simple "Detailed record" format of our own:
out = "<p> "
# secondly, title:
titles = get_fieldvalues(recID, "245__a") or \
get_fieldvalues(recID, "111__a")
for title in titles:
out += "<p><center><big><strong>%s</strong></big></center></p>" % cgi.escape(title)
# thirdly, authors:
authors = get_fieldvalues(recID, "100__a") + get_fieldvalues(recID, "700__a")
if authors:
out += "<p><center>"
for author in authors:
out += '%s; ' % create_html_link(self.build_search_url(
ln=ln,
p=author,
f='author'),
{}, cgi.escape(author))
out += "</center></p>"
# fourthly, date of creation:
dates = get_fieldvalues(recID, "260__c")
for date in dates:
out += "<p><center><small>%s</small></center></p>" % date
# fifthly, abstract:
abstracts = get_fieldvalues(recID, "520__a")
for abstract in abstracts:
out += """<p style="margin-left: 15%%; width: 70%%">
<small><strong>Abstract:</strong> %s</small></p>""" % abstract
# fifthly bis, keywords:
        keywords = get_fieldvalues(recID, "6531_a")
        if len(keywords):
            # no %-formatting is applied to this literal, so single % signs are used:
            out += """<p style="margin-left: 15%; width: 70%">
            <small><strong>Keyword(s):</strong>"""
for keyword in keywords:
out += '%s; ' % create_html_link(
self.build_search_url(ln=ln,
p=keyword,
f='keyword'),
{}, cgi.escape(keyword))
out += '</small></p>'
# fifthly bis bis, published in:
prs_p = get_fieldvalues(recID, "909C4p")
prs_v = get_fieldvalues(recID, "909C4v")
prs_y = get_fieldvalues(recID, "909C4y")
prs_n = get_fieldvalues(recID, "909C4n")
prs_c = get_fieldvalues(recID, "909C4c")
for idx in range(0, len(prs_p)):
out += """<p style="margin-left: 15%%; width: 70%%">
<small><strong>Publ. in:</strong> %s""" % prs_p[idx]
if prs_v and prs_v[idx]:
out += """<strong>%s</strong>""" % prs_v[idx]
if prs_y and prs_y[idx]:
out += """(%s)""" % prs_y[idx]
if prs_n and prs_n[idx]:
out += """, no.%s""" % prs_n[idx]
if prs_c and prs_c[idx]:
out += """, p.%s""" % prs_c[idx]
out += """.</small></p>"""
# sixthly, fulltext link:
urls_z = get_fieldvalues(recID, "8564_z")
urls_u = get_fieldvalues(recID, "8564_u")
        # we separate the fulltext links and image links
        for idx, url_u in enumerate(urls_u):
            if url_u.endswith('.png'):
                continue
            link_text = "URL"
            try:
                if urls_z[idx]:
                    link_text = urls_z[idx]
            except IndexError:
                pass
            out += """<p style="margin-left: 15%%; width: 70%%">
            <small><strong>%s:</strong> <a href="%s">%s</a></small></p>""" % (link_text, url_u, url_u)
# print some white space at the end:
out += "<br /><br />"
return out
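    # For reference, the MARC fields consulted above: 245__a/111__a (title),
    # 100__a/700__a (authors), 260__c (date), 520__a (abstract),
    # 6531_a (keywords), 909C4* (publication info) and 8564_* (URLs).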
def tmpl_print_record_list_for_similarity_boxen(self, title, recID_score_list, ln=CFG_SITE_LANG):
"""Print list of records in the "hs" (HTML Similarity) format for similarity boxes.
RECID_SCORE_LIST is a list of (recID1, score1), (recID2, score2), etc.
"""
from invenio.search_engine import print_record, record_public_p
recID_score_list_to_be_printed = []
# firstly find 5 first public records to print:
nb_records_to_be_printed = 0
nb_records_seen = 0
while nb_records_to_be_printed < 5 and nb_records_seen < len(recID_score_list) and nb_records_seen < 50:
# looking through first 50 records only, picking first 5 public ones
(recID, score) = recID_score_list[nb_records_seen]
nb_records_seen += 1
if record_public_p(recID):
nb_records_to_be_printed += 1
recID_score_list_to_be_printed.append([recID, score])
# secondly print them:
out = '''
<table><tr>
<td>
<table><tr><td class="blocknote">%(title)s</td></tr></table>
</td>
</tr>
<tr>
<td><table>
''' % { 'title': cgi.escape(title) }
for recid, score in recID_score_list_to_be_printed:
out += '''
<tr><td><font class="rankscoreinfo"><a>(%(score)s) </a></font><small> %(info)s</small></td></tr>''' % {
'score': score,
'info' : print_record(recid, format="hs", ln=ln),
}
out += """</table></td></tr></table> """
return out
def tmpl_print_record_brief(self, ln, recID):
"""Displays a brief record on-the-fly
Parameters:
- 'ln' *string* - The language to display
- 'recID' *int* - The record id
"""
out = ""
# record 'recID' does not exist in format 'format', so print some default format:
# firstly, title:
titles = get_fieldvalues(recID, "245__a") or \
get_fieldvalues(recID, "111__a")
# secondly, authors:
authors = get_fieldvalues(recID, "100__a") + get_fieldvalues(recID, "700__a")
# thirdly, date of creation:
dates = get_fieldvalues(recID, "260__c")
        # thirdly bis, report numbers (from both the primary and additional tags):
        rns = get_fieldvalues(recID, "037__a") + get_fieldvalues(recID, "088__a")
# fourthly, beginning of abstract:
abstracts = get_fieldvalues(recID, "520__a")
# fifthly, fulltext link:
urls_z = get_fieldvalues(recID, "8564_z")
urls_u = get_fieldvalues(recID, "8564_u")
# get rid of images
images = []
non_image_urls_u = []
for url_u in urls_u:
if url_u.endswith('.png'):
images.append(url_u)
else:
non_image_urls_u.append(url_u)
## unAPI identifier
out = '<abbr class="unapi-id" title="%s"></abbr>\n' % recID
out += self.tmpl_record_body(
titles=titles,
authors=authors,
dates=dates,
rns=rns,
abstracts=abstracts,
urls_u=non_image_urls_u,
urls_z=urls_z,
ln=ln)
return out
def tmpl_print_record_brief_links(self, ln, recID, sf='', so='d', sp='', rm='', display_claim_link=False):
"""Displays links for brief record on-the-fly
Parameters:
- 'ln' *string* - The language to display
- 'recID' *int* - The record id
"""
from invenio.jinja2utils import render_template_to_string
tpl = """{%- from "websearch_helpers.html" import record_brief_links with context -%}
{{ record_brief_links(get_record(recid)) }}"""
return render_template_to_string(tpl, recid=recID, _from_string=True).encode('utf-8')
def tmpl_xml_rss_prologue(self, current_url=None,
previous_url=None, next_url=None,
first_url=None, last_url=None,
nb_found=None, jrec=None, rg=None, cc=None):
"""Creates XML RSS 2.0 prologue."""
title = CFG_SITE_NAME
description = '%s latest documents' % CFG_SITE_NAME
if cc and cc != CFG_SITE_NAME:
title += ': ' + cgi.escape(cc)
description += ' in ' + cgi.escape(cc)
out = """<rss version="2.0"
xmlns:media="http://search.yahoo.com/mrss/"
xmlns:atom="http://www.w3.org/2005/Atom"
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:dcterms="http://purl.org/dc/terms/"
xmlns:opensearch="http://a9.com/-/spec/opensearch/1.1/">
<channel>
<title>%(rss_title)s</title>
<link>%(siteurl)s</link>
<description>%(rss_description)s</description>
<language>%(sitelang)s</language>
<pubDate>%(timestamp)s</pubDate>
<category></category>
<generator>Invenio %(version)s</generator>
<webMaster>%(sitesupportemail)s</webMaster>
<ttl>%(timetolive)s</ttl>%(previous_link)s%(next_link)s%(current_link)s%(total_results)s%(start_index)s%(items_per_page)s
<image>
<url>%(siteurl)s/img/site_logo_rss.png</url>
<title>%(sitename)s</title>
<link>%(siteurl)s</link>
</image>
<atom:link rel="search" href="%(siteurl)s/opensearchdescription" type="application/opensearchdescription+xml" title="Content Search" />
<textInput>
<title>Search </title>
<description>Search this site:</description>
<name>p</name>
<link>%(siteurl)s/search</link>
</textInput>
""" % {'sitename': CFG_SITE_NAME,
'siteurl': CFG_SITE_URL,
'sitelang': CFG_SITE_LANG,
'search_syntax': self.tmpl_opensearch_rss_url_syntax,
'timestamp': time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime()),
'version': CFG_VERSION,
'sitesupportemail': CFG_SITE_SUPPORT_EMAIL,
'timetolive': CFG_WEBSEARCH_RSS_TTL,
'current_link': (current_url and \
'\n<atom:link rel="self" href="%s" />\n' % current_url) or '',
'previous_link': (previous_url and \
'\n<atom:link rel="previous" href="%s" />' % previous_url) or '',
'next_link': (next_url and \
'\n<atom:link rel="next" href="%s" />' % next_url) or '',
'first_link': (first_url and \
'\n<atom:link rel="first" href="%s" />' % first_url) or '',
'last_link': (last_url and \
'\n<atom:link rel="last" href="%s" />' % last_url) or '',
'total_results': (nb_found and \
'\n<opensearch:totalResults>%i</opensearch:totalResults>' % nb_found) or '',
'start_index': (jrec and \
'\n<opensearch:startIndex>%i</opensearch:startIndex>' % jrec) or '',
'items_per_page': (rg and \
'\n<opensearch:itemsPerPage>%i</opensearch:itemsPerPage>' % rg) or '',
'rss_title': title,
'rss_description': description
}
return out
def tmpl_xml_rss_epilogue(self):
"""Creates XML RSS 2.0 epilogue."""
out = """\
</channel>
</rss>\n"""
return out
def tmpl_xml_podcast_prologue(self, current_url=None,
previous_url=None, next_url=None,
first_url=None, last_url=None,
nb_found=None, jrec=None, rg=None, cc=None):
"""Creates XML podcast prologue."""
title = CFG_SITE_NAME
description = '%s latest documents' % CFG_SITE_NAME
if CFG_CERN_SITE:
title = 'CERN'
description = 'CERN latest documents'
if cc and cc != CFG_SITE_NAME:
title += ': ' + cgi.escape(cc)
description += ' in ' + cgi.escape(cc)
out = """<rss xmlns:itunes="http://www.itunes.com/dtds/podcast-1.0.dtd" version="2.0">
<channel>
<title>%(podcast_title)s</title>
<link>%(siteurl)s</link>
<description>%(podcast_description)s</description>
<language>%(sitelang)s</language>
<pubDate>%(timestamp)s</pubDate>
<category></category>
<generator>Invenio %(version)s</generator>
<webMaster>%(siteadminemail)s</webMaster>
<ttl>%(timetolive)s</ttl>%(previous_link)s%(next_link)s%(current_link)s
<image>
<url>%(siteurl)s/img/site_logo_rss.png</url>
<title>%(sitename)s</title>
<link>%(siteurl)s</link>
</image>
<itunes:owner>
<itunes:email>%(siteadminemail)s</itunes:email>
</itunes:owner>
""" % {'sitename': CFG_SITE_NAME,
'siteurl': CFG_SITE_URL,
'sitelang': CFG_SITE_LANG,
'siteadminemail': CFG_SITE_ADMIN_EMAIL,
'timestamp': time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime()),
'version': CFG_VERSION,
'sitesupportemail': CFG_SITE_SUPPORT_EMAIL,
'timetolive': CFG_WEBSEARCH_RSS_TTL,
'current_link': (current_url and \
'\n<atom:link rel="self" href="%s" />\n' % current_url) or '',
'previous_link': (previous_url and \
'\n<atom:link rel="previous" href="%s" />' % previous_url) or '',
'next_link': (next_url and \
'\n<atom:link rel="next" href="%s" />' % next_url) or '',
'first_link': (first_url and \
'\n<atom:link rel="first" href="%s" />' % first_url) or '',
'last_link': (last_url and \
'\n<atom:link rel="last" href="%s" />' % last_url) or '',
'podcast_title': title,
'podcast_description': description
}
return out
def tmpl_xml_podcast_epilogue(self):
"""Creates XML podcast epilogue."""
out = """\n</channel>
</rss>\n"""
return out
def tmpl_xml_nlm_prologue(self):
"""Creates XML NLM prologue."""
out = """<articles>\n"""
return out
def tmpl_xml_nlm_epilogue(self):
"""Creates XML NLM epilogue."""
out = """\n</articles>"""
return out
def tmpl_xml_refworks_prologue(self):
"""Creates XML RefWorks prologue."""
out = """<references>\n"""
return out
def tmpl_xml_refworks_epilogue(self):
"""Creates XML RefWorks epilogue."""
out = """\n</references>"""
return out
def tmpl_xml_endnote_prologue(self):
"""Creates XML EndNote prologue."""
out = """<xml>\n<records>\n"""
return out
def tmpl_xml_endnote_8x_prologue(self):
"""Creates XML EndNote prologue."""
out = """<records>\n"""
return out
def tmpl_xml_endnote_epilogue(self):
"""Creates XML EndNote epilogue."""
out = """\n</records>\n</xml>"""
return out
def tmpl_xml_endnote_8x_epilogue(self):
"""Creates XML EndNote epilogue."""
out = """\n</records>"""
return out
def tmpl_xml_marc_prologue(self):
"""Creates XML MARC prologue."""
out = """<collection xmlns="http://www.loc.gov/MARC21/slim">\n"""
return out
def tmpl_xml_marc_epilogue(self):
"""Creates XML MARC epilogue."""
out = """\n</collection>"""
return out
def tmpl_xml_mods_prologue(self):
"""Creates XML MODS prologue."""
out = """<modsCollection xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"\n
xsi:schemaLocation="http://www.loc.gov/mods/v3\n
http://www.loc.gov/standards/mods/v3/mods-3-3.xsd">\n"""
return out
def tmpl_xml_mods_epilogue(self):
"""Creates XML MODS epilogue."""
out = """\n</modsCollection>"""
return out
def tmpl_xml_default_prologue(self):
"""Creates XML default format prologue. (Sanity calls only.)"""
out = """<collection>\n"""
return out
def tmpl_xml_default_epilogue(self):
"""Creates XML default format epilogue. (Sanity calls only.)"""
out = """\n</collection>"""
return out
def tmpl_collection_not_found_page_title(self, colname, ln=CFG_SITE_LANG):
"""
        Create page title for cases when a nonexistent collection was requested.
"""
_ = gettext_set_language(ln)
out = _("Collection %s Not Found") % cgi.escape(colname)
return out
def tmpl_collection_not_found_page_body(self, colname, ln=CFG_SITE_LANG):
"""
        Create page body for cases when a nonexistent collection was requested.
"""
_ = gettext_set_language(ln)
out = """<h1>%(title)s</h1>
<p>%(sorry)s</p>
<p>%(you_may_want)s</p>
""" % { 'title': self.tmpl_collection_not_found_page_title(colname, ln),
'sorry': _("Sorry, collection %s does not seem to exist.") % \
('<strong>' + cgi.escape(colname) + '</strong>'),
'you_may_want': _("You may want to start browsing from %s.") % \
('<a href="' + CFG_SITE_URL + '?ln=' + ln + '">' + \
cgi.escape(CFG_SITE_NAME_INTL.get(ln, CFG_SITE_NAME)) + '</a>')}
return out
def tmpl_alert_rss_teaser_box_for_query(self, id_query, ln, display_email_alert_part=True):
"""Propose teaser for setting up this query as alert or RSS feed.
Parameters:
- 'id_query' *int* - ID of the query we make teaser for
- 'ln' *string* - The language to display
- 'display_email_alert_part' *bool* - whether to display email alert part
"""
# load the right message language
_ = gettext_set_language(ln)
# get query arguments:
res = run_sql("SELECT urlargs FROM query WHERE id=%s", (id_query,))
argd = {}
if res:
argd = cgi.parse_qs(res[0][0])
rssurl = self.build_rss_url(argd)
alerturl = CFG_SITE_URL + '/youralerts/input?ln=%s&idq=%s' % (ln, id_query)
if display_email_alert_part:
msg_alert = _("""Set up a personal %(x_url1_open)semail alert%(x_url1_close)s
or subscribe to the %(x_url2_open)sRSS feed%(x_url2_close)s.""") % \
{'x_url1_open': '<a href="%s"><img src="%s/img/mail-icon-12x8.gif" border="0" alt="" /></a> ' % (alerturl, CFG_SITE_URL) + ' <a class="google" href="%s">' % (alerturl),
'x_url1_close': '</a>',
'x_url2_open': '<a href="%s"><img src="%s/img/feed-icon-12x12.gif" border="0" alt="" /></a> ' % (rssurl, CFG_SITE_URL) + ' <a class="google" href="%s">' % rssurl,
'x_url2_close': '</a>', }
else:
msg_alert = _("""Subscribe to the %(x_url2_open)sRSS feed%(x_url2_close)s.""") % \
{'x_url2_open': '<a href="%s"><img src="%s/img/feed-icon-12x12.gif" border="0" alt="" /></a> ' % (rssurl, CFG_SITE_URL) + ' <a class="google" href="%s">' % rssurl,
'x_url2_close': '</a>', }
out = '''<a name="googlebox"></a>
<table class="googlebox"><tr><th class="googleboxheader">%(similar)s</th></tr>
<tr><td class="googleboxbody">%(msg_alert)s</td></tr>
</table>
''' % {
'similar' : _("Interested in being notified about new results for this query?"),
'msg_alert': msg_alert, }
return out
def tmpl_detailed_record_metadata(self, recID, ln, format,
content,
creationdate=None,
modificationdate=None):
"""Returns the main detailed page of a record
Parameters:
- 'recID' *int* - The ID of the printed record
- 'ln' *string* - The language to display
- 'format' *string* - The format in used to print the record
- 'content' *string* - The main content of the page
- 'creationdate' *string* - The creation date of the printed record
- 'modificationdate' *string* - The last modification date of the printed record
"""
_ = gettext_set_language(ln)
## unAPI identifier
out = '<abbr class="unapi-id" title="%s"></abbr>\n' % recID
out += content
return out
def tmpl_display_back_to_search(self, req, recID, ln):
"""
Displays next-hit/previous-hit/back-to-search links
on the detailed record pages in order to be able to quickly
flip between detailed record pages
@param req: Apache request object
@type req: Apache request object
@param recID: detailed record ID
@type recID: int
@param ln: language of the page
@type ln: string
@return: html output
@rtype: html
"""
_ = gettext_set_language(ln)
        # if this limit is set to zero, the prev/next/back links are disabled:
if not CFG_WEBSEARCH_PREV_NEXT_HIT_LIMIT:
return ''
        # the user may be viewing a specific record without having done any search before
wlq = session_param_get(req, 'websearch-last-query', '')
wlqh = session_param_get(req, 'websearch-last-query-hits')
out = '''<br/><br/><div align="right">'''
        # if the limit CFG_WEBSEARCH_PREV_NEXT_HIT_LIMIT was exceeded,
        # only the back-to-search link will be displayed
if wlqh is None:
out += '''<div style="padding-bottom:2px;padding-top:30px;"><span class="moreinfo" style="margin-right:10px;">
%(back)s </span></div></div>''' % \
{'back': create_html_link(wlq, {}, _("Back to search"), {'class': "moreinfo"})}
return out
# let's look for the recID's collection
record_found = False
for coll in wlqh:
if recID in coll:
record_found = True
coll_recID = coll
break
        # let's calculate the length of the recID's collection
if record_found:
recIDs = coll_recID[::-1]
totalrec = len(recIDs)
        # the record does not belong to the last search results (e.g. direct access)
else:
return ''
# if there is only one hit,
# to show only the "back to search" link
if totalrec == 1:
# to go back to the last search results page
out += '''<div style="padding-bottom:2px;padding-top:30px;"><span class="moreinfo" style="margin-right:10px;">
%(back)s </span></div></div>''' % \
{'back': create_html_link(wlq, {}, _("Back to search"), {'class': "moreinfo"})}
elif totalrec > 1:
pos = recIDs.index(recID)
numrec = pos + 1
if pos == 0:
recIDnext = recIDs[pos + 1]
recIDlast = recIDs[totalrec - 1]
# to display only next and last links
out += '''<div><span class="moreinfo" style="margin-right:10px;">
%(numrec)s %(totalrec)s %(next)s %(last)s </span></div> ''' % {
'numrec': _("%s of") % ('<strong>' + self.tmpl_nice_number(numrec, ln) + '</strong>'),
'totalrec': ("%s") % ('<strong>' + self.tmpl_nice_number(totalrec, ln) + '</strong>'),
'next': create_html_link(self.build_search_url(recid=recIDnext, ln=ln),
{}, ('<font size="4">›</font>'), {'class': "moreinfo"}),
'last': create_html_link(self.build_search_url(recid=recIDlast, ln=ln),
{}, ('<font size="4">»</font>'), {'class': "moreinfo"})}
elif pos == totalrec - 1:
recIDfirst = recIDs[0]
recIDprev = recIDs[pos - 1]
# to display only first and previous links
out += '''<div style="padding-top:30px;"><span class="moreinfo" style="margin-right:10px;">
%(first)s %(previous)s %(numrec)s %(totalrec)s</span></div>''' % {
'first': create_html_link(self.build_search_url(recid=recIDfirst, ln=ln),
{}, ('<font size="4">«</font>'), {'class': "moreinfo"}),
'previous': create_html_link(self.build_search_url(recid=recIDprev, ln=ln),
{}, ('<font size="4">‹</font>'), {'class': "moreinfo"}),
'numrec': _("%s of") % ('<strong>' + self.tmpl_nice_number(numrec, ln) + '</strong>'),
'totalrec': ("%s") % ('<strong>' + self.tmpl_nice_number(totalrec, ln) + '</strong>')}
else:
# to display all links
recIDfirst = recIDs[0]
recIDprev = recIDs[pos - 1]
recIDnext = recIDs[pos + 1]
recIDlast = recIDs[len(recIDs) - 1]
out += '''<div style="padding-top:30px;"><span class="moreinfo" style="margin-right:10px;">
%(first)s %(previous)s
%(numrec)s %(totalrec)s %(next)s %(last)s </span></div>''' % {
'first': create_html_link(self.build_search_url(recid=recIDfirst, ln=ln),
{}, ('<font size="4">«</font>'),
{'class': "moreinfo"}),
'previous': create_html_link(self.build_search_url(recid=recIDprev, ln=ln),
{}, ('<font size="4">‹</font>'), {'class': "moreinfo"}),
'numrec': _("%s of") % ('<strong>' + self.tmpl_nice_number(numrec, ln) + '</strong>'),
'totalrec': ("%s") % ('<strong>' + self.tmpl_nice_number(totalrec, ln) + '</strong>'),
'next': create_html_link(self.build_search_url(recid=recIDnext, ln=ln),
{}, ('<font size="4">›</font>'), {'class': "moreinfo"}),
'last': create_html_link(self.build_search_url(recid=recIDlast, ln=ln),
{}, ('<font size="4">»</font>'), {'class': "moreinfo"})}
out += '''<div style="padding-bottom:2px;"><span class="moreinfo" style="margin-right:10px;">
%(back)s </span></div></div>''' % {
'back': create_html_link(wlq, {}, _("Back to search"), {'class': "moreinfo"})}
return out
def tmpl_record_plots(self, recID, ln):
"""
Displays little tables containing the images and captions contained in the specified document.
Parameters:
- 'recID' *int* - The ID of the printed record
- 'ln' *string* - The language to display
"""
from invenio.search_engine import get_record
from invenio.bibrecord import field_get_subfield_values
from invenio.bibrecord import record_get_field_instances
_ = gettext_set_language(ln)
out = ''
rec = get_record(recID)
flds = record_get_field_instances(rec, '856', '4')
images = []
for fld in flds:
image = field_get_subfield_values(fld, 'u')
caption = field_get_subfield_values(fld, 'y')
if type(image) == list and len(image) > 0:
image = image[0]
else:
continue
if type(caption) == list and len(caption) > 0:
caption = caption[0]
else:
continue
            if not image.endswith('.png'):
                # not a plot image; skip it
                continue
if len(caption) >= 5:
images.append((int(caption[:5]), image, caption[5:]))
            else:
                # we don't have any idea of the order... just append it at the end
                images.append((99999, image, caption))
images = sorted(images, key=lambda x: x[0])
for (index, image, caption) in images:
# let's put everything in nice little subtables with the image
# next to the caption
out = out + '<table width="95%" style="display: inline;">' + \
'<tr><td width="66%"><a name="' + str(index) + '" ' + \
'href="' + image + '">' + \
'<img src="' + image + '" width="95%"/></a></td>' + \
'<td width="33%">' + caption + '</td></tr>' + \
'</table>'
out = out + '<br /><br />'
return out
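    # Ordering convention assumed above: each caption starts with a five-digit
    # sequence number (e.g. '00002Figure 2: ...'), which is stripped off and
    # used only to sort the plots.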
def tmpl_detailed_record_statistics(self, recID, ln,
downloadsimilarity,
downloadhistory, viewsimilarity):
"""Returns the statistics page of a record
Parameters:
- 'recID' *int* - The ID of the printed record
- 'ln' *string* - The language to display
- downloadsimilarity *string* - downloadsimilarity box
- downloadhistory *string* - downloadhistory box
- viewsimilarity *string* - viewsimilarity box
"""
# load the right message language
_ = gettext_set_language(ln)
out = ''
if CFG_BIBRANK_SHOW_DOWNLOAD_STATS and downloadsimilarity is not None:
similar = self.tmpl_print_record_list_for_similarity_boxen (
_("People who downloaded this document also downloaded:"), downloadsimilarity, ln)
out = '<table>'
out += '''
<tr><td>%(graph)s</td></tr>
<tr><td>%(similar)s</td></tr>
''' % { 'siteurl': CFG_SITE_URL, 'recid': recID, 'ln': ln,
'similar': similar, 'more': _("more"),
'graph': downloadsimilarity
}
out += '</table>'
out += '<br />'
if CFG_BIBRANK_SHOW_READING_STATS and viewsimilarity is not None:
out += self.tmpl_print_record_list_for_similarity_boxen (
_("People who viewed this page also viewed:"), viewsimilarity, ln)
if CFG_BIBRANK_SHOW_DOWNLOAD_GRAPHS and downloadhistory is not None:
out += downloadhistory + '<br />'
return out
def tmpl_detailed_record_citations_prologue(self, recID, ln):
"""Returns the prologue of the citations page of a record
Parameters:
- 'recID' *int* - The ID of the printed record
- 'ln' *string* - The language to display
"""
return '<table>'
def tmpl_detailed_record_citations_epilogue(self, recID, ln):
"""Returns the epilogue of the citations page of a record
Parameters:
- 'recID' *int* - The ID of the printed record
- 'ln' *string* - The language to display
"""
return '</table>'
def tmpl_detailed_record_citations_citing_list(self, recID, ln,
citinglist,
sf='', so='d', sp='', rm=''):
"""Returns the list of record citing this one
Parameters:
- 'recID' *int* - The ID of the printed record
- 'ln' *string* - The language to display
- citinglist *list* - a list of tuples [(x1,y1),(x2,y2),..] where x is doc id and y is number of citations
"""
# load the right message language
_ = gettext_set_language(ln)
out = ''
if CFG_BIBRANK_SHOW_CITATION_STATS and citinglist is not None:
similar = self.tmpl_print_record_list_for_similarity_boxen(
_("Cited by: %s records") % len (citinglist), citinglist, ln)
out += '''
<tr><td>
%(similar)s %(more)s
<br /><br />
</td></tr>''' % {
'more': create_html_link(
self.build_search_url(p='refersto:recid:%d' % recID, #XXXX
sf=sf,
so=so,
sp=sp,
rm=rm,
ln=ln),
{}, _("more")),
'similar': similar}
return out
def tmpl_detailed_record_citations_citation_history(self, recID, ln,
citationhistory):
"""Returns the citations history graph of this record
Parameters:
- 'recID' *int* - The ID of the printed record
- 'ln' *string* - The language to display
- citationhistory *string* - citationhistory box
"""
# load the right message language
_ = gettext_set_language(ln)
out = ''
if CFG_BIBRANK_SHOW_CITATION_GRAPHS and citationhistory is not None:
out = '<!--citation history--><tr><td>%s</td></tr>' % citationhistory
else:
out = "<!--not showing citation history. CFG_BIBRANK_SHOW_CITATION_GRAPHS:"
out += str(CFG_BIBRANK_SHOW_CITATION_GRAPHS) + " citationhistory "
if citationhistory:
out += str(len(citationhistory)) + "-->"
else:
out += "no citationhistory -->"
return out
def tmpl_detailed_record_citations_co_citing(self, recID, ln,
cociting):
"""Returns the list of cocited records
Parameters:
- 'recID' *int* - The ID of the printed record
- 'ln' *string* - The language to display
- cociting *string* - cociting box
"""
# load the right message language
_ = gettext_set_language(ln)
out = ''
if CFG_BIBRANK_SHOW_CITATION_STATS and cociting is not None:
similar = self.tmpl_print_record_list_for_similarity_boxen (
_("Co-cited with: %s records") % len (cociting), cociting, ln)
out = '''
<tr><td>
%(similar)s %(more)s
<br />
</td></tr>''' % { 'more': create_html_link(self.build_search_url(p='cocitedwith:%d' % recID, ln=ln),
{}, _("more")),
'similar': similar }
return out
def tmpl_detailed_record_citations_self_cited(self, recID, ln,
selfcited, citinglist):
"""Returns the list of self-citations for this record
Parameters:
- 'recID' *int* - The ID of the printed record
- 'ln' *string* - The language to display
- selfcited list - a list of self-citations for recID
"""
# load the right message language
_ = gettext_set_language(ln)
out = ''
if CFG_BIBRANK_SHOW_CITATION_GRAPHS and selfcited is not None:
            sc_scorelist = []  # a (recid, weight) score list for printing
for s in selfcited:
#copy weight from citations
weight = 0
for c in citinglist:
(crec, score) = c
if crec == s:
weight = score
tmp = [s, weight]
sc_scorelist.append(tmp)
scite = self.tmpl_print_record_list_for_similarity_boxen (
_(".. of which self-citations: %s records") % len (selfcited), sc_scorelist, ln)
out = '<tr><td>' + scite + '</td></tr>'
return out
def tmpl_author_information(self, req, pubs, authorname, num_downloads,
aff_pubdict, citedbylist, kwtuples, authors,
vtuples, names_dict, person_link,
bibauthorid_data, ln, return_html=False):
"""Prints stuff about the author given as authorname.
1. Author name + his/her institutes. Each institute I has a link
to papers where the auhtor has I as institute.
2. Publications, number: link to search by author.
3. Keywords
4. Author collabs
5. Publication venues like journals
The parameters are data structures needed to produce 1-6, as follows:
req - request
pubs - list of recids, probably the records that have the author as an author
authorname - evident
num_downloads - evident
aff_pubdict - a dictionary where keys are inst names and values lists of recordids
citedbylist - list of recs that cite pubs
kwtuples - keyword tuples like ('HIGGS BOSON',[3,4]) where 3 and 4 are recids
authors - a list of authors that have collaborated with authorname
names_dict - a dict of {name: frequency}
"""
from invenio.search_engine import perform_request_search
from operator import itemgetter
_ = gettext_set_language(ln)
ib_pubs = intbitset(pubs)
html = []
# construct an extended search as an interim solution for author id
# searches. Will build "(exactauthor:v1 OR exactauthor:v2)" strings
# extended_author_search_str = ""
# if bibauthorid_data["is_baid"]:
# if len(names_dict.keys()) > 1:
# extended_author_search_str = '('
#
# for name_index, name_query in enumerate(names_dict.keys()):
# if name_index > 0:
# extended_author_search_str += " OR "
#
# extended_author_search_str += 'exactauthor:"' + name_query + '"'
#
# if len(names_dict.keys()) > 1:
# extended_author_search_str += ')'
# rec_query = 'exactauthor:"' + authorname + '"'
#
# if bibauthorid_data["is_baid"] and extended_author_search_str:
# rec_query = extended_author_search_str
baid_query = ""
extended_author_search_str = ""
if 'is_baid' in bibauthorid_data and bibauthorid_data['is_baid']:
if bibauthorid_data["cid"]:
baid_query = 'author:%s' % bibauthorid_data["cid"]
elif bibauthorid_data["pid"] > -1:
baid_query = 'author:%s' % bibauthorid_data["pid"]
## todo: figure out if the author index is filled with pids/cids.
## if not: fall back to exactauthor search.
# if not index:
# baid_query = ""
if not baid_query:
baid_query = 'exactauthor:"' + authorname + '"'
if bibauthorid_data['is_baid']:
if len(names_dict.keys()) > 1:
extended_author_search_str = '('
for name_index, name_query in enumerate(names_dict.keys()):
if name_index > 0:
extended_author_search_str += " OR "
extended_author_search_str += 'exactauthor:"' + name_query + '"'
if len(names_dict.keys()) > 1:
extended_author_search_str += ')'
if bibauthorid_data['is_baid'] and extended_author_search_str:
baid_query = extended_author_search_str
baid_query = baid_query + " "
sorted_names_list = sorted(names_dict.iteritems(), key=itemgetter(1),
reverse=True)
# Prepare data for display
# construct names box
header = "<strong>" + _("Name variants") + "</strong>"
content = []
for name, frequency in sorted_names_list:
prquery = baid_query + ' exactauthor:"' + name + '"'
name_lnk = create_html_link(self.build_search_url(p=prquery),
{},
str(frequency),)
content.append("%s (%s)" % (name, name_lnk))
if not content:
content = [_("No Name Variants")]
names_box = self.tmpl_print_searchresultbox(header, "<br />\n".join(content))
# construct papers box
rec_query = baid_query
searchstr = create_html_link(self.build_search_url(p=rec_query),
{}, "<strong>" + "All papers (" + str(len(pubs)) + ")" + "</strong>",)
line1 = "<strong>" + _("Papers") + "</strong>"
line2 = searchstr
if CFG_BIBRANK_SHOW_DOWNLOAD_STATS and num_downloads:
line2 += " (" + _("downloaded") + " "
line2 += str(num_downloads) + " " + _("times") + ")"
if CFG_INSPIRE_SITE:
CFG_COLLS = ['Book',
'Conference',
'Introductory',
'Lectures',
'Preprint',
'Published',
'Review',
'Thesis']
else:
CFG_COLLS = ['Article',
'Book',
'Preprint', ]
collsd = {}
for coll in CFG_COLLS:
coll_papers = list(ib_pubs & intbitset(perform_request_search(f="collection", p=coll)))
if coll_papers:
collsd[coll] = coll_papers
colls = collsd.keys()
colls.sort(lambda x, y: cmp(len(collsd[y]), len(collsd[x]))) # sort by number of papers
for coll in colls:
rec_query = baid_query + 'collection:' + coll
line2 += "<br />" + create_html_link(self.build_search_url(p=rec_query),
{}, coll + " (" + str(len(collsd[coll])) + ")",)
if not pubs:
line2 = _("No Papers")
papers_box = self.tmpl_print_searchresultbox(line1, line2)
        # make an authoraff string that looks like CERN (1), Caltech (2), etc.
authoraff = ""
aff_pubdict_keys = aff_pubdict.keys()
aff_pubdict_keys.sort(lambda x, y: cmp(len(aff_pubdict[y]), len(aff_pubdict[x])))
if aff_pubdict_keys:
for a in aff_pubdict_keys:
print_a = a
if (print_a == ' '):
print_a = _("unknown affiliation")
if authoraff:
authoraff += '<br>'
authoraff += create_html_link(self.build_search_url(p=' or '.join(["%s" % x for x in aff_pubdict[a]]),
f='recid'),
{}, print_a + ' (' + str(len(aff_pubdict[a])) + ')',)
else:
authoraff = _("No Affiliations")
line1 = "<strong>" + _("Affiliations") + "</strong>"
line2 = authoraff
affiliations_box = self.tmpl_print_searchresultbox(line1, line2)
# print frequent keywords:
keywstr = ""
if (kwtuples):
for (kw, freq) in kwtuples:
if keywstr:
keywstr += '<br>'
rec_query = baid_query + 'keyword:"' + kw + '"'
searchstr = create_html_link(self.build_search_url(p=rec_query),
{}, kw + " (" + str(freq) + ")",)
keywstr = keywstr + " " + searchstr
else:
keywstr += _('No Keywords')
line1 = "<strong>" + _("Frequent keywords") + "</strong>"
line2 = keywstr
keyword_box = self.tmpl_print_searchresultbox(line1, line2)
header = "<strong>" + _("Frequent co-authors") + "</strong>"
content = []
sorted_coauthors = sorted(sorted(authors.iteritems(), key=itemgetter(0)),
key=itemgetter(1), reverse=True)
for name, frequency in sorted_coauthors:
rec_query = baid_query + 'exactauthor:"' + name + '"'
lnk = create_html_link(self.build_search_url(p=rec_query), {}, "%s (%s)" % (name, frequency),)
content.append("%s" % lnk)
if not content:
content = [_("No Frequent Co-authors")]
coauthor_box = self.tmpl_print_searchresultbox(header, "<br />\n".join(content))
pubs_to_papers_link = create_html_link(self.build_search_url(p=baid_query), {}, str(len(pubs)))
display_name = ""
try:
display_name = sorted_names_list[0][0]
except IndexError:
display_name = " "
headertext = ('<h1>%s <span style="font-size:50%%;">(%s papers)</span></h1>'
% (display_name, pubs_to_papers_link))
if return_html:
html.append(headertext)
else:
req.write(headertext)
#req.write("<h1>%s</h1>" % (authorname))
if person_link:
cmp_link = ('<div><a href="%s/person/claimstub?person=%s">%s</a></div>'
% (CFG_SITE_URL, person_link,
_("This is me. Verify my publication list.")))
if return_html:
html.append(cmp_link)
else:
req.write(cmp_link)
if return_html:
html.append("<table width=80%><tr valign=top><td>")
html.append(names_box)
html.append("<br />")
html.append(papers_box)
html.append("<br />")
html.append(keyword_box)
html.append("</td>")
html.append("<td> </td>")
html.append("<td>")
html.append(affiliations_box)
html.append("<br />")
html.append(coauthor_box)
html.append("</td></tr></table>")
else:
req.write("<table width=80%><tr valign=top><td>")
req.write(names_box)
req.write("<br />")
req.write(papers_box)
req.write("<br />")
req.write(keyword_box)
req.write("</td>")
req.write("<td> </td>")
req.write("<td>")
req.write(affiliations_box)
req.write("<br />")
req.write(coauthor_box)
req.write("</td></tr></table>")
# print citations:
rec_query = baid_query
if len(citedbylist):
line1 = "<strong>" + _("Citations:") + "</strong>"
line2 = ""
if not pubs:
line2 = _("No Citation Information available")
sr_box = self.tmpl_print_searchresultbox(line1, line2)
if return_html:
html.append(sr_box)
else:
req.write(sr_box)
if return_html:
return "\n".join(html)
# print frequent co-authors:
# collabstr = ""
# if (authors):
# for c in authors:
# c = c.strip()
# if collabstr:
# collabstr += '<br>'
# #do not add this person him/herself in the list
# cUP = c.upper()
# authornameUP = authorname.upper()
# if not cUP == authornameUP:
# commpubs = intbitset(pubs) & intbitset(perform_request_search(p="exactauthor:\"%s\" exactauthor:\"%s\"" % (authorname, c)))
# collabstr = collabstr + create_html_link(self.build_search_url(p='exactauthor:"' + authorname + '" exactauthor:"' + c + '"'),
# {}, c + " (" + str(len(commpubs)) + ")",)
# else: collabstr += 'None'
# banner = self.tmpl_print_searchresultbox("<strong>" + _("Frequent co-authors:") + "</strong>", collabstr)
# print frequently publishes in journals:
#if (vtuples):
# pubinfo = ""
# for t in vtuples:
# (journal, num) = t
# pubinfo += create_html_link(self.build_search_url(p='exactauthor:"' + authorname + '" ' + \
# 'journal:"' + journal + '"'),
# {}, journal + " ("+str(num)+")<br/>")
# banner = self.tmpl_print_searchresultbox("<strong>" + _("Frequently publishes in:") + "<strong>", pubinfo)
# req.write(banner)
def tmpl_detailed_record_references(self, recID, ln, content):
"""Returns the discussion page of a record
Parameters:
- 'recID' *int* - The ID of the printed record
- 'ln' *string* - The language to display
- 'content' *string* - The main content of the page
"""
# load the right message language
out = ''
if content is not None:
out += content
return out
def tmpl_citesummary_title(self, ln=CFG_SITE_LANG):
"""HTML citesummary title and breadcrumbs
A part of HCS format suite."""
return ''
def tmpl_citesummary2_title(self, searchpattern, ln=CFG_SITE_LANG):
"""HTML citesummary title and breadcrumbs
A part of HCS2 format suite."""
return ''
def tmpl_citesummary_back_link(self, searchpattern, ln=CFG_SITE_LANG):
"""HTML back to citesummary link
A part of HCS2 format suite."""
_ = gettext_set_language(ln)
out = ''
        params = {'ln': ln,
                  'p': quote(searchpattern),
                  'of': 'hcs'}
msg = _('Back to citesummary')
url = CFG_SITE_URL + '/search?' + \
'&'.join(['='.join(i) for i in params.iteritems()])
out += '<p><a href="%(url)s">%(msg)s</a></p>' % {'url': url, 'msg': msg}
return out
def tmpl_citesummary_more_links(self, searchpattern, ln=CFG_SITE_LANG):
_ = gettext_set_language(ln)
out = ''
msg = _('<p><a href="%(url)s">%(msg)s</a></p>')
params = {'ln': ln,
'p': quote(searchpattern),
'of': 'hcs2'}
url = CFG_SITE_URL + '/search?' + \
'&'.join(['='.join(i) for i in params.iteritems()])
out += msg % {'url': url,
'msg': _('Exclude self-citations')}
return out
def tmpl_citesummary_prologue(self, d_recids, collections, search_patterns,
searchfield, citable_recids, total_count,
ln=CFG_SITE_LANG):
"""HTML citesummary format, prologue. A part of HCS format suite."""
_ = gettext_set_language(ln)
out = """<table id="citesummary">
<tr>
<td>
<strong class="headline">%(msg_title)s</strong>
</td>""" % \
{'msg_title': _("Citation summary results"), }
for coll, dummy in collections:
out += '<td align="right">%s</td>' % _(coll)
out += '</tr>'
out += """<tr><td><strong>%(msg_recs)s</strong></td>""" % \
{'msg_recs': _("Total number of papers analyzed:"), }
for coll, colldef in collections:
link_url = CFG_SITE_URL + '/search?p='
if search_patterns[coll]:
p = search_patterns[coll]
if searchfield:
if " " in p:
p = searchfield + ':"' + p + '"'
else:
p = searchfield + ':' + p
link_url += quote(p)
if colldef:
link_url += '%20AND%20' + quote(colldef)
link_text = self.tmpl_nice_number(len(d_recids[coll]), ln)
out += '<td align="right"><a href="%s">%s</a></td>' % (link_url,
link_text)
out += '</tr>'
return out
def tmpl_citesummary_overview(self, collections, d_total_cites,
d_avg_cites, ln=CFG_SITE_LANG):
"""HTML citesummary format, overview. A part of HCS format suite."""
_ = gettext_set_language(ln)
out = """<tr><td><strong>%(msg_cites)s</strong></td>""" % \
{'msg_cites': _("Total number of citations:"), }
for coll, dummy in collections:
total_cites = d_total_cites[coll]
out += '<td align="right">%s</td>' % \
self.tmpl_nice_number(total_cites, ln)
out += '</tr>'
out += """<tr><td><strong>%(msg_avgcit)s</strong></td>""" % \
{'msg_avgcit': _("Average citations per paper:"), }
for coll, dummy in collections:
avg_cites = d_avg_cites[coll]
out += '<td align="right">%.1f</td>' % avg_cites
out += '</tr>'
return out
def tmpl_citesummary_minus_self_cites(self, d_total_cites, d_avg_cites,
ln=CFG_SITE_LANG):
"""HTML citesummary format, overview. A part of HCS format suite."""
_ = gettext_set_language(ln)
msg = _("Total number of citations excluding self-citations")
out = """<tr><td><strong>%(msg_cites)s</strong>""" % \
{'msg_cites': msg, }
# use ? help linking in the style of oai_repository_admin.py
msg = ' <small><small>[<a href="%s%s">?</a>]</small></small></td>'
out += msg % (CFG_SITE_URL,
'/help/citation-metrics#citesummary_self-cites')
for total_cites in d_total_cites.values():
out += '<td align="right">%s</td>' % \
self.tmpl_nice_number(total_cites, ln)
out += '</tr>'
msg = _("Average citations per paper excluding self-citations")
out += """<tr><td><strong>%(msg_avgcit)s</strong>""" % \
{'msg_avgcit': msg, }
# use ? help linking in the style of oai_repository_admin.py
msg = ' <small><small>[<a href="%s%s">?</a>]</small></small></td>'
out += msg % (CFG_SITE_URL,
'/help/citation-metrics#citesummary_self-cites')
for avg_cites in d_avg_cites.itervalues():
out += '<td align="right">%.1f</td>' % avg_cites
out += '</tr>'
return out
def tmpl_citesummary_footer(self):
return ''
def tmpl_citesummary_breakdown_header(self, ln=CFG_SITE_LANG):
_ = gettext_set_language(ln)
return """<tr><td><strong>%(msg_breakdown)s</strong></td></tr>""" % \
{'msg_breakdown': _("Breakdown of papers by citations:"), }
def tmpl_citesummary_breakdown_by_fame(self, d_cites, low, high, fame,
l_colls, searchpatterns,
searchfield, ln=CFG_SITE_LANG):
"""HTML citesummary format, breakdown by fame.
A part of HCS format suite."""
_ = gettext_set_language(ln)
out = """<tr><td>%(fame)s</td>""" % \
{'fame': _(fame), }
for coll, colldef in l_colls:
link_url = CFG_SITE_URL + '/search?p='
if searchpatterns.get(coll, None):
p = searchpatterns.get(coll, None)
if searchfield:
if " " in p:
p = searchfield + ':"' + p + '"'
else:
p = searchfield + ':' + p
link_url += quote(p) + '%20AND%20'
if colldef:
link_url += quote(colldef) + '%20AND%20'
if low == 0 and high == 0:
link_url += quote('cited:0')
else:
link_url += quote('cited:%i->%i' % (low, high))
link_text = self.tmpl_nice_number(d_cites[coll], ln)
out += '<td align="right"><a href="%s">%s</a></td>' % (link_url,
link_text)
out += '</tr>'
return out
def tmpl_citesummary_h_index(self, collections,
d_h_factors, ln=CFG_SITE_LANG):
"""HTML citesummary format, h factor output. A part of the HCS suite."""
_ = gettext_set_language(ln)
out = "<tr><td></td></tr><tr><td><strong>%(msg_metrics)s</strong> <small><small>[<a href=\"%(help_url)s\">?</a>]</small></small></td></tr>" % \
{'msg_metrics': _("Citation metrics"),
'help_url': CFG_SITE_URL + '/help/citation-metrics', }
out += '<tr><td>h-index'
# use ? help linking in the style of oai_repository_admin.py
msg = ' <small><small>[<a href="%s%s">?</a>]</small></small></td>'
out += msg % (CFG_SITE_URL,
'/help/citation-metrics#citesummary_h-index')
for coll, dummy in collections:
h_factors = d_h_factors[coll]
out += '<td align="right">%s</td>' % \
self.tmpl_nice_number(h_factors, ln)
out += '</tr>'
return out
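    # Reminder: the h-index shown above is the largest number h such that the
    # collection contains h papers each cited at least h times.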
def tmpl_citesummary_epilogue(self, ln=CFG_SITE_LANG):
"""HTML citesummary format, epilogue. A part of HCS format suite."""
out = "</table>"
return out
def tmpl_unapi(self, formats, identifier=None):
"""
Provide a list of object format available from the unAPI service
for the object identified by IDENTIFIER
"""
out = '<?xml version="1.0" encoding="UTF-8" ?>\n'
if identifier:
out += '<formats id="%i">\n' % (identifier)
else:
out += "<formats>\n"
for format_name, format_type in formats.iteritems():
docs = ''
if format_name == 'xn':
docs = 'http://www.nlm.nih.gov/databases/dtd/'
format_type = 'application/xml'
format_name = 'nlm'
elif format_name == 'xm':
docs = 'http://www.loc.gov/standards/marcxml/schema/MARC21slim.xsd'
format_type = 'application/xml'
format_name = 'marcxml'
elif format_name == 'xr':
format_type = 'application/rss+xml'
docs = 'http://www.rssboard.org/rss-2-0/'
elif format_name == 'xw':
format_type = 'application/xml'
docs = 'http://www.refworks.com/RefWorks/help/RefWorks_Tagged_Format.htm'
elif format_name == 'xoaidc':
format_type = 'application/xml'
docs = 'http://www.openarchives.org/OAI/2.0/oai_dc.xsd'
elif format_name == 'xe':
format_type = 'application/xml'
docs = 'http://www.endnote.com/support/'
format_name = 'endnote'
elif format_name == 'xd':
format_type = 'application/xml'
docs = 'http://dublincore.org/schemas/'
format_name = 'dc'
elif format_name == 'xo':
format_type = 'application/xml'
docs = 'http://www.loc.gov/standards/mods/v3/mods-3-3.xsd'
format_name = 'mods'
if docs:
out += '<format name="%s" type="%s" docs="%s" />\n' % (xml_escape(format_name), xml_escape(format_type), xml_escape(docs))
else:
out += '<format name="%s" type="%s" />\n' % (xml_escape(format_name), xml_escape(format_type))
out += "</formats>"
return out
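    # Example of the generated document (a sketch; the <format> element is
    # wrapped here for readability) for formats={'xm': 'text/xml'}:
    #   <?xml version="1.0" encoding="UTF-8" ?>
    #   <formats>
    #   <format name="marcxml" type="application/xml"
    #           docs="http://www.loc.gov/standards/marcxml/schema/MARC21slim.xsd" />
    #   </formats>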
| EUDAT-B2SHARE/invenio-old | modules/websearch/lib/websearch_templates.py | Python | gpl-2.0 | 194,109 |
# supaxoaxonic.py ---
#
# Filename: supaxoaxonic.py
# Description: Superficial Layer 2/3 axoaxonic cells
# Author: subhasis ray
# Maintainer:
# Created: Tue Oct 6 16:52:28 2009 (+0530)
# Version:
# Last-Updated: Fri Oct 21 17:06:19 2011 (+0530)
# By: Subhasis Ray
# Update #: 29
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
#
#
#
# Change log:
#
#
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
#
#
# Code:
from datetime import datetime
import config
import trbutil
import moose
from cell import *
from capool import CaPool
class SupAxoaxonic(TraubCell):
chan_params = {
'ENa': 50e-3,
'EK': -100e-3,
'ECa': 125e-3,
'EAR': -40e-3,
'EGABA': -75e-3,
'X_AR': 0.0,
'TauCa': 20e-3
}
ca_dep_chans = ['KC_FAST']
num_comp = 59
presyn = 59
proto_file = 'SupAxoaxonic.p'
prototype = TraubCell.read_proto(proto_file, 'SupAxoaxonic', chan_params)
def __init__(self, *args):
# start = datetime.now()
TraubCell.__init__(self, *args)
caPool = moose.CaConc(self.soma.path + '/CaPool')
caPool.tau = 50e-3
# end = datetime.now()
# delta = end - start
# config.BENCHMARK_LOGGER.info('created cell in: %g s' % (delta.days * 86400 + delta.seconds + delta.microseconds * 1e-6))
def _topology(self):
raise Exception, 'Deprecated'
def _setup_passive(self):
raise Exception, 'Deprecated'
def _setup_channels(self):
raise Exception, 'Deprecated'
@classmethod
def test_single_cell(cls):
"""Simulates a single superficial axo-axonic cell and plots
the Vm and [Ca2+]"""
config.LOGGER.info("/**************************************************************************")
config.LOGGER.info(" *")
config.LOGGER.info(" * Simulating a single cell: %s" % (cls.__name__))
config.LOGGER.info(" *")
config.LOGGER.info(" **************************************************************************/")
sim = Simulation(cls.__name__)
mycell = SupAxoaxonic(SupAxoaxonic.prototype, sim.model.path + "/SupAxoaxonic")
print 'Created cell:', mycell.path
vm_table = mycell.comp[mycell.presyn].insertRecorder('Vm_supaxax', 'Vm', sim.data)
pulsegen = mycell.soma.insertPulseGen('pulsegen', sim.model, firstLevel=3e-10, firstDelay=50e-3, firstWidth=50e-3)
# pulsegen1 = mycell.soma.insertPulseGen('pulsegen1', sim.model, firstLevel=3e-7, firstDelay=150e-3, firstWidth=10e-3)
sim.schedule()
if mycell.has_cycle():
print "WARNING!! CYCLE PRESENT IN CICRUIT."
t1 = datetime.now()
sim.run(200e-3)
t2 = datetime.now()
delta = t2 - t1
print 'simulation time: ', delta.seconds + 1e-6 * delta.microseconds
sim.dump_data('data')
if config.has_pylab:
mus_vm = config.pylab.array(vm_table) * 1e3
mus_t = linspace(0, sim.simtime * 1e3, len(mus_vm))
try:
nrn_vm = config.pylab.loadtxt('../nrn/mydata/Vm_deepLTS.plot')
nrn_t = nrn_vm[:, 0]
nrn_vm = nrn_vm[:, 1]
config.pylab.plot(nrn_t, nrn_vm, 'y-', label='nrn vm')
except IOError:
print 'NEURON Data not available.'
config.pylab.plot(mus_t, mus_vm, 'g-.', label='mus vm')
config.pylab.legend()
config.pylab.show()
# test main --
from simulation import Simulation
import pylab
from subprocess import call
if __name__ == "__main__":
SupAxoaxonic.test_single_cell()
#
# supaxoaxonic.py ends here
| BhallaLab/moose-thalamocortical | DEMOS/pymoose/traub2005/py/supaxoaxonic.py | Python | lgpl-2.1 | 4,383 |
# -*- coding: utf-8 -*-
from crispy_forms.compatibility import integer_types, string_types
from crispy_forms.exceptions import DynamicError
from crispy_forms.layout import Fieldset, MultiField
from crispy_forms.bootstrap import Container
class LayoutSlice(object):
# List of layout objects that need args passed first before fields
args_first = (Fieldset, MultiField, Container)
def __init__(self, layout, key):
self.layout = layout
if isinstance(key, integer_types):
self.slice = slice(key, key+1, 1)
else:
self.slice = key
def wrapped_object(self, LayoutClass, fields, *args, **kwargs):
"""
Returns a layout object of type `LayoutClass` with `args` and `kwargs` that
wraps `fields` inside.
"""
if args:
if isinstance(fields, list):
                fields = tuple(fields)
else:
fields = (fields,)
if LayoutClass in self.args_first:
arguments = args + fields
else:
arguments = fields + args
return LayoutClass(*arguments, **kwargs)
else:
if isinstance(fields, list):
return LayoutClass(*fields, **kwargs)
else:
return LayoutClass(fields, **kwargs)
def pre_map(self, function):
"""
Iterates over layout objects pointed in `self.slice` executing `function` on them.
It passes `function` penultimate layout object and the position where to find last one
"""
if isinstance(self.slice, slice):
for i in range(*self.slice.indices(len(self.layout.fields))):
function(self.layout, i)
elif isinstance(self.slice, list):
# A list of pointers Ex: [[[0, 0], 'div'], [[0, 2, 3], 'field_name']]
for pointer in self.slice:
position = pointer[0]
# If it's pointing first level
if len(position) == 1:
function(self.layout, position[-1])
else:
layout_object = self.layout.fields[position[0]]
for i in position[1:-1]:
layout_object = layout_object.fields[i]
try:
function(layout_object, position[-1])
except IndexError:
# We could avoid this exception, recalculating pointers.
# However this case is most of the time an undesired behavior
raise DynamicError("Trying to wrap a field within an already wrapped field, \
recheck your filter or layout")
def wrap(self, LayoutClass, *args, **kwargs):
"""
Wraps every layout object pointed in `self.slice` under a `LayoutClass` instance with
`args` and `kwargs` passed.
"""
def wrap_object(layout_object, j):
layout_object.fields[j] = self.wrapped_object(
LayoutClass, layout_object.fields[j], *args, **kwargs
)
self.pre_map(wrap_object)
def wrap_once(self, LayoutClass, *args, **kwargs):
"""
Wraps every layout object pointed in `self.slice` under a `LayoutClass` instance with
`args` and `kwargs` passed, unless layout object's parent is already a subclass of
`LayoutClass`.
"""
def wrap_object_once(layout_object, j):
if not isinstance(layout_object, LayoutClass):
layout_object.fields[j] = self.wrapped_object(
LayoutClass, layout_object.fields[j], *args, **kwargs
)
self.pre_map(wrap_object_once)
def wrap_together(self, LayoutClass, *args, **kwargs):
"""
Wraps all layout objects pointed in `self.slice` together under a `LayoutClass`
instance with `args` and `kwargs` passed.
"""
if isinstance(self.slice, slice):
# The start of the slice is replaced
start = self.slice.start if self.slice.start is not None else 0
self.layout.fields[start] = self.wrapped_object(
LayoutClass, self.layout.fields[self.slice], *args, **kwargs
)
# The rest of places of the slice are removed, as they are included in the previous
for i in reversed(range(*self.slice.indices(len(self.layout.fields)))):
if i != start:
del self.layout.fields[i]
elif isinstance(self.slice, list):
raise DynamicError("wrap_together doesn't work with filter, only with [] operator")
def map(self, function):
"""
Iterates over layout objects pointed in `self.slice` executing `function` on them
It passes `function` last layout object
"""
if isinstance(self.slice, slice):
for i in range(*self.slice.indices(len(self.layout.fields))):
function(self.layout.fields[i])
elif isinstance(self.slice, list):
# A list of pointers Ex: [[[0, 0], 'div'], [[0, 2, 3], 'field_name']]
for pointer in self.slice:
position = pointer[0]
layout_object = self.layout.fields[position[0]]
for i in position[1:]:
previous_layout_object = layout_object
layout_object = layout_object.fields[i]
# If update_attrs is applied to a string, we call to its wrapping layout object
if (
function.__name__ == 'update_attrs'
and isinstance(layout_object, string_types)
):
function(previous_layout_object)
else:
function(layout_object)
def update_attributes(self, **kwargs):
"""
Updates attributes of every layout object pointed in `self.slice` using kwargs
"""
def update_attrs(layout_object):
if hasattr(layout_object, 'attrs'):
layout_object.attrs.update(kwargs)
self.map(update_attrs)
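# --- Usage sketch (not part of the original module) ---
# A hedged illustration of how LayoutSlice is normally reached: indexing a
# crispy_forms Layout returns a LayoutSlice, so the wrap helpers above apply
# to it. The field names below are placeholders.
def _example_layout_slice():
    from crispy_forms.layout import Layout, Div
    layout = Layout('email', 'password')
    # wrap each of the first two fields in its own Div
    layout[0:2].wrap(Div, css_class='field-box')
    # wrap both fields together inside a single Div (fresh layout, since the
    # previous call changed the positions of the wrapped fields)
    other = Layout('email', 'password')
    other[0:2].wrap_together(Div, css_class='row')
    return layout, other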
| rootAir/rootAir | static/crispy_forms/layout_slice.py | Python | gpl-2.0 | 6,173 |
CMDNAME_ATTR = '__cmdname__'
ALIASES_ATTR = '__aliases__'
def command(name=None, aliases=()):
""" Decorator to register a command handler in a Plugin. """
fn = None
if callable(name):
fn = name
name = None
def _command(fn):
setattr(fn, CMDNAME_ATTR, fn.__name__ if name is None else name)
setattr(fn, ALIASES_ATTR, aliases)
return fn
if fn:
return _command(fn)
return _command
class PluginMeta(type):
def __new__(cls, name, bases, attrs):
plugin = type.__new__(cls, name, bases, attrs)
if bases == (object,):
# Skip metaclass magic for Plugin base class
return plugin
if plugin.name is None:
setattr(plugin, 'name', name)
commands = []
for name, value in attrs.iteritems():
if callable(value) and hasattr(value, CMDNAME_ATTR):
cmdname = getattr(value, CMDNAME_ATTR)
aliases = getattr(value, ALIASES_ATTR, ())
commands.append((cmdname, value, aliases))
plugin._commands = commands
return plugin
class Plugin(object):
__metaclass__ = PluginMeta
name = None # Populated with the class name if None
private = False # Whether the plug-in should be hidden in !list
_commands = None
def __init__(self, yakbot, irc):
self.yakbot = yakbot
self.irc = irc
def __hash__(self):
return hash(self.name)
def on_unload(self):
pass
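# --- Usage sketch (not part of the original module) ---
# A hedged illustration of the decorator/metaclass pair above: PluginMeta
# collects every @command-decorated method into Plugin._commands. The
# EchoPlugin class and its method body are hypothetical.
class EchoPlugin(Plugin):
    @command('echo', aliases=('say',))
    def echo(self, *args):
        return ' '.join(args)
# EchoPlugin._commands is now [('echo', <function echo>, ('say',))]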
| theY4Kman/yakbot | yakbot/ext.py | Python | mit | 1,531 |
import sys
def read_input():
ings = []
for s in sys.stdin.readlines():
s = s.replace(',', '')
[ing, _, capacity, _, durability, _, flavor, _, texture, _, calories] = s.split()
ings += [list(map(int, [capacity, durability, flavor, texture, calories]))]
return ings
def solve(ings, pred):
def f(ings, rem, recipe=[]):
if ings == []:
res = [0, 0, 0, 0, 0]
for r in recipe:
for i in range(0, len(res)):
res[i] += r[i]
for i in res:
if i < 0:
return 0
a = 1
for i in res[:-1]:
a *= i
if pred(res[-1]):
return a
else:
return -1
best = 0
if len(ings) == 1:
poss = [rem]
else:
poss = range(0, rem + 1)
for i in poss:
tmp = list(map(lambda x: i * x, ings[0]))
best = max(best, f(ings[1:], rem - i, recipe + [tmp]))
return best
    return f(ings, 100)
ings = read_input()
print(solve(ings, lambda calories: True))
print(solve(ings, lambda calories: calories == 500))
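# Worked example (the puzzle's published sample, reproduced from memory --
# treat the numbers as illustrative):
# Butterscotch: capacity -1, durability -2, flavor 6, texture 3, calories 8
# Cinnamon: capacity 2, durability 3, flavor -2, texture -1, calories 3
# 44 tsp butterscotch + 56 tsp cinnamon gives
# (44*-1 + 56*2) * (44*-2 + 56*3) * (44*6 + 56*-2) * (44*3 + 56*-1)
# = 68 * 80 * 152 * 76 = 62842880, the best unconstrained score;
# with the 500-calorie constraint the best score drops to 57600000
# (40 tsp butterscotch + 60 tsp cinnamon).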
| plilja/adventofcode | 2015/day15/day15.py | Python | gpl-3.0 | 1,207 |
# -*- encoding: utf-8 -*-
"""
@author: miko
"""
from numpy import *
class cluster_node:
def __init__(self,vec,left=None,right=None,distance=0.0,id=None):
self.left=left
self.right=right
self.vec=vec
self.id=id
self.distance=distance
def L2dist(v1,v2):
    return sqrt(sum((v1 - v2) ** 2))
def L1dist(v1,v2):
return sum(abs(v1-v2))
def hcluster(features,distance=L2dist):
distances={}
currentClusterId=-1
    clust=[cluster_node(array(features[i]),id=i) for i in range(len(features))]
while(len(clust)>1):
lowesPair=(0,1)
closest=distance(clust[0].vec,clust[1].vec)
for i in range(len(clust)):
for j in range(i+1,len(clust)):
if((clust[i].id,clust[j].id) not in distances):
distances[(clust[i].id,clust[j].id)]=distance(clust[i].vec,clust[j].vec)
d=distances[(clust[i].id,clust[j].id)]
if d<closest:
closest=d
lowesPair=(i,j)
        # the merged cluster sits at the midpoint of the two merged vectors
        mergevec=(clust[lowesPair[0]].vec+clust[lowesPair[1]].vec)/2.0
newcluster=cluster_node(array(mergevec),left=clust[lowesPair[0]],right=clust[lowesPair[1]],distance=closest,id=currentClusterId)
currentClusterId-=1
        # delete the higher index first so the lower index stays valid
        del clust[lowesPair[1]]
        del clust[lowesPair[0]]
clust.append(newcluster)
return clust[0]
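# --- Usage sketch (not part of the original file) ---
# A hedged demo of the corrected functions: cluster four 2-D points and cut
# the dendrogram at distance 1.0. extract_cluster is defined below; the name
# is only resolved when this function is called.
def demo_hcluster():
    points = [[1.0, 1.0], [1.2, 1.1], [8.0, 8.0], [8.1, 7.9]]
    tree = hcluster(points, distance=L2dist)
    return extract_cluster(tree, 1.0)  # expect two clusters of two leaves each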
def extract_cluster(clust,dist):
if clust.distance<dist:
return [clust]
else:
cl=[]
cr=[]
if clust.left!=None:
cl=extract_cluster(clust.left, dist)
if clust.right!=None:
cr=extract_cluster(clust.right, dist)
        return cl+cr
 | sinomiko/project | IdeaProjects/MachineLearning/HierarchicalClustering.py | Python | bsd-3-clause | 1,710 |
# Copyright (C) 2013,2017 Ian Harry, Duncan Brown
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
This module provides a wrapper to the ConfigParser utilities for pycbc
workflow construction. This module is described in the page here:
https://ldas-jobs.ligo.caltech.edu/~cbc/docs/pycbc/ahope/initialization_inifile.html
"""
import os
import re
import stat
import shutil
import time
import logging
import urlparse
import cookielib
import requests
import distutils.spawn
import ConfigParser
import itertools
import pycbc_glue.pipeline
from cookielib import (_warn_unhandled_exception, LoadError, Cookie)
from bs4 import BeautifulSoup
def _really_load(self, f, filename, ignore_discard, ignore_expires):
"""
This function is required to monkey patch MozillaCookieJar's _really_load
function which does not understand the curl format cookie file created
by ecp-cookie-init. It patches the code so that #HttpOnly_ get loaded.
https://bugs.python.org/issue2190
https://bugs.python.org/file37625/httponly.patch
"""
now = time.time()
magic = f.readline()
if not re.search(self.magic_re, magic):
f.close()
raise LoadError(
"%r does not look like a Netscape format cookies file" %
filename)
try:
while 1:
line = f.readline()
if line == "": break
# last field may be absent, so keep any trailing tab
if line.endswith("\n"): line = line[:-1]
sline = line.strip()
# support HttpOnly cookies (as stored by curl or old Firefox).
if sline.startswith("#HttpOnly_"):
line = sline[10:]
# skip comments and blank lines XXX what is $ for?
elif (sline.startswith(("#", "$")) or sline == ""):
continue
domain, domain_specified, path, secure, expires, name, value = \
line.split("\t")
secure = (secure == "TRUE")
domain_specified = (domain_specified == "TRUE")
if name == "":
# cookies.txt regards 'Set-Cookie: foo' as a cookie
# with no name, whereas cookielib regards it as a
# cookie with no value.
name = value
value = None
initial_dot = domain.startswith(".")
assert domain_specified == initial_dot
discard = False
if expires == "":
expires = None
discard = True
# assume path_specified is false
c = Cookie(0, name, value,
None, False,
domain, domain_specified, initial_dot,
path, False,
secure,
expires,
discard,
None,
None,
{})
if not ignore_discard and c.discard:
continue
if not ignore_expires and c.is_expired(now):
continue
self.set_cookie(c)
except IOError:
raise
except Exception:
_warn_unhandled_exception()
raise LoadError("invalid Netscape format cookies file %r: %r" %
(filename, line))
# Now monkey patch the code
cookielib.MozillaCookieJar._really_load = _really_load # noqa
ecp_cookie_error = """The attempt to download the file at
{}
was redirected to the git.ligo.org sign-in page. This means that you likely
forgot to initialize your ECP cookie or that your LIGO.ORG credentials are
otherwise invalid. Create a valid ECP cookie for git.ligo.org by running
ecp-cookie-init LIGO.ORG https://git.ligo.org/users/auth/shibboleth/callback albert.einstein
before attempting to download files from git.ligo.org.
"""
def resolve_url(url, directory=None, permissions=None):
"""
Resolves a URL to a local file, and returns the path to
that file.
"""
u = urlparse.urlparse(url)
# create the name of the destination file
if directory is None:
directory = os.getcwd()
filename = os.path.join(directory,os.path.basename(u.path))
if u.scheme == '' or u.scheme == 'file':
# for regular files, make a direct copy
if os.path.isfile(u.path):
if os.path.isfile(filename):
# check to see if src and dest are the same file
src_inode = os.stat(u.path)[stat.ST_INO]
dst_inode = os.stat(filename)[stat.ST_INO]
if src_inode != dst_inode:
shutil.copy(u.path, filename)
else:
shutil.copy(u.path, filename)
else:
errmsg = "Cannot open file %s from URL %s" % (u.path, url)
raise ValueError(errmsg)
elif u.scheme == 'http' or u.scheme == 'https':
s = requests.Session()
s.mount(str(u.scheme)+'://',
requests.adapters.HTTPAdapter(max_retries=5))
# look for an ecp cookie file and load the cookies
cookie_dict = {}
ecp_file = '/tmp/ecpcookie.u%d' % os.getuid()
if os.path.isfile(ecp_file):
cj = cookielib.MozillaCookieJar()
cj.load(ecp_file, ignore_discard=True, ignore_expires=True)
else:
cj = []
for c in cj:
if c.domain == u.netloc:
# load cookies for this server
cookie_dict[c.name] = c.value
elif u.netloc == "code.pycbc.phy.syr.edu" and \
c.domain == "git.ligo.org":
# handle the redirect for code.pycbc to git.ligo.org
cookie_dict[c.name] = c.value
r = s.get(url, cookies=cookie_dict, allow_redirects=True)
if r.status_code != 200:
errmsg = "Unable to download %s\nError code = %d" % (url,
r.status_code)
raise ValueError(errmsg)
# if we are downloading from git.ligo.org, check that we
# did not get redirected to the sign-in page
if u.netloc == 'git.ligo.org' or u.netloc == 'code.pycbc.phy.syr.edu':
soup = BeautifulSoup(r.content, 'html.parser')
desc = soup.findAll(attrs={"property":"og:url"})
if len(desc) and \
desc[0]['content'] == 'https://git.ligo.org/users/sign_in':
raise ValueError(ecp_cookie_error.format(url))
output_fp = open(filename, 'w')
output_fp.write(r.content)
output_fp.close()
else:
# TODO: We could support other schemes such as gsiftp by
# calling out to globus-url-copy
errmsg = "Unknown URL scheme: %s\n" % (u.scheme)
errmsg += "Currently supported are: file, http, and https."
raise ValueError(errmsg)
if not os.path.isfile(filename):
errmsg = "Error trying to create file %s from %s" % (filename,url)
raise ValueError(errmsg)
if permissions:
if os.access(filename, os.W_OK):
os.chmod(filename, permissions)
else:
# check that the file has at least the permissions requested
s = os.stat(filename)[stat.ST_MODE]
if (s & permissions) != permissions:
errmsg = "Could not change permissions on %s (read-only)" % url
raise ValueError(errmsg)
return filename
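# --- Usage sketch (not part of the original module) ---
# A hedged illustration of resolve_url: copy a local file into the current
# working directory, then fetch one over https into /tmp. The paths and URL
# are placeholders.
def _example_resolve_url():
    local_copy = resolve_url('file:///tmp/example.ini')
    fetched = resolve_url('https://example.com/example.ini',
                          directory='/tmp', permissions=stat.S_IRUSR)
    return local_copy, fetched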
def add_workflow_command_line_group(parser):
"""
The standard way of initializing a ConfigParser object in workflow will be
to do it from the command line. This is done by giving a
--local-config-files filea.ini fileb.ini filec.ini
command. You can also set config file override commands on the command
line. This will be most useful when setting (for example) start and
end times, or active ifos. This is done by
--config-overrides section1:option1:value1 section2:option2:value2 ...
This can also be given as
--config-overrides section1:option1
where the value will be left as ''.
To remove a configuration option, use the command line argument
--config-delete section1:option1
which will delete option1 from [section1] or
--config-delete section1
to delete all of the options in [section1]
Deletes are implemented before overrides.
This function returns an argparse OptionGroup to ensure these options are
parsed correctly and can then be sent directly to initialize an
WorkflowConfigParser.
Parameters
-----------
parser : argparse.ArgumentParser instance
The initialized argparse instance to add the workflow option group to.
"""
workflowArgs = parser.add_argument_group('workflow',
'Options needed for workflow setup.')
workflowArgs.add_argument("--config-files", nargs="+", action='store',
metavar="CONFIGFILE",
help="List of config files to be used in "
"analysis.")
workflowArgs.add_argument("--config-overrides", nargs="*", action='store',
metavar="SECTION:OPTION:VALUE",
help="List of section,option,value combinations to "
"add into the configuration file. Normally the gps "
"start and end times might be provided this way, "
"and user specific locations (ie. output directories). "
"This can also be provided as SECTION:OPTION or "
"SECTION:OPTION: both of which indicate that the "
"corresponding value is left blank.")
workflowArgs.add_argument("--config-delete", nargs="*", action='store',
metavar="SECTION:OPTION",
help="List of section,option combinations to delete "
"from the configuration file. This can also be "
"provided as SECTION which deletes the enture section"
" from the configuration file or SECTION:OPTION "
"which deletes a specific option from a given "
"section.")
class WorkflowConfigParser(pycbc_glue.pipeline.DeepCopyableConfigParser):
"""
This is a sub-class of glue.pipeline.DeepCopyableConfigParser, which lets
us add a few additional helper features that are useful in workflows.
"""
def __init__(self, configFiles=None, overrideTuples=None, parsedFilePath=None, deleteTuples=None):
"""
Initialize an WorkflowConfigParser. This reads the input configuration
files, overrides values if necessary and performs the interpolation.
See https://ldas-jobs.ligo.caltech.edu/~cbc/docs/pycbc/ahope/initialization_inifile.html
Parameters
-----------
configFiles : Path to .ini file, or list of paths
The file(s) to be read in and parsed.
overrideTuples : List of (section, option, value) tuples
Add the (section, option, value) triplets provided
in this list to the provided .ini file(s). If the section, option
pair is already present, it will be overwritten.
parsedFilePath : Path, optional (default=None)
If given, write the parsed .ini file back to disk at this location.
deleteTuples : List of (section, option) tuples
Delete the (section, option) pairs provided
in this list from provided .ini file(s). If the section only
is provided, the entire section will be deleted.
Returns
--------
WorkflowConfigParser
Initialized WorkflowConfigParser instance.
"""
if configFiles is None:
configFiles = []
if overrideTuples is None:
overrideTuples = []
if deleteTuples is None:
deleteTuples = []
pycbc_glue.pipeline.DeepCopyableConfigParser.__init__(self)
# Enable case sensitive options
self.optionxform = str
configFiles = [resolve_url(cFile) for cFile in configFiles]
self.read_ini_file(configFiles)
# Replace exe macros with full paths
self.perform_exe_expansion()
# Split sections like [inspiral&tmplt] into [inspiral] and [tmplt]
self.split_multi_sections()
# Populate shared options from the [sharedoptions] section
self.populate_shared_sections()
# Do deletes from command line
for delete in deleteTuples:
if len(delete) == 1:
if self.remove_section(delete[0]) is False:
raise ValueError("Cannot delete section %s, "
"no such section in configuration." % delete )
else:
logging.info("Deleting section %s from configuration",
delete[0])
elif len(delete) == 2:
if self.remove_option(delete[0],delete[1]) is False:
raise ValueError("Cannot delete option %s from section %s,"
" no such option in configuration." % delete )
else:
logging.info("Deleting option %s from section %s in "
"configuration", delete[1], delete[0])
else:
raise ValueError("Deletes must be tuples of length 1 or 2. "
"Got %s." % str(delete) )
# Do overrides from command line
for override in overrideTuples:
if len(override) not in [2,3]:
errmsg = "Overrides must be tuples of length 2 or 3."
errmsg = "Got %s." % (str(override) )
raise ValueError(errmsg)
section = override[0]
option = override[1]
value = ''
if len(override) == 3:
value = override[2]
# Check for section existence, create if needed
if not self.has_section(section):
self.add_section(section)
self.set(section, option, value)
logging.info("Overriding section %s option %s with value %s "
"in configuration.", section, option, value )
# Check for any substitutions that can be made
# FIXME: The python 3 version of ConfigParser can do this automatically
# move over to that if it can be backported to python2.X.
# We use the same formatting as the new configparser module when doing
# ExtendedInterpolation
# This is described at
# http://docs.python.org/3.4/library/configparser.html
self.perform_extended_interpolation()
# Check for duplicate options in sub-sections
self.sanity_check_subsections()
# Dump parsed .ini file if needed
if parsedFilePath:
fp = open(parsedFilePath,'w')
self.write(fp)
fp.close()
@classmethod
def from_args(cls, args):
"""
Initialize a WorkflowConfigParser instance using the command line values
parsed in args. args must contain the values provided by the
workflow_command_line_group() function. If you are not using the standard
workflow command line interface, you should probably initialize directly
using __init__()
Parameters
-----------
args : argparse.ArgumentParser
The command line arguments parsed by argparse
"""
# Identify the config files
confFiles = []
# files and URLs to resolve
if args.config_files:
confFiles += args.config_files
# Identify the deletes
confDeletes = args.config_delete or []
# and parse them
parsedDeletes = []
for delete in confDeletes:
splitDelete = delete.split(":")
if len(splitDelete) > 2:
raise ValueError(
"Deletes must be of format section:option "
"or section. Cannot parse %s." % str(delete))
else:
parsedDeletes.append(tuple(splitDelete))
# Identify the overrides
confOverrides = args.config_overrides or []
# and parse them
parsedOverrides = []
for override in confOverrides:
splitOverride = override.split(":")
if len(splitOverride) == 3:
parsedOverrides.append(tuple(splitOverride))
elif len(splitOverride) == 2:
parsedOverrides.append(tuple(splitOverride + [""]))
elif len(splitOverride) > 3:
# Cannot have colons in either section name or variable name
# but the value may contain colons
rec_value = ':'.join(splitOverride[2:])
parsedOverrides.append(tuple(splitOverride[:2] + [rec_value]))
else:
raise ValueError(
"Overrides must be of format section:option:value "
"or section:option. Cannot parse %s." % str(override))
return cls(confFiles, parsedOverrides, None, parsedDeletes)
def read_ini_file(self, cpFile):
"""
Read a .ini file and return it as a ConfigParser class.
This function does none of the parsing/combining of sections. It simply
reads the file and returns it unedited
Stub awaiting more functionality - see configparser_test.py
Parameters
----------
cpFile : Path to .ini file, or list of paths
The path(s) to a .ini file to be read in
Returns
-------
cp : ConfigParser
The ConfigParser class containing the read in .ini file
"""
# Read the file
self.read(cpFile)
def perform_exe_expansion(self):
"""
This function will look through the executables section of the
ConfigParser object and replace any values using macros with full paths.
For any values that look like
${which:lalapps_tmpltbank}
will be replaced with the equivalent of which(lalapps_tmpltbank)
Otherwise values will be unchanged.
"""
# Only works on executables section
if self.has_section('executables'):
for option, value in self.items('executables'):
# Check the value
newStr = self.interpolate_exe(value)
if newStr != value:
self.set('executables', option, newStr)
def interpolate_exe(self, testString):
"""
Replace testString with a path to an executable based on the format.
If this looks like
${which:lalapps_tmpltbank}
it will return the equivalent of which(lalapps_tmpltbank)
Otherwise it will return an unchanged string.
Parameters
-----------
testString : string
The input string
Returns
--------
newString : string
The output string.
"""
# First check if any interpolation is needed and abort if not
testString = testString.strip()
if not (testString.startswith('${') and testString.endswith('}')):
return testString
# This may not be an exe interpolation, so even if it has ${XXX} form
# I may not have to do anything
newString = testString
# Strip the ${ and }
testString = testString[2:-1]
testList = testString.split(':')
# Maybe we can add a few different possibilities for substitution
if len(testList) == 2:
if testList[0] == 'which':
newString = distutils.spawn.find_executable(testList[1])
if not newString:
errmsg = "Cannot find exe %s in your path " %(testList[1])
errmsg += "and you specified ${which:%s}." %(testList[1])
raise ValueError(errmsg)
return newString
def get_subsections(self, section_name):
""" Return a list of subsections for the given section name
"""
# Keep only subsection names
subsections = [sec[len(section_name)+1:] for sec in self.sections()\
if sec.startswith(section_name + '-')]
for sec in subsections:
sp = sec.split('-')
# This is unusual, but a format [section-subsection-tag] is okay. Just
# check that [section-subsection] section exists. If not it is possible
# the user is trying to use an subsection name with '-' in it
if (len(sp) > 1) and not self.has_section('%s-%s' % (section_name,
sp[0])):
raise ValueError( "Workflow uses the '-' as a delimiter so "
"this is interpreted as section-subsection-tag. "
"While checking section %s, no section with "
"name %s-%s was found. "
"If you did not intend to use tags in an "
"'advanced user' manner, or do not understand what "
"this means, don't use dashes in section "
"names. So [injection-nsbhinj] is good. "
"[injection-nsbh-inj] is not." % (sec, sp[0], sp[1]))
if len(subsections) > 0:
return [sec.split('-')[0] for sec in subsections]
elif self.has_section(section_name):
return ['']
else:
return []
def perform_extended_interpolation(self):
"""
Filter through an ini file and replace all examples of
ExtendedInterpolation formatting with the exact value. For values like
${example} this is replaced with the value that corresponds to the
option called example ***in the same section***
For values like ${common|example} this is replaced with the value that
corresponds to the option example in the section [common]. Note that
in the python3 config parser this is ${common:example} but python2.7
interprets the : the same as a = and this breaks things
Nested interpolation is not supported here.
"""
# Do not allow any interpolation of the section names
for section in self.sections():
for option,value in self.items(section):
# Check the option name
newStr = self.interpolate_string(option, section)
if newStr != option:
self.set(section,newStr,value)
self.remove_option(section,option)
# Check the value
newStr = self.interpolate_string(value, section)
if newStr != value:
self.set(section,option,newStr)
def interpolate_string(self, testString, section):
"""
Take a string and replace all example of ExtendedInterpolation
formatting within the string with the exact value.
For values like ${example} this is replaced with the value that
corresponds to the option called example ***in the same section***
For values like ${common|example} this is replaced with the value that
corresponds to the option example in the section [common]. Note that
in the python3 config parser this is ${common:example} but python2.7
interprets the : the same as a = and this breaks things
Nested interpolation is not supported here.
Parameters
----------
testString : String
The string to parse and interpolate
section : String
The current section of the ConfigParser object
Returns
----------
testString : String
Interpolated string
"""
# First check if any interpolation is needed and abort if not
reObj = re.search(r"\$\{.*?\}", testString)
while reObj:
            # re.search found a match; extract the first substring of the
            # form ${....} and strip the ${ and } delimiters
repString = (reObj).group(0)[2:-1]
# Need to test which of the two formats we have
splitString = repString.split('|')
if len(splitString) == 1:
try:
testString = testString.replace('${'+repString+'}',\
self.get(section,splitString[0]))
except ConfigParser.NoOptionError:
print("Substitution failed")
raise
if len(splitString) == 2:
try:
testString = testString.replace('${'+repString+'}',\
self.get(splitString[0],splitString[1]))
except ConfigParser.NoOptionError:
print("Substitution failed")
raise
reObj = re.search(r"\$\{.*?\}", testString)
return testString
def split_multi_sections(self):
"""
Parse through the WorkflowConfigParser instance and splits any sections
labelled with an "&" sign (for e.g. [inspiral&tmpltbank]) into
[inspiral] and [tmpltbank] sections. If these individual sections
already exist they will be appended to. If an option exists in both the
[inspiral] and [inspiral&tmpltbank] sections an error will be thrown
"""
# Begin by looping over all sections
for section in self.sections():
# Only continue if section needs splitting
if '&' not in section:
continue
# Get list of section names to add these options to
splitSections = section.split('&')
for newSec in splitSections:
# Add sections if they don't already exist
if not self.has_section(newSec):
self.add_section(newSec)
self.add_options_to_section(newSec, self.items(section))
self.remove_section(section)
def populate_shared_sections(self):
"""Parse the [sharedoptions] section of the ini file.
That section should contain entries according to:
* massparams = inspiral, tmpltbank
* dataparams = tmpltbank
This will result in all options in [sharedoptions-massparams] being
copied into the [inspiral] and [tmpltbank] sections and the options
        in [sharedoptions-dataparams] being copied into [tmpltbank].
In the case of duplicates an error will be raised.
"""
if not self.has_section('sharedoptions'):
# No sharedoptions, exit
return
for key, value in self.items('sharedoptions'):
assert(self.has_section('sharedoptions-%s' %(key)))
# Comma separated
values = value.split(',')
common_options = self.items('sharedoptions-%s' %(key))
for section in values:
if not self.has_section(section):
self.add_section(section)
for arg, val in common_options:
if arg in self.options(section):
raise ValueError('Option exists in both original ' + \
'ConfigParser section [%s] and ' %(section,) + \
'sharedoptions section: %s %s' \
%(arg,'sharedoptions-%s' %(key)))
self.set(section, arg, val)
self.remove_section('sharedoptions-%s' %(key))
self.remove_section('sharedoptions')
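    # Hedged illustration of the [sharedoptions] mechanics (ini snippet;
    # the section and option names are placeholders):
    #
    #   [sharedoptions]
    #   massparams = inspiral, tmpltbank
    #   [sharedoptions-massparams]
    #   min-mass = 1.0
    #
    # After parsing, both [inspiral] and [tmpltbank] contain min-mass = 1.0
    # and the two sharedoptions sections are removed.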
    def add_options_to_section(self, section, items, overwrite_options=False):
"""
Add a set of options and values to a section of a ConfigParser object.
Will throw an error if any of the options being added already exist,
this behaviour can be overridden if desired
Parameters
----------
section : string
The name of the section to add options+values to
items : list of tuples
Each tuple contains (at [0]) the option and (at [1]) the value to
add to the section of the ini file
overwrite_options : Boolean, optional
By default this function will throw a ValueError if an option exists
in both the original section in the ConfigParser *and* in the
provided items.
This will override so that the options+values given in items
will replace the original values if the value is set to True.
            Default = False
"""
# Sanity checking
if not self.has_section(section):
raise ValueError('Section %s not present in ConfigParser.' \
%(section,))
# Check for duplicate options first
for option,value in items:
if not overwrite_options:
if option in self.options(section):
raise ValueError('Option exists in both original ' + \
'ConfigParser section [%s] and ' %(section,) + \
'input list: %s' %(option,))
self.set(section,option,value)
def sanity_check_subsections(self):
"""
        This function goes through the ConfigParser and checks that any options
given in the [SECTION_NAME] section are not also given in any
[SECTION_NAME-SUBSECTION] sections.
"""
# Loop over the sections in the ini file
for section in self.sections():
            # [pegasus_profile] specifically is allowed to be overridden by
# sub-sections
if section == 'pegasus_profile':
continue
# Loop over the sections again
for section2 in self.sections():
# Check if any are subsections of section
if section2.startswith(section + '-'):
# Check for duplicate options whenever this exists
self.check_duplicate_options(section, section2,
raise_error=True)
def check_duplicate_options(self, section1, section2, raise_error=False):
"""
Check for duplicate options in two sections, section1 and section2.
Will return a list of the duplicate options.
Parameters
----------
section1 : string
The name of the first section to compare
section2 : string
The name of the second section to compare
raise_error : Boolean, optional (default=False)
If True, raise an error if duplicates are present.
Returns
----------
duplicates : List
List of duplicate options
"""
# Sanity checking
if not self.has_section(section1):
raise ValueError('Section %s not present in ConfigParser.'\
%(section1,) )
if not self.has_section(section2):
raise ValueError('Section %s not present in ConfigParser.'\
%(section2,) )
items1 = self.options(section1)
items2 = self.options(section2)
# The list comprehension here creates a list of all duplicate items
duplicates = [x for x in items1 if x in items2]
if duplicates and raise_error:
raise ValueError('The following options appear in both section ' +\
'%s and %s: %s' \
%(section1,section2,' '.join(duplicates)))
return duplicates
def get_opt_tag(self, section, option, tag):
"""
Convenience function accessing get_opt_tags() for a single tag: see
documentation for that function.
NB calling get_opt_tags() directly is preferred for simplicity.
Parameters
-----------
self : ConfigParser object
The ConfigParser object (automatically passed when this is appended
to the ConfigParser class)
section : string
The section of the ConfigParser object to read
option : string
The ConfigParser option to look for
tag : string
The name of the subsection to look in, if not found in [section]
Returns
--------
string
The value of the options being searched for
"""
return self.get_opt_tags(section, option, [tag])
def get_opt_tags(self, section, option, tags):
"""
Supplement to ConfigParser.ConfigParser.get(). This will search for an
option in [section] and if it doesn't find it will also try in
[section-tag] for every value of tag in tags.
Will raise a ConfigParser.Error if it cannot find a value.
Parameters
-----------
self : ConfigParser object
The ConfigParser object (automatically passed when this is appended
to the ConfigParser class)
section : string
The section of the ConfigParser object to read
option : string
The ConfigParser option to look for
tags : list of strings
The name of subsections to look in, if not found in [section]
Returns
--------
string
The value of the options being searched for
"""
# Need lower case tag name; also exclude cases with tag=None
if tags:
tags = [tag.lower() for tag in tags if tag is not None]
try:
return self.get(section, option)
except ConfigParser.Error:
err_string = "No option '%s' in section [%s] " %(option,section)
if not tags:
raise ConfigParser.Error(err_string + ".")
return_vals = []
sub_section_list = []
for sec_len in range(1, len(tags)+1):
for tag_permutation in itertools.permutations(tags, sec_len):
joined_name = '-'.join(tag_permutation)
sub_section_list.append(joined_name)
section_list = ["%s-%s" %(section, sb) for sb in sub_section_list]
err_section_list = []
for sub in sub_section_list:
if self.has_section('%s-%s' %(section, sub)):
if self.has_option('%s-%s' %(section, sub), option):
err_section_list.append("%s-%s" %(section, sub))
return_vals.append(self.get('%s-%s' %(section, sub),
option))
# We also want to recursively go into sections
if not return_vals:
err_string += "or in sections [%s]." \
%("] [".join(section_list))
raise ConfigParser.Error(err_string)
if len(return_vals) > 1:
err_string += "and multiple entries found in sections [%s]."\
%("] [".join(err_section_list))
raise ConfigParser.Error(err_string)
return return_vals[0]
def has_option_tag(self, section, option, tag):
"""
Convenience function accessing has_option_tags() for a single tag: see
documentation for that function.
NB calling has_option_tags() directly is preferred for simplicity.
Parameters
-----------
self : ConfigParser object
The ConfigParser object (automatically passed when this is appended
to the ConfigParser class)
section : string
The section of the ConfigParser object to read
option : string
The ConfigParser option to look for
tag : string
The name of the subsection to look in, if not found in [section]
Returns
--------
Boolean
Is the option in the section or [section-tag]
"""
return self.has_option_tags(section, option, [tag])
def has_option_tags(self, section, option, tags):
"""
Supplement to ConfigParser.ConfigParser.has_option().
This will search for an option in [section] and if it doesn't find it
will also try in [section-tag] for each value in tags.
Returns True if the option is found and false if not.
Parameters
-----------
self : ConfigParser object
The ConfigParser object (automatically passed when this is appended
to the ConfigParser class)
section : string
The section of the ConfigParser object to read
option : string
The ConfigParser option to look for
tags : list of strings
The names of the subsection to look in, if not found in [section]
Returns
--------
Boolean
Is the option in the section or [section-tag] (for tag in tags)
"""
try:
self.get_opt_tags(section, option, tags)
return True
except ConfigParser.Error:
return False
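# --- Usage sketch (not part of the original module) ---
# A hedged illustration of the tag-aware getters: with sections [tmpltbank]
# and [tmpltbank-bns] in a parsed file, the option is looked up first in
# [tmpltbank] and then in [tmpltbank-bns]. The names are placeholders.
def _example_opt_tags(cp):
    # cp is an already-constructed WorkflowConfigParser
    if cp.has_option_tags('tmpltbank', 'min-mass', ['bns']):
        return cp.get_opt_tags('tmpltbank', 'min-mass', ['bns'])
    return None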
| hagabbar/pycbc_copy | pycbc/workflow/configuration.py | Python | gpl-3.0 | 38,216 |
import img_scale
import pyfits as pyf
import pylab as pyl
from mpl_toolkits.axes_grid1 import axes_grid
import cPickle as pickle
import os
from scipy.stats import scoreatpercentile
def mk_image(galaxy):
base = './../../images_v5/GS_2.5as_matched/gs_all_'
i_img = pyf.getdata(base+str(galaxy)+'_I.fits')
j_img = pyf.getdata(base+str(galaxy)+'_J.fits')
h_img = pyf.getdata(base+str(galaxy)+'_H.fits')
    # clip at the 99th percentile (include 99% of the pixels)
x = pyl.hstack(i_img)
i_lim = scoreatpercentile(x,99)
x = pyl.hstack(j_img)
j_lim = scoreatpercentile(x,99)
x = pyl.hstack(h_img)
h_lim = scoreatpercentile(x,99)
print galaxy, i_lim, j_lim, h_lim
img = pyl.zeros((h_img.shape[0], h_img.shape[1], 3), dtype=float)
img[:,:,0] = img_scale.asinh(h_img, scale_min=-0.1*h_lim, scale_max=h_lim,
non_linear=0.5)
img[:,:,1] = img_scale.asinh(j_img, scale_min=-0.1*j_lim, scale_max=j_lim,
non_linear=0.5)
img[:,:,2] = img_scale.asinh(i_img, scale_min=-0.1*i_lim, scale_max=i_lim,
non_linear=0.5)
return img
# Get the Galaxy info
galaxies = pickle.load(open('galaxies.pickle','rb'))
galaxies = filter(lambda galaxy: galaxy.ston_I > 30., galaxies)
galaxies = pyl.asarray(filter(lambda galaxy: galaxy.ICD_IH < 0.5, galaxies))
# Make the low mass grid first
x = [galaxy.Mass for galaxy in galaxies]
y = [galaxy.ICD_IH *100 for galaxy in galaxies]
ll = 8.5
ul= 12
bins_x =pyl.linspace(ll, ul, 8)
bins_y = pyl.linspace(50, 0, 6)
grid = []
for i in range(bins_x.size-1):
xmin = bins_x[i]
xmax = bins_x[i+1]
for j in range(bins_y.size-1):
ymax = bins_y[j]
ymin = bins_y[j+1]
cond=[cond1 and cond2 and cond3 and cond4 for cond1, cond2, cond3,
cond4 in zip(x>=xmin, x<xmax, y>=ymin, y<ymax)]
grid.append(galaxies.compress(cond))
# Put the grid together
F = pyl.figure(1, figsize=(6, 4))
grid1 = axes_grid.ImageGrid(F, 111, nrows_ncols=(5,7), axes_pad=0.05,
add_all=True, share_all=True, aspect=True, direction='column')
from random import choice
base = './../../images_v5/GS_2.5as/gs_all_'
for i in range(len(grid)):
print len(grid[i])
if len(grid[i]) > 1:
galaxy = choice(grid[i])
ID = int(galaxy.ID)
        # re-draw until the chosen galaxy's image actually exists on disk
        while not os.path.isfile(base + str(ID) + '_I.fits'):
            print 'choose again', ID
            galaxy = choice(grid[i])
            ID = int(galaxy.ID)
elif len(grid[i]) == 1:
galaxy = grid[i][0]
else:
#grid1[i].axis('off')
grid1[i].spines['bottom'].set_color('0.8')
grid1[i].spines['top'].set_color('0.8')
grid1[i].spines['right'].set_color('0.8')
grid1[i].spines['left'].set_color('0.8')
grid1[i].set_axis_bgcolor('None')
#grid1[i].axis('off')
if len(grid[i]) != 0:
ID = int(galaxy.ID)
img = mk_image(ID)
grid1[i].imshow(img, origin='lower')
grid1[i].text(0.5, 0.5, str(ID), color='white' )
grid1[i].set_xticks([])
grid1[i].set_yticks([])
else:
pass
# Label everything
grid1[4].set_xlabel('8.75', fontsize=16)
grid1[9].set_xlabel('9.25', fontsize=16)
grid1[14].set_xlabel('9.75', fontsize=16)
grid1[19].set_xlabel('10.25\nLog Mass $(M_\odot)$', fontsize=16)
grid1[24].set_xlabel('10.75', fontsize=16)
grid1[29].set_xlabel('11.25', fontsize=16)
grid1[34].set_xlabel('11.75', fontsize=16)
grid1[0].set_ylabel('45%', fontsize=16)
grid1[1].set_ylabel('35%', fontsize=16)
grid1[2].set_ylabel(r'$\xi[i_{775}, H_{160}]$ (%)'+'\n25%', fontsize=16,
multialignment='center')
grid1[3].set_ylabel('15%', fontsize=16)
grid1[4].set_ylabel('5%', fontsize=16)
pyl.show()
| boada/ICD | sandbox/plot_icd_mass_montage.py | Python | mit | 3,643 |
# (c) Nelen & Schuurmans. GPL licensed, see LICENSE.rst.
from __future__ import unicode_literals
from django.contrib import admin
from django.contrib.gis.admin import GeoModelAdmin
from django.utils.translation import ugettext_lazy as _
from controlnext.models import Basin
from controlnext.models import GrowerInfo
from controlnext.models import UserProfile
from controlnext.models import WaterDemand
class BasinInline(admin.TabularInline):
model = Basin
extra = 1
class GrowerInfoAdmin(GeoModelAdmin):
"""GrowlerInfo admin layout."""
default_lat = 51.97477
default_lon = 4.15146
default_zoom = 10
fieldsets = (
(None, {
'classes': ['wide'],
'fields': ('name', 'image', 'crop', 'crop_surface')
}),
)
inlines = [BasinInline, ]
class BasinAdmin(GeoModelAdmin):
readonly_fields = ('random_url_slug', )
fieldsets = (
(None, {
'classes': ['wide'],
'fields': (
'grower', 'name', 'random_url_slug', 'filter_id',
'location_id', 'parameter_id', 'recirculation',
# rain parameters
'rain_filter_id', 'rain_location_id',
# other basin parameters
'max_storage', 'current_fill', 'current_fill_updated',
'rain_flood_surface', 'max_outflow_per_timeunit',
'reverse_osmosis',
'osmose_till_date', 'jdbc_source'
)
}),
(_("location"), {
'classes': ['collapse'],
'fields': ('location',)
}),
)
class UserProfileAdmin(admin.ModelAdmin):
filter_horizontal = ('grower', )
admin.site.register(UserProfile, UserProfileAdmin)
admin.site.register(GrowerInfo, GrowerInfoAdmin)
admin.site.register(Basin, BasinAdmin)
admin.site.register(WaterDemand)
| lizardsystem/controlnext | controlnext/admin.py | Python | gpl-3.0 | 1,860 |
"""
Various tools for detecting specific signals in a radargram
"""
import numpy as np
def surface(rdg, method='maxvec', loc=None, ywinwidth=None):
"""Detect surface echo in a radargram
Arguments
---------
rdg : Array
Radargram
Keywords
--------
method : string
Method to use for surface detection
loc : vec
A coordinate guess of where the surface should be
ywinwidth : [int, int]
Window limit around loc where the algo should search for the surface
Output
------
y : coordinate for the surface location
    val : value of the surface echo
ok : Whether the detection is estimated to be good (1) or not (0)
"""
n = rdg.shape
loc = np.zeros(n[0]).astype('int') if loc is None else loc.astype('int')
ywinwidth = np.array([0,n[1]-1]).astype('int') if ywinwidth is None else ywinwidth.astype('int')
if ywinwidth[0] < 0:
ywinwidth[0] = 0
if ywinwidth[-1] > n[1]-1:
ywinwidth[-1] = n[1]-1
return globals()[method](rdg, loc, ywinwidth)
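# --- Usage sketch (not part of the original module) ---
# A hedged demo of the dispatcher above on a synthetic radargram; the array
# sizes, guess location, and window are placeholders.
def _example_surface():
    rdg = np.random.randn(100, 500)  # 100 range lines, 500 samples each
    res = surface(rdg, method='maxvec',
                  loc=np.full(100, 250), ywinwidth=np.array([-20, 20]))
    return res['y'], res['val'], res['ok']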
def maxvec(rdg, loc, ywinwidth):
"""
Select the maximum value for each range line in a radargram
"""
n = len(loc)
y = np.zeros(n).astype('int')
    val = np.zeros(n)  # keep float: echo amplitudes are not integer
ok = np.zeros(n).astype('int')
for i, loci in enumerate(loc):
pls = np.abs(rdg[i, :])
itv = pls[loci+ywinwidth[0]:loci+ywinwidth[1]]
y[i] = loci + ywinwidth[0] + np.argmax(itv)
val[i] = pls[y[i]]
# No quality metrics for now
ok[i] = 1
return {'y':y, 'val':val, 'ok':ok}
def maxprd(rdg, loc, ywinwidth):
"""
Select the maximum of the val*val/dt for each range line in a radargram
"""
n = len(loc)
y = np.zeros(n).astype('int')
    val = np.zeros(n)  # keep float: echo amplitudes are not integer
ok = np.zeros(n).astype('int')
for i, loci in enumerate(loc):
pls = np.abs(rdg[i, :])
prd = np.abs(np.roll(np.gradient(pls), 2) * pls)
itv = prd[loci+ywinwidth[0]:loci+ywinwidth[1]]
y[i] = loci + ywinwidth[0] + np.argmax(itv)
val[i] = pls[y[i]]
# No Quality metrics for now
ok[i] = 1
return {'y':y, 'val':val, 'ok':ok}
| cgrima/rsr | rsr/detect.py | Python | mit | 2,198 |
# test construction of array.array from different objects
from array import array
# tuple, list
print(array('b', (1, 2)))
print(array('h', [1, 2]))
# raw copy from bytes, bytearray
print(array('h', b'22')) # should be byteorder-neutral
print(array('h', bytearray(2)))
print(array('i', bytearray(4)))
# convert from other arrays
print(array('H', array('b', [1, 2])))
print(array('b', array('I', [1, 2])))
| martinribelotta/micropython | tests/basics/array_construct.py | Python | mit | 409 |
import pytest
import raccoon as rc
from raccoon.utils import assert_series_equal
def test_set_cell():
actual = rc.Series([4, 5, 6], index=[10, 11, 12], sort=False)
# change existing value
actual.set(11, 55)
assert actual.get(11) == 55
actual.set(10, 11)
assert actual.get(10) == 11
actual.set(10, 13)
assert actual.get(10) == 13
assert actual.data == [13, 55, 6]
# add a new row
actual.set(13, 14)
assert actual.data == [13, 55, 6, 14]
# add a new row note that index does not sort
actual.set(1, -100)
assert actual.data == [13, 55, 6, 14, -100]
assert actual.index == [10, 11, 12, 13, 1]
def test_set_cell_sort():
actual = rc.Series([7, 8, 9], index=[10, 12, 13], sort=True)
# change existing value
actual.set(12, 55)
assert actual.get(12) == 55
actual.set(10, 11)
assert actual.get(10) == 11
actual.set(10, 13)
assert actual.get(10) == 13
assert actual.data == [13, 55, 9]
# add a new row
actual.set(14, 15)
assert actual.index == [10, 12, 13, 14]
assert actual.data == [13, 55, 9, 15]
# row in the middle
actual.set(11, -1)
assert actual.index == [10, 11, 12, 13, 14]
assert actual.data == [13, -1, 55, 9, 15]
# add before beginning
actual.set(5, 999)
assert actual.index == [5, 10, 11, 12, 13, 14]
assert actual.data == [999, 13, -1, 55, 9, 15]
# fails for mixed index type
with pytest.raises(TypeError):
actual.set('Z', 60)
def test_set_rows():
actual = rc.Series([7, 8, 9], index=[10, 11, 12], sort=False)
# change some of the values
actual.set([11, 12], [88, 99])
assert actual.data == [7, 88, 99]
# change all existing values
actual.set(actual.index, values=[44, 55, 66])
assert actual.data == [44, 55, 66]
# modify existing and add a new row
actual.set([12, 9], [666, 77])
assert actual.data == [44, 55, 666, 77]
assert actual.index == [10, 11, 12, 9]
# only add a new row
actual.set([8], [5])
assert actual.data == [44, 55, 666, 77, 5]
assert actual.index == [10, 11, 12, 9, 8]
# not enough values
with pytest.raises(ValueError):
actual.set(indexes=[True, True, True], values=[1, 2])
# number of values must equal number of True indexes
with pytest.raises(ValueError):
actual.set(indexes=[True, False, True, False, False], values=[1, 2, 3])
# too many values
with pytest.raises(ValueError):
actual.set(indexes=[True, True, True, True], values=[1, 2, 3, 4])
def test_set_rows_sort():
actual = rc.Series([7, 8, 9], index=[10, 11, 12], sort=True)
# change some of the values
actual.set([11, 12], [88, 99])
assert actual.data == [7, 88, 99]
# change all existing values
actual.set(actual.index, values=[44, 55, 66])
assert actual.data == [44, 55, 66]
# modify existing and add a new row
actual.set([12, 9, 10.5], [666, 77, 1])
assert actual.data == [77, 44, 1, 55, 666]
assert actual.index == [9, 10, 10.5, 11, 12]
# only add a new row
actual.set([8], [5])
assert actual.data == [5, 77, 44, 1, 55, 666]
assert actual.index == [8, 9, 10, 10.5, 11, 12]
# not enough values
with pytest.raises(ValueError):
actual.set(indexes=[True, True, True], values=[1, 2])
# number of values must equal number of True indexes
with pytest.raises(ValueError):
actual.set(indexes=[True, False, True], values=[1, 2, 3])
# too many values
with pytest.raises(ValueError):
actual.set(indexes=[True, True, True, True], values=[1, 2, 3, 4])
def test_set_index_subset():
actual = rc.Series([7, 8, 9], index=[10, 11, 12], sort=False)
# by index value
actual.set(indexes=[12, 11, 10], values=[66, 55, 44])
assert actual.data == [44, 55, 66]
actual.set(indexes=[12, 10], values=[33, 11])
assert actual.data == [11, 55, 33]
# new rows
actual.set(indexes=[12, 13, 14], values=[120, 130, 140])
assert actual.data == [11, 55, 120, 130, 140]
assert actual.index == [10, 11, 12, 13, 14]
# values list shorter than indexes, raise error
with pytest.raises(ValueError):
actual.set(indexes=[10, 11], values=[1])
# by boolean list
actual = rc.Series([7, 8], index=['first', 'second'], sort=False)
actual.set(indexes=[False, True], values=[99])
assert actual.data == [7, 99]
# boolean list not size of existing index
with pytest.raises(ValueError):
actual.set(indexes=[True, False, True], values=[1, 2])
# boolean list True entries not same size as values list
with pytest.raises(ValueError):
actual.set(indexes=[True, True, False], values=[4, 5, 6])
with pytest.raises(ValueError):
actual.set(indexes=[True, True, False], values=[4])
def test_set_index_subset_sort():
actual = rc.Series([1, 2, 3], index=[10, 11, 12], sort=True)
# by index value
actual.set(indexes=[12, 11, 10], values=[66, 55, 44])
assert actual.data == [44, 55, 66]
actual.set(indexes=[12, 10], values=[33, 11])
assert actual.data == [11, 55, 33]
# new rows at end
actual.set(indexes=[12, 14, 15], values=[120, 130, 140])
assert actual.data == [11, 55, 120, 130, 140]
assert actual.index == [10, 11, 12, 14, 15]
# new rows at beginning
actual.set(indexes=[10, 4, 5], values=[-140, -120, -130])
assert actual.data == [-120, -130, -140, 55, 120, 130, 140]
assert actual.index == [4, 5, 10, 11, 12, 14, 15]
# new rows in middle
actual.set(indexes=[13, 6], values=[3131, 6060])
assert actual.data == [-120, -130, 6060, -140, 55, 120, 3131, 130, 140]
assert actual.index == [4, 5, 6, 10, 11, 12, 13, 14, 15]
# new row new columns
actual.set(indexes=[14, 15, 16], values=['zoo', 'boo', 'hoo'])
assert actual.index == [4, 5, 6, 10, 11, 12, 13, 14, 15, 16]
assert actual.data == [-120, -130, 6060, -140, 55, 120, 3131, 'zoo', 'boo', 'hoo']
# values list shorter than indexes, raise error
with pytest.raises(ValueError):
actual.set(indexes=[10, 11], values=[1])
# by boolean list
actual = rc.Series([1, 2], index=['first', 'second'], sort=True)
actual.set(indexes=[False, True], values=[99])
assert actual.data == [1, 99]
# boolean list not size of existing index
with pytest.raises(ValueError):
actual.set(indexes=[True, False, True], values=[1, 2])
# boolean list True entries not same size as values list
with pytest.raises(ValueError):
actual.set(indexes=[True, True, False], values=[4, 5, 6])
with pytest.raises(ValueError):
actual.set(indexes=[True, True, False], values=[4])
def test_set_single_value():
srs = rc.Series([4, 5, 6], index=[10, 11, 12], sort=False)
# set multiple index to one value
srs.set([10, 12], 99)
assert srs.data == [99, 5, 99]
def test_set_location():
srs = rc.Series([5, 6, 7, 8], index=[2, 4, 6, 8])
srs.set_location(0, -1)
assert_series_equal(srs, rc.Series([-1, 6, 7, 8], index=[2, 4, 6, 8]))
srs.set_location(3, -10)
assert_series_equal(srs, rc.Series([-1, 6, 7, -10], index=[2, 4, 6, 8]))
with pytest.raises(IndexError):
srs.set_location(5, 9)
def test_set_locations():
srs = rc.Series([5, 6, 7, 8], index=[2, 4, 6, 8])
srs.set_locations([0, 2], [-1, -3])
assert_series_equal(srs, rc.Series([-1, 6, -3, 8], index=[2, 4, 6, 8]))
srs.set_locations([1, 3], -10)
assert_series_equal(srs, rc.Series([-1, -10, -3, -10], index=[2, 4, 6, 8]))
with pytest.raises(IndexError):
srs.set_locations([1, 10], [9, 99])
def test_set_from_blank_srs():
# single cell
srs = rc.Series(sort=False)
srs.set(indexes=1, values=9)
assert srs.index == [1]
assert srs.data == [9]
# single column
srs = rc.Series(sort=False)
srs.set(indexes=[1, 2, 3], values=[9, 10, 11])
assert srs.index == [1, 2, 3]
assert srs.data == [9, 10, 11]
def test_set_square_brackets():
srs = rc.Series(sort=False)
srs[1] = 2
assert srs.data == [2]
assert srs.index == [1]
    # srs[[0, 3]] -- set index = [0, 3]
srs[[0, 3]] = 4
assert srs.data == [2, 4, 4]
assert srs.index == [1, 0, 3]
    # srs[1:3] -- set index slice 1:3
srs[1:3] = 5
assert srs.data == [5, 5, 5]
assert srs.index == [1, 0, 3]
assert srs.sort is False
# with sort = True
srs = rc.Series(sort=True)
srs[1] = 2
assert srs.data == [2]
assert srs.index == [1]
    # srs[[0, 3]] -- set index = [0, 3]
srs[[0, 3]] = 4
assert srs.data == [4, 2, 4]
assert srs.index == [0, 1, 3]
    # srs[1:3] -- set index slice 1:3
srs[1:3] = 5
assert srs.data == [4, 5, 5]
assert srs.index == [0, 1, 3]
assert srs.sort is True
# insert
srs[2] = 6
assert srs.data == [4, 5, 6, 5]
assert srs.index == [0, 1, 2, 3]
def test_append_row():
actual = rc.Series([7, 9], index=[10, 12], sort=False)
actual.append_row(9, 99)
expected = rc.Series([7, 9, 99], index=[10, 12, 9])
assert_series_equal(actual, expected)
actual.append_row(16, 100)
expected = rc.Series([7, 9, 99, 100], index=[10, 12, 9, 16])
assert_series_equal(actual, expected)
with pytest.raises(IndexError):
actual.append_row(10, 100)
def test_append_rows():
actual = rc.Series([7, 9], index=[10, 12], sort=False)
actual.append_rows([9, 11], [99, 100])
expected = rc.Series([7, 9, 99, 100], index=[10, 12, 9, 11])
assert_series_equal(actual, expected)
actual.append_rows([16, 17], [110, 120])
expected = rc.Series([7, 9, 99, 100, 110, 120], index=[10, 12, 9, 11, 16, 17])
assert_series_equal(actual, expected)
with pytest.raises(IndexError):
actual.append_rows([1, 10], [100, 110])
with pytest.raises(ValueError):
actual.append_rows([1, 10], [100, 110, 120])
| rsheftel/raccoon | tests/test_series/test_set.py | Python | mit | 9,949 |
# -*- coding: utf-8 -*-
###
# (C) Copyright (2012-2016) Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
import unittest
import mock
from hpOneView.connection import connection
from hpOneView.resources.storage.storage_pools import StoragePools
from hpOneView.resources.resource import ResourceClient
class StoragePoolsTest(unittest.TestCase):
def setUp(self):
self.host = '127.0.0.1'
self.connection = connection(self.host)
self._storage_pools = StoragePools(self.connection)
@mock.patch.object(ResourceClient, 'get_all')
def test_get_all_called_once(self, mock_get_all):
filter = 'name=TestName'
sort = 'name:ascending'
self._storage_pools.get_all(2, 500, filter, sort)
mock_get_all.assert_called_once_with(2, 500, filter=filter, sort=sort)
@mock.patch.object(ResourceClient, 'get_all')
def test_get_all_called_once_with_default(self, mock_get_all):
self._storage_pools.get_all()
mock_get_all.assert_called_once_with(0, -1, filter='', sort='')
@mock.patch.object(ResourceClient, 'get')
def test_get_by_id_called_once(self, mock_get):
storage_pools_id = "EE9326ED-4595-4828-B411-FE3BD6BA7E9D"
self._storage_pools.get(storage_pools_id)
mock_get.assert_called_once_with(storage_pools_id)
@mock.patch.object(ResourceClient, 'get')
def test_get_by_uri_called_once(self, mock_get):
storage_pools_uri = "/rest/storage-pools/EE9326ED-4595-4828-B411-FE3BD6BA7E9D"
self._storage_pools.get(storage_pools_uri)
mock_get.assert_called_once_with(storage_pools_uri)
@mock.patch.object(ResourceClient, 'create')
def test_add_called_once_with_defaults(self, mock_create):
storage_pool = {
"storageSystemUri": "/rest/storage-systems/111111",
"poolName": "storagepool1"
}
self._storage_pools.add(storage_pool)
mock_create.assert_called_once_with(storage_pool, timeout=-1)
@mock.patch.object(ResourceClient, 'create')
def test_add_called_once(self, mock_create):
storage_pool = {
"storageSystemUri": "/rest/storage-systems/111111",
"poolName": "storagepool1"
}
self._storage_pools.add(storage_pool, 70)
mock_create.assert_called_once_with(storage_pool, timeout=70)
@mock.patch.object(ResourceClient, 'delete')
def test_remove_called_once(self, mock_delete):
id = 'ad28cf21-8b15-4f92-bdcf-51cb2042db32'
self._storage_pools.remove(id, force=True, timeout=50)
mock_delete.assert_called_once_with(id, force=True, timeout=50)
@mock.patch.object(ResourceClient, 'delete')
def test_remove_called_once_with_defaults(self, mock_delete):
id = 'ad28cf21-8b15-4f92-bdcf-51cb2042db32'
self._storage_pools.remove(id)
mock_delete.assert_called_once_with(id, force=False, timeout=-1)
@mock.patch.object(ResourceClient, 'get_by')
def test_get_by_called_once(self, mock_get_by):
self._storage_pools.get_by("name", "test name")
mock_get_by.assert_called_once_with("name", "test name")
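
# Usage sketch (illustrative, not part of the original test suite): the calls
# mocked above correspond to client code along these lines; the appliance
# host and the filter/sort values are assumptions.
#
#   from hpOneView.connection import connection
#   from hpOneView.resources.storage.storage_pools import StoragePools
#
#   con = connection('oneview.example.com')
#   storage_pools = StoragePools(con)
#   pools = storage_pools.get_all(0, 500, filter='name=gold', sort='name:ascending')
#   storage_pools.add({"storageSystemUri": "/rest/storage-systems/111111",
#                      "poolName": "storagepool1"}, timeout=70)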
| andreadean5/python-hpOneView | tests/unit/resources/storage/test_storage_pools.py | Python | mit | 4,191 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SinhArcsinh transformation of a distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops.distributions import normal
from tensorflow.python.ops.distributions import transformed_distribution
from tensorflow.python.util import deprecation
__all__ = [
"SinhArcsinh",
]
class SinhArcsinh(transformed_distribution.TransformedDistribution):
"""The SinhArcsinh transformation of a distribution on `(-inf, inf)`.
This distribution models a random variable, making use of
a `SinhArcsinh` transformation (which has adjustable tailweight and skew),
a rescaling, and a shift.
The `SinhArcsinh` transformation of the Normal is described in great depth in
[Sinh-arcsinh distributions](https://www.jstor.org/stable/27798865).
Here we use a slightly different parameterization, in terms of `tailweight`
and `skewness`. Additionally we allow for distributions other than Normal,
and control over `scale` as well as a "shift" parameter `loc`.
#### Mathematical Details
Given random variable `Z`, we define the SinhArcsinh
transformation of `Z`, `Y`, parameterized by
`(loc, scale, skewness, tailweight)`, via the relation:
```
Y := loc + scale * F(Z) * (2 / F_0(2))
F(Z) := Sinh( (Arcsinh(Z) + skewness) * tailweight )
F_0(Z) := Sinh( Arcsinh(Z) * tailweight )
```
This distribution is similar to the location-scale transformation
`L(Z) := loc + scale * Z` in the following ways:
* If `skewness = 0` and `tailweight = 1` (the defaults), `F(Z) = Z`, and then
`Y = L(Z)` exactly.
* `loc` is used in both to shift the result by a constant factor.
  * The multiplication of `scale` by `2 / F_0(2)` ensures that if `skewness = 0`,
`P[Y - loc <= 2 * scale] = P[L(Z) - loc <= 2 * scale]`.
Thus it can be said that the weights in the tails of `Y` and `L(Z)` beyond
`loc + 2 * scale` are the same.
  This distribution is different from `loc + scale * Z` due to the
reshaping done by `F`:
* Positive (negative) `skewness` leads to positive (negative) skew.
    * positive skew means the mode of `F(Z)` is "tilted" to the right.
* positive skew means positive values of `F(Z)` become more likely, and
negative values become less likely.
* Larger (smaller) `tailweight` leads to fatter (thinner) tails.
* Fatter tails mean larger values of `|F(Z)|` become more likely.
* `tailweight < 1` leads to a distribution that is "flat" around `Y = loc`,
and a very steep drop-off in the tails.
* `tailweight > 1` leads to a distribution more peaked at the mode with
heavier tails.
To see the argument about the tails, note that for `|Z| >> 1` and
`|Z| >> (|skewness| * tailweight)**tailweight`, we have
`Y approx 0.5 Z**tailweight e**(sign(Z) skewness * tailweight)`.
To see the argument regarding multiplying `scale` by `2 / F_0(2)`,
```
P[(Y - loc) / scale <= 2] = P[F(Z) * (2 / F_0(2)) <= 2]
= P[F(Z) <= F_0(2)]
= P[Z <= 2] (if F = F_0).
```
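
  #### Examples

  A minimal usage sketch; the parameter values are illustrative, and the
  `tf.contrib.distributions` import path is an assumption based on this
  module's location:

  ```
  import tensorflow as tf

  tfd = tf.contrib.distributions

  # A right-skewed, heavier-tailed variant of the standard Normal.
  dist = tfd.SinhArcsinh(loc=0., scale=1., skewness=0.75, tailweight=1.5)
  samples = dist.sample(1000)
  log_prob_at_zero = dist.log_prob(0.)
  ```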
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tfp.distributions`.",
warn_once=True)
def __init__(self,
loc,
scale,
skewness=None,
tailweight=None,
distribution=None,
validate_args=False,
allow_nan_stats=True,
name="SinhArcsinh"):
"""Construct SinhArcsinh distribution on `(-inf, inf)`.
Arguments `(loc, scale, skewness, tailweight)` must have broadcastable shape
(indexing batch dimensions). They must all have the same `dtype`.
Args:
loc: Floating-point `Tensor`.
scale: `Tensor` of same `dtype` as `loc`.
skewness: Skewness parameter. Default is `0.0` (no skew).
tailweight: Tailweight parameter. Default is `1.0` (unchanged tailweight)
distribution: `tf.Distribution`-like instance. Distribution that is
transformed to produce this distribution.
Default is `tfp.distributions.Normal(0., 1.)`.
Must be a scalar-batch, scalar-event distribution. Typically
`distribution.reparameterization_type = FULLY_REPARAMETERIZED` or it is
a function of non-trainable parameters. WARNING: If you backprop through
a `SinhArcsinh` sample and `distribution` is not
`FULLY_REPARAMETERIZED` yet is a function of trainable variables, then
the gradient will be incorrect!
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = dict(locals())
with ops.name_scope(name,
values=[loc, scale, skewness, tailweight]) as name:
loc = ops.convert_to_tensor(loc, name="loc")
dtype = loc.dtype
scale = ops.convert_to_tensor(scale, name="scale", dtype=dtype)
tailweight = 1. if tailweight is None else tailweight
has_default_skewness = skewness is None
skewness = 0. if skewness is None else skewness
tailweight = ops.convert_to_tensor(
tailweight, name="tailweight", dtype=dtype)
skewness = ops.convert_to_tensor(skewness, name="skewness", dtype=dtype)
batch_shape = distribution_util.get_broadcast_shape(
loc, scale, tailweight, skewness)
# Recall, with Z a random variable,
# Y := loc + C * F(Z),
# F(Z) := Sinh( (Arcsinh(Z) + skewness) * tailweight )
# F_0(Z) := Sinh( Arcsinh(Z) * tailweight )
# C := 2 * scale / F_0(2)
if distribution is None:
distribution = normal.Normal(
loc=array_ops.zeros([], dtype=dtype),
scale=array_ops.ones([], dtype=dtype),
allow_nan_stats=allow_nan_stats)
else:
asserts = distribution_util.maybe_check_scalar_distribution(
distribution, dtype, validate_args)
if asserts:
loc = control_flow_ops.with_dependencies(asserts, loc)
# Make the SAS bijector, 'F'.
f = bijectors.SinhArcsinh(
skewness=skewness, tailweight=tailweight)
if has_default_skewness:
f_noskew = f
else:
f_noskew = bijectors.SinhArcsinh(
skewness=skewness.dtype.as_numpy_dtype(0.),
tailweight=tailweight)
# Make the AffineScalar bijector, Z --> loc + scale * Z (2 / F_0(2))
c = 2 * scale / f_noskew.forward(ops.convert_to_tensor(2, dtype=dtype))
affine = bijectors.AffineScalar(
shift=loc,
scale=c,
validate_args=validate_args)
bijector = bijectors.Chain([affine, f])
super(SinhArcsinh, self).__init__(
distribution=distribution,
bijector=bijector,
batch_shape=batch_shape,
validate_args=validate_args,
name=name)
self._parameters = parameters
self._loc = loc
self._scale = scale
self._tailweight = tailweight
self._skewness = skewness
@property
def loc(self):
"""The `loc` in `Y := loc + scale @ F(Z) * (2 / F(2))."""
return self._loc
@property
def scale(self):
"""The `LinearOperator` `scale` in `Y := loc + scale @ F(Z) * (2 / F(2))."""
return self._scale
@property
def tailweight(self):
"""Controls the tail decay. `tailweight > 1` means faster than Normal."""
return self._tailweight
@property
def skewness(self):
"""Controls the skewness. `Skewness > 0` means right skew."""
return self._skewness
| hfp/tensorflow-xsmm | tensorflow/contrib/distributions/python/ops/sinh_arcsinh.py | Python | apache-2.0 | 9,206 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-21 22:03
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('showings', '0003_remove_showing_additional_details'),
]
operations = [
migrations.AlterField(
model_name='show',
name='details',
field=models.CharField(help_text=b'Episodes watched etc.', max_length=200),
),
]
| WarwickAnimeSoc/aniMango | showings/migrations/0004_auto_20161021_2203.py | Python | mit | 503 |
__version__ = "0.1.0"
if __name__ == '__main__':
pass
| tomersk/ambhas_python | ambhas/__init__.py | Python | gpl-2.0 | 59 |
from .models import codepipeline_backends
from ..core.models import base_decorator
mock_codepipeline = base_decorator(codepipeline_backends)
| spulec/moto | moto/codepipeline/__init__.py | Python | apache-2.0 | 142 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
###############################################################################
#
# ODOO (ex OpenERP)
# Open Source Management Solution
# Copyright (C) 2001-2015 Micronaet S.r.l. (<https://micronaet.com>)
# Developer: Nicola Riolini @thebrush (<https://it.linkedin.com/in/thebrush>)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import os
import sys
import logging
import openerp
import openerp.netsvc as netsvc
import openerp.addons.decimal_precision as dp
from openerp.osv import fields, osv, expression, orm
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from openerp import SUPERUSER_ID
from openerp import tools
from openerp.tools.translate import _
from openerp.tools.float_utils import float_round as round
from openerp.tools import (DEFAULT_SERVER_DATE_FORMAT,
DEFAULT_SERVER_DATETIME_FORMAT,
DATETIME_FORMATS_MAP,
float_compare)
_logger = logging.getLogger(__name__)
class edi_company_report_this(orm.Model):
    ''' Manage more than one importation, depending on the company
'''
_inherit = 'edi.company'
# -------------------------------------------------------------------------
# OVERRIDE: Collect data for report
# -------------------------------------------------------------------------
def collect_future_order_data_report(self, cr, uid, context=None):
""" Overridable procedure for manage the report data collected in all
company with active EDI company
Report:
'header'
'data'
'empty_record'
"""
this_id = 5
report = super(
edi_company_report_this, self).collect_future_order_data_report(
cr, uid, context=context)
return self.update_report_with_company_data(
cr, uid, this_id, report, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| Micronaet/micronaet-trip | account_trip_edi_c5/status_report.py | Python | agpl-3.0 | 2,670 |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "MovingAverage", cycle_length = 30, transform = "RelativeDifference", sigma = 0.0, exog_count = 20, ar_order = 0); | antoinecarme/pyaf | tests/artificial/transf_RelativeDifference/trend_MovingAverage/cycle_30/ar_/test_artificial_32_RelativeDifference_MovingAverage_30__20.py | Python | bsd-3-clause | 277 |
from PythonQt.QtGui import (QDialog, QVBoxLayout, QHBoxLayout, QLabel,
QLineEdit, QDialogButtonBox)
class MultiInputDialog(QDialog):
def __init__(self, title, label1, label2, parent=None):
super().__init__(parent)
self.setWindowTitle(title)
self.lay = QVBoxLayout(self)
self.lay1 = QHBoxLayout()
self.label1 = QLabel(label1, self)
self.lay1.addWidget(self.label1)
self.input1 = QLineEdit(self)
self.lay1.addWidget(self.input1)
self.lay.addLayout(self.lay1)
self.lay2 = QHBoxLayout()
self.label2 = QLabel(label2, self)
self.lay2.addWidget(self.label2)
self.input2 = QLineEdit(self)
self.lay2.addWidget(self.input2)
self.lay.addLayout(self.lay2)
bbox = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel,
self)
bbox.connect("accepted()", self.accept)
bbox.connect("rejected()", self.reject)
self.lay.addWidget(bbox)
def cleanup(self):
self.lay1.delete()
self.lay2.delete()
@staticmethod
def getTexts(title, label1, label2, text1="", text2="", parent=None):
dlg = MultiInputDialog(title, label1, label2, parent)
dlg.label1.setText(label1)
dlg.input1.setText(text1)
dlg.label2.setText(label2)
dlg.input2.setText(text2)
if dlg.exec_() == QDialog.Accepted:
ret1 = dlg.input1.text
ret2 = dlg.input2.text
dlg.cleanup()
dlg.delete()
return (True, ret1, ret2)
else:
return (False, "", "")
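
# Usage sketch (illustrative, not part of the original module): inside a
# pyTSon plugin, where the PythonQt event loop is already running; the labels
# and default texts here are assumptions.
#
#   ok, host, port = MultiInputDialog.getTexts("Connect", "Host:", "Port:",
#                                              text1="localhost", text2="9987")
#   if ok:
#       print("connecting to %s:%s" % (host, port))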
| pathmann/pyTSon | ressources/python/pytsonui/dialogs.py | Python | gpl-3.0 | 1,674 |
# Copyright 2013,2014 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Dunya
#
# Dunya is free software: you can redistribute it and/or modify it under the
# terms of the GNU Affero General Public License as published by the Free Software
# Foundation (FSF), either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see http://www.gnu.org/licenses/
import re
from django import forms
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
class RegistrationForm(forms.Form):
required_css_class = 'required'
username = forms.CharField(label=u'Username', max_length=30)
first_name = forms.CharField(label=u'First name', max_length=30)
last_name = forms.CharField(label=u'Last name', max_length=30)
affiliation = forms.CharField(label="Affiliation", max_length=100)
email = forms.EmailField(label=u'Email')
password1 = forms.CharField(label=u'Password', widget=forms.PasswordInput())
password2 = forms.CharField(label=u'Password (Again)', widget=forms.PasswordInput())
def clean_affiliation(self):
affiliation = self.cleaned_data['affiliation']
if affiliation == "":
raise ValidationError("Affiliation must be entered")
return affiliation
def clean_password2(self):
if 'password1' in self.cleaned_data:
password1 = self.cleaned_data['password1']
password2 = self.cleaned_data['password2']
if password1 == password2:
return password2
raise forms.ValidationError('Passwords do not match.')
def clean_username(self):
username = self.cleaned_data['username']
if not re.search(r'^[\w.]+$', username):
            raise forms.ValidationError('Username can only contain alphanumeric characters, underscores and periods.')
try:
User.objects.get(username=username)
except User.DoesNotExist:
return username
raise forms.ValidationError('Username is already taken.')
class DeleteAccountForm(forms.Form):
delete = forms.BooleanField(required=False, label="Yes, delete my account")
class UserEditForm(forms.Form):
email = forms.CharField(label=u'Email address', max_length=200)
first_name = forms.CharField(label=u'First Name', max_length=50)
last_name = forms.CharField(label=u'Last Name', max_length=100)
affiliation = forms.CharField(label=u'Affiliation', max_length=100)
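
# Usage sketch (illustrative, not part of the original module): validating
# registration data in a view; the field values are assumptions.
#
#   form = RegistrationForm(data={
#       'username': 'ravi', 'first_name': 'Ravi', 'last_name': 'Shankar',
#       'affiliation': 'MTG-UPF', 'email': 'ravi@example.com',
#       'password1': 's3cret', 'password2': 's3cret'})
#   if form.is_valid():
#       ...  # create the User from form.cleaned_data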
| MTG/dunya | account/forms.py | Python | agpl-3.0 | 2,827 |
__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
""" The GUI """
import os, sys, Queue, threading, glob
from contextlib import contextmanager
from threading import RLock, Lock, Thread
from urllib import unquote
from PyQt5.QtWidgets import QStyle # Gives a nicer error message than import from Qt
from PyQt5.Qt import (
QFileInfo, QObject, QBuffer, Qt, QByteArray, QTranslator,
QCoreApplication, QThread, QEvent, QTimer, pyqtSignal, QDateTime,
QDesktopServices, QFileDialog, QFileIconProvider, QSettings, QIcon,
QApplication, QDialog, QUrl, QFont, QFontDatabase, QLocale, QFontInfo)
ORG_NAME = 'KovidsBrain'
APP_UID = 'libprs500'
from calibre import prints
from calibre.constants import (islinux, iswindows, isbsd, isfrozen, isosx,
plugins, config_dir, filesystem_encoding, isxp)
from calibre.utils.config import Config, ConfigProxy, dynamic, JSONConfig
from calibre.ebooks.metadata import MetaInformation
from calibre.utils.date import UNDEFINED_DATE
from calibre.utils.localization import get_lang
from calibre.utils.filenames import expanduser
# Setup gprefs {{{
gprefs = JSONConfig('gui')
defs = gprefs.defaults
native_menubar_defaults = {
'action-layout-menubar': (
'Add Books', 'Edit Metadata', 'Convert Books',
'Choose Library', 'Save To Disk', 'Preferences',
'Help',
),
'action-layout-menubar-device': (
'Add Books', 'Edit Metadata', 'Convert Books',
'Location Manager', 'Send To Device',
'Save To Disk', 'Preferences', 'Help',
)
}
if isosx:
defs['action-layout-menubar'] = native_menubar_defaults['action-layout-menubar']
defs['action-layout-menubar-device'] = native_menubar_defaults['action-layout-menubar-device']
defs['action-layout-toolbar'] = (
'Add Books', 'Edit Metadata', None, 'Convert Books', 'View', None,
'Choose Library', 'Donate', None, 'Fetch News', 'Store', 'Save To Disk',
'Connect Share', None, 'Remove Books', 'Tweak ePub'
)
defs['action-layout-toolbar-device'] = (
'Add Books', 'Edit Metadata', None, 'Convert Books', 'View',
'Send To Device', None, None, 'Location Manager', None, None,
'Fetch News', 'Store', 'Save To Disk', 'Connect Share', None,
'Remove Books',
)
else:
defs['action-layout-menubar'] = ()
defs['action-layout-menubar-device'] = ()
defs['action-layout-toolbar'] = (
'Add Books', 'Edit Metadata', None, 'Convert Books', 'View', None,
'Store', 'Donate', 'Fetch News', 'Help', None,
'Remove Books', 'Choose Library', 'Save To Disk',
'Connect Share', 'Tweak ePub', 'Preferences',
)
defs['action-layout-toolbar-device'] = (
'Add Books', 'Edit Metadata', None, 'Convert Books', 'View',
'Send To Device', None, None, 'Location Manager', None, None,
'Fetch News', 'Save To Disk', 'Store', 'Connect Share', None,
'Remove Books', None, 'Help', 'Preferences',
)
defs['action-layout-toolbar-child'] = ()
defs['action-layout-context-menu'] = (
'Edit Metadata', 'Send To Device', 'Save To Disk',
'Connect Share', 'Copy To Library', None,
'Convert Books', 'View', 'Open Folder', 'Show Book Details',
'Similar Books', 'Tweak ePub', None, 'Remove Books',
)
defs['action-layout-context-menu-device'] = (
'View', 'Save To Disk', None, 'Remove Books', None,
'Add To Library', 'Edit Collections', 'Match Books'
)
defs['action-layout-context-menu-cover-browser'] = (
'Edit Metadata', 'Send To Device', 'Save To Disk',
'Connect Share', 'Copy To Library', None,
'Convert Books', 'View', 'Open Folder', 'Show Book Details',
'Similar Books', 'Tweak ePub', None, 'Remove Books',
)
defs['show_splash_screen'] = True
defs['toolbar_icon_size'] = 'medium'
defs['automerge'] = 'ignore'
defs['toolbar_text'] = 'always'
defs['font'] = None
defs['tags_browser_partition_method'] = 'first letter'
defs['tags_browser_collapse_at'] = 100
defs['tag_browser_dont_collapse'] = []
defs['edit_metadata_single_layout'] = 'default'
defs['default_author_link'] = 'https://en.wikipedia.org/w/index.php?search={author}'
defs['preserve_date_on_ctl'] = True
defs['manual_add_auto_convert'] = False
defs['auto_convert_same_fmt'] = False
defs['cb_fullscreen'] = False
defs['worker_max_time'] = 0
defs['show_files_after_save'] = True
defs['auto_add_path'] = None
defs['auto_add_check_for_duplicates'] = False
defs['blocked_auto_formats'] = []
defs['auto_add_auto_convert'] = True
defs['auto_add_everything'] = False
defs['ui_style'] = 'calibre' if iswindows or isosx else 'system'
defs['tag_browser_old_look'] = False
defs['book_list_tooltips'] = True
defs['bd_show_cover'] = True
defs['bd_overlay_cover_size'] = False
defs['tags_browser_category_icons'] = {}
defs['cover_browser_reflections'] = True
defs['book_list_extra_row_spacing'] = 0
defs['refresh_book_list_on_bulk_edit'] = True
defs['cover_grid_width'] = 0
defs['cover_grid_height'] = 0
defs['cover_grid_spacing'] = 0
defs['cover_grid_color'] = (80, 80, 80)
defs['cover_grid_cache_size_multiple'] = 5
defs['cover_grid_disk_cache_size'] = 2500
defs['cover_grid_show_title'] = False
defs['cover_grid_texture'] = None
defs['show_vl_tabs'] = False
defs['show_highlight_toggle_button'] = False
defs['add_comments_to_email'] = False
defs['cb_preserve_aspect_ratio'] = False
defs['show_rating_in_cover_browser'] = True
defs['gpm_template_editor_font_size'] = 10
defs['show_emblems'] = False
defs['emblem_size'] = 32
defs['emblem_position'] = 'left'
del defs
# }}}
UNDEFINED_QDATETIME = QDateTime(UNDEFINED_DATE)
ALL_COLUMNS = ['title', 'ondevice', 'authors', 'size', 'timestamp', 'rating', 'publisher',
'tags', 'series', 'pubdate']
def _config(): # {{{
c = Config('gui', 'preferences for the calibre GUI')
c.add_opt('send_to_storage_card_by_default', default=False,
help=_('Send file to storage card instead of main memory by default'))
c.add_opt('confirm_delete', default=False,
help=_('Confirm before deleting'))
c.add_opt('main_window_geometry', default=None,
help=_('Main window geometry'))
c.add_opt('new_version_notification', default=True,
help=_('Notify when a new version is available'))
c.add_opt('use_roman_numerals_for_series_number', default=True,
help=_('Use Roman numerals for series number'))
c.add_opt('sort_tags_by', default='name',
help=_('Sort tags list by name, popularity, or rating'))
c.add_opt('match_tags_type', default='any',
help=_('Match tags by any or all.'))
c.add_opt('cover_flow_queue_length', default=6,
help=_('Number of covers to show in the cover browsing mode'))
c.add_opt('LRF_conversion_defaults', default=[],
help=_('Defaults for conversion to LRF'))
c.add_opt('LRF_ebook_viewer_options', default=None,
help=_('Options for the LRF ebook viewer'))
c.add_opt('internally_viewed_formats', default=['LRF', 'EPUB', 'LIT',
'MOBI', 'PRC', 'POBI', 'AZW', 'AZW3', 'HTML', 'FB2', 'PDB', 'RB',
'SNB', 'HTMLZ', 'KEPUB'], help=_(
'Formats that are viewed using the internal viewer'))
c.add_opt('column_map', default=ALL_COLUMNS,
help=_('Columns to be displayed in the book list'))
c.add_opt('autolaunch_server', default=False, help=_('Automatically launch content server on application startup'))
c.add_opt('oldest_news', default=60, help=_('Oldest news kept in database'))
c.add_opt('systray_icon', default=False, help=_('Show system tray icon'))
c.add_opt('upload_news_to_device', default=True,
help=_('Upload downloaded news to device'))
c.add_opt('delete_news_from_library_on_upload', default=False,
help=_('Delete news books from library after uploading to device'))
c.add_opt('separate_cover_flow', default=False,
help=_('Show the cover flow in a separate window instead of in the main calibre window'))
c.add_opt('disable_tray_notification', default=False,
help=_('Disable notifications from the system tray icon'))
c.add_opt('default_send_to_device_action', default=None,
help=_('Default action to perform when send to device button is '
'clicked'))
c.add_opt('asked_library_thing_password', default=False,
help='Asked library thing password at least once.')
c.add_opt('search_as_you_type', default=False,
help=_('Start searching as you type. If this is disabled then search will '
'only take place when the Enter or Return key is pressed.'))
c.add_opt('highlight_search_matches', default=False,
help=_('When searching, show all books with search results '
'highlighted instead of showing only the matches. You can use the '
'N or F3 keys to go to the next match.'))
c.add_opt('save_to_disk_template_history', default=[],
help='Previously used Save to Disk templates')
c.add_opt('send_to_device_template_history', default=[],
help='Previously used Send to Device templates')
c.add_opt('main_search_history', default=[],
help='Search history for the main GUI')
c.add_opt('viewer_search_history', default=[],
help='Search history for the ebook viewer')
c.add_opt('viewer_toc_search_history', default=[],
help='Search history for the ToC in the ebook viewer')
c.add_opt('lrf_viewer_search_history', default=[],
help='Search history for the LRF viewer')
c.add_opt('scheduler_search_history', default=[],
help='Search history for the recipe scheduler')
c.add_opt('plugin_search_history', default=[],
help='Search history for the plugin preferences')
c.add_opt('shortcuts_search_history', default=[],
help='Search history for the keyboard preferences')
c.add_opt('jobs_search_history', default=[],
        help='Search history for the jobs dialog')
c.add_opt('tweaks_search_history', default=[],
help='Search history for tweaks')
c.add_opt('worker_limit', default=6,
help=_(
'Maximum number of simultaneous conversion/news download jobs. '
'This number is twice the actual value for historical reasons.'))
c.add_opt('get_social_metadata', default=True,
help=_('Download social metadata (tags/rating/etc.)'))
c.add_opt('overwrite_author_title_metadata', default=True,
help=_('Overwrite author and title with new metadata'))
c.add_opt('auto_download_cover', default=False,
help=_('Automatically download the cover, if available'))
c.add_opt('enforce_cpu_limit', default=True,
help=_('Limit max simultaneous jobs to number of CPUs'))
c.add_opt('gui_layout', choices=['wide', 'narrow'],
help=_('The layout of the user interface. Wide has the '
'book details panel on the right and narrow has '
'it at the bottom.'), default='wide')
c.add_opt('show_avg_rating', default=True,
help=_('Show the average rating per item indication in the tag browser'))
c.add_opt('disable_animations', default=False,
help=_('Disable UI animations'))
# This option is no longer used. It remains for compatibility with upgrades
# so the value can be migrated
c.add_opt('tag_browser_hidden_categories', default=set(),
help=_('tag browser categories not to display'))
return ConfigProxy(c)
config = _config()
# }}}
QSettings.setPath(QSettings.IniFormat, QSettings.UserScope, config_dir)
QSettings.setPath(QSettings.IniFormat, QSettings.SystemScope, config_dir)
QSettings.setDefaultFormat(QSettings.IniFormat)
def available_heights():
desktop = QCoreApplication.instance().desktop()
return map(lambda x: x.height(), map(desktop.availableGeometry, range(desktop.screenCount())))
def available_height():
desktop = QCoreApplication.instance().desktop()
return desktop.availableGeometry().height()
def max_available_height():
return max(available_heights())
def min_available_height():
return min(available_heights())
def available_width():
desktop = QCoreApplication.instance().desktop()
return desktop.availableGeometry().width()
def get_windows_color_depth():
import win32gui, win32con, win32print
hwin = win32gui.GetDesktopWindow()
hwindc = win32gui.GetWindowDC(hwin)
ans = win32print.GetDeviceCaps(hwindc, win32con.BITSPIXEL)
win32gui.ReleaseDC(hwin, hwindc)
return ans
def get_screen_dpi():
d = QApplication.desktop()
return (d.logicalDpiX(), d.logicalDpiY())
_is_widescreen = None
def is_widescreen():
global _is_widescreen
if _is_widescreen is None:
try:
_is_widescreen = float(available_width())/available_height() > 1.4
except:
_is_widescreen = False
return _is_widescreen
def extension(path):
return os.path.splitext(path)[1][1:].lower()
def warning_dialog(parent, title, msg, det_msg='', show=False,
show_copy_button=True):
from calibre.gui2.dialogs.message_box import MessageBox
d = MessageBox(MessageBox.WARNING, _('WARNING:')+ ' ' +
title, msg, det_msg, parent=parent,
show_copy_button=show_copy_button)
if show:
return d.exec_()
return d
def error_dialog(parent, title, msg, det_msg='', show=False,
show_copy_button=True):
from calibre.gui2.dialogs.message_box import MessageBox
d = MessageBox(MessageBox.ERROR, _('ERROR:')+ ' ' +
title, msg, det_msg, parent=parent,
show_copy_button=show_copy_button)
if show:
return d.exec_()
return d
def question_dialog(parent, title, msg, det_msg='', show_copy_button=False,
default_yes=True,
# Skippable dialogs
# Set skip_dialog_name to a unique name for this dialog
# Set skip_dialog_msg to a message displayed to the user
skip_dialog_name=None, skip_dialog_msg=_('Show this confirmation again'),
skip_dialog_skipped_value=True, skip_dialog_skip_precheck=True,
# Override icon (QIcon to be used as the icon for this dialog or string for I())
override_icon=None,
# Change the text/icons of the yes and no buttons.
# The icons must be QIcon objects or strings for I()
yes_text=None, no_text=None, yes_icon=None, no_icon=None,
):
from calibre.gui2.dialogs.message_box import MessageBox
auto_skip = set(gprefs.get('questions_to_auto_skip', []))
if (skip_dialog_name is not None and skip_dialog_name in auto_skip):
return bool(skip_dialog_skipped_value)
d = MessageBox(MessageBox.QUESTION, title, msg, det_msg, parent=parent,
show_copy_button=show_copy_button, default_yes=default_yes,
q_icon=override_icon, yes_text=yes_text, no_text=no_text,
yes_icon=yes_icon, no_icon=no_icon)
if skip_dialog_name is not None and skip_dialog_msg:
tc = d.toggle_checkbox
tc.setVisible(True)
tc.setText(skip_dialog_msg)
tc.setChecked(bool(skip_dialog_skip_precheck))
d.resize_needed.emit()
ret = d.exec_() == d.Accepted
if skip_dialog_name is not None and not d.toggle_checkbox.isChecked():
auto_skip.add(skip_dialog_name)
gprefs.set('questions_to_auto_skip', list(auto_skip))
return ret
def info_dialog(parent, title, msg, det_msg='', show=False,
show_copy_button=True):
from calibre.gui2.dialogs.message_box import MessageBox
d = MessageBox(MessageBox.INFO, title, msg, det_msg, parent=parent,
show_copy_button=show_copy_button)
if show:
return d.exec_()
return d
def show_restart_warning(msg, parent=None):
d = warning_dialog(parent, _('Restart needed'), msg,
show_copy_button=False)
b = d.bb.addButton(_('Restart calibre now'), d.bb.AcceptRole)
b.setIcon(QIcon(I('lt.png')))
d.do_restart = False
def rf():
d.do_restart = True
b.clicked.connect(rf)
d.set_details('')
d.exec_()
b.clicked.disconnect()
return d.do_restart
class Dispatcher(QObject):
'''
Convenience class to use Qt signals with arbitrary python callables.
By default, ensures that a function call always happens in the
thread this Dispatcher was created in.
Note that if you create the Dispatcher in a thread without an event loop of
its own, the function call will happen in the GUI thread (I think).
'''
dispatch_signal = pyqtSignal(object, object)
def __init__(self, func, queued=True, parent=None):
QObject.__init__(self, parent)
self.func = func
typ = Qt.QueuedConnection
if not queued:
typ = Qt.AutoConnection if queued is None else Qt.DirectConnection
self.dispatch_signal.connect(self.dispatch, type=typ)
def __call__(self, *args, **kwargs):
self.dispatch_signal.emit(args, kwargs)
def dispatch(self, args, kwargs):
self.func(*args, **kwargs)
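
# Usage sketch (illustrative, not part of the original module): wrap a GUI
# callable so that worker threads can invoke it safely; update_progress is a
# hypothetical function that must run in the GUI thread.
#
#   notify = Dispatcher(update_progress)  # created in the GUI thread
#   # ... hand `notify` to a worker thread, which may then call:
#   notify(75, 'three quarters done')     # executes in the GUI thread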
class FunctionDispatcher(QObject):
'''
Convenience class to use Qt signals with arbitrary python functions.
By default, ensures that a function call always happens in the
thread this FunctionDispatcher was created in.
Note that you must create FunctionDispatcher objects in the GUI thread.
'''
dispatch_signal = pyqtSignal(object, object, object)
def __init__(self, func, queued=True, parent=None):
global gui_thread
if gui_thread is None:
gui_thread = QThread.currentThread()
if not is_gui_thread():
raise ValueError(
'You can only create a FunctionDispatcher in the GUI thread')
QObject.__init__(self, parent)
self.func = func
typ = Qt.QueuedConnection
if not queued:
typ = Qt.AutoConnection if queued is None else Qt.DirectConnection
self.dispatch_signal.connect(self.dispatch, type=typ)
self.q = Queue.Queue()
self.lock = threading.Lock()
def __call__(self, *args, **kwargs):
if is_gui_thread():
return self.func(*args, **kwargs)
with self.lock:
self.dispatch_signal.emit(self.q, args, kwargs)
res = self.q.get()
return res
def dispatch(self, q, args, kwargs):
try:
res = self.func(*args, **kwargs)
except:
res = None
q.put(res)
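
# Usage sketch (illustrative): unlike Dispatcher, a call through a
# FunctionDispatcher blocks until the GUI thread returns a result;
# show_password_dialog and account are hypothetical names.
#
#   get_password = FunctionDispatcher(show_password_dialog)  # GUI thread
#   password = get_password(account)  # safe to call from a worker thread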
class GetMetadata(QObject):
'''
Convenience class to ensure that metadata readers are used only in the
GUI thread. Must be instantiated in the GUI thread.
'''
edispatch = pyqtSignal(object, object, object)
idispatch = pyqtSignal(object, object, object)
metadataf = pyqtSignal(object, object)
metadata = pyqtSignal(object, object)
def __init__(self):
QObject.__init__(self)
self.edispatch.connect(self._get_metadata, type=Qt.QueuedConnection)
self.idispatch.connect(self._from_formats, type=Qt.QueuedConnection)
def __call__(self, id, *args, **kwargs):
self.edispatch.emit(id, args, kwargs)
def from_formats(self, id, *args, **kwargs):
self.idispatch.emit(id, args, kwargs)
def _from_formats(self, id, args, kwargs):
from calibre.ebooks.metadata.meta import metadata_from_formats
try:
mi = metadata_from_formats(*args, **kwargs)
except:
mi = MetaInformation('', [_('Unknown')])
self.metadataf.emit(id, mi)
def _get_metadata(self, id, args, kwargs):
from calibre.ebooks.metadata.meta import get_metadata
try:
mi = get_metadata(*args, **kwargs)
except:
mi = MetaInformation('', [_('Unknown')])
self.metadata.emit(id, mi)
class FileIconProvider(QFileIconProvider):
ICONS = {
'default' : 'unknown',
'dir' : 'dir',
'zero' : 'zero',
'jpeg' : 'jpeg',
'jpg' : 'jpeg',
'gif' : 'gif',
'png' : 'png',
'bmp' : 'bmp',
'cbz' : 'cbz',
'cbr' : 'cbr',
'svg' : 'svg',
'html' : 'html',
'htmlz' : 'html',
'htm' : 'html',
'xhtml' : 'html',
'xhtm' : 'html',
'lit' : 'lit',
'lrf' : 'lrf',
'lrx' : 'lrx',
'pdf' : 'pdf',
'pdr' : 'zero',
'rar' : 'rar',
'zip' : 'zip',
'txt' : 'txt',
'text' : 'txt',
'prc' : 'mobi',
'azw' : 'mobi',
'mobi' : 'mobi',
'pobi' : 'mobi',
'mbp' : 'zero',
'azw1' : 'tpz',
'azw2' : 'azw2',
'azw3' : 'azw3',
'azw4' : 'pdf',
'tpz' : 'tpz',
'tan' : 'zero',
'epub' : 'epub',
'fb2' : 'fb2',
'rtf' : 'rtf',
'odt' : 'odt',
'snb' : 'snb',
'djv' : 'djvu',
'djvu' : 'djvu',
'xps' : 'xps',
'oxps' : 'xps',
'docx' : 'docx',
'opml' : 'opml',
}
def __init__(self):
QFileIconProvider.__init__(self)
upath, bpath = I('mimetypes'), I('mimetypes', allow_user_override=False)
if upath != bpath:
# User has chosen to override mimetype icons
path_map = {v:I('mimetypes/%s.png' % v) for v in set(self.ICONS.itervalues())}
icons = self.ICONS.copy()
for uicon in glob.glob(os.path.join(upath, '*.png')):
ukey = os.path.basename(uicon).rpartition('.')[0].lower()
if ukey not in path_map:
path_map[ukey] = uicon
icons[ukey] = ukey
else:
path_map = {v:os.path.join(bpath, v + '.png') for v in set(self.ICONS.itervalues())}
icons = self.ICONS
self.icons = {k:path_map[v] for k, v in icons.iteritems()}
self.icons['calibre'] = I('lt.png', allow_user_override=False)
for i in ('dir', 'default', 'zero'):
self.icons[i] = QIcon(self.icons[i])
def key_from_ext(self, ext):
key = ext if ext in self.icons.keys() else 'default'
if key == 'default' and ext.count('.') > 0:
ext = ext.rpartition('.')[2]
key = ext if ext in self.icons.keys() else 'default'
return key
def cached_icon(self, key):
candidate = self.icons[key]
if isinstance(candidate, QIcon):
return candidate
icon = QIcon(candidate)
self.icons[key] = icon
return icon
def icon_from_ext(self, ext):
key = self.key_from_ext(ext.lower() if ext else '')
return self.cached_icon(key)
def load_icon(self, fileinfo):
key = 'default'
icons = self.icons
if fileinfo.isSymLink():
if not fileinfo.exists():
return icons['zero']
fileinfo = QFileInfo(fileinfo.readLink())
if fileinfo.isDir():
key = 'dir'
else:
ext = unicode(fileinfo.completeSuffix()).lower()
key = self.key_from_ext(ext)
return self.cached_icon(key)
def icon(self, arg):
if isinstance(arg, QFileInfo):
return self.load_icon(arg)
if arg == QFileIconProvider.Folder:
return self.icons['dir']
if arg == QFileIconProvider.File:
return self.icons['default']
return QFileIconProvider.icon(self, arg)
_file_icon_provider = None
def initialize_file_icon_provider():
global _file_icon_provider
if _file_icon_provider is None:
_file_icon_provider = FileIconProvider()
def file_icon_provider():
global _file_icon_provider
initialize_file_icon_provider()
return _file_icon_provider
def select_initial_dir(q):
while q:
c = os.path.dirname(q)
if c == q:
break
if os.path.exists(c):
return c
q = c
return expanduser(u'~')
class FileDialog(QObject):
def __init__(self, title=_('Choose Files'),
filters=[],
add_all_files_filter=True,
parent=None,
modal=True,
name='',
mode=QFileDialog.ExistingFiles,
default_dir=u'~',
no_save_dir=False,
combine_file_and_saved_dir=False
):
QObject.__init__(self)
ftext = ''
if filters:
for filter in filters:
text, extensions = filter
extensions = ['*'+(i if i.startswith('.') else '.'+i) for i in
extensions]
ftext += '%s (%s);;'%(text, ' '.join(extensions))
if add_all_files_filter or not ftext:
ftext += 'All files (*)'
if ftext.endswith(';;'):
ftext = ftext[:-2]
self.dialog_name = name if name else 'dialog_' + title
self.selected_files = None
self.fd = None
if combine_file_and_saved_dir:
bn = os.path.basename(default_dir)
prev = dynamic.get(self.dialog_name,
expanduser(u'~'))
if os.path.exists(prev):
if os.path.isfile(prev):
prev = os.path.dirname(prev)
else:
prev = expanduser(u'~')
initial_dir = os.path.join(prev, bn)
elif no_save_dir:
initial_dir = expanduser(default_dir)
else:
initial_dir = dynamic.get(self.dialog_name,
expanduser(default_dir))
if not isinstance(initial_dir, basestring):
initial_dir = expanduser(default_dir)
if not initial_dir or (not os.path.exists(initial_dir) and not (
mode == QFileDialog.AnyFile and (no_save_dir or combine_file_and_saved_dir))):
initial_dir = select_initial_dir(initial_dir)
self.selected_files = []
use_native_dialog = 'CALIBRE_NO_NATIVE_FILEDIALOGS' not in os.environ
with sanitize_env_vars():
opts = QFileDialog.Option()
if not use_native_dialog:
opts |= QFileDialog.DontUseNativeDialog
if mode == QFileDialog.AnyFile:
f = QFileDialog.getSaveFileName(parent, title,
initial_dir, ftext, "", opts)
if f and f[0]:
self.selected_files.append(f[0])
elif mode == QFileDialog.ExistingFile:
f = QFileDialog.getOpenFileName(parent, title,
initial_dir, ftext, "", opts)
if f and f[0] and os.path.exists(f[0]):
self.selected_files.append(f[0])
elif mode == QFileDialog.ExistingFiles:
fs = QFileDialog.getOpenFileNames(parent, title, initial_dir,
ftext, "", opts)
if fs and fs[0]:
for f in fs[0]:
f = unicode(f)
if not f:
continue
if not os.path.exists(f):
# QFileDialog for some reason quotes spaces
# on linux if there is more than one space in a row
f = unquote(f)
if f and os.path.exists(f):
self.selected_files.append(f)
else:
if mode == QFileDialog.Directory:
opts |= QFileDialog.ShowDirsOnly
f = unicode(QFileDialog.getExistingDirectory(parent, title, initial_dir, opts))
if os.path.exists(f):
self.selected_files.append(f)
if self.selected_files:
self.selected_files = [unicode(q) for q in self.selected_files]
saved_loc = self.selected_files[0]
if os.path.isfile(saved_loc):
saved_loc = os.path.dirname(saved_loc)
if not no_save_dir:
dynamic[self.dialog_name] = saved_loc
self.accepted = bool(self.selected_files)
def get_files(self):
if self.selected_files is None:
return tuple(os.path.abspath(unicode(i)) for i in self.fd.selectedFiles())
return tuple(self.selected_files)
def choose_dir(window, name, title, default_dir='~', no_save_dir=False):
fd = FileDialog(title=title, filters=[], add_all_files_filter=False,
parent=window, name=name, mode=QFileDialog.Directory,
default_dir=default_dir, no_save_dir=no_save_dir)
dir = fd.get_files()
fd.setParent(None)
if dir:
return dir[0]
def choose_osx_app(window, name, title, default_dir='/Applications'):
fd = FileDialog(title=title, parent=window, name=name, mode=QFileDialog.ExistingFile,
default_dir=default_dir)
app = fd.get_files()
fd.setParent(None)
if app:
return app
def choose_files(window, name, title,
filters=[], all_files=True, select_only_single_file=False, default_dir=u'~'):
'''
Ask user to choose a bunch of files.
:param name: Unique dialog name used to store the opened directory
:param title: Title to show in dialogs titlebar
:param filters: list of allowable extensions. Each element of the list
must be a 2-tuple with first element a string describing
the type of files to be filtered and second element a list
of extensions.
:param all_files: If True add All files to filters.
:param select_only_single_file: If True only one file can be selected
'''
mode = QFileDialog.ExistingFile if select_only_single_file else QFileDialog.ExistingFiles
fd = FileDialog(title=title, name=name, filters=filters, default_dir=default_dir,
parent=window, add_all_files_filter=all_files, mode=mode,
)
fd.setParent(None)
if fd.accepted:
return fd.get_files()
return None
def choose_save_file(window, name, title, filters=[], all_files=True, initial_path=None, initial_filename=None):
'''
Ask user to choose a file to save to. Can be a non-existent file.
:param filters: list of allowable extensions. Each element of the list
must be a 2-tuple with first element a string describing
the type of files to be filtered and second element a list
of extensions.
:param all_files: If True add All files to filters.
:param initial_path: The initially selected path (does not need to exist). Cannot be used with initial_filename.
:param initial_filename: If specified, the initially selected path is this filename in the previously used directory. Cannot be used with initial_path.
'''
kwargs = dict(title=title, name=name, filters=filters,
parent=window, add_all_files_filter=all_files, mode=QFileDialog.AnyFile)
if initial_path is not None:
kwargs['no_save_dir'] = True
kwargs['default_dir'] = initial_path
elif initial_filename is not None:
kwargs['combine_file_and_saved_dir'] = True
kwargs['default_dir'] = initial_filename
fd = FileDialog(**kwargs)
fd.setParent(None)
ans = None
if fd.accepted:
ans = fd.get_files()
if ans:
ans = ans[0]
return ans
def choose_images(window, name, title, select_only_single_file=True,
formats=('png', 'gif', 'jpg', 'jpeg', 'svg')):
mode = QFileDialog.ExistingFile if select_only_single_file else QFileDialog.ExistingFiles
fd = FileDialog(title=title, name=name,
filters=[('Images', list(formats))],
parent=window, add_all_files_filter=False, mode=mode,
)
fd.setParent(None)
if fd.accepted:
return fd.get_files()
return None
def pixmap_to_data(pixmap, format='JPEG', quality=90):
'''
Return the QPixmap pixmap as a string saved in the specified format.
'''
ba = QByteArray()
buf = QBuffer(ba)
buf.open(QBuffer.WriteOnly)
pixmap.save(buf, format, quality=quality)
return bytes(ba.data())
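
# Usage sketch (illustrative): serialize a QPixmap for storage; `pixmap` is
# assumed to be a valid QPixmap instance.
#
#   data = pixmap_to_data(pixmap, format='PNG')
#   with open('cover.png', 'wb') as f:
#       f.write(data)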
def decouple(prefix):
' Ensure that config files used by utility code are not the same as those used by the main calibre GUI '
dynamic.decouple(prefix)
from calibre.gui2.widgets import history
history.decouple(prefix)
class ResizableDialog(QDialog):
def __init__(self, *args, **kwargs):
QDialog.__init__(self, *args)
self.setupUi(self)
desktop = QCoreApplication.instance().desktop()
geom = desktop.availableGeometry(self)
nh, nw = geom.height()-25, geom.width()-10
if nh < 0:
nh = max(800, self.height())
if nw < 0:
            nw = max(600, self.width())
nh = min(self.height(), nh)
nw = min(self.width(), nw)
self.resize(nw, nh)
class Translator(QTranslator):
'''
Translator to load translations for strings in Qt from the calibre
translations. Does not support advanced features of Qt like disambiguation
and plural forms.
'''
def translate(self, *args, **kwargs):
try:
src = unicode(args[1])
except:
return u''
t = _
return t(src)
gui_thread = None
qt_app = None
builtin_fonts_loaded = False
def load_builtin_fonts():
global _rating_font, builtin_fonts_loaded
    # Load the builtin fonts, and any fonts added to calibre by the user,
    # into Qt
if builtin_fonts_loaded:
return
builtin_fonts_loaded = True
for ff in glob.glob(P('fonts/liberation/*.?tf')) + \
[P('fonts/calibreSymbols.otf')] + \
glob.glob(os.path.join(config_dir, 'fonts', '*.?tf')):
if ff.rpartition('.')[-1].lower() in {'ttf', 'otf'}:
with open(ff, 'rb') as s:
# Windows requires font files to be executable for them to be
# loaded successfully, so we use the in memory loader
fid = QFontDatabase.addApplicationFontFromData(s.read())
if fid > -1:
fam = QFontDatabase.applicationFontFamilies(fid)
fam = set(map(unicode, fam))
if u'calibre Symbols' in fam:
_rating_font = u'calibre Symbols'
def setup_gui_option_parser(parser):
if islinux:
parser.add_option('--detach', default=False, action='store_true',
help=_('Detach from the controlling terminal, if any (linux only)'))
class Application(QApplication):
def __init__(self, args, force_calibre_style=False, override_program_name=None, headless=False, color_prefs=gprefs):
self.file_event_hook = None
if override_program_name:
args = [override_program_name] + args[1:]
if headless:
if not args:
args = sys.argv[:1]
args.extend(['-platformpluginpath', sys.extensions_location, '-platform', 'headless'])
self.headless = headless
qargs = [i.encode('utf-8') if isinstance(i, unicode) else i for i in args]
self.pi = plugins['progress_indicator'][0]
QApplication.__init__(self, qargs)
if islinux or isbsd:
self.setAttribute(Qt.AA_DontUseNativeMenuBar, 'CALIBRE_NO_NATIVE_MENUBAR' in os.environ)
self.setup_styles(force_calibre_style)
f = QFont(QApplication.font())
if (f.family(), f.pointSize()) == ('Sans Serif', 9): # Hard coded Qt settings, no user preference detected
f.setPointSize(10)
QApplication.setFont(f)
f = QFontInfo(f)
self.original_font = (f.family(), f.pointSize(), f.weight(), f.italic(), 100)
if not self.using_calibre_style and self.style().objectName() == 'fusion':
# Since Qt is using the fusion style anyway, specialize it
self.load_calibre_style()
fi = gprefs['font']
if fi is not None:
font = QFont(*(fi[:4]))
s = gprefs.get('font_stretch', None)
if s is not None:
font.setStretch(s)
QApplication.setFont(font)
dl = QLocale(get_lang())
if unicode(dl.bcp47Name()) != u'C':
QLocale.setDefault(dl)
global gui_thread, qt_app
gui_thread = QThread.currentThread()
self._translator = None
self.load_translations()
qt_app = self
self._file_open_paths = []
self._file_open_lock = RLock()
if not isosx:
# OS X uses a native color dialog that does not support custom
# colors
self.color_prefs = color_prefs
self.read_custom_colors()
self.lastWindowClosed.connect(self.save_custom_colors)
if isxp:
error_dialog(None, _('Windows XP not supported'), '<p>' + _(
'calibre versions newer than 2.0 do not run on Windows XP. This is'
' because the graphics toolkit calibre uses (Qt 5) crashes a lot'
' on Windows XP. We suggest you stay with <a href="%s">calibre 1.48</a>'
' which works well on Windows XP.') % 'http://download.calibre-ebook.com/1.48.0/', show=True)
raise SystemExit(1)
if iswindows:
# On windows the highlighted colors for inactive widgets are the
# same as non highlighted colors. This is a regression from Qt 4.
# https://bugreports.qt-project.org/browse/QTBUG-41060
p = self.palette()
for role in (p.Highlight, p.HighlightedText, p.Base, p.AlternateBase):
p.setColor(p.Inactive, role, p.color(p.Active, role))
self.setPalette(p)
# Prevent text copied to the clipboard from being lost on quit due to
# Qt 5 bug: https://bugreports.qt-project.org/browse/QTBUG-41125
self.aboutToQuit.connect(self.flush_clipboard)
def flush_clipboard(self):
try:
if self.clipboard().ownsClipboard():
import ctypes
ctypes.WinDLL('ole32.dll').OleFlushClipboard()
except Exception:
import traceback
traceback.print_exc()
def load_builtin_fonts(self, scan_for_fonts=False):
if scan_for_fonts:
from calibre.utils.fonts.scanner import font_scanner
# Start scanning the users computer for fonts
font_scanner
load_builtin_fonts()
def setup_styles(self, force_calibre_style):
depth_ok = True
if iswindows:
# There are some people that still run 16 bit winxp installs. The
# new style does not render well on 16bit machines.
try:
depth_ok = get_windows_color_depth() >= 32
except:
import traceback
traceback.print_exc()
if not depth_ok:
                prints('Color depth is less than 32 bits, disabling the modern look')
self.using_calibre_style = force_calibre_style or 'CALIBRE_IGNORE_SYSTEM_THEME' in os.environ or (
depth_ok and gprefs['ui_style'] != 'system')
if self.using_calibre_style:
self.load_calibre_style()
def load_calibre_style(self):
icon_map = self.__icon_map_memory_ = {}
pcache = {}
for k, v in {
'DialogYesButton': u'ok.png',
'DialogNoButton': u'window-close.png',
'DialogCloseButton': u'window-close.png',
'DialogOkButton': u'ok.png',
'DialogCancelButton': u'window-close.png',
'DialogHelpButton': u'help.png',
'DialogOpenButton': u'document_open.png',
'DialogSaveButton': u'save.png',
'DialogApplyButton': u'ok.png',
'DialogDiscardButton': u'trash.png',
'MessageBoxInformation': u'dialog_information.png',
'MessageBoxWarning': u'dialog_warning.png',
'MessageBoxCritical': u'dialog_error.png',
'MessageBoxQuestion': u'dialog_question.png',
'BrowserReload': u'view-refresh.png',
}.iteritems():
if v not in pcache:
p = I(v)
if isinstance(p, bytes):
p = p.decode(filesystem_encoding)
# if not os.path.exists(p): raise ValueError(p)
pcache[v] = p
v = pcache[v]
icon_map[getattr(QStyle, 'SP_'+k)] = v
self.pi.load_style(icon_map)
def _send_file_open_events(self):
with self._file_open_lock:
if self._file_open_paths:
self.file_event_hook(self._file_open_paths)
self._file_open_paths = []
def load_translations(self):
if self._translator is not None:
self.removeTranslator(self._translator)
self._translator = Translator(self)
self.installTranslator(self._translator)
def event(self, e):
if callable(self.file_event_hook) and e.type() == QEvent.FileOpen:
path = unicode(e.file())
if os.access(path, os.R_OK):
with self._file_open_lock:
self._file_open_paths.append(path)
QTimer.singleShot(1000, self._send_file_open_events)
return True
else:
return QApplication.event(self, e)
@dynamic_property
def current_custom_colors(self):
from PyQt5.Qt import QColorDialog, QColor
def fget(self):
return [col.getRgb() for col in
(QColorDialog.customColor(i) for i in xrange(QColorDialog.customCount()))]
def fset(self, colors):
num = min(len(colors), QColorDialog.customCount())
for i in xrange(num):
QColorDialog.setCustomColor(i, QColor(*colors[i]))
return property(fget=fget, fset=fset)
def read_custom_colors(self):
colors = self.color_prefs.get('custom_colors_for_color_dialog', None)
if colors is not None:
self.current_custom_colors = colors
def save_custom_colors(self):
# Qt 5 regression, it no longer saves custom colors
colors = self.current_custom_colors
if colors != self.color_prefs.get('custom_colors_for_color_dialog', None):
self.color_prefs.set('custom_colors_for_color_dialog', colors)
def __enter__(self):
self.setQuitOnLastWindowClosed(False)
def __exit__(self, *args):
self.setQuitOnLastWindowClosed(True)
_store_app = None
@contextmanager
def sanitize_env_vars():
'''Unset various environment variables that calibre uses. This
is needed to prevent library conflicts when launching external utilities.'''
if islinux and isfrozen:
env_vars = {'LD_LIBRARY_PATH':'/lib', 'QT_PLUGIN_PATH':'/lib/qt_plugins', 'MAGICK_CODER_MODULE_PATH':None, 'MAGICK_CODER_FILTER_PATH':None}
elif iswindows:
env_vars = {k:None for k in 'MAGICK_HOME MAGICK_CONFIGURE_PATH MAGICK_CODER_MODULE_PATH MAGICK_FILTER_MODULE_PATH QT_PLUGIN_PATH'.split()}
elif isosx:
env_vars = {k:None for k in
'FONTCONFIG_FILE FONTCONFIG_PATH MAGICK_CONFIGURE_PATH MAGICK_CODER_MODULE_PATH MAGICK_FILTER_MODULE_PATH QT_PLUGIN_PATH'.split()}
else:
env_vars = {}
originals = {x:os.environ.get(x, '') for x in env_vars}
changed = {x:False for x in env_vars}
for var, suffix in env_vars.iteritems():
paths = [x for x in originals[var].split(os.pathsep) if x]
npaths = [] if suffix is None else [x for x in paths if x != (sys.frozen_path + suffix)]
if len(npaths) < len(paths):
if npaths:
os.environ[var] = os.pathsep.join(npaths)
else:
del os.environ[var]
changed[var] = True
try:
yield
finally:
for var, orig in originals.iteritems():
if changed[var]:
if orig:
os.environ[var] = orig
elif var in os.environ:
del os.environ[var]
SanitizeLibraryPath = sanitize_env_vars # For old plugins
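
# Usage sketch (illustrative): launch an external tool without calibre's
# bundled-library paths leaking into its environment; the command name is
# hypothetical.
#
#   import subprocess
#   with sanitize_env_vars():
#       subprocess.Popen(['some-external-tool'])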
def open_url(qurl):
# Qt 5 requires QApplication to be constructed before trying to use
# QDesktopServices::openUrl()
ensure_app()
if isinstance(qurl, basestring):
qurl = QUrl(qurl)
with sanitize_env_vars():
QDesktopServices.openUrl(qurl)
def get_current_db():
'''
This method will try to return the current database in use by the user as
efficiently as possible, i.e. without constructing duplicate
LibraryDatabase objects.
'''
from calibre.gui2.ui import get_gui
gui = get_gui()
if gui is not None and gui.current_db is not None:
return gui.current_db
from calibre.library import db
return db()
def open_local_file(path):
if iswindows:
with sanitize_env_vars():
os.startfile(os.path.normpath(path))
elif islinux or isbsd:
import subprocess
if isinstance(path, unicode):
path = path.encode(filesystem_encoding)
with sanitize_env_vars():
p = subprocess.Popen(['xdg-open', path])
t = Thread(name='WaitXDGOpen', target=p.wait)
t.daemon = True
t.start()
else:
url = QUrl.fromLocalFile(path)
open_url(url)
_ea_lock = Lock()
def ensure_app(headless=True):
global _store_app
with _ea_lock:
if _store_app is None and QApplication.instance() is None:
args = sys.argv[:1]
if headless and (islinux or isbsd):
args += ['-platformpluginpath', sys.extensions_location, '-platform', 'headless']
_store_app = QApplication(args)
if headless and (islinux or isbsd):
_store_app.headless = True
import traceback
            # This is needed because as of PyQt 5.4, if sys.excepthook ==
            # sys.__excepthook__, PyQt will abort the application on an
            # unhandled python exception in a slot or virtual method. Since
            # ensure_app() is used in worker processes for background work like
            # rendering html or running a headless browser, we circumvent this
            # as I really don't feel like going through all the code and making
            # sure no unhandled exceptions ever occur. All the actual GUI apps
            # already override sys.excepthook with a proper error handler.
def eh(t, v, tb):
try:
traceback.print_exception(t, v, tb, file=sys.stderr)
except:
pass
sys.excepthook = eh
def app_is_headless():
return getattr(_store_app, 'headless', False)
def must_use_qt(headless=True):
''' This function should be called if you want to use Qt for some non-GUI
task like rendering HTML/SVG or using a headless browser. It will raise a
RuntimeError if using Qt is not possible, which will happen if the current
thread is not the main GUI thread. On linux, it uses a special QPA headless
plugin, so that the X server does not need to be running. '''
global gui_thread
ensure_app(headless=headless)
if gui_thread is None:
gui_thread = QThread.currentThread()
if gui_thread is not QThread.currentThread():
raise RuntimeError('Cannot use Qt in non GUI thread')
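
# Usage sketch (illustrative): non-GUI code that needs Qt (e.g. rendering
# HTML to an image) calls must_use_qt() first, then uses Qt classes as usual
# in the main thread.
#
#   must_use_qt()
#   # ... safe to construct QImage, QWebPage, etc. here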
def is_ok_to_use_qt():
try:
must_use_qt()
except RuntimeError:
return False
return True
def is_gui_thread():
global gui_thread
return gui_thread is QThread.currentThread()
_rating_font = 'Arial Unicode MS' if iswindows else 'sans-serif'
def rating_font():
global _rating_font
return _rating_font
def elided_text(text, font=None, width=300, pos='middle'):
''' Return a version of text that is no wider than width pixels when
rendered, replacing characters from the left, middle or right (as per pos)
of the string with an ellipsis. Results in a string much closer to the
limit than Qt's elidedText().'''
from PyQt5.Qt import QFontMetrics, QApplication
fm = QApplication.fontMetrics() if font is None else (font if isinstance(font, QFontMetrics) else QFontMetrics(font))
delta = 4
ellipsis = u'\u2026'
def remove_middle(x):
mid = len(x) // 2
return x[:max(0, mid - (delta//2))] + ellipsis + x[mid + (delta//2):]
chomp = {'middle':remove_middle, 'left':lambda x:(ellipsis + x[delta:]), 'right':lambda x:(x[:-delta] + ellipsis)}[pos]
while len(text) > delta and fm.width(text) > width:
text = chomp(text)
return unicode(text)
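# Example (illustrative; the exact output depends on the active font):
#   elided_text(u'a rather long window title', width=100)
# keeps trimming characters from the middle and re-measuring until the rendered
# width fits, so the result lands much closer to the limit than elidedText().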
def find_forms(srcdir):
base = os.path.join(srcdir, 'calibre', 'gui2')
forms = []
for root, _, files in os.walk(base):
for name in files:
if name.endswith('.ui'):
forms.append(os.path.abspath(os.path.join(root, name)))
return forms
def form_to_compiled_form(form):
return form.rpartition('.')[0]+'_ui.py'
def build_forms(srcdir, info=None, summary=False, check_for_migration=False):
import re, cStringIO
from PyQt5.uic import compileUi
forms = find_forms(srcdir)
if info is None:
from calibre import prints
info = prints
pat = re.compile(r'''(['"]):/images/([^'"]+)\1''')
def sub(match):
ans = 'I(%s%s%s)'%(match.group(1), match.group(2), match.group(1))
return ans
num = 0
transdef_pat = re.compile(r'^\s+_translate\s+=\s+QtCore.QCoreApplication.translate$', flags=re.M)
transpat = re.compile(r'_translate\s*\(.+?,\s+"(.+?)(?<!\\)"\)', re.DOTALL)
# Ensure that people running from source have all their forms rebuilt for
# the qt5 migration
force_compile = check_for_migration and not gprefs.get('migrated_forms_to_qt5', False)
for form in forms:
compiled_form = form_to_compiled_form(form)
if force_compile or not os.path.exists(compiled_form) or os.stat(form).st_mtime > os.stat(compiled_form).st_mtime:
if not summary:
info('\tCompiling form', form)
buf = cStringIO.StringIO()
compileUi(form, buf)
dat = buf.getvalue()
dat = dat.replace('import images_rc', '')
dat = transdef_pat.sub('', dat)
dat = transpat.sub(r'_("\1")', dat)
dat = dat.replace('_("MMM yyyy")', '"MMM yyyy"')
dat = dat.replace('_("d MMM yyyy")', '"d MMM yyyy"')
dat = pat.sub(sub, dat)
            with open(compiled_form, 'wb') as cf:
                cf.write(dat)
num += 1
if num:
info('Compiled %d forms' % num)
if force_compile:
gprefs.set('migrated_forms_to_qt5', True)
_df = os.environ.get('CALIBRE_DEVELOP_FROM', None)
if _df and os.path.exists(_df):
build_forms(_df, check_for_migration=True)
def event_type_name(ev_or_etype):
from PyQt5.QtCore import QEvent
etype = ev_or_etype.type() if isinstance(ev_or_etype, QEvent) else ev_or_etype
for name, num in vars(QEvent).iteritems():
if num == etype:
return name
return 'UnknownEventType'
| hojel/calibre | src/calibre/gui2/__init__.py | Python | gpl-3.0 | 51,805 |
from bing_search_api import BingSearchAPI
my_key = "MEL5FOrb1H5G1E78YY8N5mkfcvUK2hNBYsZl1aAEEbE"
def query(query_string):
bing = BingSearchAPI(my_key)
params = {'ImageFilters':'"Face:Face"',
'$format': 'json',
'$top': 10,
'$skip': 0}
results = bing.search('web',query_string,params).json() # requests 1.0+
return [result['Url'] for result in results['d']['results'][0]['Web']]
if __name__ == "__main__":
query_string = "Your Query"
print query(query_string)
| mzweilin/HashTag-Understanding | test/test_bing_search.py | Python | apache-2.0 | 529 |
# Copyright (c) 2013 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import nested
import httplib
import mock
from neutron.openstack.common import importutils
from neutron.tests import base
from neutron.tests.unit.ryu import fake_ryu
class RyuAgentTestCase(base.BaseTestCase):
_AGENT_NAME = 'neutron.plugins.ryu.agent.ryu_neutron_agent'
def setUp(self):
super(RyuAgentTestCase, self).setUp()
self.addCleanup(mock.patch.stopall)
self.fake_ryu = fake_ryu.patch_fake_ryu_client().start()
self.mod_agent = importutils.import_module(self._AGENT_NAME)
class TestOVSNeutronOFPRyuAgent(RyuAgentTestCase):
def setUp(self):
super(TestOVSNeutronOFPRyuAgent, self).setUp()
self.plugin_api = mock.patch(
self._AGENT_NAME + '.RyuPluginApi').start()
self.ovsbridge = mock.patch(
self._AGENT_NAME + '.OVSBridge').start()
self.vifportset = mock.patch(
self._AGENT_NAME + '.VifPortSet').start()
self.q_ctx = mock.patch(
self._AGENT_NAME + '.q_context').start()
self.agent_rpc = mock.patch(
self._AGENT_NAME + '.agent_rpc.create_consumers').start()
self.sg_rpc = mock.patch(
self._AGENT_NAME + '.sg_rpc').start()
self.sg_agent = mock.patch(
self._AGENT_NAME + '.RyuSecurityGroupAgent').start()
def mock_rest_addr(self, rest_addr):
integ_br = 'integ_br'
tunnel_ip = '192.168.0.1'
ovsdb_ip = '172.16.0.1'
ovsdb_port = 16634
interval = 2
root_helper = 'helper'
self.mod_agent.OVSBridge.return_value.datapath_id = '1234'
mock_context = mock.Mock(return_value='abc')
self.q_ctx.get_admin_context_without_session = mock_context
mock_rest_addr = mock.Mock(return_value=rest_addr)
self.plugin_api.return_value.get_ofp_rest_api_addr = mock_rest_addr
# Instantiate OVSNeutronOFPRyuAgent
return self.mod_agent.OVSNeutronOFPRyuAgent(
integ_br, tunnel_ip, ovsdb_ip, ovsdb_port, interval, root_helper)
def test_valid_rest_addr(self):
self.mock_rest_addr('192.168.0.1:8080')
# OVSBridge
self.ovsbridge.assert_has_calls([
mock.call('integ_br', 'helper'),
mock.call().find_datapath_id()
])
# RyuPluginRpc
self.plugin_api.assert_has_calls([
mock.call('q-plugin'),
mock.call().get_ofp_rest_api_addr('abc')
])
# Agent RPC
self.agent_rpc.assert_has_calls([
mock.call(mock.ANY, 'q-agent-notifier', mock.ANY)
])
# OFPClient
self.mod_agent.client.OFPClient.assert_has_calls([
mock.call('192.168.0.1:8080')
])
# VifPortSet
self.vifportset.assert_has_calls([
mock.call(
self.ovsbridge.return_value,
self.mod_agent.client.OFPClient.return_value),
mock.call().setup()
])
# SwitchConfClient
self.mod_agent.client.SwitchConfClient.assert_has_calls([
mock.call('192.168.0.1:8080'),
mock.call().set_key('1234', 'ovs_tunnel_addr', '192.168.0.1'),
mock.call().set_key('1234', 'ovsdb_addr',
'tcp:%s:%d' % ('172.16.0.1', 16634))
])
# OVSBridge
self.ovsbridge.return_value.set_manager.assert_has_calls([
mock.call('ptcp:%d' % 16634)
])
def test_invalid_rest_addr(self):
self.assertRaises(self.mod_agent.q_exc.Invalid,
self.mock_rest_addr, (''))
def mock_port_update(self, **kwargs):
agent = self.mock_rest_addr('192.168.0.1:8080')
agent.port_update(mock.Mock(), **kwargs)
def test_port_update(self, **kwargs):
port = {'id': 1, 'security_groups': 'default'}
with mock.patch.object(self.ovsbridge.return_value,
'get_vif_port_by_id',
return_value=1) as get_vif:
self.mock_port_update(port=port)
get_vif.assert_called_once_with(1)
self.sg_agent.assert_has_calls([
mock.call().refresh_firewall()
])
def test_port_update_not_vifport(self, **kwargs):
port = {'id': 1, 'security_groups': 'default'}
with mock.patch.object(self.ovsbridge.return_value,
'get_vif_port_by_id',
return_value=0) as get_vif:
self.mock_port_update(port=port)
get_vif.assert_called_once_with(1)
self.assertFalse(self.sg_agent.return_value.refresh_firewall.called)
def test_port_update_without_secgroup(self, **kwargs):
port = {'id': 1}
with mock.patch.object(self.ovsbridge.return_value,
'get_vif_port_by_id',
return_value=1) as get_vif:
self.mock_port_update(port=port)
get_vif.assert_called_once_with(1)
self.assertFalse(self.sg_agent.return_value.refresh_firewall.called)
def mock_update_ports(self, vif_port_set=None, registered_ports=None):
with mock.patch.object(self.ovsbridge.return_value,
'get_vif_port_set',
return_value=vif_port_set):
agent = self.mock_rest_addr('192.168.0.1:8080')
return agent._update_ports(registered_ports)
def test_update_ports_unchanged(self):
self.assertIsNone(self.mock_update_ports())
def test_update_ports_changed(self):
vif_port_set = set([1, 3])
registered_ports = set([1, 2])
expected = dict(current=vif_port_set,
added=set([3]),
removed=set([2]))
actual = self.mock_update_ports(vif_port_set, registered_ports)
self.assertEqual(expected, actual)
def mock_process_devices_filter(self, port_info):
agent = self.mock_rest_addr('192.168.0.1:8080')
agent._process_devices_filter(port_info)
def test_process_devices_filter_add(self):
port_info = {'added': 1}
self.mock_process_devices_filter(port_info)
self.sg_agent.assert_has_calls([
mock.call().prepare_devices_filter(1)
])
def test_process_devices_filter_remove(self):
port_info = {'removed': 2}
self.mock_process_devices_filter(port_info)
self.sg_agent.assert_has_calls([
mock.call().remove_devices_filter(2)
])
def test_process_devices_filter_both(self):
port_info = {'added': 1, 'removed': 2}
self.mock_process_devices_filter(port_info)
self.sg_agent.assert_has_calls([
mock.call().prepare_devices_filter(1),
mock.call().remove_devices_filter(2)
])
def test_process_devices_filter_none(self):
port_info = {}
self.mock_process_devices_filter(port_info)
self.assertFalse(
self.sg_agent.return_value.prepare_devices_filter.called)
self.assertFalse(
self.sg_agent.return_value.remove_devices_filter.called)
class TestRyuPluginApi(RyuAgentTestCase):
def test_get_ofp_rest_api_addr(self):
with nested(
mock.patch(self._AGENT_NAME + '.RyuPluginApi.make_msg',
return_value='msg'),
mock.patch(self._AGENT_NAME + '.RyuPluginApi.call',
return_value='10.0.0.1')
) as (mock_msg, mock_call):
api = self.mod_agent.RyuPluginApi('topics')
addr = api.get_ofp_rest_api_addr('context')
self.assertEqual(addr, '10.0.0.1')
mock_msg.assert_has_calls([
mock.call('get_ofp_rest_api')
])
mock_call.assert_has_calls([
mock.call('context', 'msg', topic='topics')
])
class TestVifPortSet(RyuAgentTestCase):
def test_setup(self):
attrs = {'switch.datapath_id': 'dp1', 'ofport': 'p1'}
p1 = mock.Mock(**attrs)
attrs = {'switch.datapath_id': 'dp2', 'ofport': 'p2'}
p2 = mock.Mock(**attrs)
attrs = {'get_external_ports.return_value': [p1, p2]}
int_br = mock.Mock(**attrs)
with mock.patch(self._AGENT_NAME + '.client.OFPClient') as client:
api = client()
vif = self.mod_agent.VifPortSet(int_br, api)
vif.setup()
client.assert_has_calls([
mock.call().update_port('__NW_ID_EXTERNAL__', 'dp1', 'p1'),
mock.call().update_port('__NW_ID_EXTERNAL__', 'dp2', 'p2')
])
def test_setup_empty(self):
attrs = {'get_external_ports.return_value': []}
int_br = mock.Mock(**attrs)
api = mock.Mock()
vif = self.mod_agent.VifPortSet(int_br, api)
vif.setup()
self.assertEqual(api.update_port.call_count, 0)
class TestOVSBridge(RyuAgentTestCase):
def setUp(self):
super(TestOVSBridge, self).setUp()
self.lib_ovs = mock.patch(
'neutron.agent.linux.ovs_lib.OVSBridge').start()
def test_find_datapath_id(self):
with mock.patch(self._AGENT_NAME + '.OVSBridge.get_datapath_id',
return_value='1234') as mock_get_dpid:
br = self.mod_agent.OVSBridge('br_name', 'helper')
br.find_datapath_id()
mock_get_dpid.assert_has_calls([
mock.call()
])
self.assertEqual(br.datapath_id, '1234')
def test_set_manager(self):
with mock.patch(
self._AGENT_NAME + '.OVSBridge.run_vsctl') as mock_vsctl:
br = self.mod_agent.OVSBridge('br_name', 'helper')
br.set_manager('target')
mock_vsctl.assert_has_calls([
mock.call(['set-manager', 'target'])
])
def test_get_ofport(self):
with mock.patch(
self._AGENT_NAME + '.OVSBridge.db_get_val',
return_value=1) as mock_db:
br = self.mod_agent.OVSBridge('br_name', 'helper')
ofport = br.get_ofport('name')
mock_db.assert_has_calls([
mock.call('Interface', 'name', 'ofport')
])
self.assertEqual(ofport, 1)
def test_get_ports(self):
with nested(
mock.patch(self._AGENT_NAME + '.OVSBridge.get_port_name_list',
return_value=['p1', 'p2']),
mock.patch(self._AGENT_NAME + '.OVSBridge.get_ofport',
return_value=1)
) as (mock_name, mock_ofport):
get_port = mock.Mock(side_effect=['port1', 'port2'])
br = self.mod_agent.OVSBridge('br_name', 'helper')
ports = br._get_ports(get_port)
mock_name.assert_has_calls([
mock.call()
])
mock_ofport.assert_has_calls([
mock.call('p1'),
mock.call('p2')
])
get_port.assert_has_calls([
mock.call('p1'),
mock.call('p2')
])
self.assertEqual(len(ports), 2)
self.assertEqual(ports, ['port1', 'port2'])
def test_get_ports_empty(self):
with nested(
mock.patch(self._AGENT_NAME + '.OVSBridge.get_port_name_list',
return_value=[]),
mock.patch(self._AGENT_NAME + '.OVSBridge.get_ofport',
return_value=1)
) as (mock_name, mock_ofport):
get_port = mock.Mock(side_effect=['port1', 'port2'])
br = self.mod_agent.OVSBridge('br_name', 'helper')
ports = br._get_ports(get_port)
mock_name.assert_has_calls([
mock.call()
])
self.assertEqual(mock_ofport.call_count, 0)
self.assertEqual(get_port.call_count, 0)
self.assertEqual(len(ports), 0)
def test_get_ports_invalid_ofport(self):
with nested(
mock.patch(self._AGENT_NAME + '.OVSBridge.get_port_name_list',
return_value=['p1', 'p2']),
mock.patch(self._AGENT_NAME + '.OVSBridge.get_ofport',
side_effect=[-1, 1])
) as (mock_name, mock_ofport):
get_port = mock.Mock(side_effect=['port1', 'port2'])
br = self.mod_agent.OVSBridge('br_name', 'helper')
ports = br._get_ports(get_port)
mock_name.assert_has_calls([
mock.call()
])
mock_ofport.assert_has_calls([
mock.call('p1'),
mock.call('p2')
])
get_port.assert_has_calls([
mock.call('p2')
])
self.assertEqual(len(ports), 1)
self.assertEqual(ports, ['port1'])
def test_get_ports_invalid_port(self):
with nested(
mock.patch(self._AGENT_NAME + '.OVSBridge.get_port_name_list',
return_value=['p1', 'p2']),
mock.patch(self._AGENT_NAME + '.OVSBridge.get_ofport',
side_effect=[1, 2])
) as (mock_name, mock_ofport):
get_port = mock.Mock(side_effect=[None, 'port2'])
br = self.mod_agent.OVSBridge('br_name', 'helper')
ports = br._get_ports(get_port)
mock_name.assert_has_calls([
mock.call()
])
mock_ofport.assert_has_calls([
mock.call('p1'),
mock.call('p2')
])
get_port.assert_has_calls([
mock.call('p1'),
mock.call('p2')
])
self.assertEqual(len(ports), 1)
self.assertEqual(ports, ['port2'])
def test_get_external_port(self):
with nested(
mock.patch(self._AGENT_NAME + '.OVSBridge.db_get_map',
side_effect=[None, {'opts': 'opts_val'}]),
mock.patch(self._AGENT_NAME + '.OVSBridge.get_ofport',
return_value=1),
mock.patch(self._AGENT_NAME + '.VifPort')
) as (mock_db, mock_ofport, mock_vif):
br = self.mod_agent.OVSBridge('br_name', 'helper')
vifport = br._get_external_port('iface')
mock_db.assert_has_calls([
mock.call('Interface', 'iface', 'external_ids'),
mock.call('Interface', 'iface', 'options'),
])
mock_ofport.assert_has_calls([
mock.call('iface')
])
mock_vif.assert_has_calls([
mock.call('iface', 1, None, None, br)
])
self.assertEqual(vifport, mock_vif.return_value)
def test_get_external_port_vmport(self):
with nested(
mock.patch(self._AGENT_NAME + '.OVSBridge.db_get_map',
side_effect=[{'extids': 'extid_val'},
{'opts': 'opts_val'}]),
mock.patch(self._AGENT_NAME + '.OVSBridge.get_ofport',
return_value=1),
mock.patch(self._AGENT_NAME + '.VifPort')
) as (mock_db, mock_ofport, mock_vif):
br = self.mod_agent.OVSBridge('br_name', 'helper')
vifport = br._get_external_port('iface')
mock_db.assert_has_calls([
mock.call('Interface', 'iface', 'external_ids'),
])
self.assertEqual(mock_ofport.call_count, 0)
self.assertEqual(mock_vif.call_count, 0)
self.assertIsNone(vifport)
def test_get_external_port_tunnel(self):
with nested(
mock.patch(self._AGENT_NAME + '.OVSBridge.db_get_map',
side_effect=[None, {'remote_ip': '0.0.0.0'}]),
mock.patch(self._AGENT_NAME + '.OVSBridge.get_ofport',
return_value=1),
mock.patch(self._AGENT_NAME + '.VifPort')
) as (mock_db, mock_ofport, mock_vif):
br = self.mod_agent.OVSBridge('br_name', 'helper')
vifport = br._get_external_port('iface')
mock_db.assert_has_calls([
mock.call('Interface', 'iface', 'external_ids'),
mock.call('Interface', 'iface', 'options'),
])
self.assertEqual(mock_ofport.call_count, 0)
self.assertEqual(mock_vif.call_count, 0)
self.assertIsNone(vifport)
def test_get_external_ports(self):
with nested(
mock.patch(self._AGENT_NAME + '.OVSBridge._get_external_port'),
mock.patch(self._AGENT_NAME + '.OVSBridge._get_ports')
) as (mock_extport, mock_port):
br = self.mod_agent.OVSBridge('br_name', 'helper')
br.get_external_ports()
mock_port.assert_has_calls([
mock.call(mock_extport)
])
class TestRyuNeutronAgent(RyuAgentTestCase):
def test_get_my_ip(self):
sock_attrs = {
'return_value.getsockname.return_value': ['1.2.3.4', '']}
with mock.patch('socket.socket', **sock_attrs):
addr = self.mod_agent._get_my_ip()
self.assertEqual(addr, '1.2.3.4')
def test_get_ip_from_nic(self):
mock_device = mock.Mock()
mock_device.addr.list = mock.Mock(
return_value=[{'ip_version': 6, 'cidr': '::ffff:1.2.3.4'},
{'ip_version': 4, 'cidr': '1.2.3.4/8'}])
mock_ip_wrapper = mock.Mock()
mock_ip_wrapper.device = mock.Mock(return_value=mock_device)
with mock.patch(self._AGENT_NAME + '.ip_lib.IPWrapper',
return_value=mock_ip_wrapper):
addr = self.mod_agent._get_ip_from_nic('eth0')
self.assertEqual(addr, '1.2.3.4')
def test_get_ip_from_nic_empty(self):
mock_device = mock.Mock()
mock_device.addr.list = mock.Mock(return_value=[])
mock_ip_wrapper = mock.Mock()
mock_ip_wrapper.device = mock.Mock(return_value=mock_device)
with mock.patch(self._AGENT_NAME + '.ip_lib.IPWrapper',
return_value=mock_ip_wrapper):
addr = self.mod_agent._get_ip_from_nic('eth0')
self.assertIsNone(addr)
def test_get_ip_ip(self):
cfg_attrs = {'CONF.OVS.cfg_ip': '1.2.3.4',
'CONF.OVS.cfg_iface': 'eth0'}
with nested(
mock.patch(self._AGENT_NAME + '.cfg', **cfg_attrs),
mock.patch(self._AGENT_NAME + '._get_ip_from_nic',
return_value='10.0.0.1'),
mock.patch(self._AGENT_NAME + '._get_my_ip',
return_value='172.16.0.1')
) as (_cfg, mock_nicip, mock_myip):
ip = self.mod_agent._get_ip('cfg_ip', 'cfg_iface')
self.assertEqual(mock_nicip.call_count, 0)
self.assertEqual(mock_myip.call_count, 0)
self.assertEqual(ip, '1.2.3.4')
def test_get_ip_nic(self):
cfg_attrs = {'CONF.OVS.cfg_ip': None,
'CONF.OVS.cfg_iface': 'eth0'}
with nested(
mock.patch(self._AGENT_NAME + '.cfg', **cfg_attrs),
mock.patch(self._AGENT_NAME + '._get_ip_from_nic',
return_value='10.0.0.1'),
mock.patch(self._AGENT_NAME + '._get_my_ip',
return_value='172.16.0.1')
) as (_cfg, mock_nicip, mock_myip):
ip = self.mod_agent._get_ip('cfg_ip', 'cfg_iface')
mock_nicip.assert_has_calls([
mock.call('eth0')
])
self.assertEqual(mock_myip.call_count, 0)
self.assertEqual(ip, '10.0.0.1')
def test_get_ip_myip(self):
cfg_attrs = {'CONF.OVS.cfg_ip': None,
'CONF.OVS.cfg_iface': None}
with nested(
mock.patch(self._AGENT_NAME + '.cfg', **cfg_attrs),
mock.patch(self._AGENT_NAME + '._get_ip_from_nic',
return_value='10.0.0.1'),
mock.patch(self._AGENT_NAME + '._get_my_ip',
return_value='172.16.0.1')
) as (_cfg, mock_nicip, mock_myip):
ip = self.mod_agent._get_ip('cfg_ip', 'cfg_iface')
self.assertEqual(mock_nicip.call_count, 0)
mock_myip.assert_has_calls([
mock.call()
])
self.assertEqual(ip, '172.16.0.1')
def test_get_ip_nic_myip(self):
cfg_attrs = {'CONF.OVS.cfg_ip': None,
'CONF.OVS.cfg_iface': 'eth0'}
with nested(
mock.patch(self._AGENT_NAME + '.cfg', **cfg_attrs),
mock.patch(self._AGENT_NAME + '._get_ip_from_nic',
return_value=None),
mock.patch(self._AGENT_NAME + '._get_my_ip',
return_value='172.16.0.1')
) as (_cfg, mock_nicip, mock_myip):
ip = self.mod_agent._get_ip('cfg_ip', 'cfg_iface')
mock_nicip.assert_has_calls([
mock.call('eth0')
])
mock_myip.assert_has_calls([
mock.call()
])
self.assertEqual(ip, '172.16.0.1')
def test_get_tunnel_ip(self):
with mock.patch(self._AGENT_NAME + '._get_ip',
return_value='1.2.3.4') as mock_getip:
ip = self.mod_agent._get_tunnel_ip()
mock_getip.assert_has_calls([
mock.call('tunnel_ip', 'tunnel_interface')
])
self.assertEqual(ip, '1.2.3.4')
def test_get_ovsdb_ip(self):
with mock.patch(self._AGENT_NAME + '._get_ip',
return_value='1.2.3.4') as mock_getip:
ip = self.mod_agent._get_ovsdb_ip()
mock_getip.assert_has_calls([
mock.call('ovsdb_ip', 'ovsdb_interface')
])
self.assertEqual(ip, '1.2.3.4')
def mock_main(self):
cfg_attrs = {'CONF.OVS.integration_bridge': 'integ_br',
'CONF.OVS.ovsdb_port': 16634,
'CONF.AGENT.polling_interval': 2,
'CONF.AGENT.root_helper': 'helper'}
with nested(
mock.patch(self._AGENT_NAME + '.cfg', **cfg_attrs),
mock.patch(self._AGENT_NAME + '.logging_config'),
mock.patch(self._AGENT_NAME + '._get_tunnel_ip',
return_value='10.0.0.1'),
mock.patch(self._AGENT_NAME + '._get_ovsdb_ip',
return_value='172.16.0.1'),
) as (mock_conf, mock_log_conf, _tun, _ovsdb):
self.mod_agent.main()
mock_log_conf.assert_has_calls([
mock.call(mock_conf)
])
def test_main(self):
agent_attrs = {'daemon_loop.side_effect': SystemExit(0)}
with mock.patch(self._AGENT_NAME + '.OVSNeutronOFPRyuAgent',
**agent_attrs) as mock_agent:
self.assertRaises(SystemExit, self.mock_main)
mock_agent.assert_has_calls([
mock.call('integ_br', '10.0.0.1', '172.16.0.1', 16634, 2,
'helper'),
mock.call().daemon_loop()
])
def test_main_raise(self):
with nested(
mock.patch(self._AGENT_NAME + '.OVSNeutronOFPRyuAgent',
side_effect=httplib.HTTPException('boom')),
mock.patch('sys.exit', side_effect=SystemExit(0))
) as (mock_agent, mock_exit):
self.assertRaises(SystemExit, self.mock_main)
mock_agent.assert_has_calls([
mock.call('integ_br', '10.0.0.1', '172.16.0.1', 16634, 2,
'helper')
])
mock_exit.assert_has_calls([
mock.call(1)
])
| citrix-openstack-build/neutron | neutron/tests/unit/ryu/test_ryu_agent.py | Python | apache-2.0 | 23,803 |
#!/usr/bin/env python
# encoding: utf-8
"""
@version: python 2.7
@author: Sober.JChen
@license: Apache Licence
@contact: [email protected]
@software: PyCharm
@file: crop_save_and_view_nodules_in_3d.py
@time: 2017/3/14 13:15
"""
# ToDo --- Before running this script, create the predefined folders first, rename the 'class' header of candidates.csv to 'nodule_class', and save it as candidates_class.csv; otherwise it will raise an error.
# ======================================================================
# Program: Diffusion Weighted MRI Reconstruction
# Link: https://code.google.com/archive/p/diffusion-mri
# Module: $RCSfile: mhd_utils.py,v $
# Language: Python
# Author: $Author: bjian $
# Date: $Date: 2008/10/27 05:55:55 $
# Version:
# $Revision: 1.1 by PJackson 2013/06/06 $
# Modification: Adapted to 3D
# Link: https://sites.google.com/site/pjmedphys/tutorials/medical-images-in-python
#
# $Revision: 2 by RodenLuo 2017/03/12 $
# Modification: Adapted to LUNA2016 data set for DSB2017
# Link:https://www.kaggle.com/rodenluo/data-science-bowl-2017/crop-save-and-view-nodules-in-3d
# $Revision: 3 by Sober.JChen 2017/03/15 $
# Modification: Adapted to LUNA2016 data set for DSB2017
# Link:
#------- origin of the three functions write_meta_header, dump_raw_data and write_mhd_file ------
# ======================================================================
import SimpleITK as sitk
import numpy as np
from glob import glob
import pandas as pd
import scipy.ndimage
import os
import array
import math
try:
from tqdm import tqdm # long waits are not fun
except:
    print('tqdm is a lightweight progress bar package...')
tqdm = lambda x : x
# import traceback
workspace = './'
class nodules_crop(object):
def __init__(self, workspace):
"""param: workspace: 本次比赛all_patients的父目录"""
self.workspace = workspace
self.all_patients_path = os.path.join(self.workspace,"sample_patients/")
self.nodules_npy_path = "./nodule_cubes/npy/"
self.all_annotations_mhd_path = "./nodule_cubes/mhd/"
#self.all_candidates_mhd_path = "./nodule_cubes/mhd/"
self.ls_all_patients = glob(self.all_patients_path + "*.mhd")
self.df_annotations = pd.read_csv(self.workspace + "csv_files/annotations.csv")
self.df_annotations["file"] = self.df_annotations["seriesuid"].map(lambda file_name: self.get_filename(self.ls_all_patients, file_name))
self.df_annotations = self.df_annotations.dropna()
#self.df_candidates = pd.read_csv(self.workspace + "csv_files/candidates_class.csv")
#self.df_candidates["file"] = self.df_candidates["seriesuid"].map(lambda file_name: self.get_filename(self.ls_all_patients, file_name))
#self.df_candidates = self.df_candidates.dropna()
        #--- various predefined paths and tables
def set_window_width(self, image, MIN_BOUND=-1000.0):
image[image < MIN_BOUND] = MIN_BOUND
return image
    #--- sets the window width
def resample(self,image, old_spacing, new_spacing=[1, 1, 1]):
resize_factor = old_spacing / new_spacing
new_real_shape = image.shape * resize_factor
new_shape = np.round(new_real_shape)
real_resize_factor = new_shape / image.shape
new_spacing = old_spacing / real_resize_factor
image = scipy.ndimage.interpolation.zoom(image, real_resize_factor, mode='nearest')
return image, new_spacing
    #--- resampling
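    # Illustrative call (spacings are hypothetical): a scan with 2.5 mm slices
    # and 0.7 mm in-plane pixels, resampled to 1 mm isotropic voxels:
    #   image, new_spacing = self.resample(full_scan, np.array([2.5, 0.7, 0.7]))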
def write_meta_header(self,filename, meta_dict):
header = ''
# do not use tags = meta_dict.keys() because the order of tags matters
tags = ['ObjectType', 'NDims', 'BinaryData',
'BinaryDataByteOrderMSB', 'CompressedData', 'CompressedDataSize',
'TransformMatrix', 'Offset', 'CenterOfRotation',
'AnatomicalOrientation',
'ElementSpacing',
'DimSize',
'ElementType',
'ElementDataFile',
'Comment', 'SeriesDescription', 'AcquisitionDate', 'AcquisitionTime', 'StudyDate', 'StudyTime']
for tag in tags:
if tag in meta_dict.keys():
header += '%s = %s\n' % (tag, meta_dict[tag])
f = open(filename, 'w')
f.write(header)
f.close()
def dump_raw_data(self,filename, data):
""" Write the data into a raw format file. Big endian is always used. """
        #--- write the data to the file
# Begin 3D fix
data = data.reshape([data.shape[0], data.shape[1] * data.shape[2]])
# End 3D fix
rawfile = open(filename, 'wb')
a = array.array('f')
for o in data:
a.fromlist(list(o))
# if is_little_endian():
# a.byteswap()
a.tofile(rawfile)
rawfile.close()
def write_mhd_file(self,mhdfile, data, dsize):
assert (mhdfile[-4:] == '.mhd')
meta_dict = {}
meta_dict['ObjectType'] = 'Image'
meta_dict['BinaryData'] = 'True'
meta_dict['BinaryDataByteOrderMSB'] = 'False'
meta_dict['ElementType'] = 'MET_FLOAT'
meta_dict['NDims'] = str(len(dsize))
meta_dict['DimSize'] = ' '.join([str(i) for i in dsize])
meta_dict['ElementDataFile'] = os.path.split(mhdfile)[1].replace('.mhd', '.raw')
self.write_meta_header(mhdfile, meta_dict)
pwd = os.path.split(mhdfile)[0]
if pwd:
data_file = pwd + '/' + meta_dict['ElementDataFile']
else:
data_file = meta_dict['ElementDataFile']
self.dump_raw_data(data_file, data)
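    # Illustrative call (path and array are hypothetical): write a 45x45x45 cube
    # as an .mhd header plus .raw payload so it can be inspected in Fiji:
    #   self.write_mhd_file('./nodule_cubes/mhd/000001.mhd', nodule_box, nodule_box.shape)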
def save_annotations_nodule(self,nodule_crop, name_index):
np.save(os.path.join(self.nodules_npy_path,"1_%06d_annotations.npy" % (name_index)), nodule_crop)
# np.save(self.nodules_npy_path + str(1) + "_" + str(name_index) + '_annotations' + '.npy', nodule_crop)
# self.write_mhd_file(self.all_annotations_mhd_path + str(1) + "_" + str(name_index) + '_annotations' + '.mhd', nodule_crop,nodule_crop.shape)
    #--- saves the nodule file; uncomment write_mhd_file above to inspect the segmentation in Fiji
def save_candidates_nodule(self,nodule_crop, name_index, cancer_flag):
# np.save(self.nodules_npy_path + str(cancer_flag) + "_" + str(name_index) + '_candidates' + '.npy', nodule_crop)
np.save(os.path.join(self.nodules_npy_path, "%01d_%06d_candidates.npy" % (cancer_flag,name_index)), nodule_crop)
# self.write_mhd_file(self.all_candidates_mhd_path + str(cancer_flag) + "_" + str(name_index) + '.mhd', nodule_crop,nodule_crop.shape)
    #--- saves the nodule file; uncomment write_mhd_file above to inspect the segmentation in Fiji
def get_filename(self,file_list, case):
for f in file_list:
if case in f:
return (f)
    #--- matches a file name from the list
def annotations_crop(self):
for patient in enumerate(tqdm(self.ls_all_patients)):
patient = patient[1]
print(patient)
            # Check whether this patient has any nodules larger than 3 mm
            if patient not in self.df_annotations.file.values:
                print('Patient ' + patient + ' does not exist!')
continue
patient_nodules = self.df_annotations[self.df_annotations.file == patient]
full_image_info = sitk.ReadImage(patient)
full_scan = sitk.GetArrayFromImage(full_image_info)
            origin = np.array(full_image_info.GetOrigin())[::-1] #--- origin of the scan in world space
            old_spacing = np.array(full_image_info.GetSpacing())[::-1] #--- spacing between adjacent voxels of this CT, per axis, in world space
            image, new_spacing = self.resample(full_scan, old_spacing)#--- resample
print('Resample Done')
for index, nodule in patient_nodules.iterrows():
                nodule_center = np.array([nodule.coordZ, nodule.coordY, nodule.coordX])#--- nodule center coordinates in world space
                v_center = np.rint((nodule_center - origin) / new_spacing)# map to voxel-space coordinates
v_center = np.array(v_center, dtype=int)
                #--- this if chain reflects the idea that judging whether a nodule is malignant needs the shadows and positional context around it, so each nodule is extracted as a 3D volume slightly larger than the nodule itself
if nodule.diameter_mm < 5:
window_size = 7
elif nodule.diameter_mm < 10:
window_size = 9
elif nodule.diameter_mm < 20:
window_size = 15
elif nodule.diameter_mm < 25:
window_size = 17
elif nodule.diameter_mm < 30:
window_size = 20
else:
window_size = 22
                zyx_1 = v_center - window_size  # note the order: Z, Y, X
zyx_2 = v_center + window_size + 1
nodule_box = np.zeros([45,45,45],np.int16)#---nodule_box_size = 45
                img_crop = image[zyx_1[0]:zyx_2[0], zyx_1[1]:zyx_2[1], zyx_1[2]:zyx_2[2]]#--- crop the cube
                img_crop = self.set_window_width(img_crop)#--- apply the window: voxel values below -1000 are set to -1000
zeros_fill = math.floor((45 - (2*window_size+1))/2)
try:
                    nodule_box[zeros_fill:45 - zeros_fill, zeros_fill:45 - zeros_fill,zeros_fill:45 - zeros_fill] = img_crop  # --- place the cropped cube inside nodule_box
except:
# f = open("log.txt", 'a')
# traceback.print_exc(file=f)
# f.flush()
# f.close()
continue
                nodule_box[nodule_box == 0] = -1000#--- set the padded zeros to -1000; a few genuine 0-valued voxels may flip from 0 to -1000, which is negligible
self.save_annotations_nodule(nodule_box, index)
print('Done for this patient!\n\n')
print('Done for all!')
    #--- crops annotated nodules
def candidates_crop(self):
for patient in enumerate(tqdm(self.ls_all_patients)):
patient = patient[1]
print(patient)
            # Check whether this patient has any nodules larger than 3 mm
            if patient not in self.df_candidates.file.values:
                print('Patient ' + patient + ' does not exist!')
                continue
patient_nodules = self.df_candidates[self.df_candidates.file == patient]
full_image_info = sitk.ReadImage(patient)
full_scan = sitk.GetArrayFromImage(full_image_info)
            origin = np.array(full_image_info.GetOrigin())[::-1] #--- origin of the scan in world space
            old_spacing = np.array(full_image_info.GetSpacing())[::-1] #--- spacing between adjacent voxels of this CT, per axis, in world space
            image, new_spacing = self.resample(full_scan, old_spacing)#--- resample
print('Resample Done')
for index, nodule in patient_nodules.iterrows():
                nodule_center = np.array([nodule.coordZ, nodule.coordY, nodule.coordX])#--- nodule center coordinates in world space
                v_center = np.rint((nodule_center - origin) / new_spacing)# map to voxel-space coordinates
v_center = np.array(v_center, dtype=int)
                #--- as above, judging malignancy needs the context around the nodule, so a volume slightly larger than the nodule is extracted
                window_size = 17#--- the real window size is window_size + window_size + 1
                zyx_1 = v_center - window_size  # note the order: Z, Y, X
zyx_2 = v_center + window_size + 1
nodule_box = np.zeros([45,45,45],np.int16)#---nodule_box_size = 45
                img_crop = image[zyx_1[0]:zyx_2[0], zyx_1[1]:zyx_2[1], zyx_1[2]:zyx_2[2]]#--- crop the cube
                img_crop = self.set_window_width(img_crop)#--- apply the window: voxel values below -1000 are set to -1000
zeros_fill = int(math.floor((45 - (2*window_size+1))/2))
try:
                    nodule_box[zeros_fill:45 - zeros_fill, zeros_fill:45 - zeros_fill,zeros_fill:45 - zeros_fill] = img_crop  # --- place the cropped cube inside nodule_box
except:
# f = open("log.txt", 'a')
# traceback.print_exc(file=f)
# f.flush()
# f.close()
continue
                nodule_box[nodule_box == 0] = -1000#--- set the padded zeros to -1000; a few genuine 0-valued voxels may flip from 0 to -1000, which is negligible
                self.save_candidates_nodule(nodule_box, index,nodule.nodule_class)#--- this is where it breaks: if the 'class' column in the csv has not been renamed to 'nodule_class', this line raises an error
    #--- crops candidate nodules
if __name__ == '__main__':
nc = nodules_crop(workspace)
nc.annotations_crop()
#nc.candidates_crop()
| shihuai/TCAI-2017 | preprocess/crop_save_and_view_nodules_in_3d.py | Python | mit | 13,121 |
"""
Input/Output tools for working with binary data.
The Stata input tools were originally written by Joe Presbrey as part of PyDTA.
You can find more information here http://presbrey.mit.edu/PyDTA
See also
---------
numpy.lib.io
"""
from statsmodels.compat.python import (zip, lzip, lmap, lrange, string_types, long, lfilter,
asbytes, asstr, range)
from struct import unpack, calcsize, pack
from struct import error as struct_error
import datetime
import sys
import numpy as np
from numpy.lib._iotools import _is_string_like, easy_dtype
import statsmodels.tools.data as data_util
from pandas import isnull
def is_py3():
import sys
if sys.version_info[0] == 3:
return True
return False
PY3 = is_py3()
_date_formats = ["%tc", "%tC", "%td", "%tw", "%tm", "%tq", "%th", "%ty"]
def _datetime_to_stata_elapsed(date, fmt):
"""
Convert from datetime to SIF. http://www.stata.com/help.cgi?datetime
Parameters
----------
date : datetime.datetime
The date to convert to the Stata Internal Format given by fmt
fmt : str
The format to convert to. Can be, tc, td, tw, tm, tq, th, ty
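    Examples
    --------
    A quick check of the day-count (td) format:
    >>> _datetime_to_stata_elapsed(datetime.datetime(1960, 1, 2), "%td")
    1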
"""
if not isinstance(date, datetime.datetime):
raise ValueError("date should be datetime.datetime format")
stata_epoch = datetime.datetime(1960, 1, 1)
if fmt in ["%tc", "tc"]:
delta = date - stata_epoch
return (delta.days * 86400000 + delta.seconds*1000 +
delta.microseconds/1000)
elif fmt in ["%tC", "tC"]:
from warnings import warn
warn("Stata Internal Format tC not supported.", UserWarning)
return date
elif fmt in ["%td", "td"]:
return (date- stata_epoch).days
elif fmt in ["%tw", "tw"]:
return (52*(date.year-stata_epoch.year) +
(date - datetime.datetime(date.year, 1, 1)).days / 7)
elif fmt in ["%tm", "tm"]:
return (12 * (date.year - stata_epoch.year) + date.month - 1)
elif fmt in ["%tq", "tq"]:
return 4*(date.year-stata_epoch.year) + int((date.month - 1)/3)
elif fmt in ["%th", "th"]:
return 2 * (date.year - stata_epoch.year) + int(date.month > 6)
elif fmt in ["%ty", "ty"]:
return date.year
else:
raise ValueError("fmt %s not understood" % fmt)
def _stata_elapsed_date_to_datetime(date, fmt):
"""
Convert from SIF to datetime. http://www.stata.com/help.cgi?datetime
Parameters
----------
date : int
The Stata Internal Format date to convert to datetime according to fmt
fmt : str
The format to convert to. Can be, tc, td, tw, tm, tq, th, ty
Examples
--------
    >>> _stata_elapsed_date_to_datetime(52, "%tw")
    datetime.datetime(1961, 1, 1, 0, 0)
Notes
-----
datetime/c - tc
milliseconds since 01jan1960 00:00:00.000, assuming 86,400 s/day
datetime/C - tC - NOT IMPLEMENTED
milliseconds since 01jan1960 00:00:00.000, adjusted for leap seconds
date - td
days since 01jan1960 (01jan1960 = 0)
weekly date - tw
weeks since 1960w1
This assumes 52 weeks in a year, then adds 7 * remainder of the weeks.
The datetime value is the start of the week in terms of days in the
year, not ISO calendar weeks.
monthly date - tm
months since 1960m1
quarterly date - tq
quarters since 1960q1
half-yearly date - th
        half-years since 1960h1
    yearly date - ty
years since 0000
If you don't have pandas with datetime support, then you can't do
milliseconds accurately.
"""
#NOTE: we could run into overflow / loss of precision situations here
# casting to int, but I'm not sure what to do. datetime won't deal with
# numpy types and numpy datetime isn't mature enough / we can't rely on
# pandas version > 0.7.1
#TODO: IIRC relative delta doesn't play well with np.datetime?
date = int(date)
stata_epoch = datetime.datetime(1960, 1, 1)
if fmt in ["%tc", "tc"]:
from dateutil.relativedelta import relativedelta
return stata_epoch + relativedelta(microseconds=date*1000)
elif fmt in ["%tC", "tC"]:
from warnings import warn
warn("Encountered %tC format. Leaving in Stata Internal Format.",
UserWarning)
return date
elif fmt in ["%td", "td"]:
return stata_epoch + datetime.timedelta(int(date))
elif fmt in ["%tw", "tw"]: # does not count leap days - 7 days is a week
year = datetime.datetime(stata_epoch.year + date // 52, 1, 1)
day_delta = (date % 52 ) * 7
return year + datetime.timedelta(int(day_delta))
elif fmt in ["%tm", "tm"]:
year = stata_epoch.year + date // 12
month_delta = (date % 12 ) + 1
return datetime.datetime(year, month_delta, 1)
elif fmt in ["%tq", "tq"]:
year = stata_epoch.year + date // 4
month_delta = (date % 4) * 3 + 1
return datetime.datetime(year, month_delta, 1)
elif fmt in ["%th", "th"]:
year = stata_epoch.year + date // 2
month_delta = (date % 2) * 6 + 1
return datetime.datetime(year, month_delta, 1)
elif fmt in ["%ty", "ty"]:
if date > 0:
return datetime.datetime(date, 1, 1)
else: # don't do negative years bc can't mix dtypes in column
raise ValueError("Year 0 and before not implemented")
else:
raise ValueError("Date fmt %s not understood" % fmt)
### Helper classes for StataReader ###
class _StataMissingValue(object):
"""
An observation's missing value.
Parameters
-----------
offset
value
Attributes
----------
string
value
Notes
-----
More information: <http://www.stata.com/help.cgi?missing>
"""
def __init__(self, offset, value):
self._value = value
if isinstance(value, (int, long)):
            self._str = value-offset == 1 and \
                '.' or ('.' + chr(value-offset+96))
else:
self._str = '.'
string = property(lambda self: self._str, doc="The Stata representation of \
the missing value: '.', '.a'..'.z'")
value = property(lambda self: self._value, doc='The binary representation \
of the missing value.')
def __str__(self): return self._str
__str__.__doc__ = string.__doc__
class _StataVariable(object):
"""
A dataset variable. Not intended for public use.
Parameters
----------
variable_data
Attributes
-----------
format : str
Stata variable format. See notes for more information.
index : int
Zero-index column index of variable.
label : str
Data Label
name : str
Variable name
type : str
Stata data type. See notes for more information.
value_format : str
Value format.
Notes
-----
More information: http://www.stata.com/help.cgi?format
"""
def __init__(self, variable_data):
self._data = variable_data
def __int__(self):
return self.index
def __str__(self):
return self.name
index = property(lambda self: self._data[0], doc='the variable\'s index \
within an observation')
type = property(lambda self: self._data[1], doc='the data type of \
variable\n\nPossible types are:\n{1..244:string, b:byte, h:int, l:long, \
f:float, d:double}')
name = property(lambda self: self._data[2], doc='the name of the variable')
format = property(lambda self: self._data[4], doc='the variable\'s Stata \
format')
value_format = property(lambda self: self._data[5], doc='the variable\'s \
value format')
label = property(lambda self: self._data[6], doc='the variable\'s label')
__int__.__doc__ = index.__doc__
__str__.__doc__ = name.__doc__
class StataReader(object):
"""
Stata .dta file reader.
Provides methods to return the metadata of a Stata .dta file and
a generator for the data itself.
Parameters
----------
file : file-like
A file-like object representing a Stata .dta file.
missing_values : bool
If missing_values is True, parse missing_values and return a
Missing Values object instead of None.
encoding : string, optional
Used for Python 3 only. Encoding to use when reading the .dta file.
Defaults to `locale.getpreferredencoding`
See also
--------
statsmodels.lib.io.genfromdta
Notes
-----
This is known only to work on file formats 113 (Stata 8/9), 114
(Stata 10/11), and 115 (Stata 12). Needs to be tested on older versions.
Known not to work on format 104, 108. If you have the documentation for
older formats, please contact the developers.
For more information about the .dta format see
http://www.stata.com/help.cgi?dta
http://www.stata.com/help.cgi?dta_113
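    Examples
    --------
    A minimal usage sketch ('data.dta' is a placeholder path)::
        with open('data.dta', 'rb') as f:
            reader = StataReader(f)
            names = [str(v) for v in reader.variables()]
            rows = list(reader.dataset(as_dict=True))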
"""
_header = {}
_data_location = 0
_col_sizes = ()
_has_string_data = False
_missing_values = False
#type code
#--------------------
#str1 1 = 0x01
#str2 2 = 0x02
#...
#str244 244 = 0xf4
#byte 251 = 0xfb (sic)
#int 252 = 0xfc
#long 253 = 0xfd
#float 254 = 0xfe
#double 255 = 0xff
#--------------------
#NOTE: the byte type seems to be reserved for categorical variables
# with a label, but the underlying variable is -127 to 100
# we're going to drop the label and cast to int
DTYPE_MAP = dict(lzip(lrange(1,245), ['a' + str(i) for i in range(1,245)]) + \
[(251, np.int16),(252, np.int32),(253, int),
(254, np.float32), (255, np.float64)])
TYPE_MAP = lrange(251)+list('bhlfd')
#NOTE: technically, some of these are wrong. there are more numbers
# that can be represented. it's the 27 ABOVE and BELOW the max listed
# numeric data type in [U] 12.2.2 of the 11.2 manual
MISSING_VALUES = { 'b': (-127,100), 'h': (-32767, 32740), 'l':
(-2147483647, 2147483620), 'f': (-1.701e+38, +1.701e+38), 'd':
(-1.798e+308, +8.988e+307) }
def __init__(self, fname, missing_values=False, encoding=None):
        if encoding is None:
import locale
self._encoding = locale.getpreferredencoding()
else:
self._encoding = encoding
self._missing_values = missing_values
self._parse_header(fname)
def file_headers(self):
"""
Returns all .dta file headers.
out: dict
Has keys typlist, data_label, lbllist, varlist, nvar, filetype,
ds_format, nobs, fmtlist, vlblist, time_stamp, srtlist, byteorder
"""
return self._header
def file_format(self):
"""
Returns the file format.
Returns
-------
out : int
Notes
-----
Format 113: Stata 8/9
Format 114: Stata 10/11
Format 115: Stata 12
"""
return self._header['ds_format']
def file_label(self):
"""
Returns the dataset's label.
Returns
-------
out: string
"""
return self._header['data_label']
def file_timestamp(self):
"""
Returns the date and time Stata recorded on last file save.
Returns
-------
out : str
"""
return self._header['time_stamp']
def variables(self):
"""
Returns a list of the dataset's StataVariables objects.
"""
return lmap(_StataVariable, zip(lrange(self._header['nvar']),
self._header['typlist'], self._header['varlist'],
self._header['srtlist'],
self._header['fmtlist'], self._header['lbllist'],
self._header['vlblist']))
def dataset(self, as_dict=False):
"""
Returns a Python generator object for iterating over the dataset.
Parameters
----------
as_dict : bool, optional
If as_dict is True, yield each row of observations as a dict.
If False, yields each row of observations as a list.
Returns
-------
Generator object for iterating over the dataset. Yields each row of
observations as a list by default.
Notes
-----
If missing_values is True during instantiation of StataReader then
observations with _StataMissingValue(s) are not filtered and should
        be handled by your application.
"""
try:
self._file.seek(self._data_location)
except Exception:
pass
if as_dict:
vars = lmap(str, self.variables())
for i in range(len(self)):
yield dict(zip(vars, self._next()))
else:
for i in range(self._header['nobs']):
yield self._next()
### Python special methods
def __len__(self):
"""
Return the number of observations in the dataset.
This value is taken directly from the header and includes observations
with missing values.
"""
return self._header['nobs']
def __getitem__(self, k):
"""
Seek to an observation indexed k in the file and return it, ordered
by Stata's output to the .dta file.
        k is zero-indexed. Prefer iterating via dataset() for performance.
"""
if not (isinstance(k, (int, long))) or k < 0 or k > len(self)-1:
raise IndexError(k)
loc = self._data_location + sum(self._col_size()) * k
if self._file.tell() != loc:
self._file.seek(loc)
return self._next()
### Private methods
def _null_terminate(self, s, encoding):
if PY3: # have bytes not strings, so must decode
null_byte = asbytes('\x00')
try:
s = s.lstrip(null_byte)[:s.index(null_byte)]
except:
pass
return s.decode(encoding)
else:
null_byte = asbytes('\x00')
try:
return s.lstrip(null_byte)[:s.index(null_byte)]
except:
return s
def _parse_header(self, file_object):
self._file = file_object
encoding = self._encoding
# parse headers
self._header['ds_format'] = unpack('b', self._file.read(1))[0]
if self._header['ds_format'] not in [113, 114, 115]:
raise ValueError("Only file formats >= 113 (Stata >= 9)"
" are supported. Got format %s. Please report "
"if you think this error is incorrect." %
self._header['ds_format'])
byteorder = self._header['byteorder'] = unpack('b',
self._file.read(1))[0]==0x1 and '>' or '<'
self._header['filetype'] = unpack('b', self._file.read(1))[0]
self._file.read(1)
nvar = self._header['nvar'] = unpack(byteorder+'h',
self._file.read(2))[0]
self._header['nobs'] = unpack(byteorder+'i', self._file.read(4))[0]
self._header['data_label'] = self._null_terminate(self._file.read(81),
encoding)
self._header['time_stamp'] = self._null_terminate(self._file.read(18),
encoding)
# parse descriptors
typlist =[ord(self._file.read(1)) for i in range(nvar)]
self._header['typlist'] = [self.TYPE_MAP[typ] for typ in typlist]
self._header['dtyplist'] = [self.DTYPE_MAP[typ] for typ in typlist]
self._header['varlist'] = [self._null_terminate(self._file.read(33),
encoding) for i in range(nvar)]
self._header['srtlist'] = unpack(byteorder+('h'*(nvar+1)),
self._file.read(2*(nvar+1)))[:-1]
if self._header['ds_format'] <= 113:
self._header['fmtlist'] = \
[self._null_terminate(self._file.read(12), encoding) \
for i in range(nvar)]
else:
self._header['fmtlist'] = \
[self._null_terminate(self._file.read(49), encoding) \
for i in range(nvar)]
self._header['lbllist'] = [self._null_terminate(self._file.read(33),
encoding) for i in range(nvar)]
self._header['vlblist'] = [self._null_terminate(self._file.read(81),
encoding) for i in range(nvar)]
# ignore expansion fields
# When reading, read five bytes; the last four bytes now tell you the
# size of the next read, which you discard. You then continue like
# this until you read 5 bytes of zeros.
while True:
data_type = unpack(byteorder+'b', self._file.read(1))[0]
data_len = unpack(byteorder+'i', self._file.read(4))[0]
if data_type == 0:
break
self._file.read(data_len)
# other state vars
self._data_location = self._file.tell()
self._has_string_data = len(lfilter(lambda x: isinstance(x, int),
self._header['typlist'])) > 0
self._col_size()
def _calcsize(self, fmt):
return isinstance(fmt, int) and fmt or \
calcsize(self._header['byteorder']+fmt)
def _col_size(self, k = None):
"""Calculate size of a data record."""
if len(self._col_sizes) == 0:
self._col_sizes = lmap(lambda x: self._calcsize(x),
self._header['typlist'])
        if k is None:
return self._col_sizes
else:
return self._col_sizes[k]
def _unpack(self, fmt, byt):
d = unpack(self._header['byteorder']+fmt, byt)[0]
if fmt[-1] in self.MISSING_VALUES:
nmin, nmax = self.MISSING_VALUES[fmt[-1]]
if d < nmin or d > nmax:
if self._missing_values:
return _StataMissingValue(nmax, d)
else:
return None
return d
def _next(self):
typlist = self._header['typlist']
if self._has_string_data:
data = [None]*self._header['nvar']
for i in range(len(data)):
if isinstance(typlist[i], int):
data[i] = self._null_terminate(self._file.read(typlist[i]),
self._encoding)
else:
data[i] = self._unpack(typlist[i],
self._file.read(self._col_size(i)))
return data
else:
return lmap(lambda i: self._unpack(typlist[i],
self._file.read(self._col_size(i))),
lrange(self._header['nvar']))
def _open_file_binary_write(fname, encoding):
if hasattr(fname, 'write'):
#if 'b' not in fname.mode:
return fname
    # note: binary mode takes no encoding argument; strings are encoded
    # explicitly (via asbytes) before being written
    return open(fname, "wb")
def _set_endianness(endianness):
if endianness.lower() in ["<", "little"]:
return "<"
elif endianness.lower() in [">", "big"]:
return ">"
else: # pragma : no cover
raise ValueError("Endianness %s not understood" % endianness)
def _dtype_to_stata_type(dtype):
"""
Converts dtype types to stata types. Returns the byte of the given ordinal.
See TYPE_MAP and comments for an explanation. This is also explained in
the dta spec.
1 - 244 are strings of this length
251 - chr(251) - for int8 and int16, byte
252 - chr(252) - for int32, int
253 - chr(253) - for int64, long
254 - chr(254) - for float32, float
255 - chr(255) - double, double
If there are dates to convert, then dtype will already have the correct
type inserted.
"""
#TODO: expand to handle datetime to integer conversion
if dtype.type == np.string_:
return chr(dtype.itemsize)
elif dtype.type == np.object_: # try to coerce it to the biggest string
# not memory efficient, what else could we do?
return chr(244)
elif dtype == np.float64:
return chr(255)
elif dtype == np.float32:
return chr(254)
elif dtype == np.int64:
return chr(253)
elif dtype == np.int32:
return chr(252)
elif dtype == np.int8 or dtype == np.int16: # ok to assume bytes?
return chr(251)
else: # pragma : no cover
raise ValueError("Data type %s not currently understood. "
"Please report an error to the developers." % dtype)
def _dtype_to_default_stata_fmt(dtype):
"""
Maps numpy dtype to stata's default format for this type. Not terribly
important since users can change this in Stata. Semantics are
string -> "%DDs" where DD is the length of the string
float64 -> "%10.0g"
float32 -> "%9.0g"
int64 -> "%9.0g"
    int32 -> "%8.0g"
    int16 -> "%8.0g"
int8 -> "%8.0g"
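    For example:
    >>> _dtype_to_default_stata_fmt(np.dtype(np.float64))
    '%10.0g'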
"""
#TODO: expand this to handle a default datetime format?
if dtype.type == np.string_:
return "%" + str(dtype.itemsize) + "s"
elif dtype.type == np.object_:
return "%244s"
elif dtype == np.float64:
return "%10.0g"
elif dtype == np.float32:
return "%9.0g"
elif dtype == np.int64:
return "%9.0g"
elif dtype == np.int32:
return "%8.0g"
elif dtype == np.int8 or dtype == np.int16: # ok to assume bytes?
return "%8.0g"
else: # pragma : no cover
raise ValueError("Data type %s not currently understood. "
"Please report an error to the developers." % dtype)
def _pad_bytes(name, length):
"""
    Takes a char string and pads it with null bytes until it is length chars long
"""
return name + "\x00" * (length - len(name))
def _default_names(nvar):
"""
Returns default Stata names v1, v2, ... vnvar
"""
return ["v%d" % i for i in range(1,nvar+1)]
def _convert_datetime_to_stata_type(fmt):
"""
Converts from one of the stata date formats to a type in TYPE_MAP
"""
if fmt in ["tc", "%tc", "td", "%td", "tw", "%tw", "tm", "%tm", "tq",
"%tq", "th", "%th", "ty", "%ty"]:
return np.float64 # Stata expects doubles for SIFs
else:
raise ValueError("fmt %s not understood" % fmt)
def _maybe_convert_to_int_keys(convert_dates, varlist):
new_dict = {}
for key in convert_dates:
if not convert_dates[key].startswith("%"): # make sure proper fmts
convert_dates[key] = "%" + convert_dates[key]
if key in varlist:
new_dict.update({varlist.index(key) : convert_dates[key]})
else:
if not isinstance(key, int):
                raise ValueError("convert_dates key is not in varlist "
"and is not an int")
new_dict.update({key : convert_dates[key]})
return new_dict
_type_converters = {253 : np.long, 252 : int}
class StataWriter(object):
"""
A class for writing Stata binary dta files from array-like objects
Parameters
----------
fname : file path or buffer
Where to save the dta file.
data : array-like
Array-like input to save. Pandas objects are also accepted.
convert_dates : dict
Dictionary mapping column of datetime types to the stata internal
format that you want to use for the dates. Options are
'tc', 'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either a
number or a name.
encoding : str
Default is latin-1. Note that Stata does not support unicode.
byteorder : str
Can be ">", "<", "little", or "big". The default is None which uses
`sys.byteorder`
Returns
-------
writer : StataWriter instance
The StataWriter instance has a write_file method, which will
write the file to the given `fname`.
Examples
--------
>>> writer = StataWriter('./data_file.dta', data)
>>> writer.write_file()
Or with dates
>>> writer = StataWriter('./date_data_file.dta', date, {2 : 'tw'})
>>> writer.write_file()
"""
#type code
#--------------------
#str1 1 = 0x01
#str2 2 = 0x02
#...
#str244 244 = 0xf4
#byte 251 = 0xfb (sic)
#int 252 = 0xfc
#long 253 = 0xfd
#float 254 = 0xfe
#double 255 = 0xff
#--------------------
#NOTE: the byte type seems to be reserved for categorical variables
# with a label, but the underlying variable is -127 to 100
# we're going to drop the label and cast to int
DTYPE_MAP = dict(lzip(lrange(1,245), ['a' + str(i) for i in range(1,245)]) + \
[(251, np.int16),(252, np.int32),(253, int),
(254, np.float32), (255, np.float64)])
TYPE_MAP = lrange(251)+list('bhlfd')
MISSING_VALUES = { 'b': 101,
'h': 32741,
'l' : 2147483621,
'f': 1.7014118346046923e+38,
'd': 8.98846567431158e+307}
def __init__(self, fname, data, convert_dates=None, encoding="latin-1",
byteorder=None):
self._convert_dates = convert_dates
# attach nobs, nvars, data, varlist, typlist
if data_util._is_using_pandas(data, None):
self._prepare_pandas(data)
elif data_util._is_array_like(data, None):
data = np.asarray(data)
if data_util._is_structured_ndarray(data):
self._prepare_structured_array(data)
else:
if convert_dates is not None:
raise ValueError("Not able to convert dates in a plain"
" ndarray.")
self._prepare_ndarray(data)
else: # pragma : no cover
raise ValueError("Type %s for data not understood" % type(data))
if byteorder is None:
byteorder = sys.byteorder
self._byteorder = _set_endianness(byteorder)
self._encoding = encoding
self._file = _open_file_binary_write(fname, encoding)
def _write(self, to_write):
"""
Helper to call asbytes before writing to file for Python 3 compat.
"""
self._file.write(asbytes(to_write))
def _prepare_structured_array(self, data):
self.nobs = len(data)
self.nvar = len(data.dtype)
self.data = data
self.datarows = iter(data)
dtype = data.dtype
descr = dtype.descr
if dtype.names is None:
            varlist = _default_names(self.nvar)
else:
varlist = dtype.names
# check for datetime and change the type
convert_dates = self._convert_dates
if convert_dates is not None:
convert_dates = _maybe_convert_to_int_keys(convert_dates,
varlist)
self._convert_dates = convert_dates
for key in convert_dates:
descr[key] = (
descr[key][0],
_convert_datetime_to_stata_type(convert_dates[key])
)
dtype = np.dtype(descr)
self.varlist = varlist
self.typlist = [_dtype_to_stata_type(dtype[i])
for i in range(self.nvar)]
self.fmtlist = [_dtype_to_default_stata_fmt(dtype[i])
for i in range(self.nvar)]
# set the given format for the datetime cols
if convert_dates is not None:
for key in convert_dates:
self.fmtlist[key] = convert_dates[key]
def _prepare_ndarray(self, data):
if data.ndim == 1:
data = data[:,None]
self.nobs, self.nvar = data.shape
self.data = data
self.datarows = iter(data)
#TODO: this should be user settable
dtype = data.dtype
self.varlist = _default_names(self.nvar)
self.typlist = [_dtype_to_stata_type(dtype) for i in range(self.nvar)]
self.fmtlist = [_dtype_to_default_stata_fmt(dtype)
for i in range(self.nvar)]
def _prepare_pandas(self, data):
#NOTE: we might need a different API / class for pandas objects so
# we can set different semantics - handle this with a PR to pandas.io
class DataFrameRowIter(object):
def __init__(self, data):
self.data = data
def __iter__(self):
for i, row in data.iterrows():
yield row
data = data.reset_index()
self.datarows = DataFrameRowIter(data)
self.nobs, self.nvar = data.shape
self.data = data
self.varlist = data.columns.tolist()
dtypes = data.dtypes
convert_dates = self._convert_dates
if convert_dates is not None:
convert_dates = _maybe_convert_to_int_keys(convert_dates,
self.varlist)
self._convert_dates = convert_dates
for key in convert_dates:
new_type = _convert_datetime_to_stata_type(convert_dates[key])
dtypes[key] = np.dtype(new_type)
self.typlist = [_dtype_to_stata_type(dt) for dt in dtypes]
self.fmtlist = [_dtype_to_default_stata_fmt(dt) for dt in dtypes]
# set the given format for the datetime cols
if convert_dates is not None:
for key in convert_dates:
self.fmtlist[key] = convert_dates[key]
def write_file(self):
self._write_header()
self._write_descriptors()
self._write_variable_labels()
# write 5 zeros for expansion fields
self._write(_pad_bytes("", 5))
if self._convert_dates is None:
self._write_data_nodates()
else:
self._write_data_dates()
#self._write_value_labels()
def _write_header(self, data_label=None, time_stamp=None):
byteorder = self._byteorder
# ds_format - just use 114
self._write(pack("b", 114))
# byteorder
self._write(byteorder == ">" and "\x01" or "\x02")
# filetype
self._write("\x01")
# unused
self._write("\x00")
# number of vars, 2 bytes
self._write(pack(byteorder+"h", self.nvar)[:2])
# number of obs, 4 bytes
self._write(pack(byteorder+"i", self.nobs)[:4])
# data label 81 bytes, char, null terminated
if data_label is None:
self._write(self._null_terminate(_pad_bytes("", 80),
self._encoding))
else:
self._write(self._null_terminate(_pad_bytes(data_label[:80],
80), self._encoding))
# time stamp, 18 bytes, char, null terminated
# format dd Mon yyyy hh:mm
if time_stamp is None:
time_stamp = datetime.datetime.now()
        elif not isinstance(time_stamp, datetime.datetime):
raise ValueError("time_stamp should be datetime type")
self._write(self._null_terminate(
time_stamp.strftime("%d %b %Y %H:%M"),
self._encoding))
def _write_descriptors(self, typlist=None, varlist=None, srtlist=None,
fmtlist=None, lbllist=None):
nvar = self.nvar
# typlist, length nvar, format byte array
for typ in self.typlist:
self._write(typ)
# varlist, length 33*nvar, char array, null terminated
for name in self.varlist:
name = self._null_terminate(name, self._encoding)
name = _pad_bytes(asstr(name[:32]), 33)
self._write(name)
# srtlist, 2*(nvar+1), int array, encoded by byteorder
srtlist = _pad_bytes("", (2*(nvar+1)))
self._write(srtlist)
# fmtlist, 49*nvar, char array
for fmt in self.fmtlist:
self._write(_pad_bytes(fmt, 49))
# lbllist, 33*nvar, char array
#NOTE: this is where you could get fancy with pandas categorical type
for i in range(nvar):
self._write(_pad_bytes("", 33))
def _write_variable_labels(self, labels=None):
nvar = self.nvar
if labels is None:
for i in range(nvar):
self._write(_pad_bytes("", 81))
def _write_data_nodates(self):
data = self.datarows
byteorder = self._byteorder
TYPE_MAP = self.TYPE_MAP
typlist = self.typlist
for row in data:
#row = row.squeeze().tolist() # needed for structured arrays
for i,var in enumerate(row):
typ = ord(typlist[i])
if typ <= 244: # we've got a string
                if len(var) < typ:
                    # pad to the full declared column width so records stay aligned
                    var = _pad_bytes(asstr(var), typ)
self._write(var)
else:
try:
self._write(pack(byteorder+TYPE_MAP[typ], var))
except struct_error:
# have to be strict about type pack won't do any
# kind of casting
self._write(pack(byteorder+TYPE_MAP[typ],
_type_converters[typ](var)))
def _write_data_dates(self):
convert_dates = self._convert_dates
data = self.datarows
byteorder = self._byteorder
TYPE_MAP = self.TYPE_MAP
MISSING_VALUES = self.MISSING_VALUES
typlist = self.typlist
for row in data:
#row = row.squeeze().tolist() # needed for structured arrays
for i,var in enumerate(row):
typ = ord(typlist[i])
#NOTE: If anyone finds this terribly slow, there is
# a vectorized way to convert dates, see genfromdta for going
# from int to datetime and reverse it. will copy data though
if i in convert_dates:
var = _datetime_to_stata_elapsed(var, self.fmtlist[i])
if typ <= 244: # we've got a string
if isnull(var):
var = "" # missing string
                    if len(var) < typ:
                        # pad to the full declared column width so records stay aligned
                        var = _pad_bytes(var, typ)
self._write(var)
else:
if isnull(var): # this only matters for floats
var = MISSING_VALUES[typ]
self._write(pack(byteorder+TYPE_MAP[typ], var))
def _null_terminate(self, s, encoding):
null_byte = '\x00'
if PY3:
s += null_byte
return s.encode(encoding)
else:
s += null_byte
return s
def genfromdta(fname, missing_flt=-999., encoding=None, pandas=False,
convert_dates=True):
"""
Returns an ndarray or DataFrame from a Stata .dta file.
Parameters
----------
fname : str or filehandle
Stata .dta file.
missing_flt : numeric
The numeric value to replace missing values with. Will be used for
any numeric value.
encoding : string, optional
Used for Python 3 only. Encoding to use when reading the .dta file.
Defaults to `locale.getpreferredencoding`
pandas : bool
Optionally return a DataFrame instead of an ndarray
convert_dates : bool
If convert_dates is True, then Stata formatted dates will be converted
to datetime types according to the variable's format.
"""
if isinstance(fname, string_types):
fhd = StataReader(open(fname, 'rb'), missing_values=False,
encoding=encoding)
elif not hasattr(fname, 'read'):
raise TypeError("The input should be a string or a filehandle. "\
"(got %s instead)" % type(fname))
else:
fhd = StataReader(fname, missing_values=False, encoding=encoding)
# validate_names = np.lib._iotools.NameValidator(excludelist=excludelist,
# deletechars=deletechars,
# case_sensitive=case_sensitive)
#TODO: This needs to handle the byteorder?
header = fhd.file_headers()
types = header['dtyplist']
nobs = header['nobs']
numvars = header['nvar']
varnames = header['varlist']
fmtlist = header['fmtlist']
dataname = header['data_label']
labels = header['vlblist'] # labels are thrown away unless DataArray
# type is used
data = np.zeros((nobs,numvars))
stata_dta = fhd.dataset()
dt = np.dtype(lzip(varnames, types))
data = np.zeros((nobs), dtype=dt) # init final array
for rownum,line in enumerate(stata_dta):
# doesn't handle missing value objects, just casts
# None will only work without missing value object.
if None in line:
for i,val in enumerate(line):
#NOTE: This will only be scalar types because missing strings
# are empty not None in Stata
if val is None:
line[i] = missing_flt
data[rownum] = tuple(line)
if pandas:
from pandas import DataFrame
data = DataFrame.from_records(data)
if convert_dates:
cols = np.where(lmap(lambda x : x in _date_formats, fmtlist))[0]
for col in cols:
i = col
col = data.columns[col]
data[col] = data[col].apply(_stata_elapsed_date_to_datetime,
args=(fmtlist[i],))
elif convert_dates:
#date_cols = np.where(map(lambda x : x in _date_formats,
# fmtlist))[0]
# make the dtype for the datetime types
cols = np.where(lmap(lambda x : x in _date_formats, fmtlist))[0]
dtype = data.dtype.descr
dtype = [(dt[0], object) if i in cols else dt for i,dt in
enumerate(dtype)]
data = data.astype(dtype) # have to copy
for col in cols:
def convert(x):
return _stata_elapsed_date_to_datetime(x, fmtlist[col])
data[data.dtype.names[col]] = lmap(convert,
data[data.dtype.names[col]])
return data
def savetxt(fname, X, names=None, fmt='%.18e', delimiter=' '):
"""
Save an array to a text file.
This is just a copy of numpy.savetxt patched to support structured arrays
or a header of names. Does not include py3 support now in savetxt.
Parameters
----------
fname : filename or file handle
If the filename ends in ``.gz``, the file is automatically saved in
compressed gzip format. `loadtxt` understands gzipped files
transparently.
X : array_like
Data to be saved to a text file.
names : list, optional
If given names will be the column header in the text file. If None and
X is a structured or recarray then the names are taken from
X.dtype.names.
fmt : str or sequence of strs
A single format (%10.5f), a sequence of formats, or a
multi-format string, e.g. 'Iteration %d -- %10.5f', in which
case `delimiter` is ignored.
delimiter : str
Character separating columns.
See Also
--------
save : Save an array to a binary file in NumPy ``.npy`` format
savez : Save several arrays into a ``.npz`` compressed archive
Notes
-----
Further explanation of the `fmt` parameter
(``%[flag]width[.precision]specifier``):
flags:
``-`` : left justify
``+`` : Forces to preceed result with + or -.
``0`` : Left pad the number with zeros instead of space (see width).
width:
Minimum number of characters to be printed. The value is not truncated
if it has more characters.
precision:
- For integer specifiers (eg. ``d,i,o,x``), the minimum number of
digits.
- For ``e, E`` and ``f`` specifiers, the number of digits to print
after the decimal point.
- For ``g`` and ``G``, the maximum number of significant digits.
- For ``s``, the maximum number of characters.
specifiers:
``c`` : character
``d`` or ``i`` : signed decimal integer
``e`` or ``E`` : scientific notation with ``e`` or ``E``.
``f`` : decimal floating point
``g,G`` : use the shorter of ``e,E`` or ``f``
``o`` : signed octal
``s`` : string of characters
``u`` : unsigned decimal integer
``x,X`` : unsigned hexadecimal integer
This explanation of ``fmt`` is not complete, for an exhaustive
specification see [1]_.
References
----------
.. [1] `Format Specification Mini-Language
<http://docs.python.org/library/string.html#
format-specification-mini-language>`_, Python Documentation.
Examples
--------
>>> savetxt('test.out', x, delimiter=',') # x is an array
>>> savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays
>>> savetxt('test.out', x, fmt='%1.4e') # use exponential notation
"""
if _is_string_like(fname):
if fname.endswith('.gz'):
import gzip
fh = gzip.open(fname, 'wb')
else:
            fh = open(fname, 'w')  # open() works on Python 2 and 3; file() is Python 2 only
elif hasattr(fname, 'seek'):
fh = fname
else:
raise ValueError('fname must be a string or file handle')
X = np.asarray(X)
# Handle 1-dimensional arrays
if X.ndim == 1:
# Common case -- 1d array of numbers
if X.dtype.names is None:
X = np.atleast_2d(X).T
ncol = 1
# Complex dtype -- each field indicates a separate column
else:
ncol = len(X.dtype.descr)
else:
ncol = X.shape[1]
# `fmt` can be a string with multiple insertion points or a list of formats.
    # E.g. '%10.5f\t%10d' or ('%10.5f', '%10d')
if isinstance(fmt, (list, tuple)):
if len(fmt) != ncol:
raise AttributeError('fmt has wrong shape. %s' % str(fmt))
format = delimiter.join(fmt)
elif isinstance(fmt, string_types):
if fmt.count('%') == 1:
fmt = [fmt, ]*ncol
format = delimiter.join(fmt)
elif fmt.count('%') != ncol:
raise AttributeError('fmt has wrong number of %% formats. %s'
% fmt)
else:
format = fmt
# handle names
if names is None and X.dtype.names:
names = X.dtype.names
if names is not None:
fh.write(delimiter.join(names) + '\n')
for row in X:
fh.write(format % tuple(row) + '\n')
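# A minimal usage sketch for the structured-array support above (hypothetical
# data; the header names are taken from the dtype when `names` is not given):
#
#     import numpy as np
#     arr = np.array([(1, 2.5), (2, 3.5)], dtype=[('id', int), ('val', float)])
#     savetxt('out.txt', arr, fmt='%d %.2f')
#     # out.txt: an "id val" header line, then one formatted row per record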
if __name__ == "__main__":
import os
curdir = os.path.dirname(os.path.abspath(__file__))
res1 = genfromdta(curdir+'/../../datasets/macrodata/macrodata.dta')
| DonBeo/statsmodels | statsmodels/iolib/foreign.py | Python | bsd-3-clause | 43,125 |
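# A minimal usage sketch for genfromdta (assuming this module is importable as
# statsmodels.iolib.foreign and that 'macrodata.dta' exists locally):
from statsmodels.iolib.foreign import genfromdta

df = genfromdta('macrodata.dta', pandas=True, convert_dates=True)
print(df.head())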
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# complexity documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
cwd = os.getcwd()
parent = os.path.dirname(cwd)
sys.path.append(parent)
import pkg_resources
# -- General configuration -----------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'testdoubles'
copyright = u'2013, Omer Katz'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = pkg_resources.get_distribution('testdoubles').version
# The full version, including alpha/beta/rc tags.
release = pkg_resources.get_distribution('testdoubles').version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'testdoublesdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'testdoubles.tex', u'testdoubles Documentation',
u'Omer Katz', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'testdoubles', u'testdoubles Documentation',
[u'Omer Katz'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'testdoubles', u'testdoubles Documentation',
u'Omer Katz', 'testdoubles', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False | testsuite/testdoubles | docs/conf.py | Python | bsd-3-clause | 8,235 |
import socket
import neovim
from threading import Thread
@neovim.plugin
class Snipper(object):
def __init__(self, vim):
# TODO: start the snipper program
# TODO: make SURE the snipper program starts
self.vim = vim
HOST = 'localhost'
PORT_IN = 53706
PORT_OUT = 53707
self.socket_out = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket_out.connect((HOST, PORT_OUT))
# self.socket_in = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# # self.socket_in.connect((HOST, PORT_IN))
# self.socket_in.bind((HOST, PORT_IN))
# # # self.socket.bind((HOST, PORT))
# self.socket_in.listen(1)
        # input_thread = Thread(target = self.receive_snippets)
# input_thread.start()
# input_thread.join()
        # self.receive_snippets()
@neovim.command('SendLine', sync=True)
def send_line(self):
        # Set the following mapping in the .vimrc  TODO: set it in plugin?
# inoremap <Enter> <C-O>:SendLine<CR><Enter>
line = self.vim.current.line
# (row, col) = self.vim.current.window.cursor
# line = self.vim.current.buffer[row-2] # 0 vs 1 based
# line = self.vim.eval("getline('.')")
        self.socket_out.sendall(line.encode("utf-8"))  # sockets expect bytes on Python 3
    def receive_snippets(self):
        # NOTE: depends on the socket_in setup that is commented out in __init__
        while True:
            self.conn, self.addr = self.socket_in.accept()
            data = self.conn.recv(1024)
            if not data:
                break
            self.vim.command(":normal i {}".format(data.decode("utf-8", "replace")))
            self.conn.close()
| Daphron/vim-snipper | rplugin/python/snipper.py | Python | gpl-3.0 | 1,591 |
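# A hypothetical host-side counterpart to the plugin above: a listener for the
# lines sent over socket_out (port 53707); not part of the plugin itself.
import socket

srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srv.bind(('localhost', 53707))
srv.listen(1)
conn, _ = srv.accept()
while True:
    chunk = conn.recv(1024)
    if not chunk:
        break
    print(chunk.decode("utf-8", "replace"))
conn.close()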
"""Defines utility methods for testing nodes"""
from __future__ import unicode_literals
from node.models import Node
HOSTNAME_COUNTER = 1
SLAVEID_COUNTER = 1
def create_node(hostname=None, slave_id=None):
"""Creates a node model for unit testing
:returns: The node model
:rtype: :class:`node.models.Node`
"""
if not hostname:
global HOSTNAME_COUNTER
hostname = 'host%i.com' % HOSTNAME_COUNTER
HOSTNAME_COUNTER = HOSTNAME_COUNTER + 1
if not slave_id:
global SLAVEID_COUNTER
slave_id = '123-456-789-%i' % SLAVEID_COUNTER
SLAVEID_COUNTER = SLAVEID_COUNTER + 1
return Node.objects.create_nodes([hostname])[0]
| ngageoint/scale | scale/node/test/utils.py | Python | apache-2.0 | 689 |
import numpy as np
from poliastro.core.util import circular_velocity
from .._jit import jit
@jit
def extra_quantities(k, a_0, a_f, inc_0, inc_f, f):
"""Extra quantities given by the Edelbaum (a, i) model.
"""
V_0, beta_0_, _ = compute_parameters(k, a_0, a_f, inc_0, inc_f)
delta_V_ = delta_V(V_0, beta_0_, inc_0, inc_f)
t_f_ = delta_V_ / f
return delta_V_, t_f_
@jit
def beta(t, *, V_0, f, beta_0):
"""Compute yaw angle (β) as a function of time and the problem parameters.
"""
return np.arctan2(V_0 * np.sin(beta_0), V_0 * np.cos(beta_0) - f * t)
@jit
def beta_0(V_0, V_f, inc_0, inc_f):
"""Compute initial yaw angle (β) as a function of the problem parameters.
"""
delta_i_f = abs(inc_f - inc_0)
return np.arctan2(
np.sin(np.pi / 2 * delta_i_f), V_0 / V_f - np.cos(np.pi / 2 * delta_i_f)
)
@jit
def compute_parameters(k, a_0, a_f, inc_0, inc_f):
"""Compute parameters of the model.
"""
delta_inc = abs(inc_f - inc_0)
V_0 = circular_velocity(k, a_0)
V_f = circular_velocity(k, a_f)
beta_0_ = beta_0(V_0, V_f, inc_0, inc_f)
return V_0, beta_0_, delta_inc
@jit
def delta_V(V_0, beta_0, inc_0, inc_f):
"""Compute required increment of velocity.
"""
delta_i_f = abs(inc_f - inc_0)
return V_0 * np.cos(beta_0) - V_0 * np.sin(beta_0) / np.tan(
np.pi / 2 * delta_i_f + beta_0
)
| Juanlu001/poliastro | src/poliastro/core/thrust/change_a_inc.py | Python | mit | 1,408 |
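# A minimal usage sketch (assuming this module is importable as
# poliastro.core.thrust.change_a_inc; the orbit and thrust values are
# illustrative, in km and seconds):
import numpy as np
from poliastro.core.thrust.change_a_inc import extra_quantities

k = 398600.4415                         # Earth's mu [km^3 / s^2]
a_0, a_f = 7000.0, 42166.0              # initial / final semimajor axis [km]
inc_0, inc_f = np.radians(28.5), 0.0    # inclinations [rad]
f = 3.5e-7                              # thrust acceleration [km / s^2]

dV, t_f = extra_quantities(k, a_0, a_f, inc_0, inc_f, f)
print("delta-V: %.3f km/s, transfer time: %.1f days" % (dV, t_f / 86400.0))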
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('trackbuild', '0002_auto_20150718_1936'),
]
operations = [
migrations.CreateModel(
name='BuildCounter',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('count', models.IntegerField(default=0)),
('release', models.ForeignKey(related_name='buildcounter', to='trackbuild.Release')),
],
),
]
| miraculixx/trackbuild | trackbuild/migrations/0003_buildcounter.py | Python | mit | 640 |
#!/usr/bin/env python
'''
' Author: Mitchell Steed
' Contributing Author: Douglas Kelly
' This file opens up a socket on the Host machine and allows standard input from the Victim to be piped to it.
'''
'Import argument Parser, a library used for generating easy documentation based on expected parameter flags'
import argparse
'Imports the datetime library, used for dissecting and interpreting datetime formats'
import datetime
'Imports the hashlib library, used for creation of hashes based on the content imported from the socket'
import hashlib
'Imports the socket library, used to establish a tunnel between the victim and host machines'
import socket
'Set program constants'
buff_size = 1024
hash_algorithm = "md5"
data_delim = "%%%%"
def parse_arguments():
''' parse arguments, which include '-p' for port and '-a' for algorithms'''
    parser = argparse.ArgumentParser(prog='Forensic Hasher', description='A simple tool to date and hash Linux command outputs', add_help=True)
parser.add_argument('-p', '--port', type=int, action='store', help='port the victim will listen on', default=3000)
parser.add_argument('-a', '--algorithm', type=str, action='store',
                        help='hashing algorithm to use, options include: {}'.format(hashlib.algorithms), default='md5')
args = parser.parse_args()
return args.port, args.algorithm.lower()
def getContentsAndFileName(data):
    ''' Splits the data string on the delimiter and returns the file name and the file contents '''
split_list = data.split(data_delim)
file_name = split_list[-1]
file_contents = data_delim.join(split_list[:-1])
return file_name, file_contents
def getDate():
    ''' Returns the current datetime as a formatted string '''
return datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
def getHash(hashable):
''' Returns the hash of the content delivered to this method as a String parameter'''
method = getattr(hashlib, hash_algorithm)
h = method()
h.update(hashable)
return h.hexdigest()
def writeFile(file_name, file_contents):
''' Writes the file to the system'''
with open(file_name, "w+") as f:
f.write(file_contents)
def writeFiles(data):
    ''' This is the main function called when content is received via the socket.
    Takes the string data, writes the file, then gives the contents to the hasher to create a hash.
    Also writes out the datetime of the file, along with its hash.
    '''
# main file and hash
file_name, file_contents = getContentsAndFileName(data)
file_hash = getHash(file_contents)
writeFile(file_name, file_contents)
writeFile(file_name +"." + hash_algorithm, file_hash)
# date and date hash
date = getDate()
date_hash = getHash(date)
writeFile(file_name + ".date", date)
writeFile(file_name + ".date." + hash_algorithm, date_hash)
def handle(client):
''' handles the individual client connection '''
data = ""
while True:
localData = client.recv(buff_size)
if localData:
data += localData
else:
client.close()
break
writeFiles(data)
def listen(port):
''' Main function called from the run method '''
print "Now listening for input: "
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,1)
sock.bind(("", port))
sock.listen(5)
while True:
client, _ = sock.accept()
handle(client)
sock.close()
def run():
    ''' Grabs the arguments input by the user and stores them in the module-level settings used by listen'''
    global hash_algorithm  # without this, the -a flag would only set a local and md5 would always be used
    port, hash_algorithm = parse_arguments()
listen(port)
if __name__ == "__main__":
''' Main method defined for the python file by convention'''
run()
| MitchSteed/ForensicHasher | workstation.py | Python | gpl-3.0 | 3,842 |
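# A hypothetical "victim"-side sender matching the protocol above (Python 2,
# like the listener): file contents, then the %%%% delimiter, then the name
# the workstation should save the file as. "workstation-ip" is a placeholder.
import socket
import subprocess

output = subprocess.check_output(["uname", "-a"])
payload = output + "%%%%" + "uname.out"
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(("workstation-ip", 3000))
s.sendall(payload)
s.close()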
"""Copyright 2008 Orbitz WorldWide
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
from django.conf.urls import patterns, url
from . import views
urlpatterns = patterns(
'',
url('^header/?$', views.header, name='browser_header'),
url('^search/?$', views.search, name='browser_search'),
url('^mygraph/?$', views.myGraphLookup, name='browser_my_graph'),
url('^usergraph/?$', views.userGraphLookup, name='browser_usergraph'),
url('^$', views.browser, name='browser'),
)
| cgvarela/graphite-web | webapp/graphite/browser/urls.py | Python | apache-2.0 | 987 |
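# A minimal sketch of resolving one of the named routes above (assumes this
# URLconf is wired into the project and the Django 1.x API that matches the
# patterns() call above):
from django.core.urlresolvers import reverse

search_url = reverse('browser_search')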
#!/usr/bin/env python
import i3
import re
import subprocess
import getopt
import sys
import os
def find_parent(window_id):
"""
Find the parent of a given window id
"""
root_window = i3.get_tree()
result = [None]
def finder(n, p=None):
if result[0] is not None:
return
for node in n:
if node['id'] == window_id:
result[0] = p
return
if len(node['nodes']):
finder(node['nodes'], node)
finder(root_window['nodes'])
return result[0]
def set_layout():
"""
Set the layout/split for the currently
focused window to either vertical or
horizontal, depending on its width/height
"""
current_win = i3.filter(nodes=[], focused=True)
for win in current_win:
parent = find_parent(win['id'])
if (parent and "rect" in parent
and parent['layout'] != 'tabbed'
and parent['layout'] != 'stacked'):
height = parent['rect']['height']
width = parent['rect']['width']
if height > width:
new_layout = 'vertical'
else:
new_layout = 'horizontal'
i3.split(new_layout)
def print_help():
print("Usage: " + sys.argv[0] + " [-p path/to/pid.file]")
print("")
print("Options:")
print(" -p path/to/pid.file Saves the PID for this program in the filename specified")
print("")
def main():
"""
Main function - listen for window focus
changes and call set_layout when focus
changes
"""
opt_list, args = getopt.getopt(sys.argv[1:], 'hp:')
pid_file = None
for opt in opt_list:
if opt[0] == "-h":
print_help()
sys.exit()
if opt[0] == "-p":
pid_file = opt[1]
if pid_file:
with open(pid_file, 'w') as f:
f.write(str(os.getpid()))
process = subprocess.Popen(
['xprop', '-root', '-spy'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
regex = re.compile(b'^_NET_CLIENT_LIST_STACKING|^_NET_ACTIVE_WINDOW')
last_line = ""
while True:
line = process.stdout.readline()
        if line == b'':  # X is dead
break
if line == last_line:
continue
if regex.match(line):
set_layout()
last_line = line
process.kill()
sys.exit()
if __name__ == "__main__":
main()
| suchacoder/Dotfiles | .config/i3/alternating_layouts.py | Python | gpl-3.0 | 2,518 |
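# A minimal sketch using the same i3-py bindings (run with find_parent from
# the script above in scope; window ids come from the i3 layout tree):
import i3

focused = i3.filter(nodes=[], focused=True)
if focused:
    parent = find_parent(focused[0]['id'])
    print(parent['layout'] if parent else "no parent found")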
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_check_availability_request(
subscription_id: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2015-04-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/checkDomainAvailability')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_list_request(
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2015-04-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/domains')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_control_center_sso_request_request(
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2015-04-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/generateSsoRequest')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_recommendations_request(
subscription_id: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2015-04-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/listDomainRecommendations')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_list_by_resource_group_request(
resource_group_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2015-04-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_request(
resource_group_name: str,
domain_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2015-04-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"domainName": _SERIALIZER.url("domain_name", domain_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_create_or_update_request_initial(
resource_group_name: str,
domain_name: str,
subscription_id: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2015-04-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"domainName": _SERIALIZER.url("domain_name", domain_name, 'str', pattern=r'[a-zA-Z0-9][a-zA-Z0-9\.-]+'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_delete_request(
resource_group_name: str,
domain_name: str,
subscription_id: str,
*,
force_hard_delete_domain: Optional[bool] = None,
**kwargs: Any
) -> HttpRequest:
api_version = "2015-04-01"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"domainName": _SERIALIZER.url("domain_name", domain_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if force_hard_delete_domain is not None:
query_parameters['forceHardDeleteDomain'] = _SERIALIZER.query("force_hard_delete_domain", force_hard_delete_domain, 'bool')
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
**kwargs
)
def build_update_request(
resource_group_name: str,
domain_name: str,
subscription_id: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2015-04-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"domainName": _SERIALIZER.url("domain_name", domain_name, 'str', pattern=r'[a-zA-Z0-9][a-zA-Z0-9\.-]+'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PATCH",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_list_ownership_identifiers_request(
resource_group_name: str,
domain_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2015-04-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}/domainOwnershipIdentifiers')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"domainName": _SERIALIZER.url("domain_name", domain_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_ownership_identifier_request(
resource_group_name: str,
domain_name: str,
name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2015-04-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}/domainOwnershipIdentifiers/{name}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"domainName": _SERIALIZER.url("domain_name", domain_name, 'str'),
"name": _SERIALIZER.url("name", name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_create_or_update_ownership_identifier_request(
resource_group_name: str,
domain_name: str,
name: str,
subscription_id: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2015-04-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}/domainOwnershipIdentifiers/{name}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"domainName": _SERIALIZER.url("domain_name", domain_name, 'str'),
"name": _SERIALIZER.url("name", name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_delete_ownership_identifier_request(
resource_group_name: str,
domain_name: str,
name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2015-04-01"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}/domainOwnershipIdentifiers/{name}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"domainName": _SERIALIZER.url("domain_name", domain_name, 'str'),
"name": _SERIALIZER.url("name", name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
**kwargs
)
def build_update_ownership_identifier_request(
resource_group_name: str,
domain_name: str,
name: str,
subscription_id: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2015-04-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}/domainOwnershipIdentifiers/{name}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"domainName": _SERIALIZER.url("domain_name", domain_name, 'str'),
"name": _SERIALIZER.url("name", name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PATCH",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_renew_request(
resource_group_name: str,
domain_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2015-04-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}/renew')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"domainName": _SERIALIZER.url("domain_name", domain_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
class DomainsOperations(object):
"""DomainsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.web.v2015_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def check_availability(
self,
identifier: "_models.NameIdentifier",
**kwargs: Any
) -> "_models.DomainAvailablilityCheckResult":
"""Check if a domain is available for registration.
Check if a domain is available for registration.
:param identifier: Name of the domain.
:type identifier: ~azure.mgmt.web.v2015_04_01.models.NameIdentifier
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DomainAvailablilityCheckResult, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2015_04_01.models.DomainAvailablilityCheckResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DomainAvailablilityCheckResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(identifier, 'NameIdentifier')
request = build_check_availability_request(
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.check_availability.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DomainAvailablilityCheckResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
check_availability.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/checkDomainAvailability'} # type: ignore
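    # A hypothetical usage sketch (client construction is not part of this
    # generated file; the credential class assumes the azure-identity package):
    #
    #     from azure.identity import DefaultAzureCredential
    #     from azure.mgmt.web.v2015_04_01 import WebSiteManagementClient
    #     client = WebSiteManagementClient(DefaultAzureCredential(), "<subscription-id>")
    #     result = client.domains.check_availability({"name": "example.com"})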
@distributed_trace
def list(
self,
**kwargs: Any
) -> Iterable["_models.DomainCollection"]:
"""Get all domains in a subscription.
Get all domains in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DomainCollection or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2015_04_01.models.DomainCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DomainCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("DomainCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/domains'} # type: ignore
@distributed_trace
def get_control_center_sso_request(
self,
**kwargs: Any
) -> "_models.DomainControlCenterSsoRequest":
"""Generate a single sign-on request for the domain management portal.
Generate a single sign-on request for the domain management portal.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DomainControlCenterSsoRequest, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2015_04_01.models.DomainControlCenterSsoRequest
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DomainControlCenterSsoRequest"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_control_center_sso_request_request(
subscription_id=self._config.subscription_id,
template_url=self.get_control_center_sso_request.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DomainControlCenterSsoRequest', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_control_center_sso_request.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/generateSsoRequest'} # type: ignore
@distributed_trace
def list_recommendations(
self,
parameters: "_models.DomainRecommendationSearchParameters",
**kwargs: Any
) -> Iterable["_models.NameIdentifierCollection"]:
"""Get domain name recommendations based on keywords.
Get domain name recommendations based on keywords.
:param parameters: Search parameters for domain name recommendations.
:type parameters: ~azure.mgmt.web.v2015_04_01.models.DomainRecommendationSearchParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NameIdentifierCollection or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.web.v2015_04_01.models.NameIdentifierCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
cls = kwargs.pop('cls', None) # type: ClsType["_models.NameIdentifierCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
_json = self._serialize.body(parameters, 'DomainRecommendationSearchParameters')
request = build_list_recommendations_request(
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.list_recommendations.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
_json = self._serialize.body(parameters, 'DomainRecommendationSearchParameters')
request = build_list_recommendations_request(
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("NameIdentifierCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_recommendations.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/listDomainRecommendations'} # type: ignore
@distributed_trace
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> Iterable["_models.DomainCollection"]:
"""Get all domains in a resource group.
Get all domains in a resource group.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DomainCollection or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2015_04_01.models.DomainCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DomainCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
template_url=self.list_by_resource_group.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("DomainCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains'} # type: ignore
@distributed_trace
def get(
self,
resource_group_name: str,
domain_name: str,
**kwargs: Any
) -> "_models.Domain":
"""Get a domain.
Get a domain.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param domain_name: Name of the domain.
:type domain_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Domain, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2015_04_01.models.Domain
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Domain"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
resource_group_name=resource_group_name,
domain_name=domain_name,
subscription_id=self._config.subscription_id,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Domain', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name: str,
domain_name: str,
domain: "_models.Domain",
**kwargs: Any
) -> "_models.Domain":
cls = kwargs.pop('cls', None) # type: ClsType["_models.Domain"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(domain, 'Domain')
request = build_create_or_update_request_initial(
resource_group_name=resource_group_name,
domain_name=domain_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Domain', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('Domain', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}'} # type: ignore
@distributed_trace
def begin_create_or_update(
self,
resource_group_name: str,
domain_name: str,
domain: "_models.Domain",
**kwargs: Any
) -> LROPoller["_models.Domain"]:
"""Creates or updates a domain.
Creates or updates a domain.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param domain_name: Name of the domain.
:type domain_name: str
:param domain: Domain registration information.
:type domain: ~azure.mgmt.web.v2015_04_01.models.Domain
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either Domain or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.web.v2015_04_01.models.Domain]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Domain"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
domain_name=domain_name,
domain=domain,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('Domain', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}'} # type: ignore
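
    # Hedged usage sketch (illustrative): begin_create_or_update returns an
    # LROPoller; call .result() to block until the long-running operation
    # completes. The client and payload names below are assumptions:
    #
    #   poller = client.domains.begin_create_or_update(
    #       "my-resource-group", "contoso.com", domain_payload)
    #   domain = poller.result()  # a Domain model on success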
@distributed_trace
def delete(
self,
resource_group_name: str,
domain_name: str,
force_hard_delete_domain: Optional[bool] = None,
**kwargs: Any
) -> None:
"""Delete a domain.
Delete a domain.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param domain_name: Name of the domain.
:type domain_name: str
:param force_hard_delete_domain: Specify :code:`<code>true</code>` to delete the domain
immediately. The default is :code:`<code>false</code>` which deletes the domain after 24 hours.
:type force_hard_delete_domain: bool
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request(
resource_group_name=resource_group_name,
domain_name=domain_name,
subscription_id=self._config.subscription_id,
force_hard_delete_domain=force_hard_delete_domain,
template_url=self.delete.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}'} # type: ignore
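
    # Hedged usage sketch (illustrative): the service deletes a domain after
    # 24 hours by default; pass force_hard_delete_domain=True to delete it
    # immediately. The client and names below are assumptions:
    #
    #   client.domains.delete("my-resource-group", "contoso.com",
    #                         force_hard_delete_domain=True)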
@distributed_trace
def update(
self,
resource_group_name: str,
domain_name: str,
domain: "_models.DomainPatchResource",
**kwargs: Any
) -> "_models.Domain":
"""Creates or updates a domain.
Creates or updates a domain.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param domain_name: Name of the domain.
:type domain_name: str
:param domain: Domain registration information.
:type domain: ~azure.mgmt.web.v2015_04_01.models.DomainPatchResource
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Domain, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2015_04_01.models.Domain
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Domain"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(domain, 'DomainPatchResource')
request = build_update_request(
resource_group_name=resource_group_name,
domain_name=domain_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Domain', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('Domain', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}'} # type: ignore
@distributed_trace
def list_ownership_identifiers(
self,
resource_group_name: str,
domain_name: str,
**kwargs: Any
) -> Iterable["_models.DomainOwnershipIdentifierCollection"]:
"""Lists domain ownership identifiers.
Lists domain ownership identifiers.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param domain_name: Name of domain.
:type domain_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DomainOwnershipIdentifierCollection or the result
of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.web.v2015_04_01.models.DomainOwnershipIdentifierCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DomainOwnershipIdentifierCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_ownership_identifiers_request(
resource_group_name=resource_group_name,
domain_name=domain_name,
subscription_id=self._config.subscription_id,
template_url=self.list_ownership_identifiers.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_ownership_identifiers_request(
resource_group_name=resource_group_name,
domain_name=domain_name,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("DomainOwnershipIdentifierCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_ownership_identifiers.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}/domainOwnershipIdentifiers'} # type: ignore
@distributed_trace
def get_ownership_identifier(
self,
resource_group_name: str,
domain_name: str,
name: str,
**kwargs: Any
) -> "_models.DomainOwnershipIdentifier":
"""Get ownership identifier for domain.
Get ownership identifier for domain.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param domain_name: Name of domain.
:type domain_name: str
:param name: Name of identifier.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DomainOwnershipIdentifier, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2015_04_01.models.DomainOwnershipIdentifier
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DomainOwnershipIdentifier"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_ownership_identifier_request(
resource_group_name=resource_group_name,
domain_name=domain_name,
name=name,
subscription_id=self._config.subscription_id,
template_url=self.get_ownership_identifier.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DomainOwnershipIdentifier', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_ownership_identifier.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}/domainOwnershipIdentifiers/{name}'} # type: ignore
@distributed_trace
def create_or_update_ownership_identifier(
self,
resource_group_name: str,
domain_name: str,
name: str,
domain_ownership_identifier: "_models.DomainOwnershipIdentifier",
**kwargs: Any
) -> "_models.DomainOwnershipIdentifier":
"""Creates an ownership identifier for a domain or updates identifier details for an existing
identifier.
Creates an ownership identifier for a domain or updates identifier details for an existing
identifier.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param domain_name: Name of domain.
:type domain_name: str
:param name: Name of identifier.
:type name: str
:param domain_ownership_identifier: A JSON representation of the domain ownership properties.
:type domain_ownership_identifier: ~azure.mgmt.web.v2015_04_01.models.DomainOwnershipIdentifier
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DomainOwnershipIdentifier, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2015_04_01.models.DomainOwnershipIdentifier
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DomainOwnershipIdentifier"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(domain_ownership_identifier, 'DomainOwnershipIdentifier')
request = build_create_or_update_ownership_identifier_request(
resource_group_name=resource_group_name,
domain_name=domain_name,
name=name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.create_or_update_ownership_identifier.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DomainOwnershipIdentifier', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update_ownership_identifier.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}/domainOwnershipIdentifiers/{name}'} # type: ignore
@distributed_trace
def delete_ownership_identifier(
self,
resource_group_name: str,
domain_name: str,
name: str,
**kwargs: Any
) -> None:
"""Delete ownership identifier for domain.
Delete ownership identifier for domain.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param domain_name: Name of domain.
:type domain_name: str
:param name: Name of identifier.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_ownership_identifier_request(
resource_group_name=resource_group_name,
domain_name=domain_name,
name=name,
subscription_id=self._config.subscription_id,
template_url=self.delete_ownership_identifier.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete_ownership_identifier.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}/domainOwnershipIdentifiers/{name}'} # type: ignore
@distributed_trace
def update_ownership_identifier(
self,
resource_group_name: str,
domain_name: str,
name: str,
domain_ownership_identifier: "_models.DomainOwnershipIdentifier",
**kwargs: Any
) -> "_models.DomainOwnershipIdentifier":
"""Creates an ownership identifier for a domain or updates identifier details for an existing
identifier.
Creates an ownership identifier for a domain or updates identifier details for an existing
identifier.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param domain_name: Name of domain.
:type domain_name: str
:param name: Name of identifier.
:type name: str
:param domain_ownership_identifier: A JSON representation of the domain ownership properties.
:type domain_ownership_identifier: ~azure.mgmt.web.v2015_04_01.models.DomainOwnershipIdentifier
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DomainOwnershipIdentifier, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2015_04_01.models.DomainOwnershipIdentifier
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DomainOwnershipIdentifier"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(domain_ownership_identifier, 'DomainOwnershipIdentifier')
request = build_update_ownership_identifier_request(
resource_group_name=resource_group_name,
domain_name=domain_name,
name=name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.update_ownership_identifier.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DomainOwnershipIdentifier', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_ownership_identifier.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}/domainOwnershipIdentifiers/{name}'} # type: ignore
@distributed_trace
def renew(
self,
resource_group_name: str,
domain_name: str,
**kwargs: Any
) -> None:
"""Renew a domain.
Renew a domain.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param domain_name: Name of the domain.
:type domain_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_renew_request(
resource_group_name=resource_group_name,
domain_name=domain_name,
subscription_id=self._config.subscription_id,
template_url=self.renew.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
renew.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}/renew'} # type: ignore
| Azure/azure-sdk-for-python | sdk/appservice/azure-mgmt-web/azure/mgmt/web/v2015_04_01/operations/_domains_operations.py | Python | mit | 65,265 |
"""Invocation: python manage.py award_badge <badge-slug> <user>
Awards the badge (identified by its slug) to the user (identified by user name)."""
from django.core import management
from django.contrib.auth.models import User
import sys
from apps.widgets.badges import badges
def award_badge(slug, username):
"""award the badge"""
try:
user = User.objects.get(username=username)
badge = badges.get_badge(slug)
if badge:
badges.award_badge(profile=user.profile, badge=badge)
else:
sys.stderr.write("Badge with the slug %s does not exist.\n" % slug)
except User.DoesNotExist:
sys.stderr.write("User with the username %s does not exist.\n" % username)
class Command(management.base.BaseCommand):
"""command"""
help = 'Awards a badge (identified by its slug) to a user.'
def handle(self, *args, **options):
"""
Awards a badge (identified by its slug) to a user
"""
if len(args) != 2:
self.stderr.write("Usage: 'python manage.py award_badge <badge-slug> <user>\n")
return
slug = args[0]
username = args[1]
award_badge(slug, username)
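
# Hedged usage sketch (illustrative): besides the management command shown in
# the module docstring, the helper can be called from a Django shell; the slug
# and username below are made-up values:
#
#   >>> from apps.widgets.badges.management.commands.award_badge import award_badge
#   >>> award_badge('daily-visitor', 'alice')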
| yongwen/makahiki | makahiki/apps/widgets/badges/management/commands/award_badge.py | Python | mit | 1,207 |
"""PCSCPart10: PC/SC Part 10 (pinpad)
__author__ = "Ludovic Rousseau"
Copyright 2009-2010 Ludovic Rousseau
Author: Ludovic Rousseau, mailto:[email protected]
This file is part of pyscard.
pyscard is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.
pyscard is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with pyscard; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
from __future__ import print_function
from smartcard.scard import *
# constants defined in PC/SC v2 Part 10
CM_IOCTL_GET_FEATURE_REQUEST = SCARD_CTL_CODE(3400)
FEATURE_VERIFY_PIN_START = 0x01
FEATURE_VERIFY_PIN_FINISH = 0x02
FEATURE_MODIFY_PIN_START = 0x03
FEATURE_MODIFY_PIN_FINISH = 0x04
FEATURE_GET_KEY_PRESSED = 0x05
FEATURE_VERIFY_PIN_DIRECT = 0x06
FEATURE_MODIFY_PIN_DIRECT = 0x07
FEATURE_MCT_READER_DIRECT = 0x08
FEATURE_MCT_UNIVERSAL = 0x09
FEATURE_IFD_PIN_PROPERTIES = 0x0A
FEATURE_ABORT = 0x0B
FEATURE_SET_SPE_MESSAGE = 0x0C
FEATURE_VERIFY_PIN_DIRECT_APP_ID = 0x0D
FEATURE_MODIFY_PIN_DIRECT_APP_ID = 0x0E
FEATURE_WRITE_DISPLAY = 0x0F
FEATURE_GET_KEY = 0x10
FEATURE_IFD_DISPLAY_PROPERTIES = 0x11
FEATURE_GET_TLV_PROPERTIES = 0x12
FEATURE_CCID_ESC_COMMAND = 0x13
Features = {
"FEATURE_VERIFY_PIN_START": FEATURE_VERIFY_PIN_START,
"FEATURE_VERIFY_PIN_FINISH": FEATURE_VERIFY_PIN_FINISH,
"FEATURE_MODIFY_PIN_START": FEATURE_MODIFY_PIN_START,
"FEATURE_MODIFY_PIN_FINISH": FEATURE_MODIFY_PIN_FINISH,
"FEATURE_GET_KEY_PRESSED": FEATURE_GET_KEY_PRESSED,
"FEATURE_VERIFY_PIN_DIRECT": FEATURE_VERIFY_PIN_DIRECT,
"FEATURE_MODIFY_PIN_DIRECT": FEATURE_MODIFY_PIN_DIRECT,
"FEATURE_MCT_READER_DIRECT": FEATURE_MCT_READER_DIRECT,
"FEATURE_MCT_UNIVERSAL": FEATURE_MCT_UNIVERSAL,
"FEATURE_IFD_PIN_PROPERTIES": FEATURE_IFD_PIN_PROPERTIES,
"FEATURE_ABORT": FEATURE_ABORT,
"FEATURE_SET_SPE_MESSAGE": FEATURE_SET_SPE_MESSAGE,
"FEATURE_VERIFY_PIN_DIRECT_APP_ID": FEATURE_VERIFY_PIN_DIRECT_APP_ID,
"FEATURE_MODIFY_PIN_DIRECT_APP_ID": FEATURE_MODIFY_PIN_DIRECT_APP_ID,
"FEATURE_WRITE_DISPLAY": FEATURE_WRITE_DISPLAY,
"FEATURE_GET_KEY": FEATURE_GET_KEY,
"FEATURE_IFD_DISPLAY_PROPERTIES": FEATURE_IFD_DISPLAY_PROPERTIES,
"FEATURE_GET_TLV_PROPERTIES": FEATURE_GET_TLV_PROPERTIES,
"FEATURE_CCID_ESC_COMMAND": FEATURE_CCID_ESC_COMMAND}
# properties returned by FEATURE_GET_TLV_PROPERTIES
PCSCv2_PART10_PROPERTY_wLcdLayout = 1
PCSCv2_PART10_PROPERTY_bEntryValidationCondition = 2
PCSCv2_PART10_PROPERTY_bTimeOut2 = 3
PCSCv2_PART10_PROPERTY_wLcdMaxCharacters = 4
PCSCv2_PART10_PROPERTY_wLcdMaxLines = 5
PCSCv2_PART10_PROPERTY_bMinPINSize = 6
PCSCv2_PART10_PROPERTY_bMaxPINSize = 7
PCSCv2_PART10_PROPERTY_sFirmwareID = 8
PCSCv2_PART10_PROPERTY_bPPDUSupport = 9
PCSCv2_PART10_PROPERTY_dwMaxAPDUDataSize = 10
PCSCv2_PART10_PROPERTY_wIdVendor = 11
PCSCv2_PART10_PROPERTY_wIdProduct = 12
Properties = {
"PCSCv2_PART10_PROPERTY_wLcdLayout": PCSCv2_PART10_PROPERTY_wLcdLayout,
"PCSCv2_PART10_PROPERTY_bEntryValidationCondition": \
PCSCv2_PART10_PROPERTY_bEntryValidationCondition,
"PCSCv2_PART10_PROPERTY_bTimeOut2": PCSCv2_PART10_PROPERTY_bTimeOut2,
"PCSCv2_PART10_PROPERTY_wLcdMaxCharacters": \
PCSCv2_PART10_PROPERTY_wLcdMaxCharacters,
"PCSCv2_PART10_PROPERTY_wLcdMaxLines": PCSCv2_PART10_PROPERTY_wLcdMaxLines,
"PCSCv2_PART10_PROPERTY_bMinPINSize": PCSCv2_PART10_PROPERTY_bMinPINSize,
"PCSCv2_PART10_PROPERTY_bMaxPINSize": PCSCv2_PART10_PROPERTY_bMaxPINSize,
"PCSCv2_PART10_PROPERTY_sFirmwareID": PCSCv2_PART10_PROPERTY_sFirmwareID,
"PCSCv2_PART10_PROPERTY_bPPDUSupport": PCSCv2_PART10_PROPERTY_bPPDUSupport,
"PCSCv2_PART10_PROPERTY_dwMaxAPDUDataSize": PCSCv2_PART10_PROPERTY_dwMaxAPDUDataSize,
"PCSCv2_PART10_PROPERTY_wIdVendor": PCSCv2_PART10_PROPERTY_wIdVendor,
"PCSCv2_PART10_PROPERTY_wIdProduct": PCSCv2_PART10_PROPERTY_wIdProduct}
# we already have: Features['FEATURE_x'] = FEATURE_x
# we will now also have: Features[FEATURE_x] = 'FEATURE_x'
for k in list(Features.keys()):
Features[Features[k]] = k
for k in list(Properties.keys()):
Properties[Properties[k]] = k
def getFeatureRequest(cardConnection):
""" Get the list of Part10 features supported by the reader.
@param cardConnection: L{CardConnection} object
@rtype: list
@return: a list of list [[tag1, value1], [tag2, value2]]
"""
response = cardConnection.control(CM_IOCTL_GET_FEATURE_REQUEST, [])
features = []
while (len(response) > 0):
tag = response[0]
control = (((((response[2] << 8) + \
response[3]) << 8) + \
response[4]) << 8) + \
response[5]
try:
features.append([Features[tag], control])
except KeyError:
pass
del response[:6]
return features
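
# Hedged example of the wire format parsed by getFeatureRequest(): each entry
# is 6 bytes -- a tag byte, a length byte (always 4), and a 4-byte big-endian
# IOCTL control code. The bytes below are made up for illustration:
#
#   response = [0x06, 0x04, 0x42, 0x33, 0x00, 0x06]
#   -> [['FEATURE_VERIFY_PIN_DIRECT', 0x42330006]]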
def hasFeature(featureList, feature):
""" return the controlCode for a feature or None
    @param featureList: feature list as returned by L{getFeatureRequest()}
    @param feature: feature to look for
@return: feature value or None
"""
for f in featureList:
if f[0] == feature or Features[f[0]] == feature:
return f[1]
def getPinProperties(cardConnection, featureList=None, controlCode=None):
""" return the PIN_PROPERTIES structure
@param cardConnection: L{CardConnection} object
@param featureList: feature list as returned by L{getFeatureRequest()}
@param controlCode: control code for L{FEATURE_IFD_PIN_PROPERTIES}
@rtype: dict
@return: a dict """
if controlCode is None:
if featureList is None:
featureList = getFeatureRequest(cardConnection)
controlCode = hasFeature(featureList, FEATURE_IFD_PIN_PROPERTIES)
if controlCode is None:
return {'raw': []}
response = cardConnection.control(controlCode, [])
d = {
'raw': response,
'LcdLayoutX': response[0],
'LcdLayoutY': response[1],
'EntryValidationCondition': response[2],
'TimeOut2': response[3]}
return d
def getTlvProperties(cardConnection, featureList=None, controlCode=None):
""" return the GET_TLV_PROPERTIES structure
@param cardConnection: L{CardConnection} object
@param featureList: feature list as returned by L{getFeatureRequest()}
@param controlCode: control code for L{FEATURE_GET_TLV_PROPERTIES}
@rtype: dict
@return: a dict """
if controlCode is None:
if featureList is None:
featureList = getFeatureRequest(cardConnection)
controlCode = hasFeature(featureList, FEATURE_GET_TLV_PROPERTIES)
if controlCode is None:
return {'raw': []}
response = cardConnection.control(controlCode, [])
d = {
'raw': response,
}
# create a new list to consume it
tmp = list(response)
    while tmp:
        tag = tmp[0]
        length = tmp[1]  # avoid shadowing the built-in len()
        data = tmp[2:2 + length]
        if PCSCv2_PART10_PROPERTY_sFirmwareID == tag:
            # convert to a string
            data = "".join([chr(c) for c in data])
        # we now suppose the value is an integer
        elif 1 == length:
            # byte
            data = data[0]
        elif 2 == length:
            # 16-bit value (little endian)
            data = data[1] * 256 + data[0]
        elif 4 == length:
            # 32-bit value (little endian)
            data = ((data[3] * 256 + data[2]) * 256 + data[1]) * 256 + data[0]
        # store the value in the dictionary
        try:
            d[Properties[tag]] = data
        except KeyError:
            d["UNKNOWN"] = data
        del tmp[0:2 + length]
return d
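
# Hedged example of a TLV property entry parsed by getTlvProperties(): a tag
# byte, a length byte, then the value bytes in LITTLE-endian order (note the
# opposite byte order from the feature control codes above). Made-up bytes:
#
#   [0x0B, 0x02, 0x34, 0x12] -> d['PCSCv2_PART10_PROPERTY_wIdVendor'] = 0x1234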
if __name__ == '__main__':
"""Small sample illustrating the use of PCSCPart10."""
from smartcard.pcsc.PCSCReader import PCSCReader
cc = PCSCReader.readers()[0].createConnection()
cc.connect(mode=SCARD_SHARE_DIRECT)
#print(cc.control(CM_IOCTL_GET_FEATURE_REQUEST))
features = getFeatureRequest(cc)
print(features)
print(hasFeature(features, FEATURE_VERIFY_PIN_START))
print(hasFeature(features, FEATURE_VERIFY_PIN_DIRECT))
properties = getPinProperties(cc)
print("\nPinProperties:")
for k, v in list(properties.items()):
print(" %s: %s" % (k, v))
print("\nTlvProperties:")
properties = getTlvProperties(cc)
for k, v in list(properties.items()):
print(" %s: %s" % (k, v))
| moreati/pyscard | smartcard/pcsc/PCSCPart10.py | Python | lgpl-2.1 | 8,728 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Compute and Reduce with Tuple Inputs
=======================================
**Author**: `Ziheng Jiang <https://github.com/ZihengJiang>`_
Often we want to compute multiple outputs with the same shape within
a single loop, or to perform a reduction that involves multiple values,
such as :code:`argmax`. These problems can be addressed by tuple inputs.

In this tutorial, we will introduce the usage of tuple inputs in TVM.
"""
from __future__ import absolute_import, print_function
import tvm
import numpy as np
######################################################################
# Describe Batchwise Computation
# ------------------------------
# For operations whose outputs have the same shape, we can put them together
# as the inputs of :any:`tvm.compute`, if we want them to be scheduled
# together in the subsequent schedule procedure.
#
n = tvm.var("n")
m = tvm.var("m")
A0 = tvm.placeholder((m, n), name='A0')
A1 = tvm.placeholder((m, n), name='A1')
B0, B1 = tvm.compute((m, n), lambda i, j: (A0[i, j] + 2, A1[i, j] * 3), name='B')
# The generated IR code would be:
s = tvm.create_schedule(B0.op)
print(tvm.lower(s, [A0, A1, B0, B1], simple_mode=True))
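
######################################################################
# As a quick, illustrative sanity check we can build and run the batch
# kernel; this sketch assumes an LLVM-enabled TVM build. Both outputs are
# produced by a single call:

fbatch = tvm.build(s, [A0, A1, B0, B1], "llvm")
ctx = tvm.cpu(0)
a0 = tvm.nd.array(np.random.uniform(size=(3, 4)).astype(A0.dtype), ctx)
a1 = tvm.nd.array(np.random.uniform(size=(3, 4)).astype(A1.dtype), ctx)
b0 = tvm.nd.array(np.zeros((3, 4), dtype=B0.dtype), ctx)
b1 = tvm.nd.array(np.zeros((3, 4), dtype=B1.dtype), ctx)
fbatch(a0, a1, b0, b1)
# check both outputs against NumPy
np.testing.assert_allclose(b0.asnumpy(), a0.asnumpy() + 2)
np.testing.assert_allclose(b1.asnumpy(), a1.asnumpy() * 3)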
######################################################################
# .. _reduction-with-tuple-inputs:
#
# Describe Reduction with Collaborative Inputs
# --------------------------------------------
# Sometimes, we require multiple inputs to express some reduction
# operators, and the inputs will collaborate together, e.g. :code:`argmax`.
# In the reduction procedure, :code:`argmax` needs to compare the values of
# the operands and also needs to keep the index of the operand. This can be
# expressed with :any:`comm_reducer` as below:

# x and y are the operands of the reduction; each of them is a tuple of
# (index, value).
def fcombine(x, y):
lhs = tvm.expr.Select((x[1] >= y[1]), x[0], y[0])
rhs = tvm.expr.Select((x[1] >= y[1]), x[1], y[1])
return lhs, rhs
# our identity element also needs to be a tuple, so `fidentity` accepts
# two types as inputs.
def fidentity(t0, t1):
return tvm.const(-1, t0), tvm.min_value(t1)
argmax = tvm.comm_reducer(fcombine, fidentity, name='argmax')
# describe the reduction computation
m = tvm.var('m')
n = tvm.var('n')
idx = tvm.placeholder((m, n), name='idx', dtype='int32')
val = tvm.placeholder((m, n), name='val', dtype='int32')
k = tvm.reduce_axis((0, n), 'k')
T0, T1 = tvm.compute((m, ), lambda i: argmax((idx[i, k], val[i, k]), axis=k), name='T')
# the generated IR code would be:
s = tvm.create_schedule(T0.op)
print(tvm.lower(s, [idx, val, T0, T1], simple_mode=True))
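
######################################################################
# As an illustrative check (again assuming an LLVM-enabled build), run the
# kernel and compare with :code:`numpy.argmax`. Since ``fcombine`` keeps the
# earlier operand on ties, the result matches NumPy's first-occurrence rule.

fargmax = tvm.build(s, [idx, val, T0, T1], "llvm")
ctx = tvm.cpu(0)
rows, cols = 4, 7
np_idx = np.tile(np.arange(cols, dtype="int32"), (rows, 1))
np_val = np.random.randint(0, 100, size=(rows, cols)).astype("int32")
out_idx = tvm.nd.array(np.zeros(rows, dtype="int32"), ctx)
out_val = tvm.nd.array(np.zeros(rows, dtype="int32"), ctx)
fargmax(tvm.nd.array(np_idx, ctx), tvm.nd.array(np_val, ctx), out_idx, out_val)
# per-row argmax indices should match NumPy
np.testing.assert_equal(out_idx.asnumpy(), np.argmax(np_val, axis=1))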
######################################################################
# .. note::
#
# For ones who are not familiar with reduction, please refer to
# :ref:`general-reduction`.
######################################################################
# Schedule Operation with Tuple Inputs
# ------------------------------------
# It is worth mentioning that although you get multiple outputs from one
# batch operation, they can only be scheduled together as a single
# operation.
n = tvm.var("n")
m = tvm.var("m")
A0 = tvm.placeholder((m, n), name='A0')
B0, B1 = tvm.compute((m, n), lambda i, j: (A0[i, j] + 2, A0[i, j] * 3), name='B')
A1 = tvm.placeholder((m, n), name='A1')
C = tvm.compute((m, n), lambda i, j: A1[i, j] + B0[i, j], name='C')
s = tvm.create_schedule(C.op)
s[B0].compute_at(s[C], C.op.axis[0])
# as you can see in the generated IR code below:
print(tvm.lower(s, [A0, A1, C], simple_mode=True))
######################################################################
# Summary
# -------
# This tutorial introduces the usage of tuple-input operations.
#
# - Describe normal batchwise computation.
# - Describe reduction operation with tuple inputs.
# - Notice that you can only schedule computation at the level of operations, not individual tensors.
| Huyuwei/tvm | tutorials/language/tuple_inputs.py | Python | apache-2.0 | 4,628 |
"""
http://community.topcoder.com/stat?c=problem_statement&pm=1667
Single Round Match 147 Round 1 - Division II, Level One
"""
class CCipher:
def decode(self, cipherText, shift):
a = ord('A')
decoder = [a + (c - shift if c >= shift else c - shift + 26) for c in range(26)]
plain = [chr(decoder[ord(c) - a]) for c in cipherText]
return ''.join(plain)
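
if __name__ == '__main__':
    # Illustrative check; the sample below assumes the pair from the problem
    # statement: decoding "VQREQFGT" with shift 2 yields "TOPCODER".
    print(CCipher().decode("VQREQFGT", 2))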
| warmsea/tc-srm | srm147/CCipher.py | Python | mit | 389 |
#! /usr/bin/python3

# Import modules
import socket              # TCP sockets
import string, base64     # string handling plus Base64 en-/decoding
import rsa                 # RSA module

# Set host IP and port
host = "127.0.0.1"
#host = "10.32.100.1"
port = 10000

# Open socket
s = socket.socket()

# Connect socket
print('\n\nConnecting ' + str(host) + ' Port: ' + str(port) + "\n" )
s.connect((host, port))

# Fetch message from the server
msg = s.recv(2048)
print("Reply: " + msg.decode())

# Split the reply into its three parts: IP HELLO base64-encoded message
erg = msg.strip().split(b' ')

# First check: does the IP that reached the server match ours?
if ( erg[0] == socket.gethostbyname(socket.gethostname())) :
    print('IP reported by the server ' + erg[0].decode() + ' matches the detected IP: ' + socket.gethostbyname(socket.gethostname()) + '!\n')
elif ( erg[0] == b"127.0.0.1" ) :
    print('localhost access - IP reported by the server ' + erg[0].decode() + '!\n')
else :
    print('The server thinks my IP is ' + erg[0].decode() + ', which does not match the detected IP: ' + socket.gethostbyname(socket.gethostname()) + '! => ABORT\n\n')
    exit (1)

# Second check: the protocol expects HELLO as the second token
if ( erg[1] != b"HELLO" ) :
    print ("Server does not say HELLO! => ABORT\n\n")
    exit(2)

# Print the payload transmitted by the server (encrypted and base64-encoded)
print("DEBUG erg[2] = " + erg[2].decode() + "\n")

# Load the private and public keys
privkey = rsa.PrivateKey.load_pkcs1(open('client.id_rsa').read().encode(),'PEM')
pubkey = rsa.PublicKey.load_pkcs1(open('server.id_rsa.pypub').read().encode())

# Decrypt with the client private key
decrypted = rsa.decrypt(base64.b64decode(erg[2]), privkey)
print("Decrypted (random word): " + decrypted.decode() + "\n")

# Append the payload
msg = decrypted.decode() + " || " + "This is my payload - hopefully you can read it! (Python3 client)"

# Encrypt with the server public key and base64-encode
encrypted = rsa.encrypt(msg.encode(), pubkey)
msg = base64.b64encode(encrypted)

# Print the string being sent
print ("sent: " + msg.decode() + "\n\n")

# Send
s.send(msg)

# Fetch the server's reply
msg = s.recv(2048)
print("Reply: " + msg.decode())

# Decrypt with the client private key
decrypted = rsa.decrypt(base64.b64decode(msg), privkey)
print("Decrypted: " + decrypted.decode() + "\n")

# Split the reply into its two parts: OK || payload
erg = decrypted.strip().split(b' || ')

# Did the server answer OK?
if ( erg[0] != b"OK") :
    print("Server did not answer OK -> error!")

print("Payload of the reply: " + erg[1].decode() )

# Close socket
s.close() | MikroMakroFDS/SecuMod | Scripts/py/client3_demo.py | Python | gpl-2.0 | 2,815 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from django import forms
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _lazy
import django_filters
from oneanddone.base.filters import MultiFieldFilter, MyDateRangeFilter
from oneanddone.tasks.models import Task, TaskAttempt, TaskProject, TaskTeam, TaskType
from oneanddone.base.widgets import HorizCheckboxSelect
class ActivityFilterSet(django_filters.FilterSet):
task__owner = django_filters.ModelChoiceFilter(
label=_lazy(u'Task Owner'),
queryset=User.objects.filter(is_staff=True).order_by('profile__name'))
task__team = django_filters.ModelChoiceFilter(
label=_lazy(u'Team'),
queryset=TaskTeam.objects.all())
user = MultiFieldFilter(
['user__profile__name', 'user__profile__username', 'user__email'],
label=_lazy(u'Task User')
)
modified = MyDateRangeFilter(
label=_lazy(u'Date')
)
class Meta:
model = TaskAttempt
fields = ('task__owner', 'task__team', 'user', 'modified')
class SmallTasksFilterSet(django_filters.FilterSet):
difficulty = django_filters.ChoiceFilter(
choices=(
('', _lazy(u'Any')),
(Task.BEGINNER, _lazy(u'Beginner')),
(Task.INTERMEDIATE, _lazy(u'Intermediate')),
(Task.ADVANCED, _lazy(u'Advanced'))),
label=_lazy(u'Task Difficulty'),
required=False)
execution_time = django_filters.ChoiceFilter(
choices=(('', _lazy(u'Any')), (15, 15), (30, 30), (45, 45), (60, 60)),
label=_lazy(u'Estimated minutes'),
required=False)
class Meta:
model = Task
fields = ('difficulty', 'execution_time')
class TasksFilterSet(django_filters.FilterSet):
search = MultiFieldFilter(
['name', 'short_description', 'why_this_matters',
'prerequisites', 'instructions', 'keyword_set__name'],
label=_lazy(u'Search for tasks')
)
difficulty = django_filters.ChoiceFilter(
choices=(
('', _lazy(u'Any')),
(Task.BEGINNER, _lazy(u'Beginner')),
(Task.INTERMEDIATE, _lazy(u'Intermediate')),
(Task.ADVANCED, _lazy(u'Advanced'))),
label=_lazy(u'Task Difficulty'),
required=False)
execution_time = django_filters.MultipleChoiceFilter(
choices=((15, 15), (30, 30), (45, 45), (60, 60)),
widget=HorizCheckboxSelect,
label=_lazy(u'Estimated minutes'))
team = django_filters.ModelMultipleChoiceFilter(
label=_lazy(u'Team'),
queryset=TaskTeam.objects.all(),
widget=forms.CheckboxSelectMultiple)
project = django_filters.ModelMultipleChoiceFilter(
label=_lazy(u'Project'),
queryset=TaskProject.objects.all(),
widget=forms.CheckboxSelectMultiple)
type = django_filters.ModelMultipleChoiceFilter(
label=_lazy(u'Type'),
queryset=TaskType.objects.all(),
widget=forms.CheckboxSelectMultiple)
keyword = django_filters.CharFilter(
name='keyword_set__name'
)
class Meta:
model = Task
fields = ('search', 'difficulty', 'execution_time', 'team', 'project', 'type', 'keyword')
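
# Hedged usage sketch (illustrative, not part of this module): a FilterSet is
# typically bound to request.GET in a view, and its ``qs`` attribute exposes
# the filtered queryset. The view and template names below are assumptions:
#
#   def available_tasks(request):
#       filterset = TasksFilterSet(request.GET, queryset=Task.objects.all())
#       return render(request, 'tasks/available.html', {'filter': filterset})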
| tessie/oneanddone | oneanddone/tasks/filters.py | Python | mpl-2.0 | 3,399 |
# Copyright (c) 2018 NEC, Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_upgradecheck.upgradecheck import Code
from dragonflow.cmd import status
from dragonflow.tests import base as tests_base
class TestUpgradeChecks(tests_base.BaseTestCase):
def setUp(self):
super(TestUpgradeChecks, self).setUp()
self.cmd = status.Checks()
def test__check_placeholder(self):
check_result = self.cmd._check_placeholder()
self.assertEqual(
Code.SUCCESS, check_result.code)
| openstack/dragonflow | dragonflow/tests/unit/cmd/test_status.py | Python | apache-2.0 | 1,058 |