metadata (dict) | text (string, 60 to 3.49M characters)
---|---
{
"source": "JorgeDeLosSantos/curso_mecanica_de_materiales",
"score": 3
} |
#### File: proyecto_01/prj01/uitor.py
```python
import wx
import ui
import numpy as np
class wxFrame(wx.Frame):
def __init__(self):
wx.Frame.__init__(self,parent=None,title="ABC",size=(600,450))
self.init_ctrls()
self.SetBackgroundColour("#E5E5E5")
self.Show()
def init_ctrls(self):
self.msz = wx.BoxSizer(wx.VERTICAL)
self.numsz = wx.BoxSizer(wx.HORIZONTAL)
self.dep = wx.StaticBox(self, -1, 'Datos de entrada')
self.desz = wx.StaticBoxSizer(self.dep, wx.VERTICAL)
self.dsp = wx.StaticBox(self, -1, 'Datos de salida')
self.dssz = wx.StaticBoxSizer(self.dsp, wx.VERTICAL)
lb = wx.StaticText(self, -1, u"Número de elementos", size=(120,-1))
self.numel = wx.TextCtrl(self, -1, "", size=(80,-1))
self.oknumel = wx.Button(self, -1, "OK", size=(40,-1))
# Input data
self.input_data = ui.DataGrid(self,(5,5))
self.desz.Add(self.input_data, 1, wx.EXPAND)
# Output data
self.output_data = ui.DataGrid(self,(5,2))
self.dssz.Add(self.output_data, 1, wx.EXPAND)
# Calculate button
self.calc = wx.Button(self, -1, "Calcular")
self.numsz.Add(lb, 0, wx.ALIGN_CENTRE|wx.ALL, 5)
self.numsz.Add(self.numel, 0, wx.ALIGN_CENTRE|wx.ALL, 5)
self.numsz.Add(self.oknumel, 0, wx.ALIGN_CENTRE|wx.ALL, 5)
self.msz.Add(self.numsz, 1, wx.EXPAND)
self.msz.Add(self.desz, 5, wx.EXPAND|wx.ALL, 5)
self.msz.Add(self.dssz, 5, wx.EXPAND|wx.ALL, 5)
self.msz.Add(self.calc, 1, wx.ALL|wx.ALIGN_CENTRE, 5)
self.SetSizer(self.msz)
colnames = "ID,OD,L,T,G".split(",")
for k,col in enumerate(colnames):
self.input_data.SetColLabelValue(k,col)
colnames_out = u"\N{GREEK SMALL LETTER TAU},\N{GREEK SMALL LETTER PHI}".split(",")
for k,col in enumerate(colnames_out):
self.output_data.SetColLabelValue(k,col)
self.Bind(wx.EVT_BUTTON, self.on_numel, self.oknumel)
self.Bind(wx.EVT_BUTTON, self.calcular, self.calc)
def on_numel(self,event):
numel = int(self.numel.GetValue())
self.input_data.UpdateGridSize(numel,5)
self.output_data.UpdateGridSize(numel,2)
def calcular(self,event):
data = self.input_data.GetArrayData()
ID = data[:,0]
OD = data[:,1]
L = data[:,2]
T = data[:,3]
G = data[:,4]
J = np.pi/2*((OD/2)**4-(ID/2)**4)
TS = []
for k in range(len(T)):
_ts = sum(T[0:k+1])
TS.append(_ts)
TS = np.array(TS)
phi = ((TS*L)/(J*G))*(180/np.pi)
tau = (TS*OD)/J
self.output_data.SetArrayData(np.column_stack((tau,phi)))
if __name__ == '__main__':
app = wx.App()
fr = wxFrame()
app.MainLoop()
``` |
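For reference, the torsion relations that `calcular` applies per shaft element can be run without the GUI. A minimal sketch with made-up inputs follows; note that the textbook maximum shear stress uses the outer radius c = OD/2, whereas `calcular` multiplies the internal torque by the full OD.

```python
# Standalone sketch (made-up inputs) of the formulas behind calcular():
# polar moment J, cumulative internal torque, twist angle and shear stress.
import numpy as np

ID = np.array([0.00, 0.01])     # inner diameters
OD = np.array([0.05, 0.04])     # outer diameters
L = np.array([1.00, 0.80])      # element lengths
T = np.array([500.0, -200.0])   # applied torques
G = np.array([80e9, 80e9])      # shear moduli

J = np.pi / 2 * ((OD / 2)**4 - (ID / 2)**4)   # polar moment of inertia
TS = np.cumsum(T)                             # internal torque per element
phi = (TS * L) / (J * G) * (180 / np.pi)      # angle of twist in degrees
tau_max = TS * (OD / 2) / J                   # textbook max shear, c = OD/2
print(np.column_stack((tau_max, phi)))
```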
{
"source": "JorgeDeLosSantos/git-test",
"score": 3
} |
#### File: JorgeDeLosSantos/git-test/app_demo.py
```python
import wx
class MiAplicacion(wx.Frame):
def __init__(self,parent,title):
wx.Frame.__init__(self,parent,title=title,size=(250,200))
self.sz = wx.BoxSizer(wx.VERTICAL)
bt = wx.Button(self,-1,"Hola")
txt02 = wx.TextCtrl(self,-1,"")
self.sz.Add(bt, 1, wx.EXPAND)
self.sz.Add(txt02, 1, wx.EXPAND)
self.SetBackgroundColour("#ff7700")
txt = wx.TextCtrl(self, -1, "")
self.sz.Add(txt, 1, wx.EXPAND)
self.SetSizer(self.sz)
self.Centre(True)
self.Show()
# This line was added by lab2dls user
# Testing collab...
if __name__=='__main__':
version = "0.1.1"
app = wx.App()
frame = MiAplicacion(None, u"AppDemo %s"%(version))
app.MainLoop()
``` |
{
"source": "JorgeDeLosSantos/iwx",
"score": 3
} |
#### File: iwx/iwx/grid.py
```python
import wx
import wx.grid as grid
import numpy as np
from utils import *
# Using openpyxl
openpyxl_warning = """
Please install openpyxl to use the "Export to Excel" feature.
"""
try:
import openpyxl
except ImportError:
print openpyxl_warning
pass
class DataGrid(grid.Grid):
"""
Create a wx.grid.Grid-based grid with extra features, such as
adding and deleting rows and columns interactively.
DataGrid only accepts numeric data: if the value entered on a
cell edit is not a number, it is replaced with NaN
(the NumPy constant).
parent : wxPython container
Parent object
gridsize : `tuple`, `list`
Size of grid, using format -> ``(rows, columns)``
Example ::
app = wx.App()
fr = wx.Frame(None, -1, u"ABC")
dp = DataGrid(fr,(10,2))
fr.Show()
app.MainLoop()
"""
def __init__(self,parent,gridsize,**kwargs):
grid.Grid.__init__(self,parent=parent,id=-1,**kwargs)
rows = int(gridsize[0])
cols = int(gridsize[1])
self.CreateGrid(rows,cols)
self.SetRowLabelSize(20)
self.Bind(grid.EVT_GRID_CELL_CHANGE, self.OnCellEdit)
self.Bind(grid.EVT_GRID_CELL_RIGHT_CLICK, self.OnRightClick)
def UpdateGridSize(self,rows,cols):
"""
Update the grid size (rows, cols).
"""
self.ClearGrid()
ccols = self.GetNumberCols()
crows = self.GetNumberRows()
if rows > crows:
self.AppendRows(rows-crows)
elif rows < crows:
self.DeleteRows(0,crows-rows)
if cols > ccols:
self.AppendCols(cols-ccols)
elif cols < ccols:
self.DeleteCols(0,ccols-cols)
def SetArrayData(self,data):
"""
Set the grid data; ``data`` must be a NumPy array.
::
data = np.random.random((20,20))
gd.SetArrayData(data)
"""
r,c = data.shape # For numpy array
self.UpdateGridSize(r,c)
for i in range(r):
for j in range(c):
val = str(data[i][j])
self.SetCellValue(i,j,val)
def GetArrayData(self):
"""
Get the grid data as a NumPy array.
"""
nrows = self.GetNumberRows()
ncols = self.GetNumberCols()
X = np.zeros((nrows,ncols))
for i in range(nrows):
for j in range(ncols):
cval = self.GetCellValue(i,j)
if not isempty(cval):
try:
X[i][j] = float(cval)
except ValueError:
# non-numeric text falls back to NaN
X[i][j] = np.nan
else:
X[i][j] = np.nan
return X
def GetSelectedData(self):
"""
Get the data from selected cells.
"""
scols = self.GetSelectedCols()
srows = self.GetSelectedRows()
X = np.zeros((len(srows),len(scols)))
for ii,row in enumerate(srows):
for jj,col in enumerate(scols):
try:
X[ii][jj] = self.GetCellValue(row,col)
except ValueError:
X[ii][jj] = np.nan
return X
def GetSelectedCols(self):
"""
Return a list with the indices of the selected columns.
"""
scols = []
top_left = self.GetSelectionBlockTopLeft()
bottom_right = self.GetSelectionBlockBottomRight()
if not isempty(bottom_right) and not isempty(top_left):
max_col = bottom_right[0][1]
min_col = top_left[0][1]
scols = range(min_col,max_col+1)
return scols
def GetSelectedRows(self):
"""
Return a list with the indices of the selected rows.
"""
srows = []
top_left = self.GetSelectionBlockTopLeft()
bottom_right = self.GetSelectionBlockBottomRight()
if not isempty(bottom_right) and not isempty(top_left):
max_row = bottom_right[0][0]
min_row = top_left[0][0]
srows = range(min_row,max_row+1)
return srows
def OnCellEdit(self,event):
"""
Handle the response when a cell is edited.
**Features**
* Evaluates simple arithmetic expressions written with the
Microsoft Excel formula syntax, e.g. ``=1+2``
* If the value is not a number, the cell value is set to
NaN (the NumPy constant).
"""
row,col = (event.GetRow(),event.GetCol())
cval = self.GetCellValue(row,col)
if cval.startswith("="):
try:
cval = str(eval(cval[1:]))
self.SetCellValue(row,col,cval)
except:
pass
try:
cval = float(cval)
except ValueError:
cval = np.nan
self.SetCellValue(row,col,str(cval))
def OnRightClick(self,event):
"""
Show a pop-up menu on right click over the DataGrid,
with the following options:
* Delete rows
* Delete columns
* Add row...
* Add column...
* Randomly fill columns
* Randomly fill cells
"""
pum = wx.Menu() # Pop-up menu
delrows = wx.MenuItem(pum, -1, "Delete rows")
pum.AppendItem(delrows)
delcols = wx.MenuItem(pum, -1, "Delete columns")
pum.AppendItem(delcols)
pum.AppendSeparator()
addrow = wx.MenuItem(pum, -1, "Add row...")
pum.AppendItem(addrow)
addcol = wx.MenuItem(pum, -1, "Add column...")
pum.AppendItem(addcol)
pum.AppendSeparator()
randomfill_cols = wx.MenuItem(pum, -1, "Randomly fill columns")
pum.AppendItem(randomfill_cols)
randomfill_cells = wx.MenuItem(pum, -1, "Randomly fill cells")
pum.AppendItem(randomfill_cells)
# Binds events
pum.Bind(wx.EVT_MENU, self.del_rows, delrows)
pum.Bind(wx.EVT_MENU, self.del_cols, delcols)
pum.Bind(wx.EVT_MENU, self.add_row, addrow)
pum.Bind(wx.EVT_MENU, self.add_col, addcol)
pum.Bind(wx.EVT_MENU, self.random_fill_cols, randomfill_cols)
pum.Bind(wx.EVT_MENU, self.random_fill_cells, randomfill_cells)
# Show
self.PopupMenu(pum)
pum.Destroy() #Destroy the pop-up menu
def del_rows(self,event):
"""
Delete selected rows
"""
rows = self.GetSelectedRows()
self.DeleteRows(rows[0],len(rows))
def del_cols(self,event):
"""
Delete selected columns
"""
cols = self.GetSelectedCols()
self.DeleteCols(cols[0],len(cols))
def add_row(self,event):
"""
Append one row
"""
self.AppendRows(1)
def add_col(self,event):
"""
Append one column
"""
self.AppendCols(1)
def random_fill(self):
"""
Fills all the grid with random numbers
"""
nrows = self.GetNumberRows()
ncols = self.GetNumberCols()
data = np.random.random((nrows,ncols))
self.SetArrayData(data)
def random_fill_cols(self,event):
"""
Fills selected columns with random numbers.
"""
cols = self.GetSelectedCols()
nrows = self.GetNumberRows()
for ii in range(nrows):
for col in cols:
val = str(np.random.rand())
self.SetCellValue(ii,col,val)
def random_fill_cells(self,event):
"""
Fills selected cells with random numbers.
"""
scols = self.GetSelectedCols()
srows = self.GetSelectedRows()
for ii,row in enumerate(srows):
for jj,col in enumerate(scols):
val = str(np.random.rand())
self.SetCellValue(row, col, val)
def toExcel(self):
"""
Export grid data to Excel sheet. (not implemented yet)
"""
pass
def toHTMLTable(self):
"""
Export grid data to HTML table format. (not implemented yet)
"""
pass
if __name__=='__main__':
app = wx.App()
fr = wx.Frame(None, -1, u"ABC")
dp = DataGrid(fr,(10,2))
dp.random_fill()
fr.Show()
app.MainLoop()
``` |
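The cell-editing rule that `OnCellEdit` and `GetArrayData` implement can be exercised without wxPython. A minimal sketch of that parsing logic, using only the behaviour visible in the code above:

```python
# Minimal, wx-free sketch of DataGrid's cell parsing: a leading "=" is
# evaluated as a Python arithmetic expression, anything non-numeric -> NaN.
import numpy as np

def parse_cell(cval):
    if cval.startswith("="):
        try:
            cval = str(eval(cval[1:]))
        except Exception:
            pass
    try:
        return float(cval)
    except ValueError:
        return np.nan

print(parse_cell("=1+2"))  # 3.0
print(parse_cell("3.14"))  # 3.14
print(parse_cell("abc"))   # nan
```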
{
"source": "JorgeDeLosSantos/NanchiPlot",
"score": 3
} |
#### File: NanchiPlot/nanchi/image.py
```python
import numpy as np
from skimage import io, color, filters
path_img = r"C:/Users/User/Pictures/ittg_logo.png"
def sobel(X):
sbl = filters.sobel(X)
return sbl
def prewitt(X):
pw = filters.prewitt(X)
return pw
def roberts(X):
rb = filters.roberts(X)
return rb
def rgb2gray(X):
gray = color.rgb2gray(X)
return gray
def binarize(X):
th = 0.5
bX = X > th
return bX.astype('float')
if __name__ == '__main__':
img = io.imread(path_img)
img = rgb2gray(img)
#img = sobel(img)
bi = binarize(img)
io.imshow(bi)
io.show()
```
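`binarize` only needs an array in the 0 to 1 range, so it can be checked without the hard-coded image path above. A small sketch:

```python
# Quick check of the fixed-threshold binarization (th = 0.5, as above)
# on a synthetic gradient instead of the hard-coded image path.
import numpy as np

def binarize(X, th=0.5):
    return (X > th).astype('float')

X = np.linspace(0, 1, 9).reshape(3, 3)
print(binarize(X))
```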
#### File: NanchiPlot/nanchi/setplot.py
```python
import matplotlib as mpl
import matplotlib.cm as cm
import matplotlib.pyplot as plt
from _const_ import *
# WX Backend ?
def set_default_params(axes,figure):
# Figure properties
#~ axes.cla()
#~ mpl.rc('figure',autolayout=True)
set_default_axes_props(axes)
def set_default_axes_props(axes):
axes.cla()
axes.set_aspect("auto")
if is_reversed_yaxis(axes):
axes.invert_yaxis()
if not axes.get_frame_on():
axes.set_frame_on(True)
def is_reversed_yaxis(axes):
"""
"""
a,b = axes.get_ylim()
if a > b: return True
return False
```
#### File: NanchiPlot/nanchi/uibase.py
```python
import wx
import wx.aui as aui
import wx.grid as grid
import wx.lib.scrolledpanel as scrolled
import os
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
try:
from uimpl import FigureCanvas # Customized canvas
import setplot
import uimpl
import uiaux as aux
from util import isempty, rgb2hex
from _const_ import * # String & Constants values
except ImportError:
from nanchi.uimpl import FigureCanvas # Customized canvas
import nanchi.setplot as setplot
import nanchi.uimpl as uimpl
import nanchi.uiaux as aux
from nanchi.util import isempty, rgb2hex
from nanchi._const_ import * # String & Constants values
class NanchiNoteBook(aui.AuiNotebook):
def __init__(self, parent):
_styles = aui.AUI_NB_TOP | aui.AUI_NB_TAB_SPLIT | aui.AUI_NB_TAB_MOVE
aui.AuiNotebook.__init__(self, parent=parent, style=_styles)
# Graph Panel
self.graphs = GraphPanel(self)
self.data = DataPanel(self)
#self.setup = SetupPanel(self)
self.axes = self.graphs.axes
self.figure = self.graphs.figure
self.canvas = self.graphs.canvas
self.AddPage(self.graphs, u"Graphs")
self.AddPage(self.data, u"Data")
#self.AddPage(self.setup, u"Settings")
self.Bind(aui.EVT_AUINOTEBOOK_PAGE_CHANGED, self.OnPageChanged)
def OnPageChanged(self, event):
pass
#gp = pickle.load(open("graph_properties.dat","rb"))
#self.axes.set_xlabel(gp["xlabel"])
#self.axes.set_ylabel(gp["ylabel"])
#self.canvas.draw() # Draw canvas
class GraphPanel(wx.Panel):
def __init__(self,parent,*args,**kwargs):
wx.Panel.__init__(self,parent,-1)
# Sizer
self.mainsz = wx.BoxSizer(wx.VERTICAL)
# Init canvas Figure & Axes
self.initCanvas()
# Color properties
self.SetBackgroundColour(PANEL_BG_COLOR)
# Status bar from NanchiPlot App
self.sb = self.GetParent().GetParent().GetParent().GetStatusBar()
#print self.sb # debug
# Set the sizer
self.SetSizer(self.mainsz)
def initCanvas(self):
# Creating Figure & Axes
self.figure = Figure()
self.axes = self.figure.add_subplot(111)
self.canvas = FigureCanvas(self, -1, self.figure)
# Initial events
self.EVT_ON_RIGHT = self.canvas.mpl_connect('button_press_event', self.OnRightClick)
self.EVT_ON_SCROLL = self.canvas.mpl_connect('scroll_event', self.OnScroll)
# Graph properties
setplot.set_default_params(self.axes,self.figure)
# FigureCanvas
self.mainsz.Add(self.canvas, 1, wx.EXPAND|wx.ALL, 1)
def OnRightClick(self,event):
if event.button == 3:
self.InitPopUpMenu()
elif event.button == 1:
# To implement: move lines and texts
# without previous selection of this option
# on LineToolbar
pass
def OnScroll(self,event):
"""
Zoom -> Scrolling Mouse
"""
scale_factor = 2.0
minx, maxx = self.axes.get_xlim()
miny, maxy = self.axes.get_ylim()
xdata, ydata = event.xdata, event.ydata
if event.button == "up":
kf = 1.0/scale_factor
elif event.button == "down":
kf= scale_factor
xfactor = 0.5*(maxx - minx)*kf
yfactor = 0.5*(maxy - miny)*kf
if event.key is not None and "control" in event.key: # zoom only while Ctrl is held
self.axes.set_xlim((xdata - xfactor, xdata + xfactor))
self.axes.set_ylim((ydata - yfactor, ydata + yfactor))
else: pass
self.canvas.draw() # Update
def InitPopUpMenu(self):
pum = wx.Menu()
ls = wx.MenuItem(pum, -1, "Line style")
pum.AppendItem(ls)
linecolor = wx.MenuItem(pum, -1, u"Line color")
pum.AppendItem(linecolor)
linewidth = wx.MenuItem(pum, -1, u"Line width")
pum.AppendItem(linewidth)
pum.AppendSeparator()
gs = wx.MenuItem(pum, -1, "Grid style")
pum.AppendItem(gs)
gridcolor = wx.MenuItem(pum, -1, u"Grid color")
pum.AppendItem(gridcolor)
gridwidth = wx.MenuItem(pum, -1, u"Grid width")
pum.AppendItem(gridwidth)
pum.AppendSeparator()
axbackg = wx.MenuItem(pum, -1, u"Background Color")
pum.AppendItem(axbackg)
aspax = wx.Menu()
_aspax_auto = wx.MenuItem(aspax, -1, u"auto")
aspax.AppendItem(_aspax_auto)
_aspax_equal = wx.MenuItem(aspax, -1, u"equal")
aspax.AppendItem(_aspax_equal)
pum.AppendMenu(-1, "Axes aspect", aspax)
pum.AppendSeparator()
xlabel = wx.MenuItem(pum, -1, u"X-Label")
pum.AppendItem(xlabel)
ylabel = wx.MenuItem(pum, -1, u"Y-Label")
pum.AppendItem(ylabel)
title = wx.MenuItem(pum, -1, u"Insert title")
pum.AppendItem(title)
intext = wx.MenuItem(pum, -1, u"Insert text/annotation")
pum.AppendItem(intext)
pum.AppendSeparator()
setxticks = wx.MenuItem(pum, -1, u"Update xticks")
pum.AppendItem(setxticks)
setyticks = wx.MenuItem(pum, -1, u"Update yticks")
pum.AppendItem(setyticks)
pum.AppendSeparator()
zoom_box = wx.MenuItem(pum, -1, u"Zoom Box")
pum.AppendItem(zoom_box)
# Binds
self.Bind(wx.EVT_MENU, self.OnText, intext)
self.Bind(wx.EVT_MENU, self.OnBackground, axbackg)
self.Bind(wx.EVT_MENU, self.OnGridColor, gridcolor)
self.Bind(wx.EVT_MENU, self.OnGridWidth, gridwidth)
self.Bind(wx.EVT_MENU, self.OnGridStyle, gs)
self.Bind(wx.EVT_MENU, self.OnXLabel, xlabel)
self.Bind(wx.EVT_MENU, self.OnYLabel, ylabel)
self.Bind(wx.EVT_MENU, self.OnTitle, title)
self.Bind(wx.EVT_MENU, self.OnZoom, zoom_box)
self.Bind(wx.EVT_MENU, self.OnAxesAspect, _aspax_equal)
self.Bind(wx.EVT_MENU, self.OnAxesAspect, _aspax_auto)
# Lines
self.Bind(wx.EVT_MENU, self.OnLineStyle, ls)
self.Bind(wx.EVT_MENU, self.OnLineColor, linecolor)
self.Bind(wx.EVT_MENU, self.OnLineWidth, linewidth)
# Ticks
self.Bind(wx.EVT_MENU, self.OnXTicks, setxticks)
self.Bind(wx.EVT_MENU, self.OnYTicks, setyticks)
# Show
self.PopupMenu(pum)
pum.Destroy()
def OnText(self,event):
self.TEXT_EVT = self.canvas.mpl_connect("button_press_event", self.set_text)
def set_text(self,event):
cx = event.xdata
cy = event.ydata
dialog = wx.TextEntryDialog(self,"Insert text",
NANCHI_MAIN_CAPTION, u"", style=wx.OK|wx.CANCEL)
if dialog.ShowModal() == wx.ID_OK:
if not cx is None and not cy is None:
self.axes.text(cx, cy, unicode(dialog.GetValue()), picker=True)
self.canvas.draw()
else:
msg = wx.MessageDialog(self,u"Select a position inside of Axes",
caption=DEFAULT_DIALOG_CAPTION, style=wx.ICON_ERROR|wx.OK)
msg.ShowModal()
msg.Destroy()
dialog.Destroy()
self.canvas.mpl_disconnect(self.TEXT_EVT)
def OnBackground(self,event):
dlg = wx.ColourDialog(self)
if dlg.ShowModal() == wx.ID_OK:
color = dlg.GetColourData().Colour
r,g,b = color.Red(),color.Green(),color.Blue()
self.axes.set_axis_bgcolor(rgb2hex(r,g,b))
dlg.Destroy()
self.canvas.draw()
def OnAxesAspect(self,event):
aspect = event.GetEventObject().GetLabel(event.GetId())
self.axes.set_aspect(aspect)
self.canvas.draw()
def OnGridColor(self,event):
dlg = wx.ColourDialog(self)
if dlg.ShowModal() == wx.ID_OK:
color = dlg.GetColourData().Colour
r,g,b = color.Red(),color.Green(),color.Blue()
self.axes.grid(color=rgb2hex(r,g,b))
dlg.Destroy()
self.canvas.draw()
def OnGridStyle(self,event):
dlg = aux.LineStyleDialog(None)
if dlg.ShowModal() == wx.ID_OK:
self._gs = dlg.GetData()
self.axes.grid(ls=self._gs)
dlg.Destroy()
self.canvas.draw()
def OnGridWidth(self,event):
dlg = wx.TextEntryDialog(self, u"Insert a width", NANCHI_MAIN_CAPTION)
if dlg.ShowModal()==wx.ID_OK:
_lw = float(dlg.GetValue())
self.axes.grid(lw=_lw)
dlg.Destroy()
self.canvas.draw()
def OnLineColor(self,event):
self.LINE_COLOR_EVT = self.canvas.mpl_connect("pick_event", self.set_line_color)
def set_line_color(self,event):
dlg = wx.ColourDialog(self)
if dlg.ShowModal() == wx.ID_OK:
color = dlg.GetColourData().Colour
r,g,b = color.Red(),color.Green(),color.Blue()
event.artist.set_color(rgb2hex(r,g,b))
dlg.Destroy()
self.canvas.draw()
self.canvas.mpl_disconnect(self.LINE_COLOR_EVT)
def OnLineStyle(self,event):
self.LINE_STYLE_EVT = self.canvas.mpl_connect("pick_event", self.set_line_style)
self.sb.SetStatusText("Select a line")
def set_line_style(self,event):
dlg = aux.LineStyleDialog(None)
if dlg.ShowModal() == wx.ID_OK:
self._ls = dlg.GetData()
event.artist.set_linestyle(self._ls)
dlg.Destroy()
# Quit LS_EVT
self.canvas.mpl_disconnect(self.LINE_STYLE_EVT)
# Update SB
self.sb.SetStatusText("Done: Line style applied")
self.canvas.draw()
def OnLineWidth(self,event):
self.LINE_WIDTH_EVT = self.canvas.mpl_connect("pick_event", self.set_line_width)
self.sb.SetStatusText("Select a line")
def set_line_width(self,event):
self.canvas.mpl_disconnect(self.LINE_WIDTH_EVT)
dlg = wx.TextEntryDialog(self, u"Insert a width", NANCHI_MAIN_CAPTION)
if dlg.ShowModal()==wx.ID_OK:
try:
_lw = float(dlg.GetValue())
except ValueError:
_lw = event.artist.get_linewidth()
event.artist.set_linewidth(_lw)
dlg.Destroy()
self.canvas.draw()
def OnXLabel(self,event):
current_label = unicode(self.axes.get_xlabel())
dialog = wx.TextEntryDialog(None,
"Insert xlabel",
NANCHI_MAIN_CAPTION, current_label, style=wx.OK|wx.CANCEL)
if dialog.ShowModal() == wx.ID_OK:
self.axes.set_xlabel(dialog.GetValue())
self.canvas.draw()
dialog.Destroy()
def OnYLabel(self,event):
current_label = unicode(self.axes.get_ylabel())
dialog = wx.TextEntryDialog(self,
"Insert ylabel",
NANCHI_MAIN_CAPTION, current_label, style=wx.OK|wx.CANCEL)
if dialog.ShowModal() == wx.ID_OK:
self.axes.set_ylabel(dialog.GetValue())
self.canvas.draw()
dialog.Destroy()
def OnTitle(self,event):
dialog = wx.TextEntryDialog(self,
u"Insert a title",
NANCHI_MAIN_CAPTION, "", style=wx.OK|wx.CANCEL)
if dialog.ShowModal() == wx.ID_OK:
self.axes.set_title(dialog.GetValue())
self.canvas.draw()
dialog.Destroy()
def OnXTicks(self,event):
dlg = aux.TickDialog(self, self.axes, "x")
if dlg.ShowModal() == wx.ID_OK:
ticks,labels = dlg.GetData()
self.axes.set_xticks(ticks)
self.axes.set_xticklabels(labels)
dlg.Destroy()
self.canvas.draw()
def OnYTicks(self,event):
dlg = aux.TickDialog(self, self.axes, "y")
if dlg.ShowModal() == wx.ID_OK:
ticks,labels = dlg.GetData()
self.axes.set_yticks(ticks)
self.axes.set_yticklabels(labels)
self.sb.SetStatusText(u"")
dlg.Destroy()
self.canvas.draw()
def OnLineLabel(self,event):
self.LINE_LABEL_EVT = self.canvas.mpl_connect("pick_event", self.set_line_label)
def set_line_label(self,event):
self.canvas.mpl_disconnect(self.LINE_LABEL_EVT)
dlg = wx.TextEntryDialog(self, u"Insert a label", NANCHI_MAIN_CAPTION)
if dlg.ShowModal()==wx.ID_OK:
_label = dlg.GetValue()
event.artist.set_label(_label)
dlg.Destroy()
self.canvas.draw()
def OnShowLegend(self,event):
self.axes.legend(loc="best")
self.canvas.draw()
def OnPieLabels(self,event):
pass
def OnZoom(self,event):
self.sb.SetStatusText(u"Drag the cursor to select a region")
self.canvas.zoomit()
def OnMoveLine(self,event):
self.MOVE_LINE_EVT = self.canvas.mpl_connect("pick_event", self.move_line)
self.sb.SetStatusText(u"Select a line to move")
def move_line(self,event):
self._selected_line = event.artist
self._p0 = (event.mouseevent.xdata, event.mouseevent.ydata)
self._xdata0 = self._selected_line.get_xdata()
self._ydata0 = self._selected_line.get_ydata()
self._mpl_ml_motion = self.canvas.mpl_connect("motion_notify_event", self._ml_motion)
self._mpl_ml_release = self.canvas.mpl_connect("button_release_event", self._ml_release)
def _ml_motion(self,event):
"""
Move line motion
"""
cx = event.xdata
cy = event.ydata
deltax = cx - self._p0[0]
deltay = cy - self._p0[1]
self._selected_line.set_xdata(self._xdata0 + deltax)
self._selected_line.set_ydata(self._ydata0 + deltay)
self.canvas.draw()
def _ml_release(self,event):
self.canvas.mpl_disconnect(self._mpl_ml_motion)
self.canvas.mpl_disconnect(self._mpl_ml_release)
self.canvas.mpl_disconnect(self.MOVE_LINE_EVT)
self.axes.relim()
self.axes.autoscale_view(True,True,True)
self.canvas.draw()
self.sb.SetStatusText(u"Line %s has been moved"%(self._selected_line.__repr__()))
def OnMoveText(self,event):
self.MOVE_TEXT_EVT = self.canvas.mpl_connect("pick_event", self.move_text)
self.sb.SetStatusText(u"Select a text to move")
def move_text(self,event):
self._selected_text = event.artist
self._mpl_mt_motion = self.canvas.mpl_connect("motion_notify_event", self._mt_motion)
self._mpl_mt_release = self.canvas.mpl_connect("button_release_event", self._mt_release)
def _mt_motion(self,event):
cx = event.xdata
cy = event.ydata
self._selected_text.set_position((cx,cy))
self.canvas.draw()
def _mt_release(self,event):
self.canvas.mpl_disconnect(self._mpl_mt_motion)
self.canvas.mpl_disconnect(self._mpl_mt_release)
self.canvas.mpl_disconnect(self.MOVE_TEXT_EVT)
self.axes.relim()
self.axes.autoscale_view(True,True,True)
self.canvas.draw()
self.sb.SetStatusText(u"Text %s has been moved to the position (%0.4f,%0.4f)"
%(self._selected_text.get_text(),
self._selected_text.get_position()[0],
self._selected_text.get_position()[1]))
# =====================================================================
class GraphWindow(wx.Frame):
def __init__(self,parent,title,*args,**kwargs):
wx.Frame.__init__(self,parent=parent,title=title,*args,**kwargs)
self.SetBackgroundColour(FRAME_BG_COLOR)
self.Centre(True)
# =====================================================================
class DataPanel(scrolled.ScrolledPanel):
def __init__(self,parent,*args,**kwargs):
scrolled.ScrolledPanel.__init__(self,parent,-1,size=(100,-1),
style = wx.TAB_TRAVERSAL|wx.SUNKEN_BORDER)
self.initCtrls()
self.initSizers()
self.initEvents()
self.SetupScrolling()
# Color properties
self.SetBackgroundColour(PANEL_BG_COLOR)
def initCtrls(self):
self.grid_data = DataGrid(self,(10,2))
def initSizers(self):
# Create sizers
self.mainsz = wx.BoxSizer(wx.VERTICAL)
# Add to sizers
self.mainsz.Add(self.grid_data, 5, wx.EXPAND|wx.ALL, 2)
# Set Sizers
self.SetSizer(self.mainsz)
def initEvents(self):
pass
# Main class for grid data
class DataGrid(grid.Grid):
def __init__(self,parent,gridsize,**kwargs):
grid.Grid.__init__(self,parent=parent,id=-1,**kwargs)
rows = int(gridsize[0])
cols = int(gridsize[1])
self.CreateGrid(rows,cols)
self.SetRowLabelSize(20)
# For plotting from the grid
if isinstance(self.GetParent(),DataPanel):
self.axes = self.GetParent().GetParent().graphs.axes
self.canvas = self.GetParent().GetParent().graphs.canvas
self.Bind(grid.EVT_GRID_CELL_CHANGE, self.OnCellEdit)
self.Bind(grid.EVT_GRID_CELL_RIGHT_CLICK, self.OnRightClick)
def UpdateGridSize(self,rows,cols):
self.ClearGrid()
ccols = self.GetNumberCols()
crows = self.GetNumberRows()
if rows > crows:
self.AppendRows(rows-crows)
elif rows < crows:
self.DeleteRows(0,crows-rows)
if cols > ccols:
self.AppendCols(cols-ccols)
elif cols < ccols:
self.DeleteCols(0,ccols-cols)
def SetArrayData(self,data):
"""
Set the grid data; ``data`` must be a NumPy array.
"""
r,c = data.shape # For numpy array
self.UpdateGridSize(r,c)
for i in range(r):
for j in range(c):
if i==0: self.SetColFormatFloat(5, 6, 4)
val = str(data[i][j])
self.SetCellValue(i,j,val)
def GetArrayData(self):
nrows = self.GetNumberRows()
ncols = self.GetNumberCols()
X = np.zeros((nrows,ncols))
for i in range(nrows):
for j in range(ncols):
cval = self.GetCellValue(i,j)
if not isempty(cval):
try:
X[i][j] = float(cval)
except ValueError:
# non-numeric text falls back to NaN
X[i][j] = np.nan
else:
X[i][j] = np.nan
return X
def GetSelectedData(self):
scols = self.GetSelectedCols()
srows = self.GetSelectedRows()
X = np.zeros((len(srows),len(scols)))
for ii,row in enumerate(srows):
for jj,col in enumerate(scols):
try:
X[ii][jj] = self.GetCellValue(row,col)
except ValueError:
X[ii][jj] = np.nan
return X
def GetSelectedCols(self):
scols = []
top_left = self.GetSelectionBlockTopLeft()
bottom_right = self.GetSelectionBlockBottomRight()
if not isempty(bottom_right) and not isempty(top_left):
max_col = bottom_right[0][1]
min_col = top_left[0][1]
scols = range(min_col,max_col+1)
return scols
def GetSelectedRows(self):
srows = []
top_left = self.GetSelectionBlockTopLeft()
bottom_right = self.GetSelectionBlockBottomRight()
if not isempty(bottom_right) and not isempty(top_left):
max_row = bottom_right[0][0]
min_row = top_left[0][0]
srows = range(min_row,max_row+1)
return srows
def OnCellEdit(self,event):
"""
"""
row,col = (event.GetRow(),event.GetCol())
cval = self.GetCellValue(row,col)
if cval.startswith("="):
try:
cval = str(eval(cval[1:]))
self.SetCellValue(row,col,cval)
except:
pass
try:
cval = float(cval)
except ValueError:
cval = np.nan
self.SetCellValue(row,col,str(cval))
def OnRightClick(self,event):
"""
On right click, show pop-up menu.
"""
pum = wx.Menu()
delrows = wx.MenuItem(pum, -1, "Delete rows")
pum.AppendItem(delrows)
delcols = wx.MenuItem(pum, -1, "Delete columns")
pum.AppendItem(delcols)
pum.AppendSeparator()
addrow = wx.MenuItem(pum, -1, "Add rows...")
pum.AppendItem(addrow)
addcol = wx.MenuItem(pum, -1, "Add columns...")
pum.AppendItem(addcol)
pum.AppendSeparator()
editcollabel = wx.MenuItem(pum, -1, "Edit column label")
pum.AppendItem(editcollabel)
pum.AppendSeparator()
randomfill = wx.MenuItem(pum, -1, "Fill columns randomly")
pum.AppendItem(randomfill)
# Binds
pum.Bind(wx.EVT_MENU, self.del_rows, delrows)
pum.Bind(wx.EVT_MENU, self.del_cols, delcols)
pum.Bind(wx.EVT_MENU, self.add_row, addrow)
pum.Bind(wx.EVT_MENU, self.add_col, addcol)
pum.Bind(wx.EVT_MENU, self.edit_collabel, editcollabel)
pum.Bind(wx.EVT_MENU, self.random_fill, randomfill)
# Show
self.PopupMenu(pum)
pum.Destroy()
def del_rows(self,event):
"""
Delete rows
"""
rows = self.GetSelectedRows()
if not isempty(rows):
self.DeleteRows(rows[0],len(rows))
def del_cols(self,event):
"""
Delete columns
"""
cols = self.GetSelectedCols()
if not isempty(cols):
self.DeleteCols(cols[0],len(cols))
def add_row(self,event):
"""
Add row
"""
self.AppendRows(1)
def add_col(self,event):
"""
Add column
"""
self.AppendCols(1)
def edit_collabel(self,event):
"""
Set column label
"""
ccols = self.GetSelectedCols()
dlg = wx.TextEntryDialog(None, "Insert new label...",
DEFAULT_DIALOG_CAPTION)
if dlg.ShowModal() == wx.ID_OK:
label = dlg.GetValue()
for col in ccols:
self.SetColLabelValue(col,label)
def random_fill(self,event):
"""
Fill columns randomly
"""
cols = self.GetSelectedCols()
nrows = self.GetNumberRows()
for ii in range(nrows):
for col in cols:
val = str(np.random.rand())
self.SetCellValue(ii,col,val)
class SetupWindow(wx.Frame):
def __init__(self,**kwargs):
wx.Frame.__init__(self, parent=None, id=wx.ID_ANY, title="Settings",
size=(400,200), **kwargs)
# Init controls
self.initCtrls()
# Center
self.Centre(True)
self.Show()
def initCtrls(self):
self.mainsz = wx.BoxSizer(wx.VERTICAL)
self.themesz = wx.BoxSizer(wx.HORIZONTAL)
self.buttonsz = wx.BoxSizer(wx.HORIZONTAL)
# Theme controls
themes = "White|Dark".split("|")
_theme_label = wx.StaticText(self, wx.ID_ANY, "Themes")
_theme_options = wx.ComboBox(self, wx.ID_ANY, choices=themes)
self.themesz.Add(_theme_label, 1, wx.ALIGN_LEFT|wx.ALL, 2)
self.themesz.Add(_theme_options, 4, wx.ALIGN_LEFT|wx.ALL, 2)
# Buttons (OK|CANCEL)
_ok_button = wx.Button(self, wx.ID_OK)
_cancel_button = wx.Button(self, wx.ID_CANCEL)
self.buttonsz.Add(_ok_button, 1, wx.ALIGN_CENTER|wx.ALL, 5)
self.buttonsz.Add(_cancel_button, 1, wx.ALIGN_CENTER|wx.ALL, 5)
self.mainsz.Add(self.themesz, 1, wx.EXPAND|wx.ALL, 5)
self.mainsz.Add(self.buttonsz, 1, wx.ALIGN_CENTRE|wx.ALL, 5)
self.SetSizer(self.mainsz)
# Bind events
_ok_button.Bind(wx.EVT_BUTTON, self.OnOK)
_cancel_button.Bind(wx.EVT_BUTTON, self.OnCancel)
def OnOK(self,event):
print 1
def OnCancel(self,event):
print 0
self.Close(True)
class Graph3DWindow(object):
def __init__(self, data):
from mpl_toolkits.mplot3d import Axes3D
self.data = data
self.initCtrls()
def initCtrls(self):
self.figure = plt.figure()
nr, nc = self.data.shape
X, Y = np.meshgrid(np.linspace(1,nr,nr), np.linspace(1,nc,nc))
Z = self.data
self.axes = self.figure.add_subplot(111, projection="3d")
surf = self.axes.plot_surface(X, Y, Z, rstride=1, cstride=1,cmap="hot")
plt.show()
if __name__=='__main__':
#~ app = wx.App()
Graph3DWindow(np.random.random((10,10)))
#~ app.MainLoop()
```
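The scroll-zoom arithmetic in `GraphPanel.OnScroll` reduces to rescaling the axis span around the cursor. A standalone sketch with assumed numbers:

```python
# Sketch of OnScroll's zoom math: the new half-span is half the current span
# times 1/scale_factor (wheel up) or scale_factor (wheel down), centred on
# the cursor position. The numbers below are assumed for illustration.
scale_factor = 2.0
minx, maxx = 0.0, 10.0
xdata = 4.0                        # cursor x position
kf = 1.0 / scale_factor            # wheel "up" -> zoom in
xfactor = 0.5 * (maxx - minx) * kf
print((xdata - xfactor, xdata + xfactor))   # -> (1.5, 6.5)
```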
#### File: NanchiPlot/nanchi/uimpl.py
```python
import matplotlib.pyplot as plt
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg
import matplotlib.lines as lines
import numpy as np
import wx
class ZoomRectangle(object):
def __init__(self,figure,axes,canvas):
self.canvas = canvas
self.figure = figure
self.axes = axes
self.cline = lines.Line2D([],[], color="#00ff00", ls="--")
def connect(self):
print "connect: ",self.canvas,self.figure,self.axes
self.btpress = self.canvas.mpl_connect("button_press_event", self.on_press)
self.btrelease = self.canvas.mpl_connect("button_release_event", self.on_release)
print self.btpress, self.btrelease
self.axes.add_line(self.cline)
def on_motion(self,event):
self.cline.set_xdata([])
self.cline.set_ydata([])
# ---
self.x = event.xdata
self.y = event.ydata
# ---
xdata = [self.x0, self.x0, self.x, self.x, self.x0]
ydata = [self.y0, self.y, self.y, self.y0, self.y0]
# ---
self.cline.set_xdata(xdata)
self.cline.set_ydata(ydata)
# ---
self.canvas.draw()
def on_press(self,event):
#~ print "Press"
self.x0 = event.xdata
self.y0 = event.ydata
self.motion = self.canvas.mpl_connect("motion_notify_event", self.on_motion)
def on_release(self,event):
"Release"
self.canvas.mpl_disconnect(self.motion)
self.canvas.mpl_disconnect(self.btpress)
self.canvas.mpl_disconnect(self.btrelease)
min_x = min([self.x0, self.x])
max_x = max([self.x0, self.x])
min_y = min([self.y0, self.y])
max_y = max([self.y0, self.y])
self.axes.set_xlim(min_x, max_x)
self.axes.set_ylim(min_y, max_y)
self.canvas.draw()
class FigureCanvas(FigureCanvasWxAgg):
def __init__(self,parent,id,figure,**kwargs):
FigureCanvasWxAgg.__init__(self,parent=parent, id=id, figure=figure,**kwargs)
self.figure = figure
self.axes = self.figure.get_axes()[0]
def disconnect_all(self):
try:
self.mpl_disconnect(self.motion)
self.mpl_disconnect(self.btpress)
self.mpl_disconnect(self.btrelease)
except:
pass
def zoomit(self):
self.cline = lines.Line2D([],[], color="#ff00ff", ls="--", lw=2.0)
self.btpress = self.mpl_connect("button_press_event", self.on_press)
self.btrelease = self.mpl_connect("button_release_event", self.on_release)
self.axes.add_line(self.cline)
def on_motion(self,event):
self.cline.set_xdata([])
self.cline.set_ydata([])
# ---
self.x = event.xdata
self.y = event.ydata
# ---
xdata = [self.x0, self.x0, self.x, self.x, self.x0]
ydata = [self.y0, self.y, self.y, self.y0, self.y0]
# ---
self.cline.set_xdata(xdata)
self.cline.set_ydata(ydata)
# ---
self.draw()
def on_press(self,event):
self.x0 = event.xdata
self.y0 = event.ydata
self.motion = self.mpl_connect("motion_notify_event", self.on_motion)
def on_release(self,event):
self.disconnect_all()
try:
self.cline.remove() # Delete box
except:
self.stop_event_loop()
min_x = min([self.x0, self.x])
max_x = max([self.x0, self.x])
min_y = min([self.y0, self.y])
max_y = max([self.y0, self.y])
self.axes.set_xlim(min_x, max_x)
self.axes.set_ylim(min_y, max_y)
self.draw()
if __name__ == '__main__':
plt.plot([1,2,3,12,1,3])
fig = plt.gcf()
ax = plt.gca()
zr = ZoomRectangle(fig,ax,fig.canvas)
zr.connect()
plt.show()
``` |
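The rubber-band box drawn by `on_motion` is just a closed five-point polyline between the press point and the current cursor position:

```python
# The zoom box is a closed 5-point polyline from (x0, y0) to (x, y);
# the coordinates below are arbitrary.
x0, y0, x, y = 1.0, 1.0, 4.0, 3.0
xdata = [x0, x0, x, x, x0]
ydata = [y0, y, y, y0, y0]
print(list(zip(xdata, ydata)))
```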
{
"source": "JorgeDeLosSantos/nusa",
"score": 3
} |
#### File: examples/spring/simple_case.py
```python
from nusa.core import *
from nusa.model import *
from nusa.element import *
def simple_case():
P = 750
k = 300
# Model
ms = SpringModel("Simple")
# Nodes
n1 = Node((0,0))
n2 = Node((0,0))
# Elements
e1 = Spring((n1,n2),k)
for nd in (n1,n2):
ms.add_node(nd)
ms.add_element(e1)
ms.add_force(n2,(P,))
ms.add_constraint(n1,ux=0)
ms.solve()
# print("Node displacements")
# for n in ms.get_nodes():
# print(n.ux, n.uy)
print(ms.simple_report())
if __name__ == '__main__':
simple_case()
```
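As a sanity check on the single-spring case: with node 1 fixed and a force P applied at node 2, static equilibrium gives a displacement of P/k, which is what the solved model should report (the exact format depends on nusa's `simple_report`).

```python
# Analytical check for the one-element spring case above: u2 = P / k.
P, k = 750.0, 300.0
print(P / k)   # 2.5
```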
#### File: nusa/nusa/_experimental.py
```python
import numpy as np
import numpy.linalg as la
import json
from nusa import *
# class NusaModelReader(object):
# def __init__(self,filename):
# self.filename = filename
def read_model(filename,mtype="spring"):
if mtype == "spring":
return _read_spring_model(filename)
elif mtype == "truss":
return _read_truss_model(filename)
else:
raise ValueError("mtype must be a valid model type (spring, truss, bar, beam, lineartriangle)")
def _read_truss_model(filename):
nodes_data,elements_data,constraints_data,forces_data = _get_data_from_json(filename)
nc = nodes_data
ec = elements_data
x,y = nc[:,0], nc[:,1]
nodes = []
elements = []
for k,nd in enumerate(nc):
cn = Node((x[k],y[k]))
nodes.append(cn)
for k,elm in enumerate(ec):
i,j,E,A = int(elm[0]-1),int(elm[1]-1),elm[2],elm[3]
ni,nj = nodes[i],nodes[j]
ce = Truss((ni,nj), E, A)
elements.append(ce)
model = TrussModel("Truss Model")
for n in nodes:
model.add_node(n)
for e in elements:
model.add_element(e)
for c in constraints_data:
k,ux,uy = int(c[0]),c[1],c[2]
if ~np.isnan(ux) and ~np.isnan(uy):
model.add_constraint(nodes[k-1], ux=ux, uy=uy)
elif ~np.isnan(ux):
model.add_constraint(nodes[k-1], ux=ux)
elif ~np.isnan(uy):
model.add_constraint(nodes[k-1], uy=uy)
for f in forces_data:
k,fx,fy = int(f[0]),f[1],f[2]
model.add_force(nodes[k-1],(fx,fy))
return model
def _read_spring_model(filename):
nodes_data,elements_data,constraints_data,forces_data = _get_data_from_json(filename)
nc = nodes_data
ec = elements_data
x,y = nc[:,0], nc[:,1]
nodes = []
elements = []
for k,nd in enumerate(nc):
cn = Node((x[k],y[k]))
nodes.append(cn)
for k,elm in enumerate(ec):
i,j,ke = int(elm[0]-1),int(elm[1]-1),elm[2]
ni,nj = nodes[i],nodes[j]
ce = Spring((ni,nj), ke)
elements.append(ce)
model = SpringModel("Truss Model")
for n in nodes:
model.add_node(n)
for e in elements:
model.add_element(e)
for c in constraints_data:
k,ux,uy = int(c[0]),c[1],c[2]
if ~np.isnan(ux) and ~np.isnan(uy):
model.add_constraint(nodes[k-1], ux=ux, uy=uy)
elif ~np.isnan(ux):
model.add_constraint(nodes[k-1], ux=ux)
elif ~np.isnan(uy):
model.add_constraint(nodes[k-1], uy=uy)
for f in forces_data:
k,fx,fy = int(f[0]),f[1],f[2]
model.add_force(nodes[k-1],(fx,fy))
return model
def _dicts2array(listofdicts):
"""
Convert a list of dicts to numpy array [internal purposes only]
"""
nel = len(listofdicts)
nch = len(listofdicts[0])
keys = listofdicts[0].keys()
array = np.zeros((nel,nch))
for i,dc in enumerate(listofdicts):
for j,key in enumerate(keys):
value = dc[key]
if value == "free": # in case of "free" constraints
value = np.nan
array[i,j] = value
return array
def _get_data_from_json(filename):
with open(filename, 'r') as nusafile:
data = nusafile.read()
obj = json.loads(data)
nodes_data = _dicts2array(obj["nodes"])
elements_data = _dicts2array(obj["elements"])
constraints_data = _dicts2array(obj["constraints"])
forces_data = _dicts2array(obj["forces"])
return nodes_data,elements_data,constraints_data,forces_data
if __name__=='__main__':
fname = "data/truss_model01.nusa"
m1 = read_model(fname, "truss")
m1.solve()
m1.simple_report()
``` |
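`_dicts2array` is the piece that turns the JSON records into plain NumPy rows, mapping the string "free" to NaN so the constraint logic above can test it with `np.isnan`. A standalone check (the field names here are made up for illustration):

```python
# Standalone check of the "free" -> NaN convention used by _dicts2array().
import numpy as np

def _dicts2array(listofdicts):
    nel = len(listofdicts)
    nch = len(listofdicts[0])
    keys = listofdicts[0].keys()
    array = np.zeros((nel, nch))
    for i, dc in enumerate(listofdicts):
        for j, key in enumerate(keys):
            value = dc[key]
            if value == "free":   # "free" constraint -> NaN
                value = np.nan
            array[i, j] = value
    return array

print(_dicts2array([{"node": 1, "ux": 0, "uy": "free"}]))
# -> [[ 1.  0. nan]]
```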
{
"source": "JorgeDeLosSantos/PiChi",
"score": 2
} |
#### File: PiChi/core/__testCtrls.py
```python
import wx
class Test(wx.Frame):
def __init__(self,parent):
wx.Frame.__init__(self,parent,title="Test")
mainsz = wx.BoxSizer(wx.VERTICAL)
self.SetSizer(mainsz)
lc = wx.ListCtrl(self, -1, size=(-1,100),
style=wx.LC_REPORT
|wx.BORDER_SUNKEN)
lc.InsertColumn(0, "C1")
lc.InsertColumn(1, "C2")
lc.InsertColumn(2, "C3")
for x in range(0,5):
lc.InsertStringItem(x, "RPY")
lc.SetStringItem(x, 1, "HI")
lc.SetStringItem(x, 2, "HI")
mainsz.Add(lc, 1, wx.EXPAND)
self.Centre(1)
self.Show()
def main():
app = wx.App()
fr = Test(None)
app.MainLoop()
if __name__=='__main__':
main()
```
#### File: PiChi/core/uicalc.py
```python
import wx
import sympy
import matplotlib
matplotlib.use('WXAgg')
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from matplotlib.figure import Figure
from sympy import (sin,cos,tan,sec,csc,cot,ln,log,exp,asin,acos,atan,sqrt) # Some common functions
from cfg import *
def preproc(cad):
"""
Placeholder: to be replaced by the equivalent function in the "util" module.
"""
return cad.replace("^","**")
class UIDer(wx.Frame):
def __init__(self,parent):
wx.Frame.__init__(self,parent,title=PICHI_STR,size=(400,300))
self.funpanel = wx.Panel(self,-1)
self.initSizers()
self.initCanvas()
self.initCtrls()
self.Centre(1)
self.Show()
def initSizers(self):
self.mainsz = wx.BoxSizer(wx.VERTICAL)
self.funsz = wx.BoxSizer(wx.HORIZONTAL)
self.funpanel.SetSizer(self.funsz)
self.SetSizer(self.mainsz)
def initCtrls(self):
self.funlabel = wx.StaticText(self.funpanel, -1, " f(x) ")
self.fun = wx.TextCtrl(self.funpanel, -1, "")
self.boton = wx.Button(self, -1, "Derivar")
# Fonts
font1 = self.funlabel.GetFont()
font1.SetPointSize(12)
self.funlabel.SetFont(font1)
self.fun.SetFont(font1)
self.fun.SetForegroundColour((0,0,255))
self.funsz.Add(self.funlabel, 1, wx.EXPAND|wx.ALL, 5)
self.funsz.Add(self.fun, 7, wx.EXPAND|wx.ALL, 5)
self.mainsz.Add(self.funpanel, 1, wx.EXPAND|wx.ALL, 5)
self.mainsz.Add(self.boton, 1, wx.EXPAND|wx.ALL, 5)
self.mainsz.Add(self.canvas, 6, wx.EXPAND|wx.ALL, 5)
self.Bind(wx.EVT_BUTTON, self.derivar, self.boton)
def initCanvas(self):
self.figure = Figure()
# FigureCanvas
self.canvas = FigureCanvas(self, -1, self.figure)
self.figure.set_facecolor((1,1,1)) # ...
self.string = self.figure.text(0.05, 0.5, "")
self.string.set_fontsize(18)
def derivar(self,event):
x = sympy.Symbol("x")
fx = self.fun.GetValue() # Original function (as a string)
fx = preproc(fx)
Fx = sympy.diff(eval(fx)) # Derivative
str_Fx = "$\\frac{d}{dx}(%s)\, = \,%s$"%(sympy.latex(eval(fx)), sympy.latex(Fx))
self.string.set_text(str_Fx)
self.canvas.draw() # Redraw
class UIInt(wx.Frame):
def __init__(self,parent):
wx.Frame.__init__(self,parent,title=PICHI_STR,size=(400,300))
self.funpanel = wx.Panel(self, -1)
self.initSizers()
self.initCanvas()
self.initCtrls()
self.SetBackgroundColour("#FFFFFF")
self.Centre(1)
self.Show()
def initSizers(self):
self.mainsz = wx.BoxSizer(wx.VERTICAL)
self.funsz = wx.BoxSizer(wx.HORIZONTAL)
self.funpanel.SetSizer(self.funsz)
self.SetSizer(self.mainsz)
def initCtrls(self):
self.funlabel = wx.StaticText(self.funpanel, -1, " f(x) ")
self.fun = wx.TextCtrl(self.funpanel, -1, "")
self.boton = wx.Button(self, -1, "Integrar", size=(100,25))
# Fonts
font1 = self.funlabel.GetFont()
font1.SetPointSize(12)
self.funlabel.SetFont(font1)
self.fun.SetFont(font1)
self.fun.SetForegroundColour((0,0,255))
self.funsz.Add(self.funlabel, 1, wx.EXPAND|wx.ALL, 5)
self.funsz.Add(self.fun, 7, wx.EXPAND|wx.ALL, 5)
self.mainsz.Add(self.funpanel, 1, wx.EXPAND|wx.ALL, 5)
self.mainsz.Add(self.canvas, 6, wx.EXPAND|wx.ALL, 5)
self.mainsz.Add(self.boton, 0, wx.ALIGN_CENTRE|wx.ALL, 5)
self.Bind(wx.EVT_BUTTON, self.integrar, self.boton)
def initCanvas(self):
self.figure = Figure()
# FigureCanvas
self.canvas = FigureCanvas(self, -1, self.figure)
self.figure.set_facecolor((1,1,1))
self.string = self.figure.text(0.05, 0.5, "")
self.string.set_fontsize(18)
def integrar(self,event):
x = sympy.Symbol("x")
fx = self.fun.GetValue() # Original function (as a string)
fx = preproc(fx)
Fx = sympy.integrate(eval(fx)) # Antiderivative
str_Fx = "$\int \, (%s) \,dx \,= \,%s + C$"%(sympy.latex(eval(fx)), sympy.latex(Fx))
self.string.set_text(str_Fx)
self.canvas.draw()
class UILimit(wx.Frame):
def __init__(self,parent):
wx.Frame.__init__(self,parent,title=PICHI_STR,size=(400,300))
self.funpanel = wx.Panel(self, -1)
self.initSizers()
self.initCanvas()
self.initCtrls()
self.Centre(1)
self.Show()
def initSizers(self):
self.mainsz = wx.BoxSizer(wx.VERTICAL)
self.funsz = wx.BoxSizer(wx.HORIZONTAL)
self.funpanel.SetSizer(self.funsz)
self.SetSizer(self.mainsz)
def initCtrls(self):
self.funlabel = wx.StaticText(self.funpanel, -1, " f(x) ")
self.fun = wx.TextCtrl(self.funpanel, -1, "", style= wx.TE_PROCESS_ENTER)
self.boton = wx.Button(self, -1, "Calcular limite")
# Fonts
font1 = self.funlabel.GetFont()
font1.SetPointSize(12)
self.funlabel.SetFont(font1)
self.fun.SetFont(font1)
self.fun.SetForegroundColour((0,0,255))
self.funsz.Add(self.funlabel, 1, wx.EXPAND|wx.ALL, 5)
self.funsz.Add(self.fun, 7, wx.EXPAND|wx.ALL, 5)
self.mainsz.Add(self.funpanel, 1, wx.EXPAND|wx.ALL, 5)
self.mainsz.Add(self.boton, 1, wx.EXPAND|wx.ALL, 5)
self.mainsz.Add(self.canvas, 6, wx.EXPAND|wx.ALL, 5)
self.Bind(wx.EVT_BUTTON, self.limite, self.boton)
self.Bind(wx.EVT_TEXT_ENTER, self.limite)
def initCanvas(self):
self.figure = Figure()
# FigureCanvas
self.canvas = FigureCanvas(self, -1, self.figure)
self.figure.set_facecolor((1,1,1))
self.string = self.figure.text(0.05, 0.5, "")
self.string.set_fontsize(18)
def limite(self,event):
x = sympy.Symbol("x")
so = self.fun.GetValue() # Original input: "function, value"
if not(so):
print "Function not defined"
return False
so = preproc(so)
so = so.split(",")
fx = so[0]
val = float(so[1])
Fx = sympy.limit(eval(fx),x,val) # Computed limit
str_Fx = r"$ \lim_{x \to %s} \, (%s) \,= \,%s$"%(val, sympy.latex(eval(fx)), sympy.latex(Fx))
self.string.set_text(str_Fx)
self.canvas.draw()
if __name__=='__main__':
app = wx.App()
fr = UILimit(None)
app.MainLoop()
```
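The symbolic core of `UIDer.derivar` can be run without the GUI. A sketch that repeats the `preproc` replacement and the SymPy call (the sample function is arbitrary):

```python
# GUI-free sketch of derivar(): "^" is rewritten to "**", the string is
# eval'd into a SymPy expression and differentiated.
import sympy
from sympy import sin, cos, exp, sqrt

x = sympy.Symbol("x")
fx = "x^2*sin(x)".replace("^", "**")   # same job as preproc()
Fx = sympy.diff(eval(fx))              # derivative of x**2*sin(x)
print(sympy.latex(Fx))
```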
#### File: PiChi/core/uilinalg.py
```python
import wx
from cfg import *
class UIDet(wx.Frame):
def __init__(self,parent):
wx.Frame.__init__(self,parent,title=PYXMATH_NAME,size=(400,300))
self.Centre(1)
self.Show()
if __name__=='__main__':
app = wx.App()
fr = UIDet(None)
app.MainLoop()
``` |
{
"source": "JorgeDeLosSantos/pyapdl",
"score": 3
} |
#### File: pyapdl/pyapdl/materials.py
```python
import numpy as np
class Material(object):
def __init__(self,name,**props):
"""
Create a material class
Properties
----------
E : int,float
Young modulus
nu : int,float
Poisson's ratio
density : int,float
Density
"""
self.name = name
if props.has_key("E"): self.__E = props["E"]
if props.has_key("nu"): self.__nu = props["nu"]
if props.has_key("density"): self.__density = props["density"]
@property
def nu(self):
return self.__nu
@nu.setter
def nu(self,val):
self.__nu = val
@property
def E(self):
return self.__E
@E.setter
def E(self,val):
self.__E = val
@property
def density(self):
return self.__density
@density.setter
def density(self,val):
self.__density = val
def toANSYS(self,number):
E_str = "MP,EX,%g,%g"%(number,self.E)
nu_str = "MP,NUXY,%g,%g"%(number,self.nu)
density_str = "MP,DENS,%g,%g"%(number,self.density)
return "\n".join([E_str,nu_str,density_str])
def create_material(n,**kwargs):
m = Material(n,**kwargs)
return m.toANSYS(n)
if __name__ == '__main__':
print create_material(1,E=100,nu=0.3,density=7850)
```
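For reference, the APDL text that `toANSYS` assembles for the `__main__` call above is three `MP` commands. A standalone sketch of the same string formatting:

```python
# Same string formatting as toANSYS() for material 1, E=100, nu=0.3,
# density=7850 (the values used in __main__ above).
number, E, nu, density = 1, 100, 0.3, 7850
print("\n".join(["MP,EX,%g,%g" % (number, E),
                 "MP,NUXY,%g,%g" % (number, nu),
                 "MP,DENS,%g,%g" % (number, density)]))
# MP,EX,1,100
# MP,NUXY,1,0.3
# MP,DENS,1,7850
```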
#### File: pyapdl/pyapdl/meshing.py
```python
def mesh_line(line):
_ml = "LMESH,%s,,"%(line)
return _ml
if __name__ == '__main__':
pass
``` |
{
"source": "JorgeDeLosSantos/ubim",
"score": 3
} |
#### File: images/ch4/code_mpl.py
```python
import matplotlib.pyplot as plt
import numpy as np
__all__ = ["img_01",
"img_02",
"img_03",
"img_04",
"img_05",
"img_06",
"img_07"]
def img_01():
x = np.linspace(0,10)
y = np.cos(x)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x, y)
plt.savefig("img_01.png")
def img_02():
T = [50, 60, 70, 80, 90, 100, 110, 120]
P = [12, 20, 33, 54, 90, 148, 244, 403]
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(T, P)
ax.set_xlabel(u"Temperatura (°C)")
ax.set_ylabel(u"Presión (KPa)")
ax.set_title(u"Relación P-T")
plt.savefig("img_02.png")
def img_03():
x = np.linspace(0,10)
y = np.cos(x)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x, y, lw=2)
ax.plot(x, y+1, lw=4)
ax.plot(x, y+2, linewidth=6)
plt.savefig("img_03c.png")
def img_04():
theta = np.linspace(0,2*np.pi,1000)
r = 0.25*np.cos(3*theta)
fig = plt.figure()
ax = fig.add_subplot(111, projection="polar")
ax.plot(theta, r)
plt.savefig("img_04.png")
def img_05():pass
def img_06():pass
def img_07():pass
def img_08():pass
if __name__=='__main__':
[eval(fun+"()") for fun in __all__]
``` |
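The `eval`-based dispatch in `__main__` can also be written without `eval`; a sketch of the more common idiom for calling functions listed by name (it relies on the `__all__` list defined above and behaves the same):

```python
# eval-free equivalent of the list comprehension in __main__ above.
for fun in __all__:
    globals()[fun]()
```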
{
"source": "Jorge-DevOps/API_HealthTech",
"score": 2
} |
#### File: Apps/LoginUsuarios/serializers.py
```python
from rest_framework import serializers
from django.contrib.auth.models import User
# User Serializer
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id', 'username', 'email','password')
extra_kwargs = {'password':{'<PASSWORD>': True}}
def create(self, validated_data):
user = User(
email=validated_data['email'],
username=validated_data['username']
)
user.set_password(validated_data['password'])
user.save()
return user
# Register Serializer
class RegisterSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id', 'username', 'email', 'password')
extra_kwargs = {'password': {'<PASSWORD>': True}}
def create(self, validated_data):
user = User.objects.create_user(validated_data['username'], validated_data['email'], validated_data['password'])
return user
```
#### File: Apps/LoginUsuarios/views.py
```python
from django.http import response
from django.shortcuts import render
# Create your views here.
from rest_framework import generics, permissions
from rest_framework.response import Response
from knox.models import AuthToken
from .serializers import UserSerializer, RegisterSerializer
from django.http import HttpResponse
# Register-- API
class RegisterAPI(generics.GenericAPIView):
serializer_class = RegisterSerializer
def post(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
user = serializer.save()
return Response({
"user": UserSerializer(user, context=self.get_serializer_context()).data,
"token": AuthToken.objects.create(user)[1]
})
#---------Part Login in the api------------
from django.contrib.auth import login
from rest_framework import permissions
from rest_framework.authtoken.serializers import AuthTokenSerializer
from knox.views import LoginView as KnoxLoginView
import json
from django import http
from Apps.share.paciente.models import Paciente
from Apps.share.medico.models import Medico
from Apps.share.administrador.models import Administrador
class LoginAPI(KnoxLoginView):
permission_classes = (permissions.AllowAny,)
def traerPerfil(consulta):
for p in consulta:
print("valor de P: ",p.id_perfil.id_perfil)
return int(p.id_perfil.id_perfil)
def post(self, request, format=None):
serializer = AuthTokenSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
user = serializer.validated_data['user']
# Validation
dataRequestPerfil=request.data.get('perfil')
dataRequestUsername=request.data.get('username')
if dataRequestPerfil=="1":
queryAdministrador=f"SELECT id_usuario, id_perfil FROM administrador WHERE email = '{dataRequestUsername}'"
consulta=Administrador.objects.raw(queryAdministrador)
elif dataRequestPerfil=="2":
queryPaciente=f"SELECT id_usuario, id_perfil FROM paciente WHERE email = '{dataRequestUsername}'"
consulta=Paciente.objects.raw(queryPaciente)
elif dataRequestPerfil=="3":
queryMedico=f"SELECT id_usuario, id_perfil FROM medico WHERE email = '{dataRequestUsername}'"
consulta=Medico.objects.raw(queryMedico)
#queryPaciente2= Paciente.objects.raw(f'SELECT id_perfil FROM paciente WHERE email = '{dataRequestUsername}'')
if LoginAPI.traerPerfil(consulta)==int(dataRequestPerfil):
login(request, user)
response=super(LoginAPI, self).post(request, format=None)
print("si es un paciente")
else:
response=HttpResponse("Credenciales invalidas")
# Part of the code used for the login
return response
```
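The raw queries above interpolate the request value into the SQL string with an f-string. Django's `Manager.raw()` also accepts a parameter list, which keeps the value out of the SQL text; a sketch of that variant (not the project's code):

```python
# Parameterised variant of the raw lookup above (sketch, not the project's
# code): the email value is passed as a query parameter instead of being
# formatted into the SQL string.
consulta = Paciente.objects.raw(
    "SELECT id_usuario, id_perfil FROM paciente WHERE email = %s",
    [dataRequestUsername],
)
```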
#### File: share/cita/views.py
```python
from django.shortcuts import render
from django.db import connection
from django.views.generic.base import View
from rest_framework.generics import UpdateAPIView
from xlwt.Formatting import Font
from django.http import HttpResponse
import xlwt
from Apps.share.medico.models import Medico
from Apps.horario.models import horario
from Apps.agenda.models import Agenda
import json
from json import loads
from django.views import View
from rest_framework import permissions
from django.views.generic.edit import UpdateView
from knox.views import LoginView as KnoxLoginView
#from knox.views import LoginView as KnoxLoginView
# Create your views here.
def traerMedicos(request):
consulta = list(Medico.objects.values('username', 'id_agenda'))
response = HttpResponse(json.dumps(consulta, indent=4), content_type='application/json')
return response
def horariosDisponibles(request):
dic = loads(request.body)
idAgenda=dic['id_agenda']
dataRequestFecha=dic['fecha']
#with connection.cursor() as cursor:
# cursor.execute("""SELECT agenda.id_agenda
# FROM agenda
# JOIN medico ON (agenda.id_agenda = medico.id_agenda)
# WHERE medico.email = %s""", [dataRequestUsername]
# )
#
# result = dictfetchall(cursor)
#idAgenda = result[0]['id_agenda']
with connection.cursor() as cursor:
cursor.execute("""SELECT *
FROM horario
WHERE horario.id_horario NOT IN
(
SELECT cita.id_horario
FROM cita
WHERE cita.fecha =%s AND
cita.id_agenda =%s
)""", [dataRequestFecha, idAgenda]
)
result = dictfetchall(cursor)
response = HttpResponse(json.dumps(result, indent=4, default=str),content_type='application/json')
return response
def export_informeCitas(request):
response = HttpResponse(content_type='application/ms-excel')
# we need to send the content in the response
response['Content-Disposition'] = 'attachment; filename=Ep_' + \
str('informeCitasMedicas')+'.xls'
wb = xlwt.Workbook(encoding='utf-8')
ws=wb.add_sheet('Cita')
row_num = 0
font_style=xlwt.XFStyle()
font_style.font.bold =True
columns = ['id_cita','id_agenda','id_usuario','fecha','id_horario']
for col_num in range(len(columns)):
ws.write(row_num,col_num,columns[col_num], font_style)
font_style=xlwt.XFStyle()
with connection.cursor() as cursor:
cursor.execute("SELECT id_cita, id_agenda, id_usuario, fecha, id_horario FROM cita ")
rawData = cursor.fetchall()
result = []
for r in rawData:
result.append(list(r))
rows = result
for row in rows:
row_num+=1
for col_num in range(len(row)):
ws.write(row_num,col_num,str(row[col_num]), font_style)
wb.save(response)
return response
from json import loads
def lista_citas(request):
dic = loads(request.body)
dataRequestUsername=dic['username']
with connection.cursor() as cursor:
cursor.execute("""SELECT medico.username, cita.fecha, horario.hora_inicio, horario.hora_fin, consultorio.nombre
FROM medico
JOIN agenda ON (medico.id_agenda = agenda.id_agenda)
JOIN consultorio ON(agenda.id_consultorio = consultorio.id_consultorio)
JOIN cita ON (agenda.id_agenda = cita.id_agenda)
JOIN horario ON (cita.id_horario = horario.id_horario)
JOIN paciente ON (cita.id_usuario = paciente.id_usuario)
WHERE paciente.email = %s""", [dataRequestUsername])
result = dictfetchall(cursor)
response = HttpResponse(json.dumps(result, indent=4, default=str),content_type='application/json')
return response
def dictfetchall(cursor):
"Return all rows from a cursor as a dict"
columns = [col[0] for col in cursor.description]
return [
dict(zip(columns, row))
for row in cursor.fetchall()
]
``` |
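`dictfetchall` follows the standard Django recipe: it zips the column names from `cursor.description` with each fetched row. A tiny illustration with made-up values:

```python
# Illustration of dictfetchall()'s zip of column names and rows
# (the values are made up).
columns = ["id_cita", "fecha"]
rows = [(1, "2021-10-05"), (2, "2021-10-06")]
print([dict(zip(columns, row)) for row in rows])
# [{'id_cita': 1, 'fecha': '2021-10-05'}, {'id_cita': 2, 'fecha': '2021-10-06'}]
```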
{
"source": "jorgedfbranco/apm-server",
"score": 3
} |
#### File: apm-server/script/check_changelogs.py
```python
import io
import hashlib
import os
import requests
SUPPORTED_VERSIONS = ["6.8", "7.5", "7.6", "7.x"]
def parse_version(version):
return tuple([int(x) if x != "x" else 100 for x in version.split('.')])
def shasum(fp):
h = hashlib.sha1()
while True:
buf = fp.read()
if len(buf) == 0:
break
h.update(buf)
return h.hexdigest()
def main():
cl_dir = 'changelogs'
any_failures = False
for cl in sorted(os.listdir(cl_dir)):
version, _ = os.path.splitext(cl)
if version not in SUPPORTED_VERSIONS:
continue
parsed_version = parse_version(version)
with open(os.path.join(cl_dir, cl), mode='rb') as f:
master = shasum(f)
print("**", cl, master, "**")
for v in SUPPORTED_VERSIONS:
if parsed_version <= parse_version(v):
print("checking {} on {}".format(cl, v))
url = "https://raw.githubusercontent.com/elastic/apm-server/{}/changelogs/{}".format(v, cl)
rsp = requests.get(url)
status = "success"
if rsp.status_code == 200:
h = shasum(io.BytesIO(rsp.content))
else:
h = "error: {}".format(rsp.status_code)
# rsp.raise_for_status()
if h != master:
status = "failed"
any_failures = True
print(h, url, status)
print()
if any_failures:
raise Exception('Some changelogs are missing or out of sync; see the lines marked "failed" above.')
if __name__ == '__main__':
main()
``` |
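`parse_version` maps the literal "x" to 100 so that a development branch such as "7.x" compares greater than every released minor on the same major. A quick check:

```python
# parse_version() treats "x" as 100, so "7.x" sorts after any 7.* release.
def parse_version(version):
    return tuple(int(x) if x != "x" else 100 for x in version.split('.'))

print(parse_version("7.5") <= parse_version("7.x"))   # True
print(parse_version("7.x") <= parse_version("7.5"))   # False
```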
{
"source": "jorgediazjr/dials-dev20191018",
"score": 2
} |
#### File: cbflib/drel-ply/drel-gu.py
```python
import drel_lex
import drel_yacc
import sys
import CifFile
import StarFile
class Process:
def execute_method(self):
#create our lexer and parser
self.lexer = drel_lex.lexer
self.parser = drel_yacc.parser
#use a simple dictionary
self.testdic = CifFile.CifDic("dict/cif_short.dic")
self.testdic.diclang = "DDLm"
self.testblock = CifFile.CifFile("tests/c2ctest5.cif") ["c2ctest5"]
#create the global namespace
self.namespace = self.testblock.keys()
self.namespace = dict(map(None,self.namespace,self.namespace))
self.parser.loopable_cats = ["import"]
self.parser.special_id = [self.namespace]
self.parser.withtable = {}
self.parser.target_id = None
self.parser.indent = ""
#get valuename from cmdline
valuename = sys.argv[1]
f = open("method_expression")
expression = f.readline()
str_list = []
str_list.append(expression)
while expression:
expression = f.readline()
str_list.append(expression)
expression = "".join(str_list)
self.parser.target_id = valuename
res = self.parser.parse(expression + "\n", lexer=self.lexer)
realfunc = drel_yacc.make_func(res, "myfunc", valuename)
print "Compiling dREL....."
exec realfunc
realres = myfunc(self.testdic,self.testblock)
print "Generated value: %s" % realres
fout = open("method_output", 'w')
print>>fout, realres
#method returns realres as the value that would be missing
#for validation
#failUnless(realres == value)
p = Process()
p.execute_method()
```
#### File: pycbf/xmas/readmarheader.py
```python
import struct
# Convert mar c header file types to python struct module types
mar_c_to_python_struct = {
"INT32" : "i",
"UINT32" : "I",
"char" : "c",
"UINT16" : "H"
}
# Sizes (bytes) of mar c header objects
mar_c_sizes = {
"INT32" : 4,
"UINT32" : 4,
"char" : 1,
"UINT16" : 2
}
# This was worked out by trial and error from a trial image I think
MAXIMAGES=9
def make_format(cdefinition):
"""
Reads the header definition in c and makes the format
string to pass to struct.unpack
"""
lines = cdefinition.split("\n")
fmt = ""
names = []
expected = 0
for line in lines:
if line.find(";")==-1:
continue
decl = line.split(";")[0].lstrip().rstrip()
try:
[type, name] = decl.split()
except:
#print "skipping:",line
continue
# print "type:",type," name:",name
if name.find("[")>-1:
# repeated ... times
try:
num = name.split("[")[1].split("]")[0]
num = num.replace("MAXIMAGES",str(MAXIMAGES))
num = num.replace("sizeof(INT32)","4")
times = eval(num)
except:
print "Please decode",decl
raise
else:
times=1
try:
fmt += mar_c_to_python_struct[type]*times
names += [name]*times
expected += mar_c_sizes[type]*times
except:
#print "skipping",line
continue
#print "%4d %4d"%(mar_c_sizes[type]*times,expected),name,":",times,line
#print struct.calcsize(fmt),expected
return names, fmt
def read_mar_header(filename):
"""
Get the header from a binary file
"""
f = open(filename,"rb")
f.seek(1024)
header=f.read(3072)
f.close()
return header
def interpret_header(header, fmt, names):
"""
given a format and header interpret it
"""
values = struct.unpack(fmt,header)
dict = {}
i=0
for name in names:
if dict.has_key(name):
if type(values[i]) == type("string"):
dict[name] = dict[name]+values[i]
else:
try:
dict[name].append(values[i])
except:
dict[name] = [dict[name],values[i]]
else:
dict[name] = values[i]
i=i+1
return dict
# Now for the c definition (found on mar webpage)
# The following string is therefore copyrighted by Mar I guess
cdefinition = """
typedef struct frame_header_type {
/* File/header format parameters (256 bytes) */
UINT32 header_type; /* flag for header type
(can be used as magic number) */
char header_name[16]; /* header name (MMX) */
UINT32 header_major_version; /* header_major_version (n.) */
UINT32 header_minor_version; /* header_minor_version (.n) */
UINT32 header_byte_order;/* BIG_ENDIAN (Motorola,MIPS);
LITTLE_ENDIAN (DEC, Intel) */
UINT32 data_byte_order; /* BIG_ENDIAN (Motorola,MIPS);
LITTLE_ENDIAN (DEC, Intel) */
UINT32 header_size; /* in bytes */
UINT32 frame_type; /* flag for frame type */
UINT32 magic_number; /* to be used as a flag -
usually to indicate new file */
UINT32 compression_type; /* type of image compression */
UINT32 compression1; /* compression parameter 1 */
UINT32 compression2; /* compression parameter 2 */
UINT32 compression3; /* compression parameter 3 */
UINT32 compression4; /* compression parameter 4 */
UINT32 compression5; /* compression parameter 4 */
UINT32 compression6; /* compression parameter 4 */
UINT32 nheaders; /* total number of headers */
UINT32 nfast; /* number of pixels in one line */
UINT32 nslow; /* number of lines in image */
UINT32 depth; /* number of bytes per pixel */
UINT32 record_length; /* number of pixels between
succesive rows */
UINT32 signif_bits; /* true depth of data, in bits */
UINT32 data_type; /* (signed,unsigned,float...) */
UINT32 saturated_value; /* value marks pixel as saturated */
UINT32 sequence; /* TRUE or FALSE */
UINT32 nimages; /* total number of images - size of
each is nfast*(nslow/nimages) */
UINT32 origin; /* corner of origin */
UINT32 orientation; /* direction of fast axis */
UINT32 view_direction; /* direction to view frame */
UINT32 overflow_location;/* FOLLOWING_HEADER, FOLLOWING_DATA */
UINT32 over_8_bits; /* # of pixels with counts 255 */
UINT32 over_16_bits; /* # of pixels with count 65535 */
UINT32 multiplexed; /* multiplex flag */
UINT32 nfastimages; /* # of images in fast direction */
UINT32 nslowimages; /* # of images in slow direction */
UINT32 background_applied; /* flags correction has been applied -
hold magic number ? */
UINT32 bias_applied; /* flags correction has been applied -
hold magic number ? */
UINT32 flatfield_applied; /* flags correction has been applied -
hold magic number ? */
UINT32 distortion_applied; /* flags correction has been applied -
hold magic number ? */
UINT32 original_header_type; /* Header/frame type from file
that frame is read from */
UINT32 file_saved; /* Flag that file has been saved,
should be zeroed if modified */
char reserve1[(64-40)*sizeof(INT32)-16];
/* Data statistics (128) */
UINT32 total_counts[2]; /* 64 bit integer range = 1.85E19*/
UINT32 special_counts1[2];
UINT32 special_counts2[2];
UINT32 min;
UINT32 max;
UINT32 mean;
UINT32 rms;
UINT32 p10;
UINT32 p90;
UINT32 stats_uptodate;
UINT32 pixel_noise[MAXIMAGES]; /* 1000*base noise value (ADUs) */
char reserve2[(32-13-MAXIMAGES)*sizeof(INT32)];
/* More statistics (256) */
UINT16 percentile[128];
/* Goniostat parameters (128 bytes) */
INT32 xtal_to_detector; /* 1000*distance in millimeters */
INT32 beam_x; /* 1000*x beam position (pixels) */
INT32 beam_y; /* 1000*y beam position (pixels) */
INT32 integration_time; /* integration time in milliseconds */
INT32 exposure_time; /* exposure time in milliseconds */
INT32 readout_time; /* readout time in milliseconds */
INT32 nreads; /* number of readouts to get this image */
INT32 start_twotheta; /* 1000*two_theta angle */
INT32 start_omega; /* 1000*omega angle */
INT32 start_chi; /* 1000*chi angle */
INT32 start_kappa; /* 1000*kappa angle */
INT32 start_phi; /* 1000*phi angle */
INT32 start_delta; /* 1000*delta angle */
INT32 start_gamma; /* 1000*gamma angle */
INT32 start_xtal_to_detector; /* 1000*distance in mm (dist in um)*/
INT32 end_twotheta; /* 1000*two_theta angle */
INT32 end_omega; /* 1000*omega angle */
INT32 end_chi; /* 1000*chi angle */
INT32 end_kappa; /* 1000*kappa angle */
INT32 end_phi; /* 1000*phi angle */
INT32 end_delta; /* 1000*delta angle */
INT32 end_gamma; /* 1000*gamma angle */
INT32 end_xtal_to_detector; /* 1000*distance in mm (dist in um)*/
INT32 rotation_axis; /* active rotation axis */
INT32 rotation_range; /* 1000*rotation angle */
INT32 detector_rotx; /* 1000*rotation of detector around X */
INT32 detector_roty; /* 1000*rotation of detector around Y */
INT32 detector_rotz; /* 1000*rotation of detector around Z */
char reserve3[(32-28)*sizeof(INT32)];
/* Detector parameters (128 bytes) */
INT32 detector_type; /* detector type */
INT32 pixelsize_x; /* pixel size (nanometers) */
INT32 pixelsize_y; /* pixel size (nanometers) */
INT32 mean_bias; /* 1000*mean bias value */
INT32 photons_per_100adu; /* photons / 100 ADUs */
INT32 measured_bias[MAXIMAGES]; /* 1000*mean bias value for each image*/
INT32 measured_temperature[MAXIMAGES]; /* Temperature of each
detector in milliKelvins */
INT32 measured_pressure[MAXIMAGES]; /* Pressure of each chamber
in microTorr */
/* Retired reserve4 when MAXIMAGES set to 9 from 16 and
two fields removed, and temp and pressure added
char reserve4[(32-(5+3*MAXIMAGES))*sizeof(INT32)]
*/
/* X-ray source and optics parameters (128 bytes) */
/* X-ray source parameters (8*4 bytes) */
INT32 source_type; /* (code) - target, synch. etc */
INT32 source_dx; /* Optics param. - (size microns) */
INT32 source_dy; /* Optics param. - (size microns) */
INT32 source_wavelength; /* wavelength (femtoMeters) */
INT32 source_power; /* (Watts) */
INT32 source_voltage; /* (Volts) */
INT32 source_current; /* (microAmps) */
INT32 source_bias; /* (Volts) */
INT32 source_polarization_x; /* () */
INT32 source_polarization_y; /* () */
char reserve_source[4*sizeof(INT32)];
/* X-ray optics_parameters (8*4 bytes) */
INT32 optics_type; /* Optics type (code)*/
INT32 optics_dx; /* Optics param. - (size microns) */
INT32 optics_dy; /* Optics param. - (size microns) */
INT32 optics_wavelength; /* Optics param. - (size microns) */
INT32 optics_dispersion; /* Optics param. - (*10E6) */
INT32 optics_crossfire_x; /* Optics param. - (microRadians) */
INT32 optics_crossfire_y; /* Optics param. - (microRadians) */
INT32 optics_angle; /* Optics param. - (monoch.
2theta - microradians) */
INT32 optics_polarization_x; /* () */
INT32 optics_polarization_y; /* () */
char reserve_optics[4*sizeof(INT32)];
char reserve5[((32-28)*sizeof(INT32))];
/* File parameters (1024 bytes) */
char filetitle[128]; /* Title */
char filepath[128]; /* path name for data file */
char filename[64]; /* name of data file */
char acquire_timestamp[32]; /* date and time of acquisition */
char header_timestamp[32]; /* date and time of header update */
char save_timestamp[32]; /* date and time file saved */
char file_comments[512]; /* comments, use as desired */
char reserve6[1024-(128+128+64+(3*32)+512)];
/* Dataset parameters (512 bytes) */
char dataset_comments[512]; /* comments, used as desired */
/* pad out to 3072 bytes */
char pad[3072-(256+128+256+(3*128)+1024+512)];
} frame_header;
"""
class marheaderreader:
"""
Class to sit and read a series of images (makes format etc only once)
"""
def __init__(self):
"""
Initialise internal stuff
"""
self.names , self.fmt = make_format(cdefinition)
def get_header(self,filename):
"""
Reads a header from file filename
"""
h=read_mar_header(filename)
dict = interpret_header(h,self.fmt,self.names)
# Append ESRF formatted stuff
items = self.readesrfstring(dict["dataset_comments[512]"])
for pair in items:
dict[pair[0]]=pair[1]
items = self.readesrfstring(dict["file_comments[512]"])
for pair in items:
dict[pair[0]]=pair[1]
dict["pixelsize_x_mm"]= str(float(dict["pixelsize_x"])/1e6)
dict["pixelsize_y_mm"]= str(float(dict["pixelsize_y"])/1e6)
dict["integration_time_sec"]= str(float(dict["integration_time"])/1e3)
dict["beam_y_mm"]= str(float(dict["pixelsize_y_mm"])*
float(dict["beam_y"])/1000.)
dict["beam_x_mm"]= str(float(dict["pixelsize_x_mm"])*
float(dict["beam_x"])/1000.)
return dict
def readesrfstring(self,s):
"""
Interpret the so called "esrf format" header lines
which are in comment sections
"""
s=s.replace("\000","")
items = filter(None, [len(x)>1 and x or None for x in [
item.split("=") for item in s.split(";")]])
return items
if __name__=="__main__":
"""
Make a little program to process files
"""
import sys
print "Starting"
names,fmt = make_format(cdefinition)
print "Names and format made"
h = read_mar_header(sys.argv[1])
print "Read header, interpreting"
d = interpret_header(h,fmt,names)
printed = {}
for name in names:
if printed.has_key(name):
continue
print name,":",d[name]
printed[name]=1
```
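Because `make_format` only looks at `type name;` declarations (expanding `name[n]` into n repeated slots) and `interpret_header` folds repeated names back into lists while joining `char` runs into strings, the pair can be exercised on a toy struct without a real Mar image file. A small self-contained check in the same Python 2 style as the module (the struct layout is invented purely for illustration):
```python
# Illustration only: a made-up three-field struct, packed in memory and then
# decoded with the helpers defined above.
import struct

tiny_cdef = """
typedef struct tiny_header_type {
        UINT32  magic_number;    /* single 4-byte slot          */
        UINT16  percentile[2];   /* expands to two UINT16 slots */
        char    name[4];         /* char run re-joined to a str */
} tiny_header;
"""

names, fmt = make_format(tiny_cdef)        # fmt == "IHHcccc"
raw = struct.pack(fmt, 42, 10, 90, "m", "a", "r", "!")
decoded = interpret_header(raw, fmt, names)
print decoded["magic_number"]              # 42
print decoded["percentile[2]"]             # [10, 90]
print decoded["name[4]"]                   # "mar!"
```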
#### File: application/group/group_reflections.py
```python
from __future__ import absolute_import, division, print_function
from six.moves import range
from xfel.merging.application.worker import worker
from dials.array_family import flex
from xfel.merging.application.reflection_table_utils import reflection_table_utils
from xfel.merging.application.utils.memory_usage import get_memory_usage
class hkl_group(worker):
'''For each asu hkl, gather all of its measurements from all ranks at a single rank, while trying to evenly distribute asu HKLs over the ranks.'''
def __init__(self, params, mpi_helper=None, mpi_logger=None):
super(hkl_group, self).__init__(params=params, mpi_helper=mpi_helper, mpi_logger=mpi_logger)
def __repr__(self):
return "Group symmetry-reduced HKLs"
def distribute_reflection_table(self):
'''Create a reflection table for storing reflections distributed over hkl chunks'''
table = flex.reflection_table()
table['miller_index_asymmetric'] = flex.miller_index()
table['intensity.sum.value'] = flex.double()
table['intensity.sum.variance'] = flex.double()
table['exp_id'] = flex.std_string()
return table
def run(self, experiments, reflections):
self.logger.log_step_time("GROUP")
        # set up hkl chunks to be used for all-to-all; every available rank participates in all-to-all, even a rank that doesn't load any data
self.logger.log_step_time("SETUP_CHUNKS")
self.setup_hkl_chunks()
self.logger.log_step_time("SETUP_CHUNKS", True)
# for the ranks, which have loaded the data, distribute the reflections over the hkl chunks
self.logger.log_step_time("DISTRIBUTE_OVER_CHUNKS")
self.distribute_reflections_over_hkl_chunks(reflections=reflections)
self.logger.log_step_time("DISTRIBUTE_OVER_CHUNKS", True)
# run all-to-all
if self.params.parallel.a2a == 1: # 1 means: the number of slices in each chunk is 1, i.e. alltoall is done on the whole chunks
alltoall_reflections = self.get_reflections_from_alltoall()
else: # do alltoall on chunk slices - useful if the run-time memory is not sufficient to do alltoall on the whole chunks
alltoall_reflections = self.get_reflections_from_alltoall_sliced(number_of_slices=self.params.parallel.a2a)
self.logger.log_step_time("SORT")
self.logger.log("Sorting consolidated reflection table...")
alltoall_reflections.sort('miller_index_asymmetric')
self.logger.log_step_time("SORT", True)
self.logger.log_step_time("GROUP", True)
return experiments, alltoall_reflections
def setup_hkl_chunks(self):
'''Set up a list of reflection tables, or chunks, for distributing reflections'''
# split the full miller set into chunks; the number of chunks is equal to the number of ranks
import numpy as np
self.hkl_split_set = np.array_split(self.params.scaling.miller_set.indices(), self.mpi_helper.size)
# initialize a list of hkl chunks - reflection tables to store distributed reflections
self.hkl_chunks = []
for i in range(len(self.hkl_split_set)):
self.hkl_chunks.append(self.distribute_reflection_table())
def distribute_reflections_over_hkl_chunks(self, reflections):
'''Distribute reflections, according to their HKLs, over pre-set HKL chunks'''
total_reflection_count = reflections.size()
total_distributed_reflection_count = 0
if total_reflection_count > 0:
# set up two lists to be passed to the C++ extension: HKLs and chunk ids. It's basically a hash table to look up chunk ids by HKLs
hkl_list = flex.miller_index()
chunk_id_list = flex.int()
for i in range(len(self.hkl_split_set)):
for j in range(len(self.hkl_split_set[i])):
hkl = (self.hkl_split_set[i][j][0], self.hkl_split_set[i][j][1], self.hkl_split_set[i][j][2])
hkl_list.append(hkl)
chunk_id_list.append(i)
# distribute reflections over hkl chunks, using a C++ extension
from xfel.merging import get_hkl_chunks_cpp
get_hkl_chunks_cpp(reflections, hkl_list, chunk_id_list, self.hkl_chunks)
for chunk in self.hkl_chunks:
total_distributed_reflection_count += len(chunk)
self.logger.log("Distributed %d out of %d reflections"%(total_distributed_reflection_count, total_reflection_count))
self.logger.log("Memory usage: %d MB"%get_memory_usage())
reflections.clear()
def get_reflections_from_alltoall(self):
'''Use MPI alltoall method to gather all reflections with the same asu hkl from all ranks at a single rank'''
self.logger.log_step_time("ALL-TO-ALL")
self.logger.log("Executing MPI all-to-all...")
received_hkl_chunks = self.mpi_helper.comm.alltoall(self.hkl_chunks)
self.logger.log("Received %d hkl chunks after all-to-all"%len(received_hkl_chunks))
self.logger.log_step_time("ALL-TO-ALL", True)
self.logger.log_step_time("CONSOLIDATE")
self.logger.log("Consolidating reflection tables...")
result_reflections = flex.reflection_table()
for chunk in received_hkl_chunks:
result_reflections.extend(chunk)
self.logger.log_step_time("CONSOLIDATE", True)
return result_reflections
def get_reflections_from_alltoall_sliced(self, number_of_slices):
'''Split each hkl chunk into N slices. This is needed to address the MPI alltoall memory problem'''
result_reflections = self.distribute_reflection_table() # the total reflection table, which this rank will receive after all slices of alltoall
list_of_sliced_hkl_chunks = [] # if self.hkl_chunks is [A,B,C...], this list will be [[A1,A2,...,An], [B1,B2,...,Bn], [C1,C2,...,Cn], ...], where n is the number of chunk slices
for i in range(len(self.hkl_chunks)):
hkl_chunk_slices = []
for chunk_slice in reflection_table_utils.get_next_reflection_table_slice(self.hkl_chunks[i], number_of_slices, self.distribute_reflection_table):
hkl_chunk_slices.append(chunk_slice)
list_of_sliced_hkl_chunks.append(hkl_chunk_slices)
self.logger.log("Ready for all-to-all...")
self.logger.log("Memory usage: %d MB"%get_memory_usage())
for j in range(number_of_slices):
hkl_chunks_for_alltoall = list()
for i in range(len(self.hkl_chunks)):
hkl_chunks_for_alltoall.append(list_of_sliced_hkl_chunks[i][j]) # [Aj,Bj,Cj...]
self.logger.log_step_time("ALL-TO-ALL")
self.logger.log("Executing MPI all-to-all...")
self.logger.log("Memory usage: %d MB"%get_memory_usage())
            received_hkl_chunks = self.mpi_helper.comm.alltoall(hkl_chunks_for_alltoall)
self.logger.log("After all-to-all received %d hkl chunks" %len(received_hkl_chunks))
self.logger.log_step_time("ALL-TO-ALL", True)
self.logger.log_step_time("CONSOLIDATE")
self.logger.log("Consolidating reflection tables...")
for chunk in received_hkl_chunks:
result_reflections.extend(chunk)
self.logger.log_step_time("CONSOLIDATE", True)
return result_reflections
if __name__ == '__main__':
from xfel.merging.application.worker import exercise_worker
exercise_worker(hkl_group)
```
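Stripped of MPI and the dials `flex` types, the grouping strategy above amounts to: split the full list of asymmetric-unit HKLs into one chunk per rank with `numpy.array_split`, build an HKL-to-chunk lookup, and route every measurement to the chunk that owns its HKL, so that after the all-to-all each rank holds every measurement of its HKLs. A plain-numpy sketch of that bookkeeping (rank count, HKLs and intensities are invented for illustration):
```python
# Sketch of the chunking bookkeeping only; no MPI and no flex reflection tables.
import numpy as np

n_ranks = 3
full_miller_set = [(1, 0, 0), (1, 1, 0), (1, 1, 1), (2, 0, 0), (2, 1, 0)]

# One chunk of HKLs per rank, mirroring setup_hkl_chunks().
hkl_split_set = np.array_split(np.array(full_miller_set), n_ranks)
chunk_of_hkl = {
    tuple(int(v) for v in hkl): chunk_id
    for chunk_id, chunk in enumerate(hkl_split_set)
    for hkl in chunk
}

# Route (hkl, intensity) measurements to the chunk owning their HKL,
# mirroring distribute_reflections_over_hkl_chunks().
measurements = [((1, 1, 0), 7.5), ((2, 0, 0), 3.1), ((1, 1, 0), 8.2)]
chunks = [[] for _ in range(n_ranks)]
for hkl, intensity in measurements:
    chunks[chunk_of_hkl[hkl]].append((hkl, intensity))

print(chunks)  # every measurement of a given HKL lands in the same chunk
```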
#### File: algorithms/indexing/stills_indexer.py
```python
from __future__ import absolute_import, division, print_function
import copy
import math
import logging
import libtbx
from dxtbx.model.experiment_list import Experiment, ExperimentList
from dials.array_family import flex
from dials.algorithms.indexing.indexer import Indexer
from dials.algorithms.indexing.known_orientation import IndexerKnownOrientation
from dials.algorithms.indexing.lattice_search import BasisVectorSearch, LatticeSearch
from dials.algorithms.indexing.nave_parameters import NaveParameters
from dials.algorithms.indexing import DialsIndexError, DialsIndexRefineError
logger = logging.getLogger(__name__)
def calc_2D_rmsd_and_displacements(reflections):
displacements = flex.vec2_double(
reflections["xyzobs.px.value"].parts()[0],
reflections["xyzobs.px.value"].parts()[1],
) - flex.vec2_double(
reflections["xyzcal.px"].parts()[0], reflections["xyzcal.px"].parts()[1]
)
rmsd = math.sqrt(flex.mean(displacements.dot(displacements)))
return rmsd, displacements
def plot_displacements(reflections, predictions, experiments):
rmsd, displacements = calc_2D_rmsd_and_displacements(predictions)
from matplotlib import pyplot as plt
plt.figure()
for cv in displacements:
plt.plot([cv[0]], [-cv[1]], "r.")
plt.title(" %d spots, r.m.s.d. %5.2f pixels" % (len(displacements), rmsd))
plt.axes().set_aspect("equal")
plt.show()
plt.close()
plt.figure()
sz1, sz2 = experiments[0].detector[0].get_image_size()
for item, cv in zip(predictions, displacements):
plt.plot([item["xyzcal.px"][0]], [sz1 - item["xyzcal.px"][1]], "r.")
plt.plot([item["xyzobs.px.value"][0]], [sz1 - item["xyzobs.px.value"][1]], "g.")
plt.plot(
[item["xyzcal.px"][0], item["xyzcal.px"][0] + 10.0 * cv[0]],
[sz1 - item["xyzcal.px"][1], sz1 - item["xyzcal.px"][1] - 10.0 * cv[1]],
"r-",
)
plt.xlim([0, experiments[0].detector[0].get_image_size()[0]])
plt.ylim([0, experiments[0].detector[0].get_image_size()[1]])
plt.title(" %d spots, r.m.s.d. %5.2f pixels" % (len(displacements), rmsd))
plt.axes().set_aspect("equal")
plt.show()
plt.close()
def e_refine(params, experiments, reflections, graph_verbose=False):
# Stills-specific parameters we always want
assert params.refinement.reflections.outlier.algorithm in (
None,
"null",
), (
"Cannot index, set refinement.reflections.outlier.algorithm=null"
) # we do our own outlier rejection
from dials.algorithms.refinement.refiner import RefinerFactory
refiner = RefinerFactory.from_parameters_data_experiments(
params, reflections, experiments
)
refiner.run()
ref_sel = refiner.selection_used_for_refinement()
assert ref_sel.count(True) == len(reflections)
if not graph_verbose:
return refiner
RR = refiner.predict_for_reflection_table(reflections)
plot_displacements(reflections, RR, experiments)
return refiner
class StillsIndexer(Indexer):
""" Class for indexing stills """
def __init__(self, reflections, experiments, params=None):
if params.refinement.reflections.outlier.algorithm in ("auto", libtbx.Auto):
# The stills_indexer provides its own outlier rejection
params.refinement.reflections.outlier.algorithm = "null"
super(StillsIndexer, self).__init__(reflections, experiments, params)
def index(self):
        # most of this is the same as dials.algorithms.indexing.indexer.indexer_base.index(), with some
        # stills-specific modifications: don't re-index after choosing the best orientation matrix (use
        # the indexing from that step directly), and don't use macrocycles of refinement after indexing.
# 2017 update: do accept multiple lattices per shot
experiments = ExperimentList()
while True:
self.d_min = self.params.refinement_protocol.d_min_start
max_lattices = self.params.multiple_lattice_search.max_lattices
if max_lattices is not None and len(experiments) >= max_lattices:
break
if len(experiments) > 0:
cutoff_fraction = (
self.params.multiple_lattice_search.recycle_unindexed_reflections_cutoff
)
d_spacings = 1 / self.reflections["rlp"].norms()
d_min_indexed = flex.min(d_spacings.select(self.indexed_reflections))
min_reflections_for_indexing = cutoff_fraction * len(
self.reflections.select(d_spacings > d_min_indexed)
)
crystal_ids = self.reflections.select(d_spacings > d_min_indexed)["id"]
if (crystal_ids == -1).count(True) < min_reflections_for_indexing:
logger.info(
"Finish searching for more lattices: %i unindexed reflections remaining."
% (min_reflections_for_indexing)
)
break
n_lattices_previous_cycle = len(experiments)
# index multiple lattices per shot
if len(experiments) == 0:
experiments.extend(self.find_lattices())
if len(experiments) == 0:
raise DialsIndexError("No suitable lattice could be found.")
else:
try:
new = self.find_lattices()
experiments.extend(new)
except Exception as e:
logger.info("Indexing remaining reflections failed")
logger.debug(
"Indexing remaining reflections failed, exception:\n" + str(e)
)
# reset reflection lattice flags
# the lattice a given reflection belongs to: a value of -1 indicates
# that a reflection doesn't belong to any lattice so far
self.reflections["id"] = flex.int(len(self.reflections), -1)
self.index_reflections(experiments, self.reflections)
if len(experiments) == n_lattices_previous_cycle:
# no more lattices found
break
if (
not self.params.stills.refine_candidates_with_known_symmetry
and self.params.known_symmetry.space_group is not None
):
self._apply_symmetry_post_indexing(
experiments, self.reflections, n_lattices_previous_cycle
)
# discard nearly overlapping lattices on the same shot
if self._check_have_similar_crystal_models(experiments):
break
self.indexed_reflections = self.reflections["id"] > -1
if self.d_min is None:
sel = self.reflections["id"] <= -1
else:
sel = flex.bool(len(self.reflections), False)
lengths = 1 / self.reflections["rlp"].norms()
isel = (lengths >= self.d_min).iselection()
sel.set_selected(isel, True)
sel.set_selected(self.reflections["id"] > -1, False)
self.unindexed_reflections = self.reflections.select(sel)
reflections_for_refinement = self.reflections.select(
self.indexed_reflections
)
if len(self.params.stills.isoforms) > 0:
logger.info("")
logger.info("#" * 80)
logger.info("Starting refinement")
logger.info("#" * 80)
logger.info("")
isoform_experiments = ExperimentList()
isoform_reflections = flex.reflection_table()
# Note, changes to params after initial indexing. Cannot use tie to target when fixing the unit cell.
self.all_params.refinement.reflections.outlier.algorithm = "null"
self.all_params.refinement.parameterisation.crystal.fix = "cell"
self.all_params.refinement.parameterisation.crystal.unit_cell.restraints.tie_to_target = (
[]
)
for expt_id, experiment in enumerate(experiments):
reflections = reflections_for_refinement.select(
reflections_for_refinement["id"] == expt_id
)
reflections["id"] = flex.int(len(reflections), 0)
refiners = []
for isoform in self.params.stills.isoforms:
iso_experiment = copy.deepcopy(experiment)
crystal = iso_experiment.crystal
if (
isoform.lookup_symbol
!= crystal.get_space_group().type().lookup_symbol()
):
logger.info(
"Crystal isoform lookup_symbol %s does not match isoform %s lookup_symbol %s"
% (
crystal.get_space_group().type().lookup_symbol(),
isoform.name,
isoform.lookup_symbol,
)
)
continue
crystal.set_B(isoform.cell.fractionalization_matrix())
logger.info("Refining isoform %s" % isoform.name)
refiners.append(
e_refine(
params=self.all_params,
experiments=ExperimentList([iso_experiment]),
reflections=reflections,
graph_verbose=False,
)
)
if len(refiners) == 0:
raise DialsIndexError(
"No isoforms had a lookup symbol that matched"
)
positional_rmsds = [
math.sqrt(P.rmsds()[0] ** 2 + P.rmsds()[1] ** 2)
for P in refiners
]
logger.info(
"Positional rmsds for all isoforms:" + str(positional_rmsds)
)
minrmsd_mm = min(positional_rmsds)
minindex = positional_rmsds.index(minrmsd_mm)
logger.info(
"The smallest rmsd is %5.1f um from isoform %s"
% (
1000.0 * minrmsd_mm,
self.params.stills.isoforms[minindex].name,
)
)
if self.params.stills.isoforms[minindex].rmsd_target_mm is not None:
logger.info(
"Asserting %f < %f"
% (
minrmsd_mm,
self.params.stills.isoforms[minindex].rmsd_target_mm,
)
)
assert (
minrmsd_mm
< self.params.stills.isoforms[minindex].rmsd_target_mm
)
logger.info(
"Acceptable rmsd for isoform %s."
% (self.params.stills.isoforms[minindex].name)
)
if len(self.params.stills.isoforms) == 2:
logger.info(
"Rmsd gain over the other isoform %5.1f um."
% (1000.0 * abs(positional_rmsds[0] - positional_rmsds[1]))
)
R = refiners[minindex]
# Now one last check to see if direct beam is out of bounds
if self.params.stills.isoforms[minindex].beam_restraint is not None:
from scitbx import matrix
refined_beam = matrix.col(
R.get_experiments()[0]
.detector[0]
.get_beam_centre_lab(experiments[0].beam.get_s0())[0:2]
)
known_beam = matrix.col(
self.params.stills.isoforms[minindex].beam_restraint
)
logger.info(
"Asserting difference in refined beam center and expected beam center %f < %f"
% (
(refined_beam - known_beam).length(),
self.params.stills.isoforms[minindex].rmsd_target_mm,
)
)
assert (
(refined_beam - known_beam).length()
< self.params.stills.isoforms[minindex].rmsd_target_mm
)
# future--circle of confusion could be given as a separate length in mm instead of reusing rmsd_target
experiment = R.get_experiments()[0]
experiment.crystal.identified_isoform = self.params.stills.isoforms[
minindex
].name
isoform_experiments.append(experiment)
reflections["id"] = flex.int(len(reflections), expt_id)
isoform_reflections.extend(reflections)
experiments = isoform_experiments
reflections_for_refinement = isoform_reflections
if self.params.refinement_protocol.mode == "repredict_only":
from dials.algorithms.indexing.nave_parameters import NaveParameters
from dials.algorithms.refinement.prediction.managed_predictors import (
ExperimentsPredictorFactory,
)
refined_experiments, refined_reflections = (
experiments,
reflections_for_refinement,
)
ref_predictor = ExperimentsPredictorFactory.from_experiments(
experiments,
force_stills=True,
spherical_relp=self.all_params.refinement.parameterisation.spherical_relp_model,
)
ref_predictor(refined_reflections)
refined_reflections["delpsical2"] = (
refined_reflections["delpsical.rad"] ** 2
)
for expt_id in range(len(refined_experiments)):
refls = refined_reflections.select(
refined_reflections["id"] == expt_id
)
nv = NaveParameters(
params=self.all_params,
experiments=refined_experiments[expt_id : expt_id + 1],
reflections=refls,
refinery=None,
graph_verbose=False,
)
experiments[expt_id].crystal = nv()
ref_predictor = ExperimentsPredictorFactory.from_experiments(
experiments,
force_stills=True,
spherical_relp=self.all_params.refinement.parameterisation.spherical_relp_model,
)
ref_predictor(refined_reflections)
elif self.params.refinement_protocol.mode is None:
refined_experiments, refined_reflections = (
experiments,
reflections_for_refinement,
)
else:
try:
refined_experiments, refined_reflections = self.refine(
experiments, reflections_for_refinement
)
except Exception as e:
s = str(e)
if len(experiments) == 1:
                        raise DialsIndexRefineError(s)
logger.info("Refinement failed:")
logger.info(s)
del experiments[-1]
break
self._unit_cell_volume_sanity_check(experiments, refined_experiments)
self.refined_reflections = refined_reflections.select(
refined_reflections["id"] > -1
)
for i, expt in enumerate(self.experiments):
ref_sel = self.refined_reflections.select(
self.refined_reflections["imageset_id"] == i
)
ref_sel = ref_sel.select(ref_sel["id"] >= 0)
for i_expt in set(ref_sel["id"]):
refined_expt = refined_experiments[i_expt]
expt.detector = refined_expt.detector
expt.beam = refined_expt.beam
expt.goniometer = refined_expt.goniometer
expt.scan = refined_expt.scan
refined_expt.imageset = expt.imageset
if not (
self.all_params.refinement.parameterisation.beam.fix == "all"
and self.all_params.refinement.parameterisation.detector.fix == "all"
):
# Experimental geometry may have changed - re-map centroids to
# reciprocal space
self.reflections.map_centroids_to_reciprocal_space(self.experiments)
# update for next cycle
experiments = refined_experiments
self.refined_experiments = refined_experiments
if self.refined_experiments is None:
raise DialsIndexRefineError("None of the experiments could refine.")
# discard experiments with zero reflections after refinement
id_set = set(self.refined_reflections["id"])
if len(id_set) < len(self.refined_experiments):
filtered_refined_reflections = flex.reflection_table()
for i in range(len(self.refined_experiments)):
if i not in id_set:
del self.refined_experiments[i]
for old, new in zip(sorted(id_set), range(len(id_set))):
subset = self.refined_reflections.select(
self.refined_reflections["id"] == old
)
subset["id"] = flex.int(len(subset), new)
filtered_refined_reflections.extend(subset)
self.refined_reflections = filtered_refined_reflections
if len(self.refined_experiments) > 1:
from dials.algorithms.indexing.compare_orientation_matrices import (
rotation_matrix_differences,
)
logger.info(
rotation_matrix_differences(self.refined_experiments.crystals())
)
logger.info("Final refined crystal models:")
for i, crystal_model in enumerate(self.refined_experiments.crystals()):
n_indexed = 0
for _ in experiments.where(crystal=crystal_model):
n_indexed += (self.reflections["id"] == i).count(True)
logger.info("model %i (%i reflections):" % (i + 1, n_indexed))
logger.info(crystal_model)
if (
"xyzcal.mm" in self.refined_reflections
): # won't be there if refine_all_candidates = False and no isoforms
self._xyzcal_mm_to_px(self.experiments, self.refined_reflections)
def experiment_list_for_crystal(self, crystal):
experiments = ExperimentList()
for imageset in self.experiments.imagesets():
experiments.append(
Experiment(
imageset=imageset,
beam=imageset.get_beam(),
detector=imageset.get_detector(),
goniometer=imageset.get_goniometer(),
scan=imageset.get_scan(),
crystal=crystal,
)
)
return experiments
def choose_best_orientation_matrix(self, candidate_orientation_matrices):
logger.info("*" * 80)
logger.info("Selecting the best orientation matrix")
logger.info("*" * 80)
class CandidateInfo(libtbx.group_args):
pass
candidates = []
params = copy.deepcopy(self.all_params)
for icm, cm in enumerate(candidate_orientation_matrices):
if icm >= self.params.basis_vector_combinations.max_refine:
break
# Index reflections in P1
sel = self.reflections["id"] == -1
refl = self.reflections.select(sel)
experiments = self.experiment_list_for_crystal(cm)
self.index_reflections(experiments, refl)
indexed = refl.select(refl["id"] >= 0)
indexed = indexed.select(indexed.get_flags(indexed.flags.indexed))
# If target symmetry supplied, try to apply it. Then, apply the change of basis to the reflections
# indexed in P1 to the target setting
if (
self.params.stills.refine_candidates_with_known_symmetry
and self.params.known_symmetry.space_group is not None
):
new_crystal, cb_op_to_primitive = self._symmetry_handler.apply_symmetry(
cm
)
if new_crystal is None:
logger.info("Cannot convert to target symmetry, candidate %d", icm)
continue
new_crystal = new_crystal.change_basis(
self._symmetry_handler.cb_op_primitive_inp
)
cm = new_crystal
experiments = self.experiment_list_for_crystal(cm)
if not cb_op_to_primitive.is_identity_op():
indexed["miller_index"] = cb_op_to_primitive.apply(
indexed["miller_index"]
)
if self._symmetry_handler.cb_op_primitive_inp is not None:
indexed[
"miller_index"
] = self._symmetry_handler.cb_op_primitive_inp.apply(
indexed["miller_index"]
)
if params.indexing.stills.refine_all_candidates:
try:
logger.info(
"$$$ stills_indexer::choose_best_orientation_matrix, candidate %d initial outlier identification",
icm,
)
acceptance_flags = self.identify_outliers(
params, experiments, indexed
)
# create a new "indexed" list with outliers thrown out:
indexed = indexed.select(acceptance_flags)
logger.info(
"$$$ stills_indexer::choose_best_orientation_matrix, candidate %d refinement before outlier rejection",
icm,
)
R = e_refine(
params=params,
experiments=experiments,
reflections=indexed,
graph_verbose=False,
)
ref_experiments = R.get_experiments()
# try to improve the outcome with a second round of outlier rejection post-initial refinement:
acceptance_flags = self.identify_outliers(
params, ref_experiments, indexed
)
# insert a round of Nave-outlier rejection on top of the r.m.s.d. rejection
nv0 = NaveParameters(
params=params,
experiments=ref_experiments,
reflections=indexed,
refinery=R,
graph_verbose=False,
)
nv0()
acceptance_flags_nv0 = nv0.nv_acceptance_flags
indexed = indexed.select(acceptance_flags & acceptance_flags_nv0)
logger.info(
"$$$ stills_indexer::choose_best_orientation_matrix, candidate %d after positional and delta-psi outlier rejection",
icm,
)
R = e_refine(
params=params,
experiments=ref_experiments,
reflections=indexed,
graph_verbose=False,
)
ref_experiments = R.get_experiments()
nv = NaveParameters(
params=params,
experiments=ref_experiments,
reflections=indexed,
refinery=R,
graph_verbose=False,
)
crystal_model = nv()
assert (
len(crystal_model) == 1
), "$$$ stills_indexer::choose_best_orientation_matrix, Only one crystal at this stage"
crystal_model = crystal_model[0]
# Drop candidates that after refinement can no longer be converted to the known target space group
if (
not self.params.stills.refine_candidates_with_known_symmetry
and self.params.known_symmetry.space_group is not None
):
new_crystal, cb_op_to_primitive = self._symmetry_handler.apply_symmetry(
crystal_model
)
if new_crystal is None:
logger.info(
"P1 refinement yielded model diverged from target, candidate %d",
icm,
)
continue
rmsd, _ = calc_2D_rmsd_and_displacements(
R.predict_for_reflection_table(indexed)
)
except Exception as e:
logger.info(
"Couldn't refine candidate %d, %s: %s",
icm,
e.__class__.__name__,
str(e),
)
else:
logger.info(
"$$$ stills_indexer::choose_best_orientation_matrix, candidate %d done",
icm,
)
candidates.append(
CandidateInfo(
crystal=crystal_model,
green_curve_area=nv.green_curve_area,
ewald_proximal_volume=nv.ewald_proximal_volume(),
n_indexed=len(indexed),
rmsd=rmsd,
indexed=indexed,
experiments=ref_experiments,
)
)
else:
from dials.algorithms.refinement.prediction.managed_predictors import (
ExperimentsPredictorFactory,
)
ref_predictor = ExperimentsPredictorFactory.from_experiments(
experiments,
force_stills=True,
spherical_relp=params.refinement.parameterisation.spherical_relp_model,
)
rmsd, _ = calc_2D_rmsd_and_displacements(ref_predictor(indexed))
candidates.append(
CandidateInfo(
crystal=cm,
n_indexed=len(indexed),
rmsd=rmsd,
indexed=indexed,
experiments=experiments,
)
)
if len(candidates) == 0:
raise DialsIndexError("No suitable indexing solution found")
logger.info("**** ALL CANDIDATES:")
for i, XX in enumerate(candidates):
logger.info("\n****Candidate %d %s", i, XX)
cc = XX.crystal
if hasattr(cc, "get_half_mosaicity_deg"):
logger.info(
" half mosaicity %5.2f deg.", (cc.get_half_mosaicity_deg())
)
logger.info(" domain size %.0f Ang.", (cc.get_domain_size_ang()))
logger.info("\n**** BEST CANDIDATE:")
results = flex.double([c.rmsd for c in candidates])
best = candidates[flex.min_index(results)]
logger.info(best)
if params.indexing.stills.refine_all_candidates:
if best.rmsd > params.indexing.stills.rmsd_min_px:
raise DialsIndexError("RMSD too high, %f" % best.rmsd)
if len(candidates) > 1:
for i in range(len(candidates)):
if i == flex.min_index(results):
continue
if best.ewald_proximal_volume > candidates[i].ewald_proximal_volume:
logger.info(
"Couldn't figure out which candidate is best; picked the one with the best RMSD."
)
best.indexed["entering"] = flex.bool(best.n_indexed, False)
return best.crystal, best.n_indexed
def identify_outliers(self, params, experiments, indexed):
if not params.indexing.stills.candidate_outlier_rejection:
return flex.bool(len(indexed), True)
logger.info("$$$ stills_indexer::identify_outliers")
refiner = e_refine(params, experiments, indexed, graph_verbose=False)
RR = refiner.predict_for_reflection_table(indexed)
px_sz = experiments[0].detector[0].get_pixel_size()
class Match(object):
pass
matches = []
for item in RR.rows():
m = Match()
m.x_obs = item["xyzobs.px.value"][0] * px_sz[0]
m.y_obs = item["xyzobs.px.value"][1] * px_sz[1]
m.x_calc = item["xyzcal.px"][0] * px_sz[0]
m.y_calc = item["xyzcal.px"][1] * px_sz[1]
m.miller_index = item["miller_index"]
matches.append(m)
from rstbx.phil.phil_preferences import indexing_api_defs
import iotbx.phil
hardcoded_phil = iotbx.phil.parse(input_string=indexing_api_defs).extract()
from rstbx.indexing_api.outlier_procedure import OutlierPlotPDF
# comment this in if PDF graph is desired:
# hardcoded_phil.indexing.outlier_detection.pdf = "outlier.pdf"
# new code for outlier rejection inline here
if hardcoded_phil.indexing.outlier_detection.pdf is not None:
hardcoded_phil.__inject__(
"writer", OutlierPlotPDF(hardcoded_phil.indexing.outlier_detection.pdf)
)
# execute Sauter and Poon (2010) algorithm
from rstbx.indexing_api import outlier_detection
od = outlier_detection.find_outliers_from_matches(
matches, verbose=True, horizon_phil=hardcoded_phil
)
if hardcoded_phil.indexing.outlier_detection.pdf is not None:
od.make_graphs(canvas=hardcoded_phil.writer.R.c, left_margin=0.5)
hardcoded_phil.writer.R.c.showPage()
hardcoded_phil.writer.R.c.save()
return od.get_cache_status()
def refine(self, experiments, reflections):
acceptance_flags = self.identify_outliers(
self.all_params, experiments, reflections
)
# create a new "reflections" list with outliers thrown out:
reflections = reflections.select(acceptance_flags)
R = e_refine(
params=self.all_params,
experiments=experiments,
reflections=reflections,
graph_verbose=False,
)
ref_experiments = R.get_experiments()
# try to improve the outcome with a second round of outlier rejection post-initial refinement:
acceptance_flags = self.identify_outliers(
self.all_params, ref_experiments, reflections
)
# insert a round of Nave-outlier rejection on top of the r.m.s.d. rejection
nv0 = NaveParameters(
params=self.all_params,
experiments=ref_experiments,
reflections=reflections,
refinery=R,
graph_verbose=False,
)
nv0()
acceptance_flags_nv0 = nv0.nv_acceptance_flags
reflections = reflections.select(acceptance_flags & acceptance_flags_nv0)
R = e_refine(
params=self.all_params,
experiments=ref_experiments,
reflections=reflections,
graph_verbose=False,
)
ref_experiments = R.get_experiments()
nv = NaveParameters(
params=self.all_params,
experiments=ref_experiments,
reflections=reflections,
refinery=R,
graph_verbose=False,
)
nv()
rmsd, _ = calc_2D_rmsd_and_displacements(
R.predict_for_reflection_table(reflections)
)
matches = R.get_matches()
xyzcal_mm = flex.vec3_double(len(reflections))
xyzcal_mm.set_selected(matches["iobs"], matches["xyzcal.mm"])
reflections["xyzcal.mm"] = xyzcal_mm
reflections.set_flags(matches["iobs"], reflections.flags.used_in_refinement)
reflections["entering"] = flex.bool(len(reflections), False)
if self.all_params.indexing.stills.set_domain_size_ang_value is not None:
for exp in ref_experiments:
exp.crystal.set_domain_size_ang(
self.all_params.indexing.stills.set_domain_size_ang_value
)
if self.all_params.indexing.stills.set_mosaic_half_deg_value is not None:
for exp in ref_experiments:
exp.crystal.set_half_mosaicity_deg(
self.all_params.indexing.stills.set_mosaic_half_deg_value
)
return ref_experiments, reflections
""" Mixin class definitions that override the dials indexing class methods specific to stills """
class StillsIndexerKnownOrientation(IndexerKnownOrientation, StillsIndexer):
pass
class StillsIndexerBasisVectorSearch(StillsIndexer, BasisVectorSearch):
pass
class StillsIndexerLatticeSearch(StillsIndexer, LatticeSearch):
pass
```
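`choose_best_orientation_matrix` ranks every candidate lattice by the positional r.m.s.d. coming out of `calc_2D_rmsd_and_displacements`, i.e. the square root of the mean squared 2D offset between observed and predicted spot centroids in pixels, and keeps the candidate with the smallest value (subject to `rmsd_min_px`). The same quantity written without `flex`, as a numpy sketch with invented centroids:
```python
# Numpy sketch of the 2D r.m.s.d. used above to rank indexing candidates.
import numpy as np

def rmsd_2d(obs_px, calc_px):
    """Root-mean-square 2D displacement (pixels) between observed and
    predicted centroids; also returns the per-spot displacement vectors."""
    d = np.asarray(obs_px, dtype=float)[:, :2] - np.asarray(calc_px, dtype=float)[:, :2]
    return float(np.sqrt((d ** 2).sum(axis=1).mean())), d

obs = [(100.2, 200.4), (310.0, 55.5)]    # invented observed centroids
calc = [(100.0, 200.0), (309.0, 56.0)]   # invented predicted centroids
rmsd, displacements = rmsd_2d(obs, calc)
print(rmsd)  # the candidate with the smallest value of this number wins
```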
#### File: algorithms/integration/image_integrator.py
```python
from __future__ import absolute_import, division, print_function
import logging
import platform
from time import time
import dials.algorithms.integration
logger = logging.getLogger(__name__)
class ProcessorImageBase(object):
""" Processor interface class. """
def __init__(self, manager):
"""
Initialise the processor.
The processor requires a manager class implementing the Manager interface.
This class executes all the workers in separate threads and accumulates the
results to expose to the user.
:param manager: The processing manager
:param params: The phil parameters
"""
self.manager = manager
@property
def executor(self):
"""
Get the executor
:return: The executor
"""
return self.manager.executor
@executor.setter
def executor(self, function):
"""
Set the executor
:param function: The executor
"""
self.manager.executor = function
def process(self):
"""
Do all the processing tasks.
:return: The processing results
"""
from dials.util.mp import multi_node_parallel_map
start_time = time()
self.manager.initialize()
        mp_method = self.manager.params.integration.mp.method
        mp_njobs = self.manager.params.integration.mp.njobs
        mp_nproc = min(len(self.manager), self.manager.params.integration.mp.nproc)
if (
mp_nproc > 1 and platform.system() == "Windows"
): # platform.system() forks which is bad for MPI, so don't use it unless nproc > 1
logger.warning(
"\n"
+ "*" * 80
+ "\n"
+ "Multiprocessing is not available on windows. Setting nproc = 1\n"
+ "*" * 80
+ "\n"
)
mp_nproc = 1
assert mp_nproc > 0, "Invalid number of processors"
logger.info(self.manager.summary())
logger.info(" Using %s with %d parallel job(s)\n" % (mp_method, mp_nproc))
if mp_nproc > 1:
def process_output(result):
for message in result[1]:
logger.log(message.levelno, message.msg)
self.manager.accumulate(result[0])
result[0].reflections = None
result[0].data = None
def execute_task(task):
from dials.util import log
log.config_simple_cached()
result = task()
handlers = logging.getLogger("dials").handlers
assert len(handlers) == 1, "Invalid number of logging handlers"
return result, handlers[0].messages()
multi_node_parallel_map(
func=execute_task,
iterable=list(self.manager.tasks()),
njobs=mp_njobs,
nproc=mp_nproc,
callback=process_output,
method=mp_method,
preserve_order=True,
preserve_exception_message=True,
)
else:
for task in self.manager.tasks():
self.manager.accumulate(task())
self.manager.finalize()
end_time = time()
self.manager.time.user_time = end_time - start_time
result = self.manager.result()
return result, self.manager.time
class Result(object):
"""
A class representing a processing result.
"""
def __init__(self, index, reflections):
"""
Initialise the data.
:param index: The processing job index
:param reflections: The processed reflections
:param data: Other processed data
"""
self.index = index
self.reflections = reflections
class Dataset(object):
def __init__(self, frames, size):
from dials.array_family import flex
self.frames = frames
nframes = frames[1] - frames[0]
self.data = []
self.mask = []
for sz in size:
self.data.append(flex.double(flex.grid(nframes, sz[0], sz[1])))
self.mask.append(flex.bool(flex.grid(nframes, sz[0], sz[1])))
def set_image(self, index, data, mask):
from dials.array_family import flex
for d1, d2 in zip(self.data, data):
h, w = d2.all()
d2.reshape(flex.grid(1, h, w))
d1[index : index + 1, :, :] = d2.as_double()
for m1, m2 in zip(self.mask, mask):
h, w = m2.all()
m2.reshape(flex.grid(1, h, w))
m1[index : index + 1, :, :] = m2
class Task(object):
"""
A class to perform a null task.
"""
def __init__(self, index, frames, reflections, experiments, params, executor):
"""
Initialise the task
:param index: The index of the processing job
:param frames: The frames to process
:param experiments: The list of experiments
:param reflections: The list of reflections
:param params The processing parameters
:param executor: The executor class
"""
self.index = index
self.frames = frames
self.experiments = experiments
self.reflections = reflections
self.params = params
self.executor = executor
def __call__(self):
"""
Do the processing.
:return: The processed data
"""
from dials.model.data import make_image
from dials.model.data import MultiPanelImageVolume
from dials.model.data import ImageVolume
from dials.algorithms.integration.processor import job
# Set the job index
job.index = self.index
# Get the start time
start_time = time()
# Check all reflections have same imageset and get it
exp_id = list(set(self.reflections["id"]))
imageset = self.experiments[exp_id[0]].imageset
for i in exp_id[1:]:
assert (
self.experiments[i].imageset == imageset
), "Task can only handle 1 imageset"
# Get the sub imageset
frame00, frame01 = self.frames
try:
frame10, frame11 = imageset.get_array_range()
except Exception:
frame10, frame11 = (0, len(imageset))
try:
assert frame00 < frame01
assert frame10 < frame11
assert frame00 >= frame10
assert frame01 <= frame11
index0 = frame00 - frame10
index1 = index0 + (frame01 - frame00)
assert index0 < index1
assert index0 >= 0
assert index1 <= len(imageset)
imageset = imageset[index0:index1]
except Exception:
raise RuntimeError("Programmer Error: bad array range")
try:
frame0, frame1 = imageset.get_array_range()
except Exception:
frame0, frame1 = (0, len(imageset))
# Initialise the dataset
image_volume = MultiPanelImageVolume()
for panel in self.experiments[0].detector:
image_volume.add(
ImageVolume(
frame0, frame1, panel.get_image_size()[1], panel.get_image_size()[0]
)
)
# Read all the images into a block of data
read_time = 0.0
for i in range(len(imageset)):
st = time()
image = imageset.get_corrected_data(i)
mask = imageset.get_mask(i)
if self.params.integration.lookup.mask is not None:
                assert len(mask) == len(self.params.integration.lookup.mask), (
"Mask/Image are incorrect size %d %d"
% (len(mask), len(self.params.integration.lookup.mask))
)
mask = tuple(
m1 & m2 for m1, m2 in zip(self.params.integration.lookup.mask, mask)
)
image_volume.set_image(frame0 + i, make_image(image, mask))
read_time += time() - st
del image
del mask
# Process the data
st = time()
data = self.executor.process(image_volume, self.experiments, self.reflections)
process_time = time() - st
# Set the result values
result = Result(self.index, self.reflections)
result.read_time = read_time
result.process_time = process_time
result.total_time = time() - start_time
result.data = data
return result
class ManagerImage(object):
"""
A class to manage processing book-keeping
"""
def __init__(self, experiments, reflections, params):
"""
Initialise the manager.
:param experiments: The list of experiments
:param reflections: The list of reflections
:param params: The phil parameters
"""
# Initialise the callbacks
self.executor = None
# Save some data
self.experiments = experiments
self.reflections = reflections
# Save some parameters
self.params = params
# Set the finalized flag to False
self.finalized = False
# Initialise the timing information
self.time = dials.algorithms.integration.TimingInfo()
def initialize(self):
"""
Initialise the processing
"""
from dials_algorithms_integration_integrator_ext import (
ReflectionManagerPerImage,
)
# Get the start time
start_time = time()
# Ensure the reflections contain bounding boxes
assert "bbox" in self.reflections, "Reflections have no bbox"
# Split the reflections into partials
self._split_reflections()
# Create the reflection manager
frames = self.experiments[0].scan.get_array_range()
self.manager = ReflectionManagerPerImage(frames, self.reflections)
        # Parallel reading of HDF5 from the same handle is not allowed. Python
        # multiprocessing is a bit messed up and uses fork on linux, so we need
        # to close and reopen the file.
for exp in self.experiments:
if exp.imageset.reader().is_single_file_reader():
exp.imageset.reader().nullify_format_instance()
# Set the initialization time
self.time.initialize = time() - start_time
def task(self, index):
"""
Get a task.
"""
return Task(
index=index,
frames=self.manager.frames(index),
reflections=self.manager.split(index),
experiments=self.experiments,
params=self.params,
executor=self.executor,
)
def tasks(self):
"""
Iterate through the tasks.
"""
for i in range(len(self)):
yield self.task(i)
def accumulate(self, result):
"""
Accumulate the results.
"""
self.manager.accumulate(result.index, result.reflections)
if result.data is not None:
self.executor.accumulate(result.index, result.data)
self.time.read += result.read_time
self.time.process += result.process_time
self.time.total += result.total_time
def finalize(self):
"""
Finalize the processing and finish.
"""
# Get the start time
start_time = time()
# Check manager is finished
assert self.manager.finished(), "Manager is not finished"
# Update the time and finalized flag
self.time.finalize = time() - start_time
self.finalized = True
def result(self):
"""
Return the result.
:return: The result
"""
assert self.finalized, "Manager is not finalized"
return self.reflections
def finished(self):
"""
Return if all tasks have finished.
:return: True/False all tasks have finished
"""
return self.finalized and self.manager.finished()
def __len__(self):
"""
Return the number of tasks.
:return: the number of tasks
"""
return len(self.manager)
def summary(self):
return ""
def _split_reflections(self):
"""
Split the reflections into partials or over job boundaries
"""
# Optionally split the reflection table into partials, otherwise,
# split over job boundaries
num_full = len(self.reflections)
self.reflections.split_partials()
num_partial = len(self.reflections)
assert num_partial >= num_full, "Invalid number of partials"
if num_partial > num_full:
logger.info(
" Split %d reflections into %d partial reflections\n"
% (num_full, num_partial)
)
class ProcessorImage(ProcessorImageBase):
""" Top level processor for per image processing. """
def __init__(self, experiments, reflections, params):
""" Initialise the manager and the processor. """
# Create the processing manager
manager = ManagerImage(experiments, reflections, params)
# Initialise the processor
super(ProcessorImage, self).__init__(manager)
class InitializerRot(object):
"""
A pre-processing class for oscillation data.
"""
def __init__(self, experiments, params):
"""
Initialise the pre-processor.
"""
self.experiments = experiments
self.params = params
def __call__(self, reflections):
"""
Do some pre-processing.
"""
from dials.array_family import flex
# Compute some reflection properties
reflections.compute_zeta_multi(self.experiments)
reflections.compute_d(self.experiments)
reflections.compute_bbox(self.experiments)
# Filter the reflections by zeta
mask = flex.abs(reflections["zeta"]) < self.params.filter.min_zeta
reflections.set_flags(mask, reflections.flags.dont_integrate)
# Filter the reflections by powder ring
if self.params.filter.powder_filter is not None:
mask = self.params.filter.powder_filter(reflections["d"])
reflections.set_flags(mask, reflections.flags.in_powder_ring)
class FinalizerRot(object):
"""
A post-processing class for oscillation data.
"""
def __init__(self, experiments, params):
"""
Initialise the post processor.
"""
self.experiments = experiments
self.params = params
def __call__(self, reflections):
"""
Do some post processing.
"""
# Compute the corrections
reflections.compute_corrections(self.experiments)
class ImageIntegratorExecutor(object):
def __init__(self):
pass
def process(self, image_volume, experiments, reflections):
from dials.algorithms.integration.processor import job
# Compute the partiality
reflections.compute_partiality(experiments)
# Get some info
full_value = 0.997
fully_recorded = reflections["partiality"] > full_value
npart = fully_recorded.count(False)
nfull = fully_recorded.count(True)
nice = reflections.get_flags(reflections.flags.in_powder_ring).count(True)
nint = reflections.get_flags(reflections.flags.dont_integrate).count(False)
ntot = len(reflections)
# Write some output
logger.info("")
logger.info(" Beginning integration job %d" % job.index)
logger.info("")
logger.info(
" Frames: %d -> %d" % (image_volume.frame0(), image_volume.frame1())
)
logger.info("")
logger.info(" Number of reflections")
logger.info(" Partial: %d" % npart)
logger.info(" Full: %d" % nfull)
logger.info(" In ice ring: %d" % nice)
logger.info(" Integrate: %d" % nint)
logger.info(" Total: %d" % ntot)
logger.info("")
# Print a histogram of reflections on frames
if image_volume.frame1() - image_volume.frame0() > 1:
logger.info(
" The following histogram shows the number of reflections predicted"
)
logger.info(" to have all or part of their intensity on each frame.")
logger.info("")
logger.info(frame_hist(reflections["bbox"], prefix=" ", symbol="*"))
logger.info("")
# Compute the shoebox mask
reflections.compute_mask(experiments=experiments, image_volume=image_volume)
# Compute the background
reflections.compute_background(
experiments=experiments, image_volume=image_volume
)
# Compute the summed intensity
reflections.compute_summed_intensity(image_volume=image_volume)
# Compute the centroid
reflections.compute_centroid(experiments=experiments, image_volume=image_volume)
# Get some reflection info
image_volume.update_reflection_info(reflections)
# Print some info
fmt = " Integrated % 5d (sum) + % 5d (prf) / % 5d reflections"
nsum = reflections.get_flags(reflections.flags.integrated_sum).count(True)
nprf = reflections.get_flags(reflections.flags.integrated_prf).count(True)
ntot = len(reflections)
logger.info(fmt % (nsum, nprf, ntot))
class ImageIntegrator(object):
"""
A class that does integration directly on the image skipping the shoebox
creation step.
"""
def __init__(self, experiments, reflections, params):
"""
Initialize the integrator
:param experiments: The experiment list
:param reflections: The reflections to process
:param params: The parameters to use
"""
# Check all reflections have same imageset and get it
imageset = experiments[0].imageset
for expr in experiments:
            assert expr.imageset == imageset, "All experiments must share an imageset"
# Save some stuff
self.experiments = experiments
self.reflections = reflections
self.params = Parameters.from_phil(params.integration)
self.profile_model_report = None
self.integration_report = None
def integrate(self):
"""
Integrate the data
"""
from dials.algorithms.integration.report import IntegrationReport
from dials.util.command_line import heading
# Init the report
self.profile_model_report = None
self.integration_report = None
# Print the summary
logger.info(
"=" * 80
+ (
"\n\n"
"Processing reflections\n\n"
" Processing the following experiments:\n"
"\n"
" Experiments: %d\n"
" Beams: %d\n"
" Detectors: %d\n"
" Goniometers: %d\n"
" Scans: %d\n"
" Crystals: %d\n"
" Imagesets: %d\n"
)
% (
len(self.experiments),
len(self.experiments.beams()),
len(self.experiments.detectors()),
len(self.experiments.goniometers()),
len(self.experiments.scans()),
len(self.experiments.crystals()),
len(self.experiments.imagesets()),
)
)
# Print a heading
logger.info("=" * 80)
logger.info("")
logger.info(heading("Integrating reflections"))
logger.info("")
# Initialise the processing
initialize = InitializerRot(self.experiments, self.params)
initialize(self.reflections)
        # Construct the image integrator processor
processor = ProcessorImage(self.experiments, self.reflections, self.params)
processor.executor = ImageIntegratorExecutor()
# Do the processing
self.reflections, time_info = processor.process()
# Finalise the processing
finalize = FinalizerRot(self.experiments, self.params)
finalize(self.reflections)
# Create the integration report
self.integration_report = IntegrationReport(self.experiments, self.reflections)
logger.info("")
logger.info(self.integration_report.as_str(prefix=" "))
# Print the time info
logger.info(str(time_info))
logger.info("")
# Return the reflections
return self.reflections
```
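The processing layer above is a manager/task split: `ManagerImage` cuts the scan into per-frame-range `Task` objects, each task reads its images into an image volume and hands them to the executor, and the manager accumulates the per-task results, optionally fanning the tasks out through `multi_node_parallel_map`. The control flow reduced to plain Python with dummy work (all names below are invented for the sketch and are not dials API):
```python
# Control-flow sketch only; the real classes carry reflection tables, image
# volumes and timing information, none of which is reproduced here.
class ToyTask(object):
    def __init__(self, index, frames):
        self.index = index
        self.frames = frames

    def __call__(self):
        # Stand-in for "read these frames and run the executor on them".
        return self.index, sum(self.frames)

class ToyManager(object):
    def __init__(self, frame_ranges):
        self.frame_ranges = frame_ranges
        self.results = {}

    def tasks(self):
        for index, frames in enumerate(self.frame_ranges):
            yield ToyTask(index, frames)

    def accumulate(self, result):
        index, value = result
        self.results[index] = value

class ToyProcessor(object):
    def __init__(self, manager):
        self.manager = manager

    def process(self):
        # Serial equivalent of the mp_nproc == 1 branch in ProcessorImageBase;
        # the parallel branch would hand manager.tasks() to a pool instead.
        for task in self.manager.tasks():
            self.manager.accumulate(task())
        return self.manager.results

print(ToyProcessor(ToyManager([range(0, 5), range(5, 10)])).process())
```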
#### File: refinement/parameterisation/autoreduce.py
```python
from __future__ import absolute_import, division, print_function
import logging
logger = logging.getLogger(__name__)
# Parameterisation auto reduction helpers
from dials_refinement_helpers_ext import surpl_iter as surpl
from dials_refinement_helpers_ext import uc_surpl_iter as uc_surpl
from dials_refinement_helpers_ext import pg_surpl_iter as pg_surpl
from scitbx.array_family import flex
from dials.algorithms.refinement import DialsRefineConfigError
# PHIL
from libtbx.phil import parse
phil_str = """
min_nref_per_parameter = 5
.help = "the smallest number of reflections per parameter for a"
"model parameterisation below which the parameterisation will"
"not be made in full, but the action described below will be"
"triggered."
.type = int(value_min=1)
action = *fail fix remove
.help = "action to take if there are too few reflections across the"
"experiments related to a particular model parameterisation."
"If fail, an exception will be raised and refinement will not"
"proceed. If fix, refinement will continue but with the"
"parameters relating to that model remaining fixed at their"
"initial values. If remove, parameters relating to that model"
"will be fixed, and in addition all reflections related to"
"that parameterisation will be removed. This will therefore"
"remove these reflections from other parameterisations of the"
"global model too. For example, if a crystal model could not"
"be parameterised it will be excised completely and not"
"contribute to the joint refinement of the detector and beam."
"In the fix mode, reflections emanating from that crystal will"
"still form residuals and will contribute to detector and beam"
"refinement."
.type = choice
detector_reduce = False
.type = bool
.help = "Special case designed for detector metrology refinement"
"(particularly of the CSPAD). See detector_reduce_list for"
"details."
.expert_level = 2
detector_reduce_list = Dist Tau2 Tau3
.type = strings
.help = "Partial names to match to detector parameters to try fixing."
"If there are still not"
"enough parameters for refinement after fixing these, then"
"fail. This is to ensure that metrology refinement never"
"completes if it is not able to refine some panels. The default"
"is to try fixing the distance as well as Tau2 and Tau3"
"rotations of detector panel, leaving the in-plane shifts and"
"the rotation around the detector normal for refinement."
"groups only."
.expert_level = 2
"""
phil_scope = parse(phil_str)
class AutoReduce(object):
"""Checks for over-parameterisation of models and acts in that case.
Tests each provided model parameterisation to ensure there are enough
reflections in refinement to support that parameterisation. If there are
not then some action is taken. More details are given in documentation
within the phil_str alongside this class definition.
Attributes:
det_params (list): A list of DetectorParameterisation objects
beam_params (list): A list of BeamParameterisation objects
xl_ori_params (list): A list of CrystalOrientationParameterisation objects
xl_uc_params (list): A list of CrystalUnitCellParameterisation objects
gon_params (list): A list of GoniometerParameterisation objects
reflections: A reflection table
"""
def __init__(
self,
options,
det_params,
beam_params,
xl_ori_params,
xl_uc_params,
gon_params,
reflection_manager,
scan_varying=False,
):
"""Initialise the AutoReduce object
Args:
options: A PHIL scope containing the auto reduction options
det_params (list): A list of DetectorParameterisation objects
beam_params (list): A list of BeamParameterisation objects
xl_ori_params (list): A list of CrystalOrientationParameterisation
objects
xl_uc_params (list): A list of CrystalUnitCellParameterisation objects
gon_params (list): A list of GoniometerParameterisation objects
reflection_manager: The ReflectionManager object handling reflection
data for refinement
scan_varying (bool): Whether preparing for scan-varying refinement or
scan static refinement
"""
self.det_params = det_params
self.beam_params = beam_params
self.xl_ori_params = xl_ori_params
self.xl_uc_params = xl_uc_params
self.gon_params = gon_params
self.reflection_manager = reflection_manager
self._options = options
self._scan_varying = scan_varying
# A template logging message to fill in when failing
self._failmsg = (
"Too few reflections to parameterise {0}\nTry modifying "
"refinement.parameterisation.auto_reduction options"
)
# Determine if there are enough reflections to support a particular
# parameterisation. First, a minimum number of reflections is determined,
# by the product of the number of free parameters and a user-provided
# minimum number of reflections per parameter. The total number of reflections
# affected by this parameterisation is calculated, and the difference between
# that and the minimum number of reflections is returned.
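    # Added worked example (not from the original source): with num_free() == 6 and
    # min_nref_per_parameter = 5 the cutoff is 30, so if 100 reflections belong to
    # the experiments of this parameterisation the returned surplus is 70.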
def _surplus_reflections(self, p):
reflections = self.reflection_manager.get_obs()
cutoff = self._options.min_nref_per_parameter * p.num_free()
return surpl(reflections["id"], p.get_experiment_ids()).result - cutoff
# Special version of _surplus_reflections for crystal unit cell
# parameterisations. In some cases certain parameters of a unit cell
# parameterisation may affect only some subset of the total number of
# reflections. For example, for an orthorhombic cell the g_param_0 parameter
# has no effect on predictions in the plane (0,k,l). Here, take the number
# of affected reflections for each parameter individually into account.
def _unit_cell_surplus_reflections(self, p):
F_dbdp = flex.mat3_double(p.get_ds_dp())
min_nref = self._options.min_nref_per_parameter
reflections = self.reflection_manager.get_obs()
# if no free parameters, do as _surplus_reflections
if len(F_dbdp) == 0:
exp_ids = p.get_experiment_ids()
isel = flex.size_t()
for exp_id in exp_ids:
isel.extend((reflections["id"] == exp_id).iselection())
return len(isel)
return (
uc_surpl(
reflections["id"],
reflections["miller_index"],
p.get_experiment_ids(),
F_dbdp,
).result
- min_nref
)
# Special version of _surplus_reflections for hierarchical multi-panel detector
# parameterisations. In that case, certain parameters affect only the
# reflections that fall on a particular panel group of the detector.
def _panel_gp_surplus_reflections(self, p, pnl_ids, group):
exp_ids = p.get_experiment_ids()
gp_params = [gp == group for gp in p.get_param_panel_groups()]
fixlist = p.get_fixed()
free_gp_params = [a and not b for a, b in zip(gp_params, fixlist)]
nparam = free_gp_params.count(True)
cutoff = self._options.min_nref_per_parameter * nparam
reflections = self.reflection_manager.get_obs()
surplus = pg_surpl(
reflections["id"], reflections["panel"], pnl_ids, exp_ids, cutoff
).result
return surplus
def _weak_parameterisation_search(self):
weak = None
nref_deficit = 0
panels = None
pnl_gp = None
name = None
for i, p in enumerate(self.beam_params):
net_nref = self._surplus_reflections(p)
if net_nref < nref_deficit:
nref_deficit = net_nref
weak = p
name = "Beam{}".format(i + 1)
for i, p in enumerate(self.xl_ori_params):
net_nref = self._surplus_reflections(p)
if net_nref < nref_deficit:
nref_deficit = net_nref
weak = p
name = "Crystal{} orientation".format(i + 1)
for i, p in enumerate(self.xl_uc_params):
net_nref = self._unit_cell_surplus_reflections(p)
if net_nref < nref_deficit:
nref_deficit = net_nref
weak = p
name = "Crystal{} unit cell".format(i + 1)
for i, p in enumerate(self.det_params):
try:
pnl_groups = p.get_panel_ids_by_group()
for igp, gp in enumerate(pnl_groups):
net_nref = self._panel_gp_surplus_reflections(p, gp, igp)
if net_nref < nref_deficit:
nref_deficit = net_nref
weak = p
panels = gp
pnl_gp = igp
name = "Detector{0}PanelGroup{1}".format(i + 1, pnl_gp + 1)
except AttributeError: # non-hierarchical detector parameterisation
net_nref = self._surplus_reflections(p)
if net_nref < nref_deficit:
nref_deficit = net_nref
weak = p
panels = None
pnl_gp = None
name = "Detector{}".format(i + 1)
for i, p in enumerate(self.gon_params):
net_nref = self._surplus_reflections(p)
if net_nref < nref_deficit:
nref_deficit = net_nref
weak = p
name = "Goniometer{}".format(i + 1)
return {
"parameterisation": weak,
"panels": panels,
"panel_group_id": pnl_gp,
"name": name,
}
def detector_reduce(self):
"""Reduce detector parameters.
Special case intended for metrology refinement of multi-panel detectors."""
reduce_list = self._options.detector_reduce_list
for i, dp in enumerate(self.det_params):
to_fix = flex.bool(dp.get_fixed())
try: # test for hierarchical detector parameterisation
pnl_groups = dp.get_panel_ids_by_group()
for igp, gp in enumerate(pnl_groups):
surplus = self._panel_gp_surplus_reflections(dp, gp, igp)
if surplus < 0:
msg = (
"Require {0} more reflections to parameterise Detector{1} "
"panel group {2}"
)
logger.warning(
msg.format(-1 * surplus, i + 1, igp + 1)
+ "\nAttempting reduction of non-essential parameters"
)
                        names = self._filter_parameter_names(dp)
prefix = "Group{}".format(igp + 1)
reduce_this_group = [prefix + e for e in reduce_list]
to_fix |= flex.bool(string_sel(reduce_this_group, names))
# try again, and fail if still unsuccessful
surplus = self._panel_gp_surplus_reflections(dp, gp, igp)
if surplus < 0:
msg = msg.format(-1 * surplus, i + 1, igp + 1)
raise DialsRefineConfigError(msg + "\nFailing.")
except AttributeError:
if self._surplus_reflections(dp) < 0:
mdl = "Detector{}".format(i + 1)
msg = self._failmsg.format(mdl)
raise DialsRefineConfigError(msg)
dp.set_fixed(to_fix)
def check_and_fail(self):
"""Check for too few reflections to support the model parameterisation.
Test each parameterisation of each type against the reflections it affects.
Returns:
None
Raises:
DialsRefineConfigError: If there are too few reflections to support
a parameterisation.
"""
for i, bp in enumerate(self.beam_params):
if self._surplus_reflections(bp) < 0:
mdl = "Beam{}".format(i + 1)
msg = self._failmsg.format(mdl)
raise DialsRefineConfigError(msg)
for i, xlo in enumerate(self.xl_ori_params):
if self._surplus_reflections(xlo) < 0:
mdl = "Crystal{} orientation".format(i + 1)
msg = self._failmsg.format(mdl)
raise DialsRefineConfigError(msg)
for i, xluc in enumerate(self.xl_uc_params):
if self._unit_cell_surplus_reflections(xluc) < 0:
mdl = "Crystal{} unit cell".format(i + 1)
msg = self._failmsg.format(mdl)
raise DialsRefineConfigError(msg)
for i, dp in enumerate(self.det_params):
try: # test for hierarchical detector parameterisation
pnl_groups = dp.get_panel_ids_by_group()
for igp, gp in enumerate(pnl_groups):
if self._panel_gp_surplus_reflections(dp, gp, igp) < 0:
msg = "Too few reflections to parameterise Detector{0} panel group {1}"
msg = msg.format(i + 1, igp + 1)
msg += "\nTry modifying refinement.parameterisation.auto_reduction options"
raise DialsRefineConfigError(msg)
except AttributeError:
if self._surplus_reflections(dp) < 0:
mdl = "Detector{}".format(i + 1)
msg = self._failmsg.format(mdl)
raise DialsRefineConfigError(msg)
for i, gonp in enumerate(self.gon_params):
if self._surplus_reflections(gonp) < 0:
mdl = "Goniometer{}".format(i + 1)
msg = self._failmsg.format(mdl)
raise DialsRefineConfigError(msg)
def check_and_fix(self):
"""Fix parameters when there are too few reflections.
Test each parameterisation of each type against the reflections it affects.
If there are too few reflections to support that parameterisation, fix the
parameters.
Returns:
None
"""
warnmsg = "Too few reflections to parameterise {0}"
tmp = []
for i, bp in enumerate(self.beam_params):
if self._surplus_reflections(bp) >= 0:
tmp.append(bp)
else:
mdl = "Beam{}".format(i + 1)
msg = warnmsg.format(mdl)
logger.warning(msg)
self.beam_params = tmp
tmp = []
for i, xlo in enumerate(self.xl_ori_params):
if self._surplus_reflections(xlo) >= 0:
tmp.append(xlo)
else:
mdl = "Crystal{} orientation".format(i + 1)
msg = warnmsg.format(mdl)
logger.warning(msg)
self.xl_ori_params = tmp
tmp = []
for i, xluc in enumerate(self.xl_uc_params):
if self._unit_cell_surplus_reflections(xluc) >= 0:
tmp.append(xluc)
else:
mdl = "Crystal{} unit cell".format(i + 1)
msg = warnmsg.format(mdl)
logger.warning(msg)
self.xl_uc_params = tmp
tmp = []
for i, dp in enumerate(self.det_params):
fixlist = dp.get_fixed()
try: # test for hierarchical detector parameterisation
pnl_groups = dp.get_panel_ids_by_group()
for igp, gp in enumerate(pnl_groups):
if self._panel_gp_surplus_reflections(dp, gp, igp) < 0:
msg = "Too few reflections to parameterise Detector{0}PanelGroup{1}"
msg = msg.format(i + 1, igp + 1)
logger.warning(msg)
gp_params = [gp == igp for gp in dp.get_param_panel_groups()]
for j, val in enumerate(gp_params):
if val:
fixlist[j] = True
dp.set_fixed(fixlist)
if dp.num_free() > 0:
tmp.append(dp)
else:
msg = "No parameters remain free for Detector{}".format(i + 1)
logger.warning(msg)
except AttributeError:
if self._surplus_reflections(dp) >= 0:
tmp.append(dp)
else:
mdl = "Detector{}".format(i + 1)
msg = warnmsg.format(mdl)
logger.warning(msg)
self.det_params = tmp
tmp = []
for i, gonp in enumerate(self.gon_params):
if self._surplus_reflections(gonp) >= 0:
tmp.append(gonp)
else:
mdl = "Goniometer{}".format(i + 1)
msg = warnmsg.format(mdl)
logger.warning(msg)
self.gon_params = tmp
def check_and_remove(self):
"""Fix parameters and remove reflections when there are too few reflections.
Test each parameterisation of each type against the reflections it affects.
If there are too few reflections to support that parameterisation, fix the
parameters and remove those reflections so that they will not be included
in refinement.
Returns:
None
Raises:
DialsRefineConfigError: error if only one single panel detector is present.
"""
# If there is only one detector in a single experiment, the detector should
# be multi-panel for remove to make sense
if len(self.det_params) == 1:
n_exp = len(self.det_params[0].get_experiment_ids())
if n_exp == 1 and not self.det_params[0].is_multi_state():
raise DialsRefineConfigError(
"For single experiment, single panel refinement "
"auto_reduction.action=remove cannot be used as it could only "
"remove all reflections from refinement"
)
# Define a warning message template to use each search iteration
warnmsg = "Too few reflections to parameterise {0}"
warnmsg += (
"\nAssociated reflections will be removed from the Reflection Manager"
)
while True:
# Identify a poorly-supported parameterisation
dat = self._weak_parameterisation_search()
if dat["parameterisation"] is None:
break
exp_ids = dat["parameterisation"].get_experiment_ids()
msg = warnmsg.format(dat["name"])
# Fix relevant parameters and identify observations to remove
obs = self.reflection_manager.get_obs()
isel = flex.size_t()
if dat["panels"] is not None:
fixlist = dat["parameterisation"].get_fixed()
pnl_gps = dat["parameterisation"].get_param_panel_groups()
for i, gp in enumerate(pnl_gps):
if gp == dat["panel_group_id"]:
fixlist[i] = True
dat["parameterisation"].set_fixed(fixlist)
# identify observations on this panel group from associated experiments
for exp_id in exp_ids:
subsel = (obs["id"] == exp_id).iselection()
panels_this_exp = obs["panel"].select(subsel)
for pnl in dat["panels"]:
isel.extend(subsel.select(panels_this_exp == pnl))
else:
fixlist = [True] * dat["parameterisation"].num_total()
dat["parameterisation"].set_fixed(fixlist)
# identify observations from the associated experiments
for exp_id in exp_ids:
isel.extend((obs["id"] == exp_id).iselection())
# Now remove the selected reflections
sel = flex.bool(len(obs), True)
sel.set_selected(isel, False)
self.reflection_manager.filter_obs(sel)
logger.warning(msg)
# Strip out parameterisations with zero free parameters
self.beam_params = [p for p in self.beam_params if p.num_free() > 0]
self.xl_ori_params = [p for p in self.xl_ori_params if p.num_free() > 0]
self.xl_uc_params = [p for p in self.xl_uc_params if p.num_free() > 0]
self.det_params = [p for p in self.det_params if p.num_free() > 0]
self.gon_params = [p for p in self.gon_params if p.num_free() > 0]
def __call__(self):
"""Perform checks and parameter reduction according to the selected option.
Returns:
None
"""
# In the scan-varying case we can't calculate dB_dp before composing the
# model, so revert to the original function
if self._scan_varying:
self._unit_cell_surplus_reflections = self._surplus_reflections
# As a special case for detector metrology, try reducing the number of
# detector parameters if there are too few for some panel group. If this is
# unsuccessful, fail outright.
if self._options.detector_reduce:
self.detector_reduce()
if self._options.action == "fail":
self.check_and_fail()
elif self._options.action == "fix":
self.check_and_fix()
elif self._options.action == "remove":
self.check_and_remove()
```
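A sketch of how `AutoReduce` is driven, assuming the parameterisation lists and the reflection manager have already been constructed elsewhere (they are placeholders here), and assuming the module is importable under the file path shown above.
```python
from dials.algorithms.refinement.parameterisation.autoreduce import (
    AutoReduce,
    phil_scope,
)

# Defaults from the PHIL scope above: min_nref_per_parameter=5, action=fail,
# detector_reduce=False
options = phil_scope.extract()

# det_params, beam_params, xl_ori_params, xl_uc_params, gon_params and
# reflection_manager are placeholders for objects built elsewhere during
# refinement setup; they are not constructed in this sketch.
autoreduce = AutoReduce(
    options,
    det_params,
    beam_params,
    xl_ori_params,
    xl_uc_params,
    gon_params,
    reflection_manager,
    scan_varying=False,
)
autoreduce()  # fail, fix or remove according to options.action
```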
#### File: refinement/parameterisation/scan_varying_prediction_parameters.py
```python
from __future__ import absolute_import, division, print_function
import math
from collections import namedtuple
from scitbx import matrix
from dials.array_family import flex
from dials.algorithms.refinement.parameterisation.prediction_parameters import (
XYPhiPredictionParameterisation,
SparseGradientVectorMixin,
)
class StateDerivativeCache(object):
"""Keep derivatives of the model states in a memory-efficient format
by storing each derivative once alongside the indices of reflections affected
by that derivative"""
def __init__(self, parameterisations=None):
if parameterisations is None:
parameterisations = []
self._cache = dict.fromkeys(parameterisations)
self._Pair = namedtuple("Pair", ["derivative", "iselection"])
# set up lists with the right number of elements
self.clear()
self._nref = 0
def build_gradients(self, parameterisation, isel=None, imatch=None):
"""Return an object mimicking a list of flex arrays containing state
gradients wrt each parameter of the parameterisation. In fact this is a
generator so that iterating over the elements of the list will return
control here so that the gradient array for a single parameter can be
reconstructed on the fly"""
# Get the data from the cache
entry = self._cache[parameterisation]
# Figure out the right flex array type from entries in the cache
shape = None
for e in entry:
if e:
shape = e[0].derivative.n
break
if shape is None:
raise TypeError("No model state derivatives found")
if shape == (3, 1):
arr_type = flex.vec3_double
null = (0, 0, 0)
elif shape == (3, 3):
arr_type = flex.mat3_double
null = (0, 0, 0, 0, 0, 0, 0, 0, 0)
else:
raise TypeError("Unrecognised model state derivative type")
# Loop over the data for each parameter
for p_data in entry:
# Build an empty array of the same length as the original reflection
# list used when the cache was filled
ds_dp = arr_type(self._nref, null)
# Reconstitute full array from the cache
for pair in p_data:
ds_dp.set_selected(pair.iselection, pair.derivative)
# First select only elements relevant to the current gradient calculation
# block (i.e. if nproc > 1 or gradient_calculation_blocksize was set)
if imatch is not None:
ds_dp = ds_dp.select(imatch)
# Now select only those reflections from the full list that are affected
# by this parameterisation
if isel is not None:
ds_dp = ds_dp.select(isel)
yield ds_dp
def clear(self):
"""Clear all cached values"""
for p in self._cache:
self._cache[p] = [[] for i in range(p.num_free())]
def append(self, parameterisation, iparam, derivative, iselection):
"""For a particular parameterisation and parameter number of the free
parameters of that parameterisation, append a state derivative and the
iselection of reflections it affects to the cache"""
l1 = self._cache[parameterisation]
l2 = l1[iparam]
l2.append(self._Pair(derivative, iselection))
@property
def nref(self):
"""Get the length of the reflection list to which indices in the iselections
refer"""
return self._nref
@nref.setter
def nref(self, value):
"""Set the length of the reflection list to which indices in the iselections
refer"""
self._nref = value
class ScanVaryingPredictionParameterisation(XYPhiPredictionParameterisation):
"""An extension of the rotation scans version of the
PredictionParameterisation class that supports model parameterisations that
vary smoothly with the observed image number"""
def __init__(
self,
experiments,
detector_parameterisations=None,
beam_parameterisations=None,
xl_orientation_parameterisations=None,
xl_unit_cell_parameterisations=None,
goniometer_parameterisations=None,
):
if detector_parameterisations is None:
detector_parameterisations = []
if beam_parameterisations is None:
beam_parameterisations = []
if xl_orientation_parameterisations is None:
xl_orientation_parameterisations = []
if xl_unit_cell_parameterisations is None:
xl_unit_cell_parameterisations = []
if goniometer_parameterisations is None:
goniometer_parameterisations = []
# determine once here which types of parameterisations are scan-varying
self._varying_detectors = any(
hasattr(p, "num_sets") for p in detector_parameterisations
)
self._varying_beams = any(
hasattr(p, "num_sets") for p in beam_parameterisations
)
self._varying_xl_orientations = any(
hasattr(p, "num_sets") for p in xl_orientation_parameterisations
)
self._varying_xl_unit_cells = any(
hasattr(p, "num_sets") for p in xl_unit_cell_parameterisations
)
self._varying_goniometers = any(
hasattr(p, "num_sets") for p in goniometer_parameterisations
)
to_cache = []
if self._varying_detectors:
to_cache.extend(detector_parameterisations)
if self._varying_beams:
to_cache.extend(beam_parameterisations)
if self._varying_xl_orientations:
to_cache.extend(xl_orientation_parameterisations)
if self._varying_xl_unit_cells:
to_cache.extend(xl_unit_cell_parameterisations)
if self._varying_goniometers:
to_cache.extend(goniometer_parameterisations)
self._derivative_cache = StateDerivativeCache(to_cache)
# set up base class
super(ScanVaryingPredictionParameterisation, self).__init__(
experiments,
detector_parameterisations=detector_parameterisations,
beam_parameterisations=beam_parameterisations,
xl_orientation_parameterisations=xl_orientation_parameterisations,
xl_unit_cell_parameterisations=xl_unit_cell_parameterisations,
goniometer_parameterisations=goniometer_parameterisations,
)
# Avoid calculation in calculate_model_state_uncertainties unless this
# is set to True
self.set_scan_varying_errors = False
def _get_xl_orientation_parameterisation(self, experiment_id):
"""Return the crystal orientation parameterisation for the requested
experiment number (or None if the crystal orientation in that experiment
is not parameterised)"""
param_set = self._exp_to_param[experiment_id]
xl_op = None
if param_set.xl_ori_param is not None:
xl_op = self._xl_orientation_parameterisations[param_set.xl_ori_param]
return xl_op
def _get_xl_unit_cell_parameterisation(self, experiment_id):
"""Return the crystal unit cell parameterisation for the requested
experiment number (or None if the crystal unit cell in that experiment
is not parameterised)"""
param_set = self._exp_to_param[experiment_id]
xl_ucp = None
if param_set.xl_uc_param is not None:
xl_ucp = self._xl_unit_cell_parameterisations[param_set.xl_uc_param]
return xl_ucp
def _get_beam_parameterisation(self, experiment_id):
"""Return the beam parameterisation for the requested experiment number
(or None if the beam in that experiment is not parameterised)"""
param_set = self._exp_to_param[experiment_id]
bp = None
if param_set.beam_param is not None:
bp = self._beam_parameterisations[param_set.beam_param]
return bp
def _get_detector_parameterisation(self, experiment_id):
"""Return the detector parameterisation for the requested experiment number
(or None if the detector in that experiment is not parameterised)"""
param_set = self._exp_to_param[experiment_id]
dp = None
if param_set.det_param is not None:
dp = self._detector_parameterisations[param_set.det_param]
return dp
def _get_goniometer_parameterisation(self, experiment_id):
"""Return the goniometer parameterisation for the requested experiment number
(or None if the goniometer in that experiment is not parameterised)"""
param_set = self._exp_to_param[experiment_id]
gp = None
if param_set.gonio_param is not None:
gp = self._goniometer_parameterisations[param_set.gonio_param]
return gp
def _get_state_from_parameterisation(
self, parameterisation, frame, multi_state_elt=None
):
"""Get the model state from the parameterisation at the specified frame,
taking care of whether it is a scan-varying parameterisation or not"""
if parameterisation is None:
return None
if (
hasattr(parameterisation, "num_sets")
and not self._current_frame.get(parameterisation) == frame
):
parameterisation.compose(frame)
self._current_frame[parameterisation] = frame
if multi_state_elt is None:
state = parameterisation.get_state()
else:
state = parameterisation.get_state(multi_state_elt=multi_state_elt)
return state
def _prepare_for_compose(self, reflections, skip_derivatives=False):
"""Add columns to the reflection table to hold the varying state matrices
or vectors for the experimental models, if required. Also prepare the cache
for the derivatives of states that are scan-varying"""
nref = len(reflections)
# set columns if needed
if "u_matrix" not in reflections:
reflections["u_matrix"] = flex.mat3_double(nref)
if "b_matrix" not in reflections:
reflections["b_matrix"] = flex.mat3_double(nref)
if "s0_vector" not in reflections:
reflections["s0_vector"] = flex.vec3_double(nref)
if "d_matrix" not in reflections:
reflections["d_matrix"] = flex.mat3_double(nref)
if "D_matrix" not in reflections:
reflections["D_matrix"] = flex.mat3_double(nref)
if "S_matrix" not in reflections:
reflections["S_matrix"] = flex.mat3_double(nref)
# Clear the state derivative cache and set the number of reflections needed
# to reconstruct the derivative arrays later
self._derivative_cache.clear()
self._derivative_cache.nref = nref
def compose(self, reflections, skip_derivatives=False):
"""Compose scan-varying crystal parameterisations at the specified image
number, for the specified experiment, for each image. Put the varying
matrices in the reflection table, and cache the derivatives."""
self._prepare_for_compose(reflections, skip_derivatives)
for iexp, exp in enumerate(self._experiments):
# select the reflections of interest
sel = reflections["id"] == iexp
isel = sel.iselection()
blocks = reflections["block"].select(isel)
# identify which parameterisations to use for this experiment
xl_op = self._get_xl_orientation_parameterisation(iexp)
xl_ucp = self._get_xl_unit_cell_parameterisation(iexp)
bp = self._get_beam_parameterisation(iexp)
dp = self._get_detector_parameterisation(iexp)
gp = self._get_goniometer_parameterisation(iexp)
# reset current frame cache for scan-varying parameterisations
self._current_frame = {}
# get state and derivatives for each block
for block in range(flex.min(blocks), flex.max(blocks) + 1):
# determine the subset of reflections this affects
subsel = isel.select(blocks == block)
if len(subsel) == 0:
continue
# get the panels hit by these reflections
panels = reflections["panel"].select(subsel)
# get the integer frame number nearest the centre of that block
frames = reflections["block_centre"].select(subsel)
# can only be false if original block assignment has gone wrong
assert frames.all_eq(
frames[0]
), "Failing: a block contains reflections that shouldn't be there"
frame = int(math.floor(frames[0]))
# model states at current frame
U = self._get_state_from_parameterisation(xl_op, frame)
if U is None:
U = matrix.sqr(exp.crystal.get_U())
B = self._get_state_from_parameterisation(xl_ucp, frame)
if B is None:
B = matrix.sqr(exp.crystal.get_B())
s0 = self._get_state_from_parameterisation(bp, frame)
if s0 is None:
s0 = matrix.col(exp.beam.get_s0())
S = self._get_state_from_parameterisation(gp, frame)
if S is None:
S = matrix.sqr(exp.goniometer.get_setting_rotation())
# set states for crystal, beam and goniometer
reflections["u_matrix"].set_selected(subsel, U.elems)
reflections["b_matrix"].set_selected(subsel, B.elems)
reflections["s0_vector"].set_selected(subsel, s0.elems)
reflections["S_matrix"].set_selected(subsel, S.elems)
# set states and derivatives for this detector
if dp is not None: # detector is parameterised
if dp.is_multi_state(): # parameterised detector is multi panel
# loop through the panels in this detector
for panel_id, _ in enumerate(exp.detector):
# get the right subset of array indices to set for this panel
subsel2 = subsel.select(panels == panel_id)
if len(subsel2) == 0:
# if no reflections intersect this panel, skip calculation
continue
dmat = self._get_state_from_parameterisation(
dp, frame, multi_state_elt=panel_id
)
if dmat is None:
dmat = exp.detector[panel_id].get_d_matrix()
Dmat = exp.detector[panel_id].get_D_matrix()
reflections["d_matrix"].set_selected(subsel2, dmat)
reflections["D_matrix"].set_selected(subsel2, Dmat)
if self._varying_detectors and not skip_derivatives:
for j, dd in enumerate(
dp.get_ds_dp(
multi_state_elt=panel_id, use_none_as_null=True
)
):
if dd is None:
continue
self._derivative_cache.append(dp, j, dd, subsel)
else: # parameterised detector is single panel
dmat = self._get_state_from_parameterisation(dp, frame)
if dmat is None:
dmat = exp.detector[0].get_d_matrix()
Dmat = exp.detector[0].get_D_matrix()
reflections["d_matrix"].set_selected(subsel, dmat)
reflections["D_matrix"].set_selected(subsel, Dmat)
if self._varying_detectors and not skip_derivatives:
for j, dd in enumerate(dp.get_ds_dp(use_none_as_null=True)):
if dd is None:
continue
self._derivative_cache.append(dp, j, dd, subsel)
else: # set states for unparameterised detector (dp is None)
# loop through the panels in this detector
for panel_id, _ in enumerate(exp.detector):
# get the right subset of array indices to set for this panel
subsel2 = subsel.select(panels == panel_id)
if len(subsel2) == 0:
# if no reflections intersect this panel, skip to the next
continue
dmat = exp.detector[panel_id].get_d_matrix()
Dmat = exp.detector[panel_id].get_D_matrix()
reflections["d_matrix"].set_selected(subsel2, dmat)
reflections["D_matrix"].set_selected(subsel2, Dmat)
# set derivatives of the states for crystal, beam and goniometer
if not skip_derivatives:
if xl_op is not None and self._varying_xl_orientations:
for j, dU in enumerate(xl_op.get_ds_dp(use_none_as_null=True)):
if dU is None:
continue
self._derivative_cache.append(xl_op, j, dU, subsel)
if xl_ucp is not None and self._varying_xl_unit_cells:
for j, dB in enumerate(xl_ucp.get_ds_dp(use_none_as_null=True)):
if dB is None:
continue
self._derivative_cache.append(xl_ucp, j, dB, subsel)
if bp is not None and self._varying_beams:
for j, ds0 in enumerate(bp.get_ds_dp(use_none_as_null=True)):
if ds0 is None:
continue
self._derivative_cache.append(bp, j, ds0, subsel)
if gp is not None and self._varying_goniometers:
for j, dS in enumerate(gp.get_ds_dp(use_none_as_null=True)):
if dS is None:
continue
self._derivative_cache.append(gp, j, dS, subsel)
# set the UB matrices for prediction
reflections["ub_matrix"] = reflections["u_matrix"] * reflections["b_matrix"]
# called by refiner.run for setting the crystal scan points
def get_varying_UB(self, obs_image_numbers, experiment_id):
"""Extract the setting matrix from the contained scan-dependent crystal
parameterisations at specified image number."""
if not (self._varying_xl_unit_cells or self._varying_xl_orientations):
return None
# identify which crystal parameterisations to use for this experiment
xl_op = self._get_xl_orientation_parameterisation(experiment_id)
xl_ucp = self._get_xl_unit_cell_parameterisation(experiment_id)
UB_list = []
for i in obs_image_numbers:
U = self._get_state_from_parameterisation(xl_op, i)
B = self._get_state_from_parameterisation(xl_ucp, i)
UB_list.append(U * B)
return UB_list
# called by refiner.run for setting the beam scan points
def get_varying_s0(self, obs_image_numbers, experiment_id):
"""Extract the s0 vector from the contained scan-dependent beam
parameterisation at specified image number."""
if not self._varying_beams:
return None
# identify which beam parameterisation to use for this experiment
bp = self._get_beam_parameterisation(experiment_id)
s0_list = []
for i in obs_image_numbers:
s0 = self._get_state_from_parameterisation(bp, i)
s0_list.append(s0)
return s0_list
# called by refiner.run for setting the goniometer scan points
def get_varying_setting_rotation(self, obs_image_numbers, experiment_id):
"""Extract the S matrix from the contained scan-dependent goniometer
parameterisation at specified image number."""
if not self._varying_goniometers:
return None
# identify which goniometer parameterisation to use for this experiment
gp = self._get_goniometer_parameterisation(experiment_id)
S_list = []
for i in obs_image_numbers:
S = self._get_state_from_parameterisation(gp, i)
S_list.append(S)
return S_list
# overloaded for the scan-varying case
def _get_model_data_for_experiment(self, experiment, reflections):
"""helper function to return model data s0, U, B, D and S for a particular
experiment. In this scan-varying overload this is trivial because these
values are already set as arrays in the reflection table"""
return {
"s0": reflections["s0_vector"],
"U": reflections["u_matrix"],
"B": reflections["b_matrix"],
"D": reflections["D_matrix"],
"S": reflections["S_matrix"],
}
def _beam_derivatives(self, isel, parameterisation, reflections):
"""Determine whether ds0_dp was precalculated then call the base class
method"""
if self._varying_beams:
if "imatch" in reflections:
imatch = reflections["imatch"]
else:
imatch = None
ds0_dxluc_p = self._derivative_cache.build_gradients(
parameterisation=parameterisation, isel=isel, imatch=imatch
)
else:
ds0_dxluc_p = None
return super(ScanVaryingPredictionParameterisation, self)._beam_derivatives(
isel, parameterisation, ds0_dxluc_p
)
def _xl_orientation_derivatives(self, isel, parameterisation, reflections):
"""Determine whether dU_dp was precalculated then call the base class
method"""
if self._varying_xl_orientations:
if "imatch" in reflections:
imatch = reflections["imatch"]
else:
imatch = None
dU_dxlo_p = self._derivative_cache.build_gradients(
parameterisation=parameterisation, isel=isel, imatch=imatch
)
else:
dU_dxlo_p = None
return super(
ScanVaryingPredictionParameterisation, self
)._xl_orientation_derivatives(isel, parameterisation, dU_dxlo_p)
def _xl_unit_cell_derivatives(self, isel, parameterisation, reflections):
"""Determine whether dB_dp was precalculated then call the base class
method"""
if self._varying_xl_unit_cells:
if "imatch" in reflections:
imatch = reflections["imatch"]
else:
imatch = None
dB_dxluc_p = self._derivative_cache.build_gradients(
parameterisation=parameterisation, isel=isel, imatch=imatch
)
else:
dB_dxluc_p = None
return super(
ScanVaryingPredictionParameterisation, self
)._xl_unit_cell_derivatives(isel, parameterisation, dB_dxluc_p)
def _detector_derivatives(self, isel, panel_id, parameterisation, reflections):
"""Determine whether dd_dp was precalculated then call the base class
method"""
if self._varying_detectors:
if "imatch" in reflections:
imatch = reflections["imatch"]
else:
imatch = None
dd_ddet_p = self._derivative_cache.build_gradients(
parameterisation=parameterisation, isel=isel, imatch=imatch
)
else:
dd_ddet_p = None
return super(ScanVaryingPredictionParameterisation, self)._detector_derivatives(
isel, panel_id, parameterisation, dd_ddet_p
)
def _goniometer_derivatives(self, isel, parameterisation, reflections):
"""Determine whether dS_dp was precalculated then call the base class
method"""
if self._varying_goniometers:
if "imatch" in reflections:
imatch = reflections["imatch"]
else:
imatch = None
dS_dgon_p = self._derivative_cache.build_gradients(
parameterisation=parameterisation, isel=isel, imatch=imatch
)
else:
dS_dgon_p = None
return super(
ScanVaryingPredictionParameterisation, self
)._goniometer_derivatives(isel, parameterisation, dS_dgon_p)
def calculate_model_state_uncertainties(
self, var_cov=None, obs_image_number=None, experiment_id=None
):
"""Take a variance-covariance matrix of all free parameters (probably
calculated by a minimisation engine). For each parameterisation in the
global model, extract the subset of this matrix for the associated block
of parameters. Pass this on to the relevant model parameterisation to
calculate its own uncertainty of state.
This scan-varying version should first be called with var_cov set but
obs_image_number=None and experiment_id=None. This calls the scan-static
version to do the calculation for the scan-static parameterisations and
also caches the subsets of var_cov relevant for the scan-varying
parameterisations. Subsequent calls should provide obs_image_number and
experiment_id to calculate for a particular crystal at a particular
scan-point"""
# First call, only a variance-covariance matrix is supplied
if var_cov is not None:
assert [obs_image_number, experiment_id].count(None) == 2
super(
ScanVaryingPredictionParameterisation, self
).calculate_model_state_uncertainties(var_cov)
return
        # Later calls: only an experiment id and image number are supplied.
        # Identify the crystal parameterisations for this experiment
xl_op = self._get_xl_orientation_parameterisation(experiment_id)
xl_ucp = self._get_xl_unit_cell_parameterisation(experiment_id)
result = {}
# compose at the requested image number and calculate using the cached
# varcov matrices. Take the first elt of the list because the crystal
# parameterisations are not multi-state
if xl_op is not None:
try:
xl_op.compose(obs_image_number)
result["U_cov"] = xl_op.calculate_state_uncertainties(var_cov=None)[0]
except TypeError:
pass
if xl_ucp is not None:
try:
xl_ucp.compose(obs_image_number)
result["B_cov"] = xl_ucp.calculate_state_uncertainties(var_cov=None)[0]
except TypeError:
pass
return result
def set_model_state_uncertainties(self, u_cov_list, b_cov_list, experiment_id=None):
"""Identify the crystal parameterisations and set the list of covariance
matrices, if available. They will only be available if the parameterisation
is a scan-varying type, otherwise they are None"""
xl_op = self._get_xl_orientation_parameterisation(experiment_id)
xl_ucp = self._get_xl_unit_cell_parameterisation(experiment_id)
if u_cov_list:
try:
xl_op.set_state_uncertainties(u_cov_list)
except AttributeError:
pass
if b_cov_list:
try:
xl_ucp.set_state_uncertainties(b_cov_list)
except AttributeError:
pass
class ScanVaryingPredictionParameterisationSparse(
SparseGradientVectorMixin, ScanVaryingPredictionParameterisation
):
pass
```
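A small sketch of the `StateDerivativeCache` round trip defined above, using a stub in place of a real model parameterisation (the cache itself only needs `num_free()`); the import path is inferred from the file location and the derivative is fed in the same way `compose()` does.
```python
from scitbx import matrix
from dials.array_family import flex
from dials.algorithms.refinement.parameterisation.scan_varying_prediction_parameters import (
    StateDerivativeCache,
)


class StubParameterisation(object):
    """Stand-in exposing the one method the cache calls when clearing."""

    def num_free(self):
        return 1


p = StubParameterisation()
cache = StateDerivativeCache([p])
cache.nref = 5  # length of the (hypothetical) reflection list

# One 3x3 state derivative affecting reflections 1 and 3 of the 5
dU = matrix.sqr((1, 0, 0, 0, 1, 0, 0, 0, 1))
cache.append(p, 0, dU, flex.size_t([1, 3]))

# build_gradients reconstitutes one full-length array per free parameter
(ds_dp,) = tuple(cache.build_gradients(p))
assert len(ds_dp) == 5
```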
#### File: scaling/error_model/error_model.py
```python
from __future__ import absolute_import, division, print_function
from math import log, exp
from dials.array_family import flex
from scitbx import sparse
from libtbx.table_utils import simple_table
def get_error_model(error_model_type):
"""Return the correct error model class from a params option."""
if error_model_type == "basic":
return BasicErrorModel
else:
raise ValueError("Invalid choice of error model: %s" % error_model_type)
class BasicErrorModel(object):
"""
Object to manage calculation of deviations for an error model.
"""
min_reflections_required = 250
def __init__(self, Ih_table, n_bins=10, min_Ih=25.0, min_partiality=0.95):
self.Ih_table = Ih_table
self.n_bins = n_bins
self.binning_info = {
"initial_variances": [],
"bin_boundaries": [],
"bin_variances": [],
"refl_per_bin": [],
"n_reflections": None,
}
# First select on initial delta
self.filter_unsuitable_reflections(
cutoff=12.0, min_Ih=min_Ih, min_partiality=min_partiality
)
self.n_h = self.Ih_table.calc_nh()
self.sigmaprime = self.calc_sigmaprime([1.0, 0.0])
self.bin_variances = None
self._summation_matrix = self.create_summation_matrix()
self.weights = self.binning_info["refl_per_bin"] ** 0.5
self.refined_parameters = [1.0, 0.0]
self.delta_hl = self.calc_deltahl()
self.calculate_bin_variances()
self.binning_info["initial_variances"] = self.binning_info["bin_variances"]
def __str__(self):
a = abs(self.refined_parameters[0])
b = abs(self.refined_parameters[1])
ISa = "%.3f" % (1.0 / (b * a)) if (b * a) > 0 else "Unable to estimate"
return "\n".join(
(
"",
"Error model details:",
" Type: basic",
" Current parameters: a = %.5f, b = %.5f" % (a, b),
" Error model formula: "
+ u"\u03C3"
+ "'"
+ u"\xb2"
+ " = a"
+ u"\xb2"
+ "("
+ u"\u03C3\xb2"
" + (bI)" + u"\xb2" + ")",
" estimated I/sigma asymptotic limit: %s" % ISa,
"",
)
)
def minimisation_summary(self):
"""Generate a summary of the model minimisation for output."""
header = [
"Intensity range (<Ih>)",
"n_refl",
"Uncorrected variance",
"Corrected variance",
]
rows = []
bin_bounds = ["%.2f" % i for i in self.binning_info["bin_boundaries"]]
for i, (initial_var, bin_var, n_refl) in enumerate(
zip(
self.binning_info["initial_variances"],
self.binning_info["bin_variances"],
self.binning_info["refl_per_bin"],
)
):
rows.append(
[
bin_bounds[i] + " - " + bin_bounds[i + 1],
str(int(n_refl)),
str(round(initial_var, 3)),
str(round(bin_var, 3)),
]
)
st = simple_table(rows, header)
return "\n".join(
(
"Results of error model refinement. Uncorrected and corrected variances",
"of normalised intensity deviations for given intensity ranges. Variances",
"are expected to be ~1.0 for reliable errors (sigmas).",
st.format(),
"",
)
)
@property
def summation_matrix(self):
"""A sparse matrix to allow summation over intensity groups."""
return self._summation_matrix
@property
def bin_counts(self):
"""An array of the number of intensities assigned to each bin."""
return self.binning_info["refl_per_bin"]
def filter_unsuitable_reflections(self, cutoff, min_Ih, min_partiality):
"""Do a first pass to calculate delta_hl and filter out the largest
deviants, so that the error model is not misled by these and instead
operates on the central ~90% of the data. Also choose reflection groups
with n_h > 1, as these have deltas of zero by definition and will bias
        the variance calculations. Finally, only use groups where <Ih> > min_Ih, as
the assumptions of normally distributed deltas will not hold for low
<Ih>."""
self.n_h = self.Ih_table.calc_nh()
self.sigmaprime = self.calc_sigmaprime([1.0, 0.0])
delta_hl = self.calc_deltahl()
# make sure the fit isn't misled by extreme values
sel = flex.abs(delta_hl) < cutoff
if "partiality" in self.Ih_table.Ih_table:
sel &= self.Ih_table.Ih_table["partiality"] > min_partiality
self.Ih_table = self.Ih_table.select(sel)
n = self.Ih_table.size
sum_I_over_var = (
self.Ih_table.intensities / self.Ih_table.variances
) * self.Ih_table.h_index_matrix
n_per_group = flex.double(n, 1) * self.Ih_table.h_index_matrix
avg_I_over_var = sum_I_over_var / n_per_group
sel = avg_I_over_var > 0.85
self.Ih_table = self.Ih_table.select_on_groups(sel)
self.n_h = self.Ih_table.calc_nh()
scaled_Ih = self.Ih_table.Ih_values * self.Ih_table.inverse_scale_factors
# need a scaled min_Ih, where can reasonably expect norm distribution
# (use min_Ih=25 by default, sigma ~ 5)
sel2 = scaled_Ih > min_Ih
# can't calculate a true deviation for groups of 1
sel3 = self.n_h > 1.0
sel4 = self.Ih_table.intensities > 0.001
# don't want to include weaker reflections where the background adds
# significantly to the variances, as these would no longer be normally
# distributed and skew the fit.
self.Ih_table = self.Ih_table.select(sel2 & sel3 & sel4)
n = self.Ih_table.size
if n < self.min_reflections_required:
raise ValueError(
"Insufficient reflections (%s) to perform error modelling." % n
)
self.n_h = self.Ih_table.calc_nh()
# now make sure any left also have n > 1
sel = self.n_h > 1.0
self.Ih_table = self.Ih_table.select(sel)
self.n_h = self.Ih_table.calc_nh()
def calc_sigmaprime(self, x):
"""Calculate the error from the model."""
sigmaprime = (
x[0]
* ((self.Ih_table.variances) + ((x[1] * self.Ih_table.intensities) ** 2))
** 0.5
) / self.Ih_table.inverse_scale_factors
return sigmaprime
def calc_deltahl(self):
"""Calculate the normalised deviations from the model."""
I_hl = self.Ih_table.intensities
g_hl = self.Ih_table.inverse_scale_factors
I_h = self.Ih_table.Ih_values
prefactor = ((self.n_h - flex.double(self.n_h.size(), 1.0)) / self.n_h) ** 0.5
delta_hl = prefactor * ((I_hl / g_hl) - I_h) / self.sigmaprime
return delta_hl
def update_for_minimisation(self, x):
""""Calculate the updated quantites."""
self.sigmaprime = self.calc_sigmaprime(x)
self.delta_hl = self.calc_deltahl()
self.bin_variances = self.calculate_bin_variances()
def create_summation_matrix(self):
""""Create a summation matrix to allow sums into intensity bins.
This routine attempts to bin into bins equally spaced in log(intensity),
to give a representative sample across all intensities. To avoid
undersampling, it is required that there are at least 100 reflections
per intensity bin unless there are very few reflections."""
n = self.Ih_table.size
if n < self.min_reflections_required:
raise ValueError(
"Insufficient reflections (%s) to perform error modelling." % n
)
self.binning_info["n_reflections"] = n
summation_matrix = sparse.matrix(n, self.n_bins)
Ih = self.Ih_table.Ih_values * self.Ih_table.inverse_scale_factors
size_order = flex.sort_permutation(Ih, reverse=True)
Imax = max(Ih)
Imin = max(1.0, min(Ih)) # avoid log issues
spacing = (log(Imax) - log(Imin)) / float(self.n_bins)
boundaries = [Imax] + [
exp(log(Imax) - (i * spacing)) for i in range(1, self.n_bins + 1)
]
boundaries[-1] = min(Ih) - 0.01
self.binning_info["bin_boundaries"] = boundaries
self.binning_info["refl_per_bin"] = flex.double()
n_cumul = 0
if Ih.size() > 100 * self.min_reflections_required:
self.min_reflections_required = int(Ih.size() / 100.0)
min_per_bin = min(self.min_reflections_required, int(n / (3.0 * self.n_bins)))
for i in range(len(boundaries) - 1):
maximum = boundaries[i]
minimum = boundaries[i + 1]
sel1 = Ih <= maximum
sel2 = Ih > minimum
sel = sel1 & sel2
isel = sel.iselection()
n_in_bin = isel.size()
if n_in_bin < min_per_bin: # need more in this bin
m = n_cumul + min_per_bin
if m < n: # still some refl left to use
idx = size_order[m]
intensity = Ih[idx]
boundaries[i + 1] = intensity
minimum = boundaries[i + 1]
sel = sel1 & (Ih > minimum)
isel = sel.iselection()
n_in_bin = isel.size()
self.binning_info["refl_per_bin"].append(n_in_bin)
for j in isel:
summation_matrix[j, i] = 1
n_cumul += n_in_bin
cols_to_del = []
for i, col in enumerate(summation_matrix.cols()):
if col.non_zeroes < min_per_bin - 5:
cols_to_del.append(i)
n_new_cols = summation_matrix.n_cols - len(cols_to_del)
if n_new_cols == self.n_bins:
return summation_matrix
new_sum_matrix = sparse.matrix(summation_matrix.n_rows, n_new_cols)
next_col = 0
refl_per_bin = flex.double()
for i, col in enumerate(summation_matrix.cols()):
if i not in cols_to_del:
new_sum_matrix[:, next_col] = col
next_col += 1
refl_per_bin.append(self.binning_info["refl_per_bin"][i])
self.binning_info["refl_per_bin"] = refl_per_bin
return new_sum_matrix
def calculate_bin_variances(self):
"""Calculate the variance of each bin."""
sum_deltasq = (self.delta_hl ** 2) * self.summation_matrix
sum_delta_sq = (self.delta_hl * self.summation_matrix) ** 2
bin_vars = (sum_deltasq / self.bin_counts) - (
sum_delta_sq / (self.bin_counts ** 2)
)
self.binning_info["bin_variances"] = bin_vars
return bin_vars
def update_variances(self, variances, intensities):
"""Use the error model parameter to calculate new values for the variances."""
new_variance = (self.refined_parameters[0] ** 2) * (
variances + ((self.refined_parameters[1] * intensities) ** 2)
)
return new_variance
def clear_Ih_table(self):
"""Delete the Ih_table, to free memory."""
del self.Ih_table
```
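A quick numerical check of the corrected-variance formula that `update_variances` implements, sigma'^2 = a^2 (sigma^2 + (b I)^2). Plain Python with made-up parameter values, purely for illustration.
```python
a, b = 1.2, 0.02      # made-up error model parameters
intensity = 1000.0    # made-up raw intensity
variance = 900.0      # made-up raw variance (sigma = 30)

new_variance = (a ** 2) * (variance + (b * intensity) ** 2)
assert abs(new_variance - 1872.0) < 1e-9  # 1.44 * (900 + 400)
print("corrected sigma: %.2f" % (new_variance ** 0.5))
```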
#### File: scaling/model/scaling_model_factory.py
```python
from __future__ import absolute_import, division, print_function
from collections import OrderedDict
from dials.array_family import flex
import dials.algorithms.scaling.model.model as Model
class KBSMFactory(object):
"""
Factory for creating a KB scaling model.
"""
@classmethod
def create(cls, params, _, __):
"""create the simple KB scaling model."""
configdict = OrderedDict({"corrections": []})
parameters_dict = {}
if params.parameterisation.decay_term:
configdict["corrections"].append("decay")
parameters_dict["decay"] = {
"parameters": flex.double([0.0]),
"parameter_esds": None,
}
if params.parameterisation.scale_term:
configdict["corrections"].append("scale")
parameters_dict["scale"] = {
"parameters": flex.double([1.0]),
"parameter_esds": None,
}
return Model.KBScalingModel(parameters_dict, configdict)
class PhysicalSMFactory(object):
"""
Factory for creating a physical scaling model.
"""
@classmethod
def create(cls, params, experiments, _):
"""Create the scaling model defined by the params."""
configdict = OrderedDict({"corrections": []})
parameters_dict = {}
osc_range = experiments.scan.get_oscillation_range()
one_osc_width = experiments.scan.get_oscillation()[1]
configdict.update({"valid_osc_range": osc_range})
if params.parameterisation.scale_term:
configdict["corrections"].append("scale")
n_scale_param, s_norm_fac, scale_rot_int = Model.initialise_smooth_input(
osc_range, one_osc_width, params.parameterisation.scale_interval
)
configdict.update(
{"s_norm_fac": s_norm_fac, "scale_rot_interval": scale_rot_int}
)
parameters_dict["scale"] = {
"parameters": flex.double(n_scale_param, 1.0),
"parameter_esds": None,
}
if params.parameterisation.decay_term:
configdict["corrections"].append("decay")
n_decay_param, d_norm_fac, decay_rot_int = Model.initialise_smooth_input(
osc_range, one_osc_width, params.parameterisation.decay_interval
)
configdict.update(
{"d_norm_fac": d_norm_fac, "decay_rot_interval": decay_rot_int}
)
parameters_dict["decay"] = {
"parameters": flex.double(n_decay_param, 0.0),
"parameter_esds": None,
}
if params.parameterisation.absorption_term:
configdict["corrections"].append("absorption")
lmax = params.parameterisation.lmax
n_abs_param = (2 * lmax) + (lmax ** 2) # arithmetic sum formula (a1=3, d=2)
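            # (added note) equivalently the sum over l = 1..lmax of (2l + 1), i.e.
            # lmax**2 + 2*lmax spherical harmonic coefficients (24 for lmax = 4)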
configdict.update({"lmax": lmax})
surface_weight = params.parameterisation.surface_weight
configdict.update({"abs_surface_weight": surface_weight})
parameters_dict["absorption"] = {
"parameters": flex.double(n_abs_param, 0.0),
"parameter_esds": None,
}
return Model.PhysicalScalingModel(parameters_dict, configdict)
def calc_n_param_from_bins(value_min, value_max, n_bins):
"""Return the correct number of bins for initialising the gaussian
smoothers."""
assert n_bins > 0
assert isinstance(n_bins, int)
bin_width = (value_max - value_min) / n_bins
if n_bins == 1:
n_param = 2
elif n_bins == 2:
n_param = 3
else:
n_param = n_bins + 2
return n_param, bin_width
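# Added example (not in the original source): calc_n_param_from_bins(0.0, 10.0, 5)
# returns (7, 2.0) - five bins of width 2.0 need n_bins + 2 = 7 smoother parameters.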
class ArraySMFactory(object):
"""
Factory for creating an array-based scaling model.
"""
@classmethod
def create(cls, params, experiments, reflections):
"""create an array-based scaling model."""
reflections = reflections.select(reflections["d"] > 0.0)
configdict = OrderedDict({"corrections": []})
# First initialise things common to more than one correction.
one_osc_width = experiments.scan.get_oscillation()[1]
osc_range = experiments.scan.get_oscillation_range()
configdict.update({"valid_osc_range": osc_range})
n_time_param, time_norm_fac, time_rot_int = Model.initialise_smooth_input(
osc_range, one_osc_width, params.parameterisation.decay_interval
)
(xvalues, yvalues, _) = reflections["xyzobs.px.value"].parts()
(xmax, xmin) = (flex.max(xvalues) + 0.001, flex.min(xvalues) - 0.001)
(ymax, ymin) = (flex.max(yvalues) + 0.001, flex.min(yvalues) - 0.001)
parameters_dict = {}
if params.parameterisation.decay_term:
configdict["corrections"].append("decay")
resmax = (1.0 / (flex.min(reflections["d"]) ** 2)) + 0.001
resmin = (1.0 / (flex.max(reflections["d"]) ** 2)) - 0.001
n_res_bins = params.parameterisation.n_resolution_bins
n_res_param, res_bin_width = calc_n_param_from_bins(
resmin, resmax, n_res_bins
)
configdict.update(
{
"n_res_param": n_res_param,
"n_time_param": n_time_param,
"resmin": resmin,
"res_bin_width": res_bin_width,
"time_norm_fac": time_norm_fac,
"time_rot_interval": time_rot_int,
}
)
parameters_dict["decay"] = {
"parameters": flex.double((n_time_param * n_res_param), 1.0),
"parameter_esds": None,
}
if params.parameterisation.absorption_term:
configdict["corrections"].append("absorption")
nxbins = nybins = params.parameterisation.n_absorption_bins
n_x_param, x_bin_width = calc_n_param_from_bins(xmin, xmax, nxbins)
n_y_param, y_bin_width = calc_n_param_from_bins(ymin, ymax, nybins)
configdict.update(
{
"n_x_param": n_x_param,
"n_y_param": n_y_param,
"xmin": xmin,
"ymin": ymin,
"x_bin_width": x_bin_width,
"y_bin_width": y_bin_width,
"n_time_param": n_time_param,
"time_norm_fac": time_norm_fac,
"time_rot_interval": time_rot_int,
}
)
parameters_dict["absorption"] = {
"parameters": flex.double((n_x_param * n_y_param * n_time_param), 1.0),
"parameter_esds": None,
}
if params.parameterisation.modulation_term:
configdict["corrections"].append("modulation")
nx_det_bins = ny_det_bins = params.parameterisation.n_modulation_bins
n_x_mod_param, x_det_bw = calc_n_param_from_bins(xmin, xmax, nx_det_bins)
n_y_mod_param, y_det_bw = calc_n_param_from_bins(ymin, ymax, ny_det_bins)
configdict.update(
{
"n_x_mod_param": n_x_mod_param,
"n_y_mod_param": n_y_mod_param,
"xmin": xmin,
"ymin": ymin,
"x_det_bin_width": x_det_bw,
"y_det_bin_width": y_det_bw,
}
)
parameters_dict["modulation"] = {
"parameters": flex.double((n_x_mod_param * n_y_mod_param), 1.0),
"parameter_esds": None,
}
return Model.ArrayScalingModel(parameters_dict, configdict)
```
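A sketch of driving the simplest of the factories above. The real `params` object comes from the scaling PHIL scope, so the tiny stand-in classes here are assumptions for illustration only; the import path follows the file location.
```python
from dials.algorithms.scaling.model.scaling_model_factory import KBSMFactory


class _Parameterisation(object):
    # stand-in exposing only the flags KBSMFactory.create reads
    decay_term = True
    scale_term = True


class _Params(object):
    parameterisation = _Parameterisation()


# Returns a Model.KBScalingModel configured with 'decay' and 'scale' corrections;
# the two unused positional arguments mirror the create(cls, params, _, __) signature.
model = KBSMFactory.create(_Params(), None, None)
```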
#### File: algorithms/statistics/temp.py
```python
from __future__ import absolute_import, division, print_function
# from scipy.stats.distributions import ksone
# v1 = []
# v2 = []
# x = 0.5
# n = 100
# m = int(floor(n * (1 - x)))
# for j in range(0, m+1):
# a = x + float(j) / n
# if a > 0 and a < 1:
# b = (j - 1) / log(a)
# c = (n - j) / log(1 - a)
# elif a <= 0:
# b = 0
# c = 0
# elif a >= 1:
# b = -100
# c = 0
# v1.append(b)
# v2.append(c)
# from matplotlib import pylab
# pylab.plot(v1)
# pylab.plot(v2)
# pylab.show()
# exit(0)
# def smirnov2(n, e):
# from math import floor
# assert(n > 0 and e >= 0.0 and e <= 1.0)
# if e == 0.0:
# return 1.0
# nn = int(floor(n * (1.0 - e)))
# p = 0.0
# c = 1.0
# for v in range(0, nn+1):
# evn = e + float(v) / n
# aa = pow(evn, (v - 1))
# bb = pow(1.0 - evn, n - v)
# p += c * aa * bb
# print aa, bb, c, p
# c *= float(n - v) / (v + 1)
##print v, c
# return p * e
def compute_lz(z):
from math import sqrt, pi, exp
s = sum(
exp(-(2.0 * k - 1.0) ** 2 * pi ** 2 / (8.0 * z ** 2)) for k in range(1, 10000)
)
return s * sqrt(2.0 * pi) / z
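# Note added for clarity: compute_lz evaluates the limiting Kolmogorov distribution
#   L(z) = sqrt(2*pi) / z * sum_{k>=1} exp(-(2k - 1)**2 * pi**2 / (8 * z**2)),
# with the sum truncated at k = 9999.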
from dials.algorithms.statistics import *
from math import sqrt
xx = []
scdf = []
kcdf = []
x = 0.01
# for n in range(1, 100):
# s = smirnov_cdf(n, x)
# k = compute_lz(x) / sqrt(n)
##k = kolmogorov_cdf(n, x)
# xx.append(n)
# scdf.append(s)
# kcdf.append(k)
n = 100
for i in range(1, 200):
x = float(i) / 100.0
s = 0
# s = smirnov_cdf(n, x)
k = 0
k = compute_lz(x) / sqrt(n)
xx.append(x)
scdf.append(s)
kcdf.append(k)
from matplotlib import pylab
p1 = pylab.plot(xx, scdf, color="r")
p2 = pylab.plot(xx, kcdf, color="b")
pylab.show()
```
#### File: symmetry/cosym/test_cosym_symmetry_analysis.py
```python
from __future__ import absolute_import, division, print_function
from scitbx.array_family import flex
from cctbx import crystal, sgtbx, uctbx
import pytest
def test_symmetry_analysis():
coords = flex.double(
[
[0.835, 0.158],
[0.772, 0.104],
[0.108, 0.907],
[0.058, 0.76],
[0.926, 0.189],
[0.221, 0.888],
[0.957, 0.137],
[0.958, 0.143],
[-0.015, 0.726],
[-0.066, 0.29],
[0.135, 0.848],
[0.085, 0.788],
[0.897, 0.126],
[0.749, 0.073],
[0.166, 0.943],
[0.871, 0.248],
[0.116, 0.968],
[0.116, 0.973],
[0.706, 0.007],
[0.288, -0.055],
[0.137, 0.848],
[0.089, 0.78],
[0.893, 0.122],
[0.749, 0.077],
[0.165, 0.941],
[0.877, 0.242],
[0.114, 0.968],
[0.12, 0.971],
[0.716, 0.002],
[0.292, -0.062],
[0.841, 0.162],
[0.774, 0.104],
[0.1, 0.909],
[0.054, 0.761],
[0.927, 0.184],
[0.227, 0.88],
[0.957, 0.137],
[0.961, 0.143],
[-0.007, 0.716],
[-0.061, 0.287],
[0.13, 0.848],
[0.084, 0.783],
[0.898, 0.124],
[0.749, 0.075],
[0.169, 0.94],
[0.871, 0.247],
[0.114, 0.969],
[0.12, 0.969],
[0.717, 0.0],
[0.296, -0.066],
[0.84, 0.154],
[0.776, 0.103],
[0.104, 0.908],
[0.057, 0.755],
[0.925, 0.19],
[0.227, 0.883],
[0.958, 0.136],
[0.962, 0.143],
[-0.017, 0.724],
[-0.067, 0.295],
]
)
sym_ops = [
sgtbx.rt_mx(s)
for s in ("-z,-y,-x", "y,z,x", "x,y,z", "-x,-z,-y", "z,x,y", "-y,-x,-z")
]
crystal_symmetry = crystal.symmetry(
unit_cell=uctbx.unit_cell((98.33, 98.33, 135.99, 90, 90, 120)),
space_group_info=sgtbx.space_group_info("R3:H"),
).minimum_cell()
from cctbx.sgtbx.lattice_symmetry import metric_subgroups
subgroups = metric_subgroups(
crystal_symmetry, max_delta=5, bravais_types_only=False
)
cb_op_inp_min = sgtbx.change_of_basis_op()
from dials.algorithms.symmetry.cosym import SymmetryAnalysis
analysis = SymmetryAnalysis(coords, sym_ops, subgroups, cb_op_inp_min)
assert analysis.best_solution.likelihood > 0.99
assert analysis.best_solution.confidence > 0.98
assert (
analysis.best_solution.subgroup["best_subsym"].space_group().type().number()
== 148
) # R -3 :H
assert (
str(analysis)
== """\
Scoring individual symmetry elements
----------------------------------------------
likelihood Z-CC CC Operator
----------------------------------------------
0.087 1.96 0.20 2 |(0, -1, 1)
0.087 1.96 0.20 2 |(-1, 0, 1)
0.949 10.00 1.00 *** 3^-1 |(1, 1, 1)
0.087 1.96 0.20 2 |(-1, 1, 0)
0.949 10.00 1.00 *** 3 |(1, 1, 1)
----------------------------------------------
Scoring all possible sub-groups
--------------------------------------------------------------------------------
Patterson group Likelihood NetZcc Zcc+ Zcc- delta Reindex operator
--------------------------------------------------------------------------------
R -3 :H *** 0.995 8.04 10.00 1.96 0.0 b-c,-a+c,a+b+c
P -1 0.003 -6.50 0.00 6.50 0.0 a,b,c
R -3 m :H 0.001 6.50 6.50 0.00 0.0 b-c,-a+c,a+b+c
C 1 2/m 1 0.000 -5.24 1.96 7.21 0.0 -a-b,a-b,c
C 1 2/m 1 0.000 -5.24 1.96 7.21 0.0 -b-c,b-c,a
C 1 2/m 1 0.000 -5.24 1.96 7.21 0.0 -a-c,-a+c,b
--------------------------------------------------------------------------------
Best solution: R -3 :H
Unit cell: (98.33, 98.33, 135.99, 90, 90, 120)
Reindex operator: b-c,-a+c,a+b+c
Laue group probability: 0.995
Laue group confidence: 0.994"""
)
d = analysis.as_dict()
assert d["sym_op_scores"][0] == {
"cc": pytest.approx(0.19620531091685714),
"operator": "-x,-z,-y",
"likelihood": pytest.approx(0.08665625555575088),
"stars": "",
"z_cc": pytest.approx(1.9620531091685713),
}
assert d["subgroup_scores"][0] == {
"confidence": pytest.approx(0.9940687431995551),
"z_cc_for": pytest.approx(9.999725360190128),
"stars": "***",
"patterson_group": "-R 3",
"max_angular_difference": 0.0,
"likelihood": pytest.approx(0.995493024305035),
"cb_op": "-1/3*x+2/3*y-1/3*z,-2/3*x+1/3*y+1/3*z,1/3*x+1/3*y+1/3*z",
"z_cc_against": pytest.approx(1.9620621986200772),
"unit_cell": pytest.approx(
(
98.32999999999998,
98.32999999999998,
135.99,
90.0,
90.0,
119.99999999999999,
)
),
"z_cc_net": pytest.approx(8.037663161570052),
}
```
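As a side note, this regression test can be exercised on its own; a minimal illustration using standard pytest test selection (the `-k` expression simply matches the test name above):

```python
# Run only the symmetry-analysis regression test, quietly.
import pytest

pytest.main(["-q", "-k", "test_symmetry_analysis"])
```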
#### File: dials/command_line/analyse_output.py
```python
from __future__ import absolute_import, division, print_function
import copy
import errno
import os
import math
import matplotlib
import libtbx.phil
from dials.array_family import flex
from dials.util import show_mail_on_error
# Offline backend
matplotlib.use("Agg")
from matplotlib import pyplot
RAD2DEG = 180 / math.pi
help_message = """
Generate a number of analysis plots from input integrated or indexed reflections.
Examples::
dials.analyse_output indexed.refl
dials.analyse_output refined.refl
dials.analyse_output integrated.refl
"""
# Create the phil parameters
phil_scope = libtbx.phil.parse(
"""
output {
directory = .
.type = str
.help = "The directory to store the results"
}
grid_size = Auto
.type = ints(size=2)
pixels_per_bin = 10
.type = int(value_min=1)
centroid_diff_max = None
.help = "Magnitude in pixels of shifts mapped to the extreme colours"
"in the heatmap plots centroid_diff_x and centroid_diff_y"
.type = float
.expert_level = 1
"""
)
def ensure_directory(path):
""" Make the directory if not already there. """
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def ensure_required(rlist, required):
""" Check which keys aren't present. """
not_present = []
for k in required:
if k not in rlist:
not_present.append(k)
if len(not_present) != 0:
print(" Skipping: following required fields not present:")
for k in not_present:
print(" %s" % k)
return False
return True
def determine_grid_size(rlist, grid_size=None):
from libtbx import Auto
panel_ids = rlist["panel"]
n_panels = flex.max(panel_ids) + 1
if grid_size is not None and grid_size is not Auto:
assert (grid_size[0] * grid_size[1]) >= n_panels, n_panels
return grid_size
n_cols = int(math.floor(math.sqrt(n_panels)))
n_rows = int(math.ceil(n_panels / n_cols))
return n_cols, n_rows
class per_panel_plot(object):
title = None
filename = None
cbar_ylabel = None
xlabel = "x"
ylabel = "y"
def __init__(self, rlist, directory, grid_size=None, pixels_per_bin=10):
min_x, max_x, min_y, max_y = self.get_min_max_xy(rlist)
panel_ids = rlist["panel"]
crystal_ids = rlist["id"]
n_crystals = flex.max(crystal_ids) + 1
n_panels = flex.max(panel_ids) + 1
n_cols, n_rows = determine_grid_size(rlist, grid_size=grid_size)
for i_crystal in range(n_crystals):
crystal_sel = crystal_ids == i_crystal
fig, axes = pyplot.subplots(n_rows, n_cols, squeeze=False)
self.gridsize = tuple(
int(math.ceil(i))
for i in (max_x / pixels_per_bin, max_y / pixels_per_bin)
)
clim = (1e8, 1e-8)
plots = []
i_panel = 0
for i_row in range(n_rows):
for i_col in range(n_cols):
panel_sel = panel_ids == i_panel
sel = panel_sel & crystal_sel
i_panel += 1
if n_panels > 1:
                        axes[i_row][i_col].set_title("Panel %d" % i_panel)
if (i_row + 1) == n_rows:
axes[i_row][i_col].set_xlabel(self.xlabel)
else:
pyplot.setp(axes[i_row][i_col].get_xticklabels(), visible=False)
if i_col == 0:
axes[i_row][i_col].set_ylabel(self.ylabel)
else:
pyplot.setp(axes[i_row][i_col].get_yticklabels(), visible=False)
if sel.count(True) > 0:
rlist_sel = rlist.select(sel)
if len(rlist_sel) <= 1:
ax = pyplot.scatter([], []) # create empty plot
else:
ax = self.plot_one_panel(axes[i_row][i_col], rlist_sel)
clim = (
min(clim[0], ax.get_clim()[0]),
max(clim[1], ax.get_clim()[1]),
)
plots.append(ax)
axes[i_row][i_col].set_xlim(min_x, max_x)
axes[i_row][i_col].set_ylim(min_y, max_y)
axes[i_row][i_col].axes.set_aspect("equal")
axes[i_row][i_col].invert_yaxis()
for p in plots:
p.set_clim(clim)
default_size = fig.get_size_inches()
if self.cbar_ylabel is not None and (n_cols, n_rows) == (1, 24):
fig.set_size_inches(
(n_cols * default_size[0], 0.15 * n_rows * default_size[1])
)
elif self.cbar_ylabel is not None and (n_cols, n_rows) == (5, 24):
fig.set_size_inches(
(n_cols * default_size[0], 0.5 * n_rows * default_size[1])
)
else:
fig.set_size_inches(
(n_cols * default_size[0], n_rows * default_size[1])
)
# pyplot.tight_layout()
if self.cbar_ylabel is not None:
cax = fig.add_axes([0.9, 0.1, 0.03, 0.8])
for ax in plots:
try:
cbar = fig.colorbar(ax, cax=cax)
cbar.ax.set_ylabel(self.cbar_ylabel, fontsize=n_cols * 10)
cbar.ax.tick_params(labelsize=n_cols * 8)
except Exception:
continue
else:
break
            if (n_cols, n_rows) == (1, 24):
fig.subplots_adjust(hspace=0.1 / (n_rows), right=0.8)
elif n_panels > 1:
fig.subplots_adjust(hspace=0.1 / n_rows, right=0.8)
if self.title is not None:
fig.suptitle(self.title, fontsize=n_cols * 12)
fig.savefig(os.path.join(directory, self.filename))
fig.set_size_inches(default_size)
pyplot.close()
def get_min_max_xy(self, rlist):
xc, yc, zc = rlist["xyzcal.px"].parts()
xo, yo, zo = rlist["xyzobs.px.value"].parts()
min_x = math.floor(min(flex.min(xc), flex.min(xo)))
min_y = math.floor(min(flex.min(yc), flex.min(yo)))
max_x = math.ceil(max(flex.max(xc), flex.max(xo)))
max_y = math.ceil(max(flex.max(yc), flex.max(yo)))
return min_x, max_x, min_y, max_y
def plot_one_panel(self, ax, rlist):
raise NotImplementedError()
class StrongSpotsAnalyser(object):
""" Analyse a list of strong spots. """
def __init__(self, directory):
""" Setup the directory. """
# Set the directory
self.directory = os.path.join(directory, "strong")
ensure_directory(self.directory)
# Set the required fields
self.required = ["xyzobs.px.value", "panel"]
def __call__(self, rlist):
""" Analyse the strong spots. """
from dials.util.command_line import Command
# Check we have the required fields
print("Analysing strong spots")
if not ensure_required(rlist, self.required):
return
        # Remove I_sigma <= 0 (strong spot tables may not carry summation variances)
        if "intensity.sum.variance" in rlist:
            selection = rlist["intensity.sum.variance"] <= 0
            if selection.count(True) > 0:
                rlist.del_selected(selection)
                print(" Removing %d reflections with variance <= 0" % selection.count(True))
if "flags" in rlist:
# Select only strong reflections
Command.start(" Selecting only strong reflections")
mask = rlist.get_flags(rlist.flags.strong)
if mask.count(True) > 0:
rlist = rlist.select(mask)
Command.end(" Selected %d strong reflections" % len(rlist))
# Look at distribution of spot counts
self.spot_count_per_image(rlist)
self.spot_count_per_panel(rlist)
def spot_count_per_image(self, rlist):
""" Analyse the spot count per image. """
x, y, z = rlist["xyzobs.px.value"].parts()
max_z = int(math.ceil(flex.max(z)))
ids = rlist["id"]
spot_count_per_image = []
for j in range(flex.max(ids) + 1):
spot_count_per_image.append(flex.int())
zsel = z.select(ids == j)
for i in range(max_z):
sel = (zsel >= i) & (zsel < (i + 1))
spot_count_per_image[j].append(sel.count(True))
colours = ["blue", "red", "green", "orange", "purple", "black"] * 10
        assert len(spot_count_per_image) <= len(colours)
fig = pyplot.figure()
ax = fig.add_subplot(111)
ax.set_title("Spot count per image")
for j in range(len(spot_count_per_image)):
ax.scatter(
list(range(len(spot_count_per_image[j]))),
spot_count_per_image[j],
s=5,
color=colours[j],
marker="o",
alpha=0.4,
)
ax.set_xlabel("Image #")
ax.set_ylabel("# spots")
pyplot.savefig(os.path.join(self.directory, "spots_per_image.png"))
pyplot.close()
def spot_count_per_panel(self, rlist):
""" Analyse the spot count per panel. """
panel = rlist["panel"]
if flex.max(panel) == 0:
# only one panel, don't bother generating a plot
return
        n_panels = int(flex.max(panel)) + 1  # panel ids are zero-based
spot_count_per_panel = flex.int()
for i in range(n_panels):
sel = (panel >= i) & (panel < (i + 1))
spot_count_per_panel.append(sel.count(True))
fig = pyplot.figure()
ax = fig.add_subplot(111)
ax.set_title("Spot count per panel")
ax.scatter(
list(range(len(spot_count_per_panel))),
spot_count_per_panel,
s=10,
color="blue",
marker="o",
alpha=0.4,
)
ax.set_xlabel("Panel #")
ax.set_ylabel("# spots")
pyplot.savefig(os.path.join(self.directory, "spots_per_panel.png"))
pyplot.close()
class CentroidAnalyser(object):
""" Analyse the reflection centroids. """
def __init__(
self, directory, grid_size=None, pixels_per_bin=10, centroid_diff_max=1.5
):
""" Setup the directory. """
# Set the directory
self.directory = os.path.join(directory, "centroid")
ensure_directory(self.directory)
self.grid_size = grid_size
self.pixels_per_bin = pixels_per_bin
self.centroid_diff_max = centroid_diff_max
# Set the required fields
self.required = [
"intensity.sum.value",
"intensity.sum.variance",
"xyzcal.px",
"xyzobs.px.value",
"xyzcal.mm",
"xyzobs.mm.value",
]
def __call__(self, rlist):
""" Analyse the reflection centroids. """
from dials.util.command_line import Command
# Check we have the required fields
print("Analysing reflection centroids")
if not ensure_required(rlist, self.required):
return
# Remove I_sigma <= 0
selection = rlist["intensity.sum.variance"] <= 0
if selection.count(True) > 0:
rlist.del_selected(selection)
print(" Removing %d reflections with variance <= 0" % selection.count(True))
# Remove partial reflections as their observed centroids won't be accurate
if "partiality" in rlist:
selection = rlist["partiality"] < 0.99
if selection.count(True) > 0 and selection.count(True) < selection.size():
rlist.del_selected(selection)
print(" Removing %d partial reflections" % selection.count(True))
# Select only integrated reflections
        Command.start(" Selecting only summation-integrated reflections")
mask = rlist.get_flags(rlist.flags.integrated_sum)
if mask.count(True) > 0:
threshold = 10
rlist = rlist.select(mask)
Command.end(" Selected %d summation-integrated reflections" % len(rlist))
else:
# Select only those reflections used in refinement
threshold = 0
mask = rlist.get_flags(rlist.flags.used_in_refinement)
rlist = rlist.select(mask)
Command.end(" Selected %d refined reflections" % len(rlist))
# Look at differences in calculated/observed position
print(" Analysing centroid differences with I/Sigma > %s" % threshold)
self.centroid_diff_hist(rlist, threshold)
print(" Analysing centroid differences in x/y with I/Sigma > %s" % threshold)
self.centroid_diff_xy(rlist, threshold)
self.centroid_xy_xz_zy_residuals(rlist, threshold)
print(" Analysing centroid differences in z with I/Sigma > %s" % threshold)
self.centroid_diff_z(rlist, threshold)
print(" Analysing centroid differences vs phi with I/Sigma > %s" % threshold)
self.centroid_mean_diff_vs_phi(rlist, threshold)
def centroid_diff_hist(self, rlist, threshold):
""" Analyse the correlations. """
I = rlist["intensity.sum.value"]
I_sig = flex.sqrt(rlist["intensity.sum.variance"])
I_over_S = I / I_sig
mask = I_over_S > threshold
rlist = rlist.select(mask)
assert len(rlist) > 0
xc, yc, zc = rlist["xyzcal.px"].parts()
xo, yo, zo = rlist["xyzobs.px.value"].parts()
xd = xo - xc
yd = yo - yc
zd = zo - zc
diff = flex.sqrt(xd * xd + yd * yd + zd * zd)
fig = pyplot.figure()
pyplot.title("Difference between observed and calculated")
pyplot.hist(diff, bins=20)
pyplot.xlabel("Difference in position (pixels)")
pyplot.ylabel("# reflections")
fig.savefig(os.path.join(self.directory, "centroid_diff_hist.png"))
pyplot.close()
def centroid_diff_xy(self, rlist, threshold):
""" Look at the centroid difference in x, y """
I = rlist["intensity.sum.value"]
I_sig = flex.sqrt(rlist["intensity.sum.variance"])
I_over_S = I / I_sig
mask = I_over_S > threshold
rlist = rlist.select(mask)
assert len(rlist) > 0
class diff_x_plot(per_panel_plot):
def __init__(self, *args, **kwargs):
self.title = "Difference between observed and calculated in X"
self.filename = "centroid_diff_x.png"
self.cbar_ylabel = "Difference in x position (pixels)"
self.centroid_diff_max = kwargs.pop("centroid_diff_max", None)
super(diff_x_plot, self).__init__(*args, **kwargs)
def plot_one_panel(self, ax, rlist):
xc, yc, zc = rlist["xyzcal.px"].parts()
xo, yo, zo = rlist["xyzobs.px.value"].parts()
xd = xo - xc
if self.centroid_diff_max is None:
self.centroid_diff_max = max(abs(xd))
hex_ax = ax.hexbin(
xc.as_numpy_array(),
yc.as_numpy_array(),
C=xd.as_numpy_array(),
gridsize=self.gridsize,
vmin=-1.0 * self.centroid_diff_max,
vmax=self.centroid_diff_max,
)
return hex_ax
class diff_y_plot(per_panel_plot):
def __init__(self, *args, **kwargs):
self.title = "Difference between observed and calculated in Y"
self.filename = "centroid_diff_y.png"
self.cbar_ylabel = "Difference in y position (pixels)"
self.centroid_diff_max = kwargs.pop("centroid_diff_max", None)
super(diff_y_plot, self).__init__(*args, **kwargs)
def plot_one_panel(self, ax, rlist):
xc, yc, zc = rlist["xyzcal.px"].parts()
xo, yo, zo = rlist["xyzobs.px.value"].parts()
yd = yo - yc
if self.centroid_diff_max is None:
self.centroid_diff_max = max(abs(yd))
hex_ax = ax.hexbin(
xc.as_numpy_array(),
yc.as_numpy_array(),
C=yd.as_numpy_array(),
gridsize=self.gridsize,
vmin=-1.0 * self.centroid_diff_max,
vmax=self.centroid_diff_max,
)
return hex_ax
diff_x_plot(
rlist,
self.directory,
grid_size=self.grid_size,
pixels_per_bin=self.pixels_per_bin,
centroid_diff_max=self.centroid_diff_max,
)
diff_y_plot(
rlist,
self.directory,
grid_size=self.grid_size,
pixels_per_bin=self.pixels_per_bin,
centroid_diff_max=self.centroid_diff_max,
)
    def centroid_diff_z(self, rlist, threshold):
        """ Look at the centroid difference in z. """
I = rlist["intensity.sum.value"]
I_sig = flex.sqrt(rlist["intensity.sum.variance"])
I_over_S = I / I_sig
mask = I_over_S > threshold
rlist = rlist.select(mask)
assert len(rlist) > 0
xc, yc, zc = rlist["xyzcal.px"].parts()
xo, yo, zo = rlist["xyzobs.px.value"].parts()
zd = zo - zc
fig = pyplot.figure()
pyplot.title("Difference between observed and calculated in Z")
cax = pyplot.hexbin(zc, zd, gridsize=100)
cax.axes.set_xlabel("z (images)")
cax.axes.set_ylabel("Difference in z position")
cbar = pyplot.colorbar(cax)
cbar.ax.set_ylabel("# Reflections")
fig.savefig(os.path.join(self.directory, "centroid_diff_z.png"))
pyplot.close()
def centroid_mean_diff_vs_phi(self, rlist, threshold):
I = rlist["intensity.sum.value"]
I_sig = flex.sqrt(rlist["intensity.sum.variance"])
I_over_S = I / I_sig
mask = I_over_S > threshold
rlist = rlist.select(mask)
assert len(rlist) > 0
xc, yc, zc = rlist["xyzcal.mm"].parts()
xo, yo, zo = rlist["xyzobs.mm.value"].parts()
dx = xc - xo
dy = yc - yo
dphi = (zc - zo) * RAD2DEG
mean_residuals_x = flex.double()
mean_residuals_y = flex.double()
mean_residuals_phi = flex.double()
rmsd_x = flex.double()
rmsd_y = flex.double()
rmsd_phi = flex.double()
phi_obs_deg = RAD2DEG * zo
phi = []
for i_phi in range(
int(math.floor(flex.min(phi_obs_deg))),
int(math.ceil(flex.max(phi_obs_deg))),
):
sel = (phi_obs_deg >= i_phi) & (phi_obs_deg < (i_phi + 1))
if sel.count(True) == 0:
continue
mean_residuals_x.append(flex.mean(dx.select(sel)))
mean_residuals_y.append(flex.mean(dy.select(sel)))
mean_residuals_phi.append(flex.mean(dphi.select(sel)))
rmsd_x.append(math.sqrt(flex.mean_sq(dx.select(sel))))
rmsd_y.append(math.sqrt(flex.mean_sq(dy.select(sel))))
rmsd_phi.append(math.sqrt(flex.mean_sq(dphi.select(sel))))
phi.append(i_phi)
fig = pyplot.figure()
ax = fig.add_subplot(311)
# fig.subplots_adjust(hspace=0.5)
pyplot.axhline(0, color="grey")
ax.scatter(phi, mean_residuals_x)
ax.set_xlabel("phi (deg)")
ax.set_ylabel(r"mean $\Delta$ x (mm)")
ax = fig.add_subplot(312)
pyplot.axhline(0, color="grey")
ax.scatter(phi, mean_residuals_y)
ax.set_xlabel("phi (deg)")
ax.set_ylabel(r"mean $\Delta$ y (mm)")
ax = fig.add_subplot(313)
pyplot.axhline(0, color="grey")
ax.scatter(phi, mean_residuals_phi)
ax.set_xlabel("phi (deg)")
ax.set_ylabel(r"mean $\Delta$ phi (deg)")
pyplot.savefig(os.path.join(self.directory, "centroid_mean_diff_vs_phi.png"))
pyplot.close()
fig = pyplot.figure()
ax = fig.add_subplot(311)
# fig.subplots_adjust(hspace=0.5)
pyplot.axhline(flex.mean(rmsd_x), color="grey")
ax.scatter(phi, rmsd_x)
ax.set_xlabel("phi (deg)")
ax.set_ylabel("rmsd x (mm)")
ax = fig.add_subplot(312)
pyplot.axhline(flex.mean(rmsd_y), color="grey")
ax.scatter(phi, rmsd_y)
ax.set_xlabel("phi (deg)")
ax.set_ylabel("rmsd y (mm)")
ax = fig.add_subplot(313)
pyplot.axhline(flex.mean(rmsd_phi), color="grey")
ax.scatter(phi, rmsd_phi)
ax.set_xlabel("phi (deg)")
ax.set_ylabel("rmsd phi (deg)")
pyplot.savefig(os.path.join(self.directory, "centroid_rmsd_vs_phi.png"))
pyplot.close()
def centroid_xy_xz_zy_residuals(self, rlist, threshold):
I = rlist["intensity.sum.value"]
I_sig = flex.sqrt(rlist["intensity.sum.variance"])
I_over_S = I / I_sig
mask = I_over_S > threshold
rlist = rlist.select(mask)
assert len(rlist) > 0
class residuals_xy_plot(per_panel_plot):
title = "Centroid residuals in X and Y"
filename = "centroid_xy_residuals.png"
cbar_ylabel = None
xlabel = "X (pixels)"
ylabel = "Y (pixels)"
def plot_one_panel(self, ax, rlist):
xc, yc, zc = rlist["xyzcal.px"].parts()
xo, yo, zo = rlist["xyzobs.px.value"].parts()
dx = xc - xo
dy = yc - yo
ax.axhline(0, color="grey")
ax.axvline(0, color="grey")
ax_xy = ax.scatter(
dx.as_numpy_array(), dy.as_numpy_array(), c="b", alpha=0.3
)
ax.set_aspect("equal")
return ax_xy
def get_min_max_xy(self, rlist):
xc, yc, zc = rlist["xyzcal.px"].parts()
xo, yo, zo = rlist["xyzobs.px.value"].parts()
dx = xc - xo
dy = yc - yo
min_x = math.floor(flex.min(dx))
min_y = math.floor(flex.min(dy))
max_x = math.ceil(flex.max(dx))
max_y = math.ceil(flex.max(dy))
return min_x, max_x, min_y, max_y
class residuals_zy_plot(per_panel_plot):
title = "Centroid residuals in Z and Y"
filename = "centroid_zy_residuals.png"
cbar_ylabel = None
xlabel = "Z (images)"
ylabel = "Y (pixels)"
def plot_one_panel(self, ax, rlist):
xc, yc, zc = rlist["xyzcal.px"].parts()
xo, yo, zo = rlist["xyzobs.px.value"].parts()
dy = yc - yo
dz = zc - zo
ax.axhline(0, color="grey")
ax.axvline(0, color="grey")
ax_zy = ax.scatter(
dz.as_numpy_array(), dy.as_numpy_array(), c="b", alpha=0.3
)
ax.set_aspect("equal")
return ax_zy
def get_min_max_xy(self, rlist):
_, yc, zc = rlist["xyzcal.px"].parts()
_, yo, zo = rlist["xyzobs.px.value"].parts()
dy = yc - yo
dz = zc - zo
min_x = math.floor(flex.min(dz))
min_y = math.floor(flex.min(dy))
max_x = math.ceil(flex.max(dz))
max_y = math.ceil(flex.max(dy))
return min_x, max_x, min_y, max_y
class residuals_xz_plot(per_panel_plot):
title = "Centroid residuals in X and Z"
filename = "centroid_xz_residuals.png"
cbar_ylabel = None
xlabel = "X (pixels)"
ylabel = "Z (images)"
def plot_one_panel(self, ax, rlist):
xc, yc, zc = rlist["xyzcal.px"].parts()
xo, yo, zo = rlist["xyzobs.px.value"].parts()
dx = xc - xo
dz = zc - zo
ax.axhline(0, color="grey")
ax.axvline(0, color="grey")
ax_xz = ax.scatter(
dx.as_numpy_array(), dz.as_numpy_array(), c="b", alpha=0.3
)
ax.set_aspect("equal")
return ax_xz
def get_min_max_xy(self, rlist):
xc, yc, zc = rlist["xyzcal.px"].parts()
xo, yo, zo = rlist["xyzobs.px.value"].parts()
dx = xc - xo
dz = zc - zo
min_x = math.floor(flex.min(dx))
min_y = math.floor(flex.min(dz))
max_x = math.ceil(flex.max(dx))
max_y = math.ceil(flex.max(dz))
return min_x, max_x, min_y, max_y
residuals_xy_plot(rlist, self.directory, grid_size=self.grid_size)
residuals_zy_plot(rlist, self.directory, grid_size=self.grid_size)
residuals_xz_plot(rlist, self.directory, grid_size=self.grid_size)
class BackgroundAnalyser(object):
""" Analyse the background. """
def __init__(self, directory, grid_size=None, pixels_per_bin=10):
""" Setup the directory. """
# Set the directory
self.directory = os.path.join(directory, "background")
ensure_directory(self.directory)
self.grid_size = grid_size
self.pixels_per_bin = pixels_per_bin
# Set the required fields
self.required = [
"background.mse",
"background.mean",
"intensity.sum.value",
"intensity.sum.variance",
"xyzcal.px",
]
    def __call__(self, rlist):
        """ Analyse the reflection background. """
from dials.util.command_line import Command
# Check we have the required fields
print("Analysing reflection backgrounds")
if not ensure_required(rlist, self.required):
return
selection = rlist["intensity.sum.variance"] <= 0
if selection.count(True) > 0:
rlist.del_selected(selection)
print(" Removing %d reflections with variance <= 0" % selection.count(True))
selection = rlist["background.mse"] < 0
if selection.count(True) > 0:
rlist.del_selected(selection)
print(
" Removing %d reflections with negative background model RMSD"
% selection.count(True)
)
selection = rlist["background.mean"] <= 0
if selection.count(True) > 0:
rlist.del_selected(selection)
print(
" Removing %d reflections with mean background <= 0"
% selection.count(True)
)
# Select only integrated reflections
        Command.start(" Selecting only integrated reflections")
mask = rlist.get_flags(rlist.flags.integrated)
if mask.count(True) == 0:
return
rlist = rlist.select(mask)
Command.end(" Selected %d integrated reflections" % len(rlist))
# Look at distribution of I/Sigma
print(" Analysing distribution of background mean")
self.mean_hist(rlist)
print(" Analysing distribution of background mean vs XY")
self.mean_vs_xy(rlist)
print(" Analysing distribution of background mean vs z")
self.mean_vs_z(rlist)
print(" Analysing distribution of background mean vs I/Sigma")
self.mean_vs_ios(rlist)
print(" Analysing distribution of background CVRMSD")
self.rmsd_hist(rlist)
print(" Analysing distribution of background CVRMSD vs XY")
self.rmsd_vs_xy(rlist)
print(" Analysing distribution of background CVRMSD vs z")
self.rmsd_vs_z(rlist)
print(" Analysing distribution of background CVRMSD vs I/Sigma")
self.rmsd_vs_ios(rlist)
    def mean_hist(self, rlist):
        """ Analyse the background model mean. """
MEAN = rlist["background.mean"]
fig = pyplot.figure()
pyplot.title("Background Model mean histogram")
pyplot.hist(MEAN, bins=20)
pyplot.xlabel("mean")
pyplot.ylabel("# reflections")
fig.savefig(os.path.join(self.directory, "background_model_mean_hist"))
pyplot.close()
    def mean_vs_xy(self, rlist):
        """ Plot background model mean vs X/Y. """
class mean_vs_xy_plot(per_panel_plot):
title = "Distribution of Background Model mean vs X/Y"
filename = "background_model_mean_vs_xy.png"
cbar_ylabel = "Background Model mean"
def plot_one_panel(self, ax, rlist):
MEAN = rlist["background.mean"]
x, y, z = rlist["xyzcal.px"].parts()
hex_ax = ax.hexbin(
x.as_numpy_array(),
y.as_numpy_array(),
C=MEAN.as_numpy_array(),
gridsize=self.gridsize,
vmin=0,
vmax=1,
)
return hex_ax
mean_vs_xy_plot(
rlist,
self.directory,
grid_size=self.grid_size,
pixels_per_bin=self.pixels_per_bin,
)
    def mean_vs_z(self, rlist):
        """ Plot background model mean vs Z. """
MEAN = rlist["background.mean"]
x, y, z = rlist["xyzcal.px"].parts()
fig = pyplot.figure()
pyplot.title("Distribution of Background Model mean vs Z")
cax = pyplot.hexbin(z, MEAN, gridsize=100)
cax.axes.set_xlabel("z (images)")
cax.axes.set_ylabel("Background Model mean")
cbar = pyplot.colorbar(cax)
cbar.ax.set_ylabel("# reflections")
fig.savefig(os.path.join(self.directory, "background_model_mean_vs_z.png"))
pyplot.close()
def mean_vs_ios(self, rlist):
""" Analyse the correlations. """
MEAN = rlist["background.mean"]
I = rlist["intensity.sum.value"]
I_sig = flex.sqrt(rlist["intensity.sum.variance"])
I_over_S = I / I_sig
mask = I_over_S > 0.1
I_over_S = I_over_S.select(mask)
MEAN = MEAN.select(mask)
fig = pyplot.figure()
pyplot.title("Background Model mean vs Log I/Sigma")
cax = pyplot.hexbin(flex.log(I_over_S), MEAN, gridsize=100)
cbar = pyplot.colorbar(cax)
cax.axes.set_xlabel("Log I/Sigma")
cax.axes.set_ylabel("Background Model mean")
cbar.ax.set_ylabel("# reflections")
fig.savefig(os.path.join(self.directory, "background_model_mean_vs_ios.png"))
pyplot.close()
def rmsd_hist(self, rlist):
""" Analyse the background RMSD. """
RMSD = flex.sqrt(rlist["background.mse"])
MEAN = rlist["background.mean"]
RMSD = RMSD / MEAN
fig = pyplot.figure()
        pyplot.title("Background Model CVRMSD histogram")
        pyplot.hist(RMSD, bins=20)
        pyplot.xlabel("CVRMSD")
pyplot.ylabel("# reflections")
fig.savefig(os.path.join(self.directory, "background_model_cvrmsd_hist"))
pyplot.close()
    def rmsd_vs_xy(self, rlist):
        """ Plot background model CVRMSD vs X/Y. """
class rmsd_vs_xy_plot(per_panel_plot):
title = "Distribution of Background Model CVRMSD vs X/Y"
filename = "background_model_cvrmsd_vs_xy.png"
cbar_ylabel = "Background Model CVRMSD"
def plot_one_panel(self, ax, rlist):
RMSD = flex.sqrt(rlist["background.mse"])
MEAN = rlist["background.mean"]
RMSD = RMSD / MEAN
x, y, z = rlist["xyzcal.px"].parts()
hex_ax = ax.hexbin(
x.as_numpy_array(),
y.as_numpy_array(),
C=RMSD.as_numpy_array(),
gridsize=self.gridsize,
vmin=0,
vmax=1,
)
return hex_ax
rmsd_vs_xy_plot(
rlist,
self.directory,
grid_size=self.grid_size,
pixels_per_bin=self.pixels_per_bin,
)
    def rmsd_vs_z(self, rlist):
        """ Plot background model CVRMSD vs Z. """
RMSD = flex.sqrt(rlist["background.mse"])
MEAN = rlist["background.mean"]
RMSD = RMSD / MEAN
x, y, z = rlist["xyzcal.px"].parts()
fig = pyplot.figure()
pyplot.title("Distribution of Background Model CVRMSD vs Z")
cax = pyplot.hexbin(z, RMSD, gridsize=100)
cax.axes.set_xlabel("z (images)")
cax.axes.set_ylabel("Background Model CVRMSD")
cbar = pyplot.colorbar(cax)
cbar.ax.set_ylabel("# reflections")
fig.savefig(os.path.join(self.directory, "background_model_cvrmsd_vs_z.png"))
pyplot.close()
def rmsd_vs_ios(self, rlist):
""" Analyse the correlations. """
RMSD = flex.sqrt(rlist["background.mse"])
MEAN = rlist["background.mean"]
RMSD = RMSD / MEAN
I = rlist["intensity.sum.value"]
I_sig = flex.sqrt(rlist["intensity.sum.variance"])
I_over_S = I / I_sig
mask = I_over_S > 0.1
I_over_S = I_over_S.select(mask)
RMSD = RMSD.select(mask)
fig = pyplot.figure()
pyplot.title("Background Model CVRMSD vs Log I/Sigma")
cax = pyplot.hexbin(flex.log(I_over_S), RMSD, gridsize=100)
cbar = pyplot.colorbar(cax)
cax.axes.set_xlabel("Log I/Sigma")
cax.axes.set_ylabel("Background Model CVRMSD")
cbar.ax.set_ylabel("# reflections")
fig.savefig(os.path.join(self.directory, "background_model_cvrmsd_vs_ios.png"))
pyplot.close()
class IntensityAnalyser(object):
""" Analyse the intensities. """
def __init__(self, directory, grid_size=None, pixels_per_bin=10):
""" Set up the directory. """
# Set the directory
self.directory = os.path.join(directory, "intensity")
ensure_directory(self.directory)
self.grid_size = grid_size
self.pixels_per_bin = pixels_per_bin
# Set the required fields
self.required = ["intensity.sum.value", "intensity.sum.variance", "xyzcal.px"]
    def __call__(self, rlist):
        """ Analyse the reflection intensities. """
from dials.util.command_line import Command
# FIXME Do the same and a comparison for intensity.prf
# Check we have the required fields
print("Analysing reflection intensities")
if not ensure_required(rlist, self.required):
return
selection = rlist["intensity.sum.variance"] <= 0
if selection.count(True) > 0:
rlist.del_selected(selection)
print(" Removing %d reflections with variance <= 0" % selection.count(True))
selection = rlist["intensity.sum.value"] <= 0
if selection.count(True) > 0:
rlist.del_selected(selection)
print(
" Removing %d reflections with intensity <= 0" % selection.count(True)
)
# Select only integrated reflections
        Command.start(" Selecting only integrated reflections")
mask = rlist.get_flags(rlist.flags.integrated)
if mask.count(True) == 0:
return
rlist = rlist.select(mask)
Command.end(" Selected %d integrated reflections" % len(rlist))
# Look at distribution of I/Sigma
print(" Analysing distribution of I/Sigma")
self.i_over_s_hist(rlist)
        print(" Analysing distribution of I/Sigma (sum) vs xy")
        self.i_over_s_vs_xy(rlist, "sum")
        print(" Analysing distribution of I/Sigma (prf) vs xy")
        self.i_over_s_vs_xy(rlist, "prf")
print(" Analysing distribution of I/Sigma vs z")
self.i_over_s_vs_z(rlist)
print(" Analysing number of background pixels used")
self.num_background_hist(rlist)
print(" Analysing number of foreground pixels used")
self.num_foreground_hist(rlist)
def i_over_s_hist(self, rlist):
""" Analyse the correlations. """
I = rlist["intensity.sum.value"]
I_sig = flex.sqrt(rlist["intensity.sum.variance"])
I_over_S = I / I_sig
fig = pyplot.figure()
pyplot.title("Log I/Sigma histogram")
pyplot.hist(flex.log(I_over_S), bins=20)
pyplot.xlabel("Log I/Sigma")
pyplot.ylabel("# reflections")
fig.savefig(os.path.join(self.directory, "ioversigma_hist"))
pyplot.close()
    def i_over_s_vs_xy(self, rlist, intensity_type):
        """ Plot I/Sigma vs X/Y """
        if "intensity.%s.variance" % intensity_type not in rlist:
            # Profile-fitted intensities may be absent (e.g. summation-only integration)
            return
class i_over_s_vs_xy_plot(per_panel_plot):
title = "Distribution of I/Sigma vs X/Y"
filename = "ioversigma_%s_vs_xy.png" % intensity_type
cbar_ylabel = "Log I/Sigma"
def plot_one_panel(self, ax, rlist):
I_sig = flex.sqrt(rlist["intensity.%s.variance" % intensity_type])
sel = I_sig > 0
rlist = rlist.select(sel)
I_sig = I_sig.select(sel)
I = rlist["intensity.%s.value" % intensity_type]
I_over_S = I / I_sig
x, y, z = rlist["xyzcal.px"].parts()
hex_ax = ax.hexbin(
x.as_numpy_array(),
y.as_numpy_array(),
C=flex.log(I_over_S),
gridsize=self.gridsize,
)
return hex_ax
i_over_s_vs_xy_plot(
rlist,
self.directory,
grid_size=self.grid_size,
pixels_per_bin=self.pixels_per_bin,
)
def i_over_s_vs_z(self, rlist):
""" Plot I/Sigma vs Z. """
I = rlist["intensity.sum.value"]
I_sig = flex.sqrt(rlist["intensity.sum.variance"])
I_over_S = I / I_sig
x, y, z = rlist["xyzcal.px"].parts()
fig = pyplot.figure()
pyplot.title("Distribution of I/Sigma vs Z")
cax = pyplot.hexbin(z, flex.log(I_over_S), gridsize=100)
cax.axes.set_xlabel("z (images)")
cax.axes.set_ylabel("Log I/Sigma")
cbar = pyplot.colorbar(cax)
cbar.ax.set_ylabel("# reflections")
fig.savefig(os.path.join(self.directory, "ioversigma_vs_z.png"))
pyplot.close()
def num_background_hist(self, rlist):
""" Analyse the number of background pixels. """
if "n_background" in rlist:
N = rlist["n_background"]
fig = pyplot.figure()
pyplot.title("Num Background Pixel Histogram")
pyplot.hist(N, bins=20)
pyplot.xlabel("Number of pixels")
pyplot.ylabel("# reflections")
fig.savefig(os.path.join(self.directory, "n_background_hist.png"))
pyplot.close()
def num_foreground_hist(self, rlist):
""" Analyse the number of foreground pixels. """
if "n_foreground" in rlist:
N = rlist["n_foreground"]
fig = pyplot.figure()
pyplot.title("Num Foreground Pixel Histogram")
pyplot.hist(N, bins=20)
pyplot.xlabel("Number of pixels")
pyplot.ylabel("# reflections")
fig.savefig(os.path.join(self.directory, "n_foreground_hist.png"))
pyplot.close()
class ReferenceProfileAnalyser(object):
""" Analyse the reference profiles. """
def __init__(self, directory, grid_size=None, pixels_per_bin=10):
""" Set up the directory. """
# Set the directory
self.directory = os.path.join(directory, "reference")
ensure_directory(self.directory)
self.grid_size = grid_size
self.pixels_per_bin = pixels_per_bin
# Set the required fields
self.required = [
"intensity.prf.value",
"intensity.prf.variance",
"xyzcal.px",
"profile.correlation",
]
def __call__(self, rlist):
""" Analyse the reference profiles. """
from dials.util.command_line import Command
# Check we have the required fields
print("Analysing reference profiles")
if not ensure_required(rlist, self.required):
return
# Select only integrated reflections
        Command.start(" Selecting only integrated reflections")
mask = rlist.get_flags(rlist.flags.integrated)
if mask.count(True) == 0:
return
rlist = rlist.select(mask)
Command.end(" Selected %d integrated reflections" % len(rlist))
# Analyse distribution of reference spots
print(" Analysing reference profile distribution vs x/y")
self.reference_xy(rlist)
print(" Analysing reference profile distribution vs z")
self.reference_z(rlist)
# Look at correlations between profiles
def ideal_correlations(filename, rlist):
""" Call for reference spots and all reflections. """
print(" Analysing reflection profile correlations")
self.ideal_reflection_corr_hist(rlist, filename)
print(" Analysing reflection profile correlations vs x/y")
self.ideal_reflection_corr_vs_xy(rlist, filename)
print(" Analysing reflection profile correlations vs z")
self.ideal_reflection_corr_vs_z(rlist, filename)
print(" Analysing reflection profile correlations vs I/Sigma")
self.ideal_reflection_corr_vs_ios(rlist, filename)
# Look at correlations between profiles
def correlations(filename, rlist):
""" Call for reference spots and all reflections. """
print(" Analysing reflection profile correlations")
self.reflection_corr_hist(rlist, filename)
print(" Analysing reflection profile correlations vs x/y")
self.reflection_corr_vs_xy(rlist, filename)
print(" Analysing reflection profile correlations vs z")
self.reflection_corr_vs_z(rlist, filename)
print(" Analysing reflection profile correlations vs I/Sigma")
self.reflection_corr_vs_ios(rlist, filename)
mask = rlist.get_flags(rlist.flags.reference_spot)
correlations("reference", rlist.select(mask))
correlations("reflection", rlist)
ideal_correlations("reference", rlist.select(mask))
ideal_correlations("reflection", rlist)
def reference_xy(self, rlist):
""" Analyse the distribution of reference profiles. """
mask = rlist.get_flags(rlist.flags.reference_spot)
rlist = rlist.select(mask)
class reference_xy_plot(per_panel_plot):
title = "Reference profiles binned in X/Y"
filename = "reference_xy.png"
cbar_ylabel = "# reflections"
def plot_one_panel(self, ax, rlist):
x, y, z = rlist["xyzcal.px"].parts()
hex_ax = ax.hexbin(
x.as_numpy_array(), y.as_numpy_array(), gridsize=self.gridsize
)
return hex_ax
reference_xy_plot(
rlist,
self.directory,
grid_size=self.grid_size,
pixels_per_bin=self.pixels_per_bin,
)
def reference_z(self, rlist):
""" Analyse the distribution of reference profiles. """
x, y, z = rlist["xyzcal.px"].parts()
fig = pyplot.figure()
pyplot.title("Reference profiles binned in Z")
pyplot.hist(z, bins=20)
pyplot.xlabel("z (images)")
pyplot.ylabel("# reflections")
fig.savefig(os.path.join(self.directory, "reference_z.png"))
pyplot.close()
def reflection_corr_hist(self, rlist, filename):
""" Analyse the correlations. """
corr = rlist["profile.correlation"]
fig = pyplot.figure()
pyplot.title("Reflection correlations histogram")
pyplot.hist(corr, bins=20)
pyplot.xlabel("Correlation with reference profile")
pyplot.ylabel("# reflections")
fig.savefig(os.path.join(self.directory, "%s_corr_hist" % filename))
pyplot.close()
def reflection_corr_vs_xy(self, rlist, filename):
""" Analyse the correlations. """
tmp_filename = filename
class corr_vs_xy_plot(per_panel_plot):
title = "Reflection correlations binned in X/Y"
filename = "%s_corr_vs_xy.png" % tmp_filename
cbar_ylabel = "Correlation with reference profile"
def plot_one_panel(self, ax, rlist):
corr = rlist["profile.correlation"]
x, y, z = rlist["xyzcal.px"].parts()
hex_ax = ax.hexbin(
x.as_numpy_array(),
y.as_numpy_array(),
C=corr.as_numpy_array(),
gridsize=self.gridsize,
vmin=0,
vmax=1,
)
return hex_ax
corr_vs_xy_plot(
rlist,
self.directory,
grid_size=self.grid_size,
pixels_per_bin=self.pixels_per_bin,
)
def reflection_corr_vs_z(self, rlist, filename):
""" Analyse the correlations. """
corr = rlist["profile.correlation"]
x, y, z = rlist["xyzcal.px"].parts()
fig = pyplot.figure()
pyplot.title("Reflection correlations vs Z")
cax = pyplot.hexbin(z, corr, gridsize=100)
cbar = pyplot.colorbar(cax)
cax.axes.set_xlabel("z (images)")
cax.axes.set_ylabel("Correlation with reference profile")
cbar.ax.set_ylabel("# reflections")
fig.savefig(os.path.join(self.directory, "%s_corr_vs_z.png" % filename))
pyplot.close()
def reflection_corr_vs_ios(self, rlist, filename):
""" Analyse the correlations. """
corr = rlist["profile.correlation"]
I = rlist["intensity.prf.value"]
I_sig = flex.sqrt(rlist["intensity.prf.variance"])
mask = I_sig > 0
I = I.select(mask)
I_sig = I_sig.select(mask)
corr = corr.select(mask)
I_over_S = I / I_sig
mask = I_over_S > 0.1
I_over_S = I_over_S.select(mask)
corr = corr.select(mask)
fig = pyplot.figure()
pyplot.title("Reflection correlations vs Log I/Sigma")
cax = pyplot.hexbin(flex.log(I_over_S), corr, gridsize=100)
cbar = pyplot.colorbar(cax)
cax.axes.set_xlabel("Log I/Sigma")
cax.axes.set_ylabel("Correlation with reference profile")
cbar.ax.set_ylabel("# reflections")
fig.savefig(os.path.join(self.directory, "%s_corr_vs_ios.png" % filename))
pyplot.close()
def ideal_reflection_corr_hist(self, rlist, filename):
""" Analyse the correlations. """
if "correlation.ideal.profile" in rlist:
corr = rlist["correlation.ideal.profile"]
pyplot.title("Reflection correlations histogram")
pyplot.hist(corr, bins=20)
pyplot.xlabel("Correlation with reference profile")
pyplot.ylabel("# reflections")
pyplot.savefig(
os.path.join(self.directory, "ideal_%s_corr_hist" % filename)
)
pyplot.close()
def ideal_reflection_corr_vs_xy(self, rlist, filename):
""" Analyse the correlations. """
if "correlation.ideal.profile" in rlist:
corr = rlist["correlation.ideal.profile"]
x, y, z = rlist["xyzcal.px"].parts()
pyplot.title("Reflection correlations binned in X/Y")
cax = pyplot.hexbin(x, y, C=corr, gridsize=100, vmin=0.0, vmax=1.0)
cbar = pyplot.colorbar(cax)
pyplot.xlabel("x (pixels)")
pyplot.ylabel("y (pixels)")
cbar.ax.set_ylabel("Correlation with reference profile")
pyplot.savefig(
os.path.join(self.directory, "ideal_%s_corr_vs_xy.png" % filename)
)
pyplot.close()
def ideal_reflection_corr_vs_z(self, rlist, filename):
""" Analyse the correlations. """
if "correlation.ideal.profile" in rlist:
corr = rlist["correlation.ideal.profile"]
x, y, z = rlist["xyzcal.px"].parts()
pyplot.title("Reflection correlations vs Z")
cax = pyplot.hexbin(z, corr, gridsize=100)
cbar = pyplot.colorbar(cax)
pyplot.xlabel("z (images)")
pyplot.ylabel("Correlation with reference profile")
cbar.ax.set_ylabel("# reflections")
pyplot.savefig(
os.path.join(self.directory, "ideal_%s_corr_vs_z.png" % filename)
)
pyplot.close()
def ideal_reflection_corr_vs_ios(self, rlist, filename):
""" Analyse the correlations. """
if "correlation.ideal.profile" in rlist:
corr = rlist["correlation.ideal.profile"]
I = rlist["intensity.prf.value"]
I_sig = flex.sqrt(rlist["intensity.prf.variance"])
mask = I_sig > 0
I = I.select(mask)
I_sig = I_sig.select(mask)
corr = corr.select(mask)
I_over_S = I / I_sig
mask = I_over_S > 0.1
I_over_S = I_over_S.select(mask)
corr = corr.select(mask)
pyplot.title("Reflection correlations vs Log I/Sigma")
cax = pyplot.hexbin(flex.log(I_over_S), corr, gridsize=100)
cbar = pyplot.colorbar(cax)
pyplot.xlabel("Log I/Sigma")
pyplot.ylabel("Correlation with reference profile")
cbar.ax.set_ylabel("# reflections")
pyplot.savefig(
os.path.join(self.directory, "ideal_%s_corr_vs_ios.png" % filename)
)
pyplot.close()
def analyse(rlist, directory, grid_size=None, pixels_per_bin=10, centroid_diff_max=1.5):
    """ Set up the analysers and run each of them on a copy of the reflection table. """
directory = os.path.join(directory, "analysis")
analysers = [
StrongSpotsAnalyser(directory),
CentroidAnalyser(
directory,
grid_size=grid_size,
pixels_per_bin=pixels_per_bin,
centroid_diff_max=centroid_diff_max,
),
BackgroundAnalyser(
directory, grid_size=grid_size, pixels_per_bin=pixels_per_bin
),
IntensityAnalyser(
directory, grid_size=grid_size, pixels_per_bin=pixels_per_bin
),
ReferenceProfileAnalyser(
directory, grid_size=grid_size, pixels_per_bin=pixels_per_bin
),
    ]
    # Do all the analysis.
for a in analysers:
a(copy.deepcopy(rlist))
def run():
from dials.util.options import OptionParser
# Create the parser
usage = "usage: dials.analyse_output [options] observations.refl"
parser = OptionParser(
usage=usage, phil=phil_scope, read_reflections=True, epilog=help_message
)
parser.add_option(
"--xkcd",
action="store_true",
dest="xkcd",
default=False,
help="Special drawing mode",
)
# Parse the command line arguments
params, options = parser.parse_args(show_diff_phil=True)
if options.xkcd:
pyplot.xkcd()
# Show the help
if len(params.input.reflections) != 1:
parser.print_help()
exit(0)
# Analyse the reflections
analyse(
params.input.reflections[0].data,
params.output.directory,
grid_size=params.grid_size,
pixels_per_bin=params.pixels_per_bin,
centroid_diff_max=params.centroid_diff_max,
)
if __name__ == "__main__":
with show_mail_on_error():
run()
```
#### File: dials/command_line/compute_delta_cchalf.py
```python
from __future__ import absolute_import, division, print_function
import collections
import logging
from math import sqrt
import matplotlib
from iotbx.reflection_file_reader import any_reflection_file
from libtbx.phil import parse
import dials.util
from dials.algorithms.statistics.delta_cchalf import PerImageCChalfStatistics
from dials.array_family import flex
from dials.util import Sorry
from dials.util.exclude_images import exclude_image_ranges_for_scaling
from dials.util.multi_dataset_handling import select_datasets_on_ids
matplotlib.use("Agg")
from matplotlib import pylab
logger = logging.getLogger("dials.command_line.compute_delta_cchalf")
help_message = """
This program computes delta CC1/2 values, i.e. the change in CC1/2 obtained when
each dataset (or group of images) is excluded in turn, and removes those whose
delta CC1/2 falls below mean - stdcutoff * sigma.
"""
# Set the phil scope
phil_scope = parse(
"""
input {
mtzfile = None
.type = str
.help = "We can also import an MTZ file"
}
mode = *dataset image_group
.type = choice
.help = "Perform analysis on whole datasets or batch groups"
group_size = 10
.type = int(value_min=1)
.help = "The number of images to group together when calculating delta"
"cchalf in image_group mode"
output {
experiments = "filtered.expt"
.type = str
.help = "The filtered experiments file"
reflections = "filtered.refl"
.type = str
.help = "The filtered reflections file"
table = "delta_cchalf.dat"
.type = str
.help = "A file with delta cchalf values"
}
nbins = 10
.type = int(value_min=1)
.help = "The number of resolution bins to use"
dmin = None
.type = float
.help = "The maximum resolution"
dmax = None
.type = float
.help = "The minimum resolution"
stdcutoff = 4.0
.type = float
.help = "Datasets with a delta cc half below (mean - stdcutoff*std) are removed"
output {
log = 'dials.compute_delta_cchalf.log'
.type = str
.help = "The log filename"
}
"""
)
class Script(object):
"""A class for running the script."""
def __init__(self, params, experiments, reflections):
"""Initialise the script."""
self.experiments = experiments
self.reflections = reflections
self.params = params
self.delta_cchalf_i = {}
self.results_summary = {
"dataset_removal": {
"mode": self.params.mode,
"stdcutoff": self.params.stdcutoff,
}
}
# Set up a named tuple
self.DataRecord = collections.namedtuple(
"DataRecord",
(
"unit_cell",
"space_group",
"miller_index",
"dataset",
"intensity",
"variance",
"identifiers",
"images",
),
)
def prepare_data(self):
"""Prepare the data into a DataRecord."""
if self.params.mode == "image_group":
for exp in self.experiments:
if not exp.scan:
raise Sorry("Cannot use mode=image_group with scanless experiments")
if len(self.experiments) > 0:
            if len(self.reflections) == 1:
                data = self.read_experiments(self.experiments, self.reflections[0])
            elif len(self.experiments) == len(self.reflections):
# need to join together reflections
joint_table = flex.reflection_table()
for table in self.reflections:
joint_table.extend(table)
self.reflections = [joint_table]
data = self.read_experiments(self.experiments, self.reflections[0])
else:
raise Sorry("Unequal number of reflection tables and experiments")
elif self.params.input.mtzfile is not None:
data = self.read_mtzfile(self.params.input.mtzfile)
else:
raise SystemExit
return data
def run(self):
"""Run the delta_cc_half algorithm."""
data = self.prepare_data()
# Create the statistics object
statistics = PerImageCChalfStatistics(
data.miller_index,
data.identifiers,
data.dataset,
data.images,
data.intensity,
data.variance,
data.unit_cell,
data.space_group,
self.params.nbins,
self.params.dmin,
self.params.dmax,
self.params.mode,
self.params.group_size,
)
self.delta_cchalf_i = statistics.delta_cchalf_i()
self.results_summary["mean_cc_half"] = statistics._cchalf_mean
# Print out the datasets in order of delta cc 1/2
sorted_datasets, sorted_cc_half_values = self.sort_deltacchalf_values(
self.delta_cchalf_i, self.results_summary
)
# Write a text file with delta cchalf value
logger.info("Writing table to %s", self.params.output.table)
with open(self.params.output.table, "w") as outfile:
for dataset, cchalf in zip(sorted_datasets, sorted_cc_half_values):
outfile.write("%d %f\n" % (dataset, cchalf))
# Remove datasets based on delta cc1/2
if self.experiments and len(self.reflections) == 1:
cutoff_value = self._calculate_cutoff_value(
self.delta_cchalf_i, self.params.stdcutoff
)
self.results_summary["dataset_removal"].update(
{"cutoff_value": cutoff_value}
)
below_cutoff = sorted_cc_half_values < cutoff_value
ids_to_remove = sorted_datasets.select(below_cutoff)
if self.params.mode == "dataset":
filtered_reflections = self.remove_datasets_below_cutoff(
self.experiments,
self.reflections[0],
ids_to_remove,
self.results_summary,
)
elif self.params.mode == "image_group":
filtered_reflections = self.remove_image_ranges_below_cutoff(
self.experiments,
self.reflections[0],
ids_to_remove,
statistics.image_group_to_expid_and_range,
statistics.expid_to_image_groups,
self.results_summary,
)
self.reflections = [filtered_reflections]
def read_experiments(self, experiments, reflections):
"""
Get information from experiments and reflections
"""
# Get space group and unit cell
space_group = None
unit_cell = []
exp_identifiers = []
for e in experiments:
if space_group is None:
space_group = e.crystal.get_space_group()
else:
assert (
space_group.type().number()
== e.crystal.get_space_group().type().number()
)
unit_cell.append(e.crystal.get_unit_cell())
exp_identifiers.append(e.identifier)
# get a list of the ids from the reflection table corresponding to exp_ids
identifiers = []
for expit in exp_identifiers:
for k in reflections.experiment_identifiers().keys():
if reflections.experiment_identifiers()[k] == expit:
identifiers.append(k)
break
# Selection of reflections
selection = ~(
reflections.get_flags(reflections.flags.bad_for_scaling, all=False)
)
outliers = reflections.get_flags(reflections.flags.outlier_in_scaling)
reflections = reflections.select(selection & ~outliers)
# Scale factor
inv_scale_factor = reflections["inverse_scale_factor"]
selection = inv_scale_factor > 0
reflections = reflections.select(selection)
inv_scale_factor = reflections["inverse_scale_factor"]
# Get the reflection data
index = reflections["id"]
miller_index = reflections["miller_index"]
intensity = reflections["intensity.scale.value"] / inv_scale_factor
variance = reflections["intensity.scale.variance"] / inv_scale_factor ** 2
        # calculate the image number of each observation (e.g. 0.0 <= z < 1.0 -> image 1)
images = flex.floor(reflections["xyzobs.px.value"].parts()[2]).iround() + 1
        # Return the data
return self.DataRecord(
unit_cell=unit_cell,
space_group=space_group,
miller_index=miller_index,
dataset=index,
intensity=intensity,
variance=variance,
identifiers=identifiers,
images=images,
)
def read_mtzfile(self, filename):
"""
Read the mtz file
"""
# Read the mtz file
reader = any_reflection_file(filename)
# Get the columns as miller arrays
miller_arrays = reader.as_miller_arrays(merge_equivalents=False)
# Select the desired columns
intensities = None
batches = None
for array in miller_arrays:
if array.info().labels == ["I", "SIGI"]:
intensities = array
if array.info().labels == ["BATCH"]:
batches = array
assert intensities is not None
assert batches is not None
assert len(batches.data()) == len(intensities.data())
# Get the unit cell and space group
unit_cell = intensities.unit_cell()
space_group = intensities.crystal_symmetry().space_group()
# The reflection data
miller_index = intensities.indices()
batch = batches.data()
intensity = intensities.data()
variance = intensities.sigmas() ** 2
# Create unit cell list
min_batch = min(batch)
dataset = batch - min_batch
num_datasets = max(dataset) + 1
unit_cell_list = [unit_cell for _ in range(num_datasets)]
        # Return the data. An MTZ file does not store DIALS experiment
        # identifiers or per-reflection image numbers, so (as an assumption)
        # the batch numbers stand in for image numbers and the numerical
        # dataset ids for identifiers.
        return self.DataRecord(
            unit_cell=unit_cell_list,
            space_group=space_group,
            miller_index=miller_index,
            dataset=dataset,
            intensity=intensity,
            variance=variance,
            identifiers=list(range(num_datasets)),
            images=batch,
        )
@staticmethod
def sort_deltacchalf_values(delta_cchalf_i, results_summary):
"""Return the sorted datasets and cchalf values.
Also add the sorted lists to the results summary. Datasets are sorted
from low to high based on deltacchalf values."""
datasets = list(delta_cchalf_i.keys())
sorted_index = sorted(
range(len(datasets)), key=lambda x: delta_cchalf_i[datasets[x]]
)
# sorted by deltacchalf from low to high
sorted_cc_half_values = flex.double([])
sorted_datasets = flex.int([])
for i in sorted_index:
val = delta_cchalf_i[datasets[i]]
logger.info("Dataset: %d, Delta CC 1/2: %.3f", datasets[i], 100 * val)
sorted_cc_half_values.append(val)
sorted_datasets.append(datasets[i])
results_summary["per_dataset_delta_cc_half_values"] = {
"datasets": list(sorted_datasets),
"delta_cc_half_values": list(sorted_cc_half_values),
}
return sorted_datasets, sorted_cc_half_values
@staticmethod
def _calculate_cutoff_value(delta_cchalf_i, stdcutoff):
Y = list(delta_cchalf_i.values())
mean = sum(Y) / len(Y)
sdev = sqrt(sum((yy - mean) ** 2 for yy in Y) / len(Y))
logger.info("\nmean delta_cc_half %s", (mean * 100))
logger.info("stddev delta_cc_half %s", (sdev * 100))
cutoff_value = mean - stdcutoff * sdev
logger.info("cutoff value: %s \n", (cutoff_value * 100))
return cutoff_value
@staticmethod
def remove_image_ranges_below_cutoff(
experiments,
reflections,
ids_to_remove,
image_group_to_expid_and_range,
expid_to_image_groups,
results_summary,
):
"""Remove image ranges from the datasets."""
n_valid_reflections = reflections.get_flags(
reflections.flags.bad_for_scaling, all=False
).count(False)
experiments_to_delete = []
exclude_images = []
image_ranges_removed = [] # track for results summary
n_removed_this_cycle = 1
while n_removed_this_cycle != 0:
other_potential_ids_to_remove = []
n_removed_this_cycle = 0
for id_ in sorted(ids_to_remove):
exp_id, image_range = image_group_to_expid_and_range[
id_
] # numerical id
identifier = reflections.experiment_identifiers()[exp_id]
if expid_to_image_groups[exp_id][-1] == id_: # is last group
image_ranges_removed.append([image_range, exp_id])
logger.info(
"Removing image range %s from experiment %s",
image_range,
identifier,
)
exclude_images.append(
[
identifier
+ ":"
+ str(image_range[0])
+ ":"
+ str(image_range[1])
]
)
del expid_to_image_groups[exp_id][-1]
n_removed_this_cycle += 1
else:
other_potential_ids_to_remove.append(id_)
ids_to_remove = other_potential_ids_to_remove
for id_ in other_potential_ids_to_remove:
exp_id, image_range = image_group_to_expid_and_range[id_]
identifier = reflections.experiment_identifiers()[exp_id]
logger.info(
"""Image range %s from experiment %s is below the cutoff, but not at the end of a sequence.""",
image_range,
identifier,
)
# Now remove individual batches
if -1 in reflections["id"]:
reflections = reflections.select(reflections["id"] != -1)
reflection_list = reflections.split_by_experiment_id()
reflection_list, experiments = exclude_image_ranges_for_scaling(
reflection_list, experiments, exclude_images
)
# check if any image groups were all outliers and missed by the analysis
# This catches an edge case where there is an image group full of
# outliers, which gets filtered out before the analysis but should
# be set as not a valid image range.
for exp in experiments:
if len(exp.scan.get_valid_image_ranges(exp.identifier)) > 1:
exp.scan.set_valid_image_ranges(
exp.identifier, [exp.scan.get_valid_image_ranges(exp.identifier)[0]]
)
logger.info(
"Limited image range for %s to %s due to scaling outlier group",
exp.identifier,
exp.scan.get_valid_image_ranges(exp.identifier),
)
# if a whole experiment has been excluded: need to remove it here
for exp in experiments:
if not exp.scan.get_valid_image_ranges(
exp.identifier
): # if all removed above
experiments_to_delete.append(exp.identifier)
if experiments_to_delete:
experiments, reflection_list = select_datasets_on_ids(
experiments, reflection_list, exclude_datasets=experiments_to_delete
)
assert len(reflection_list) == len(experiments)
output_reflections = flex.reflection_table()
for r in reflection_list:
output_reflections.extend(r)
n_valid_filtered_reflections = output_reflections.get_flags(
output_reflections.flags.bad_for_scaling, all=False
).count(False)
results_summary["dataset_removal"].update(
{
"image_ranges_removed": image_ranges_removed,
"experiments_fully_removed": experiments_to_delete,
"n_reflections_removed": n_valid_reflections
- n_valid_filtered_reflections,
}
)
return output_reflections
@staticmethod
def remove_datasets_below_cutoff(
experiments, reflections, ids_to_remove, results_summary
):
"""Remove the datasets with ids in ids_to_remove.
        Remove from the experiments and reflections and add information to the
results summary dict.
Returns:
output_reflections: The reflection table with data removed.
"""
n_valid_reflections = reflections.get_flags(
reflections.flags.bad_for_scaling, all=False
).count(False)
datasets_to_remove = []
for id_ in sorted(ids_to_remove):
logger.info("Removing dataset %d", id_)
datasets_to_remove.append(reflections.experiment_identifiers()[id_])
output_reflections = reflections.remove_on_experiment_identifiers(
datasets_to_remove
)
experiments.remove_on_experiment_identifiers(datasets_to_remove)
output_reflections.assert_experiment_identifiers_are_consistent(experiments)
n_valid_filtered_reflections = output_reflections.get_flags(
output_reflections.flags.bad_for_scaling, all=False
).count(False)
results_summary["dataset_removal"].update(
{
"experiments_fully_removed": datasets_to_remove,
"n_reflections_removed": n_valid_reflections
- n_valid_filtered_reflections,
}
)
return output_reflections
def write_experiments_and_reflections(self):
"""Save the reflections and experiments data."""
if self.experiments and len(self.reflections) == 1:
logger.info(
"Saving %d reflections to %s",
len(self.reflections[0]),
self.params.output.reflections,
)
self.reflections[0].as_file(self.params.output.reflections)
logger.info("Saving the experiments to %s", self.params.output.experiments)
self.experiments.as_file(self.params.output.experiments)
def plot_data(self):
"""Plot histogram and line plot of cc half values."""
fig, ax = pylab.subplots()
ax.hist(list(self.delta_cchalf_i.values()))
ax.set_xlabel("Delta CC 1/2")
fig.savefig("plot1.png")
X = list(self.delta_cchalf_i.keys())
Y = list(self.delta_cchalf_i.values())
fig, ax = pylab.subplots()
ax.plot(X, Y)
ax.set_xlabel("Dataset number")
ax.set_ylabel("Delta CC 1/2")
fig.savefig("plot2.png")
def run(args=None, phil=phil_scope):
"""Run the command-line script."""
import dials.util.log
from dials.util.options import OptionParser
from dials.util.options import flatten_reflections
from dials.util.options import flatten_experiments
usage = "dials.compute_delta_cchalf [options] scaled.expt scaled.refl"
parser = OptionParser(
usage=usage,
phil=phil,
epilog=help_message,
read_experiments=True,
read_reflections=True,
check_format=False,
)
params, _ = parser.parse_args(args=args, show_diff_phil=False)
dials.util.log.config(logfile=params.output.log)
experiments = flatten_experiments(params.input.experiments)
reflections = flatten_reflections(params.input.reflections)
if len(experiments) == 0 and not params.input.mtzfile:
parser.print_help()
return
script = Script(params, experiments, reflections)
script.run()
script.write_experiments_and_reflections()
script.plot_data()
if __name__ == "__main__":
with dials.util.show_mail_on_error():
run()
```
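The dataset-rejection criterion implemented in `_calculate_cutoff_value` reduces to a plain mean-minus-k-sigma rule. Below is a self-contained, hypothetical illustration (the delta CC1/2 values are made up, and stdcutoff is set to 1.0 purely so that the example rejects something; the program default is 4.0):

```python
# Standalone illustration of the cutoff rule used by _calculate_cutoff_value.
from math import sqrt

delta_cchalf = {0: 0.002, 1: 0.001, 2: -0.040, 3: 0.0015}  # hypothetical per-dataset values
stdcutoff = 1.0  # chosen for illustration only; the default in the phil scope is 4.0

values = list(delta_cchalf.values())
mean = sum(values) / len(values)
sdev = sqrt(sum((v - mean) ** 2 for v in values) / len(values))
cutoff = mean - stdcutoff * sdev

to_remove = sorted(ds for ds, v in delta_cchalf.items() if v < cutoff)
print("cutoff = %.4f; datasets below cutoff: %s" % (cutoff, to_remove))
```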
#### File: dials/command_line/resolutionizer.py
```python
from __future__ import absolute_import, division, print_function
import logging
import sys
import libtbx.phil
from dials.util import resolutionizer
from dials.util import log
from dials.util.options import OptionParser
from dials.util.options import flatten_reflections, flatten_experiments
from dials.util.version import dials_version
from dials.util.multi_dataset_handling import parse_multiple_datasets
logger = logging.getLogger("dials.resolutionizer")
help_message = """
This program estimates a resolution limit from scaled data, given either
dials-format scaled experiments and reflections or a scaled unmerged MTZ file.
"""
phil_scope = libtbx.phil.parse(
"""
include scope dials.util.resolutionizer.phil_defaults
output {
log = dials.resolutionizer.log
.type = path
}
""",
process_includes=True,
)
def run(args):
usage = (
"dials.resolutionizer [options] scaled.expt scaled.refl | scaled_unmerged.mtz"
)
parser = OptionParser(
usage=usage,
phil=phil_scope,
read_reflections=True,
read_experiments=True,
check_format=False,
epilog=help_message,
)
params, options, unhandled = parser.parse_args(
return_unhandled=True, show_diff_phil=True
)
reflections = flatten_reflections(params.input.reflections)
experiments = flatten_experiments(params.input.experiments)
if (not reflections or not experiments) and not unhandled:
parser.print_help()
return
if reflections and experiments and unhandled:
sys.exit(
"Must provide either scaled unmerged mtz OR dials-format scaled reflections and experiments files"
)
# Configure the logging
log.config(logfile=params.output.log)
logger.info(dials_version())
if len(unhandled) == 1:
scaled_unmerged = unhandled[0]
m = resolutionizer.Resolutionizer.from_unmerged_mtz(
scaled_unmerged, params.resolutionizer
)
else:
reflections = parse_multiple_datasets(reflections)
m = resolutionizer.Resolutionizer.from_reflections_and_experiments(
reflections, experiments, params.resolutionizer
)
m.resolution_auto()
if __name__ == "__main__":
run(sys.argv[1:])
```
#### File: dials/command_line/sequence_to_stills.py
```python
from __future__ import absolute_import, division, print_function
import logging
from libtbx.phil import parse
from scitbx import matrix
from dxtbx.model import MosaicCrystalSauter2014
from dxtbx.model.experiment_list import Experiment, ExperimentList
from dials.algorithms.refinement.prediction.managed_predictors import (
ExperimentsPredictorFactory,
)
from dials.array_family import flex
from dials.model.data import Shoebox
from dials.util import show_mail_on_error
from dials.util.options import OptionParser, flatten_experiments, flatten_reflections
logger = logging.getLogger("dials.command_line.sequence_to_stills")
# The phil scope
phil_scope = parse(
"""
output {
experiments = stills.expt
.type = str
.help = "Filename for the experimental models that have been converted to stills"
reflections = stills.refl
.type = str
.help = "Filename for the reflection tables with split shoeboxes (3D to 2D)"
domain_size_ang = None
.type = float
.help = "Override for domain size. If None, use the crystal's domain size, if"
"available"
half_mosaicity_deg = None
.type = float
.help = "Override for mosaic angle. If None, use the crystal's mosaic angle, if"
"available"
}
max_scan_points = None
.type = int
.expert_level = 2
.help = Limit number of scan points
"""
)
def sequence_to_stills(experiments, reflections, params):
assert len(reflections) == 1
reflections = reflections[0]
new_experiments = ExperimentList()
new_reflections = flex.reflection_table()
# This is the subset needed to integrate
for key in [
"id",
"imageset_id",
"shoebox",
"bbox",
"intensity.sum.value",
"intensity.sum.variance",
"entering",
"flags",
"miller_index",
"panel",
"xyzobs.px.value",
"xyzobs.px.variance",
]:
if key in reflections:
new_reflections[key] = type(reflections[key])()
elif key == "imageset_id":
assert len(experiments.imagesets()) == 1
reflections["imageset_id"] = flex.int(len(reflections), 0)
new_reflections["imageset_id"] = flex.int()
elif key == "entering":
reflections["entering"] = flex.bool(len(reflections), False)
new_reflections["entering"] = flex.bool()
else:
raise RuntimeError("Expected key not found in reflection table: %s" % key)
for expt_id, experiment in enumerate(experiments):
# Get the goniometer setting matrix
goniometer_setting_matrix = matrix.sqr(
experiment.goniometer.get_setting_rotation()
)
goniometer_axis = matrix.col(experiment.goniometer.get_rotation_axis())
step = experiment.scan.get_oscillation()[1]
refls = reflections.select(reflections["id"] == expt_id)
_, _, _, _, z1, z2 = refls["bbox"].parts()
# Create an experiment for each scan point
for i_scan_point in range(*experiment.scan.get_array_range()):
if params.max_scan_points and i_scan_point >= params.max_scan_points:
break
# The A matrix is the goniometer setting matrix for this scan point
# times the scan varying A matrix at this scan point. Note, the
# goniometer setting matrix for scan point zero will be the identity
# matrix and represents the beginning of the oscillation.
# For stills, the A matrix needs to be positioned at the midpoint of an
# oscillation step. Hence, here the goniometer setting matrix is rotated
# by a further half oscillation step.
A = (
goniometer_axis.axis_and_angle_as_r3_rotation_matrix(
angle=experiment.scan.get_angle_from_array_index(i_scan_point)
+ (step / 2),
deg=True,
)
* goniometer_setting_matrix
* matrix.sqr(experiment.crystal.get_A_at_scan_point(i_scan_point))
)
crystal = MosaicCrystalSauter2014(experiment.crystal)
crystal.set_A(A)
# Copy in mosaic parameters if available
if params.output.domain_size_ang is None and hasattr(
experiment.crystal, "get_domain_size_ang"
):
crystal.set_domain_size_ang(experiment.crystal.get_domain_size_ang())
elif params.output.domain_size_ang is not None:
crystal.set_domain_size_ang(params.output.domain_size_ang)
if params.output.half_mosaicity_deg is None and hasattr(
experiment.crystal, "get_half_mosaicity_deg"
):
crystal.set_half_mosaicity_deg(
experiment.crystal.get_half_mosaicity_deg()
)
elif params.output.half_mosaicity_deg is not None:
crystal.set_half_mosaicity_deg(params.output.half_mosaicity_deg)
new_experiment = Experiment(
detector=experiment.detector,
beam=experiment.beam,
crystal=crystal,
imageset=experiment.imageset.as_imageset()[
i_scan_point : i_scan_point + 1
],
)
new_experiments.append(new_experiment)
# Each reflection in a 3D shoebox can be found on multiple images.
# Slice the reflections such that any reflection on this scan point
# is included with this image
new_id = len(new_experiments) - 1
subrefls = refls.select((i_scan_point >= z1) & (i_scan_point < z2))
for refl in subrefls.rows():
assert i_scan_point in range(*refl["bbox"][4:6])
new_sb = Shoebox()
start = i_scan_point - refl["bbox"][4] # z1
new_sb.data = refl["shoebox"].data[start : start + 1, :, :]
new_sb.background = refl["shoebox"].background[start : start + 1, :, :]
new_sb.mask = refl["shoebox"].mask[start : start + 1, :, :]
intensity = new_sb.summed_intensity()
new_sb.bbox = tuple(
list(refl["bbox"])[0:4] + [0, 1]
) # keep the original shoebox but reset the z values
new_sb.panel = refl["panel"]
new_refl = {}
new_refl["id"] = new_refl["imageset_id"] = new_id
new_refl["shoebox"] = new_sb
new_refl["bbox"] = new_sb.bbox
new_refl["intensity.sum.value"] = intensity.observed.value
new_refl["intensity.sum.variance"] = intensity.observed.variance
for key in ["entering", "flags", "miller_index", "panel"]:
new_refl[key] = refl[key]
centroid = new_sb.centroid_foreground_minus_background()
new_refl["xyzobs.px.value"] = centroid.px.position
new_refl["xyzobs.px.variance"] = centroid.px.variance
new_reflections.append({})
for key in new_refl:
new_reflections[key][-1] = new_refl[key]
# Re-predict using the reflection slices and the stills predictors
ref_predictor = ExperimentsPredictorFactory.from_experiments(
new_experiments, force_stills=new_experiments.all_stills()
)
new_reflections = ref_predictor(new_reflections)
return (new_experiments, new_reflections)
def run(args=None, phil=phil_scope):
"""
Validate the arguments and load experiments/reflections for sequence_to_stills
Arguments:
args: The command line arguments to use. Defaults to sys.argv[1:]
phil: The phil_scope. Defaults to the master phil_scope for this program
"""
# The script usage
usage = "usage: dials.sequence_to_stills [options] [param.phil] models.expt reflections.refl"
# Create the parser
parser = OptionParser(
usage=usage,
phil=phil,
read_experiments=True,
read_reflections=True,
check_format=False,
epilog=__doc__,
)
params, options = parser.parse_args(args=args, show_diff_phil=True)
# Try to load the models and data
if not params.input.experiments or not params.input.reflections:
parser.print_help()
return
experiments = flatten_experiments(params.input.experiments)
reflections = flatten_reflections(params.input.reflections)
(new_experiments, new_reflections) = sequence_to_stills(
experiments, reflections, params
)
# Write out the output experiments, reflections
new_experiments.as_file(params.output.experiments)
new_reflections.as_file(params.output.reflections)
if __name__ == "__main__":
with show_mail_on_error():
run()
```
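The comment block inside `sequence_to_stills` about placing the A matrix at the midpoint of an oscillation step can be illustrated with plain numpy. This is a sketch of the same idea, not the dials/scitbx implementation; the axis, angles, and matrices are hypothetical placeholders.
```python
# Minimal numpy sketch of the mid-oscillation A-matrix construction described
# in the comments of sequence_to_stills(). All inputs are placeholders.
import numpy as np

def rotation_about_axis(axis, angle_deg):
    """Rodrigues rotation matrix for a unit axis and an angle in degrees."""
    axis = np.asarray(axis, dtype=float)
    axis /= np.linalg.norm(axis)
    a = np.radians(angle_deg)
    K = np.array([[0.0, -axis[2], axis[1]],
                  [axis[2], 0.0, -axis[0]],
                  [-axis[1], axis[0], 0.0]])
    return np.eye(3) + np.sin(a) * K + (1.0 - np.cos(a)) * (K @ K)

goniometer_axis = (1.0, 0.0, 0.0)   # hypothetical rotation axis
setting_matrix = np.eye(3)          # identity at scan point zero
A_scan_point = np.eye(3)            # placeholder scan-varying A matrix
phi, step = 10.0, 0.1               # angle at this scan point and oscillation width (deg)

# Rotate by a further half step so the still sits at the oscillation midpoint.
A_still = rotation_about_axis(goniometer_axis, phi + step / 2.0) @ setting_matrix @ A_scan_point
print(A_still.round(4))
```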
#### File: algorithms/polygon/test_polygon.py
```python
from __future__ import absolute_import, division, print_function
def test_polygon():
from dials.algorithms.polygon import polygon
x = 1
y = 1
vertices = [(0, 0), (2, 0), (2, 2), (0, 2)]
poly = polygon(vertices)
assert poly.is_inside(x, y)
poly = polygon([(3, 5), (40, 90), (80, 70), (50, 50), (70, 20)])
for p in [(42, 40), (16, 25), (64, 67), (16, 30), (48, 45), (21, 30)]:
assert poly.is_inside(p[0], p[1])
for p in [(59, 4), (70, 15), (57, 14), (21, 78), (37, 100), (88, 89)]:
assert not poly.is_inside(p[0], p[1])
```
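For reference, the property `polygon.is_inside` tests is the standard even-odd (ray-casting) point-in-polygon rule. The sketch below is a plain-Python illustration of that rule applied to the first case in the test above; it is not the dials implementation.
```python
# Standard even-odd (ray casting) point-in-polygon test, shown for comparison.
def is_inside(x, y, vertices):
    inside = False
    n = len(vertices)
    for i in range(n):
        x1, y1 = vertices[i]
        x2, y2 = vertices[(i + 1) % n]
        # Count crossings of the horizontal ray from (x, y) to +infinity.
        if (y1 > y) != (y2 > y):
            x_cross = x1 + (y - y1) * (x2 - x1) / (y2 - y1)
            if x < x_cross:
                inside = not inside
    return inside

assert is_inside(1, 1, [(0, 0), (2, 0), (2, 2), (0, 2)])
assert not is_inside(3, 3, [(0, 0), (2, 0), (2, 2), (0, 2)])
```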
#### File: test/command_line/test_slice_sweep.py
```python
from __future__ import absolute_import, division, print_function
import six.moves.cPickle as pickle
import pytest
import os
from libtbx import easy_run
from dxtbx.model.experiment_list import ExperimentListFactory
def test_slice_sweep_and_compare_with_expected_results(dials_regression, run_in_tmpdir):
# use the i04_weak_data for this test
data_dir = os.path.join(dials_regression, "refinement_test_data", "i04_weak_data")
experiments_path = os.path.join(data_dir, "experiments.json")
pickle_path = os.path.join(data_dir, "indexed_strong.pickle")
for pth in (experiments_path, pickle_path):
assert os.path.exists(pth)
cmd = (
"dials.slice_sweep "
+ experiments_path
+ " "
+ pickle_path
+ ' "image_range=1 20"'
)
result = easy_run.fully_buffered(command=cmd).raise_if_errors()
# load results
sliced_exp = ExperimentListFactory.from_json_file(
"experiments_1_20.expt", check_format=False
)[0]
with open("indexed_strong_1_20.refl", "rb") as f:
sliced_refs = pickle.load(f)
# simple test of results
assert sliced_exp.scan.get_image_range() == (1, 20)
assert len(sliced_refs) == 3670
def test_slice_sweep_with_first_images_missing(dials_regression, run_in_tmpdir):
"""Test slicing where scan image range does not start at 1, exercising
a case that exposed a bug"""
# use the i04_weak_data for this test
data_dir = os.path.join(dials_regression, "refinement_test_data", "i04_weak_data")
experiments_path = os.path.join(data_dir, "experiments.json")
# first slice
cmd = "dials.slice_sweep " + experiments_path + " image_range=5,20"
result = easy_run.fully_buffered(command=cmd).raise_if_errors()
# second slice
cmd = "dials.slice_sweep experiments_5_20.expt image_range=10,20"
result = easy_run.fully_buffered(command=cmd).raise_if_errors()
sliced_exp = ExperimentListFactory.from_json_file(
"experiments_5_20_10_20.expt", check_format=False
)[0]
assert sliced_exp.scan.get_image_range() == (10, 20)
assert sliced_exp.scan.get_array_range() == (9, 20)
assert sliced_exp.scan.get_oscillation()[0] == pytest.approx(83.35)
```
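The second test above slices a scan twice, so the sliced scan's first oscillation angle accumulates one oscillation width per dropped leading image. Below is a small sketch of that arithmetic with made-up numbers; it illustrates the expected bookkeeping, not the dials.slice_sweep code.
```python
# Illustrative scan-slicing arithmetic; the angles and widths are hypothetical.
def slice_scan(image_range, osc_start, osc_width, new_range):
    """Return (new image range, first oscillation angle) of the sliced scan."""
    first, last = new_range
    assert image_range[0] <= first <= last <= image_range[1]
    new_osc_start = osc_start + (first - image_range[0]) * osc_width
    return (first, last), new_osc_start

# Slice images 5-20 out of 1-20, then 10-20 out of the result.
rng, start = slice_scan((1, 20), 0.0, 0.1, (5, 20))
rng, start = slice_scan(rng, start, 0.1, (10, 20))
print(rng, round(start, 2))  # (10, 20) 0.9 with these placeholder values
```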
#### File: test/command_line/test_split_experiments.py
```python
from __future__ import absolute_import, division, print_function
"""Tests for dials.split_experiments when experiment ids are set"""
import procrunner
from dials.array_family import flex
from dxtbx.model import Beam, Experiment, ExperimentList
from dxtbx.model.experiment_list import ExperimentListFactory
def generate_exp(wavelength=1):
"""Generate an experiment containing a beam with a given wavelength."""
beam = Beam(direction=(0.0, 0.0, 1.0), wavelength=wavelength)
exp = Experiment(beam=beam)
return exp
def test_split_by_wavelength(tmpdir):
"""Test the split_by_wavelength option of dials.split_experiments"""
experiments = ExperimentList()
exp = generate_exp(wavelength=1.0)
exp.identifier = "0"
experiments.append(exp)
exp = generate_exp(wavelength=0.5)
exp.identifier = "1"
experiments.append(exp)
reflections = flex.reflection_table()
reflections["id"] = flex.int([0, 1])
reflections["intensity"] = flex.double([100.0, 200.0])
reflections.experiment_identifiers()[0] = "0"
reflections.experiment_identifiers()[1] = "1"
experiments.as_json(tmpdir.join("tmp.expt").strpath)
reflections.as_file(tmpdir.join("tmp.refl").strpath)
result = procrunner.run(
["dials.split_experiments", "tmp.expt", "tmp.refl", "by_wavelength=True"],
working_directory=tmpdir,
)
assert not result.returncode and not result.stderr
for i, (wl, ids, intensity) in enumerate(
zip([0.5, 1.0], ["1", "0"], [200.0, 100.0])
):
assert tmpdir.join("split_%d.expt" % i).check()
assert tmpdir.join("split_%d.refl" % i).check()
exp_single = ExperimentListFactory.from_json_file(
tmpdir.join("split_%d.expt" % i).strpath, check_format=False
)
ref_single = flex.reflection_table.from_file(
tmpdir.join("split_%d.refl" % i).strpath
)
assert exp_single[0].beam.get_wavelength() == wl
assert exp_single[0].identifier == ids
id_ = ref_single["id"][0]
assert ref_single.experiment_identifiers()[id_] == ids
assert list(ref_single["intensity"]) == [intensity]
# Now test for successful error handling if no identifiers set.
experiments[0].identifier = ""
experiments[1].identifier = ""
experiments.as_json(tmpdir.join("tmp.expt").strpath)
result = procrunner.run(
["dials.split_experiments", "tmp.expt", "tmp.refl", "by_wavelength=True"],
working_directory=tmpdir,
)
assert result.returncode == 1
assert result.stderr.startswith(b"Sorry")
experiments[0].identifier = "0"
experiments[1].identifier = "1"
del reflections.experiment_identifiers()[0]
del reflections.experiment_identifiers()[1]
experiments.as_json(tmpdir.join("tmp.expt").strpath)
reflections.as_file(tmpdir.join("tmp.refl").strpath)
result = procrunner.run(
["dials.split_experiments", "tmp.expt", "tmp.refl", "by_wavelength=True"],
working_directory=tmpdir,
)
assert result.returncode == 1
assert result.stderr.startswith(b"Sorry")
```
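The loop over `zip([0.5, 1.0], ["1", "0"], ...)` above reflects that the split output files are ordered by ascending wavelength. Here is a dependency-free sketch of that grouping and ordering; the identifiers and wavelengths are the hypothetical ones used in the test.
```python
# Group experiment identifiers by wavelength and emit them in ascending
# wavelength order, mirroring the file ordering asserted by the test above.
from collections import defaultdict

wavelengths = {"0": 1.0, "1": 0.5}          # identifier -> wavelength
by_wavelength = defaultdict(list)
for identifier, wl in wavelengths.items():
    by_wavelength[wl].append(identifier)

for i, wl in enumerate(sorted(by_wavelength)):
    print("split_%d.expt" % i, "wavelength", wl, "identifiers", by_wavelength[wl])
# -> split_0.expt gets the 0.5 A experiment, split_1.expt the 1.0 A one.
```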
#### File: test/command_line/test_spot_counts_per_image.py
```python
from __future__ import absolute_import, division, print_function
import os
from libtbx import easy_run
from glob import glob
def test_spot_counts_per_image(dials_data, run_in_tmpdir):
path = dials_data("centroid_test_data").strpath
# import the data
cmd = "dials.import %s output.experiments=imported.expt" % " ".join(
glob(os.path.join(path, "*.cbf"))
)
easy_run.fully_buffered(cmd).raise_if_errors()
assert os.path.exists("imported.expt")
# find the spots
cmd = "dials.find_spots imported.expt min_spot_size=3"
easy_run.fully_buffered(cmd).raise_if_errors()
assert os.path.exists("strong.refl")
cmd = "dials.spot_counts_per_image imported.expt strong.refl plot=spot_counts.png"
result = easy_run.fully_buffered(cmd).raise_if_errors()
assert os.path.exists("spot_counts.png"), result.show_stdout()
assert (
"| image | #spots | #spots_no_ice | total_intensity |"
+ " d_min | d_min (distl method 1) | d_min (distl method 2) |"
in result.stdout_lines
), result.stdout_lines
```
#### File: test/command_line/test_spotfinder.py
```python
from __future__ import absolute_import, division, print_function
import six.moves.cPickle as pickle
import os
import procrunner
import pytest
from dials.array_family import flex # noqa: F401, import dependency
def test_find_spots_from_images(dials_data, tmpdir):
result = procrunner.run(
[
"dials.find_spots",
"output.reflections=spotfinder.refl",
"output.shoeboxes=True",
"algorithm=dispersion",
]
+ [
f.strpath for f in dials_data("centroid_test_data").listdir("centroid*.cbf")
],
working_directory=tmpdir.strpath,
)
assert not result.returncode and not result.stderr
assert tmpdir.join("spotfinder.refl").check(file=1)
with tmpdir.join("spotfinder.refl").open("rb") as f:
reflections = pickle.load(f)
assert len(reflections) in range(653, 655)
refl = reflections[0]
assert refl["intensity.sum.value"] == pytest.approx(42)
assert refl["bbox"] == pytest.approx((1398, 1400, 513, 515, 0, 1))
assert refl["xyzobs.px.value"] == pytest.approx(
(1399.1190476190477, 514.2142857142857, 0.5)
)
assert "shoebox" in reflections
def test_find_spots_with_resolution_filter(dials_data, tmpdir):
result = procrunner.run(
[
"dials.find_spots",
"output.reflections=spotfinder.refl",
"output.shoeboxes=False",
"algorithm=dispersion",
"filter.d_min=2",
"filter.d_max=15",
]
+ [
f.strpath for f in dials_data("centroid_test_data").listdir("centroid*.cbf")
],
working_directory=tmpdir.strpath,
)
assert not result.returncode and not result.stderr
assert tmpdir.join("spotfinder.refl").check(file=1)
with tmpdir.join("spotfinder.refl").open("rb") as f:
reflections = pickle.load(f)
assert len(reflections) in range(467, 469)
assert "shoebox" not in reflections
def test_find_spots_with_hot_mask(dials_data, tmpdir):
# now write a hot mask
result = procrunner.run(
[
"dials.find_spots",
"write_hot_mask=True",
"output.reflections=spotfinder.refl",
"algorithm=dispersion",
"output.shoeboxes=False",
]
+ [
f.strpath for f in dials_data("centroid_test_data").listdir("centroid*.cbf")
],
working_directory=tmpdir.strpath,
)
assert not result.returncode and not result.stderr
assert tmpdir.join("spotfinder.refl").check(file=1)
assert tmpdir.join("hot_mask_0.pickle").check(file=1)
with tmpdir.join("spotfinder.refl").open("rb") as f:
reflections = pickle.load(f)
assert len(reflections) in range(653, 655)
assert "shoebox" not in reflections
with tmpdir.join("hot_mask_0.pickle").open("rb") as f:
mask = pickle.load(f)
assert len(mask) == 1
assert mask[0].count(False) == 12
def test_find_spots_with_hot_mask_with_prefix(dials_data, tmpdir):
# now write a hot mask
result = procrunner.run(
[
"dials.find_spots",
"write_hot_mask=True",
"hot_mask_prefix=my_hot_mask",
"output.reflections=spotfinder.refl",
"output.shoeboxes=False",
"algorithm=dispersion",
]
+ [
f.strpath for f in dials_data("centroid_test_data").listdir("centroid*.cbf")
],
working_directory=tmpdir.strpath,
)
assert not result.returncode and not result.stderr
assert tmpdir.join("spotfinder.refl").check(file=1)
assert tmpdir.join("my_hot_mask_0.pickle").check(file=1)
with tmpdir.join("spotfinder.refl").open("rb") as f:
reflections = pickle.load(f)
assert len(reflections) in range(653, 655)
assert "shoebox" not in reflections
with tmpdir.join("my_hot_mask_0.pickle").open("rb") as f:
mask = pickle.load(f)
assert len(mask) == 1
assert mask[0].count(False) == 12
def test_find_spots_with_generous_parameters(dials_data, tmpdir):
# now with more generous parameters
result = procrunner.run(
[
"dials.find_spots",
"min_spot_size=3",
"max_separation=3",
"output.reflections=spotfinder.refl",
"algorithm=dispersion",
]
+ [
f.strpath for f in dials_data("centroid_test_data").listdir("centroid*.cbf")
],
working_directory=tmpdir.strpath,
)
assert not result.returncode and not result.stderr
assert tmpdir.join("spotfinder.refl").check(file=1)
with tmpdir.join("spotfinder.refl").open("rb") as f:
reflections = pickle.load(f)
assert len(reflections) in range(678, 680)
def test_find_spots_with_user_defined_mask(dials_data, tmpdir):
# Now with a user defined mask
result = procrunner.run(
[
"dials.find_spots",
"output.reflections=spotfinder.refl",
"output.shoeboxes=True",
"algorithm=dispersion",
"lookup.mask="
+ dials_data("centroid_test_data").join("mask.pickle").strpath,
]
+ [
f.strpath for f in dials_data("centroid_test_data").listdir("centroid*.cbf")
],
working_directory=tmpdir.strpath,
)
assert not result.returncode and not result.stderr
assert tmpdir.join("spotfinder.refl").check(file=1)
with tmpdir.join("spotfinder.refl").open("rb") as f:
reflections = pickle.load(f)
from dxtbx.model.experiment_list import ExperimentListFactory
experiments = ExperimentListFactory.from_json_file(
dials_data("centroid_test_data").join("experiments.json").strpath
)
assert len(experiments) == 1
imageset = experiments.imagesets()[0]
detector = imageset.get_detector()
beam = imageset.get_beam()
for x, y, z in reflections["xyzobs.px.value"]:
d = detector[0].get_resolution_at_pixel(beam.get_s0(), (x, y))
assert d >= 3
def test_find_spots_with_user_defined_region(dials_data, tmpdir):
result = procrunner.run(
[
"dials.find_spots",
"output.reflections=spotfinder.refl",
"output.shoeboxes=True",
"region_of_interest=800,1200,800,1200",
]
+ [
f.strpath for f in dials_data("centroid_test_data").listdir("centroid*.cbf")
],
working_directory=tmpdir.strpath,
)
assert not result.returncode and not result.stderr
assert tmpdir.join("spotfinder.refl").check(file=1)
with tmpdir.join("spotfinder.refl").open("rb") as f:
reflections = pickle.load(f)
x, y, z = reflections["xyzobs.px.value"].parts()
assert x.all_ge(800)
assert y.all_ge(800)
assert x.all_lt(1200)
assert y.all_lt(1200)
def test_find_spots_with_xfel_stills(dials_regression, tmpdir):
# now with XFEL stills
result = procrunner.run(
[
"dials.find_spots",
os.path.join(
dials_regression,
"spotfinding_test_data",
"idx-s00-20131106040302615.cbf",
),
"output.reflections=spotfinder.refl",
"algorithm=dispersion",
],
working_directory=tmpdir.strpath,
)
assert not result.returncode and not result.stderr
assert tmpdir.join("spotfinder.refl").check(file=1)
with tmpdir.join("spotfinder.refl").open("rb") as f:
reflections = pickle.load(f)
assert len(reflections) == 2643
```
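The user-defined-mask test ends by checking that every surviving centroid sits at a resolution of at least 3 Å. A minimal sketch of that style of filter is shown below; `resolution_at` is a hypothetical stand-in for the `detector[0].get_resolution_at_pixel(beam.get_s0(), (x, y))` call used in the test.
```python
# Illustrative resolution filter over spot centroids; 'resolution_at' is a
# hypothetical stand-in for the detector/beam geometry call in the test above.
def filter_by_resolution(centroids, resolution_at, d_min):
    return [(x, y, z) for (x, y, z) in centroids if resolution_at(x, y) >= d_min]

centroids = [(100.0, 200.0, 0.5), (1400.0, 500.0, 0.5)]      # hypothetical spots
kept = filter_by_resolution(centroids, lambda x, y: 3.5, d_min=3.0)
assert kept == centroids  # every spot passes with this constant-resolution stub
```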
#### File: test/util/test_options.py
```python
from __future__ import absolute_import, division, print_function
from mock import Mock
from dials.util.options import flatten_reflections, flatten_experiments, OptionParser
from dials.array_family import flex
def test_can_read_headerless_h5_and_no_detector_is_present(dials_data):
data_h5 = dials_data("vmxi_thaumatin").join("image_15799_data_000001.h5").strpath
parser = OptionParser(read_experiments=True, read_experiments_from_images=True)
params, options = parser.parse_args([data_h5])
experiments = flatten_experiments(params.input.experiments)
assert len(experiments) == 1
assert not experiments[0].detector
def mock_reflection_file_object(id_=0, identifier=True):
"""Create a mock reflection_file_object."""
fileobj = Mock()
r = flex.reflection_table()
r["id"] = flex.int([-1, id_, id_])
if identifier:
r.experiment_identifiers()[id_] = str(id_)
fileobj.data = r
return fileobj
def mock_two_reflection_file_object(ids=[0, 2]):
"""Create a mock reflection_file_object with two datasets."""
fileobj = Mock()
r = flex.reflection_table()
r["id"] = flex.int([-1, ids[0], ids[0], ids[1], ids[1]])
r.experiment_identifiers()[ids[0]] = str(ids[0])
r.experiment_identifiers()[ids[1]] = str(ids[1])
fileobj.data = r
return fileobj
def test_flatten_experiments_updating_id_values():
"""Test the correct handling of duplicate table id values.
Note that this function does not have the ability to update the
experiment string identifier, only to ensure that the table id values
do not clash (it is not even possible to load multiple experiments
with the same identifier).
"""
# Test the case of two single reflection tables.
file_list = [mock_reflection_file_object(id_=0), mock_reflection_file_object(id_=0)]
rs = flatten_reflections(file_list)
assert rs[0] is file_list[0].data
assert list(rs[0]["id"]) == [-1, 0, 0]
assert list(rs[0].experiment_identifiers().keys()) == [0]
assert list(rs[0].experiment_identifiers().values()) == ["0"]
assert rs[1] is file_list[1].data
assert list(rs[1]["id"]) == [-1, 1, 1]
assert list(rs[1].experiment_identifiers().keys()) == [1]
assert list(rs[1].experiment_identifiers().values()) == ["0"]
# Now test the case where one reflection table contains two experiments
file_list = [mock_two_reflection_file_object(), mock_reflection_file_object(id_=0)]
rs = flatten_reflections(file_list)
assert rs[0] is file_list[0].data
assert list(rs[0]["id"]) == [-1, 0, 0, 1, 1]
assert list(rs[0].experiment_identifiers().keys()) == [0, 1]
assert list(rs[0].experiment_identifiers().values()) == ["0", "2"]
assert rs[1] is file_list[1].data
assert list(rs[1]["id"]) == [-1, 2, 2]
assert list(rs[1].experiment_identifiers().keys()) == [2]
assert list(rs[1].experiment_identifiers().values()) == ["0"]
file_list = [
mock_reflection_file_object(id_=0),
mock_two_reflection_file_object(ids=[1, 2]),
]
rs = flatten_reflections(file_list)
assert rs[0] is file_list[0].data
assert list(rs[0]["id"]) == [-1, 0, 0]
assert list(rs[0].experiment_identifiers().keys()) == [0]
assert list(rs[0].experiment_identifiers().values()) == ["0"]
assert rs[1] is file_list[1].data
assert list(rs[1]["id"]) == [-1, 1, 1, 2, 2]
assert list(rs[1].experiment_identifiers().keys()) == [1, 2]
assert list(rs[1].experiment_identifiers().values()) == ["1", "2"]
```
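The behaviour verified above is that later tables have their non-negative `id` values shifted so they never clash with earlier tables, while the `-1` (unindexed) entries are left untouched. The following plain-Python sketch reproduces that bookkeeping on dicts standing in for reflection tables; it illustrates the expected outcome, not the flatten_reflections implementation.
```python
# Pure-Python sketch of the id renumbering exercised by the test above.
def renumber(tables):
    offset = 0
    for table in tables:
        used = sorted({i for i in table["id"] if i >= 0})
        mapping = {old: offset + k for k, old in enumerate(used)}
        table["id"] = [mapping.get(i, i) for i in table["id"]]
        table["identifiers"] = {
            mapping[old]: name for old, name in table["identifiers"].items()
        }
        offset += len(used)
    return tables

tables = [
    {"id": [-1, 0, 0], "identifiers": {0: "0"}},
    {"id": [-1, 0, 0], "identifiers": {0: "0"}},
]
renumber(tables)
assert tables[1]["id"] == [-1, 1, 1] and tables[1]["identifiers"] == {1: "0"}
```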
#### File: image_viewer/slip_viewer/frame.py
```python
from __future__ import absolute_import, division, print_function
import imp
import math
import os
import wx
from . import pyslip
from . import tile_generation
from ..rstbx_frame import EVT_EXTERNAL_UPDATE
from ..rstbx_frame import XrayFrame as XFBaseClass
from rstbx.viewer import settings as rv_settings, image as rv_image
from wxtbx import bitmaps
pyslip._Tiles = tile_generation._Tiles
class chooser_wrapper(object):
def __init__(self, image_set, index):
self.image_set = image_set
self.path = os.path.basename(image_set.get_path(index))
self.full_path = image_set.get_path(index)
self.index = index
self._raw_data = None
def __str__(self):
return "%s [%d]" % (self.path, self.index + 1)
def get_detector(self):
return self.image_set.get_detector()
def get_scan(self):
return self.image_set.get_scan()
def get_beam(self):
return self.image_set.get_beam()
def get_mask(self):
return self.image_set.get_mask(self.index)
def get_raw_data(self):
if self._raw_data is None:
return self.image_set[self.index]
return self._raw_data
def set_raw_data(self, raw_data):
self._raw_data = raw_data
def get_detectorbase(self):
return self.image_set.get_detectorbase(self.index)
def get_vendortype(self):
return self.image_set.get_vendortype(self.index)
def show_header(self):
return self.image_set.get_detectorbase(self.index).show_header()
class XrayFrame(XFBaseClass):
def set_pyslip(self, parent):
self.pyslip = pyslip.PySlip(parent, tile_dir=None, min_level=0)
def __init__(self, *args, **kwds):
self.params = kwds.get("params", None)
if "params" in kwds:
del kwds["params"] # otherwise wx complains
### Collect any plugins
slip_viewer_dir = os.path.join(os.path.dirname(__file__))
contents = os.listdir(slip_viewer_dir)
plugin_names = [
f.split(".py")[0] for f in contents if f.endswith("_frame_plugin.py")
]
self.plugins = {}
for name in plugin_names:
self.plugins[name] = imp.load_source(
name, os.path.join(slip_viewer_dir, name + ".py")
)
if len(plugin_names) > 0:
print("Loaded plugins: " + ", ".join(plugin_names))
wx.Frame.__init__(self, *args, **kwds)
self.settings = rv_settings()
self.sizer = wx.BoxSizer(wx.VERTICAL)
self.SetSizer(self.sizer)
# initialization is done in stages as windows are created
self.pyslip = None
self.viewer = wx.Panel(self, wx.ID_ANY)
self.viewer.SetMinSize((640, 640))
self.viewer.SetBackgroundColour(wx.BLACK)
self.viewer.ClearBackground()
self.sizer.Add(self.viewer, 1, wx.EXPAND)
self.statusbar = self.CreateStatusBar()
self.settings_frame = None
self._calibration_frame = None
self._ring_frame = None
self._uc_frame = None
self._score_frame = None
self._plugins_frame = {key: None for key in self.plugins}
self.zoom_frame = None
self.plot_frame = None
self.metrology_matrices = None
# Currently displayed image. XXX Can this be zapped?
self._img = None
self._distl = None
self.toolbar = self.CreateToolBar(style=wx.TB_3DBUTTONS | wx.TB_TEXT)
self.setup_toolbar()
self.toolbar.Realize()
self.mb = wx.MenuBar()
self.setup_menus()
self.SetMenuBar(self.mb)
self.Fit()
self.SetMinSize(self.GetSize())
self.SetSize((720, 720))
self.Bind(EVT_EXTERNAL_UPDATE, self.OnExternalUpdate)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateUICalibration, id=self._id_calibration)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateUINext, id=wx.ID_FORWARD)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateUIPrevious, id=wx.ID_BACKWARD)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateUIRing, id=self._id_ring)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateUIUC, id=self._id_uc)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateUIScore, id=self._id_score)
for p in self.plugins:
self.Bind(
wx.EVT_UPDATE_UI,
self.OnUpdateUIPluginWrapper(p),
id=self._id_plugins[p],
)
# consolidate initialization of PySlip object into a single function
def init_pyslip(self):
self.set_pyslip(self.viewer)
self.init_pyslip_presizer()
def Show(self):
# Due to the asynchronous nature of X11 on Linux, just showing a frame
# does not guarantee window creation. The frame calls Raise() so that it
# will be shown. This addresses an error with PySlip requiring the
# window to exist before instantiation.
super(XrayFrame, self).Show()
self.Raise()
def setup_toolbar(self):
XFBaseClass.setup_toolbar(self)
btn = self.toolbar.AddLabelTool(
id=wx.ID_SAVEAS,
label="Save As...",
bitmap=bitmaps.fetch_icon_bitmap("actions", "save_all", 32),
shortHelp="Save As...",
kind=wx.ITEM_NORMAL,
)
self.Bind(wx.EVT_MENU, self.OnSaveAs, btn)
# using StaticBox creates a horizontal white bar in Linux
def make_gui(self, parent):
parent.sizer = wx.BoxSizer(wx.HORIZONTAL)
parent.SetSizer(parent.sizer)
parent.sizer.Add(self.pyslip, 1, wx.EXPAND)
def init_pyslip_presizer(self):
self.demo_select_dispatch = {}
# self.tile_directory = None#"/Users/nksauter/rawdata/demo/insulin_1_001.img"
# build the GUI
self.make_gui(self.viewer)
# finally, bind event to handler
self.pyslip.Bind(pyslip.EVT_PYSLIP_POSITION, self.handle_position_event)
def handle_position_event(self, event):
"""Handle a pySlip POSITION event."""
posn_str = ""
if event.position:
(lon, lat) = event.position
fast_picture, slow_picture = self.pyslip.tiles.lon_lat_to_picture_fast_slow(
lon, lat
)
posn_str = "Picture: slow=%.3f / fast=%.3f pixels." % (
slow_picture,
fast_picture,
)
coords = self.pyslip.tiles.get_flex_pixel_coordinates(lon, lat)
if len(coords) >= 2:
if len(coords) == 3:
readout = int(round(coords[2]))
else:
readout = -1
coords_str = "slow=%.3f / fast=%.3f pixels" % (coords[0], coords[1])
if len(coords) == 2:
posn_str += " Readout: " + coords_str + "."
elif readout >= 0:
posn_str += " Readout %d: %s." % (readout, coords_str)
possible_intensity = None
fi = self.pyslip.tiles.raw_image
detector = fi.get_detector()
ifs = (int(coords[1]), int(coords[0])) # int fast slow
isf = (int(coords[0]), int(coords[1])) # int slow fast
raw_data = fi.get_raw_data()
if not isinstance(raw_data, tuple):
raw_data = (raw_data,)
if len(detector) > 1:
if readout >= 0:
if detector[readout].is_coord_valid(ifs):
possible_intensity = raw_data[readout][isf]
else:
if detector[0].is_coord_valid(ifs):
possible_intensity = raw_data[0][isf]
if possible_intensity is not None:
if possible_intensity == 0:
format_str = " I=%6.4f"
else:
yaya = int(math.ceil(math.log10(abs(possible_intensity))))
format_str = " I=%%6.%df" % (max(0, 5 - yaya))
posn_str += format_str % possible_intensity
if (
len(coords) > 2 and readout >= 0
): # indicates it's a tiled image in a valid region
reso = self.pyslip.tiles.get_resolution(
coords[1], coords[0], readout
)
else:
reso = self.pyslip.tiles.get_resolution(coords[1], coords[0])
if reso is not None:
posn_str += " Resolution: %.3f" % (reso)
self.statusbar.SetStatusText(posn_str)
else:
self.statusbar.SetStatusText(
"Click and drag to pan; "
+ "middle-click and drag to plot intensity profile, right-click to zoom"
)
# print "event with no position",event
return
def init_pyslip_postsizer(self):
self.pyslip.ZoomToLevel(-2) # tiles.zoom_level
self.pyslip.GotoPosition(
self.pyslip.tiles.get_initial_instrument_centering_within_picture_as_lon_lat()
)
def setup_menus(self):
file_menu = wx.Menu()
self.mb.Append(file_menu, "File")
item = file_menu.Append(-1, "Open integration results...")
self.Bind(wx.EVT_MENU, self.OnLoadIntegration, item)
item = file_menu.Append(-1, "Open image...")
self.Bind(wx.EVT_MENU, self.OnLoadFile, item)
self._actions_menu = wx.Menu()
self.mb.Append(self._actions_menu, "Actions")
# item = self._actions_menu.Append(-1, "Change beam center...")
# self.Bind(wx.EVT_MENU, self.OnChangeBeamCenter, item)
# item = self._actions_menu.Append(-1, "Reset beam center to header value")
# self.Bind(wx.EVT_MENU, lambda evt: self.viewer.ResetBeamCenter(), item)
item = self._actions_menu.Append(-1, "Save As...")
self.Bind(wx.EVT_MENU, self.OnSaveAs, item)
# Known wxWidgets/wxPython issue
# (http://trac.wxwidgets.org/ticket/12394): stock item ID is
# expected for zero-length text. Work around by making text
# contain single space. XXX Placement
self._id_calibration = wx.NewId()
item = self._actions_menu.Append(self._id_calibration, " ")
self.Bind(wx.EVT_MENU, self.OnCalibration, source=item)
# XXX Placement
self._id_ring = wx.NewId()
item = self._actions_menu.Append(self._id_ring, " ")
self.Bind(wx.EVT_MENU, self.OnRing, source=item)
# XXX Placement
self._id_uc = wx.NewId()
item = self._actions_menu.Append(self._id_uc, " ")
self.Bind(wx.EVT_MENU, self.OnUC, source=item)
# XXX Placement
self._id_score = wx.NewId()
item = self._actions_menu.Append(self._id_score, " ")
self.Bind(wx.EVT_MENU, self.OnScore, source=item)
self._id_plugins = {}
for p in self.plugins:
self._id_plugins[p] = wx.NewId()
item = self._actions_menu.Append(self._id_plugins[p], " ")
self.Bind(wx.EVT_MENU, self.OnPluginWrapper(p), source=item)
def has_four_quadrants(self):
d = self.pyslip.tiles.raw_image.get_detector()
return len(d) > 1 and len(d.hierarchy()) == 4
def add_file_name_or_data(self, file_name_or_data):
"""The add_file_name_or_data() function appends @p
file_name_or_data to the image chooser, unless it is already
present. For file-backed images, the base name is displayed in
the chooser. If necessary, the number of entries in the chooser
is pruned. The function returns the index of the recently added
entry. XXX This is probably the place for heuristics to determine
if the viewer was given a pattern, or a plain list of files. XXX
Rename this function, because it only deals with the chooser?
"""
key = self.get_key(file_name_or_data)
for i in range(self.image_chooser.GetCount()):
if key == str(self.image_chooser.GetClientData(i)):
return i
if self.image_chooser.GetCount() >= self.CHOOSER_SIZE:
self.image_chooser.Delete(0)
i = self.image_chooser.GetCount()
if type(file_name_or_data) is dict:
self.image_chooser.Insert(key, i, None)
elif isinstance(file_name_or_data, chooser_wrapper):
self.image_chooser.Insert(key, i, file_name_or_data)
else:
self.image_chooser.Insert(os.path.basename(key), i, key)
return i
def get_beam_center_px(self):
"""
Get the beam center in pixel coordinates relative to the tile closest to it.
@return panel_id, beam_center_fast, beam_center_slow. panel_id is the panel the
returned coordinates are relative to.
"""
detector = self.get_detector()
beam = self.get_beam()
if abs(detector[0].get_distance()) == 0:
return 0.0, 0.0
# FIXME assumes all detector elements use the same millimeter-to-pixel convention
try:
# determine if the beam intersects one of the panels
panel_id, (x_mm, y_mm) = detector.get_ray_intersection(beam.get_s0())
except RuntimeError as e:
if not ("DXTBX_ASSERT(" in str(e) and ") failure" in str(e)):
# unknown exception from dxtbx
raise e
if len(detector) > 1:
# find the panel whose center is closest to the beam.
panel_id = 0
lowest_res = 0
for p_id, panel in enumerate(detector):
w, h = panel.get_image_size()
res = panel.get_resolution_at_pixel(beam.get_s0(), (w // 2, h // 2))
if res > lowest_res:
panel_id = p_id
lowest_res = res
x_mm, y_mm = detector[panel_id].get_beam_centre(beam.get_s0())
else:
panel_id = 0
# FIXME this is horrible but cannot find easier way without
# restructuring code - N.B. case I am debugging now is one
# panel detector *parallel to beam* for which the question is
# ill posed.
try:
x_mm, y_mm = detector[0].get_beam_centre(beam.get_s0())
except RuntimeError as e:
if "DXTBX_ASSERT" in str(e):
x_mm, y_mm = 0.0, 0.0
else:
raise e
beam_pixel_fast, beam_pixel_slow = detector[panel_id].millimeter_to_pixel(
(x_mm, y_mm)
)
return panel_id, beam_pixel_fast, beam_pixel_slow
def load_image(self, file_name_or_data, get_raw_data=None, show_untrusted=False):
"""The load_image() function displays the image from @p
file_name_or_data. The chooser is updated appropriately.
"""
# Due to a bug in wxPython 3.0.2 for Linux
# http://trac.wxwidgets.org/ticket/16034
# the creation of the PySlip object is deferred until it is needed and
# after other windows are created
if self.pyslip is None:
self.init_pyslip()
# The settings dialog is created after PySlip because it may require
# values from PySlip
if self.settings_frame is None:
self.OnShowSettings(None)
self.Layout()
if isinstance(file_name_or_data, chooser_wrapper):
img = rv_image(file_name_or_data)
else:
try:
img = rv_image(file_name_or_data.get_detectorbase())
except AttributeError:
img = rv_image(os.path.abspath(file_name_or_data))
try:
title = file_name_or_data.full_path
except AttributeError:
title = str(file_name_or_data)
self.SetTitle(title)
# Update the selection in the chooser.
i = self.add_file_name_or_data(file_name_or_data)
self.image_chooser.SetSelection(i)
self.pyslip.tiles.show_untrusted = show_untrusted
self.pyslip.tiles.current_brightness = self.settings.brightness
self.pyslip.tiles.current_color_scheme = self.settings.color_scheme
self.pyslip.tiles.set_image(
file_name_or_data=img,
metrology_matrices=self.metrology_matrices,
get_raw_data=get_raw_data,
)
# Initialise the position and zoom level for the first image. XXX Why do we
# have to call ZoomToLevel to refresh subsequent images?
if self._img is None:
self.init_pyslip_postsizer()
else:
self.pyslip.ZoomToLevel(self.pyslip.tiles.zoom_level)
self._img = img # XXX
self.settings_frame.set_image(self._img)
self.update_statusbar() # XXX Not always working?
# self.Layout()
detector = self.get_detector()
if abs(detector[0].get_distance()) > 0:
def map_coords(x, y, p):
if len(self.pyslip.tiles.raw_image.get_detector()) > 1:
y, x = self.pyslip.tiles.flex_image.tile_readout_to_picture(
p, y - 0.5, x - 0.5
)
return self.pyslip.tiles.picture_fast_slow_to_map_relative(x, y)
panel_id, beam_pixel_fast, beam_pixel_slow = self.get_beam_center_px()
self.beam_center_cross_data = [
(
(
map_coords(beam_pixel_fast + 3.0, beam_pixel_slow, panel_id),
map_coords(beam_pixel_fast - 3.0, beam_pixel_slow, panel_id),
),
{"width": 2, "color": "#0000FFA0", "closed": False},
),
(
(
map_coords(beam_pixel_fast, beam_pixel_slow + 3.0, panel_id),
map_coords(beam_pixel_fast, beam_pixel_slow - 3.0, panel_id),
),
{"width": 2, "color": "#0000FFA0", "closed": False},
),
]
# Unconditionally delete extra layers--update_settings() will add
# them back if appropriate. This also creates the self.*_layer
# variables.
if hasattr(self, "beam_layer") and self.beam_layer is not None:
self.pyslip.DeleteLayer(self.beam_layer, update=False)
self.beam_layer = None
if hasattr(self, "spotfinder_layer") and self.spotfinder_layer is not None:
self.pyslip.DeleteLayer(self.spotfinder_layer)
self.spotfinder_layer = None
if hasattr(self, "tile_layer") and self.tile_layer is not None:
self.pyslip.DeleteLayer(self.tile_layer)
self.tile_layer = None
if hasattr(self, "tile_text_layer") and self.tile_text_layer is not None:
self.pyslip.DeleteLayer(self.tile_text_layer)
self.tile_text_layer = None
# if hasattr(self, 'plugins_layer') and hasattr(self.plugins_layer, "__iter__"):
# for key in self.plugins_layer:
# if self.plugins_layer[key] is not None:
# self.pyslip.DeleteLayer(self.plugins_layer[key])
# self.plugins_layer = {key:None for key in self.plugins}
self.update_settings()
# Destroy the calibration frame if it is present but unsupported for
# this image. XXX Need to do something about the ring tool too
# when switching between different kinds of images. XXX Centering
# is broken when switching between different kinds of images.
if self._calibration_frame and not self.has_four_quadrants():
self.OnCalibration(None)
def get_detector(self):
return self.pyslip.tiles.raw_image.get_detector()
def get_beam(self):
return self.pyslip.tiles.raw_image.get_beam()
def get_key(self, file_name_or_data):
"""This overridden get_key() function returns the key of @p file_name_or_data
if it's a DetectorImageBase object. Otherwise it returns the super class's
key
"""
from iotbx.detectors.detectorbase import DetectorImageBase
if isinstance(file_name_or_data, DetectorImageBase):
return file_name_or_data.filename
elif isinstance(file_name_or_data, chooser_wrapper):
return str(file_name_or_data)
else:
return super(XrayFrame, self).get_key(file_name_or_data)
def update_settings(self, layout=True):
# XXX The zoom level from the settings panel is not taken into
# account here.
new_brightness = self.settings.brightness
new_color_scheme = self.settings.color_scheme
if (
new_brightness is not self.pyslip.tiles.current_brightness
or new_color_scheme is not self.pyslip.tiles.current_color_scheme
):
self.pyslip.tiles.update_brightness(new_brightness, new_color_scheme)
if self.settings.show_beam_center:
if self.beam_layer is None and hasattr(self, "beam_center_cross_data"):
self.beam_layer = self.pyslip.AddPolygonLayer(
self.beam_center_cross_data,
name="<beam_layer>",
show_levels=[-2, -1, 0, 1, 2, 3, 4, 5],
update=False,
)
elif self.beam_layer is not None:
self.pyslip.DeleteLayer(self.beam_layer, update=False)
self.beam_layer = None
if self.settings.show_spotfinder_spots:
if self.spotfinder_layer is None:
tdata = self.pyslip.tiles.get_spotfinder_data(self.params)
self.spotfinder_layer = self.pyslip.AddPointLayer(
tdata,
color="green",
name="<spotfinder_layer>",
radius=2,
renderer=self.pyslip.LightweightDrawPointLayer,
show_levels=[-2, -1, 0, 1, 2, 3, 4, 5],
)
elif self.spotfinder_layer is not None:
self.pyslip.DeleteLayer(self.spotfinder_layer)
self.spotfinder_layer = None
if self.settings.show_effective_tiling:
if self.tile_layer is None:
tdata, ttdata = self.pyslip.tiles.get_effective_tiling_data(self.params)
self.tile_layer = self.pyslip.AddPolygonLayer(
tdata, name="<tiling_layer>", show_levels=[-2, -1, 0, 1, 2, 3, 4, 5]
)
if self.tile_text_layer is None:
self.tile_text_layer = self.pyslip.AddTextLayer(
ttdata,
name="<tiling_text_layer>",
show_levels=[-2, -1, 0, 1, 2, 3, 4, 5],
colour="#0000FFA0",
textcolour="#0000FFA0",
fontsize=30,
placement="cc",
radius=0,
)
elif (self.tile_layer is not None) and (self.tile_text_layer is not None):
self.pyslip.DeleteLayer(self.tile_layer)
self.tile_layer = None
self.pyslip.DeleteLayer(self.tile_text_layer)
self.tile_text_layer = None
if hasattr(self, "user_callback"):
self.user_callback(self)
self.pyslip.Update() # triggers redraw
def OnCalibration(self, event):
from rstbx.slip_viewer.calibration_frame import SBSettingsFrame
if not self._calibration_frame:
self._calibration_frame = SBSettingsFrame(
self, wx.ID_ANY, "Quadrant calibration", style=wx.CAPTION | wx.CLOSE_BOX
)
self._calibration_frame.Show()
self._calibration_frame.Raise()
else:
self._calibration_frame.Destroy()
def OnRing(self, event):
from .ring_frame import RingSettingsFrame
if not self._ring_frame:
self._ring_frame = RingSettingsFrame(
self, wx.ID_ANY, "Ring tool", style=wx.CAPTION | wx.CLOSE_BOX
)
self._ring_frame.Show()
self._ring_frame.Raise()
else:
self._ring_frame.Destroy()
def OnUC(self, event):
from .uc_frame import UCSettingsFrame
if not self._uc_frame:
self._uc_frame = UCSettingsFrame(
self, wx.ID_ANY, "Unit cell tool", style=wx.CAPTION | wx.CLOSE_BOX
)
self._uc_frame.Show()
self._uc_frame.Raise()
else:
self._uc_frame.Destroy()
def OnScore(self, event):
from .score_frame import ScoreSettingsFrame
if not self._score_frame:
self._score_frame = ScoreSettingsFrame(
self, wx.ID_ANY, "Score tool", style=wx.CAPTION | wx.CLOSE_BOX
)
self._score_frame.Show()
self._score_frame.Raise()
else:
self._score_frame.Destroy()
def OnPluginWrapper(self, p):
def OnPlugin(event):
if not self._plugins_frame[p]:
helper = self.plugins[p].PluginHelper
self._plugins_frame[p] = helper._plugin_settings_frame(
self,
wx.ID_ANY,
helper._plugin_title,
style=wx.CAPTION | wx.CLOSE_BOX,
)
self._plugins_frame[p].Show()
self._plugins_frame[p].Raise()
else:
self._plugins_frame[p].Destroy()
return OnPlugin
def OnUpdateUICalibration(self, event):
# If quadrant calibration is not supported for this image, disable
# the corresponding menu item. Toggle the menu item text
# depending on the state of the tool.
if self.has_four_quadrants():
event.Enable(True)
if self._calibration_frame:
event.SetText("Hide quadrant calibration")
else:
event.SetText("Show quadrant calibration")
else:
event.Enable(False)
event.SetText("Show quadrant calibration")
def OnUpdateUINext(self, event):
# Enable/disable the "Next" button based on the image's position
# in the list.
event.Enable(
self.image_chooser.GetSelection() + 1 < self.image_chooser.GetCount()
)
def OnUpdateUIPrevious(self, event):
# Enable/disable the "Previous" button based on the image's
# position in the list.
event.Enable(self.image_chooser.GetSelection() >= 1)
def OnUpdateUIRing(self, event):
# Toggle the menu item text depending on the state of the tool.
if self._ring_frame:
event.SetText("Hide ring tool")
else:
event.SetText("Show ring tool")
def OnUpdateUIUC(self, event):
# Toggle the menu item text depending on the state of the tool.
if self._uc_frame:
event.SetText("Hide unit cell tool")
else:
event.SetText("Show unit cell tool")
def OnUpdateUIScore(self, event):
# Toggle the menu item text depending on the state of the tool.
if self._score_frame:
event.SetText("Hide score tool")
else:
event.SetText("Show score tool")
def OnUpdateUIPluginWrapper(self, p):
def OnUpdateUIPlugin(event):
# Toggle the menu item text depending on the state of the tool.
helper = self.plugins[p].PluginHelper
if self._plugins_frame[p]:
event.SetText(helper._plugin_hide_text)
else:
event.SetText(helper._plugin_show_text)
return OnUpdateUIPlugin
def OnSaveAs(self, event):
### XXX TODO: Save overlays
### XXX TODO: Fix bug where multi-ASIC images are slightly cropped due to a transformation error
import PIL.Image as Image
dialog = wx.FileDialog(
self,
defaultDir="",
message="Save PNG or PDF file",
style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT,
wildcard="PNG file (*.png)|*.png|PDF file (*.pdf)|*.pdf",
)
if dialog.ShowModal() != wx.ID_OK:
return
file_name = dialog.GetPath()
if file_name == "":
return
self.update_statusbar("Writing " + file_name + "...")
if dialog.GetFilterIndex() == 0:
# XXX Copied from tile_generation.py; all its disclaimers
# apply.
raw_img = self.pyslip.tiles.raw_image
detector = raw_img.get_detector()
data = raw_img.get_raw_data()
if not isinstance(data, tuple): # XXX should not need this test
data = (data,)
if len(detector) > 1:
from .tile_generation import _get_flex_image_multipanel
flex_img = _get_flex_image_multipanel(
brightness=self.settings.brightness / 100,
panels=detector,
raw_data=data,
beam=raw_img.get_beam(),
)
else:
from .tile_generation import _get_flex_image
flex_img = _get_flex_image(
brightness=self.settings.brightness / 100,
data=data[0],
saturation=detector[0].get_trusted_range()[1],
vendortype=raw_img.get_vendortype(),
)
if flex_img.supports_rotated_tiles_antialiasing_recommended:
currentZoom = self.pyslip.level
self.pyslip.tiles.UseLevel(0) # 1:1 zoom level
try:
x, y, width, height = self._img._raw.bounding_box_mm()
x1, y1 = self._img._raw.detector_coords_as_image_coords(x, y)
x2, y2 = self._img._raw.detector_coords_as_image_coords(
x + width, y + height
)
except AttributeError:
x1 = min(
[
p.get_pixel_lab_coord(c)[0] / p.get_pixel_size()[0]
for p in detector
for c in [
(0, 0),
(0, p.get_image_size()[1]),
(p.get_image_size()[0], 0),
(p.get_image_size()[0], p.get_image_size()[1]),
]
]
)
y1 = min(
[
p.get_pixel_lab_coord(c)[1] / p.get_pixel_size()[1]
for p in detector
for c in [
(0, 0),
(0, p.get_image_size()[1]),
(p.get_image_size()[0], 0),
(p.get_image_size()[0], p.get_image_size()[1]),
]
]
)
x2 = max(
[
p.get_pixel_lab_coord(c)[0] / p.get_pixel_size()[0]
for p in detector
for c in [
(0, 0),
(0, p.get_image_size()[1]),
(p.get_image_size()[0], 0),
(p.get_image_size()[0], p.get_image_size()[1]),
]
]
)
y2 = max(
[
p.get_pixel_lab_coord(c)[1] / p.get_pixel_size()[1]
for p in detector
for c in [
(0, 0),
(0, p.get_image_size()[1]),
(p.get_image_size()[0], 0),
(p.get_image_size()[0], p.get_image_size()[1]),
]
]
)
# Map > View - determine layout in X direction
x_offset = x1
start_x_tile = int(math.floor(x_offset / self.pyslip.tile_size_x))
stop_x_tile = (
x2 + self.pyslip.tile_size_x - 1
) / self.pyslip.tile_size_x
stop_x_tile = int(stop_x_tile)
col_list = range(start_x_tile, stop_x_tile)
x_pix = start_x_tile * self.pyslip.tile_size_y - x_offset
y_offset = y1
start_y_tile = int(math.floor(y_offset / self.pyslip.tile_size_y))
stop_y_tile = (
y2 + self.pyslip.tile_size_y - 1
) / self.pyslip.tile_size_y
stop_y_tile = int(stop_y_tile)
row_list = range(start_y_tile, stop_y_tile)
y_pix_start = start_y_tile * self.pyslip.tile_size_y - y_offset
bitmap = wx.EmptyBitmap(x2 - x1, y2 - y1)
dc = wx.MemoryDC()
dc.SelectObject(bitmap)
# start pasting tiles
for x in col_list:
y_pix = y_pix_start
for y in row_list:
dc.DrawBitmap(
self.pyslip.tiles.GetTile(x, y), x_pix, y_pix, False
)
y_pix += self.pyslip.tile_size_y
x_pix += self.pyslip.tile_size_x
dc.SelectObject(wx.NullBitmap)
wximg = wx.ImageFromBitmap(bitmap)
imageout = Image.new("RGB", (wximg.GetWidth(), wximg.GetHeight()))
imageout.frombytes(wximg.GetData())
self.pyslip.tiles.UseLevel(currentZoom)
else: # write the image out at full resolution
flex_img.setWindow(0.0, 0.0, 1)
flex_img.spot_convention(0)
flex_img.adjust(color_scheme=self.settings.color_scheme)
flex_img.prep_string()
data_string = flex_img.as_bytes()
imageout = Image.frombytes(
"RGB", (flex_img.ex_size2(), flex_img.ex_size1()), data_string
)
with open(file_name, "wb") as fh:
imageout.save(fh, "PNG")
elif dialog.GetFilterIndex() == 1:
from reportlab.lib.units import inch
from reportlab.pdfgen import canvas
# Dots per inch in PDF output, and fudge factor to not make
# fine features impossibly small. XXX The fudge factor should
# go.
DPI = 72
LINE_WIDTH_FACTOR = 0.6
# XXX Copied from tile_generation.py; all its disclaimers
# apply.
raw_img = self.pyslip.tiles.raw_image
detector = raw_img.get_detector()
data = raw_img.get_raw_data()
if not isinstance(data, tuple): # XXX should not need this test
data = (data,)
if len(detector) > 1:
from .tile_generation import _get_flex_image_multipanel
flex_img = _get_flex_image_multipanel(
brightness=self.settings.brightness / 100,
panels=detector,
raw_data=data,
beam=raw_img.get_beam(),
)
else:
from .tile_generation import _get_flex_image
flex_img = _get_flex_image(
brightness=self.settings.brightness / 100,
data=data[0],
saturation=detector[0].get_trusted_range()[1],
vendortype=raw_img.get_vendortype(),
)
flex_img.setWindow(0, 0, 1)
flex_img.adjust(color_scheme=self.settings.color_scheme)
flex_img.prep_string()
# XXX Order of size1/size2 correct?
pdf_size = (flex_img.size2() * inch / DPI, flex_img.size1() * inch / DPI)
pdf_canvas = canvas.Canvas(filename=file_name, pagesize=pdf_size)
pil_img = Image.frombytes(
"RGB", (flex_img.size2(), flex_img.size1()), flex_img.as_bytes()
)
pdf_canvas.drawInlineImage(
pil_img, 0, 0, width=pdf_size[0], height=pdf_size[1]
)
for layer_id in self.pyslip.layer_z_order:
layer = self.pyslip.layer_mapping[layer_id]
# XXX This would probably be more elegant if these were
# functions in some layer class. Note repeated roundabout
# way (via a wx.Pen object) to extract RGB values from the
# colour parameter.
if layer.type == self.pyslip.TypeEllipse:
for (
p,
place,
width,
colour,
closed,
filled,
fillcolour,
x_off,
y_off,
pdata,
) in layer.data:
if layer.map_rel:
pp = []
for pelement in p:
fs = self.pyslip.tiles.map_relative_to_picture_fast_slow(
pelement[0], pelement[1]
)
pp.append(
(
fs[0] * inch / DPI,
pdf_size[1] - fs[1] * inch / DPI,
)
)
ellipse_center = (pp[0][0], pp[0][1])
major = (
pp[1][0] - ellipse_center[0],
pp[1][1] - ellipse_center[1],
)
minor = (
pp[2][0] - ellipse_center[0],
pp[2][1] - ellipse_center[1],
)
else:
raise NotImplementedError(
"PDF output in view-relative coordinates not implemented"
)
pen = wx.Pen(colour)
pdf_canvas.setLineWidth(width * LINE_WIDTH_FACTOR)
pdf_canvas.setStrokeColorRGB(
pen.Colour.Red() / 255,
pen.Colour.Green() / 255,
pen.Colour.Blue() / 255,
)
angle = math.atan2(major.elems[1], major.elems[0])
r_major = math.hypot(major.elems[0], major.elems[1])
r_minor = math.hypot(minor.elems[0], minor.elems[1])
pdf_canvas.saveState()
pdf_canvas.translate(
ellipse_center.elems[0], ellipse_center.elems[1]
)
pdf_canvas.rotate(math.degrees(angle))
pdf_canvas.ellipse(-r_major, -r_minor, r_major, r_minor)
pdf_canvas.restoreState()
elif layer.type == self.pyslip.TypeImage:
raise NotImplementedError(
"PDF output of image layers not implemented"
)
elif layer.type == self.pyslip.TypePoint:
for (
lon,
lat,
place,
radius,
colour,
x_off,
y_off,
pdata,
) in layer.data:
if layer.map_rel:
fs = self.pyslip.tiles.map_relative_to_picture_fast_slow(
lon, lat
)
else:
raise NotImplementedError(
"PDF output in view-relative coordinates not implemented"
)
pt = (fs[0] * inch / DPI, pdf_size[1] - fs[1] * inch / DPI)
pen = wx.Pen(colour)
pdf_canvas.setLineWidth(radius)
pdf_canvas.setStrokeColorRGB(
pen.Colour.Red() / 255,
pen.Colour.Green() / 255,
pen.Colour.Blue() / 255,
)
pdf_canvas.circle(pt[0], pt[1], 0.5 * radius * inch / DPI)
elif layer.type == self.pyslip.TypePolygon:
for (
p,
place,
width,
colour,
closed,
filled,
fillcolour,
x_off,
y_off,
pdata,
) in layer.data:
path = pdf_canvas.beginPath()
for i, pp in enumerate(p):
if layer.map_rel:
fs = self.pyslip.tiles.map_relative_to_picture_fast_slow(
pp[0], pp[1]
)
else:
raise NotImplementedError(
"PDF output in view-relative coordinates not implemented"
)
pt = (fs[0] * inch / DPI, pdf_size[1] - fs[1] * inch / DPI)
if i == 0:
path.moveTo(pt[0], pt[1])
else:
path.lineTo(pt[0], pt[1])
if closed:
path.close()
pen = wx.Pen(colour)
pdf_canvas.setFillColorRGB(
pen.Colour.Red() / 255,
pen.Colour.Green() / 255,
pen.Colour.Blue() / 255,
)
pdf_canvas.setLineWidth(width * LINE_WIDTH_FACTOR)
pdf_canvas.setStrokeColorRGB(
pen.Colour.Red() / 255,
pen.Colour.Green() / 255,
pen.Colour.Blue() / 255,
)
pdf_canvas.drawPath(path, fill=filled)
elif layer.type == self.pyslip.TypeText:
for (
lon,
lat,
tdata,
placement,
radius,
colour,
textcolour,
fontname,
fontsize,
offset_x,
offset_y,
data,
) in layer.data:
if placement != "cc":
print(
Warning(
"Only centered placement available when drawing text on pdf"
)
)
if layer.map_rel:
fs = self.pyslip.tiles.map_relative_to_picture_fast_slow(
lon, lat
)
else:
raise NotImplementedError(
"PDF output in view-relative coordinates not implemented"
)
from reportlab.pdfbase.pdfmetrics import stringWidth
scale = 5 # XXX this scaleup by 5 is arbitrary!
try:
w = stringWidth(tdata, fontname, fontsize * scale)
except KeyError:
fontname = "Helvetica"
w = stringWidth(tdata, fontname, fontsize * scale)
if fs[0] - (w / 2) < 0: # handle text falling off the left side
txt = pdf_canvas.beginText(x=0, y=fs[1])
else:
txt = pdf_canvas.beginText(x=fs[0] - (w / 2), y=fs[1])
txt.setFont(fontname, fontsize * scale)
txt.setFillColor(textcolour)
txt.setStrokeColor(textcolour)
txt.textLine(tdata)
pdf_canvas.drawText(txt)
pdf_canvas.save()
self.update_statusbar("Writing " + file_name + "..." + " Done.")
from rstbx.viewer.frame import SettingsFrame
def override_SF_set_image(self, image):
self.Layout()
self.Fit()
SettingsFrame.set_image = override_SF_set_image
```
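Much of `get_beam_center_px` above boils down to converting a beam-centre position from millimetres on a panel into pixel coordinates. The sketch below shows only that last conversion under the simplifying assumption of a uniform pixel size; real dxtbx panels handle this (and parallax corrections) through `millimeter_to_pixel`, and the numbers here are hypothetical.
```python
# Illustrative mm -> pixel conversion for a beam centre on a single panel,
# assuming a uniform pixel size and no parallax correction (hypothetical values).
def millimeter_to_pixel(xy_mm, pixel_size_mm=(0.172, 0.172)):
    x_mm, y_mm = xy_mm
    return x_mm / pixel_size_mm[0], y_mm / pixel_size_mm[1]

beam_fast_px, beam_slow_px = millimeter_to_pixel((211.8, 219.3))
print("beam centre: fast=%.1f slow=%.1f pixels" % (beam_fast_px, beam_slow_px))
```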
#### File: dials/util/Resolutionizer.py
```python
from __future__ import absolute_import, division, print_function
import math
import sys
import time
import iotbx.phil
from cctbx.array_family import flex
from dials.util import Sorry
from scitbx import lbfgs
def nint(a):
return int(round(a))
start_time = time.time()
def stamp(message):
# print("[%7.3f] %s" % (time.time() - start_time, message))
return
def poly_residual(xp, y, params):
"""Compute the residual between the observations y[i] and sum_j
params[j] x[i]^j. For efficiency, x[i]^j are pre-calculated in xp."""
c = len(y)
e = flex.double([flex.sum(xp[j] * params) for j in range(c)])
return flex.sum(flex.pow2(y - e))
def poly_gradients(xp, y, params):
"""Compute the gradient of the residual w.r.t. the parameters, N.B.
will be performed using a finite difference method. N.B. this should
be trivial to do algebraicly."""
eps = 1.0e-6
g = flex.double()
n = len(params)
for j in range(n):
rs = []
for signed_eps in [-eps, eps]:
params_eps = params[:]
params_eps[j] += signed_eps
rs.append(poly_residual(xp, y, params_eps))
g.append((rs[1] - rs[0]) / (2 * eps))
return g
class poly_fitter(object):
"""A class to do the polynomial fit. This will fit observations y
at points x with a polynomial of order n."""
def __init__(self, points, values, order):
self.x = flex.double([1.0 for j in range(order)])
self._x = flex.double(points)
self._y = flex.double(values)
# precalculate x[j]^[0-(n - 1)] values
self._xp = [
flex.double([math.pow(x, j) for j in range(order)]) for x in self._x
]
return
def refine(self):
"""Actually perform the parameter refinement."""
tp = lbfgs.termination_parameters(max_iterations=1000)
r = lbfgs.run(target_evaluator=self, termination_params=tp)
return r
def compute_functional_and_gradients(self):
return (
poly_residual(self._xp, self._y, self.x),
poly_gradients(self._xp, self._y, self.x),
)
def get_parameters(self):
return list(self.x)
def evaluate(self, x):
"""Evaluate the resulting fit at point x."""
return sum([math.pow(x, k) * self.x[k] for k in range(len(self.x))])
def fit(x, y, order):
"""Fit the values y(x) then return this fit. x, y should
be iterables containing floats of the same size. The order is the order
of polynomial to use for this fit. This will be useful for e.g. I/sigma."""
stamp("fitter: %s %s %s" % (x, y, order))
pf = poly_fitter(x, y, order)
stamp("fitter: refine")
pf.refine()
stamp("fitter: done")
return [pf.evaluate(_x) for _x in x]
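# Illustrative usage (comment only, values hypothetical):
#   smoothed = fit([1.0, 2.0, 3.0, 4.0], [10.0, 6.0, 3.5, 2.0], 3)
# fits a quadratic (order 3 means powers 0..2 of x) through the binned values
# and returns the polynomial evaluated at each of the input points.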
def tanh_fit(x, y, iqr_multiplier=None):
from scitbx.math import curve_fitting
tf = curve_fitting.tanh_fit(x, y)
f = curve_fitting.tanh(*tf.params)
if iqr_multiplier is not None:
assert iqr_multiplier > 0
yc = f(x)
dy = y - yc
from scitbx.math import five_number_summary
min_x, q1_x, med_x, q3_x, max_x = five_number_summary(dy)
iqr_x = q3_x - q1_x
cut_x = iqr_multiplier * iqr_x
outliers = (dy > q3_x + cut_x) | (dy < q1_x - cut_x)
if outliers.count(True) > 0:
xo = x.select(~outliers)
yo = y.select(~outliers)
tf = curve_fitting.tanh_fit(xo, yo)
f = curve_fitting.tanh(*tf.params)
return f(x)
def log_fit(x, y, order):
"""Fit the values log(y(x)) then return exp() to this fit. x, y should
be iterables containing floats of the same size. The order is the order
of polynomial to use for this fit. This will be useful for e.g. I/sigma."""
ly = [math.log(_y) for _y in y]
pf = poly_fitter(x, ly, order)
pf.refine()
return [math.exp(pf.evaluate(_x)) for _x in x]
def log_inv_fit(x, y, order):
"""Fit the values log(1 / y(x)) then return the inverse of this fit.
x, y should be iterables, the order of the polynomial for the transformed
fit needs to be specified. This will be useful for e.g. Rmerge."""
ly = [math.log(1.0 / _y) for _y in y]
pf = poly_fitter(x, ly, order)
pf.refine()
return [(1.0 / math.exp(pf.evaluate(_x))) for _x in x]
def interpolate_value(x, y, t):
"""Find the value of x: y(x) = t."""
if t > max(y) or t < min(y):
raise RuntimeError("t outside of [%f, %f]" % (min(y), max(y)))
for j in range(1, len(x)):
x0 = x[j - 1]
y0 = y[j - 1]
x1 = x[j]
y1 = y[j]
if (y0 - t) * (y1 - t) < 0:
return x0 + (t - y0) * (x1 - x0) / (y1 - y0)
phil_str = """
rmerge = None
.type = float(value_min=0)
.help = "Maximum value of Rmerge in the outer resolution shell"
.short_caption = "Outer shell Rmerge"
.expert_level = 1
completeness = None
.type = float(value_min=0)
.help = "Minimum completeness in the outer resolution shell"
.short_caption = "Outer shell completeness"
.expert_level = 1
cc_ref = 0.1
.type = float(value_min=0)
.help = "Minimum value of CC vs reference dataset in the outer resolution shell"
.short_caption = "Outer shell CCref"
.expert_level = 1
cc_half = 0.3
.type = float(value_min=0)
.help = "Minimum value of CC1/2 in the outer resolution shell"
.short_caption = "Outer shell CC1/2"
.expert_level = 1
cc_half_method = *half_dataset sigma_tau
.type = choice
cc_half_significance_level = 0.1
.type = float(value_min=0, value_max=1)
.expert_level = 1
cc_half_fit = polynomial *tanh
.type = choice
.expert_level = 1
isigma = 0.25
.type = float(value_min=0)
.help = "Minimum value of the unmerged <I/sigI> in the outer resolution shell"
.short_caption = "Outer shell unmerged <I/sigI>"
.expert_level = 1
misigma = 1.0
.type = float(value_min=0)
.help = "Minimum value of the merged <I/sigI> in the outer resolution shell"
.short_caption = "Outer shell merged <I/sigI>"
.expert_level = 1
i_mean_over_sigma_mean = None
.type = float(value_min=0)
.help = "Minimum value of the unmerged <I>/<sigI> in the outer resolution shell"
.short_caption = "Outer shell unmerged <I>/<sigI>"
.expert_level = 2
nbins = 100
.type = int
.help = "Number of resolution bins to use for estimation of resolution limit."
.short_caption = "Number of resolution bins."
.expert_level = 1
binning_method = *counting_sorted volume
.type = choice
.help = "Use equal-volume bins or bins with approximately equal numbers of reflections per bin."
.short_caption = "Equal-volume or equal #ref binning."
.expert_level = 1
anomalous = False
.type = bool
.short_caption = "Keep anomalous pairs separate in merging statistics"
.expert_level = 1
labels = None
.type = strings
space_group = None
.type = space_group
.expert_level = 1
reference = None
.type = path
"""
phil_defaults = iotbx.phil.parse(
"""
resolutionizer {
%s
batch_range = None
.type = ints(size=2, value_min=0)
plot = False
.type = bool
.expert_level = 2
}
"""
% phil_str
)
class resolution_plot(object):
def __init__(self, ylabel):
import matplotlib
matplotlib.use("Agg")
from matplotlib import pyplot
pyplot.style.use("ggplot")
self.ylabel = ylabel
self.fig = pyplot.figure()
self.ax = self.fig.add_subplot(111)
def plot(self, d_star_sq, values, label):
self.ax.plot(d_star_sq, values, label=label)
if label.startswith("CC"):
ylim = self.ax.get_ylim()
self.ax.set_ylim(0, max(ylim[1], 1.05))
def plot_resolution_limit(self, d):
from cctbx import uctbx
d_star_sq = uctbx.d_as_d_star_sq(d)
self.ax.plot([d_star_sq, d_star_sq], self.ax.get_ylim(), linestyle="--")
def savefig(self, filename):
from cctbx import uctbx
xticks = self.ax.get_xticks()
xticks_d = [
"%.2f" % uctbx.d_star_sq_as_d(ds2) if ds2 > 0 else 0 for ds2 in xticks
]
self.ax.set_xticklabels(xticks_d)
self.ax.set_xlabel("Resolution (A)")
self.ax.set_ylabel(self.ylabel)
self.ax.legend(loc="best")
self.fig.savefig(filename)
class resolutionizer(object):
"""A class to calculate things from merging reflections."""
def __init__(self, i_obs, params, batches=None, reference=None):
self._params = params
self._reference = reference
if self._reference is not None:
self._reference = self._reference.merge_equivalents(
use_internal_variance=False
).array()
i_obs = i_obs.customized_copy(
anomalous_flag=params.anomalous, info=i_obs.info()
)
if self._params.batch_range is not None and batches is not None:
batch_min, batch_max = self._params.batch_range
assert batches is not None
sel = (batches.data() >= batch_min) & (batches.data() <= batch_max)
i_obs = i_obs.select(sel).set_info(i_obs.info())
if self._params.space_group is not None:
i_obs = i_obs.customized_copy(
space_group_info=self._params.space_group, info=i_obs.info()
)
self._intensities = i_obs
import iotbx.merging_statistics
self._merging_statistics = iotbx.merging_statistics.dataset_statistics(
i_obs=i_obs,
n_bins=self._params.nbins,
cc_one_half_significance_level=self._params.cc_half_significance_level,
cc_one_half_method=self._params.cc_half_method,
binning_method=self._params.binning_method,
anomalous=params.anomalous,
use_internal_variance=False,
eliminate_sys_absent=False,
assert_is_not_unique_set_under_symmetry=False,
)
@classmethod
def from_unmerged_mtz(cls, scaled_unmerged, params):
def miller_array_from_mtz(unmerged_mtz):
from iotbx import reflection_file_reader
hkl_in = reflection_file_reader.any_reflection_file(unmerged_mtz)
miller_arrays = hkl_in.as_miller_arrays(merge_equivalents=False)
i_obs = None
batches = None
all_i_obs = []
for array in miller_arrays:
labels = array.info().label_string()
if array.is_xray_intensity_array():
all_i_obs.append(array)
if labels == "BATCH":
assert batches is None
batches = array
if i_obs is None:
if len(all_i_obs) == 0:
raise Sorry("No intensities found in %s." % file_name)
elif len(all_i_obs) > 1:
if params.labels is not None:
from iotbx.reflection_file_utils import label_table
lab_tab = label_table(all_i_obs)
i_obs = lab_tab.select_array(
label=params.labels[0], command_line_switch="labels"
)
if i_obs is None:
raise Sorry(
"Multiple intensity arrays - please specify one:\n%s"
% "\n".join(
[
" labels=%s" % a.info().label_string()
for a in all_i_obs
]
)
)
else:
i_obs = all_i_obs[0]
if hkl_in.file_type() == "ccp4_mtz":
# need original miller indices otherwise we don't get correct anomalous
# merging statistics
mtz_object = hkl_in.file_content()
if "M_ISYM" in mtz_object.column_labels():
indices = mtz_object.extract_original_index_miller_indices()
i_obs = i_obs.customized_copy(indices=indices, info=i_obs.info())
return i_obs, batches
i_obs, batches = miller_array_from_mtz(scaled_unmerged)
if params.reference is not None:
reference, _ = miller_array_from_mtz(params.reference)
else:
reference = None
return cls(i_obs, params, batches=batches, reference=reference)
def resolution_auto(self):
"""Compute resolution limits based on the current self._params set."""
if self._params.rmerge:
stamp("ra: rmerge")
print("Resolution rmerge: %.2f" % self.resolution_rmerge())
if self._params.completeness:
stamp("ra: comp")
print("Resolution completeness: %.2f" % self.resolution_completeness())
if self._params.cc_half:
stamp("ra: cc")
print("Resolution cc_half : %.2f" % self.resolution_cc_half())
if self._params.cc_ref and self._reference is not None:
stamp("ra: cc")
print("Resolution cc_ref : %.2f" % self.resolution_cc_ref())
if self._params.isigma:
stamp("ra: isig")
print("Resolution I/sig: %.2f" % self.resolution_unmerged_isigma())
if self._params.misigma:
stamp("ra: mnisig")
print("Resolution Mn(I/sig): %.2f" % self.resolution_merged_isigma())
if self._params.i_mean_over_sigma_mean:
print(
"Resolution Mn(I)/Mn(sig): %.2f"
% self.resolution_i_mean_over_sigma_mean()
)
def resolution_rmerge(self, limit=None, log=None):
"""Compute a resolution limit where either rmerge = 1.0 (limit if
set) or the full extent of the data. N.B. this fit is only meaningful
for positive values."""
if limit is None:
limit = self._params.rmerge
rmerge_s = flex.double(
[b.r_merge for b in self._merging_statistics.bins]
).reversed()
s_s = flex.double(
[1 / b.d_min ** 2 for b in self._merging_statistics.bins]
).reversed()
sel = rmerge_s > 0
rmerge_s = rmerge_s.select(sel)
s_s = s_s.select(sel)
if limit == 0.0:
r_rmerge = 1.0 / math.sqrt(flex.max(s_s))
rmerge_f = None
elif limit > flex.max(rmerge_s):
r_rmerge = 1.0 / math.sqrt(flex.max(s_s))
rmerge_f = None
else:
rmerge_f = log_inv_fit(s_s, rmerge_s, 6)
if log:
fout = open(log, "w")
for j, s in enumerate(s_s):
d = 1.0 / math.sqrt(s)
o = rmerge_s[j]
m = rmerge_f[j]
fout.write("%f %f %f %f\n" % (s, d, o, m))
fout.close()
try:
r_rmerge = 1.0 / math.sqrt(interpolate_value(s_s, rmerge_f, limit))
except Exception:
r_rmerge = 1.0 / math.sqrt(flex.max(s_s))
if self._params.plot:
plot = resolution_plot(ylabel="Rmerge")
if rmerge_f is not None:
plot.plot(s_s, rmerge_f, label="fit")
plot.plot(s_s, rmerge_s, label="Rmerge")
plot.plot_resolution_limit(r_rmerge)
plot.savefig("rmerge.png")
return r_rmerge
def resolution_i_mean_over_sigma_mean(self, limit=None, log=None):
"""Compute a resolution limit where either <I>/<sigma> = 1.0 (limit if
set) or the full extent of the data."""
if limit is None:
limit = self._params.i_mean_over_sigma_mean
isigma_s = flex.double(
[b.i_mean_over_sigi_mean for b in self._merging_statistics.bins]
).reversed()
s_s = flex.double(
[1 / b.d_min ** 2 for b in self._merging_statistics.bins]
).reversed()
sel = isigma_s > 0
isigma_s = isigma_s.select(sel)
s_s = s_s.select(sel)
if flex.min(isigma_s) > limit:
r_isigma = 1.0 / math.sqrt(flex.max(s_s))
isigma_f = None
else:
isigma_f = log_fit(s_s, isigma_s, 6)
if log:
fout = open(log, "w")
for j, s in enumerate(s_s):
d = 1.0 / math.sqrt(s)
o = isigma_s[j]
m = isigma_f[j]
fout.write("%f %f %f %f\n" % (s, d, o, m))
fout.close()
try:
r_isigma = 1.0 / math.sqrt(interpolate_value(s_s, isigma_f, limit))
except Exception:
if limit > max(isigma_f):
r_isigma = 1.0 / math.sqrt(flex.min(s_s))
else:
r_isigma = 1.0 / math.sqrt(flex.max(s_s))
if self._params.plot:
plot = resolution_plot(ylabel="Unmerged <I>/<sigma>")
if isigma_f is not None:
plot.plot(s_s, isigma_f, label="fit")
plot.plot(s_s, isigma_s, label="Unmerged <I>/<sigma>")
plot.plot_resolution_limit(r_isigma)
plot.savefig("i_mean_over_sigma_mean.png")
return r_isigma
def resolution_unmerged_isigma(self, limit=None, log=None):
"""Compute a resolution limit where either I/sigma = 1.0 (limit if
set) or the full extent of the data."""
if limit is None:
limit = self._params.isigma
isigma_s = flex.double(
[b.unmerged_i_over_sigma_mean for b in self._merging_statistics.bins]
).reversed()
s_s = flex.double(
[1 / b.d_min ** 2 for b in self._merging_statistics.bins]
).reversed()
sel = isigma_s > 0
isigma_s = isigma_s.select(sel)
s_s = s_s.select(sel)
if flex.min(isigma_s) > limit:
r_isigma = 1.0 / math.sqrt(flex.max(s_s))
isigma_f = None
else:
isigma_f = log_fit(s_s, isigma_s, 6)
if log:
fout = open(log, "w")
for j, s in enumerate(s_s):
d = 1.0 / math.sqrt(s)
o = isigma_s[j]
m = isigma_f[j]
fout.write("%f %f %f %f\n" % (s, d, o, m))
fout.close()
try:
r_isigma = 1.0 / math.sqrt(interpolate_value(s_s, isigma_f, limit))
except Exception:
r_isigma = 1.0 / math.sqrt(flex.max(s_s))
if self._params.plot:
plot = resolution_plot(ylabel="Unmerged I/sigma")
if isigma_f is not None:
plot.plot(s_s, isigma_f, label="fit")
plot.plot(s_s, isigma_s, label="Unmerged I/sigma")
plot.plot_resolution_limit(r_isigma)
plot.savefig("isigma.png")
return r_isigma
def resolution_merged_isigma(self, limit=None, log=None):
"""Compute a resolution limit where either Mn(I/sigma) = 1.0 (limit if
set) or the full extent of the data."""
if limit is None:
limit = self._params.misigma
misigma_s = flex.double(
[b.i_over_sigma_mean for b in self._merging_statistics.bins]
).reversed()
s_s = flex.double(
[1 / b.d_min ** 2 for b in self._merging_statistics.bins]
).reversed()
sel = misigma_s > 0
misigma_s = misigma_s.select(sel)
s_s = s_s.select(sel)
if flex.min(misigma_s) > limit:
r_misigma = 1.0 / math.sqrt(flex.max(s_s))
misigma_f = None
else:
misigma_f = log_fit(s_s, misigma_s, 6)
if log:
fout = open(log, "w")
for j, s in enumerate(s_s):
d = 1.0 / math.sqrt(s)
o = misigma_s[j]
m = misigma_f[j]
fout.write("%f %f %f %f\n" % (s, d, o, m))
fout.close()
try:
r_misigma = 1.0 / math.sqrt(interpolate_value(s_s, misigma_f, limit))
except Exception:
r_misigma = 1.0 / math.sqrt(flex.max(s_s))
if self._params.plot:
plot = resolution_plot(ylabel="Merged I/sigma")
if misigma_f is not None:
plot.plot(s_s, misigma_f, label="fit")
plot.plot(s_s, misigma_s, label="Merged I/sigma")
plot.plot_resolution_limit(r_misigma)
plot.savefig("misigma.png")
return r_misigma
def resolution_completeness(self, limit=None, log=None):
"""Compute a resolution limit where completeness < 0.5 (limit if
set) or the full extent of the data. N.B. this completeness is
with respect to the *maximum* completeness in a shell, to reflect
triclinic cases."""
if limit is None:
limit = self._params.completeness
comp_s = flex.double(
[b.completeness for b in self._merging_statistics.bins]
).reversed()
s_s = flex.double(
[1 / b.d_min ** 2 for b in self._merging_statistics.bins]
).reversed()
if flex.min(comp_s) > limit:
r_comp = 1.0 / math.sqrt(flex.max(s_s))
comp_f = None
else:
comp_f = fit(s_s, comp_s, 6)
rlimit = limit * max(comp_s)
if log:
fout = open(log, "w")
for j, s in enumerate(s_s):
d = 1.0 / math.sqrt(s)
o = comp_s[j]
m = comp_f[j]
fout.write("%f %f %f %f\n" % (s, d, o, m))
fout.close()
try:
r_comp = 1.0 / math.sqrt(interpolate_value(s_s, comp_f, rlimit))
except Exception:
r_comp = 1.0 / math.sqrt(flex.max(s_s))
if self._params.plot:
plot = resolution_plot(ylabel="Completeness")
if comp_f is not None:
plot.plot(s_s, comp_f, label="fit")
plot.plot(s_s, comp_s, label="Completeness")
plot.plot_resolution_limit(r_comp)
plot.savefig("completeness.png")
return r_comp
def resolution_cc_half(self, limit=None, log=None):
"""Compute a resolution limit where cc_half < 0.5 (limit if
set) or the full extent of the data."""
if limit is None:
limit = self._params.cc_half
if self._params.cc_half_method == "sigma_tau":
cc_s = flex.double(
[b.cc_one_half_sigma_tau for b in self._merging_statistics.bins]
).reversed()
else:
cc_s = flex.double(
[b.cc_one_half for b in self._merging_statistics.bins]
).reversed()
s_s = flex.double(
[1 / b.d_min ** 2 for b in self._merging_statistics.bins]
).reversed()
p = self._params.cc_half_significance_level
if p is not None:
if self._params.cc_half_method == "sigma_tau":
significance = flex.bool(
[
b.cc_one_half_sigma_tau_significance
for b in self._merging_statistics.bins
]
).reversed()
cc_half_critical_value = flex.double(
[
b.cc_one_half_sigma_tau_critical_value
for b in self._merging_statistics.bins
]
).reversed()
else:
significance = flex.bool(
[b.cc_one_half_significance for b in self._merging_statistics.bins]
).reversed()
cc_half_critical_value = flex.double(
[
b.cc_one_half_critical_value
for b in self._merging_statistics.bins
]
).reversed()
# index of last insignificant bin
i = flex.last_index(significance, False)
if i is None or i == len(significance) - 1:
i = 0
else:
i += 1
else:
i = 0
if self._params.cc_half_fit == "tanh":
cc_f = tanh_fit(s_s[i:], cc_s[i:], iqr_multiplier=4)
else:
cc_f = fit(s_s[i:], cc_s[i:], 6)
stamp("rch: fits")
rlimit = limit * max(cc_s)
if log:
fout = open(log, "w")
for j, s in enumerate(s_s):
d = 1.0 / math.sqrt(s)
o = cc_s[j]
m = cc_f[j]
fout.write("%f %f %f %f\n" % (s, d, o, m))
fout.close()
try:
r_cc = 1.0 / math.sqrt(interpolate_value(s_s[i:], cc_f, rlimit))
except Exception:
r_cc = 1.0 / math.sqrt(max(s_s[i:]))
stamp("rch: done : %s" % r_cc)
if self._params.plot:
plot = resolution_plot("CC1/2")
plot.plot(s_s[i:], cc_f, label="fit")
plot.plot(s_s, cc_s, label="CC1/2")
if p is not None:
plot.plot(
s_s, cc_half_critical_value, label="Confidence limit (p=%g)" % p
)
plot.plot_resolution_limit(r_cc)
plot.savefig("cc_half.png")
return r_cc
def resolution_cc_ref(self, limit=None, log=None):
"""Compute a resolution limit where cc_ref < 0.5 (limit if
set) or the full extent of the data."""
if limit is None:
limit = self._params.cc_ref
intensities = self._intensities.merge_equivalents(
use_internal_variance=False
).array()
cc_s = flex.double()
for b in self._merging_statistics.bins:
sel = intensities.resolution_filter_selection(d_min=b.d_min, d_max=b.d_max)
sel_ref = self._reference.resolution_filter_selection(
d_min=b.d_min, d_max=b.d_max
)
d = intensities.select(sel)
dref = self._reference.select(sel_ref)
cc = d.correlation(dref, assert_is_similar_symmetry=False)
cc_s.append(cc.coefficient())
cc_s = cc_s.reversed()
s_s = flex.double(
[1 / b.d_min ** 2 for b in self._merging_statistics.bins]
).reversed()
if self._params.cc_half_fit == "tanh":
cc_f = tanh_fit(s_s, cc_s, iqr_multiplier=4)
else:
cc_f = fit(s_s, cc_s, 6)
stamp("rch: fits")
rlimit = limit * max(cc_s)
if log:
fout = open(log, "w")
for j, s in enumerate(s_s):
d = 1.0 / math.sqrt(s)
o = cc_s[j]
m = cc_f[j]
fout.write("%f %f %f %f\n" % (s, d, o, m))
fout.close()
try:
r_cc = 1.0 / math.sqrt(interpolate_value(s_s, cc_f, rlimit))
except Exception:
r_cc = 1.0 / math.sqrt(max(s_s))
stamp("rch: done : %s" % r_cc)
if self._params.plot:
plot = resolution_plot("CCref")
plot.plot(s_s, cc_f, label="fit")
plot.plot(s_s, cc_s, label="CCref")
plot.plot_resolution_limit(r_cc)
plot.savefig("cc_ref.png")
return r_cc
def run(args):
working_phil = phil_defaults
interp = working_phil.command_line_argument_interpreter(home_scope="resolutionizer")
params, unhandled = interp.process_and_fetch(
args, custom_processor="collect_remaining"
)
params = params.extract().resolutionizer
if len(unhandled) == 0:
working_phil.show()
exit()
assert len(unhandled) == 1
scaled_unmerged = unhandled[0]
stamp("Resolutionizer.py starting")
m = resolutionizer.from_unmerged_mtz(scaled_unmerged, params)
stamp("instantiated")
m.resolution_auto()
stamp("the end.")
if __name__ == "__main__":
run(sys.argv[1:])
```
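A minimal usage sketch (not part of `Resolutionizer.py` itself) showing how the curve-fitting helpers above can be driven on their own. It assumes the module is importable as `dials.util.Resolutionizer`; the data are synthetic and the 2.0 cut-off is just an example threshold.

```python
import math

from dials.util import Resolutionizer

# Synthetic merged I/sigma curve on a 1/d^2 grid, falling off with resolution.
s_s = [0.01 * j for j in range(1, 26)]
misigma = [40.0 * math.exp(-25.0 * s) for s in s_s]

# Fit in log space (as resolution_merged_isigma does) and read off where the
# fitted curve crosses 2.0, converting 1/d^2 back to a d-spacing.
fitted = Resolutionizer.log_fit(s_s, misigma, 6)
d_min = 1.0 / math.sqrt(Resolutionizer.interpolate_value(s_s, fitted, 2.0))
print("Mn(I/sig) = 2.0 at d = %.2f Angstrom" % d_min)
```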
#### File: data/beamline_defs/PILATUS_2M_S_N_24_0107_Diamond.py
```python
from __future__ import absolute_import, division, print_function
import iotbx.cif.model
import dxtbx.data.beamline_defs
class get_definition(dxtbx.data.beamline_defs.template):
def __init__(self, timestamp=None, **kwargs):
self._timestamp = timestamp
def CIF_block(self):
"""Interface function to generate a CIF block for this detector."""
return self._identify_time()(mmcif=False)
def mmCIF_block(self):
"""Interface function to generate an mmCIF block for this detector."""
return self._identify_time()(mmcif=True)
def _identify_time(self):
"""Determine detector environment based on timestamp."""
if not self._timestamp: # default to I19
return self._at_I19
if self._timestamp >= self._date_to_epoch(2015, 11, 1):
return self._at_I19 # moved to I19 on 01.11.2015
return self._at_I04_1 # before was on I04-1
def _base(self, mmcif=False):
"""Generates
1. a CIF/mmCIF block that contains information that
is always true about the detector.
2. a lookup function for CIF/mmCIF strings."""
# prepare string lookup table
lookup = self._lookup(mmcif)
b = iotbx.cif.model.block()
b[lookup("df.detector")] = "Photon counting pixel array"
b[lookup("df.rad.type")] = "Synchrotron"
return b, lookup
def _at_I19(self, mmcif=False):
b, lookup = self._base(mmcif)
b[lookup("df.m.dev")] = "Fixed \\c 3-circle diffractometer"
b[lookup("df.m.dev_type")] = "Fluid Film Devices"
b[lookup("df.m.method")] = "shutterless scans"
b[lookup("df.m.spec_supp")] = "MiTeGen MicroMount"
b[lookup("df.rad.source")] = "Diamond Light Source Beamline I19-1"
b[lookup("df.rad.mono")] = "Silicon 111"
return b
def _at_I04_1(self, mmcif=False):
b, lookup = self._base(mmcif)
b[lookup("df.rad.source")] = "Diamond Light Source Beamline I04-1"
return b
```
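For illustration, a hedged sketch of how this definition class might be queried; the import path is an assumption based on the file location above, and the timestamp is only an example chosen to fall after the move to I19.

```python
import calendar

# Import path assumed from the file location above.
from dxtbx.data.beamline_defs.PILATUS_2M_S_N_24_0107_Diamond import get_definition

# Data collected after 1 November 2015 should be attributed to beamline I19-1.
timestamp = calendar.timegm((2016, 3, 1, 0, 0, 0, 0, 0, 0))
defn = get_definition(timestamp=timestamp)

cif_block = defn.CIF_block()      # CIF data names
mmcif_block = defn.mmCIF_block()  # mmCIF data names
```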
#### File: dxtbx/format/FormatEiger0MQDump.py
```python
from __future__ import absolute_import, division, print_function
import json
import os
import numpy
from scitbx.array_family import flex
import msgpack
from dxtbx.format.Format import Format
try:
import bitshuffle
except ImportError:
pass # not available in conda
class FormatEiger0MQDump(Format):
@staticmethod
def understand(image_file):
if os.path.exists(os.path.join(os.path.split(image_file)[0], "header")):
return True
return False
def _start(self):
header = os.path.join(os.path.split(self._image_file)[0], "header")
data = msgpack.unpackb(self.open_file(header).read())
self._header = json.loads(data[1])
def _goniometer(self):
return None # return self._goniometer_factory.single_axis()
def _detector(self):
"""Return a model for a simple detector, presuming no one has
one of these on a two-theta stage. Assert that the beam centre is
provided in the Mosflm coordinate frame."""
distance = self._header["detector_distance"] * 1000
if distance == 0:
# XXX hack for development
distance = 175
pixel_size_x = self._header["x_pixel_size"]
pixel_size_y = self._header["y_pixel_size"]
beam_x = self._header["beam_center_x"] * pixel_size_x * 1000
beam_y = self._header["beam_center_y"] * pixel_size_y * 1000
if beam_x == 0 and beam_y == 0:
# hack for development
beam_x = 154.87
beam_y = 165.66
pixel_size_x = 1000 * self._header["x_pixel_size"]
pixel_size_y = 1000 * self._header["y_pixel_size"]
image_size = (
self._header["x_pixels_in_detector"],
self._header["y_pixels_in_detector"],
)
# XXX fixme hard coded
overload = 0xFFFF
underload = -1
return self._detector_factory.simple(
"PAD",
distance,
(beam_x, beam_y),
"+x",
"-y",
(pixel_size_x, pixel_size_y),
image_size,
(underload, overload),
[],
)
def _beam(self, index=None):
return self._beam_factory.simple(self._header["wavelength"])
def _scan(self):
return None
def get_goniometer(self, index=None):
return self._goniometer()
def get_detector(self, index=None):
return self._detector()
def get_beam(self, index=None):
return self._beam()
def get_scan(self, index=None):
if index is None:
return self._scan()
scan = self._scan()
if scan is not None:
return scan[index]
return scan
def get_raw_data(self):
nx = self._header["x_pixels_in_detector"]
ny = self._header["y_pixels_in_detector"]
depth = self._header["bit_depth_image"]
assert self._header["pixel_mask_applied"] is True
if depth == 16:
dtype = numpy.uint16
elif depth == 32:
dtype = numpy.uint32
else:
raise ValueError("unsupported bit_depth_image: %d" % depth)
dt = numpy.dtype(dtype)
data = msgpack.unpackb(self.open_file(self._image_file).read(), raw=False)[2]
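# The payload is assumed to follow the bitshuffle/LZ4 framing: bytes 0-7 hold
# the uncompressed size, bytes 8-11 the block size in bytes (divided by the
# 4-byte element size below to get elements), and the compressed stream
# starts at byte 12.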
blob = numpy.fromstring(data[12:], dtype=numpy.uint8)
if dtype == numpy.uint32:
block = numpy.ndarray(shape=(), dtype=">u4", buffer=data[8:12]) / 4
image = bitshuffle.decompress_lz4(blob, (ny, nx), dt, block)
else:
image = bitshuffle.decompress_lz4(blob, (ny, nx), dt)
image = flex.int(image.astype("int32"))
# only need to overwrite values if read and used in 16-bit mode
if dtype == numpy.uint16:
bad = 2 ** 16 - 1
sel = image.as_1d() >= bad
image.as_1d().set_selected(sel, -1)
return image
def get_detectorbase(self, index=None):
raise NotImplementedError
```
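A hedged sketch of how a dxtbx format class such as the one above is typically driven. The dump path is a placeholder; a sibling `header` file is assumed to exist alongside the frame files, as required by `understand()`.

```python
from dxtbx.format.FormatEiger0MQDump import FormatEiger0MQDump

image_file = "/path/to/0mq_dump/frame_000001"  # placeholder path

if FormatEiger0MQDump.understand(image_file):
    fmt = FormatEiger0MQDump(image_file)
    print(fmt.get_detector())
    print(fmt.get_beam())
    raw = fmt.get_raw_data()   # flex.int array of pixel counts
    print(raw.all())           # (ny, nx)
```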
#### File: dxtbx/format/FormatSMVJHSim.py
```python
from __future__ import absolute_import, division, print_function
import calendar
import sys
import time
from iotbx.detectors import SMVImage
from dxtbx.format.FormatSMV import FormatSMV
class FormatSMVJHSim(FormatSMV):
"""A class for reading SMV format JHSim images, and correctly constructing
a model for the experiment from this."""
# all ADSC detectors generate images with an ADC offset of 40
# for Mar/Rayonix it is 10
# Rigaku SMV uses 20, and 5 for image plate formats
# for one particular simulation, I used 1
ADC_OFFSET = 1
image_pedestal = 1
@staticmethod
def understand(image_file):
"""Check to see if this looks like an JHSim SMV format image, i.e. we can
make sense of it. From JH: "The best way to identify images from any of my
simulators is to look for BEAMLINE=fake in the header."."""
size, header = FormatSMV.get_smv_header(image_file)
if header.get("BEAMLINE") == "fake":
return True
else:
return False
def detectorbase_start(self):
if not hasattr(self, "detectorbase") or self.detectorbase is None:
self.detectorbase = SMVImage(self._image_file)
self.detectorbase.open_file = self.open_file
self.detectorbase.readHeader()
def _goniometer(self):
"""Return a model for a simple single-axis goniometer. This should
probably be checked against the image header."""
return self._goniometer_factory.single_axis()
def _detector(self):
"""Return a model for a simple detector, presuming no one has
one of these on a two-theta stage. Assert that the beam centre is
provided in the Mosflm coordinate frame."""
distance = float(self._header_dictionary["DISTANCE"])
beam_x = float(self._header_dictionary["BEAM_CENTER_X"])
beam_y = float(self._header_dictionary["BEAM_CENTER_Y"])
pixel_size = float(self._header_dictionary["PIXEL_SIZE"])
image_size = (
float(self._header_dictionary["SIZE1"]),
float(self._header_dictionary["SIZE2"]),
)
image_pedestal = 1
try:
image_pedestal = float(self._header_dictionary["ADC_OFFSET"])
except KeyError:
pass
overload = 65535 - image_pedestal
underload = 1 - image_pedestal
# interpret beam center conventions
image_height_mm = pixel_size * image_size[1]
adxv_beam_center = (beam_x, beam_y)
cctbx_beam_center = (
adxv_beam_center[0] + pixel_size,
image_height_mm - adxv_beam_center[1] + pixel_size,
)
# Guess whether this is mimicking a Pilatus, if so set detector type so
# that spot-finding parameters are appropriate
if pixel_size == 0.172:
stype = "SENSOR_PAD"
else:
stype = "CCD"
return self._detector_factory.simple(
stype,
distance,
cctbx_beam_center,
"+x",
"-y",
(pixel_size, pixel_size),
image_size,
(underload, overload),
[],
)
def _beam(self):
"""Return a simple model for the beam."""
wavelength = float(self._header_dictionary["WAVELENGTH"])
return self._beam_factory.simple(wavelength)
def _scan(self):
"""Return the scan information for this image."""
format = self._scan_factory.format("SMV")
exposure_time = 1
epoch = None
# PST, PDT timezones not recognised by default...
epoch = 0
try:
date_str = self._header_dictionary["DATE"]
date_str = date_str.replace("PST", "").replace("PDT", "")
except KeyError:
date_str = ""
for format_string in ["%a %b %d %H:%M:%S %Y", "%a %b %d %H:%M:%S %Z %Y"]:
try:
epoch = calendar.timegm(time.strptime(date_str, format_string))
break
except ValueError:
pass
# assert(epoch)
osc_start = float(self._header_dictionary["OSC_START"])
osc_range = float(self._header_dictionary["OSC_RANGE"])
return self._scan_factory.single(
self._image_file, format, exposure_time, osc_start, osc_range, epoch
)
def get_raw_data(self):
"""Get the pixel intensities (i.e. read the image and return as a
flex array of integers.)"""
assert len(self.get_detector()) == 1
panel = self.get_detector()[0]
image_size = panel.get_image_size()
raw_data = self._get_endianic_raw_data(size=image_size)
# apply image pedestal, will result in *negative pixel values*
# this is a horrible idea since raw_data is unsigned
# see all instances of image_pedestal in dxtbx
image_pedestal = self._header_dictionary.get("ADC_OFFSET", 1)
raw_data -= int(image_pedestal)
return raw_data
if __name__ == "__main__":
for arg in sys.argv[1:]:
print(FormatSMVJHSim.understand(arg))
```
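A brief, hypothetical check of a simulated image through the generic `dxtbx.load` entry point, which is expected to pick the format class above when the SMV header contains `BEAMLINE=fake`; the filename is a placeholder.

```python
import dxtbx

img = dxtbx.load("fake_0001.img")  # placeholder filename
print(img.get_detector())
print(img.get_scan())
data = img.get_raw_data()          # pedestal-subtracted flex array
```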
#### File: dxtbx/format/FormatXTCJungfrau.py
```python
from __future__ import absolute_import, division, print_function
import sys
from builtins import range
import numpy as np
from libtbx.phil import parse
from scitbx.array_family import flex
from scitbx.matrix import col
import psana
from dxtbx.format.FormatXTC import FormatXTC, locator_str
from dxtbx.model import Detector
try:
from xfel.cxi.cspad_ana import cspad_tbx
except ImportError:
# xfel not configured
pass
jungfrau_locator_str = """
jungfrau {
dark = True
.type = bool
.help = Dictates if dark subtraction is done from raw data
monolithic = False
.type = bool
.help = switch to FormatXTCJungfrauMonolithic if True. Used for LS49 image averaging
}
"""
jungfrau_locator_scope = parse(
jungfrau_locator_str + locator_str, process_includes=True
)
class FormatXTCJungfrau(FormatXTC):
def __init__(self, image_file, **kwargs):
super(FormatXTCJungfrau, self).__init__(
image_file, locator_scope=jungfrau_locator_scope, **kwargs
)
self._ds = FormatXTC._get_datasource(image_file, self.params)
self._env = self._ds.env()
self.populate_events()
self.n_images = len(self.times)
self._cached_detector = {}
self._cached_psana_detectors = {}
@staticmethod
def understand(image_file):
try:
params = FormatXTC.params_from_phil(jungfrau_locator_scope, image_file)
except Exception:
return False
return any(["jungfrau" in src.lower() for src in params.detector_address])
def get_raw_data(self, index):
d = FormatXTCJungfrau.get_detector(self, index)
evt = self._get_event(index)
run = self.get_run_from_index(index)
if run.run() not in self._cached_psana_detectors:
assert len(self.params.detector_address) == 1
self._cached_psana_detectors[run.run()] = psana.Detector(
self.params.detector_address[0], self._env
)
det = self._cached_psana_detectors[run.run()]
data = det.calib(evt)
data = data.astype(np.float64)
self._raw_data = []
for quad_count, quad in enumerate(d.hierarchy()):
for asic_count, asic in enumerate(quad):
fdim, sdim = asic.get_image_size()
sensor_id = asic_count // 4 # There are 2X4 asics per quadrant
asic_in_sensor_id = asic_count % 4 # this number will be 0,1,2 or 3
asic_data = data[quad_count][
sensor_id * sdim : (sensor_id + 1) * sdim,
asic_in_sensor_id * fdim : (asic_in_sensor_id + 1) * fdim,
] # 8 sensors per quad
self._raw_data.append(flex.double(np.array(asic_data)))
assert len(d) == len(self._raw_data)
return tuple(self._raw_data)
def get_num_images(self):
return self.n_images
def get_detector(self, index=None):
return FormatXTCJungfrau._detector(self, index)
def get_beam(self, index=None):
return self._beam(index)
def _beam(self, index=None):
"""Returns a simple model for the beam """
if index is None:
index = 0
evt = self._get_event(index)
wavelength = cspad_tbx.evt_wavelength(evt)
if wavelength is None:
return None
return self._beam_factory.simple(wavelength)
def get_goniometer(self, index=None):
return None
def get_scan(self, index=None):
return None
def _detector(self, index=None):
run = self.get_run_from_index(index)
if run.run() in self._cached_detector:
return self._cached_detector[run.run()]
if index is None:
index = 0
self._env = self._ds.env()
assert len(self.params.detector_address) == 1
self._det = psana.Detector(self.params.detector_address[0], self._env)
geom = self._det.pyda.geoaccess(self._get_event(index).run())
pixel_size = (
self._det.pixel_size(self._get_event(index)) / 1000.0
) # convert to mm
d = Detector()
pg0 = d.hierarchy()
# first deal with D0
det_num = 0
D0 = geom.get_top_geo().get_list_of_children()[0]
xx, yy, zz = D0.get_pixel_coords()
xx = xx / 1000.0 # to mm
yy = yy / 1000.0 # to mm
zz = zz / 1000.0 # to mm
oriD0 = col((np.mean(xx), np.mean(yy), -np.mean(zz)))
fp = col((xx[0][0][1], yy[0][0][1], zz[0][0][1]))
sp = col((xx[0][1][0], yy[0][1][0], zz[0][1][0]))
op = col((xx[0][0][0], yy[0][0][0], zz[0][0][0]))
origin = oriD0
fast = (fp - op).normalize()
slow = (sp - op).normalize()
pg0.set_local_frame(fast.elems, slow.elems, origin.elems)
pg0.set_name("D%d" % (det_num))
# Now deal with Qx
for quad_num in range(2):
pg1 = pg0.add_group()
Qx = D0.get_list_of_children()[quad_num]
xx, yy, zz = Qx.get_pixel_coords()
xx = xx / 1000.0 # to mm
yy = yy / 1000.0 # to mm
zz = zz / 1000.0 # to mm
oriQx = col((np.mean(xx), np.mean(yy), np.mean(zz)))
fp = col((xx[0][1], yy[0][1], zz[0][1]))
sp = col((xx[1][0], yy[1][0], zz[1][0]))
op = col((xx[0][0], yy[0][0], zz[0][0]))
origin = oriQx
fast = (fp - op).normalize()
slow = (sp - op).normalize()
pg1.set_local_frame(fast.elems, slow.elems, origin.elems)
pg1.set_name("D%dQ%d" % (det_num, quad_num))
# Now deal with Az
for asic_num in range(8):
val = "ARRAY_D0Q%dA%d" % (quad_num, asic_num)
p = pg1.add_panel()
dim_slow = xx.shape[0]
dim_fast = xx.shape[1]
sensor_id = asic_num // 4 # There are 2X4 asics per quadrant
asic_in_sensor_id = asic_num % 4 # this number will be 0,1,2 or 3
id_slow = sensor_id * (dim_slow // 2)
id_fast = asic_in_sensor_id * (dim_fast // 4)
oriAy = col(
(xx[id_slow][id_fast], yy[id_slow][id_fast], zz[id_slow][id_fast])
)
fp = col(
(
xx[id_slow][id_fast + 1],
yy[id_slow][id_fast + 1],
zz[id_slow][id_fast + 1],
)
)
sp = col(
(
xx[id_slow + 1][id_fast],
yy[id_slow + 1][id_fast],
zz[id_slow + 1][id_fast],
)
)
origin = oriAy - oriQx
fast = (fp - oriAy).normalize()
slow = (sp - oriAy).normalize()
p.set_local_frame(fast.elems, slow.elems, origin.elems)
p.set_pixel_size((pixel_size, pixel_size))
p.set_image_size((dim_fast // 4, dim_slow // 2))
p.set_trusted_range((-1, 2e6))
p.set_name(val)
self._cached_detector[run.run()] = d
return d
class FormatXTCJungfrauMonolithic(FormatXTCJungfrau):
""" Monolithic version of the Jungfrau, I.E. use the psana detector image function to assemble a monolithic image """
@staticmethod
def understand(image_file):
try:
params = FormatXTC.params_from_phil(jungfrau_locator_scope, image_file)
if params.jungfrau.monolithic:
return True
return False
except Exception:
return False
def get_raw_data(self, index):
self.get_detector(index) # is this line required?
evt = self._get_event(index)
run = self.get_run_from_index(index)
if run.run() not in self._cached_psana_detectors:
assert len(self.params.detector_address) == 1
self._cached_psana_detectors[run.run()] = psana.Detector(
self.params.detector_address[0], self._env
)
det = self._cached_psana_detectors[run.run()]
data = det.image(evt)
data = data.astype(np.float64)
self._raw_data = flex.double(data)
return self._raw_data
def get_detector(self, index=None):
return self._detector(index)
def _detector(self, index=None):
return self._detector_factory.simple(
sensor="UNKNOWN",
distance=100.0,
beam_centre=(50.0, 50.0),
fast_direction="+x",
slow_direction="-y",
pixel_size=(0.075, 0.075),
image_size=(1030, 1064),
trusted_range=(-1, 2e6),
mask=[],
)
if __name__ == "__main__":
for arg in sys.argv[1:]:
# Bug, should call this part differently for understand method to work
print(FormatXTCJungfrau.understand(arg))
```
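For completeness, a hypothetical driver for the XTC format above. The locator file name is a placeholder for a phil file matching `jungfrau_locator_scope`, and psana plus access to an LCLS data source are assumed to be available.

```python
from dxtbx.format.FormatXTCJungfrau import FormatXTCJungfrau

locator = "jungfrau.loc"  # placeholder locator file

if FormatXTCJungfrau.understand(locator):
    fmt = FormatXTCJungfrau(locator)
    print("%d events" % fmt.get_num_images())
    panels = fmt.get_raw_data(0)   # tuple of flex.double arrays, one per ASIC
    detector = fmt.get_detector(0)
    assert len(panels) == len(detector)
```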
#### File: dxtbx/model/experiment_list.py
```python
from __future__ import absolute_import, division, print_function
import copy
import json
import os
import warnings
from builtins import range
import pkg_resources
import six
import six.moves.cPickle as pickle
from dxtbx.datablock import (
BeamComparison,
DataBlockFactory,
DataBlockTemplateImporter,
DetectorComparison,
GoniometerComparison,
SequenceDiff,
)
from dxtbx.format.FormatMultiImage import FormatMultiImage
from dxtbx.format.image import ImageBool, ImageDouble
from dxtbx.imageset import ImageGrid, ImageSequence, ImageSet, ImageSetFactory
from dxtbx.model import (
BeamFactory,
CrystalFactory,
DetectorFactory,
Experiment,
ExperimentList,
GoniometerFactory,
ProfileModelFactory,
ScanFactory,
)
from dxtbx.sequence_filenames import template_image_range
from dxtbx.serialize import xds
from dxtbx.serialize.filename import resolve_path
from dxtbx.serialize.load import _decode_dict
try:
from typing import Any, Dict, Optional, Tuple
except ImportError:
pass
__all__ = [
"BeamComparison",
"DetectorComparison",
"ExperimentListFactory",
"GoniometerComparison",
"SequenceDiff",
]
class InvalidExperimentListError(RuntimeError):
"""
Indicates an error whilst validating the experiment list.
This means that there is some structural problem that prevents the given data
from representing a well-formed experiment list. This doesn't indicate e.g.
some problem with the data or model consistency.
"""
class ExperimentListDict(object):
"""A helper class for serializing the experiment list to dictionary (needed
to save the experiment list to JSON format."""
def __init__(self, obj, check_format=True, directory=None):
""" Initialise. Copy the dictionary. """
# Basic check: This is a dict-like object. This can happen if e.g. we
# were passed a DataBlock list instead of an ExperimentList dictionary
if isinstance(obj, list) or not hasattr(obj, "get"):
raise InvalidExperimentListError(
"Expected dictionary, not {}".format(type(obj))
)
self._obj = copy.deepcopy(obj)
self._check_format = check_format
self._directory = directory
# If this doesn't claim to be an ExperimentList, don't even try
if self._obj.get("__id__") != "ExperimentList":
raise InvalidExperimentListError(
"Expected __id__ 'ExperimentList', but found {}".format(
repr(self._obj.get("__id__"))
)
)
# Extract lists of models referenced by experiments
# Go through all the imagesets and make sure the dictionary
# references by an index rather than a file path.
self._lookups = {
model: self._extract_models(model, function)
for model, function in (
("beam", BeamFactory.from_dict),
("detector", DetectorFactory.from_dict),
("goniometer", GoniometerFactory.from_dict),
("scan", ScanFactory.from_dict),
("crystal", CrystalFactory.from_dict),
("profile", ProfileModelFactory.from_dict),
("imageset", lambda x: x),
("scaling_model", self._scaling_model_from_dict),
)
}
def _extract_models(self, name, from_dict):
""" Helper function. Extract the models. """
"""if name == imageset: Extract imageset objects from the source.
This function does resolving of an (old) method of imageset lookup
e.g. it was valid to have a string as the imageset value in an
experiment instead of an int - in which case the imageset was
loaded from the named file in the target directory.
If any experiments point to a file in this way, the imageset is
loaded and the experiment is rewritten with an integer pointing
to the new ImageSet in the returned list.
Returns:
The ordered list of serialized-ImageSet dictionaries
that the Experiment list points to.
"""
# Extract all the model list
mlist = self._obj.get(name, [])
# Convert the model from dictionary to the concrete
# python class for the model.
mlist = [from_dict(d) for d in mlist]
# Dictionaries for file mappings
mmap = {}
# For each experiment, check the model is not specified by
# a path, if it is then get the dictionary of the model
# and insert it into the list. Replace the path reference
# with an index
for eobj in self._obj["experiment"]:
value = eobj.get(name)
if value is None:
continue
elif isinstance(value, str):
if value not in mmap:
mmap[value] = len(mlist)
mlist.append(
from_dict(_experimentlist_from_file(value, self._directory))
)
eobj[name] = mmap[value]
elif not isinstance(value, int):
raise TypeError("expected int or str, got %s" % type(value))
# Return the model list
return mlist
def _load_pickle_path(self, imageset_data, param):
# type: (Dict, str) -> Tuple[Optional[str], Any]
"""Read a filename from an imageset dict and load if required.
Args:
imageset_data: The dictionary holding imageset information
param: The key name to lookup in the imageset dictionary
Returns:
A tuple of (filename, data) where data has been loaded from
the pickle file. If there is no key entry then ("", None)
is returned. If the configuration parameter check_format is
False then (filename, None) will be returned.
"""
if param not in imageset_data:
return "", None
filename = resolve_path(imageset_data[param], directory=self._directory)
if self._check_format and filename:
with open(filename, "rb") as fh:
if six.PY3:
return filename, pickle.load(fh, encoding="bytes")
else:
return filename, pickle.load(fh)
return filename or "", None
def decode(self):
""" Decode the dictionary into a list of experiments. """
# Extract all the experiments
# Map of imageset/scan pairs
imagesets = {}
# For every experiment, use the given input to create
# a sensible experiment.
el = ExperimentList()
for eobj in self._obj["experiment"]:
# Get the models
identifier = eobj.get("identifier", "")
beam = self._lookup_model("beam", eobj)
detector = self._lookup_model("detector", eobj)
goniometer = self._lookup_model("goniometer", eobj)
scan = self._lookup_model("scan", eobj)
crystal = self._lookup_model("crystal", eobj)
profile = self._lookup_model("profile", eobj)
scaling_model = self._lookup_model("scaling_model", eobj)
key = (eobj.get("imageset"), eobj.get("scan"))
imageset = None
try:
imageset = imagesets[key] # type: ImageSet
except KeyError:
# This imageset hasn't been loaded yet - create it
imageset_data = self._lookup_model("imageset", eobj)
# Create the imageset from the input data
if imageset_data is not None:
if "params" in imageset_data:
format_kwargs = imageset_data["params"]
else:
format_kwargs = {}
# Load the external lookup data
mask_filename, mask = self._load_pickle_path(imageset_data, "mask")
gain_filename, gain = self._load_pickle_path(imageset_data, "gain")
pedestal_filename, pedestal = self._load_pickle_path(
imageset_data, "pedestal"
)
dx_filename, dx = self._load_pickle_path(imageset_data, "dx")
dy_filename, dy = self._load_pickle_path(imageset_data, "dy")
if imageset_data["__id__"] == "ImageSet":
imageset = self._make_stills(
imageset_data, format_kwargs=format_kwargs
)
elif imageset_data["__id__"] == "ImageGrid":
imageset = self._make_grid(
imageset_data, format_kwargs=format_kwargs
)
elif (
imageset_data["__id__"] == "ImageSequence"
or imageset_data["__id__"] == "ImageSweep"
):
imageset = self._make_sequence(
imageset_data,
beam=beam,
detector=detector,
goniometer=goniometer,
scan=scan,
format_kwargs=format_kwargs,
)
elif imageset_data["__id__"] == "MemImageSet":
imageset = self._make_mem_imageset(imageset_data)
else:
raise RuntimeError("Unknown imageset type")
if imageset is not None:
# Set the external lookup
if mask is None:
mask = ImageBool()
else:
mask = ImageBool(mask)
if gain is None:
gain = ImageDouble()
else:
gain = ImageDouble(gain)
if pedestal is None:
pedestal = ImageDouble()
else:
pedestal = ImageDouble(pedestal)
if dx is None:
dx = ImageDouble()
else:
dx = ImageDouble(dx)
if dy is None:
dy = ImageDouble()
else:
dy = ImageDouble(dy)
if not imageset.external_lookup.mask.data.empty():
if not mask.empty():
mask = tuple(m.data() for m in mask)
for m1, m2 in zip(
mask, imageset.external_lookup.mask.data
):
m1 &= m2.data()
imageset.external_lookup.mask.data = ImageBool(mask)
else:
imageset.external_lookup.mask.data = mask
imageset.external_lookup.mask.filename = mask_filename
imageset.external_lookup.gain.data = gain
imageset.external_lookup.gain.filename = gain_filename
imageset.external_lookup.pedestal.data = pedestal
imageset.external_lookup.pedestal.filename = pedestal_filename
imageset.external_lookup.dx.data = dx
imageset.external_lookup.dx.filename = dx_filename
imageset.external_lookup.dy.data = dy
imageset.external_lookup.dy.filename = dy_filename
# Update the imageset models
if isinstance(imageset, ImageSequence):
imageset.set_beam(beam)
imageset.set_detector(detector)
imageset.set_goniometer(goniometer)
imageset.set_scan(scan)
elif isinstance(imageset, (ImageSet, ImageGrid)):
for i in range(len(imageset)):
imageset.set_beam(beam, i)
imageset.set_detector(detector, i)
imageset.set_goniometer(goniometer, i)
imageset.set_scan(scan, i)
imageset.update_detector_px_mm_data()
# Add the imageset to the dict - even if empty - as this will
# prevent a duplicated attempt at reconstruction
imagesets[key] = imageset
# Append the experiment
el.append(
Experiment(
imageset=imageset,
beam=beam,
detector=detector,
goniometer=goniometer,
scan=scan,
crystal=crystal,
profile=profile,
scaling_model=scaling_model,
identifier=identifier,
)
)
# Return the experiment list
return el
def _make_mem_imageset(self, imageset):
""" Can't make a mem imageset from dict. """
return None
def _make_stills(self, imageset, format_kwargs=None):
""" Make a still imageset. """
filenames = [
resolve_path(p, directory=self._directory) for p in imageset["images"]
]
indices = None
if "single_file_indices" in imageset:
indices = imageset["single_file_indices"]
assert len(indices) == len(filenames)
return ImageSetFactory.make_imageset(
filenames,
None,
check_format=self._check_format,
single_file_indices=indices,
format_kwargs=format_kwargs,
)
def _make_grid(self, imageset, format_kwargs=None):
""" Make a still imageset. """
grid_size = imageset["grid_size"]
return ImageGrid.from_imageset(
self._make_stills(imageset, format_kwargs=format_kwargs), grid_size
)
def _make_sequence(
self,
imageset,
beam=None,
detector=None,
goniometer=None,
scan=None,
format_kwargs=None,
):
""" Make an image sequence. """
# Get the template format
template = resolve_path(imageset["template"], directory=self._directory)
# Get the number of images (if no scan is given we'll try
# to find all the images matching the template)
if scan is None:
i0, i1 = template_image_range(template)
else:
i0, i1 = scan.get_image_range()
format_class = None
if self._check_format is False:
if "single_file_indices" in imageset:
format_class = FormatMultiImage
# Make a sequence from the input data
return ImageSetFactory.make_sequence(
template,
list(range(i0, i1 + 1)),
format_class=format_class,
check_format=self._check_format,
beam=beam,
detector=detector,
goniometer=goniometer,
scan=scan,
format_kwargs=format_kwargs,
)
def _lookup_model(self, name, experiment_dict):
"""
Find a model by looking up its index from a dictionary
Args:
name (str): The model name e.g. 'beam', 'detector'
experiment_dict (Dict[str, int]):
The experiment dictionary. experiment_dict[name] must
exist and be not None to retrieve a model. If this key
exists, then there *must* be an item with this index
in the ExperimentListDict internal model stores.
Returns:
Optional[Any]:
A model by looking up the index pointed to by
experiment_dict[name]. If not present or empty,
then None is returned.
"""
if experiment_dict.get(name) is None:
return None
return self._lookups[name][experiment_dict[name]]
@staticmethod
def _scaling_model_from_dict(obj):
""" Get the scaling model from a dictionary. """
for entry_point in pkg_resources.iter_entry_points("dxtbx.scaling_model_ext"):
if entry_point.name == obj["__id__"]:
return entry_point.load().from_dict(obj)
def _experimentlist_from_file(filename, directory=None):
""" Load a model dictionary from a file. """
filename = resolve_path(filename, directory=directory)
try:
with open(filename, "r") as infile:
return json.load(infile, object_hook=_decode_dict)
except IOError:
raise IOError("unable to read file, %s" % filename)
class ExperimentListDumper(object):
""" A class to help writing JSON files. """
def __init__(self, experiment_list):
""" Initialise """
warnings.warn(
"class ExperimentListDumper() is deprecated. "
"Use experiment_list.as_json(), experiment_list.as_pickle(), experiment_list.as_file() directly",
DeprecationWarning,
stacklevel=2,
)
assert experiment_list
self.as_json = experiment_list.as_json
self.as_pickle = experiment_list.as_pickle
self.as_file = experiment_list.as_file
class ExperimentListFactory(object):
""" A class to help instantiate experiment lists. """
@staticmethod
def from_args(args, verbose=False, unhandled=None):
""" Try to load experiment from any recognised format. """
# Create a list for unhandled arguments
if unhandled is None:
unhandled = []
experiments = ExperimentList()
## First try as image files
# experiments = ExperimentListFactory.from_datablock(
# DataBlockFactory.from_args(args, verbose, unhandled1))
# Try to load from serialized formats
for filename in args:
try:
experiments.extend(
ExperimentListFactory.from_serialized_format(filename)
)
if verbose:
print("Loaded experiments from %s" % filename)
except Exception as e:
if verbose:
print("Could not load experiments from %s: %s" % (filename, str(e)))
unhandled.append(filename)
# Return the experiments
return experiments
@staticmethod
def from_filenames(
filenames,
verbose=False,
unhandled=None,
compare_beam=None,
compare_detector=None,
compare_goniometer=None,
scan_tolerance=None,
format_kwargs=None,
load_models=True,
):
""" Create a list of data blocks from a list of directory or file names. """
experiments = ExperimentList()
for db in DataBlockFactory.from_filenames(
filenames,
verbose=verbose,
unhandled=unhandled,
compare_beam=compare_beam,
compare_detector=compare_detector,
compare_goniometer=compare_goniometer,
scan_tolerance=scan_tolerance,
format_kwargs=format_kwargs,
):
experiments.extend(
ExperimentListFactory.from_datablock_and_crystal(db, None, load_models)
)
return experiments
@staticmethod
def from_imageset_and_crystal(imageset, crystal, load_models=True):
""" Load an experiment list from an imageset and crystal. """
if isinstance(imageset, ImageSequence):
return ExperimentListFactory.from_sequence_and_crystal(
imageset, crystal, load_models
)
else:
return ExperimentListFactory.from_stills_and_crystal(
imageset, crystal, load_models
)
@staticmethod
def from_sequence_and_crystal(imageset, crystal, load_models=True):
""" Create an experiment list from sequence and crystal. """
if load_models:
return ExperimentList(
[
Experiment(
imageset=imageset,
beam=imageset.get_beam(),
detector=imageset.get_detector(),
goniometer=imageset.get_goniometer(),
scan=imageset.get_scan(),
crystal=crystal,
)
]
)
else:
return ExperimentList([Experiment(imageset=imageset, crystal=crystal)])
@staticmethod
def from_stills_and_crystal(imageset, crystal, load_models=True):
""" Create an experiment list from stills and crystal. """
experiments = ExperimentList()
if load_models:
for i in range(len(imageset)):
experiments.append(
Experiment(
imageset=imageset[i : i + 1],
beam=imageset.get_beam(i),
detector=imageset.get_detector(i),
goniometer=imageset.get_goniometer(i),
scan=imageset.get_scan(i),
crystal=crystal,
)
)
else:
for i in range(len(imageset)):
experiments.append(
Experiment(imageset=imageset[i : i + 1], crystal=crystal)
)
return experiments
@staticmethod
def from_datablock_and_crystal(datablock, crystal, load_models=True):
""" Load an experiment list from a datablock. """
# Initialise the experiment list
experiments = ExperimentList()
# If we have a list, loop through
if isinstance(datablock, list):
for db in datablock:
experiments.extend(
ExperimentListFactory.from_datablock_and_crystal(
db, crystal, load_models
)
)
return experiments
# Add all the imagesets
for imageset in datablock.extract_imagesets():
experiments.extend(
ExperimentListFactory.from_imageset_and_crystal(
imageset, crystal, load_models
)
)
# Check the list is consistent
assert experiments.is_consistent()
# Return the experiments
return experiments
@staticmethod
def from_dict(obj, check_format=True, directory=None):
"""Load an experiment list from a dictionary.
Args:
obj (dict):
Dictionary containing either ExperimentList or DataBlock
structure.
check_format (bool):
If True, the file will be read to verify metadata.
directory (str):
    Base directory used to resolve relative file paths.
Returns:
    ExperimentList: The dictionary converted to an experiment list
"""
try:
experiments = ExperimentList()
for db in DataBlockFactory.from_dict(
obj, check_format=check_format, directory=directory
):
experiments.extend(
ExperimentListFactory.from_datablock_and_crystal(db, None)
)
except Exception:
experiments = None
# Decode the experiments from the dictionary
if experiments is None:
experiments = ExperimentListDict(
obj, check_format=check_format, directory=directory
).decode()
# Check the list is consistent
assert experiments.is_consistent()
# Return the experiments
return experiments
@staticmethod
def from_json(text, check_format=True, directory=None):
""" Load an experiment list from JSON. """
return ExperimentListFactory.from_dict(
json.loads(text, object_hook=_decode_dict),
check_format=check_format,
directory=directory,
)
@staticmethod
def from_json_file(filename, check_format=True):
""" Load an experiment list from a json file. """
filename = os.path.abspath(filename)
directory = os.path.dirname(filename)
with open(filename, "r") as infile:
return ExperimentListFactory.from_json(
infile.read(), check_format=check_format, directory=directory
)
@staticmethod
def from_pickle_file(filename):
""" Decode an experiment list from a pickle file. """
with open(filename, "rb") as infile:
obj = pickle.load(infile)
assert isinstance(obj, ExperimentList)
return obj
@staticmethod
def from_xds(xds_inp, xds_other):
""" Generate an experiment list from XDS files. """
# Get the sequence from the XDS files
sequence = xds.to_imageset(xds_inp, xds_other)
# Get the crystal from the XDS files
crystal = xds.to_crystal(xds_other)
# Create the experiment list
experiments = ExperimentListFactory.from_imageset_and_crystal(sequence, crystal)
# Check that a single experiment was created
assert len(experiments) == 1
# Return the experiment list
return experiments
@staticmethod
def from_serialized_format(filename, check_format=True):
""" Try to load the experiment list from a serialized format. """
# First try as a JSON file
try:
return ExperimentListFactory.from_json_file(filename, check_format)
except Exception:
pass
# Now try as a pickle file
return ExperimentListFactory.from_pickle_file(filename)
class ExperimentListTemplateImporter(object):
""" A class to import an experiment list from a template. """
def __init__(self, templates, verbose=False, **kwargs):
importer = DataBlockTemplateImporter(templates, verbose=verbose, **kwargs)
self.experiments = ExperimentList()
for db in importer.datablocks:
self.experiments.extend(
ExperimentListFactory.from_datablock_and_crystal(db, None)
)
```
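A short usage sketch for the factory above; the file names are placeholders.

```python
from dxtbx.model.experiment_list import ExperimentListFactory

# Load a serialized experiment list without re-reading the image headers.
experiments = ExperimentListFactory.from_json_file(
    "indexed.expt", check_format=False
)
print("%d experiment(s)" % len(experiments))

# Or build experiments directly from image files on disk.
experiments = ExperimentListFactory.from_filenames(
    ["image_0001.cbf", "image_0002.cbf"]
)
for expt in experiments:
    print(expt.imageset, expt.beam, expt.detector)
```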
#### File: dxtbx/model/goniometer.py
```python
from __future__ import absolute_import, division, print_function
import math
from builtins import object, range
import libtbx.phil
from scitbx.array_family import flex
import pycbf
from dxtbx_model_ext import KappaGoniometer # noqa: F401, exported symbol
from dxtbx_model_ext import Goniometer, MultiAxisGoniometer
goniometer_phil_scope = libtbx.phil.parse(
"""
goniometer
.expert_level = 1
.short_caption = "Goniometer overrides"
{
axes = None
.type = floats
.help = "Override the goniometer axes. Axes must be provided in the"
"order crystal-to-goniometer, i.e. for a Kappa goniometer"
"phi,kappa,omega"
.short_caption="Goniometer axes"
angles = None
.type = floats
.help = "Override the goniometer angles. Axes must be provided in the"
"order crystal-to-goniometer, i.e. for a Kappa goniometer"
"phi,kappa,omega"
.short_caption = "Goniometer angles"
names = None
.type = str
.help = "The multi axis goniometer axis names"
.short_caption = "The axis names"
scan_axis = None
.type = int
.help = "The scan axis"
.short_caption = "The scan axis"
fixed_rotation = None
.type = floats(size=9)
.help = "Override the fixed rotation matrix"
.short_caption = "Fixed rotation matrix"
setting_rotation = None
.type = floats(size=9)
.help = "Override the setting rotation matrix"
.short_caption = "Setting rotation matrix"
invert_rotation_axis = False
.type = bool
.help = "Invert the rotation axis"
.short_caption = "Invert rotation axis"
}
"""
)
class GoniometerFactory(object):
"""A factory class for goniometer objects, which will encapsulate
some standard goniometer designs to make it a little easier to get
started with all of this - for cases when we are not using a CBF.
When we have a CBF just use that factory method and everything will be
peachy."""
@staticmethod
def single_axis_goniometer_from_phil(params, reference=None):
"""
Generate or overwrite a single axis goniometer
"""
# Check the axes parameter
if params.goniometer.axes is not None and len(params.goniometer.axes) != 3:
raise RuntimeError("Single axis goniometer requires 3 axes parameters")
# Check the angles parameter
if params.goniometer.angles is not None:
raise RuntimeError("Single axis goniometer requires angles == None")
# Check the names parameter
if params.goniometer.names is not None:
raise RuntimeError("Single axis goniometer requires names == None")
# Init the goniometer
if reference is None:
goniometer = Goniometer()
else:
goniometer = reference
# Set the parameters
if params.goniometer.axes is not None:
goniometer.set_rotation_axis_datum(params.goniometer.axes)
if params.goniometer.fixed_rotation is not None:
goniometer.set_fixed_rotation(params.goniometer.fixed_rotation)
if params.goniometer.setting_rotation is not None:
goniometer.set_setting_rotation(params.goniometer.setting_rotation)
if params.goniometer.invert_rotation_axis is True:
rotation_axis = goniometer.get_rotation_axis_datum()
goniometer.set_rotation_axis_datum([-x for x in rotation_axis])
# Return the model
return goniometer
@staticmethod
def multi_axis_goniometer_from_phil(params, reference=None):
# Check the axes parameter
if params.goniometer.axes is not None:
if len(params.goniometer.axes) % 3:
raise RuntimeError(
"Number of values for axes parameter must be multiple of 3."
)
# Check the fixed rotation
if params.goniometer.fixed_rotation is not None:
raise RuntimeError("Multi-axis goniometer requires fixed_rotation == None")
# Check the setting rotation
if params.goniometer.setting_rotation is not None:
raise RuntimeError(
"Multi-axis goniometer requires setting_rotation == None"
)
# Check the input
if reference is None:
if params.goniometer.axes is None:
raise RuntimeError("No axes set")
# Create the axes
axes = flex.vec3_double(
params.goniometer.axes[i * 3 : (i * 3) + 3]
for i in range(len(params.goniometer.axes) // 3)
)
# Invert the rotation axis
if params.goniometer.invert_rotation_axis is True:
axes = flex.vec3_double([[-x for x in v] for v in axes])
# Create the angles
if params.goniometer.angles is not None:
angles = params.goniometer.angles
if len(angles) != len(axes):
raise RuntimeError("Number of angles must match axes")
else:
angles = flex.double([0] * len(axes))
# Create the names
if params.goniometer.names is not None:
names = params.goniometer.names
if len(names) != len(axes):
raise RuntimeError("Number of names must match axes")
else:
names = flex.std_string([""] * len(axes))
# Create the scan axis
if params.goniometer.scan_axis is not None:
scan_axis = params.goniometer.scan_axis
else:
scan_axis = 0
# Create the model
goniometer = MultiAxisGoniometer(axes, angles, names, scan_axis)
else:
goniometer = reference
# Set the axes
if params.goniometer.axes is not None:
axes = flex.vec3_double(
params.goniometer.axes[i * 3 : (i * 3) + 3]
for i in range(len(params.goniometer.axes) // 3)
)
if len(goniometer.get_axes()) != len(axes):
raise RuntimeError(
"Number of axes must match the current goniometer (%s)"
% len(goniometer.get_axes())
)
goniometer.set_axes(axes)
# Invert rotation axis
if params.goniometer.invert_rotation_axis is True:
axes = flex.vec3_double(
[[-x for x in v] for v in goniometer.get_axes()]
)
goniometer.set_axes(axes)
# Set the angles
if params.goniometer.angles is not None:
if len(goniometer.get_angles()) != len(params.goniometer.angles):
raise RuntimeError(
"Number of angles must match the current goniometer (%s)"
% len(goniometer.get_angles())
)
goniometer.set_angles(params.goniometer.angles)
        # Set the names
if params.goniometer.names is not None:
if len(goniometer.get_names()) != len(params.goniometer.names):
raise RuntimeError(
"Number of names must match the current goniometer (%s)"
% len(goniometer.get_names())
)
goniometer.set_names(params.goniometer.names)
# Set the scan axis
if params.goniometer.scan_axis is not None:
raise RuntimeError("Can not override scan axis")
# Return the model
return goniometer
@staticmethod
def from_phil(params, reference=None):
"""
        Convert the phil parameters into a goniometer model
"""
if reference is not None:
if isinstance(reference, MultiAxisGoniometer):
goniometer = GoniometerFactory.multi_axis_goniometer_from_phil(
params, reference
)
else:
goniometer = GoniometerFactory.single_axis_goniometer_from_phil(
params, reference
)
else:
if params.goniometer.axes is None:
return None
if len(params.goniometer.axes) > 3:
goniometer = GoniometerFactory.multi_axis_goniometer_from_phil(params)
else:
goniometer = GoniometerFactory.single_axis_goniometer_from_phil(params)
return goniometer
@staticmethod
def from_dict(d, t=None):
"""Convert the dictionary to a goniometer model
Params:
d The dictionary of parameters
t The template dictionary to use
Returns:
The goniometer model
"""
if d is None and t is None:
return None
joint = t.copy() if t else {}
joint.update(d)
# Create the model from the joint dictionary
if {"axes", "angles", "scan_axis"}.issubset(joint):
return MultiAxisGoniometer.from_dict(joint)
return Goniometer.from_dict(joint)
@staticmethod
def make_goniometer(rotation_axis, fixed_rotation):
return Goniometer(
tuple(map(float, rotation_axis)), tuple(map(float, fixed_rotation))
)
@staticmethod
def make_kappa_goniometer(alpha, omega, kappa, phi, direction, scan_axis):
omega_axis = (1, 0, 0)
phi_axis = (1, 0, 0)
c = math.cos(alpha * math.pi / 180)
s = math.sin(alpha * math.pi / 180)
if direction == "+y":
kappa_axis = (c, s, 0.0)
elif direction == "+z":
kappa_axis = (c, 0.0, s)
elif direction == "-y":
kappa_axis = (c, -s, 0.0)
elif direction == "-z":
kappa_axis = (c, 0.0, -s)
else:
raise RuntimeError("Invalid direction")
if scan_axis == "phi":
scan_axis = 0
else:
scan_axis = 2
axes = flex.vec3_double((phi_axis, kappa_axis, omega_axis))
angles = flex.double((phi, kappa, omega))
names = flex.std_string(("PHI", "KAPPA", "OMEGA"))
return GoniometerFactory.make_multi_axis_goniometer(
axes, angles, names, scan_axis
)
@staticmethod
def make_multi_axis_goniometer(axes, angles, names, scan_axis):
return MultiAxisGoniometer(axes, angles, names, scan_axis)
@staticmethod
def single_axis():
"""Construct a single axis goniometer which is canonical in the
CBF reference frame."""
axis = (1, 0, 0)
fixed = (1, 0, 0, 0, 1, 0, 0, 0, 1)
return GoniometerFactory.make_goniometer(axis, fixed)
@staticmethod
def single_axis_reverse():
"""Construct a single axis goniometer which is canonical in the
CBF reference frame, but reversed in rotation."""
axis = (-1, 0, 0)
fixed = (1, 0, 0, 0, 1, 0, 0, 0, 1)
return GoniometerFactory.make_goniometer(axis, fixed)
@staticmethod
def known_axis(axis):
"""Return an goniometer instance for a known rotation axis, assuming
that nothing is known about the fixed element of the rotation axis."""
assert len(axis) == 3
fixed = (1, 0, 0, 0, 1, 0, 0, 0, 1)
return Goniometer(axis, fixed)
@staticmethod
def kappa(alpha, omega, kappa, phi, direction, scan_axis):
"""Return a kappa goniometer where omega is the primary axis (i,e.
aligned with X in the CBF coordinate frame) and has the kappa arm
with angle alpha attached to it, aligned with -z, +y, +z or -y at
omega = 0, that being the direction, which in turn has phi fixed to it
which should initially be coincident with omega. We also need to know
which axis is being used for the scan i.e. phi or omega. All angles
should be given in degrees. This will work by first constructing the
rotation axes and then composing them to the scan axis and fixed
component of the rotation."""
return GoniometerFactory.make_kappa_goniometer(
alpha, omega, kappa, phi, direction, scan_axis
)
@staticmethod
def multi_axis(axes, angles, names, scan_axis):
""""""
return GoniometerFactory.make_multi_axis_goniometer(
axes, angles, names, scan_axis
)
@staticmethod
def imgCIF(cif_file):
"""Initialize a goniometer model from an imgCIF file."""
# FIXME in here work out how to get the proper setting matrix if != 1
cbf_handle = pycbf.cbf_handle_struct()
cbf_handle.read_file(cif_file.encode(), pycbf.MSG_DIGEST)
return GoniometerFactory.imgCIF_H(cbf_handle)
@staticmethod
def imgCIF_H(cbf_handle):
"""Initialize a goniometer model from an imgCIF file handle, where
it is assumed that the file has already been read."""
# find the goniometer axes and dependencies
axis_names = flex.std_string()
depends_on = flex.std_string()
axes = flex.vec3_double()
angles = flex.double()
scan_axis = None
cbf_handle.find_category(b"axis")
for i in range(cbf_handle.count_rows()):
cbf_handle.find_column(b"equipment")
if cbf_handle.get_value() == b"goniometer":
cbf_handle.find_column(b"id")
axis_names.append(cbf_handle.get_value())
axis = []
for i in range(3):
cbf_handle.find_column(b"vector[%i]" % (i + 1))
axis.append(float(cbf_handle.get_value()))
axes.append(axis)
cbf_handle.find_column(b"depends_on")
depends_on.append(cbf_handle.get_value())
cbf_handle.next_row()
# find the starting angles of each goniometer axis and figure out which one
# is the scan axis (i.e. non-zero angle_increment)
cbf_handle.find_category(b"diffrn_scan_axis")
for i in range(cbf_handle.count_rows()):
cbf_handle.find_column(b"axis_id")
axis_name = cbf_handle.get_value()
if axis_name.decode() not in axis_names:
cbf_handle.next_row()
continue
cbf_handle.find_column(b"angle_start")
axis_angle = float(cbf_handle.get_value())
cbf_handle.find_column(b"angle_increment")
increment = float(cbf_handle.get_value())
angles.append(axis_angle)
if abs(increment) > 0:
assert (
scan_axis is None
), "More than one scan axis is defined: not currently supported"
scan_axis = flex.first_index(axis_names, axis_name)
cbf_handle.next_row()
assert axes.size() == angles.size()
if scan_axis is None:
# probably a still shot -> scan axis arbitrary as no scan
scan_axis = 0
# figure out the order of the axes from the depends_on values
order = flex.size_t()
for i in range(axes.size()):
if depends_on[i] == ".":
o = 0
else:
o = flex.first_index(axis_names, depends_on[i]) + 1
assert o not in order
order.append(o)
# multi-axis gonio requires axes in order as viewed from crystal to gonio base
# i.e. the reverse of the order we have from cbf header
order = order.reversed()
axes = axes.select(order)
angles = angles.select(order)
axis_names = axis_names.select(order)
scan_axis = axes.size() - scan_axis - 1
# construct a multi-axis goniometer
gonio = GoniometerFactory.multi_axis(axes, angles, axis_names, scan_axis)
return gonio
```
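The factory above covers the common construction routes; a short, hedged usage sketch follows (requires dxtbx; the angles and kappa-arm direction are illustrative, not taken from the source):

```python
# Sketch of GoniometerFactory use; all numeric values are illustrative.
from dxtbx.model.goniometer import GoniometerFactory

# Canonical single-axis goniometer: rotation about +X in the CBF frame.
gonio = GoniometerFactory.single_axis()
print(gonio.get_rotation_axis_datum())  # (1.0, 0.0, 0.0)

# Mini-kappa style goniometer (alpha = 50 degrees, arm along +y, scanning omega).
kappa = GoniometerFactory.kappa(50, 0, 0, 0, "+y", "omega")
print(list(kappa.get_names()))  # ['PHI', 'KAPPA', 'OMEGA']
```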
#### File: tests/command_line/test_to_xds.py
```python
from __future__ import absolute_import, division, print_function
import procrunner
import pytest
from dxtbx.imageset import ImageSetFactory
from dxtbx.serialize import dump
@pytest.fixture(scope="session")
def expected_output(dials_data):
return """\
DETECTOR=PILATUS MINIMUM_VALID_PIXEL_VALUE=0 OVERLOAD=495976
SENSOR_THICKNESS= 0.320
DIRECTION_OF_DETECTOR_X-AXIS= 1.00000 0.00000 0.00000
DIRECTION_OF_DETECTOR_Y-AXIS= 0.00000 1.00000 0.00000
NX=2463 NY=2527 QX=0.1720 QY=0.1720
DETECTOR_DISTANCE= 190.180
ORGX= 1235.84 ORGY= 1279.58
ROTATION_AXIS= 1.00000 0.00000 0.00000
STARTING_ANGLE= 0.000
OSCILLATION_RANGE= 0.200
X-RAY_WAVELENGTH= 0.97950
INCIDENT_BEAM_DIRECTION= -0.000 -0.000 1.021
FRACTION_OF_POLARIZATION= 0.999
POLARIZATION_PLANE_NORMAL= 0.000 1.000 0.000
NAME_TEMPLATE_OF_DATA_FRAMES= %s
TRUSTED_REGION= 0.0 1.41
UNTRUSTED_RECTANGLE= 487 495 0 2528
UNTRUSTED_RECTANGLE= 981 989 0 2528
UNTRUSTED_RECTANGLE= 1475 1483 0 2528
UNTRUSTED_RECTANGLE= 1969 1977 0 2528
UNTRUSTED_RECTANGLE= 0 2464 195 213
UNTRUSTED_RECTANGLE= 0 2464 407 425
UNTRUSTED_RECTANGLE= 0 2464 619 637
UNTRUSTED_RECTANGLE= 0 2464 831 849
UNTRUSTED_RECTANGLE= 0 2464 1043 1061
UNTRUSTED_RECTANGLE= 0 2464 1255 1273
UNTRUSTED_RECTANGLE= 0 2464 1467 1485
UNTRUSTED_RECTANGLE= 0 2464 1679 1697
UNTRUSTED_RECTANGLE= 0 2464 1891 1909
UNTRUSTED_RECTANGLE= 0 2464 2103 2121
UNTRUSTED_RECTANGLE= 0 2464 2315 2333
DATA_RANGE= 1 9
JOB=XYCORR INIT COLSPOT IDXREF DEFPIX INTEGRATE CORRECT\
""" % (
dials_data("centroid_test_data").join("centroid_????.cbf").strpath
)
def test_to_xds_from_images(dials_data, expected_output, tmpdir):
file_names = dials_data("centroid_test_data").listdir("centroid_*.cbf")
result = procrunner.run(["dxtbx.to_xds"] + file_names, working_directory=tmpdir)
assert not result.returncode and not result.stderr
# allow extra lines to have been added (these may be comments)
for record in expected_output.split("\n"):
assert record.strip().encode("latin-1") in result.stdout, record
def test_to_xds_from_json(dials_data, expected_output, tmpdir):
file_names = dials_data("centroid_test_data").listdir("centroid_*.cbf")
# now test reading from a json file
sequence = ImageSetFactory.new([f.strpath for f in file_names])[0]
with tmpdir.join("sequence.json").open("wb") as fh:
dump.imageset(sequence, fh)
result = procrunner.run(["dxtbx.to_xds", "sequence.json"], working_directory=tmpdir)
assert not result.returncode and not result.stderr
# allow extra lines to have been added (these may be comments)
for record in expected_output.split("\n"):
assert record.strip().encode("latin-1") in result.stdout, record
```
#### File: modules/tntbx/run_tests.py
```python
from libtbx import test_utils
import libtbx.load_env
def run():
tst_list = (
"$D/tst_tntbx_ext.py",
"$D/tst_large_eigen.py",
)
build_dir = libtbx.env.under_build("tntbx")
dist_dir = libtbx.env.dist_path("tntbx")
test_utils.run_tests(build_dir, dist_dir, tst_list)
if (__name__ == "__main__"):
run()
```
#### File: modules/tntbx/tst_large_eigen.py
```python
import tntbx.eigensystem
from scitbx.array_family import flex
from libtbx.test_utils import approx_equal
from itertools import count
try:
import platform
except ImportError:
release = ""
else:
release = platform.release()
if ( release.endswith("_FC4")
or release.endswith("_FC4smp")):
Numeric = None # LinearAlgebra.generalized_inverse is broken
else:
try:
import Numeric
except ImportError:
Numeric = None
else:
import LinearAlgebra
def exercise_eigensystem():
m = [0.13589302585705959, -0.041652833629281995, 0.0,
-0.02777294381303139, 0.0, -0.028246956907939123,
-0.037913518508910102, -0.028246956907939123, 0.028246956907939127,
0.066160475416849232, 0.0, -1.998692119493731e-18, 0.0,
1.9342749002960583e-17, 2.1341441122454314e-17, -0.041652833629281995,
0.16651402880692701, -0.041652833629282252, -0.054064923492613354,
-0.041657741914063608, -0.027943612435735281, 0.058527480224229975,
-0.027943612435735132, -0.034820867346713427, -0.030583867788494697,
0.062764479782448576, 1.2238306785281e-33, 0.0, 2.0081205093967302e-33,
8.4334666705394195e-34, 0.0, -0.041652833629282252,
0.13589302585705959, 0.0, -0.041574928784987308, -0.028246956907939064,
0.0, -0.028246956907939064, 0.063090910812553094,
0.028246956907939068, -0.034843953904614026, -1.9986921194937106e-18,
-8.9229029691439759e-18, 1.0316952905846454e-17, 3.3927420561961863e-18,
-0.02777294381303139, -0.05406492349261334, 0.0, 1.0754189352289423,
0.0, 0.055233150062734049, -0.030424256077943676, 0.02480889398479039,
-0.024808893984790394, -0.024808893984790425, 0.0,
-6.8972719971392908e-18, -1.7405118013554239e-17,
6.1312902919038241e-18, -4.3765557245111121e-18, 0.0,
-0.041657741914063601, -0.041574928784987308, 0.0, 1.0754189352289425,
0.02824584556921347, 0.0, 0.056206884746120872, -0.028245845569213546,
-0.02824584556921347, -0.027961039176907447, 5.9506047506615062e-18,
0.0, -1.4122576510436466e-18, -7.3628624017051534e-18,
-0.028246956907939123, -0.027943612435735277, -0.028246956907939064,
0.055233150062734056, 0.028245845569213474, 0.058637637326162888,
-0.021712364921140592, 0.038218912676148069, -0.039777184406252442,
-0.036925272405022302, 0.0015582717301043682, 4.0451470549537404e-18,
0.0, -1.3724767734658202e-17, -1.7769914789611943e-17,
-0.037913518508910102, 0.058527480224229982, 0.0, -0.030424256077943652,
0.0, -0.021712364921140592, 0.28175206518101342, 0.0039066487534216467,
-0.0039066487534216484, -0.26003970025987289, 0.0,
7.5625035918149771e-18, 1.2380916665439571e-17, -1.0148697613996034e-16,
-9.6668563066335756e-17, -0.028246956907939123, -0.027943612435735128,
-0.028246956907939064, 0.02480889398479039, 0.056206884746120879,
0.038218912676148069, 0.0039066487534216484, 0.058637637326162798,
-0.031992570455625299, -0.042125561429569719, -0.026645066870537512,
4.0451470549537242e-18, 9.6341055014789307e-18, -2.0511818948105707e-17,
-1.4922860501580496e-17, 0.028246956907939127, -0.034820867346713427,
0.063090910812553094, -0.024808893984790394, -0.028245845569213508,
-0.039777184406252442, -0.0039066487534216501, -0.031992570455625306,
0.2922682186182603, 0.043683833159674099, -0.26027564816263499,
-5.2586006938540899e-18, 0.0, 9.7867664544895616e-18,
1.5045367148343648e-17, 0.066160475416849232, -0.030583867788494701,
0.028246956907939068, -0.024808893984790466, -0.028245845569213474,
-0.036925272405022302, -0.26003970025987289, -0.042125561429569719,
0.043683833159674092, 0.29696497266489519, -0.0015582717301043699,
-7.5250966340669269e-19, 2.6213138712286628e-17,
-1.5094268920622606e-17, 1.1871379455070714e-17, 0.0,
0.062764479782448576, -0.034843953904614026, 0.0, -0.027961039176907457,
0.0015582717301043665, 0.0, -0.026645066870537509, -0.26027564816263499,
-0.0015582717301043699, 0.28692071503317257, -3.8759778199373453e-18,
3.9691789817943108e-17, -8.14982209920807e-18, 3.5417945538672385e-17,
-1.998692119493731e-18, 1.6308338425568088e-33, -1.9986921194937102e-18,
-6.8972719971392892e-18, 5.950604750661507e-18, 4.0451470549537412e-18,
7.5625035918149756e-18, 4.0451470549537242e-18, -5.2586006938540899e-18,
-7.5250966340669346e-19, -3.8759778199373446e-18, 0.054471159454508082,
0.0064409353489570716, 0.0035601061597435495, -0.044470117945807464,
0.0, 0.0, -8.9229029691439759e-18, -1.7405118013554239e-17, 0.0, 0.0,
1.2380916665439569e-17, 9.6341055014789307e-18, 0.0,
2.6213138712286628e-17, 3.9691789817943114e-17, 0.0064409353489570777,
0.47194161564298681, -0.17456675614101194, 0.29093392415301783,
1.9342749002960583e-17, 1.4951347746978839e-33, 1.0316952905846454e-17,
6.1312902919038033e-18, -1.4122576510436468e-18,
-1.3724767734658202e-17, -1.0148697613996034e-16,
-2.0511818948105704e-17, 9.7867664544895616e-18,
-1.5094268920622603e-17, -8.1498220992080684e-18,
0.0035601061597435478, -0.17456675614101194, 0.61470061296798617,
0.4365737506672307, 2.1341441122454314e-17, 1.5911491532730484e-34,
3.392742056196186e-18, -4.3765557245111453e-18, -7.3628624017051534e-18,
-1.7769914789611943e-17, -9.6668563066335731e-17,
-1.4922860501580496e-17, 1.5045367148343648e-17, 1.1871379455070717e-17,
3.5417945538672392e-17, -0.044470117945807464, 0.29093392415301783,
0.4365737506672307, 0.77197779276605605]
#
m = flex.double(m)
m.resize(flex.grid(15,15))
s = tntbx.eigensystem.real(m)
e_values = s.values()
#
if (Numeric is not None):
n = Numeric.asarray(m)
n.shape = (15,15)
n_values = LinearAlgebra.eigenvalues(n)
assert len(e_values) == len(n_values)
#
print ' Eigenvalues'
print ' %-16s %-16s' % ('TNT','Numpy')
for i,e,n in zip(count(1), e_values, n_values):
if (isinstance(e, complex)): e = e.real
if (isinstance(n, complex)): n = n.real
print " %2d %16.12f %16.12f" % (i, e, n)
#
sorted_values = e_values.select(
flex.sort_permutation(data=e_values, reverse=True))
assert approx_equal(sorted_values, [
1.1594490522786849, 1.0940938851317932, 1.0788186474215089,
0.68233800454109983, 0.62042869735706307, 0.53297576878337871,
0.18708677344352156, 0.16675360561093594, 0.12774949816038345,
0.071304124011754358, 0.02865105770313877, 0.027761263516876356,
1.5830173657858035e-17, 2.6934929627275647e-18,
-5.5511151231257827e-17])
def run():
exercise_eigensystem()
print "OK"
if (__name__ == "__main__"):
run()
```
#### File: xia2/command_line/compare_merging_stats.py
```python
from __future__ import absolute_import, division, print_function
import os
import iotbx.phil
from cctbx import uctbx
from dials.util.options import OptionParser
help_message = """
"""
phil_scope = iotbx.phil.parse(
"""
n_bins = 20
.type = int(value_min=1)
anomalous = False
.type = bool
use_internal_variance = False
.type = bool
eliminate_sys_absent = False
.type = bool
plot_labels = None
.type = strings
data_labels = None
.type = str
size_inches = None
.type = floats(size=2, value_min=0)
image_dir = None
.type = path
format = *png pdf
.type = choice
style = *ggplot
.type = choice
""",
process_includes=True,
)
def run(args):
import libtbx.load_env
usage = "%s [options]" % libtbx.env.dispatcher_name
parser = OptionParser(
usage=usage, phil=phil_scope, check_format=False, epilog=help_message
)
params, options, args = parser.parse_args(
show_diff_phil=True, return_unhandled=True
)
results = []
for mtz in args:
print(mtz)
assert os.path.isfile(mtz), mtz
results.append(
get_merging_stats(
mtz,
anomalous=params.anomalous,
n_bins=params.n_bins,
use_internal_variance=params.use_internal_variance,
eliminate_sys_absent=params.eliminate_sys_absent,
data_labels=params.data_labels,
)
)
plot_merging_stats(
results,
labels=params.plot_labels,
size_inches=params.size_inches,
image_dir=params.image_dir,
format=params.format,
style=params.style,
)
def get_merging_stats(
scaled_unmerged_mtz,
anomalous=False,
n_bins=20,
use_internal_variance=False,
eliminate_sys_absent=False,
data_labels=None,
):
import iotbx.merging_statistics
i_obs = iotbx.merging_statistics.select_data(
scaled_unmerged_mtz, data_labels=data_labels
)
i_obs = i_obs.customized_copy(anomalous_flag=False, info=i_obs.info())
result = iotbx.merging_statistics.dataset_statistics(
i_obs=i_obs,
n_bins=n_bins,
anomalous=anomalous,
use_internal_variance=use_internal_variance,
eliminate_sys_absent=eliminate_sys_absent,
)
return result
def plot_merging_stats(
results,
labels=None,
plots=None,
prefix=None,
size_inches=None,
image_dir=None,
format="png",
style="ggplot",
):
import matplotlib
matplotlib.use("Agg")
from matplotlib import pyplot
if style is not None:
pyplot.style.use(style)
from cycler import cycler
colors = pyplot.rcParams["axes.prop_cycle"].by_key()["color"]
linestyles = []
for style in ("-", "--", ":", "-."):
linestyles.extend([style] * len(colors))
colors = colors * len(set(linestyles))
pyplot.rc("axes", prop_cycle=(cycler("c", colors) + cycler("ls", linestyles)))
plots_ = {
"r_merge": "$R_{merge}$",
"r_meas": "$R_{meas}$",
"r_pim": "$R_{pim}$",
"cc_one_half": r"$CC_{\frac{1}{2}}$",
"cc_one_half_sigma_tau": r"$CC_{\frac{1}{2}}$",
"cc_anom": "$CC_{anom}$",
"i_over_sigma_mean": r"$< I / \sigma(I) >$",
"completeness": "Completeness",
"mean_redundancy": "Multiplicity",
}
if plots is None:
plots = plots_
else:
plots = {k: plots_[k] for k in plots}
if prefix is None:
prefix = ""
if labels is not None:
assert len(results) == len(labels)
if image_dir is None:
image_dir = "."
elif not os.path.exists(image_dir):
os.makedirs(image_dir)
for k in plots:
def plot_data(results, k, labels, linestyle=None):
for i, result in enumerate(results):
if labels is not None:
label = labels[i].replace("\\$", "$")
else:
label = None
bins = result.bins
x = [bins[i].d_min for i in range(len(bins))]
x = [uctbx.d_as_d_star_sq(d) for d in x]
y = [getattr(bins[i], k) for i in range(len(bins))]
pyplot.plot(x, y, label=label, linestyle=linestyle)
plot_data(results, k, labels)
pyplot.xlabel(r"Resolution ($\AA$)")
pyplot.ylabel(plots.get(k, k))
if k in ("cc_one_half", "cc_one_half_sigma_tau", "completeness"):
pyplot.ylim(0, 1.05)
elif k in ("cc_anom",):
pyplot.ylim(min(0, pyplot.ylim()[0]), 1.05)
else:
pyplot.ylim(0, pyplot.ylim()[1])
ax = pyplot.gca()
xticks = ax.get_xticks()
xticks_d = [
"%.2f" % uctbx.d_star_sq_as_d(ds2) if ds2 > 0 else 0 for ds2 in xticks
]
ax.set_xticklabels(xticks_d)
if size_inches is not None:
fig = pyplot.gcf()
fig.set_size_inches(size_inches)
if labels is not None:
if k.startswith("cc"):
pyplot.legend(loc="lower left")
elif k.startswith("r_"):
pyplot.legend(loc="upper left")
elif k.startswith("i_"):
pyplot.legend(loc="upper right")
else:
pyplot.legend(loc="best")
pyplot.tight_layout()
pyplot.savefig(os.path.join(image_dir, prefix + k + ".%s" % format))
pyplot.clf()
if __name__ == "__main__":
import sys
from libtbx.utils import show_times_at_exit
show_times_at_exit()
run(sys.argv[1:])
```
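Besides the command-line entry point, `get_merging_stats` and `plot_merging_stats` can be driven directly from Python. A hedged sketch (requires iotbx/cctbx and matplotlib; the MTZ file names and labels are hypothetical, and the import path assumes the module location shown above):

```python
# Sketch only: the MTZ paths below do not exist and are for illustration.
from xia2.command_line.compare_merging_stats import (
    get_merging_stats,
    plot_merging_stats,
)

mtz_files = ["run1_scaled_unmerged.mtz", "run2_scaled_unmerged.mtz"]
results = [get_merging_stats(f, anomalous=False, n_bins=20) for f in mtz_files]
plot_merging_stats(results, labels=["run 1", "run 2"], image_dir="merging_plots")
```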
#### File: xia2/command_line/ispyb_json.py
```python
from __future__ import absolute_import, division, print_function
import json
import os
import sys
import xia2.Interfaces.ISPyB
from xia2.Schema.XProject import XProject
def ispyb_object():
from xia2.Interfaces.ISPyB.ISPyBXmlHandler import ISPyBXmlHandler
assert os.path.exists("xia2.json")
assert os.path.exists("xia2.txt")
command_line = ""
for record in open("xia2.txt"):
if record.startswith("Command line:"):
command_line = record.replace("Command line:", "").strip()
xinfo = XProject.from_json(filename="xia2.json")
crystals = xinfo.get_crystals()
assert len(crystals) == 1
crystal = next(crystals.itervalues())
ISPyBXmlHandler.add_xcrystal(crystal)
return ISPyBXmlHandler.json_object(command_line=command_line)
def zocalo_object():
assert os.path.exists("xia2.json")
xinfo = XProject.from_json(filename="xia2.json")
crystals = xinfo.get_crystals()
assert len(crystals) == 1
return xia2.Interfaces.ISPyB.xia2_to_json_object(crystals.values())
def ispyb_json(json_out):
with open(json_out, "w") as fh:
json.dump(ispyb_object(), fh, indent=2)
if __name__ == "__main__":
if len(sys.argv) >= 2:
ispyb_json(sys.argv[1])
else:
ispyb_json("ispyb.json")
```
#### File: xia2/command_line/to_shelxcde.py
```python
from __future__ import absolute_import, division, print_function
def to_shelxcde(hklin, prefix, sites=0):
"""Read hklin (unmerged reflection file) and generate SHELXC input file
and HKL file"""
from iotbx.reflection_file_reader import any_reflection_file
from iotbx.shelx.hklf import miller_array_export_as_shelx_hklf
reader = any_reflection_file(hklin)
intensities = [
ma
for ma in reader.as_miller_arrays(merge_equivalents=False)
if ma.info().labels == ["I", "SIGI"]
][0]
mtz_object = reader.file_content()
indices = reader.file_content().extract_original_index_miller_indices()
intensities = intensities.customized_copy(indices=indices, info=intensities.info())
with open("%s.hkl" % prefix, "wb") as hkl_file_handle:
miller_array_export_as_shelx_hklf(intensities, hkl_file_handle)
uc = intensities.unit_cell().parameters()
sg = intensities.space_group().type().lookup_symbol().replace(" ", "")
    # SHELX expects the hexagonal-setting names H3/H32 for rhombohedral R3/R32
if sg.startswith("R3:"):
sg = "H3"
elif sg.startswith("R32:"):
sg = "H32"
open("%s.sh" % prefix, "w").write(
"\n".join(
[
"shelxc %s << eof" % prefix,
"cell %f %f %f %f %f %f" % uc,
"spag %s" % sg,
"sad %s.hkl" % prefix,
"find %d" % sites,
"maxm %d" % ((2 * intensities.data().size() // 1000000) + 1),
"eof",
"",
]
)
)
if __name__ == "__main__":
import sys
if len(sys.argv) > 3:
sites = int(sys.argv[3])
to_shelxcde(sys.argv[1], sys.argv[2], sites)
else:
to_shelxcde(sys.argv[1], sys.argv[2])
```
#### File: xia2/command_line/wilson_stuff.py
```python
from __future__ import absolute_import, division, print_function
import sys
from iotbx import mtz
from mmtbx.scaling import data_statistics
m = mtz.object(sys.argv[1])
mas = m.as_miller_arrays()
data = None
for ma in mas:
if ma.is_xray_intensity_array():
data = ma
break
def nres_from_mtz(m):
sg = m.space_group()
uc = m.crystals()[0].unit_cell()
n_ops = len(sg.all_ops())
v_asu = uc.volume() / n_ops
return v_asu / (2.7 * 128)
n_res = nres_from_mtz(m)
wilson_scaling = data_statistics.wilson_scaling(miller_array=data, n_residues=n_res)
wilson_scaling.show()
```
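`nres_from_mtz` estimates the residue count from the asymmetric-unit volume using roughly 2.7 Å³ per Dalton (a typical protein Matthews volume) multiplied by the ~128 Da per residue assumed by the script. A self-contained sketch of the same arithmetic with invented numbers:

```python
# Illustrative arithmetic only: the cell volume and symmetry-operator count are
# made up; real values come from the MTZ crystal, as in nres_from_mtz above.
def estimate_residues(cell_volume_a3, n_symmetry_ops):
    v_asu = cell_volume_a3 / n_symmetry_ops  # volume of the asymmetric unit
    return v_asu / (2.7 * 128)               # ~2.7 A^3/Da x ~128 Da per residue

print(estimate_residues(cell_volume_a3=1.2e6, n_symmetry_ops=8))  # ~434 residues
```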
#### File: modules/xia2/conftest.py
```python
from __future__ import absolute_import, division, print_function
import argparse
import os
import re
import procrunner
import pytest
import six
from dials.conftest import run_in_tmpdir # noqa: F401
def pytest_addoption(parser):
"""
Option '--regression-full' needs to be used to run all regression tests,
including the full-length xia2 runs.
"""
class RFAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string):
namespace.regression = True
namespace.regression_full = True
parser.addoption(
"--regression-full",
nargs=0,
action=RFAction,
help="run all regression tests, this will take a while. Implies --regression",
)
def pytest_configure(config):
if six.PY3:
import dxtbx.tests.python3_test_filter as ptf
exp = ptf.Python3TestFailureExpectationPlugin(config)
config.pluginmanager.register(exp)
@pytest.fixture(scope="session")
def regression_test(request):
if not request.config.getoption("--regression-full"):
pytest.skip("Test requires --regression-full option to run.")
@pytest.fixture(scope="session")
def ccp4():
"""
Return information about the CCP4 installation.
Skip the test if CCP4 is not installed.
"""
if not os.getenv("CCP4"):
pytest.skip("CCP4 installation required for this test")
try:
result = procrunner.run(["refmac5", "-i"], print_stdout=False)
except OSError:
pytest.skip(
"CCP4 installation required for this test - Could not find CCP4 executable"
)
if result["exitcode"] or result["timeout"]:
pytest.skip(
"CCP4 installation required for this test - Could not run CCP4 executable"
)
version = re.search(br"patch level *([0-9]+)\.([0-9]+)\.([0-9]+)", result["stdout"])
if not version:
pytest.skip(
"CCP4 installation required for this test - Could not determine CCP4 version"
)
return {"path": os.getenv("CCP4"), "version": [int(v) for v in version.groups()]}
@pytest.fixture(scope="session")
def xds():
"""
Return information about the XDS installation.
Skip the test if XDS is not installed.
"""
try:
result = procrunner.run(["xds"], print_stdout=False)
except OSError:
pytest.skip("XDS installation required for this test")
if result["exitcode"] or result["timeout"]:
pytest.skip("XDS installation required for this test - Could not run XDS")
if "license expired" in result["stdout"]:
pytest.skip("XDS installation required for this test - XDS license is expired")
version = re.search(br"BUILT=([0-9]+)\)", result["stdout"])
if not version:
pytest.skip(
"XDS installation required for this test - Could not determine XDS version"
)
return {"version": int(version.groups()[0])}
```
#### File: xia2/Driver/DriverFactory.py
```python
from __future__ import absolute_import, division, print_function
import os
# another factory to delegate to
from xia2.Driver.ClusterDriverFactory import ClusterDriverFactory
from xia2.Driver.InteractiveDriver import InteractiveDriver
from xia2.Driver.QSubDriver import QSubDriver
from xia2.Driver.ScriptDriver import ScriptDriver
from xia2.Driver.SimpleDriver import SimpleDriver
class _DriverFactory(object):
def __init__(self):
self._driver_type = "simple"
self._implemented_types = [
"simple",
"script",
"interactive",
"qsub",
"cluster.sge",
]
# should probably write a message or something explaining
# that the following Driver implementation is being used
if "XIA2CORE_DRIVERTYPE" in os.environ:
self.set_driver_type(os.environ["XIA2CORE_DRIVERTYPE"])
def set_driver_type(self, driver_type):
"""Set the kind of driver this factory should produce."""
if driver_type not in self._implemented_types:
raise RuntimeError("unimplemented driver class: %s" % driver_type)
self._driver_type = driver_type
def get_driver_type(self):
return self._driver_type
def Driver(self, driver_type=None):
"""Create a new Driver instance, optionally providing the
type of Driver we want."""
if not driver_type:
driver_type = self._driver_type
if "cluster" in driver_type:
return ClusterDriverFactory.Driver(driver_type)
driver_class = {
"simple": SimpleDriver,
"script": ScriptDriver,
"interactive": InteractiveDriver,
"qsub": QSubDriver,
}.get(driver_type)
if driver_class:
return driver_class()
raise RuntimeError('Driver class "%s" unknown' % driver_type)
DriverFactory = _DriverFactory()
```
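A hedged sketch of factory-level use (the returned driver's own API for setting an executable and running it lives in the Driver classes, which are not shown here):

```python
# Sketch: only the factory-level calls defined above are used.
from xia2.Driver.DriverFactory import DriverFactory

DriverFactory.set_driver_type("script")  # or set XIA2CORE_DRIVERTYPE=script
driver = DriverFactory.Driver()          # a ScriptDriver instance
print(DriverFactory.get_driver_type(), type(driver).__name__)
```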
#### File: xia2/Experts/MissetExpert.py
```python
from __future__ import absolute_import, division, print_function
from scitbx import matrix
from scitbx.math import r3_rotation_axis_and_angle_from_matrix
from scitbx.math.euler_angles import xyz_angles, xyz_matrix
class MosflmMissetExpert(object):
"""A class to calculate the missetting angles to use for integration
given some values around the start and a good way in (ideally 90 degrees)
to the data processing. The protocol to obtain these remains to be
established."""
def __init__(self, phi0, misset0, phi1, misset1):
"""Initialise the rotation axis and what have you from some
experimental results. N.B. all input values in DEGREES."""
# canonical: X = X-ray beam
# Z = rotation axis
# Y = Z ^ X
z = matrix.col([0, 0, 1])
# then calculate the rotation axis
R = (
(
z.axis_and_angle_as_r3_rotation_matrix(phi1, deg=True)
* matrix.sqr(xyz_matrix(misset1[0], misset1[1], misset1[2]))
)
* (
z.axis_and_angle_as_r3_rotation_matrix(phi0, deg=True)
* matrix.sqr(xyz_matrix(misset0[0], misset0[1], misset0[2]))
).inverse()
)
self._z = z
self._r = matrix.col(r3_rotation_axis_and_angle_from_matrix(R).axis)
self._M0 = matrix.sqr(xyz_matrix(misset0[0], misset0[1], misset0[2]))
return
def get_r(self):
"""Get the rotation axis."""
return self._r.elems
def missets(self, phi):
"""Calculate the missetting angles for the given rotation angle."""
P = self._z.axis_and_angle_as_r3_rotation_matrix(phi, deg=True)
R = self._r.axis_and_angle_as_r3_rotation_matrix(phi, deg=True)
M = P.inverse() * R * self._M0
return xyz_angles(M)
if __name__ == "__main__":
# example taken from the problematic myoglobin data set
mme = MosflmMissetExpert(0.25, (-0.33, -0.32, -0.01), 91.75, (0.56, -0.12, -0.03))
for j in range(0, 320, 20):
phi = 0.5 * j + 0.25
x, y, z = mme.missets(phi)
print("%8.2f %8.2f %8.2f %8.2f" % (j + 1, x, y, z))
```
#### File: xia2/Handlers/Environment.py
```python
from __future__ import absolute_import, division, print_function
import ctypes
import os
import platform
import tempfile
from xia2.Handlers.Streams import Chatter, Debug
def which(pgm, debug=False):
path = os.getenv("PATH")
for p in path.split(os.path.pathsep):
p = os.path.join(p, pgm)
if debug:
Chatter.write("Seeking %s" % p)
if os.path.exists(p) and os.access(p, os.X_OK):
return p
def memory_usage():
try:
import resource
return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
except Exception as e:
Debug.write("Error getting RAM usage: %s" % str(e))
return 0
def debug_memory_usage():
"""Print line, file, memory usage."""
try:
import inspect
frameinfo = inspect.getframeinfo(inspect.stack()[1][0])
Debug.write(
"RAM usage at %s %d: %d"
% (os.path.split(frameinfo.filename)[-1], frameinfo.lineno, memory_usage())
)
except Exception as e:
Debug.write("Error getting RAM usage: %s" % str(e))
def df(path=os.getcwd()):
"""Return disk space in bytes in path."""
if platform.system() == "Windows":
try:
bytes = ctypes.c_ulonglong(0)
ctypes.windll.kernel32.GetDiskFreeSpaceExW(
ctypes.c_wchar_p(path), None, None, ctypes.pointer(bytes)
)
return bytes.value
except Exception as e:
Debug.write("Error getting disk space: %s" % str(e))
return 0
s = os.statvfs(path)
return s.f_frsize * s.f_bavail
def ulimit_n():
# see xia2#172 - change limit on number of file handles to smaller of
# hard limit, 4096
try:
import resource
except ImportError:
# not available on all operating systems. do nothing.
return
current, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
demand = min(4096, hard)
resource.setrlimit(resource.RLIMIT_NOFILE, (demand, demand))
current, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
return current, demand, hard
class _Environment(object):
"""A class to store environmental considerations."""
def __init__(self, working_directory=None):
if working_directory is None:
self._working_directory = os.getcwd()
else:
self._working_directory = working_directory
self._is_setup = False
def _setup(self):
if self._is_setup:
return
# Make sure USER env var is defined (historical reasons)
if not "USER" in os.environ:
if "USERNAME" in os.environ:
os.environ["USER"] = os.environ["USERNAME"]
else:
os.environ["USER"] = "xia2"
# define a local CCP4_SCR
ccp4_scr = tempfile.mkdtemp()
os.environ["CCP4_SCR"] = ccp4_scr
Debug.write("Created CCP4_SCR: %s" % ccp4_scr)
ulimit = ulimit_n()
if ulimit:
Debug.write("File handle limits: %d/%d/%d" % ulimit)
self._is_setup = True
def set_working_directory(self, working_directory):
self._working_directory = working_directory
def generate_directory(self, path_tuple):
"""Used for generating working directories."""
self._setup()
path = self._working_directory
        if isinstance(path_tuple, str):
path_tuple = (path_tuple,)
for p in path_tuple:
path = os.path.join(path, p)
if not os.path.exists(path):
Debug.write("Making directory: %s" % path)
os.makedirs(path)
else:
Debug.write("Directory exists: %s" % path)
return path
def setenv(self, name, value):
"""A wrapper for os.environ."""
self._setup()
os.environ[name] = value
def getenv(self, name):
"""A wrapper for os.environ."""
self._setup()
return os.environ.get(name)
def cleanup(self):
return
Environment = _Environment()
# jiffy functions
def get_number_cpus():
"""Portably get the number of processor cores available."""
if os.name == "nt":
        # Windows only has one CPU because easy_mp does not support more. #191
return 1
# if environmental variable NSLOTS is set to a number then use that
try:
return int(os.environ.get("NSLOTS"))
except (ValueError, TypeError):
pass
from libtbx.introspection import number_of_processors
return number_of_processors(return_value_if_unknown=-1)
if __name__ == "__main__":
print(get_number_cpus())
print(df(os.getcwd()))
```
#### File: xia2/Handlers/Flags.py
```python
from __future__ import absolute_import, division, print_function
import os
class _Flags(object):
"""A singleton to manage boolean flags."""
def __init__(self):
# XDS specific things - to help with handling tricky data sets
self._xparm = None
self._xparm_beam_vector = None
self._xparm_rotation_axis = None
self._xparm_origin = None
self._xparm_a = None
self._xparm_b = None
self._xparm_c = None
# starting directory (to allow setting working directory && relative
# paths on input)
self._starting_directory = os.getcwd()
def get_starting_directory(self):
return self._starting_directory
def set_xparm(self, xparm):
self._xparm = xparm
from xia2.Wrappers.XDS.XDS import xds_read_xparm
xparm_info = xds_read_xparm(xparm)
self._xparm_origin = xparm_info["ox"], xparm_info["oy"]
self._xparm_beam_vector = tuple(xparm_info["beam"])
self._xparm_rotation_axis = tuple(xparm_info["axis"])
self._xparm_distance = xparm_info["distance"]
def get_xparm(self):
return self._xparm
def get_xparm_origin(self):
return self._xparm_origin
def get_xparm_rotation_axis(self):
return self._xparm_rotation_axis
def get_xparm_beam_vector(self):
return self._xparm_beam_vector
def get_xparm_distance(self):
return self._xparm_distance
def set_xparm_ub(self, xparm):
self._xparm_ub = xparm
        tokens = list(map(float, open(xparm, "r").read().split()))
self._xparm_a = tokens[-9:-6]
self._xparm_b = tokens[-6:-3]
self._xparm_c = tokens[-3:]
def get_xparm_a(self):
return self._xparm_a
def get_xparm_b(self):
return self._xparm_b
def get_xparm_c(self):
return self._xparm_c
def set_freer_file(self, freer_file):
freer_file = os.path.abspath(freer_file)
if not os.path.exists(freer_file):
raise RuntimeError("%s does not exist" % freer_file)
from xia2.Modules.FindFreeFlag import FindFreeFlag
from xia2.Handlers.Streams import Debug
column = FindFreeFlag(freer_file)
Debug.write("FreeR_flag column in %s found: %s" % (freer_file, column))
self._freer_file = freer_file
Flags = _Flags()
```
#### File: xia2/Handlers/Streams.py
```python
from __future__ import absolute_import, division, print_function
import inspect
import logging
import os
import sys
from datetime import date
import libtbx.load_env
try:
from dlstbx.util.colorstreamhandler import ColorStreamHandler
except ImportError:
ColorStreamHandler = None
april = {
"CC half ": "Cromulence",
"I/sigma ": "Excellence",
"Total observations": "How many spots ",
"Total unique": "Unique spots",
"High resolution limit ": "Littlest visible thing",
"Low resolution limit ": "Biggest visible thing",
"Resolution limit for": "Littlest visible thing",
}
def banner(comment, forward=True, size=60):
if not comment:
return "-" * size
l = len(comment)
m = (size - (l + 2)) // 2
n = size - (l + 2 + m)
return "%s %s %s" % ("-" * m, comment, "-" * n)
class _Stream(object):
"""A class to represent an output stream. This will be used as a number
of static instances - Debug and Chatter in particular."""
def __init__(self, streamname, prefix):
"""Create a new stream."""
# FIXME would rather this goes to a file...
# unless this is impossible
if streamname:
self._file_name = "%s.txt" % streamname
else:
self._file_name = None
self._file = None
self._streamname = streamname
self._prefix = prefix
self._otherstream = None
self._off = False
self._cache = False
self._cachelines = []
self._additional = False
self._filter = None
def cache(self):
self._cache = True
self._cachelines = []
def filter(self, filter):
self._filter = filter
def uncache(self):
if not self._cache:
return
self._cache = False
for record, forward in self._cachelines:
self.write(record, forward)
return self._cachelines
def get_file(self):
if self._file:
return self._file
if not self._file_name:
self._file = sys.stdout
else:
self._file = open(self._file_name, "w")
return self._file
def set_file(self, file):
self._file = file
def set_additional(self):
self._additional = True
def write(self, record, forward=True, strip=True):
if self._filter:
for replace in self._filter:
record = record.replace(replace, self._filter[replace])
if self._off:
return None
if self._cache:
self._cachelines.append((record, forward))
return
if self._additional:
f = inspect.currentframe().f_back
m = f.f_code.co_filename
l = f.f_lineno
record = "Called from %s / %d\n%s" % (m, l, record)
for r in record.split("\n"):
if self._prefix:
result = self.get_file().write(
("[%s] %s\n" % (self._prefix, r.strip() if strip else r)).encode(
"utf-8"
)
)
else:
result = self.get_file().write(
("%s\n" % (r.strip() if strip else r)).encode("utf-8")
)
self.get_file().flush()
if self._otherstream and forward:
self._otherstream.write(record, strip=strip)
return result
def bigbanner(self, comment, forward=True, size=60):
"""Write a big banner for something."""
hashes = "#" * size
self.write(hashes, forward)
self.write("# %s" % comment, forward)
self.write(hashes, forward)
def banner(self, comment, forward=True, size=60):
self.write(banner(comment, forward=forward, size=size))
def smallbanner(self, comment, forward):
"""Write a small batter for something, like this:
----- comment ------."""
dashes = "-" * 10
self.write("%s %s %s" % (dashes, comment, dashes), forward)
def block(self, task, data, program, options):
"""Print out a description of the task being performed with
the program and a dictionary of options which will be printed
in alphabetical order."""
self.banner("%s %s with %s" % (task, data, program), size=80)
for o in sorted(options):
if options[o]:
oname = "%s:" % o
self.write("%s %s" % (oname.ljust(30), options[o]))
def entry(self, options):
"""Print subsequent entries to the above block."""
for o in sorted(options):
if options[o]:
oname = "%s:" % o
self.write("%s %s" % (oname.ljust(30), options[o]))
def join(self, otherstream):
"""Join another stream so that all output from this stream goes also
to that one."""
self._otherstream = otherstream
def off(self):
"""Switch the stream writing off..."""
self._off = True
# FIXME 23/NOV/06 now write a xia2.txt from chatter and rename that
# output stream Stdout... then copy everything there!
cl = libtbx.env.dispatcher_name
if cl:
if "xia2" not in cl or "python" in cl or cl == "xia2.new":
cl = "xia2"
else:
cl = "xia2"
if cl.endswith(".bat"):
# windows adds .bat extension to dispatcher
cl = cl[:-4]
Chatter = _Stream("%s" % cl, None)
Journal = _Stream("%s-journal" % cl, None)
Stdout = _Stream(None, None)
day = date.today().timetuple()
if (day.tm_mday == 1 and day.tm_mon == 4) or "XIA2_APRIL" in os.environ:
    # April Fools / XIA2_APRIL: swap selected log phrases for joke equivalents
Stdout.filter(april)
Debug = _Stream("%s-debug" % cl, None)
Chatter.join(Stdout)
def streams_off():
"""Switch off the chatter output - designed for unit tests..."""
Chatter.off()
Journal.off()
Debug.off()
def setup_logging(logfile=None, debugfile=None, verbose=False):
"""
Initialise logging for xia2
:param logfile: Filename for info/info+debug log output.
:type logfile: str
:param debugfile: Filename for debug log output.
:type debugfile: str
:param verbose: Enable debug output for logfile and console.
:type verbose: bool
"""
if verbose:
loglevel = logging.DEBUG
else:
loglevel = logging.INFO
if os.getenv("COLOURLOG") and ColorStreamHandler:
console = ColorStreamHandler(sys.stdout)
else:
console = logging.StreamHandler(sys.stdout)
console.setLevel(loglevel)
xia2_logger = logging.getLogger("xia2")
xia2_logger.addHandler(console)
xia2_logger.setLevel(loglevel)
other_loggers = [logging.getLogger(package) for package in ("dials", "dxtbx")]
if logfile:
fh = logging.FileHandler(filename=logfile, mode="w")
fh.setLevel(loglevel)
xia2_logger.addHandler(fh)
for logger in other_loggers:
logger.addHandler(fh)
logger.setLevel(loglevel)
if debugfile:
fh = logging.FileHandler(filename=debugfile, mode="w")
fh.setLevel(logging.DEBUG)
for logger in [xia2_logger] + other_loggers:
logger.addHandler(fh)
logger.setLevel(logging.DEBUG)
def reconfigure_streams_to_logging():
"Modify xia2 Chatter/Debug objects to use python logging"
def logger_file(loggername, level=logging.INFO):
"Returns a file-like object that writes to a logger"
log_function = logging.getLogger(loggername).log
class _(object):
@staticmethod
def flush():
pass
@staticmethod
def write(logobject):
if logobject.endswith("\n"):
# the Stream.write() function adds a trailing newline.
# remove that again
logobject = logobject[:-1]
log_function(level, logobject)
return _()
Debug.set_file(logger_file("xia2.stream.debug", level=logging.DEBUG))
Debug.join(None)
Chatter.set_file(logger_file("xia2.stream.chatter"))
Chatter.join(None)
Stdout.set_file(logger_file("xia2.stream.stdout"))
Stdout.join(None)
if __name__ == "__main__":
setup_logging(logfile="logfile", debugfile="debugfile")
reconfigure_streams_to_logging()
Chatter.write("nothing much, really")
Debug.write("this is a debug-level message")
Stdout.write("I write to stdout")
```
#### File: xia2/Handlers/Syminfo.py
```python
from __future__ import absolute_import, division, print_function
import copy
import os
import re
import sys
from cctbx import sgtbx
class _Syminfo(object):
"""An object to retain symmetry information."""
def __init__(self):
"""Initialise everything."""
self._parse_symop()
self._int_re = re.compile("^[0-9]*$")
def _generate_lattice(self, lattice_type, shortname):
"""Generate a lattice name (e.g. tP) from TETRAGONAL and P422."""
hash = {
"TRICLINIC": "a",
"MONOCLINIC": "m",
"ORTHORHOMBIC": "o",
"TETRAGONAL": "t",
"TRIGONAL": "h",
"HEXAGONAL": "h",
"CUBIC": "c",
}
lattice = "%s%s" % (hash[lattice_type.upper()], shortname[0].upper())
if lattice[1] != "H":
return lattice
else:
return "%sR" % lattice[0]
def _parse_symop(self):
"""Parse the CCP4 symop library."""
self._symop = {}
self._spacegroup_name_to_lattice = {}
self._spacegroup_short_to_long = {}
self._spacegroup_long_to_short = {}
self._spacegroup_name_to_number = {}
self._spacegroup_name_to_pointgroup = {}
current = 0
for line in open(os.path.join(os.environ["CLIBD"], "symop.lib")).readlines():
if line[0] != " ":
index, _, _, shortname, _, lattice_type = line.split()[0:6]
index = int(index)
lattice_type = lattice_type.lower()
longname = line.split("'")[1]
lattice = self._generate_lattice(lattice_type, shortname)
pointgroup = ""
for token in longname.split():
if len(longname.split()) <= 2:
pointgroup += token[0]
elif token[0] != "1":
pointgroup += token[0]
self._symop[index] = {
"index": index,
"lattice_type": lattice_type,
"lattice": lattice,
"name": shortname,
"longname": longname,
"pointgroup": pointgroup,
"symops": 0,
"operations": [],
}
if shortname not in self._spacegroup_name_to_lattice:
self._spacegroup_name_to_lattice[shortname] = lattice
if shortname not in self._spacegroup_name_to_number:
self._spacegroup_name_to_number[shortname] = index
if longname not in self._spacegroup_long_to_short:
self._spacegroup_long_to_short[longname] = shortname
if shortname not in self._spacegroup_short_to_long:
self._spacegroup_short_to_long[shortname] = longname
if shortname not in self._spacegroup_name_to_pointgroup:
self._spacegroup_name_to_pointgroup[shortname] = pointgroup
current = index
else:
self._symop[current]["symops"] += 1
self._symop[current]["operations"].append(line.strip())
def get_syminfo(self, spacegroup_number):
"""Return the syminfo for spacegroup number."""
return copy.deepcopy(self._symop[spacegroup_number])
def get_pointgroup(self, name):
"""Get the pointgroup for this spacegroup, e.g. P422 for P43212."""
space_group = sgtbx.space_group_info(name).group()
point_group = (
space_group.build_derived_patterson_group().build_derived_acentric_group()
)
return point_group.type().lookup_symbol().replace(" ", "")
def get_lattice(self, name):
"""Get the lattice for a named spacegroup."""
# check that this isn't already a lattice name
if name in [
"aP",
"mP",
"mC",
"oP",
"oC",
"oI",
"oF",
"tP",
"tI",
"hR",
"hP",
"cP",
"cI",
"cF",
]:
return name
from cctbx.sgtbx.bravais_types import bravais_lattice
if isinstance(name, int):
lattice = bravais_lattice(number=name)
elif self._int_re.match(name):
name = int(name)
lattice = bravais_lattice(number=name)
else:
lattice = bravais_lattice(symbol=str(name))
return str(lattice)
def get_spacegroup_numbers(self):
"""Get a list of all spacegroup numbers."""
numbers = sorted(self._symop.keys())
return numbers
def spacegroup_number_to_name(self, spacegroup_number):
"""Return the name of this spacegroup."""
return sgtbx.space_group_info(spacegroup_number).type().lookup_symbol()
def spacegroup_name_to_number(self, spacegroup):
"""Return the number corresponding to this spacegroup."""
# check have not had number passed in
try:
number = int(spacegroup)
return number
except ValueError:
pass
return sgtbx.space_group_info(str(spacegroup)).type().number()
def get_num_symops(self, spacegroup_number):
"""Get the number of symmetry operations that spacegroup
number has."""
return len(sgtbx.space_group_info(number=spacegroup_number).group())
def get_symops(self, spacegroup):
"""Get the operations for spacegroup number N."""
try:
number = int(spacegroup)
except ValueError:
number = self.spacegroup_name_to_number(spacegroup)
return self._symop[number]["operations"]
def get_subgroups(self, spacegroup):
"""Get the list of spacegroups which are included entirely in this
spacegroup."""
try:
number = int(spacegroup)
except ValueError:
number = self.spacegroup_name_to_number(spacegroup)
symops = self._symop[number]["operations"]
subgroups = [
self.spacegroup_number_to_name(j)
for j in range(1, 231)
if all(operation in symops for operation in self._symop[j]["operations"])
]
return subgroups
Syminfo = _Syminfo()
if __name__ == "__main__":
for arg in sys.argv[1:]:
print(Syminfo.get_pointgroup(arg))
print(Syminfo.get_lattice(arg))
```
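A hedged usage sketch for the singleton above; note that `_Syminfo.__init__` parses `$CLIBD/symop.lib`, so a CCP4 environment (as well as cctbx) is needed even to import the module:

```python
# Sketch only: needs cctbx plus a CCP4 environment providing $CLIBD/symop.lib.
from xia2.Handlers.Syminfo import Syminfo

print(Syminfo.spacegroup_name_to_number("P212121"))  # 19
print(Syminfo.get_pointgroup("P43212"))              # P422
print(Syminfo.get_subgroups("P212121"))              # space groups contained in P212121
```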
#### File: xia2/Modules/DeltaCcHalf.py
```python
from __future__ import absolute_import, division, print_function
import sys
import iotbx.phil
from cctbx import crystal
from cctbx.array_family import flex
from libtbx.phil import command_line
from xia2.Modules.Analysis import separate_unmerged
master_phil_scope = iotbx.phil.parse(
"""\
cc_one_half_method = half_dataset *sigma_tau
.type = choice
unit_cell = None
.type = unit_cell
n_bins = 20
.type = int(value_min=1)
d_min = None
.type = float(value_min=0)
batch
.multiple = True
{
id = None
.type = str
range = None
.type = ints(size=2, value_min=0)
}
include scope xia2.Modules.MultiCrystalAnalysis.batch_phil_scope
""",
process_includes=True,
)
class delta_cc_half(object):
def __init__(
self,
unmerged_intensities,
batches_all,
n_bins=20,
d_min=None,
cc_one_half_method="sigma_tau",
id_to_batches=None,
):
sel = unmerged_intensities.sigmas() > 0
unmerged_intensities = unmerged_intensities.select(sel).set_info(
unmerged_intensities.info()
)
batches_all = batches_all.select(sel)
unmerged_intensities.setup_binner(n_bins=n_bins)
self.unmerged_intensities = unmerged_intensities
self.merged_intensities = unmerged_intensities.merge_equivalents().array()
separate = separate_unmerged(
unmerged_intensities, batches_all, id_to_batches=id_to_batches
)
self.intensities = separate.intensities
self.batches = separate.batches
self.run_id_to_batch_id = separate.run_id_to_batch_id
from iotbx.merging_statistics import dataset_statistics
self.merging_statistics = dataset_statistics(
unmerged_intensities,
n_bins=n_bins,
cc_one_half_significance_level=0.01,
binning_method="counting_sorted",
anomalous=True,
use_internal_variance=False,
eliminate_sys_absent=False,
cc_one_half_method=cc_one_half_method,
)
if cc_one_half_method == "sigma_tau":
cc_overall = self.merging_statistics.cc_one_half_sigma_tau_overall
else:
cc_overall = self.merging_statistics.cc_one_half_overall
self.merging_statistics.show()
self.delta_cc = flex.double()
for test_k in self.intensities.keys():
# print test_k
indices_i = flex.miller_index()
data_i = flex.double()
sigmas_i = flex.double()
for k, unmerged in self.intensities.iteritems():
if k == test_k:
continue
indices_i.extend(unmerged.indices())
data_i.extend(unmerged.data())
sigmas_i.extend(unmerged.sigmas())
unmerged_i = unmerged_intensities.customized_copy(
indices=indices_i, data=data_i, sigmas=sigmas_i
).set_info(unmerged_intensities.info())
unmerged_i.setup_binner_counting_sorted(n_bins=n_bins)
if cc_one_half_method == "sigma_tau":
cc_bins = unmerged_i.cc_one_half_sigma_tau(
use_binning=True, return_n_refl=True
)
else:
cc_bins = unmerged_i.cc_one_half(use_binning=True, return_n_refl=True)
cc_i = flex.mean_weighted(
flex.double(b[0] for b in cc_bins.data[1:-1]),
flex.double(b[1] for b in cc_bins.data[1:-1]),
)
delta_cc_i = cc_i - cc_overall
self.delta_cc.append(delta_cc_i)
def _labels(self):
if self.run_id_to_batch_id is not None:
labels = self.run_id_to_batch_id.values()
else:
labels = ["%i" % (j + 1) for j in range(len(self.delta_cc))]
return labels
def _normalised_delta_cc_i(self):
mav = flex.mean_and_variance(self.delta_cc)
return (self.delta_cc - mav.mean()) / mav.unweighted_sample_standard_deviation()
def get_table(self):
from libtbx import table_utils
rows = [["dataset", "batches", "delta_cc_i", "sigma"]]
labels = self._labels()
normalised_score = self._normalised_delta_cc_i()
perm = flex.sort_permutation(self.delta_cc)
for i in perm:
bmin = flex.min(self.batches[i].data())
bmax = flex.max(self.batches[i].data())
rows.append(
[
str(labels[i]),
"%i to %i" % (bmin, bmax),
"% .3f" % self.delta_cc[i],
"% .2f" % normalised_score[i],
]
)
return table_utils.format(rows, has_header=True, prefix="|", postfix="|")
def plot_histogram(self, filename):
import math
from matplotlib import pyplot
normalised_score = self._normalised_delta_cc_i()
pyplot.figure()
# bins = range(
# int(math.floor(flex.min(normalised_score))), int(math.ceil(flex.max(normalised_score)))+1)
from libtbx.utils import frange
bins = frange(
math.floor(flex.min(normalised_score)),
math.ceil(flex.max(normalised_score)) + 1,
step=0.1,
)
n, bins, patches = pyplot.hist(
normalised_score.as_numpy_array(), bins=bins, fill=False
)
pyplot.xlabel(r"$\sigma$")
pyplot.ylabel("Frequency")
pyplot.savefig(filename)
def run(args):
cmd_line = command_line.argument_interpreter(master_params=master_phil_scope)
working_phil, args = cmd_line.process_and_fetch(
args=args, custom_processor="collect_remaining"
)
working_phil.show()
params = working_phil.extract()
if params.unit_cell is not None:
unit_cell = params.unit_cell
crystal_symmetry = crystal.symmetry(unit_cell=unit_cell)
else:
crystal_symmetry = None
from iotbx.reflection_file_reader import any_reflection_file
result = any_reflection_file(args[0])
unmerged_intensities = None
batches_all = None
for ma in result.as_miller_arrays(
merge_equivalents=False, crystal_symmetry=crystal_symmetry
):
# print ma.info().labels
if ma.info().labels == ["I(+)", "SIGI(+)", "I(-)", "SIGI(-)"]:
assert ma.anomalous_flag()
unmerged_intensities = ma
elif ma.info().labels == ["I", "SIGI"]:
assert not ma.anomalous_flag()
unmerged_intensities = ma
elif ma.info().labels == ["BATCH"]:
batches_all = ma
assert batches_all is not None
assert unmerged_intensities is not None
id_to_batches = None
if len(params.batch) > 0:
id_to_batches = {}
for b in params.batch:
assert b.id is not None
assert b.range is not None
assert b.id not in id_to_batches, "Duplicate batch id: %s" % b.id
id_to_batches[b.id] = b.range
result = delta_cc_half(
unmerged_intensities,
batches_all,
n_bins=params.n_bins,
d_min=params.d_min,
cc_one_half_method=params.cc_one_half_method,
id_to_batches=id_to_batches,
)
hist_filename = "delta_cc_hist.png"
print("Saving histogram to %s" % hist_filename)
result.plot_histogram(hist_filename)
print(result.get_table())
from xia2.Handlers.Citations import Citations
Citations.cite("delta_cc_half")
for citation in Citations.get_citations_acta():
print(citation)
if __name__ == "__main__":
run(sys.argv[1:])
```
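For context, `delta_cc_half` recomputes CC½ with each dataset left out in turn; a positive `delta_cc_i` means the overall CC½ improves when that dataset is removed, flagging it as a likely outlier, and `_normalised_delta_cc_i` turns these values into z-scores. A pure-Python sketch of that normalisation step with invented numbers:

```python
# Sketch of the normalisation in _normalised_delta_cc_i; the per-dataset
# delta-CC1/2 values below are invented for illustration.
delta_cc = [0.001, -0.002, 0.000, 0.035, -0.003]  # hypothetical values
mean = sum(delta_cc) / len(delta_cc)
var = sum((d - mean) ** 2 for d in delta_cc) / (len(delta_cc) - 1)  # sample variance
sd = var ** 0.5
z_scores = [(d - mean) / sd for d in delta_cc]
for i, (d, z) in enumerate(zip(delta_cc, z_scores), start=1):
    print("dataset %d: delta_cc = % .3f  sigma = % .2f" % (i, d, z))
```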
#### File: Modules/Indexer/XDSIndexerSum.py
```python
from __future__ import absolute_import, division, print_function
import os
from xia2.Handlers.Phil import PhilIndex
from xia2.Handlers.Streams import Debug
from xia2.Modules.Indexer.XDSIndexer import XDSIndexer
from xia2.Wrappers.XDS.Merge2cbf import Merge2cbf
# the class that we are extending
# odds and sods that are needed
# FIXME need to put in access here to Phil parameters to know how wide to make
# the summed images
class XDSIndexerSum(XDSIndexer):
"""An extension of XDSIndexer using all available images."""
def __init__(self):
super(XDSIndexerSum, self).__init__()
        # XDSIndexer.__init__ modifies this!
self._index_select_images = _index_select_images
# helper functions
def _index_select_images(self):
"""Select correct images based on image headers."""
# FIXME in here (i) sum the images defined from the existing class
# contents then (ii) change the template stored, the directory and
# the header contents to correspond to those new images. Finally make
# a note of these changes so we can correct XPARM file at the end.
assert min(self.get_matching_images()) == 1
# make a note so we can fix the XPARM.XDS file at the end
self._true_phi_width = self.get_header_item("phi_width")
params = PhilIndex.params.xds.merge2cbf
if params.data_range is None:
params.data_range = 1, len(self.get_matching_images())
m2c = Merge2cbf(params=params)
m2c.setup_from_image(self.get_image_name(1))
m2c.set_working_directory(
os.path.join(self.get_working_directory(), "summed_images")
)
os.mkdir(m2c.get_working_directory())
m2c.run()
# Is this safe to do?
self._setup_from_image(
os.path.join(m2c.get_working_directory(), "merge2cbf_averaged_0001.cbf")
)
phi_width = self.get_header_item("phi_width")
if phi_width == 0.0:
raise RuntimeError("cannot use still images")
# use five degrees for the background calculation
five_deg = int(round(5.0 / phi_width)) - 1
if five_deg < 5:
five_deg = 5
images = self.get_matching_images()
# characterise the images - are there just two (e.g. dna-style
# reference images) or is there a full block? if it is the
# former then we have a problem, as we want *all* the images in the
# sweep...
wedges = []
min_images = PhilIndex.params.xia2.settings.input.min_images
if len(images) < 3 and len(images) < min_images:
raise RuntimeError(
"This INDEXER cannot be used for only %d images" % len(images)
)
Debug.write("Adding images for indexer: %d -> %d" % (min(images), max(images)))
wedges.append((min(images), max(images)))
# FIXME this should have a wrapper function!
if min(images) + five_deg in images:
self._background_images = (min(images), min(images) + five_deg)
else:
self._background_images = (min(images), max(images))
return wedges
# FIXME here override _index_finish by calling original _index_finish
# then correcting the XPARM file as mentioned above.
def _index_finish(self):
self._modify_xparm_xds()
XDSIndexer._index_finish(self)
def _modify_xparm_xds(self):
import fileinput
xparm_filename = os.path.join(self.get_working_directory(), "XPARM.XDS")
assert os.path.isfile(xparm_filename)
f = fileinput.input(xparm_filename, mode="rb", inplace=1)
updated_oscillation_range = False
for line in f:
if not updated_oscillation_range:
# Starting image number (STARTING_FRAME=),
# spindle angle at start (STARTING_ANGLE=),
# oscillation range,
# and laboratory coordinates of the rotation axis.
tokens = line.split()
if len(tokens) == 6:
summed_oscillation_range = float(tokens[2])
# sanity check - is this actually necessary?
assert (
summed_oscillation_range - self.get_header_item("phi_width")
) < 1e-6
tokens[2] = "%.4f" % self._true_phi_width
print(" ".join(tokens))
continue
print(line, end=" ")
f.close()
# copy across file contents internally
self._data_files["XPARM.XDS"] = open(xparm_filename, mode="rb").read()
```
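The `_modify_xparm_xds` method above leans on `fileinput.input(..., inplace=1)`, which redirects anything printed inside the loop back into the file being edited. A minimal sketch of that in-place rewrite pattern, using a hypothetical `example.txt` rather than a real XPARM.XDS file:

```python
from __future__ import print_function

import fileinput


def halve_second_column(filename):
    """Rewrite a whitespace-delimited text file in place, halving the value
    in the second column of any three-token line."""
    # while iterating, stdout is redirected into the file, so print() writes
    # the replacement contents
    for line in fileinput.input(filename, inplace=1):
        tokens = line.split()
        if len(tokens) == 3:
            tokens[1] = "%.4f" % (float(tokens[1]) / 2.0)
            print(" ".join(tokens))
        else:
            print(line, end="")


if __name__ == "__main__":
    halve_second_column("example.txt")  # hypothetical input file
```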
#### File: Modules/Integrater/IntegraterFactory.py
```python
from __future__ import absolute_import, division, print_function
import os
from xia2.DriverExceptions.NotAvailableError import NotAvailableError
from xia2.Handlers.Phil import PhilIndex
from xia2.Handlers.PipelineSelection import add_preference, get_preferences
from xia2.Handlers.Streams import Debug
from xia2.Modules.Integrater.DialsIntegrater import DialsIntegrater
from xia2.Modules.Integrater.MosflmIntegrater import MosflmIntegrater
from xia2.Modules.Integrater.XDSIntegrater import XDSIntegrater
# FIXME 06/SEP/06 this should take an implementation of indexer to
# help with the decision about which integrater to
# use, and also to enable invisible configuration.
#
# FIXME 06/SEP/06 also need interface which will work with xsweep
# objects.
def IntegraterForXSweep(xsweep, json_file=None):
"""Create an Integrater implementation to work with the provided
XSweep."""
# FIXME this needs properly implementing...
if xsweep is None:
raise RuntimeError("XSweep instance needed")
if not xsweep.__class__.__name__ == "XSweep":
raise RuntimeError("XSweep instance needed")
integrater = Integrater()
if json_file is not None:
assert os.path.isfile(json_file)
Debug.write("Loading integrater from json: %s" % json_file)
import time
t0 = time.time()
integrater = integrater.__class__.from_json(filename=json_file)
t1 = time.time()
Debug.write("Loaded integrater in %.2f seconds" % (t1 - t0))
else:
integrater.setup_from_imageset(xsweep.get_imageset())
integrater.set_integrater_sweep_name(xsweep.get_name())
# copy across resolution limits
if xsweep.get_resolution_high() or xsweep.get_resolution_low():
d_min = PhilIndex.params.xia2.settings.resolution.d_min
d_max = PhilIndex.params.xia2.settings.resolution.d_max
# override with sweep versions if set - xia2#146
if xsweep.get_resolution_high():
d_min = xsweep.get_resolution_high()
if xsweep.get_resolution_low():
d_max = xsweep.get_resolution_low()
if d_min is not None and d_min != integrater.get_integrater_high_resolution():
Debug.write("Assigning resolution limits from XINFO input:")
Debug.write("d_min: %.3f" % d_min)
integrater.set_integrater_high_resolution(d_min, user=True)
if d_max is not None and d_max != integrater.get_integrater_low_resolution():
Debug.write("Assigning resolution limits from XINFO input:")
Debug.write("d_max: %.3f" % d_max)
integrater.set_integrater_low_resolution(d_max, user=True)
# check the epoch and perhaps pass this in for future reference
# (in the scaling)
if xsweep._epoch > 0:
integrater.set_integrater_epoch(xsweep._epoch)
# need to do the same for wavelength now as that could be wrong in
# the image header...
if xsweep.get_wavelength_value():
Debug.write(
"Integrater factory: Setting wavelength: %.6f"
% xsweep.get_wavelength_value()
)
integrater.set_wavelength(xsweep.get_wavelength_value())
# likewise the distance...
if xsweep.get_distance():
Debug.write(
"Integrater factory: Setting distance: %.2f" % xsweep.get_distance()
)
integrater.set_distance(xsweep.get_distance())
integrater.set_integrater_sweep(xsweep, reset=False)
return integrater
def Integrater():
"""Return an Integrater implementation."""
# FIXME this should take an indexer as an argument...
integrater = None
preselection = get_preferences().get("integrater")
if not integrater and (not preselection or preselection == "dials"):
try:
integrater = DialsIntegrater()
Debug.write("Using Dials Integrater")
if PhilIndex.params.xia2.settings.pipeline in ["dials", "dials-full"]:
integrater.set_output_format("pickle")
except NotAvailableError:
if preselection == "dials":
raise RuntimeError(
"preselected integrater dials not available: "
+ "dials not installed?"
)
if not integrater and (not preselection or preselection == "mosflmr"):
try:
integrater = MosflmIntegrater()
Debug.write("Using MosflmR Integrater")
if not get_preferences().get("scaler"):
add_preference("scaler", "ccp4a")
except NotAvailableError:
if preselection == "mosflmr":
raise RuntimeError("preselected integrater mosflmr not available")
if not integrater and (not preselection or preselection == "xdsr"):
try:
integrater = XDSIntegrater()
Debug.write("Using XDS Integrater in new resolution mode")
except NotAvailableError:
if preselection == "xdsr":
raise RuntimeError(
"preselected integrater xdsr not available: " + "xds not installed?"
)
if not integrater:
raise RuntimeError("no integrater implementations found")
# check to see if resolution limits were passed in through the
# command line...
dmin = PhilIndex.params.xia2.settings.resolution.d_min
dmax = PhilIndex.params.xia2.settings.resolution.d_max
if dmin:
Debug.write("Adding user-assigned resolution limits:")
if dmax:
Debug.write("dmin: %.3f dmax: %.2f" % (dmin, dmax))
integrater.set_integrater_resolution(dmin, dmax, user=True)
else:
Debug.write("dmin: %.3f" % dmin)
integrater.set_integrater_high_resolution(dmin, user=True)
return integrater
if __name__ == "__main__":
integrater = Integrater()
```
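The factory above uses a simple preselection-plus-fallback scheme: try each backend in priority order, swallow `NotAvailableError` for backends that are merely missing, and only raise if the user explicitly preselected the backend that failed. A stripped-down sketch of that control flow, with dummy classes standing in for the real xia2 wrappers:

```python
class NotAvailableError(Exception):
    """Raised by a backend that cannot run in the current environment."""


class DialsBackend(object):
    name = "dials"


class MosflmBackend(object):
    name = "mosflmr"

    def __init__(self):
        # pretend the underlying program is not installed
        raise NotAvailableError(self.name)


def pick_backend(preselection=None):
    # candidates in priority order; skip anything the user did not ask for
    for key, cls in (("dials", DialsBackend), ("mosflmr", MosflmBackend)):
        if preselection and preselection != key:
            continue
        try:
            return cls()
        except NotAvailableError:
            if preselection == key:
                raise RuntimeError("preselected backend %s not available" % key)
    raise RuntimeError("no backend implementations found")


if __name__ == "__main__":
    print(pick_backend().name)  # -> dials
```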
#### File: xia2/Modules/Mtzdump.py
```python
from __future__ import absolute_import, division, print_function
import copy
import os
import sys
from iotbx import mtz
class Mtzdump(object):
"""A class to give the same functionality as the wrapper for the CCP4
MTZDUMP program."""
def __init__(self):
self._header = {}
self._header["datasets"] = []
self._header["dataset_info"] = {}
self._batch_header = {}
self._batches = None
self._reflections = 0
self._resolution_range = (0, 0)
def set_working_directory(self, wd):
pass
def get_working_directory(self):
return None
def set_hklin(self, hklin):
self._hklin = hklin
def dump(self):
"""Actually obtain the contents of the mtz file header."""
assert self._hklin, self._hklin
assert os.path.exists(self._hklin), self._hklin
mtz_obj = mtz.object(self._hklin)
# work through the file accumulating the necessary information
self._header["datasets"] = []
self._header["dataset_info"] = {}
self._batches = [batch.num() for batch in mtz_obj.batches()]
self._header["column_labels"] = [column.label() for column in mtz_obj.columns()]
self._header["column_types"] = [column.type() for column in mtz_obj.columns()]
self._resolution_range = mtz_obj.max_min_resolution()
self._header["spacegroup"] = mtz_obj.space_group_name()
self._reflections = mtz_obj.n_reflections()
for crystal in mtz_obj.crystals():
if crystal.name() == "HKL_base":
continue
pname = crystal.project_name()
xname = crystal.name()
cell = crystal.unit_cell().parameters()
for dataset in crystal.datasets():
dname = dataset.name()
wavelength = dataset.wavelength()
dataset_id = "%s/%s/%s" % (pname, xname, dname)
dataset_number = dataset.i_dataset()
assert dataset_id not in self._header["datasets"]
self._header["datasets"].append(dataset_id)
self._header["dataset_info"][dataset_id] = {}
self._header["dataset_info"][dataset_id]["wavelength"] = wavelength
self._header["dataset_info"][dataset_id]["cell"] = cell
self._header["dataset_info"][dataset_id]["id"] = dataset_number
def dump_batch_headers(self):
"""Actually print the contents of the mtz file batch headers."""
assert self._hklin, self._hklin
assert os.path.exists(self._hklin), self._hklin
mtz_obj = mtz.object(self._hklin)
for batch in mtz_obj.batches():
current_batch = batch.num()
umat = batch.umat()
self._batch_header[current_batch] = {"umat": umat}
def get_batch_header(self, batch):
return copy.deepcopy(self._batch_header[batch])
def get_columns(self):
"""Get a list of the columns and their types as tuples
(label, type) in a list."""
results = []
for i in range(len(self._header["column_labels"])):
results.append(
(self._header["column_labels"][i], self._header["column_types"][i])
)
return results
def get_resolution_range(self):
return self._resolution_range
def get_datasets(self):
"""Return a list of available datasets."""
return self._header["datasets"]
def get_dataset_info(self, dataset):
"""Get the cell, spacegroup & wavelength associated with
a dataset. The dataset is specified by pname/xname/dname."""
result = copy.deepcopy(self._header["dataset_info"][dataset])
result["spacegroup"] = self._header["spacegroup"]
return result
def get_spacegroup(self):
"""Get the spacegroup recorded for this reflection file."""
return self._header["spacegroup"]
def get_batches(self):
"""Get a list of batches found in this reflection file."""
return self._batches
def get_column_range(self, column):
"""Get the value ranges for this column. This now works by reading
the file rather than using cached values => could be slow."""
assert self._hklin, self._hklin
assert os.path.exists(self._hklin), self._hklin
mtz_obj = mtz.object(self._hklin)
col = mtz_obj.get_column(column)
valid = col.extract_valid_values()
return min(valid), max(valid)
def get_reflections(self):
"""Return the number of reflections found in the reflection
file."""
return self._reflections
if __name__ == "__main__":
m = Mtzdump()
if len(sys.argv) > 1:
m.set_hklin(sys.argv[1])
else:
raise RuntimeError("%s hklin.mtz" % sys.argv[0])
m.dump()
print(m.get_spacegroup())
```
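Beyond the `__main__` block above, the class is typically driven as in the following sketch; `scaled.mtz` is a hypothetical reflection file and the import path assumes an installed xia2:

```python
from __future__ import print_function

from xia2.Modules.Mtzdump import Mtzdump

m = Mtzdump()
m.set_hklin("scaled.mtz")  # hypothetical reflection file
m.dump()

# header-level information gathered by dump()
print(m.get_spacegroup())
print(m.get_resolution_range())
print(m.get_columns())  # [(label, type), ...]
for dataset_id in m.get_datasets():
    print(dataset_id, m.get_dataset_info(dataset_id))

# per-batch information needs a separate pass over the file
m.dump_batch_headers()
first_batch = m.get_batches()[0]
print(m.get_batch_header(first_batch)["umat"])
```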
#### File: xia2/Modules/MtzUtils.py
```python
from __future__ import absolute_import, division, print_function
import iotbx.mtz
def space_group_from_mtz(file_name):
mtz_obj = iotbx.mtz.object(file_name=file_name)
return mtz_obj.space_group()
def space_group_name_from_mtz(file_name):
return space_group_from_mtz(file_name).type().lookup_symbol()
def space_group_number_from_mtz(file_name):
return space_group_from_mtz(file_name).type().number()
def batches_from_mtz(file_name):
mtz_obj = iotbx.mtz.object(file_name=file_name)
return [batch.num() for batch in mtz_obj.batches()]
def nref_from_mtz(file_name):
mtz_obj = iotbx.mtz.object(file_name=file_name)
return mtz_obj.n_reflections()
def reindex(hklin, hklout, change_of_basis_op, space_group=None):
from cctbx import sgtbx
from xia2.Modules.Scaler.CommonScaler import clean_reindex_operator
if not isinstance(change_of_basis_op, sgtbx.change_of_basis_op):
change_of_basis_op = sgtbx.change_of_basis_op(
str(clean_reindex_operator(change_of_basis_op))
)
if space_group is not None and not isinstance(space_group, sgtbx.space_group):
space_group = sgtbx.space_group_info(str(space_group)).group()
mtz_obj = iotbx.mtz.object(file_name=hklin)
original_index_miller_indices = mtz_obj.extract_original_index_miller_indices()
reindexed_miller_indices = change_of_basis_op.apply(original_index_miller_indices)
if space_group is not None:
mtz_obj.set_space_group(space_group)
mtz_obj.replace_original_index_miller_indices(reindexed_miller_indices)
mtz_obj.write(hklout)
```
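A short usage sketch for these helpers; the file names, change-of-basis operator and space group below are illustrative only:

```python
from cctbx import sgtbx

from xia2.Modules import MtzUtils

hklin = "sorted.mtz"  # hypothetical reflection file

print(MtzUtils.space_group_name_from_mtz(hklin))
print(MtzUtils.space_group_number_from_mtz(hklin))
print("%d reflections" % MtzUtils.nref_from_mtz(hklin))

batches = MtzUtils.batches_from_mtz(hklin)
print("batches %d to %d" % (min(batches), max(batches)))

# apply a change of basis and (optionally) assign a new space group
cb_op = sgtbx.change_of_basis_op("-b,a,c")  # illustrative operator
MtzUtils.reindex(hklin, "reindexed.mtz", cb_op, space_group="P 41 21 2")
```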
#### File: Modules/Scaler/CCP4ScalerHelpers.py
```python
from __future__ import absolute_import, division, print_function
import math
import os
import sys
import xia2.Wrappers.CCP4.Pointless
import xia2.Wrappers.Dials.Symmetry
from iotbx import mtz
from xia2.Experts.ResolutionExperts import remove_blank
from xia2.Handlers.Files import FileHandler
from xia2.Handlers.Phil import PhilIndex
from xia2.Handlers.Streams import Debug
from xia2.lib.bits import auto_logfiler
from xia2.Modules import MtzUtils
############ JIFFY FUNCTIONS #################
def nint(a):
return int(round(a) - 0.5) + (a > 0)
def _resolution_estimate(ordered_pair_list, cutoff):
"""Come up with a linearly interpolated estimate of resolution at
cutoff cutoff from input data [(resolution, i_sigma)]."""
x = []
y = []
for o in ordered_pair_list:
x.append(o[0])
y.append(o[1])
if max(y) < cutoff:
# there is no resolution where this exceeds the I/sigma
# cutoff
return -1.0
# this means that there is a place where the resolution cutoff
# can be reached - get there by working backwards
x.reverse()
y.reverse()
if y[0] >= cutoff:
# this exceeds the resolution limit requested
return x[0]
j = 0
while y[j] < cutoff:
j += 1
resolution = x[j] + (cutoff - y[j]) * (x[j - 1] - x[j]) / (y[j - 1] - y[j])
return resolution
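# Illustrative sketch (not part of xia2): with toy (resolution, I/sigma) pairs
# ordered from low to high resolution, _resolution_estimate interpolates the
# d-spacing at which I/sigma drops to the requested cutoff.
def _demo_resolution_estimate():
    pairs = [(3.0, 9.0), (2.0, 4.0), (1.5, 1.5), (1.2, 0.8)]
    # between (2.0, 4.0) and (1.5, 1.5) the cutoff of 2.0 is reached at d = 1.6
    return _resolution_estimate(pairs, 2.0)  # -> 1.6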
def ersatz_resolution(reflection_file, batch_ranges):
mtz_obj = mtz.object(reflection_file)
miller = mtz_obj.extract_miller_indices()
dmax, dmin = mtz_obj.max_min_resolution()
ipr_column = None
sigipr_column = None
i_column = None
sigi_column = None
batch_column = None
uc = None
for crystal in mtz_obj.crystals():
if crystal.name() == "HKL_Base":
continue
uc = crystal.unit_cell()
for dataset in crystal.datasets():
for column in dataset.columns():
if column.label() == "IPR":
ipr_column = column
elif column.label() == "SIGIPR":
sigipr_column = column
elif column.label() == "BATCH":
batch_column = column
elif column.label() == "I":
i_column = column
elif column.label() == "SIGI":
sigi_column = column
assert ipr_column or i_column
assert sigipr_column or sigi_column
assert batch_column
if ipr_column is None:
ipr_column = i_column
if sigipr_column is None:
sigipr_column = sigi_column
ipr_values = ipr_column.extract_values()
sigipr_values = sigipr_column.extract_values()
batch_values = batch_column.extract_values()
batches = batch_values.as_double().iround()
resolutions = {}
for start, end in batch_ranges:
sel = (batches >= start) & (batches <= end)
d = uc.d(miller.select(sel))
isig = ipr_values.select(sel) / sigipr_values.select(sel)
resolutions[(start, end)] = compute_resolution(dmax, dmin, d, isig)
return resolutions
def meansd(values):
mean = sum(values) / len(values)
var = sum([(v - mean) * (v - mean) for v in values]) / len(values)
return mean, math.sqrt(var)
def compute_resolution(dmax, dmin, d, isig):
# XXX As far as I can tell this function doesn't do anything useful as it
# just returns the unmodified dmin that was passed as input! Please refer
# to return 1.0 / math.sqrt(s) below & remove comment when you are happy...
bins = {}
smax = 1.0 / (dmax * dmax)
smin = 1.0 / (dmin * dmin)
for j in range(len(d)):
s = 1.0 / (d[j] * d[j])
n = nint(100.0 * (s - smax) / (smin - smax))
if n not in bins:
bins[n] = []
bins[n].append(isig[j])
# compute starting point i.e. maximum point on the curve, to cope with
# cases where low resolution has low I / sigma - see #1690.
max_misig = 0.0
max_bin = 0
for b in sorted(bins):
s = smax + b * (smin - smax) / 100.0
misig = meansd(bins[b])[0]
if misig > max_misig:
max_misig = misig
max_bin = b
for b in sorted(bins):
if b < max_bin:
continue
s = smax + b * (smin - smax) / 100.0
misig = meansd(bins[b])[0]
if misig < 1.0:
return 1.0 / math.sqrt(s)
return dmin
def _prepare_pointless_hklin(working_directory, hklin, phi_width):
"""Prepare some data for pointless - this will take only 180 degrees
of data if there is more than this (through a "pointless" command) else
will simply return hklin."""
# also remove blank images?
if not PhilIndex.params.xia2.settings.small_molecule:
Debug.write("Excluding blank images")
hklout = os.path.join(
working_directory, "%s_noblank.mtz" % (os.path.split(hklin)[-1][:-4])
)
FileHandler.record_temporary_file(hklout)
hklin = remove_blank(hklin, hklout)
# find the number of batches
batches = MtzUtils.batches_from_mtz(hklin)
n_batches = max(batches) - min(batches)
phi_limit = 180
if (
n_batches * phi_width < phi_limit
or PhilIndex.params.xia2.settings.small_molecule
):
return hklin
hklout = os.path.join(
working_directory, "%s_prepointless.mtz" % (os.path.split(hklin)[-1][:-4])
)
pl = xia2.Wrappers.CCP4.Pointless.Pointless()
pl.set_working_directory(working_directory)
auto_logfiler(pl)
pl.set_hklin(hklin)
pl.set_hklout(hklout)
first = min(batches)
last = first + int(phi_limit / phi_width)
Debug.write(
"Preparing data for pointless - %d batches (%d degrees)"
% ((last - first), phi_limit)
)
pl.limit_batches(first, last)
# we will want to delete this on exit
FileHandler.record_temporary_file(hklout)
return hklout
def _fraction_difference(value, reference):
"""How much (what %age) does value differ to reference?"""
if reference == 0.0:
return value
return math.fabs((value - reference) / reference)
############### HELPER CLASS #########################
class CCP4ScalerHelper(object):
"""A class to help the CCP4 Scaler along a little."""
def __init__(self):
self._working_directory = os.getcwd()
def set_working_directory(self, working_directory):
self._working_directory = working_directory
def get_working_directory(self):
return self._working_directory
def Pointless(self):
"""Create a Pointless wrapper from the xia2 wrapper - and set the
working directory and log file stuff as a part of this..."""
pointless = xia2.Wrappers.CCP4.Pointless.Pointless()
pointless.set_working_directory(self.get_working_directory())
auto_logfiler(pointless)
return pointless
def dials_symmetry(self):
symmetry = xia2.Wrappers.Dials.Symmetry.DialsSymmetry()
symmetry.set_working_directory(self.get_working_directory())
auto_logfiler(symmetry)
return symmetry
def pointless_indexer_jiffy(self, hklin, refiner):
"""A jiffy to centralise the interactions between pointless
and the Indexer."""
need_to_return = False
probably_twinned = False
if PhilIndex.params.xia2.settings.symmetry.program == "dials":
symmetry = self.dials_symmetry()
else:
symmetry = self.Pointless()
symmetry.set_hklin(hklin)
symmetry.decide_pointgroup()
rerun_pointless = False
possible = symmetry.get_possible_lattices()
correct_lattice = None
Debug.write("Possible lattices (pointless):")
Debug.write(" ".join(possible))
for lattice in possible:
state = refiner.set_refiner_asserted_lattice(lattice)
if state == refiner.LATTICE_CORRECT:
Debug.write("Agreed lattice %s" % lattice)
correct_lattice = lattice
break
elif state == refiner.LATTICE_IMPOSSIBLE:
Debug.write("Rejected lattice %s" % lattice)
rerun_pointless = True
continue
elif state == refiner.LATTICE_POSSIBLE:
Debug.write("Accepted lattice %s, will reprocess" % lattice)
need_to_return = True
correct_lattice = lattice
break
if correct_lattice is None:
correct_lattice = refiner.get_refiner_lattice()
rerun_pointless = True
Debug.write("No solution found: assuming lattice from refiner")
if rerun_pointless:
symmetry.set_correct_lattice(correct_lattice)
symmetry.decide_pointgroup()
Debug.write("Pointless analysis of %s" % symmetry.get_hklin())
pointgroup = symmetry.get_pointgroup()
reindex_op = symmetry.get_reindex_operator()
probably_twinned = symmetry.get_probably_twinned()
Debug.write("Pointgroup: %s (%s)" % (pointgroup, reindex_op))
return pointgroup, reindex_op, need_to_return, probably_twinned
def pointless_indexer_multisweep(self, hklin, refiners):
"""A jiffy to centralise the interactions between pointless
and the Indexer, multisweep edition."""
need_to_return = False
probably_twinned = False
pointless = self.Pointless()
pointless.set_hklin(hklin)
pointless.decide_pointgroup()
rerun_pointless = False
possible = pointless.get_possible_lattices()
correct_lattice = None
Debug.write("Possible lattices (pointless):")
Debug.write(" ".join(possible))
# all of them contain the same indexer link, so any one will do here.
refiner = refiners[0]
for lattice in possible:
state = refiner.set_refiner_asserted_lattice(lattice)
if state == refiner.LATTICE_CORRECT:
Debug.write("Agreed lattice %s" % lattice)
correct_lattice = lattice
break
elif state == refiner.LATTICE_IMPOSSIBLE:
Debug.write("Rejected lattice %s" % lattice)
rerun_pointless = True
continue
elif state == refiner.LATTICE_POSSIBLE:
Debug.write("Accepted lattice %s, will reprocess" % lattice)
need_to_return = True
correct_lattice = lattice
break
if correct_lattice is None:
correct_lattice = refiner.get_refiner_lattice()
rerun_pointless = True
Debug.write("No solution found: assuming lattice from refiner")
if need_to_return:
if (
PhilIndex.params.xia2.settings.integrate_p1
and not PhilIndex.params.xia2.settings.reintegrate_correct_lattice
):
need_to_return = False
rerun_pointless = True
else:
for refiner in refiners[1:]:
refiner.refiner_reset()
if rerun_pointless:
pointless.set_correct_lattice(correct_lattice)
pointless.decide_pointgroup()
Debug.write("Pointless analysis of %s" % pointless.get_hklin())
pointgroup = pointless.get_pointgroup()
reindex_op = pointless.get_reindex_operator()
probably_twinned = pointless.get_probably_twinned()
Debug.write("Pointgroup: %s (%s)" % (pointgroup, reindex_op))
return pointgroup, reindex_op, need_to_return, probably_twinned
# Sweep info class to replace dictionary... #884
class SweepInformation(object):
def __init__(self, integrater):
self._project_info = integrater.get_integrater_project_info()
self._sweep_name = integrater.get_integrater_sweep_name()
self._integrater = integrater
sweep = integrater.get_integrater_sweep()
self._batches = sweep.get_frames_to_process()
if self._batches is None:
self._batches = integrater.get_integrater_batches()
self._batch_offset = 0
self._image_to_epoch = integrater.get_integrater_sweep().get_image_to_epoch()
self._image_to_dose = {}
self._reflections = None
self._experiments = None
def to_dict(self):
obj = {}
obj["__id__"] = "SweepInformation"
import inspect
attributes = inspect.getmembers(self, lambda m: not (inspect.isroutine(m)))
for a in attributes:
if a[0].startswith("__"):
continue
elif a[0] == "_integrater":
obj[a[0]] = a[1].to_dict()
else:
obj[a[0]] = a[1]
return obj
@classmethod
def from_dict(cls, obj):
assert obj["__id__"] == "SweepInformation"
return_obj = cls.__new__(cls)
for k, v in obj.iteritems():
if k == "_integrater":
from libtbx.utils import import_python_object
integrater_cls = import_python_object(
import_path=".".join((v["__module__"], v["__name__"])),
error_prefix="",
target_must_be="",
where_str="",
).object
v = integrater_cls.from_dict(v)
setattr(return_obj, k, v)
return return_obj
def get_project_info(self):
return self._project_info
def get_sweep_name(self):
return self._sweep_name
def get_integrater(self):
return self._integrater
def get_batches(self):
return self._batches
def set_batches(self, batches):
Debug.write(
"Setting batches for sweep %s: %i to %i"
% (self.get_sweep_name(), batches[0], batches[1])
)
self._batches = batches
def set_batch_offset(self, batch_offset):
self._batch_offset = batch_offset
def get_batch_offset(self):
return self._batch_offset
def get_batch_range(self):
return min(self._batches), max(self._batches)
def get_header(self):
return self._integrater.get_header()
def get_template(self):
return self._integrater.get_template()
def set_dose_information(self, epoch_to_dose):
for i in self._image_to_epoch:
e = self._image_to_epoch[i]
d = epoch_to_dose[e]
self._image_to_dose[i] = d
def get_circle_resolution(self):
"""Get the resolution of the inscribed circle used for this sweep."""
header = self._integrater.get_header()
wavelength = self._integrater.get_wavelength()
detector_width = header["size"][0] * header["pixel"][0]
detector_height = header["size"][1] * header["pixel"][1]
distance = self._integrater.get_integrater_indexer().get_indexer_distance()
beam = self._integrater.get_integrater_indexer().get_indexer_beam_centre()
radius = min(
[beam[0], detector_width - beam[0], beam[1], detector_height - beam[1]]
)
theta = 0.5 * math.atan(radius / distance)
return wavelength / (2 * math.sin(theta))
def get_integrater_resolution(self):
return self._integrater.get_integrater_high_resolution()
def get_reflections(self):
if self._reflections:
return self._reflections
else:
return self._integrater.get_integrater_intensities()
def set_reflections(self, reflections):
self._reflections = reflections
def set_experiments(self, experiments):
self._experiments = experiments
def get_experiments(self):
return self._experiments
class SweepInformationHandler(object):
def __init__(self, epoch_to_integrater):
self._sweep_information = {}
for epoch in epoch_to_integrater:
self._sweep_information[epoch] = SweepInformation(
epoch_to_integrater[epoch]
)
self._first = sorted(self._sweep_information)[0]
def to_dict(self):
obj = {}
obj["__id__"] = "SweepInformationHandler"
d = {}
for k, v in self._sweep_information.iteritems():
d[k] = v.to_dict()
obj["_sweep_information"] = d
return obj
@classmethod
def from_dict(cls, obj):
assert obj["__id__"] == "SweepInformationHandler"
return_obj = cls.__new__(cls)
d = {}
for k, v in obj["_sweep_information"].iteritems():
d[k] = SweepInformation.from_dict(v)
return_obj._sweep_information = d
return_obj._first = sorted(return_obj._sweep_information)[0]
return return_obj
def get_epochs(self):
return sorted(self._sweep_information)
def remove_epoch(self, epoch):
del self._sweep_information[epoch]
self._first = sorted(self._sweep_information)[0]
def get_sweep_information(self, epoch):
return self._sweep_information[epoch]
def get_project_info(self):
si = self._sweep_information[self._first]
pname, xname, dname = si.get_project_info()
for e in self._sweep_information:
si = self._sweep_information[e]
assert si.get_project_info()[0] == pname
assert si.get_project_info()[1] == xname
return pname, xname
def anomalous_signals(hklin):
"""
Compute some measures of anomalous signal: df / f and di / sig(di).
"""
m = mtz.object(hklin)
mas = m.as_miller_arrays()
data = None
for ma in mas:
if not ma.anomalous_flag():
continue
if str(ma.observation_type()) != "xray.intensity":
continue
data = ma
if not data:
Debug.write("no anomalous data found")
return
df_f = data.anomalous_signal()
differences = data.anomalous_differences()
di_sigdi = sum(abs(differences.data())) / sum(differences.sigmas())
return df_f, di_sigdi
def mosflm_B_matrix(uc):
from scitbx.matrix import sqr
from math import sin, cos, pi
parameters = uc.parameters()
r_parameters = uc.reciprocal_parameters()
a = parameters[:3]
al = [pi * p / 180.0 for p in parameters[3:]]
b = r_parameters[:3]
be = [pi * p / 180.0 for p in r_parameters[3:]]
mosflm_B = sqr(
(
b[0],
b[1] * cos(be[2]),
b[2] * cos(be[1]),
0,
b[1] * sin(be[2]),
-b[2] * sin(be[1]) * cos(al[0]),
0,
0,
1.0 / a[2],
)
)
return mosflm_B
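# Illustrative sketch (not part of xia2): building the Mosflm-convention B matrix
# for a made-up tetragonal cell via cctbx.uctbx; the cell below is an assumption
# for demonstration only.
def _demo_mosflm_B_matrix():
    from cctbx import uctbx

    uc = uctbx.unit_cell((78.0, 78.0, 37.0, 90.0, 90.0, 90.0))
    B = mosflm_B_matrix(uc)
    # B is a scitbx.matrix.sqr (3x3, row-major) mapping (h, k, l) to a
    # reciprocal-space vector in the crystal frame
    return B.elems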
def get_umat_bmat_lattice_symmetry_from_mtz(mtz_file):
"""Get the U matrix and lattice symmetry derived from the unit cell
constants from an MTZ file."""
from iotbx import mtz
m = mtz.object(mtz_file)
# assert first U matrix from batches is OK
uc = m.crystals()[0].unit_cell()
from cctbx.sgtbx import lattice_symmetry_group
lattice_symm = lattice_symmetry_group(uc, max_delta=0.0)
return tuple(m.batches()[0].umat()), mosflm_B_matrix(uc), lattice_symm
if __name__ == "__main__":
for arg in sys.argv[1:]:
df_f, di_sigdi = anomalous_signals(arg)
print("%s: %.3f %.3f" % (os.path.split(arg)[-1], df_f, di_sigdi))
```
#### File: Modules/Scaler/CommonScaler.py
```python
from __future__ import absolute_import, division, print_function
import math
import os
from iotbx import mtz
from xia2.Handlers.Files import FileHandler
from xia2.Handlers.Phil import PhilIndex
from xia2.Handlers.Streams import Chatter, Debug
from xia2.Handlers.CIF import CIF, mmCIF
from xia2.lib.bits import nifty_power_of_ten, auto_logfiler
from xia2.Modules.AnalyseMyIntensities import AnalyseMyIntensities
from xia2.Modules import MtzUtils
from xia2.Modules.CCP4InterRadiationDamageDetector import (
CCP4InterRadiationDamageDetector,
)
from xia2.Modules.Scaler.CCP4ScalerHelpers import anomalous_signals
from xia2.Schema.Interfaces.Scaler import Scaler
# new resolution limit code
from xia2.Wrappers.XIA.Merger import Merger
def clean_reindex_operator(reindex_operator):
return reindex_operator.replace("[", "").replace("]", "")
class CommonScaler(Scaler):
"""Unified bits which the scalers have in common over the interface."""
def __init__(self):
super(CommonScaler, self).__init__()
self._sweep_handler = None
self._scalr_twinning_score = None
self._scalr_twinning_conclusion = None
self._spacegroup_reindex_operator = None
def _sort_together_data_ccp4(self):
"""Sort together in the right order (rebatching as we go) the sweeps
we want to scale together."""
max_batches = 0
for epoch in self._sweep_handler.get_epochs():
si = self._sweep_handler.get_sweep_information(epoch)
pname, xname, dname = si.get_project_info()
sname = si.get_sweep_name()
hklin = si.get_reflections()
# limit the reflections - e.g. if we are re-running the scaling step
# on just a subset of the integrated data
hklin = si.get_reflections()
limit_batch_range = None
for sweep in PhilIndex.params.xia2.settings.sweep:
if sweep.id == sname and sweep.range is not None:
limit_batch_range = sweep.range
break
if limit_batch_range is not None:
Debug.write(
"Limiting batch range for %s: %s" % (sname, limit_batch_range)
)
start, end = limit_batch_range
hklout = os.path.splitext(hklin)[0] + "_tmp.mtz"
FileHandler.record_temporary_file(hklout)
rb = self._factory.Pointless()
rb.set_hklin(hklin)
rb.set_hklout(hklout)
rb.limit_batches(start, end)
si.set_reflections(hklout)
si.set_batches(limit_batch_range)
# keep a count of the maximum number of batches in a block -
# this will be used to make rebatch work below.
hklin = si.get_reflections()
batches = MtzUtils.batches_from_mtz(hklin)
if 1 + max(batches) - min(batches) > max_batches:
max_batches = max(batches) - min(batches) + 1
Debug.write("Biggest sweep has %d batches" % max_batches)
max_batches = nifty_power_of_ten(max_batches)
# then rebatch the files, to make sure that the batch numbers are
# in the same order as the epochs of data collection.
counter = 0
for epoch in self._sweep_handler.get_epochs():
si = self._sweep_handler.get_sweep_information(epoch)
hklin = si.get_reflections()
pname, xname, dname = si.get_project_info()
sname = si.get_sweep_name()
hklout = os.path.join(
self.get_working_directory(),
"%s_%s_%s_%s_integrated.mtz" % (pname, xname, dname, sname),
)
first_batch = min(si.get_batches())
si.set_batch_offset(counter * max_batches - first_batch + 1)
from xia2.Modules.Scaler.rebatch import rebatch
new_batches = rebatch(
hklin,
hklout,
first_batch=counter * max_batches + 1,
pname=pname,
xname=xname,
dname=dname,
)
# update the "input information"
si.set_reflections(hklout)
si.set_batches(new_batches)
# update the counter & recycle
counter += 1
s = self._factory.Sortmtz()
hklout = os.path.join(
self.get_working_directory(),
"%s_%s_sorted.mtz" % (self._scalr_pname, self._scalr_xname),
)
s.set_hklout(hklout)
for epoch in self._sweep_handler.get_epochs():
s.add_hklin(
self._sweep_handler.get_sweep_information(epoch).get_reflections()
)
s.sort()
# verify that the measurements are in the correct setting
# choice for the spacegroup
hklin = hklout
hklout = hklin.replace("sorted.mtz", "temp.mtz")
if not self.get_scaler_reference_reflection_file():
if PhilIndex.params.xia2.settings.symmetry.program == "dials":
p = self._factory.dials_symmetry()
else:
p = self._factory.Pointless()
FileHandler.record_log_file(
"%s %s pointless" % (self._scalr_pname, self._scalr_xname),
p.get_log_file(),
)
if len(self._sweep_handler.get_epochs()) > 1:
p.set_hklin(hklin)
else:
# permit the use of pointless preparation...
epoch = self._sweep_handler.get_epochs()[0]
p.set_hklin(
self._prepare_pointless_hklin(
hklin,
self._sweep_handler.get_sweep_information(epoch)
.get_integrater()
.get_phi_width(),
)
)
if self._scalr_input_spacegroup:
Debug.write(
"Assigning user input spacegroup: %s" % self._scalr_input_spacegroup
)
p.decide_spacegroup()
spacegroup = p.get_spacegroup()
reindex_operator = p.get_spacegroup_reindex_operator()
Debug.write(
"Pointless thought %s (reindex as %s)"
% (spacegroup, reindex_operator)
)
spacegroup = self._scalr_input_spacegroup
reindex_operator = "h,k,l"
self._spacegroup_reindex_operator = reindex_operator
else:
p.decide_spacegroup()
spacegroup = p.get_spacegroup()
reindex_operator = p.get_spacegroup_reindex_operator()
self._spacegroup_reindex_operator = clean_reindex_operator(
reindex_operator
)
Debug.write(
"Pointless thought %s (reindex as %s)"
% (spacegroup, reindex_operator)
)
if self._scalr_input_spacegroup:
self._scalr_likely_spacegroups = [self._scalr_input_spacegroup]
else:
self._scalr_likely_spacegroups = p.get_likely_spacegroups()
Chatter.write("Likely spacegroups:")
for spag in self._scalr_likely_spacegroups:
Chatter.write("%s" % spag)
Chatter.write(
"Reindexing to first spacegroup setting: %s (%s)"
% (spacegroup, clean_reindex_operator(reindex_operator))
)
else:
spacegroup = MtzUtils.space_group_name_from_mtz(
self.get_scaler_reference_reflection_file()
)
reindex_operator = "h,k,l"
self._scalr_likely_spacegroups = [spacegroup]
Debug.write("Assigning spacegroup %s from reference" % spacegroup)
# then run reindex to set the correct spacegroup
ri = self._factory.Reindex()
ri.set_hklin(hklin)
ri.set_hklout(hklout)
ri.set_spacegroup(spacegroup)
ri.set_operator(reindex_operator)
ri.reindex()
FileHandler.record_temporary_file(hklout)
# then resort the reflections (one last time!)
s = self._factory.Sortmtz()
temp = hklin
hklin = hklout
hklout = temp
s.add_hklin(hklin)
s.set_hklout(hklout)
s.sort()
# done preparing!
self._prepared_reflections = s.get_hklout()
def _sort_together_data_xds(self):
if len(self._sweep_information) == 1:
return self._sort_together_data_xds_one_sweep()
max_batches = 0
for epoch in self._sweep_information.keys():
hklin = self._sweep_information[epoch]["scaled_reflections"]
if self._sweep_information[epoch]["batches"] == [0, 0]:
Chatter.write("Getting batches from %s" % hklin)
batches = MtzUtils.batches_from_mtz(hklin)
self._sweep_information[epoch]["batches"] = [min(batches), max(batches)]
Chatter.write("=> %d to %d" % (min(batches), max(batches)))
batches = self._sweep_information[epoch]["batches"]
if 1 + max(batches) - min(batches) > max_batches:
max_batches = max(batches) - min(batches) + 1
Debug.write("Biggest sweep has %d batches" % max_batches)
max_batches = nifty_power_of_ten(max_batches)
epochs = sorted(self._sweep_information.keys())
counter = 0
for epoch in epochs:
hklin = self._sweep_information[epoch]["scaled_reflections"]
pname = self._sweep_information[epoch]["pname"]
xname = self._sweep_information[epoch]["xname"]
dname = self._sweep_information[epoch]["dname"]
hklout = os.path.join(
self.get_working_directory(),
"%s_%s_%s_%d.mtz" % (pname, xname, dname, counter),
)
# we will want to delete this on exit
FileHandler.record_temporary_file(hklout)
# record this for future reference - will be needed in the
# radiation damage analysis...
# hack - reset this as it gets in a muddle...
intgr = self._sweep_information[epoch]["integrater"]
self._sweep_information[epoch]["batches"] = intgr.get_integrater_batches()
first_batch = min(self._sweep_information[epoch]["batches"])
offset = counter * max_batches - first_batch + 1
self._sweep_information[epoch]["batch_offset"] = offset
from xia2.Modules.Scaler.rebatch import rebatch
new_batches = rebatch(
hklin, hklout, add_batch=offset, pname=pname, xname=xname, dname=dname
)
# update the "input information"
self._sweep_information[epoch]["hklin"] = hklout
self._sweep_information[epoch]["batches"] = new_batches
# update the counter & recycle
counter += 1
s = self._factory.Sortmtz()
hklout = os.path.join(
self.get_working_directory(),
"%s_%s_sorted.mtz" % (self._scalr_pname, self._scalr_xname),
)
s.set_hklout(hklout)
for epoch in epochs:
s.add_hklin(self._sweep_information[epoch]["hklin"])
s.sort(vrset=-99999999.0)
self._prepared_reflections = hklout
if self.get_scaler_reference_reflection_file():
spacegroups = [
MtzUtils.space_group_name_from_mtz(
self.get_scaler_reference_reflection_file()
)
]
reindex_operator = "h,k,l"
else:
pointless = self._factory.Pointless()
pointless.set_hklin(hklout)
pointless.decide_spacegroup()
FileHandler.record_log_file(
"%s %s pointless" % (self._scalr_pname, self._scalr_xname),
pointless.get_log_file(),
)
spacegroups = pointless.get_likely_spacegroups()
reindex_operator = pointless.get_spacegroup_reindex_operator()
if self._scalr_input_spacegroup:
Debug.write(
"Assigning user input spacegroup: %s" % self._scalr_input_spacegroup
)
spacegroups = [self._scalr_input_spacegroup]
reindex_operator = "h,k,l"
self._scalr_likely_spacegroups = spacegroups
spacegroup = self._scalr_likely_spacegroups[0]
self._scalr_reindex_operator = reindex_operator
Chatter.write("Likely spacegroups:")
for spag in self._scalr_likely_spacegroups:
Chatter.write("%s" % spag)
Chatter.write(
"Reindexing to first spacegroup setting: %s (%s)"
% (spacegroup, clean_reindex_operator(reindex_operator))
)
hklin = self._prepared_reflections
hklout = os.path.join(
self.get_working_directory(),
"%s_%s_reindex.mtz" % (self._scalr_pname, self._scalr_xname),
)
FileHandler.record_temporary_file(hklout)
ri = self._factory.Reindex()
ri.set_hklin(hklin)
ri.set_hklout(hklout)
ri.set_spacegroup(spacegroup)
ri.set_operator(reindex_operator)
ri.reindex()
hklin = hklout
hklout = os.path.join(
self.get_working_directory(),
"%s_%s_sorted.mtz" % (self._scalr_pname, self._scalr_xname),
)
s = self._factory.Sortmtz()
s.set_hklin(hklin)
s.set_hklout(hklout)
s.sort(vrset=-99999999.0)
self._prepared_reflections = hklout
Debug.write(
"Updating unit cell to %.2f %.2f %.2f %.2f %.2f %.2f" % tuple(ri.get_cell())
)
self._scalr_cell = tuple(ri.get_cell())
return
def _sort_together_data_xds_one_sweep(self):
assert len(self._sweep_information) == 1
epoch = self._sweep_information.keys()[0]
hklin = self._sweep_information[epoch]["scaled_reflections"]
if self.get_scaler_reference_reflection_file():
spacegroups = [
MtzUtils.space_group_name_from_mtz(
self.get_scaler_reference_reflection_file()
)
]
reindex_operator = "h,k,l"
elif self._scalr_input_spacegroup:
Debug.write(
"Assigning user input spacegroup: %s" % self._scalr_input_spacegroup
)
spacegroups = [self._scalr_input_spacegroup]
reindex_operator = "h,k,l"
else:
pointless = self._factory.Pointless()
pointless.set_hklin(hklin)
pointless.decide_spacegroup()
FileHandler.record_log_file(
"%s %s pointless" % (self._scalr_pname, self._scalr_xname),
pointless.get_log_file(),
)
spacegroups = pointless.get_likely_spacegroups()
reindex_operator = pointless.get_spacegroup_reindex_operator()
self._scalr_likely_spacegroups = spacegroups
spacegroup = self._scalr_likely_spacegroups[0]
self._scalr_reindex_operator = clean_reindex_operator(reindex_operator)
Chatter.write("Likely spacegroups:")
for spag in self._scalr_likely_spacegroups:
Chatter.write("%s" % spag)
Chatter.write(
"Reindexing to first spacegroup setting: %s (%s)"
% (spacegroup, clean_reindex_operator(reindex_operator))
)
hklout = os.path.join(
self.get_working_directory(),
"%s_%s_reindex.mtz" % (self._scalr_pname, self._scalr_xname),
)
FileHandler.record_temporary_file(hklout)
if reindex_operator == "[h,k,l]":
# just assign spacegroup
from cctbx import sgtbx
s = sgtbx.space_group(sgtbx.space_group_symbols(str(spacegroup)).hall())
m = mtz.object(hklin)
m.set_space_group(s).write(hklout)
self._scalr_cell = m.crystals()[-1].unit_cell().parameters()
Debug.write(
"Updating unit cell to %.2f %.2f %.2f %.2f %.2f %.2f"
% tuple(self._scalr_cell)
)
del m
del s
else:
ri = self._factory.Reindex()
ri.set_hklin(hklin)
ri.set_hklout(hklout)
ri.set_spacegroup(spacegroup)
ri.set_operator(reindex_operator)
ri.reindex()
Debug.write(
"Updating unit cell to %.2f %.2f %.2f %.2f %.2f %.2f"
% tuple(ri.get_cell())
)
self._scalr_cell = tuple(ri.get_cell())
hklin = hklout
hklout = os.path.join(
self.get_working_directory(),
"%s_%s_sorted.mtz" % (self._scalr_pname, self._scalr_xname),
)
s = self._factory.Sortmtz()
s.set_hklin(hklin)
s.set_hklout(hklout)
s.sort(vrset=-99999999.0)
self._prepared_reflections = hklout
def _scale_finish(self):
# compute anomalous signals if anomalous
if self.get_scaler_anomalous():
self._scale_finish_chunk_1_compute_anomalous()
# next transform to F's from I's etc.
if not self._scalr_scaled_refl_files:
raise RuntimeError("no reflection files stored")
# run xia2.report on each unmerged mtz file
# self._scale_finish_chunk_2_report()
if not PhilIndex.params.xia2.settings.small_molecule:
self._scale_finish_chunk_3_truncate()
self._scale_finish_chunk_4_mad_mangling()
if PhilIndex.params.xia2.settings.small_molecule:
self._scale_finish_chunk_5_finish_small_molecule()
self._scale_finish_export_shelxt()
return
# finally add a FreeR column, and record the new merged reflection
# file with the free column added.
self._scale_finish_chunk_6_add_free_r()
self._scale_finish_chunk_7_twinning()
# next have a look for radiation damage... if more than one wavelength
if len(self._scalr_scaled_refl_files.keys()) > 1:
self._scale_finish_chunk_8_raddam()
# finally add xia2 version to mtz history
from iotbx.reflection_file_reader import any_reflection_file
from xia2.XIA2Version import Version
import time
mtz_files = [self._scalr_scaled_reflection_files["mtz"]]
mtz_files.extend(self._scalr_scaled_reflection_files["mtz_unmerged"].values())
for mtz_file in mtz_files:
reader = any_reflection_file(mtz_file)
mtz_object = reader.file_content()
date_str = time.strftime("%d/%m/%Y at %H:%M:%S", time.gmtime())
mtz_object.add_history("From %s, run on %s" % (Version, date_str))
mtz_object.write(mtz_file)
def _scale_finish_chunk_1_compute_anomalous(self):
for key in self._scalr_scaled_refl_files:
f = self._scalr_scaled_refl_files[key]
m = mtz.object(f)
if m.space_group().is_centric():
Debug.write("Spacegroup is centric: %s" % f)
continue
Debug.write("Running anomalous signal analysis on %s" % f)
a_s = anomalous_signals(f)
if a_s is not None:
self._scalr_statistics[(self._scalr_pname, self._scalr_xname, key)][
"dF/F"
] = [a_s[0]]
self._scalr_statistics[(self._scalr_pname, self._scalr_xname, key)][
"dI/s(dI)"
] = [a_s[1]]
def _scale_finish_chunk_2_report(self):
from cctbx.array_family import flex
from iotbx.reflection_file_reader import any_reflection_file
from xia2.lib.bits import auto_logfiler
from xia2.Wrappers.XIA.Report import Report
for wavelength in self._scalr_scaled_refl_files.keys():
mtz_unmerged = self._scalr_scaled_reflection_files["mtz_unmerged"][
wavelength
]
reader = any_reflection_file(mtz_unmerged)
mtz_object = reader.file_content()
batches = mtz_object.as_miller_arrays_dict()[
"HKL_base", "HKL_base", "BATCH"
]
dose = flex.double(batches.size(), -1)
batch_to_dose = self.get_batch_to_dose()
for i, b in enumerate(batches.data()):
dose[i] = batch_to_dose[b]
c = mtz_object.crystals()[0]
d = c.datasets()[0]
d.add_column("DOSE", "R").set_values(dose.as_float())
tmp_mtz = os.path.join(self.get_working_directory(), "dose_tmp.mtz")
mtz_object.write(tmp_mtz)
hklin = tmp_mtz
FileHandler.record_temporary_file(hklin)
report = Report()
report.set_working_directory(self.get_working_directory())
report.set_mtz_filename(hklin)
htmlout = os.path.join(
self.get_working_directory(),
"%s_%s_%s_report.html"
% (self._scalr_pname, self._scalr_xname, wavelength),
)
report.set_html_filename(htmlout)
report.set_chef_min_completeness(0.95) # sensible?
auto_logfiler(report)
try:
report.run()
FileHandler.record_html_file(
"%s %s %s report"
% (self._scalr_pname, self._scalr_xname, wavelength),
htmlout,
)
except Exception as e:
Debug.write("xia2.report failed:")
Debug.write(str(e))
def _scale_finish_chunk_3_truncate(self):
for wavelength in self._scalr_scaled_refl_files.keys():
hklin = self._scalr_scaled_refl_files[wavelength]
truncate = self._factory.Truncate()
truncate.set_hklin(hklin)
if self.get_scaler_anomalous():
truncate.set_anomalous(True)
else:
truncate.set_anomalous(False)
FileHandler.record_log_file(
"%s %s %s truncate"
% (self._scalr_pname, self._scalr_xname, wavelength),
truncate.get_log_file(),
)
hklout = os.path.join(
self.get_working_directory(), "%s_truncated.mtz" % wavelength
)
truncate.set_hklout(hklout)
truncate.truncate()
xmlout = truncate.get_xmlout()
if xmlout is not None:
FileHandler.record_xml_file(
"%s %s %s truncate"
% (self._scalr_pname, self._scalr_xname, wavelength),
xmlout,
)
Debug.write(
"%d absent reflections in %s removed"
% (truncate.get_nabsent(), wavelength)
)
b_factor = truncate.get_b_factor()
if math.isnan(b_factor):
b_factor = None
# record the b factor somewhere (hopefully) useful...
self._scalr_statistics[(self._scalr_pname, self._scalr_xname, wavelength)][
"Wilson B factor"
] = [b_factor]
# and record the reflection file..
self._scalr_scaled_refl_files[wavelength] = hklout
def _scale_finish_chunk_4_mad_mangling(self):
if len(self._scalr_scaled_refl_files.keys()) > 1:
reflection_files = {}
for wavelength in self._scalr_scaled_refl_files.keys():
cad = self._factory.Cad()
cad.add_hklin(self._scalr_scaled_refl_files[wavelength])
cad.set_hklout(
os.path.join(
self.get_working_directory(), "cad-tmp-%s.mtz" % wavelength
)
)
cad.set_new_suffix(wavelength)
cad.update()
reflection_files[wavelength] = cad.get_hklout()
FileHandler.record_temporary_file(cad.get_hklout())
# now merge the reflection files together...
hklout = os.path.join(
self.get_working_directory(),
"%s_%s_merged.mtz" % (self._scalr_pname, self._scalr_xname),
)
FileHandler.record_temporary_file(hklout)
Debug.write("Merging all data sets to %s" % hklout)
cad = self._factory.Cad()
for wavelength in reflection_files.keys():
cad.add_hklin(reflection_files[wavelength])
cad.set_hklout(hklout)
cad.merge()
self._scalr_scaled_reflection_files["mtz_merged"] = hklout
else:
self._scalr_scaled_reflection_files[
"mtz_merged"
] = self._scalr_scaled_refl_files[self._scalr_scaled_refl_files.keys()[0]]
def _scale_finish_chunk_5_finish_small_molecule(self):
# keep 'mtz' and remove 'mtz_merged' from the dictionary for
# consistency with non-small-molecule workflow
self._scalr_scaled_reflection_files[
"mtz"
] = self._scalr_scaled_reflection_files["mtz_merged"]
del self._scalr_scaled_reflection_files["mtz_merged"]
FileHandler.record_data_file(self._scalr_scaled_reflection_files["mtz"])
def _scale_finish_export_shelxt(self):
"""Read hklin (unmerged reflection file) and generate SHELXT input file
and HKL file"""
from iotbx.reflection_file_reader import any_reflection_file
from iotbx.shelx import writer
from iotbx.shelx.hklf import miller_array_export_as_shelx_hklf
from cctbx.xray.structure import structure
from cctbx.xray import scatterer
for wavelength_name in self._scalr_scaled_refl_files.keys():
prefix = wavelength_name
if len(self._scalr_scaled_refl_files.keys()) == 1:
prefix = "shelxt"
prefixpath = os.path.join(self.get_working_directory(), prefix)
mtz_unmerged = self._scalr_scaled_reflection_files["mtz_unmerged"][
wavelength_name
]
reader = any_reflection_file(mtz_unmerged)
intensities = [
ma
for ma in reader.as_miller_arrays(merge_equivalents=False)
if ma.info().labels == ["I", "SIGI"]
][0]
indices = reader.file_content().extract_original_index_miller_indices()
intensities = intensities.customized_copy(
indices=indices, info=intensities.info()
)
with open("%s.hkl" % prefixpath, "wb") as hkl_file_handle:
# limit values to 4 digits (before decimal point), as this is what shelxt
# writes in its output files, and shelxl seems to read. ShelXL apparently
# does not read values >9999 properly
miller_array_export_as_shelx_hklf(
intensities,
hkl_file_handle,
scale_range=(-9999.0, 9999.0),
normalise_if_format_overflow=True,
)
crystal_symm = intensities.crystal_symmetry()
unit_cell_dims = self._scalr_cell
unit_cell_esds = self._scalr_cell_esd
cb_op = crystal_symm.change_of_basis_op_to_reference_setting()
if cb_op.c().r().as_hkl() == "h,k,l":
print("Change of basis to reference setting: %s" % cb_op)
crystal_symm = crystal_symm.change_basis(cb_op)
if str(cb_op) != "a,b,c":
unit_cell_dims = None
unit_cell_esds = None
# Would need to apply operation to cell errors, too. Need a test case for this
# crystal_symm.show_summary()
xray_structure = structure(crystal_symmetry=crystal_symm)
for element in "CNOH":
xray_structure.add_scatterer(scatterer(label=element, occupancy=1))
wavelength = self._scalr_xcrystal.get_xwavelength(
wavelength_name
).get_wavelength()
with open("%s.ins" % prefixpath, "w") as insfile:
insfile.write(
"".join(
writer.generator(
xray_structure,
wavelength=wavelength,
full_matrix_least_squares_cycles=0,
title=prefix,
unit_cell_dims=unit_cell_dims,
unit_cell_esds=unit_cell_esds,
)
)
)
FileHandler.record_data_file("%s.ins" % prefixpath)
FileHandler.record_data_file("%s.hkl" % prefixpath)
def _scale_finish_chunk_6_add_free_r(self):
hklout = os.path.join(
self.get_working_directory(),
"%s_%s_free_temp.mtz" % (self._scalr_pname, self._scalr_xname),
)
FileHandler.record_temporary_file(hklout)
scale_params = PhilIndex.params.xia2.settings.scale
if self.get_scaler_freer_file():
# e.g. via .xinfo file
freein = self.get_scaler_freer_file()
Debug.write("Copying FreeR_flag from %s" % freein)
c = self._factory.Cad()
c.set_freein(freein)
c.add_hklin(self._scalr_scaled_reflection_files["mtz_merged"])
c.set_hklout(hklout)
c.copyfree()
elif scale_params.freer_file is not None:
# e.g. via -freer_file command line argument
freein = scale_params.freer_file
Debug.write("Copying FreeR_flag from %s" % freein)
c = self._factory.Cad()
c.set_freein(freein)
c.add_hklin(self._scalr_scaled_reflection_files["mtz_merged"])
c.set_hklout(hklout)
c.copyfree()
else:
if scale_params.free_total:
ntot = scale_params.free_total
# need to get a fraction, so...
nref = MtzUtils.nref_from_mtz(
self._scalr_scaled_reflection_files["mtz_merged"]
)
free_fraction = float(ntot) / float(nref)
else:
free_fraction = scale_params.free_fraction
f = self._factory.Freerflag()
f.set_free_fraction(free_fraction)
f.set_hklin(self._scalr_scaled_reflection_files["mtz_merged"])
f.set_hklout(hklout)
f.add_free_flag()
# then check that this FreeR set is complete
hklin = hklout
hklout = os.path.join(
self.get_working_directory(),
"%s_%s_free.mtz" % (self._scalr_pname, self._scalr_xname),
)
# default fraction of 0.05
free_fraction = 0.05
if scale_params.free_fraction:
free_fraction = scale_params.free_fraction
elif scale_params.free_total:
ntot = scale_params.free_total
# need to get a fraction, so...
nref = MtzUtils.nref_from_mtz(hklin)
free_fraction = float(ntot) / float(nref)
f = self._factory.Freerflag()
f.set_free_fraction(free_fraction)
f.set_hklin(hklin)
f.set_hklout(hklout)
f.complete_free_flag()
# remove 'mtz_merged' from the dictionary - this is made
# redundant by the merged free...
del self._scalr_scaled_reflection_files["mtz_merged"]
# changed from mtz_merged_free to plain ol' mtz
self._scalr_scaled_reflection_files["mtz"] = hklout
# record this for future reference
FileHandler.record_data_file(hklout)
def _scale_finish_chunk_7_twinning(self):
hklout = self._scalr_scaled_reflection_files["mtz"]
m = mtz.object(hklout)
# FIXME in here should be able to just drop down to the lowest symmetry
# space group with the rotational elements for this calculation? I.e.
# P422 for P4/mmm?
if not m.space_group().is_centric():
from xia2.Toolkit.E4 import E4_mtz
E4s = E4_mtz(hklout, native=True)
self._scalr_twinning_score = E4s.items()[0][1]
if self._scalr_twinning_score > 1.9:
self._scalr_twinning_conclusion = "Your data do not appear twinned"
elif self._scalr_twinning_score < 1.6:
self._scalr_twinning_conclusion = "Your data appear to be twinned"
else:
self._scalr_twinning_conclusion = "Ambiguous score (1.6 < score < 1.9)"
else:
self._scalr_twinning_conclusion = "Data are centric"
self._scalr_twinning_score = 0
Chatter.write("Overall twinning score: %4.2f" % self._scalr_twinning_score)
Chatter.write(self._scalr_twinning_conclusion)
def _scale_finish_chunk_8_raddam(self):
crd = CCP4InterRadiationDamageDetector()
crd.set_working_directory(self.get_working_directory())
crd.set_hklin(self._scalr_scaled_reflection_files["mtz"])
if self.get_scaler_anomalous():
crd.set_anomalous(True)
hklout = os.path.join(self.get_working_directory(), "temp.mtz")
FileHandler.record_temporary_file(hklout)
crd.set_hklout(hklout)
status = crd.detect()
if status:
Chatter.write("")
Chatter.banner("Local Scaling %s" % self._scalr_xname)
for s in status:
Chatter.write("%s %s" % s)
Chatter.banner("")
else:
Debug.write("Local scaling failed")
def _estimate_resolution_limit(
self,
hklin,
batch_range=None,
use_isigma=True,
use_misigma=True,
reflections=None,
experiments=None,
):
params = PhilIndex.params.xia2.settings.resolution
m = Merger()
m.set_working_directory(self.get_working_directory())
from xia2.lib.bits import auto_logfiler
auto_logfiler(m)
if hklin:
m.set_hklin(hklin)
else:
assert reflections and experiments
m.set_reflections(reflections)
m.set_experiments(experiments)
m.set_limit_rmerge(params.rmerge)
m.set_limit_completeness(params.completeness)
m.set_limit_cc_half(params.cc_half)
m.set_cc_half_fit(params.cc_half_fit)
m.set_cc_half_significance_level(params.cc_half_significance_level)
if use_isigma:
m.set_limit_isigma(params.isigma)
if use_misigma:
m.set_limit_misigma(params.misigma)
if PhilIndex.params.xia2.settings.small_molecule:
m.set_nbins(20)
if batch_range is not None:
start, end = batch_range
m.set_batch_range(start, end)
m.run()
resolution_limits = []
reasoning = []
if params.completeness is not None:
r_comp = m.get_resolution_completeness()
resolution_limits.append(r_comp)
reasoning.append("completeness > %s" % params.completeness)
if params.cc_half is not None:
r_cc_half = m.get_resolution_cc_half()
resolution_limits.append(r_cc_half)
reasoning.append("cc_half > %s" % params.cc_half)
if params.rmerge is not None:
r_rm = m.get_resolution_rmerge()
resolution_limits.append(r_rm)
reasoning.append("rmerge > %s" % params.rmerge)
if params.isigma is not None:
r_uis = m.get_resolution_isigma()
resolution_limits.append(r_uis)
reasoning.append("unmerged <I/sigI> > %s" % params.isigma)
if params.misigma is not None:
r_mis = m.get_resolution_misigma()
resolution_limits.append(r_mis)
reasoning.append("merged <I/sigI> > %s" % params.misigma)
if any(resolution_limits):
resolution = max(resolution_limits)
reasoning = [
reason
for limit, reason in zip(resolution_limits, reasoning)
if limit >= resolution
]
reasoning = ", ".join(reasoning)
else:
resolution = 0.0
reasoning = None
return resolution, reasoning
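# Illustrative sketch (not part of xia2): how the per-criterion estimates above
# are combined - keep the most conservative limit (the largest d-spacing) and
# report every criterion that produced it. The numbers below are made up.
@staticmethod
def _demo_combine_resolution_limits():
    limits = {
        "completeness > 0.5": 1.30,
        "cc_half > 0.3": 1.45,
        "merged <I/sigI> > 2.0": 1.60,
    }
    resolution = max(limits.values())
    reasoning = ", ".join(k for k, v in limits.items() if v >= resolution)
    return resolution, reasoning  # -> (1.6, "merged <I/sigI> > 2.0")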
def _compute_scaler_statistics(
self, scaled_unmerged_mtz, selected_band=None, wave=None
):
""" selected_band = (d_min, d_max) with None for automatic determination. """
# mapping of expected dictionary names to iotbx.merging_statistics attributes
key_to_var = {
"I/sigma": "i_over_sigma_mean",
"Completeness": "completeness",
"Low resolution limit": "d_max",
"Multiplicity": "mean_redundancy",
"Rmerge(I)": "r_merge",
#'Wilson B factor':,
"Rmeas(I)": "r_meas",
"High resolution limit": "d_min",
"Total observations": "n_obs",
"Rpim(I)": "r_pim",
"CC half": "cc_one_half",
"Total unique": "n_uniq",
}
anom_key_to_var = {
"Rmerge(I+/-)": "r_merge",
"Rpim(I+/-)": "r_pim",
"Rmeas(I+/-)": "r_meas",
"Anomalous completeness": "anom_completeness",
"Anomalous correlation": "anom_half_corr",
"Anomalous multiplicity": "mean_redundancy",
}
stats = {}
select_result, select_anom_result = None, None
# don't call self.get_scaler_likely_spacegroups() since that calls
# self.scale() which introduced a subtle bug
from cctbx import sgtbx
sg = sgtbx.space_group_info(str(self._scalr_likely_spacegroups[0])).group()
from xia2.Handlers.Environment import Environment
log_directory = Environment.generate_directory("LogFiles")
merging_stats_file = os.path.join(
log_directory,
"%s_%s%s_merging-statistics.txt"
% (
self._scalr_pname,
self._scalr_xname,
"" if wave is None else "_%s" % wave,
),
)
merging_stats_json = os.path.join(
log_directory,
"%s_%s%s_merging-statistics.json"
% (
self._scalr_pname,
self._scalr_xname,
"" if wave is None else "_%s" % wave,
),
)
result, select_result, anom_result, select_anom_result = None, None, None, None
n_bins = PhilIndex.params.xia2.settings.merging_statistics.n_bins
import iotbx.merging_statistics
while result is None:
try:
result = self._iotbx_merging_statistics(
scaled_unmerged_mtz, anomalous=False, n_bins=n_bins
)
result.as_json(file_name=merging_stats_json)
with open(merging_stats_file, "w") as fh:
result.show(out=fh)
four_column_output = selected_band and any(selected_band)
if four_column_output:
select_result = self._iotbx_merging_statistics(
scaled_unmerged_mtz,
anomalous=False,
d_min=selected_band[0],
d_max=selected_band[1],
n_bins=n_bins,
)
if sg.is_centric():
anom_result = None
anom_key_to_var = {}
else:
anom_result = self._iotbx_merging_statistics(
scaled_unmerged_mtz, anomalous=True, n_bins=n_bins
)
stats["Anomalous slope"] = [anom_result.anomalous_np_slope]
if four_column_output:
select_anom_result = self._iotbx_merging_statistics(
scaled_unmerged_mtz,
anomalous=True,
d_min=selected_band[0],
d_max=selected_band[1],
n_bins=n_bins,
)
except iotbx.merging_statistics.StatisticsError:
# Too few reflections for too many bins. Reduce number of bins and try again.
result = None
n_bins = n_bins - 3
if n_bins > 5:
continue
else:
raise
from six.moves import cStringIO as StringIO
result_cache = StringIO()
result.show(out=result_cache)
for d, r, s in (
(key_to_var, result, select_result),
(anom_key_to_var, anom_result, select_anom_result),
):
for k, v in d.iteritems():
if four_column_output:
values = (
getattr(s.overall, v),
getattr(s.bins[0], v),
getattr(s.bins[-1], v),
getattr(r.overall, v),
)
else:
values = (
getattr(r.overall, v),
getattr(r.bins[0], v),
getattr(r.bins[-1], v),
)
if "completeness" in v:
values = [v_ * 100 for v_ in values]
if values[0] is not None:
stats[k] = values
return stats
def _iotbx_merging_statistics(
self, scaled_unmerged_mtz, anomalous=False, d_min=None, d_max=None, n_bins=None
):
import iotbx.merging_statistics
params = PhilIndex.params.xia2.settings.merging_statistics
i_obs = iotbx.merging_statistics.select_data(
scaled_unmerged_mtz, data_labels=None
)
i_obs = i_obs.customized_copy(anomalous_flag=True, info=i_obs.info())
result = iotbx.merging_statistics.dataset_statistics(
i_obs=i_obs,
d_min=d_min,
d_max=d_max,
n_bins=n_bins or params.n_bins,
anomalous=anomalous,
use_internal_variance=params.use_internal_variance,
eliminate_sys_absent=params.eliminate_sys_absent,
assert_is_not_unique_set_under_symmetry=False,
)
result.anomalous_np_slope = None
if anomalous:
merged_intensities = i_obs.merge_equivalents(
use_internal_variance=params.use_internal_variance
).array()
slope, intercept, n_pairs = anomalous_probability_plot(merged_intensities)
if slope is not None:
Debug.write("Anomalous difference normal probability plot:")
Debug.write("Slope: %.2f" % slope)
Debug.write("Intercept: %.2f" % intercept)
Debug.write("Number of pairs: %i" % n_pairs)
slope, intercept, n_pairs = anomalous_probability_plot(
merged_intensities, expected_delta=0.9
)
if slope is not None:
result.anomalous_np_slope = slope
Debug.write(
"Anomalous difference normal probability plot (within expected delta 0.9):"
)
Debug.write("Slope: %.2f" % slope)
Debug.write("Intercept: %.2f" % intercept)
Debug.write("Number of pairs: %i" % n_pairs)
return result
def _update_scaled_unit_cell(self):
params = PhilIndex.params
fast_mode = params.dials.fast_mode
if (
params.xia2.settings.integrater == "dials"
and not fast_mode
and params.xia2.settings.scale.two_theta_refine
):
from xia2.Wrappers.Dials.TwoThetaRefine import TwoThetaRefine
from xia2.lib.bits import auto_logfiler
Chatter.banner("Unit cell refinement")
# Collect a list of all sweeps, grouped by project, crystal, wavelength
groups = {}
self._scalr_cell_dict = {}
tt_refine_experiments = []
tt_refine_reflections = []
tt_refine_reindex_ops = []
for epoch in self._sweep_handler.get_epochs():
si = self._sweep_handler.get_sweep_information(epoch)
pi = "_".join(si.get_project_info())
intgr = si.get_integrater()
groups[pi] = groups.get(pi, []) + [
(
intgr.get_integrated_experiments(),
intgr.get_integrated_reflections(),
intgr.get_integrater_reindex_operator(),
)
]
# Two theta refine the unit cell for each group
p4p_file = os.path.join(
self.get_working_directory(),
"%s_%s.p4p" % (self._scalr_pname, self._scalr_xname),
)
for pi in groups.keys():
tt_grouprefiner = TwoThetaRefine()
tt_grouprefiner.set_working_directory(self.get_working_directory())
auto_logfiler(tt_grouprefiner)
args = zip(*groups[pi])
tt_grouprefiner.set_experiments(args[0])
tt_grouprefiner.set_reflection_files(args[1])
tt_grouprefiner.set_output_p4p(p4p_file)
tt_refine_experiments.extend(args[0])
tt_refine_reflections.extend(args[1])
tt_refine_reindex_ops.extend(args[2])
reindex_ops = args[2]
from cctbx.sgtbx import change_of_basis_op as cb_op
if self._spacegroup_reindex_operator is not None:
reindex_ops = [
(
cb_op(str(self._spacegroup_reindex_operator))
* cb_op(str(op))
).as_hkl()
if op is not None
else self._spacegroup_reindex_operator
for op in reindex_ops
]
tt_grouprefiner.set_reindex_operators(reindex_ops)
tt_grouprefiner.run()
Chatter.write(
"%s: %6.2f %6.2f %6.2f %6.2f %6.2f %6.2f"
% tuple(
["".join(pi.split("_")[2:])]
+ list(tt_grouprefiner.get_unit_cell())
)
)
self._scalr_cell_dict[pi] = (
tt_grouprefiner.get_unit_cell(),
tt_grouprefiner.get_unit_cell_esd(),
tt_grouprefiner.import_cif(),
tt_grouprefiner.import_mmcif(),
)
if len(groups) > 1:
cif_in = tt_grouprefiner.import_cif()
cif_out = CIF.get_block(pi)
for key in sorted(cif_in.keys()):
cif_out[key] = cif_in[key]
mmcif_in = tt_grouprefiner.import_mmcif()
mmcif_out = mmCIF.get_block(pi)
for key in sorted(mmcif_in.keys()):
mmcif_out[key] = mmcif_in[key]
# Two theta refine everything together
if len(groups) > 1:
tt_refiner = TwoThetaRefine()
tt_refiner.set_working_directory(self.get_working_directory())
tt_refiner.set_output_p4p(p4p_file)
auto_logfiler(tt_refiner)
tt_refiner.set_experiments(tt_refine_experiments)
tt_refiner.set_reflection_files(tt_refine_reflections)
if self._spacegroup_reindex_operator is not None:
reindex_ops = [
(
cb_op(str(self._spacegroup_reindex_operator))
* cb_op(str(op))
).as_hkl()
if op is not None
else self._spacegroup_reindex_operator
for op in tt_refine_reindex_ops
]
else:
reindex_ops = tt_refine_reindex_ops
tt_refiner.set_reindex_operators(reindex_ops)
tt_refiner.run()
self._scalr_cell = tt_refiner.get_unit_cell()
Chatter.write(
"Overall: %6.2f %6.2f %6.2f %6.2f %6.2f %6.2f"
% tt_refiner.get_unit_cell()
)
self._scalr_cell_esd = tt_refiner.get_unit_cell_esd()
cif_in = tt_refiner.import_cif()
mmcif_in = tt_refiner.import_mmcif()
else:
self._scalr_cell, self._scalr_cell_esd, cif_in, mmcif_in = self._scalr_cell_dict.values()[
0
]
if params.xia2.settings.small_molecule:
FileHandler.record_data_file(p4p_file)
import dials.util.version
cif_out = CIF.get_block("xia2")
mmcif_out = mmCIF.get_block("xia2")
cif_out["_computing_cell_refinement"] = mmcif_out[
"_computing.cell_refinement"
] = ("DIALS 2theta refinement, %s" % dials.util.version.dials_version())
for key in sorted(cif_in.keys()):
cif_out[key] = cif_in[key]
for key in sorted(mmcif_in.keys()):
mmcif_out[key] = mmcif_in[key]
Debug.write("Unit cell obtained by two-theta refinement")
else:
ami = AnalyseMyIntensities()
ami.set_working_directory(self.get_working_directory())
average_unit_cell, ignore_sg = ami.compute_average_cell(
[
self._scalr_scaled_refl_files[key]
for key in self._scalr_scaled_refl_files
]
)
Debug.write("Computed average unit cell (will use in all files)")
self._scalr_cell = average_unit_cell
self._scalr_cell_esd = None
# Write average unit cell to .cif
cif_out = CIF.get_block("xia2")
cif_out["_computing_cell_refinement"] = "AIMLESS averaged unit cell"
for cell, cifname in zip(
self._scalr_cell,
[
"length_a",
"length_b",
"length_c",
"angle_alpha",
"angle_beta",
"angle_gamma",
],
):
cif_out["_cell_%s" % cifname] = cell
Debug.write("%7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % self._scalr_cell)
def unify_setting(self):
"""Unify the setting for the sweeps."""
# Currently implemented for CCP4ScalerA and DialsScaler
from scitbx.matrix import sqr
reference_U = None
i3 = sqr((1, 0, 0, 0, 1, 0, 0, 0, 1))
for epoch in self._sweep_handler.get_epochs():
si = self._sweep_handler.get_sweep_information(epoch)
intgr = si.get_integrater()
fixed = sqr(intgr.get_goniometer().get_fixed_rotation())
# delegate UB lattice symmetry calculation to individual Scalers.
u, b, s = self.get_UBlattsymm_from_sweep_info(si)
U = fixed.inverse() * sqr(u).transpose()
B = sqr(b)
if reference_U is None:
reference_U = U
continue
results = []
for op in s.all_ops():
R = B * sqr(op.r().as_double()).transpose() * B.inverse()
nearly_i3 = (U * R).inverse() * reference_U
score = sum([abs(_n - _i) for (_n, _i) in zip(nearly_i3, i3)])
results.append((score, op.r().as_hkl(), op))
results.sort()
best = results[0]
Debug.write("Best reindex: %s %.3f" % (best[1], best[0]))
reindex_op = best[2].r().inverse().as_hkl()
# delegate reindexing to individual Scalers.
self.apply_reindex_operator_to_sweep_info(
si, reindex_op, reason="unifying [U] setting"
)
# recalculate to verify
u, _, __ = self.get_UBlattsymm_from_sweep_info(si)
U = fixed.inverse() * sqr(u).transpose()
Debug.write("New reindex: %s" % (U.inverse() * reference_U))
# FIXME I should probably raise an exception at this stage if this
# is not about I3...
def brehm_diederichs_reindexing(self):
"""Run brehm diederichs reindexing algorithm."""
# Currently implemented for CCP4ScalerA and DialsScaler
brehm_diederichs_files_in = []
for epoch in self._sweep_handler.get_epochs():
si = self._sweep_handler.get_sweep_information(epoch)
brehm_diederichs_files_in.append(self.get_mtz_data_from_sweep_info(si))
# now run cctbx.brehm_diederichs to figure out the indexing hand for
# each sweep
from xia2.Wrappers.Cctbx.BrehmDiederichs import BrehmDiederichs
brehm_diederichs = BrehmDiederichs()
brehm_diederichs.set_working_directory(self.get_working_directory())
auto_logfiler(brehm_diederichs)
brehm_diederichs.set_input_filenames(brehm_diederichs_files_in)
# 1 or 3? 1 seems to work better?
brehm_diederichs.set_asymmetric(1)
brehm_diederichs.run()
reindexing_dict = brehm_diederichs.get_reindexing_dict()
for i, epoch in enumerate(self._sweep_handler.get_epochs()):
si = self._sweep_handler.get_sweep_information(epoch)
hklin = brehm_diederichs_files_in[i]
reindex_op = reindexing_dict.get(os.path.abspath(hklin))
assert reindex_op is not None
if reindex_op != "h,k,l":
self.apply_reindex_operator_to_sweep_info(
si, reindex_op, reason="match reference"
)
def assess_resolution_limits(
self,
hklin,
user_resolution_limits,
use_isigma=True,
use_misigma=True,
experiments=None,
reflections=None,
):
"""Assess resolution limits from hklin and sweep batch info"""
# Implemented for DialsScaler and CCP4ScalerA
highest_resolution = 100.0
highest_suggested_resolution = None
for epoch in self._sweep_handler.get_epochs():
si = self._sweep_handler.get_sweep_information(epoch)
_, __, dname = si.get_project_info()
sname = si.get_sweep_name()
intgr = si.get_integrater()
start, end = si.get_batch_range()
if (dname, sname) in self._scalr_resolution_limits:
continue
elif (dname, sname) in user_resolution_limits:
limit = user_resolution_limits[(dname, sname)]
self._scalr_resolution_limits[(dname, sname)] = (limit, None)
if limit < highest_resolution:
highest_resolution = limit
Chatter.write(
"Resolution limit for %s: %5.2f (user provided)" % (dname, limit)
)
continue
if hklin:
limit, reasoning = self._estimate_resolution_limit(
hklin,
batch_range=(start, end),
use_isigma=use_isigma,
use_misigma=use_misigma,
)
else:
limit, reasoning = self._estimate_resolution_limit(
hklin=None,
batch_range=(start, end),
use_isigma=use_isigma,
use_misigma=use_misigma,
reflections=reflections,
experiments=experiments,
)
if PhilIndex.params.xia2.settings.resolution.keep_all_reflections:
suggested = limit
if (
highest_suggested_resolution is None
or limit < highest_suggested_resolution
):
highest_suggested_resolution = limit
limit = intgr.get_detector().get_max_resolution(
intgr.get_beam_obj().get_s0()
)
self._scalr_resolution_limits[(dname, sname)] = (limit, suggested)
Debug.write("keep_all_reflections set, using detector limits")
Debug.write("Resolution for sweep %s: %.2f" % (sname, limit))
if (dname, sname) not in self._scalr_resolution_limits:
self._scalr_resolution_limits[(dname, sname)] = (limit, None)
self.set_scaler_done(False)
if limit < highest_resolution:
highest_resolution = limit
limit, suggested = self._scalr_resolution_limits[(dname, sname)]
if suggested is None or limit == suggested:
reasoning_str = ""
if reasoning:
reasoning_str = " (%s)" % reasoning
Chatter.write(
"Resolution for sweep %s/%s: %.2f%s"
% (dname, sname, limit, reasoning_str)
)
else:
Chatter.write(
"Resolution limit for %s/%s: %5.2f (%5.2f suggested)"
% (dname, sname, limit, suggested)
)
if highest_suggested_resolution is not None and highest_resolution >= (
highest_suggested_resolution - 0.004
):
Debug.write(
"Dropping resolution cut-off suggestion since it is"
" essentially identical to the actual resolution limit."
)
highest_suggested_resolution = None
self._scalr_highest_resolution = highest_resolution
if highest_suggested_resolution is not None:
Debug.write(
"Suggested highest resolution is %5.2f (%5.2f suggested)"
% (highest_resolution, highest_suggested_resolution)
)
else:
Debug.write("Scaler highest resolution set to %5.2f" % highest_resolution)
return highest_suggested_resolution
def anomalous_probability_plot(intensities, expected_delta=None):
from scitbx.math import distributions
from scitbx.array_family import flex
assert intensities.is_unique_set_under_symmetry()
assert intensities.anomalous_flag()
dI = intensities.anomalous_differences()
if not dI.size():
return None, None, None
y = dI.data() / dI.sigmas()
perm = flex.sort_permutation(y)
y = y.select(perm)
distribution = distributions.normal_distribution()
x = distribution.quantiles(y.size())
if expected_delta is not None:
sel = flex.abs(x) < expected_delta
x = x.select(sel)
y = y.select(sel)
fit = flex.linear_regression(x, y)
assert fit.is_well_defined()
return fit.slope(), fit.y_intercept(), x.size()
```
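The `anomalous_probability_plot` helper above sorts the normalised anomalous differences ΔI/σ(ΔI) and regresses them against the expected normal quantiles; a slope near 1 and an intercept near 0 indicate no anomalous signal beyond noise. The following is a minimal standalone sketch of the same idea using numpy/scipy in place of the scitbx `flex`/`distributions` machinery used above; the function and variable names are illustrative only, not part of xia2.

```python
# Hedged sketch: normal probability plot of anomalous differences.
# Assumes numpy and scipy are available; the production code uses scitbx.
import numpy as np
from scipy.stats import norm

def anomalous_np_slope_sketch(d_anom, sig_anom, expected_delta=None):
    """Fit a line to sorted dI/sig(dI) against expected normal quantiles."""
    y = np.sort(np.asarray(d_anom, dtype=float) / np.asarray(sig_anom, dtype=float))
    n = y.size
    # Approximate expected order statistics of a standard normal distribution
    x = norm.ppf((np.arange(1, n + 1) - 0.5) / n)
    if expected_delta is not None:
        sel = np.abs(x) < expected_delta
        x, y = x[sel], y[sel]
    slope, intercept = np.polyfit(x, y, 1)
    return slope, intercept, x.size

# Pure noise should give slope ~1 and intercept ~0
rng = np.random.default_rng(0)
print(anomalous_np_slope_sketch(rng.normal(size=1000), np.ones(1000)))
```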
#### File: Modules/Xia2html/Magpie.py
```python
from __future__ import absolute_import, division, print_function
import copy
#######################################################################
# Import modules that this module depends on
#######################################################################
import re
import xia2.Modules.Xia2html.smartie as smartie
# Magpie.py: Text file processor
# Copyright (C) Diamond 2009 <NAME>
#
########################################################################
#
# Magpie.py
#
########################################################################
#
# Provide classes and functions for extracting information from
# text files based on pattern matching
#
__cvs_id__ = "$Id$"
__version__ = "0.0.1"
#######################################################################
# Module constants
#######################################################################
# Constants for defining Blocks
INCLUDE = 0
EXCLUDE = 1
EXCLUDE_START = 2
EXCLUDE_END = 3
#######################################################################
# Class definitions
#######################################################################
# Magpie
#
# Generic text file processing class
class Magpie:
"""Generic text processing class
Creates a configurable line-by-line text processor object which
can process input from a file or from text."""
def __init__(self, txtfile=None, verbose=False):
"""New Magpie object
Optionally, 'txtfile' is the name and full path of the file
to be processed.
"""
# Source text file
self.__txtfile = txtfile
# Verbose output
self.__verbose = verbose
# List of data items
self.__data = []
# List of blocks
self.__blocks = []
# Maximum buffer size (number of lines)
self.__buffersize = 50
# Patterns to match against
self.__regex = PatternMatcher()
return
def reset(self):
"""Reset the processor
Erases any results of previous processing but leaves the
pattern and block definitions intact. This enables an
application to use the processor multiple times without
needing to redefine patterns and blocks."""
self.__data = []
return
def defineBlock(
self,
name,
starts_with,
ends_with,
include_flag=INCLUDE,
pattern=None,
pattern_keys=None,
):
"""Define a block of lines to collect"""
new_block = Block(
name,
starts_with,
ends_with,
pattern,
pattern_keys,
include_flag,
verbose=self.__verbose,
)
self.__blocks.append(new_block)
return new_block
def addData(self, name, data):
"""Add a data element"""
new_data = Data(name, data)
self.__data.append(new_data)
return new_data
def addPattern(self, name, pattern, keys=None):
"""Define a new regexp pattern"""
self.__regex.addPattern(name, pattern, keys)
return
def getData(self, name=""):
"""Return a list of Data elements
If a 'name' string is specified then the list will
be limited to the Data elements that match the name;
otherwise the list will contain all Data elements."""
if name == "":
return copy.copy(self.__data)
else:
data = []
for datum in self.__data:
if datum.name() == name:
data.append(datum)
return data
def __getitem__(self, name):
"""Implements Magpie[name] for fetching items
Return a list of Data elements matching 'name'."""
return self.getData(name)
def __iter__(self):
"""Return an iterator for this object
Implements 'for item in Magpie:...'"""
# Use iter() to turn the list of data items into
# an iterator
return iter(self.__data)
def count(self, name):
"""Return number of occurances of Data elements called 'name'"""
return len(self.getData(name))
def process(self):
"""Run the processor on the source text"""
self.processFile(self.__txtfile)
def processFile(self, filename):
"""Run the processor on a file"""
with open(filename, "r") as txt:
self.__process(txt)
def processText(self, txt):
"""Run the processor on a block of text"""
self.__process(str(txt).split("\n"))
def __process(self, source):
"""Process source text
'source' must be an iterable object (typically either an
open file object, or a list of lines of text) which
acts as the data source.
This method steps through the data source line-by-line,
extracting and storing data from fragments that match
the Pattern and Block definitions."""
# Smartie buffer object stores chunks of text
buff = smartie.buffer(self.__buffersize)
# Move through the file buffering chunks
# and testing them against our stored patterns
for line in source:
buff.append(line)
# Get a chunk of text to process
##bufftext = buff.tail()
# Get the whole buffer as text
# Maybe later we can optimise by having different
# chunk sizes explicitly set for different patterns
bufftext = buff.all()
# Test the line for matches
for pattern in self.__regex.listPatterns():
test = self.__regex.test(pattern, bufftext)
if test:
self.__print("Matched pattern '" + str(pattern) + "'")
for key in test.keys():
self.__print(">>> " + str(key) + ": " + str(test[key]))
text = test[pattern]
self.addData(pattern, test)
# Clear the buffer and break out the loop
buff.clear()
break
# Deal with blocks
for block in self.__blocks:
if not block.isComplete():
block.add(line)
if block.isComplete():
# Create a new Data object to
# store the block and then reset
self.addData(block.name(), block.getData())
block.reset()
self.__print("Finished")
def __print(self, text):
"""Internal: print to stdout
Controlled by the __verbose attribute."""
if self.__verbose:
print(text)
class Data:
"""Data items from the output"""
def __init__(self, name, data):
self.__name = name
self.__data = data
return
def __getitem__(self, key):
"""Implement x = Data[key]
Wrapper for value() method."""
return self.value(key)
def __setitem__(self, key, value):
"""Implement Data[key] = x
Wrapper for setValue() method."""
return self.setValue(key, value)
def __str__(self):
"""Return string representation"""
try:
return self.__data[self.__name]
except KeyError:
# Assume that the name isn't defined
# Return a concatenation of all the
# data items
text = ""
for key in self.__data.keys():
text += str(self.__data[key]) + "\n"
text = text.strip("\n")
return text
def keys(self):
"""Return the keys of the data dictionary"""
return self.__data.keys()
def name(self):
"""Return the name of the Data object"""
return self.__name
def data(self):
"""Return the data dictionary"""
return self.__data
def value(self, key):
"""Return the value stored against key"""
return self.__data[key]
def setValue(self, key, value):
"""Set the value of a data item
Sets the value of 'key' to 'value'. Doesn't
check if 'key' already exists."""
self.__data[key] = value
return
class Block:
"""Chunk of output delimited by start/end patterns
'name' is an identifier, 'starts_with' and 'ends_with'
are text strings which mark the beginning and end of the
block of output that is of interest.
To match blocks ending (or starting) with a blank line
(i.e. a line containing whitespace only), set the 'ends_with'
(or 'starts_with') parameter to an empty string i.e. ''.
include_flag determines whether the delimiters should
also be added to the block. Values are:
INCLUDE : include both start and end delimiters (the default)
EXCLUDE : exclude both start and end delimiters
EXCLUDE_START : include only the end delimiter
EXCLUDE_END : include only the start delimiter
'pattern' defines an optional regular expression pattern.
If this provided then it will be applied to the matching
text when the block is complete. If 'pattern_keys' are also
provided then each key will create a data element with the
matching regular expression group."""
def __init__(
self,
name,
starts_with,
ends_with,
pattern=None,
pattern_keys=None,
include_flag=INCLUDE,
verbose=False,
):
self.__name = name
self.__text = ""
self.__start = starts_with
self.__end = ends_with
self.__include = include_flag
self.__verbose = verbose
if pattern:
self.__pattern = Pattern(name, pattern, pattern_keys)
else:
self.__pattern = None
self.__active = False
self.__complete = False
def __repr__(self):
return str(self.__name) + ":\n" + str(self.__text)
def name(self):
"""Returns the name of the block"""
return self.__name
def text(self):
"""Returns the block text"""
return self.__text
def isComplete(self):
"""Check if the block is complete (i.e. end delimiter reached)"""
return self.__complete
def isActive(self):
"""Check if the block is active (i.e. start delimiter supplied)"""
return self.__active
def getData(self):
"""Return data from the block"""
data = dict()
if self.__pattern:
# Apply the regular expression pattern
data = self.__pattern.test(self.__text)
if not data:
# Associate the name of the block with
# the stored text
data = {self.__name: self.__text}
self.__print("Matched block '" + str(self.__name) + "'")
for key in data.keys():
self.__print(">>> " + str(key) + ": " + str(data[key]))
return data
def add(self, text):
"""Present text to be added to the block
Text will only be added if the block is active but not
complete. The block is activated by text which includes the
start delimiter substring.
Once the block is active all text that is supplied is stored
until text is supplied which includes the end delimiter - at
this point the block is complete and will not accept any more
text."""
if self.__complete:
# Can't add more
return
if not self.__active:
# Check for start delimiter
if self.__contains(text, self.__start):
self.__active = True
if self.__include == EXCLUDE or self.__include == EXCLUDE_START:
# Don't store the start delimiter line
return
else:
# Add text
self.__text += text
return
else:
return
# Check for end delimiter
if self.__contains(text, self.__end):
self.__complete = True
if self.__include == EXCLUDE or self.__include == EXCLUDE_END:
# Don't store the end delimiter line
return
# Add text
self.__text += text
return
def reset(self):
"""Reset the block to accept data
This frees a "completed" block by resetting it to the
initial (empty) state"""
self.__text = ""
self.__active = False
self.__complete = False
def __contains(self, text, pattern):
"""Internal: test if text contains a pattern
Used by the 'add' method to determine if supplied
'text' contains the text in 'pattern'. Returns True
if a match is found and False otherwise.
If 'pattern' evaluates as False (e.g. an empty string)
then 'text' will match if it contains whitespace only."""
if not pattern:
return str(text).isspace()
elif str(text).find(pattern) > -1:
return True
return False
def __print(self, text):
"""Internal: print to stdout
Controlled by the __verbose attribute."""
if self.__verbose:
print(text)
class PatternMatcher:
"""Store and invoke regexp pattern matches
For each regular expression supplied along with a name
via the addPattern method, a new Pattern object is
created and stored. Multiple patterns can be associated
with the same name.
A list of (unique) pattern names can be retrieved via the
listPatterns method.
A text string can be tested against the named expression(s)
using the test method."""
def __init__(self):
# List of the regular expression Pattern objects
self.__patterns = []
# List of stored (unique) pattern names
self.__names = []
return
def addPattern(self, name, pattern, keys=None):
"""Add a named pattern to the PatternMatcher
Adds the regular expression pattern associated with
'name'.
Optionally, also associate a list of keys with the
pattern. Each element of the list should correspond
to a group defined in the regular expression. Note
that keys cannot be the same as the pattern name."""
# Create and store the Pattern object
self.__patterns.append(Pattern(name, pattern, keys))
# Store the name
if not self.__names.count(name):
self.__names.append(name)
return
def test(self, name, text):
"""Test text against named regexp pattern(s)
Test each stored pattern associated with 'name'. When a
match is found then a Python dictionary is returned
with information about the match (see the test
method of the Pattern object for the details).
If no match is found (or if there are no patterns
with the supplied name) then an empty Python dictionary
instance is returned."""
for pattern in self.__patterns:
if pattern.name() == name:
# Test this pattern
test = pattern.test(text)
if test:
return test
# No matches - return an empty dictionary
return dict()
def listPatterns(self):
"""Return the list of pattern names"""
return self.__names
class Pattern:
"""Store and invoke a regular expression.
Stores a single regular expression associated with
a name. Arbitrary text can be tested against the stored
pattern using the test method.
Optionally, a list of keys can also be associated with
the pattern. Each element of the list should correspond
to a group defined in the regular expression. Note
that none of the keys can be the same as the pattern
name."""
def __init__(self, name, pattern, keys=None):
self.__name = name
self.__pattern = re.compile(pattern, re.DOTALL)
self.__keys = keys
def __repr__(self):
return str(self.__name)
def name(self):
"""Return the name of the pattern"""
return self.__name
def test(self, text):
"""Test text against the regular expression pattern
Returns a dictionary object. If the text matches the
regular expression then the dictionary will be populated
with data extracted from the text as described.
The element with key 'name' will always contain the full
matching text. If a set of keys was also supplied when
the pattern was defined then the dictionary will also
contain elements matching these keys, with the value of
the corresponding regexp group assigned.
If there is no match then the dictionary will be empty."""
data = dict()
match = self.__pattern.search(text)
if match:
# Build a dictionary for the match
data[self.__name] = match.group(0)
# Check if there are associated keys for
# this pattern
#
# Populate the "data" dictionary with the
# value of each regexp group assigned to
# the corresponding keys in order
#
# If there are more keys than groups then the
# remaining keys are assigned the value None
if self.__keys:
i = 1
for key in self.__keys:
try:
data[key] = match.group(i)
except IndexError:
# Insufficient groups for
# number of keys
data[key] = None
i += 1
##return match.group(0)
return data
# Tabulator
#
# Break up a raw text "table"
class Tabulator:
"""Extract data from a raw text 'table'
The Tabulator will break up a supplied block of text treating
each line as a table 'row', and split each row into individual
data items according to a specified delimiter.
The first data item in each "row" becomes a key to retrieve that
row (which is stored as a Python list containing all the data
items in the row).
For example to access the 'High' row of this 'table':
High 5.0 9.0
Medium 3.0 4.5
Low 1.0 0.0
use Tabulator['High']. To access the last data item in the 'Medium'
row, use Tabulator['Medium'][1]."""
def __init__(self, tabular_data, delimiter="\t"):
"""Create and populate a new Tabulator object
'tabular_data' is the raw text of the 'table'; 'delimiter' is the string used to split each row into items (defaults to a tab character)."""
self.__tabular_data = tabular_data
self.__delimiter = delimiter
# List of keys (stored data items)
self.__keys = []
self.__data = {}
# Extract data and populate the data structure
self.__extract_tabular_data(tabular_data)
def __extract_tabular_data(self, tabular_data):
"""Internal: build data structure from tabular data"""
for row in tabular_data.strip("\n").split("\n"):
row_data = row.split(self.__delimiter)
key = row_data[0].strip()
self.__keys.append(key)
self.__data[key] = row_data
def __getitem__(self, key):
"""Implement x = Tabulator[key] for get operations
Returns the 'row' of data associated with the key 'name'
i.e. a list of items."""
return self.__data[key]
def has_key(self, key):
"""Check if a row called 'key' exists"""
return key in self.__data
def keys(self):
"""Return the list of data item names (keys)"""
return self.__keys
def table(self):
"""Return the original data that was supplied"""
return self.__tabular_data
#######################################################################
# Module Functions
#######################################################################
def version():
"""Return the version of the Magpie module"""
return __version__
```
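Below is a small, hypothetical usage sketch for the `Magpie` processor defined above. The import path is inferred from the file header, and the pattern name, keys and input text are invented; only methods defined in this module are used, and the exact buffering behaviour depends on the bundled `smartie` module.

```python
# Hedged usage sketch (invented pattern name, keys and input text).
from xia2.Modules.Xia2html.Magpie import INCLUDE, Magpie

processor = Magpie()

# A regexp with one group mapped onto the key 'value'
processor.addPattern("wavelength", r"Wavelength: +([0-9.]+) +A", keys=["value"])

# A block delimited by start/end markers, keeping both delimiter lines
processor.defineBlock("summary", "Summary begins", "Summary ends", INCLUDE)

example_text = """Wavelength: 0.9795 A
Summary begins
Some tabulated results would live here
Summary ends
"""

processor.processText(example_text)
print(processor.count("wavelength"))        # expected: 1
print(processor["wavelength"][0]["value"])  # expected: '0.9795'
print(str(processor["summary"][0]))         # the collected block text
```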
#### File: Schema/Exceptions/NegativeMosaicError.py
```python
from __future__ import absolute_import, division, print_function
class NegativeMosaicError(Exception):
"""An exception to be raised when the mosaic spread is negative."""
def __init__(self, value):
self.parameter = value
def __str__(self):
return repr(self.parameter)
```
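A brief, hypothetical illustration of how callers that validate a refined mosaic spread might use this exception; the import path follows the file header and `check_mosaic` is an invented helper, not part of xia2.

```python
# Hedged sketch: raising and handling NegativeMosaicError (check_mosaic is invented).
from xia2.Schema.Exceptions.NegativeMosaicError import NegativeMosaicError

def check_mosaic(mosaic):
    if mosaic < 0:
        raise NegativeMosaicError("mosaic spread %f is negative" % mosaic)
    return mosaic

try:
    check_mosaic(-0.05)
except NegativeMosaicError as e:
    print("Rejecting solution:", e)
```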
#### File: xia2/Schema/__init__.py
```python
from __future__ import absolute_import, division, print_function
import collections
import os
from xia2.Handlers.Phil import PhilIndex
class _ImagesetCache(dict):
pass
imageset_cache = _ImagesetCache()
def longest_common_substring(s1, s2):
m = [[0] * (1 + len(s2)) for i in xrange(1 + len(s1))]
longest, x_longest = 0, 0
for x in xrange(1, 1 + len(s1)):
for y in xrange(1, 1 + len(s2)):
if s1[x - 1] == s2[y - 1]:
m[x][y] = m[x - 1][y - 1] + 1
if m[x][y] > longest:
longest = m[x][y]
x_longest = x
else:
m[x][y] = 0
return s1[x_longest - longest : x_longest]
def load_imagesets(
template,
directory,
id_image=None,
image_range=None,
use_cache=True,
reversephi=False,
):
global imageset_cache
from dxtbx.model.experiment_list import ExperimentListFactory
from xia2.Applications.xia2setup import known_hdf5_extensions
from dxtbx.imageset import ImageSequence as ImageSweep
full_template_path = os.path.join(directory, template)
if full_template_path not in imageset_cache or not use_cache:
from dxtbx.model.experiment_list import BeamComparison
from dxtbx.model.experiment_list import DetectorComparison
from dxtbx.model.experiment_list import GoniometerComparison
params = PhilIndex.params.xia2.settings
compare_beam = BeamComparison(
wavelength_tolerance=params.input.tolerance.beam.wavelength,
direction_tolerance=params.input.tolerance.beam.direction,
polarization_normal_tolerance=params.input.tolerance.beam.polarization_normal,
polarization_fraction_tolerance=params.input.tolerance.beam.polarization_fraction,
)
compare_detector = DetectorComparison(
fast_axis_tolerance=params.input.tolerance.detector.fast_axis,
slow_axis_tolerance=params.input.tolerance.detector.slow_axis,
origin_tolerance=params.input.tolerance.detector.origin,
)
compare_goniometer = GoniometerComparison(
rotation_axis_tolerance=params.input.tolerance.goniometer.rotation_axis,
fixed_rotation_tolerance=params.input.tolerance.goniometer.fixed_rotation,
setting_rotation_tolerance=params.input.tolerance.goniometer.setting_rotation,
)
scan_tolerance = params.input.tolerance.scan.oscillation
format_kwargs = {
"dynamic_shadowing": params.input.format.dynamic_shadowing,
"multi_panel": params.input.format.multi_panel,
}
if os.path.splitext(full_template_path)[-1] in known_hdf5_extensions:
# if we are passed the correct file, use this, else look for a master
# file (i.e. something_master.h5)
if os.path.exists(full_template_path) and os.path.isfile(
full_template_path
):
master_file = full_template_path
else:
import glob
g = glob.glob(os.path.join(directory, "*_master.h5"))
master_file = None
for p in g:
substr = longest_common_substring(template, p)
if substr:
if master_file is None or (
len(substr)
> len(longest_common_substring(template, master_file))
):
master_file = p
if master_file is None:
raise RuntimeError("Can't find master file for %s" % full_template_path)
unhandled = []
experiments = ExperimentListFactory.from_filenames(
[master_file],
verbose=False,
unhandled=unhandled,
compare_beam=compare_beam,
compare_detector=compare_detector,
compare_goniometer=compare_goniometer,
scan_tolerance=scan_tolerance,
format_kwargs=format_kwargs,
)
assert len(unhandled) == 0, (
"unhandled image files identified: %s" % unhandled
)
else:
from dxtbx.sequence_filenames import locate_files_matching_template_string
params = PhilIndex.get_python_object()
read_all_image_headers = params.xia2.settings.read_all_image_headers
if read_all_image_headers:
paths = sorted(
locate_files_matching_template_string(full_template_path)
)
unhandled = []
experiments = ExperimentListFactory.from_filenames(
paths,
verbose=False,
unhandled=unhandled,
compare_beam=compare_beam,
compare_detector=compare_detector,
compare_goniometer=compare_goniometer,
scan_tolerance=scan_tolerance,
format_kwargs=format_kwargs,
)
assert len(unhandled) == 0, (
"unhandled image files identified: %s" % unhandled
)
else:
from dxtbx.model.experiment_list import ExperimentListTemplateImporter
importer = ExperimentListTemplateImporter(
[full_template_path], format_kwargs=format_kwargs
)
experiments = importer.experiments
imagesets = [
iset for iset in experiments.imagesets() if isinstance(iset, ImageSweep)
]
assert len(imagesets) > 0, "no imageset found"
imageset_cache[full_template_path] = collections.OrderedDict()
if reversephi:
for imageset in imagesets:
goniometer = imageset.get_goniometer()
goniometer.set_rotation_axis(
tuple(-g for g in goniometer.get_rotation_axis())
)
reference_geometry = PhilIndex.params.xia2.settings.input.reference_geometry
if reference_geometry is not None and len(reference_geometry) > 0:
update_with_reference_geometry(imagesets, reference_geometry)
# Update the geometry
params = PhilIndex.params.xia2.settings
update_geometry = []
from dials.command_line.dials_import import ManualGeometryUpdater
from dials.util.options import geometry_phil_scope
# Then add manual geometry
work_phil = geometry_phil_scope.format(params.input)
diff_phil = geometry_phil_scope.fetch_diff(source=work_phil)
if diff_phil.as_str() != "":
update_geometry.append(ManualGeometryUpdater(params.input))
imageset_list = []
for imageset in imagesets:
for updater in update_geometry:
imageset = updater(imageset)
imageset_list.append(imageset)
imagesets = imageset_list
from scitbx.array_family import flex
for imageset in imagesets:
scan = imageset.get_scan()
exposure_times = scan.get_exposure_times()
epochs = scan.get_epochs()
if exposure_times.all_eq(0) or exposure_times[0] == 0:
exposure_times = flex.double(exposure_times.size(), 1)
scan.set_exposure_times(exposure_times)
elif not exposure_times.all_gt(0):
exposure_times = flex.double(exposure_times.size(), exposure_times[0])
scan.set_exposure_times(exposure_times)
if epochs.size() > 1 and not epochs.all_gt(0):
if epochs[0] == 0:
epochs[0] = 1
for i in range(1, epochs.size()):
epochs[i] = epochs[i - 1] + exposure_times[i - 1]
scan.set_epochs(epochs)
_id_image = scan.get_image_range()[0]
imageset_cache[full_template_path][_id_image] = imageset
if id_image is not None:
return [imageset_cache[full_template_path][id_image]]
elif image_range is not None:
for imageset in imageset_cache[full_template_path].values():
scan = imageset.get_scan()
scan_image_range = scan.get_image_range()
if (
image_range[0] >= scan_image_range[0]
and image_range[1] <= scan_image_range[1]
):
imagesets = [
imageset[
image_range[0]
- scan_image_range[0] : image_range[1]
+ 1
- scan_image_range[0]
]
]
assert len(imagesets[0]) == image_range[1] - image_range[0] + 1, len(
imagesets[0]
)
return imagesets
return imageset_cache[full_template_path].values()
def update_with_reference_geometry(imagesets, reference_geometry_list):
assert reference_geometry_list is not None
assert len(reference_geometry_list) >= 1
reference_components = load_reference_geometries(reference_geometry_list)
for imageset in imagesets:
reference_geometry = find_relevant_reference_geometry(
imageset, reference_components
)
imageset.set_beam(reference_geometry["beam"])
imageset.set_detector(reference_geometry["detector"])
def load_reference_geometries(geometry_file_list):
from dxtbx.serialize import load
reference_components = []
for file in geometry_file_list:
try:
experiments = load.experiment_list(file, check_format=False)
assert len(experiments.detectors()) == 1
assert len(experiments.beams()) == 1
reference_detector = experiments.detectors()[0]
reference_beam = experiments.beams()[0]
except Exception:
experiments = load.experiment_list(file)
imageset = experiments.imagesets()[0]
reference_detector = imageset.get_detector()
reference_beam = imageset.get_beam()
reference_components.append(
{"detector": reference_detector, "beam": reference_beam, "file": file}
)
import itertools
for combination in itertools.combinations(reference_components, 2):
if compare_geometries(combination[0]["detector"], combination[1]["detector"]):
from xia2.Handlers.Streams import Chatter
Chatter.write(
"Reference geometries given in %s and %s are too similar"
% (combination[0]["file"], combination[1]["file"])
)
raise Exception("Reference geometries too similar")
return reference_components
def compare_geometries(detectorA, detectorB):
return detectorA.is_similar_to(
detectorB,
fast_axis_tolerance=0.1,
slow_axis_tolerance=0.1,
origin_tolerance=10,
ignore_trusted_range=True,
)
def find_relevant_reference_geometry(imageset, geometry_list):
for geometry in geometry_list:
if compare_geometries(geometry["detector"], imageset.get_detector()):
break
else:
raise Exception("No appropriate reference geometry found")
return geometry
```
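When given an HDF5 template, the loader above locates the `*_master.h5` file sharing the longest common substring with the supplied template. The following standalone sketch shows that selection heuristic, assuming the helper can be imported from this module; the file names are invented.

```python
# Hedged sketch of the master-file selection heuristic used above.
from xia2.Schema import longest_common_substring

template = "thaumatin_1_####.h5"
candidates = ["thaumatin_1_master.h5", "lysozyme_2_master.h5"]

master_file = None
for p in candidates:
    substr = longest_common_substring(template, p)
    if substr and (
        master_file is None
        or len(substr) > len(longest_common_substring(template, master_file))
    ):
        master_file = p

print(master_file)  # expected: thaumatin_1_master.h5 (shares 'thaumatin_1_')
```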
#### File: Modules/Indexer/test_labelit_indexer.py
```python
from __future__ import absolute_import, division, print_function
import pytest
def test_labelit_indexer(regression_test, ccp4, dials_data, run_in_tmpdir):
template = dials_data("insulin").join("insulin_1_###.img").strpath
from xia2.Modules.Indexer.LabelitIndexer import LabelitIndexer
from xia2.DriverExceptions.NotAvailableError import NotAvailableError
try:
ls = LabelitIndexer(indxr_print=True)
except NotAvailableError:
pytest.skip("labelit not found")
ls.set_working_directory(run_in_tmpdir.strpath)
from dxtbx.model.experiment_list import ExperimentListTemplateImporter
importer = ExperimentListTemplateImporter([template])
experiments = importer.experiments
imageset = experiments.imagesets()[0]
ls.add_indexer_imageset(imageset)
ls.index()
assert ls.get_indexer_cell() == pytest.approx(
(78.58, 78.58, 78.58, 90, 90, 90), abs=0.5
)
solution = ls.get_solution()
assert solution["rmsd"] <= 0.2
assert solution["metric"] <= 0.16
assert solution["number"] == 22
assert solution["lattice"] == "cI"
assert solution["mosaic"] <= 0.25
assert solution["nspots"] == pytest.approx(860, abs=30)
beam_centre = ls.get_indexer_beam_centre()
assert beam_centre == pytest.approx((94.3416, 94.4994), abs=2e-1)
assert ls.get_indexer_images() == [(1, 1), (22, 22), (45, 45)]
print(ls.get_indexer_experiment_list()[0].crystal)
print(ls.get_indexer_experiment_list()[0].detector)
json_str = ls.as_json()
# print(json_str)
ls1 = LabelitIndexer.from_json(string=json_str)
ls1.index()
print(ls.get_indexer_experiment_list()[0].crystal)
assert ls.get_indexer_beam_centre() == ls1.get_indexer_beam_centre()
assert ls1.get_indexer_images() == [
[1, 1],
[22, 22],
[45, 45],
] # in JSON tuples become lists
assert ls.get_distance() == ls1.get_distance()
ls.eliminate()
ls1.eliminate()
print(ls1.get_indexer_experiment_list()[0].crystal)
assert ls.get_indexer_beam_centre() == ls1.get_indexer_beam_centre()
assert ls.get_indexer_images() == [(1, 1), (22, 22), (45, 45)]
assert ls1.get_indexer_images() == [
[1, 1],
[22, 22],
[45, 45],
] # in JSON tuples become lists
assert ls.get_distance() == ls1.get_distance()
print(ls1.get_indexer_cell())
print(ls1.get_solution())
assert ls.get_indexer_cell() == pytest.approx(
(111.11, 111.11, 68.08, 90.0, 90.0, 120.0), abs=5e-1
)
solution = ls1.get_solution()
assert solution["rmsd"] >= 0.07, solution["rmsd"]
assert solution["metric"] == pytest.approx(0.1291, abs=1e-1)
assert solution["lattice"] == "hR", solution["lattice"]
assert solution["mosaic"] <= 0.3, solution["mosaic"]
assert solution["nspots"] == pytest.approx(856, abs=30)
```
#### File: Test/regression/test_insulin.py
```python
from __future__ import absolute_import, division, print_function
import procrunner
import pytest
import xia2.Test.regression
expected_data_files = [
"AUTOMATIC_DEFAULT_NATIVE_SWEEP1_INTEGRATE.mtz",
"AUTOMATIC_DEFAULT_free.mtz",
"AUTOMATIC_DEFAULT_scaled.sca",
"AUTOMATIC_DEFAULT_scaled_unmerged.mtz",
"AUTOMATIC_DEFAULT_scaled_unmerged.sca",
]
def test_2d(regression_test, dials_data, tmpdir, ccp4):
command_line = [
"xia2",
"pipeline=2di",
"nproc=1",
"trust_beam_centre=True",
dials_data("insulin").strpath,
]
result = procrunner.run(command_line, working_directory=tmpdir.strpath)
success, issues = xia2.Test.regression.check_result(
"insulin.2d", result, tmpdir, ccp4, expected_data_files=expected_data_files
)
assert success, issues
```
#### File: Test/System/test_run_xia2.py
```python
from __future__ import absolute_import, division, print_function
def test_start_xia2():
import procrunner
result = procrunner.run(["xia2"])
assert result["exitcode"] == 0
```
#### File: xia2/Toolkit/MendBKGINIT.py
```python
from __future__ import absolute_import, division, print_function
import binascii
import copy
import sys
from cbflib_adaptbx import compress, uncompress
from scitbx.array_family import flex
def recompute_BKGINIT(bkginit_in, init_lp, bkginit_out):
start_tag = binascii.unhexlify("0c1a04d5")
data = open(bkginit_in, "rb").read()
data_offset = data.find(start_tag) + 4
cbf_header = data[: data_offset - 4]
fast = 0
slow = 0
length = 0
for record in cbf_header.split("\n"):
if "X-Binary-Size-Fastest-Dimension" in record:
fast = int(record.split()[-1])
elif "X-Binary-Size-Second-Dimension" in record:
slow = int(record.split()[-1])
elif "X-Binary-Number-of-Elements" in record:
length = int(record.split()[-1])
assert length == fast * slow
pixel_values = uncompress(packed=data[data_offset:], fast=fast, slow=slow)
untrusted = []
for record in open(init_lp):
if "UNTRUSTED_RECTANGLE=" in record:
untrusted.append(map(int, record.replace(".", " ").split()[1:5]))
modified_pixel_values = copy.deepcopy(pixel_values)
for s in range(5, slow - 5):
y = s + 1
for f in range(5, fast - 5):
x = f + 1
trusted = True
for x0, x1, y0, y1 in untrusted:
if (x >= x0) and (x <= x1) and (y >= y0) and (y <= y1):
trusted = False
break
if trusted:
pixel = pixel_values[s * fast + f]
if pixel < 0:
pixels = []
for j in range(-2, 3):
for i in range(-2, 3):
p = pixel_values[(s + j) * fast + f + i]
if p > 0:
pixels.append(p)
modified_pixel_values[s * fast + f] = int(sum(pixels) / len(pixels))
open(bkginit_out, "wb").write(
cbf_header + start_tag + compress(modified_pixel_values)
)
return
if __name__ == "__main__":
recompute_BKGINIT("BKGINIT.cbf", "INIT.LP", sys.argv[1])
```
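The repair above replaces each negative (masked) background pixel with the mean of the positive values in its 5x5 neighbourhood, skipping XDS UNTRUSTED_RECTANGLE regions. Here is a small numpy sketch of the same neighbourhood-averaging step on a plain 2D array; the real code operates on the CBF byte-offset compressed pixel stream via cbflib_adaptbx and also honours the untrusted rectangles.

```python
# Hedged sketch: fill negative pixels with the mean of positive 5x5 neighbours.
import numpy as np

def fill_negative_pixels(image):
    fixed = image.copy()
    slow, fast = image.shape
    for s in range(2, slow - 2):
        for f in range(2, fast - 2):
            if image[s, f] < 0:
                window = image[s - 2 : s + 3, f - 2 : f + 3]
                positives = window[window > 0]
                if positives.size:
                    fixed[s, f] = int(positives.mean())
    return fixed

demo = np.full((10, 10), 100, dtype=int)
demo[5, 5] = -1
print(fill_negative_pixels(demo)[5, 5])  # expected: 100
```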
#### File: xia2/Toolkit/PolyFitter.py
```python
from __future__ import absolute_import, division, print_function
import math
from cctbx.array_family import flex
from scitbx import lbfgs
def poly_residual(xp, y, params):
"""Compute the residual between the observations y[i] and sum_j
params[j] x[i]^j. For efficiency, x[i]^j are pre-calculated in xp."""
r = 0.0
n = len(params)
c = len(y)
e = flex.double([flex.sum(xp[j] * params) for j in range(c)])
return flex.sum(flex.pow2(y - e))
def poly_gradients(xp, y, params):
"""Compute the gradient of the residual w.r.t. the parameters, N.B.
will be performed using a finite difference method. N.B. this should
be trivial to do algebraicly."""
eps = 1.0e-6
g = flex.double()
n = len(params)
for j in range(n):
rs = []
for signed_eps in [-eps, eps]:
params_eps = params[:]
params_eps[j] += signed_eps
rs.append(poly_residual(xp, y, params_eps))
g.append((rs[1] - rs[0]) / (2 * eps))
return g
class poly_fitter(object):
"""A class to do the polynomial fit. This will fit observations y
at points x with a polynomial of order n."""
def __init__(self, points, values, order):
self.x = flex.double([1.0 for j in range(order)])
self._x = flex.double(points)
self._y = flex.double(values)
# precalculate x[j]^[0-(n - 1)] values
self._xp = [
flex.double([math.pow(x, j) for j in range(order)]) for x in self._x
]
return
def refine(self):
"""Actually perform the parameter refinement."""
return lbfgs.run(target_evaluator=self)
def compute_functional_and_gradients(self):
return (
poly_residual(self._xp, self._y, self.x),
poly_gradients(self._xp, self._y, self.x),
)
def get_parameters(self):
return list(self.x)
def evaluate(self, x):
"""Evaluate the resulting fit at point x."""
return sum([math.pow(x, k) * self.x[k] for k in range(len(self.x))])
def fit(x, y, order):
"""Fit the values y(x) then return this fit. x, y should
be iterables containing floats of the same size. The order is the order
of polynomial to use for this fit. This will be useful for e.g. I/sigma."""
pf = poly_fitter(x, y, order)
pf.refine()
return [pf.evaluate(_x) for _x in x]
def log_fit(x, y, order):
"""Fit the values log(y(x)) then return exp() to this fit. x, y should
be iterables containing floats of the same size. The order is the order
of polynomial to use for this fit. This will be useful for e.g. I/sigma."""
ly = [math.log(_y) for _y in y]
pf = poly_fitter(x, ly, order)
pf.refine()
return [math.exp(pf.evaluate(_x)) for _x in x]
def log_inv_fit(x, y, order):
"""Fit the values log(1 / y(x)) then return the inverse of this fit.
x, y should be iterables, the order of the polynomial for the transformed
fit needs to be specified. This will be useful for e.g. Rmerge."""
ly = [math.log(1.0 / _y) for _y in y]
pf = poly_fitter(x, ly, order)
pf.refine()
return [(1.0 / math.exp(pf.evaluate(_x))) for _x in x]
def interpolate_value(x, y, t):
"""Find the value of x: y(x) = t."""
if t > max(y) or t < min(y):
raise RuntimeError("t outside of [%f, %f]" % (min(y), max(y)))
for j in range(1, len(x)):
x0 = x[j - 1]
y0 = y[j - 1]
x1 = x[j]
y1 = y[j]
if (y0 - t) * (y1 - t) < 0:
return x0 + (t - y0) * (x1 - x0) / (y1 - y0)
def get_positive_values(x):
"""Return a list of values v from x where v > 0."""
result = []
for _x in x:
if _x > 0:
result.append(_x)
else:
return result
return result
if __name__ == "__main__":
# trying to work out why something is slow...
x = [
0.28733375585344956,
0.3336648239480671,
0.37392475503798783,
0.4116791460480823,
0.44667362480391215,
0.48010999459819637,
0.5123842907520316,
0.5445830447029069,
0.5747600267080056,
0.605268188207491,
0.6348694178757428,
0.6628307139444256,
0.6915543733106164,
0.7190850546688736,
0.7466325833791124,
0.7726534107667972,
0.7991813564734889,
0.8246120592630442,
0.8509431563671859,
0.8752222362981207,
0.9003835108822839,
0.925531251174205,
0.9495577347489563,
0.9736107180716824,
0.9977616739729435,
1.0211126767435303,
1.0442229585861016,
1.0676870644761218,
1.089626948783452,
1.11325323064326,
1.1353748686331517,
1.157229309091089,
1.1793787289152926,
1.2012850147174827,
1.223192876382562,
1.2442806850714754,
1.2659456255540278,
1.2868725763092403,
1.3077684542819044,
1.329693962546648,
1.3497661431014192,
1.3703975279412275,
1.3913213083813614,
1.4118099020522166,
1.431944241466548,
1.451565968015303,
1.4726043408387703,
1.4926361862881505,
1.511947564871118,
1.531623424311822,
1.5518379642619582,
1.571415292664728,
1.590956013986232,
1.6101289757746151,
1.629504706812003,
1.6488436799317054,
1.6677873136267631,
1.6871236000102316,
1.7063804384195065,
1.7247788587316706,
1.74385084639364,
1.7632567530747427,
1.7810671017919066,
1.8000204739946506,
1.8187750413718835,
1.8362045669565548,
1.855888986697667,
1.8736099866108273,
1.8919543734165152,
1.9099014671201333,
1.9278705840578851,
1.9459285536685293,
1.9644838792250359,
1.9822046837796143,
1.9995268983422625,
2.0173386661672104,
2.0350303628559123,
2.0527713302473805,
2.0715436512758125,
2.088532979967127,
2.105448870913261,
2.122996752747121,
2.140658402767489,
2.1580900095590096,
2.1754356707821283,
2.19275774398331,
2.211194475389986,
2.232982621587298,
2.2551925602858534,
2.280016719289489,
2.3063211626596343,
2.3350430868315497,
2.3665452139200425,
2.4015454429869205,
2.440733908945748,
2.4858785233713427,
2.5418727931246536,
2.6144084555616858,
2.7959062808147896,
]
y = [
24.77724034532261,
24.37804249554226,
24.19024290469251,
24.060132497289498,
23.78910669554878,
23.490999254422075,
23.230491536468016,
23.05617339327898,
22.64620165329114,
22.579695553808385,
22.383003610771798,
22.262032410277936,
22.21201767180415,
21.93212194269467,
21.726772939444658,
21.460467444724543,
21.27059568803877,
21.06466968773921,
20.634888404569303,
20.238281789327637,
19.672916110100605,
19.546897202422976,
18.87739359459743,
18.59488191380871,
18.14880392608624,
17.6962994383689,
17.37710441451018,
16.81250842496295,
16.678882667587086,
16.182391499497715,
15.828587302315464,
15.205433904690839,
14.495596165710925,
14.511859823120211,
13.971753232798177,
13.658395498023248,
13.366842896276086,
13.05856744427929,
12.337465723961392,
12.29682965701954,
12.147839110097841,
11.760324551597702,
11.471407424003074,
11.049213704891022,
10.919965795059092,
10.601626749506291,
10.335411804585565,
9.718082839773091,
9.585767093427409,
9.423114689530454,
9.251666562241514,
9.124491493213558,
8.906324740537787,
8.29969595224133,
8.179515265478527,
8.078946904786891,
8.074081206125799,
7.795640184700349,
7.327064345560753,
7.180371145672737,
6.982901221348126,
6.831549776236767,
6.774329916623371,
6.598455395485047,
6.242034228013543,
6.211893244192715,
5.978124228824288,
5.616738659970417,
5.760183273642267,
5.255614400544779,
5.040337222517639,
4.970512178822339,
4.967344687551919,
4.548778129253488,
4.451021806395992,
4.264074612710173,
4.067343853822604,
4.043692161771108,
3.6569324304568642,
3.727811294231763,
3.4954349302961947,
3.345749115417511,
3.2665114375808058,
3.1220011432385397,
2.8973373248698233,
2.853040292102494,
2.713019895460359,
2.573460999432591,
2.4801019159829423,
2.2829226930395405,
2.1913185826611636,
2.0872962418506518,
1.9316795102089115,
1.6848508083758817,
1.5530229534306241,
1.361701873571922,
1.1916682079143257,
1.053122785634863,
0.771132065724789,
]
m = log_fit(x, y, 6)
for j in range(len(x)):
print(x[j], y[j], m[j])
```
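`log_fit` and `log_inv_fit` above fit a polynomial to log-transformed values so that strictly positive quantities such as I/sigma or Rmerge remain positive after smoothing. The sketch below shows the same idea with `numpy.polyfit`; the wrapper class above instead refines the coefficients with scitbx's L-BFGS minimiser, and the data here are invented.

```python
# Hedged sketch of log-space polynomial smoothing, using numpy instead of
# the scitbx L-BFGS refinement used by poly_fitter above.
import math
import numpy as np

def log_fit_sketch(x, y, order):
    """Fit log(y) with a polynomial and return exp() of the fitted values."""
    coeffs = np.polyfit(x, [math.log(v) for v in y], order)
    return [math.exp(np.polyval(coeffs, v)) for v in x]

# Noisy, decaying I/sigma-like curve (invented data)
x = [0.1 * i for i in range(1, 30)]
y = [20.0 * math.exp(-v) + 0.5 for v in x]
smoothed = log_fit_sketch(x, y, 6)
print(round(smoothed[0], 2), round(smoothed[-1], 2))
```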
#### File: Wrappers/CCP4/Pointless.py
```python
from __future__ import absolute_import, division, print_function
import math
import os
import xml.dom.minidom
from xia2.Decorators.DecoratorFactory import DecoratorFactory
from xia2.Driver.DriverFactory import DriverFactory
from xia2.Handlers.Phil import PhilIndex
from xia2.Handlers.Streams import Debug
# this was rather complicated - now simpler!
from xia2.lib.SymmetryLib import (
clean_reindex_operator,
lauegroup_to_lattice,
spacegroup_name_xHM_to_old,
)
def mend_pointless_xml(xml_file):
"""Repair XML document"""
text = open(xml_file, "r").read().split("\n")
result = []
for record in text:
if not "CenProb" in record:
result.append(record)
continue
if "/CenProb" in record:
result.append(record)
continue
tokens = record.split("CenProb")
assert len(tokens) == 3
result.append("%sCenProb%s/CenProb%s" % tuple(tokens))
open(xml_file, "w").write("\n".join(result))
def Pointless(DriverType=None):
"""A factory for PointlessWrapper classes."""
DriverInstance = DriverFactory.Driver(DriverType)
CCP4DriverInstance = DecoratorFactory.Decorate(DriverInstance, "ccp4")
class PointlessWrapper(CCP4DriverInstance.__class__):
"""A wrapper for Pointless, using the CCP4-ified Driver."""
def __init__(self):
# generic things
CCP4DriverInstance.__class__.__init__(self)
self.set_executable(os.path.join(os.environ.get("CBIN", ""), "pointless"))
self._input_laue_group = None
self._pointgroup = None
self._spacegroup = None
self._reindex_matrix = None
self._reindex_operator = None
self._spacegroup_reindex_matrix = None
self._spacegroup_reindex_operator = None
self._confidence = 0.0
self._hklref = None
self._xdsin = None
self._probably_twinned = False
self._allow_out_of_sequence_files = False
# pname, xname, dname stuff for when we are copying reflections
self._pname = None
self._xname = None
self._dname = None
# space to store all possible solutions, to allow discussion of
# the correct lattice with the indexer... this should be a
# list containing e.g. 'tP'
self._possible_lattices = []
self._lattice_to_laue = {}
# all "likely" spacegroups...
self._likely_spacegroups = []
# and unit cell information
self._cell_info = {}
self._cell = None
# and scale factors to use in conversion
self._scale_factor = 1.0
def set_scale_factor(self, scale_factor):
self._scale_factor = scale_factor
def set_hklref(self, hklref):
self._hklref = hklref
def set_allow_out_of_sequence_files(self, allow=True):
self._allow_out_of_sequence_files = allow
def get_hklref(self):
return self._hklref
def set_project_info(self, pname, xname, dname):
self._pname = pname
self._xname = xname
self._dname = dname
def check_hklref(self):
if self._hklref is None:
raise RuntimeError("hklref not defined")
if not os.path.exists(self._hklref):
raise RuntimeError("hklref %s does not exist" % self._hklref)
def set_xdsin(self, xdsin):
self._xdsin = xdsin
def get_xdsin(self):
return self._xdsin
def check_xdsin(self):
if self._xdsin is None:
raise RuntimeError("xdsin not defined")
if not os.path.exists(self._xdsin):
raise RuntimeError("xdsin %s does not exist" % self._xdsin)
def set_correct_lattice(self, lattice):
"""In a rerunning situation, set the correct lattice, which will
assert a correct lauegroup based on the previous run of the
program..."""
if self._lattice_to_laue == {}:
raise RuntimeError("no lattice to lauegroup mapping")
if lattice not in self._lattice_to_laue:
raise RuntimeError("lattice %s not possible" % lattice)
self._input_laue_group = self._lattice_to_laue[lattice]
def sum_mtz(self, summedlist):
"""Sum partials in an MTZ file from Mosflm to a text file."""
self.add_command_line("-c")
self.check_hklin()
self.start()
self.input("output summedlist %s" % summedlist)
self.close_wait()
# get out the unit cell - we will need this...
output = self.get_all_output()
cell = None
for j in range(len(output)):
line = output[j]
if "Space group from HKLIN file" in line:
cell = tuple(map(float, output[j + 1].split()[1:]))
return cell
def limit_batches(self, first, last):
"""Replacement for rebatch, removing batches."""
self.check_hklin()
self.check_hklout()
self.add_command_line("-c")
self.start()
if first > 1:
self.input("exclude batch %d to %d" % (0, first - 1))
self.input("exclude batch %d to %d" % (last + 1, 9999999))
self.close_wait()
def xds_to_mtz(self):
"""Use pointless to convert XDS file to MTZ."""
if not self._xdsin:
raise RuntimeError("XDSIN not set")
self.check_hklout()
# -c for copy - just convert the file to MTZ multirecord
self.add_command_line("-c")
self.start()
if self._pname and self._xname and self._dname:
self.input(
"name project %s crystal %s dataset %s"
% (self._pname, self._xname, self._dname)
)
self.input("xdsin %s" % self._xdsin)
if self._scale_factor:
Debug.write("Scaling intensities by factor %e" % self._scale_factor)
self.input("multiply %e" % self._scale_factor)
self.close_wait()
# FIXME need to check the status and so on here
if self._xdsin:
from xia2.Wrappers.XDS import XDS
XDS.add_xds_version_to_mtz_history(self.get_hklout())
def decide_pointgroup(self, ignore_errors=False, batches=None):
"""Decide on the correct pointgroup for hklin."""
if not self._xdsin:
self.check_hklin()
self.set_task(
"Computing the correct pointgroup for %s" % self.get_hklin()
)
else:
Debug.write("Pointless using XDS input file %s" % self._xdsin)
self.set_task(
"Computing the correct pointgroup for %s" % self.get_xdsin()
)
# FIXME this should probably be a standard CCP4 keyword
if self._xdsin:
self.add_command_line("xdsin")
self.add_command_line(self._xdsin)
self.add_command_line("xmlout")
self.add_command_line("%d_pointless.xml" % self.get_xpid())
if self._hklref:
self.add_command_line("hklref")
self.add_command_line(self._hklref)
self.start()
if self._allow_out_of_sequence_files:
self.input("allow outofsequencefiles")
# https://github.com/xia2/xia2/issues/125 pass in run limits for this
# HKLIN file - prevents automated RUN determination from causing errors
if batches:
self.input("run 1 batch %d to %d" % tuple(batches))
self.input("systematicabsences off")
self.input("setting symmetry-based")
if self._hklref:
dev = PhilIndex.params.xia2.settings.developmental
if dev.pointless_tolerance > 0.0:
self.input("tolerance %f" % dev.pointless_tolerance)
# may expect more %age variation for small molecule data
if PhilIndex.params.xia2.settings.small_molecule:
if self._hklref:
self.input("tolerance 5.0")
if PhilIndex.params.xia2.settings.symmetry.chirality is not None:
self.input(
"chirality %s" % PhilIndex.params.xia2.settings.symmetry.chirality
)
if self._input_laue_group:
self.input("lauegroup %s" % self._input_laue_group)
self.close_wait()
# check for errors
self.check_for_errors()
# check for fatal errors
output = self.get_all_output()
fatal_error = False
for j, record in enumerate(output):
if "FATAL ERROR message:" in record:
if ignore_errors:
fatal_error = True
else:
raise RuntimeError(
"Pointless error: %s" % output[j + 1].strip()
)
if (
"Resolution range of Reference data and observed data do not"
in record
and ignore_errors
):
fatal_error = True
if "All reflection pairs rejected" in record and ignore_errors:
fatal_error = True
if (
"Reference data and observed data do not overlap" in record
and ignore_errors
):
fatal_error = True
hklin_spacegroup = ""
# split loop - first seek hklin symmetry then later look for everything
# else
for o in self.get_all_output():
if "Spacegroup from HKLIN file" in o:
hklin_spacegroup = spacegroup_name_xHM_to_old(
o.replace("Spacegroup from HKLIN file :", "").strip()
)
if "Space group from HKLREF file" in o:
hklref_spacegroup = spacegroup_name_xHM_to_old(
o.replace("Space group from HKLREF file :", "").strip()
)
# https://github.com/xia2/xia2/issues/115
if fatal_error:
assert hklref_spacegroup
self._pointgroup = hklref_spacegroup
self._confidence = 1.0
self._totalprob = 1.0
self._reindex_matrix = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
self._reindex_operator = "h,k,l"
return "ok"
for o in self.get_all_output():
if "No alternative indexing possible" in o:
# then the XML file will be broken - no worries...
self._pointgroup = hklin_spacegroup
self._confidence = 1.0
self._totalprob = 1.0
self._reindex_matrix = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
self._reindex_operator = "h,k,l"
return "ok"
if "**** Incompatible symmetries ****" in o:
raise RuntimeError(
"reindexing against a reference with different symmetry"
)
if "***** Stopping because cell discrepancy between files" in o:
raise RuntimeError("incompatible unit cells between data sets")
if "L-test suggests that the data may be twinned" in o:
self._probably_twinned = True
# parse the XML file for the information I need...
xml_file = os.path.join(
self.get_working_directory(), "%d_pointless.xml" % self.get_xpid()
)
mend_pointless_xml(xml_file)
# catch the case sometimes on ppc mac where pointless adds
# an extra .xml on the end...
if not os.path.exists(xml_file) and os.path.exists("%s.xml" % xml_file):
xml_file = "%s.xml" % xml_file
if not self._hklref:
dom = xml.dom.minidom.parse(xml_file)
try:
best = dom.getElementsByTagName("BestSolution")[0]
except IndexError:
raise RuntimeError("error getting solution from pointless")
self._pointgroup = (
best.getElementsByTagName("GroupName")[0].childNodes[0].data
)
self._confidence = float(
best.getElementsByTagName("Confidence")[0].childNodes[0].data
)
self._totalprob = float(
best.getElementsByTagName("TotalProb")[0].childNodes[0].data
)
self._reindex_matrix = map(
float,
best.getElementsByTagName("ReindexMatrix")[0]
.childNodes[0]
.data.split(),
)
self._reindex_operator = clean_reindex_operator(
best.getElementsByTagName("ReindexOperator")[0]
.childNodes[0]
.data.strip()
)
else:
# if we have provided a HKLREF input then the xml output
# is changed...
# FIXME in here, need to check if there is the legend
# "No possible alternative indexing" in the standard
# output, as this will mean that the index scores are
# not there... c/f oppf1314, with latest pointless build
# 1.2.14.
dom = xml.dom.minidom.parse(xml_file)
try:
best = dom.getElementsByTagName("IndexScores")[0]
except IndexError:
Debug.write("Reindex not found in xml output")
# check for this legend then
found = False
for record in self.get_all_output():
if "No possible alternative indexing" in record:
found = True
if not found:
raise RuntimeError("error finding solution")
best = None
hklref_pointgroup = ""
# FIXME need to get this from the reflection file HKLREF
reflection_file_elements = dom.getElementsByTagName("ReflectionFile")
for rf in reflection_file_elements:
stream = rf.getAttribute("stream")
if stream == "HKLREF":
hklref_pointgroup = (
rf.getElementsByTagName("SpacegroupName")[0]
.childNodes[0]
.data.strip()
)
# Chatter.write('HKLREF pointgroup is %s' % \
# hklref_pointgroup)
if hklref_pointgroup == "":
raise RuntimeError("error finding HKLREF pointgroup")
self._pointgroup = hklref_pointgroup
self._confidence = 1.0
self._totalprob = 1.0
if best:
index = best.getElementsByTagName("Index")[0]
self._reindex_matrix = map(
float,
index.getElementsByTagName("ReindexMatrix")[0]
.childNodes[0]
.data.split(),
)
self._reindex_operator = clean_reindex_operator(
index.getElementsByTagName("ReindexOperator")[0]
.childNodes[0]
.data.strip()
)
else:
# no alternative indexing is possible so just
# assume the default...
self._reindex_matrix = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
self._reindex_operator = "h,k,l"
if not self._input_laue_group and not self._hklref:
scorelist = dom.getElementsByTagName("LaueGroupScoreList")[0]
scores = scorelist.getElementsByTagName("LaueGroupScore")
for s in scores:
lauegroup = (
s.getElementsByTagName("LaueGroupName")[0].childNodes[0].data
)
netzc = float(
s.getElementsByTagName("NetZCC")[0].childNodes[0].data
)
# record this as a possible lattice if its Z score is positive
lattice = lauegroup_to_lattice(lauegroup)
if not lattice in self._possible_lattices:
if netzc > 0.0:
self._possible_lattices.append(lattice)
# do we not always want to have access to the
# solutions, even if they are unlikely - this will
# only be invoked if they are known to
# be right...
self._lattice_to_laue[lattice] = lauegroup
return "ok"
def decide_spacegroup(self):
"""Given data indexed in the correct pointgroup, have a
guess at the spacegroup."""
if not self._xdsin:
self.check_hklin()
self.set_task(
"Computing the correct spacegroup for %s" % self.get_hklin()
)
else:
Debug.write("Pointless using XDS input file %s" % self._xdsin)
self.set_task(
"Computing the correct spacegroup for %s" % self.get_xdsin()
)
# FIXME this should probably be a standard CCP4 keyword
if self._xdsin:
self.add_command_line("xdsin")
self.add_command_line(self._xdsin)
self.add_command_line("xmlout")
self.add_command_line("%d_pointless.xml" % self.get_xpid())
self.add_command_line("hklout")
self.add_command_line("pointless.mtz")
self.start()
self.input("lauegroup hklin")
self.input("setting symmetry-based")
if PhilIndex.params.xia2.settings.symmetry.chirality is not None:
self.input(
"chirality %s" % PhilIndex.params.xia2.settings.symmetry.chirality
)
self.close_wait()
# check for errors
self.check_for_errors()
xml_file = os.path.join(
self.get_working_directory(), "%d_pointless.xml" % self.get_xpid()
)
mend_pointless_xml(xml_file)
if not os.path.exists(xml_file) and os.path.exists("%s.xml" % xml_file):
xml_file = "%s.xml" % xml_file
dom = xml.dom.minidom.parse(xml_file)
sg_list = dom.getElementsByTagName("SpacegroupList")[0]
sg_node = sg_list.getElementsByTagName("Spacegroup")[0]
best_prob = float(
sg_node.getElementsByTagName("TotalProb")[0].childNodes[0].data.strip()
)
# FIXME 21/NOV/06 in here record a list of valid spacegroups
# (that is, those which are as likely as the most likely)
# for later use...
self._spacegroup = (
sg_node.getElementsByTagName("SpacegroupName")[0]
.childNodes[0]
.data.strip()
)
self._spacegroup_reindex_operator = (
sg_node.getElementsByTagName("ReindexOperator")[0]
.childNodes[0]
.data.strip()
)
self._spacegroup_reindex_matrix = tuple(
map(
float,
sg_node.getElementsByTagName("ReindexMatrix")[0]
.childNodes[0]
.data.split(),
)
)
# get a list of "equally likely" spacegroups
for node in sg_list.getElementsByTagName("Spacegroup"):
prob = float(
node.getElementsByTagName("TotalProb")[0].childNodes[0].data.strip()
)
name = (
node.getElementsByTagName("SpacegroupName")[0]
.childNodes[0]
.data.strip()
)
if math.fabs(prob - best_prob) < 0.01:
# this is jolly likely!
self._likely_spacegroups.append(name)
# now parse the output looking for the unit cell information -
# this should look familiar from mtzdump
output = self.get_all_output()
length = len(output)
a = 0.0
b = 0.0
c = 0.0
alpha = 0.0
beta = 0.0
gamma = 0.0
self._cell_info["datasets"] = []
self._cell_info["dataset_info"] = {}
for i in range(length):
line = output[i][:-1]
if "Dataset ID, " in line:
block = 0
while output[block * 5 + i + 2].strip():
dataset_number = int(output[5 * block + i + 2].split()[0])
project = output[5 * block + i + 2][10:].strip()
crystal = output[5 * block + i + 3][10:].strip()
dataset = output[5 * block + i + 4][10:].strip()
cell = list(map(float, output[5 * block + i + 5].strip().split()))
wavelength = float(output[5 * block + i + 6].strip())
dataset_id = "%s/%s/%s" % (project, crystal, dataset)
self._cell_info["datasets"].append(dataset_id)
self._cell_info["dataset_info"][dataset_id] = {}
self._cell_info["dataset_info"][dataset_id][
"wavelength"
] = wavelength
self._cell_info["dataset_info"][dataset_id]["cell"] = cell
self._cell_info["dataset_info"][dataset_id][
"id"
] = dataset_number
block += 1
for dataset in self._cell_info["datasets"]:
cell = self._cell_info["dataset_info"][dataset]["cell"]
a += cell[0]
b += cell[1]
c += cell[2]
alpha += cell[3]
beta += cell[4]
gamma += cell[5]
n = len(self._cell_info["datasets"])
self._cell = (a / n, b / n, c / n, alpha / n, beta / n, gamma / n)
if self._xdsin:
from xia2.Wrappers.XDS import XDS
XDS.add_xds_version_to_mtz_history(self.get_hklout())
return "ok"
def get_reindex_matrix(self):
return self._reindex_matrix
def get_reindex_operator(self):
return self._reindex_operator
def get_pointgroup(self):
return self._pointgroup
def get_spacegroup(self):
return self._spacegroup
def get_cell(self):
return self._cell
def get_probably_twinned(self):
return self._probably_twinned
def get_spacegroup_reindex_operator(self):
return self._spacegroup_reindex_operator
def get_spacegroup_reindex_matrix(self):
return self._spacegroup_reindex_matrix
def get_likely_spacegroups(self):
return self._likely_spacegroups
def get_confidence(self):
return self._confidence
def get_possible_lattices(self):
return self._possible_lattices
return PointlessWrapper()
```
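For orientation, here is a minimal usage sketch of the wrapper above. It assumes the enclosing factory (defined earlier in this file, not shown in this excerpt) is in scope as `Pointless`, that the `pointless` executable is on the PATH, and that `set_hklin` is supplied by the CCP4-ified Driver base class; the MTZ file name is a placeholder.
```python
# Hypothetical usage sketch -- the factory name, set_hklin and the file name
# are assumptions; only getters defined above are exercised.
p = Pointless()
p.set_hklin("integrated_unmerged.mtz")  # placeholder MTZ from integration
p.decide_pointgroup()
print("pointgroup:", p.get_pointgroup())
print("reindex operator:", p.get_reindex_operator())
print("confidence:", p.get_confidence())
print("possible lattices:", p.get_possible_lattices())
```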
#### File: Wrappers/Dials/EstimateResolutionLimit.py
```python
from __future__ import absolute_import, division, print_function
def EstimateResolutionLimit(DriverType=None):
"""A factory for EstimateResolutionLimitWrapper classes."""
from xia2.Driver.DriverFactory import DriverFactory
DriverInstance = DriverFactory.Driver(DriverType)
class EstimateResolutionLimitWrapper(DriverInstance.__class__):
def __init__(self):
DriverInstance.__class__.__init__(self)
self.set_executable("dials.estimate_resolution_limit")
self._experiments_filename = None
self._reflections_filename = None
self._estimated_d_min = None
def set_experiments_filename(self, experiments_filename):
self._experiments_filename = experiments_filename
def set_reflections_filename(self, reflections_filename):
self._reflections_filename = reflections_filename
def get_estimated_d_min(self):
return self._estimated_d_min
def run(self):
from xia2.Handlers.Streams import Debug
Debug.write("Running dials.estimate_resolution_limit")
self.clear_command_line()
self.add_command_line(self._experiments_filename)
self.add_command_line(self._reflections_filename)
self.start()
self.close_wait()
self.check_for_errors()
for line in self.get_all_output():
if line.startswith("estimated d_min:"):
self._estimated_d_min = float(line.split(":")[1])
return self._estimated_d_min
return EstimateResolutionLimitWrapper()
```
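A hedged usage sketch for the factory above: it assumes `dials.estimate_resolution_limit` is installed and on the PATH, and the two DIALS file names below are placeholders.
```python
# Hypothetical usage sketch; input file names are placeholders.
erl = EstimateResolutionLimit()
erl.set_experiments_filename("indexed_experiments.json")  # placeholder
erl.set_reflections_filename("indexed.pickle")            # placeholder
d_min = erl.run()
print("estimated d_min:", d_min)
```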
#### File: Wrappers/Dials/ExportXDS.py
```python
from __future__ import absolute_import, division, print_function
def ExportXDS(DriverType=None):
"""A factory for ExportXDSWrapper classes."""
from xia2.Driver.DriverFactory import DriverFactory
DriverInstance = DriverFactory.Driver(DriverType)
class ExportXDSWrapper(DriverInstance.__class__):
def __init__(self):
DriverInstance.__class__.__init__(self)
self.set_executable("dials.export")
self._experiments_filename = None
def set_experiments_filename(self, experiments_filename):
self._experiments_filename = experiments_filename
def run(self):
from xia2.Handlers.Streams import Debug
Debug.write("Running dials.export")
self.clear_command_line()
self.add_command_line(self._experiments_filename)
self.add_command_line("format=xds")
self.start()
self.close_wait()
self.check_for_errors()
return ExportXDSWrapper()
```
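A hedged usage sketch for the factory above, assuming `dials.export` is installed; the experiments file name is a placeholder. The wrapper simply exports the experiments in XDS format into the working directory.
```python
# Hypothetical usage sketch; the experiments file name is a placeholder.
exporter = ExportXDS()
exporter.set_experiments_filename("refined_experiments.json")  # placeholder
exporter.run()  # runs "dials.export <experiments> format=xds"
```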
#### File: Wrappers/Dials/Report.py
```python
from __future__ import absolute_import, division, print_function
def Report(DriverType=None):
"""A factory for ReportWrapper classes."""
from xia2.Driver.DriverFactory import DriverFactory
DriverInstance = DriverFactory.Driver(DriverType)
class ReportWrapper(DriverInstance.__class__):
def __init__(self):
DriverInstance.__class__.__init__(self)
self.set_executable("dials.report")
self._experiments_filename = None
self._reflections_filename = None
self._html_filename = None
def set_experiments_filename(self, experiments_filename):
self._experiments_filename = experiments_filename
def set_reflections_filename(self, reflections_filename):
self._reflections_filename = reflections_filename
def set_html_filename(self, html_filename):
self._html_filename = html_filename
def run(self, wait_for_completion=False):
from xia2.Handlers.Streams import Debug
Debug.write("Running dials.report")
self.clear_command_line()
assert (
self._experiments_filename is not None
or self._reflections_filename is not None
)
if self._experiments_filename is not None:
self.add_command_line(self._experiments_filename)
if self._reflections_filename is not None:
self.add_command_line(self._reflections_filename)
if self._html_filename is not None:
self.add_command_line("output.html=%s" % self._html_filename)
self.start()
if wait_for_completion:
self.close_wait()
else:
self.close()
self.check_for_errors()
return ReportWrapper()
```
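A hedged usage sketch for the factory above, assuming `dials.report` is installed; all three file names are placeholders. Passing `wait_for_completion=True` blocks until the HTML report has been written.
```python
# Hypothetical usage sketch; file names are placeholders.
report = Report()
report.set_experiments_filename("integrated_experiments.json")  # placeholder
report.set_reflections_filename("integrated.pickle")            # placeholder
report.set_html_filename("dials-report.html")                   # placeholder
report.run(wait_for_completion=True)
```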
#### File: Wrappers/Labelit/LabelitDistl.py
```python
from __future__ import absolute_import, division, print_function
import os
from xia2.Driver.DriverFactory import DriverFactory
def LabelitDistl(DriverType=None):
"""Factory for LabelitDistl wrapper classes, with the specified
Driver type."""
DriverInstance = DriverFactory.Driver(DriverType)
class LabelitDistlWrapper(DriverInstance.__class__):
"""A wrapper for the program labelit.distl - which will provide
functionality for looking for ice rings and screening diffraction
images."""
def __init__(self):
DriverInstance.__class__.__init__(self)
self.set_executable("labelit.distl")
self._images = []
self._statistics = {}
def add_image(self, image):
"""Add an image for indexing."""
if not image in self._images:
self._images.append(image)
return
def distl(self):
"""Actually analyse the images."""
self._images.sort()
for i in self._images:
self.add_command_line(i)
task = "Screen images:"
for i in self._images:
task += " %s" % i
self.set_task(task)
self.start()
self.close_wait()
# check for errors
self.check_for_errors()
# ok now we're done, let's look through for some useful stuff
output = self.get_all_output()
current_image = None
for o in output:
if "None" in o and "Resolution" in o:
l = o.replace("None", "0.0").split()
else:
l = o.split()
if l[:1] == ["File"]:
current_image = l[2]
self._statistics[current_image] = {}
if l[:2] == ["Spot", "Total"]:
self._statistics[current_image]["spots_total"] = int(l[-1])
if l[:2] == ["In-Resolution", "Total"]:
self._statistics[current_image]["spots"] = int(l[-1])
if l[:3] == ["Good", "Bragg", "Candidates"]:
self._statistics[current_image]["spots_good"] = int(l[-1])
if l[:2] == ["Ice", "Rings"]:
self._statistics[current_image]["ice_rings"] = int(l[-1])
if l[:3] == ["Method", "1", "Resolution"]:
self._statistics[current_image]["resol_one"] = float(l[-1])
if l[:3] == ["Method", "2", "Resolution"]:
self._statistics[current_image]["resol_two"] = float(l[-1])
if l[:3] == ["%Saturation,", "Top", "50"]:
self._statistics[current_image]["saturation"] = float(l[-1])
return "ok"
# things to get results from the indexing
def get_statistics(self, image):
"""Get the screening statistics from image as dictionary.
The keys are spots_total, spots, spots_good, ice_rings,
resol_one, resol_two."""
return self._statistics[os.path.split(image)[-1]]
return LabelitDistlWrapper()
```
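A hedged usage sketch for the factory above, assuming `labelit.distl` is installed; the image paths are placeholders.
```python
# Hypothetical usage sketch; image paths are placeholders.
distl = LabelitDistl()
distl.add_image("/data/demo/demo_0001.img")  # placeholder
distl.add_image("/data/demo/demo_0045.img")  # placeholder
distl.distl()
stats = distl.get_statistics("/data/demo/demo_0001.img")
print(stats["spots_good"], stats["ice_rings"], stats["resol_one"])
```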
#### File: Wrappers/Labelit/LabelitStats_distl.py
```python
from __future__ import absolute_import, division, print_function
import os
from xia2.Driver.DriverFactory import DriverFactory
def LabelitStats_distl(DriverType=None):
"""Factory for LabelitStats_distl wrapper classes, with the specified
Driver type."""
DriverInstance = DriverFactory.Driver(DriverType)
class LabelitStats_distlWrapper(DriverInstance.__class__):
"""A wrapper for the program labelit.stats_distl - which will provide
functionality for looking for ice rings and screening diffraction
images."""
def __init__(self):
DriverInstance.__class__.__init__(self)
self.set_executable("labelit.stats_distl")
self._statistics = {}
def stats_distl(self):
"""Return the image statistics."""
self.set_task("Return statistics from last distl run")
self.start()
self.close_wait()
# check for errors
self.check_for_errors()
# ok now we're done, let's look through for some useful stuff
output = self.get_all_output()
current_image = None
for o in output:
if "None" in o and "Resolution" in o:
l = o.replace("None", "0.0").split()
else:
l = o.split()
if l[:1] == ["File"]:
current_image = l[2]
self._statistics[current_image] = {}
if l[:2] == ["Spot", "Total"]:
self._statistics[current_image]["spots_total"] = int(l[-1])
if l[:2] == ["In-Resolution", "Total"]:
self._statistics[current_image]["spots"] = int(l[-1])
if l[:3] == ["Good", "Bragg", "Candidates"]:
self._statistics[current_image]["spots_good"] = int(l[-1])
if l[:2] == ["Ice", "Rings"]:
self._statistics[current_image]["ice_rings"] = int(l[-1])
if l[:3] == ["Method", "1", "Resolution"]:
self._statistics[current_image]["resol_one"] = float(l[-1])
if l[:3] == ["Method", "2", "Resolution"]:
self._statistics[current_image]["resol_two"] = float(l[-1])
if l[:3] == ["%Saturation,", "Top", "50"]:
self._statistics[current_image]["saturation"] = float(l[-1])
return "ok"
# things to get results from the indexing
def get_statistics(self, image):
"""Get the screening statistics from image as dictionary.
The keys are spots_total, spots, spots_good, ice_rings,
resol_one, resol_two."""
return self._statistics[os.path.split(image)[-1]]
return LabelitStats_distlWrapper()
```
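A hedged usage sketch for the factory above. `labelit.stats_distl` only re-reads the results of the most recent `labelit.distl` run, so this assumes a distl run has already happened in the same working directory; the image path is a placeholder.
```python
# Hypothetical usage sketch; assumes labelit.distl was already run here.
stats_distl = LabelitStats_distl()
stats_distl.stats_distl()
print(stats_distl.get_statistics("/data/demo/demo_0001.img"))  # placeholder
```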
#### File: Wrappers/XDS/test_XDSIntegrateHelpers.py
```python
from xia2.Wrappers.XDS.XDSIntegrateHelpers import _parse_integrate_lp
def test_parse_integrate_lp(tmpdir):
integrate_lp = tmpdir.join("INTEGRATE.LP")
with integrate_lp.open("w") as f:
f.write(integrate_lp_example_1)
per_image_stats = _parse_integrate_lp(integrate_lp.strpath)
assert list(per_image_stats.keys()) == list(range(1, 22))
assert per_image_stats[1] == {
"distance": 213.68,
"all": 2486,
"scale": 0.01,
"overloads": 0,
"rmsd_phi": 0.2,
"rejected": 1,
"beam": [2217.03, 2306.1],
"fraction_weak": 0.9605792437650845,
"rmsd_pixel": 0.5,
"strong": 98,
"unit_cell": (57.687, 57.687, 149.879, 90.0, 90.0, 90.0),
"mosaic": 0.042,
}
integrate_lp = tmpdir.join("INTEGRATE.LP")
with integrate_lp.open("w") as f:
f.write(integrate_lp_big_n_refl)
per_image_stats = _parse_integrate_lp(integrate_lp.strpath)
assert list(per_image_stats.keys()) == list(range(2601, 2611))
print(per_image_stats[2601])
assert per_image_stats[2601] == {
"all": 1092650,
"scale": 0.975,
"overloads": 0,
"rmsd_phi": 15.4,
"rejected": 8,
"fraction_weak": 0.9997409966594976,
"rmsd_pixel": 1.86,
"strong": 283,
}
integrate_lp_example_1 = """\
OSCILLATION_RANGE= 0.250000 DEGREES
******************************************************************************
PROCESSING OF IMAGES 1 ... 21
******************************************************************************
IMAGE IER SCALE NBKG NOVL NEWALD NSTRONG NREJ SIGMAB SIGMAR
1 0 0.010 16768425 0 2486 98 1 0.01970 0.05000
2 0 0.010 16767966 0 2482 113 1 0.02078 0.05000
3 0 0.010 16768952 0 2457 92 1 0.01999 0.05000
4 0 0.010 16768674 0 2507 95 1 0.01898 0.05000
5 0 0.010 16768968 0 2531 90 0 0.02025 0.05000
6 0 0.010 16768173 0 2500 104 0 0.01927 0.05000
7 0 0.010 16768177 0 2528 102 0 0.01925 0.05000
8 0 0.010 16768711 0 2471 91 0 0.01968 0.05000
9 0 0.010 16769564 0 2439 77 0 0.02068 0.05000
10 0 0.010 16768394 0 2464 100 0 0.01907 0.05000
11 0 0.010 16769104 0 2501 85 0 0.01894 0.05000
12 0 0.010 16769057 0 2442 86 0 0.01787 0.05000
13 0 0.010 16768710 0 2542 89 0 0.01865 0.05000
14 0 0.010 16768293 0 2498 99 0 0.01954 0.05000
15 0 0.010 16768756 0 2485 91 0 0.01893 0.05000
16 0 0.010 16768205 0 2478 102 0 0.02038 0.05000
17 0 0.010 16768731 0 2494 98 0 0.01906 0.05000
18 0 0.010 16769096 0 2519 89 0 0.01905 0.05000
19 0 0.010 16769308 0 2470 84 1 0.01861 0.05000
20 0 0.010 16770288 0 2532 73 0 0.01845 0.05000
21 0 0.010 16768056 0 2521 110 0 0.01916 0.05000
1433 OUT OF 1433 REFLECTIONS ACCEPTED FOR REFINEMENT
REFINED PARAMETERS: POSITION ORIENTATION CELL
STANDARD DEVIATION OF SPOT POSITION (PIXELS) 0.50
STANDARD DEVIATION OF SPINDLE POSITION (DEGREES) 0.05
SPACE GROUP NUMBER 75
UNIT CELL PARAMETERS 57.687 57.687 149.879 90.000 90.000 90.000
REC. CELL PARAMETERS 0.017335 0.017335 0.006672 90.000 90.000 90.000
COORDINATES OF UNIT CELL A-AXIS 17.374 -51.023 20.558
COORDINATES OF UNIT CELL B-AXIS -51.259 -7.193 25.468
COORDINATES OF UNIT CELL C-AXIS -51.864 -67.388 -123.421
CRYSTAL ROTATION OFF FROM INITIAL ORIENTATION -0.004 0.014 0.003
shown as x,y,z components of rotation axis X angle (degrees)
CRYSTAL MOSAICITY (DEGREES) 0.042
LAB COORDINATES OF ROTATION AXIS 0.999943 -0.006986 -0.008051
DIRECT BEAM COORDINATES (REC. ANGSTROEM) 0.008789 0.010077 1.020035
DETECTOR COORDINATES (PIXELS) OF DIRECT BEAM 2217.03 2306.10
DETECTOR ORIGIN (PIXELS) AT 2192.48 2277.95
CRYSTAL TO DETECTOR DISTANCE (mm) 213.68
LAB COORDINATES OF DETECTOR X-AXIS 1.000000 0.000000 0.000000
LAB COORDINATES OF DETECTOR Y-AXIS 0.000000 1.000000 0.000000
STANDARD DEVIATIONS OF BEAM DIVERGENCE AND REFLECTING RANGE OBTAINED
FROM 1433 REFLECTION PROFILES AT 9 POSITIONS ON THE DETECTOR SURFACE.
POSITION NUMBER 1 2 3 4 5 6 7 8 9
X-COORDINATE (pixel) 2074.0 3619.9 3167.1 2074.0 980.9 528.1 980.9 2073.9 3167.0
Y-COORDINATE (pixel) 2181.0 2181.0 3330.5 3806.6 3330.5 2181.1 1031.6 555.4 1031.4
NUMBER 467 137 165 155 104 72 82 111 132
SIGMAB (degree) 0.016 0.017 0.017 0.017 0.017 0.017 0.016 0.016 0.017
SIGMAR (degree) 0.038 0.037 0.037 0.038 0.037 0.036 0.037 0.038 0.038
"""
integrate_lp_big_n_refl = """\
OSCILLATION_RANGE= 0.050000 DEGREES
******************************************************************************
PROCESSING OF IMAGES 2601 ... 2610
******************************************************************************
IMAGE IER SCALE NBKG NOVL NEWALD NSTRONG NREJ SIGMAB SIGMAR
2601 0 0.975 16680318 01092650 283 8 0.01281 0.40765
2602 0 0.969 16680543 01092588 268 6 0.01308 0.46286
2603 0 0.973 16680424 01092451 274 5 0.01419 0.44668
2604 0 0.979 16679844 01092457 282 7 0.01460 0.43985
2605 0 0.972 16679268 01092620 294 4 0.01438 0.43988
2606 0 0.979 16678476 01092578 301 8 0.01346 0.44049
2607 0 0.975 16679456 01092483 285 8 0.01428 0.43287
2608 0 0.980 16679874 01092729 283 6 0.01392 0.48593
2609 0 0.985 16679847 01092438 280 5 0.01349 0.46816
2610 0 0.978 16679911 01092542 276 5 0.01438 0.47244
6989 OUT OF 9308 REFLECTIONS ACCEPTED FOR REFINEMENT
REFINED PARAMETERS: POSITION BEAM ORIENTATION CELL
STANDARD DEVIATION OF SPOT POSITION (PIXELS) 1.86
STANDARD DEVIATION OF SPINDLE POSITION (DEGREES) 0.77
"""
```
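The parser exercised by this test can also be driven outside pytest. The sketch below is a hedged example that writes the `integrate_lp_example_1` string from above to a temporary file and inspects a couple of per-image values; it assumes xia2 is importable.
```python
# Hedged sketch reusing integrate_lp_example_1 defined in the test module.
import os
import tempfile
from xia2.Wrappers.XDS.XDSIntegrateHelpers import _parse_integrate_lp
with tempfile.NamedTemporaryFile("w", suffix=".LP", delete=False) as handle:
    handle.write(integrate_lp_example_1)
    lp_path = handle.name
stats = _parse_integrate_lp(lp_path)
print(sorted(stats)[:3])                       # first few image numbers
print(stats[1]["strong"], stats[1]["mosaic"])  # values checked in the test
os.remove(lp_path)
```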
#### File: Wrappers/XIA/Diffdump.py
```python
from __future__ import absolute_import, division, print_function
import copy
import datetime
import math
import os
import sys
import time
import traceback
import pycbf
from scitbx import matrix
from scitbx.math import r3_rotation_axis_and_angle_from_matrix
from xia2.Driver.DriverFactory import DriverFactory
if __name__ == "__main__":
debug = False
else:
debug = False
class _HeaderCache(object):
"""A cache for image headers."""
def __init__(self):
self._headers = {}
def put(self, image, header):
self._headers[image] = copy.deepcopy(header)
def get(self, image):
return self._headers[image]
def check(self, image):
return image in self._headers
def write(self, filename):
import json
json.dump(self._headers, open(filename, "w"))
return
def read(self, filename):
assert self._headers == {}
import json
self._headers = json.load(open(filename, "r"))
return len(self._headers)
HeaderCache = _HeaderCache()
# FIXME this does not include all MAR, RAXIS detectors
detector_class = {
("adsc", 2304, 81): "adsc q4",
("adsc", 1152, 163): "adsc q4 2x2 binned",
("adsc", 1502, 163): "adsc q4 2x2 binned",
("adsc", 4096, 51): "adsc q210",
("adsc", 2048, 102): "adsc q210 2x2 binned",
("adsc", 6144, 51): "adsc q315",
("adsc", 3072, 102): "adsc q315 2x2 binned",
("adsc", 4168, 64): "adsc q270",
("adsc", 4168, 65): "adsc q270",
("adsc", 2084, 128): "adsc q270 2x2 binned",
("adsc", 2084, 129): "adsc q270 2x2 binned",
("adsc", 2084, 130): "adsc q270 2x2 binned",
("cbf", 2463, 172): "pilatus 6M",
("mini-cbf", 2463, 172): "pilatus 6M",
("dectris", 2527, 172): "pilatus 6M",
("dectris", 1679, 172): "pilatus 2M",
("marccd", 4096, 73): "mar 300 ccd",
("marccd", 4096, 79): "mar 325 ccd",
("marccd", 3072, 73): "mar 225 ccd",
("marccd", 2048, 78): "mar 165 ccd",
("marccd", 2048, 79): "mar 165 ccd",
("marccd", 2048, 64): "mar 135 ccd",
("mar", 4096, 73): "mar 300 ccd",
("mar", 4096, 79): "mar 325 ccd",
("mar", 3072, 73): "mar 225 ccd",
("mar", 2048, 78): "mar 165 ccd",
("mar", 2048, 79): "mar 165 ccd",
("mar", 2048, 64): "mar 135 ccd",
("mar", 1200, 150): "mar 180",
("mar", 1600, 150): "mar 240",
("mar", 2000, 150): "mar 300",
("mar", 2300, 150): "mar 345",
("mar", 3450, 100): "mar 345",
("raxis", 3000, 100): "raxis IV",
("rigaku", 3000, 100): "raxis IV",
("saturn", 2048, 45): "rigaku saturn 92",
("saturn", 1024, 90): "rigaku saturn 92 2x2 binned",
("saturn", 2084, 45): "rigaku saturn 944",
("saturn", 1042, 90): "rigaku saturn 944 2x2 binned",
("rigaku", 2048, 45): "rigaku saturn 92",
("rigaku", 1024, 90): "rigaku saturn 92 2x2 binned",
("rigaku", 1042, 35): "rigaku saturn 724",
("rigaku", 1042, 70): "rigaku saturn 724 2x2 binned",
("rigaku", 2084, 45): "rigaku saturn 944",
("rigaku", 1042, 90): "rigaku saturn 944 2x2 binned",
("rigaku", 1042, 89): "rigaku saturn 944 2x2 binned",
}
def read_A200(image):
"""Read the header from a Rigaku A200 image. This is to work around the
diffdump program falling over with such images."""
raise RuntimeError("this needs implementing!")
# FIXME get proper specifications for these detectors...
def find_detector_id(cbf_handle):
detector_id = ""
cbf_handle.rewind_datablock()
nblocks = cbf_handle.count_datablocks()
for j in range(nblocks):
cbf_handle.select_datablock(0)
ncat = cbf_handle.count_categories()
for j in range(ncat):
cbf_handle.select_category(j)
if not cbf_handle.category_name() == "diffrn_detector":
continue
nrows = cbf_handle.count_rows()
ncols = cbf_handle.count_columns()
cbf_handle.rewind_column()
while True:
if cbf_handle.column_name() == "id":
detector_id = cbf_handle.get_value()
break
try:
cbf_handle.next_column()
except Exception:
break
return detector_id
def cbf_gonio_to_effective_axis(cbf_gonio):
"""Given a cbf goniometer handle, determine the real rotation axis."""
x = cbf_gonio.rotate_vector(0.0, 1, 0, 0)
y = cbf_gonio.rotate_vector(0.0, 0, 1, 0)
z = cbf_gonio.rotate_vector(0.0, 0, 0, 1)
R = matrix.rec(x + y + z, (3, 3)).transpose()
x1 = cbf_gonio.rotate_vector(1.0, 1, 0, 0)
y1 = cbf_gonio.rotate_vector(1.0, 0, 1, 0)
z1 = cbf_gonio.rotate_vector(1.0, 0, 0, 1)
R1 = matrix.rec(x1 + y1 + z1, (3, 3)).transpose()
RA = R1 * R.inverse()
axis = r3_rotation_axis_and_angle_from_matrix(RA).axis
return axis
def failover_full_cbf(cbf_file):
"""Use pycbf library to read full cbf file description."""
header = {}
cbf_handle = pycbf.cbf_handle_struct()
cbf_handle.read_widefile(cbf_file, pycbf.MSG_DIGEST)
detector_id_map = {
"Pilatus2M": "pilatus 2M",
"Pilatus6M": "pilatus 6M",
"i19-p300k": "pilatus 300K",
"ADSCQ315-SN920": "adsc q315 2x2 binned",
}
header["detector_class"] = detector_id_map[find_detector_id(cbf_handle)]
if "pilatus" in header["detector_class"]:
header["detector"] = "dectris"
elif "adsc" in header["detector_class"]:
header["detector"] = "adsc"
else:
raise RuntimeError("unknown detector %s" % header["detector_class"])
cbf_handle.rewind_datablock()
detector = cbf_handle.construct_detector(0)
# FIXME need to check that this is doing something sensible...!
header["beam"] = tuple(map(math.fabs, detector.get_beam_center()[2:]))
detector_normal = tuple(detector.get_detector_normal())
gonio = cbf_handle.construct_goniometer()
axis = tuple(gonio.get_rotation_axis())
angles = tuple(gonio.get_rotation_range())
header["distance"] = detector.get_detector_distance()
header["pixel"] = (
detector.get_inferred_pixel_size(1),
detector.get_inferred_pixel_size(2),
)
header["phi_start"], header["phi_width"] = angles
header["phi_end"] = header["phi_start"] + header["phi_width"]
year, month, day, hour, minute, second, x = cbf_handle.get_datestamp()
struct_time = datetime.datetime(year, month, day, hour, minute, second).timetuple()
header["date"] = time.asctime(struct_time)
header["epoch"] = cbf_handle.get_timestamp()[0]
header["size"] = tuple(cbf_handle.get_image_size(0))
header["exposure_time"] = cbf_handle.get_integration_time()
header["wavelength"] = cbf_handle.get_wavelength()
# compute the true two-theta offset... which is kind-of going around
# the houses. oh and the real rotation axis.
origin = detector.get_pixel_coordinates(0, 0)
fast = detector.get_pixel_coordinates(0, 1)
slow = detector.get_pixel_coordinates(1, 0)
dfast = matrix.col([fast[j] - origin[j] for j in range(3)]).normalize()
dslow = matrix.col([slow[j] - origin[j] for j in range(3)]).normalize()
dorigin = matrix.col(origin)
dnormal = dfast.cross(dslow)
centre = -(dorigin - dorigin.dot(dnormal) * dnormal)
f = centre.dot(dfast)
s = centre.dot(dslow)
header["fast_direction"] = dfast.elems
header["slow_direction"] = dslow.elems
header["detector_origin_mm"] = f, s
header["rotation_axis"] = cbf_gonio_to_effective_axis(gonio)
two_theta = dfast.angle(matrix.col((0.0, 1.0, 0.0)), deg=True)
if math.fabs(two_theta - 180.0) < 1.0:
header["two_theta"] = 0
else:
header["two_theta"] = two_theta
# find the direct beam vector - takes a few steps
cbf_handle.find_category("axis")
# find record with equipment = source
cbf_handle.find_column("equipment")
cbf_handle.find_row("source")
# then get the vector and offset from this
beam_direction = []
for j in range(3):
cbf_handle.find_column("vector[%d]" % (j + 1))
beam_direction.append(cbf_handle.get_doublevalue())
# FIXME in here add in code to compute from first principles the beam
# centre etc.
detector.__swig_destroy__(detector)
del detector
gonio.__swig_destroy__(gonio)
del gonio
return header
def failover_cbf(cbf_file):
"""CBF files from the latest update to the PILATUS detector cause a
segmentation fault in diffdump. This is a workaround."""
header = {}
header["two_theta"] = 0.0
for record in open(cbf_file):
if "_array_data.data" in record:
break
if "PILATUS 2M" in record:
header["detector_class"] = "pilatus 2M"
header["detector"] = "dectris"
header["size"] = (1679, 1475)
continue
if "PILATUS3 2M" in record:
header["detector_class"] = "pilatus 2M"
header["detector"] = "dectris"
header["size"] = (1679, 1475)
continue
if "PILATUS 6M" in record:
header["detector_class"] = "pilatus 6M"
header["detector"] = "dectris"
header["size"] = (2527, 2463)
continue
if "PILATUS3 6M" in record:
header["detector_class"] = "pilatus 6M"
header["detector"] = "dectris"
header["size"] = (2527, 2463)
continue
if "Start_angle" in record:
header["phi_start"] = float(record.split()[-2])
continue
if "Angle_increment" in record:
header["phi_width"] = float(record.split()[-2])
continue
if "Exposure_period" in record:
header["exposure_time"] = float(record.split()[-2])
continue
if "Silicon sensor" in record:
header["sensor"] = 1000 * float(record.split()[4])
continue
if "Count_cutoff" in record:
header["saturation"] = int(record.split()[2])
continue
if "Detector_distance" in record:
header["distance"] = 1000 * float(record.split()[2])
continue
if "Wavelength" in record:
header["wavelength"] = float(record.split()[-2])
continue
if "Pixel_size" in record:
header["pixel"] = (
1000 * float(record.split()[2]),
1000 * float(record.split()[5]),
)
continue
if "Beam_xy" in record:
# N.B. this is swapped again for historical reasons
beam_pixels = list(map(
float,
record.replace("(", "").replace(")", "").replace(",", "").split()[2:4],
))
header["beam"] = (
beam_pixels[1] * header["pixel"][1],
beam_pixels[0] * header["pixel"][0],
)
header["raw_beam"] = (
beam_pixels[1] * header["pixel"][1],
beam_pixels[0] * header["pixel"][0],
)
continue
# try to get the date etc. literally.
try:
datestring = record.split()[-1].split(".")[0]
format = "%Y-%b-%dT%H:%M:%S"
struct_time = time.strptime(datestring, format)
header["date"] = time.asctime(struct_time)
header["epoch"] = time.mktime(struct_time)
except Exception:
pass
try:
if not "date" in header:
datestring = record.split()[-1].split(".")[0]
format = "%Y-%m-%dT%H:%M:%S"
struct_time = time.strptime(datestring, format)
header["date"] = time.asctime(struct_time)
header["epoch"] = time.mktime(struct_time)
except Exception:
pass
try:
if not "date" in header:
datestring = record.replace("#", "").strip().split(".")[0]
format = "%Y/%b/%d %H:%M:%S"
struct_time = time.strptime(datestring, format)
header["date"] = time.asctime(struct_time)
header["epoch"] = time.mktime(struct_time)
except Exception:
pass
header["phi_end"] = header["phi_start"] + header["phi_width"]
return header
last_format = None
def failover_dxtbx(image_file):
"""Failover to use the dxtbx to read the image headers..."""
# replacement dxtbx for rigaku saturns sometimes
from dxtbx.format.Registry import Registry
from dxtbx.model.detector_helpers_types import detector_helpers_types
global last_format
if last_format:
iformat = last_format
else:
iformat = Registry.find(image_file)
from xia2.Handlers.Streams import Debug
Debug.write("Using dxtbx format instance: %s" % iformat.__name__)
if not iformat.understand(image_file):
raise RuntimeError("image file %s not understood by dxtbx" % image_file)
last_format = iformat
i = iformat(image_file)
b = i.get_beam()
g = i.get_goniometer()
d = i.get_detector()
s = i.get_scan()
header = {}
if not hasattr(d, "get_image_size"):
# cope with new detector as array of panels dxtbx api
fast, slow = map(int, d[0].get_image_size())
_f, _s = d[0].get_pixel_size()
F = matrix.col(d[0].get_fast_axis())
S = matrix.col(d[0].get_slow_axis())
N = F.cross(S)
origin = matrix.col(d[0].get_origin())
else:
fast, slow = map(int, d.get_image_size())
_f, _s = d.get_pixel_size()
F = matrix.col(d.get_fast_axis())
S = matrix.col(d.get_slow_axis())
N = F.cross(S)
origin = matrix.col(d.get_origin())
beam = matrix.col(b.get_sample_to_source_direction())
# FIXME detector has methods to compute the beam centre now...
centre = -(origin - origin.dot(N) * N)
x = centre.dot(F)
y = centre.dot(S)
header["fast_direction"] = F.elems
header["slow_direction"] = S.elems
header["rotation_axis"] = g.get_rotation_axis()
if hasattr(s, "get_exposure_time"):
header["exposure_time"] = s.get_exposure_time()
else:
header["exposure_time"] = s.get_exposure_times()[0]
header["distance"] = math.fabs(origin.dot(N))
if math.fabs(beam.angle(N, deg=True) - 180) < 0.1:
header["two_theta"] = 180 - beam.angle(N, deg=True)
else:
header["two_theta"] = -beam.angle(N, deg=True)
header["raw_beam"] = x, y
header["phi_start"] = s.get_oscillation()[0]
header["phi_width"] = s.get_oscillation()[1]
header["phi_end"] = sum(s.get_oscillation())
header["pixel"] = _f, _s
# FIXME this is very bad as it relates to teh legacy backwards Mosflm
# beam centre standard still... FIXME-SCI-948
header["beam"] = y, x
header["epoch"] = s.get_image_epoch(s.get_image_range()[0])
header["date"] = time.ctime(header["epoch"])
header["wavelength"] = b.get_wavelength()
header["size"] = fast, slow
if hasattr(i, "detector_class"):
header["detector_class"] = i.detector_class
header["detector"] = i.detector
else:
if hasattr(d, "get_type"):
# cope with new detector as array of panels API
dtype = d.get_type()
else:
dtype = d[0].get_type()
detector_type = detector_helpers_types.get(
dtype, fast, slow, int(1000 * _f), int(1000 * _s)
)
header["detector_class"] = detector_type.replace("-", " ")
header["detector"] = detector_type.split("-")[0]
return header
def Diffdump(DriverType=None):
"""A factory for wrappers for the diffdump."""
DriverInstance = DriverFactory.Driver(DriverType)
class DiffdumpWrapper(DriverInstance.__class__):
"""Provide access to the functionality in diffdump."""
def __init__(self):
DriverInstance.__class__.__init__(self)
self.set_executable("diffdump")
self._image = None
self._header = {}
self._previous_crashed = False
return
def set_image(self, image):
"""Set an image to read the header of."""
self._image = image
self._header = {}
return
def _get_time(self, datestring):
"""Unpack a date string to a structure."""
if not datestring:
raise RuntimeError("empty date")
if datestring == "N/A":
# we don't have the date!
# set default to 0-epoch
return datetime.datetime(1970, 1, 1, 0, 0, 0).timetuple(), 0.0
# problem here is multiple formats for date strings!
# so have to change the structure...
# FIXME!
# pilatus: 2007/Sep/22 21:15:03.229
# format: %Y/%b/%d %H:%M:%S
# allow for milliseconds
ms = 0.0
struct_time = None
try:
format = "%Y/%b/%d %H:%M:%S"
ms = 0.001 * int(datestring.split(".")[1])
_datestring = datestring.split(".")[0]
struct_time = time.strptime(_datestring, format)
except Exception:
struct_time = None
# ADSC CBF format
if not struct_time:
try:
format = "%d/%m/%Y %H:%M:%S"
ms = 0.001 * int(datestring.split(".")[1])
_datestring = datestring.split(".")[0]
struct_time = time.strptime(_datestring, format)
except Exception:
struct_time = None
if not struct_time:
try:
struct_time = time.strptime(datestring)
except Exception:
struct_time = None
if not struct_time:
# this may be a mar format date...
# MMDDhhmmYYYY.ss - go figure
# or it could also be the format from
# saturn images like:
# 23-Oct-2006 13:42:36
if not "-" in datestring:
month = int(datestring[:2])
day = int(datestring[2:4])
hour = int(datestring[4:6])
minute = int(datestring[6:8])
year = int(datestring[8:12])
second = int(datestring[-2:])
d = datetime.datetime(year, month, day, hour, minute, second)
struct_time = d.timetuple()
else:
struct_time = time.strptime(datestring, "%d-%b-%Y %H:%M:%S")
return struct_time, ms
def _epoch(self, datestring):
"""Compute an epoch from a date string."""
t, ms = self._get_time(datestring)
return time.mktime(t) + ms
def _date(self, datestring):
"""Compute a human readable date from a date string."""
return time.asctime(self._get_time(datestring)[0])
def readheader(self):
"""Read the image header."""
if self._header:
return copy.deepcopy(self._header)
if HeaderCache.check(self._image):
self._header = HeaderCache.get(self._image)
return copy.deepcopy(self._header)
if os.path.getsize(self._image) == 0:
raise RuntimeError("empty file: %s" % self._image)
if not self._previous_crashed:
try:
return self.readheader_diffdump()
except Exception:
self._previous_crashed = True
try:
self._header = failover_dxtbx(self._image)
HeaderCache.put(self._image, self._header)
return copy.deepcopy(self._header)
except Exception:
traceback.print_exc(file=sys.stdout)
def readheader_diffdump(self):
"""Read the image header."""
global detector_class
# check that the input file exists..
if not os.path.exists(self._image):
raise RuntimeError("image %s does not exist" % self._image)
# consider using more recent code to read these images in
# first instance, to replace diffdump
try:
if ".cbf" in self._image[-4:]:
header = failover_cbf(self._image)
assert header["detector_class"] in ["pilatus 2M", "pilatus 6M"]
self._header = header
HeaderCache.put(self._image, self._header)
return copy.deepcopy(self._header)
except Exception:
if ".cbf" in self._image[-4:]:
header = failover_full_cbf(self._image)
self._header = header
HeaderCache.put(self._image, self._header)
return copy.deepcopy(self._header)
self.clear_command_line()
self.add_command_line(self._image)
self.start()
self.close_wait()
# why is this commented out?
# self.check_for_errors()
# results were ok, so get all of the output out
output = self.get_all_output()
if debug:
print("! all diffdump output follows")
for o in output:
print("! %s" % o[:-1])
# note that some of the records in the image header
# will depend on the detector class - this should
# really be fixed in the program diffdump...
detector = None
fudge = {
"adsc": {"wavelength": 1.0, "pixel": 1.0},
"dectris": {"wavelength": 1.0, "pixel": 1.0},
"rigaku": {"wavelength": 1.0, "pixel": 1.0},
"raxis": {"wavelength": 1.0, "pixel": 1.0},
"saturn": {"wavelength": 1.0, "pixel": 1.0},
"marccd": {"wavelength": 1.0, "pixel": 0.001},
"mini-cbf": {"wavelength": 1.0, "pixel": 1.0},
"cbf": {"wavelength": 1.0, "pixel": 1.0},
"mar": {"wavelength": 1.0, "pixel": 1.0},
}
cbf_format = False
for o in output:
l = o.split(":")
if len(l) > 1:
l2 = l[1].split()
else:
l2 = ""
# latest version of diffdump prints out manufacturer in
# place of image type...
if ("Image type" in o) or ("Manufacturer" in o):
if debug:
print("! found image type: %s" % l[1].strip().lower())
self._header["detector"] = l[1].strip().lower()
# correct spelling, perhaps
if self._header["detector"] == "mar ccd":
self._header["detector"] = "marccd"
if self._header["detector"] == "mar 345":
self._header["detector"] = "mar"
if self._header["detector"] == "rigaku saturn":
self._header["detector"] = "saturn"
if self._header["detector"] == "rigaku raxis":
self._header["detector"] = "raxis"
if self._header["detector"] == "rigaku r-axis":
self._header["detector"] = "raxis"
detector = self._header["detector"]
if "Format" in o:
if o.split()[-1] == "CBF":
cbf_format = True
# FIXME in here need to check a trust file timestamp flag
if "Exposure epoch" in o or "Collection date" in o:
try:
d = o[o.index(":") + 1 :]
if d.strip():
self._header["epoch"] = self._epoch(d.strip())
self._header["date"] = self._date(d.strip())
if debug:
print(
"! exposure epoch: %d" % int(self._header["epoch"])
)
else:
self._header["epoch"] = 0.0
self._header["date"] = ""
except Exception as e:
if debug:
print("! error interpreting date: %s" % str(e))
# this is badly formed....
# so perhaps read the file creation date?
# time.ctime(os.stat(filename)[8]) -> date
# os.stat(filename)[8] -> epoch
self._header["epoch"] = float(os.stat(self._image)[8])
self._header["date"] = time.ctime(self._header["epoch"])
# self._header['epoch'] = 0.0
# self._header['date'] = ''
if "Exposure time" in o:
self._header["exposure_time"] = float(l2[0])
if "Wavelength" in o:
self._header["wavelength"] = (
float(l2[0]) * fudge[detector]["wavelength"]
)
if debug:
print("! found wavelength: %f" % self._header["wavelength"])
if "Distance" in o:
self._header["distance"] = float(l[1].replace("mm", "").strip())
if "Beam cent" in o:
beam = (
l[1]
.replace("(", "")
.replace(")", "")
.replace("mm", " ")
.split(",")
)
self._header["beam"] = list(map(float, beam))
self._header["raw_beam"] = list(map(float, beam))
if "Image Size" in o:
image = l[1].replace("px", "")
image = image.replace("(", "").replace(")", "").split(",")
self._header["size"] = list(map(float, image))
if "Pixel Size" in o:
image = l[1].replace("mm", "")
x, y = image.replace("(", "").replace(")", "").split(",")
if detector == "marccd" and math.fabs(float(x)) < 1.0:
self._header["pixel"] = (float(x), float(y))
else:
self._header["pixel"] = (
float(x) * fudge[detector]["pixel"],
float(y) * fudge[detector]["pixel"],
)
if "Angle range" in o:
phi = list(map(float, l[1].split("->")))
self._header["phi_start"] = phi[0]
self._header["phi_end"] = phi[1]
self._header["phi_width"] = phi[1] - phi[0]
if "Oscillation" in o:
phi = list(map(float, l[1].replace("deg", "").split("->")))
self._header["phi_start"] = phi[0]
self._header["phi_end"] = phi[1]
self._header["phi_width"] = phi[1] - phi[0]
if "Oscillation range" in o:
phi = list(map(float, l[1].replace("deg", "").split("->")))
self._header["phi_start"] = phi[0]
self._header["phi_end"] = phi[1]
self._header["phi_width"] = phi[1] - phi[0]
if "Two Theta value" in o:
try:
two_theta = float(o.split(":")[1].split()[0])
self._header["two_theta"] = two_theta * -1.0
except ValueError:
self._header["two_theta"] = 0.0
# check to see if the beam centre needs to be converted
# from pixels to mm - e.g. MAR 300 images from APS ID 23
if (
"beam" in self._header
and "pixel" in self._header
and "size" in self._header
):
# look to see if the current beam is somewhere in the middle
# pixel count wise...
beam = self._header["beam"]
size = self._header["size"]
pixel = self._header["pixel"]
if math.fabs((beam[0] - 0.5 * size[0]) / size[0]) < 0.25:
new_beam = (beam[0] * pixel[0], beam[1] * pixel[1])
self._header["beam"] = new_beam
# check beam centre is sensible i.e. not NULL
if (
math.fabs(self._header["beam"][0]) < 0.01
and math.fabs(self._header["beam"][1]) < 0.01
):
size = self._header["size"]
pixel = self._header["pixel"]
self._header["beam"] = (
0.5 * size[0] * pixel[0],
0.5 * size[1] * pixel[1],
)
if (
"detector" in self._header
and "pixel" in self._header
and "size" in self._header
):
# compute the detector class
detector = self._header["detector"]
width = int(self._header["size"][0])
pixel = int(1000 * self._header["pixel"][0])
key = (detector, width, pixel)
self._header["detector_class"] = detector_class[key]
# check for mar ccd and perhaps reassign
if detector == "mar" and "ccd" in self._header["detector_class"]:
self._header["detector"] = "marccd"
# currently diffdump swaps x, y in beam centre output
if self._header["detector_class"] == "pilatus 2M":
x, y = self._header["beam"]
self._header["beam"] = y, x
x, y = self._header["raw_beam"]
self._header["raw_beam"] = y, x
else:
self._header["detector_class"] = "unknown"
# quickly check diffdump didn't do something stupid...
if detector == "adsc" and not cbf_format:
osc_start = 0.0
osc_range = 0.0
size = int(open(self._image, "r").read(20).split()[-1])
hdr = open(self._image, "r").read(size)
for record in hdr.split("\n"):
if "OSC_START" in record:
osc_start = float(record.replace(";", "").split("=")[-1])
if "OSC_RANGE" in record:
osc_range = float(record.replace(";", "").split("=")[-1])
self._header["phi_start"] = osc_start
self._header["phi_width"] = osc_range
self._header["phi_end"] = osc_start + osc_range
if detector == "adsc" and abs(self._header.get("two_theta", 0.0)) > 1.0:
raise RuntimeError("adsc + two-theta not supported")
HeaderCache.put(self._image, self._header)
return copy.deepcopy(self._header)
def gain(self):
"""Estimate gain for this image."""
# check that the input file exists..
if not os.path.exists(self._image):
raise RuntimeError("image %s does not exist" % self._image)
self.add_command_line("-gain")
self.add_command_line(self._image)
self.start()
self.close_wait()
self.check_for_errors()
# results were ok, so get all of the output out
output = self.get_all_output()
gain = 0.0
for o in output:
l = o.split(":")
if "Estimation of gain" in o:
# This often seems to be an underestimate...
gain = 1.333 * float(l[1])
return gain
return DiffdumpWrapper()
```
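A hedged usage sketch for the factory above. It assumes either the `diffdump` binary or the dxtbx fallback is available; the image name is a placeholder. `readheader()` returns a plain dictionary and caches it per image.
```python
# Hypothetical usage sketch; the image path is a placeholder.
dd = Diffdump()
dd.set_image("/data/demo/demo_0001.cbf")  # placeholder
header = dd.readheader()
print(header.get("detector_class"), header.get("wavelength"))
print(header.get("phi_start"), header.get("phi_width"), header.get("beam"))
```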
#### File: Wrappers/XIA/FrenchWilson.py
```python
from __future__ import absolute_import, division, print_function
import os
from xia2.Driver.DriverFactory import DriverFactory
def FrenchWilson(DriverType=None):
"""A factory for FrenchWilsonWrapper classes."""
DriverInstance = DriverFactory.Driver(DriverType)
class FrenchWilsonWrapper(DriverInstance.__class__):
"""A wrapper for cctbx French and Wilson analysis."""
def __init__(self):
DriverInstance.__class__.__init__(self)
self.set_executable("cctbx.python")
self._anomalous = False
self._nres = 0
# should we do wilson scaling?
self._wilson = True
self._b_factor = 0.0
self._moments = None
self._wilson_fit_grad = 0.0
self._wilson_fit_grad_sd = 0.0
self._wilson_fit_m = 0.0
self._wilson_fit_m_sd = 0.0
self._wilson_fit_range = None
# numbers of reflections in and out, and number of absences
# counted
self._nref_in = 0
self._nref_out = 0
self._nabsent = 0
self._xmlout = None
def set_anomalous(self, anomalous):
self._anomalous = anomalous
def set_wilson(self, wilson):
"""Set the use of Wilson scaling - if you set this to False
Wilson scaling will be switched off..."""
self._wilson = wilson
def set_hklin(self, hklin):
self._hklin = hklin
def get_hklin(self):
return self._hklin
def set_hklout(self, hklout):
self._hklout = hklout
def get_hklout(self):
return self._hklout
def check_hklout(self):
return self.checkHklout()
def get_xmlout(self):
return self._xmlout
def truncate(self):
"""Actually perform the truncation procedure."""
from xia2.Modules import CctbxFrenchWilson as fw_module
self.add_command_line(fw_module.__file__)
self.add_command_line(self._hklin)
self.add_command_line("hklout=%s" % self._hklout)
if self._anomalous:
self.add_command_line("anomalous=true")
else:
self.add_command_line("anomalous=false")
self.start()
self.close_wait()
try:
self.check_for_errors()
except RuntimeError:
try:
os.remove(self.get_hklout())
except Exception:
pass
raise RuntimeError("truncate failure")
lines = self.get_all_output()
for i, line in enumerate(lines):
if "ML estimate of overall B value:" in line:
self._b_factor = float(lines[i + 1].strip().split()[0])
def get_b_factor(self):
return self._b_factor
def get_wilson_fit(self):
return (
self._wilson_fit_grad,
self._wilson_fit_grad_sd,
self._wilson_fit_m,
self._wilson_fit_m_sd,
)
def get_wilson_fit_range(self):
return self._wilson_fit_range
def get_moments(self):
return self._moments
def get_nref_in(self):
return self._nref_in
def get_nref_out(self):
return self._nref_out
def get_nabsent(self):
return self._nabsent
return FrenchWilsonWrapper()
```
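A hedged usage sketch for the factory above, assuming `cctbx.python` and the `xia2.Modules.CctbxFrenchWilson` module are available; the MTZ names are placeholders.
```python
# Hypothetical usage sketch; MTZ file names are placeholders.
fw = FrenchWilson()
fw.set_hklin("scaled_intensities.mtz")     # placeholder input
fw.set_hklout("truncated_amplitudes.mtz")  # placeholder output
fw.set_anomalous(True)
fw.truncate()
print("ML estimate of overall B value:", fw.get_b_factor())
```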
#### File: Wrappers/XIA/Printpeaks.py
```python
from __future__ import absolute_import, division, print_function
import math
import os
from xia2.Driver.DriverFactory import DriverFactory
from xia2.Wrappers.XIA.Diffdump import Diffdump
from xia2.Wrappers.XIA.PrintpeaksMosflm import PrintpeaksMosflm
def Printpeaks(DriverType=None):
"""A factory for wrappers for the printpeaks."""
if not "XIA2_USE_PRINTPEAKS" in os.environ:
return PrintpeaksMosflm(DriverType=DriverType)
DriverInstance = DriverFactory.Driver(DriverType)
class PrintpeaksWrapper(DriverInstance.__class__):
"""Provide access to the functionality in printpeaks."""
def __init__(self):
DriverInstance.__class__.__init__(self)
self.set_executable("printpeaks")
self._image = None
self._peaks = {}
def set_image(self, image):
"""Set an image to read the header of."""
self._image = image
self._peaks = {}
def get_maxima(self):
"""Run diffdump, printpeaks to get a list of diffraction maxima
at their image positions, to allow for further analysis."""
if not self._image:
raise RuntimeError("image not set")
if not os.path.exists(self._image):
raise RuntimeError("image %s does not exist" % self._image)
dd = Diffdump()
dd.set_image(self._image)
header = dd.readheader()
beam = header["raw_beam"]
pixel = header["pixel"]
self.add_command_line(self._image)
self.start()
self.close_wait()
self.check_for_errors()
# results were ok, so get all of the output out
output = self.get_all_output()
peaks = []
for record in output:
if not "Peak" in record[:4]:
continue
lst = record.replace(":", " ").split()
x = float(lst[4])
y = float(lst[6])
i = float(lst[-1])
x += beam[0]
y += beam[1]
x /= pixel[0]
y /= pixel[1]
peaks.append((x, y, i))
return peaks
def printpeaks(self):
"""Run printpeaks and get the list of peaks out, then decompose
this to a histogram."""
if not self._image:
raise RuntimeError("image not set")
if not os.path.exists(self._image):
raise RuntimeError("image %s does not exist" % self._image)
self.add_command_line(self._image)
self.start()
self.close_wait()
self.check_for_errors()
# results were ok, so get all of the output out
output = self.get_all_output()
peaks = []
self._peaks = {}
for record in output:
if not "Peak" in record[:4]:
continue
intensity = float(record.split(":")[-1])
peaks.append(intensity)
# now construct the histogram
log_max = int(math.log10(peaks[0])) + 1
max_limit = int(math.pow(10.0, log_max))
for limit in [5, 10, 20, 50, 100, 200, 500, 1000]:
if limit > max_limit:
continue
self._peaks[float(limit)] = len([j for j in peaks if j > limit])
return self._peaks
def threshold(self, nspots):
if not self._peaks:
peaks = self.printpeaks()
else:
peaks = self._peaks
keys = sorted(peaks.keys())
keys.reverse()
for thresh in keys:
if peaks[thresh] > nspots:
return thresh
return min(keys)
def screen(self):
if not self._image:
raise RuntimeError("image not set")
if not os.path.exists(self._image):
raise RuntimeError("image %s does not exist" % self._image)
self.add_command_line("-th")
self.add_command_line("10")
self.add_command_line(self._image)
self.start()
self.close_wait()
self.check_for_errors()
# results were ok, so get all of the output out
output = self.get_all_output()
peaks = []
self._peaks = {}
for record in output:
if not "Peak" in record[:4]:
continue
intensity = float(record.split(":")[-1])
peaks.append(intensity)
print(len(peaks), max(peaks))
if len(peaks) < 10:
return "blank"
return "ok"
def getpeaks(self):
"""Just get the list of peaks out, as (x, y, i)."""
if not self._image:
raise RuntimeError("image not set")
if not os.path.exists(self._image):
raise RuntimeError("image %s does not exist" % self._image)
self.add_command_line(self._image)
self.start()
self.close_wait()
self.check_for_errors()
# results were ok, so get all of the output out
output = self.get_all_output()
peaks = []
for record in output:
if not "Peak" in record[:4]:
continue
lst = record.split(":")
x = float(lst[1].split()[0])
y = float(lst[2].split()[0])
i = float(lst[4])
peaks.append((x, y, i))
return peaks
return PrintpeaksWrapper()
``` |
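A hedged usage sketch for the factory above. Note that without `XIA2_USE_PRINTPEAKS` in the environment the factory returns the Mosflm-based wrapper instead, which is assumed here to expose the same `set_image`/`printpeaks`/`threshold` interface; the image path is a placeholder.
```python
# Hypothetical usage sketch; the image path is a placeholder.
pp = Printpeaks()
pp.set_image("/data/demo/demo_0001.img")  # placeholder
histogram = pp.printpeaks()  # {intensity threshold: peaks above threshold}
print(histogram)
print("threshold giving > 50 spots:", pp.threshold(50))
```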
{
"source": "jorgediazjr/fast_dp",
"score": 3
} |
#### File: fast_dp/lib/xds_reader.py
```python
import sys
import re
from cell_spacegroup import constrain_cell, lattice_to_spacegroup
from pointless_reader import read_pointless_xml
def read_xds_idxref_lp(idxref_lp_file):
'''Read the XDS IDXREF.LP file and return a dictionary indexed by the
spacegroup number (ASSERT: this is the lowest symmetry spacegroup for
the corresponding lattice) containing unit cell constants and a
penalty. N.B. this also works from CORRECT.LP for the autoindexing
results.'''
# match indexing solution records of the form ' * <int> <lattice> ...' with a regular expression
regexp = re.compile(r'^ \*\ ')
results = {}
for record in open(idxref_lp_file, 'r').readlines():
if regexp.match(record):
tokens = record.split()
spacegroup = lattice_to_spacegroup(tokens[2])
cell = tuple(map(float, tokens[4:10]))
constrained_cell = constrain_cell(tokens[2][0], cell)
penalty = float(tokens[3])
if spacegroup in results:
if penalty < results[spacegroup][0]:
results[spacegroup] = penalty, constrained_cell
else:
results[spacegroup] = penalty, constrained_cell
if 'DETECTOR COORDINATES (PIXELS) OF DIRECT BEAM' in record:
            results['beam centre pixels'] = list(map(float, record.split()[-2:]))
assert('beam centre pixels' in results)
return results
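# Editor's sketch of the returned structure (values are hypothetical):
#     read_xds_idxref_lp('IDXREF.LP') ->
#         {16: (1.2, (78.1, 78.1, 37.0, 90.0, 90.0, 90.0)),
#          75: (0.4, (78.1, 78.1, 37.0, 90.0, 90.0, 90.0)),
#          'beam centre pixels': [1231.5, 1263.8]}
# i.e. one (penalty, constrained_cell) entry per candidate lattice, keyed by the
# lowest-symmetry spacegroup number, plus the direct beam position in pixels.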
def read_xds_correct_lp(correct_lp_file):
'''Read the XDS CORRECT.LP file and get out the spacegroup and
unit cell constants it decided on.'''
unit_cell = None
space_group_number = None
for record in open(correct_lp_file, 'r').readlines():
if 'SPACE_GROUP_NUMBER=' in record:
try:
space_group_number = int(record.split()[-1])
except:
space_group_number = 0
if 'UNIT_CELL_CONSTANTS=' in record and 'used' not in record:
unit_cell = tuple(map(float, record.split()[-6:]))
return unit_cell, space_group_number
def read_correct_lp_get_resolution(correct_lp_file):
'''Read the CORRECT.LP file and get an estimate of the resolution limit.
This should then be recycled to a rerun of CORRECT, from which the
reflections will be merged to get the statistics.'''
correct_lp = open(correct_lp_file, 'r').readlines()
rec = -1
for j in range(len(correct_lp)):
record = correct_lp[j]
if 'RESOLUTION RANGE I/Sigma Chi^2 R-FACTOR R-FACTOR' in record:
rec = j + 3
break
if rec < 0:
raise RuntimeError('resolution information not found')
j = rec
while '--------' not in correct_lp[j]:
isigma = float(correct_lp[j].split()[2])
if isigma < 1:
return float(correct_lp[j].split()[1])
j += 1
# this will assume that strong reflections go to the edge of the detector
# => do not need to feed back a resolution limit...
return None
``` |
{
"source": "jorgediazjr/zero_fast_dp",
"score": 2
} |
#### File: zero_fast_dp/lib/cell_spacegroup.py
```python
import os
from cctbx import xray
from cctbx.sgtbx import space_group
from cctbx.sgtbx import space_group_symbols
from cctbx.uctbx import unit_cell
from cctbx.crystal import symmetry
import sys
if sys.version_info < (3, 0):
version = 2
def ersatz_pointgroup(spacegroup_name):
'''Guess the pointgroup for the spacegroup by mapping from short to
long name, then taking 1st character from each block.'''
pg = None
for record in open(
os.path.join(os.environ['CLIBD'], 'symop.lib'), 'r').readlines():
if ' ' in record[:1]:
continue
if spacegroup_name == record.split()[3]:
pg = record.split()[4][2:]
elif spacegroup_name == record.split('\'')[1].replace(' ', ''):
pg = record.split()[4][2:]
if not pg:
if version == 2:
try:
raise RuntimeError, 'spacegroup %s unknown' % spacegroup_name
except:
pass
else:
raise RuntimeError('spacegroup {} unknown'.format(spacegroup_name))
# FIXME this is probably not correct for small molecule work...
# just be aware of this, in no danger right now of handling non-chiral
# spacegroups
if '/' in pg:
pg = pg.split('/')[0]
result = spacegroup_name[0] + pg
if 'H3' in result:
result = result.replace('H3', 'R3')
return result
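# Editor's sketch (assumes the standard CCP4 symop.lib is on $CLIBD): for an
# orthorhombic spacegroup such as 'P212121' the lookup typically yields the
# pointgroup '222', so ersatz_pointgroup('P212121') would return 'P222'.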
def spacegroup_to_lattice(input_spacegroup):
    '''Generate the lattice symbol for a spacegroup by looking it up in
    symop.lib: a lowercase letter for the crystal system followed by the
    centring letter (the first character of the spacegroup name).'''
def fix_hH(lattice):
if lattice != 'hH':
return lattice
return 'hR'
mapping = {'TRICLINIC':'a',
'MONOCLINIC':'m',
'ORTHORHOMBIC':'o',
'TETRAGONAL':'t',
'TRIGONAL':'h',
'HEXAGONAL':'h',
'CUBIC':'c'}
if type(input_spacegroup) == type(u''):
input_spacegroup = str(input_spacegroup)
if type(input_spacegroup) == type(''):
for record in open(
os.path.join(os.environ['CLIBD'], 'symop.lib'), 'r').readlines():
if ' ' in record[:1]:
continue
if input_spacegroup == record.split()[3]:
return fix_hH(mapping[record.split()[5]] + record.split()[3][0])
elif type(input_spacegroup) == type(0):
for record in open(
os.path.join(os.environ['CLIBD'], 'symop.lib'), 'r').readlines():
if ' ' in record[:1]:
continue
if input_spacegroup == int(record.split()[0]):
return fix_hH(mapping[record.split()[5]] + record.split()[3][0])
else:
if version == 2:
try:
raise RuntimeError, 'bad type for input: %s' % type(input_spacegroup)
except:
pass
else:
raise RuntimeError('bad type for input: {}'.format(type(input_spacegroup)))
return None
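# Editor's sketch (assumes $CLIBD/symop.lib is available): both the name and the
# number form resolve to the same two-letter lattice, e.g.
#     spacegroup_to_lattice('P212121') -> 'oP'
#     spacegroup_to_lattice(19)        -> 'oP'
# with the fix_hH() helper mapping the 'hH' setting to 'hR'.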
def check_spacegroup_name(spacegroup_name):
'''Will return normalised name if spacegroup name is recognised,
raise exception otherwise. For checking command-line options.'''
try:
j = int(spacegroup_name)
if j > 230 or j <= 0:
if version == 2:
try:
raise RuntimeError, 'spacegroup number nonsense: %s' \
% spacegroup_name
except:
pass
else:
raise RuntimeError('spacegroup number nonsense: {}'.format(
spacegroup_name))
return spacegroup_number_to_name(j)
except ValueError as e:
pass
found_spacegroup = None
for record in open(
os.path.join(os.environ['CLIBD'], 'symop.lib'), 'r').readlines():
if ' ' in record[:1]:
continue
if spacegroup_name == record.split()[3]:
return spacegroup_name
if version == 2:
try:
raise RuntimeError, 'spacegroup name "%s" not recognised' % spacegroup_name
except:
pass
else:
raise RuntimeError('spacegroup name "{}" not recognised'.format(spacegroup_name))
def check_split_cell(cell_string):
'''Will return tuple of floats a, b, c, alpha, beta, gamma from input
cell string which contains a,b,c,alpha,beta,gamma raising an exception
if there is a problem.'''
ideal_string = 'a,b,c,alpha,beta,gamma'
if not cell_string.count(',') == 5:
if version == 2:
try:
raise RuntimeError, '%s should be of the form %s' % \
(cell_string, ideal_string)
except:
pass
else:
raise RuntimeError('{} should be of the form {}'.format(
cell_string, ideal_string))
a, b, c, alpha, beta, gamma = tuple(
map(float, cell_string.split(',')))
return a, b, c, alpha, beta, gamma
def constrain_cell(lattice_class, cell):
'''Constrain cell to fit lattice class x.'''
a, b, c, alpha, beta, gamma = cell
if lattice_class == 'a':
return (a, b, c, alpha, beta, gamma)
elif lattice_class == 'm':
return (a, b, c, 90.0, beta, 90.0)
elif lattice_class == 'o':
return (a, b, c, 90.0, 90.0, 90.0)
elif lattice_class == 't':
e = (a + b) / 2.0
return (e, e, c, 90.0, 90.0, 90.0)
elif lattice_class == 'h':
e = (a + b) / 2.0
return (e, e, c, 90.0, 90.0, 120.0)
elif lattice_class == 'c':
e = (a + b + c) / 3.0
return (e, e, e, 90.0, 90.0, 90.0)
if version == 2:
try:
raise RuntimeError, 'lattice class not recognised: %s' % lattice_class
except:
pass
else:
raise RuntimeError('lattice class not recognised: {}'.format(lattice_class))
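# Editor's sketch: constraining a nearly tetragonal cell averages a and b and
# resets the angles, e.g.
#     constrain_cell('t', (78.1, 78.3, 37.0, 89.9, 90.1, 90.0))
#     -> (78.2, 78.2, 37.0, 90.0, 90.0, 90.0)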
def spacegroup_number_to_name(spg_num):
'''Convert a spacegroup number to a more readable name.'''
database = {}
for record in open(
os.path.join(os.environ['CLIBD'], 'symop.lib'), 'r').readlines():
if ' ' in record[:1]:
continue
number = int(record.split()[0])
name = record.split('\'')[1].strip()
database[number] = name
return database[spg_num]
def lattice_to_spacegroup(lattice):
''' Converts a lattice to the spacegroup with the lowest symmetry
possible for that lattice'''
l2s = {
'aP':1, 'mP':3, 'mC':5, 'mI':5,
'oP':16, 'oC':21, 'oI':23, 'oF':22,
'tP':75, 'tI':79, 'hP':143, 'hR':146,
'hH':146, 'cP':195, 'cF':196, 'cI':197
}
return l2s[lattice]
def lauegroup_to_lattice(lauegroup):
'''Convert a Laue group representation (from pointless, e.g. I m m m)
to something useful, like the implied crystal lattice (in this
case, oI.)'''
# this has been calculated from the results of Ralf GK's sginfo and a
# little fiddling...
#
# 19/feb/08 added mI record as pointless has started producing this -
# why??? this is not a "real" spacegroup... may be able to switch this
# off...
# 'I2/m': 'mI',
lauegroup_to_lattice = {'Ammm': 'oA',
'C2/m': 'mC',
'I2/m': 'mI',
'Cmmm': 'oC',
'Fm-3': 'cF',
'Fm-3m': 'cF',
'Fmmm': 'oF',
'H-3': 'hR',
'H-3m': 'hR',
'R-3:H': 'hR',
'R-3m:H': 'hR',
'R-3': 'hR',
'R-3m': 'hR',
'I4/m': 'tI',
'I4/mmm': 'tI',
'Im-3': 'cI',
'Im-3m': 'cI',
'Immm': 'oI',
'P-1': 'aP',
'P-3': 'hP',
'P-3m': 'hP',
'P2/m': 'mP',
'P4/m': 'tP',
'P4/mmm': 'tP',
'P6/m': 'hP',
'P6/mmm': 'hP',
'Pm-3': 'cP',
'Pm-3m': 'cP',
'Pmmm': 'oP'}
updated_laue = ''
for l in lauegroup.split():
if not l == '1':
updated_laue += l
return lauegroup_to_lattice[updated_laue]
def generate_primitive_cell(unit_cell_constants, space_group_name):
'''For a given set of unit cell constants and space group, determine the
corresponding primitive unit cell...'''
uc = unit_cell(unit_cell_constants)
sg = space_group(space_group_symbols(space_group_name).hall())
cs = symmetry(unit_cell = uc,
space_group = sg)
csp = cs.change_basis(cs.change_of_basis_op_to_primitive_setting())
return csp.unit_cell()
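# Editor's note (requires cctbx): for a centred lattice the primitive setting has
# fewer lattice points per cell, so e.g. an I-centred cubic cell comes back as a
# primitive cell with half the volume of the conventional one.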
if __name__ == '__main__':
for token in sys.argv[1:]:
        print(ersatz_pointgroup(token))
``` |
{
"source": "Jorgedlara/challenge-python-03",
"score": 3
} |
#### File: challenge-python-03/src/main.py
```python
import re
def run():
    with open('encoded.txt', 'r', encoding='utf-8') as f:
        texto_cifrado = f.read()
    mensaje = re.findall("[a-z]", texto_cifrado)
mensaje_oculto = ''.join(mensaje)
print(mensaje_oculto)
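# Editor's note: re.findall("[a-z]", texto) keeps only the lowercase ASCII
# letters, so the hidden message is read straight out of the noise, e.g.
#     re.findall("[a-z]", "H0l4 AmIgO") -> ['l', 'm', 'g']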
if __name__ == '__main__':
run()
``` |
{
"source": "jorgedouglas71/pyplan-ide",
"score": 2
} |
#### File: pyplan_engine/application/views.py
```python
import os
from django.http import HttpResponse, JsonResponse
from django.shortcuts import render
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response
from pyplan_engine.classes.Application import Application
from pyplan_engine.utils.exception_handler import genericApiException
@api_view(["GET"])
def test(request):
try:
return JsonResponse({"app": "OK"}, safe=False)
except Exception as e:
return genericApiException(e)
@api_view(["GET"])
def getStatus(request):
"""GET app status"""
try:
app = Application()
return JsonResponse(app.getStatus(), safe=False)
except Exception as e:
return genericApiException(e)
@api_view(["GET"])
def sleep(request):
""" Try sleep 10 seconds
"""
try:
import time
time.sleep(10)
return JsonResponse({"sleep": "OK"}, safe=False)
except ValueError as e:
return genericApiException(e)
@api_view(["GET"])
def checkRead(request):
"""Check read"""
try:
_path = request.query_params.get("path")
import os
res = str(os.listdir(_path))
return HttpResponse(res)
except ValueError as e:
return genericApiException(e)
@api_view(["GET"])
def checkWrite(request):
"""Check write"""
try:
_path = request.query_params.get("path")
import os
_file = open(_path + "test.txt", "w")
_file.write("engine write test")
_file.close()
return HttpResponse(_path + "test.txt")
except ValueError as e:
return genericApiException(e)
@api_view(["GET"])
def healthCheck(request, *args, **kargs):
return Response(status=status.HTTP_200_OK)
@api_view(["GET"])
def cmdtest(request, *args, **kargs):
_cmd = request.query_params.get("cmd")
_list = str(_cmd).split(",")
import subprocess
p = subprocess.Popen(_list, stdout=subprocess.PIPE)
out = p.stdout.read()
out = str(out).replace("\\n", "<br>")
return HttpResponse(out)
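# Editor's note (the URL wiring is assumed, it is not shown in this file): once
# these views are routed they can be exercised with plain GET requests, e.g.
#     GET .../checkRead?path=/tmp    -> directory listing as text
#     GET .../checkWrite?path=/tmp/  -> writes /tmp/test.txt
# Only the query parameters above come from the code; the URL prefixes are
# hypothetical.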
```
#### File: classes/evaluators/PandasEvaluator.py
```python
import json
import numpy as np
import pandas as pd
from pyplan_engine.classes.evaluators.BaseEvaluator import BaseEvaluator
from pyplan_engine.common.classes.filterChoices import filterChoices
from pyplan_engine.common.classes.indexValuesReq import IndexValuesReq
import cubepy
from cubepy.cube import kindToString
class PandasEvaluator(BaseEvaluator):
PAGESIZE = 100
def evaluateNode(self, result, nodeDic, nodeId, dims=None, rows=None, columns=None, summaryBy="sum", bottomTotal=False, rightTotal=False, fromRow=0, toRow=0):
sby = np.nansum
if summaryBy == 'avg':
sby = np.nanmean
elif summaryBy == 'max':
sby = np.nanmax
elif summaryBy == 'min':
sby = np.nanmin
if (fromRow is None) or int(fromRow) <= 0:
fromRow = 1
if (toRow is None) or int(toRow) < 1:
toRow = 100
fromRow = int(fromRow)
toRow = int(toRow)
_filters = {}
_rows = []
_columns = []
theResult = self.prepareDataframeForTable(result)
if not rows is None:
for row in rows:
if self.hasDim(theResult, str(row["field"]).split(".")[0]):
_rows.append(str(row["field"]).split(".")[0])
self.addToFilter(row, _filters)
if not columns is None:
for column in columns:
if self.hasDim(theResult, str(column["field"]).split(".")[0]):
_columns.append(str(column["field"]).split(".")[0])
self.addToFilter(column, _filters)
if not dims is None:
for dim in dims:
if self.hasDim(theResult, str(dim["field"]).split(".")[0]):
self.addToFilter(dim, _filters)
res = None
pageInfo = None
dfResult = None
if len(_rows) == 0 and len(_columns) == 0:
dfResult = self.applyFilter(theResult, _filters)
            # if the frame has named index levels, aggregate everything into a single "total" row
if not dfResult.index is None and not dfResult.index.names is None and len(dfResult.index.names) > 0 and not dfResult.index.names[0] is None:
serieResult = dfResult.agg(sby)
dfResult = pd.DataFrame({"total": serieResult}).T
else:
needT = False
if len(_rows) == 0:
needT = True
aux = _rows
_rows = _columns
_columns = aux
_filteredDataFrame = self.applyFilter(theResult, _filters)
            # Don't use margins=True to obtain totals: it has a bug for dataframes with more than 5 index levels
dfResult = pd.DataFrame.pivot_table(
_filteredDataFrame, index=_rows, columns=_columns, aggfunc=sby, margins=False, margins_name="Total")
if needT:
dfResult = dfResult.T
aux = _rows
_rows = _columns
_columns = aux
if bottomTotal and dfResult.shape[0] > 1:
row_total = sby(dfResult.values, axis=0)
new_values = np.concatenate(
[dfResult.values, [row_total]], axis=0)
new_index = pd.Index(np.concatenate(
[dfResult.index.values, ["Total"]]))
_df = pd.DataFrame(
data=new_values, columns=dfResult.columns, index=new_index)
dfResult = _df
if rightTotal and dfResult.shape[1] > 1:
row_total = sby(dfResult.values, axis=1)
new_values = np.concatenate(
[dfResult.values, row_total.reshape(row_total.size, 1)], axis=1)
new_columns = np.concatenate([dfResult.columns, ["Total"]])
_df = pd.DataFrame(
data=new_values, columns=new_columns, index=dfResult.index)
dfResult = _df
if (dfResult.shape[0] > self.PAGESIZE):
if int(toRow) > dfResult.shape[0]:
toRow = dfResult.shape[0]
pageInfo = {
"fromRow": int(fromRow),
"toRow": int(toRow),
"totalRows": dfResult.shape[0]
}
#res = dfResult[fromRow-1:toRow].to_json(orient='split')
_range = list(range(fromRow-1, toRow))
if bottomTotal:
_range = _range + [len(dfResult)-1]
res = dfResult.iloc[_range].to_json(
orient='split', date_format='iso')
else:
res = dfResult[:300].to_json(orient='split', date_format='iso')
return self.createResult(res, type(theResult), resultIsJson=True, pageInfo=pageInfo, node=nodeDic[nodeId], onRow=(_rows[0] if len(_rows) > 0 else None), onColumn=(_columns[0] if len(_columns) > 0 else None))
def addToFilter(self, dim, filters):
if "values" in dim and dim["values"] is not None and len(dim["values"]) > 0:
for itemValue in dim["values"]:
field = str(dim["field"]).split(".")[0]
# if (field in filters):
# filters[field] += " or " + field + "==" + "'" + itemValue["value"] + "'"
# else:
# filters[field] = "( " + field + "==" + "'" + itemValue["value"] + "'"
if (field in filters):
filters[field].append(itemValue["value"])
else:
filters[field] = [itemValue["value"]]
def applyFilter(self, result, filters):
if not result is None:
if len(filters) > 0:
res = result
for key in filters:
res = res[res.index.get_level_values(
key).isin(filters[key])]
return res
else:
return result
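    # Editor's sketch of how applyFilter() is used (level names are hypothetical):
    # with a MultiIndexed frame indexed by ('region', 'year'),
    #     self.applyFilter(df, {'region': ['North', 'South'], 'year': [2020]})
    # keeps only the rows whose index levels fall in those lists, via
    # df.index.get_level_values(level).isin(values) applied level by level.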
def hasDim(self, result, dim):
if dim in result.index.names:
return True
elif dim in result.dtypes.index:
return True
elif dim in result.columns:
return True
else:
return False
def isIndexed(self, result):
if not result is None:
result = self.prepareDataframeForTable(result)
obj = result
if isinstance(obj, pd.DataFrame):
return self._isIndexedDataframe(obj)
return False
def getIndexes(self, node, result=None):
res = []
if not node._result is None:
obj = self.prepareDataframeForTable(node._result)
if isinstance(obj, pd.DataFrame):
if self.isIndexed(obj):
res = list(obj.index.names)
res = [x + "." + node.identifier for x in res]
return res
def getIndexesWithLevels(self, node, result=None):
res = []
if result is None:
result = node._result
if not result is None:
result = self.prepareDataframeForTable(result)
if self.isIndexed(result):
for indexItem in result.index.names:
itemDim = indexItem.split(",")[0]
item = {"field": itemDim+"."+node.identifier,
"name": itemDim, "description": "", "levels": []}
if node.model.existNode(itemDim):
levelNode = node.model.getNode(itemDim)
if levelNode.title:
item["name"] = levelNode.title
item["description"] = levelNode.description
if levelNode.numberFormat:
item["numberFormat"] = levelNode.numberFormat
else:
                        # try to generate a default formatter
if "datetime" in result.index.get_level_values(itemDim).dtype.name:
item["numberFormat"] = "2,DD,0,,0,0,4,0,$,5,FULL,0"
res.append(item)
return res
def getIndexValues(self, nodeDic, data: IndexValuesReq, result=None):
res = []
if data.node_id:
if (not data.node_id is None) & (data.node_id in nodeDic):
node = nodeDic[data.node_id]
if result is None:
result = node.result
if (f"{data.index_id}.{data.node_id}") in self.getIndexes(node):
if self.isIndexed(result):
prepared_result = self.prepareDataframeForTable(
node.result)
for index in prepared_result.index.levels:
if index.name == data.index_id:
res = self.checkDateFormat(
index.values).tolist()
break
else:
res = result[data.index_id].unique().tolist()
elif data.index_id:
if result is None:
result = nodeDic[data.index_id].result
res = list(result)
if data.text1:
text1 = data.text1.lower()
if data.filter == filterChoices.CONTAINS.value:
res = list(
filter(lambda item: text1 in str(item).lower(), res))
elif data.filter == filterChoices.NOT_CONTAINS.value:
res = list(
filter(lambda item: not text1 in str(item).lower(), res))
return res
def getIndexType(self, nodeDic, nodeId, indexId):
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
res = "S"
if nodeId:
if (not nodeId is None) & (nodeId in nodeDic):
node = nodeDic[nodeId]
nodeIndexes = self.getIndexes(node)
if (indexId+"."+nodeId) in nodeIndexes:
if self.isIndexed(node.result):
prepared_result = self.prepareDataframeForTable(
node.result)
for index in prepared_result.index.levels:
if index.name == indexId:
res = "S"
break
else:
#res = list(node.result[indexId].unique())[:1000]
res = "S"
elif indexId in nodeIndexes and isinstance(node.result, cubepy.Cube):
if str(node.result.axis(indexId).values.dtype) in numerics:
res = "N"
else:
res = "S"
return res
def getCubeMetadata(self, result, nodeDic, nodeId):
res = None
_result = self.prepareDataframeForPivot(result)
if isinstance(_result, pd.DataFrame):
res = {
"dims": [],
"measures": [],
"aggregator": "sum",
"isEditable": False,
"nodeProperties": {
"title": nodeDic[nodeId].title if not nodeDic[nodeId].title is None else nodeDic[nodeId].identifier,
"numberFormat": nodeDic[nodeId].numberFormat
}
}
for dim in self.getCubeIndexes(_result, nodeDic, nodeId):
field = dim.split(".")[0]
itemDim = {
"field": dim,
"name": field
}
if field in nodeDic:
if nodeDic[field].numberFormat:
itemDim["numberFormat"] = nodeDic[field].numberFormat
else:
if "datetime" in _result.index.get_level_values(field).dtype.name:
itemDim["numberFormat"] = "2,DD,0,,0,0,4,0,$,5,FULL,0"
res["dims"].append(itemDim)
res["dims"].append({
"field": "data_index",
"name": "Data Index"
})
numerics = ['int16', 'int32', 'int64',
'float16', 'float32', 'float64']
for col in _result.columns:
res["measures"].append({
"field": str(col),
"name": str(col)
})
_result = None
return res
def getCubeIndexes(self, result, nodeDic, nodeId):
res = list(result.index.names)
res = [x + "." + nodeDic[nodeId].identifier for x in res]
return res
def getCubeValues(self, result, nodeDic, nodeId, query):
_result = self.prepareDataframeForPivot(result)
if isinstance(_result, pd.DataFrame):
cube_indexes = self.getCubeIndexes(_result, nodeDic, nodeId)
_filters = {}
if not query["filters"] is None:
for dim in query["filters"]:
if "values" in dim and dim["values"] is not None and len(dim["values"]) > 0:
for itemValue in dim["values"]:
field = str(dim["field"]).split(".")[0]
if (field in _filters):
_filters[field].append(itemValue)
else:
_filters[field] = [itemValue]
_filteredResult = self.applyFilter(_result, _filters)
for col in query["columns"]:
if col in cube_indexes:
item = {
"field": col,
"count": 0,
"values": _filteredResult.index.get_level_values(col.split(".")[0]).unique().tolist()
}
# "values": _filteredResult[col.split(".")[0]].unique().tolist()
item["count"] = len(item["values"])
_cols = [x.split(".")[0] for x in query["columns"]]
if len(_cols) == 0:
listResult = _filteredResult[query["measures"]].sum(
).reset_index().values.tolist()
if len(listResult) > 0 and len(listResult[0]) > 1:
if np.isinf(listResult[0][1]):
listResult[0][1] = None
return [["data_index", "data_value"]] + listResult
else:
"""
            # replaced by the code below so that string columns are also handled
dfValues = pd.DataFrame.pivot_table(_filteredResult, index=_cols, aggfunc=np.sum)
firstCol = query["columns"] + ["data_index","data_value"]
res = [firstCol] + dfValues.reset_index().melt(id_vars=_cols, value_vars=query["measures"]).values.tolist()
return res
"""
"""
            @replaced by the code below to allow multiple aggregations per measure ... tricky
t1= _filteredResult.stack()
t1.index.set_names("data_index",level=t1.index.nlevels-1,inplace=True)
t2 = t1.iloc[t1.index.get_level_values("data_index").isin(query["measures"]) ].reset_index()[_cols + ["data_index",0]]
firstCol = query["columns"] + ["data_index","data_value"]
t3 = t2.groupby( _cols + ["data_index"]).aggregate({0:"sum"}).reset_index()
res = [firstCol] + t3.values[:10000].tolist()
t1=None
t2=None
t3=None
_result = None
return res
            @replaced by the code below to allow custom measures ... even trickier
"""
_measures = list(query["measures"])
useCustomFillMeasures = False
try:
# test if have groupMeasures property
_aa = _result.groupMeasures
useCustomFillMeasures = True
except AttributeError as ex:
pass
_groupedDF = None
if useCustomFillMeasures:
_groupedDF = _filteredResult.groupby(
_cols, sort=False).sum()
else:
_agg = dict()
for measure in _measures:
                        # TODO: use "sum" for now; later take the aggregation type from the query
_agg[measure] = "sum"
_groupedDF = _filteredResult.groupby(
_cols, sort=False).agg(_agg)
if useCustomFillMeasures:
for key in _result.groupMeasures:
_groupedDF[key] = _result.groupMeasures[key](
_groupedDF)
finalDF = _groupedDF.reset_index().melt(id_vars=_cols,
value_vars=query["measures"], var_name="data_index", value_name="data_value")
                # replace inf values only if the dtype is numeric
_kind = finalDF["data_value"].dtype.kind
if _kind in {'i', 'u', 'f', 'c'}:
if np.isinf(finalDF["data_value"]).any():
finalDF["data_value"][np.isinf(
finalDF["data_value"])] = 0
# fill nan values
finalDF["data_value"].fillna(0, inplace=True)
firstCol = query["columns"] + ["data_index", "data_value"]
sortedColumns = [
x.split(".")[0] for x in query["columns"]] + ["data_index", "data_value"]
res = [firstCol] + \
finalDF[sortedColumns].values[:1000000].tolist()
_result = None
return res
def getCubeDimensionValues(self, result, nodeDic, nodeId, query):
_result = self.prepareDataframeForPivot(result)
if isinstance(_result, pd.DataFrame):
if len(query["columns"]) > 0:
dimension = query["columns"][-1]
if dimension in nodeDic[nodeId].indexes:
#uniquelist = _result[dimension.split(".")[0]].unique()
uniquelist = _result.index.get_level_values(
dimension.split(".")[0]).unique()
# uniquelist.sort()
return uniquelist.sort_values().tolist()[:1000]
return []
def previewNode(self, nodeDic, nodeId):
from pyplan_engine.classes.Helpers import Helpers
from sys import getsizeof
res = {
"resultType": str(type(nodeDic[nodeId].result)),
"dims": [],
"columns": [],
"console": nodeDic[nodeId].lastEvaluationConsole,
"preview": ""
}
if isinstance(nodeDic[nodeId].result, pd.DataFrame):
cube = nodeDic[nodeId].result
if self.isIndexed(cube):
res["dims"] = list(cube.index.names)
for idx, col in enumerate(cube.columns.values[:500]):
res["columns"].append(
str(col) + " (" + kindToString(cube.dtypes[idx].kind) + ")")
res["preview"] += "Rows: " + str(len(cube.index))
#res += "\nColumns: " + ', '.join([''.join(row) for row in cube.columns.values[:500]])
res["preview"] += "\nShape: " + str(cube.shape)
res["preview"] += "\nMemory: " + \
str(round(cube.memory_usage(deep=True).sum() / 1024/1024, 2)) + " Mb"
#res["preview"] += "\nValues: \n" + str(cube.head(20))
res["preview"] += "\nValues: \n" + cube.head(20).to_string()
elif isinstance(nodeDic[nodeId].result, pd.Series):
serie = nodeDic[nodeId].result
if self.isIndexed(serie):
res["dims"] = list(serie.index.names)
res["preview"] += "Rows: " + str(len(serie.index))
res["preview"] += "\nMemory: " + \
str(round(serie.memory_usage(deep=True) / 1024/1024, 2)) + " Mb"
#res["preview"] += "\nValues: \n" + str(serie.head(20))
res["preview"] += "\nValues: \n" + serie.head(20).to_string()
elif isinstance(nodeDic[nodeId].result, pd.Index):
res["preview"] = str(nodeDic[nodeId].result)[:1000]
return json.dumps(res)
def ensureDataFrame(self, result):
res = result
if isinstance(res, pd.Series):
res = pd.DataFrame({"values": res})
return res
def exportFlatNode(self, nodeDic, nodeId, numberFormat, columnFormat, fileName):
if columnFormat == "tab":
columnFormat = "\t"
decimalSep = "."
if numberFormat == 'TSPDSC':
decimalSep = ","
_result = self.ensureDataFrame(nodeDic[nodeId].result)
if isinstance(_result, pd.DataFrame):
_result.to_csv(fileName, sep=columnFormat, encoding="iso-8859-1")
return True
return False
def postCalculate(self, node, result):
"""Method executed after calculate node
"""
if node.nodeClass == "index":
if isinstance(result, pd.Index) and result.name is None:
result.name = node.identifier
def copyAsValues(self, result, nodeDic, nodeId):
""" Copy node as values """
newDef = ""
if isinstance(result, pd.Index):
np.set_printoptions(threshold=np.prod(result.values.shape))
values = np.array2string(result.values, separator=",", precision=20, formatter={
'float_kind': lambda x: repr(x)}).replace('\n', '')
newDef = f"result = pd.Index({values})"
else:
return False
nodeDic[nodeId].definition = newDef
return True
def _isIndexedDataframe(self, dataframe):
"""Return True if dataframe is an indexed dataframe"""
return len(dataframe.index.names) > 1 or not dataframe.index.names[0] is None
def prepareDataframeForTable(self, result):
""" Prepare dataframe for use un tables and charts """
df = result
if isinstance(df, pd.Series):
df = pd.DataFrame({"values": df})
if self._isIndexedDataframe(df):
if df.size == 0:
df["values"] = np.nan
elif len(df.columns) > 1:
if isinstance(df.columns, pd.MultiIndex):
df.columns = df.columns.map(' | '.join)
df = df.stack()
if isinstance(df, pd.Series):
df = pd.DataFrame({"values": df})
current_columns_name = list(df.index.names)
current_columns_name[len(current_columns_name)-1] = "Measures"
df.index.names = current_columns_name
return df
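    # Editor's sketch (hypothetical column names): an indexed frame with value
    # columns ['qty', 'price'] is stacked so the former columns become one extra
    # index level named 'Measures' and a single column of values remains - the
    # shape the table/chart code above expects. A plain Series is first wrapped
    # as a one-column frame called 'values'.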
def prepareDataframeForPivot(self, result):
""" Prepare dataframe for use in pivot cube"""
df = result
if isinstance(df, pd.Series):
df = pd.DataFrame({"values": df})
if self._isIndexedDataframe(df):
if isinstance(df.columns, pd.MultiIndex):
df.columns = df.columns.map(' | '.join)
df = df.select_dtypes(include=['float64', 'int64'])
if df.size == 0:
df["values"] = np.nan
# try to keep group measures
try:
df.groupMeasures = result.groupMeasures
except:
pass
return df
```
#### File: classes/evaluators/XArrayEvaluator.py
```python
import json
import math
import numpy as np
import pandas as pd
import xarray as xr
from pyplan_engine.classes.evaluators.BaseEvaluator import BaseEvaluator
from pyplan_engine.classes.evaluators.PandasEvaluator import PandasEvaluator
from pyplan_engine.classes.XHelpers import XHelpers, XIndex
from pyplan_engine.common.classes.filterChoices import filterChoices
from pyplan_engine.common.classes.indexValuesReq import IndexValuesReq
class XArrayEvaluator(BaseEvaluator):
PAGESIZE = 100
def evaluateNode(self, result, nodeDic, nodeId, dims=None, rows=None, columns=None, summaryBy="sum", bottomTotal=False, rightTotal=False, fromRow=0, toRow=0):
if isinstance(result, xr.DataArray):
return self.cubeEvaluate(result, nodeDic, nodeId, dims, rows, columns, summaryBy, bottomTotal, rightTotal, fromRow, toRow)
elif isinstance(result, XIndex):
return self.indexEvaluate(result, nodeDic, nodeId, dims, rows, columns, summaryBy, bottomTotal, rightTotal, fromRow, toRow)
def cubeEvaluate(self, result, nodeDic, nodeId, dims=None, rows=None, columns=None, summaryBy="sum", bottomTotal=False, rightTotal=False, fromRow=0, toRow=0):
sby = np.sum
if summaryBy == 'avg':
sby = np.mean
elif summaryBy == 'max':
sby = np.max
elif summaryBy == 'min':
sby = np.min
if (fromRow is None) or int(fromRow) <= 0:
fromRow = 1
if (toRow is None) or int(toRow) < 1:
toRow = 100
fromRow = int(fromRow)
toRow = int(toRow)
result = self.applyHierarchy(
result, nodeDic, nodeId, dims, rows, columns, sby)
_filters = {}
_rows = []
_columns = []
if not rows is None:
for row in rows:
if self.hasDim(result, str(row["field"])):
_rows.append(str(row["field"]).split(".")[0])
self.addToFilter(nodeDic, row, _filters)
if not columns is None:
for column in columns:
if self.hasDim(result, str(column["field"])):
_columns.append(str(column["field"]).split(".")[0])
self.addToFilter(nodeDic, column, _filters)
if not dims is None:
for dim in dims:
if self.hasDim(result, str(dim["field"]).split(".")[0]):
self.addToFilter(nodeDic, dim, _filters)
tmp = None
filteredResult = result
if len(_filters) > 0:
filteredResult = result.sel(_filters)
if len(_rows) == 0 and len(_columns) == 0 and result.ndim > 0:
try:
tmp = sby(filteredResult)
except Exception as ex:
if "flexible type" in str(ex):
tmp = sby(filteredResult.astype("O"))
else:
raise ex
else:
otherDims = [
xx for xx in filteredResult.dims if xx not in (_rows + _columns)]
if len(otherDims) > 0:
try:
tmp = filteredResult.reduce(
sby, otherDims).transpose(*(_rows + _columns))
except Exception as ex:
if "flexible type" in str(ex):
tmp = filteredResult.astype("O").reduce(
sby, otherDims).transpose(*(_rows + _columns))
else:
tmp = filteredResult.transpose(*(_rows + _columns))
finalValues = tmp.values
finalIndexes = []
if tmp.ndim > 0:
finalIndexes = tmp.coords[tmp.dims[0]].values
finalColumns = ["Total"]
if tmp.ndim == 2:
finalColumns = tmp.coords[tmp.dims[1]].values
# Add totales
_totalRow = None
if bottomTotal and len(_rows) > 0:
# add total row
if tmp.ndim == 1:
_totalRow = finalValues.sum(axis=0).reshape(1)
else:
_totalRow = finalValues.sum(
axis=0).reshape(1, len(finalValues[0]))
_totalRow = _totalRow[0]
if rightTotal:
_totalRow = np.append(_totalRow, finalValues.sum())
if rightTotal and len(_columns) > 0:
# add total column
if tmp.ndim == 1:
finalIndexes = np.append(finalIndexes, "Total")
finalValues = np.append(
finalValues, finalValues.sum(axis=0).reshape(1), axis=0)
else:
finalColumns = np.append(finalColumns, "Total")
finalValues = np.append(finalValues, finalValues.sum(
axis=1).reshape(len(finalValues), 1), axis=1)
        # check for inf values
if self.kindToString(finalValues.dtype.kind) == "numeric":
if np.isinf(finalValues).any():
finalValues[np.isinf(finalValues)] = None
        # check if there are nan values
if pd.isnull(finalValues).any():
try:
finalValues = np.where(
np.isnan(finalValues), None, finalValues)
except:
finalValues[pd.isnull(finalValues)] = None
res = {}
pageInfo = None
onRow = None
onColumn = None
if len(_rows) == 0 and len(_columns) == 0:
res = {
"columns": [],
"index": ["Total"],
"data": [[finalValues.tolist()]]
}
elif len(_rows) == 0:
onColumn = _columns[0]
res = {
"columns": self.checkDateFormat(finalIndexes[:300]).tolist(),
"index": finalColumns,
"data": [finalValues[:300].tolist()]
}
elif len(_columns) == 0:
if (len(finalIndexes) > self.PAGESIZE):
pageInfo = {
"fromRow": int(fromRow),
"toRow": int(toRow),
"totalRows": len(finalIndexes)
}
onRow = _rows[0]
res = {
"columns": finalColumns,
"index": self.checkDateFormat(finalIndexes[fromRow-1:toRow]).tolist(),
"data": [[x] for x in finalValues[fromRow-1:toRow].tolist()]
}
# add total rows
if not _totalRow is None:
res["index"].append("Total")
res["data"].append(_totalRow.tolist())
else:
onColumn = _columns[0]
onRow = _rows[0]
if (len(finalIndexes) > self.PAGESIZE):
pageInfo = {
"fromRow": int(fromRow),
"toRow": int(toRow),
"totalRows": len(finalIndexes)
}
res = {
"columns": self.checkDateFormat(finalColumns[:300]).tolist(),
"index": self.checkDateFormat(finalIndexes[fromRow-1:toRow]).tolist(),
"data": finalValues[fromRow-1:toRow, :300].tolist()
}
# add total rows
if not _totalRow is None:
res["index"].append("Total")
res["data"].append(_totalRow[:300].tolist())
return self.createResult(res, type(tmp), onRow=onRow, onColumn=onColumn, node=nodeDic[nodeId], pageInfo=pageInfo)
def hasDim(self, result, dim):
return True if dim.split(".")[0] in result.dims else False
def addToFilter(self, nodeDic, dim, filters):
if "values" in dim and dim["values"] is not None and len(dim["values"]) > 0:
field = str(dim["field"]).split(".")[0]
            # check whether the node id of the index we are using to filter has changed
nodeId = None
indexType = None
indexType = self.getIndexType(nodeDic, nodeId, field)
            # check if the index values have changed
_values = None
if indexType == "S":
_values = [str(xx["value"]) for xx in dim["values"]]
else:
_values = [int(xx["value"]) for xx in dim["values"]]
all_values = None
npValues = np.array(_values)
if field in nodeDic:
all_values = nodeDic[field].result.values
elif len(dim["field"].split(".")) > 1:
node_id = str(dim["field"]).split(".")[1]
if field in nodeDic[node_id].result.dims:
all_values = nodeDic[node_id].result.coords[field].values
serie = pd.Series(all_values)
if not all_values is None and serie.isin(npValues).any():
npValues = all_values[serie.isin(npValues)]
if len(npValues) > 0:
filters[field] = npValues
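    # Editor's sketch (field names are hypothetical): for a request filtering the
    # 'region' dimension to North/South, addToFilter() leaves
    #     filters == {'region': np.array(['North', 'South'])}
    # which is later passed straight to DataArray.sel(filters), so only values
    # that actually exist in the coordinate survive.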
def getIndexes(self, node, result=None):
if result is None:
result = node._result
return [(xx+"."+node.identifier) for xx in result.dims]
def getIndexesWithLevels(self, node, result=None):
res = []
if result is None:
result = node._result
if not result is None:
_model = node.model
for indexItem in result.dims:
itemDim = indexItem.split(",")[0]
item = {"field": itemDim+"."+node.identifier,
"name": itemDim, "description": "", "levels": []}
if _model.existNode(itemDim):
levelNode = _model.getNode(itemDim)
if levelNode.title:
item["name"] = levelNode.title
item["description"] = levelNode.description
if levelNode.numberFormat:
item["numberFormat"] = levelNode.numberFormat
# check for levels
if not levelNode.hierarchy_parents is None:
def buildLevels(parents, levelList):
if not isinstance(parents, list):
parents = [parents]
for parentIndexId in parents:
parentIndexNode = _model.getNode(parentIndexId)
if parentIndexNode is None:
raise ValueError(
f"Node {parentIndexId} not found")
levelItem = {
"field": parentIndexId, "name": parentIndexNode.title or parentIndexId}
levelList.append(levelItem)
_dummy = parentIndexNode.result # to force calc
if not parentIndexNode.hierarchy_parents is None:
buildLevels(
parentIndexNode.hierarchy_parents, levelList)
listOfLevels = [
{"field": itemDim, "name": item["name"]}]
indexParents = levelNode.hierarchy_parents
buildLevels(indexParents, listOfLevels)
item["levels"] = listOfLevels
elif "datetime" in result.coords[itemDim].dtype.name:
item["numberFormat"] = "2,DD,0,,0,0,4,0,$,5,FULL,0"
res.append(item)
return res
def isIndexed(self, result):
if not result is None:
obj = result
if isinstance(obj, pd.Series):
obj = pd.DataFrame({"values": obj})
if isinstance(obj, pd.DataFrame):
if (isinstance(obj.index, pd.MultiIndex) or isinstance(obj.index, pd.Index)) and len(obj.index.names) > 0 and (not obj.index.names[0] is None):
return True
return False
def getIndexValues(self, nodeDic, data: IndexValuesReq, result=None):
res = []
if data.node_id:
if (not data.node_id is None) & (data.node_id in nodeDic):
node = nodeDic[data.node_id]
if result is None:
result = node.result
res = self.checkDateFormat(
result[data.index_id].values).tolist()
elif (not data.index_id is None) & (data.index_id in nodeDic):
node = nodeDic[data.index_id]
if result is None:
result = node.result
if isinstance(result, XIndex):
res = result.values.tolist()
elif isinstance(result, np.ndarray):
res = self.checkDateFormat(result).tolist()
else:
res = list(result)
if data.text1:
text1 = data.text1.lower()
if data.filter == filterChoices.CONTAINS.value:
res = list(
filter(lambda item: text1 in str(item).lower(), res))
elif data.filter == filterChoices.NOT_CONTAINS.value:
res = list(
filter(lambda item: not text1 in str(item).lower(), res))
return res
def getIndexType(self, nodeDic, nodeId, indexId):
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
res = "S"
if (not indexId is None) & (indexId in nodeDic):
node = nodeDic[indexId]
if isinstance(node.result, XIndex) or isinstance(node.result, pd.Index):
if str(node.result.values.dtype) in numerics:
res = "N"
elif isinstance(node.result, np.ndarray):
if str(node.result.dtype) in numerics:
res = "N"
elif nodeId:
if (not nodeId is None) & (nodeId in nodeDic):
node = nodeDic[nodeId]
if str(node.result.coords[indexId].values.dtype) in numerics:
res = "N"
return res
def getCubeValues(self, result, nodeDic, nodeId, query):
if isinstance(result, xr.DataArray):
res = {
"dims": [],
"values": []
}
# fix field.array
query["columns"] = [xx.split(".")[0] for xx in query["columns"]]
_filters = {}
if not query["filters"] is None:
for dimFilter in query["filters"]:
field = str(dimFilter["field"]).split(".")[0]
if self.hasDim(result, field):
dimFilter["values"] = [{"value": xx}
for xx in dimFilter["values"]]
self.addToFilter(nodeDic, dimFilter, _filters)
_filteredResult = result
if len(_filters):
_filteredResult = result.sel(_filters)
nodeIndexes = self.getIndexes(nodeDic[nodeId], result)
nodeIndexes = [xx.split(".")[0] for xx in nodeIndexes]
for col in query["columns"]:
if col in nodeIndexes:
item = {
"field": col,
"count": 0,
"values": [str(v) for v in self.checkDateFormat(_filteredResult.coords[col].values).tolist()]
# "values": result.filter(_filters).axis(col).values.tolist()
}
item["count"] = len(item["values"])
res["dims"].append(item)
otherDims = [
xx for xx in _filteredResult.dims if xx not in query["columns"]]
resultValues = None
if len(otherDims) > 0:
resultValues = _filteredResult.sum(otherDims)
else:
resultValues = _filteredResult
#resultValues = _filteredResult.sum(keep=query["columns"])
if isinstance(resultValues, xr.DataArray):
if len(query["columns"]) > 0:
res["values"] = resultValues.transpose(
*query["columns"]).values.reshape(resultValues.size).tolist()
else:
res["values"] = [resultValues.values.tolist()]
else:
res["values"] = resultValues
return res
def getCubeDimensionValues(self, result, nodeDic, nodeId, query):
if isinstance(result, xr.DataArray):
if len(query["columns"]) > 0:
dimension = query["columns"][-1]
if (dimension+"."+nodeId) in self.getIndexes(nodeDic[nodeId], result):
finalList = [
str(v) for v in self.checkDateFormat(result.coords[dimension].values).tolist()[:1000]]
finalList.sort()
return finalList
return []
def getCubeMetadata(self, result, nodeDic, nodeId):
res = None
if isinstance(result, xr.DataArray):
res = {
"dims": [],
"measures": [],
"aggregator": "sum",
"isEditable": True if self.isTable(nodeDic[nodeId]) == "1" else False,
"nodeProperties": {
"title": nodeDic[nodeId].title if not nodeDic[nodeId].title is None else nodeDic[nodeId].identifier,
"numberFormat": nodeDic[nodeId].numberFormat
}
}
# check if is in scenario
if nodeDic[nodeId].model.isNodeInScenario(nodeDic[nodeId].identifier):
res["nodeProperties"]["scenario"] = True
for dim in result.dims:
indexPart = str(dim).split(".")[0]
itemDim = {
"field": dim,
"name": indexPart
}
if indexPart in nodeDic:
if not nodeDic[indexPart].title is None:
itemDim["name"] = nodeDic[indexPart].title
if nodeDic[indexPart].numberFormat:
itemDim["numberFormat"] = nodeDic[indexPart].numberFormat
res["dims"].append(itemDim)
res["measures"].append({
"field": "datavalue",
"name": "datavalue"
})
return res
def isTable(self, node):
res = "0"
if isinstance(node.result, xr.DataArray):
if not node.definition is None and node.definition != "":
import re
deff = re.sub(
'[\s+]', '', str(node.definition).strip(' \t\n\r')).lower()
if (deff.startswith("result=pp.dataarray(") or deff.startswith("result=pp.cube(") or deff.startswith("result=xr.dataarray(") or deff.startswith("result=create_dataarray(")):
res = "1"
return res
def setNodeValueChanges(self, nodeDic, nodeId, nodeChanges):
if isinstance(nodeDic[nodeId].result, xr.DataArray):
for change in nodeChanges["changes"]:
newValue = change["definition"]
filters = {}
for filterItem in change["filterList"]:
aux = {
"field": filterItem["Key"],
"values": [{
"value": filterItem["Value"]
}]}
self.addToFilter(nodeDic, aux, filters)
for key in filters:
filters[key] = slice(filters[key][0], filters[key][0])
nodeDic[nodeId].result.loc[filters] = newValue
nodeDic[nodeId].definition = self.generateNodeDefinition(
nodeDic, nodeId)
return "ok"
def generateNodeDefinition(self, nodeDic, nodeId, forceXArray=False):
array = nodeDic[nodeId].result
"""Generate code for cube definition"""
np.set_printoptions(threshold=np.prod(array.values.shape))
data = np.array2string(array.values, separator=",", precision=20, formatter={
'float_kind': lambda x: repr(x)}).replace('\n', '')
indexes = []
for dim in list(array.dims):
if dim in nodeDic:
indexes.append(dim)
else:
index_values = np.array2string(array[dim].values, separator=",", precision=20, formatter={
'float_kind': lambda x: repr(x)}).replace('\n', '')
coord = f"pd.Index({index_values},name='{dim}')"
indexes.append(coord)
indexes = "[" + ",".join(indexes).replace("'", '"') + "]"
if forceXArray or "xr.DataArray" in nodeDic[nodeId].definition or "create_dataarray" in nodeDic[nodeId].definition:
if self.kindToString(array.values.dtype.kind) == "string" or self.kindToString(array.values.dtype.kind) == "object":
deff = f'result = xr.DataArray({data},{indexes}).astype("O")'
else:
deff = f'result = xr.DataArray({data},{indexes})'
else:
if self.kindToString(array.values.dtype.kind) == "string" or self.kindToString(array.values.dtype.kind) == "object":
deff = "result = pp.cube(" + indexes + \
"," + data + ", dtype='O')"
else:
deff = "result = pp.cube(" + indexes + "," + data + ")"
return deff
def dumpNodeToFile(self, nodeDic, nodeId, fileName):
definition = self.generateNodeDefinition(nodeDic, nodeId)
with open(fileName, 'w') as f:
f.write(definition)
f.close()
def applyHierarchy(self, result, nodeDic, nodeId, dims, rows, columns, sby):
def hierarchize(dataArray, levels, maps, hierarchyDic):
mapArray = nodeDic[maps[0]].result
coordValues = mapArray.values.copy()
targetIndexId = nodeDic[levels[1]].result.name
for pos, level in enumerate(levels):
if pos > 0:
if not maps[pos] is None:
mapArrayLevel = nodeDic[maps[pos]].result
for ii in range(len(coordValues)):
if not coordValues[ii] is None:
try:
newVal = mapArrayLevel.sel(
{mapArrayLevel.dims[0]: coordValues[ii]}, drop=True).values.item(0)
coordValues[ii] = newVal
except Exception as ex:
coordValues[ii] = None
#raise ValueError("Hierarchy not found. Level: " + targetIndexId + ", value: " + coordValues[ii])
pass
# perform aggregate
dataArray.coords[levels[0]].values = coordValues
_df = dataArray.to_series()
_df = _df.groupby(list(dataArray.dims), sort=False).agg(sby)
_da = _df.to_xarray()
reindex_dic = dict()
for dimension in _da.dims:
if dimension == levels[0]:
reindex_dic[dimension] = nodeDic[levels[-1:]
[0]].result.values
elif dimension in nodeDic and isinstance(nodeDic[dimension].result, pd.Index):
node_id = dimension
if not hierarchyDic is None and dimension in hierarchyDic:
node_id = hierarchyDic[dimension]
reindex_dic[dimension] = nodeDic[node_id].result.values
_db = _da.reindex(reindex_dic)
return _db
allDims = (dims or []) + (rows or []) + (columns or [])
hierarchyDic = dict()
for dim in allDims:
if dim and dim["currentLevel"] and dim["currentLevel"] != str(dim["field"]).split(".")[0]:
hierarchyDic[str(dim["field"]).split(".")[
0]] = dim["currentLevel"]
# recursive fn for search parent
def findPath(indexNode, level, levels, maps):
if indexNode.identifier == level:
levels.append(indexNode.identifier)
maps.append(None)
return True
else:
_for_calc = indexNode.result
parents = indexNode.hierarchy_parents
if parents is None:
return False
else:
if not isinstance(parents, list):
parents = [parents]
mapArrays = indexNode.hierarchy_maps
if not isinstance(mapArrays, list):
mapArrays = [mapArrays]
mapPos = 0
for parentId in parents:
parent = nodeDic[parentId]
if findPath(parent, level, levels, maps):
levels.append(indexNode.identifier)
maps.append(mapArrays[mapPos])
return True
mapPos += 1
return False
field = str(dim["field"]).split(".")[0]
currentLevel = dim["currentLevel"]
indexNode = nodeDic[field]
levels = []
maps = []
findPath(indexNode, currentLevel, levels, maps)
levels.reverse()
maps.reverse()
result = hierarchize(result.copy(), levels, maps, hierarchyDic)
return result
def geoUnclusterData(self, result, nodeDic, nodeId, rowIndex, attIndex, latField="latitude", lngField="longitude", geoField="geoField", labelField="labelField", sizeField="sizeField", colorField="colorField", iconField="iconField"):
_tmp_for_geo = XIndex('tmp_for_geo', [
latField, lngField, geoField, labelField, sizeField, colorField, iconField])
attIndex = attIndex.split(".")[0]
rowIndex = rowIndex.split(".")[0]
_idx = nodeDic[attIndex].result
rowIndexObj = nodeDic[rowIndex].result
#mapCube = result.sel({_idx.name:_tmp_for_geo}).transpose([rowIndex,"tmp_for_geo"]).values
mapCube = XHelpers.changeIndex(None, result, _idx, _tmp_for_geo).transpose(
*[rowIndex, "tmp_for_geo"]).values
res = dict()
points = []
pos = 0
for itemRow in mapCube:
vo = dict()
vo["id"] = str(rowIndexObj.values[pos])
vo["lat"] = itemRow[0]
vo["lng"] = itemRow[1]
vo["geoDef"] = itemRow[2]
vo["labelRes"] = itemRow[3]
vo["sizeRes"] = itemRow[4]
vo["colorRes"] = itemRow[5]
vo["iconRes"] = itemRow[6]
points.append(vo)
pos += 1
res["points"] = points
for nn, point in enumerate(res["points"]):
if nn == 0:
try:
if not math.isnan(float(point["sizeRes"])):
res["minSize"] = float(point["sizeRes"])
res["maxSize"] = float(point["sizeRes"])
except Exception as ex:
pass
try:
if not math.isnan(float(point["colorRes"])):
res["minColor"] = float(point["colorRes"])
res["maxColor"] = float(point["colorRes"])
except Exception as ex:
pass
try:
if not math.isnan(float(point["iconRes"])):
res["minIcon"] = float(point["iconRes"])
res["maxIcon"] = float(point["iconRes"])
except Exception as ex:
pass
else:
try:
if not math.isnan(float(point["sizeRes"])):
if point["sizeRes"] > res["maxSize"]:
res["maxSize"] = point["sizeRes"]
if point["sizeRes"] < res["minSize"]:
res["minSize"] = point["sizeRes"]
except Exception as ex:
pass
try:
if not math.isnan(float(point["colorRes"])):
if point["colorRes"] > res["maxColor"]:
res["maxColor"] = point["colorRes"]
if point["colorRes"] < res["minColor"]:
res["minColor"] = point["colorRes"]
except Exception as ex:
pass
try:
if not math.isnan(float(point["iconRes"])):
if point["iconRes"] > res["maxIcon"]:
res["maxIcon"] = point["iconRes"]
if point["iconRes"] < res["minIcon"]:
res["minIcon"] = point["iconRes"]
except Exception as ex:
pass
return res
def postCalculate(self, node, result):
"""Method executed after calculate node
"""
if isinstance(result, xr.DataArray):
result.name = node.title
def copyAsValues(self, result, nodeDic, nodeId):
""" Copy node as values """
newDef = ""
if isinstance(result, float) or isinstance(result, int):
newDef = "result = " + str(result)
elif isinstance(result, xr.DataArray):
newDef = self.generateNodeDefinition(nodeDic, nodeId, True)
else:
return False
nodeDic[nodeId].definition = newDef
return True
def kindToString(self, kind):
"""Returns the data type on human-readable string
"""
if kind in {'U', 'S'}:
return "string"
elif kind in {'b'}:
return "boolean"
elif kind in {'i', 'u', 'f', 'c'}:
return "numeric"
elif kind in {'m', 'M'}:
return "date"
elif kind in {'O'}:
return "object"
elif kind in {'V'}:
return "void"
```
#### File: pyplan_engine/classes/IOEngine.py
```python
class IOEngine(object):
def __init__(self, node):
self.node = node
self.inputs = []
self.outputs = []
def release(self):
self.inputs = None
self.outputs = None
self.node = None
def updateInputs(self, names):
        # remove this node from the outputs of inputs that are no longer referenced
for inputNode in self.inputs:
if not inputNode in names:
if self.node.model.existNode(inputNode):
self.node.model.getNode(inputNode).ioEngine.removeOutput(
self.node.identifier)
newInputs = []
for nodeId in names:
if self.node.model.existNode(nodeId):
newInputs.append(nodeId)
if not nodeId in self.inputs:
self.node.model.getNode(nodeId).ioEngine.addOutput(
self.node.identifier)
self.inputs = newInputs
def removeOutput(self, nodeId):
if nodeId in self.outputs:
self.outputs.remove(nodeId)
def removeInput(self, nodeId):
if nodeId in self.inputs:
self.inputs.remove(nodeId)
def addOutput(self, nodeId):
self.outputs.append(nodeId)
def updateNodeId(self, oldId, newId):
for inputNode in self.inputs:
if self.node.model.existNode(inputNode):
self.node.model.getNode(
inputNode).ioEngine.updateOutputId(oldId, newId)
for outputNode in self.outputs:
if self.node.model.existNode(outputNode):
self.node.model.getNode(
outputNode).ioEngine.updateInputId(oldId, newId)
def updateOnDeleteNode(self):
for inputNode in self.inputs:
if self.node.model.existNode(inputNode):
self.node.model.getNode(inputNode).ioEngine.removeOutput(
self.node.identifier)
for outputNode in self.outputs:
if self.node.model.existNode(outputNode):
self.node.model.getNode(outputNode).ioEngine.removeInput(
self.node.identifier)
def updateOutputId(self, oldId, newId):
if oldId in self.outputs:
self.outputs.remove(oldId)
self.outputs.append(newId)
def updateInputId(self, oldId, newId):
if oldId in self.inputs:
self.inputs.remove(oldId)
self.inputs.append(newId)
self.node.updateDefinitionForChangeId(oldId, newId)
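    # Editor's sketch of the bookkeeping (node ids are hypothetical): if node
    # 'total_sales' starts referencing 'price' and 'qty', calling
    #     total_sales.ioEngine.updateInputs(['price', 'qty'])
    # records both ids in total_sales.inputs and appends 'total_sales' to the
    # outputs list of each referenced node, so the dependency graph stays
    # navigable in both directions; renames and deletions walk the same two lists.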
```
#### File: classes/wizards/DataframeIndex.py
```python
from pyplan_engine.classes.wizards.BaseWizard import BaseWizard
import pandas as pd
import jsonpickle
class Wizard(BaseWizard):
def __init__(self):
self.code = "DataframeIndex"
def generateDefinition(self, model, params):
nodeId = params["nodeId"]
if model.existNode(nodeId):
currentDef = model.getNode(nodeId).definition
newDef = self.getLastDefinition(currentDef)
newDef = newDef + "\n# Set index"
reset_index = ""
if not isinstance(model.getNode(nodeId).result.index, pd.RangeIndex):
reset_index = ".reset_index()"
if not params is None and "columns" in params and len(params["columns"]) > 0:
newDef = newDef + \
f"\nresult = _df{reset_index}.set_index({params['columns']})"
else:
newDef = newDef + f"\nresult = _df{reset_index}"
model.getNode(nodeId).definition = self.formatDefinition(newDef)
return "ok"
return ""
```
#### File: common/classes/indexValuesReq.py
```python
class IndexValuesReq(object):
def __init__(self, **kargs):
self.node_id = kargs['node_id'] if 'node_id' in kargs else None
self.index_id = kargs['index_id'] if 'index_id' in kargs else None
self.filter = kargs['filter'] if 'filter' in kargs else None
self.text1 = kargs['text1'] if 'text1' in kargs else None
self.text2 = kargs['text2'] if 'text2' in kargs else None
```
#### File: pyplan_engine/utils/exception_handler.py
```python
import traceback
from django.conf import settings
from django.http import HttpResponse
from rest_framework.exceptions import APIException
from rest_framework.response import Response
from rest_framework.views import exception_handler
def genericApiException(ex, engine=None):
msg = "No error data"
if not engine is None and engine.getStopReason():
msg = engine.getStopReason()
else:
if not ex is None:
msg = str(ex.args)
if settings.DEBUG:
msg += "\n----------------\n" + traceback.format_exc()
return HttpResponse(msg, content_type="text/plain", status=207)
def ex_handler(exc, context):
# Call REST framework's default exception handler first,
# to get the standard error response.
#response = exception_handler(exc, context)
msg = "Unmanaged error."
if not exc is None:
msg += " "+str(exc.args[0])
if settings.DEBUG:
msg += "\n----------------\n" + traceback.format_exc()
return HttpResponse(msg, content_type="text/plain", status=207)
```
#### File: email/classes/eEmailType.py
```python
from enum import Enum
class eEmailType(Enum):
WORKFLOW_ASSIGNED_TASK = 0
WORKFLOW_CHANGE_STATE = 1
WORKFLOW_CHANGE_PERCENT = 2
INTERFACE_COMMENT = 3
INTERFACE_REFRESH_USER_IN_COMMENT = 4 # TODO:Implement this
INTERFACE_SHARED = 5
APPLICATION_SHARED = 6
RESET_PASSWORD = 7
CHANGED_PASSWORD = 8
TEST = 9
WELCOME_USER = 10
CREATED_USER = 11
ACTIVATED_USER = 12
SCHEDULE_TASK_STATUS_CHANGED = 13
DEACTIVATED_USER = 14 # TODO:Implement this
def __str__(self):
return self.value
```
#### File: common/engineTypes/engineType.py
```python
from abc import ABC, abstractmethod
class IEngineType(ABC):
from_app_pool = False
engineParams = ""
currentSession = None
engineURI = ""
engineUID = ""
engineParams = ""
lock = None
@abstractmethod
def createEngine(self): raise NotImplementedError
@abstractmethod
def releaseEngine(self): raise NotImplementedError
@abstractmethod
def stop(self): raise NotImplementedError
@abstractmethod
def getEngineURI(self): raise NotImplementedError
@abstractmethod
def getEngineUID(self): raise NotImplementedError
@abstractmethod
def getEngineParams(self): raise NotImplementedError
@abstractmethod
def openModel(self, file): raise NotImplementedError
@abstractmethod
def getDiagram(self, module_id=None): raise NotImplementedError
@abstractmethod
def getArrows(self, module_id=None): raise NotImplementedError
@abstractmethod
def getNodeProperty(
self, node_id, property_name): raise NotImplementedError
@abstractmethod
def getNodeProperties(self, node_id, properties): raise NotImplementedError
@abstractmethod
def setNodeProperties(self, node_id, properties): raise NotImplementedError
@abstractmethod
def saveModel(self, file_path): raise NotImplementedError
@abstractmethod
def getNodeInputs(self, node_id): raise NotImplementedError
@abstractmethod
def getNodeOutputs(self, node_id): raise NotImplementedError
@abstractmethod
def searchNodes(self, text, module_id, node_class,
extra_classes, fill_detail): raise NotImplementedError
@abstractmethod
def isChild(self, node: str, module_ids: list = []
): raise NotImplementedError
@abstractmethod
def getModelPreferences(self): raise NotImplementedError
@abstractmethod
def setModelProperties(self, modelProperties): raise NotImplementedError
@abstractmethod
def closeModel(self): raise NotImplementedError
@abstractmethod
def getModelName(self): raise NotImplementedError
@abstractmethod
def getModelId(self): raise NotImplementedError
@abstractmethod
def previewNode(self, node): raise NotImplementedError
@abstractmethod
def evaluate(self, definition): raise NotImplementedError
@abstractmethod
def callFunction(self, nodeId, params): raise NotImplementedError
@abstractmethod
def evaluateNode(
self, node_id, dims, rows, columns, summary_by="sum",
from_row=0, to_row=0, bottom_total=False, right_total=False
): raise NotImplementedError
@abstractmethod
def getIndexType(self, id): raise NotImplementedError
@abstractmethod
def getIndexValues(self, data): raise NotImplementedError
@abstractmethod
def isResultComputed(self, nodes): raise NotImplementedError
@abstractmethod
def getNodeIndexes(self, node_id): raise NotImplementedError
@abstractmethod
def isChoice(self, node_id): raise NotImplementedError
@abstractmethod
def isSelector(self, node_id): raise NotImplementedError
@abstractmethod
def getSelector(self, node_id): raise NotImplementedError
@abstractmethod
def isIndex(self, node_id): raise NotImplementedError
@abstractmethod
def isTable(self, node_id): raise NotImplementedError
@abstractmethod
def isTime(self, field): raise NotImplementedError
@abstractmethod
def getToolbars(self, extra_path): raise NotImplementedError
@abstractmethod
def profileNode(self, node_id): raise NotImplementedError
@abstractmethod
def createNode(self, node): raise NotImplementedError
@abstractmethod
def deleteNodes(self, node_ids): raise NotImplementedError
@abstractmethod
def createAlias(self, node_ids): raise NotImplementedError
@abstractmethod
def createInputNode(self, node_ids): raise NotImplementedError
@abstractmethod
def copyNodes(self, nodes): raise NotImplementedError
@abstractmethod
def copyAsValues(self, params): raise NotImplementedError
@abstractmethod
def moveNodes(self, nodes): raise NotImplementedError
@abstractmethod
def createNewModel(self, modelFile, modelName): raise NotImplementedError
@abstractmethod
def existNode(self, nodeId): raise NotImplementedError
@abstractmethod
def setNodeIdFromTitle(self, node_id): raise NotImplementedError
@abstractmethod
def exportFlatNode(self, nodeId, numberFormat, columnFormat,
fileName): raise NotImplementedError
@abstractmethod
def exportModule(self, moduleId, filename): raise NotImplementedError
@abstractmethod
def importModule(self, moduleId, filename,
importType): raise NotImplementedError
@abstractmethod
def callWizard(self, wizardRequest): raise NotImplementedError
@abstractmethod
def getCubeMetadata(self, query): raise NotImplementedError
@abstractmethod
def getCubeValues(self, query): raise NotImplementedError
@abstractmethod
def setNodeValueChanges(self, changes): raise NotImplementedError
@abstractmethod
def getCubeDimensionValues(self, query): raise NotImplementedError
@abstractmethod
def getUnclusterData(self, query): raise NotImplementedError
@abstractmethod
def executeButton(self, nodeId): raise NotImplementedError
@abstractmethod
def is_healthy(self): raise NotImplementedError
@abstractmethod
def getSystemResources(self): raise NotImplementedError
@abstractmethod
def installLibrary(self, lib, target): raise NotImplementedError
@abstractmethod
def getInstallProgress(self, from_line): raise NotImplementedError
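# --- Illustrative sketch, not part of the original file ---
# A concrete engine subclasses this interface and overrides every @abstractmethod.
# The base-class name "EngineInterface" below is a placeholder, since the real
# class statement sits above this excerpt.
#
# class DummyEngine(EngineInterface):
#     def getModelName(self):
#         return "empty-model"
#     def existNode(self, nodeId):
#         return False
#     # ...every remaining abstract method must be overridden as well.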
```
#### File: common/serializers/ObjectField.py
```python
from rest_framework import serializers
class ObjectField(serializers.Field):
def to_representation(self, value):
return value
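# Hedged usage sketch (illustrative, not from the original file): the field simply
# echoes whatever Python object it receives into the rendered output.
# class ExampleSerializer(serializers.Serializer):
#     definition = ObjectField(read_only=True)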
```
#### File: pyplan/companies/service.py
```python
from pyplan.pyplan.common.baseService import BaseService
from pyplan.pyplan.companies.models import Company
from pyplan.pyplan.department.models import Department
class CompaniesService(BaseService):
def list_with_groups_and_depts(self):
res = []
companies = None
# check for superuser
if self.current_user.is_superuser:
companies = Company.objects.all()
else:
companies = Company.objects.filter(
id=self.client_session.companyId)
if companies:
for company in companies:
comp = {}
comp['company'] = company
departments = Department.objects.filter(company=company)
comp['departments'] = departments.values()
res.append(comp)
return res
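# Illustrative shape of each element returned above (not part of the original file):
# {"company": <Company instance>, "departments": <QuerySet of dicts from .values()>}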
```
#### File: pyplan/company_preference/admin.py
```python
from django.contrib import admin
from .models import CompanyPreference
@admin.register(CompanyPreference)
class CompanyPreferenceAdmin(admin.ModelAdmin):
list_display = ('id', 'company_name', 'definition', 'preference')
def company_name(self, obj):
return obj.company.name
```
#### File: dashboard/classes/nodeEvalProperties.py
```python
class NodeEvalProperties(object):
def __init__(self):
self.node = ""
self.properties = list()
self.forceThisNode = False
class NodeEvalProperty(object):
def __init__(self, name, value):
self.name = name
self.value = value
```
#### File: dashboard/classes/nodeFullData.py
```python
from .nodeResult import NodeResult
class NodeFullData(object):
def __init__(self):
self.nodeId = ""
self.nodeName = ""
self.dims = list()
self.rows = list()
self.columns = list()
self.itemType = ""
self.objectType = ""
self.itemProperties = object
self.nodeResult = NodeResult()
```
#### File: pyplan/dashboard_comment/permissions.py
```python
from rest_framework import permissions
class DashboardCommentPermissions(permissions.BasePermission):
def has_permission(self, request, view):
"""
Checks permissions
"""
has_action_permission = False
if request.user and request.user.is_authenticated:
if view.action in ['list']:
has_action_permission = request.user.has_perm('pyplan.list_dashboardcomments')
elif view.action in ['retrieve']:
has_action_permission = request.user.has_perm('pyplan.view_dashboardcomment')
elif view.action in ['create']:
has_action_permission = request.user.has_perm('pyplan.add_dashboardcomment')
elif view.action in ['update', 'partial_update']:
has_action_permission = request.user.has_perm('pyplan.change_dashboardcomment')
elif view.action in ['destroy']:
has_action_permission = request.user.has_perm('pyplan.delete_dashboardcomment')
return has_action_permission
```
#### File: pyplan/dashboardstyle/service.py
```python
from rest_framework import exceptions
from pyplan.pyplan.common.baseService import BaseService
from .models import DashboardStyle
class DashboardStyleService(BaseService):
def getByStyleType(self, style_type):
"""Get By Style Type"""
if style_type and isinstance(style_type, int):
return DashboardStyle.objects.filter(owner_id=self.client_session.userCompanyId, style_type=int(style_type))
return DashboardStyle.objects.filter(owner_id=self.client_session.userCompanyId)
def getById(self, id):
"""Get By Style Type"""
return DashboardStyle.objects.filter(
owner_id=self.client_session.userCompanyId, pk=id)
```
#### File: pyplan/department/models.py
```python
from jsonfield import JSONField
from django.db import models
from pyplan.pyplan.companies.models import Company
class Department(models.Model):
code = models.CharField(max_length=50, blank=False, null=False)
name = models.CharField(max_length=255, blank=True, null=True)
engine_definitions = JSONField(blank=True, null=True)
login_action = JSONField(blank=True, null=True)
# denied folders and modules
denied_items = JSONField(
blank=True, null=True, help_text='{ "folders": ["folder_a"], "modules": [{ "model_id": "model_a", "modules_ids": ["id_of_module"] }] }')
company = models.ForeignKey(Company, on_delete=models.CASCADE)
def __str__(self):
return f"[{self.company.code}] - {self.code} - {self.name}"
```
#### File: pyplan/department/permissions.py
```python
from rest_framework import permissions
class DepartmentPermissions(permissions.BasePermission):
def has_permission(self, request, view):
"""
Checks permissions
"""
has_action_permission = False
if request.user and request.user.is_authenticated:
if view.action in ['list', 'retrieve', 'by_current_company']:
has_action_permission = request.user.has_perm('pyplan.view_department')
elif view.action in ['create']:
has_action_permission = request.user.has_perm('pyplan.add_department')
elif view.action in ['update', 'partial_update', 'denied', 'deny_items']:
has_action_permission = request.user.has_perm('pyplan.change_department')
elif view.action in ['destroy']:
has_action_permission = request.user.has_perm('pyplan.delete_department')
return has_action_permission
```
#### File: pyplan/diagram_shortcut/views.py
```python
from rest_framework import viewsets
from rest_framework.response import Response
from .models import DiagramShortcut
from .permissions import DiagramShortcutPermissions
from .serializers import (DiagramShortcutCreateSerializer,
DiagramShortcutSerializer)
from .service import DiagramShortcutService
class DiagramShortcutViewSet(viewsets.ModelViewSet):
"""
Updates and retrieves diagram shortcut relations
"""
queryset = DiagramShortcut.objects.all()
serializer_class = DiagramShortcutSerializer
permission_classes = (DiagramShortcutPermissions,)
pagination_class = None
def create(self, request, *args, **kwargs):
service = DiagramShortcutService(self.request)
serializer = DiagramShortcutCreateSerializer(data=request.query_params)
serializer.is_valid(raise_exception=True)
shortcut = service.create(serializer.data)
return Response(DiagramShortcutSerializer(shortcut).data)
```
#### File: external_link/dashboard/permissions.py
```python
from rest_framework import permissions
class DashboardExternalLinkPermissions(permissions.BasePermission):
def has_permission(self, request, view):
"""
Checks permissions
"""
has_action_permission = False
if request.user and request.user.is_authenticated:
if view.action == 'list' or view.action == 'retrieve':
has_action_permission = request.user.has_perm('pyplan.view_dashboardexternallink')
elif view.action == 'create':
has_action_permission = request.user.has_perm('pyplan.add_dashboardexternallink')
elif view.action == 'update' or view.action == 'partial_update':
has_action_permission = request.user.has_perm('pyplan.change_dashboardexternallink')
elif view.action == 'destroy':
has_action_permission = request.user.has_perm('pyplan.delete_dashboardexternallink')
return has_action_permission
```
#### File: external_link/dashboard/service.py
```python
from pyplan.pyplan.common.baseService import BaseService
from pyplan.pyplan.usercompanies.models import UserCompany
from .models import DashboardExternalLink
class DashboardExternalLinkService(BaseService):
def getDashboardExternalLink(self, dashboard_id):
model_path = self.client_session.modelInfo.uri
user_company_id = self.client_session.userCompanyId
external_links = DashboardExternalLink.objects.filter(
owner=user_company_id,
model_path=model_path,
)
if not dashboard_id is None and dashboard_id.isnumeric():
external_links = external_links.filter(
dashboard__pk=int(dashboard_id),
)
return external_links.order_by('created_at')
def createDashboardExternalLink(self, dashboard_id):
return DashboardExternalLink.objects.create(
dashboard_id=dashboard_id,
model_path=self.client_session.modelInfo.uri,
owner_id=self.client_session.userCompanyId,
)
```
#### File: pyplan/external_link/models.py
```python
import uuid
from django.db import models
from pyplan.pyplan.usercompanies.models import UserCompany
class ExternalLink(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
model_path = models.CharField(max_length=255, blank=False, null=False)
is_active = models.BooleanField(default=True)
common_instance = models.BooleanField(default=False)
owner = models.ForeignKey(UserCompany, on_delete=models.DO_NOTHING, related_name='external_links')
created_at = models.DateTimeField(auto_now_add=True)
class Meta:
ordering = ['created_at']
def __str__(self):
return str(self.id)
```
#### File: external_link/report/models.py
```python
from django.db import models
from pyplan.pyplan.report.models import Report
from pyplan.pyplan.external_link.models import ExternalLink
class ReportExternalLink(ExternalLink):
external_link = models.OneToOneField(
ExternalLink, on_delete=models.CASCADE, parent_link=True, related_name='report_external_link')
report = models.ForeignKey(Report, on_delete=models.CASCADE, related_name='external_links')
def __str__(self):
return self.report.name
```
#### File: external_link/report/service.py
```python
from pyplan.pyplan.common.baseService import BaseService
from pyplan.pyplan.usercompanies.models import UserCompany
from .models import ReportExternalLink
class ReportExternalLinkService(BaseService):
def getReportExternalLink(self, report_id):
model_path = self.client_session.modelInfo.uri
user_company_id = self.client_session.userCompanyId
external_links = ReportExternalLink.objects.filter(
owner=user_company_id,
model_path=model_path,
)
if not report_id is None and report_id.isnumeric():
external_links = external_links.filter(
report__pk=int(report_id),
)
return external_links.order_by('created_at')
def createReportExternalLink(self, report_id):
return ReportExternalLink.objects.create(
report_id=report_id,
model_path=self.client_session.modelInfo.uri,
owner_id=self.client_session.userCompanyId,
)
```
#### File: filemanager/classes/fileEntry.py
```python
from enum import Enum
from .fileEntryData import FileEntryData
class eFileTypes(Enum):
NOTHING = 0
MY_FOLDER = 1
PUBLIC = 2
COMPANY = 3
MODELS_PATH = 4
SHARED_WITH_ME = 5
class FileEntry(object):
def __init__(self, show=True, text="", type=eFileTypes.NOTHING, data=FileEntryData()):
self.show = show
self.text = text
self.type = type.value
self.data = data
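# Illustrative construction (values assumed, not from the original file):
# entry = FileEntry(show=True, text="Public", type=eFileTypes.PUBLIC, data=FileEntryData())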
```
#### File: pyplan/filemanager/service.py
```python
import base64
import csv
import json
import os
import shutil
import tempfile
import zipfile
from datetime import datetime
from errno import ENOTDIR
from itertools import islice
from shlex import split
from subprocess import PIPE, Popen
from uuid import uuid4
import pandas as pd
from django.conf import settings
from django.core.files.storage import FileSystemStorage
from openpyxl import load_workbook
from rest_framework import exceptions
from pyplan.pyplan.common.baseService import BaseService
from pyplan.pyplan.companies.models import Company
from pyplan.pyplan.preference.models import Preference
from pyplan.pyplan.user_company_preference.models import UserCompanyPreference
from .classes.fileEntry import FileEntry, eFileTypes
from .classes.fileEntryData import (FileEntryData, eSpecialFileType,
eSpecialFolder)
class FileManagerService(BaseService):
optimizable_templates = [".xls", ".xlsx", ".xlsm", ".xlsb"]
def getMainFolders(self):
"""
list:
Return a list of all folders.
"""
result = list()
company_code = self.client_session.company_code
# User Workspace
result.append(
FileEntry(
text="My Workspace",
type=eFileTypes.NOTHING,
data=FileEntryData(
fullPath="",
specialFolderType=eSpecialFolder.MY_FOLDER
)
)
)
# if self.current_user.has_perm("pyplan.view_model_path_root"):
# result.append(
# FileEntry(
# text="Root",
# type=eFileTypes.NOTHING,
# data=FileEntryData(
# fullPath="",
# specialFolderType=eSpecialFolder.MODELS_PATH
# )
# )
# )
# if self.current_user.has_perm("pyplan.view_company_root"):
# result.append(
# FileEntry(
# text=self.client_session.companyName,
# type=eFileTypes.NOTHING,
# data=FileEntryData(
# fullPath=company_code,
# specialFolderType=eSpecialFolder.MODELS_PATH
# )
# )
# )
# else:
# # Special Folders
# result.append(
# FileEntry(
# text="Public",
# type=eFileTypes.NOTHING,
# data=FileEntryData(
# fullPath=f"{company_code}/Public",
# specialFolderType=eSpecialFolder.PUBLIC
# )
# )
# )
return result
def getFoldersAndFiles(self, folder=''):
"""
list:
Return a list of all folders.
"""
storage = FileSystemStorage(
os.path.join(settings.MEDIA_ROOT, 'models'))
result = list()
base_path = ""
if folder.startswith("/"):
base_path = folder[1:]
elif folder:
base_path = f"{folder}/"
items = storage.listdir(base_path)
denied_folders = []
if not self.current_user.has_perm('pyplan.change_group_permissions'):
denied_folders = self._getDeniedFolders()
# folders
for item in sorted(items[0], key=str.lower):
full_path = f"{base_path}{item}"
if not denied_folders or not item in denied_folders:
result.append(
FileEntry(
show=not item.startswith('.'),
text=item,
type=eFileTypes.MY_FOLDER,
data=FileEntryData(
fileSize=None,
fullPath=full_path,
# specialFolderType=eSpecialFolder.MODELS_PATH
lastUpdateTime=storage.get_modified_time(
full_path),
)
)
)
# files
for item in sorted(items[1], key=str.lower):
full_path = f"{base_path}{item}"
specialFileType = eSpecialFileType.FILE
lowerItem = item.lower()
            if lowerItem.endswith(('.ppl', '.cbpy', '.model', '.ana')):
specialFileType = eSpecialFileType.MODEL
elif lowerItem.endswith('.zip'):
specialFileType = eSpecialFileType.ZIP
result.append(
FileEntry(
text=item,
type=eFileTypes.PUBLIC,
data=FileEntryData(
fileSize=storage.size(full_path),
fullPath=full_path,
extension=full_path[full_path.rfind('.')+1:],
specialFileType=specialFileType,
lastUpdateTime=storage.get_modified_time(full_path),
)
)
)
return result
def createFolder(self, folder_path, folder_name):
"""
create:
Creates a folder inside provided path.
"""
storage = FileSystemStorage(
os.path.join(settings.MEDIA_ROOT, 'models'))
full_path = os.path.join(
storage.base_location, folder_path, folder_name)
if storage.exists(full_path):
raise exceptions.NotAcceptable('Folder already exists')
else:
os.mkdir(full_path)
return os.path.join(folder_path, folder_name)
def createFile(self, my_file, folder_path, name, chunk):
file_path = os.path.join(
settings.MEDIA_ROOT, 'models', folder_path, name)
# Moves file if it already exists
        if chunk == 0 and os.path.isfile(file_path):
new_name = f"{name}-{datetime.today().strftime('%Y%m%d-%H:%M:%S')}.old"
self._copy(
file_path,
os.path.join(settings.MEDIA_ROOT, 'models',
folder_path, new_name)
)
os.remove(file_path)
# Appends all chunks of this request (chunks of chunks)
# UI sends multiple requests with multiple chunks each per file
with open(file_path, 'ab+') as temp_file:
for chunk in my_file.chunks():
temp_file.write(chunk)
def copyFileOrFolder(self, source, destination):
"""
create:
Duplicate file or Folder.
"""
storage = FileSystemStorage(
os.path.join(settings.MEDIA_ROOT, 'models'))
return self._linuxCopy(
f"{storage.base_location}/{source}",
f"{storage.base_location}/{destination}"
)
def ensureUserWorkspace(self):
storage = FileSystemStorage(
os.path.join(settings.MEDIA_ROOT, 'models'))
# User Workspace
if not storage.exists(storage.base_location):
os.makedirs(storage.base_location)
def renameFile(self, source, new_name):
storage = FileSystemStorage(
os.path.join(settings.MEDIA_ROOT, 'models'))
src = f"{storage.base_location}/{source}"
dest = f"{src[0:src.rfind('/')+1]}{new_name}"
os.rename(src, dest)
return dest
def duplicateFiles(self, sources):
result = []
storage = FileSystemStorage(
os.path.join(settings.MEDIA_ROOT, 'models'))
for source in sources:
src = f"{storage.base_location}/{source}"
dest_path, dest_name = source.rsplit('/', 1)
dest = f"{storage.base_location}/{dest_path}/Copy 1 of {dest_name}"
n = 1
while storage.exists(dest):
n += 1
dest = f"{storage.base_location}/{dest_path}/Copy {n} of {dest_name}"
result.append(self._linuxCopy(src, dest))
return result
def moveFiles(self, sources, target):
result = []
storage = FileSystemStorage(
os.path.join(settings.MEDIA_ROOT, 'models'))
for source in sources:
src = f"{storage.base_location}/{source}"
dest_path, dest_name = source.rsplit('/', 1)
dest = f"{storage.base_location}/{target}/{dest_name}"
result.append(self.recursive_overwrite(src, dest))
if os.path.isdir(src):
shutil.rmtree(src)
else:
storage.delete(src)
return result
def copyFiles(self, sources, target):
storage = FileSystemStorage(
os.path.join(settings.MEDIA_ROOT, 'models'))
for source in sources:
src = f"{storage.base_location}/{source}"
dest_path, dest_name = source.rsplit('/', 1)
dest = f"{storage.base_location}/{target}/"
self._linuxCopy(src, dest)
return True
def copyToMyWorkspace(self, source):
storage = FileSystemStorage(
os.path.join(settings.MEDIA_ROOT, 'models'))
target = f"{storage.base_location}/{self.client_session.company_code}/{self.current_user.username}"
src = f"{storage.base_location}/{source}"
return self._linuxCopy(src, target)
def deleteFiles(self, sources):
storage = FileSystemStorage(
os.path.join(settings.MEDIA_ROOT, 'models'))
files = []
for source in sources:
full_path = f'{storage.base_location}/{source}'
if not storage.exists(full_path):
raise exceptions.NotAcceptable(f'File {source} does not exist')
else:
files.append(full_path)
for file_to_delete in files:
if os.path.isfile(file_to_delete):
storage.delete(file_to_delete)
else:
shutil.rmtree(file_to_delete)
def download(self, sources):
storage = FileSystemStorage(
os.path.join(settings.MEDIA_ROOT, 'models'))
src_0 = f"{storage.base_location}/{sources[0]}"
        if len(sources) == 1 and os.path.isfile(src_0):
return open(src_0, 'rb'), os.path.relpath(src_0, os.path.join(src_0, '..'))
else:
temp = tempfile.SpooledTemporaryFile()
with zipfile.ZipFile(temp, 'w', zipfile.ZIP_DEFLATED) as zfobj:
for source in sources:
src = f"{storage.base_location}/{source}"
if os.path.isfile(src):
zfobj.write(src, os.path.relpath(
src, os.path.join(src, '..')))
else:
self._zipdir(src, zfobj)
for zfile in zfobj.filelist:
zfile.create_system = 0
temp.seek(0)
return temp, f"{os.path.relpath(sources[0], os.path.join(sources[0], '..'))}.zip"
def makeJsonStream(self, json_string: str):
"""
Returns streamed temp file from json string
"""
temp = tempfile.SpooledTemporaryFile(mode='w+b')
temp.write(str.encode(json_string))
temp.seek(0)
return temp
def unzipFile(self, source, target_folder):
storage = FileSystemStorage(
os.path.join(settings.MEDIA_ROOT, 'models'))
src = f"{storage.base_location}/{source}"
dest = f"{storage.base_location}/{target_folder}"
# Unzip the file, creating subdirectories as needed
zfobj = zipfile.ZipFile(src)
zfobj.extractall(dest)
def zipFiles(self, sources):
storage = FileSystemStorage(
os.path.join(settings.MEDIA_ROOT, 'models'))
zip_file = f"{storage.base_location}/{sources[0]}.zip"
if storage.exists(zip_file):
file_name, file_extension = os.path.splitext(zip_file)
zip_file = f'{file_name}_{uuid4().hex}{file_extension}'
with zipfile.ZipFile(zip_file, 'w', zipfile.ZIP_DEFLATED) as zfobj:
for source in sources:
src = f"{storage.base_location}/{source}"
if os.path.isfile(src):
zfobj.write(src, os.path.relpath(
src, os.path.join(src, '..')))
else:
self._zipdir(src, zfobj)
for zfile in zfobj.filelist:
zfile.create_system = 0
def getHome(self):
company_id = self.getSession().companyId
model_path = eSpecialFolder.MODELS_PATH
res = {}
filepath = os.path.join(settings.MEDIA_ROOT,
'models', 'home.json')
if os.path.isfile(filepath):
with open(filepath, "r") as json_file:
try:
res = json.load(json_file)
if "tabs" in res:
for tab in res["tabs"]:
if "folders" in tab:
for folder in tab["folders"]:
if "items" in folder:
for item in folder["items"]:
if "image" in item:
image_path = os.path.join(
settings.MEDIA_ROOT, 'models', item["image"])
if os.path.isfile(image_path):
with open(image_path, "rb") as f_image:
item["imagesrc"] = str(
base64.b64encode(f_image.read()), "utf-8")
except Exception as ex:
raise exceptions.NotAcceptable(ex)
return res
def optimizeTemplates(self, sources):
"""Generate csv file for each named range in template for future read.
"""
if sources:
preference = Preference.objects.filter(
code="optimize_templates").first()
if preference:
user_company_id = self.client_session.userCompanyId
c_pref = UserCompanyPreference.objects.filter(
user_company_id=user_company_id, preference__code="optimize_templates").first()
if c_pref:
preference.definition = c_pref.definition
if preference.definition["value"]:
for template in sources:
filename, file_extension = os.path.splitext(template)
if file_extension in self.optimizable_templates:
template_filename = os.path.join(
settings.MEDIA_ROOT, 'models', template)
self._generate_csv_from_excel(template_filename)
# Private
def _zipdir(self, path, ziph):
denied_folders = self._getDeniedFolders()
# ziph is zipfile handle
for root, dirs, files in os.walk(path):
# check if folder is not in any department denied folders
if not denied_folders or not any(list(map(lambda item: item in denied_folders, root.rsplit('/')))):
for file in files:
ziph.write(os.path.join(root, file), os.path.relpath(
os.path.join(root, file), os.path.join(path, '..')))
def _generate_csv_from_excel(self, filename):
"""Generate compressed csv from excel file
"""
target_dir = os.path.dirname(filename)
file_name, file_extension = os.path.splitext(filename)
target_dir = os.path.join(target_dir, file_name)
if not os.path.isdir(target_dir):
os.mkdir(target_dir)
wb = load_workbook(filename, data_only=True, read_only=True)
for item in wb.defined_names.definedName:
if not item.is_external and item.type == "RANGE" and item.attr_text and "!$" in item.attr_text:
target_filename = os.path.join(target_dir, item.name+".pkl")
if os.path.isfile(target_filename):
os.remove(target_filename)
dests = item.destinations
for title, coord in dests:
if title in wb:
ws = wb[title]
rangeToRead = ws[coord]
if not isinstance(rangeToRead, tuple):
rangeToRead = ((rangeToRead,),)
nn = 0
cols = []
values = []
for row in rangeToRead:
if nn == 0:
cols = [str(c.value) for c in row]
else:
values.append([c.value for c in row])
nn += 1
nn = 0
_finalCols = []
for _col in cols:
if _col is None:
_finalCols.append("Unnamed" + str(nn))
nn += 1
else:
_finalCols.append(_col)
df = pd.DataFrame(
values, columns=_finalCols).dropna(how="all")
df.to_pickle(target_filename, compression='gzip')
def recursive_overwrite(self, src, dest, ignore=None):
if os.path.isdir(src):
if not os.path.isdir(dest):
os.makedirs(dest)
files = os.listdir(src)
if ignore is not None:
ignored = ignore(src, files)
else:
ignored = set()
for f in files:
if f not in ignored:
self.recursive_overwrite(
os.path.join(src, f),
os.path.join(dest, f),
ignore
)
else:
shutil.copy(src, dest)
return dest
def _copy(self, src, dest):
try:
return shutil.copytree(src, dest)
except OSError as e:
# If the error was caused because the source wasn't a directory
if e.errno == ENOTDIR:
return shutil.copy(src, dest)
else:
raise exceptions.NotAcceptable(
'Directory not copied. Error: %s' % e)
except Exception as e:
raise e
def _linuxCopy(self, src, dest):
        # escape spaces so shlex.split keeps each path as a single token
        src_path = src.replace(' ', '\\ ')
        dest_path = dest.replace(' ', '\\ ')
# -R, -r, --recursive
# copy directories recursively
# -u, --update
# copy only when the SOURCE file is newer
# than the destination file or when the
# destination file is missing
# -v, --verbose
# explain what is being done
cmd = f'cp -ruv {src_path} {dest_path}'
        popen = Popen(split(cmd), stdout=PIPE, stderr=PIPE, universal_newlines=True)  # capture stderr so the check below can see errors
stdout, stderr = popen.communicate()
if stderr:
raise exceptions.NotAcceptable(stderr)
return True
```
#### File: pyplan/migrations/0013_guestuser_permissions_20190919_1715.py
```python
from django.contrib.contenttypes.management import (
create_contenttypes, get_contenttypes_and_models)
from django.db import migrations
def update_guest_groups_permissions(apps, schema_editor):
"""
This migration adds necessary permissions to guest groups.
"""
Dashboard = apps.get_model('pyplan', 'Dashboard')
InputTemplate = apps.get_model('pyplan', 'InputTemplate')
Group = apps.get_model('auth', 'Group')
Permission = apps.get_model('auth', 'Permission')
ContentType = apps.get_model('contenttypes', 'ContentType')
content_types, app_models = get_contenttypes_and_models(
apps.app_configs['pyplan'], 'default', ContentType)
# If content_types are not created yet, then it's a clean install.
# In order to add permissions after, we need to force ContentType creation.
if not content_types:
create_contenttypes(apps.app_configs['pyplan'])
# Dashboard content type
ctype_dashboard = ContentType.objects.get_for_model(Dashboard)
# Dashboard permissions
view_dashboard_perm, view_dash_perm_created = Permission.objects.get_or_create(
codename='view_dashboard',
content_type=ctype_dashboard,
)
if view_dash_perm_created:
view_dashboard_perm.name = 'Can view dashboard'
view_dashboard_perm.save()
change_dashboard_perm, change_dash_perm_created = Permission.objects.get_or_create(
codename='change_dashboard',
content_type=ctype_dashboard,
)
if change_dash_perm_created:
change_dashboard_perm.name = 'Can change dashboard'
change_dashboard_perm.save()
# InputTemplate content type
ctype_inputtemplate = ContentType.objects.get_for_model(InputTemplate)
# InputTemplate permissions
view_inputtemplate_perm, view_inptmp_created = Permission.objects.get_or_create(
codename='view_inputtemplate',
content_type=ctype_inputtemplate,
)
if view_inptmp_created:
view_inputtemplate_perm.name = 'Can view input template'
view_inputtemplate_perm.save()
change_inputtemplate_perm, change_inptmp_created = Permission.objects.get_or_create(
codename='change_inputtemplate',
content_type=ctype_inputtemplate,
)
if change_inptmp_created:
change_inputtemplate_perm.name = 'Can change input template'
change_inputtemplate_perm.save()
guest_groups = Group.objects.filter(name__icontains='guest')
for guest_group in guest_groups:
guest_group.permissions.add(view_dashboard_perm)
guest_group.permissions.add(change_dashboard_perm)
guest_group.permissions.add(view_inputtemplate_perm)
guest_group.permissions.add(change_inputtemplate_perm)
guest_group.save()
class Migration(migrations.Migration):
dependencies = [
('pyplan', '0012_new_email_preferences_20190824'),
]
operations = [
migrations.RunPython(update_guest_groups_permissions),
]
```
#### File: modelmanager/classes/copyAsValuesParam.py
```python
class CopyAsValuesParam(object):
def __init__(self, **kargs):
self.nodeId = kargs["nodeId"] if "nodeId" in kargs else None
self.asNewNode = kargs["asNewNode"] if "asNewNode" in kargs else False
```
#### File: modelmanager/classes/eFormNodeType.py
```python
from enum import Enum
class eFormNodeType(Enum):
CHECKBOX = 0
COMBOBOX = 1
SCALARINPUT = 2
BUTTON = 3
def __str__(self):
        return str(self.value)  # __str__ must return a str; the raw enum values are ints
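# Illustrative check (assumes the str() fix above): str(eFormNodeType.BUTTON) -> "3"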
```
#### File: pyplan/preference_module/models.py
```python
from django.db import models
class PreferenceModule(models.Model):
code = models.CharField(max_length=50, blank=False, null=False)
description = models.CharField(max_length=255, blank=True, null=True)
def __str__(self):
return self.code
```
#### File: pyplan/report/service.py
```python
from datetime import datetime
from django.db.models import Q
from pyplan.pyplan.common.baseService import BaseService
from pyplan.pyplan.dashboard.models import Dashboard
from pyplan.pyplan.dashboardstyle.models import DashboardStyle
from pyplan.pyplan.department.models import Department
from pyplan.pyplan.usercompanies.models import UserCompany
from .models import Report
class ReportManagerService(BaseService):
def getReport(self, report_id):
return Report.objects.get(pk=report_id)
def myReports(self, parent_id=None, favs=None):
usercompany_id = self.client_session.userCompanyId
model_id = self.client_session.modelInfo.modelId
reports = Report.objects.filter(
model=model_id,
)
if parent_id and parent_id.isnumeric():
reports = reports.filter(
parent__pk=int(parent_id),
)
else:
reports = reports.filter(
owner_id=usercompany_id,
parent__pk__isnull=True,
)
if type(favs) is bool and favs:
reports = reports.filter(
is_fav=True,
)
return reports.order_by('order').distinct()
def sharedWithMe(self, parent):
company_id = self.client_session.companyId
usercompany_id = self.client_session.userCompanyId
model_id = self.client_session.modelInfo.modelId
reports = Report.objects.filter(
# shared to specific users, me included
Q(usercompanies__pk=usercompany_id) |
# shared to departments where I belong
Q(departments__usercompanies__pk=usercompany_id) |
# public but not mine
(Q(is_public=True) & ~Q(owner_id=usercompany_id)),
# from the same company
owner__company__pk=company_id,
model=model_id,
)
if parent and isinstance(parent, int):
reports = reports.filter(
parent__pk=int(parent),
)
else:
reports = reports.filter(
parent__pk__isnull=True,
)
return reports.order_by('order').distinct()
def mySharedReports(self, parent):
usercompany_id = self.client_session.userCompanyId
model_id = self.client_session.modelInfo.modelId
reports = Report.objects.filter(
Q(departments__isnull=False) | Q(usercompanies__isnull=False) | Q(is_public=True),
owner__pk=usercompany_id,
model=model_id,
)
if parent and isinstance(parent, int):
reports = reports.filter(
parent__pk=int(parent),
)
else:
reports = reports.filter(
parent__pk__isnull=True,
)
return reports.order_by("order").distinct()
def createReport(self, data):
user_company = UserCompany(id=self.client_session.userCompanyId)
parent = None
if "parentId" in data and not data["parentId"] is None:
parent = Report(id=data["parentId"])
report = Report.objects.create(
model=self.client_session.modelInfo.modelId,
name=data["name"],
is_fav=data["is_fav"],
is_public=data["is_public"],
parent=parent,
owner=user_company,
)
return report
def updateReport(self, pk, data):
report = Report.objects.get(pk=pk)
if report:
if "name" in data:
report.name = data["name"]
report.is_fav = data["is_fav"]
report.is_public = data["is_public"]
if "parentId" in data and not data["parentId"] is None:
report.parent = Report(id=data["parentId"])
report.save()
return report
def getNavigator(self, report_id, dashboard_id):
result = {
"priorId": None,
"priorName": None,
"nextId": None,
"nextName": None,
"list": [],
}
report = None
dashboards = []
if report_id:
report = Report.objects.get(pk=report_id)
dashboards = report.dashboards.all()
dash_count = report.dashboards.count()
# only next, on click dashboard id will come and sort
            if dash_count > 1:  # need at least two dashboards for a "next" one to exist
                next_dashboard = dashboards[1]
                result["nextId"] = next_dashboard.id
                result["nextName"] = next_dashboard.name
if dashboard_id:
dashboard = Dashboard.objects.get(pk=dashboard_id)
report = report if report_id else dashboard.report
if report:
dashboards = report.dashboards
previous = Dashboard.objects.filter(report=report, order=dashboard.order-1).all()
if previous.count() > 0:
result["priorId"] = previous[0].id
result["priorName"] = previous[0].name
following = Dashboard.objects.filter(report=report, order=dashboard.order+1).all()
if following.count() > 0:
result["nextId"] = following[0].id
result["nextName"] = following[0].name
else:
dashboards.append(dashboard)
result['list'] = dashboards
return result
def bulkDelete(self, ids):
return Report.objects.filter(pk__in=ids).delete()
def changeOrder(self, ids):
for index, val in enumerate(ids):
if val.isnumeric():
report = Report.objects.get(pk=int(val))
report.order = index + 1
report.save()
def search(self, text):
reports = Report.objects.filter(name__icontains=text)
dashboards = Dashboard.objects.filter(name__icontains=text)
return {"reports": reports, "dashboards": dashboards}
def duplicateItems(self, data):
for report_id in data["report_ids"]:
report = Report.objects.get(pk=report_id)
self._duplicateReport(report, True)
for dashboard_id in data["dashboard_ids"]:
dashboard = Dashboard.objects.get(pk=dashboard_id)
self._duplicateDashboard(dashboard, True)
def copyToMyReports(self, data):
owner_id = self.client_session.userCompanyId
for report_id in data["report_ids"]:
report = Report.objects.get(pk=report_id)
self._copyReport(report, owner_id)
for dashboard_id in data["dashboard_ids"]:
dashboard = Dashboard.objects.get(pk=dashboard_id)
self._copyDashboard(dashboard, owner_id)
def setAsFav(self, data):
reports = Report.objects.filter(pk__in=data["report_ids"])
reports.update(is_fav=data["is_fav"])
dashboards = Dashboard.objects.filter(pk__in=data["dashboard_ids"])
dashboards.update(is_fav=data["is_fav"])
return {"reports": reports, "dashboards": dashboards}
def dropOnReport(self, report_ids, dashboard_ids, report_id=None):
reports = Report.objects.filter(pk__in=report_ids)
reports.update(parent_id=report_id)
dashboards_childrens = None
quantity = 0
if report_id:
parent_report = Report.objects.get(pk=report_id)
dashboards_childrens = parent_report.dashboards.all()
quantity = len(dashboards_childrens.values())
else:
dashboards_childrens = Dashboard.objects.filter(report_id__isnull=True)
quantity = dashboards_childrens.count()
for dash_id in dashboard_ids:
dash = Dashboard.objects.get(pk=dash_id)
dash.report_id = report_id
dash.order = quantity+1
dash.save()
quantity += 1
dashboards = Dashboard.objects.filter(pk__in=dashboard_ids)
return {"reports": reports, "dashboards": dashboards}
def exportItems(self, data):
reports = Report.objects.filter(pk__in=data['report_ids'])
dashboards = Dashboard.objects.filter(pk__in=data['dashboard_ids'])
styles = []
styles.extend(DashboardStyle.objects.filter(dashboards__id__in=data['dashboard_ids']).all())
self._getStyles(reports, styles)
return {
'dashboards': dashboards,
'reports': reports,
'styles': list(set(styles)),
}, f"dashboards-{datetime.today().strftime('%Y%m%d-%H%M%S')}"
def _getStyles(self, reports, styles):
for report in reports:
styles.extend(DashboardStyle.objects.filter(pk__in=list(
set(report.dashboards.all().values_list('styles', flat=True)))).all())
if report.reports.count() > 0:
self._getStyles(report.reports.all(), styles)
def importItems(self, data):
user_company_id = self.client_session.userCompanyId
model_id = self.client_session.modelInfo.modelId
result = {"reports": [], "dashboards": [], "styles": []}
styles = dict()
for item in data["styles"]:
ds = DashboardStyle.objects.create(
name=item["name"],
definition=item["definition"],
style_type=item["style_type"],
owner_id=user_company_id,
)
styles.update({item["id"]: ds.pk})
for item in data["reports"]:
report = Report.objects.create(
model=model_id,
name=item["name"],
is_fav=item["is_fav"],
is_public=item["is_public"],
order=item["order"],
parent_id=item["parent_id"],
owner_id=user_company_id,
)
self._createChildReportsAndDashboards(report, item, result, styles)
for item in data["dashboards"]:
dash_created = Dashboard.objects.create(
model=model_id,
name=item["name"],
node=item["node"] if "node" in item else None,
is_fav=item["is_fav"],
definition=item["definition"] if "definition" in item else None,
order=item["order"],
owner_id=user_company_id,
)
dash_created.styles.set(DashboardStyle.objects.filter(pk__in=list(
map(lambda style: styles[style], item["styles"]))))
result["dashboards"].append(dash_created)
return result
def getShares(self, report_id):
report = Report.objects.get(pk=report_id)
is_shared = report.usercompanies.count() > 0 or report.departments.count() > 0
return {
"departments": Department.objects.filter(company_id=self.client_session.companyId).all(),
"usercompanies_shares": report.usercompanies,
"departments_shares": report.departments,
"sharedToEveryone": report.is_public,
"sharedTo": is_shared,
"noShared": not is_shared,
}
def setShares(self, report_id, data):
report = Report.objects.get(pk=report_id)
report.is_public = data["sharedToEveryone"]
report.usercompanies.clear()
report.departments.clear()
if not data["noShared"]:
for usercompany_id in data["usercompanies_ids"]:
usercompany = UserCompany.objects.get(pk=usercompany_id)
report.usercompanies.add(usercompany)
for department_id in data["departments_ids"]:
department = Department.objects.get(pk=department_id)
report.departments.add(department)
report.save()
return {
"departments": Department.objects.filter(company_id=self.client_session.companyId).all(),
"usercompanies_shares": report.usercompanies,
"departments_shares": report.departments,
"sharedToEveryone": report.is_public,
"sharedTo": report.usercompanies.count() > 0 or report.departments.count() > 0,
}
# private
def _createChildReportsAndDashboards(self, parent, item, result, styles):
user_company_id = self.client_session.userCompanyId
model_id = self.client_session.modelInfo.modelId
for rep in item["reports"]:
report = Report.objects.create(
model=model_id,
name=rep["name"],
is_fav=rep["is_fav"],
is_public=rep["is_public"],
order=item["order"],
parent=parent,
owner_id=user_company_id,
)
self._createChildReportsAndDashboards(report, rep, result, styles)
for dash in item["dashboards"]:
dash_created = Dashboard.objects.create(
model=model_id,
name=dash["name"],
node=dash["node"] if "node" in dash else None,
is_fav=dash["is_fav"],
definition=dash["definition"] if "definition" in dash else None,
order=item["order"],
report=parent,
owner_id=user_company_id
)
dash_created.styles.set(DashboardStyle.objects.filter(pk__in=list(
map(lambda item: styles[item], dash["styles"]))))
result["reports"].append(parent)
def _copyReport(self, report, owner_id, parent_id=None):
old_report_id = report.pk
report.owner_id = owner_id
report.parent_id = parent_id
report.pk = None
report.is_public = False
report.save()
for child_report in Report.objects.filter(parent_id=old_report_id):
child_report.parent = report
self._copyReport(child_report, owner_id, report.pk)
for child_dashboard in Dashboard.objects.filter(report_id=old_report_id):
child_dashboard.report = report
self._copyDashboard(child_dashboard, owner_id, report.pk)
def _copyDashboard(self, dashboard, owner_id, report_id=None):
dashboard.pk = None
dashboard.owner_id = owner_id
dashboard.is_public = False
dashboard.report_id = report_id
dashboard.save()
def _duplicateReport(self, report, is_main=False):
old_report_id = report.pk
report.pk = None
report.is_public = False
if is_main:
report.name += f"_copy {datetime.today().strftime('%Y%m%d-%H:%M:%S')}"
report.save()
for child_report in Report.objects.filter(parent_id=old_report_id):
child_report.parent = report
self._duplicateReport(child_report)
for child_dashboard in Dashboard.objects.filter(report_id=old_report_id):
child_dashboard.report = report
self._duplicateDashboard(child_dashboard)
def _duplicateDashboard(self, dashboard, is_main=False):
dashboard.pk = None
dashboard.is_public = False
if is_main:
dashboard.name += f"_copy {datetime.today().strftime('%Y%m%d-%H:%M:%S')}"
dashboard.save()
```
#### File: pyplan/usercompanies/views.py
```python
from rest_framework import status, viewsets
from rest_framework.decorators import action
from rest_framework.response import Response
from pyplan.pyplan.preference.serializers import PreferenceSerializer
from pyplan.pyplan.user_company_preference.service import \
UserCompanyPreferenceService
from .models import UserCompany
from .permissions import UserCompanyPermissions
from .serializers import UserCompanySerializer, UserCompanyCreateUpdateSerializer, UserCompanyPartialUpdateSerializer, ListByCompanyIdSerializer
from .service import UserCompanyService
from .pagination import UserCompaniesPagination
class UserCompanyViewSet(viewsets.ModelViewSet):
"""
Updates and retrieves user companies relations
"""
queryset = UserCompany.objects.all()
serializer_class = UserCompanySerializer
permission_classes = (UserCompanyPermissions,)
pagination_class = UserCompaniesPagination
def list(self, request, *args, **kwargs):
try:
service = UserCompanyService(request)
response = service.list()
if len(response) > 0:
return Response(UserCompanySerializer(response, many=True).data, status=status.HTTP_200_OK)
return Response("There was an error in the list", status=status.HTTP_406_NOT_ACCEPTABLE)
except Exception as ex:
return Response(str({ex}), status=status.HTTP_406_NOT_ACCEPTABLE)
def create(self, request, *args, **kwargs):
try:
serializer = UserCompanyCreateUpdateSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
service = UserCompanyService(request)
response = service.create(serializer.validated_data)
if len(response) > 0:
return Response(UserCompanySerializer(response, many=True).data, status=status.HTTP_201_CREATED)
return Response("There was an error in the creation", status=status.HTTP_406_NOT_ACCEPTABLE)
except Exception as ex:
return Response(str({ex}), status=status.HTTP_406_NOT_ACCEPTABLE)
def update(self, request, *args, **kwargs):
try:
user_id = kwargs.get("pk")
serializer = UserCompanyPartialUpdateSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
service = UserCompanyService(request)
response = service.partial_update(user_id, serializer.validated_data)
if response:
return Response(UserCompanySerializer(response, many=True).data, status=status.HTTP_200_OK)
return Response("There was an error in the update", status=status.HTTP_406_NOT_ACCEPTABLE)
except Exception as ex:
return Response(str({ex}), status=status.HTTP_406_NOT_ACCEPTABLE)
def partial_update(self, request, *args, **kwargs):
try:
user_id = kwargs.get("pk")
serializer = UserCompanyPartialUpdateSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
service = UserCompanyService(request)
response = service.partial_update(user_id, serializer.validated_data)
if response:
return Response(UserCompanySerializer(response, many=True).data, status=status.HTTP_200_OK)
return Response("There was an error in the update", status=status.HTTP_406_NOT_ACCEPTABLE)
except Exception as ex:
return Response(str({ex}), status=status.HTTP_406_NOT_ACCEPTABLE)
@action(detail=False, methods=['get'])
def preferences(self, request):
preferences = UserCompanyPreferenceService(request).getUserCompanyPreferences()
if preferences:
return Response(PreferenceSerializer(preferences, many=True).data)
return Response(status=status.HTTP_204_NO_CONTENT)
@action(detail=False, methods=['get'])
def preference_by_code(self, request):
        code = request.query_params.get("code")  # str(None) would be "None" (truthy); keep None when the param is absent
if code:
preference = UserCompanyPreferenceService(request).getUserCompanyPreference(code)
if preference:
return Response(PreferenceSerializer(preference).data)
return Response(status=status.HTTP_204_NO_CONTENT)
@action(detail=False, methods=['get'])
def list_by_company_id(self, request):
try:
service = UserCompanyService(request)
users = service.listByCompanyId()
if len(users) > 0:
return Response(ListByCompanyIdSerializer(users, many=True).data)
return Response(status=status.HTTP_204_NO_CONTENT)
except Exception as ex:
return Response(str(ex), status=status.HTTP_406_NOT_ACCEPTABLE)
```
#### File: pyplan/user_company_preference/permissions.py
```python
from rest_framework import permissions
class UserCompanyPreferencePermissions(permissions.BasePermission):
def has_permission(self, request, view):
"""
Checks permissions
"""
has_action_permission = False
if request.user and request.user.is_authenticated:
if view.action in ['list', 'retrieve']:
has_action_permission = request.user.has_perm('pyplan.view_usercompanypreference')
elif view.action in ['create']:
has_action_permission = request.user.has_perm('pyplan.add_usercompanypreference')
elif view.action in ['update', 'partial_update']:
has_action_permission = request.user.has_perm('pyplan.change_usercompanypreference')
elif view.action in ['destroy']:
has_action_permission = request.user.has_perm('pyplan.delete_usercompanypreference')
return has_action_permission
```
#### File: pyplan/users/permissions.py
```python
from rest_framework import permissions
class UserPermissions(permissions.BasePermission):
"""
Object-level permission to only allow owners of an object to edit it.
"""
def has_object_permission(self, request, view, obj):
"""
Checks permissions
"""
has_action_permission = False
if request.user and request.user.is_authenticated:
if view.action in ['list', 'retrieve']:
return True
elif view.action in ['create']:
has_action_permission = request.user.has_perm('pyplan.add_user')
elif view.action in ['update', 'partial_update']:
has_action_permission = request.user.has_perm('pyplan.change_user') or obj == request.user
elif view.action in ['destroy']:
has_action_permission = request.user.has_perm('pyplan.delete_user')
return has_action_permission
``` |
{
"source": "jorgedroguett/cajonera",
"score": 3
} |
#### File: jorgedroguett/cajonera/cajonera_3_cajones.py
```python
hol_lado = 13
# variables
h = 900
a = 600
prof_c = 400
h_c = 120
a_c = 200
hol_sup = 20
hol_inf = 10
hol_int = 40
hol_lateral = 2
esp_lado = 18
esp_sup = 18
esp_inf = 18
esp_c = 15
cubre_der_total = True
cubre_iz_total = True
def calcular_lado_cajon(prof_c, esp_c):
lado_cajon = prof_c - 2 * esp_c
return lado_cajon
def calcular_a_c(cubre_iz_total, cubre_der_total, esp_lado, hol_lado, hol_lateral, a):
if cubre_der_total:
espesor_derecho = esp_lado
else:
espesor_derecho = esp_lado / 2 - hol_lateral
if cubre_iz_total:
espesor_izquierdo = esp_lado
else:
espesor_izquierdo = esp_lado / 2 - hol_lateral
ancho_cajon = a - espesor_izquierdo - espesor_derecho - 2 * hol_lateral
return ancho_cajon
def calcular_h_c(h, esp_sup, esp_inf, hol_sup, hol_int, hol_inf):
suma_holhura = hol_sup + hol_int + hol_inf
suma_espesor = esp_sup + esp_inf
espacio_cajones = h - suma_espesor - suma_holhura
altura_cajon = espacio_cajones / 3
return altura_cajon
h_c = calcular_h_c(h, esp_sup, esp_inf, hol_sup, hol_int, hol_inf)
a_c = calcular_a_c(cubre_iz_total, cubre_der_total, esp_lado, hol_lado, hol_lateral, a)
l_c = calcular_lado_cajon(prof_c, esp_c)
print("frente cajon: ", a_c, " X ", round(h_c))
print("lado cajon: ", l_c, " X ", round(h_c))
``` |
{
"source": "Jorgee1/Sam_Excel_Exporter",
"score": 2
} |
#### File: Jorgee1/Sam_Excel_Exporter/main.py
```python
import sys
import csv
import ntpath
import os.path
from PyQt5.QtWidgets import QMainWindow, QApplication, QInputDialog, QLineEdit, QFileDialog, QListWidgetItem
from PyQt5.QtCore import (QCoreApplication, Qt, QEvent)
from mainUI import Ui_MainWindow
from data_extractor import *
"""
C&W Networks
<NAME>
<NAME>
"""
#pyuic5 mainWindow.ui -o mainUI.py
class Qfile(QListWidgetItem):
def __init__(self, path, parent=None):
self.path = path
self.filename = os.path.basename(self.path)
super().__init__(self.filename)
class AppWindow(QMainWindow):
def __init__(self):
super().__init__()
self.listIndex = -1
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.ui.open_fileB.triggered.connect(self.openFileNameDialog)
self.ui.pushButton.clicked.connect(self.extract_data)
self.ui.listWidget.itemSelectionChanged.connect(self.test)
def test(self):
self.listIndex = self.ui.listWidget.currentRow()
print(self.ui.listWidget.currentRow())
def keyPressEvent(self, event):
key = event.key()
if key == Qt.Key_Escape:
QCoreApplication.quit()
elif key == Qt.Key_Delete:
print(self.ui.listWidget.currentRow())
if self.listIndex>=0:
item = self.ui.listWidget.takeItem(self.ui.listWidget.currentRow())
item = None
for i in range(self.ui.listWidget.count()):
self.ui.listWidget.item(i).setSelected(False)
self.listIndex = -1
self.ui.pushButton.setFocus()
def mousePressEvent(self, event):
#print("X:",event.x(),", Y:",event.y())
for i in range(self.ui.listWidget.count()):
self.ui.listWidget.item(i).setSelected(False)
self.listIndex = -1
self.ui.pushButton.setFocus()
def openFileNameDialog(self):
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
fileNames, _ = QFileDialog.getOpenFileNames(self,"Abrir TXT", "","TXT (*.txt)", options=options)
if fileNames:
for i in fileNames:
self.ui.listWidget.addItem(Qfile(i))
self.ui.pushButton.setFocus()
def extract_data(self):
outFile = 'Resultado_SAPs'
final_data = [["Name", "Service", "Saps", "Ports"]]
        # If the default name is taken, find the next free "_<n>" suffix
        if os.path.isfile(outFile + '.csv'):
            index = 0
            while True:
                if os.path.isfile(outFile + '_' + str(index) + '.csv'):
                    index = index + 1
                else:
                    outFile = outFile + '_' + str(index)
                    break
            print(outFile + '.csv')
with open(outFile +'.csv', 'w', newline='') as f:
for i in range(self.ui.listWidget.count()):
#print(self.ui.listWidget.item(i).text())
data = extract(self.ui.listWidget.item(i).path)
final_data = final_data + data + [["","","",""]]
writer = csv.writer(f)
writer.writerows(final_data)
self.ui.listWidget.clear()
app = QApplication(sys.argv)
w = AppWindow()
w.show()
sys.exit(app.exec_())  # exec_() blocks until the window closes; nothing after this line runs
``` |
{
"source": "Jorgee97/CovidTwilio",
"score": 3
} |
#### File: app/data/scrapping.py
```python
import requests
from bs4 import BeautifulSoup
from .model import DayImage, Covid, Movies
from ..config import Config
import pandas as pd
from sodapy import Socrata
def get_day_image():
r = requests.get('https://www.minsalud.gov.co/salud/publica/PET/Paginas/Covid-19_copia.aspx')
soup = BeautifulSoup(r.text, 'html.parser')
results_row = soup.find_all('div', attrs={'id': 'WebPartWPQ4'})
results = []
origin_url = 'https://www.minsalud.gov.co'
for resultRow in results_row:
text = "Casos de COVID19 en Colombia"
img = resultRow.find('img').get('src')
results.append({
'text': text,
'img': origin_url + img
})
save_data_image(results)
def save_data_image(data: list):
DayImage(title=data[0]['text'], url=data[0]['img']).save()
def get_data():
client = Socrata("www.datos.gov.co", Config.DATOS_GOV_KEY)
results = client.get("gt2j-8ykr", limit=10000000)
results_df = pd.DataFrame.from_records(results)
results_df.index = results_df['id_de_caso']
results_df = results_df.drop('id_de_caso', axis=1)
f = lambda x: x.astype(str).str.lower()
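    # NFKD-normalize, encode to ASCII dropping combining marks, then decode:
    # this effectively strips accents (e.g. "Bogotá" becomes "bogota" after the lowercase pass above).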
translate_lambda = lambda s: s.astype(str).str.normalize('NFKD').str.encode('ascii', errors='ignore').str.decode(
'utf-8')
results_df = results_df.apply(f)
results_df = results_df.apply(translate_lambda)
results_df.to_csv('covid19-colombia.csv')
# Covid.drop_collection()
# Verify how much data do we have saved and then start inserting from there
start_point = len(Covid.objects)
save_covid_data(start_point + 1)
def save_covid_data(start_point):
with open('covid19-colombia.csv', 'r') as file:
data = file.readlines()
for info in data[start_point:]:
id_case, date, code, city, departament, attention, age, sex, tipo, state, precedence = info.split(',')[:11]
Covid(id_caso=id_case, fecha_diagnostico=date, ciudad_ubicacion=city,
departamento=departament, atencion=attention, edad=age, sexo=sex,
tipo=tipo, pais_procedencia=precedence).save()
def get_movies_series():
req = requests.get(
'http://finde.latercera.com/series-y-peliculas/que-ver-en-netflix-peliculas-series-buenas-abril-2/')
info = BeautifulSoup(req.text, 'html.parser')
info_row = info.find_all('div', attrs={'class': 'bg-white collapse-fix-xs'})
h2_titles = info_row[0].find_all('h2')[:-2]
images = info_row[0].select('figure > img')[:-2]
Movies.drop_collection()
for title, img in zip(h2_titles, images):
Movies(title=title.text, url=img.get('src')).save()
``` |
{
"source": "Jorgee97/Python_Projects",
"score": 4
} |
#### File: Python_Projects/Scripts/hangMan.py
```python
import random
words = ["house", "car", "creation", "junior", "developer", "glasses", "kitchen"]
word = words[random.randint(0, len(words) - 1)]
print(word)  # debug: prints the secret word; remove this line to keep the answer hidden
def find_letters_on_word(letter):
index = []
for i, l in enumerate(word):
if l == letter:
index.append(i)
return index
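# assign_size builds the initial placeholder shown to the player: one character per
# letter of the secret word (its index), which is replaced as letters are guessed.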
def assign_size():
string = ""
for i in range(0, len(word)):
string += str(i)
return string
def assign_value_found(my_index, guess):
l = list(guess)
for i, v in enumerate(my_index):
l[v] = word[v]
return "".join(l)
guessed = assign_size()
maximumTry = 3
while maximumTry > 0:
maximumTry -= 1
letter = input("Guess the word, please write one letter: \n")
indexArray = find_letters_on_word(letter)
if not len(indexArray) == 0:
guessed = assign_value_found(indexArray, guessed)
print(guessed)
if maximumTry == 0:
guess = input("Please write the completed word: \n")
if guess == word:
print("You have won the game, the word was " + word.upper())
else:
print("You have failed, the word was " + word.upper())
```
#### File: Python_Projects/Takenotes/takeNotes.py
```python
import click
import datetime
import getpass
class Note(object):
def __init__(self, text, user):
self.text = text
self.user = user
self.date = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
@click.group()
def setup_directory():
pass
@setup_directory.command()
@click.argument('save_directory')
def setup(save_directory):
# create setup file
'''
Provide the location where you want your notes to be safe, default file name is notes.txt,
that cannot be change yet.
Here is an example of how you should use this command argument:
takeNotes.py setup /home/username/some_folder
'''
with open("setup.txt", "w") as file:
file.write(save_directory + "/notes.txt")
@click.group()
def note_taking():
pass
@note_taking.command()
@click.argument('note')
def note(note):
'''
This command only receive the notes under quotes.
Example:
takeNotes.py note "This is a cool note!"
'''
with open("setup.txt", "r") as file:
with open(file.readline(), "a") as notes:
n = Note(note, getpass.getuser())
notes.write(n.text + "/" + str(n.user) + "/" + str(n.date) + "\n")
@click.group()
def list_notes():
pass
@list_notes.command()
def list():
'''
This command does not take any argument.
This command will return all your notes, in the order of writing from top to bottom, there is not any sort
implementation yet.
Example:
takeNotes.py list
'''
with open("setup.txt", "r") as file:
with open(file.readline(), "r") as note:
for l in note:
l = l.strip('\n')
n = l.split('/')
click.echo(f"{n[1]}: {n[0]} - {n[2]}")
main = click.CommandCollection(sources=[setup_directory, note_taking, list_notes])
if __name__ == '__main__':
main()
``` |
{
"source": "jorgeebn16/Project_2",
"score": 2
} |
#### File: jorgeebn16/Project_2/app.py
```python
import numpy as np
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from sqlalchemy.inspection import inspect
from flask import Flask, jsonify, render_template, redirect, request
from etl import *
import plotly
import plotly.graph_objs as go
from collections import defaultdict
from urllib.request import urlopen
import json
#################################################
# Database Setup
#################################################
engine = create_engine("sqlite:///assets/data/mortality.db")
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# Save reference to the table
mortality_county = Base.classes.mortality_county
mortality_state = Base.classes.mortality_state
mortality_us = Base.classes.mortality_us
#mortality_categories = Base.classes.vw_Category
#################################################
# Flask Setup
#################################################
app = Flask(__name__)
#################################################
# Flask Routes
#################################################
@app.route("/")
def index():
feature = 'Bar'
plot_1 = create_plot_1(feature)
plot_top5 = create_plot_top5(feature)
plot_bot5 = create_plot_bot5(feature)
plot_2 = create_plot_2()
plot_3 = create_plot_3()
plot_4 = create_plot_4()
plot_5 = create_plot_5()
return render_template('index.html', plot_1 = plot_1, plot_top5 = plot_top5, plot_bot5 = plot_bot5, plot_2 = plot_2
, plot_3 = plot_3
, plot_4 = plot_4
, plot_5 = plot_5)
@app.route("/etl")
@app.route("/etl/")
def etl():
#return process_etl()
try:
process_etl()
return redirect("/", code=302)
except:
return 'Something went horribly wrong!'
@app.route("/api/")
@app.route("/api")
def api():
"""List all available api routes."""
return (
f"Available Routes:<br/>"
f'<hr>'
f'<a href="/api/v1.0/categories">/api/v1.0/categories</a></br>'
f'<a href="/api/v1.0/years">/api/v1.0/years</a></br>'
f'<a href="/api/v1.0/county_all">/api/v1.0/county_all</a></br>'
f'<a href="/api/v1.0/county_year/2000">/api/v1.0/county_year/<year></a></br>'
f'<a href="/api/v1.0/state_year/2000">/api/v1.0/state_year/<year></a></br>'
f'<a href="/api/v1.0/us_year/2000">/api/v1.0/us_year/<year></a></br>'
f'</br>'
f'<hr>'
f'<a href="/">Return to the Dashboard</a>'
)
@app.route("/api/v1.0/categories/")
@app.route("/api/v1.0/categories")
def categories():
# Create our session (link) from Python to the DB
session = Session(engine)
"""Return a list of all Category names"""
# Query all passengers
results = session.query(mortality_us.Category).distinct().all()
session.close()
# Convert list of tuples into normal list
all_categories = list(np.ravel(results))
return jsonify(all_categories)
@app.route("/api/v1.0/years/")
@app.route("/api/v1.0/years")
def years():
# Create our session (link) from Python to the DB
session = Session(engine)
results = session.query(mortality_us.Date).distinct().all()
session.close()
# Convert list of tuples into normal list
all_categories = list(np.ravel(results))
return jsonify(all_categories)
@app.route("/api/v1.0/county_all/")
@app.route("/api/v1.0/county_all")
def county_all():
# Create our session (link) from Python to the DB
session = Session(engine)
results = session.query(mortality_county.FIPS, mortality_county.Category, mortality_county.Date, mortality_county.Value).all()
session.close()
# Create a dictionary from each row and append it to the results list
all_data = []
for FIPS, Category, Date, Value in results:
mortality_dict = {}
mortality_dict["FIPS"] = FIPS
mortality_dict["Category"] = Category
mortality_dict["Date"] = Date
mortality_dict["Value"] = Value
all_data.append(mortality_dict)
return jsonify(all_data)
@app.route("/api/v1.0/county_year/<year>/")
@app.route("/api/v1.0/county_year/<year>")
def county_year(year):
# Create our session (link) from Python to the DB
session = Session(engine)
results = session.query(mortality_county.FIPS, mortality_county.Category, mortality_county.Date, mortality_county.Value).\
filter(mortality_county.Date == year).all()
session.close()
# Create a dictionary from each row and append it to the results list
all_data = []
for FIPS, Category, Date, Value in results:
mortality_dict = {}
mortality_dict["FIPS"] = FIPS
mortality_dict["Category"] = Category
mortality_dict["Date"] = Date
mortality_dict["Value"] = Value
all_data.append(mortality_dict)
return jsonify(all_data)
@app.route("/api/v1.0/state_year/<year>/")
@app.route("/api/v1.0/state_year/<year>")
def state_year(year):
# Create our session (link) from Python to the DB
session = Session(engine)
results = session.query(mortality_state.FIPS, mortality_state.Category, mortality_state.Date, mortality_state.Value).\
filter(mortality_state.Date == year).all()
session.close()
# Create a dictionary from each row and append it to the results list
all_data = []
for FIPS, Category, Date, Value in results:
mortality_dict = {}
mortality_dict["FIPS"] = FIPS
mortality_dict["Category"] = Category
mortality_dict["Date"] = Date
mortality_dict["Value"] = Value
all_data.append(mortality_dict)
return jsonify(all_data)
@app.route("/api/v1.0/us_year/<year>/")
@app.route("/api/v1.0/us_year/<year>")
def us_year(year):
# Create our session (link) from Python to the DB
session = Session(engine)
results = session.query(mortality_us.Category, mortality_us.Date, mortality_us.Value).\
filter(mortality_us.Date == year).all()
session.close()
# Create a dictionary from each row and append it to the results list
all_data = []
for Category, Date, Value in results:
mortality_dict = {}
mortality_dict["Category"] = Category
mortality_dict["Date"] = Date
mortality_dict["Value"] = Value
all_data.append(mortality_dict)
return jsonify(all_data)
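# The county, state, and US routes above return JSON rows shaped like
# {"Category": ..., "Date": ..., "Value": ...}; the county- and state-level rows
# also include a "FIPS" key. The categories/years routes return plain lists.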
@app.route('/bar', methods=['GET', 'POST'])
def change_features():
feature = request.args['selected']
graphJSON = create_plot_1(feature)
return graphJSON
@app.route('/bar2', methods=['GET', 'POST'])
def change_features2():
feature = request.args['selected']
graphJSON = create_plot_top5(feature)
return graphJSON
@app.route('/bar3', methods=['GET', 'POST'])
def change_features3():
feature = request.args['selected']
graphJSON = create_plot_bot5(feature)
return graphJSON
def create_plot_1(feature):
session = Session(engine)
#df = pd.read_sql_table(table_name = 'mortality_us', con=session.connection(), index_col="index")
df = pd.read_sql(f"select Value, Category, Date from mortality_state", con=session.connection())
session.close()
cat = df['Category'].unique()
dates = df['Date'].unique()
ymax = df['Value'].max()
fig = go.Figure()
if feature == 'Box':
cnt = 1
for i in dates:
val = df.query(f'Date == "{i}"')['Value']
#cat = df.query(f'Date == "{i}"')['Category']
fig.add_trace(
go.Box(
visible=False,
x=df.query(f'Date == "{i}"')['Category'], # assign x as the dataframe column 'x'
y=val,
name = i
)
)
if cnt == len(df['Date'].unique()):
fig.data[0].visible = True
else:
cnt += 1
# fig = go.Figure(data = [[
# go.Box(name=i, x=df.query(f'Category == "{i}"')['Date'], y=df.query(f'Category == "{i}"')['Value']) for i in df['Category'].unique()]
# ,go.Box(name='All', x=df['Date'], y=df['Value'])
# ])
fig.update_layout(title="Box Plot - All Years by Category")
elif feature == 'Bar':
cnt = 1
for i in df['Date'].unique():
val = df.query(f'Date == "{i}"')['Value']
fig.add_trace(
go.Bar(
visible=False,
x=cat, # assign x as the dataframe column 'x'
y=val,
name = i
)
)
if cnt == len(df['Date'].unique()):
fig.data[0].visible = True
else:
cnt += 1
#fig = go.Figure(data = [go.Bar(name=i, x=df.query(f'Date == "{i}"')['Category'], y=df.query(f'Date == "{i}"')['Value']) for i in df['Date'].unique()])
fig.update_layout(barmode='stack')
fig.update_layout(title="Stacked Bar Chart - Category by Year")
else:
cnt = 1
for i in df['Date'].unique():
val = df.query(f'Date == "{i}"')['Value']
fig.add_trace(
go.Scatter(
visible=False,
x=cat, # assign x as the dataframe column 'x'
y=val,
name = i
)
)
if cnt == len(df['Date'].unique()):
fig.data[0].visible = True
else:
cnt += 1
#fig = go.Figure(data = [go.Scatter(name=i, x=df.query(f'Category == "{i}"')['Date'], y=df.query(f'Category == "{i}"')['Value']) for i in df['Category'].unique()])
fig.update_layout(title="Line Chart - Category by Year")
steps = []
for i in range(len(fig.data)):
step = dict(
method="restyle",
args=["visible", [False] * len(fig.data)],
label=fig.data[i]['name']
)
step["args"][1][i] = True # Toggle i'th trace to "visible"
steps.append(step)
sliders = [dict(
active=0,
currentvalue={"prefix": "Year: "},
pad={"t": 50},
steps=steps
)]
fig.update_layout(
sliders=sliders
)
fig.update_yaxes(range=[0, ymax])
graphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
#print(graphJSON)
return graphJSON
def create_plot_top5(feature):
session = Session(engine)
#df = pd.read_sql_table(table_name = 'mortality_us', con=session.connection(), index_col="index")
df = pd.read_sql(f"select f.Category,f.date, f.Value from mortality_us f where rowid in (select rowid from mortality_us where Date = f.Date order by Value desc limit 6) order by f.Date asc;", con=session.connection())
session.close()
if feature == 'Box':
fig = go.Figure(data = [
go.Box(
x=df['Category'], # assign x as the dataframe column 'x'
y=df['Value']
)
])
fig.update_layout(title="Box Plot - Top 6 - All Years by Category")
elif feature == 'Bar':
fig = go.Figure(data = [go.Bar(name=i, x=df.query(f'Date == "{i}"')['Category'], y=df.query(f'Date == "{i}"')['Value']) for i in df['Date'].unique()])
fig.update_layout(barmode='stack')
fig.update_layout(title="Stacked Bar Chart (Top 6) - Category by Year")
else:
fig = go.Figure(data = [go.Scatter(name=i, x=df.query(f'Category == "{i}"')['Date'], y=df.query(f'Category == "{i}"')['Value']) for i in df['Category'].unique()])
fig.update_layout(title="Line Chart (Top 6) - Category by Year")
graphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
#print(graphJSON)
return graphJSON
def create_plot_bot5(feature):
session = Session(engine)
#df = pd.read_sql_table(table_name = 'mortality_us', con=session.connection(), index_col="index")
df = pd.read_sql(f"select f.Category,f.date, f.Value from mortality_us f where rowid in (select rowid from mortality_us where Date = f.Date order by Value asc limit 5) order by f.Date asc;", con=session.connection())
session.close()
if feature == 'Box':
fig = go.Figure(data = [
go.Box(
x=df['Category'], # assign x as the dataframe column 'x'
y=df['Value']
)
])
fig.update_layout(title="Box Plot - Top 6 - All Years by Category")
elif feature == 'Bar':
fig = go.Figure(data = [go.Bar(name=i, x=df.query(f'Date == "{i}"')['Category'], y=df.query(f'Date == "{i}"')['Value']) for i in df['Date'].unique()])
fig.update_layout(barmode='stack')
fig.update_layout(title="Stacked Bar Chart (Top 6) - Category by Year")
else:
fig = go.Figure(data = [go.Scatter(name=i, x=df.query(f'Category == "{i}"')['Date'], y=df.query(f'Category == "{i}"')['Value']) for i in df['Category'].unique()])
fig.update_layout(title="Line Chart (Top 6) - Category by Year")
graphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
#print(graphJSON)
return graphJSON
def create_plot_2():
session = Session(engine)
df = pd.read_sql_table(table_name = 'mortality_us', con=session.connection(), index_col="index")
session.close()
ymax = df['Value'].max()
fig = go.Figure(data = [
go.Scatterpolar(
theta=df['Category'], # assign x as the dataframe column 'x'
r=df['Value']
)
])
fig.update_layout(title="Radar Chart - Category (All Years)")
steps = []
for i in range(len(fig.data)):
step = dict(
method="restyle",
args=["visible", [False] * len(fig.data)],
label=fig.data[i]['name']
)
step["args"][1][i] = True # Toggle i'th trace to "visible"
steps.append(step)
sliders = [dict(
active=10,
currentvalue={"prefix": "Year: "},
pad={"t": 50},
steps=steps
)]
fig.update_layout(
sliders=sliders
)
fig.update_yaxes(range=[0, ymax])
graphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
#print(graphJSON)
return graphJSON
def create_plot_3():
session = Session(engine)
df = pd.read_sql_table(table_name = 'mortality_us', con=session.connection(), index_col="index")
session.close()
fig = go.Figure(data = [
go.Pie(
labels=df['Category'], # assign x as the dataframe column 'x'
values=df['Value']
)
])
fig.update_layout(title="Pie Chart - Category (All Years)")
graphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
#print(graphJSON)
return graphJSON
def create_plot_4():
with urlopen('https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json') as response:
counties = json.load(response)
session = Session(engine)
#df = pd.read_sql_table(table_name = 'mortality_county', con=session.connection(), index_col="index")
#df = pd.read_sql(f"select * from mortality_county where Date = '2014' and Category = 'Diabetes'", con=session.connection(), index_col="index")
df = pd.read_sql(f"select Value, FIPS, Category, Date from mortality_county where Date = '2014'", con=session.connection())
session.close()
cat = df['Category'].unique()
#dates = df['Date'].unique()
#data = []
fig = go.Figure()
cnt = 1
for c in cat:
cat_df = df.query(f'Category == "{c}"')
fig.add_trace( go.Choroplethmapbox(geojson=counties, locations=cat_df.FIPS, z=cat_df.Value,
colorscale = "Viridis",
name = c,
#text =regions,
#colorbar = dict(thickness=20, ticklen=3),
marker_line_width=0, marker_opacity=0.7,
visible=False))
if cnt == len(cat):
fig.data[cnt-1].visible = True
else:
cnt += 1
fig.update_layout(mapbox_style="carto-positron",
mapbox_zoom=3, mapbox_center = {"lat": 37.0902, "lon": -95.7129})
#fig.update_layout(margin={"r":0,"t":0,"l":0,"b":0})
fig.update_layout(title="US Mortality by County - Category (2014)")
fig.update_layout(height=600)
button_layer_1_height = 1.12 #08
fig.update_layout(
updatemenus=[
go.layout.Updatemenu(
buttons=list([
dict(
args=["colorscale", "Viridis"],
label="Viridis",
method="restyle"
),
dict(
args=["colorscale", "Cividis"],
label="Cividis",
method="restyle"
),
dict(
args=["colorscale", "Blues"],
label="Blues",
method="restyle"
),
dict(
args=["colorscale", "Greens"],
label="Greens",
method="restyle"
),
]),
direction="down",
pad={"r": 10, "t": 10},
showactive=True,
x=0.1,
xanchor="left",
y=button_layer_1_height,
yanchor="top"
),
go.layout.Updatemenu(
buttons=list([
dict(
args=["reversescale", False],
label="False",
method="restyle"
),
dict(
args=["reversescale", True],
label="True",
method="restyle"
)
]),
direction="down",
pad={"r": 10, "t": 10},
showactive=True,
x=0.37,
xanchor="left",
y=button_layer_1_height,
yanchor="top"
)
]
)
steps = []
for i in range(len(fig.data)):
step = dict(
method="restyle",
args=["visible", [False] * len(fig.data)],
label=fig.data[i]['name']
)
step["args"][1][i] = True # Toggle i'th trace to "visible"
steps.append(step)
sliders = [dict(
active=10,
currentvalue={"prefix": "COD: "},
pad={"t": 50},
steps=steps
)]
fig.update_layout(
sliders=sliders
)
fig.update_layout(
annotations=[
go.layout.Annotation(text="colorscale", x=0, xref="paper", y=1.06, yref="paper",
align="left", showarrow=False),
go.layout.Annotation(text="Reverse<br>Colorscale", x=0.25, xref="paper", y=1.07,
yref="paper", showarrow=False)
])
graphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
#print(graphJSON)
return graphJSON
def create_plot_5x():
session = Session(engine)
df = pd.read_sql(f"select Value, Category, Date from mortality_state", con=session.connection())
session.close()
cat = df['Category'].unique()
dates = df['Date'].unique()
ymax = df['Value'].max()
fig = go.Figure()
cnt = 1
for i in df['Date'].unique():
val = df.query(f'Date == "{i}"')['Value']
fig.add_trace(
go.Scatter(
visible=False,
x=cat, # assign x as the dataframe column 'x'
y=val,
name = i,
mode='markers'
)
)
if cnt == len(df['Date'].unique()):
fig.data[cnt-1].visible = True
else:
cnt += 1
steps = []
for i in range(len(fig.data)):
step = dict(
method="restyle",
args=["visible", [False] * len(fig.data)],
label=fig.data[i]['name']
)
step["args"][1][i] = True # Toggle i'th trace to "visible"
steps.append(step)
sliders = [dict(
active=10,
currentvalue={"prefix": "Year: "},
pad={"t": 50},
steps=steps
)]
fig.update_layout(
sliders=sliders
)
fig.update_yaxes(range=[0, ymax])
graphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
#print(graphJSON)
return graphJSON
#fig.show()
def create_plot_5():
session = Session(engine)
df = pd.read_sql(f"select Value, Category, Date from mortality_us", con=session.connection())
session.close()
df = df.pivot(index='Date',columns='Category',values='Value')
ymax=df.max()
fig = go.Figure()
fig = go.Figure(data=go.Splom(dimensions=[dict(label=c, values=df[c]) for c in df.columns],text=df.index
,marker=dict(color=df.index.astype('int'),size=5,colorscale='Bluered',line=dict(width=0.5,color='rgb(230,230,230)'))))
# cnt = 1
# for i in df['Date'].unique():
# val = df.query(f'Date == "{i}"')['Value']
# fig.add_trace(
# go.Scatter(
# visible=False,
# x=cat, # assign x as the dataframe column 'x'
# y=val,
# name = i,
# mode='markers'
# )
# )
# if cnt == len(df['Date'].unique()):
# fig.data[cnt-1].visible = True
# else:
# cnt += 1
steps = []
for i in range(len(fig.data)):
step = dict(
method="restyle",
args=["visible", [False] * len(fig.data)],
label=fig.data[i]['name']
)
step["args"][1][i] = True # Toggle i'th trace to "visible"
steps.append(step)
sliders = [dict(
active=10,
currentvalue={"prefix": "Year: "},
pad={"t": 50},
steps=steps
)]
fig.update_layout(
sliders=sliders
)
fig.update_yaxes(range=[0, ymax])
fig.update_layout(title='Scatter Plot Matrix',
dragmode='select',
width=1000,
height=1000,
hovermode='closest')
graphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
#print(graphJSON)
return graphJSON
#fig.show()
if __name__ == '__main__':
app.run(debug=True)
``` |
{
"source": "jorgeebn16/sqlalchemy-challenge",
"score": 3
} |
#### File: jorgeebn16/sqlalchemy-challenge/climate_app.py
```python
from flask import Flask , jsonify
import sqlalchemy
from sqlalchemy import create_engine, func
from sqlalchemy.orm import Session
from sqlalchemy.ext.automap import automap_base
import datetime as dt
from sqlalchemy.orm import scoped_session, sessionmaker
#############################################
# Database Setup
#############################################
engine = create_engine("sqlite:///Resources/hawaii.sqlite",echo=False)
Base = automap_base()
Base.prepare(engine, reflect=True)
Measurement = Base.classes.measurement
Station = Base.classes.station
session = scoped_session(sessionmaker(bind=engine))
last_date = session.query(
Measurement.date
).order_by(
Measurement.date.desc()
).first()[0]
last_date = dt.datetime.strptime(last_date,"%Y-%m-%d")
first_date = last_date - dt.timedelta(days = 365)
############################################
# Flask Setup
############################################
app = Flask(__name__)
############################################
# Flask Routes
############################################
@app.route("/")
def welcome():
return (
f"Available Routes:<br/>"
f"The dates and temperature observations from the last year:<br/>"
f"/api/v1.0/precipitation<br/>"
f"List of stations from the dataset:<br/>"
f"/api/v1.0/stations<br/>"
f"List of Temperature Observations (tobs) for the previous year:<br/>"
f"/api/v1.0/tobs<br/>"
f"List of the minimum temperature, the average temperature, and the max temperature for a given start(i.e.2017-1-1):<br/>"
f"/api/v1.0/<start><br/>"
f"List of the minimum temperature, the average temperature, and the max temperature for a given start and end(i.e.2017-01-01/2017-01-07):<br/>"
f"/api/v1.0/<start>/<end><br/>"
)
@app.route("/api/v1.0/precipitation")
def precipitation():
twelve_month_data = session.query(
Measurement.date,
Measurement.prcp
).filter(
Measurement.date > first_date
).order_by(
Measurement.date
).all()
precipitation_data = dict(twelve_month_data)
return jsonify({'Data':precipitation_data})
@app.route("/api/v1.0/stations")
def stations():
stations = session.query(Station).all()
stations_list = list()
for station in stations:
stations_dict = dict()
stations_dict['Station'] = station.station
stations_dict["Station Name"] = station.name
stations_dict["Latitude"] = station.latitude
stations_dict["Longitude"] = station.longitude
stations_dict["Elevation"] = station.elevation
stations_list.append(stations_dict)
return jsonify ({'Data':stations_list})
@app.route("/api/v1.0/tobs")
def tobs():
twelve_month_tobs = session.query(
Measurement.tobs,
Measurement.date,
Measurement.station
).filter(
Measurement.date > first_date
).all()
temp_list = list()
for data in twelve_month_tobs:
temp_dict = dict()
temp_dict['Station'] = data.station
temp_dict['Date'] = data.date
temp_dict['Temp'] = data.tobs
temp_list.append(temp_dict)
return jsonify ({'Data':temp_list})
@app.route("/api/v1.0/<start>")
def start_temp(start=None):
start_temps = session.query(
func.min(Measurement.tobs),
func.avg(Measurement.tobs),
func.max(Measurement.tobs)
).filter(
Measurement.date >= start
).all()
start_list = list()
for tmin, tavg, tmax in start_temps:
start_dict = {}
start_dict["Min Temp"] = tmin
start_dict["Max Temp"] = tavg
start_dict["Avg Temp"] = tmax
start_list.append(start_dict)
return jsonify ({'Data':start_list})
@app.route("/api/v1.0/<start>/<end>")
def calc_temps(start=None,end=None):
temps = session.query(
func.min(Measurement.tobs),
func.avg(Measurement.tobs),
func.max(Measurement.tobs)
).filter(
Measurement.date >= start,
Measurement.date <= end
).all()
temp_list = list()
for tmin, tavg, tmax in temps:
temp_dict = dict()
temp_dict["Min Temp"] = tmin
temp_dict["Avg Temo"] = tavg
temp_dict["Max Temp"] = tmax
temp_list.append(temp_dict)
return jsonify ({'Data':temp_list})
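# Example requests (dates are illustrative and must use the dataset's YYYY-MM-DD format):
#   GET /api/v1.0/precipitation
#   GET /api/v1.0/2017-01-01
#   GET /api/v1.0/2017-01-01/2017-01-07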
if __name__ == '__main__':
app.run(debug=True)
``` |
{
"source": "jorgefalconcampos/ECAD",
"score": 2
} |
#### File: app/ecad_app/mailer.py
```python
from django.core import mail #EmailMessage lives here
from django.utils.text import format_lazy as fl
from django.conf import settings as conf_settings
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from django.core.mail.backends.smtp import EmailBackend
from django.core.mail import send_mail, get_connection, send_mass_mail
class BaseMassiveMailer():
def __init__(self, message_to, context, subject, template, sndr_host, sndr_username, sndr_pass, sndr_port, sndr_tls, **substitutions):
self.con = mail.get_connection()
self.subject = subject
self.message_to = message_to #A list of emails
self.template = template
self.context = context
self.sndr_host = sndr_host
self.sndr_port = sndr_port
self.sndr_username = sndr_username
self.sndr_pass = <PASSWORD>
self.sndr_tls = sndr_tls
self.substitutions = { }
for key in substitutions:
self.substitutions.update({key:substitutions[key]})
def create_emails(self):
try:
# creating connection
self.con.open()
#filling the connection (EmailBackend) object
self.mail_obj = EmailBackend(
host=self.sndr_host,
port=self.sndr_port,
username=self.sndr_username,
password=self.sndr_pass,
use_tls=self.sndr_tls)
self.mails = []
# filling the EmailMessage objects
for email in self.message_to:
for k,v in self.context.items():
if k == email:
ctxt = {
'email':k,
'unsubscribe_url':v,
'index_url':self.substitutions["index_url"],
'post_title':self.substitutions["post_title"],
'post_url':self.substitutions["post_url"],
'post_preview':self.substitutions["post_preview"],
'post_bg_img':self.substitutions["post_bg_img"],
'privacy_url':self.substitutions["privacy_url"],}
body_msg = render_to_string(self.template, ctxt)
new_mail_msg = mail.EmailMessage(
subject=self.subject,
body=body_msg,
from_email=self.sndr_username,
to=[email],
connection=self.con)
self.mails.append(new_mail_msg)
except Exception as e:
print(f'\n\n# --- PY: Error while creating the connection or mail(s) message(s): --- #\n{e}')
def send_massive_email(self):
self.create_emails()
try:
for mail in self.mails:
mail.content_subtype = 'html'
self.mail_obj.send_messages(self.mails)
self.con.close()
return True
except Exception as e:
print(f'\n\n# --- PY: Error sending massive emails: --- #\n{e}')
return False
class BaseMailer():
def __init__(self, message_to, context, subject, template, sndr_host, sndr_port, sndr_username, sndr_pass, sndr_tls, **substitutions):
self.con = mail.get_connection()
self.message_to = message_to
self.subject = subject
self.body_msg = render_to_string(template, context)
self.sndr_host = sndr_host
self.sndr_port = sndr_port
self.sndr_username = sndr_username
self.sndr_pass = <PASSWORD>
self.sndr_tls = sndr_tls
# something like the request object, or similar
self.substitutions = {}
for key in substitutions:
self.substitutions.update({key:substitutions[key]})
def create_email(self):
try:
# creating connection
self.con.open()
#filling the connection (EmailBackend) object
self.mail_obj = EmailBackend(
host=self.sndr_host,
port=self.sndr_port,
username=self.sndr_username,
password=<PASSWORD>.<PASSWORD>,
use_tls=self.sndr_tls)
# filling the EmailMessage object
self.mail = mail.EmailMessage(
subject=self.subject,
body=self.body_msg,
from_email=self.sndr_username,
to=[self.message_to],
connection=self.con)
except Exception as e:
print(f'\n\n# --- PY: Error while creating the connection or mail(s) message(s): --- #\n{e}')
def send_email(self):
self.create_email()
try:
self.mail.content_subtype = 'html'
self.con.send_messages([self.mail]) #sending email with the current connection, this is intended to send messages to multiple mails w/ the same conn
# self.mail.send(self.mail) #sending email with the EmailMessage object
# self.mail_obj.send_messages([self.mail]) #sending email with EmailBackend
self.con.close()
return True
except Exception as e:
print(f'\n\n# --- PY: Error sending email: --- #\n{e}')
return False
class SendNewsletterConfirmation(BaseMailer):
def __init__(self, message_to, context, **substitutions):
super().__init__(
message_to,
context,
subject = _('str_confirmYourEmail'),
template = 'ecad_app/mails/blog/confirm-mail.html',
sndr_host = conf_settings.EMAIL_HOST,
sndr_port = conf_settings.EMAIL_PORT,
sndr_username = conf_settings.NEWSLETTER_HOST_USER,
sndr_pass = conf_settings.NEWSLETTER_HOST_PASSWORD,
sndr_tls = conf_settings.EMAIL_USE_TLS,
**substitutions
)
class SendConfirmationMail(BaseMailer):
def __init__(self, message_to, context, **substitutions):
super().__init__(
message_to,
context,
subject = fl('{} {} {}', _('str_mails_activationMail4'), substitutions["first_name"], substitutions["last_name"]),
template = 'ecad_app/mails/user/activate-email.html',
sndr_host = conf_settings.EMAIL_HOST,
sndr_username = conf_settings.USERS_HOST_USER,
sndr_pass = conf_settings.USERS_HOST_PASSWORD,
sndr_port = conf_settings.EMAIL_PORT,
sndr_tls = conf_settings.EMAIL_USE_TLS,
**substitutions
)
class SendContactMail(BaseMailer):
def __init__(self, message_to, context, **substitutions):
super().__init__(
message_to,
context,
subject = fl('{}: {}', _('str_mails_newMail'), substitutions["subj"]),
template = 'ecad_app/user/contact-mail.html',
sndr_host = conf_settings.EMAIL_HOST,
# ---- change only username and pass
sndr_username = conf_settings.CONTACT_HOST_USER,
sndr_pass = conf_settings.CONTACT_HOST_PASSWORD,
# change only username and pass ----
sndr_port = conf_settings.EMAIL_PORT,
sndr_tls = conf_settings.EMAIL_USE_TLS,
**substitutions
)
class SendNewsletterMessage(BaseMassiveMailer):
def __init__(self, message_to, context, **substitutions):
super().__init__(
message_to,
context,
subject = fl('{}: {}', _('str_mails_newsletterNewMail'), substitutions["post_title"]),
template = 'ecad_app/mails/blog/avg-mail.html',
sndr_host = conf_settings.EMAIL_HOST,
sndr_username = conf_settings.NEWSLETTER_HOST_USER,
sndr_pass = conf_settings.NEWSLETTER_HOST_PASSWORD,
sndr_port = conf_settings.EMAIL_PORT,
sndr_tls = conf_settings.EMAIL_USE_TLS,
**substitutions
)
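# Minimal usage sketch (addresses and context keys are hypothetical; the rendered
# template decides which context keys it actually uses):
#
#   mailer = SendContactMail(
#       message_to="contact@example.com",
#       context={"sender": "visitor@example.com", "message": "Hello"},
#       subj="Website question",
#   )
#   sent = mailer.send_email()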
```
#### File: app/ecad_app/models.py
```python
import os
from django.db import models as m
# Importing "pre_save" to auto-generate the post slug and "post_save" to auto-create/update the Author model
from django.db.models.signals import pre_save, post_save
from django.dispatch import receiver
from taggit.managers import TaggableManager
from django.utils import timezone
from ecad_project.utils import unique_slug_generator # Importing the auto slug generator
from django.contrib.auth.models import User
from django.db.models import Count
from django.utils.translation import gettext as _
from django.template.defaultfilters import slugify
# Author model: represents an "author" or teacher who writes posts and has access to the dashboard
class Author(m.Model):
name = m.OneToOneField(User, on_delete=m.CASCADE, related_name='autor')
slug = m.SlugField(unique=True, null=True, blank=True)
title = m.CharField(max_length=100, blank=True)
email = m.EmailField(unique=True, null=True)
image = m.ImageField(upload_to='author/img/', default='author/default-img.png')
bio = m.TextField(max_length=500)
facebook_URL = m.URLField(null=True, blank=True)
twitter_URL = m.URLField(null=True, blank=True)
linkedin_URL = m.URLField(null=True, blank=True)
activated_account = m.BooleanField(default=False)
def __str__(self):
return self.name.username
@receiver(post_save, sender=User)
def create_profile(sender, instance, created, **kwargs):
if created:
Author.objects.create(name=instance)
if not instance.is_superuser:
instance.autor.save()
# @receiver(pre_save, sender=User)
# def save_instance(sender, instance, **kwargs):
# instance.Author.save()
def image_filename(self):
return os.path.basename(self.image.name)
def save(self, *args, **kwargs):
self.slug = slugify(self.name.first_name+' '+self.name.last_name)
super (Author, self).save(*args, **kwargs)
def activate_account(self, *args, **kwargs):
self.activated_account = True
super (Author, self).save(*args, **kwargs)
# Category model: represents a "category", topic, or subject in general terms
class Category(m.Model):
name = m.CharField(max_length=100)
description = m.CharField(max_length=300)
slug = m.SlugField(unique=True)
image = m.ImageField(upload_to='categories/', default='no-category.png')
def __str__(self):
return self.name
def save(self, *args, **kwargs):
self.slug = slugify(self.name)
super (Category, self).save(*args, **kwargs)
# Possible statuses for a post
post_status = (
(0, 'Draft'),
(1, 'Approved'),
(2, 'Rejected'),
(3, 'Archived')
)
# Post model: represents a "note" or annotation
class Post(m.Model):
author = m.ForeignKey(Author, on_delete=m.CASCADE, related_name='author')
title = m.CharField(max_length=150)
subtitle = m.CharField(max_length=200, blank=True) # Subtitle is optional
slug = m.SlugField(unique=True)
category = m.ForeignKey(Category, on_delete=m.CASCADE, related_name='catego', default='1')
image = m.ImageField(upload_to='img/', default='no-img.png')
unsplash_URL = m.URLField(null=True, blank=True)
post_body = m.TextField() #Post body
tags = TaggableManager() #Tags
created_date = m.DateTimeField(default=timezone.now)
published_date = m.DateTimeField(blank=True, null=True)
status = m.IntegerField(choices=post_status, default=0)
# Reactions and votes for the post: favorite, useful, like, or dislike
vote_fav = m.IntegerField(default=0)
vote_util = m.IntegerField(default=0)
vote_tmbup = m.IntegerField(default=0)
vote_tmbdn = m.IntegerField(default=0)
send_to_newsletter = m.BooleanField(null=True, default=False)
def publish(self):
self.published_date = timezone.now()
self.save()
def approve_post(self):
self.published_date = timezone.now()
self.status = 1
self.save()
def reject_post(self):
self.status = 2
self.save()
def archive_post(self):
self.status = 3
self.save()
def unarchive_post(self):
self.status = 0
self.save()
def all_cmts(self):
return Comment.objects.filter(in_post=self).count() or False
def approved_cmts(self):
return Comment.objects.filter(in_post=self, is_approved=True).count()
def non_approved_cmts(self):
return Comment.objects.filter(in_post=self, is_approved=False).count()
def __str__(self):
return self.title
def save(self, *args, **kwargs):
self.slug = slugify(self.title)
super (Post, self).save(*args, **kwargs)
# Comment model: represents a "comment" on a post
class Comment(m.Model):
in_post = m.ForeignKey(Post, on_delete=m.CASCADE, related_name="comments")
author = m.CharField(max_length=125)
author_email = m.EmailField()
comment_body = m.TextField()
created_date = m.DateTimeField(auto_now_add=True)
is_approved = m.BooleanField(default=False)
has_report = m.BooleanField(default=False)
# approve method, to approve or disapprove comments
def approve(self):
self.is_approved = True
self.save()
def report(self):
self.has_report = True
self.save()
def approved_comments(self):
return Comment.objects.filter(in_post=self.in_post, is_approved=True)
# Misc model: represents a "miscellaneous" object; it can be a general announcement, rules page, cookie notice (if applicable), etc.
class Misc(m.Model):
name = m.CharField(max_length=100)
date = m.DateTimeField(default=timezone.now)
head_desc = m.CharField(max_length=200) #Description for HTML
bgImage = m.URLField(max_length=500)
content = m.TextField() #Post body
def __str__(self):
return self.name
# Subscriber model: represents a "subscriber" signed up for the newsletter
class Subscriber(m.Model):
email = m.EmailField(unique=True)
conf_num = m.CharField(max_length=15)
confirmed = m.BooleanField(default=False)
def __str__(self):
return self.email + " (" + ("not " if not self.confirmed else "") + "confirmed)"
# Auto-generates the slug for a Post instance when it is saved from an HTTP request (form)
def slug_generator(sender, instance, *args, **kwargs):
if not instance.slug:
instance.slug = unique_slug_generator(instance)
pre_save.connect(slug_generator, sender=Post)
```
#### File: ecad_app/templatetags/url_builder_openGraph.py
```python
from django import template
from django.templatetags import static
register = template.Library()
class FullStaticNode(static.StaticNode):
def url(self, context):
request = context['request']
return request.build_absolute_uri(super().url(context))
@register.tag('fullstatic')
def do_static(parser, token):
return FullStaticNode.handle_token(parser, token)
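# Usage sketch in a Django template (hypothetical snippet; assumes this module lives in
# an app's `templatetags` package and is loaded by its module name):
#
#   {% load url_builder_openGraph %}
#   <meta property="og:image" content="{% fullstatic 'img/og-banner.png' %}">
#
# Unlike {% static %}, {% fullstatic %} builds an absolute URI from the current request,
# which Open Graph consumers expect.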
```
#### File: app/ecad_app/tokens.py
```python
from django.contrib.auth.tokens import PasswordResetTokenGenerator
import six
class TokenGenerator(PasswordResetTokenGenerator):
def _make_hash_value(self, user, timestamp):
return (
six.text_type(user.pk) + six.text_type(timestamp) +
six.text_type(user.is_active)
)
account_activation_token = TokenGenerator()
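# Minimal usage sketch (assumes a Django `user` instance; the token normally travels
# inside an account-activation link and is verified in the confirmation view):
#
#   token = account_activation_token.make_token(user)
#   ...
#   if account_activation_token.check_token(user, token):
#       user.is_active = True
#       user.save()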
# from django.contrib.auth.tokens import PasswordResetTokenGenerator
# import six
# class UserTokenGenerator(PasswordResetTokenGenerator):
# def _make_hash_value(self, user, timestamp):
# user_id = six.text_type(user.pk)
# ts = six.text_type(timestamp)
# is_active = six.text_type(user.is_active)
# return f"{user_id}{ts}{is_active}"
# user_tokenizer = UserTokenGenerator()
``` |
{
"source": "jorgefandinno/eclingo",
"score": 2
} |
#### File: src/eclingo/control.py
```python
from pprint import pprint
import sys
from typing import Iterable, Tuple
from clingo import Symbol
from eclingo import internal_states
from eclingo.config import AppConfig
from eclingo.grounder import Grounder
from eclingo.solver import Solver
from eclingo.util.logger import logger
from eclingo import __version__
class Control(object):
def __init__(self, control=None, config=None):
if control is not None:
self.project = control.configuration.solve.project
self.max_models = int(control.configuration.solve.models)
control.configuration.solve.project = "auto,3"
control.configuration.solve.models = 0
self.control = control
else:
self.project = None
self.max_models = 1
self.control = internal_states.InternalStateControl(['0', '--project'], logger=logger)
if config is None:
config = AppConfig()
self.config = config
if self.max_models == 0:
self.max_models = sys.maxsize
self.epistemic_signature = dict()
self.grounder = Grounder(self.control, self.config)
self.models = 0
self.grounded = False
self.solver = None
self.epistemic_signature_symbol = dict()
def add_program(self, program):
self.grounder.add_program(program)
def load(self, input_path):
with open(input_path, 'r') as program:
self.add_program(program.read())
def ground(self, parts: Iterable[Tuple[str, Iterable[Symbol]]] = (("base", []),)):
self.grounder.ground(parts)
self.epistemic_signature = self.grounder.epistemic_signature
self.epistemic_signature_symbol = dict(
(s.epistemic_literal, s) for s in self.epistemic_signature.values()
)
self.grounded = True
def preprocess(self):
pass
def prepare_solver(self):
if not self.grounded:
self.ground()
self.solver = Solver(self.control, self.config)
def solve(self):
if self.solver is None:
self.prepare_solver()
for model in self.solver.solve():
self.models += 1
yield model
if self.models >= self.max_models:
break
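# Minimal usage sketch (the program string is illustrative; models are yielded lazily,
# and the test suite consumes them through `model.symbols`):
#
#   econtrol = Control()
#   econtrol.add_program("a. b :- &k{ a }.")
#   econtrol.ground()
#   for world_view in econtrol.solve():
#       print([str(symbol) for symbol in world_view.symbols])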
```
#### File: eclingo/solver/preprocessor.py
```python
from typing import Set
from clingo import Symbol
from eclingo.config import AppConfig
import eclingo.internal_states.internal_control as internal_control
class Preprocessor():
def __init__(self,
config: AppConfig,
control: internal_control.InternalStateControl) -> None:
self._control = control
self._config = config
self._epistemic_to_test_mapping = self._control.epistemic_to_test_mapping
self.facts = set(self._control.facts())
self._epistemic_facts = self._generate_epistemic_facts(self._epistemic_to_test_mapping, self.facts)
def __call__(self):
self._add_epistemic_facts_to_control()
def _generate_epistemic_facts(self, epistemic_to_test, facts) -> Set[Symbol]:
epistemic_facts: Set[Symbol] = set()
for epistemic_literal, test_literal in epistemic_to_test.items():
if test_literal in facts:
epistemic_facts.add(epistemic_literal)
return epistemic_facts
def _add_epistemic_facts_to_control(self):
with self._control.symbolic_backend() as backend:
for fact in self._epistemic_facts:
backend.add_rule([fact], [], [], False)
# self._control.cleanup()
```
#### File: eclingo/tests/test_app.py
```python
import os
from os import register_at_fork
import subprocess
import unittest
from clingo import Number
import eclingo
from eclingo.util.logger import silent_logger
APP_PATH = '../src/eclingo/main.py'
INPUT_PROG_PATH = 'prog/input/'
OUTPUT_PROG_PATH = 'prog/output/'
KB_ELIGIBLE_PATH = 'eligible/eligible.lp'
INPUT_ELIGIBLE_PATH = 'eligible/input/'
OUTPUT_ELIGIBLE_PATH = 'eligible/output/'
KB_YALE_PATH = 'yale/yale.lp'
INPUT_YALE_PATH = 'yale/input/'
OUTPUT_YALE_PATH = 'yale/output/'
def parse_output(output):
lines = output.split('\n')
world_views = []
is_world_view = False
for line in lines:
if is_world_view:
world_view = line.split()
world_views.append(world_view)
is_world_view = False
elif line.startswith('World view:'):
is_world_view = True
return world_views
class TestExamples(unittest.TestCase):
def assert_world_views(self, command, output_path):
process = subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
output = stdout.decode('utf-8')
world_views = parse_output(output)
for world_view in world_views:
world_view.sort()
world_views.sort()
world_views = [str(wv) for wv in world_views]
world_views = str(world_views).replace(' ', '').replace("'", '').replace('"', '')
with open(output_path, 'r') as output_prog:
sol = output_prog.read()
sol = sol.replace('\n', '').replace(' ', '')
self.assertEqual(world_views, sol, 'in ' + str(command))
def test_prog_g94(self):
for i in range(1, 11):
path = os.path.dirname(os.path.realpath(__file__))
input_path = os.path.join(path, INPUT_PROG_PATH)
input_path = os.path.join(input_path, f'prog{i:02d}.lp')
output_path = os.path.join(path, OUTPUT_PROG_PATH)
output_path = os.path.join(output_path, f'sol{i:02d}.txt')
app_path = os.path.join(path, APP_PATH)
command = ['python', app_path, '0', input_path]
self.assert_world_views(command, output_path)
def test_eligible_g94(self):
for i in range(1, 17):
path = os.path.dirname(os.path.realpath(__file__))
elegible_path = os.path.join(path, KB_ELIGIBLE_PATH)
input_path = os.path.join(path, INPUT_ELIGIBLE_PATH)
input_path = os.path.join(input_path, f'eligible{i:02d}.lp')
output_path = os.path.join(path, OUTPUT_ELIGIBLE_PATH)
output_path = os.path.join(output_path, f'sol_eligible{i:02d}.txt')
app_path = os.path.join(path, APP_PATH)
command = ['python', app_path, '0', elegible_path, input_path]
self.assert_world_views(command, output_path)
def test_yale_g94(self):
for i in range(1, 9):
if i != 6:
path = os.path.dirname(os.path.realpath(__file__))
yale_path = os.path.join(path, KB_YALE_PATH)
input_path = os.path.join(path, INPUT_YALE_PATH)
input_path = os.path.join(input_path, f'yale{i:02d}.lp')
output_path = os.path.join(path, OUTPUT_YALE_PATH)
output_path = os.path.join(output_path, f'sol_yale{i:02d}.txt')
app_path = os.path.join(path, APP_PATH)
constant = '-c length=%d' % i
command = ['python', app_path, constant, '0', yale_path, input_path]
self.assert_world_views(command, output_path)
```
#### File: eclingo/tests/test_eclingo_examples.py
```python
import os
import unittest
from clingo import Number
from eclingo import control as _control, internal_states, config as _config
from eclingo.util.logger import silent_logger
INPUT_PROG_PATH = 'prog/input/'
OUTPUT_PROG_PATH = 'prog/output/'
KB_ELIGIBLE_PATH = 'eligible/eligible.lp'
INPUT_ELIGIBLE_PATH = 'eligible/input/'
OUTPUT_ELIGIBLE_PATH = 'eligible/output/'
KB_YALE_PATH = 'yale/yale-parameter.lp'
INPUT_YALE_PATH = 'yale/input/'
OUTPUT_YALE_PATH = 'yale/output/'
class TestExamples(unittest.TestCase):
def test_prog_g94(self):
for i in range(1, 11):
control = internal_states.InternalStateControl(logger=silent_logger)
control.configuration.solve.models = 0
eclingo_control = _control.Control(control=control)
path = os.path.dirname(os.path.realpath(__file__))
input_path = os.path.join(path, INPUT_PROG_PATH)
input_path = os.path.join(input_path, f'prog{i:02d}.lp')
output_path = os.path.join(path, OUTPUT_PROG_PATH)
output_path = os.path.join(output_path, f'sol{i:02d}.txt')
eclingo_control.load(input_path)
eclingo_control.ground()
result = [[ str(symbol) for symbol in model.symbols ] for model in eclingo_control.solve()]
result = [sorted(model) for model in result]
result = str(sorted(result)).replace(' ', '').replace("'", '')
with open(output_path, 'r') as output_prog:
sol = output_prog.read()
sol = sol.replace('\n', '').replace(' ', '')
self.assertEqual(result, sol, 'in ' + input_path)
def test_eligible_g94(self):
for i in range(1, 17):
control = internal_states.InternalStateControl(logger=silent_logger)
control.configuration.solve.models = 0
_config.add_efacts = True
eclingo_control = _control.Control(control=control)
# eclingo_control.config.eclingo_verbose = 2
path = os.path.dirname(os.path.realpath(__file__))
elegible_path = os.path.join(path, KB_ELIGIBLE_PATH)
input_path = os.path.join(path, INPUT_ELIGIBLE_PATH)
input_path = os.path.join(input_path, f'eligible{i:02d}.lp')
output_path = os.path.join(path, OUTPUT_ELIGIBLE_PATH)
output_path = os.path.join(output_path, f'sol_eligible{i:02d}.txt')
eclingo_control.load(elegible_path)
eclingo_control.load(input_path)
eclingo_control.ground()
result = [[ str(symbol) for symbol in model.symbols ] for model in eclingo_control.solve()]
result = [sorted(model) for model in result]
result = str(sorted(result)).replace(' ', '').replace("'", '')
with open(output_path, 'r') as output_prog:
sol = output_prog.read()
sol = sol.replace('\n', '').replace(' ', '')
self.assertEqual(result, sol, 'in ' + input_path)
def test_yale_g94(self):
for i in range(1, 9):
if i != 6:
control = internal_states.InternalStateControl(logger=silent_logger)
control.configuration.solve.models = 0
eclingo_control = _control.Control(control=control)
# eclingo_control.config.eclingo_verbose = 10
path = os.path.dirname(os.path.realpath(__file__))
yale_path = os.path.join(path, KB_YALE_PATH)
input_path = os.path.join(path, INPUT_YALE_PATH)
input_path = os.path.join(input_path, f'yale{i:02d}.lp')
output_path = os.path.join(path, OUTPUT_YALE_PATH)
output_path = os.path.join(output_path, f'sol_yale{i:02d}.txt')
eclingo_control.load(yale_path)
eclingo_control.load(input_path)
parts = []
parts.append(('base', []))
parts.append(('base', [Number(i)]))
eclingo_control.ground(parts)
result = [[ str(symbol) for symbol in model.symbols ] for model in eclingo_control.solve()]
result = [sorted(model) for model in result]
result = str(sorted(result)).replace(' ', '').replace("'", '')
with open(output_path, 'r') as output_prog:
sol = output_prog.read()
sol = sol.replace('\n', '').replace(' ', '')
self.assertEqual(result, sol, 'in ' + input_path)
```
#### File: eclingo/tests/test_transformers.py
```python
import unittest
import clingo
from clingo import ast as _ast
from eclingo.parsing.transformers.theory_parser_epistemic import \
parse_epistemic_literals_elements as _parse_epistemic_literals_elements
clingo_version = clingo.__version__
def parse_string(statement, fun):
if clingo_version < '5.5.1':
return clingo.parse_program(statement, fun)
return _ast.parse_string(statement, fun)
def ast_type(stm):
if clingo_version < '5.5.1':
return stm.type
return stm.ast_type
def theory_atom_element_terms(stm):
if clingo_version < '5.5.1':
return stm.tuple
return stm.terms
class ProgramParser(object):
def __init__(self):
self._statements = []
def _parse_statement(self, statement: _ast.AST) -> None: # pylint: disable=no-member
self._statements.append(statement)
def parse_statement(self, statement):
parse_string(statement, self._parse_statement)
def literal_statment_from_str(s):
parser = ProgramParser()
parser.parse_statement(":- " + s)
return parser._statements[1].body[0]
def theory_atom_statment_from_str(s):
return literal_statment_from_str(s).atom
class Test(unittest.TestCase):
def test_epistemic_atom(self):
statement = theory_atom_statment_from_str("&k{a}.")
self.assertEqual(len(statement.elements), 1)
element = statement.elements[0]
self.assertEqual(ast_type(element), _ast.ASTType.TheoryAtomElement)
terms = theory_atom_element_terms(element)
self.assertEqual(len(terms), 1)
term = terms[0]
if clingo_version < '5.5.1':
self.assertEqual(ast_type(term), _ast.ASTType.TheoryUnparsedTerm)
else:
self.assertEqual(ast_type(term), _ast.ASTType.SymbolicTerm)
result = _parse_epistemic_literals_elements(statement)
self.assertEqual(len(result.elements), 1)
element = result.elements[0]
self.assertEqual(ast_type(element), _ast.ASTType.TheoryAtomElement)
terms = theory_atom_element_terms(element)
self.assertEqual(len(terms), 1)
term = terms[0]
self.assertEqual(ast_type(term), _ast.ASTType.Literal)
``` |
{
"source": "JorgeFCS/Deep-Learning-fire-segmentation",
"score": 3
} |
#### File: Deep-Learning-fire-segmentation/Functions/data_augmentation.py
```python
import tensorflow as tf
__author__ = "<NAME>"
__credits__ = ["<NAME>"]
__license__ = "MIT"
__version__ = "0.1.0"
__status__ = "Development"
# Data augmentation: horizontal flip.
def a_mirror_image(image):
"""
Data augmentation: horizontal flip.
"""
#image = tf.cast(image, tf.float32)
image = tf.image.flip_left_right(image)
image = tf.image.resize(image,[384,512])
return image
# Data augmentation: central crop.
def a_central_crop(image):
"""
Data augmentation: central crop.
"""
#image = tf.cast(image, tf.float32)
image = tf.image.central_crop(image,central_fraction=0.5)
image = tf.image.resize(image,[384,512])
return image
# Data augmentation: rotation 180 degrees.
def a_rotate_180(image):
"""
Data augmentation: rotation - 180 degrees.
"""
#image = tf.cast(image, tf.float32)
image = tf.image.rot90(image,2)
image = tf.image.resize(image,[384,512])
return image
# Data augmentation: rotation 90 degrees.
def a_rotate_90(image):
"""
Data augmentation: rotation - 90 degrees.
"""
#image = tf.cast(image, tf.float32)
image = tf.image.rot90(image,1)
image = tf.image.resize(image,[384,512])
return image
# Auxiliary function to convert to uint8.
def convert_binary(image):
"""
Auxiliary function to cast an image to uint8.
"""
image = tf.cast(image, tf.uint8)
return image
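# Minimal usage sketch (the file path is hypothetical; assumes a 3-channel image on disk):
#
#   raw = tf.io.read_file("fire_sample.png")
#   image = tf.image.decode_png(raw, channels=3)
#   augmented = a_rotate_90(a_mirror_image(image))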
``` |
{
"source": "JorgeFelix11/MCA_automatizacion",
"score": 3
} |
#### File: python/mac_files_config/mac_files_config.py
```python
import json
import os
import re
import shutil
import sys
from argparse import ArgumentParser, RawTextHelpFormatter
WORKDIR = os.getcwd() # Current working directory (where script is run)
RESULTS_PATH = os.path.join(WORKDIR, 'resulting_mac_files')
BASE_MAC_FILE = os.path.join(WORKDIR, 'base.mac')
CONFIG_JSON = os.path.join(WORKDIR, 'config.json')
def main():
"""Generate new .mac files."""
try:
args = _parse_args(sys.argv[1:])
config = _get_config(args.config_json)
base_mac = _verify_base_mac_file(args.base_mac)
_set_results_path()
destination_path = _verify_destination_path(args.destination_path)
for _, particle in config.items():
_generate_particle_mac(
particle, args.iterations, args.add_zeros,
args.iteration_digits, destination_path, base_mac
)
except KeyboardInterrupt:
print('Stopped due user interruption.\n')
except Exception as error:
_print_error(f'An error occurred: {error}\n')
def _parse_args(args):
"""Parse input arguments and return input values."""
parser = ArgumentParser(
formatter_class=lambda prog: RawTextHelpFormatter(
prog, max_help_position=7
),
description=('Script to generate new .mac files combining different '
'parameters from a config.json file.')
)
parser.add_argument(
'--add_zeros', action='store_true',
help='Add zeros to the left side of the iteration number.'
)
parser.add_argument(
'--base_mac', default=BASE_MAC_FILE,
help='Absolute path of the base .mac file.'
)
parser.add_argument(
'--config_json', default=CONFIG_JSON,
help='Absolute path of the configuration JSON file.'
)
parser.add_argument(
'-d', '--destination_path', default='',
help=('Absolute path of destination location. Generated files will be '
'copied to this location.')
)
parser.add_argument(
'-i', '--iterations', default=1, type=int,
help='Number of files creation iterations (copies).'
)
parser.add_argument(
'--iteration_digits', default=4, type=int,
help=('Number of iteration digits. Applicable only if --add_zeros is '
'specified.')
)
return parser.parse_args(args)
def _get_config(config_file):
"""Return an object with the dumped configuration JSON file."""
if not os.path.isfile(config_file):
_print_error(f'Configuration file "{config_file}" does not exist.')
with open(config_file) as json_file:
return json.load(json_file)
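# Illustrative config.json shape, inferred from _generate_particle_mac below
# (particle names, units, and values are hypothetical):
#
#   {
#       "particle_1": {
#           "name": "e-",
#           "specification": "front, side",
#           "energy": {"MeV": "100, 200"},
#           "simulations": "1000"
#       }
#   }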
def _set_results_path():
"""Set the results path.
If the results path does not exist, it will be created.
If the results path exists, it will be emptied.
"""
if not os.path.isdir(RESULTS_PATH):
print(f'Creating results path "{RESULTS_PATH}"...')
os.mkdir(RESULTS_PATH)
else:
print(f'Removing files in results path "{RESULTS_PATH}"...')
_empty_results_path()
def _verify_base_mac_file(base_mac_file):
"""Verify the base .mac file exists."""
if not os.path.isfile(base_mac_file):
_print_error(f'The base .mac file "{base_mac_file}" does not exist.')
return base_mac_file
def _verify_destination_path(destination_path):
"""Verify the destination path exists."""
if not destination_path:
destination_path = RESULTS_PATH
if not os.path.isdir(destination_path):
_print_error(
f'The destination path "{destination_path}" does not exist.'
)
print(f'Destination path is "{destination_path}".')
return destination_path
def _empty_results_path():
"""Remove all the files in the results path."""
for filename in os.listdir(RESULTS_PATH):
file_path = os.path.join(RESULTS_PATH, filename)
try:
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as error:
print(f'Failed to delete {file_path}. Reason: {error}')
def _generate_particle_mac(particle, iterations, add_zeros, iteration_digits,
destination_path, base_mac):
"""Generate the .mac files with all the particle combinations."""
specification_list = [ # Split elements and remove white spaces
i.strip() for i in particle['specification'].split(',')
]
particle_name = particle['name']
energy_list = particle['energy']
simulations = particle['simulations']
creation_count = 0
print(f'Generating the .mac files for particle "{particle_name}"...')
for specification in specification_list:
for energy_unit, energy_levels in energy_list.items():
# Split the energy level list in config JSON (separated by commas)
energy_levels = [
i.strip() for i in energy_levels.split(',')
]
for energy in energy_levels:
new_files_number = _create_mac_files(
iterations, add_zeros, iteration_digits, destination_path,
base_mac, {
'specification': specification,
'particle_name': particle_name,
'energy': energy,
'energy_unit': energy_unit,
'simulations': simulations
}
)
creation_count += new_files_number
print(f'{creation_count} files were created for "{particle_name}" '
'particle.')
def _create_mac_files(iterations, add_zeros, iteration_digits,
destination_path, base_mac, metrics):
"""Create a .mac with metrics given, the given number of iterations."""
_validate_integer_metrics({ # Check set metrics are integers
'energy': metrics["energy"], 'simulations': metrics['simulations']
})
count = 0
for run_number in range(iterations):
if add_zeros: # Add zeros to the left size of number
run_number = _add_left_zeros(run_number, iteration_digits)
mac_name = ( # Set the name for .mac (and .root) file
f'wcs_{metrics["specification"]}_{metrics["particle_name"]}'
f'__{run_number}_{metrics["energy"]}_{metrics["energy_unit"]}'
)
# Dictionary containing .mac placeholders (left) and their values
# to be replaced (right)
replacing_dict = {
'@particle': metrics['particle_name'],
'@energy_value': metrics['energy'],
'@energy_unit': metrics['energy_unit'],
'@output_name': mac_name,
'@simulations': metrics['simulations']
}
# Set the new .mac file path
new_mac_file = os.path.join(RESULTS_PATH, f'{mac_name}.mac')
_replace_string_in_file(
base_mac, new_mac_file, replacing_dict
)
_copy_new_mac_file( # Copy new file in custom destination
new_mac_file, f'{mac_name}.mac', destination_path
)
count += 1
return count
def _validate_integer_metrics(metrics):
"""Validate elements of a list of metrics are integers."""
for metric_name, metric_value in metrics.items():
try:
int(metric_value)
except ValueError:
_print_error(
f'Metric "{metric_name}" must contain integers only. '
f'Incorrect value: "{metric_value}".'
)
def _add_left_zeros(number, iteration_digits):
"""Add zeros to the left side of the experiment run number.
Zeros will be added according to missing spaces until iterations_digits are
reached.
"""
number = str(number)
return f'{"0" * (iteration_digits - len(number))}{number}'
def _replace_string_in_file(source_file, new_file, repldict):
"""Find strings within a file and creates a new one with strings replaced.
Keyword arguments:
source_file -- Source file, with the string(s) to be replaced
new_file -- File name that will have the content of the source file with
the string(s) replaced.
repldict -- Dictionary containing one or more strings to be replaced. The
keys will be the matching strings and the values will be the replacing
strings. I.e.
repldict = {
'old string1': 'new string1',
'old string2': 'new string2', ...
}
"""
pfile_path = source_file
def _replfunc(match): # Replacing function
return repldict[match.group(0)]
regex = re.compile('|'.join(re.escape(i) for i in repldict))
with open(pfile_path) as fin:
with open(new_file, 'w') as fout:
for line in fin:
fout.write(regex.sub(_replfunc, line))
def _copy_new_mac_file(mac_file_path, mac_file_name, destination_path):
"""Copy the generated .mac file in the results path."""
if destination_path != RESULTS_PATH:
destination_file = os.path.join(destination_path, mac_file_name)
shutil.copyfile(mac_file_path, destination_file)
def _print_error(msg):
"""Print the error message and exit program."""
print(msg)
sys.exit(1)
if __name__ == '__main__':
main()
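# Example invocation (paths and counts are illustrative):
#
#   python mac_files_config.py --config_json ./config.json --base_mac ./base.mac \
#       -i 10 --add_zeros --iteration_digits 4 -d /data/mac_out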
``` |
{
"source": "JorgeFrancoIbanez/Flask-api",
"score": 3
} |
#### File: Flask-api/app/models.py
```python
from app import db
from app import login
from sqlalchemy.sql import func
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin
class User(UserMixin, db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(64), index=True, unique=True)
email = db.Column(db.String(120), index=True, unique=True)
password_hash = db.Column(db.String(128))
timestamp = db.Column(db.DateTime(timezone=True), index=True, server_default=func.now())
profile_image = db.Column(db.LargeBinary)
def __init__(self, username, email):
self.username = username
self.email = email
def __repr__(self):
return '<User {}>'.format(self.username)
def set_password(self, password):
self.password_hash = generate_password_hash(password)
def check_password(self, password):
return check_password_hash(self.password_hash, password)
class Post(db.Model):
id = db.Column(db.Integer, primary_key=True)
message = db.Column(db.String(140))
node_id = db.Column(db.Integer, db.ForeignKey('node.id'))
timestamp = db.Column(db.DateTime(timezone=True), index=True, server_default=func.now())
def __init__(self, message, node_id):
self.message = message
self.node_id = node_id
def __repr__(self):
return '<Post {}>'.format(self.message)
class Node(db.Model):
id = db.Column(db.Integer, primary_key=True)
mac = db.Column(db.String(140))
name = db.Column(db.String(140))
pool_id = db.Column(db.Integer, db.ForeignKey('pool.id'))
timestamp = db.Column(db.DateTime(timezone=True), index=True, server_default=func.now())
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
def __init__(self, mac, name, user_id):
self.mac = mac
self.name = name
self.user_id = user_id
def __repr__(self):
return '<Node {}>'.format(self.name)
class Object(db.Model):
id = db.Column(db.Integer, primary_key=True)
message = db.Column(db.String(140))
pool_id = db.Column(db.Integer, db.ForeignKey('pool.id'))
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
def __init__(self, name, data):
self.name = name
self.data = data
def __repr__(self):
return '<Object {}>'.format(self.name)
class Pool(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(140))
timestamp = db.Column(db.DateTime(timezone=True), index=True, server_default=func.now())
repository = db.Column(db.String(140))
def __init__(self, name, repository):
self.name = name
self.repository = repository
def __repr__(self):
return '<Pool {}>'.format(self.name)
@login.user_loader
def load_user(id):
return User.query.get(int(id))
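# Minimal usage sketch (assumes an application context and that the tables were created):
#
#   user = User(username="alice", email="alice@example.com")
#   user.set_password("change-me")
#   db.session.add(user)
#   db.session.commit()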
```
#### File: app/utils/check_if_exist.py
```python
from app.models import Node, User, Pool
def check_node(data):
node = Node.query.filter(Node.mac == data).all()
if not node:
return False
else:
return True
def check_node_pool(pool):
pool = Node.query.filter(Node.pool_id == pool).all()  # any node assigned to this pool id?
if not pool:
return False
else:
return True
def check_user(email):
user = User.query.filter(User.email == email).all()
if not user:
return False
else:
return user
```
#### File: Flask-api/utils/get_mac.py
```python
import os
def get_mac():
try:
for i in os.listdir('/sys/class/net/'):
operstate = open('/sys/class/net/%s/operstate' %i).read()
if operstate.strip() == 'up' and i in ('eth0', 'enp0s31f6'):
mac_address = open('/sys/class/net/%s/address' %i).read()
return str(mac_address[0:17])
except ValueError:
print('Please check your available connections before continuing')
``` |
{
"source": "Jorge-fubar/raspberry-thermostat",
"score": 3
} |
#### File: Jorge-fubar/raspberry-thermostat/thermostat.py
```python
import config
import os
import glob
import time
import datetime
import logging
from logging import StreamHandler, Formatter
from logging.handlers import TimedRotatingFileHandler
import sqlite3
def get_logger(logger_name):
_logger = logging.getLogger(logger_name)
file_formatter = Formatter(
'%(levelname)s | %(asctime)s | %(name)s | %(message)s | %(pathname)s:%(lineno)d'
)
#TODO the path for the logs file needs to be absolute when the script is executed on startup when registered in the /etc/rc.local file
time_rotating_handler = TimedRotatingFileHandler(\
'{0}/{1}.log'.format('./logs', logger_name), when="midnight", backupCount=10, encoding='utf-8')
time_rotating_handler.suffix = "%Y-%m-%d"
time_rotating_handler.setFormatter(file_formatter)
_logger.addHandler(time_rotating_handler)
_logger.setLevel(logging.DEBUG)
return _logger
def init_db():
#TODO the path for the db file needs to be absolute
conn = sqlite3.connect('thermostat.db')
return conn
def read_temp_raw():
f = open(device_file, 'r')
lines = f.readlines()
f.close()
return lines
def read_temp():
lines = read_temp_raw()
while lines[0].strip()[-3:] != 'YES':
time.sleep(0.2)
lines = read_temp_raw()
equals_pos = lines[1].find('t=')
if equals_pos != -1:
temp_string = lines[1][equals_pos+2:]
temp_c = float(temp_string) / 1000.0
return temp_c
def get_time_range(dt: datetime.datetime):
    # Each hour maps to two half-hour slots; as implemented, minutes 0-30
    # select the odd (higher) slot and minutes 31-59 the even one.
    # (Parameter renamed from "datetime", which shadowed the module.)
    time_range = dt.hour * 2
    if dt.minute <= 30:
        time_range += 1
    return time_range
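# Illustrative mapping for the function above: 08:15 -> 8*2 + 1 = 17 and
# 08:45 -> 8*2 = 16, giving 48 half-hour slots (0-47) per day.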
logger = get_logger('thermostat')
os.system('modprobe w1-gpio')
os.system('modprobe w1-therm')
base_dir = '/sys/bus/w1/devices/'
device_folder = glob.glob(base_dir + '28*')[0]
device_file = device_folder + '/w1_slave'
try:
conn = init_db()
while True:
now = datetime.datetime.now()
current_time = now.strftime('%Y%m%dT%H:%M')
current_temp = read_temp()
logger.debug('Read temperature %s° at time %s',
current_temp, current_time)
conn.execute('INSERT INTO temperatures VALUES (?, ?)',
(current_time, current_temp))
conn.commit()
for row in conn.execute('SELECT temperature FROM week_schedule WHERE day = ? AND time_range = ?', (0, get_time_range(now))):
print(row[0])
time.sleep(60)
except KeyboardInterrupt:
    logger.warning('Temperature daemon stopped by user')
finally:
conn.close()
``` |
{
"source": "Jorge-Fuentes/flaskrplus",
"score": 3
} |
#### File: Jorge-Fuentes/flaskrplus/flaskr.py
```python
import os
import sqlite3
from flask import Flask, request, session, g, redirect, url_for, abort, \
render_template, flash
app = Flask(__name__)
app.config.from_object(__name__)
app.config.update(dict(
DATABASE=os.path.join(app.root_path, 'flaskr.db'),
SECRET_KEY='development key',
USERNAME='admin',
PASSWORD='<PASSWORD>'
))
app.config.from_envvar('FLASKR_SETTINGS', silent=True)
def connect_db():
"""Connects to the specific database."""
rv = sqlite3.connect(app.config['DATABASE'])
rv.row_factory = sqlite3.Row
return rv
# To run the application from the command line:
#   export FLASK_APP=flaskr
#   export FLASK_DEBUG=1
#   flask run
def init_db():
db = get_db()
with app.open_resource('schema.sql', mode='r') as f:
db.cursor().executescript(f.read())
db.commit()
@app.cli.command('initdb')
def initdb_command():
"""Initializes the database."""
init_db()
    print('Initialized the database.')
def get_db():
if not hasattr(g, 'sqlite_db'):
g.sqlite_db = connect_db()
return g.sqlite_db
@app.teardown_appcontext
def close_db(error):
"""Closes the database again at the end of the request."""
if hasattr(g, 'sqlite_db'):
g.sqlite_db.close()
@app.route('/')
def show_entries():
    """Shows entries."""
db = get_db()
cur = db.execute('select title, text from entries order by id desc')
entries = cur.fetchall()
return render_template('show_entries.html', entries=entries)
# The deletion logic below was unreachable in the original (it followed the
# return above); the route path and method used here are assumed.
@app.route('/delete_entry', methods=['POST'])
def delete_entry():
    db = get_db()
    db.execute('delete from entries where word=?', [request.form['entry_to_delete']])
    db.commit()
    flash('The entry was removed! HAHA!')
    return redirect(url_for('show_entries'))
if __name__ == '__main__':
app.run()
``` |
{
"source": "JorgeG94/roofline_plot",
"score": 3
} |
#### File: JorgeG94/roofline_plot/roofline.py
```python
import csv
import sys
import argparse
import numpy
import matplotlib.pyplot
import matplotlib
#matplotlib.rc('font', family='Times New Roman')
# Constants
# The following constants define the span of the intensity axis
START = -3
STOP = 4
N = abs(STOP - START + 1)
def roofline(num_platforms, peak_performance, peak_bandwidth, intensity):
"""
Computes the roofline model for the given platforms.
    Returns the achievable performance for each platform at each intensity.
"""
assert isinstance(num_platforms, int) and num_platforms > 0
assert isinstance(peak_performance, numpy.ndarray)
assert isinstance(peak_bandwidth, numpy.ndarray)
assert isinstance(intensity, numpy.ndarray)
assert (num_platforms == peak_performance.shape[0] and
num_platforms == peak_bandwidth.shape[0])
achievable_performance = numpy.zeros((num_platforms, len(intensity)))
for i in range(num_platforms):
        achievable_performance[i, :] = numpy.minimum(peak_performance[i],
                                                     peak_bandwidth[i] * intensity)
return achievable_performance
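# Illustrative check of the formula above, using the numbers annotated on the
# plot below (assumed: 778 GB/s of bandwidth, 7660 GFLOP/s of peak):
#   intensity = 1   FLOP/byte -> min(7660, 778 * 1)   = 778   (bandwidth-bound)
#   intensity = 100 FLOP/byte -> min(7660, 778 * 100) = 7660  (compute-bound)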
def process(hw_platforms, sw_apps, xkcd):
"""
Processes the hw_platforms and sw_apps to plot the Roofline.
"""
assert isinstance(hw_platforms, list)
assert isinstance(sw_apps, list)
assert isinstance(xkcd, bool)
# arithmetic intensity
arithmetic_intensity = numpy.logspace(START, STOP, num=N, base=10)
# Hardware platforms
platforms = [p[0] for p in hw_platforms]
# Compute the rooflines
achievable_performance = roofline(len(platforms),
numpy.array([p[1] for p in hw_platforms]),
numpy.array([p[2] for p in hw_platforms]),
arithmetic_intensity)
# Apps
if sw_apps != []:
apps = [a[0] for a in sw_apps]
apps_intensity = numpy.array([a[1] for a in sw_apps])
floprate = numpy.array([a[2] for a in sw_apps])
# Plot the graphs
if xkcd:
matplotlib.pyplot.xkcd()
fig, axis = matplotlib.pyplot.subplots()
axis.set_xscale('log', basex=10)
axis.set_yscale('log', basey=10)
axis.set_xlabel('Arithmetic Intensity (FLOP/byte)', fontsize=14)
axis.grid(True, which='both', color='gray', linestyle='-',linewidth=0.2)
#matplotlib.pyplot.setp(axis, xticks=numpy.logspace(1,20,num=20,base=10),
matplotlib.pyplot.setp(axis, xticks=arithmetic_intensity,
yticks=numpy.logspace(1, 20, num=20, base=10))
matplotlib.pyplot.yticks(fontsize=12)
matplotlib.pyplot.xticks(fontsize=12)
axis.set_ylabel("FLOP-rate (GFLOP/s)", fontsize=14)
axis.set_ylim(bottom=10, top=100500)
#axis.set_title('Roofline Plot', fontsize=14)
l1 = numpy.array((0.12,100))
l2 = numpy.array((10,35))
trans_angle = matplotlib.pyplot.gca().transData.transform_angles(numpy.array((75,)), l2.reshape((1,2)))[0]
th1 = matplotlib.pyplot.text(l1[0],l1[1], ' HBM BW: 778 GB/s ', fontsize=10, rotation=trans_angle,rotation_mode='anchor')
th2 = matplotlib.pyplot.text(20,10000, ' DP Peak: 7.66 TF/s ', fontsize=10)
for idx, val in enumerate(platforms):
axis.plot(arithmetic_intensity, achievable_performance[idx, 0:],
label=val)
if sw_apps != []:
color = matplotlib.pyplot.cm.rainbow(numpy.linspace(0, 1, len(apps)))
for idx, val in enumerate(apps):
axis.plot(apps_intensity[idx], floprate[idx], label=val,
linestyle='-.', marker='o', color=color[idx])
axis.legend(loc='upper left', prop={'size': 9})
fig.tight_layout()
#matplotlib.pyplot.show()
matplotlib.pyplot.savefig('plot_roofline.png', dpi=500 )
def read_file(filename, row_len, csv_name):
"""
Reads CSV file and returns a list of row_len-ary tuples
"""
assert isinstance(row_len, int)
elements = list()
try:
in_file = open(filename, 'r') if filename is not None else sys.stdin
reader = csv.reader(in_file, dialect='excel')
for row in reader:
if len(row) != row_len:
print("Error: Each row in %s must be contain exactly %d entries!"
% (csv_name, row_len), file=sys.stderr)
sys.exit(1)
element = tuple([row[0]] + [float(r) for r in row[1:]])
elements.append(element)
if filename is not None:
in_file.close()
except IOError as ex:
print(ex, file=sys.stderr)
sys.exit(1)
return elements
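# Illustrative row (assumed layout): with row_len=3 the SW CSV line
# "app1,0.5,120" becomes the tuple ("app1", 0.5, 120.0).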
def main():
"""
main function
"""
hw_platforms = list()
apps = list()
parser = argparse.ArgumentParser()
parser.add_argument("-i", metavar="hw_csv", help="HW platforms CSV file", type=str)
parser.add_argument("-a", metavar="apps_csv", help="applications CSV file", type=str)
parser.add_argument("--hw-only", action='store_true', default=False)
parser.add_argument("--xkcd", action='store_true', default=False)
args = parser.parse_args()
# HW
print("Reading HW characteristics...")
hw_platforms = read_file(args.i, 4, "HW CSV")
# apps
if args.hw_only:
print("Plotting only HW characteristics without any applications...")
apps = list()
else:
print("Reading applications intensities...")
apps = read_file(args.a, 3, "SW CSV")
print(hw_platforms)
print("Plotting using XKCD plot style is set to %s" % (args.xkcd))
if apps != []:
print(apps)
process(hw_platforms, apps, args.xkcd)
sys.exit(0)
if __name__ == "__main__":
main()
'''
BSD 3-Clause License
Copyright (c) 2018, <NAME>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
``` |
{
"source": "jorgegarcia7/flumine",
"score": 2
} |
#### File: flumine/execution/betfairexecution.py
```python
import logging
import requests
from typing import Callable
from betfairlightweight import BetfairError
from .baseexecution import BaseExecution
from ..clients.clients import ExchangeType
from ..order.orderpackage import BaseOrderPackage, OrderPackageType
from ..exceptions import OrderExecutionError
logger = logging.getLogger(__name__)
class BetfairExecution(BaseExecution):
EXCHANGE = ExchangeType.BETFAIR
def execute_place(
self, order_package: BaseOrderPackage, http_session: requests.Session
) -> None:
response = self._execution_helper(self.place, order_package, http_session)
if response:
for (order, instruction_report) in zip(
order_package, response.place_instruction_reports
):
with order.trade:
self._order_logger(
order, instruction_report, OrderPackageType.PLACE
)
if instruction_report.status == "SUCCESS":
if instruction_report.order_status == "PENDING":
pass # async request pending processing
else:
order.executable() # let process.py pick it up
elif instruction_report.status == "FAILURE":
order.lapsed() # todo correct?
elif instruction_report.status == "TIMEOUT":
# https://docs.developer.betfair.com/display/1smk3cen4v3lu3yomq5qye0ni/Betting+Enums#BettingEnums-ExecutionReportStatus
pass
# update transaction counts
order_package.client.add_transaction(len(order_package))
def place(self, order_package: OrderPackageType, session: requests.Session):
return order_package.client.betting_client.betting.place_orders(
market_id=order_package.market_id,
instructions=order_package.place_instructions,
customer_ref=order_package.id.hex,
market_version=order_package.market_version,
customer_strategy_ref=order_package.customer_strategy_ref,
async_=order_package.async_,
session=session,
)
def execute_cancel(
self, order_package: BaseOrderPackage, http_session: requests.Session
) -> None:
response = self._execution_helper(self.cancel, order_package, http_session)
if response:
failed_transaction_count = 0
order_lookup = {o.bet_id: o for o in order_package}
for instruction_report in response.cancel_instruction_reports:
# get order (can't rely on the order they are returned)
order = order_lookup.pop(instruction_report.instruction.bet_id)
with order.trade:
self._order_logger(
order, instruction_report, OrderPackageType.CANCEL
)
if instruction_report.status == "SUCCESS":
if (
instruction_report.size_cancelled == order.size_remaining
or order.size_remaining
== 0 # handle orders stream update / race condition
):
order.execution_complete()
else:
order.executable()
elif instruction_report.status == "FAILURE":
order.executable()
failed_transaction_count += 1
elif instruction_report.status == "TIMEOUT":
order.executable()
# reset any not returned so that they can be picked back up
for order in order_lookup.values():
with order.trade:
order.executable()
# update transaction counts
if failed_transaction_count:
order_package.client.add_transaction(
failed_transaction_count, failed=True
)
def cancel(self, order_package: OrderPackageType, session: requests.Session):
# temp copy to prevent an empty list of instructions sent
# this can occur if order is matched during the execution
# cycle, resulting in all orders being cancelled!
cancel_instructions = list(order_package.cancel_instructions)
if not cancel_instructions:
logger.warning("Empty cancel_instructions", extra=order_package.info)
raise OrderExecutionError()
return order_package.client.betting_client.betting.cancel_orders(
market_id=order_package.market_id,
instructions=cancel_instructions,
customer_ref=order_package.id.hex,
session=session,
)
def execute_update(
self, order_package: BaseOrderPackage, http_session: requests.Session
) -> None:
response = self._execution_helper(self.update, order_package, http_session)
if response:
failed_transaction_count = 0
for (order, instruction_report) in zip(
order_package, response.update_instruction_reports
):
with order.trade:
self._order_logger(
order, instruction_report, OrderPackageType.UPDATE
)
if instruction_report.status == "SUCCESS":
order.executable()
elif instruction_report.status == "FAILURE":
order.executable()
failed_transaction_count += 1
elif instruction_report.status == "TIMEOUT":
order.executable()
# update transaction counts
if failed_transaction_count:
order_package.client.add_transaction(
failed_transaction_count, failed=True
)
def update(self, order_package: OrderPackageType, session: requests.Session):
return order_package.client.betting_client.betting.update_orders(
market_id=order_package.market_id,
instructions=order_package.update_instructions,
customer_ref=order_package.id.hex,
session=session,
)
def execute_replace(
self, order_package: BaseOrderPackage, http_session: requests.Session
) -> None:
response = self._execution_helper(self.replace, order_package, http_session)
if response:
failed_transaction_count = 0
market = self.flumine.markets.markets[order_package.market_id]
for (order, instruction_report) in zip(
order_package, response.replace_instruction_reports
):
with order.trade:
# process cancel response
if (
instruction_report.cancel_instruction_reports.status
== "SUCCESS"
):
self._order_logger(
order,
instruction_report.cancel_instruction_reports,
OrderPackageType.CANCEL,
)
order.execution_complete()
elif (
instruction_report.cancel_instruction_reports.status
== "FAILURE"
):
order.executable()
failed_transaction_count += 1
elif (
instruction_report.cancel_instruction_reports.status
== "TIMEOUT"
):
order.executable()
# process place response
if instruction_report.place_instruction_reports.status == "SUCCESS":
# create new order
replacement_order = order.trade.create_order_replacement(
order,
instruction_report.place_instruction_reports.instruction.limit_order.price,
instruction_report.place_instruction_reports.instruction.limit_order.size,
)
self._order_logger(
replacement_order,
instruction_report.place_instruction_reports,
OrderPackageType.REPLACE,
)
# add to blotter
market.place_order(replacement_order, execute=False)
replacement_order.executable()
elif (
instruction_report.place_instruction_reports.status == "FAILURE"
):
pass # todo
elif (
instruction_report.place_instruction_reports.status == "TIMEOUT"
):
pass # todo
# update transaction counts
order_package.client.add_transaction(len(order_package))
if failed_transaction_count:
order_package.client.add_transaction(
failed_transaction_count, failed=True
)
def replace(self, order_package: OrderPackageType, session: requests.Session):
return order_package.client.betting_client.betting.replace_orders(
market_id=order_package.market_id,
instructions=order_package.replace_instructions,
customer_ref=order_package.id.hex,
market_version=order_package.market_version,
async_=order_package.async_,
session=session,
)
def _execution_helper(
self,
trading_function: Callable,
order_package: BaseOrderPackage,
http_session: requests.Session,
):
if order_package.elapsed_seconds > 0.1 and order_package.retry_count == 0:
logger.warning(
"High latency between current time and OrderPackage creation time, it is likely that the thread pool is currently exhausted",
extra={
"trading_function": trading_function.__name__,
"session": http_session,
"latency": round(order_package.elapsed_seconds, 3),
"order_package": order_package.info,
},
)
if order_package.orders:
try:
response = trading_function(order_package, http_session)
except BetfairError as e:
logger.error(
"Execution error",
extra={
"trading_function": trading_function.__name__,
"response": e,
"order_package": order_package.info,
},
exc_info=True,
)
if order_package.retry():
self.handler(order_package)
self._return_http_session(http_session, err=True)
return
except Exception as e:
logger.critical(
"Execution unknown error",
extra={
"trading_function": trading_function.__name__,
"exception": e,
"order_package": order_package.info,
},
exc_info=True,
)
self._return_http_session(http_session, err=True)
return
logger.info(
"execute_%s" % trading_function.__name__,
extra={
"trading_function": trading_function.__name__,
"elapsed_time": response.elapsed_time,
"response": response._data,
"order_package": order_package.info,
},
)
self._return_http_session(http_session)
return response
else:
logger.warning("Empty package, not executing", extra=order_package.info)
self._return_http_session(http_session)
```
#### File: flumine/flumine/flumine.py
```python
import logging
from .baseflumine import BaseFlumine
from .events.events import EventType
from . import worker
logger = logging.getLogger(__name__)
class Flumine(BaseFlumine):
def run(self) -> None:
"""
Main run thread
"""
with self:
while True:
event = self.handler_queue.get()
if event.EVENT_TYPE == EventType.TERMINATOR:
self._process_end_flumine()
break
elif event.EVENT_TYPE == EventType.MARKET_CATALOGUE:
self._process_market_catalogues(event)
elif event.EVENT_TYPE == EventType.MARKET_BOOK:
self._process_market_books(event)
elif event.EVENT_TYPE == EventType.RAW_DATA:
self._process_raw_data(event)
elif event.EVENT_TYPE == EventType.CURRENT_ORDERS:
self._process_current_orders(event)
elif event.EVENT_TYPE == EventType.CLEARED_MARKETS:
self._process_cleared_markets(event)
elif event.EVENT_TYPE == EventType.CLEARED_ORDERS:
self._process_cleared_orders(event)
elif event.EVENT_TYPE == EventType.CLOSE_MARKET:
self._process_close_market(event)
elif event.EVENT_TYPE == EventType.CUSTOM_EVENT:
self._process_custom_event(event)
else:
logger.error("Unknown item in handler_queue: %s" % str(event))
del event
def _add_default_workers(self):
ka_interval = min((self.client.betting_client.session_timeout / 2), 1200)
self.add_worker(
worker.BackgroundWorker(
self, function=worker.keep_alive, interval=ka_interval
)
)
self.add_worker(
worker.BackgroundWorker(
self,
function=worker.poll_account_balance,
interval=120,
start_delay=10, # wait for login
)
)
self.add_worker(
worker.BackgroundWorker(
self,
function=worker.poll_market_catalogue,
interval=60,
start_delay=10, # wait for streams to populate
)
)
self.add_worker(
worker.BackgroundWorker(
self,
function=worker.poll_market_closure,
interval=60,
start_delay=10, # wait for login
)
)
def __repr__(self) -> str:
return "<Flumine>"
def __str__(self) -> str:
return "<Flumine>"
```
#### File: flumine/tests/test_integration.py
```python
import unittest
from flumine import FlumineBacktest, clients, BaseStrategy, config
from flumine.order.trade import Trade
from flumine.order.order import OrderStatus
from flumine.order.ordertype import LimitOrder, MarketOnCloseOrder
from flumine.utils import get_price
class IntegrationTest(unittest.TestCase):
def setUp(self) -> None:
# change config to raise errors
config.raise_errors = True
def test_backtest_basic(self):
class Ex(BaseStrategy):
def check_market_book(self, market, market_book):
return True
def process_market_book(self, market, market_book):
return
client = clients.BacktestClient()
framework = FlumineBacktest(client=client)
strategy = Ex(market_filter={"markets": ["tests/resources/BASIC-1.132153978"]})
framework.add_strategy(strategy)
framework.run()
def test_backtest_pro(self):
class LimitOrders(BaseStrategy):
def check_market_book(self, market, market_book):
if not market_book.inplay and market.seconds_to_start < 100:
return True
def process_market_book(self, market, market_book):
with market.transaction() as t:
for runner in market_book.runners:
if runner.status == "ACTIVE":
back = get_price(runner.ex.available_to_back, 0)
runner_context = self.get_runner_context(
market.market_id, runner.selection_id
)
if runner_context.trade_count == 0:
trade = Trade(
market_book.market_id,
runner.selection_id,
runner.handicap,
self,
)
order = trade.create_order(
side="BACK",
order_type=LimitOrder(back, 2.00),
)
t.place_order(order)
class LimitReplaceOrders(BaseStrategy):
def check_market_book(self, market, market_book):
if not market_book.inplay and market.seconds_to_start < 100:
return True
def process_market_book(self, market, market_book):
with market.transaction() as t:
for runner in market_book.runners:
if runner.status == "ACTIVE":
runner_context = self.get_runner_context(
market.market_id, runner.selection_id
)
if runner_context.trade_count == 0:
trade = Trade(
market_book.market_id,
runner.selection_id,
runner.handicap,
self,
)
order = trade.create_order(
side="BACK",
order_type=LimitOrder(1000, 2.00),
)
t.place_order(order)
def process_orders(self, market, orders: list) -> None:
with market.transaction() as t:
for order in orders:
if order.status == OrderStatus.EXECUTABLE:
if order.size_matched == 0:
t.replace_order(order, new_price=1.01)
class LimitOrdersInplay(BaseStrategy):
def check_market_book(self, market, market_book):
if market_book.inplay:
return True
def process_market_book(self, market, market_book):
for runner in market_book.runners:
if runner.status == "ACTIVE" and runner.last_price_traded < 2:
lay = get_price(runner.ex.available_to_lay, 0)
trade = Trade(
market_book.market_id,
runner.selection_id,
runner.handicap,
self,
)
order = trade.create_order(
side="LAY",
order_type=LimitOrder(lay, 2.00),
)
market.place_order(order)
def process_orders(self, market, orders):
for order in orders:
if order.status == OrderStatus.EXECUTABLE:
if order.elapsed_seconds and order.elapsed_seconds > 2:
market.cancel_order(order)
class MarketOnCloseOrders(BaseStrategy):
def check_market_book(self, market, market_book):
if not market_book.inplay and market.seconds_to_start < 100:
return True
def process_market_book(self, market, market_book):
for runner in market_book.runners:
if runner.status == "ACTIVE":
runner_context = self.get_runner_context(
market.market_id, runner.selection_id
)
if runner_context.trade_count == 0:
trade = Trade(
market_book.market_id,
runner.selection_id,
runner.handicap,
self,
)
order = trade.create_order(
side="LAY",
order_type=MarketOnCloseOrder(100.00),
)
market.place_order(order)
client = clients.BacktestClient()
framework = FlumineBacktest(client=client)
limit_strategy = LimitOrders(
market_filter={"markets": ["tests/resources/PRO-1.170258213"]},
max_order_exposure=1000,
max_selection_exposure=105,
max_trade_count=1,
)
framework.add_strategy(limit_strategy)
limit_replace_strategy = LimitReplaceOrders(
market_filter={"markets": ["tests/resources/PRO-1.170258213"]},
max_order_exposure=1000,
max_selection_exposure=105,
max_trade_count=1,
)
framework.add_strategy(limit_replace_strategy)
limit_inplay_strategy = LimitOrdersInplay(
market_filter={"markets": ["tests/resources/PRO-1.170258213"]},
max_order_exposure=1000,
max_selection_exposure=105,
)
framework.add_strategy(limit_inplay_strategy)
market_strategy = MarketOnCloseOrders(
market_filter={"markets": ["tests/resources/PRO-1.170258213"]},
max_order_exposure=1000,
max_selection_exposure=105,
)
framework.add_strategy(market_strategy)
framework.run()
self.assertEqual(len(framework.markets), 1)
for market in framework.markets:
limit_orders = market.blotter.strategy_orders(limit_strategy)
self.assertEqual(
round(sum([o.simulated.profit for o in limit_orders]), 2), -16.8
)
self.assertEqual(len(limit_orders), 14)
limit_replace_orders = market.blotter.strategy_orders(
limit_replace_strategy
)
self.assertEqual(
round(sum([o.simulated.profit for o in limit_replace_orders]), 2), -16.8
)
self.assertEqual(len(limit_replace_orders), 28)
limit_inplay_orders = market.blotter.strategy_orders(limit_inplay_strategy)
self.assertEqual(
round(sum([o.simulated.profit for o in limit_inplay_orders]), 2), 18.96
)
self.assertEqual(len(limit_inplay_orders), 15)
market_orders = market.blotter.strategy_orders(market_strategy)
self.assertEqual(
round(sum([o.simulated.profit for o in market_orders]), 2), -6.68
)
self.assertEqual(len(market_orders), 14)
# check transaction count
self.assertEqual(market._transaction_id, 25428)
def tearDown(self) -> None:
config.simulated = False
config.raise_errors = False
``` |
{
"source": "JorgeGarciaIrazabal/baby_tracker_be",
"score": 2
} |
#### File: src/controllers/feed.py
```python
from datetime import datetime
from typing import List, Optional
from fastapi import Depends
from fastapi_jwt_auth import AuthJWT
from sqlalchemy import desc
from src.app import app, get_db
from src.models import Feed, PFeed
from sqlalchemy.orm import Session
from src.services.auth import validate_baby_relationship
@app.get("/baby/{baby_id}/feed", response_model=List[PFeed], tags=["api"])
def get_baby_feeds(
baby_id: int,
start_at: Optional[datetime] = None,
end_at: Optional[datetime] = None,
page: Optional[int] = None,
page_size: Optional[int] = None,
db: Session = Depends(get_db),
auth: AuthJWT = Depends(),
):
validate_baby_relationship(auth, baby_id)
current_filter = Feed.baby_id == baby_id
if start_at is not None:
current_filter &= Feed.start_at >= start_at
if end_at is not None:
current_filter &= Feed.end_at <= end_at
if page_size is not None:
page = page or 0
if page is not None:
page_size = page_size or 30
feeds_query = db.query(Feed).order_by(desc(Feed.start_at)).filter(current_filter)
if page is not None:
feeds = feeds_query[page * page_size : (page * page_size) + page_size]
else:
feeds = feeds_query.all()
return [PFeed.from_orm(feed) for feed in feeds]
@app.post("/feed", response_model=PFeed, tags=["api"])
def create_feed(
p_feed: PFeed, db: Session = Depends(get_db), auth: AuthJWT = Depends()
):
validate_baby_relationship(auth, p_feed.baby_id)
feed = Feed(**p_feed.dict())
db.add(feed)
db.commit()
return PFeed.from_orm(feed)
@app.put("/feed", response_model=PFeed, tags=["api"])
def update_feed(
p_feed: PFeed, db: Session = Depends(get_db), auth: AuthJWT = Depends()
):
validate_baby_relationship(auth, p_feed.baby_id)
feed: Feed = db.query(Feed).get(p_feed.id)
feed.update(p_feed)
db.add(feed)
db.commit()
return PFeed.from_orm(feed)
@app.delete("/feed/{id}", response_model=PFeed, tags=["api"])
def delete_feed(id: int, db: Session = Depends(get_db), auth: AuthJWT = Depends()):
feed: Feed = db.query(Feed).get(id)
validate_baby_relationship(auth, feed.baby_id)
db.delete(feed)
db.commit()
return PFeed.from_orm(feed)
```
#### File: src/controllers/pee.py
```python
from typing import List, Optional
from fastapi import Depends
from fastapi_jwt_auth import AuthJWT
from sqlalchemy import desc
from sqlalchemy.orm import Session
from src.app import app, get_db
from src.models import Pee, PPee
from src.services.auth import validate_baby_relationship
@app.get("/baby/{baby_id}/pee", response_model=List[PPee], tags=["api"])
def get_baby_pees(
baby_id: int,
page: Optional[int] = None,
page_size: Optional[int] = None,
db: Session = Depends(get_db),
auth: AuthJWT = Depends(),
):
validate_baby_relationship(auth, baby_id)
current_filter = Pee.baby_id == baby_id
if page_size is not None:
page = page or 0
if page is not None:
page_size = page_size or 30
pees_query = db.query(Pee).order_by(desc(Pee.at)).filter(current_filter)
if page is not None:
pees = pees_query[page * page_size: (page * page_size) + page_size]
else:
pees = pees_query.all()
return [PPee.from_orm(pee) for pee in pees]
@app.post("/pee", response_model=PPee, tags=["api"])
def create_pee(
p_pee: PPee, db: Session = Depends(get_db), auth: AuthJWT = Depends()
):
validate_baby_relationship(auth, p_pee.baby_id)
pee = Pee(**p_pee.dict())
db.add(pee)
db.commit()
return PPee.from_orm(pee)
@app.put("/pee", response_model=PPee, tags=["api"])
def update_pee(
p_pee: PPee, db: Session = Depends(get_db), auth: AuthJWT = Depends()
):
validate_baby_relationship(auth, p_pee.baby_id)
pee: Pee = db.query(Pee).get(p_pee.id)
pee.update(p_pee)
db.add(pee)
db.commit()
return PPee.from_orm(pee)
@app.delete("/pee/{id}", response_model=PPee, tags=["api"])
def delete_pee(id: int, db: Session = Depends(get_db), auth: AuthJWT = Depends()):
pee: Pee = db.query(Pee).get(id)
validate_baby_relationship(auth, pee.baby_id)
db.delete(pee)
db.commit()
return PPee.from_orm(pee)
``` |
{
"source": "JorgeGarciaIrazabal/cf-scripts",
"score": 2
} |
#### File: conda_forge_tick/migrators/conda_forge_yaml_cleanup.py
```python
import os
import typing
from typing import Any
from ruamel.yaml import YAML
from conda_forge_tick.xonsh_utils import indir
from conda_forge_tick.migrators.core import MiniMigrator
if typing.TYPE_CHECKING:
from ..migrators_types import AttrsTypedDict
class CondaForgeYAMLCleanup(MiniMigrator):
# if you add a key here, you need to add it to the set of
# keys kept in the graph in make_graph.py
keys_to_remove = [
"min_r_ver",
"max_r_ver",
"min_py_ver",
"max_py_ver",
"compiler_stack",
]
def filter(self, attrs: "AttrsTypedDict", not_bad_str_start: str = "") -> bool:
"""only remove the keys if they are there"""
cfy = attrs.get("conda-forge.yml", {})
if any(k in cfy for k in self.keys_to_remove):
return False
else:
return True
def migrate(self, recipe_dir: str, attrs: "AttrsTypedDict", **kwargs: Any) -> None:
with indir(recipe_dir):
cfg_path = os.path.join("..", "conda-forge.yml")
yaml = YAML()
yaml.indent(mapping=2, sequence=4, offset=2)
with open(cfg_path) as fp:
cfg = yaml.load(fp.read())
for k in self.keys_to_remove:
if k in cfg:
del cfg[k]
with open(cfg_path, "w") as fp:
yaml.dump(cfg, fp)
```
#### File: conda_forge_tick/migrators/license.py
```python
import os
import re
import tempfile
import subprocess
import typing
from typing import Any
import logging
from rever.tools import replace_in_file
from conda_forge_tick.xonsh_utils import indir
from conda_forge_tick.utils import eval_cmd, _get_source_code
from conda_forge_tick.recipe_parser import CondaMetaYAML
from conda_forge_tick.migrators.core import MiniMigrator
try:
from conda_smithy.lint_recipe import NEEDED_FAMILIES
except ImportError:
NEEDED_FAMILIES = ["gpl", "bsd", "mit", "apache", "psf", "agpl", "lgpl"]
if typing.TYPE_CHECKING:
from ..migrators_types import AttrsTypedDict
LICENSE_SPLIT = re.compile(r"\||\+")
LOGGER = logging.getLogger("conda_forge_tick.migrators.license")
def _to_spdx(lic):
"""
we are munging this stuff from conda-build
d_license = {'agpl3': ['AGPL-3', 'AGPL (>= 3)', 'AGPL',
'GNU Affero General Public License'],
'artistic2': ['Artistic-2.0', 'Artistic License 2.0'],
'gpl2': ['GPL-2', 'GPL (>= 2)', 'GNU General Public License (>= 2)'],
'gpl3': ['GPL-3', 'GPL (>= 3)', 'GNU General Public License (>= 3)',
'GPL', 'GNU General Public License'],
'lgpl2': ['LGPL-2', 'LGPL (>= 2)'],
'lgpl21': ['LGPL-2.1', 'LGPL (>= 2.1)'],
'lgpl3': ['LGPL-3', 'LGPL (>= 3)', 'LGPL',
'GNU Lesser General Public License'],
'bsd2': ['BSD_2_clause', 'BSD_2_Clause', 'BSD 2-clause License'],
'bsd3': ['BSD_3_clause', 'BSD_3_Clause', 'BSD 3-clause License'],
'mit': ['MIT'],
}
"""
r_to_spdx = {
"AGPL-3": "AGPL-3.0-only",
"AGPL (>= 3)": "AGPL-3.0-or-later",
"Artistic License 2.0": "Artistic-2.0",
"GPL-2": "GPL-2.0-only",
"GPL (>= 2)": "GPL-2.0-or-later",
"GNU General Public License (>= 2)": "GPL-2.0-or-later",
"GPL-3": "GPL-3.0-only",
"GPL (>= 3)": "GPL-3.0-or-later",
"GNU General Public License (>= 3)": "GPL-3.0-or-later",
"LGPL-2": "LGPL-2.0-only",
"LGPL (>= 2)": "LGPL-2.0-or-later",
"LGPL-2.1": "LGPL-2.1-only",
"LGPL (>= 2.1)": "LGPL-2.1-or-later",
"LGPL-3": "LGPL-3.0-only",
"LGPL (>= 3)": "LGPL-3.0-or-later",
"BSD_2_clause": "BSD-2-Clause",
"BSD_2_Clause": "BSD-2-Clause",
"BSD 2-clause License": "BSD-2-Clause",
"BSD_3_clause": "BSD-3-Clause",
"BSD_3_Clause": "BSD-3-Clause",
"BSD 3-clause License": "BSD-3-Clause",
"Apache License 2.0": "Apache-2.0",
"CC BY-SA 4.0": "CC-BY-SA-4.0",
"Apache License (== 2.0)": "Apache-2.0",
"FreeBSD": "BSD-2-Clause-FreeBSD",
"Apache License (>= 2.0)": "Apache-2.0",
"CC0": "CC0-1.0",
"MIT License": "MIT",
"CeCILL-2": "CECILL-2.0",
"CC BY-NC-SA 4.0": "CC-BY-NC-SA-4.0",
"CC BY 4.0": "CC-BY-4.0",
}
return r_to_spdx.get(lic, lic)
def _remove_file_refs(_parts):
_p = []
for p in _parts:
if p.strip().startswith("file "):
pass
else:
_p.append(p)
return _p
def _munge_licenses(lparts):
new_lparts = []
for lpart in lparts:
if " | " in lpart:
_parts = _remove_file_refs(lpart.split(" | "))
last = len(_parts) - 1
for i, _p in enumerate(_parts):
_p = _munge_licenses([_p])
if len(_p) > 1:
new_lparts.append("(")
new_lparts.extend(_p)
if len(_p) > 1:
new_lparts.append(")")
if i != last:
new_lparts.append(" OR ")
elif " + " in lpart:
_parts = _remove_file_refs(lpart.split(" + "))
last = len(_parts) - 1
for i, _p in enumerate(_parts):
_p = _munge_licenses([_p])
if len(_p) > 1:
new_lparts.append("(")
new_lparts.extend(_p)
if len(_p) > 1:
new_lparts.append(")")
if i != last:
new_lparts.append(" AND ")
else:
new_lparts.append(_to_spdx(lpart.strip()))
return new_lparts
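# Example of the munging above (illustrative):
#   _munge_licenses(["GPL-2 | GPL-3"]) -> ["GPL-2.0-only", " OR ", "GPL-3.0-only"],
#   which joins to the SPDX expression "GPL-2.0-only OR GPL-3.0-only";
#   "file LICENSE" alternatives are dropped by _remove_file_refs.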
def _scrape_license_string(pkg):
d = {}
if pkg.startswith("r-"):
pkg = pkg[2:]
LOGGER.info("LICENSE running cran skeleton for pkg %s" % pkg)
with tempfile.TemporaryDirectory() as tmpdir, indir(tmpdir):
subprocess.run(
[
"conda",
"skeleton",
"cran",
"--allow-archived",
"--use-noarch-generic",
pkg,
],
check=True,
)
with open("r-%s/meta.yaml" % pkg) as fp:
in_about = False
meta_yaml = []
for line in fp.readlines():
if line.startswith("about:"):
in_about = True
elif line.startswith("extra:"):
in_about = False
if in_about:
meta_yaml.append(line)
if line.startswith("# License:"):
d["cran_license"] = line[len("# License:") :].strip()
cmeta = CondaMetaYAML("".join(meta_yaml))
d["license_file"] = [
lf for lf in cmeta.meta.get("about", {}).get("license_file", [])
]
if len(d["license_file"]) == 0:
d["license_file"] = None
if "cran_license" in d:
spdx = _munge_licenses([d["cran_license"]])
if "(Restricts use)" in cmeta.meta.get("about", {}).get("license", ""):
if len(spdx) > 1:
spdx = ["("] + spdx + [")", " AND ", "LicenseRef-RestrictsUse"]
else:
spdx = spdx + [" AND ", "LicenseRef-RestrictsUse"]
d["spdx_license"] = "".join(spdx)
else:
d["spdx_license"] = None
if d:
return d
else:
return None
def _do_r_license_munging(pkg, recipe_dir):
try:
d = _scrape_license_string(pkg)
LOGGER.info("LICENSE R package license data: %s" % d)
with open(os.path.join(recipe_dir, "meta.yaml")) as fp:
cmeta = CondaMetaYAML(fp.read())
if d["license_file"] is not None:
cmeta.meta["about"]["license_file"] = d["license_file"]
if d["spdx_license"] is not None:
cmeta.meta["about"]["license"] = d["spdx_license"]
elif d["cran_license"] is not None:
cmeta.meta["about"]["license"] = d["cran_license"]
with open(os.path.join(recipe_dir, "meta.yaml"), "w") as fp:
cmeta.dump(fp)
except Exception as e:
LOGGER.info("LICENSE R license ERROR: %s" % repr(e))
pass
def _is_r(attrs):
if (
attrs.get("feedstock_name", "").startswith("r-")
or attrs.get("name", "").startswith("r-")
) and (
"r-base" in attrs.get("raw_meta_yaml", "")
or "r-base" in attrs.get("requirements", {}).get("run", set())
):
return True
else:
return False
class LicenseMigrator(MiniMigrator):
post_migration = True
def filter(self, attrs: "AttrsTypedDict", not_bad_str_start: str = "") -> bool:
license = attrs.get("meta_yaml", {}).get("about", {}).get("license", "")
license_fam = (
attrs.get("meta_yaml", {})
.get("about", {})
.get("license_family", "")
.lower()
or license.lower().partition("-")[0].partition("v")[0].partition(" ")[0]
)
if (
license_fam in NEEDED_FAMILIES
or any(n in license_fam for n in NEEDED_FAMILIES)
or _is_r(attrs)
) and "license_file" not in attrs.get("meta_yaml", {}).get("about", {}):
return False
return True
def migrate(self, recipe_dir: str, attrs: "AttrsTypedDict", **kwargs: Any) -> None:
# r- recipes have a special syntax here
if (
attrs.get("feedstock_name", "").startswith("r-")
or attrs.get("name", "").startswith("r-")
) and "r-base" in attrs["raw_meta_yaml"]:
if attrs.get("feedstock_name", None) is not None:
if attrs.get("feedstock_name", None).endswith("-feedstock"):
name = attrs.get("feedstock_name")[: -len("-feedstock")]
else:
name = attrs.get("feedstock_name")
else:
name = attrs.get("name", None)
_do_r_license_munging(name, recipe_dir)
return
try:
cb_work_dir = _get_source_code(recipe_dir)
except Exception:
return
if cb_work_dir is None:
return
with indir(cb_work_dir):
# look for a license file
license_files = [
s
for s in os.listdir(".")
if any(
s.lower().startswith(k) for k in ["license", "copying", "copyright"]
)
]
eval_cmd(f"rm -r {cb_work_dir}")
# if there is a license file in tarball update things
if license_files:
with indir(recipe_dir):
"""BSD 3-Clause License
Copyright (c) 2017, <NAME>
Copyright (c) 2018, The Regro Developers
All rights reserved."""
with open("meta.yaml") as f:
raw = f.read()
lines = raw.splitlines()
ptn = re.compile(r"(\s*?)" + "license:")
for i, line in enumerate(lines):
m = ptn.match(line)
if m is not None:
break
# TODO: Sketchy type assertion
assert m is not None
ws = m.group(1)
if len(license_files) == 1:
replace_in_file(
line,
line + "\n" + ws + f"license_file: {list(license_files)[0]}",
"meta.yaml",
)
else:
# note that this white space is not perfect but works for
# most of the situations
replace_in_file(
line,
line
+ "\n"
+ ws
+ "license_file: \n"
+ "".join(f"{ws*2}- {z} \n" for z in license_files),
"meta.yaml",
)
# if license not in tarball do something!
# check if github in dev url, then use that to get the license
```
#### File: conda_forge_tick/migrators/matplotlib_base.py
```python
import os
import typing
from typing import Any
import copy
from conda_forge_tick.migrators.core import Replacement, _parse_bad_attr
from conda_forge_tick.utils import frozen_to_json_friendly
if typing.TYPE_CHECKING:
from ..migrators_types import AttrsTypedDict, MigrationUidTypedDict
class MatplotlibBase(Replacement):
migrator_version = 0
def filter(self, attrs: "AttrsTypedDict", not_bad_str_start: str = "") -> bool:
# I shipped a bug where I added an entry to the migrator uid and now the
# graph is corrupted - this is being fixed here
def parse_already_pred() -> bool:
migrator_uid: "MigrationUidTypedDict" = typing.cast(
"MigrationUidTypedDict",
frozen_to_json_friendly(self.migrator_uid(attrs))["data"],
)
already_migrated_uids: typing.Iterable["MigrationUidTypedDict"] = list(
copy.deepcopy(z["data"]) for z in attrs.get("PRed", [])
)
# we shipped a bug, so fixing this here -
# need to ignore name in uuid
for uid in already_migrated_uids:
if uid["migrator_name"] == "MatplotlibBase" and "name" in uid:
del uid["name"]
del migrator_uid["name"]
return migrator_uid in already_migrated_uids
_is_archived = attrs.get("archived", False)
_is_pred = parse_already_pred()
_is_bad = _parse_bad_attr(attrs, not_bad_str_start)
requirements = attrs.get("requirements", {})
rq = (
requirements.get("build", set())
| requirements.get("host", set())
| requirements.get("run", set())
| requirements.get("test", set())
)
_no_dep = len(rq & self.packages) == 0
return _is_archived or _is_pred or _is_bad or _no_dep
def migrate(
self, recipe_dir: str, attrs: "AttrsTypedDict", **kwargs: Any
) -> "MigrationUidTypedDict":
yum_pth = os.path.join(recipe_dir, "yum_requirements.txt")
if not os.path.exists(yum_pth):
yum_lines = []
else:
with open(yum_pth) as fp:
yum_lines = fp.readlines()
if "xorg-x11-server-Xorg\n" not in yum_lines:
yum_lines.append("xorg-x11-server-Xorg\n")
for i in range(len(yum_lines)):
if yum_lines[i][-1] != "\n":
yum_lines[i] = yum_lines[i] + "\n"
with open(yum_pth, "w") as fp:
for line in yum_lines:
fp.write(line)
return super().migrate(recipe_dir, attrs, **kwargs)
```
#### File: cf-scripts/tests/test_cos7_config_migrator.py
```python
import os
import pytest
from conda_forge_tick.migrators import (
Version,
Cos7Config,
)
from conda_forge_tick.migrators.cos7 import REQUIRED_RE_LINES, _has_line_set
from test_migrators import run_test_migration
VERSION_COS7 = Version(
set(),
piggy_back_migrations=[Cos7Config()],
)
YAML_PATH = os.path.join(os.path.dirname(__file__), "test_yaml")
@pytest.mark.parametrize("remove_quay", [False, True])
@pytest.mark.parametrize("case", list(range(len(REQUIRED_RE_LINES))))
def test_version_cos7_config(case, remove_quay, tmpdir):
with open(os.path.join(YAML_PATH, "version_cos7_config_simple.yaml")) as fp:
in_yaml = fp.read()
with open(
os.path.join(YAML_PATH, "version_cos7_config_simple_correct.yaml"),
) as fp:
out_yaml = fp.read()
os.makedirs(os.path.join(tmpdir, "recipe"), exist_ok=True)
cfg = os.path.join(tmpdir, "recipe", "conda_build_config.yaml")
with open(cfg, "w") as fp:
for i, (_, _, first, second) in enumerate(REQUIRED_RE_LINES):
if i != case:
fp.write(first + "\n")
if "docker_image" in first and remove_quay:
fp.write(
second.replace("quay.io/condaforge/", "condaforge/") + "\n",
)
run_test_migration(
m=VERSION_COS7,
inp=in_yaml,
output=out_yaml,
kwargs={"new_version": "0.9"},
prb="Dependencies have been updated if changed",
mr_out={
"migrator_name": "Version",
"migrator_version": Version.migrator_version,
"version": "0.9",
},
tmpdir=os.path.join(tmpdir, "recipe"),
)
with open(cfg) as fp:
cfg_lines = fp.readlines()
for first_re, second_re, first, second in REQUIRED_RE_LINES:
assert _has_line_set(cfg_lines, first_re, second_re), (first, second)
@pytest.mark.parametrize("case", list(range(len(REQUIRED_RE_LINES))))
def test_version_cos7_config_skip(case, tmpdir):
with open(os.path.join(YAML_PATH, "version_cos7_config_simple.yaml")) as fp:
in_yaml = fp.read()
with open(
os.path.join(YAML_PATH, "version_cos7_config_simple_correct.yaml"),
) as fp:
out_yaml = fp.read()
os.makedirs(os.path.join(tmpdir, "recipe"), exist_ok=True)
cfg = os.path.join(tmpdir, "recipe", "conda_build_config.yaml")
with open(cfg, "w") as fp:
for i, (_, _, first, second) in enumerate(REQUIRED_RE_LINES):
if i != case:
fp.write(first + "blarg\n")
fp.write(second + "blarg\n")
run_test_migration(
m=VERSION_COS7,
inp=in_yaml,
output=out_yaml,
kwargs={"new_version": "0.9"},
prb="Dependencies have been updated if changed",
mr_out={
"migrator_name": "Version",
"migrator_version": Version.migrator_version,
"version": "0.9",
},
tmpdir=os.path.join(tmpdir, "recipe"),
)
with open(cfg) as fp:
cfg_lines = fp.readlines()
for i, (first_re, second_re, first, second) in enumerate(REQUIRED_RE_LINES):
if i != case:
assert _has_line_set(cfg_lines, first_re, second_re), (first, second)
```
#### File: cf-scripts/tests/test_cross_compile.py
```python
import os
from conda_forge_tick.migrators import (
UpdateConfigSubGuessMigrator,
Version,
GuardTestingMigrator,
UpdateCMakeArgsMigrator,
CrossRBaseMigrator,
CrossPythonMigrator,
Build2HostMigrator,
NoCondaInspectMigrator,
)
from test_migrators import run_test_migration
config_migrator = UpdateConfigSubGuessMigrator()
guard_testing_migrator = GuardTestingMigrator()
cmake_migrator = UpdateCMakeArgsMigrator()
cross_python_migrator = CrossPythonMigrator()
cross_rbase_migrator = CrossRBaseMigrator()
b2h_migrator = Build2HostMigrator()
nci_migrator = NoCondaInspectMigrator()
version_migrator_autoconf = Version(
set(),
piggy_back_migrations=[config_migrator, cmake_migrator, guard_testing_migrator],
)
version_migrator_cmake = Version(
set(),
piggy_back_migrations=[
cmake_migrator,
guard_testing_migrator,
cross_rbase_migrator,
cross_python_migrator,
],
)
version_migrator_python = Version(
set(),
piggy_back_migrations=[cross_python_migrator],
)
version_migrator_rbase = Version(
set(),
piggy_back_migrations=[cross_rbase_migrator],
)
version_migrator_b2h = Version(
set(),
piggy_back_migrations=[b2h_migrator],
)
version_migrator_nci = Version(
set(),
piggy_back_migrations=[nci_migrator],
)
config_recipe = """\
{% set version = "7.0" %}
package:
name: readline
version: {{ version }}
source:
url: https://ftp.gnu.org/gnu/readline/readline-{{ version }}.tar.gz
sha256: 750d437185286f40a369e1e4f4764eda932b9459b5ec9a731628393dd3d32334
build:
skip: true # [win]
number: 2
run_exports:
# change soname at major ver: https://abi-laboratory.pro/tracker/timeline/readline/
- {{ pin_subpackage('readline') }}
requirements:
build:
- pkg-config
- {{ compiler('c') }}
- make
- cmake
host:
- ncurses
run:
- ncurses
about:
home: https://cnswww.cns.cwru.edu/php/chet/readline/rltop.html
license: GPL-3.0-only
license_file: COPYING
summary: library for editing command lines as they are typed in
extra:
recipe-maintainers:
- croth1
"""
config_recipe_correct = """\
{% set version = "8.0" %}
package:
name: readline
version: {{ version }}
source:
url: https://ftp.gnu.org/gnu/readline/readline-{{ version }}.tar.gz
sha256: e339f51971478d369f8a053a330a190781acb9864cf4c541060f12078948e461
build:
skip: true # [win]
number: 0
run_exports:
# change soname at major ver: https://abi-laboratory.pro/tracker/timeline/readline/
- {{ pin_subpackage('readline') }}
requirements:
build:
- pkg-config
- gnuconfig # [unix]
- {{ compiler('c') }}
- make
- cmake
host:
- ncurses
run:
- ncurses
about:
home: https://cnswww.cns.cwru.edu/php/chet/readline/rltop.html
license: GPL-3.0-only
license_file: COPYING
summary: library for editing command lines as they are typed in
extra:
recipe-maintainers:
- croth1
"""
config_recipe_correct_cmake = """\
{% set version = "8.0" %}
package:
name: readline
version: {{ version }}
source:
url: https://ftp.gnu.org/gnu/readline/readline-{{ version }}.tar.gz
sha256: e339f51971478d369f8a053a330a190781acb9864cf4c541060f12078948e461
build:
skip: true # [win]
number: 0
run_exports:
# change soname at major ver: https://abi-laboratory.pro/tracker/timeline/readline/
- {{ pin_subpackage('readline') }}
requirements:
build:
- pkg-config
- {{ compiler('c') }}
- make
- cmake
host:
- ncurses
run:
- ncurses
about:
home: https://cnswww.cns.cwru.edu/php/chet/readline/rltop.html
license: GPL-3.0-only
license_file: COPYING
summary: library for editing command lines as they are typed in
extra:
recipe-maintainers:
- croth1
"""
rbase_recipe = """\
{% set version = "2.0.0" %}
{% set posix = 'm2-' if win else '' %}
{% set native = 'm2w64-' if win else '' %}
package:
name: r-magrittr
version: {{ version|replace("-", "_") }}
source:
url:
- {{ cran_mirror }}/src/contrib/magrittr_{{ version }}.tar.gz
- {{ cran_mirror }}/src/contrib/Archive/magrittr/magrittr_{{ version }}.tar.gz
sha256: 05c45943ada9443134caa0ab24db4a962b629f00b755ccf039a2a2a7b2c92ae8
build:
merge_build_host: true # [win]
number: 1
rpaths:
- lib/R/lib/
- lib/
requirements:
build:
- {{ compiler('c') }} # [not win]
- {{ compiler('m2w64_c') }} # [win]
- {{ posix }}filesystem # [win]
- {{ posix }}make
- {{ posix }}sed # [win]
- {{ posix }}coreutils # [win]
- {{ posix }}zip # [win]
host:
- r-base
- r-rlang
run:
- r-base
- r-rlang
- {{ native }}gcc-libs # [win]
test:
commands:
- $R -e "library('magrittr')" # [not win]
- "\\"%R%\\" -e \\"library('magrittr')\\"" # [win]
about:
home: https://magrittr.tidyverse.org, https://github.com/tidyverse/magrittr
license: MIT
summary: Provides a mechanism for chaining commands with a new forward-pipe operator, %>%. This operator will forward a value, or the result of an expression, into the next function call/expression. There is flexible support for the type of right-hand side expressions. For more information, see package vignette. To quote
<NAME>, "Ceci n'est pas un pipe."
license_family: MIT
license_file:
- {{ environ["PREFIX"] }}/lib/R/share/licenses/MIT
- LICENSE
extra:
recipe-maintainers:
- conda-forge/r
- ocefpaf
"""
rbase_recipe_correct = """\
{% set version = "2.0.1" %}
{% set posix = 'm2-' if win else '' %}
{% set native = 'm2w64-' if win else '' %}
package:
name: r-magrittr
version: {{ version|replace("-", "_") }}
source:
url:
- {{ cran_mirror }}/src/contrib/magrittr_{{ version }}.tar.gz
- {{ cran_mirror }}/src/contrib/Archive/magrittr/magrittr_{{ version }}.tar.gz
sha256: 75c265d51cc2b34beb27040edb09823c7b954d3990a7a931e40690b75d4aad5f
build:
merge_build_host: true # [win]
number: 0
rpaths:
- lib/R/lib/
- lib/
requirements:
build:
- cross-r-base {{ r_base }} # [build_platform != target_platform]
- r-rlang # [build_platform != target_platform]
- {{ compiler('c') }} # [not win]
- {{ compiler('m2w64_c') }} # [win]
- {{ posix }}filesystem # [win]
- {{ posix }}make
- {{ posix }}sed # [win]
- {{ posix }}coreutils # [win]
- {{ posix }}zip # [win]
host:
- r-base
- r-rlang
run:
- r-base
- r-rlang
- {{ native }}gcc-libs # [win]
test:
commands:
- $R -e "library('magrittr')" # [not win]
- "\\"%R%\\" -e \\"library('magrittr')\\"" # [win]
about:
home: https://magrittr.tidyverse.org, https://github.com/tidyverse/magrittr
license: MIT
summary: Provides a mechanism for chaining commands with a new forward-pipe operator, %>%. This operator will forward a value, or the result of an expression, into the next function call/expression. There is flexible support for the type of right-hand side expressions. For more information, see package vignette. To quote
Rene Magritte, "Ceci n'est pas un pipe."
license_family: MIT
license_file:
- {{ environ["PREFIX"] }}/lib/R/share/licenses/MIT
- LICENSE
extra:
recipe-maintainers:
- conda-forge/r
- ocefpaf
"""
python_recipe = """\
{% set version = "1.19.0" %}
package:
name: numpy
version: {{ version }}
source:
url: https://github.com/numpy/numpy/releases/download/v{{ version }}/numpy-{{ version }}.tar.gz
sha256: 153cf8b0176e57a611931981acfe093d2f7fef623b48f91176efa199798a6b90
build:
number: 0
skip: true # [py27]
entry_points:
- f2py = numpy.f2py.f2py2e:main # [win]
requirements:
build:
- {{ compiler('c') }}
# gcc 7.3 segfaults on aarch64
- clangdev # [aarch64]
host:
- python
- pip
- cython
- libblas
- libcblas
- liblapack
run:
- python
test:
requires:
- pytest
- hypothesis
commands:
- f2py -h
- export OPENBLAS_NUM_THREADS=1 # [unix]
- set OPENBLAS_NUM_THREADS=1 # [win]
imports:
- numpy
- numpy.linalg.lapack_lite
about:
home: http://numpy.scipy.org/
license: BSD-3-Clause
license_file: LICENSE.txt
summary: Array processing for numbers, strings, records, and objects.
doc_url: https://docs.scipy.org/doc/numpy/reference/
dev_url: https://github.com/numpy/numpy
extra:
recipe-maintainers:
- jakirkham
- msarahan
- pelson
- rgommers
- ocefpaf
""" # noqa
python_recipe_correct = """\
{% set version = "1.19.1" %}
package:
name: numpy
version: {{ version }}
source:
url: https://github.com/numpy/numpy/releases/download/v{{ version }}/numpy-{{ version }}.tar.gz
sha256: 1396e6c3d20cbfc119195303b0272e749610b7042cc498be4134f013e9a3215c
build:
number: 0
skip: true # [py27]
entry_points:
- f2py = numpy.f2py.f2py2e:main # [win]
requirements:
build:
- python # [build_platform != target_platform]
- cross-python_{{ target_platform }} # [build_platform != target_platform]
- cython # [build_platform != target_platform]
- {{ compiler('c') }}
# gcc 7.3 segfaults on aarch64
- clangdev # [aarch64]
host:
- python
- pip
- cython
- libblas
- libcblas
- liblapack
run:
- python
test:
requires:
- pytest
- hypothesis
commands:
- f2py -h
- export OPENBLAS_NUM_THREADS=1 # [unix]
- set OPENBLAS_NUM_THREADS=1 # [win]
imports:
- numpy
- numpy.linalg.lapack_lite
about:
home: http://numpy.scipy.org/
license: BSD-3-Clause
license_file: LICENSE.txt
summary: Array processing for numbers, strings, records, and objects.
doc_url: https://docs.scipy.org/doc/numpy/reference/
dev_url: https://github.com/numpy/numpy
extra:
recipe-maintainers:
- jakirkham
- msarahan
- pelson
- rgommers
- ocefpaf
""" # noqa
python_no_build_recipe = """\
{% set version = "2020.4.5.2" %}
{% set pip_version = "19.1.1" %}
{% set setuptools_version = "41.0.1" %}
package:
name: certifi
version: {{ version }}
source:
- url: https://pypi.io/packages/source/c/certifi/certifi-{{ version }}.tar.gz
sha256: 5ad7e9a056d25ffa5082862e36f119f7f7cec6457fa07ee2f8c339814b80c9b1
folder: certifi
# bootstrap pip and setuptools to avoid circular dependency
# but without losing metadata
- url: https://pypi.io/packages/py2.py3/p/pip/pip-{{ pip_version }}-py2.py3-none-any.whl
sha256: 993134f0475471b91452ca029d4390dc8f298ac63a712814f101cd1b6db46676
folder: pip_wheel
- url: https://pypi.io/packages/py2.py3/s/setuptools/setuptools-{{ setuptools_version }}-py2.py3-none-any.whl
sha256: c7769ce668c7a333d84e17fe8b524b1c45e7ee9f7908ad0a73e1eda7e6a5aebf
folder: setuptools_wheel
build:
number: 0
requirements:
host:
- python
run:
- python
test:
imports:
- certifi
about:
home: http://certifi.io/
license: ISC
license_file: certifi/LICENSE
summary: Python package for providing Mozilla's CA Bundle.
description: |
Certifi is a curated collection of Root Certificates for validating the
trustworthiness of SSL certificates while verifying the identity of TLS
hosts.
doc_url: https://pypi.python.org/pypi/certifi
dev_url: https://github.com/certifi/python-certifi
doc_source_url: https://github.com/certifi/certifi.io/blob/master/README.rst
extra:
recipe-maintainers:
- jakirkham
- pelson
- sigmavirus24
- ocefpaf
- mingwandroid
- jjhelmus
""" # noqa
python_no_build_recipe_correct = """\
{% set version = "2020.6.20" %}
{% set pip_version = "19.1.1" %}
{% set setuptools_version = "41.0.1" %}
package:
name: certifi
version: {{ version }}
source:
- url: https://pypi.io/packages/source/c/certifi/certifi-{{ version }}.tar.gz
sha256: 5930595817496dd21bb8dc35dad090f1c2cd0adfaf21204bf6732ca5d8ee34d3
folder: certifi
# bootstrap pip and setuptools to avoid circular dependency
# but without losing metadata
- url: https://pypi.io/packages/py2.py3/p/pip/pip-{{ pip_version }}-py2.py3-none-any.whl
sha256: 993134f0475471b91452ca029d4390dc8f298ac63a712814f101cd1b6db46676
folder: pip_wheel
- url: https://pypi.io/packages/py2.py3/s/setuptools/setuptools-{{ setuptools_version }}-py2.py3-none-any.whl
sha256: c7769ce668c7a333d84e17fe8b524b1c45e7ee9f7908ad0a73e1eda7e6a5aebf
folder: setuptools_wheel
build:
number: 0
requirements:
build:
- python # [build_platform != target_platform]
- cross-python_{{ target_platform }} # [build_platform != target_platform]
host:
- python
run:
- python
test:
imports:
- certifi
about:
home: http://certifi.io/
license: ISC
license_file: certifi/LICENSE
summary: Python package for providing Mozilla's CA Bundle.
description: |
Certifi is a curated collection of Root Certificates for validating the
trustworthiness of SSL certificates while verifying the identity of TLS
hosts.
doc_url: https://pypi.python.org/pypi/certifi
dev_url: https://github.com/certifi/python-certifi
doc_source_url: https://github.com/certifi/certifi.io/blob/master/README.rst
extra:
recipe-maintainers:
- jakirkham
- pelson
- sigmavirus24
- ocefpaf
- mingwandroid
- jjhelmus
""" # noqa
python_recipe_b2h = """\
{% set version = "1.19.0" %}
package:
name: numpy
version: {{ version }}
source:
url: https://github.com/numpy/numpy/releases/download/v{{ version }}/numpy-{{ version }}.tar.gz
sha256: 153cf8b0176e57a611931981acfe093d2f7fef623b48f91176efa199798a6b90
build:
number: 0
skip: true # [py27]
entry_points:
- f2py = numpy.f2py.f2py2e:main # [win]
requirements:
build:
- python
- pip
- cython
- libblas
- libcblas
- liblapack
run:
- python
test:
requires:
- pytest
- hypothesis
commands:
- f2py -h
- export OPENBLAS_NUM_THREADS=1 # [unix]
- set OPENBLAS_NUM_THREADS=1 # [win]
imports:
- numpy
- numpy.linalg.lapack_lite
about:
home: http://numpy.scipy.org/
license: BSD-3-Clause
license_file: LICENSE.txt
summary: Array processing for numbers, strings, records, and objects.
doc_url: https://docs.scipy.org/doc/numpy/reference/
dev_url: https://github.com/numpy/numpy
extra:
recipe-maintainers:
- jakirkham
- msarahan
- pelson
- rgommers
- ocefpaf
""" # noqa
python_recipe_b2h_correct = """\
{% set version = "1.19.1" %}
package:
name: numpy
version: {{ version }}
source:
url: https://github.com/numpy/numpy/releases/download/v{{ version }}/numpy-{{ version }}.tar.gz
sha256: 1396e6c3d20cbfc119195303b0272e749610b7042cc498be4134f013e9a3215c
build:
number: 0
skip: true # [py27]
entry_points:
- f2py = numpy.f2py.f2py2e:main # [win]
requirements:
host:
- python
- pip
- cython
- libblas
- libcblas
- liblapack
run:
- python
test:
requires:
- pytest
- hypothesis
commands:
- f2py -h
- export OPENBLAS_NUM_THREADS=1 # [unix]
- set OPENBLAS_NUM_THREADS=1 # [win]
imports:
- numpy
- numpy.linalg.lapack_lite
about:
home: http://numpy.scipy.org/
license: BSD-3-Clause
license_file: LICENSE.txt
summary: Array processing for numbers, strings, records, and objects.
doc_url: https://docs.scipy.org/doc/numpy/reference/
dev_url: https://github.com/numpy/numpy
extra:
recipe-maintainers:
- jakirkham
- msarahan
- pelson
- rgommers
- ocefpaf
""" # noqa
python_recipe_b2h_buildok = """\
{% set version = "1.19.0" %}
package:
name: numpy
version: {{ version }}
source:
url: https://github.com/numpy/numpy/releases/download/v{{ version }}/numpy-{{ version }}.tar.gz
sha256: 153cf8b0176e57a611931981acfe093d2f7fef623b48f91176efa199798a6b90
build:
number: 0
skip: true # [py27]
entry_points:
- f2py = numpy.f2py.f2py2e:main # [win]
requirements:
build:
- {{ compiler('c') }}
- python
- pip
- cython
- libblas
- libcblas
- liblapack
run:
- python
test:
requires:
- pytest
- hypothesis
commands:
- f2py -h
- export OPENBLAS_NUM_THREADS=1 # [unix]
- set OPENBLAS_NUM_THREADS=1 # [win]
imports:
- numpy
- numpy.linalg.lapack_lite
about:
home: http://numpy.scipy.org/
license: BSD-3-Clause
license_file: LICENSE.txt
summary: Array processing for numbers, strings, records, and objects.
doc_url: https://docs.scipy.org/doc/numpy/reference/
dev_url: https://github.com/numpy/numpy
extra:
recipe-maintainers:
- jakirkham
- msarahan
- pelson
- rgommers
- ocefpaf
""" # noqa
python_recipe_b2h_buildok_correct = """\
{% set version = "1.19.1" %}
package:
name: numpy
version: {{ version }}
source:
url: https://github.com/numpy/numpy/releases/download/v{{ version }}/numpy-{{ version }}.tar.gz
sha256: 1396e6c3d20cbfc119195303b0272e749610b7042cc498be4134f013e9a3215c
build:
number: 0
skip: true # [py27]
entry_points:
- f2py = numpy.f2py.f2py2e:main # [win]
requirements:
build:
- {{ compiler('c') }}
- python
- pip
- cython
- libblas
- libcblas
- liblapack
run:
- python
test:
requires:
- pytest
- hypothesis
commands:
- f2py -h
- export OPENBLAS_NUM_THREADS=1 # [unix]
- set OPENBLAS_NUM_THREADS=1 # [win]
imports:
- numpy
- numpy.linalg.lapack_lite
about:
home: http://numpy.scipy.org/
license: BSD-3-Clause
license_file: LICENSE.txt
summary: Array processing for numbers, strings, records, and objects.
doc_url: https://docs.scipy.org/doc/numpy/reference/
dev_url: https://github.com/numpy/numpy
extra:
recipe-maintainers:
- jakirkham
- msarahan
- pelson
- rgommers
- ocefpaf
""" # noqa
python_recipe_b2h_bhskip = """\
{% set version = "1.19.0" %}
package:
name: numpy
version: {{ version }}
source:
url: https://github.com/numpy/numpy/releases/download/v{{ version }}/numpy-{{ version }}.tar.gz
sha256: 153cf8b0176e57a611931981acfe093d2f7fef623b48f91176efa199798a6b90
build:
number: 0
skip: true # [py27]
entry_points:
- f2py = numpy.f2py.f2py2e:main # [win]
requirements:
build:
- {{ compiler('c') }}
host:
run:
- python
test:
requires:
- pytest
- hypothesis
commands:
- f2py -h
- export OPENBLAS_NUM_THREADS=1 # [unix]
- set OPENBLAS_NUM_THREADS=1 # [win]
imports:
- numpy
- numpy.linalg.lapack_lite
about:
home: http://numpy.scipy.org/
license: BSD-3-Clause
license_file: LICENSE.txt
summary: Array processing for numbers, strings, records, and objects.
doc_url: https://docs.scipy.org/doc/numpy/reference/
dev_url: https://github.com/numpy/numpy
extra:
recipe-maintainers:
- jakirkham
- msarahan
- pelson
- rgommers
- ocefpaf
""" # noqa
python_recipe_b2h_bhskip_correct = """\
{% set version = "1.19.1" %}
package:
name: numpy
version: {{ version }}
source:
url: https://github.com/numpy/numpy/releases/download/v{{ version }}/numpy-{{ version }}.tar.gz
sha256: 1396e6c3d20cbfc119195303b0272e749610b7042cc498be4134f013e9a3215c
build:
number: 0
skip: true # [py27]
entry_points:
- f2py = numpy.f2py.f2py2e:main # [win]
requirements:
build:
- {{ compiler('c') }}
host:
run:
- python
test:
requires:
- pytest
- hypothesis
commands:
- f2py -h
- export OPENBLAS_NUM_THREADS=1 # [unix]
- set OPENBLAS_NUM_THREADS=1 # [win]
imports:
- numpy
- numpy.linalg.lapack_lite
about:
home: http://numpy.scipy.org/
license: BSD-3-Clause
license_file: LICENSE.txt
summary: Array processing for numbers, strings, records, and objects.
doc_url: https://docs.scipy.org/doc/numpy/reference/
dev_url: https://github.com/numpy/numpy
extra:
recipe-maintainers:
- jakirkham
- msarahan
- pelson
- rgommers
- ocefpaf
""" # noqa
python_recipe_nci = """\
{% set version = "1.19.0" %}
package:
name: numpy
version: {{ version }}
source:
url: https://github.com/numpy/numpy/releases/download/v{{ version }}/numpy-{{ version }}.tar.gz
sha256: 153cf8b0176e57a611931981acfe093d2f7fef623b48f91176efa199798a6b90
build:
number: 0
skip: true # [py27]
entry_points:
- f2py = numpy.f2py.f2py2e:main # [win]
requirements:
host:
- python
- pip
- cython
- libblas
- libcblas
- liblapack
run:
- python
test:
requires:
- pytest
- hypothesis
commands:
- f2py -h
- export OPENBLAS_NUM_THREADS=1 # [unix]
- set OPENBLAS_NUM_THREADS=1 # [win]
- conda inspect linkages # comment
- conda inspect objects # comment
- conda inspect cars # comment
imports:
- numpy
- numpy.linalg.lapack_lite
about:
home: http://numpy.scipy.org/
license: BSD-3-Clause
license_file: LICENSE.txt
summary: Array processing for numbers, strings, records, and objects.
doc_url: https://docs.scipy.org/doc/numpy/reference/
dev_url: https://github.com/numpy/numpy
extra:
recipe-maintainers:
- jakirkham
- msarahan
- pelson
- rgommers
- ocefpaf
""" # noqa
python_recipe_nci_correct = """\
{% set version = "1.19.1" %}
package:
name: numpy
version: {{ version }}
source:
url: https://github.com/numpy/numpy/releases/download/v{{ version }}/numpy-{{ version }}.tar.gz
sha256: 1396e6c3d20cbfc119195303b0272e749610b7042cc498be4134f013e9a3215c
build:
number: 0
skip: true # [py27]
entry_points:
- f2py = numpy.f2py.f2py2e:main # [win]
requirements:
host:
- python
- pip
- cython
- libblas
- libcblas
- liblapack
run:
- python
test:
requires:
- pytest
- hypothesis
commands:
- f2py -h
- export OPENBLAS_NUM_THREADS=1 # [unix]
- set OPENBLAS_NUM_THREADS=1 # [win]
imports:
- numpy
- numpy.linalg.lapack_lite
about:
home: http://numpy.scipy.org/
license: BSD-3-Clause
license_file: LICENSE.txt
summary: Array processing for numbers, strings, records, and objects.
doc_url: https://docs.scipy.org/doc/numpy/reference/
dev_url: https://github.com/numpy/numpy
extra:
recipe-maintainers:
- jakirkham
- msarahan
- pelson
- rgommers
- ocefpaf
""" # noqa
def test_correct_config_sub(tmpdir):
with open(os.path.join(tmpdir, "build.sh"), "w") as f:
f.write("#!/bin/bash\n./configure")
run_test_migration(
m=version_migrator_autoconf,
inp=config_recipe,
output=config_recipe_correct,
prb="Dependencies have been updated if changed",
kwargs={"new_version": "8.0"},
mr_out={
"migrator_name": "Version",
"migrator_version": Version.migrator_version,
"version": "8.0",
},
tmpdir=tmpdir,
)
with open(os.path.join(tmpdir, "build.sh")) as f:
assert len(f.readlines()) == 4
def test_make_check(tmpdir):
with open(os.path.join(tmpdir, "build.sh"), "w") as f:
f.write("#!/bin/bash\nmake check")
run_test_migration(
m=version_migrator_autoconf,
inp=config_recipe,
output=config_recipe_correct,
prb="Dependencies have been updated if changed",
kwargs={"new_version": "8.0"},
mr_out={
"migrator_name": "Version",
"migrator_version": Version.migrator_version,
"version": "8.0",
},
tmpdir=tmpdir,
)
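    # Grounded in the expected lines below: the autoconf migrator refreshes
    # config.sub/config.guess and wraps "make check" in a cross-compilation guard.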
expected = [
"#!/bin/bash\n",
"# Get an updated config.sub and config.guess\n",
"cp $BUILD_PREFIX/share/gnuconfig/config.* ./support\n",
'if [[ "${CONDA_BUILD_CROSS_COMPILATION}" != "1" ]]; then\n',
"make check\n",
"fi\n",
]
with open(os.path.join(tmpdir, "build.sh")) as f:
lines = f.readlines()
assert lines == expected
def test_cmake(tmpdir):
with open(os.path.join(tmpdir, "build.sh"), "w") as f:
f.write("#!/bin/bash\ncmake ..\nctest")
run_test_migration(
m=version_migrator_cmake,
inp=config_recipe,
output=config_recipe_correct_cmake,
prb="Dependencies have been updated if changed",
kwargs={"new_version": "8.0"},
mr_out={
"migrator_name": "Version",
"migrator_version": Version.migrator_version,
"version": "8.0",
},
tmpdir=tmpdir,
)
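    # Grounded in the expected lines below: the cmake migrator injects ${CMAKE_ARGS}
    # and guards ctest against cross-compilation.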
expected = [
"#!/bin/bash\n",
"cmake ${CMAKE_ARGS} ..\n",
'if [[ "${CONDA_BUILD_CROSS_COMPILATION}" != "1" ]]; then\n',
"ctest\n",
"fi\n",
]
with open(os.path.join(tmpdir, "build.sh")) as f:
lines = f.readlines()
assert lines == expected
def test_cross_rbase(tmpdir):
run_test_migration(
m=version_migrator_rbase,
inp=rbase_recipe,
output=rbase_recipe_correct,
prb="Dependencies have been updated if changed",
kwargs={"new_version": "2.0.1"},
mr_out={
"migrator_name": "Version",
"migrator_version": Version.migrator_version,
"version": "2.0.1",
},
tmpdir=tmpdir,
)
def test_cross_rbase_build_sh(tmpdir):
with open(os.path.join(tmpdir, "build.sh"), "w") as f:
f.write("#!/bin/bash\nR CMD INSTALL --build .")
run_test_migration(
m=version_migrator_rbase,
inp=rbase_recipe,
output=rbase_recipe_correct,
prb="Dependencies have been updated if changed",
kwargs={"new_version": "2.0.1"},
mr_out={
"migrator_name": "Version",
"migrator_version": Version.migrator_version,
"version": "2.0.1",
},
tmpdir=tmpdir,
)
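    # Grounded in the expected lines below: the rbase migrator rewrites build.sh to
    # disable autobrew and call ${R} CMD INSTALL with ${R_ARGS}.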
expected = [
"#!/bin/bash\n",
"\n",
"export DISABLE_AUTOBREW=1\n",
"\n",
"# shellcheck disable=SC2086\n",
"${R} CMD INSTALL --build . ${R_ARGS}\n",
]
with open(os.path.join(tmpdir, "build.sh")) as f:
lines = f.readlines()
assert lines == expected
def test_cross_python(tmpdir):
run_test_migration(
m=version_migrator_python,
inp=python_recipe,
output=python_recipe_correct,
prb="Dependencies have been updated if changed",
kwargs={"new_version": "1.19.1"},
mr_out={
"migrator_name": "Version",
"migrator_version": Version.migrator_version,
"version": "1.19.1",
},
tmpdir=tmpdir,
)
def test_cross_python_no_build(tmpdir):
run_test_migration(
m=version_migrator_python,
inp=python_no_build_recipe,
output=python_no_build_recipe_correct,
prb="Dependencies have been updated if changed",
kwargs={"new_version": "2020.6.20"},
mr_out={
"migrator_name": "Version",
"migrator_version": Version.migrator_version,
"version": "2020.6.20",
},
tmpdir=tmpdir,
)
def test_build2host(tmpdir):
run_test_migration(
m=version_migrator_b2h,
inp=python_recipe_b2h,
output=python_recipe_b2h_correct,
prb="Dependencies have been updated if changed",
kwargs={"new_version": "1.19.1"},
mr_out={
"migrator_name": "Version",
"migrator_version": Version.migrator_version,
"version": "1.19.1",
},
tmpdir=tmpdir,
)
def test_build2host_buildok(tmpdir):
run_test_migration(
m=version_migrator_b2h,
inp=python_recipe_b2h_buildok,
output=python_recipe_b2h_buildok_correct,
prb="Dependencies have been updated if changed",
kwargs={"new_version": "1.19.1"},
mr_out={
"migrator_name": "Version",
"migrator_version": Version.migrator_version,
"version": "1.19.1",
},
tmpdir=tmpdir,
)
def test_build2host_bhskip(tmpdir):
run_test_migration(
m=version_migrator_b2h,
inp=python_recipe_b2h_bhskip,
output=python_recipe_b2h_bhskip_correct,
prb="Dependencies have been updated if changed",
kwargs={"new_version": "1.19.1"},
mr_out={
"migrator_name": "Version",
"migrator_version": Version.migrator_version,
"version": "1.19.1",
},
tmpdir=tmpdir,
)
def test_nocondainspect(tmpdir):
run_test_migration(
m=version_migrator_nci,
inp=python_recipe_nci,
output=python_recipe_nci_correct,
prb="Dependencies have been updated if changed",
kwargs={"new_version": "1.19.1"},
mr_out={
"migrator_name": "Version",
"migrator_version": Version.migrator_version,
"version": "1.19.1",
},
tmpdir=tmpdir,
)
```
#### File: cf-scripts/tests/test_env_management.py
```python
import os
from conda_forge_tick.env_management import SensitiveEnv
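# These tests exercise the hide/reveal round trip: SensitiveEnv removes
# credential-like variables (e.g. PASSWORD) from os.environ and restores them.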
def test_simple_sensitive_env(env_setup):
os.environ["PASSWORD"] = "hi"
s = SensitiveEnv()
s.hide_env_vars()
assert "PASSWORD" not in os.environ
s.reveal_env_vars()
assert "PASSWORD" in os.environ
assert os.environ["PASSWORD"] == "hi"
def test_ctx_sensitive_env(env_setup):
os.environ["PASSWORD"] = "hi"
s = SensitiveEnv()
with s.sensitive_env():
assert "PASSWORD" in os.environ
assert os.environ["PASSWORD"] == "hi"
assert "PASSWORD" not in os.environ
def test_double_sensitive_env(env_setup):
os.environ["PASSWORD"] = "hi"
os.environ["pwd"] = "<PASSWORD>"
s = SensitiveEnv()
s.hide_env_vars()
s.SENSITIVE_KEYS.append("pwd")
s.hide_env_vars()
s.reveal_env_vars()
assert os.environ["pwd"] == "<PASSWORD>"
assert os.environ["PASSWORD"] == "hi"
```
#### File: cf-scripts/tests/test_feedstock_parser.py
```python
import os
import pprint
import pytest
from conda_forge_tick.feedstock_parser import _get_requirements
from conda_forge_tick.utils import parse_meta_yaml
@pytest.mark.parametrize(
"plat,arch,cfg,has_cudnn",
[
(
"linux",
"64",
"linux_64_cuda_compiler_version10.2numpy1.19python3.9.____cpython.yaml",
True,
),
("osx", "64", "osx_64_numpy1.16python3.6.____cpython.yaml", False),
],
)
def test_parse_cudnn(plat, arch, cfg, has_cudnn):
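    # Only the CUDA-enabled linux-64 variant should end up with cudnn in an
    # output's host requirements; the osx CPU variant should not.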
recipe_dir = os.path.abspath(
os.path.join(
os.path.dirname(__file__),
"pytorch-cpu-feedstock",
"recipe",
),
)
with open(os.path.join(recipe_dir, "meta.yaml")) as fp:
recipe_text = fp.read()
meta = parse_meta_yaml(
recipe_text,
for_pinning=False,
platform=plat,
arch=arch,
recipe_dir=recipe_dir,
cbc_path=os.path.join(recipe_dir, "..", ".ci_support", cfg),
log_debug=True,
)
if has_cudnn:
assert any(
"cudnn" in out.get("requirements", {}).get("host", [])
for out in meta["outputs"]
), pprint.pformat(meta)
else:
assert all(
"cudnn" not in out.get("requirements", {}).get("host", [])
for out in meta["outputs"]
), pprint.pformat(meta)
def test_get_requirements():
meta_yaml = {
"requirements": {"build": ["1", "2"], "host": ["2", "3"]},
"outputs": [
{"requirements": {"host": ["4"]}},
{"requirements": {"run": ["5"]}},
{"requirements": ["6"]},
],
}
assert _get_requirements({}) == set()
assert _get_requirements(meta_yaml) == {"1", "2", "3", "4", "5", "6"}
assert _get_requirements(meta_yaml, outputs=False) == {"1", "2", "3"}
assert _get_requirements(meta_yaml, host=False) == {"1", "2", "5", "6"}
```
#### File: cf-scripts/tests/test_migrators.py
```python
import os
import builtins
import re
import pytest
import networkx as nx
from conda_forge_tick.contexts import MigratorSessionContext, MigratorContext
from conda_forge_tick.migrators import (
Version,
MigrationYaml,
Replacement,
)
# Legacy THINGS
from conda_forge_tick.migrators.disabled.legacy import (
JS,
Compiler,
Noarch,
Pinning,
NoarchR,
BlasRebuild,
Rebuild,
)
from conda_forge_tick.utils import (
parse_meta_yaml,
frozen_to_json_friendly,
)
from conda_forge_tick.feedstock_parser import populate_feedstock_attributes
from xonsh.lib import subprocess
from xonsh.lib.os import indir
sample_yaml_rebuild = """
{% set version = "1.3.2" %}
package:
name: scipy
version: {{ version }}
source:
url: https://github.com/scipy/scipy/archive/v{{ version }}.tar.gz
sha256: ac0937d29a3f93cc26737fdf318c09408e9a48adee1648a25d0cdce5647b8eb4
patches:
- gh10591.patch
- relax_gmres_error_check.patch # [aarch64]
- skip_problematic_boost_test.patch # [aarch64 or ppc64le]
- skip_problematic_root_finding.patch # [aarch64 or ppc64le]
- skip_TestIDCTIVFloat_aarch64.patch # [aarch64]
- skip_white_tophat03.patch # [aarch64 or ppc64le]
# remove this patch when updating to 1.3.3
{% if version == "1.3.2" %}
- scipy-1.3.2-bad-tests.patch # [osx and py == 38]
- gh11046.patch # [ppc64le]
{% endif %}
build:
number: 0
skip: true # [win or py2k]
requirements:
build:
- {{ compiler('fortran') }}
- {{ compiler('c') }}
- {{ compiler('cxx') }}
host:
- libblas
- libcblas
- liblapack
- python
- setuptools
- cython
- numpy
- pip
run:
- python
- {{ pin_compatible('numpy') }}
test:
requires:
- pytest
- pytest-xdist
- mpmath
{% if version == "1.3.2" %}
- blas * netlib # [ppc64le]
{% endif %}
about:
home: http://www.scipy.org/
license: BSD-3-Clause
license_file: LICENSE.txt
summary: Scientific Library for Python
description: |
SciPy is a Python-based ecosystem of open-source software for mathematics,
science, and engineering.
doc_url: http://www.scipy.org/docs.html
dev_url: https://github.com/scipy/scipy
extra:
recipe-maintainers:
- jakirkham
- msarahan
- rgommers
- ocefpaf
- beckermr
"""
updated_yaml_rebuild = """
{% set version = "1.3.2" %}
package:
name: scipy
version: {{ version }}
source:
url: https://github.com/scipy/scipy/archive/v{{ version }}.tar.gz
sha256: ac0937d29a3f93cc26737fdf318c09408e9a48adee1648a25d0cdce5647b8eb4
patches:
- gh10591.patch
- relax_gmres_error_check.patch # [aarch64]
- skip_problematic_boost_test.patch # [aarch64 or ppc64le]
- skip_problematic_root_finding.patch # [aarch64 or ppc64le]
- skip_TestIDCTIVFloat_aarch64.patch # [aarch64]
- skip_white_tophat03.patch # [aarch64 or ppc64le]
# remove this patch when updating to 1.3.3
{% if version == "1.3.2" %}
- scipy-1.3.2-bad-tests.patch # [osx and py == 38]
- gh11046.patch # [ppc64le]
{% endif %}
build:
number: 1
skip: true # [win or py2k]
requirements:
build:
- {{ compiler('fortran') }}
- {{ compiler('c') }}
- {{ compiler('cxx') }}
host:
- libblas
- libcblas
- liblapack
- python
- setuptools
- cython
- numpy
- pip
run:
- python
- {{ pin_compatible('numpy') }}
test:
requires:
- pytest
- pytest-xdist
- mpmath
{% if version == "1.3.2" %}
- blas * netlib # [ppc64le]
{% endif %}
about:
home: http://www.scipy.org/
license: BSD-3-Clause
license_file: LICENSE.txt
summary: Scientific Library for Python
description: |
SciPy is a Python-based ecosystem of open-source software for mathematics,
science, and engineering.
doc_url: http://www.scipy.org/docs.html
dev_url: https://github.com/scipy/scipy
extra:
recipe-maintainers:
- jakirkham
- msarahan
- rgommers
- ocefpaf
- beckermr
"""
updated_yaml_rebuild_no_build_number = """
{% set version = "1.3.2" %}
package:
name: scipy
version: {{ version }}
source:
url: https://github.com/scipy/scipy/archive/v{{ version }}.tar.gz
sha256: ac0937d29a3f93cc26737fdf318c09408e9a48adee1648a25d0cdce5647b8eb4
patches:
- gh10591.patch
- relax_gmres_error_check.patch # [aarch64]
- skip_problematic_boost_test.patch # [aarch64 or ppc64le]
- skip_problematic_root_finding.patch # [aarch64 or ppc64le]
- skip_TestIDCTIVFloat_aarch64.patch # [aarch64]
- skip_white_tophat03.patch # [aarch64 or ppc64le]
# remove this patch when updating to 1.3.3
{% if version == "1.3.2" %}
- scipy-1.3.2-bad-tests.patch # [osx and py == 38]
- gh11046.patch # [ppc64le]
{% endif %}
build:
number: 0
skip: true # [win or py2k]
requirements:
build:
- {{ compiler('fortran') }}
- {{ compiler('c') }}
- {{ compiler('cxx') }}
host:
- libblas
- libcblas
- liblapack
- python
- setuptools
- cython
- numpy
- pip
run:
- python
- {{ pin_compatible('numpy') }}
test:
requires:
- pytest
- pytest-xdist
- mpmath
{% if version == "1.3.2" %}
- blas * netlib # [ppc64le]
{% endif %}
about:
home: http://www.scipy.org/
license: BSD-3-Clause
license_file: LICENSE.txt
summary: Scientific Library for Python
description: |
SciPy is a Python-based ecosystem of open-source software for mathematics,
science, and engineering.
doc_url: http://www.scipy.org/docs.html
dev_url: https://github.com/scipy/scipy
extra:
recipe-maintainers:
- jakirkham
- msarahan
- rgommers
- ocefpaf
- beckermr
"""
class NoFilter:
def filter(self, attrs, not_bad_str_start=""):
return False
class _MigrationYaml(NoFilter, MigrationYaml):
pass
yaml_rebuild = _MigrationYaml(yaml_contents="hello world", name="hi")
yaml_rebuild.cycles = []
yaml_rebuild_no_build_number = _MigrationYaml(
yaml_contents="hello world",
name="hi",
bump_number=0,
)
yaml_rebuild_no_build_number.cycles = []
def run_test_yaml_migration(
m, *, inp, output, kwargs, prb, mr_out, tmpdir, should_filter=False
):
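    """Write ``inp`` to ``tmpdir`` as a recipe, apply the yaml migrator ``m``,
    then check the migrated meta.yaml against ``output`` and verify the
    migration yaml is saved under .ci_support/migrations/.
    """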
os.makedirs(os.path.join(tmpdir, "recipe"), exist_ok=True)
with open(os.path.join(tmpdir, "recipe", "meta.yaml"), "w") as f:
f.write(inp)
with indir(tmpdir):
subprocess.run(["git", "init"])
# Load the meta.yaml (this is done in the graph)
try:
pmy = parse_meta_yaml(inp)
except Exception:
pmy = {}
if pmy:
pmy["version"] = pmy["package"]["version"]
pmy["req"] = set()
for k in ["build", "host", "run"]:
pmy["req"] |= set(pmy.get("requirements", {}).get(k, set()))
try:
pmy["meta_yaml"] = parse_meta_yaml(inp)
except Exception:
pmy["meta_yaml"] = {}
pmy["raw_meta_yaml"] = inp
pmy.update(kwargs)
assert m.filter(pmy) is should_filter
if should_filter:
return
mr = m.migrate(os.path.join(tmpdir, "recipe"), pmy)
assert mr_out == mr
pmy.update(PRed=[frozen_to_json_friendly(mr)])
with open(os.path.join(tmpdir, "recipe/meta.yaml")) as f:
actual_output = f.read()
assert actual_output == output
assert os.path.exists(os.path.join(tmpdir, ".ci_support/migrations/hi.yaml"))
with open(os.path.join(tmpdir, ".ci_support/migrations/hi.yaml")) as f:
saved_migration = f.read()
assert saved_migration == m.yaml_contents
def test_yaml_migration_rebuild(tmpdir):
run_test_yaml_migration(
m=yaml_rebuild,
inp=sample_yaml_rebuild,
output=updated_yaml_rebuild,
kwargs={"feedstock_name": "scipy"},
prb="This PR has been triggered in an effort to update **hi**.",
mr_out={
"migrator_name": yaml_rebuild.__class__.__name__,
"migrator_version": yaml_rebuild.migrator_version,
"name": "hi",
"bot_rerun": False,
},
tmpdir=tmpdir,
)
def test_yaml_migration_rebuild_no_buildno(tmpdir):
run_test_yaml_migration(
m=yaml_rebuild_no_build_number,
inp=sample_yaml_rebuild,
output=updated_yaml_rebuild_no_build_number,
kwargs={"feedstock_name": "scipy"},
prb="This PR has been triggered in an effort to update **hi**.",
mr_out={
"migrator_name": yaml_rebuild.__class__.__name__,
"migrator_version": yaml_rebuild.migrator_version,
"name": "hi",
"bot_rerun": False,
},
tmpdir=tmpdir,
)
sample_js = """{% set name = "jstz" %}
{% set version = "1.0.11" %}
{% set sha256 = "985d5fd8705930aab9cc59046e99c1f512d05109c9098039f880df5f5df2bf24" %}
package:
name: {{ name|lower }}
version: {{ version }}
source:
url: https://github.com/iansinnott/{{ name }}/archive/v{{ version }}.tar.gz
sha256: {{ sha256 }}
build:
number: 0
noarch: generic
script: npm install -g .
requirements:
build:
- nodejs
test:
commands:
- npm list -g jstz
requires:
- nodejs
about:
home: https://github.com/iansinnott/jstz
license: MIT
license_family: MIT
license_file: LICENCE
summary: 'Timezone detection for JavaScript'
description: |
This library allows you to detect a user's timezone from within their browser.
It is often useful to use JSTZ in combination with a timezone parsing library
such as Moment Timezone.
doc_url: http://pellepim.bitbucket.org/jstz/
dev_url: https://github.com/iansinnott/jstz
extra:
recipe-maintainers:
- cshaley
- sannykr"""
sample_js2 = """{% set name = "jstz" %}
{% set version = "1.0.11" %}
{% set sha256 = "985d5fd8705930aab9cc59046e99c1f512d05109c9098039f880df5f5df2bf24" %}
package:
name: {{ name|lower }}
version: {{ version }}
source:
url: https://github.com/iansinnott/{{ name }}/archive/v{{ version }}.tar.gz
sha256: {{ sha256 }}
build:
number: 0
noarch: generic
script: |
tgz=$(npm pack)
npm install -g $tgz
requirements:
build:
- nodejs
test:
commands:
- npm list -g jstz
requires:
- nodejs
about:
home: https://github.com/iansinnott/jstz
license: MIT
license_family: MIT
license_file: LICENCE
summary: 'Timezone detection for JavaScript'
description: |
This library allows you to detect a user's timezone from within their browser.
It is often useful to use JSTZ in combination with a timezone parsing library
such as Moment Timezone.
doc_url: http://pellepim.bitbucket.org/jstz/
dev_url: https://github.com/iansinnott/jstz
extra:
recipe-maintainers:
- cshaley
- sannykr"""
correct_js = """{% set name = "jstz" %}
{% set version = "1.0.11" %}
{% set sha256 = "985d5fd8705930aab9cc59046e99c1f512d05109c9098039f880df5f5df2bf24" %}
package:
name: {{ name|lower }}
version: {{ version }}
source:
url: https://github.com/iansinnott/{{ name }}/archive/v{{ version }}.tar.gz
sha256: {{ sha256 }}
build:
number: 1
noarch: generic
script: |
tgz=$(npm pack)
npm install -g $tgz
requirements:
build:
- nodejs
test:
commands:
- npm list -g jstz
requires:
- nodejs
about:
home: https://github.com/iansinnott/jstz
license: MIT
license_family: MIT
license_file: LICENCE
summary: 'Timezone detection for JavaScript'
description: |
This library allows you to detect a user's timezone from within their browser.
It is often useful to use JSTZ in combination with a timezone parsing library
such as Moment Timezone.
doc_url: http://pellepim.bitbucket.org/jstz/
dev_url: https://github.com/iansinnott/jstz
extra:
recipe-maintainers:
- cshaley
- sannykr
"""
sample_cb3 = """
{# sample_cb3 #}
{% set version = "1.14.5" %}
{% set build_number = 0 %}
{% set variant = "openblas" %}
{% set build_number = build_number + 200 %}
package:
name: numpy
version: {{ version }}
source:
url: https://github.com/numpy/numpy/releases/download/v{{ version }}/numpy-{{ version }}.tar.gz
sha256: 1b4a02758fb68a65ea986d808867f1d6383219c234aef553a8741818e795b529
build:
number: {{ build_number }}
skip: true # [win32 or (win and py27)]
features:
- blas_{{ variant }}
requirements:
build:
- python
- pip
- cython
- toolchain
- blas 1.1 {{ variant }}
- openblas 0.2.20|0.2.20.*
run:
- python
- blas 1.1 {{ variant }}
- openblas 0.2.20|0.2.20.*
test:
requires:
- nose
commands:
- f2py -h
- conda inspect linkages -p $PREFIX $PKG_NAME # [not win]
- conda inspect objects -p $PREFIX $PKG_NAME # [osx]
imports:
- numpy
- numpy.linalg.lapack_lite
about:
home: http://numpy.scipy.org/
license: BSD 3-Clause
license_file: LICENSE.txt
summary: 'Array processing for numbers, strings, records, and objects.'
doc_url: https://docs.scipy.org/doc/numpy/reference/
dev_url: https://github.com/numpy/numpy
extra:
recipe-maintainers:
- jakirkham
- msarahan
- pelson
- rgommers
- ocefpaf
""" # noqa
correct_cb3 = """
{# correct_cb3 #}
{% set version = "1.14.5" %}
{% set build_number = 1 %}
{% set variant = "openblas" %}
{% set build_number = build_number + 200 %}
package:
name: numpy
version: {{ version }}
source:
url: https://github.com/numpy/numpy/releases/download/v{{ version }}/numpy-{{ version }}.tar.gz
sha256: 1b4a02758fb68a65ea986d808867f1d6383219c234aef553a8741818e795b529
build:
number: {{ build_number }}
skip: true # [win32 or (win and py27)]
features:
- blas_{{ variant }}
requirements:
build:
- {{ compiler('fortran') }}
- {{ compiler('c') }}
- {{ compiler('cxx') }}
host:
- python
- pip
- cython
- blas 1.1 {{ variant }}
- openblas
run:
- python
- blas 1.1 {{ variant }}
- openblas
test:
requires:
- nose
commands:
- f2py -h
- conda inspect linkages -p $PREFIX $PKG_NAME # [not win]
- conda inspect objects -p $PREFIX $PKG_NAME # [osx]
imports:
- numpy
- numpy.linalg.lapack_lite
about:
home: http://numpy.scipy.org/
license: BSD 3-Clause
license_file: LICENSE.txt
summary: 'Array processing for numbers, strings, records, and objects.'
doc_url: https://docs.scipy.org/doc/numpy/reference/
dev_url: https://github.com/numpy/numpy
extra:
recipe-maintainers:
- jakirkham
- msarahan
- pelson
- rgommers
- ocefpaf
""" # noqa
sample_r_base = """
{# sample_r_base #}
{% set version = '0.7-1' %}
{% set posix = 'm2-' if win else '' %}
{% set native = 'm2w64-' if win else '' %}
package:
name: r-stabledist
version: {{ version|replace("-", "_") }}
source:
fn: stabledist_{{ version }}.tar.gz
url:
- https://cran.r-project.org/src/contrib/stabledist_{{ version }}.tar.gz
- https://cran.r-project.org/src/contrib/Archive/stabledist/stabledist_{{ version }}.tar.gz
sha256: 06c5704d3a3c179fa389675c537c39a006867bc6e4f23dd7e406476ed2c88a69
build:
number: 1
rpaths:
- lib/R/lib/
- lib/
skip: True # [win32]
requirements:
build:
- r-base
run:
- r-base
test:
commands:
- $R -e "library('stabledist')" # [not win]
- "\\"%R%\\" -e \\"library('stabledist')\\"" # [win]
""" # noqa
updated_r_base = """
{# updated_r_base #}
{% set version = '0.7-1' %}
{% set posix = 'm2-' if win else '' %}
{% set native = 'm2w64-' if win else '' %}
package:
name: r-stabledist
version: {{ version|replace("-", "_") }}
source:
fn: stabledist_{{ version }}.tar.gz
url:
- https://cran.r-project.org/src/contrib/stabledist_{{ version }}.tar.gz
- https://cran.r-project.org/src/contrib/Archive/stabledist/stabledist_{{ version }}.tar.gz
sha256: 06c5704d3a3c179fa389675c537c39a006867bc6e4f23dd7e406476ed2c88a69
build:
noarch: generic
number: 2
rpaths:
- lib/R/lib/
- lib/
requirements:
build:
- r-base
run:
- r-base
test:
commands:
- $R -e "library('stabledist')" # [not win]
- "\\"%R%\\" -e \\"library('stabledist')\\"" # [win]
""" # noqa
sample_r_base2 = """
{% set version = '0.7-1' %}
{% set posix = 'm2-' if win else '' %}
{% set native = 'm2w64-' if win else '' %}
package:
name: r-stabledist
version: {{ version|replace("-", "_") }}
source:
fn: stabledist_{{ version }}.tar.gz
url:
- https://cran.r-project.org/src/contrib/stabledist_{{ version }}.tar.gz
- https://cran.r-project.org/src/contrib/Archive/stabledist/stabledist_{{ version }}.tar.gz
sha256: 06c5704d3a3c179fa389675c537c39a006867bc6e4f23dd7e406476ed2c88a69
build:
number: 1
rpaths:
- lib/R/lib/
- lib/
skip: True # [win32]
requirements:
build:
- r-base
- {{ compiler('c') }}
run:
- r-base
test:
commands:
- $R -e "library('stabledist')" # [not win]
- "\\"%R%\\" -e \\"library('stabledist')\\"" # [win]
""" # noqa
updated_r_base2 = """
{% set version = '0.7-1' %}
{% set posix = 'm2-' if win else '' %}
{% set native = 'm2w64-' if win else '' %}
package:
name: r-stabledist
version: {{ version|replace("-", "_") }}
source:
fn: stabledist_{{ version }}.tar.gz
url:
- https://cran.r-project.org/src/contrib/stabledist_{{ version }}.tar.gz
- https://cran.r-project.org/src/contrib/Archive/stabledist/stabledist_{{ version }}.tar.gz
sha256: 06c5704d3a3c179fa389675c537c39a006867bc6e4f23dd7e406476ed2c88a69
build:
number: 2
rpaths:
- lib/R/lib/
- lib/
skip: True # [win32]
requirements:
build:
- r-base
- {{ compiler('c') }}
run:
- r-base
test:
commands:
- $R -e "library('stabledist')" # [not win]
- "\\"%R%\\" -e \\"library('stabledist')\\"" # [win]
""" # noqa
# Test that filepaths to various licenses are updated for a noarch recipe
sample_r_licenses_noarch = """
{% set version = '0.7-1' %}
{% set posix = 'm2-' if win else '' %}
{% set native = 'm2w64-' if win else '' %}
package:
name: r-stabledist
version: {{ version|replace("-", "_") }}
source:
fn: stabledist_{{ version }}.tar.gz
url:
- https://cran.r-project.org/src/contrib/stabledist_{{ version }}.tar.gz
- https://cran.r-project.org/src/contrib/Archive/stabledist/stabledist_{{ version }}.tar.gz
sha256: 06c5704d3a3c179fa389675c537c39a006867bc6e4f23dd7e406476ed2c88a69
build:
number: 1
rpaths:
- lib/R/lib/
- lib/
skip: True # [win32]
requirements:
build:
- r-base
run:
- r-base
test:
commands:
- $R -e "library('stabledist')" # [not win]
- "\\"%R%\\" -e \\"library('stabledist')\\"" # [win]
about:
license_family: GPL3
license_file: '{{ environ["PREFIX"] }}/lib/R/share/licenses/GPL-3' # [unix]
license_file: '{{ environ["PREFIX"] }}\\R\\share\\licenses\\GPL-3' # [win]
license_family: MIT
license_file: '{{ environ["PREFIX"] }}/lib/R/share/licenses/MIT' # [unix]
license_file: '{{ environ["PREFIX"] }}\\R\\share\\licenses\\MIT' # [win]
license_family: LGPL
license_file: '{{ environ["PREFIX"] }}/lib/R/share/licenses/LGPL-2' # [unix]
license_file: '{{ environ["PREFIX"] }}\\R\\share\\licenses\\LGPL-2' # [win]
license_family: LGPL
license_file: '{{ environ["PREFIX"] }}/lib/R/share/licenses/LGPL-2.1' # [unix]
license_file: '{{ environ["PREFIX"] }}\\R\\share\\licenses\\LGPL-2.1' # [win]
license_family: BSD
license_file: '{{ environ["PREFIX"] }}/lib/R/share/licenses/BSD_3_clause' # [unix]
license_file: '{{ environ["PREFIX"] }}\\R\\share\\licenses\\BSD_3_clause' # [win]
license_file: '{{ environ["PREFIX"] }}/lib/R/share/licenses/GPL-2' # [unix]
license_file: '{{ environ["PREFIX"] }}/lib/R/share/licenses/BSD_3_clause' # [unix]
""" # noqa
updated_r_licenses_noarch = """
{% set version = '0.7-1' %}
{% set posix = 'm2-' if win else '' %}
{% set native = 'm2w64-' if win else '' %}
package:
name: r-stabledist
version: {{ version|replace("-", "_") }}
source:
fn: stabledist_{{ version }}.tar.gz
url:
- https://cran.r-project.org/src/contrib/stabledist_{{ version }}.tar.gz
- https://cran.r-project.org/src/contrib/Archive/stabledist/stabledist_{{ version }}.tar.gz
sha256: 06c5704d3a3c179fa389675c537c39a006867bc6e4f23dd7e406476ed2c88a69
build:
noarch: generic
number: 2
rpaths:
- lib/R/lib/
- lib/
requirements:
build:
- r-base
run:
- r-base
test:
commands:
- $R -e "library('stabledist')" # [not win]
- "\\"%R%\\" -e \\"library('stabledist')\\"" # [win]
about:
license_family: GPL3
license_file: '{{ environ["PREFIX"] }}/lib/R/share/licenses/GPL-3'
license_family: MIT
license_file: '{{ environ["PREFIX"] }}/lib/R/share/licenses/MIT'
license_family: LGPL
license_file: '{{ environ["PREFIX"] }}/lib/R/share/licenses/LGPL-2'
license_family: LGPL
license_file: '{{ environ["PREFIX"] }}/lib/R/share/licenses/LGPL-2.1'
license_family: BSD
license_file: '{{ environ["PREFIX"] }}/lib/R/share/licenses/BSD_3_clause'
license_file: '{{ environ["PREFIX"] }}/lib/R/share/licenses/GPL-2'
license_file: '{{ environ["PREFIX"] }}/lib/R/share/licenses/BSD_3_clause'
""" # noqa
# Test that filepaths to various licenses are updated for a compiled recipe
sample_r_licenses_compiled = """
{% set version = '0.7-1' %}
{% set posix = 'm2-' if win else '' %}
{% set native = 'm2w64-' if win else '' %}
package:
name: r-stabledist
version: {{ version|replace("-", "_") }}
source:
fn: stabledist_{{ version }}.tar.gz
url:
- https://cran.r-project.org/src/contrib/stabledist_{{ version }}.tar.gz
- https://cran.r-project.org/src/contrib/Archive/stabledist/stabledist_{{ version }}.tar.gz
sha256: 06c5704d3a3c179fa389675c537c39a006867bc6e4f23dd7e406476ed2c88a69
build:
number: 1
rpaths:
- lib/R/lib/
- lib/
skip: True # [win32]
requirements:
build:
- r-base
- {{ compiler('c') }}
run:
- r-base
test:
commands:
- $R -e "library('stabledist')" # [not win]
- "\\"%R%\\" -e \\"library('stabledist')\\"" # [win]
about:
license_family: GPL3
license_file: '{{ environ["PREFIX"] }}/lib/R/share/licenses/GPL-3' # [unix]
license_file: '{{ environ["PREFIX"] }}\\R\\share\\licenses\\GPL-3' # [win]
license_family: MIT
license_file: '{{ environ["PREFIX"] }}/lib/R/share/licenses/MIT' # [unix]
license_file: '{{ environ["PREFIX"] }}\\R\\share\\licenses\\MIT' # [win]
license_family: LGPL
license_file: '{{ environ["PREFIX"] }}/lib/R/share/licenses/LGPL-2' # [unix]
license_file: '{{ environ["PREFIX"] }}\\R\\share\\licenses\\LGPL-2' # [win]
license_family: LGPL
license_file: '{{ environ["PREFIX"] }}/lib/R/share/licenses/LGPL-2.1' # [unix]
license_file: '{{ environ["PREFIX"] }}\\R\\share\\licenses\\LGPL-2.1' # [win]
license_family: BSD
license_file: '{{ environ["PREFIX"] }}/lib/R/share/licenses/BSD_3_clause' # [unix]
license_file: '{{ environ["PREFIX"] }}\\R\\share\\licenses\\BSD_3_clause' # [win]
license_file: '{{ environ["PREFIX"] }}/lib/R/share/licenses/GPL-2' # [unix]
license_file: '{{ environ["PREFIX"] }}/lib/R/share/licenses/BSD_3_clause' # [unix]
""" # noqa
updated_r_licenses_compiled = """
{% set version = '0.7-1' %}
{% set posix = 'm2-' if win else '' %}
{% set native = 'm2w64-' if win else '' %}
package:
name: r-stabledist
version: {{ version|replace("-", "_") }}
source:
fn: stabledist_{{ version }}.tar.gz
url:
- https://cran.r-project.org/src/contrib/stabledist_{{ version }}.tar.gz
- https://cran.r-project.org/src/contrib/Archive/stabledist/stabledist_{{ version }}.tar.gz
sha256: 06c5704d3a3c179fa389675c537c39a006867bc6e4f23dd7e406476ed2c88a69
build:
number: 2
rpaths:
- lib/R/lib/
- lib/
skip: True # [win32]
requirements:
build:
- r-base
- {{ compiler('c') }}
run:
- r-base
test:
commands:
- $R -e "library('stabledist')" # [not win]
- "\\"%R%\\" -e \\"library('stabledist')\\"" # [win]
about:
license_family: GPL3
license_file: '{{ environ["PREFIX"] }}/lib/R/share/licenses/GPL-3'
license_family: MIT
license_file: '{{ environ["PREFIX"] }}/lib/R/share/licenses/MIT'
license_family: LGPL
license_file: '{{ environ["PREFIX"] }}/lib/R/share/licenses/LGPL-2'
license_family: LGPL
license_file: '{{ environ["PREFIX"] }}/lib/R/share/licenses/LGPL-2.1'
license_family: BSD
license_file: '{{ environ["PREFIX"] }}/lib/R/share/licenses/BSD_3_clause'
license_file: '{{ environ["PREFIX"] }}/lib/R/share/licenses/GPL-2'
license_file: '{{ environ["PREFIX"] }}/lib/R/share/licenses/BSD_3_clause'
""" # noqa
sample_noarch = """
{# sample_noarch #}
{% set name = "xpdan" %}
{% set version = "0.3.3" %}
{% set sha256 = "3f1a84f35471aa8e383da3cf4436492d0428da8ff5b02e11074ff65d400dd076" %}
package:
name: {{ name|lower }}
version: {{ version }}
source:
fn: {{ name }}-{{ version }}.tar.gz
url: https://github.com/xpdAcq/{{ name }}/releases/download/{{ version }}/{{ version }}.tar.gz
sha256: {{ sha256 }}
build:
number: 0
script: python -m pip install --no-deps --ignore-installed .
requirements:
build:
- python >=3
- pip
run:
- python >=3
- numpy
- scipy
- matplotlib
- pyyaml
- scikit-beam
- pyfai
- pyxdameraulevenshtein
- xray-vision
- databroker
- bluesky
- streamz_ext
- xpdsim
- shed
- xpdview
- ophyd
- xpdconf
test:
imports:
- xpdan
- xpdan.pipelines
about:
home: http://github.com/xpdAcq/xpdAn
license: BSD-3-Clause
license_family: BSD
license_file: LICENSE
summary: 'Analysis Tools for XPD'
doc_url: http://xpdacq.github.io/xpdAn/
dev_url: http://github.com/xpdAcq/xpdAn
extra:
recipe-maintainers:
- CJ-Wright
""" # noqa
updated_noarch = """
{# updated_noarch #}
{% set name = "xpdan" %}
{% set version = "0.3.3" %}
{% set sha256 = "3f1a84f35471aa8e383da3cf4436492d0428da8ff5b02e11074ff65d400dd076" %}
package:
name: {{ name|lower }}
version: {{ version }}
source:
fn: {{ name }}-{{ version }}.tar.gz
url: https://github.com/xpdAcq/{{ name }}/releases/download/{{ version }}/{{ version }}.tar.gz
sha256: {{ sha256 }}
build:
noarch: python
number: 1
script: python -m pip install --no-deps --ignore-installed .
requirements:
host:
- python >=3
- pip
run:
- python >=3
- numpy
- scipy
- matplotlib
- pyyaml
- scikit-beam
- pyfai
- pyxdameraulevenshtein
- xray-vision
- databroker
- bluesky
- streamz_ext
- xpdsim
- shed
- xpdview
- ophyd
- xpdconf
test:
imports:
- xpdan
- xpdan.pipelines
about:
home: http://github.com/xpdAcq/xpdAn
license: BSD-3-Clause
license_family: BSD
license_file: LICENSE
summary: 'Analysis Tools for XPD'
doc_url: http://xpdacq.github.io/xpdAn/
dev_url: http://github.com/xpdAcq/xpdAn
extra:
recipe-maintainers:
- CJ-Wright
""" # noqa
sample_noarch_space = """
{# sample_noarch_space #}
{% set name = "xpdan" %}
{% set version = "0.3.3" %}
{% set sha256 = "3f1a84f35471aa8e383da3cf4436492d0428da8ff5b02e11074ff65d400dd076" %}
package:
name: {{ name|lower }}
version: {{ version }}
source:
fn: {{ name }}-{{ version }}.tar.gz
url: https://github.com/xpdAcq/{{ name }}/releases/download/{{ version }}/{{ version }}.tar.gz
sha256: {{ sha256 }}
build:
number: 0
script: python -m pip install --no-deps --ignore-installed .
requirements:
build:
- python >=3
- pip
run:
- python >=3
- numpy
- scipy
- matplotlib
- pyyaml
- scikit-beam
- pyfai
- pyxdameraulevenshtein
- xray-vision
- databroker
- bluesky
- streamz_ext
- xpdsim
- shed
- xpdview
- ophyd
- xpdconf
test:
imports:
- xpdan
- xpdan.pipelines
about:
home: http://github.com/xpdAcq/xpdAn
license: BSD-3-Clause
license_family: BSD
license_file: LICENSE
summary: 'Analysis Tools for XPD'
doc_url: http://xpdacq.github.io/xpdAn/
dev_url: http://github.com/xpdAcq/xpdAn
extra:
recipe-maintainers:
- CJ-Wright
""" # noqa
updated_noarch_space = """
{# updated_noarch_space #}
{% set name = "xpdan" %}
{% set version = "0.3.3" %}
{% set sha256 = "3f1a84f35471aa8e383da3cf4436492d0428da8ff5b02e11074ff65d400dd076" %}
package:
name: {{ name|lower }}
version: {{ version }}
source:
fn: {{ name }}-{{ version }}.tar.gz
url: https://github.com/xpdAcq/{{ name }}/releases/download/{{ version }}/{{ version }}.tar.gz
sha256: {{ sha256 }}
build:
noarch: python
number: 1
script: python -m pip install --no-deps --ignore-installed .
requirements:
host:
- python >=3
- pip
run:
- python >=3
- numpy
- scipy
- matplotlib
- pyyaml
- scikit-beam
- pyfai
- pyxdameraulevenshtein
- xray-vision
- databroker
- bluesky
- streamz_ext
- xpdsim
- shed
- xpdview
- ophyd
- xpdconf
test:
imports:
- xpdan
- xpdan.pipelines
about:
home: http://github.com/xpdAcq/xpdAn
license: BSD-3-Clause
license_family: BSD
license_file: LICENSE
summary: 'Analysis Tools for XPD'
doc_url: http://xpdacq.github.io/xpdAn/
dev_url: http://github.com/xpdAcq/xpdAn
extra:
recipe-maintainers:
- CJ-Wright
""" # noqa
sample_pinning = """
{# sample_pinning #}
{% set version = "2.44_01" %}
package:
name: perl-xml-parser
version: {{ version }}
source:
fn: XML-Parser-{{ version }}.tar.gz
url: https://cpan.metacpan.org/authors/id/T/TO/TODDR/XML-Parser-{{ version }}.tar.gz
sha256: 5310ea5c8c707f387589bba8934ab9112463a452f828adf2755792d968b9ac7e
build:
number: 0
skip: True # [win]
requirements:
build:
- toolchain3
- perl 5.22.2.1
- expat 2.2.*
run:
- perl 5.22.2.1
- perl-xml-parser
- expat 2.2.*
test:
imports:
- XML::Parser
- XML::Parser::Expat
- XML::Parser::Style::Debug
- XML::Parser::Style::Objects
- XML::Parser::Style::Stream
- XML::Parser::Style::Subs
- XML::Parser::Style::Tree
about:
home: https://metacpan.org/pod/XML::Parser
# According to http://dev.perl.org/licenses/ Perl5 is licensed either under
# GPL v1 or later or the Artistic License
license: GPL-3.0
license_family: GPL
summary: A perl module for parsing XML documents
extra:
recipe-maintainers:
- kynan
"""
updated_perl = """
{# updated_perl #}
{% set version = "2.44_01" %}
package:
name: perl-xml-parser
version: {{ version }}
source:
fn: XML-Parser-{{ version }}.tar.gz
url: https://cpan.metacpan.org/authors/id/T/TO/TODDR/XML-Parser-{{ version }}.tar.gz
sha256: 5310ea5c8c707f387589bba8934ab9112463a452f828adf2755792d968b9ac7e
build:
number: 1
skip: True # [win]
requirements:
build:
- toolchain3
- perl
- expat 2.2.*
run:
- perl
- perl-xml-parser
- expat 2.2.*
test:
imports:
- XML::Parser
- XML::Parser::Expat
- XML::Parser::Style::Debug
- XML::Parser::Style::Objects
- XML::Parser::Style::Stream
- XML::Parser::Style::Subs
- XML::Parser::Style::Tree
about:
home: https://metacpan.org/pod/XML::Parser
# According to http://dev.perl.org/licenses/ Perl5 is licensed either under
# GPL v1 or later or the Artistic License
license: GPL-3.0
license_family: GPL
summary: A perl module for parsing XML documents
extra:
recipe-maintainers:
- kynan
"""
updated_pinning = """
{# updated_pinning #}
{% set version = "2.44_01" %}
package:
name: perl-xml-parser
version: {{ version }}
source:
fn: XML-Parser-{{ version }}.tar.gz
url: https://cpan.metacpan.org/authors/id/T/TO/TODDR/XML-Parser-{{ version }}.tar.gz
sha256: 5310ea5c8c707f387589bba8934ab9112463a452f828adf2755792d968b9ac7e
build:
number: 1
skip: True # [win]
requirements:
build:
- toolchain3
- perl
- expat
run:
- perl
- perl-xml-parser
- expat
test:
imports:
- XML::Parser
- XML::Parser::Expat
- XML::Parser::Style::Debug
- XML::Parser::Style::Objects
- XML::Parser::Style::Stream
- XML::Parser::Style::Subs
- XML::Parser::Style::Tree
about:
home: https://metacpan.org/pod/XML::Parser
# According to http://dev.perl.org/licenses/ Perl5 is licensed either under
# GPL v1 or later or the Artistic License
license: GPL-3.0
license_family: GPL
summary: A perl module for parsing XML documents
extra:
recipe-maintainers:
- kynan
"""
sample_blas = """
{# sample_blas #}
{% set version = "1.2.1" %}
{% set variant = "openblas" %}
package:
name: scipy
version: {{ version }}
source:
url: https://github.com/scipy/scipy/archive/v{{ version }}.tar.gz
sha256: d4b9c1c1dee37ffd1653fd62ea52587212d3b1570c927f16719fd7c4077c0d0a
build:
number: 0
skip: true # [win]
features:
- blas_{{ variant }}
requirements:
build:
- {{ compiler('fortran') }}
- {{ compiler('c') }}
- {{ compiler('cxx') }}
host:
- python
- setuptools
- cython
- blas 1.1 {{ variant }}
- openblas
- numpy
run:
- python
- blas 1.1 {{ variant }}
- openblas
- {{ pin_compatible('numpy') }}
test:
requires:
- pytest
- mpmath
"""
updated_blas = """
{# updated_blas #}
{% set version = "1.2.1" %}
package:
name: scipy
version: {{ version }}
source:
url: https://github.com/scipy/scipy/archive/v{{ version }}.tar.gz
sha256: d4b9c1c1dee37ffd1653fd62ea52587212d3b1570c927f16719fd7c4077c0d0a
build:
number: 1
skip: true # [win]
features:
requirements:
build:
- {{ compiler('fortran') }}
- {{ compiler('c') }}
- {{ compiler('cxx') }}
host:
- libblas
- libcblas
- python
- setuptools
- cython
- numpy
run:
- python
- {{ pin_compatible('numpy') }}
test:
requires:
- pytest
- mpmath
"""
sample_matplotlib = """
{% set version = "0.9" %}
package:
name: viscm
version: {{ version }}
source:
url: https://pypi.io/packages/source/v/viscm/viscm-{{ version }}.tar.gz
sha256: c770e4b76f726e653d2b7c2c73f71941a88de6eb47ccf8fb8e984b55562d05a2
build:
number: 0
noarch: python
script: python -m pip install --no-deps --ignore-installed .
requirements:
host:
- python
- pip
- numpy
run:
- python
- numpy
- matplotlib
- colorspacious
test:
imports:
- viscm
about:
home: https://github.com/bids/viscm
license: MIT
license_file: LICENSE
license_family: MIT
# license_file: '' we need to an issue upstream to get a license in the source dist.
summary: A colormap tool
extra:
recipe-maintainers:
- kthyng
"""
sample_matplotlib_correct = """
{% set version = "0.9" %}
package:
name: viscm
version: {{ version }}
source:
url: https://pypi.io/packages/source/v/viscm/viscm-{{ version }}.tar.gz
sha256: c770e4b76f726e653d2b7c2c73f71941a88de6eb47ccf8fb8e984b55562d05a2
build:
number: 1
noarch: python
script: python -m pip install --no-deps --ignore-installed .
requirements:
host:
- python
- pip
- numpy
run:
- python
- numpy
- matplotlib-base
- colorspacious
test:
imports:
- viscm
about:
home: https://github.com/bids/viscm
license: MIT
license_file: LICENSE
license_family: MIT
# license_file: '' we need to an issue upstream to get a license in the source dist.
summary: A colormap tool
extra:
recipe-maintainers:
- kthyng
"""
js = JS()
version = Version(set())
# compiler = Compiler()  # legacy Compiler migrator; only referenced by the skipped test_cb3 below
noarch = Noarch()
noarchr = NoarchR()
perl = Pinning(removals={"perl"})
pinning = Pinning()
class _Rebuild(NoFilter, Rebuild):
pass
rebuild = _Rebuild(name="rebuild", cycles=[])
class _BlasRebuild(NoFilter, BlasRebuild):
pass
blas_rebuild = _BlasRebuild(cycles=[])
matplotlib = Replacement(
old_pkg="matplotlib",
new_pkg="matplotlib-base",
rationale=(
"Unless you need `pyqt`, recipes should depend only on " "`matplotlib-base`."
),
pr_limit=5,
)
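# Minimal dependency graph and xonsh environment globals that the migrators and
# the test helper below rely on (e.g. CIRCLE_BUILD_URL for the session context).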
G = nx.DiGraph()
G.add_node("conda", reqs=["python"])
env = builtins.__xonsh__.env # type: ignore
env["GRAPH"] = G
env["CIRCLE_BUILD_URL"] = "hi world"
def run_test_migration(
m,
inp,
output,
kwargs,
prb,
mr_out,
should_filter=False,
tmpdir=None,
):
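    """Shared driver for migrator tests: write ``inp`` as meta.yaml, build the
    migrator session/context, run filter/migrate (plus pre/post piggyback
    migrations), and compare the resulting recipe text to ``output`` with
    jinja comments stripped out.
    """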
mm_ctx = MigratorSessionContext(
graph=G,
smithy_version="",
pinning_version="",
github_username="",
github_password="",
circle_build_url=env["CIRCLE_BUILD_URL"],
)
m_ctx = MigratorContext(mm_ctx, m)
m.bind_to_ctx(m_ctx)
if mr_out:
mr_out.update(bot_rerun=False)
with open(os.path.join(tmpdir, "meta.yaml"), "w") as f:
f.write(inp)
# read the conda-forge.yml
if os.path.exists(os.path.join(tmpdir, "..", "conda-forge.yml")):
with open(os.path.join(tmpdir, "..", "conda-forge.yml")) as fp:
cf_yml = fp.read()
else:
cf_yml = "{}"
# Load the meta.yaml (this is done in the graph)
try:
name = parse_meta_yaml(inp)["package"]["name"]
except Exception:
name = "blah"
pmy = populate_feedstock_attributes(name, {}, inp, cf_yml)
# these are here for legacy migrators
pmy["version"] = pmy["meta_yaml"]["package"]["version"]
pmy["req"] = set()
for k in ["build", "host", "run"]:
req = pmy["meta_yaml"].get("requirements", {}) or {}
_set = req.get(k) or set()
pmy["req"] |= set(_set)
pmy["raw_meta_yaml"] = inp
pmy.update(kwargs)
assert m.filter(pmy) is should_filter
if should_filter:
return pmy
m.run_pre_piggyback_migrations(
tmpdir,
pmy,
hash_type=pmy.get("hash_type", "sha256"),
)
mr = m.migrate(tmpdir, pmy, hash_type=pmy.get("hash_type", "sha256"))
m.run_post_piggyback_migrations(
tmpdir,
pmy,
hash_type=pmy.get("hash_type", "sha256"),
)
assert mr_out == mr
if not mr:
return pmy
pmy.update(PRed=[frozen_to_json_friendly(mr)])
with open(os.path.join(tmpdir, "meta.yaml")) as f:
actual_output = f.read()
# strip jinja comments
pat = re.compile(r"{#.*#}")
actual_output = pat.sub("", actual_output)
output = pat.sub("", output)
assert actual_output == output
if isinstance(m, Compiler):
assert m.messages in m.pr_body(None)
# TODO: fix subgraph here (need this to be xsh file)
elif isinstance(m, Version):
pass
elif isinstance(m, Rebuild):
return pmy
else:
assert prb in m.pr_body(None)
assert m.filter(pmy) is True
return pmy
@pytest.mark.skip
def test_js_migrator(tmpdir):
run_test_migration(
m=js,
inp=sample_js,
output=correct_js,
kwargs={},
prb="Please merge the PR only after the tests have passed.",
mr_out={"migrator_name": "JS", "migrator_version": JS.migrator_version},
tmpdir=tmpdir,
)
@pytest.mark.skip
def test_js_migrator2(tmpdir):
run_test_migration(
m=js,
inp=sample_js2,
output=correct_js2, # noqa
kwargs={},
prb="Please merge the PR only after the tests have passed.",
mr_out={"migrator_name": "JS", "migrator_version": JS.migrator_version},
tmpdir=tmpdir,
)
@pytest.mark.skip
def test_cb3(tmpdir):
run_test_migration(
m=compiler,
inp=sample_cb3,
output=correct_cb3,
kwargs={},
prb="N/A",
mr_out={
"migrator_name": "Compiler",
"migrator_version": Compiler.migrator_version,
},
tmpdir=tmpdir,
)
def test_noarch(tmpdir):
# It seems this injects some bad state somewhere, mostly because it isn't
# valid yaml
run_test_migration(
m=noarch,
inp=sample_noarch,
output=updated_noarch,
kwargs={
"feedstock_name": "xpdan",
"req": [
"python",
"pip",
"numpy",
"scipy",
"matplotlib",
"pyyaml",
"scikit-beam",
"pyfai",
"pyxdameraulevenshtein",
"xray-vision",
"databroker",
"bluesky",
"streamz_ext",
"xpdsim",
"shed",
"xpdview",
"ophyd",
"xpdconf",
],
},
prb="I think this feedstock could be built with noarch.\n"
"This means that the package only needs to be built "
"once, drastically reducing CI usage.\n",
mr_out={"migrator_name": "Noarch", "migrator_version": Noarch.migrator_version},
tmpdir=tmpdir,
)
def test_noarch_space(tmpdir):
# It seems this injects some bad state somewhere, mostly because it isn't
# valid yaml
run_test_migration(
m=noarch,
inp=sample_noarch_space,
output=updated_noarch_space,
kwargs={
"feedstock_name": "xpdan",
"req": [
"python",
"pip",
"numpy",
"scipy",
"matplotlib",
"pyyaml",
"scikit-beam",
"pyfai",
"pyxdameraulevenshtein",
"xray-vision",
"databroker",
"bluesky",
"streamz_ext",
"xpdsim",
"shed",
"xpdview",
"ophyd",
"xpdconf",
],
},
prb="I think this feedstock could be built with noarch.\n"
"This means that the package only needs to be built "
"once, drastically reducing CI usage.\n",
mr_out={"migrator_name": "Noarch", "migrator_version": Noarch.migrator_version},
tmpdir=tmpdir,
)
def test_noarch_space_python(tmpdir):
run_test_migration(
m=noarch,
inp=sample_noarch_space,
output=updated_noarch_space,
kwargs={"feedstock_name": "python"},
prb="I think this feedstock could be built with noarch.\n"
"This means that the package only needs to be built "
"once, drastically reducing CI usage.\n",
mr_out={"migrator_name": "Noarch", "migrator_version": Noarch.migrator_version},
should_filter=True,
tmpdir=tmpdir,
)
def test_perl(tmpdir):
run_test_migration(
m=perl,
inp=sample_pinning,
output=updated_perl,
kwargs={"req": {"toolchain3", "perl", "expat"}},
prb="I noticed that this recipe has version pinnings that may not be needed.",
mr_out={
"migrator_name": "Pinning",
"migrator_version": Pinning.migrator_version,
},
tmpdir=tmpdir,
)
def test_perl_pinning(tmpdir):
run_test_migration(
m=pinning,
inp=sample_pinning,
output=updated_pinning,
kwargs={"req": {"toolchain3", "perl", "expat"}},
prb="perl: 5.22.2.1",
mr_out={
"migrator_name": "Pinning",
"migrator_version": Pinning.migrator_version,
},
tmpdir=tmpdir,
)
def test_nnoarch_r(tmpdir):
run_test_migration(
m=noarchr,
inp=sample_r_base,
output=updated_r_base,
kwargs={"feedstock_name": "r-stabledist"},
prb="I think this feedstock could be built with noarch",
mr_out={
"migrator_name": "NoarchR",
"migrator_version": noarchr.migrator_version,
},
tmpdir=tmpdir,
)
def test_rebuild_r(tmpdir):
run_test_migration(
m=rebuild,
inp=sample_r_base2,
output=updated_r_base2,
kwargs={"feedstock_name": "r-stabledist"},
prb="It is likely this feedstock needs to be rebuilt.",
mr_out={
"migrator_name": "_Rebuild",
"migrator_version": rebuild.migrator_version,
"name": "rebuild",
},
tmpdir=tmpdir,
)
def test_nnoarch_r_licenses(tmpdir):
run_test_migration(
m=noarchr,
inp=sample_r_licenses_noarch,
output=updated_r_licenses_noarch,
kwargs={"feedstock_name": "r-stabledist"},
prb="I think this feedstock could be built with noarch",
mr_out={
"migrator_name": "NoarchR",
"migrator_version": noarchr.migrator_version,
},
tmpdir=tmpdir,
)
def test_blas_rebuild(tmpdir):
run_test_migration(
m=blas_rebuild,
inp=sample_blas,
output=updated_blas,
kwargs={"feedstock_name": "scipy"},
prb="This PR has been triggered in an effort to update for new BLAS scheme.",
mr_out={
"migrator_name": "_BlasRebuild",
"migrator_version": blas_rebuild.migrator_version,
"name": "blas2",
},
tmpdir=tmpdir,
)
def test_generic_replacement(tmpdir):
run_test_migration(
m=matplotlib,
inp=sample_matplotlib,
output=sample_matplotlib_correct,
kwargs={},
prb="I noticed that this recipe depends on `matplotlib` instead of ",
mr_out={
"migrator_name": "Replacement",
"migrator_version": matplotlib.migrator_version,
"name": "matplotlib-to-matplotlib-base",
},
tmpdir=tmpdir,
)
```
#### File: cf-scripts/tests/test_security.py
```python
import os
import subprocess
def test_env_is_protected_against_malicious_recipes(tmpdir, caplog, env_setup):
from conda_forge_tick.xonsh_utils import indir
import logging
from conda_forge_tick.feedstock_parser import populate_feedstock_attributes
malicious_recipe = """\
{% set version = "0" %}
package:
name: muah_ha_ha
version: {{ version }}
source:
url:
- https://{{ os.environ["PASSWORD"][0] }}/{{ os.environ["PASSWORD"][1:] }}
- {{ os.environ['pwd'] }}
sha256: dca77e463c56d42bbf915197c9b95e98913c85bef150d2e1dd18626b8c2c9c32
build:
number: 0
noarch: python
script: python -m pip install --no-deps --ignore-installed .
requirements:
host:
- python
- pip
- numpy
run:
- python
- numpy
- matplotlib
- colorspacious
test:
imports:
- viscm
about:
home: https://github.com/bids/viscm
license: MIT
license_family: MIT
# license_file: '' we need to an issue upstream to get a license in the source dist.
summary: A colormap tool
extra:
recipe-maintainers:
- kthyng
- {{ os.environ["PASSWORD"] }}
""" # noqa
caplog.set_level(
logging.DEBUG,
logger="conda_forge_tick.migrators.version",
)
in_yaml = malicious_recipe
os.makedirs(os.path.join(tmpdir, "recipe"), exist_ok=True)
with open(os.path.join(tmpdir, "recipe", "meta.yaml"), "w") as f:
f.write(in_yaml)
with indir(tmpdir):
subprocess.run(["git", "init"])
pmy = populate_feedstock_attributes("blah", {}, in_yaml, "{}")
# This url gets saved in https://github.com/regro/cf-graph-countyfair
pswd = os.environ.get("TEST_PASSWORD_VAL", "<PASSWORD>")
tst_url = f"https://{pswd[0]}/{pswd[1:]}"
assert pmy["url"][0] != tst_url
assert pmy["url"][1] == "pwd"
```
#### File: cf-scripts/tests/test_utils.py
```python
import os
import json
import pickle
from conda_forge_tick.utils import LazyJson, dumps
def test_lazy_json(tmpdir):
f = os.path.join(tmpdir, "hi.json")
assert not os.path.exists(f)
lj = LazyJson(f)
assert os.path.exists(lj.file_name)
with open(f) as ff:
assert ff.read() == json.dumps({})
lj["hi"] = "world"
assert lj["hi"] == "world"
assert os.path.exists(lj.file_name)
with open(f) as ff:
assert ff.read() == dumps({"hi": "world"})
lj.update({"hi": "globe"})
with open(f) as ff:
assert ff.read() == dumps({"hi": "globe"})
p = pickle.dumps(lj)
lj2 = pickle.loads(p)
assert not getattr(lj2, "_data", None)
assert lj2["hi"] == "globe"
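    # Mutations made inside the "with" context (including nested contexts) are
    # visible in the backing file after the with-block exits.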
with lj as attrs:
attrs.setdefault("lst", []).append("universe")
with open(f) as ff:
assert ff.read() == dumps({"hi": "globe", "lst": ["universe"]})
with lj as attrs:
attrs.setdefault("lst", []).append("universe")
with lj as attrs_again:
attrs_again.setdefault("lst", []).append("universe")
attrs.setdefault("lst", []).append("universe")
with open(f) as ff:
assert ff.read() == dumps({"hi": "globe", "lst": ["universe"] * 4})
with lj as attrs:
with lj as attrs_again:
attrs_again.setdefault("lst2", []).append("universe")
attrs.setdefault("lst2", []).append("universe")
with open(f) as ff:
assert ff.read() == dumps(
{"hi": "globe", "lst": ["universe"] * 4, "lst2": ["universe"] * 2},
)
lj.clear()
with open(f) as ff:
assert ff.read() == dumps({})
``` |
{
"source": "JorgeGarciaIrazabal/pysenv",
"score": 2
} |
#### File: pysenv/senv/shell.py
```python
import os
import signal
import sys
from contextlib import contextmanager
from os import environ
import pexpect
import typer
from clikit.utils.terminal import Terminal
from poetry.utils.shell import Shell
from senv.log import log
WINDOWS = sys.platform == "win32"
@contextmanager
def temp_environ():
environ = dict(os.environ)
try:
yield
finally:
os.environ.clear()
os.environ.update(environ)
class SenvShell(Shell):
def activate(self, command):
if environ.get("SENV_ACTIVE", "0") == "1":
log.info("environment already active")
            raise typer.Abort("environment already active")
environ["SENV_ACTIVE"] = "1"
terminal = Terminal()
with temp_environ():
c = pexpect.spawn(
self._path, ["-i"], dimensions=(terminal.height, terminal.width)
)
if self._name == "zsh":
c.setecho(False)
c.sendline(command)
def resize(sig, data):
terminal = Terminal()
c.setwinsize(terminal.height, terminal.width)
signal.signal(signal.SIGWINCH, resize)
# Interact with the new shell.
c.interact(escape_character=None)
c.close()
environ.pop("SENV_ACTIVE")
sys.exit(c.exitstatus)
```
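Since `temp_environ` above is what keeps the spawned interactive shell from leaking environment changes back into the parent process, a tiny illustrative sketch of its save/restore behaviour may help (the variable name below is an assumption, not taken from the repository):
```python
# Minimal sketch, assuming temp_environ from the module above is in scope.
import os
with temp_environ():
    os.environ["SOME_TEMP_FLAG"] = "1"   # visible only while the context is open
# on exit the snapshot taken at entry is restored, so the flag is gone again
```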
#### File: tests/unit/test_settings_writter.py
```python
from shutil import copyfile
import pytest
from senv.main import app
from senv.pyproject import BuildSystem, PyProject
from senv.tests.conftest import STATIC_PATH
@pytest.fixture()
def temp_pyproject(tmp_path):
simple_pyproject = STATIC_PATH / "simple_pyproject.toml"
temp_path = tmp_path / "simple_pyproject.toml"
copyfile(simple_pyproject, temp_path)
return temp_path
def test_set_config_add_value_to_pyproject(temp_pyproject, cli_runner):
result = cli_runner.invoke(
app,
[
"config",
"-f",
str(temp_pyproject),
"set",
"venv.conda-lock-platforms",
"linux-64",
],
catch_exceptions=False,
)
assert result.exit_code == 0
PyProject.read_toml(temp_pyproject)
assert PyProject.get().senv.venv.conda_lock_platforms == {"linux-64"}
def test_set_config_with_wrong_value_does_not_change_pyproject(
temp_pyproject, cli_runner
):
original_config = PyProject.read_toml(temp_pyproject).dict()
cli_runner.invoke(
app,
[
"-f",
str(temp_pyproject),
"config",
"set",
"conda-path",
"none_existing_path",
],
catch_exceptions=False,
)
new_config = PyProject.read_toml(temp_pyproject).dict()
assert new_config == original_config
def test_remove_config_key_removes_it_from_file(temp_pyproject, cli_runner):
cli_runner.invoke(
app,
[
"config",
"set",
"venv.build-system",
"poetry",
"-f",
str(temp_pyproject),
],
)
cli_runner.invoke(
app,
[
"config",
"remove",
"venv.build-system",
"-f",
str(temp_pyproject),
],
)
assert (
PyProject.read_toml(temp_pyproject).senv.venv.build_system == BuildSystem.CONDA
)
``` |
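The tests above drive senv's `config` subcommands through a `cli_runner` fixture defined elsewhere; a hypothetical standalone sketch, assuming that fixture simply wraps Typer's `CliRunner` (not shown in this excerpt) and that a `pyproject.toml` exists at the given path, would look like:
```python
# Minimal sketch mirroring the invocations exercised by the tests above.
from typer.testing import CliRunner
from senv.main import app
runner = CliRunner()
result = runner.invoke(
    app,
    ["config", "-f", "pyproject.toml", "set", "venv.conda-lock-platforms", "linux-64"],
)
assert result.exit_code == 0
```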
{
"source": "JorgeGarciaIrazabal/WhereAppUBE",
"score": 3
} |
#### File: WhereAppUBE/Classes/InfoTable.py
```python
from collections import OrderedDict
import json
import warnings
from enum.enum import Enum
from ValidateStrings import getUnicode, getStr
from utils.DictPrint import dict2Str
class Order(Enum):
ASC,DESC=False,True
__author__ = 'jgarc'
class InfoTableException(Exception):
pass
class InfoRow():
def __init__(self):
self.dict=OrderedDict()
def __getitem__(self, item):
if isinstance(item, int):
return self.dict.items()[item][1]
return self.dict.__getitem__(item.lower())
def __contains__(self, item):
return self.dict.__contains__(item.lower())
    def __setitem__(self, key, value):
        if isinstance(key, int):
            # dict.items() returns immutable tuples, so assign through the matching key instead
            self.dict[self.dict.keys()[key]] = value
            return
        self.dict.__setitem__(key.lower(), value)
def get(self, k, d=None):
if isinstance(k, int):
try:
return self.dict.items()[k][1]
except:
return d
return self.dict.get(k.lower(), d)
def __str__(self, rowSeparator="\n", columnSeparator="\t"):
return getStr(dict2Str(self.dict))
def __unicode__(self):
return str(self)
def __repr__(self):
return str(self)
class InfoTable():
"""
Structured data for tables (Database tables)
"""
def __init__(self, data=None, rowSeparator="\n", columnSeparator="\t"):
self._columns=list()
self.headers=[]
self._lowerHeaders=[]
"""@type: list of str"""
if data is None:
return
if isinstance(data, list) or isinstance(data, tuple):
self.constructFromArray(data)
elif isinstance(data, (str, unicode)):
self.constructFromString(data, rowSeparator, columnSeparator)
def renameHeader(self,column, newHeader):
column=self.__getColumn__(column)
self.__dict__.pop(self._lowerHeaders[column])
self.headers[column]=newHeader
self._lowerHeaders[column]=newHeader.lower()
self.__dict__[newHeader.lower()]=self[newHeader.lower()]
def constructFromArray(self, data):
"""
@param data: matrix to construct the infoTable
@type data: list of list
"""
self._lowerHeaders = [header.lower() for header in data[0]]
self.headers=data[0]
for i in range(len(self.headers)):
self._columns.append([data[j][i] for j in range(1,len(data))])
for head in self._lowerHeaders:
self.__dict__[head]=self[head]
def constructFromString(self, data, rowSeparator="\n", columnSeparator="\t"):
"""
@param data: string with rowSeparators and columnSeparators to construct the infoTable
@type data: str
"""
data = [row.split(columnSeparator) for row in data.split(rowSeparator)]
self.constructFromArray(data)
def get(self, column=0, row=0):
"""redundant of __getItem__ with the default values"""
return self[column][row]
def fastGet(self,column=0,row=0):
"""
same as get but the column has to be an integer, it should be used if the performance is critical
"""
return self._columns[column][row]
def __getColumn__(self, column=""):
"""
gets the column index from the column name, if column is a valid integer, it returns the integer
@param column: can be a header value or its index
@raise InfoTableException:
"""
if isinstance(column, int) and abs(column) < len(self.headers):
return column
else:
try:
return self._lowerHeaders.index(column.lower())
except:
raise InfoTableException("Column not found:" + str(column))
def __iter__(self):
"""
returns an InfoRow of the row x
@rtype: InfoRow
"""
for x in range(self.size()):
yield self.getRow(x)
def __getitem__(self, item):
"""
if item is a integer, gets the column in the index
if item is a str, it gets the column where the header is the item
@return: an array the all the column values
"""
column=self.__getColumn__(item)
return self._columns[column]
def __setitem__(self, key, value):
column=self.__getColumn__(key)
self._columns[column]=value
def getRow(self, row=0):
if abs(row) > self.size():
raise InfoTableException("Out of bounds, size = " + str(self.size()) + " row= " + str(row))
infoRow = InfoRow()
for column in self._lowerHeaders:
infoRow[column] = self[column][row]
return infoRow
def hasColumn(self,column):
if isinstance(column, int) and abs(column) < len(self.headers):
return True
else:
return column.lower() in self._lowerHeaders
def size(self):
return self.__len__()
def __len__(self):
if len(self.headers) == 0 or len(self._columns) == 0: return 0
return len(self._columns[0])
def set(self, column, row, value):
self[column][row] = value
def fastSet(self, column, row, value):
self._columns[column][row]=value
def getColumn(self, column, join=None):
if join is None:
return self[column]
else:
return join.join([str(cell) for cell in self[column]])
def __str__(self, rowSeparator="\n", columnSeparator="\t"):
string = [columnSeparator.join(self.headers)]
for i in range(self.size()):
string.append([])
for column in range(len(self.headers)):
cell=getUnicode(self.get(column, i))
string[-1].append(cell)
string[-1]=columnSeparator.join(string[-1])
return getStr(rowSeparator.join(string))
def __unicode__(self):
return str(self)
def __repr__(self):
return str(self)
def findAll(self,column,value):
index = []
idx = -1
while True:
try:
idx = self[column].index(value, idx+1)
index.append(idx)
except ValueError:
break
return index
def findFirst(self,column,value, notValue=False):
if not notValue:
return self[column].index(value)
else:
for i,cellValue in enumerate(self[column]):
if cellValue!=value:
return i
raise ValueError("%s is not in list"%str(value))
def getAll(self,column,value, refColumn=None):
index=self.findAll(column,value)
if refColumn is None:
refColumn=column
refColumn=self.__getColumn__(refColumn)
values=[]
for i in index:
values.append(self.fastGet(refColumn,i))
return values
def getFirst(self, column, value, refColumn=None, notValue=False):
if refColumn is None:
refColumn=column
return self.get(refColumn,self.findFirst(column,value, notValue=notValue))
def addColumn(self,column, values=list()):
lowerColumn=column.lower()
if lowerColumn in self._lowerHeaders:
raise Exception("Header already exists in info table")
self.headers.append(column)
self._lowerHeaders.append(lowerColumn)
        if not (isinstance(values,list) or isinstance(values,tuple)):
values=[values]*self.size()
if len(values) > 0 and len(values)!=self.size():
Warning("Values length does not match with infoTable length, creating empty column")
values=[None]*self.size()
elif len(values) == 0:
values=[None]*self.size() if self.size()>0 else []
self._columns.append(values)
def addRow(self):
for i in range(len(self.headers)):
self[i].append(None)
def removeColumns(self, columns):
if not (isinstance(columns,list) or isinstance(columns,tuple)):
columns=[columns]
columns=sorted(set([self.__getColumn__(column) for column in columns if column.lower() in self._lowerHeaders]))
for column in reversed(columns):
self.headers.pop(column)
self._lowerHeaders.pop(column)
self._columns.pop(column)
def sort(self, column, order=Order.ASC):
data=self[column]
sortedIndex=[i[0] for i in sorted(enumerate(data), key=lambda x:x[1], reverse=order.value)]
for c in range(len(self.headers)):
self[c]=[self[c][sortedIndex[j]] for j in range(len(self[c]))]
def removeRows(self, rows):
if not (isinstance(rows,list) or isinstance(rows,tuple)):
rows=[rows]
for row in rows:
for col in range(len(self.headers)):
self[col].pop(row)
def toJson(self):
j={}
for head in self.headers:
j[head]=[getUnicode(d) for d in self.getColumn(head)]
return json.dumps(j)
```
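A brief, hypothetical usage sketch of the `InfoTable` class above shows how the string constructor and the column/row accessors fit together; the data string, names and expected values here are illustrative assumptions, not part of the repository:
```python
# Minimal sketch, assuming InfoTable from the module above is in scope (Python 2, like the module).
data = "name\tage\nana\t30\nbob\t25"            # header row plus two data rows
table = InfoTable(data)                          # parsed with "\n" rows and "\t" columns
table.headers                                    # ['name', 'age']
table.getColumn("age")                           # ['30', '25'] (cells stay as strings)
table.getFirst("name", "bob", refColumn="age")   # '25'
names = [row["name"] for row in table]           # ['ana', 'bob'] via InfoRow iteration
```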
#### File: WhereAppUBE/Classes/QueryManager.py
```python
from Classes.QueryTable import QueryTable
import MySQLdb
__author__ = 'jorge'
class QueryManager():
def __init__(self):
self.init()
def init(self):
self.connection = MySQLdb.connect(user="root", passwd="", db="WAU",
unix_socket="/opt/lampp/var/mysql/mysql.sock",)
def select(self, query, parameters=list()):
cursor = self.connection.cursor()
cursor.execute("select " + query, parameters)
return cursor
def selectQt(self, query, parameters=list()):
return QueryTable(self.select(query, parameters))
def insertUser(self, email, phoneNumber, GCMID):
cursor = self.connection.cursor()
cursor.callproc("LogIn",(email,phoneNumber,GCMID))
qt = QueryTable(cursor)
self.connection.commit()
return qt
```
#### File: JorgeGarciaIrazabal/WhereAppUBE/MainServer.py
```python
import sys, json, xmpp, random, string
SERVER = 'gcm.googleapis.com'
PORT = 5235
USERNAME = "238220266388"
PASSWORD = "<KEY>"
REGISTRATION_ID = "Registration Id of the target device"
unacked_messages_quota = 100
send_queue = []
# Return a random alphanumerical id
def random_id():
rid = ''
for x in range(8): rid += random.choice(string.ascii_letters + string.digits)
return rid
def message_callback(session, message):
global unacked_messages_quota
gcm = message.getTags('gcm')
if gcm:
gcm_json = gcm[0].getData()
msg = json.loads(gcm_json)
if not msg.has_key('message_type'):
# Acknowledge the incoming message immediately.
send({'to': msg['from'],
'message_type': 'ack',
'message_id': msg['message_id']})
# Queue a response back to the server.
if msg.has_key('from'):
# Send a dummy echo response back to the app that sent the upstream message.
send_queue.append({'to': msg['from'],
'message_id': random_id(),
'data': {'pong': 1}})
elif msg['message_type'] == 'ack' or msg['message_type'] == 'nack':
unacked_messages_quota += 1
def send(json_dict):
template = ("<message><gcm xmlns='google:mobile:data'>{1}</gcm></message>")
client.send(xmpp.protocol.Message(
node=template.format(client.Bind.bound[0], json.dumps(json_dict))))
def flush_queued_messages():
global unacked_messages_quota
while len(send_queue) and unacked_messages_quota > 0:
send(send_queue.pop(0))
unacked_messages_quota -= 1
client = xmpp.Client('gcm.googleapis.com', debug=['socket'])
client.connect(server=(SERVER,PORT), secure=1, use_srv=False)
auth = client.auth(USERNAME, PASSWORD)
if not auth:
print 'Authentication failed!'
sys.exit(1)
client.RegisterHandler('message', message_callback)
send_queue.append({'to': REGISTRATION_ID,
'message_id': 'reg_id',
'data': {'message_destination': 'RegId',
'message_id': random_id()}})
while True:
client.Process(1)
flush_queued_messages()
``` |
{
"source": "jorgegaticav/pytorch-GAT",
"score": 3
} |
#### File: jorgegaticav/pytorch-GAT/prediction_script_ki.py
```python
import argparse
import torch
from models.definitions.GAT import GAT
from utils.data_loading import load_graph_data, load_ki_graph_data
from utils.constants import *
# Simple decorator function so that I don't have to pass arguments that don't change from epoch to epoch
def predict(gat, node_features, edge_index, pred_indices, slide_name):
node_dim = 0 # node axis
# train_labels = node_labels.index_select(node_dim, train_indices)
# node_features shape = (N, FIN), edge_index shape = (2, E)
graph_data = (node_features, edge_index) # I pack data into tuples because GAT uses nn.Sequential which requires it
def main_loop():
# Certain modules behave differently depending on whether we're training the model or not.
# e.g. nn.Dropout - we only want to drop model weights during the training.
gat.eval()
# Do a forwards pass and extract only the relevant node scores (train/val or test ones)
# Note: [0] just extracts the node_features part of the data (index 1 contains the edge_index)
# shape = (N, C) where N is the number of nodes in the split (train/val/test) and C is the number of classes
nodes_unnormalized_scores = gat(graph_data)[0].index_select(node_dim, pred_indices)
# Finds the index of maximum (unnormalized) score for every node and that's the class prediction for that node.
# Compare those to true (ground truth) labels and find the fraction of correct predictions -> accuracy metric.
class_predictions = torch.argmax(nodes_unnormalized_scores, dim=-1)
print(f'exporting csv: predictions/{slide_name}_gat_prediction.csv')
class_predictions.cpu().numpy().tofile(f'predictions/{slide_name}_gat_prediction.csv', sep=',')
print('done!')
return main_loop # return the decorated function
ki_prediction_paths = [
# train
['P01_1_1', 'P01_1_1_delaunay_forGAT_pred_edges.csv', 'P01_1_1_delaunay_forGAT_pred_nodes.csv'],
['N10_1_1', 'N10_1_1_delaunay_forGAT_pred_edges.csv', 'N10_1_1_delaunay_forGAT_pred_nodes.csv'],
['N10_1_2', 'N10_1_2_delaunay_forGAT_pred_edges.csv', 'N10_1_2_delaunay_forGAT_pred_nodes.csv'],
['N10_2_1', 'N10_2_1_delaunay_forGAT_pred_edges.csv', 'N10_2_1_delaunay_forGAT_pred_nodes.csv'],
['N10_2_2', 'N10_2_2_delaunay_forGAT_pred_edges.csv', 'N10_2_2_delaunay_forGAT_pred_nodes.csv'],
['N10_3_1', 'N10_3_1_delaunay_forGAT_pred_edges.csv', 'N10_3_1_delaunay_forGAT_pred_nodes.csv'],
['N10_3_2', 'N10_3_2_delaunay_forGAT_pred_edges.csv', 'N10_3_2_delaunay_forGAT_pred_nodes.csv'],
['N10_4_1', 'N10_4_1_delaunay_forGAT_pred_edges.csv', 'N10_4_1_delaunay_forGAT_pred_nodes.csv'],
['P7_HE_Default_Extended_1_1', 'P7_HE_Default_Extended_1_1_delaunay_forGAT_pred_edges.csv',
'P7_HE_Default_Extended_1_1_delaunay_forGAT_pred_nodes.csv'],
['P7_HE_Default_Extended_3_2', 'P7_HE_Default_Extended_3_2_delaunay_forGAT_pred_edges.csv',
'P7_HE_Default_Extended_3_2_delaunay_forGAT_pred_nodes.csv'],
['P7_HE_Default_Extended_4_2', 'P7_HE_Default_Extended_4_2_delaunay_forGAT_pred_edges.csv',
'P7_HE_Default_Extended_4_2_delaunay_forGAT_pred_nodes.csv'],
['P7_HE_Default_Extended_5_2', 'P7_HE_Default_Extended_5_2_delaunay_forGAT_pred_edges.csv',
'P7_HE_Default_Extended_5_2_delaunay_forGAT_pred_nodes.csv'],
['P11_1_1', 'P11_1_1_delaunay_forGAT_pred_edges.csv', 'P11_1_1_delaunay_forGAT_pred_nodes.csv'],
['P9_1_1', 'P9_1_1_delaunay_forGAT_pred_edges.csv', 'P9_1_1_delaunay_forGAT_pred_nodes.csv'],
['P9_3_1', 'P9_3_1_delaunay_forGAT_pred_edges.csv', 'P9_3_1_delaunay_forGAT_pred_nodes.csv'],
['P20_6_1', 'P20_6_1_delaunay_forGAT_pred_edges.csv',
'P20_6_1_delaunay_forGAT_pred_nodes.csv'],
['P19_1_1', 'P19_1_1_delaunay_forGAT_pred_edges.csv', 'P19_1_1_delaunay_forGAT_pred_nodes.csv'],
['P19_3_2', 'P19_3_2_delaunay_forGAT_pred_edges.csv',
'P19_3_2_delaunay_forGAT_pred_nodes.csv'],
# val
['N10_4_2', 'N10_4_2_delaunay_forGAT_pred_edges.csv', 'N10_4_2_delaunay_forGAT_pred_nodes.csv'],
['N10_5_2', 'N10_5_2_delaunay_forGAT_pred_edges.csv', 'N10_5_2_delaunay_forGAT_pred_nodes.csv'],
['N10_6_2', 'N10_6_2_delaunay_forGAT_pred_edges.csv', 'N10_6_2_delaunay_forGAT_pred_nodes.csv'],
['P19_2_1', 'P19_2_1_delaunay_forGAT_pred_edges.csv', 'P19_2_1_delaunay_forGAT_pred_nodes.csv'],
['P9_4_1', 'P9_4_1_delaunay_forGAT_pred_edges.csv', 'P9_4_1_delaunay_forGAT_pred_nodes.csv'],
['P7_HE_Default_Extended_2_1', 'P7_HE_Default_Extended_2_1_delaunay_forGAT_pred_edges.csv',
'P7_HE_Default_Extended_2_1_delaunay_forGAT_pred_nodes.csv'],
# test
['P9_2_1', 'P9_2_1_delaunay_forGAT_pred_edges.csv', 'P9_2_1_delaunay_forGAT_pred_nodes.csv'],
['P7_HE_Default_Extended_2_2', 'P7_HE_Default_Extended_2_2_delaunay_forGAT_pred_edges.csv',
'P7_HE_Default_Extended_2_2_delaunay_forGAT_pred_nodes.csv'],
['P7_HE_Default_Extended_3_1', 'P7_HE_Default_Extended_3_1_delaunay_forGAT_pred_edges.csv',
'P7_HE_Default_Extended_3_1_delaunay_forGAT_pred_nodes.csv'],
['P20_5_1', 'P20_5_1_delaunay_forGAT_pred_edges.csv', 'P20_5_1_delaunay_forGAT_pred_nodes.csv'],
['P19_3_1', 'P19_3_1_delaunay_forGAT_pred_edges.csv', 'P19_3_1_delaunay_forGAT_pred_nodes.csv'],
['P13_1_1', 'P13_1_1_delaunay_forGAT_pred_edges.csv', 'P13_1_1_delaunay_forGAT_pred_nodes.csv'],
# ['P7_HE_Default_Extended_4_1', 'P7_HE_Default_Extended_4_1_delaunay_forGAT_pred_edges.csv',
# 'P7_HE_Default_Extended_4_1_delaunay_forGAT_pred_nodes.csv'],
['P13_2_2', 'P13_2_2_delaunay_forGAT_pred_edges.csv', 'P13_2_2_delaunay_forGAT_pred_nodes.csv'],
['N10_7_2', 'N10_7_2_delaunay_forGAT_pred_edges.csv', 'N10_7_2_delaunay_forGAT_pred_nodes.csv'],
['N10_7_3', 'N10_7_3_delaunay_forGAT_pred_edges.csv', 'N10_7_3_delaunay_forGAT_pred_nodes.csv'],
['N10_8_2', 'N10_8_2_delaunay_forGAT_pred_edges.csv', 'N10_8_2_delaunay_forGAT_pred_nodes.csv'],
['N10_8_3', 'N10_8_3_delaunay_forGAT_pred_edges.csv', 'N10_8_3_delaunay_forGAT_pred_nodes.csv'],
['P11_1_2', 'P11_1_2_delaunay_forGAT_pred_edges.csv', 'P11_1_2_delaunay_forGAT_pred_nodes.csv'],
['P11_2_2', 'P11_2_2_delaunay_forGAT_pred_edges.csv', 'P11_2_2_delaunay_forGAT_pred_nodes.csv'],
]
def predict_gat_ki(config):
device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # checking whether you have a GPU, I hope so!
# Step 2: prepare the model
gat = GAT(num_of_layers=config['num_of_layers'],
num_heads_per_layer=config['num_heads_per_layer'],
num_features_per_layer=config['num_features_per_layer'],
add_skip_connection=config['add_skip_connection'],
bias=config['bias'],
dropout=config['dropout'],
layer_type=config['layer_type'],
log_attention_weights=False).to(device) # TODO
gat.load_state_dict(torch.load('./models/binaries/gat_KI_000087.pth')['state_dict']) # 0.83 acc
# Step 1: load the graph data
for paths in ki_prediction_paths:
print(f'loading {paths[0]}')
node_features, node_labels, edge_index, pred_indices = \
load_ki_graph_data(device, paths, '')
print(f'loaded!')
slide_name = paths[0] # TODO
main_loop = predict(gat, node_features, edge_index, pred_indices, slide_name)
# Prediction
try:
main_loop()
except Exception as e: # "patience has run out" exception :O
print(str(e))
def get_prediction_args():
parser = argparse.ArgumentParser()
# Dataset related
parser.add_argument("--dataset_name", choices=[el.name for el in DatasetType], help='dataset to use for training',
default=DatasetType.KI.name)
# Logging/debugging/checkpoint related (helps a lot with experimentation)
args = parser.parse_args()
# Model architecture related
gat_config = {
"num_of_layers": 2, # GNNs, contrary to CNNs, are often shallow (it ultimately depends on the graph properties)
# "num_heads_per_layer": [8, 1],
"num_heads_per_layer": [8, 1],
"num_features_per_layer": [KI_NUM_INPUT_FEATURES, 8, KI_NUM_CLASSES],
"add_skip_connection": True, # hurts perf on Cora
"bias": False, # result is not so sensitive to bias
# "dropout": 0.6, # result is sensitive to dropout
"dropout": 0, # result is sensitive to dropout
"layer_type": LayerType.IMP3 # fastest implementation enabled by default
}
# Wrapping training configuration into a dictionary
prediction_config = dict()
for arg in vars(args):
prediction_config[arg] = getattr(args, arg)
# Add additional config information
prediction_config.update(gat_config)
return prediction_config
if __name__ == '__main__':
# Train the graph attention network (GAT)
predict_gat_ki(get_prediction_args())
``` |
{
"source": "jorgegil96/Yargi",
"score": 2
} |
#### File: Yargi/src/parser.py
```python
import ply.lex as lex
import ply.yacc as yacc
from src.semantics.model import *
keywords = {
'if': 'IF',
'else': 'ELSE',
'and': 'AND',
'or': 'OR',
'int': 'INT',
'float': 'FLOAT',
'bool': 'BOOL',
'string': 'STRING',
'fun': 'FUN',
'when': 'WHEN',
'for': 'FOR',
'while': 'WHILE',
'in': 'IN',
'return': 'RETURN',
'write': 'WRITE',
'read': 'READ',
'global': 'GLOBAL',
'data': 'DATA',
'class': 'CLASS',
'interface': 'INTERFACE',
'list': 'LIST',
'range': 'RANGE',
'main': 'MAIN',
'private': 'PRIVATE'
}
tokens = [
'INTNUM',
'FLOATNUM',
'TRUE',
'FALSE',
'PARIZQ',
'PARDER',
'LLAVEIZQ',
'LLAVEDER',
'MAS',
'MENOS',
'POR',
'SOBRE',
'DOSPUNTOS',
'COMA',
'COLON',
'MAYORQUE',
'MENORQUE',
'DIFERENTE',
'IGUALIGUAL',
'MENOROIGUAL',
'MAYOROIGUAL',
'COMENTARIOS',
'CORCHIZQ',
'CORCHDER',
'COMILLAS',
'PUNTO',
'IGUAL',
'PUNTOSRANGO',
'FLECHITA',
'STRINGVAL',
'NULL',
'ID',
'CID',
'EOL'
] + list(keywords.values())
t_PARIZQ = r'\('
t_PARDER = r'\)'
t_LLAVEIZQ = r'\{'
t_LLAVEDER = r'\}'
t_MAS = r'\+'
t_MENOS = r'\-'
t_POR = r'\*'
t_SOBRE = r'\/'
t_DOSPUNTOS = r'\:'
t_COMA = r'\,'
t_COLON = r'\;'
t_MAYORQUE = r'\>'
t_MENORQUE = r'\<'
t_DIFERENTE = r'\!='
t_IGUALIGUAL = r'\=='
t_MENOROIGUAL = r'\<='
t_MAYOROIGUAL = r'\>='
t_COMENTARIOS = r'\//'
t_CORCHIZQ = r'\['
t_CORCHDER = r'\]'
t_COMILLAS = r'\"'
t_PUNTO = r'\.'
t_IGUAL = r'='
t_PUNTOSRANGO = r'\.\.'
t_FLECHITA = r'\-\>'
def t_FLOATNUM(token):
r'[0-9]+\.[0-9]+'
token.type = keywords.get(token.value, 'FLOATNUM')
token.value = float(token.value)
return token
def t_INTNUM(token):
r'[0-9]+'
token.type = keywords.get(token.value, 'INTNUM')
token.value = int(token.value)
return token
def t_TRUE(token):
'true'
token.type = keywords.get(token.value, 'TRUE')
token.value = True
return token
def t_FALSE(token):
'false'
token.type = keywords.get(token.value, 'FALSE')
token.value = False
return token
def t_NULL(token):
'null'
token.type = keywords.get(token.value, 'NULL')
token.value = None
return token
def t_CID(token):
    r'[A-Z][a-zA-Z0-9]*'
token.type = keywords.get(token.value, 'CID')
return token
def t_ID(token):
    r'[a-zA-Z][a-zA-Z0-9]*'
token.type = keywords.get(token.value, 'ID')
return token
def t_STRINGVAL(token):
r'[\"][^"]*[\"]'
token.type = keywords.get(token.value, 'STRINGVAL')
token.value = str(token.value)[1:-1]
return token
t_ignore = " \t"
def t_EOL(token):
r'\n+'
token.lexer.lineno += token.value.count("\n")
'''
Parses a series of interfaces (0 or more), followed by a series of classes (1 or more).
Returns a Pair containing a list of interfaces and a list of classes.
'''
def p_file(p):
'''
file : interface_r class classr
'''
p[0] = p[1], [p[2]] + p[3]
'''
Parses a series of interfaces.
An interface is defined with the following syntax:
interface MyInterface {
fun foo();
    fun bar(int x);
}
Returns a list containing the available interfaces or an empty list if none were declared.
'''
def p_interface_r(p):
'''
interface_r : INTERFACE CID interface_body interface_r
| empty
'''
if len(p) == 2:
p[0] = []
else:
p[0] = [Interface(p[2], p[3])] + p[4]
def p_interface_body(p):
'''
interface_body : LLAVEIZQ interface_fun interface_fun_r LLAVEDER
'''
p[0] = [p[2]] + p[3]
def p_interface_fun(p):
'''
interface_fun : FUN ID PARIZQ fun2 PARDER COLON
'''
p[0] = InterfaceFun(p[2], p[4])
def p_interface_fun_r(p):
'''
interface_fun_r : interface_fun interface_fun_r
| empty
'''
if len(p) == 2:
p[0] = []
else:
p[0] = [p[1]] + p[2]
def p_classr(p):
'''
classr : class classr
| empty
'''
if len(p) == 2:
p[0] = []
else:
p[0] = [p[1]] + p[2]
'''
Parses a class or a data class.
A class follows the syntax:
class MyClass() : MyInterface, MySuperClass() {
< functions >
< main >
}
A data classes follows the syntax:
data class MyDataClass(int x, bool y)
Returns a Class object containing the information of the declared class.
'''
def p_class(p):
'''
class : CLASS CID classparams class2 body
| DATA CLASS CID classparams
'''
if len(p) == 6:
interfaces, parent = p[4]
p[0] = Class(name=p[2], members=p[3], body=p[5], class_parent=parent, interfaces=interfaces)
else:
p[0] = Class(name=p[3], members=p[4], body=None, class_parent=None, interfaces=[])
def p_class2(p):
'''
class2 : DOSPUNTOS class_extras
| empty
'''
if len(p) == 2:
p[0] = [], None
elif len(p) == 3:
p[0] = p[2]
def p_class_extras(p):
'''
class_extras : CID class_extras_2
'''
arg1, arg2, arg3, type = p[2]
if type == "SUPERCLASS":
p[0] = [], ClassParent(p[1], arg2)
elif type == "BOTH":
p[0] = [p[1]] + arg2, arg3
else:
raise Exception("Illegal state")
def p_class_extras_2(p):
'''
class_extras_2 : COMA CID class_extras_2
| PARIZQ vars2 PARDER
'''
type = p.slice[1].type
if type == "PARIZQ":
p[0] = p[1], p[2], p[3], "SUPERCLASS"
else:
arg1, arg2, arg3, type = p[3]
if type == "SUPERCLASS":
p[0] = None, [], ClassParent(p[2], arg2), "BOTH"
elif type == "BOTH":
p[0] = None, [p[2]] + arg2, arg3, "BOTH"
else:
raise Exception("Illegal state")
def p_classparams(p):
'''
classparams : PARIZQ classparams2 PARDER
| empty
'''
if len(p) == 2:
p[0] = []
else:
p[0] = p[2]
def p_classparams2(p):
'''
classparams2 : vars3 tipo ID classparams3
| empty
'''
if len(p) == 2:
p[0] = []
else:
p[0] = [VarDeclaration(p[3], p[2], p[1])] + p[4]
def p_classparams3(p):
'''
classparams3 : COMA vars3 tipo ID classparams3
| empty
'''
if len(p) == 2:
p[0] = []
else:
p[0] = [VarDeclaration(p[4], p[3], p[2])] + p[5]
def p_varcte(p):
'''
varcte : ID
| INTNUM
| FLOATNUM
| TRUE
| FALSE
| STRINGVAL
| NULL
| ID CORCHIZQ varcte CORCHDER
| ID PUNTO ID varcte_param_fun
| ID PARIZQ llamada_param PARDER
'''
if len(p) == 2:
type = p.slice[1].type
p[0] = ConstantVar(p[1], type)
elif len(p) == 5:
type = p.slice[2].type
if type == "PARIZQ":
p[0] = FunCall(p[1], p[3])
else:
if p[4] is None:
p[0] = ObjectMember(p[1], p[3])
else:
p[0] = FunCall(p[3], p[4], p[1])
def p_varcte_param_fun(p):
'''
varcte_param_fun : PARIZQ llamada_param PARDER
| empty
'''
if len(p) == 2:
p[0] = None
else:
p[0] = p[2]
def p_expresion(p):
'''
expresion : megaexp
'''
p[0] = p[1]
def p_expresionr(p):
'''
expresionr : COMA expresion expresionr
| empty
'''
if len(p) == 2:
p[0] = []
else:
p[0] = [p[2]] + p[3]
def p_expresion2(p):
'''
expresion2 : expresion expresionr
| empty
'''
def p_superexp(p):
'''
superexp : exp oplog
'''
p[0] = RelationalOperation(p[1], p[2])
def p_oplog(p):
'''
oplog : MAYORQUE exp
| MENORQUE exp
| DIFERENTE exp
| MAYOROIGUAL exp
| MENOROIGUAL exp
| IGUALIGUAL exp
| empty
'''
if len(p) == 2:
p[0] = None
else:
p[0] = RelationalOperand(p[1], p[2])
def p_megaexp(p):
'''
megaexp : superexp megaexpr
'''
p[0] = LogicalOperation(p[1], p[2])
def p_megaexpr(p):
'''
megaexpr : AND superexp megaexpr
| OR superexp megaexpr
| empty
'''
if len(p) == 2:
p[0] = None
else:
p[0] = LogicalOperand(p[1], p[2], p[3])
def p_vars(p):
'''
vars : vars3 tipo vars2 COLON
| vars3 tipo LIST vars2 COLON
'''
    variables = []
    visibility = p[1]
    type = p[2]
    # in the LIST production the identifier list is p[4], not p[3]
    names = p[3] if len(p) == 5 else p[4]
    for var in names:
        variables.append(VarDeclaration(var, type, visibility))
    p[0] = variables
def p_varsr(p):
'''
varsr : COMA ID varsr
| empty
'''
if len(p) == 2:
p[0] = []
else:
p[0] = [p[2]] + p[3]
def p_vars2(p):
'''
vars2 : ID varsr
'''
p[0] = [p[1]] + p[2]
def p_vars3(p):
'''
vars3 : PRIVATE
| empty
'''
p[0] = p[1]
def p_estatuto(p):
'''
estatuto : asignacion estatuto
| condicion estatuto
| escritura estatuto
| for estatuto
| while estatuto
| when estatuto
| llamada estatuto
| obj_call estatuto
| empty
'''
if len(p) == 2:
p[0] = []
else:
p[0] = [p[1]] + p[2]
def p_asignacion(p):
'''
asignacion : ID asignacion3 IGUAL asignacion2 COLON
'''
p[0] = Assignment(p[1], p[4])
def p_asignacion2(p):
'''
asignacion2 : expresion
| CORCHDER expresion asignacion2r CORCHIZQ
| READ PARIZQ assign_read PARDER
| CID PARIZQ class_call_args expresionr PARDER
'''
if len(p) == 2:
p[0] = p[1]
elif len(p) == 5:
p[0] = Read(p[3])
elif len(p) == 6:
p[0] = NewObject(p[1], p[3] + p[4])
def p_class_call_args(p):
'''
class_call_args : expresion
| empty
'''
if p[1] is None:
p[0] = []
else:
p[0] = [p[1]]
def p_assign_read(p):
'''
assign_read : STRINGVAL
| empty
'''
p[0] = p[1]
def p_asignacion2r(p):
'''
asignacion2r : COMA expresion asignacion2r
| empty
'''
def p_asignacion3(p):
'''
asignacion3 : CORCHIZQ expresion CORCHDER
| PUNTO ID
| empty
'''
def p_condicion(p):
'''
condicion : IF condicion2 estatutor
'''
base_exp, stmts = p[2]
p[0] = If(base_exp, stmts, p[3])
def p_condicion2(p):
'''
condicion2 : PARIZQ expresion PARDER bloque
'''
p[0] = p[2], p[4]
def p_condicionr(p):
'''
condicionr : ELSE IF condicion2
| empty
'''
def p_bloque(p):
'''
bloque : LLAVEIZQ estatuto bloque2 LLAVEDER
'''
p[0] = p[2]
def p_bloque2(p):
'''
bloque2 : RETURN bloque3
| empty
'''
if len(p) == 2:
p[0] = None
else:
p[0] = p[2]
def p_bloque3(p):
'''
bloque3 : expresion COLON
| empty
'''
if len(p) == 2:
p[0] = None
else:
p[0] = p[1]
def p_estatutor(p):
'''
estatutor : ELSE bloque
| empty
'''
if len(p) == 2:
p[0] = []
else:
p[0] = p[2]
def p_escritura(p):
'''
escritura : WRITE PARIZQ esc1 esc2 PARDER COLON
'''
p[0] = Write(p[3] + p[4])
def p_esc1(p):
'''
esc1 : expresion
| STRING
'''
p[0] = [p[1]]
def p_esc2(p):
'''
esc2 : COMA esc1 esc2
| empty
'''
if len(p) == 2:
p[0] = []
else:
p[0] = p[2] + p[3]
def p_tipo(p):
'''
tipo : INT
| FLOAT
| BOOL
| STRING
| CID
'''
p[0] = p[1]
def p_factor(p):
'''
factor : PARIZQ expresion PARDER
| factor2 varcte
'''
    if len(p) == 4:
        # parenthesized expression: propagate the inner expression unchanged
        p[0] = p[2]
    elif p[1] is None:
        p[0] = p[2]
    else:
        p[0] = ArithmeticOperand(p[1], p[2])
def p_terminor(p):
'''
terminor : POR factor terminor
| SOBRE factor terminor
| empty
'''
if len(p) == 2:
p[0] = None
else:
type = p.slice[1].type
if type == 'POR':
p[0] = TerminoR(operator.mul, p[2], p[3])
elif type == 'SOBRE':
p[0] = TerminoR(operator.floordiv, p[2], p[3])
else:
raise Exception("Invalid operator type %s in terminor" % type)
def p_termino(p):
'''
termino : factor terminor
'''
p[0] = Termino(p[1], p[2])
def p_exp(p):
'''
exp : termino expr
'''
p[0] = Exp(p[1], p[2])
def p_expr(p):
'''
expr : MAS termino expr
| MENOS termino expr
| empty
'''
if len(p) == 2:
p[0] = None
else:
type = p.slice[1].type
if type == 'MAS':
p[0] = ExpR(operator.add, p[2], p[3])
elif type == 'MENOS':
p[0] = ExpR(operator.sub, p[2], p[3])
else:
raise Exception("Invalid operator type %s in expr" % type)
def p_varcter(p):
'''
varcter : COMA varcte varcter
| empty
'''
def p_factor2(p):
'''
factor2 : MAS
| MENOS
| empty
'''
if p[1] is None:
p[0] = None
else:
type = p.slice[1].type
if type == 'MAS':
p[0] = operator.add
elif type == 'MENOS':
p[0] = operator.sub
else:
raise Exception("Invalid operator type %s in factor2" % type)
def p_for(p):
'''
for : FOR PARIZQ ID IN for2 PARDER bloque
'''
p[0] = ForIn(p[3], p[5], p[7])
def p_for2(p):
'''
for2 : ID
| range
'''
p[0] = p[1]
def p_range(p):
'''
range : INTNUM PUNTOSRANGO INTNUM
| ID PUNTOSRANGO ID
| ID PUNTOSRANGO INTNUM
| INTNUM PUNTOSRANGO ID
'''
type = p.slice[1].type
type2 = p.slice[3].type
p[0] = Range(ConstantVar(p[1], type), ConstantVar(p[3], type2))
def p_while(p):
'''
while : WHILE PARIZQ expresion PARDER bloque
'''
p[0] = While(p[3], p[5])
def p_when(p):
'''
when : WHEN LLAVEIZQ when2 LLAVEDER
'''
p[0] = p[3]
def p_when2(p):
'''
when2 : expresion FLECHITA bloque when2
| ELSE FLECHITA bloque
| empty
'''
if len(p) == 2:
p[0] = None
elif len(p) == 4:
p[0] = WhenBranch(None, p[3], None)
else:
p[0] = WhenBranch(p[1], p[3], p[4])
def p_fun(p):
'''
fun : vars3 FUN ID PARIZQ fun2 PARDER fun3 funbody
'''
body: FunBody = p[8]
body.params = p[5]
p[0] = Fun(name=p[3], type=p[7], visibility=p[1], body=body)
def p_fun2(p):
'''
fun2 : tipo ID funparamr
| empty
'''
if len(p) == 2:
p[0] = []
else:
p[0] = [VarDeclaration(name=p[2], type=p[1], visibility=None)] + p[3]
def p_funparamr(p):
'''
funparamr : COMA tipo ID funparamr
| empty
'''
if len(p) == 2:
p[0] = []
else:
p[0] = [VarDeclaration(name=p[3], type=p[2], visibility=None)] + p[4]
def p_fun3(p):
'''
fun3 : DOSPUNTOS tipo
| empty
'''
if len(p) == 2:
p[0] = "void"
else:
p[0] = p[2]
def p_funbody(p):
'''
funbody : LLAVEIZQ opc1 opc2 bloque2 LLAVEDER
'''
p[0] = FunBody([], p[2], p[3], p[4])
def p_opc1(p):
'''
opc1 : vars multvarsdecl
| empty
'''
if p[1] is None:
p[0] = []
else:
p[0] = p[1] + p[2]
def p_opc2(p):
'''
opc2 : estatuto
| empty
'''
    if p[1] is None:
        p[0] = []
    else:
        p[0] = p[1]
def p_body(p):
'''
body : LLAVEIZQ body2 funr body_main LLAVEDER
| empty
'''
if len(p) == 2:
p[0] = None
else:
p[0] = ClassBody(vars=p[2], funs=p[3], main=p[4])
def p_body_main(p):
'''
body_main : MAIN PARIZQ PARDER mainbloque
| empty
'''
if len(p) == 2:
p[0] = None
else:
p[0] = p[4]
def p_body2(p):
'''
body2 : vars multvarsdecl
| empty
'''
if p[1] is None:
p[0] = []
else:
p[0] = p[1] + p[2]
def p_mainbloque(p):
'''
mainbloque : LLAVEIZQ body2 estatuto LLAVEDER
'''
p[0] = Main(p[2], p[3])
def p_multvarsdecl(p):
'''
multvarsdecl : vars multvarsdecl
| empty
'''
if len(p) == 2:
p[0] = []
else:
p[0] = p[1] + p[2]
def p_funr(p):
'''
funr : fun funr
| empty
'''
if len(p) == 2:
p[0] = []
else:
p[0] = [p[1]] + p[2]
def p_llamada(p):
'''
llamada : ID PARIZQ llamada_param PARDER COLON
| empty
'''
    if len(p) == 2:
        p[0] = None
    else:
        p[0] = FunCall(p[1], p[3])
def p_obj_call(p):
'''
obj_call : ID PUNTO ID PARIZQ llamada_param PARDER COLON
| empty
'''
    if len(p) == 2:
        p[0] = None
    else:
        p[0] = FunCall(p[3], p[5], p[1])
def p_llamada_param(p):
'''
llamada_param : expresion expresionr
| empty
'''
if len(p) == 2:
p[0] = []
else:
p[0] = [p[1]] + p[2]
def p_error(p):
    if p is not None:
        print("Syntax error at line %d, illegal token '%s' found" % (p.lineno, p.value))
    else:
        print("Unexpected end of input")
def p_empty(p):
'''empty : '''
lex.lex()
parser = yacc.yacc(start='file')
``` |
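Because the grammar above leans heavily on the distinction between keywords and plain identifiers, a small, hypothetical tokenization check can make that concrete; the sample source text below is an assumption, not taken from the repository:
```python
# Minimal sketch, assuming the token rules above are in scope.
import ply.lex as lex
lexer = lex.lex()                 # rebuild a lexer from the t_* rules above
lexer.input("x = 3 + 4;")
for tok in lexer:
    print(tok.type, tok.value)    # ID x, IGUAL =, INTNUM 3, MAS +, INTNUM 4, COLON ;
```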
{
"source": "JorgeGMarques/micropython-scikit-fuzzy",
"score": 4
} |
#### File: docs/examples/plot_cmeans.py
```python
from ulab import numpy as np
import ulab_extended as _np
from bipes import databoard as db
import _cmeans
import normalize_columns
import math
import random
import gc
# Enable garbage collection
gc.enable()
NV_MAGICCONST = 4 * math.exp(-0.5)/2.0**.5
# Number os points per cluster
POINTS = 50
# Define three cluster centers
centers = [[4, 2],
[1, 7],
[5, 6]]
# Define three cluster sigmas in x and y, respectively
sigmas = [[0.8, 0.3],
[0.3, 0.5],
[1.1, 0.7]]
# Generate test data
xpts = np.zeros(1)
ypts = np.zeros(1)
labels = np.zeros(1)
def hstack_1d(a,b):
ca = a.size
cb = b.size
_hstack = np.zeros(ca+cb)
for i in range(0, ca+cb):
if i < ca:
_hstack[i] = a[i]
else:
_hstack[i] = b[i-ca]
return _hstack
def normalvariate(x):
"""Normal distribution.
mu is the mean, and sigma is the standard deviation.
`source <https://microbit-micropython.readthedocs.io/en/latest/_modules/random.html>`_
"""
# Uses Kinderman and Monahan method. Reference: Kinderman,
# A.J. and <NAME>., "Computer generation of random
# variables using the ratio of uniform deviates", ACM Trans
# Math Software, 3, (1977), pp257-260.
mu = 1
sigma = 1
while 1:
u1 = random.random()
u2 = 1.0 - random.random()
z = NV_MAGICCONST*(u1-0.5)/u2
zz = z*z/4.0
if zz <= -math.log(u2):
break
return mu + z*sigma
for i, ((xmu, ymu), (xsigma, ysigma)) in enumerate(zip(centers, sigmas)):
u0 = np.zeros(POINTS)
vrand = np.vectorize(normalvariate)
u0 = vrand(u0)
u1 = np.zeros(POINTS)
vrand = np.vectorize(normalvariate)
u1 = vrand(u1)
xpts = hstack_1d(xpts, u0 * xsigma + xmu)
ypts = hstack_1d(ypts, u1 * ysigma + ymu)
labels = hstack_1d(labels, np.ones(POINTS) * i)
# Print test data
for label in range(0,3):
db.push(xpts[labels == label], ypts[labels == label], label, 'clusters0')
gc.collect()
"""
Clustering
----------
Above is our test data. We see three distinct blobs. However, what would happen
if we didn't know how many clusters we should expect? Perhaps if the data were
not so clearly clustered?
Let's try clustering our data several times, with between 2 and 5 clusters.
"""
def vstack_1d(a,b):
ca = a.size
cb = b.size
_hstack = np.zeros(1,ca+cb)
for i in range(0, ca+cb):
if i < ca:
_hstack[i] = a[i]
else:
_hstack[i] = b[i-ca]
# Set up the loop and plot
alldata = _np.vstack(xpts, ypts)
fpcs = []
for ncenters in range(2, 6):
cntr, u, u0, d, jm, p, fpc = _cmeans.cmeans(
alldata, ncenters, 2, error=0.005, maxiter=1000, init=None)
# Store fpc values for later
fpcs.append(fpc)
# Plot assigned clusters, for each data point in training set
cluster_membership = np.argmax(u, axis=0)
_comma = ','
for j in range(ncenters):
db.push(xpts[cluster_membership == j], ypts[cluster_membership == j], j, 'clusters1_' + str(ncenters))
# Mark the center of each fuzzy cluster
i = 0
for pt in cntr:
db.push(pt[0], pt[1], ncenters + i, 'clusters1_' + str(ncenters))
i+=1
gc.collect()
"""
The fuzzy partition coefficient (FPC)
-------------------------------------
The FPC is defined on the range from 0 to 1, with 1 being best. It is a metric
which tells us how cleanly our data is described by a certain model. Next we
will cluster our set of data - which we know has three clusters - several
times, with between 2 and 9 clusters. We will then show the results of the
clustering, and plot the fuzzy partition coefficient. When the FPC is
maximized, our data is described best.
"""
label_ = np.zeros(len(fpcs))
for i_ in range(0,len(fpcs)):
label_[i_] = i_+2
db.push(label_, fpcs, 0, 'clusters2_fpc')
"""
As we can see, the ideal number of centers is 3. This isn't news for our
contrived example, but having the FPC available can be very useful when the
structure of your data is unclear.
Note that we started with *two* centers, not one; clustering a dataset with
only one cluster center is the trivial solution and will by definition return
FPC == 1.
====================
Classifying New Data
====================
Now that we can cluster data, the next step is often fitting new points into
an existing model. This is known as prediction. It requires both an existing
model and new data to be classified.
Building the model
------------------
We know our best model has three cluster centers. We'll rebuild a 3-cluster
model for use in prediction, generate new uniform data, and predict which
cluster to which each new data point belongs.
"""
# Regenerate fuzzy model with 3 cluster centers - note that center ordering
# is random in this clustering algorithm, so the centers may change places
cntr, u_orig, _, _, _, _, _ = _cmeans.cmeans(
alldata, 3, 2, error=0.005, maxiter=1000)
# Show 3-cluster model
for j in range(3):
db.push(xpts[np.argmax(u_orig, axis=0) == j], ypts[np.argmax(u_orig, axis=0) == j], j, 'clusters3')
"""
Prediction
----------
Finally, we generate uniformly sampled data over this field and classify it
via ``cmeans_predict``, incorporating it into the pre-existing model.
"""
# Generate uniformly sampled data spread across the range [0, 10] in x and y
newdata = np.zeros((2,100))
vrand = np.vectorize(_cmeans.rand)
newdata = vrand(u0) * 10
# Predict new cluster membership with `cmeans_predict` as well as
# `cntr` from the 3-cluster model
u, u0, d, jm, p, fpc = _cmeans.cmeans_predict(
newdata, cntr, 2, error=0.005, maxiter=1000)
# Plot the classified uniform data. Note for visualization the maximum
# membership value has been taken at each point (i.e. these are hardened,
# not fuzzy results visualized) but the full fuzzy result is the output
# from cmeans_predict.
cluster_membership = np.argmax(u, axis=0) # Hardening for visualization
for j in range(3):
db.push(newdata[0][cluster_membership == j],
newdata[1][cluster_membership == j], j, 'clusters4')
```
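As a small follow-up to the FPC discussion embedded in the script above, the preferred model can be read straight off the `fpcs` list; this is a hypothetical one-liner assuming the loop above covered `ncenters` from 2 to 5:
```python
# Minimal sketch: pick the cluster count whose model maximized the fuzzy partition coefficient.
best_ncenters = 2 + fpcs.index(max(fpcs))   # expected to be 3 for this synthetic data
```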
#### File: skfuzzy/fuzzymath/fuzzy_logic.py
```python
import numpy as np
def _resampleuniverse(x, mfx, y, mfy):
"""
Resamples fuzzy universes `x` and `y` to include the full range of either
universe, with resolution of the lowest difference between any two
reported points.
"""
minstep = np.asarray([np.diff(x).min(), np.diff(y).min()]).min()
mi = min(x.min(), y.min())
ma = max(x.max(), y.max())
z = np.r_[mi:ma:minstep]
xidx = np.argsort(x)
mfx = mfx[xidx]
x = x[xidx]
mfx2 = np.interp(z, x, mfx)
yidx = np.argsort(y)
mfy = mfy[yidx]
y = y[yidx]
mfy2 = np.interp(z, y, mfy)
return z, mfx2, mfy2
def fuzzy_norm(x, mfx, y, mfy, norm):
"""
    Fuzzy operator: logic operation of two fuzzy sets.
Parameters
----------
x : 1d array
Universe variable for fuzzy membership function `mfx`.
mfx : 1d array
Fuzzy membership function for universe variable `x`.
y : 1d array
Universe variable for fuzzy membership function `mfy`.
mfy : 1d array
Fuzzy membership function for universe variable `y`.
norm : Function
T-norm or T-conorm (S-norm)
Returns
-------
z : 1d array
Universe variable for union of the two provided fuzzy sets.
mfz : 1d array
Fuzzy membership function, the result of the operation
of `mfx` and `mfy`.
Notes
-------
See `T-Norm <https://en.wikipedia.org/wiki/T-norm>`_ for t-norms.
"""
# Check if universes are the same
sameuniverse = False
if x.shape == y.shape and (x == y).all():
z = x
mfx2 = mfx
mfy2 = mfy
sameuniverse = True
if not sameuniverse:
z, mfx2, mfy2 = _resampleuniverse(x, mfx, y, mfy)
return z, norm(mfx2, mfy2)
def fuzzy_and(x, mfx, y, mfy):
"""
Fuzzy AND operator, a.k.a. the intersection of two fuzzy sets.
Parameters
----------
x : 1d array
Universe variable for fuzzy membership function `mfx`.
mfx : 1d array
Fuzzy membership function for universe variable `x`.
y : 1d array
Universe variable for fuzzy membership function `mfy`.
mfy : 1d array
Fuzzy membership function for universe variable `y`.
Returns
-------
z : 1d array
Universe variable for union of the two provided fuzzy sets.
mfz : 1d array
Fuzzy AND (intersection) of `mfx` and `mfy`.
"""
    # Delegate to the generic operator with the min t-norm
return fuzzy_norm(x, mfx, y, mfy, norm=np.fmin)
def fuzzy_or(x, mfx, y, mfy):
"""
Fuzzy OR operator, a.k.a. union of two fuzzy sets.
Parameters
----------
x : 1d array
Universe variable for fuzzy membership function `mfx`.
mfx : 1d array
Fuzzy membership function for universe variable `x`.
y : 1d array
Universe variable for fuzzy membership function `mfy`.
mfy : 1d array
Fuzzy membership function for universe variable `y`.
Returns
-------
z : 1d array
Universe variable for intersection of the two provided fuzzy sets.
mfz : 1d array
Fuzzy OR (union) of `mfx` and `mfy`.
"""
    # Delegate to the generic operator with the max s-norm
return fuzzy_norm(x, mfx, y, mfy, norm=np.fmax)
def fuzzy_not(mfx):
"""
Fuzzy NOT operator, a.k.a. complement of a fuzzy set.
Parameters
----------
mfx : 1d array
Fuzzy membership function.
Returns
-------
mfz : 1d array
Fuzzy NOT (complement) of `mfx`.
Notes
-----
This operation does not require a universe variable, because the
complement is defined for a single set. The output remains defined on the
same universe.
"""
return 1. - mfx
```
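A short, hypothetical check of the three operators defined above (the universe and membership values are made-up assumptions) illustrates the pointwise min/max/complement behaviour on a shared universe:
```python
# Minimal sketch, assuming fuzzy_and, fuzzy_or and fuzzy_not from the module above are in scope.
import numpy as np
x = np.arange(0.0, 5.0, 1.0)
mfx = np.array([0.0, 0.3, 1.0, 0.3, 0.0])
mfy = np.array([0.0, 0.0, 0.5, 1.0, 0.5])
z, mf_and = fuzzy_and(x, mfx, x, mfy)   # identical universes -> pointwise np.fmin
z, mf_or = fuzzy_or(x, mfx, x, mfy)     # identical universes -> pointwise np.fmax
mf_not = fuzzy_not(mfx)                 # 1 - mfx
```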
#### File: skfuzzy/intervals/intervalops.py
```python
import numpy as np
from ..defuzzify import lambda_cut_series
def addval(interval1, interval2):
"""
Add intervals interval1 and interval2.
Parameters
----------
interval1 : 2-element iterable
First interval set.
interval2 : 2-element iterable
Second interval set.
Returns
-------
Z : 2-element array
Sum of interval1 and interval2, defined as::
Z = interval1 + interval2 = [a + c, b + d]
"""
# Handle arrays
if not isinstance(interval1, np.ndarray):
interval1 = np.asarray(interval1)
if not isinstance(interval2, np.ndarray):
interval2 = np.asarray(interval2)
try:
return np.r_[interval1] + np.r_[interval2]
except Exception:
return interval1 + interval2
def divval(interval1, interval2):
"""
Divide ``interval2`` into ``interval1``, by inversion and multiplication.
Parameters
----------
interval1 : 2-element iterable
First interval set.
interval2 : 2-element iterable
Second interval set.
Returns
-------
z : 2-element array
Interval result of interval1 / interval2.
"""
# Handle arrays
if not isinstance(interval1, np.ndarray):
interval1 = np.asarray(interval1)
if not isinstance(interval2, np.ndarray):
interval2 = np.asarray(interval2)
# Invert interval2 and multiply
interval2 = 1. / interval2
return multval(interval1, interval2)
def dsw_add(x, mfx, y, mfy, n):
"""
Add two fuzzy variables together using the restricted DSW method [1].
Parameters
----------
x : 1d array
Universe for first fuzzy variable.
mfx : 1d array
Fuzzy membership for universe ``x``. Must be convex.
y : 1d array
Universe for second fuzzy variable.
mfy : 1d array
Fuzzy membership for universe ``y``. Must be convex.
n : int
Number of lambda-cuts to use; a higher number will have greater
resolution toward the limit imposed by input sets ``x`` and ``y``.
Returns
-------
z : 1d array
Output universe variable.
mfz : 1d array
Output fuzzy membership on universe ``z``.
Notes
-----
The Dong, Shah, and Wong (DSW) method requires convex fuzzy membership
    functions. The ``dsw_*`` functions return results similar to MATLAB's
``fuzarith`` function.
References
----------
.. [1] <NAME> and <NAME> and <NAME>, Fuzzy computations in risk and
decision analysis, Civ Eng Syst, 2, 1985, pp 201-208.
"""
# Restricted DSW w/n lambda cuts
x = lambda_cut_series(x, mfx, n)
y = lambda_cut_series(y, mfy, n)
n1, n2 = x.shape
ff = np.zeros((n1, n2))
ff[:, 0] = x[:, 0]
# Compute F = x + y
for n in range(n1):
ff[n, [1, 2]] = addval(x[n, [1, 2]], y[n, [1, 2]])
# Arrange for output or plotting
out = np.zeros((2 * n1, 2))
out[0:n1, 1] = ff[:, 0]
out[n1:2 * n1, 1] = np.flipud(ff[:, 0])
out[0:n1, 0] = ff[:, 1]
out[n1:2 * n1, 0] = np.flipud(ff[:, 2])
# No need for transposes; rank-1 arrays have no transpose in Python
return out[:, 0], out[:, 1]
def dsw_div(x, mfx, y, mfy, n):
"""
Divide one fuzzy variable by another using the restricted DSW method [1].
Parameters
----------
x : 1d array
Universe for first fuzzy variable.
mfx : 1d array
Fuzzy membership for universe ``x``. Must be convex.
y : 1d array
Universe for second fuzzy variable.
mfy : 1d array
Fuzzy membership for universe ``y``. Must be convex.
n : int
Number of lambda-cuts to use; a higher number will have greater
resolution toward the limit imposed by input sets ``x`` and ``y``.
Returns
-------
z : 1d array
Output universe variable.
mfz : 1d array
Output fuzzy membership on universe ``z``.
Notes
-----
The Dong, Shah, and Wong (DSW) method requires convex fuzzy membership
    functions. The ``dsw_*`` functions return results similar to MATLAB's
``fuzarith`` function.
References
----------
.. [1] <NAME> and <NAME> and <NAME>, Fuzzy computations in risk and
decision analysis, Civ Eng Syst, 2, 1985, pp 201-208.
"""
# Restricted DSW w/n lambda cuts
x = lambda_cut_series(x, mfx, n)
y = lambda_cut_series(y, mfy, n)
n1, n2 = x.shape
ff = np.zeros((n1, n2))
ff[:, 0] = x[:, 0]
# Compute F = x / y
for n in range(n1):
ff[n, [1, 2]] = divval(x[n, [1, 2]], y[n, [1, 2]])
# Arrange for output or plotting
out = np.zeros((2 * n1, 2))
out[0:n1, 1] = ff[:, 0]
out[n1:2 * n1, 1] = np.flipud(ff[:, 0])
out[0:n1, 0] = ff[:, 1]
out[n1:2 * n1, 0] = np.flipud(ff[:, 2])
# No need for transposes; rank-1 arrays have no transpose in Python
return out[:, 0], out[:, 1]
def dsw_mult(x, mfx, y, mfy, n):
"""
Multiply two fuzzy variables using the restricted DSW method [1].
Parameters
----------
x : 1d array
Universe for first fuzzy variable.
mfx : 1d array
Fuzzy membership for universe ``x``. Must be convex.
y : 1d array
Universe for second fuzzy variable.
mfy : 1d array
Fuzzy membership for universe ``y``. Must be convex.
n : int
Number of lambda-cuts to use; a higher number will have greater
resolution toward the limit imposed by input sets ``x`` and ``y``.
Returns
-------
z : 1d array
Output universe variable.
mfz : 1d array
Output fuzzy membership on universe ``z``.
Notes
-----
The Dong, Shah, and Wong (DSW) method requires convex fuzzy membership
    functions. The ``dsw_*`` functions return results similar to MATLAB's
``fuzarith`` function.
References
----------
.. [1] <NAME> and <NAME> and <NAME>, Fuzzy computations in risk and
decision analysis, Civ Eng Syst, 2, 1985, pp 201-208.
"""
# Restricted DSW w/n lambda cuts
x = lambda_cut_series(x, mfx, n)
y = lambda_cut_series(y, mfy, n)
n1, n2 = x.shape
ff = np.zeros((n1, n2))
ff[:, 0] = x[:, 0]
# Compute F = x * y
for n in range(n1):
ff[n, [1, 2]] = multval(x[n, [1, 2]], y[n, [1, 2]])
# Arrange for output or plotting
out = np.zeros((2 * n1, 2))
out[0:n1, 1] = ff[:, 0]
out[n1:2 * n1, 1] = np.flipud(ff[:, 0])
out[0:n1, 0] = ff[:, 1]
out[n1:2 * n1, 0] = np.flipud(ff[:, 2])
# No need for transposes; rank-1 arrays have no transpose in Python
return out[:, 0], out[:, 1]
def dsw_sub(x, mfx, y, mfy, n):
"""
Subtract a fuzzy variable from another by the restricted DSW method [1].
Parameters
----------
x : 1d array
Universe for first fuzzy variable.
mfx : 1d array
Fuzzy membership for universe ``x``. Must be convex.
y : 1d array
Universe for second fuzzy variable, which will be subtracted from
``x``.
mfy : 1d array
Fuzzy membership for universe ``y``. Must be convex.
n : int
Number of lambda-cuts to use; a higher number will have greater
resolution toward the limit imposed by input sets ``x`` and ``y``.
Returns
-------
z : 1d array
Output universe variable.
mfz : 1d array
Output fuzzy membership on universe ``z``.
Notes
-----
The Dong, Shah, and Wong (DSW) method requires convex fuzzy membership
    functions. The ``dsw_*`` functions return results similar to MATLAB's
``fuzarith`` function.
References
----------
.. [1] <NAME> and <NAME> and <NAME>, Fuzzy computations in risk and
decision analysis, Civ Eng Syst, 2, 1985, pp 201-208.
"""
# Restricted DSW w/n lambda cuts
x = lambda_cut_series(x, mfx, n)
y = lambda_cut_series(y, mfy, n)
n1, n2 = x.shape
ff = np.zeros((n1, n2))
ff[:, 0] = x[:, 0]
# Compute F = x - y
for n in range(n1):
ff[n, [1, 2]] = subval(x[n, [1, 2]], y[n, [1, 2]])
# Arrange for output or plotting
out = np.zeros((2 * n1, 2))
out[0:n1, 1] = ff[:, 0]
out[n1:2 * n1, 1] = np.flipud(ff[:, 0])
out[0:n1, 0] = ff[:, 1]
out[n1:2 * n1, 0] = np.flipud(ff[:, 2])
# No need for transposes; rank-1 arrays have no transpose in Python
return out[:, 0], out[:, 1]
def multval(interval1, interval2):
"""
Multiply intervals interval1 and interval2.
Parameters
----------
interval1 : 1d array, length 2
First interval.
interval2 : 1d array, length 2
Second interval.
Returns
-------
z : 1d array, length 2
Interval resulting from multiplication of interval1 and interval2.
"""
# Handle arrays
if not isinstance(interval1, np.ndarray):
interval1 = np.asarray(interval1)
if not isinstance(interval2, np.ndarray):
interval2 = np.asarray(interval2)
try:
crosses = np.r_[interval1[0] * interval2[0],
interval1[0] * interval2[1],
interval1[1] * interval2[0],
interval1[1] * interval2[1]]
return np.r_[crosses.min(), crosses.max()]
except Exception:
return interval1 * interval2
def scaleval(q, interval):
"""
Multiply scalar q with interval ``interval``.
Parameters
----------
q : float
Scalar to multiply interval with.
interval : 1d array, length 2
Interval. Must have exactly two elements.
Returns
-------
z : 1d array, length 2
New interval; z = q x interval.
"""
# Handle array
if not isinstance(interval, np.ndarray):
interval = np.asarray(interval)
try:
return np.r_[min(q * interval[0], q * interval[1]),
max(q * interval[0], q * interval[1])]
except Exception:
return q * interval
def subval(interval1, interval2):
"""
Subtract interval interval2 from interval interval1.
Parameters
----------
interval1 : 1d array, length 2
First interval.
interval2 : 1d array, length 2
Second interval.
Returns
-------
Z : 1d array, length 2
Resultant subtracted interval.
"""
# Handle arrays
if not isinstance(interval1, np.ndarray):
interval1 = np.asarray(interval1)
if not isinstance(interval2, np.ndarray):
interval2 = np.asarray(interval2)
try:
return np.r_[interval1[0] - interval2[1], interval1[1] - interval2[0]]
except Exception:
return interval1 - interval2
``` |
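The interval helpers above implement standard interval arithmetic; a few hypothetical spot checks (the endpoints are chosen here, not taken from the repository) show the expected results:
```python
# Minimal sketch, assuming addval, subval, multval and divval from the module above are in scope.
addval([1, 2], [3, 5])    # -> [4, 7]
subval([1, 2], [3, 5])    # -> [-4, -1]
multval([-1, 2], [3, 5])  # -> [-5, 10]  (min/max over the four cross products)
divval([1, 2], [4, 5])    # -> [0.2, 0.5]  (invert the divisor, then multiply)
```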
{
"source": "jorgehatccrma/pygrfnn",
"score": 2
} |
#### File: pygrfnn/examples/rhythm2.py
```python
from __future__ import division
from time import time
import sys
sys.path.append('../') # needed to run the examples from within the package folder
import numpy as np
from scipy.signal import hilbert
from scipy.io import loadmat
from pygrfnn.network import Model, make_connections, modelFromJSON
from pygrfnn.oscillator import Zparam
from pygrfnn.grfnn import GrFNN
import matplotlib.pyplot as plt
from pygrfnn.vis import plot_connections
from pygrfnn.vis import tf_detail
from pygrfnn.vis import GrFNN_RT_plot
from pyrhythm.library import get_pattern
from daspy import Signal
from daspy.processing import onset_detection_signal
use_matlab_stimulus = False
RT_display = True
def get_stimulus(pattern_name="iso"):
if use_matlab_stimulus:
D = loadmat('examples/iso44_signal')
sr = float(D['Fs'][0][0])
s = D['signal'][0] # analytical signal (generated in matlab)
s = Signal(s, sr=sr)
else:
p = get_pattern(pattern_name)
sr = 22050
x, _ = p.as_signal(tempo=120.0,
reps=6.0,
lead_silence=0.0,
sr=sr,
click_freq=1200.0,
with_beat=False,
beat_freq=1800.0,
accented=False)
x = Signal(x, sr=sr)
s = onset_detection_signal(x)
rms = np.sqrt(np.sum(s**2)/len(s))
s *= 0.06/rms
s = Signal(hilbert(s), sr=s.sr)
t = s.time_vector()
dt = 1/s.sr
# print "SR: ", s.sr
return s, t, dt
def rhythm_model():
rhythm_model_definition = """
{
"name": "Sensory Motor Rhythm model",
"layers": [
{
"name": "sensory network",
"zparams": {
"alpha": 0.00001,
"beta1": 0.0,
"beta2": -2.0,
"delta1": 0.0,
"delta2": 0.0,
"epsilon": 1.0
},
"frequency_range": [0.375, 12.0],
"num_oscs": 321,
"stimulus_conn_type": "linear",
"w": 3.0,
"input_channel": 0
},
{
"name": "motor network",
"zparams": {
"alpha": -0.4,
"beta1": 1.75,
"beta2": -1.25,
"delta1": 0.0,
"delta2": 0.0,
"epsilon": 1.0
},
"frequency_range": [0.375, 12.0],
"num_oscs": 321,
"stimulus_conn_type": "active"
}
],
"connections": [
{
"source_name": "sensory network",
"target_name": "sensory network",
"modes": [0.333333333333, 0.5, 1, 2.0, 3.0],
"amps": [1, 1, 1, 1, 1],
"strength": 1.0,
"range": 1.05,
"connection_type": "2freq",
"self_connect": false,
"weight": 0.1
},
{
"source_name": "sensory network",
"target_name": "motor network",
"modes": [0.333333333333, 0.5, 1, 2.0, 3.0],
"amps": [1, 1, 1, 1, 1],
"strength": 1.25,
"range": 1.05,
"connection_type": "2freq",
"self_connect": true,
"weight": 0.4
},
{
"source_name": "motor network",
"target_name": "motor network",
"modes": [0.333333333333, 0.5, 1, 2.0, 3.0],
"amps": [1, 1, 1, 1, 1],
"strength": 1.0,
"range": 1.05,
"connection_type": "2freq",
"self_connect": false,
"weight": 0.1
},
{
"source_name": "motor network",
"target_name": "sensory network",
"modes": [0.333333333333, 0.5, 1, 2.0, 3.0],
"amps": [1, 1, 1, 1, 1],
"strength": 0.2,
"range": 1.05,
"connection_type": "2freq",
"self_connect": true,
"weight": 0.05
}
]
}
"""
return modelFromJSON(rhythm_model_definition)
if __name__ == '__main__':
if len(sys.argv) > 1:
pattern_name = sys.argv[1]
else:
pattern_name = "iso"
s, t, dt = get_stimulus(pattern_name)
model = rhythm_model()
layer1, layer2 = model.layers()
# Simulation
if RT_display:
plt.ion()
plt.plot(t, s);
plt.title('Stimulus')
GrFNN_RT_plot(layer1, update_interval=2.0/s.sr, title='First Layer')
GrFNN_RT_plot(layer2, update_interval=2.0/s.sr, title='Second Layer')
tic = time()
model.run(s, t, dt)
print "Run time: {:0.1f} seconds".format(time() - tic)
TF = layer2.Z
r = np.sum(TF, 0)
rms = np.sqrt(np.sum(r*np.conj(r))/len(r))
r *= 0.06/rms
plt.figure()
plt.plot(t, np.real(r))
plt.plot(t, np.real(s))
``` |
{
"source": "Jorgehdzdata/sqlalchemy-challenge",
"score": 3
} |
#### File: Jorgehdzdata/sqlalchemy-challenge/app.py
```python
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
import numpy as np
from flask import Flask, jsonify
# Database Setup
engine = create_engine("sqlite:///Resources//hawaii.sqlite")
# reflect an existing database into a new model
conn = engine.connect()
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# View all of the classes that automap found
Base.classes.keys()
# Save references to each table
mes = Base.classes.measurement
station = Base.classes.station
# # Create our session (link) from Python to the DB
# session = Session(conn)
# Flask Setup
app = Flask(__name__)
# Flask Routes
@app.route("/")
def welcome():
"""List all available api routes."""
return (
f"Available Routes:<br/>"
f"/api/v1.0/precipitation<br/>"
f"/api/v1.0/stations<br/>"
f"/api/v1.0/tobs"
)
@app.route("/api/v1.0/precipitation")
def measurement():
"""Convert the query results to a dictionary using date as the key and prcp as the value.
Return the JSON representation of your dictionary. """
session = Session(engine)
# Query all measurements
results = session.query(mes.date , mes.prcp).all()
session.close()
all_measurements = []
for d, p in results:
measurements_dict = {}
measurements_dict["date"] = d
measurements_dict["prcp"] = p
all_measurements.append(measurements_dict)
return jsonify(all_measurements)
@app.route("/api/v1.0/stations")
def stations():
"""Return a JSON list of stations from the dataset."""
session = Session(engine)
# Query all Stations
result2 = session.query(station.station).all()
session.close()
# Convert list of tuples into normal list
all_stat = list(np.ravel(result2))
return jsonify(all_stat)
@app.route("/api/v1.0/tobs")
def tob():
"""Return a JSON list of stations from the dataset."""
session = Session(engine)
# Query all Stations
result3 = session.query(mes.date, mes.tobs).filter(mes.date >= '2016-08-23').filter(mes.station == 'USC00519281')
session.close()
tobs_365 = []
for d, t in result3:
tobs_dict = {}
tobs_dict["date"] = d
tobs_dict["prcp"] = t
tobs_365.append(tobs_dict)
return jsonify(tobs_365)
if __name__ == "__main__":
app.run(debug=True)
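    # Usage note (illustrative): once running, the API can be queried locally,
    # e.g. http://127.0.0.1:5000/api/v1.0/precipitation (Flask's default
    # development port).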
``` |
{
"source": "Jorgehdzdata/web-scraping-challenge",
"score": 3
} |
#### File: web-scraping-challenge/Mission_to_Mars/app.py
```python
from flask_pymongo import PyMongo
from flask import Flask, render_template, redirect
import scrape_mars
app = Flask(__name__)
# Use flask_pymongo to set up mongo connection
app.config["MONGO_URI"] = "mongodb://localhost:27017/mission_to_mars_app"
mongo = PyMongo(app)
# Create main page
@app.route("/")
def index():
mars_data = mongo.db.mars_data.find_one()
return render_template("index.html", data = mars_data)
# Create scrape page
@app.route("/scrape")
def scraper():
mars_data = mongo.db.mars_data
mars_item_data = scrape_mars.scrape()
mars_data.update({}, mars_item_data, upsert=True)
return redirect("/", code=302)
if __name__ == "__main__":
app.run(debug=True)
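    # Usage note: visiting /scrape runs scrape_mars.scrape() and upserts the
    # result into the mission_to_mars_app database, then redirects to /, which
    # renders the stored document with templates/index.html.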
``` |
{
"source": "jorgeibesi/PyDatSet",
"score": 3
} |
#### File: PyDatSet/pydatset/sfddd.py
```python
import numpy as np
import os
from scipy.ndimage import imread
def get_data(directory, num_validation=2000):
'''
Load the SFDDD dataset from disk and perform preprocessing to prepare
it for the neural net classifier.
'''
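    # Pipeline: the first num_validation images become the held-out split, the
    # remaining training images are shuffled, channels are moved first
    # (N, C, H, W), and everything is normalized with the training mean image
    # and a global std.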
# Load the raw SFDDD data
Xtr, Ytr = load(directory)
X_test = Xtr[:num_validation]
y_test = Ytr[:num_validation]
X_train = Xtr[num_validation:]
y_train = Ytr[num_validation:]
    # Shuffle the training split with a permutation so every sample is kept
    # exactly once (np.random.choice samples with replacement).
    num_train = len(X_train)
    mask = np.random.permutation(num_train)
    X_train = X_train[mask]
    y_train = y_train[mask]
X_train = X_train.astype(np.float64)
X_test = X_test.astype(np.float64)
# Transpose so that channels come first
X_train = X_train.transpose(0, 3, 1, 2)
X_test = X_test.transpose(0, 3, 1, 2)
mean_image = np.mean(X_train, axis=0)
std = np.std(X_train)
X_train -= mean_image
X_test -= mean_image
X_train /= std
X_test /= std
return {
'X_train': X_train, 'y_train': y_train,
'X_test': X_test, 'y_test': y_test,
'mean': mean_image, 'std': std
}
def load_imgs(folder):
'''
Load all images in a folder
'''
names = [x for x in os.listdir(folder) if '.jpg' in x]
num_of_images = len(names)
imgs = []
for i in range(num_of_images):
imgs.append(imread(os.path.join(folder, names[i]), mode='RGB'))
return imgs
def load(ROOT):
''' load all of SFDDD '''
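    # Expects ROOT/train/c0 ... ROOT/train/c9: one folder of .jpg images per
    # class label (10 classes), as in the SFDDD distribution.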
xs = []
ys = []
for b in range(10):
imgs = load_imgs(os.path.join(ROOT, 'train', 'c%d' % (b, )))
ys.append([b] * len(imgs))
xs.append(imgs)
Xtr = np.concatenate(xs)
Ytr = np.concatenate(ys)
mask = np.arange(len(Ytr))
np.random.shuffle(mask)
Xtr = Xtr[mask]
Ytr = Ytr[mask]
return Xtr, Ytr
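# Illustrative usage sketch (the path below is an assumption, not part of the
# module):
#   data = get_data('/path/to/sfddd', num_validation=2000)
#   print(data['X_train'].shape)  # (N, 3, H, W) after the transpose above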
``` |
{
"source": "jorge-imperial/mongo_ftdc",
"score": 3
} |
#### File: mongo_ftdc/tests/test_parse_file.py
```python
def test_import():
import pyftdc
assert 0 == 0
diagnostics_file = './diagnostic.data_40/metrics.2021-07-22T17-16-31Z-00000'
def test_parse():
import pyftdc
# Create a parser object
p = pyftdc.FTDCParser()
status = p.parse_file(diagnostics_file)
assert status == 0
def test_parse_get_metadata():
import pyftdc
# Create a parser object
p = pyftdc.FTDCParser()
status = p.parse_file(diagnostics_file)
assert status == 0
meta = p.metadata
if len(meta) > 0:
print(meta[0])
print(f"metadata has {len(meta)} elements")
assert len(meta) > 0
def test_parse_get_timestamps():
import pyftdc
# Create a parser object
p = pyftdc.FTDCParser()
status = p.parse_file(diagnostics_file)
assert status == 0
ts = p.get_timestamps()
print(f"There are {len(ts)} timestamps")
assert len(ts) > 0
def test_parse_metrics():
import pyftdc
# Create a parser object
p = pyftdc.FTDCParser()
status = p.parse_file(diagnostics_file)
assert status == 0
metrics = p.metric_names
for m in metrics:
print(f"\tMetric: {m}")
print(f"There are {len(metrics)} metrics")
assert len(metrics) > 0
def test_metrics_samples():
import pyftdc
# Create a parser object
p = pyftdc.FTDCParser()
status = p.parse_file(diagnostics_file)
assert status == 0
metrics = p.metric_names
m = p.get_metric(metrics[37])
n = p.get_metric(metrics[73])
assert len(n) == len(m)
ts = p.get_timestamps()
middle_ts = ts[int(len(ts)/2)]
h1 = p.get_metric(metrics[73], end=middle_ts)
h2 = p.get_metric(metrics[73], start=middle_ts)
assert len(ts) == len(h1) + len(h2)
# Ten samples (same chunk, for this metrics file)
ten_more = ts[int(len(ts)/2)+10]
m_10 = p.get_metric(metrics[37], start=middle_ts, end=ten_more)
assert 10 == len(m_10)
    # Four hundred so we use two chunks (again, for this particular metrics file)
four_hundred_more = ts[int(len(ts)/2)+400]
m_400 = p.get_metric(metrics[37], start=middle_ts, end=four_hundred_more)
assert 400 == len(m_400)
#TODO: Test for lists of samples
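# A minimal sketch for the TODO above; it reuses only calls already exercised
# in this file (get_metrics_list_numpy with a list of metric names), and the
# assertions are illustrative rather than part of the original suite.
def test_metrics_list_samples():
    import pyftdc

    p = pyftdc.FTDCParser()
    status = p.parse_file(diagnostics_file)
    assert status == 0

    metrics = p.metric_names
    mm = p.get_metrics_list_numpy([metrics[37], metrics[73]])
    assert len(mm) == 2
    assert len(mm[0]) == len(mm[1])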
def test_metrics_numpy():
import pyftdc
# Create a parser object
p = pyftdc.FTDCParser()
status = p.parse_file(diagnostics_file)
assert status == 0
metrics = p.metric_names
m = p.get_metric(metrics[37])
n = p.get_metric(metrics[73])
assert len(n) == len(m)
ts = p.get_timestamps()
middle_ts = ts[int(len(ts)/2)]
h1 = p.get_metric_numpy(metrics[73], end=middle_ts)
h2 = p.get_metric_numpy(metrics[73], start=middle_ts)
assert len(ts) == len(h1) + len(h2)
# Ten samples (same chunk, for this metrics file)
ten_more = ts[int(len(ts)/2)+10]
m_10 = p.get_metric_numpy(metrics[37], start=middle_ts, end=ten_more)
assert 10 == len(m_10)
    # Four hundred so we use two chunks (again, for this particular metrics file)
four_hundred_more = ts[int(len(ts)/2)+400]
m_400 = p.get_metric_numpy(metrics[37], start=middle_ts, end=four_hundred_more)
assert 400 == len(m_400)
assert str(type(m_400)) == "<class 'numpy.ndarray'>"
mm = p.get_metrics_list_numpy([metrics[73], metrics[37]])
assert str(type(mm[0])) == "<class 'numpy.ndarray'>"
assert len(mm) == 2
assert len(mm[0]) == len(n)
assert len(mm[1]) == len(m)
def test_metrics_rated_numpy():
import pyftdc
# Create a parser object
p = pyftdc.FTDCParser()
status = p.parse_file(diagnostics_file)
assert status == 0
metrics = p.metric_names
m = p.get_metric(metrics[37])
n = p.get_metric(metrics[73])
assert len(n) == len(m)
m_rated_with_name = p.get_metric('@'+metrics[37])
m_rated = p.get_metric(metrics[37], rated_metric=True)
assert len(m_rated_with_name) == len(m_rated)
def test_metrics_stall():
import pyftdc
# Create a parser object
p = pyftdc.FTDCParser()
status = p.parse_file(diagnostics_file)
assert status == 0
ftdc_metrics_keys = [
"start",
# When starting with @ apply a rated differential
# WT tickets
"serverStatus.wiredTiger.concurrentTransactions.read.out",
"serverStatus.wiredTiger.concurrentTransactions.write.out",
"serverStatus.wiredTiger.concurrentTransactions.read.totalTickets",
"serverStatus.wiredTiger.concurrentTransactions.write.totalTickets",
# application threads
"@serverStatus.wiredTiger.lock.checkpoint lock application thread wait time (usecs)",
"@serverStatus.wiredTiger.lock.dhandle lock application thread time waiting (usecs)",
"@serverStatus.wiredTiger.lock.durable timestamp queue lock application thread time waiting (usecs)",
"@serverStatus.wiredTiger.lock.metadata lock application thread wait time (usecs)",
"@serverStatus.wiredTiger.lock.read timestamp queue lock application thread time waiting (usecs)",
"@serverStatus.wiredTiger.lock.schema lock application thread wait time (usecs)",
"@serverStatus.wiredTiger.lock.table lock application thread time waiting for the table lock (usecs)",
"@serverStatus.wiredTiger.lock.txn global lock application thread time waiting (usecs)",
"@serverStatus.wiredTiger.cache.cache overflow cursor application thread wait time (usecs)",
        # global WT lock acquisitions
"@serverStatus.wiredTiger.lock.txn global read lock acquisitions",
"@serverStatus.wiredTiger.connection.pthread mutex shared lock read-lock calls",
"@serverStatus.wiredTiger.connection.pthread mutex shared lock write-lock calls",
"@serverStatus.wiredTiger.connection.pthread mutex condition wait calls",
"@serverStatus.wiredTiger.transaction.transaction range of IDs currently pinned",
"@serverStatus.wiredTiger.transaction.transaction range of IDs currently pinned by a checkpoint",
# internal threads
"@serverStatus.wiredTiger.lock.checkpoint lock internal thread wait time (usecs)",
"@serverStatus.wiredTiger.lock.dhandle lock internal thread time waiting (usecs)",
"@serverStatus.wiredTiger.lock.metadata lock internal thread wait time (usecs)",
"@serverStatus.wiredTiger.lock.durable timestamp queue lock internal thread time waiting (usecs)",
"@serverStatus.wiredTiger.lock.read timestamp queue lock internal thread time waiting (usecs)",
"@serverStatus.wiredTiger.lock.schema lock internal thread wait time (usecs)",
"@serverStatus.wiredTiger.lock.table lock internal thread time waiting for the table lock (usecs)",
"@serverStatus.wiredTiger.lock.txn global lock internal thread time waiting (usecs)",
# capacities? learning how these work as a function of available CPU time (what domain?)...
"@serverStatus.wiredTiger.capacity.time waiting due to total capacity (usecs)",
"@serverStatus.wiredTiger.capacity.time waiting during checkpoint (usecs)",
"@serverStatus.wiredTiger.capacity.time waiting during eviction (usecs)",
"@serverStatus.wiredTiger.capacity.time waiting during logging (usecs)",
"@serverStatus.wiredTiger.capacity.time waiting during read (usecs)",
# cache, full-ness & pressure - unrated
"serverStatus.wiredTiger.cache.tracked dirty bytes in the cache",
"serverStatus.wiredTiger.cache.bytes currently in the cache",
"serverStatus.wiredTiger.cache.bytes dirty in the cache cumulative",
"serverStatus.wiredTiger.cache.bytes not belonging to page images in the cache",
"serverStatus.wiredTiger.cache.bytes belonging to the cache overflow table in the cache",
# cache, storage demand & pressure
"@serverStatus.wiredTiger.cache.bytes read into cache",
"@serverStatus.wiredTiger.cache.bytes written from cache",
"@serverStatus.wiredTiger.connection.total read I/Os",
"@serverStatus.wiredTiger.connection.total write I/Os",
"@serverStatus.wiredTiger.block-manager.bytes read",
"@serverStatus.wiredTiger.block-manager.bytes written",
# checkpoint pressure
"serverStatus.wiredTiger.transaction.transaction checkpoint currently running", # unrated
"serverStatus.wiredTiger.connection.files currently open", # unrated
"@serverStatus.wiredTiger.cache.eviction worker thread evicting pages",
"@serverStatus.wiredTiger.cache.hazard pointer check calls",
"@serverStatus.wiredTiger.cursor.cursor search near calls",
# overflow / lookaside pressure (on host demand) pre-4.4
"@serverStatus.wiredTiger.cache.cache overflow score",
"@serverStatus.wiredTiger.cache.cache overflow table entries",
"@serverStatus.wiredTiger.cache.cache overflow table insert calls",
"@serverStatus.wiredTiger.cache.cache overflow table remove calls",
# "@serverStatus.wiredTiger.cache.cache overflow cursor internal thread wait time (usecs)"
# new in 4.4:
"@serverStatus.wiredTiger.cache.history store score",
"@serverStatus.wiredTiger.cache.history store table on-disk size"
"@serverStatus.wiredTiger.cache.history store table insert calls",
"@serverStatus.wiredTiger.cache.history store table reads",
"@local.oplog.rs.stats.wiredTiger.reconciliation.overflow values written",
"@local.oplog.rs.stats.wiredTiger.btree.overflow pages",
"@serverStatus.wiredTiger.cache.overflow pages read into cache",
"@serverStatus.wiredTiger.cache.pages selected for eviction unable to be evicted as the parent page has overflow items",
# WT backup cursors
"serverStatus.storageEngine.backupCursorOpen",
"serverStatus.storageEngine.oldestRequiredTimestampForCrashRecovery.t",
"serverStatus.storageEngine.oldestRequiredTimestampForCrashRecovery.i",
# dhandles
"serverStatus.wiredTiger.data-handle.connection data handles currently active",
]
mm = p.get_metrics_list_numpy(ftdc_metrics_keys)
print(len(mm))
``` |
{
"source": "jorgeisa/Respaldo_EDD_Fase1",
"score": 3
} |
#### File: Respaldo_EDD_Fase1/team13/Main.py
```python
from timeit import default_timer
import Funciones as j
# Test for the functions
def test():
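    # Exercises createDatabase/createTable/insert plus the alter*/drop*/extract*
    # helpers from Funciones, printing the status code of each call.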
print("Creacion de DBs:")
print(j.createDatabase('DB1'), end="-")
print(j.createDatabase('DB2'), end="-")
print(j.createDatabase('DB3'), end="-")
print(j.createDatabase('DB4'), end="-")
print(j.createDatabase('DB5'), end="-")
print(j.createDatabase('DB6'), end="-")
print(j.createDatabase('DB7'), end="-")
print(j.createDatabase('DB8'), end="-")
print(j.createDatabase('DB9'), end="-")
print(j.createDatabase('DB10'), end="-")
print(j.createDatabase('DB11'), end="-")
print(j.createDatabase('DB12'), end="-")
print(j.createDatabase('DB13'), end="-")
print(j.createDatabase('DB14'), end="-")
print(j.createDatabase('DB15'), end="-")
print(j.createDatabase('DB16'), end="-")
print(j.createDatabase('DB17'), end="-")
print(j.createDatabase('DB18'), end="-")
print(j.createDatabase('DB19'), end="-")
print(j.createDatabase('DB20'), end="-")
print(j.createDatabase('DB21'), end="-")
print(j.createDatabase('DB22'), end="-")
print(j.createDatabase('DB23'), end="-")
print(j.createDatabase('DB24'), end="-")
print(j.createDatabase('DB25'))
print(j.createDatabase('DB26'), end="-")
print(j.createDatabase('DB27'), end="-")
print(j.createDatabase('DB28'), end="-")
print(j.createDatabase('DB29'), end="-")
print(j.createDatabase('DB30'), end="-")
print(j.createDatabase('DB31'), end="-")
print(j.createDatabase('DB32'), end="-")
print(j.createDatabase('DB33'), end="-")
print(j.createDatabase('DB34'), end="-")
print(j.createDatabase('DB35'), end="-")
print(j.createDatabase('DB36'), end="-")
print(j.createDatabase('DB37'), end="-")
print(j.createDatabase('DB38'), end="-")
print(j.createDatabase('DB39'), end="-")
print(j.createDatabase('DB40'), end="-")
print(j.createDatabase('DB41'), end="-")
print(j.createDatabase('DB42'), end="-")
print(j.createDatabase('DB43'), end="-")
print(j.createDatabase('DB44'), end="-")
print(j.createDatabase('DB45'), end="-")
print(j.createDatabase('DB46'), end="-")
print(j.createDatabase('DB47'), end="-")
print(j.createDatabase('DB48'), end="-")
print(j.createDatabase('DB49'), end="-")
print(j.createDatabase('DB50'))
print(j.createDatabase('DB51'), end="-")
print(j.createDatabase('DB52'), end="-")
print(j.createDatabase('DB53'), end="-")
print(j.createDatabase('DB54'), end="-")
print(j.createDatabase('DB55'), end="-")
print(j.createDatabase('DB56'), end="-")
print(j.createDatabase('DB57'), end="-")
print(j.createDatabase('DB58'), end="-")
print(j.createDatabase('DB59'), end="-")
print(j.createDatabase('DB60'), end="-")
print(j.createDatabase('DB61'), end="-")
print(j.createDatabase('DB62'), end="-")
print(j.createDatabase('DB63'), end="-")
print(j.createDatabase('DB64'), end="-")
print(j.createDatabase('DB65'), end="-")
print(j.createDatabase('DB66'), end="-")
print(j.createDatabase('DB67'), end="-")
print(j.createDatabase('DB68'), end="-")
print(j.createDatabase('DB69'), end="-")
print(j.createDatabase('DB70'), end="-")
print(j.createDatabase('DB71'), end="-")
print(j.createDatabase('DB72'), end="-")
print(j.createDatabase('DB73'), end="-")
print(j.createDatabase('DB74'), end="-")
print(j.createDatabase('DB75'))
print(j.createDatabase('DB76'), end="-")
print(j.createDatabase('DB77'), end="-")
print(j.createDatabase('DB78'), end="-")
print(j.createDatabase('DB79'), end="-")
print(j.createDatabase('DB80'), end="-")
print(j.createDatabase('DB81'), end="-")
print(j.createDatabase('DB82'), end="-")
print(j.createDatabase('DB83'), end="-")
print(j.createDatabase('DB84'), end="-")
print(j.createDatabase('DB85'), end="-")
print(j.createDatabase('DB86'), end="-")
print(j.createDatabase('DB87'), end="-")
print(j.createDatabase('DB88'), end="-")
print(j.createDatabase('DB89'), end="-")
print(j.createDatabase('DB90'), end="-")
print(j.createDatabase('DB91'), end="-")
print(j.createDatabase('DB92'), end="-")
print(j.createDatabase('DB93'), end="-")
print(j.createDatabase('DB94'), end="-")
print(j.createDatabase('DB95'), end="-")
print(j.createDatabase('DB96'), end="-")
print(j.createDatabase('DB97'), end="-")
print(j.createDatabase('DB98'), end="-")
print(j.createDatabase('DB99'), end="-")
print(j.createDatabase('DB100'))
print("\nCreacion de Tablas en DBs:")
print("Creacion Tablas DB2:")
print(j.createTable('DB2', 'Table1_DB2', 2), end="-")
print(j.createTable('DB2', 'Table2_DB2', 2), end="-")
print(j.createTable('DB2', 'Table3_DB2', 2), end="-")
print(j.createTable('DB2', 'Table4_DB2', 2), end="-")
print(j.createTable('DB2', 'Table5_DB2', 2), end="-")
print(j.createTable('DB2', 'Table6_DB2', 2), end="-")
print(j.createTable('DB2', 'Table7_DB2', 2), end="-")
print(j.createTable('DB2', 'Table8_DB2', 2), end="-")
print(j.createTable('DB2', 'Table9_DB2', 2), end="-")
print(j.createTable('DB2', 'Table10_DB2', 2), end="-")
print(j.createTable('DB2', 'Table11_DB2', 2), end="-")
print(j.createTable('DB2', 'Table12_DB2', 2), end="-")
print(j.createTable('DB2', 'Table13_DB2', 2), end="-")
print(j.createTable('DB2', 'Table14_DB2', 2), end="-")
print(j.createTable('DB2', 'Table15_DB2', 2), end="-")
print(j.createTable('DB2', 'Table16_DB2', 2), end="-")
print(j.createTable('DB2', 'Table17_DB2', 2), end="-")
print(j.createTable('DB2', 'Table18_DB2', 2), end="-")
print(j.createTable('DB2', 'Table19_DB2', 2), end="-")
print(j.createTable('DB2', 'Table20_DB2', 2), end="-")
print(j.createTable('DB2', 'Table21_DB2', 2), end="-")
print(j.createTable('DB2', 'Table22_DB2', 2), end="-")
print(j.createTable('DB2', 'Table23_DB2', 2), end="-")
print(j.createTable('DB2', 'Table24_DB2', 2), end="-")
print(j.createTable('DB2', 'Table25_DB2', 2))
print(j.createTable('DB2', 'Table26_DB2', 2), end="-")
print(j.createTable('DB2', 'Table27_DB2', 2), end="-")
print(j.createTable('DB2', 'Table28_DB2', 2), end="-")
print(j.createTable('DB2', 'Table29_DB2', 2), end="-")
print(j.createTable('DB2', 'Table30_DB2', 2), end="-")
print(j.createTable('DB2', 'Table31_DB2', 2), end="-")
print(j.createTable('DB2', 'Table32_DB2', 2), end="-")
print(j.createTable('DB2', 'Table33_DB2', 2), end="-")
print(j.createTable('DB2', 'Table34_DB2', 2), end="-")
print(j.createTable('DB2', 'Table35_DB2', 2), end="-")
print(j.createTable('DB2', 'Table36_DB2', 2), end="-")
print(j.createTable('DB2', 'Table37_DB2', 2), end="-")
print(j.createTable('DB2', 'Table38_DB2', 2), end="-")
print(j.createTable('DB2', 'Table39_DB2', 2), end="-")
print(j.createTable('DB2', 'Table40_DB2', 2), end="-")
print(j.createTable('DB2', 'Table41_DB2', 2), end="-")
print(j.createTable('DB2', 'Table42_DB2', 2), end="-")
print(j.createTable('DB2', 'Table43_DB2', 2), end="-")
print(j.createTable('DB2', 'Table44_DB2', 2), end="-")
print(j.createTable('DB2', 'Table45_DB2', 2), end="-")
print(j.createTable('DB2', 'Table46_DB2', 2), end="-")
print(j.createTable('DB2', 'Table47_DB2', 2), end="-")
print(j.createTable('DB2', 'Table48_DB2', 2), end="-")
print(j.createTable('DB2', 'Table49_DB2', 2), end="-")
print(j.createTable('DB2', 'Table50_DB2', 2))
    # Creating tables in DB4
print("Creacion Tablas DB4:")
print(j.createTable('DB4', 'Table1_DB4', 4), end="-")
print(j.createTable('DB4', 'Table2_DB4', 4), end="-")
print(j.createTable('DB4', 'Table3_DB4', 4), end="-")
print(j.createTable('DB4', 'Table4_DB4', 4), end="-")
print(j.createTable('DB4', 'Table5_DB4', 4), end="-")
print(j.createTable('DB4', 'Table6_DB4', 4), end="-")
print(j.createTable('DB4', 'Table7_DB4', 4), end="-")
    print(j.createTable('DB4', 'Table8_DB4', 4), end="-")
print(j.createTable('DB4', 'Table9_DB4', 4), end="-")
print(j.createTable('DB4', 'Table10_DB4', 4), end="-")
print(j.createTable('DB4', 'Table11_DB4', 4), end="-")
print(j.createTable('DB4', 'Table12_DB4', 4), end="-")
print(j.createTable('DB4', 'Table13_DB4', 4), end="-")
print(j.createTable('DB4', 'Table14_DB4', 4), end="-")
print(j.createTable('DB4', 'Table15_DB4', 4), end="-")
print(j.createTable('DB4', 'Table16_DB4', 4), end="-")
print(j.createTable('DB4', 'Table17_DB4', 4), end="-")
print(j.createTable('DB4', 'Table18_DB4', 4), end="-")
print(j.createTable('DB4', 'Table19_DB4', 4), end="-")
print(j.createTable('DB4', 'Table20_DB4', 4), end="-")
print(j.createTable('DB4', 'Table21_DB4', 4), end="-")
print(j.createTable('DB4', 'Table22_DB4', 4), end="-")
print(j.createTable('DB4', 'Table23_DB4', 4), end="-")
print(j.createTable('DB4', 'Table24_DB4', 4), end="-")
print(j.createTable('DB4', 'Table25_DB4', 4))
print(j.createTable('DB4', 'Table26_DB4', 4), end="-")
print(j.createTable('DB4', 'Table27_DB4', 4), end="-")
print(j.createTable('DB4', 'Table28_DB4', 4), end="-")
print(j.createTable('DB4', 'Table29_DB4', 4), end="-")
print(j.createTable('DB4', 'Table30_DB4', 4), end="-")
print(j.createTable('DB4', 'Table31_DB4', 4), end="-")
print(j.createTable('DB4', 'Table32_DB4', 4), end="-")
print(j.createTable('DB4', 'Table33_DB4', 4), end="-")
print(j.createTable('DB4', 'Table34_DB4', 4), end="-")
print(j.createTable('DB4', 'Table35_DB4', 4), end="-")
print(j.createTable('DB4', 'Table36_DB4', 4), end="-")
print(j.createTable('DB4', 'Table37_DB4', 4), end="-")
print(j.createTable('DB4', 'Table38_DB4', 4), end="-")
print(j.createTable('DB4', 'Table39_DB4', 4), end="-")
print(j.createTable('DB4', 'Table40_DB4', 4), end="-")
print(j.createTable('DB4', 'Table41_DB4', 4), end="-")
print(j.createTable('DB4', 'Table42_DB4', 4), end="-")
print(j.createTable('DB4', 'Table43_DB4', 4), end="-")
print(j.createTable('DB4', 'Table44_DB4', 4), end="-")
print(j.createTable('DB4', 'Table45_DB4', 4), end="-")
print(j.createTable('DB4', 'Table46_DB4', 4), end="-")
print(j.createTable('DB4', 'Table47_DB4', 4), end="-")
print(j.createTable('DB4', 'Table48_DB4', 4), end="-")
print(j.createTable('DB4', 'Table49_DB4', 4), end="-")
print(j.createTable('DB4', 'Table50_DB4', 4))
print("Creacion Tablas DB6:")
print(j.createTable('DB6', 'Table1_DB6', 6), end="-")
print(j.createTable('DB6', 'Table2_DB6', 6), end="-")
print(j.createTable('DB6', 'Table3_DB6', 6), end="-")
print(j.createTable('DB6', 'Table4_DB6', 6), end="-")
print(j.createTable('DB6', 'Table5_DB6', 6), end="-")
print(j.createTable('DB6', 'Table6_DB6', 6), end="-")
print(j.createTable('DB6', 'Table7_DB6', 6), end="-")
print(j.createTable('DB6', 'Table8_DB6', 6), end="-")
print(j.createTable('DB6', 'Table9_DB6', 6), end="-")
print(j.createTable('DB6', 'Table10_DB6', 6), end="-")
print(j.createTable('DB6', 'Table11_DB6', 6), end="-")
print(j.createTable('DB6', 'Table12_DB6', 6), end="-")
print(j.createTable('DB6', 'Table13_DB6', 6), end="-")
print(j.createTable('DB6', 'Table14_DB6', 6), end="-")
print(j.createTable('DB6', 'Table15_DB6', 6), end="-")
print(j.createTable('DB6', 'Table16_DB6', 6), end="-")
print(j.createTable('DB6', 'Table17_DB6', 6), end="-")
print(j.createTable('DB6', 'Table18_DB6', 6), end="-")
print(j.createTable('DB6', 'Table19_DB6', 6), end="-")
print(j.createTable('DB6', 'Table20_DB6', 6), end="-")
print(j.createTable('DB6', 'Table21_DB6', 6), end="-")
print(j.createTable('DB6', 'Table22_DB6', 6), end="-")
print(j.createTable('DB6', 'Table23_DB6', 6), end="-")
print(j.createTable('DB6', 'Table24_DB6', 6), end="-")
print(j.createTable('DB6', 'Table25_DB6', 6))
print(j.createTable('DB6', 'Table26_DB6', 6), end="-")
print(j.createTable('DB6', 'Table27_DB6', 6), end="-")
print(j.createTable('DB6', 'Table28_DB6', 6), end="-")
print(j.createTable('DB6', 'Table29_DB6', 6), end="-")
print(j.createTable('DB6', 'Table30_DB6', 6), end="-")
print(j.createTable('DB6', 'Table31_DB6', 6), end="-")
print(j.createTable('DB6', 'Table32_DB6', 6), end="-")
print(j.createTable('DB6', 'Table33_DB6', 6), end="-")
print(j.createTable('DB6', 'Table34_DB6', 6), end="-")
print(j.createTable('DB6', 'Table35_DB6', 6), end="-")
print(j.createTable('DB6', 'Table36_DB6', 6), end="-")
print(j.createTable('DB6', 'Table37_DB6', 6), end="-")
print(j.createTable('DB6', 'Table38_DB6', 6), end="-")
print(j.createTable('DB6', 'Table39_DB6', 6), end="-")
print(j.createTable('DB6', 'Table40_DB6', 6), end="-")
print(j.createTable('DB6', 'Table41_DB6', 6), end="-")
print(j.createTable('DB6', 'Table42_DB6', 6), end="-")
print(j.createTable('DB6', 'Table43_DB6', 6), end="-")
print(j.createTable('DB6', 'Table44_DB6', 6), end="-")
print(j.createTable('DB6', 'Table45_DB6', 6), end="-")
print(j.createTable('DB6', 'Table46_DB6', 6), end="-")
print(j.createTable('DB6', 'Table47_DB6', 6), end="-")
print(j.createTable('DB6', 'Table48_DB6', 6), end="-")
print(j.createTable('DB6', 'Table49_DB6', 6), end="-")
print(j.createTable('DB6', 'Table50_DB6', 6))
print("Creacion Tablas DB8:")
print(j.createTable('DB8', 'Table1_DB8', 8), end="-")
print(j.createTable('DB8', 'Table2_DB8', 8), end="-")
print(j.createTable('DB8', 'Table3_DB8', 8), end="-")
print(j.createTable('DB8', 'Table4_DB8', 8), end="-")
print(j.createTable('DB8', 'Table5_DB8', 8), end="-")
print(j.createTable('DB8', 'Table6_DB8', 8), end="-")
print(j.createTable('DB8', 'Table7_DB8', 8), end="-")
print(j.createTable('DB8', 'Table8_DB8', 8), end="-")
print(j.createTable('DB8', 'Table9_DB8', 8), end="-")
print(j.createTable('DB8', 'Table10_DB8', 8), end="-")
print(j.createTable('DB8', 'Table11_DB8', 8), end="-")
print(j.createTable('DB8', 'Table12_DB8', 8), end="-")
print(j.createTable('DB8', 'Table13_DB8', 8), end="-")
print(j.createTable('DB8', 'Table14_DB8', 8), end="-")
print(j.createTable('DB8', 'Table15_DB8', 8), end="-")
print(j.createTable('DB8', 'Table16_DB8', 8), end="-")
print(j.createTable('DB8', 'Table17_DB8', 8), end="-")
print(j.createTable('DB8', 'Table18_DB8', 8), end="-")
print(j.createTable('DB8', 'Table19_DB8', 8), end="-")
print(j.createTable('DB8', 'Table20_DB8', 8), end="-")
print(j.createTable('DB8', 'Table21_DB8', 8), end="-")
print(j.createTable('DB8', 'Table22_DB8', 8), end="-")
print(j.createTable('DB8', 'Table23_DB8', 8), end="-")
print(j.createTable('DB8', 'Table24_DB8', 8), end="-")
print(j.createTable('DB8', 'Table25_DB8', 8))
print(j.createTable('DB8', 'Table26_DB8', 8), end="-")
print(j.createTable('DB8', 'Table27_DB8', 8), end="-")
print(j.createTable('DB8', 'Table28_DB8', 8), end="-")
print(j.createTable('DB8', 'Table29_DB8', 8), end="-")
print(j.createTable('DB8', 'Table30_DB8', 8), end="-")
print(j.createTable('DB8', 'Table31_DB8', 8), end="-")
print(j.createTable('DB8', 'Table32_DB8', 8), end="-")
print(j.createTable('DB8', 'Table33_DB8', 8), end="-")
print(j.createTable('DB8', 'Table34_DB8', 8), end="-")
print(j.createTable('DB8', 'Table35_DB8', 8), end="-")
print(j.createTable('DB8', 'Table36_DB8', 8), end="-")
print(j.createTable('DB8', 'Table37_DB8', 8), end="-")
print(j.createTable('DB8', 'Table38_DB8', 8), end="-")
print(j.createTable('DB8', 'Table39_DB8', 8), end="-")
print(j.createTable('DB8', 'Table40_DB8', 8), end="-")
print(j.createTable('DB8', 'Table41_DB8', 8), end="-")
print(j.createTable('DB8', 'Table42_DB8', 8), end="-")
print(j.createTable('DB8', 'Table43_DB8', 8), end="-")
print(j.createTable('DB8', 'Table44_DB8', 8), end="-")
print(j.createTable('DB8', 'Table45_DB8', 8), end="-")
print(j.createTable('DB8', 'Table46_DB8', 8), end="-")
print(j.createTable('DB8', 'Table47_DB8', 8), end="-")
print(j.createTable('DB8', 'Table48_DB8', 8), end="-")
print(j.createTable('DB8', 'Table49_DB8', 8), end="-")
print(j.createTable('DB8', 'Table50_DB8', 8))
print("Creacion Tablas DB10:")
print(j.createTable('DB10', 'Table1_DB10', 10), end="-")
print(j.createTable('DB10', 'Table2_DB10', 10), end="-")
print(j.createTable('DB10', 'Table3_DB10', 10), end="-")
print(j.createTable('DB10', 'Table4_DB10', 10), end="-")
print(j.createTable('DB10', 'Table5_DB10', 10), end="-")
print(j.createTable('DB10', 'Table6_DB10', 10), end="-")
print(j.createTable('DB10', 'Table7_DB10', 10), end="-")
print(j.createTable('DB10', 'Table8_DB10', 10), end="-")
print(j.createTable('DB10', 'Table9_DB10', 10), end="-")
print(j.createTable('DB10', 'Table10_DB10', 10), end="-")
print(j.createTable('DB10', 'Table11_DB10', 10), end="-")
print(j.createTable('DB10', 'Table12_DB10', 10), end="-")
print(j.createTable('DB10', 'Table13_DB10', 10), end="-")
print(j.createTable('DB10', 'Table14_DB10', 10), end="-")
print(j.createTable('DB10', 'Table15_DB10', 10), end="-")
print(j.createTable('DB10', 'Table16_DB10', 10), end="-")
print(j.createTable('DB10', 'Table17_DB10', 10), end="-")
print(j.createTable('DB10', 'Table18_DB10', 10), end="-")
print(j.createTable('DB10', 'Table19_DB10', 10), end="-")
print(j.createTable('DB10', 'Table20_DB10', 10), end="-")
print(j.createTable('DB10', 'Table21_DB10', 10), end="-")
print(j.createTable('DB10', 'Table22_DB10', 10), end="-")
print(j.createTable('DB10', 'Table23_DB10', 10), end="-")
print(j.createTable('DB10', 'Table24_DB10', 10), end="-")
print(j.createTable('DB10', 'Table25_DB10', 10))
print(j.createTable('DB10', 'Table26_DB10', 10), end="-")
print(j.createTable('DB10', 'Table27_DB10', 10), end="-")
print(j.createTable('DB10', 'Table28_DB10', 10), end="-")
print(j.createTable('DB10', 'Table29_DB10', 10), end="-")
print(j.createTable('DB10', 'Table30_DB10', 10), end="-")
print(j.createTable('DB10', 'Table31_DB10', 10), end="-")
print(j.createTable('DB10', 'Table32_DB10', 10), end="-")
print(j.createTable('DB10', 'Table33_DB10', 10), end="-")
print(j.createTable('DB10', 'Table34_DB10', 10), end="-")
print(j.createTable('DB10', 'Table35_DB10', 10), end="-")
print(j.createTable('DB10', 'Table36_DB10', 10), end="-")
print(j.createTable('DB10', 'Table37_DB10', 10), end="-")
print(j.createTable('DB10', 'Table38_DB10', 10), end="-")
print(j.createTable('DB10', 'Table39_DB10', 10), end="-")
print(j.createTable('DB10', 'Table40_DB10', 10), end="-")
print(j.createTable('DB10', 'Table41_DB10', 10), end="-")
print(j.createTable('DB10', 'Table42_DB10', 10), end="-")
print(j.createTable('DB10', 'Table43_DB10', 10), end="-")
print(j.createTable('DB10', 'Table44_DB10', 10), end="-")
print(j.createTable('DB10', 'Table45_DB10', 10), end="-")
print(j.createTable('DB10', 'Table46_DB10', 10), end="-")
print(j.createTable('DB10', 'Table47_DB10', 10), end="-")
print(j.createTable('DB10', 'Table48_DB10', 10), end="-")
print(j.createTable('DB10', 'Table49_DB10', 10), end="-")
print(j.createTable('DB10', 'Table50_DB10', 10))
print('\nj.insertando tuplas en DB4:')
print(j.insert('DB4', 'Table1_DB4', [1, "Manuel", 0.5, "Azul"]), end='-')
print(j.insert('DB4', 'Table1_DB4', [2, "Gabriela", 1.5, "Amarillo"]), end='-')
print(j.insert('DB4', 'Table1_DB4', [3, "Diego", 2.5, "Verde"]), end='-')
print(j.insert('DB4', 'Table1_DB4', [4, "Antonio", 3.5, "Rosado"]), end='-')
print(j.insert('DB4', 'Table1_DB4', [5, "Jorge", 4.5, "Anaranjado"]), end='-')
print(j.insert('DB4', 'Table1_DB4', [6, "Mynor", 5.5, "Gris"]), end='-')
print(j.insert('DB4', 'Table1_DB4', [7, "Kevin", 6.5, "Celeste"]), end='-')
print(j.insert('DB4', 'Table1_DB4', [8, "Aejandro", 7.5, "Blanco"]), end='-')
print(j.insert('DB4', 'Table1_DB4', [9, "Nathan", 8.5, "Negro"]), end='-')
print(j.insert('DB4', 'Table1_DB4', [10, "Jessica", 9.5, "Rojo"]), end='-')
print(j.insert('DB4', 'Table1_DB4', [11, "Ericha", 10.5, "Azul"]), end='-')
print(j.insert('DB4', 'Table1_DB4', [12, "Merry", 11.5, "Amarillo"]), end='-')
print(j.insert('DB4', 'Table1_DB4', [13, "Sib", 12.5, "Verde"]), end='-')
print(j.insert('DB4', 'Table1_DB4', [14, "Violetta", 13.5, "Rosado"]), end='-')
print(j.insert('DB4', 'Table1_DB4', [15, "Meghan", 14.5, "Anaranjado"]), end='-')
print(j.insert('DB4', 'Table1_DB4', [16, "Heinrick", 15.5, "Gris"]), end='-')
print(j.insert('DB4', 'Table1_DB4', [17, "Tiler", 16.5, "Celeste"]), end='-')
print(j.insert('DB4', 'Table1_DB4', [18, "Dennie", 17.5, "Blanco"]), end='-')
print(j.insert('DB4', 'Table1_DB4', [19, "Dorie", 18.5, "Negro"]), end='-')
print(j.insert('DB4', 'Table1_DB4', [20, "Niles", 19.5, "Rojo"]), end='-')
print(j.insert('DB4', 'Table1_DB4', [21, "Olag", 20.5, "Azul"]), end='-')
print(j.insert('DB4', 'Table1_DB4', [22, "Noland", 21.5, "Amarillo"]), end='-')
print(j.insert('DB4', 'Table1_DB4', [23, "Paulita", 22.5, "Verde"]), end='-')
print(j.insert('DB4', 'Table1_DB4', [24, "Forrest", 23.5, "Rosado"]), end='-')
print(j.insert('DB4', 'Table1_DB4', [25, "Ulrick", 24.5, "Anaranjado"]))
print(j.insert('DB4', 'Table1_DB4', [26, "Angil", 25.5, "Gris"]), end='-')
print(j.insert('DB4', 'Table1_DB4', [27, "Fiona", 26.5, "Celeste"]), end='-')
print(j.insert('DB4', 'Table1_DB4', [28, "Rodrick", 27.5, "Blanco"]), end='-')
print(j.insert('DB4', 'Table1_DB4', [29, "Carolyne", 28.5, "Negro"]), end='-')
print(j.insert('DB4', 'Table1_DB4', [30, "Cortney", 29.5, "Rojo"]), end='-')
print(j.insert('DB4', 'Table1_DB4', [31, "Byron", 30.5, "Azul"]), end='-')
print(j.insert('DB4', 'Table1_DB4', [32, "Lazarus", 31.5, "Amarillo"]), end='-')
print(j.insert('DB4', 'Table1_DB4', [33, "Cyndy", 32.5, "Verde"]), end='-')
print(j.insert('DB4', 'Table1_DB4', [34, "Becca", 33.5, "Rosado"]), end='-')
print(j.insert('DB4', 'Table1_DB4', [35, "Brody", 34.5, "Anaranjado"]), end='-')
print(j.insert('DB4', 'Table1_DB4', [36, "Darda", 35.5, "Gris"]), end='-')
print(j.insert('DB4', 'Table1_DB4', [37, "Patrice", 36.5, "Celeste"]), end='-')
print(j.insert('DB4', 'Table1_DB4', [38, "Bay", 37.5, "Blanco"]), end='-')
print(j.insert('DB4', 'Table1_DB4', [39, "Giffy", 38.5, "Negro"]), end='-')
print(j.insert('DB4', 'Table1_DB4', [40, "Hallsy", 39.5, "Rojo"]), end='-')
print(j.insert('DB4', 'Table1_DB4', [41, "Elinor", 40.5, "Azul"]), end='-')
print(j.insert('DB4', 'Table1_DB4', [42, "Maitilde", 41.5, "Amarillo"]), end='-')
print(j.insert('DB4', 'Table1_DB4', [43, "Van", 42.5, "Verde"]), end='-')
print(j.insert('DB4', 'Table1_DB4', [44, "Marcel", 43.5, "Rosado"]), end='-')
print(j.insert('DB4', 'Table1_DB4', [45, "Giselle", 44.5, "Anaranjado"]), end='-')
print(j.insert('DB4', 'Table1_DB4', [46, "Olympe", 45.5, "Gris"]), end='-')
print(j.insert('DB4', 'Table1_DB4', [47, "Roxi", 46.5, "Celeste"]), end='-')
print(j.insert('DB4', 'Table1_DB4', [48, "Debbi", 47.5, "Blanco"]), end='-')
print(j.insert('DB4', 'Table1_DB4', [49, "Baron", 48.5, "Negro"]), end='-')
print(j.insert('DB4', 'Table1_DB4', [50, "Debera", 49.5, "Rojo"]))
print("Impresion 1")
testShow('DB4', 'Table1_DB4')
print("\nProbando j.alterDatabase: DB4")
print(j.alterDatabase("DB4", "DB4P"), end='-') # Caso bueno
print(j.alterDatabase("DB4_Plus2", 7), end='-') # error
print(j.alterDatabase("DB4", "DB4_Plus"), end='-') # no existe old
print(j.alterDatabase("DB4_Plus", "DB5")) # existe new
print("\nImpresion 2:")
testShow("DB4P", 'Table1_DB4')
print('\nProbando j.dropDatabase:')
print(j.dropDatabase('DB5'), end='-')
# print(j.dropDatabase('DB4P'), end='-')
print(j.dropDatabase('DB11'), end='-')
print(j.dropDatabase('@'), end='-') # error
    print(j.dropDatabase('DB4_Plus8'))  # does not exist
print("\nProbando j.extractTable:")
print(j.extractTable("DB4P", "Table1_DB4")) # Lista de tuplas
print(j.extractTable("DB4P", "Table2_DB4"), end="-") # Lista vacia, sin tupla
print(j.extractTable("DB4Pll", "Table2_DB4"), end="-") # DB no existe
print(j.extractTable("DB4P", "Table2_DB4ll"), end="-") # Tabla no existe
print(j.extractTable("DB4P", 1)) # Error
print("\nImpresion 3:")
testShow("DB4P", 'Table1_DB4')
print("\nProbado el j.extractRangeTable:")
print(j.extractRangeTable("DB4P", "Table1_DB4", 2, 22.5, 39.5)) # Existe valido
print(j.extractRangeTable("DB4P", "Table2_DB4", 2, 22.5, 39.5), end="-") # No existen tuplas
print(j.extractRangeTable("DB4P", 1, 2, 22.5, 39.5)) # no existe base, tabla o error
print("\nProbado el j.alterAddPK:")
print(j.alterAddPK("DB4P", "Table1_DB4", [0, 5]), end='-') # Columna fuera de limite
print(j.alterAddPK("DB4P", "Table1_DB4", [0, 1, 2, 3, 4, 5]), end='-') # Fuera de rango
print(j.alterAddPK("DB4P", "Table1_DB4", [0, "Colu"]), end="-") # Error
print(j.alterAddPK("DB4Pl", "Table1_DB4", [0, 2]), end="-") # DB no existente
print(j.alterAddPK("DB4P", "Table1_DB4l", [0, 2]), end="-") # Tabla no existente
print(j.alterAddPK("DB4P", "Table1_DB4", [3]), end="-") # Hay repetidos, error
# print(j.alterAddPK("DB4P", "Table1_DB4", [0, 2, 3]), end="-") # Hay repetidos, error
print(j.alterAddPK("DB4P", "Table1_DB4", [0, 2]), end="-") # Exitoso
print(j.alterAddPK("DB4P", "Table1_DB4", [1])) # ya existe llave primaria
print("\nProbando el AlterDrop:")
print(j.alterDropPK("DB4P", 7), end="-") # Error
print(j.alterDropPK("DB4Pll", "Table1_DB4"), end="-") # db no existe
print(j.alterDropPK("DB4P", "Table1_DB4ll"), end="-") # tabla no existe
print(j.alterDropPK("DB4P", "Table2_DB4"), end="-") # pk no existe
print(j.alterDropPK("DB4P", "Table1_DB4")) # Exito
print("\nProbando el j.alterTable:")
print(j.alterTable("DB4P", [0], "nuevaTabla"), end="-") # error
print(j.alterTable("DB4Pll", "tablaAntigua", "nuevaTabla"), end="-") # DB no existe
print(j.alterTable("DB4P", "Table1_DB4ll", "TableNew_DB4"), end="-") # Tabla no existe
print(j.alterTable("DB4P", "Table1_DB4", "Table2_DB4"), end="-") # New existe
print(j.alterTable("DB4P", "Table1_DB4", "TableNew_DB4")) # New existe
print("\nProbando el j.alterAddColumn:")
print(j.alterAddColumn("DB4P", "TableNew_DB4", "NuevaColumna"), end="-")
print(j.alterAddColumn("DB4P", "TableNew_DB4", []), end="-") # error
print(j.alterAddColumn("DB4Plll", "TableNew_DB4", "NuevaColumna"), end="-") # db no existe
print(j.alterAddColumn("DB4P", "TableNew_DB4lll", "NuevaColumna")) # tabla no existe
print("\nImpresion 4:")
testShow("DB4P", 'TableNew_DB4')
print("\nProbando j.alterDropColumn:")
print(j.alterDropColumn("DB4P", "TableNew_DB4", [0]), end="-") # Error
print(j.alterDropColumn("DB4Pll", "TableNew_DB4", 0), end="-") # DB no existe
print(j.alterDropColumn("DB4P", "TableNew_DB4ll", 0), end="-") # Tabla no existe
print(j.alterDropColumn("DB4P", "TableNew_DB4", 5), end="-") # Fuera de limites
print(j.alterDropColumn("DB4P", "TableNew_DB4", 4), end="-") # Correcto
print(j.alterDropColumn("DB4P", "TableNew_DB4", 3), end="-") # Correcto
print(j.alterDropColumn("DB4P", "TableNew_DB4", 2), end="-") # Correcto
# print(j.alterDropColumn("DB4P", "TableNew_DB4", 1), end="-") # Correcto
print(j.alterAddPK("DB4P", "TableNew_DB4", [1])) # **Prueba de llave primaria. comentar linea superior**
print(j.alterDropColumn("DB4P", "TableNew_DB4", 1)) # Incorrecto, no se puede vaciar la tabala
print("\nImpresion 5:")
testShow("DB4P", 'TableNew_DB4')
print("\nProbando j.dropTable:")
print(j.dropTable("DB4P", "TableNew_DB4"))
def testShow(db, tabla):
print("\nImprimiento DB")
print(j.showDatabases())
print("\nImprimiento Tablas", db)
print(j.showTables(db))
print("\nImprimiento Tuplas", db)
print(j.extractTable(db, tabla))
# Main()
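# Time the full test run with a simple wall-clock timer (seconds).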
inicio = default_timer()
test()
fin = default_timer()
print(fin - inicio)
``` |
{
"source": "jorgeisa/Respaldo_EDD_Fase2",
"score": 3
} |
#### File: fase2/team13/main.py
```python
from Funciones import *
import Funciones as j
def test():
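    # Covers: creating databases in several storage modes, adding and dropping
    # foreign keys, and migrating db6 from 'b' to 'bplus' storage with
    # alterDatabaseMode while its tables and registers are preserved.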
print('#' * 10 + ' DATABASES ')
print(createDatabase('db6', 'b', 'utf8'), end='-')
print(createDatabase('db6', 'bplus', 'ascii'), end='-') # 2, exist db
print(createDatabase('db1', 'avl', 'asci'), end='-') # 4, encoding incorrect
print(createDatabase('db2', 'avl', 'utf8'), end='-')
print(createDatabase('db8', 'isam', 'utf8'), end='-')
print(createDatabase('db4', 'jsona', 'utf8'), end='-') # 3, mode incorrect
print(createDatabase('db5', 'json', 'utf8'), end='-')
print(createDatabase('db7', 'hash', 'ascii'))
print('#' * 10 + ' TABLES ')
print(createTable('db6', 'Table1_DB6', 3), end='-')
print(createTable('db6', 'Table2_DB6', 2), end='-')
print(createTable('db6', 'Table3_DB6', 4), end='-')
print(createTable('db7', 'Table1_DB7', 4), end='-')
print(createTable('db7', 'Table2_DB7', 3))
print('#' * 10 + ' REGISTERS ')
print('DB6 - TABLE1', end=': ')
print(insert('db66', 'Table1', ['A1', 'B1', 'C1']), end='-') # 2, database doesn't exist
print(insert('db6', 'Table1', ['A2', 'B2', 'C2']), end='-')
print(insert('db6', 'Table1', ['A3', 'B3', 'C3']))
print('DB6 - TABLE2', end=': ')
print(insert('db6', 'Table2', ['A12', 'B12', 'C12']), end='-')
print(insert('db6', 'Table2', ['A22', 'B22', 'C22']), end='-')
print(insert('db6', 'Table2', ['A32', 'B32', 'C32']))
print('#' * 10 + ' ALTER_DATABASE_MODE')
print('BEFORE')
dictionary = load('metadata')
showDict(dictionary)
showMode('b')
showMode('bplus')
print(alterTableAddFK('db6', 'Table2_DB6', 'Rel1', [0, 1], 'Table1_DB6', [0])) # 0
print(alterTableAddFK('db63', 'Table2_DB6', 'Rel1', [0, 1], 'Table1_DB6', [0])) # 2
print(alterTableAddFK('db6', 'Table2', 'Rel1', [0, 1], 'Table1_DB6', [0])) # 3
print(alterTableAddFK('db6', 'Table2_DB6', 'Rel1', [0, 1], 'Table1', [0])) # 3
print(alterTableAddFK('db6', 'Table2_DB6', 'Rel1', [], 'Table1_DB6', [0])) # 4
print(alterTableAddFK('db6', 'Table2_DB6', 'Rel1', [0, 1], 'Table1_DB6', [])) # 4
print(alterTableAddFK('db6', 'Table2_DB6', 'Rel1', [0, 1], 'Table1_DB6', [0])) # 6
print(alterTableAddFK('db6', 'Table2_DB6', 'Rel2', [0, 1], 'Table1_DB6', [0])) # 0
print(alterTableAddFK('db6', 'Table2_DB6', 'Rel3', [0, 1], 'Table1_DB6', [0])) # 0
print(alterTableAddFK('db6', 'Table2_DB6', 'Rel4', [0, 1], 'Table1_DB6', [0])) # 0
showFK(load('FK'))
print('alterDatabaseMode: ', alterDatabaseMode('db6', 'bplus'))
print('AFTER')
dictionary = load('metadata')
showDict(dictionary)
showMode('b')
showMode('bplus')
j = checkMode('b')
print('MODE B - DB6: ', j.showTables('db6'))
j = checkMode('bplus')
print('MODE BPLUS - DB6', j.showTables('db6'))
print(showTables('db6'))
print(alterTableDropFK('db63', 'Table2_DB6', 'Rel1')) # 2
print(alterTableDropFK('db6', 'Table4', 'Rel3')) # 3
print(alterTableDropFK('db6', 'Table2_DB6', 'Rel3')) # 0
print(alterTableDropFK('db6', 'Table2_DB6', 'Rel1')) # 0
print(alterTableDropFK('db6', 'Table2_DB6', 'Rel2')) # 0
print(alterTableDropFK('db6', 'Table2_DB6', 'Rel3')) # 4
print(alterTableDropFK('db6', 'Table2_DB6', 'Rel4')) # 0
showFK(load('FK'))
def test2():
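    # Covers: encoding change, database/table checksums (MD5/SHA256),
    # per-table compress/decompress, and basic CRUD (update/delete/truncate).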
print('#' * 20 + ' Create Database')
print(createDatabase('db1', 'avl', 'utf8'), end='-')
print(createDatabase('db2', 'b', 'utf8'), end='-')
print(createDatabase('db3', 'bplus', 'utf8'))
print('#' * 20 + ' Create Table')
print(createTable('db1', 'Table1_DB1', 3), end='-')
print(createTable('db2', 'Table1_DB2', 3))
print('#' * 20 + ' Insert')
print(insert('db1', 'Table1_DB1', ['A1', 'B1', 'C1']), end='-')
print(insert('db1', 'Table1_DB1', ['A2', 'B2', 'C2']), end='-')
print(insert('db1', 'Table1_DB1', ['A3', 'B3', 'C3']), end='-')
print(insert('db2', 'Table1_DB2', ['A1_DB2', 'B1', 'C1']), end='-')
print(insert('db2', 'Table1_DB2', ['A2_DB2', 'B2', 'C2']), end='-')
print(insert('db2', 'Table1_DB2', ['A3_DB2', 'B3', 'C3']))
print(alterDatabaseEncoding('db1', 'iso-8859-1'))
print(extractTable('db1', 'Table1_DB1'))
print(checksumDatabase('db1', 'MD5'))
print(checksumDatabase('db1', 'MD5'))
print(insert('db1', 'Table1_DB1', ['A4', 'B4', 'C4']))
print(checksumDatabase('db1', 'MD5'))
print('----------------------------------------------')
print(checksumTable('db2', 'Table1_DB2', 'SHA256'))
print(checksumTable('db2', 'Table1_DB2', 'SHA256'))
print(insert('db2', 'Table1_DB2', ['A4_DB2', 'B4', 'C4']))
print(checksumTable('db2', 'Table1_DB2', 'SHA256'))
print(showChecksums(load('checksum')))
print('----------------------------------------------')
print(alterTableCompress('db1', 'Table1_DB1', 8))
print(extractTable('db1', 'Table1_DB1'))
print(alterTableDecompress('db1', 'Table1_DB1', ))
print(extractTable('db1', 'Table1_DB1'))
print('#' * 20 + ' Extract Row')
print(extractRow('db1', 'Table1_DB1', [1]))
print('#' * 20 + ' Update')
print(update('db1', 'Table1_DB1', {1: 'B1_Update'}, [1]))
print('#' * 20 + ' Extract Row')
print(extractRow('db1', 'Table1_DB1', [1]))
print('#' * 20 + ' delete')
print(delete('db2', 'Table1_DB2', [0]))
print(delete('db2', 'Table1_DB2', [1]))
print('#' * 20 + ' Extract Row')
print(extractRow('db2', 'Table1_DB2', [2]))
print('#' * 20 + ' Extract Row Before')
print(extractRow('db2', 'Table1_DB2', [0]))
print(extractRow('db2', 'Table1_DB2', [1]))
print(extractRow('db2', 'Table1_DB2', [2]))
print('#' * 20 + ' truncate')
print(truncate('db2', 'Table1_DB2'))
print('#' * 20 + ' Extract Row After')
print(extractRow('db2', 'Table1_DB2', [0]))
print(extractRow('db2', 'Table1_DB2', [1]))
print(extractRow('db2', 'Table1_DB2', [2]))
def test3():
print('#' * 20 + ' Create Database')
print(createDatabase('db1', 'avl', 'utf8'), end='-')
print(createDatabase('db2', 'b', 'utf8'), end='-')
print(createDatabase('db3', 'bplus', 'utf8'), end='-')
print(createDatabase('db4', 'isam', 'utf8'), end='-')
print(createDatabase('db5', 'json', 'utf8'), end='-')
print(createDatabase('db6', 'hash', 'utf8'))
print('#' * 20 + ' Create Table')
print(createTable('db1', 'Table1_DB1', 3), end='-')
print(createTable('db1', 'Table2_DB1', 3), end='-')
print(createTable('db1', 'Table3_DB1', 3), end='-')
print(createTable('db1', 'Table4_DB1', 3), end='-')
print(createTable('db2', 'Table1_DB2', 3), end='-')
print(createTable('db2', 'Table2_DB2', 3), end='-')
print(createTable('db2', 'Table3_DB2', 3), end='-')
print(createTable('db2', 'Table4_DB2', 3), end='-')
print(createTable('db3', 'Table1_DB3', 3), end='-')
print(createTable('db3', 'Table2_DB3', 3), end='-')
print(createTable('db3', 'Table3_DB3', 3), end='-')
print(createTable('db3', 'Table4_DB3', 3), end='-')
print(createTable('db4', 'Table1_DB4', 3), end='-')
print(createTable('db4', 'Table2_DB4', 3), end='-')
print(createTable('db4', 'Table3_DB4', 3), end='-')
print(createTable('db4', 'Table4_DB4', 3), end='-')
print(createTable('db5', 'Table1_DB5', 3), end='-')
print(createTable('db5', 'Table2_DB5', 3), end='-')
print(createTable('db5', 'Table3_DB5', 3), end='-')
print(createTable('db5', 'Table4_DB5', 3), end='-')
print(createTable('db6', 'Table1_DB6', 3), end='-')
print(createTable('db6', 'Table2_DB6', 3), end='-')
print(createTable('db6', 'Table3_DB6', 3), end='-')
print(createTable('db6', 'Table4_DB6', 3))
print('#' * 20 + ' Insert')
print(insert('db1', 'Table1_DB1', ['A1', 'B1', 'C1']), end='-')
print(insert('db1', 'Table2_DB1', ['A2', 'B2', 'C2']), end='-')
print(insert('db1', 'Table3_DB1', ['A3', 'B3', 'C3']), end='-')
print(insert('db2', 'Table1_DB2', ['A', 'B', 'C']), end='-')
print(insert('db2', 'Table1_DB2', ['AA', 'BB', 'CC']), end='-')
print(insert('db2', 'Table1_DB2', ['AAA', 'BBB', 'CCC']), end='-')
print(insert('db2', 'Table2_DB2', ['A2', 'B2', 'C2']), end='-')
print(insert('db2', 'Table3_DB2', ['A3', 'B3', 'C3']), end='-')
print(insert('db3', 'Table1_DB3', ['A1', 'B1', 'C1']), end='-')
print(insert('db3', 'Table2_DB3', ['A2', 'B2', 'C2']), end='-')
print(insert('db3', 'Table3_DB3', ['A3', 'B3', 'C3']), end='-')
print(insert('db4', 'Table1_DB4', ['A1', 'B1', 'C1']), end='-')
print(insert('db4', 'Table2_DB4', ['A2', 'B2', 'C2']), end='-')
print(insert('db4', 'Table3_DB4', ['A3', 'B3', 'C3']), end='-')
print(insert('db5', 'Table1_DB5', ['A1', 'B1', 'C1']), end='-')
print(insert('db5', 'Table2_DB5', ['A2', 'B2', 'C2']), end='-')
print(insert('db5', 'Table3_DB5', ['A3', 'B3', 'C3']), end='-')
print(insert('db6', 'Table1_DB6', ['A1', 'B1', 'C1']), end='-')
print(insert('db6', 'Table2_DB6', ['A2', 'B2', 'C2']), end='-')
print(insert('db6', 'Table3_DB6', ['A3', 'B3', 'C3']))
print()
print('#' * 20 + ' EXTRACTROW ' + '#' * 20)
print(extractRow('db1', 'Table1_DB1', [2]))
print('#' * 20 + ' Show Tables')
print(showTables('db1'))
print(showTables('db2'))
print(showTables('db3'))
print(showTables('db4'))
print(showTables('db5'))
print(showTables('db6'))
print()
print('#' * 20 + ' Security Mode ' + '#' * 20)
print(safeModeOn('db1', 'Table1_DB1'))
print(insert('db1', 'Table1_DB1', ['A11', 'B12', 'C13']), end='-')
print(insert('db1', 'Table1_DB1', ['A21', 'B22', 'C23']), end='-')
print(insert('db1', 'Table1_DB1', ['A31', 'B32', 'C33']), end='-')
print(insert('db1', 'Table1_DB1', ['41', 'B42', 'C43']))
dictionary = load('metadata')
showDict(dictionary)
print('#' * 20 + ' Extract Table')
print(extractTable('db1', 'Table1_DB1'))
print(extractTable('db2', 'Table1_DB2'))
print(extractTable('db3', 'Table1_DB3'))
print(extractTable('db4', 'Table1_DB4'))
print(extractTable('db5', 'Table1_DB5'))
print(extractTable('db6', 'Table1_DB6'))
print('#' * 20 + ' Extract Range Table')
print(extractRangeTable('db1', 'Table1_DB1', 0, 'A1', 'C1'))
print(extractRangeTable('db2', 'Table1_DB2', 0, 'A1', 'C1'))
print(extractRangeTable('db3', 'Table1_DB3', 0, 'A1', 'C1'))
print(extractRangeTable('db4', 'Table1_DB4', 0, 'A1', 'C1'))
print(extractRangeTable('db5', 'Table1_DB5', 0, 'A1', 'C1'))
print(extractRangeTable('db6', 'Table1_DB6', 0, 'A1', 'C1'))
print('#' * 20 + ' Alter Add PK')
print(alterAddPK('db1', 'Table1_DB1', [1]))
print(alterAddPK('db2', 'Table1_DB2', [1]))
print(alterAddPK('db3', 'Table1_DB3', [1]))
print(alterAddPK('db4', 'Table1_DB4', [1]))
print(alterAddPK('db5', 'Table1_DB5', [1]))
print(alterAddPK('db6', 'Table1_DB6', [1]))
print('#' * 20 + ' UPDATE' + '#' * 20)
print(update('db1', 'Table1_DB1', {0: 'JORGE8'}, ['B32']))
# print(safeModeOff('db1', 'Table1_DB1'))
print('#' * 20 + ' Alter Drop PK')
print(alterDropPK('db1', 'Table1_DB1'))
print(alterDropPK('db2', 'Table1_DB2'))
print(alterDropPK('db3', 'Table1_DB3'))
print(alterDropPK('db4', 'Table1_DB4'))
print(alterDropPK('db5', 'Table1_DB5'))
print(alterDropPK('db6', 'Table1_DB6'))
print('#' * 20 + ' Alter Table')
print(alterTable('db1', 'Table1_DB1', 'Table1New_DB1'))
print(alterTable('db2', 'Table1_DB2', 'Table1New_DB2'))
print(alterTable('db3', 'Table1_DB3', 'Table1New_DB3'))
print(alterTable('db4', 'Table1_DB4', 'Table1New_DB4'))
print(alterTable('db5', 'Table1_DB5', 'Table1New_DB5'))
print(alterTable('db6', 'Table1_DB6', 'Table1New_DB6'))
dictionary = load('metadata')
showDict(dictionary)
print('#' * 20 + ' Alter drop Column')
print(alterDropColumn('db1', 'Table2_DB1', 1))
print(alterDropColumn('db2', 'Table2_DB2', 1))
print(alterDropColumn('db3', 'Table2_DB3', 1))
print(alterDropColumn('db4', 'Table2_DB4', 1))
print(alterDropColumn('db5', 'Table2_DB5', 1))
print(alterDropColumn('db6', 'Table2_DB6', 1))
print('#' * 20 + ' drop Table')
print(dropTable('db1', 'Table3_DB1'))
print(dropTable('db2', 'Table3_DB2'))
print(dropTable('db3', 'Table3_DB3'))
print(dropTable('db4', 'Table3_DB4'))
print(dropTable('db5', 'Table3_DB5'))
print(dropTable('db6', 'Table3_DB6'))
dictionary = load('metadata')
showDict(dictionary)
def testCompress():
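    # Prints the stored tuples before and after database-wide
    # compress/decompress on db2 to check they read back unchanged.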
print(createDatabase('db1', 'b', 'utf8'))
print(createDatabase('db2', 'avl', 'utf8'))
print(createTable('db1', 'Table1_DB1', 3))
print(createTable('db1', 'Table2_DB1', 3))
print(createTable('db1', 'Table3_DB1', 3))
print(createTable('db2', 'Table1_DB2', 3))
print(insert('db1', 'Table1_DB1', ['Ya valines todos', 'F con el proyecto xdxd', 123]))
print(insert('db1', 'Table1_DB1', ['ABC', 'DEF', 'GHI']))
print(insert('db1', 'Table2_DB1', ['A ver si funciona esto xd', 'F en el chat', 'Si sale el lab']))
print(insert('db1', 'Table3_DB1', ['ya super F', 'Isaac es mal coordinador', 'Son bromas es un papucho']))
print(insert('db2', 'Table1_DB2',
['Test de compresion de las bases de datos xdxd', 'Ya no se que mas poner en estas tuplas jsjsjs',
'F en el chat']))
print('#' * 10 + ' Before')
print(extractTable('db1', 'Table1_DB1'))
print(extractTable('db1', 'Table2_DB1'))
print(extractTable('db1', 'Table3_DB1'))
print(extractTable('db2', 'Table1_DB2'))
print(alterDatabaseCompress('db2', 6))
print('#' * 10 + ' After')
print(extractTable('db1', 'Table1_DB1'))
print(extractTable('db1', 'Table2_DB1'))
print(extractTable('db1', 'Table3_DB1'))
print(extractTable('db2', 'Table1_DB2'))
print(alterDatabaseDecompress('db2'))
print('#' * 10 + ' After')
print(extractTable('db1', 'Table1_DB1'))
print(extractTable('db1', 'Table2_DB1'))
print(extractTable('db1', 'Table3_DB1'))
print(extractTable('db2', 'Table1_DB2'))
# print(alterTableCompress('db1', 'Table1_DB1', 6))
# # table = extractTable('db1', 'Table1_DB1')
# # print('Prueba')
# # for tuple in table[0]:
# # print("Tamaño Real %d" % len(tuple))
# print('#' * 10 + ' After')
# print(extractTable('db1', 'Table1_DB1'))
#
# print(alterTableDecompress('db1', 'Table1_DB1'))
# print('#' * 10 + ' Decompress')
# print(extractTable('db1', 'Table1_DB1'))
def final_test():
# def createDatabase(database: str, mode: string, encoding: string) -> int:
# mode: 'avl', 'b', 'bplus', 'dict', 'isam', 'json', 'hash'
# encoding: 'ascii', 'iso-8859-1', 'utf8'
# {0: success, 1: error, 2: db exist, 3: incorrect mode, 4: incorrect encoding
print('#' * 20 + ' Create Database' + '#' * 20)
print(j.createDatabase('db1', 'avl', 'ascii'), end='-')
print(j.createDatabase('db2', 'avl', 'ascii'), end='-')
print(j.createDatabase('db3', 'avl', 'ascii'), end='-')
print(j.createDatabase('db4', 'b', 'ascii'), end='-')
print(j.createDatabase('db5', 'b', 'ascii'), end='-')
print(j.createDatabase('db6', 'b', 'ascii'), end='-')
print(j.createDatabase('db7', 'bplus', 'ascii'), end='-')
print(j.createDatabase('db8', 'bplus', 'iso-8859-1'), end='-')
print(j.createDatabase('db9', 'bplus', 'iso-8859-1'), end='-')
print(j.createDatabase('db10', 'dict', 'iso-8859-1'), end='-')
print(j.createDatabase('db11', 'dict', 'iso-8859-1'), end='-')
print(j.createDatabase('db12', 'dict', 'iso-8859-1'), end='-')
print(j.createDatabase('db13', 'isam', 'iso-8859-1'), end='-')
print(j.createDatabase('db14', 'isam', 'iso-8859-1'), end='-')
print(j.createDatabase('db15', 'isam', 'utf8'), end='-')
print(j.createDatabase('db16', 'json', 'utf8'), end='-')
print(j.createDatabase('db17', 'json', 'utf8'), end='-')
print(j.createDatabase('db18', 'json', 'utf8'), end='-')
print(j.createDatabase('db19', 'hash', 'utf8'), end='-')
print(j.createDatabase('db20', 'hash', 'utf8'), end='-')
print(j.createDatabase('db21', 'hash', 'utf8'))
print()
print('#' * 20 + ' Show Databases ' + '#' * 20)
print(j.showDatabases())
# def createTable(database: str, table: str, numberColumns: int) -> int:
# {0: success, 1: error, 2: db not exist, 3: table exist
print()
print('#' * 20 + ' Create Table ' + '#' * 20)
print(j.createTable('db1', 'T1DB1', 1), end='-')
print(j.createTable('db1', 'T2DB1', 1), end='-')
print(j.createTable('db1', 'T3DB1', 1))
print(j.createTable('db4', 'T1DB4', 2), end='-')
print(j.createTable('db4', 'T2DB4', 2), end='-')
print(j.createTable('db4', 'T3DB4', 2))
print(j.createTable('db7', 'T1DB7', 3), end='-')
print(j.createTable('db7', 'T2DB7', 3), end='-')
print(j.createTable('db7', 'T3DB7', 3))
print(j.createTable('db8', 'T1DB8', 4), end='-')
print(j.createTable('db8', 'T2DB8', 4), end='-')
print(j.createTable('db8', 'T3DB8', 4))
print(j.createTable('db9', 'T1DB9', 5), end='-')
print(j.createTable('db9', 'T2DB9', 5), end='-')
print(j.createTable('db9', 'T3DB9', 5))
print(j.createTable('db10', 'T1DB10', 3), end='-')
print(j.createTable('db10', 'T2DB10', 3), end='-')
print(j.createTable('db10', 'T3DB10', 3))
print(j.createTable('db13', 'T1DB13', 3), end='-')
print(j.createTable('db13', 'T2DB13', 3), end='-')
print(j.createTable('db13', 'T3DB13', 3))
print(j.createTable('db16', 'T1DB16', 3), end='-')
print(j.createTable('db16', 'T2DB16', 3), end='-')
print(j.createTable('db16', 'T3DB16', 3))
print(j.createTable('db19', 'T1DB19', 3), end='-')
print(j.createTable('db19', 'T2DB19', 3), end='-')
print(j.createTable('db19', 'T3DB19', 3))
# def insert(database: str, table: str, register: list) -> int:
# {0: success, 1: error, 2: db not exist, 3: table not exist, 4: PK duplicated, 5: out limit
print('#' * 20 + ' Insert ' + '#' * 20)
print(j.insert('db1', 'T1DB1', ['A1']), end='-')
print(j.insert('db1', 'T1DB1', ['A11']), end='-')
print(j.insert('db1', 'T1DB1', ['A111']), end='-')
print(j.insert('db1', 'T1DB1', ['A1111']), end='-')
print(j.insert('db1', 'T2DB1', ['B1']), end='-')
print(j.insert('db1', 'T3DB1', ['C1']))
print(j.insert('db4', 'T1DB4', ['A1', 'A2']), end='-')
print(j.insert('db4', 'T1DB4', ['A11', 'A22']), end='-')
print(j.insert('db4', 'T1DB4', ['A111', 'A222']), end='-')
print(j.insert('db4', 'T1DB4', ['A1111', 'A2222']), end='-')
print(j.insert('db4', 'T2DB4', ['B1', 'B2']), end='-')
print(j.insert('db4', 'T3DB4', ['C1', 'C2']))
print(j.insert('db7', 'T1DB7', ['A1', 'A2', 'A3']), end='-')
print(j.insert('db7', 'T1DB7', ['A11', 'A22', 'A33']), end='-')
print(j.insert('db7', 'T1DB7', ['A111', 'A222', 'A333']), end='-')
print(j.insert('db7', 'T1DB7', ['A1111', 'A2222', 'A3333']), end='-')
print(j.insert('db7', 'T2DB7', ['B1', 'B2', 'B3']), end='-')
print(j.insert('db7', 'T3DB7', ['C1', 'C2', 'C3']))
print(j.insert('db10', 'T1DB10', ['A1', 'A2', 'A3']), end='-')
print(j.insert('db10', 'T1DB10', ['A11', 'A22', 'A33']), end='-')
print(j.insert('db10', 'T1DB10', ['A111', 'A222', 'A333']), end='-')
print(j.insert('db10', 'T1DB10', ['A1111', 'A2222', 'A3333']), end='-')
print(j.insert('db10', 'T2DB10', ['B1', 'B2', 'B3']), end='-')
print(j.insert('db10', 'T3DB10', ['C1', 'C2', 'C3']))
print(j.insert('db13', 'T1DB13', ['A1', 'A2', 'A3']), end='-')
print(j.insert('db13', 'T1DB13', ['A11', 'A22', 'A33']), end='-')
print(j.insert('db13', 'T1DB13', ['A111', 'A222', 'A333']), end='-')
print(j.insert('db13', 'T1DB13', ['A1111', 'A2222', 'A3333']), end='-')
print(j.insert('db13', 'T2DB13', ['B1', 'B2', 'B3']), end='-')
print(j.insert('db13', 'T3DB13', ['C1', 'C2', 'C3']))
print(j.insert('db16', 'T1DB16', ['A1', 'A2', 'A3']), end='-')
print(j.insert('db16', 'T1DB16', ['A11', 'A22', 'A33']), end='-')
print(j.insert('db16', 'T1DB16', ['A111', 'A222', 'A333']), end='-')
print(j.insert('db16', 'T1DB16', ['A1111', 'A2222', 'A3333']), end='-')
print(j.insert('db16', 'T2DB16', ['B1', 'B2', 'B3']), end='-')
print(j.insert('db16', 'T3DB16', ['C1', 'C2', 'C3']))
print(j.insert('db19', 'T1DB19', ['A1', 'A2', 'A3']), end='-')
print(j.insert('db19', 'T1DB19', ['A11', 'A22', 'A33']), end='-')
print(j.insert('db19', 'T1DB19', ['A111', 'A222', 'A333']), end='-')
print(j.insert('db19', 'T1DB19', ['A1111', 'A2222', 'A3333']), end='-')
print(j.insert('db19', 'T2DB19', ['B1', 'B2', 'B3']), end='-')
print(j.insert('db19', 'T3DB19', ['C1', 'C2', 'C3']))
# def showDatabases() -> list:
# {[]: error, not db }
# dictionary = load('metadata')
# showDict(dictionary)
print()
print('#' * 20 + ' showDatabases ' + '#' * 20)
print(j.showDatabases())
print()
print('#' * 20 + ' extract Table Prueba1: without renaming ' + '#' * 20)
print(j.extractTable("db1", "T1DB1"))
print(j.extractTable("db4", "T1DB4"))
print(j.extractTable("db7", "T1DB7"))
print(j.extractTable("db10", "T1DB10"))
print(j.extractTable("db13", "T1DB13"))
print(j.extractTable("db16", "T1DB16"))
print(j.extractTable("db19", "T1DB19"))
print()
print('#' * 20 + ' Show Tables and extract Table ISAM ' + '#' * 20)
print(j.showTables("db13"))
print(j.extractTable("db13", "T3DB13")) # isam mode
# def alterDatabase(databaseOld, databaseNew) -> int:
# {0: success, 1: error, 2: dbOld not exist, 3: dbNew exist}
print()
print('#' * 20 + ' alterdatabase ' + '#' * 20)
print(j.alterDatabase("db1", "db101"))
print(j.alterDatabase("db4", "db104"))
print(j.alterDatabase("db7", "db107"))
print(j.alterDatabase("db10", "db110"))
print(j.alterDatabase("db13", "db113"))
print(j.alterDatabase("db16", "db116"))
print(j.alterDatabase("db19", "db119"))
print()
print('#' * 20 + ' Show Databases ' + '#' * 20)
print(j.showDatabases())
print()
print('#' * 20 + ' drop database ' + '#' * 20)
# def dropDatabase(database: str) -> int:
# {0: success, 1: error, 2: db not exist}
print(j.dropDatabase("db2"))
print(j.dropDatabase("db3"))
print(j.dropDatabase("db5"))
print(j.dropDatabase("db6"))
print(j.dropDatabase("db8"))
print(j.dropDatabase("db9"))
print(j.dropDatabase("db11"))
print()
print('#' * 20 + ' Show Databases ' + '#' * 20)
print(j.showDatabases())
print()
# def showDatabases() -> list:
# {[]: error, not tables, None: not db }
print('#' * 20 + ' showTables ' + '#' * 20)
print(j.showTables("db101"))
print(j.showTables("db104"))
print(j.showTables("db107"))
print(j.showTables("db110"))
print(j.showTables("db113"))
print(j.showTables("db116"))
print(j.showTables("db119"))
print()
# def extractTable(database: str, table: str) -> list:
# {[]: , []: not registers, None: error}
print('#' * 20 + ' extract Table ' + '#' * 20)
print(j.extractTable("db101", "T1DB1"))
print(j.extractTable("db104", "T1DB4"))
print(j.extractTable("db107", "T1DB7"))
print(j.extractTable("db110", "T1DB10"))
print(j.extractTable("db113", "T3DB13"))
print(j.extractTable("db116", "T1DB16"))
print(j.extractTable("db119", "T1DB19"))
print()
print('#' * 20 + ' extract Range Table ' + '#' * 20)
# def extractRangeTable(database: str, table: str, columnNumber: int, lower: any, upper: any) -> list:
# {[]: not registers, None: not db, not table, error}
print(j.extractRangeTable("db1", "T1DB1", 1, "A", "A"))
print('#' * 20 + ' alter Drop PK ' + '#' * 20)
print(j.alterDropPK('db101', 'T1DB1'))
print(j.alterDropPK('db104', 'T1DB4'))
print(j.alterDropPK('db107', 'T1DB7'))
print(j.alterDropPK('db110', 'T1DB10'))
print(j.alterDropPK('db113', 'T1DB13'))
print(j.alterDropPK('db116', 'T1DB16'))
print(j.alterDropPK('db119', 'T1DB19'))
print()
# def alterAddPK(database: str, table: str, columns: list) -> int:
# {0: success, 1: error, 2: db not exist, 3: table not exist, 4: PK exist}
print('#' * 20 + ' alter Add PK ' + '#' * 20)
print(j.alterAddPK('db101', 'T1DB1', [0]))
print(j.alterAddPK('db104', 'T1DB4', [0]))
print(j.alterAddPK('db107', 'T1DB7', [0]))
print(j.alterAddPK('db110', 'T1DB10', [0]))
print(j.alterAddPK('db113', 'T1DB13', [0]))
print(j.alterAddPK('db116', 'T1DB16', [0]))
print(j.alterAddPK('db119', 'T1DB19', [0]))
print()
# def alterDropPK(database: str, table: str) -> int:
# {0: success, 1: error, 2: db not exist, 3: table not exist, 4: PK not exist}
print('#' * 20 + ' alter Drop PK ' + '#' * 20)
print(j.alterDropPK('db101', 'T1DB1'))
print(j.alterDropPK('db104', 'T1DB4'))
print(j.alterDropPK('db107', 'T1DB7'))
print(j.alterDropPK('db110', 'T1DB10'))
print(j.alterDropPK('db113', 'T1DB13'))
print(j.alterDropPK('db116', 'T1DB16'))
print(j.alterDropPK('db119', 'T1DB19'))
print()
# def alterAddPK(database: str, table: str, columns: list) -> int:
# {0: success, 1: error, 2: db not exist, 3: table not exist, 4: PK exist}
print('#' * 20 + ' alter Add PK ' + '#' * 20)
print(j.alterAddPK('db101', 'T1DB1', [0]))
print(j.alterAddPK('db104', 'T1DB4', [0]))
print(j.alterAddPK('db107', 'T1DB7', [0]))
print(j.alterAddPK('db110', 'T1DB10', [0]))
print(j.alterAddPK('db113', 'T1DB13', [0]))
print(j.alterAddPK('db116', 'T1DB16', [0]))
print(j.alterAddPK('db119', 'T1DB19', [0]))
print()
# def alterTable(database: str, tableOld: str, tableNew: str) -> int:
# {0: success, 1: error, 2: db not exist, 3:tableOld not exist, 4: tableNew exist}
print(j.alterTable('db101', 'T1DB1', 'T1DB101'))
print(j.alterTable('db104', 'T1DB4', 'T1DB104')) ####
print(j.alterTable('db107', 'T1DB7', 'T1DB107'))
print(j.alterTable('db110', 'T1DB10', 'T1DB110'))
print(j.alterTable('db113', 'T1DB13', 'T1DB113'))
print(j.alterTable('db116', 'T1DB16', 'T1DB116'))
print(j.alterTable('db119', 'T1DB19', 'T1DB119'))
print()
# def showDatabases() -> list:
# {[]: error, not tables, None: not db }
print('#' * 20 + ' showTables ' + '#' * 20)
print(j.showTables("db101"))
print(j.showTables("db104"))
print(j.showTables("db107"))
print(j.showTables("db110"))
print(j.showTables("db113"))
print(j.showTables("db116"))
print(j.showTables("db119"))
print()
print('#' * 20 + ' alter Add Column ' + '#' * 20)
# def alterAddColumn(database: str, table: str, default: any) -> int:
# {0: success, 1: error, 2: db not exist, 3: table not exist}
print(j.alterAddColumn('db101', 'T1DB101', 'NuevaColumna101'))
print(j.alterAddColumn('db104', 'T1DB104', 'NuevaColumna104')) ####
print(j.alterAddColumn('db107', 'T1DB107', 'NuevaColumna107'))
print(j.alterAddColumn('db110', 'T1DB110', 'NuevaColumna110'))
print(j.alterAddColumn('db113', 'T1DB113', 'NuevaColumna113'))
print(j.alterAddColumn('db116', 'T1DB116', 'NuevaColumna116'))
print(j.alterAddColumn('db119', 'T1DB119', 'NuevaColumna119'))
print()
# def extractTable(database: str, table: str) -> list:
# {[]: , []: not registers, None: error}
print('#' * 20 + ' extract Table ' + '#' * 20)
print(j.extractTable("db101", "T1DB101"))
print(j.extractTable("db104", "T1DB104"))
print(j.extractTable("db107", "T1DB107"))
print(j.extractTable("db110", "T1DB110"))
print(j.extractTable("db113", "T1DB113"))
print(j.extractTable("db116", "T1DB116"))
print(j.extractTable("db119", "T1DB119"))
print()
print('#' * 20 + ' alter Drop Column ' + '#' * 20)
# def alterDropColumn(database: str, table: str, columnNumber: int) -> int:
# {0: success, 1: error, 2: db not exist, 3: table not exist, 4: not eliminated or without columns, 5: out limit}
print(j.alterDropColumn('db101', 'T1DB101', 1))
print(j.alterDropColumn('db104', 'T1DB104', 2))
print(j.alterDropColumn('db107', 'T1DB107', 3))
print(j.alterDropColumn('db110', 'T1DB110', 3))
print(j.alterDropColumn('db113', 'T1DB113', 3))
print(j.alterDropColumn('db116', 'T1DB116', 3))
print(j.alterDropColumn('db119', 'T1DB119', 3))
print()
# def extractTable(database: str, table: str) -> list:
# {[]: , []: not registers, None: error}
print('#' * 20 + ' extract Table ' + '#' * 20)
print(j.extractTable("db101", "T1DB101"))
print(j.extractTable("db104", "T1DB104"))
print(j.extractTable("db107", "T1DB107"))
print(j.extractTable("db110", "T1DB110"))
print(j.extractTable("db113", "T1DB113"))
print(j.extractTable("db116", "T1DB116"))
print(j.extractTable("db119", "T1DB119"))
print()
print('#' * 20 + ' dropTable ' + '#' * 20)
# def dropTable(database: str, table: str) -> int:
# {0: success, 1: error, 2 db not exist, 3: table not exist}
print(j.dropTable('db101', 'T2DB1')) ####
print(j.dropTable('db104', 'T2DB4'))
print(j.dropTable('db107', 'T2DB7'))
print(j.dropTable('db110', 'T2DB10'))
print(j.dropTable('db113', 'T2DB13'))
print(j.dropTable('db116', 'T2DB16'))
print(j.dropTable('db119', 'T2DB19'))
print()
# def showDatabases() -> list:
# {[]: error, not tables, None: not db }
print('#' * 20 + ' showTables ' + '#' * 20)
print(j.showTables("db101"))
print(j.showTables("db104"))
print(j.showTables("db107"))
print(j.showTables("db110"))
print(j.showTables("db113"))
print(j.showTables("db116"))
print(j.showTables("db119"))
# Testing update with numbers
print(j.insert('db101', 'T3DB1', [110]))
print(j.insert('db101', 'T3DB1', [120]))
print(j.insert('db101', 'T3DB1', [130]))
print(j.insert('db101', 'T3DB1', [140]))
print(j.alterAddPK('db101', 'T3DB1', [0]))
print()
print('#' * 20 + ' update ' + '#' * 20)
# def update(database: str, table: str, register: dict, columns: list) -> int:
# {0: success, 1: error, 2 db not exist, 3: table not exist, 4:PK not exist}
print(j.update('db101', 'T1DB101', {0: 'Nuevo1'}, ['A1']))
print(j.update('db101', 'T3DB1', {0: 'Clear'}, [110]))
print(j.update('db101', 'T3DB1', {0: 'Clear1'}, ['C1']))
print(j.update('db104', 'T1DB104', {1: 'Nuevo1'}, ['A1']))
print(j.update('db107', 'T1DB107', {1: 'Nuevo1'}, ['A1']))
print(j.update('db110', 'T1DB110', {1: 'Nuevo1'}, ['A1']))
print(j.update('db113', 'T1DB113', {1: 'Nuevo1'}, ['A1']))
print(j.update('db116', 'T1DB116', {1: 'Nuevo1'}, ['A1']))
print(j.update('db119', 'T1DB119', {1: 'Nuevo1'}, ['A1']))
print()
# def extractTable(database: str, table: str) -> list:
# {[]: , []: not registers, None: error}
print('#' * 20 + ' extract Table ' + '#' * 20)
print(j.extractTable("db101", "T1DB101"))
print(j.extractTable("db101", "T3DB1"))
print(j.extractTable("db104", "T1DB104"))
print(j.extractTable("db107", "T1DB107"))
print(j.extractTable("db110", "T1DB110"))
print(j.extractTable("db113", "T1DB113"))
print(j.extractTable("db116", "T1DB116"))
print(j.extractTable("db119", "T1DB119"))
print()
# def extractRow(database: str, table: str, columns: list) -> list:
# {[tuple]: success , []: not registers, error}
print('#' * 20 + ' extract Row ' + '#' * 20)
print(j.extractRow("db101", "T1DB101", ['Nuevo1']))
print(j.extractRow("db101", "T3DB1", [110]))
print(j.extractRow("db104", "T1DB104", ['A1']))
print(j.extractRow("db107", "T1DB107", ['A1']))
print(j.extractRow("db110", "T1DB110", ['A1']))
print(j.extractRow("db113", "T1DB113", ['A1']))
print(j.extractRow("db116", "T1DB116", ['A1']))
print(j.extractRow("db119", "T1DB119", ['A1']))
print()
# def extractTable(database: str, table: str) -> list:
# {[]: , []: not registers, None: error}
print('#' * 20 + ' extract Table ' + '#' * 20)
print(j.extractTable("db101", "T1DB101"))
print(j.extractTable("db101", "T3DB1"))
print(j.extractTable("db104", "T1DB104"))
print(j.extractTable("db107", "T1DB107"))
print(j.extractTable("db110", "T1DB110"))
print(j.extractTable("db113", "T1DB113"))
print(j.extractTable("db116", "T1DB116"))
print(j.extractTable("db119", "T1DB119"))
print()
# def delete(database: str, table: str, columns: list) -> int:
# {0: success, 1: error, 2: db not exist, 3: table not exist, 4: PK not exist}
print('#' * 20 + ' delete ' + '#' * 20)
print(j.delete("db101", "T1DB101", ['Nuevo1']))
print(j.delete("db101", "T3DB1", ['Clear1']))
print(j.delete("db101", "T3DB1", [110]))
print(j.delete("db104", "T1DB104", ['A1']))
print(j.delete("db107", "T1DB107", ['A1']))
print(j.delete("db110", "T1DB110", ['A1']))
print(j.delete("db113", "T1DB113", ['A1']))
print(j.delete("db116", "T1DB116", ['A1']))
print(j.delete("db119", "T1DB119", ['A1']))
print()
# def extractTable(database: str, table: str) -> list:
# {[]: , []: not registers, None: error}
print('#' * 20 + ' extract Table ' + '#' * 20)
print(j.extractTable("db101", "T1DB101"))
print(j.extractTable("db101", "T3DB1"))
print(j.extractTable("db104", "T1DB104"))
print(j.extractTable("db107", "T1DB107"))
print(j.extractTable("db110", "T1DB110"))
print(j.extractTable("db113", "T1DB113"))
print(j.extractTable("db116", "T1DB116"))
print(j.extractTable("db119", "T1DB119"))
print()
# def truncate(database: str, table: str) -> int:
# {0: success, 1: error, 2: db not exist, 3: table not exist}
print('#' * 20 + ' truncate ' + '#' * 20)
print(j.truncate("db101", "T1DB101"))
print(j.truncate("db101", "T3DB1"))
print(j.truncate("db104", "T1DB104"))
print(j.truncate("db107", "T1DB107"))
print(j.truncate("db110", "T1DB110"))
print(j.truncate("db113", "T1DB113"))
print(j.truncate("db116", "T1DB116"))
print(j.truncate("db119", "T1DB119"))
print()
# def extractTable(database: str, table: str) -> list:
# {[]: , []: not registers, None: error}
print('#' * 20 + ' extract Table ' + '#' * 20)
print(j.extractTable("db101", "T1DB101"))
print(j.extractTable("db101", "T3DB1"))
print(j.extractTable("db104", "T1DB104"))
print(j.extractTable("db107", "T1DB107"))
print(j.extractTable("db110", "T1DB110"))
print(j.extractTable("db113", "T1DB113"))
print(j.extractTable("db116", "T1DB116"))
print(j.extractTable("db119", "T1DB119"))
print()
# def loadCSV(file: str, database: str, table: str) -> list:
# {int insert: success, []: error}
# {0: success, 1: error, 2: db not exist, 3: table not exist, 4: PK duplicated, 5: out limit}
print('#' * 20 + ' loadCSV ' + '#' * 20)
print(j.loadCSV('ids.csv', "db101", "T1DB101"))
print(j.loadCSV('ids.csv', "db101", "T3DB1"))
print(j.loadCSV("idsnames.csv", "db104", "T1DB104"))
print(j.loadCSV("treeColumnsProfesors.csv", "db107", "T1DB107"))
print(j.loadCSV("treeColumnsProfesors.csv", "db110", "T1DB110"))
print(j.loadCSV("treeColumnsProfesors.csv", "db113", "T1DB113"))
print(j.loadCSV("treeColumnStudent.csv", "db116", "T1DB116"))
print(j.loadCSV("treeColumnStudent.csv", "db119", "T1DB119"))
print()
# def extractTable(database: str, table: str) -> list:
# {[]: , []: not registers, None: error}
print('#' * 20 + ' extract Table ' + '#' * 20)
print(j.extractTable("db101", "T1DB101"))
print(j.extractTable("db101", "T3DB1"))
print(j.extractTable("db104", "T1DB104"))
print(j.extractTable("db107", "T1DB107"))
print(j.extractTable("db110", "T1DB110"))
print(j.extractTable("db113", "T1DB113"))
print(j.extractTable("db116", "T1DB116"))
print(j.extractTable("db119", "T1DB119"))
# mode: 'avl', 'b', 'bplus', 'dict', 'isam', 'json', 'hash'
final_test()
# test()
# testCompress()
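# Illustrative note (not part of the original script): every call above follows the integer
# return-code convention documented in the comments, so on a clean storage directory a run is
# expected to print mostly 0s, with non-zero codes only where the test provokes them, e.g.
#   j.createDatabase('demo', 'avl', 'ascii')  ->  0 (created)
#   j.createDatabase('demo', 'avl', 'ascii')  ->  2 (database already exists)
#   j.insert('demo', 'T1', ['pk', 'v'])       ->  3 if the table was never created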
``` |
{
"source": "jorgejesus/pywps-flask",
"score": 2
} |
#### File: pywps-flask/processes/buffer.py
```python
from pywps import Process, LiteralInput, \
ComplexInput, ComplexOutput, Format, FORMATS
from pywps.validator.mode import MODE
__author__ = 'Brauni'
class Buffer(Process):
def __init__(self):
inputs = [ComplexInput('poly_in', 'Input vector file',
supported_formats=[Format('application/gml+xml')],
mode=MODE.STRICT),
LiteralInput('buffer', 'Buffer size', data_type='float',
allowed_values=(0, 1, 10, (10, 10, 100), (100, 100, 1000)))]
outputs = [ComplexOutput('buff_out', 'Buffered file',
supported_formats=[
Format('application/gml+xml')
]
)]
super(Buffer, self).__init__(
self._handler,
identifier='buffer',
version='0.1',
title="GDAL Buffer process",
abstract="""The process returns buffers around the input features,
using the GDAL library""",
profile='',
inputs=inputs,
outputs=outputs,
store_supported=True,
status_supported=True
)
def _handler(self, request, response):
from osgeo import ogr
inSource = ogr.Open(request.inputs['poly_in'][0].file)
inLayer = inSource.GetLayer()
out = inLayer.GetName() + '_buffer'
# create output file
driver = ogr.GetDriverByName('GML')
outSource = driver.CreateDataSource(
out,
["XSISCHEMAURI=\
http://schemas.opengis.net/gml/2.1.2/feature.xsd"])
outLayer = outSource.CreateLayer(out, None, ogr.wkbUnknown)
# for each feature
featureCount = inLayer.GetFeatureCount()
index = 0
while index < featureCount:
# get the geometry
inFeature = inLayer.GetNextFeature()
inGeometry = inFeature.GetGeometryRef()
# make the buffer
buff = inGeometry.Buffer(float(request.inputs['buffer'][0].data))
# create output feature to the file
outFeature = ogr.Feature(feature_def=outLayer.GetLayerDefn())
outFeature.SetGeometryDirectly(buff)
outLayer.CreateFeature(outFeature)
outFeature.Destroy() # makes it crash when using debug
index += 1
response.update_status('Buffering', 100*(index/featureCount))
outSource.Destroy()
response.outputs['buff_out'].output_format = FORMATS.GML
response.outputs['buff_out'].file = out
return response
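# Usage sketch (assumption, not part of this file): in the pywps-flask demo the process is
# normally exposed through a pywps Service instance, roughly:
#   from pywps import Service
#   service = Service(processes=[Buffer()], cfgfiles=['pywps.cfg'])
# An Execute request then supplies 'poly_in' (GML) and 'buffer' (float) and receives the
# buffered features back as the 'buff_out' complex output.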
``` |
{
"source": "jorgejesus/soils-revealed-lambda",
"score": 2
} |
#### File: soils-revealed-lambda/soils/utils.py
```python
def binds(dataset,variable):
binds_dic = {
"experimental": {'stocks': 80, 'concentration': 20},
"historic": {'stocks': 40},
"recent": {'stocks': 10},
"crop_I": {'stocks': 30},
"crop_MG": {'stocks': 30},
"crop_MGI": {'stocks': 30},
"grass_part": {'stocks': 30},
"grass_full": {'stocks': 30},
"rewilding": {'stocks': 60},
"degradation_ForestToGrass": {'stocks': 51},
"degradation_ForestToCrop": {'stocks': 51},
"degradation_NoDeforestation": {'stocks': 51}
}
return binds_dic[dataset][variable]
def ranges(dataset,variable):
ranges_dic = {
"experimental": {'stocks': [-50, 50], 'concentration': [-10, 10]},
"historic": {'stocks': [-40,40]},
"recent": {'stocks': [-50,50]},
"crop_I": {'stocks': [0,30]},
"crop_MG": {'stocks': [0,30]},
"crop_MGI": {'stocks': [0,30]},
"grass_part": {'stocks': [0,30]},
"grass_full": {'stocks': [0,30]},
"rewilding": {'stocks': [-30,30]},
"degradation_ForestToGrass": {'stocks': [-50,1]},
"degradation_ForestToCrop": {'stocks': [-50,1]},
"degradation_NoDeforestation": {'stocks': [-50,1]}
}
return ranges_dic[dataset][variable]
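# Expected lookups (illustrative only):
#   binds('experimental', 'stocks')        -> 80
#   binds('experimental', 'concentration') -> 20
#   ranges('historic', 'stocks')           -> [-40, 40]
# Unknown dataset/variable pairs raise KeyError, since neither helper defines a default.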
``` |
{
"source": "jorgejimenez98/auditorio-django-react",
"score": 2
} |
#### File: apps/core/serializers.py
```python
from django.contrib.auth import get_user_model
from rest_framework import serializers
from rest_framework_simplejwt.tokens import RefreshToken
class UserMiniSerializer(serializers.ModelSerializer):
class Meta:
model = get_user_model()
fields = ['id', 'name']
class UserSerializer(serializers.ModelSerializer):
isAdmin = serializers.SerializerMethodField(read_only=True)
rol = serializers.SerializerMethodField(read_only=True)
bolRol = serializers.SerializerMethodField(read_only=True)
class Meta:
model = get_user_model()
fields = ['id', 'email', 'name', 'isAdmin', 'isBoosWorkOrder',
'isBoosPlan', 'isAuditor', 'rol', 'bolRol']
extra_kwargs = {'password': {'<PASSWORD>': <PASSWORD>, 'required': True}}
def get_isAdmin(self, obj):
return obj.is_staff
def get_rol(self, obj):
if obj.is_staff:
return 'Administrador'
elif obj.isBoosWorkOrder:
return 'Rector'
elif obj.isBoosPlan:
return 'Jefe de Plan'
elif obj.isAuditor:
return 'Auditor'
return 'Sin ROL'
def get_bolRol(self, obj):
if obj.is_staff:
return 'isAdmin'
elif obj.isBoosWorkOrder:
return 'isBoosWorkOrder'
elif obj.isBoosPlan:
return 'isBoosPlan'
elif obj.isAuditor:
return 'isAuditor'
return 'Sin ROL'
class UserSerializerWithToken(UserSerializer):
token = serializers.SerializerMethodField(read_only=True)
class Meta:
model = get_user_model()
fields = ['id', 'email', 'name', 'isAdmin',
'isBoosWorkOrder', 'isBoosPlan', 'isAuditor', 'token']
def get_token(self, obj):
token = RefreshToken.for_user(obj)
return str(token.access_token)
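# Illustrative behaviour (not part of the original file): get_rol/get_bolRol return the first
# matching flag in priority order is_staff > isBoosWorkOrder > isBoosPlan > isAuditor, so a
# user with is_staff=True and isAuditor=True serializes as rol='Administrador', bolRol='isAdmin'.
# UserSerializerWithToken additionally embeds a fresh SimpleJWT access token on each call.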
```
#### File: backend/apps/extraPermissions.py
```python
from rest_framework.permissions import BasePermission
class IsAuditor(BasePermission):
"""
Allows access only to Auditor users.
"""
def has_permission(self, request, view):
return bool(request.user and request.user.isAuditor)
```
#### File: apps/inventory/serializers.py
```python
from rest_framework import serializers
from .models import Inventory, InventoryItem
class InventoryItemSerializer(serializers.ModelSerializer):
class Meta:
model = InventoryItem
fields = '__all__'
class InventorySerializer(serializers.ModelSerializer):
yearPlan = serializers.SerializerMethodField(read_only=True)
workOrder = serializers.SerializerMethodField(read_only=True)
inventoryItems = serializers.SerializerMethodField(read_only=True)
class Meta:
model = Inventory
fields = ["id", 'author', 'yearPlan', 'workOrder', 'dateTimeCreate', 'inventoryItems']
def get_yearPlan(self, obj):
return {'id': obj.yearPlan.pk, 'year': obj.yearPlan.year}
def get_workOrder(self, obj):
return {'id': obj.workOrder.pk, 'noWO': obj.workOrder.noWO}
def get_inventoryItems(self, obj):
serializer = InventoryItemSerializer(obj.inventoryItems.all(), many=True)
return serializer.data
```
#### File: apps/yearPlan/views.py
```python
from django.db import IntegrityError
from rest_framework.decorators import action
from .serializers import YearPlanSerializer, YearPlan, YearPlanMiniSerializer
from rest_framework import viewsets, status
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from apps.extraPermissions import IsAuditor
from apps.errorMessages import *
class YearPlanViewSet(viewsets.ModelViewSet):
queryset = YearPlan.objects.all().order_by('-year')
serializer_class = YearPlanSerializer
permission_classes = [IsAuthenticated, IsAuditor]
@action(methods=['POST'], detail=False)
def createYearPlan(self, request):
data = request.data
try:
# Create Year Plan
yearPlan = YearPlan.objects.create(
year=int(data.get('year')),
author=data.get('author'),
cantidadAudit=int(data.get('cantidadAudit')),
diasAudit=int(data.get('diasAudit')),
diasFeriad=int(data.get('diasFeriad')),
diasVacaciones=int(data.get('diasVacaciones')),
diasCapacitacion=int(data.get('diasCapacitacion')),
diasReservas=int(data.get('diasReservas')),
controlInterno=int(data.get('controlInterno')),
)
# Return Response
return Response({"yearPlanId": yearPlan.pk}, status=status.HTTP_200_OK)
except Exception as e:
return Response({'detail': e.args[0]}, status=status.HTTP_400_BAD_REQUEST)
@action(methods=['POST'], detail=False)
def deleteYearPlan(self, request):
data = request.data
try:
# Delete All Years Plans
for yearPlan in data:
yearPlanToDelete = YearPlan.objects.get(id=int(yearPlan.get('id')))
yearPlanToDelete.delete()
# Return Response
return Response({"message": "Year Plan Deleted"}, status=status.HTTP_200_OK)
except IntegrityError:
message = getDeleteProtectedError("Ordenes de Trabajo")
return Response({'detail': message}, status=status.HTTP_400_BAD_REQUEST)
except Exception as e:
return Response({'detail': e.args[0]}, status=status.HTTP_400_BAD_REQUEST)
``` |
{
"source": "jorgejimenez98/backend-evaluacion-desempenno",
"score": 2
} |
#### File: apps/currency/models.py
```python
from django.db import models
class Currency(models.Model):
id = models.IntegerField(primary_key=True) # id_moneda
acronym = models.CharField(max_length=218) # cod_mone
description = models.CharField(max_length=218) # desc_mone
active = models.BooleanField() # activo
def __str__(self):
return f'Currency {self.description}'
```
#### File: evaluation/viewSets/monthlyGastronomyEvaluationViewSet.py
```python
from ..serializers.monthlyGastronomySerializer import MonthlyGastronomyEvaluationSerializer
from rest_framework import viewsets, status
from rest_framework.decorators import action
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from ..models import MonthlyGastronomyEvaluation, MonthlyMeliaEvaluation
from backend.extraPermissions import IsFoodAndDrinkBoss
from ...payTime.models import PayTime
from ...workers.models import Worker
def getMaxEval(indicators: list) -> int:
indicators.sort()
indicators.reverse()
count, bigger = 0, 0
for i in indicators:
if indicators.count(i) > count:
count = indicators.count(i)
bigger = i
return bigger
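# Illustrative behaviour (not part of the original file): getMaxEval returns the most frequent
# value, breaking frequency ties in favour of the larger value because the list is scanned in
# descending order, e.g.
#   getMaxEval([3, 3, 5])    -> 3   (3 appears twice)
#   getMaxEval([2, 2, 5, 5]) -> 5   (tie on frequency, larger value wins)
# Note that the list passed in is sorted in place as a side effect.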
class MonthlyGastronomyEvaluationViewSet(viewsets.ModelViewSet):
queryset = MonthlyGastronomyEvaluation.objects.all()
serializer_class = MonthlyGastronomyEvaluationSerializer
permission_classes = [IsAuthenticated, IsFoodAndDrinkBoss]
@action(detail=False, methods=['POST'])
def createMonthlyGastronomyEvaluation(self, request):
data = request.data
try:
# Create Gastronomy Evaluation
payTime = PayTime.objects.get(id=int(data.get('payTimeId')))
evaluateWorker = Worker.objects.get(no_interno=data.get('evaluateWorkerId'))
evaluatorWorker = Worker.objects.get(no_interno=data.get('evaluatorWorkerId'))
evaluations = data.get('evaluations')
e = MonthlyGastronomyEvaluation.objects.create(
payTime=payTime,
evaluateWorker=evaluateWorker,
evaluateWorkerCharge=evaluateWorker.cargo,
evaluatorWorker=evaluatorWorker,
evaluatorWorkerCharge=evaluatorWorker.cargo,
ind1_CDRI=evaluations[0].get('points'),
ind2_AMD=evaluations[1].get('points'),
ind3_PAPPI=evaluations[2].get('points'),
ind4_CEDP=evaluations[3].get('points'),
ind5_ROCR=evaluations[4].get('points'),
ind6_PCRBBRC=evaluations[5].get('points'),
ind7_CNPE=evaluations[6].get('points'),
ind8_CCPC=evaluations[7].get('points'),
ind9_NSC=evaluations[8].get('points'),
ind10_CPI=evaluations[9].get('points'),
ind11_INI=evaluations[10].get('points'),
ind12_RAP=evaluations[11].get('points'),
ind13_GV=evaluations[12].get('points'),
ind14_DF=evaluations[13].get('points'),
ind15_CTP=evaluations[14].get('points'),
ind16_AC=evaluations[15].get('points'),
ind17_DIS=evaluations[16].get('points'),
ind18_CDPA=evaluations[17].get('points'),
ind19_CTA=evaluations[18].get('points'),
ind20_HOPT=evaluations[19].get('points'),
ind21_CNSS=evaluations[20].get('points'),
ind22_UIE=evaluations[21].get('points'),
ind23_LCH=evaluations[22].get('points'),
ind24_APAT=evaluations[23].get('points'),
ind25_UCU=evaluations[24].get('points'),
)
# Create Melia Evaluation from Gastronomy Evaluation
MonthlyMeliaEvaluation.objects.create(
payTime=payTime,
evaluateWorker=evaluateWorker,
evaluateWorkerCharge=evaluateWorker.cargo,
evaluatorWorker=evaluatorWorker,
evaluatorWorkerCharge=evaluatorWorker.cargo,
asist_punt=getMaxEval([e.ind1_CDRI, e.ind2_AMD, e.ind24_APAT]),
dom_cum_tars=getMaxEval([e.ind10_CPI, e.ind13_GV, e.ind14_DF, e.ind15_CTP, e.ind19_CTA]),
trab_equipo=getMaxEval([e.ind3_PAPPI, e.ind17_DIS, e.ind18_CDPA]),
cal_aten_cliente=getMaxEval(
[e.ind8_CCPC, e.ind9_NSC, e.ind16_AC, e.ind20_HOPT, e.ind21_CNSS, e.ind22_UIE, e.ind23_LCH]
),
cui_area_rec_medios=getMaxEval([e.ind5_ROCR, e.ind6_PCRBBRC]),
cump_normas=getMaxEval([e.ind4_CEDP, e.ind7_CNPE, e.ind25_UCU]),
cap_camb_ini_int=getMaxEval([e.ind11_INI, e.ind12_RAP]),
observations=''
)
return Response({'Monthly Gastronomy Evaluation Created'}, status=status.HTTP_200_OK)
except Exception as e:
return Response({'detail': e.args[0]}, status=status.HTTP_400_BAD_REQUEST)
@action(detail=False, methods=['PUT'])
def editMonthlyGastronomyEvaluation(self, request):
data = request.data
try:
monthlyEval = MonthlyGastronomyEvaluation.objects.get(pk=int(data.get('evalId')))
payTime = PayTime.objects.get(id=int(data.get('payTimeId')))
evaluateWorker = Worker.objects.get(no_interno=data.get('evaluateWorkerId'))
evaluatorWorker = Worker.objects.get(no_interno=data.get('evaluatorWorkerId'))
evaluations = data.get('evaluations')
# Update Gastronomy Evaluation
monthlyEval.payTime = payTime
monthlyEval.evaluateWorker = evaluateWorker
monthlyEval.evaluateWorkerCharge = evaluateWorker.cargo
monthlyEval.evaluatorWorker = evaluatorWorker
monthlyEval.evaluatorWorkerCharge = evaluatorWorker.cargo
monthlyEval.ind1_CDRI = evaluations[0].get('points')
monthlyEval.ind2_AMD = evaluations[1].get('points')
monthlyEval.ind3_PAPPI = evaluations[2].get('points')
monthlyEval.ind4_CEDP = evaluations[3].get('points')
monthlyEval.ind5_ROCR = evaluations[4].get('points')
monthlyEval.ind6_PCRBBRC = evaluations[5].get('points')
monthlyEval.ind7_CNPE = evaluations[6].get('points')
monthlyEval.ind8_CCPC = evaluations[7].get('points')
monthlyEval.ind9_NSC = evaluations[8].get('points')
monthlyEval.ind10_CPI = evaluations[9].get('points')
monthlyEval.ind11_INI = evaluations[10].get('points')
monthlyEval.ind12_RAP = evaluations[11].get('points')
monthlyEval.ind13_GV = evaluations[12].get('points')
monthlyEval.ind14_DF = evaluations[13].get('points')
monthlyEval.ind15_CTP = evaluations[14].get('points')
monthlyEval.ind16_AC = evaluations[15].get('points')
monthlyEval.ind17_DIS = evaluations[16].get('points')
monthlyEval.ind18_CDPA = evaluations[17].get('points')
monthlyEval.ind19_CTA = evaluations[18].get('points')
monthlyEval.ind20_HOPT = evaluations[19].get('points')
monthlyEval.ind21_CNSS = evaluations[20].get('points')
monthlyEval.ind22_UIE = evaluations[21].get('points')
monthlyEval.ind23_LCH = evaluations[22].get('points')
monthlyEval.ind24_APAT = evaluations[23].get('points')
monthlyEval.ind25_UCU = evaluations[24].get('points')
monthlyEval.save()
# Update Melia Evaluation
monthlyMeliaEval = MonthlyMeliaEvaluation.objects.get(evaluateWorker=evaluateWorker, payTime=payTime)
monthlyMeliaEval.payTime = payTime
monthlyMeliaEval.evaluateWorker = evaluateWorker
monthlyMeliaEval.evaluateWorkerCharge = evaluateWorker.cargo
monthlyMeliaEval.evaluatorWorker = evaluatorWorker
monthlyMeliaEval.evaluatorWorkerCharge = evaluatorWorker.cargo
monthlyMeliaEval.asist_punt = getMaxEval(
[monthlyEval.ind1_CDRI, monthlyEval.ind2_AMD, monthlyEval.ind24_APAT]
)
monthlyMeliaEval.dom_cum_tars = getMaxEval(
[monthlyEval.ind10_CPI, monthlyEval.ind13_GV, monthlyEval.ind14_DF, monthlyEval.ind15_CTP,
monthlyEval.ind19_CTA]
)
monthlyMeliaEval.trab_equipo = getMaxEval(
[monthlyEval.ind3_PAPPI, monthlyEval.ind17_DIS, monthlyEval.ind18_CDPA]
)
monthlyMeliaEval.cal_aten_cliente = getMaxEval(
[monthlyEval.ind8_CCPC, monthlyEval.ind9_NSC, monthlyEval.ind16_AC, monthlyEval.ind20_HOPT,
monthlyEval.ind21_CNSS, monthlyEval.ind22_UIE, monthlyEval.ind23_LCH]
)
monthlyMeliaEval.cui_area_rec_medios = getMaxEval([monthlyEval.ind5_ROCR, monthlyEval.ind6_PCRBBRC])
monthlyMeliaEval.cump_normas = getMaxEval(
[monthlyEval.ind4_CEDP, monthlyEval.ind7_CNPE, monthlyEval.ind25_UCU]
)
monthlyMeliaEval.cap_camb_ini_int = getMaxEval([monthlyEval.ind11_INI, monthlyEval.ind12_RAP])
monthlyMeliaEval.save()
return Response({'Monthly Gastronomy Evaluation EDITED'}, status=status.HTTP_200_OK)
except Exception as e:
return Response({'detail': e.args[0]}, status=status.HTTP_400_BAD_REQUEST)
```
#### File: apps/evaluation/views.py
```python
from rest_framework import status
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from apps.evaluation.serializers.monthlyMeliaEvaluationSerliazer import MonthlyMeliaEvaluationSerliazer
from apps.hotel.models import Hotel
from backend.extraPermissions import IsFoodAndDrinkBoss
from apps.evaluation.models import MonthlyGastronomyEvaluation, MonthlyMeliaEvaluation
from apps.payTime.models import PayTime
from apps.workers.models import Worker
from backend.utils import insertion_sort
def getGastronomyEvaluationOnPayTime(pay_time: PayTime, worker: Worker):
if MonthlyGastronomyEvaluation.objects.filter(payTime__id=pay_time.id,
evaluateWorker__no_interno=worker.no_interno).exists():
model = MonthlyGastronomyEvaluation.objects.get(payTime__id=pay_time.id,
evaluateWorker__no_interno=worker.no_interno)
return model.id
return None
def getMeliaEvaluationOnPayTime(pay_time: PayTime, worker: Worker):
if MonthlyMeliaEvaluation.objects.filter(payTime__id=pay_time.id,
evaluateWorker__no_interno=worker.no_interno).exists():
model = MonthlyMeliaEvaluation.objects.get(payTime__id=pay_time.id,
evaluateWorker__no_interno=worker.no_interno)
return model.id
return None
@api_view(['POST'])
@permission_classes([IsAuthenticated, IsFoodAndDrinkBoss])
def getMonthlyPerformanceEvaluationReport(request):
data = request.data
try:
hotel = Hotel.objects.get(pk=int(data.get('hotelId')))
payTime = PayTime.objects.get(pk=int(data.get('payTimeId')))
listToOrder, listNone = [], []
for worker in hotel.workers.filter(activo=True):
evalId = getMeliaEvaluationOnPayTime(payTime, worker)
meliaEvaluation = None if evalId is None else MonthlyMeliaEvaluation.objects.get(pk=evalId)
serializer = None if evalId is None else MonthlyMeliaEvaluationSerliazer(meliaEvaluation, many=False).data
newItem = {
'worker': str(worker.nombreCompleto()).title(),
'meliaEvaluation': serializer,
'total': None if meliaEvaluation is None else meliaEvaluation.totalPoints(),
'discount': None if meliaEvaluation is None else meliaEvaluation.getDisscount(),
}
if newItem['meliaEvaluation'] is None:
listNone.append(newItem)
else:
listToOrder.append(newItem)
insertion_sort(listToOrder)
listToReturn = listToOrder + listNone
return Response(listToReturn, status=status.HTTP_200_OK)
except Exception as e:
return Response({"detail": e.args[0]}, status=status.HTTP_400_BAD_REQUEST)
```
#### File: apps/hotel/viewSet.py
```python
from rest_framework import viewsets
from rest_framework.decorators import action, api_view
from rest_framework.response import Response
from rest_framework import status
from django.db import IntegrityError
from rest_framework.permissions import IsAuthenticated, IsAdminUser
from .serializers import HotelSerializer, Hotel
from backend import utils
class HotelViewSet(viewsets.ModelViewSet):
queryset = Hotel.objects.all().order_by('-pk')
serializer_class = HotelSerializer
permission_classes = [IsAuthenticated, IsAdminUser]
def create(self, request, *args, **kwargs):
data = request.data
try:
Hotel.objects.create(
name=data.get('name'),
pos_db_name=data.get('pos_db_name'),
pms_db_name=data.get('pms_db_name'),
zunPrUnidadOrganizativaId=data.get('zunPrUnidadOrganizativaId'),
)
return Response({'Hotel Created Successfully'}, status=status.HTTP_200_OK)
except IntegrityError:
message = utils.getUniqueHotelErrorMessage(data['name'])
return Response({'detail': message}, status=status.HTTP_400_BAD_REQUEST)
except Exception as e:
return Response({'detail': e.args[0]}, status=status.HTTP_400_BAD_REQUEST)
@action(detail=False, methods=['POST'])
def deleteHotels(self, request):
try:
for hotel in request.data:
Hotel.objects.get(pk=hotel.get('id')).delete()
return Response({'Hotels Eliminated Successfully'}, status=status.HTTP_200_OK)
except IntegrityError:
return Response({'detail': utils.getDeleteErrorMessage('Hotel')}, status=status.HTTP_400_BAD_REQUEST)
except Exception as e:
return Response({'detail': e.args[0]}, status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET'])
def getHotelsWithOutPermission(request):
try:
hotels = Hotel.objects.all()
return Response(HotelSerializer(hotels, many=True).data, status=status.HTTP_200_OK)
except Exception as e:
return Response({'detail': e.args[0]}, status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET'])
def getHotelWithOutPermission(request, pk):
try:
hotel = Hotel.objects.get(pk=pk)
return Response(HotelSerializer(hotel, many=False).data, status=status.HTTP_200_OK)
except Exception as e:
return Response({'detail': e.args[0]}, status=status.HTTP_400_BAD_REQUEST)
```
#### File: apps/sellArea/models.py
```python
from django.db import models
from apps.hotel.models import Hotel
# Restpvta === ZunPosGH, ColonPos
class PuntoDeVenta(models.Model):
id_pvta = models.IntegerField(primary_key=True)
cod_pvta = models.CharField(max_length=5)
desc_pvta = models.CharField(max_length=25, blank=True, null=True)
activo = models.BooleanField()
hotel = models.ForeignKey(Hotel, on_delete=models.PROTECT, default=None, related_name='puntos_ventas')
def __str__(self):
return f'Punto De venta: {self.cod_pvta} - {self.desc_pvta} {self.activo}'
class Meta:
unique_together = (('cod_pvta', 'hotel'),)
index_together = (('cod_pvta', 'hotel'),)
```
#### File: users/serializers/userSerializer.py
```python
from django.contrib.auth import get_user_model
from rest_framework_simplejwt.tokens import RefreshToken
from rest_framework import serializers
class UserMiniSerializer(serializers.ModelSerializer):
isAdmin = serializers.SerializerMethodField(read_only=True)
name = serializers.SerializerMethodField(read_only=True)
email = serializers.SerializerMethodField(read_only=True)
isFoodAndDrinkBoss = serializers.SerializerMethodField(read_only=True)
rol = serializers.SerializerMethodField(read_only=True)
class Meta:
model = get_user_model()
fields = ['id', 'username', 'name', 'email', 'rol', 'isAdmin', 'isFoodAndDrinkBoss']
extra_kwargs = {'password': {'<PASSWORD>': <PASSWORD>, 'required': True}}
def get_isAdmin(self, obj):
return obj.is_staff
def get_name(self, obj):
return obj.get_full_name()
def get_email(self, obj):
return obj.email if obj.email != '' else 'No registrado'
def get_isFoodAndDrinkBoss(self, obj):
return obj.isFoodAndDrinkBoss
def get_rol(self, obj):
if self.get_isAdmin(obj):
return 'Administrador'
if self.get_isFoodAndDrinkBoss(obj):
return 'Jefe de Alimentos y Bebidas del complejo'
return 'Usuario normal'
class UserSerializer(UserMiniSerializer):
permissions = serializers.SerializerMethodField(read_only=True)
class Meta:
model = get_user_model()
fields = ['id', 'username', 'first_name', 'last_name', 'email', 'rol', 'isAdmin', 'isFoodAndDrinkBoss',
'date_joined', 'last_login', 'permissions']
def get_permissions(self, obj):
permissions = obj.get_user_permissions()
return permissions
class UserSerializerWithToken(UserSerializer):
token = serializers.SerializerMethodField(read_only=True)
class Meta:
model = get_user_model()
fields = ['id', 'username', 'email', 'name', 'isAdmin', 'isFoodAndDrinkBoss', 'rol', 'token']
def get_token(self, obj):
token = RefreshToken.for_user(obj)
return str(token.access_token)
```
#### File: backend/backend/extraPermissions.py
```python
from rest_framework.permissions import BasePermission
class IsFoodAndDrinkBoss(BasePermission):
"""
Allows access only to Food And Drink Boss users.
"""
def has_permission(self, request, view):
return bool(request.user and request.user.isFoodAndDrinkBoss)
``` |
{
"source": "jorgejimenez98/covid19-vaccine-system",
"score": 3
} |
#### File: apps/locality/formClass.py
```python
class ProvinceForm:
def __init__(self, id, name):
self.__id = id
self.__name = name
@property
def id(self):
return self.__id
@id.setter
def id(self, value):
self.__id = value
@property
def name(self):
return self.__name
@name.setter
def name(self, value):
self.__name = value
class MunicipalityForm:
def __init__(self, id, name, provinceId, provinceName):
self.__id = id
self.__name = name
self.__provinceId = provinceId
self.__provinceName = provinceName
@property
def id(self):
return self.__id
@id.setter
def id(self, value):
self.__id = value
@property
def name(self):
return self.__name
@name.setter
def name(self, value):
self.__name = value
@property
def provinceId(self):
return self.__provinceId
@provinceId.setter
def provinceId(self, value):
self.__provinceId = value
@property
def provinceName(self):
return self.__provinceName
@provinceName.setter
def provinceName(self, value):
self.__provinceName = value
class SchoolForm:
def __init__(self, id, name, munId, munName):
self.__id = id
self.__name = name
self.__munId = munId
self.__munName = munName
@property
def id(self):
return self.__id
@id.setter
def id(self, value):
self.__id = value
@property
def name(self):
return self.__name
@name.setter
def name(self, value):
self.__name = value
@property
def munId(self):
return self.__munId
@munId.setter
def munId(self, value):
self.__munId = value
@property
def munName(self):
return self.__munName
@munName.setter
def munName(self, value):
self.__munName = value
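# Illustrative note (not part of the original file): these classes are plain view-model
# carriers for the locality templates; they hold only ids/names (plus the parent province or
# municipality) behind simple properties so forms can be pre-filled and re-rendered on
# validation errors, mirroring how PersonForm is used in apps/people/views.py.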
```
#### File: apps/main/decorators.py
```python
from django.contrib.auth.decorators import user_passes_test
def checkUserAccess(rol, error_url=None):
"""
Decorator that checks whether the user has the given role; if not, the view is redirected to
the URL passed as parameter, using user_passes_test from django.contrib.auth.decorators.
"""
def userHasRol(user):
if rol == 'SPECIALIST' and user.isSpecialist:
return True
elif rol == 'ADMIN' and user.is_staff:
return True
return False
return user_passes_test(userHasRol, login_url=error_url)
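# Usage sketch (illustrative, mirrors apps/people/views.py):
#   @login_required(login_url='/login')
#   @checkUserAccess(rol='SPECIALIST', error_url='/403')
#   def some_view(request): ...
# Any rol value other than 'SPECIALIST' or 'ADMIN' makes userHasRol return False, so the
# request is always redirected to error_url.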
```
#### File: apps/people/views.py
```python
from django.shortcuts import render, redirect
from django.core.exceptions import ValidationError
from django.db.models import ProtectedError
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from ..main.decorators import checkUserAccess
from ..main.errorFunctions import *
from ..locality.models import ConsultingRoom
from .models import People
from .formClass import PersonForm
import django_excel as excel
""" PERSONS LIST """
@login_required(login_url='/login')
@checkUserAccess(rol='SPECIALIST', error_url='/403')
def peopleListView(request):
# Init Context
context = {"persons": People.objects.all()}
if request.method == 'POST':
val_edad = request.POST.get('val_edad')
if val_edad != '':
maximaEdad, minimaEdad = 0, 0
if '+' in val_edad:
context['persons'] = People.objects.filter(age__gte=60)
else:
aux = val_edad.split('-')
minimaEdad, maximaEdad = int(aux[0]), int(aux[1])
context['persons'] = People.objects.filter(age__gte=minimaEdad, age__lte=maximaEdad)
# Render List
return render(request, 'people/list.html', context)
""" ADD PERSON """
@login_required(login_url='/login')
@checkUserAccess(rol='SPECIALIST', error_url='/403')
def peopleAddView(request):
# Init Context
context = {
"person": PersonForm(),
"consults": ConsultingRoom.objects.all()
}
if request.method == 'POST':
# Get data from template
val_ci = request.POST.get('val_ci')
val_name = request.POST.get('val_name')
val_last_names = request.POST.get('val_last_names')
val_sex = request.POST.get('val_sex')
val_age = request.POST.get('val_age')
val_adress = request.POST.get('val_adress')
val_consultId = request.POST.get('val_consult')
val_prc_possitive = request.POST.get('val_prc_possitive') == 'on'
val_date_prc = request.POST.get('val_date_prc')
# Update context
context['person'].ci = val_ci.strip()
context['person'].name = val_name
context['person'].last_names = val_last_names
context['person'].sex = val_sex
context['person'].age = val_age
context['person'].address = val_adress
context['person'].consulting_room_id = val_consultId
context['person'].positive_pcr = val_prc_possitive
context['person'].date_pcr = val_date_prc
# Try except
try:
""" Validate CI """
validateCI(str(val_ci).strip())
# Validate pcr and date
if val_prc_possitive and val_date_prc == '':
raise Exception(getNoDatePcr())
elif not val_prc_possitive and val_date_prc != '':
raise Exception(getNoPcrSelect())
# Create Person
People.objects.create(
ci=val_ci.strip(),
name=val_name,
last_names=val_last_names,
sex=val_sex,
age=int(val_age),
address=val_adress,
consulting_room=ConsultingRoom.objects.get(
pk=int(val_consultId)),
positive_pcr=val_prc_possitive,
date_pcr=None if val_date_prc == '' else getDateFromString(
val_date_prc)
)
# Render to list after create
messages.success(request, getSuccessCreatedMessage("Persona"))
return redirect('peopleListView')
except ValidationError:
# Validate Unique CI
messages.error(request, getUniqueCIError("Persona", val_ci))
except Exception as e:
# Manage All posible Errors
messages.error(request, e.args[0])
# Render Form
return render(request, 'people/addOrEdit.html', context)
""" EDIT PERSON """
@login_required(login_url='/login')
@checkUserAccess(rol='SPECIALIST', error_url='/403')
def peopleEditView(request, pk):
# Init Context
person = People.objects.get(id=pk)
personForm = PersonForm()
personForm.updateValues(person)
context = {
"person": personForm,
"consults": ConsultingRoom.objects.all()
}
if request.method == 'POST':
# Get data from template
val_ci = request.POST.get('val_ci')
val_name = request.POST.get('val_name')
val_last_names = request.POST.get('val_last_names')
val_sex = request.POST.get('val_sex')
val_age = request.POST.get('val_age')
val_adress = request.POST.get('val_adress')
val_consultId = request.POST.get('val_consult')
val_prc_possitive = request.POST.get('val_prc_possitive') == 'on'
val_date_prc = request.POST.get('val_date_prc')
# Update context
context['person'].id = pk
context['person'].ci = val_ci.strip()
context['person'].name = val_name
context['person'].last_names = val_last_names
context['person'].sex = val_sex
context['person'].age = val_age
context['person'].address = val_adress
context['person'].consulting_room_id = val_consultId
context['person'].positive_pcr = val_prc_possitive
context['person'].date_pcr = val_date_prc
# Try except
try:
""" Validate CI """
validateCI(str(val_ci).strip())
# Validate pcr and date
if val_prc_possitive and val_date_prc == '':
raise Exception(getNoDatePcr())
elif not val_prc_possitive and val_date_prc != '':
raise Exception(getNoPcrSelect())
# Update Person Values
person.ci = val_ci.strip()
person.name = val_name
person.last_names = val_last_names
person.sex = val_sex
person.age = int(val_age)
person.address = val_adress
person.consulting_room = ConsultingRoom.objects.get(pk=int(val_consultId))
person.positive_pcr = val_prc_possitive
person.date_pcr = None if val_date_prc == '' else getDateFromString(val_date_prc)
person.save()
# Render to list after create
messages.success(request, getSuccessEditMessage("Persona"))
return redirect('peopleListView')
except ValidationError:
# Validate Unique CI
messages.error(request, getUniqueCIError("Persona", val_ci))
except Exception as e:
# Manage All posible Errors
messages.error(request, e.args[0])
# Render Form
return render(request, 'people/addOrEdit.html', context)
""" DELETE PERSON """
@login_required(login_url='/login')
@checkUserAccess(rol='SPECIALIST', error_url='/403')
def peopleDeleteView(request, pk):
people = People.objects.get(id=pk)
try:
people.delete()
messages.success(request, getDelSuccessText("Persona", people.name))
except ProtectedError:
messages.error(request, getDelProtectText("Persona", people.name))
return redirect('peopleListView')
""" Export to Excel """
def peopleExportView(request):
rows = [['CI', 'Nombre', 'Apellidos', "Sexo", "Edad", "Direccion", "Consultorio", "PCR Positivo"]]
for person in People.objects.all():
rows.append([
person.ci,
person.name,
person.last_names,
person.sex,
person.age,
person.address,
person.consulting_room.name + " " + person.consulting_room.polyclinic.name,
"SI" if person.positive_pcr is not None else "NO"
])
sheet = excel.pe.Sheet(rows)
return excel.make_response(sheet, "csv", file_name='listado de personas')
``` |
{
"source": "jorgejimenez98/pyqt5-ejemplo-taxis",
"score": 3
} |
#### File: pyqt5-ejemplo-taxis/modelo/servicio_repositorio.py
```python
from modelo.funciones_de_ayuda import *
class Servicio_Repositorio:
def __init__(self, rep):
self.__repositorio = rep
def importe_x_codigo_viaje(self, cod):
if len(self.__repositorio.lista_viajes) == 0:
raise Exception("No hay viajes registrados")
for i in self.__repositorio.lista_viajes:
if i.es_cod_viaje(cod):
return i.importe()
raise Exception("No hay viajes registrados con el codigo ({})".format(cod))
def prom_edad_chferes__viajes_locales_pas_embote_x_fecha(self, fecha):
if len(self.__repositorio.lista_viajes_locales()) == 0:
raise Exception("No hay viajes locales registrados")
cont, suma, hay_x_fecha, hay_embot = 0, 0, False, False
for i in self.__repositorio.lista_viajes_locales():
bol = valor_booleano(solo_fecha(i.fecha_inicio), solo_fecha(i.fecha_termina), fecha)
if bol:
hay_x_fecha = True
if i.paso_embotellamineto:
hay_embot = True
cont += 1
suma += i.chofer.edad
if not hay_x_fecha:
raise Exception("No hay viajes locales registrados en la fecha ({})".format(str(fecha)))
elif not hay_embot:
raise Exception("No hay viajes locales registrados que hayan pasado por enbotellamiento")
return round(suma / cont, 2)
def porciento_viajes_locales_q_hicieron_por_servicio_taxi(self):
if len(self.__repositorio.lista_viajes_locales()) == 0:
raise Exception("No hay viajes locales registrados")
total, parte, existe = 0, 0, False
for i in self.__repositorio.lista_viajes_locales():
total += 1
if i.fue_servicio_recorrida:
existe = True
parte += 1
if not existe:
raise Exception("No hay viajes locales registrados que hicieron el servicio se recorrida")
return round((100 * parte) / total, 2)
def datos_viaje_local_mayor_duracion_x_fecha(self, fecha):
if len(self.__repositorio.lista_viajes_locales()) == 0:
raise Exception("No hay viajes locales registrados")
existe = False
lista = []
for i in range(len(self.__repositorio.lista_viajes_locales())):
e = self.__repositorio.lista_viajes_locales()[i]
if solo_fecha(e.fecha_inicio) <= fecha <= solo_fecha(e.fecha_termina):
existe = True
dur = duracion(e.fecha_inicio, e.fecha_termina)
tupla = (dur, i, e)
lista.append(tupla)
if not existe:
raise Exception("No hay viajes locales registrados en la fecha ({})".format(str(fecha)))
lista.sort(), lista.reverse()
i = lista[0][2]
res = [i.cod_viaje, i.fecha_inicio, i.fecha_termina, i.num_taxi, i.chofer.nombre, i.chofer.sexo,
i.chofer.edad, i.chofer.experiencia, i.cant_pasajaros, i.fue_servicio_recorrida,
i.paso_embotellamineto]
return res
def listado_viajes_fuera_ciudad_ordenado_x_distancia(self):
if len(self.__repositorio.lista_viajes_fuera_ciudad()) == 0:
raise Exception("No hay viajes fuera de la ciudad registrados")
l = [i for i in self.__repositorio.lista_viajes_fuera_ciudad() if i.es_si_chofer_habilito(False)]
if len(l) == 0:
raise Exception("No hay viajes fuera de la ciudad registrados que no tengan el coche habilitado")
l.sort()
res = [[i.cod_viaje, i.fecha_inicio, i.fecha_termina, i.num_taxi, i.chofer.nombre, i.chofer.sexo,
i.chofer.edad, i.chofer.experiencia, i.distancia_klm, i.si_chofer_habilito] for i in l]
return res
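# Illustrative note (not part of the original file): Servicio_Repositorio wraps the repository
# with the report queries used by the presenters; every method raises Exception with a
# user-facing message when no matching trips exist, and the presenters surface e.args[0]
# through vista.mostrar_error(), as in presentador_operacion_3/4 below.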
```
#### File: pyqt5-ejemplo-taxis/presentador/presentador_operacion_3.py
```python
from modelo.servicio_repositorio import Servicio_Repositorio
from vista.dialogo_operacion_3 import Dialogo_3
class Presentador_Operacion_3:
def __init__(self, rep):
self.__repositorio = rep
self.__servicio_repositorio = Servicio_Repositorio(self.__repositorio)
def iniciar(self):
self.__vista = Dialogo_3(self)
self.__vista.calculo()
def fun_mostrar_informacion(self):
try:
res = self.__servicio_repositorio.porciento_viajes_locales_q_hicieron_por_servicio_taxi()
self.__vista.mostrar_informacoion(res)
except Exception as e:
self.__vista.mostrar_error(e.args[0])
```
#### File: pyqt5-ejemplo-taxis/presentador/presentador_operacion_4.py
```python
from modelo.servicio_repositorio import Servicio_Repositorio
from vista.dialogo_operacion_4 import Dialogo_4
from datetime import date
class Presentador_Operacion_4:
def __init__(self, rep):
self.__repositorio = rep
self.__servicio_repositorio = Servicio_Repositorio(self.__repositorio)
def iniciar(self):
self.__vista = Dialogo_4(self)
self.__vista.show()
def fun_mostrar_informacion(self):
try:
fecha = self.__vista.valor_fecha
i = self.__servicio_repositorio.datos_viaje_local_mayor_duracion_x_fecha(fecha)
cod, fecha_inicio, fecha_termina, num_taxi, nombre, llamada, emb = i[0], i[1], i[2], i[3], i[4], i[9], i[10]
llam = 'No'
if llamada:
llam = 'Si'
em = 'No'
if emb:
em = 'Si'
self.__vista.mostrar_informacion(fecha, cod, fecha_inicio, fecha_termina, num_taxi, nombre, llam, em)
self.__vista.valor_fecha = date(2018, 1, 1)
except Exception as e:
self.__vista.mostrar_error(e.args[0])
``` |
{
"source": "jorgejimenez98/recipe-app-api",
"score": 3
} |
#### File: user/tests/test_user_api.py
```python
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.urls import reverse
# Rest-Framework Imports
from rest_framework.test import APIClient
from rest_framework import status
CREATE_USER_URL = reverse('user:create')
TOKEN_URL = reverse('user:token')
ME_URL = reverse('user:me')
def create_user(**params):
return get_user_model().objects.create_user(**params)
class PublicUserApiTest(TestCase):
""" Test the users api public """
def setUp(self):
self.client = APIClient()
def test_create_valid_user_success(self):
""" Test creating with valid payload is successful """
payload = {
'email': '<EMAIL>',
'password': '<PASSWORD>',
'name': 'Test Name'
}
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
user = get_user_model().objects.get(**res.data)
self.assertTrue(user.check_password(payload['password']))
self.assertNotIn('password', res.data)
def test_user_exists(self):
""" Test checking if a user exist """
payload = {'email': '<EMAIL>', 'password': '<PASSWORD>'}
create_user(**payload)
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_password_too_short(self):
""" Test that user password is less than 5 characters """
payload = {'email': '<EMAIL>', 'password': 'te'}
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
user_exist = get_user_model().objects.filter(
email=payload['email']
).exists()
self.assertFalse(user_exist)
def test_create_token_for_user(self):
""" Test that a token is created for the user """
payload = {'email': '<EMAIL>', 'password': '<PASSWORD>'}
create_user(**payload)
res = self.client.post(TOKEN_URL, payload)
self.assertIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_200_OK)
def test_create_token_invalid_credentials(self):
""" Test that token is not created in invalid credentials are given """
create_user(email='<EMAIL>', password='<PASSWORD>')
payload = {'email': '<EMAIL>', 'password': '<PASSWORD>'}
res = self.client.post(TOKEN_URL, payload)
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_token_not_user(self):
""" Test tha token is not created without a user """
payload = {'email': '<EMAIL>', 'password': '<PASSWORD>'}
res = self.client.post(TOKEN_URL, payload)
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_token_missing_field(self):
""" Test that email and password are required """
res = self.client.post(TOKEN_URL, {
'email': 'one',
'password': ''
})
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_retrieve_user_unauthorize(self):
""" Test that authentication is required for users """
res = self.client.get(ME_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateUserApiTests(TestCase):
""" Tesit Api that require authentication """
def setUp(self):
self.user = create_user(
email='<EMAIL>',
password='<PASSWORD>',
name='Test Name'
)
self.client = APIClient()
self.client.force_authenticate(user=self.user)
def test_retrieve_profile_success(self):
""" Test retrieving profile for logged in user """
res = self.client.get(ME_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, {
'name': self.user.name,
'email': self.user.email
})
def test_post_not_allowed(self):
""" Test that post is not allowed no the me url """
res = self.client.post(ME_URL, {})
self.assertEqual(res.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_update_user_profile(self):
""" Test updatiing user profile for authenticated user """
payload = {'name': '<NAME>', 'password': '<PASSWORD>'}
res = self.client.patch(ME_URL, payload)
self.user.refresh_from_db()
self.assertEqual(self.user.name, payload['name'])
self.assertTrue(self.user.check_password(payload['password']))
self.assertEqual(res.status_code, status.HTTP_200_OK)
``` |
{
"source": "jorgelaranjo/generator-flask-api",
"score": 2
} |
#### File: model/templates/object_model.py
```python
<% if (database === 'postgresql') { -%>
import psycopg2
<% } else if (database === 'mysql') { -%>
import mysql.connector
<% } else if (database === 'sqlite') { -%>
import sqlite3
<% } -%>
<% if (database !== 'none') { -%>
<% } -%>
class <%= modelClass %>(object):
def __init__(self, id):
self.id = id
# Additional fields
def __repr__(self):
return '<%= modelClass %> {}>'.format(self.id)
``` |
{
"source": "jorgellop/orbitize",
"score": 2
} |
#### File: orbitize/tests/conftest.py
```python
import pytest
codeastro_mode = False
def pytest_addoption(parser):
parser.addoption(
"--mode",
action="store",
metavar="NAME",
help="different test modes NAME.",
)
def pytest_runtest_setup(item):
global codeastro_mode
envnames = [mark.args[0] for mark in item.iter_markers(name="mode")]
if envnames:
if item.config.getoption("--mode") == 'codeastro':
codeastro_mode = True
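# ASCII codes for the hidden success / failure messages decoded and printed in pytest_terminal_summary below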
a1 = [83, 101, 99, 114, 101, 116, 32, 67, 111, 100, 101, 58, 32, 119, 101, 100, 105, 100, 105, 116, 33]
a2 = [78, 111, 32, 115, 101, 99, 114, 101, 116, 32, 99, 111, 100, 101, 32, 121, 101, 116, 46]
@pytest.hookimpl()
def pytest_terminal_summary(terminalreporter, exitstatus, config):
if config.getoption("--mode") == 'codeastro':
if terminalreporter._session.testsfailed == 0:
vals = a1
else:
vals = a2
output_str = "".join([chr(x) for x in vals])
print(output_str)
```
#### File: orbitize/tests/test_priors.py
```python
import numpy as np
import pytest
from scipy.stats import norm as nm
import orbitize.priors as priors
threshold = 1e-1
initialization_inputs = {
priors.GaussianPrior : [1000., 1.],
priors.LogUniformPrior : [1., 2.],
priors.UniformPrior : [0., 1.],
priors.SinPrior : [],
priors.LinearPrior : [-2., 2.]
}
expected_means_mins_maxes = {
priors.GaussianPrior : (1000.,0.,np.inf),
priors.LogUniformPrior : (1/np.log(2),1., 2.),
priors.UniformPrior : (0.5, 0., 1.),
priors.SinPrior : (np.pi/2., 0., np.pi),
priors.LinearPrior : (1./3.,0.,1.0)
}
lnprob_inputs = {
priors.GaussianPrior : np.array([-3.0, np.inf, 1000., 999.]),
priors.LogUniformPrior : np.array([-1., 0., 1., 1.5, 2., 2.5]),
priors.UniformPrior : np.array([0., 0.5, 1., -1., 2.]),
priors.SinPrior : np.array([0., np.pi/2., np.pi, 10., -1.]),
priors.LinearPrior : np.array([0., 0.5, 1., 2., -1.])
}
expected_probs = {
priors.GaussianPrior : np.array([0., 0., nm(1000.,1.).pdf(1000.), nm(1000.,1.).pdf(999.)]),
priors.LogUniformPrior : np.array([0., 0., 1., 2./3., 0.5, 0.])/np.log(2),
priors.UniformPrior : np.array([1., 1., 1., 0., 0.]),
priors.SinPrior : np.array([0., 0.5, 0., 0., 0.]),
priors.LinearPrior : np.array([2., 1., 0., 0., 0.])
}
def test_draw_samples():
"""
Test basic functionality of `draw_samples()` method of each `Prior` class.
"""
for Prior in initialization_inputs.keys():
inputs = initialization_inputs[Prior]
TestPrior = Prior(*inputs)
samples = TestPrior.draw_samples(10000)
exp_mean, exp_min, exp_max = expected_means_mins_maxes[Prior]
assert np.mean(samples) == pytest.approx(exp_mean, abs=threshold)
assert np.min(samples) > exp_min
assert np.max(samples) < exp_max
def test_compute_lnprob():
"""
Test basic functionality of `compute_lnprob()` method of each `Prior` class.
"""
for Prior in initialization_inputs.keys():
inputs = initialization_inputs[Prior]
TestPrior = Prior(*inputs)
values2test = lnprob_inputs[Prior]
lnprobs = TestPrior.compute_lnprob(values2test)
assert np.log(expected_probs[Prior]) == pytest.approx(lnprobs, abs=threshold)
if __name__=='__main__':
test_compute_lnprob()
test_draw_samples()
``` |
{
"source": "jorgelmp/sistop-2022-1",
"score": 4
} |
#### File: GarciaFigueroaAlberto-GarciaEdgar/DEAD_CODE/Cliente.py
```python
import threading
class Cliente(object):
def __init__(self,id,tienda):
self.numero = id
self.tienda = tienda
global CajaCliente
CajaCliente=threading.Semaphore(0)
def entrar(self):
print("Cliente "+str(self.numero)+" quiere entrar a la tienda")
self.tienda.pass_client(self)
def agarra(self):
if self.tienda.estante <15:
print("El cliente"+str(self.numero)+"notifica que se acaban los productos...")
self.tienda.checarEstante()
print("El cliente"+str(self.numero)+" agarra los productos")
self.pagar()
def pagar(self):
self.tienda.CajaCliente.release()
self.tienda.CajaEmpleado.acquire()
print("El cliente "+str(self.numero)+" ha pagado sus productos")
self.salir()
def salir(self):
print("El cliente"+str(self.numero)+" abandona la tienda")
self.tienda.leave_client(self)
```
#### File: 1/GarciaFigueroaAlberto-GarciaEdgar/main.py
```python
import threading
import random
import time
#Recursos compartidos
contador_personas=0
mercancia=100
#Administración de procesos
estante_mutex= threading.Semaphore(1)
entrada_mutex= threading.Semaphore(1)
salida_mutex= threading.Semaphore(1)
caja_mutex= threading.Semaphore(1)
suministro=threading.Event()
ticket=threading.Event()
caja=threading.Event()
## La función empleado define el proceso que debe seguir para administrar los pagos
def Empleado(num):
global id, caja
runEmpleado(num)
while(True):
flag=caja.wait() #Esta bandera nos indica si un cliente a solicitado que se le cobren sus productos
if flag:
print(" ╬ El empleado despacha el pedido del cliente")
ticket.set() #Una vez recibido el pago, se libera el ticket
break
#Iniciamos hilo Empleado
def runEmpleado(numC):
print(" ╬ Empleado "+str(numC)+" se prepara para trabajar")
##CLIENTE, define las acciones que debe realizar el cliente a lo largo del tiempo
def Cliente(num):
global contador_personas
runCliente(num)
entrada_mutex.acquire() #Implementamos torniquete para evitar que entren más personas
contador_personas+=1
if contador_personas < 4:
entrar(num)
entrada_mutex.release()
#Inicia hilo cliente
def runCliente(numC):
print(" ┼ Cliente "+str(numC)+" solicitando entrar")
#Con el método agarrar, se define como el cliente adquiere sus productos, utilizando un mutex para evitar posibles problemas
def agarrar(num):
global mercancia
viendo=random.randint(1,6)
time.sleep(viendo)
print(" ┼ El cliente "+str(num)+" decide realizar una compra en la tienda")
despensa=random.randint(5,100)
estante_mutex.acquire()
mercancia=mercancia-despensa
if mercancia<20: #Cuando se detecte que el inventario se está acabando, avisamos al proveedor a traves de una bandera.
print(" *** El inventario esta acabandose, el empleado debe llamar al provedor *** ")
suministro.set()
estante_mutex.release()
pagar(num)
#Método que inidica que un cliente ha solicitado el cobro de sus productos
def pagar(numC):
global ticket
caja_mutex.acquire()
print(" ┼ El cliente "+str(numC)+" está pagando por sus articulos")
caja.set()
while True:
flag=ticket.wait() #Avisamos al empleado que se ha iniciado el pago para que se generé su ticket
if flag:
caja_mutex.release()
salir(numC)
break
#Método que indica cuando un cliente está adentro de la tienda
def entrar(num):
print(" ┼ El cliente "+str(num)+" entró a la tienda")
agarrar(num)
#Método que indica que un cliente a salido de la tienda
def salir(num):
global contador_personas
print(" ┼ El cliente "+str(num)+ " ya compró y abandona la tienda")
salida_mutex.acquire()
contador_personas-=1
salida_mutex.release()
##PROVEDOR,
def Proveedor(num):
runProveedor(num)
#Una vez que se detecta que se está acabando la mercancia, proveedor suministra más mercancia
def runProveedor(numC):
global mercancia, suministro
print(" = El proveedor "+str(numC)+" está listo para despachar, esperando la llamada de empleado")
suministro.wait(timeout=None)
while True:
if suministro.wait(timeout=None):
estante_mutex.acquire()
print(" = Estoy surtiendo los estantes con de la tienda")
time.sleep(5)
mercancia=100
suministro.clear()
estante_mutex.release()
# Almacenamos los hilos de actores
hilos_clientes = []
hilos_empleados = []
hilos_proveedores= []
clientes_atendidos=0
def main():
print(" ╔"+"═"*21+"╗")
print(" ║ TIENDA DE DISFRACES ║")
print(" ╚"+"═"*21+"╝")
numClientes = int(input(" - ¿Cuántos clientes llegarán a la tienda?"))
for k in range (1):
hilos_proveedores.append(threading.Thread(target=Proveedor, args=[k])) #Creamos los hilos de los proovedores
hilos_proveedores[k].start()
time.sleep(1.5)
for j in range (1):
hilos_empleados.append(threading.Thread(target=Empleado, args=[j]))#Creamos los hilos de los empleados
hilos_empleados[j].start()
time.sleep(1.5)
for i in range(numClientes):
hilos_clientes.append(threading.Thread(target=Cliente, args=[i])) #Creamos los hilos de los clientes
hilos_clientes[i].start()
time.sleep(0.5)
for k in range (1):
hilos_proveedores[k].join()
for j in range (1):
hilos_empleados[j].join()
for i in range(numClientes):
hilos_clientes[i].join()
main()
```
#### File: JimenezRodrigo/codigo/interfaz.py
```python
import curses
from time import sleep
import proyecto as pyt
import config as c
from threading import Semaphore, Thread
def menu():
#Se inicia pantalla, se obtienen dimensiones de la consola
scr = curses.initscr()
curses.noecho()
dims = scr.getmaxyx()
hilosCorriendo = False
q = -1
while q != 113 and q != 81:
scr.nodelay(1)
q = scr.getch()
scr.clear()
#Pantalla de titulo
scr.addstr(1,dims[1]-24, 'Presione \'q\' para salir')
scr.addstr(2,(dims[1]-39)//2,' _____ _ _ __ ')
scr.addstr(3,(dims[1]-39)//2,'| ___| | | | / _| ')
scr.addstr(4,(dims[1]-39)//2,'| |__ | | | |__ _ _| |_ ___ _ __ ')
scr.addstr(5,(dims[1]-39)//2,'| __|| | | \'_ \\| | | | _/ _ \\| \'_ \\ ')
scr.addstr(6,(dims[1]-39)//2,'| |___| | | |_) | |_| | || (_) | | | |')
scr.addstr(7,(dims[1]-39)//2,'\\____/|_| |_.__/ \\__,_|_| \\___/|_| |_|')
scr.addstr(8,(dims[1]-50)//2,' _ _ \n')
scr.addstr(9,(dims[1]-50)//2,' | | | | \n')
scr.addstr(10,(dims[1]-50)//2,' ___ _ __ ___| | | |_ _ __ ___ _ __ ___ \n')
scr.addstr(11,(dims[1]-50)//2,' / _ \\ \'_ \\ / _ \\ | | __| \'__/ _ \\| \'_ \\ / _ \\ \n')
scr.addstr(12,(dims[1]-50)//2,'| __/ | | | | __/ | | |_| | | (_) | | | | (_) |\n')
scr.addstr(13,(dims[1]-50)//2,' \\___|_| |_| \\___|_| \\__|_| \\___/|_| |_|\\___/ \n')
scr.addstr(16,(dims[1]//2)-15,'1. El problema')
scr.addstr(18,(dims[1]//2)-15,"""2. Ejecución visual
Opcion:""")
scr.refresh()
s = -1
#1. El problema
if q == 49:
scr.clear()
scr.nodelay(1)
#Mostrar la descripcion del problema hasta salir.
while s != 115 and s != 83:
scr.addstr(1, dims[1]-33,'Presiona \'s\' parar salir al menú')
scr.addstr(2, (dims[1]-20)//2,'El bufón en el trono')
scr.addstr(3, 2,"""
El bufón de la corte tiene un pasatiempo secreto:
le gusta disfrazarse del rey y sentarse en el trono.
Sin embargo, solo puede hacer esto cuando no hay nadie presente
en la sala: ni el rey ni los cortesanos.
-El bufón aprovechará cualquier oportunidad que tenga para darse este lujo.
-El rey suele ausentarse por periodos considerables de tiempo,
mientras que varios cortesanos pueden entrar y salir de la sala.
-Si el rey llega mientras el bufón está sentado,
el bufón tiene que levantarse inmediatamente y cederle el trono.
-Si un cortesano llega mientras el bufón está sentado,
pensará que es el rey y no lo molestará.
-El bufón también es impaciente, por lo que si cuenta que ya pasaron 10 cortesanos
por la sala y no lo han dejado a solas con el trono, aún en presencia del rey, cerrará maliciosamente
la puerta de los cortesanos y esperará a que todos se vayan.
-Los cortesanos tendrán que esperar afuera. Desafortunadamente,
cuando hay 5 cortesanos esperando, éstos se ponen impacientes,
y el bufón tiene abrirles la puerta, aún si no está sentado.""")
scr.nodelay(0)
s = scr.getch()
scr.clear()
#2. Ejecucion visual
elif q == 50:
scr.clear()
scr.nodelay(1)
#Lista de los últimos 10 eventos
textoEntrante = [""]*10
#Se crean y se inician los hilos la primera vez que se entra aquí
if not hilosCorriendo:
hiloRey = Thread(target = pyt.rey, args = [])
hiloBufon = Thread(target = pyt.bufon, args = [])
hiloCortesanos = Thread(target = pyt.llegadaCortesanos, args = [])
hiloRey.start()
hiloBufon.start()
hiloCortesanos.start()
hilosCorriendo = True
#Se abre el torniquete para generar cortesanos
c.pausa.release()
while s != 115 and s != 83:
s = scr.getch()
#Se espera a que un hilo avise de una actualización
c.sigHilos.acquire()
scr.clear()
#Se visualiza el estado actual del escenario
scr.addstr(1, dims[1]-33,'Presiona \'s\' parar salir al menú')
scr.addstr(2,(dims[1]-20)//2,"El bufón en el trono")
scr.addstr(4,(dims[1]-23)//2,c.grafico[0])
scr.addstr(5,(dims[1]-23)//2,c.grafico[1])
scr.addstr(6,(dims[1]-23)//2,c.grafico[2])
scr.addstr(7,(dims[1]-23)//2,c.grafico[3])
scr.addstr(8,(dims[1]-23)//2,c.grafico[4])
scr.addstr(9,(dims[1]-23)//2,c.grafico[5])
scr.addstr(10,(dims[1]-23)//2,c.grafico[6])
scr.addstr(12,(dims[1]-31)//2,"B-Bufon C-Cortesano K-Rey")
#Se actualiza la lista de eventos recientes, y se muestra
for i in reversed(range(9)):
textoEntrante[i+1] = textoEntrante[i]
textoEntrante[0] = c.grafico[7]
scr.addstr(14,(dims[1]-66)//2,textoEntrante[9])
scr.addstr(15,(dims[1]-66)//2,textoEntrante[8])
scr.addstr(16,(dims[1]-66)//2,textoEntrante[7])
scr.addstr(17,(dims[1]-66)//2,textoEntrante[6])
scr.addstr(18,(dims[1]-66)//2,textoEntrante[5])
scr.addstr(19,(dims[1]-66)//2,textoEntrante[4])
scr.addstr(20,(dims[1]-66)//2,textoEntrante[3])
scr.addstr(21,(dims[1]-66)//2,textoEntrante[2])
scr.addstr(22,(dims[1]-66)//2,textoEntrante[1])
scr.addstr(23,(dims[1]-66)//2,textoEntrante[0])
scr.refresh()
sleep(0.25)
#Se señaliza al actor que ya se termino de actualizar la pantalla.
c.sigInterfaz.release()
#Se cierra el torniquete para detener la generación de cortesanos
c.pausa.acquire()
sleep(0.05)
curses.endwin()
menu()
```
#### File: 1/MorenoEduardo/aeropuerto.py
```python
import threading
import time
import random
import sys
import os
num_maletas = 0
limite_maletas = False
num_pasajeros = 0
limite_pasajeros = False
num_hack = 0
num_serf = 0
maletas_ = threading.Semaphore(0) #💼
pasajeros_ = threading.Semaphore(0) #👤
mutex_ingreso = threading.Semaphore(3)
mutex_maletas = threading.Semaphore(2)
mutex_pasajeros = threading.Semaphore(2)
mutex_recoger = threading.Semaphore(2)
def maletas(): #maletas es la función que permite ingresar maletas a la banda, no recibe un argumento.
global num_maletas
global mutex_maletas
global limite_pasajeros
global limite_maletas
global maletas_
mutex_maletas.acquire()
if (num_maletas == 15 and limite_maletas==False):
limite_maletas = True
mutex_maletas.release()
elif (num_maletas >= 7 and num_maletas < 15 ):
num_maletas += 1
print("Ha llegado ", end="")
ingresa("maleta")
print("")
mutex_maletas.release()
else:
if(limite_maletas == False):
num_maletas += 1
print("Ha llegado ", end="")
ingresa("maleta")
print("")
mutex_maletas.release()
maletas_.acquire()
elif(limite_maletas == False and num_maletas != 15):
limite_maletas = True
else:
mutex_maletas.release()
print("¡No hay espacio en la cinta!")
def pasajeros(recoge): # Pasajeros es la función que permite ingresar pasajeros alrededor de la banda, como tal recibe el argumento
global num_pasajeros # recoge, el cual le indica si va a recoger una maleta o no, con esta información libera pasajero y maleta.
global limite_maletas
global limite_pasajeros
global mutex_pasajeros
global pasajeros_
global maletas_
mutex_pasajeros.acquire()
if (num_pasajeros == 20 and limite_pasajeros == False):
limite_pasajeros = True
mutex_pasajeros.release()
elif (num_pasajeros >= 7 and num_pasajeros <= 20 and recoge == 1):
#ingresa("pasajero")
#pantalla()
pasajeros_.release()
maletas_.release()
mutex_pasajeros.release()
recoger()
else:
if(limite_pasajeros == False):
num_pasajeros += 1
print("Ha llegado ", end="")
ingresa("pasajero")
print("")
mutex_pasajeros.release()
pasajeros_.acquire()
elif(limite_pasajeros == False and num_pasajeros != 20):
limite_pasajeros = True
else:
mutex_pasajeros.release()
print("¡No hay espacio alrededor de la cinta!")
def ingresa(tipo): # Esta función imprime una maleta o un pasajero dependiendo de lo que se le mande en su argumento de entrada
if(tipo == "maleta"):
print("💼", end="")
elif(tipo == "pasajero"):
print("👤", end="")
def imprimirProblema(): # Es el encargado de imprimir el enunciado inicial.
print(" ✈️ 🌎 ======== La cinta del aeropuerto ======== ✈️ 🌎 ")
print("Nos encontramos en un aeropuerto, en el aeropuerto tenemos una cinta que trae el equipaje,"
+ " alrededor de ella hay gente esperando para recogerlo, pasa el tiempo, entra y sale equipaje, así como personas, no todo" +
" el equipaje saldrá, pero observemos como se mueve el ritmo de maletas en el aeropuerto")
def recoger(): # Recoger es la función encargada de que el pasajero recoja la maleta, de esta manera se eliminan
global num_maletas # los hilos de una maleta y un pasajero.
global num_pasajeros # No necesita de ninguna entrada y no devuelve nada.
global limite_maletas
global limite_pasajeros
mutex_recoger.acquire()
num_maletas = num_maletas -1
num_pasajeros = num_pasajeros -1
if (num_pasajeros == 0 or num_maletas == 0):
print("No hay nada")
elif(num_pasajeros >= 1 and num_maletas >= 1):
print("Se va ", end="")
ingresa("pasajero")
print("")
print("Se va ", end="")
ingresa("maleta")
print("")
else:
print("Todo mal")
mutex_recoger.release()
def pantalla(): # Se imprimen las maletas y los pasajeros existentes alrededor y encima de la cinta.
global num_pasajeros # No es necesario un argumento, y tampoco tiene una salida
global num_maletas
a = 0
b = 0
mutex_ingreso.acquire()
while a < num_maletas: #Impresión de las maletas en la banda
ingresa("maleta")
a += 1
print(" ")
while b < num_pasajeros: #Impresión de los pasajeros alrededor de la banda
ingresa("pasajero")
b += 1
mutex_ingreso.release()
try:
imprimirProblema()
while True:
if round(random.choice([0,1])) == 0:
threading.Thread(target = maletas, args = []).start()
time.sleep(2)
pantalla()
print("")
else:
threading.Thread(target = pasajeros, args = [round(random.choice([0,1]))]).start()
time.sleep(2)
pantalla()
print("")
except KeyboardInterrupt:
print(" ║║ Vámonos a casa ║║ ")
sys.exit()
except AttributeError:
sys.exit()
```
#### File: 2/CarrilloRicardo/tarea2.py
```python
import threading
import time
import random
alumnos_sentados = []
mutex = threading.Semaphore(1)
barrera = threading.Semaphore(0)
contador_alumnos = 0
def alumno(id):
global mutex, barrera, contador_alumnos
#llego un alumno
if len(alumnos_sentados) < 5: #verifica si tiene espacio en el cubiculo
mutex.acquire()
print('\033[;37malumno sentadito: \033[;36m'+ str(id) )
alumnos_sentados.append(id)
contador_alumnos += 1
mutex.release()
barrera.release()
else: #si no hay lugar se duerme y espera a que se desocupe
print(f"\033[;37malumno \033[;36m{id} \033[;37mdice: no hay lugar mejor me duermo")
time.sleep(random.random())
pass
def profesor():
global mutex, barrera, contador_alumnos
while True:
print("\033[;33mESPERANDO A QUE SE JUNTEN ALUMNOS")
print(f"\033[;35mHAY {contador_alumnos} ALUMNOS EN ESPERA") #verifica si hay alumnos esperando ser atendidos
if contador_alumnos >= 3: # pasa grupo de 3 alumnos
print(f"\033[;32mPASANDO GRUPO DE {contador_alumnos} ALUMNOS")
barrera.acquire()
while alumnos_sentados: # mientras haya alumnos en su cubiculo
a = alumnos_sentados.pop() # atendemos las dudas del primer alumno
contador_alumnos -= 1
for duda in range(random.randint(1,5)):
print(f'\033[;37mATENDIENDO SU DUDA # \033[;31m{duda} \033[;37mALUMNO \033[;36m{a}')
time.sleep(random.random())
else:
print('\033[;37mMIMIDO, NO MOLESTAR') #si no se ha juntado grupo de alumnos el profesor duerme
time.sleep(5)
threading.Thread(target = profesor).start()
id = 0
while True:
threading.Thread(target=alumno,args=[id]).start()
id += 1
time.sleep(random.random())
if id >= 10:
time.sleep(random.randint(10,15))
```
#### File: 2/HernandezIvan/alum_ase.py
```python
from threading import Semaphore, Thread
import time
import random
cuenta = 0
num_alumnos = 9
descansa_asesor = Semaphore(0)
mutex = Semaphore(1)
torniquete = Semaphore(1)
duda = Semaphore(2)
cubiculo = Semaphore (0)
def asesor():
global duda
while(True):
global cuenta, num_alumnos,mutex, duda
duda.acquire()
print("Asesor dando asesoria")
time.sleep(3)
duda.release()
#print(cuenta)
if cuenta == num_alumnos:
print("Asesor se va a dormir")
descansa_asesor.acquire()
def alumnos(id):
global cuenta, mutex, duda
torniquete.acquire()
time.sleep(1)
torniquete.release()
mutex.acquire()
cuenta = cuenta + 1
duda.acquire()
print("Alumno %d tomando asesoria" % id)
time.sleep(3)
duda.release()
mutex.release()
Thread(target=asesor).start()
for i in range(num_alumnos):
Thread(target=alumnos, args=[i]).start()
```
#### File: 2/SantiagoDiego-UgaldeArmando/los_alumnos_y_el_asesor.py
```python
import threading, queue
import time
# The problem is similar to producer-consumer;
# however, in this case, once a student has been
# attended to, they are sent back to the queue if they
# still have questions left (each starts with y questions)
buf = queue.Queue()
# Maximum number of students in the classroom
x = 10
# Maximum number of questions per student
y = 5
# Student id counter
eid = 1
eid_lock = threading.Lock()
# Mutex protecting the student queue
mutex = threading.Semaphore(1)
# Semaphore counting the students currently in the queue
estudiantes = threading.Semaphore(0)
class Estudiante:
def __init__(self, eid):
self.preguntas_restantes = y
self.id = eid
def preguntas_se_acabaron(self):
return self.preguntas_restantes == 0
def hacer_pregunta(self):
print("Estudiante " + str(self.id) + " hizo una pregunta")
time.sleep(0.5)
# Update this student's remaining question count
self.preguntas_restantes -= 1
# Fill the classroom with the initial batch of students
def llenar_salon_inicial():
for i in range(x):
producir_estudiante()
# Simulate students arriving after the session has started
def llenar_salon_periodicamente():
while True:
producir_estudiante()
time.sleep(5)
# Add a student to the classroom so they can be
# attended to. If there is no room, they are turned away
def producir_estudiante():
global eid
# Generar estudiante con nuevo id
with eid_lock:
estudiante = Estudiante(eid)
eid = eid + 1
mutex.acquire()
# There is room in the classroom, add the student
if buf.qsize() < x:
buf.put(estudiante)
print("Estudiante agregado (" + str(estudiante.id) + ")")
estudiantes.release()
# No room, turn the student away
else:
print("Estudiante " + str(estudiante.id) + " rechazado")
mutex.release()
# The professor attends to the students present in the
# classroom. They wait in a queue. When a student is
# attended to, they are sent to the back of the "line"
# so the remaining students are served as quickly as possible.
# When a student finishes all of their questions, they leave
# the classroom (are removed from the queue), freeing a spot
# for another student.
def profe_consumidor():
while True:
mutex.acquire()
estudiante = buf.get()
estudiante.hacer_pregunta()
# Remove the student from the classroom
if estudiante.preguntas_se_acabaron():
print("Estudiante " + str(estudiante.id) + " terminó sus preguntas")
estudiantes.acquire()
# Send the student to the back of the line
else:
buf.put(estudiante)
mutex.release()
time.sleep(1.5)
llenar_salon_inicial()
# The producer keeps sending students into the classroom
threading.Thread(target=llenar_salon_periodicamente, args=[]).start()
profe_consumidor()
```
#### File: 3/GarciaFigueroaAlberto-GarciaEdgar/Proceso.py
```python
class Proceso:
def __init__(self,tiempo_de_llegada,t,id):
self.t=t
self.tiempo_de_llegada=tiempo_de_llegada
self.id=id
self.inicio=0
self.fin=0
self.T=0
self.E=0
self.P=0
self.tRestantes = t
```
#### File: 3/GarciaFigueroaAlberto-GarciaEdgar/RR.py
```python
from collections import deque
import operator
class RR:
def __init__(self,ProcesoA,ProcesoB,ProcesoC,ProcesoD,ProcesoE,quantum): #Tiempos en donde entran al sistema
self.A=ProcesoA
self.B=ProcesoB
self.C=ProcesoC
self.E=ProcesoE
self.D=ProcesoD
self.T_total=0
self.P_total=0
self.E_total=0
self.quantum=quantum
self.TiempoTotal=0
self.Final = []
def runRR(self):
procesos=[self.A,self.B,self.C,self.D,self.E]
procesosBase=[self.A,self.B,self.C,self.D,self.E]
#PROCESOS ORDENADOS
sortedProcesses=sorted(procesos, key=operator.attrgetter("tiempo_de_llegada"))
queueProcesos = []
for i in sortedProcesses:
queueProcesos.append(i)
primero = True
terminados=[]
#COLA CON PROCESO EN ESPERA
waiting=[]
waiting.append(sortedProcesses.pop(0))
while waiting:
proc = waiting.pop(0)
if(primero==True):
proc.inicio = proc.tiempo_de_llegada
primero = False
banderaQuantum = 0
#SE REQUIERE QUE NO HAYA EXCEDIDO EL QUANTUM Y QUE AÚN LE FALTE TIEMPO PARA TERMINAR EL PROCESO
while (banderaQuantum < self.quantum and proc.tRestantes > 0):
banderaQuantum += 1
proc.tRestantes-=1
self.TiempoTotal += 1
self.Final += proc.id
for x in procesosBase: ##BUSCA EN LA BASE DE LOS PROCESOS SI ALGUNO TIENE SU TIEMPO DE LLEGADA EN EL QUANTUM ACTUAL PARA AGREGARLO A LA COLA DE PROCESOS EN ESPERA
if(x.tiempo_de_llegada == self.TiempoTotal):
try:
waiting.append(sortedProcesses.pop(0))
except(IndexError):
print("Somos estudiantes, Parece que algo malo paso")
if proc.tRestantes: ##SI LE FALTA TIEMPO DE EJECUCIÓN SE VUELVE A AGREGAR A LA COLA DE PROCESOS PENDIENTES
waiting.append(proc)
else:
proc.fin= self.TiempoTotal
terminados.append(proc)
#SE LLENAN LOS PARAMETROS CALCULADOS DE LOS PROCESOS
for i in terminados:
i.T =i.fin - i.tiempo_de_llegada
i.E = i.T -i.t
i.P =i.T /i.t
#print(i.id +" "+str(i.tiempo_de_llegada)+" "+str(i.t)+" "+str(i.inicio)+" "+str(i.fin)+" " +str(i.T)+" " + str(i.E)+" " + str(i.P))
self.T_total= self.T_total + i.T
self.E_total= self.E_total + i.E
self.P_total= self.P_total + i.P
self.T_total=self.T_total / len(terminados)
self.E_total=self.E_total / len(terminados)
self.P_total=self.P_total / len(terminados)
def listDiaf(self):
for i in self.Final:
print(str(i) , end = '')
print("")
print("\nEL valor de T:"+str(self.T_total))
print("EL valor de E:"+str(self.E_total))
print("EL valor de P:"+str(round((self.P_total),2)))
```
#### File: 3/ManzanaresJorge-SalazarJesus/fcfs.py
```python
from scheduler import Scheduler
from collections import deque
class Fcfs(Scheduler):
name = "First Come First Serve (FCFS)"
def __init__(self,procesos):
self.ejecutados = []
self.ejecutados_visual = ""
self.fcfs_queue = deque(procesos)
self.t = self.fcfs_queue[0].arrvl_time
def execute(self):
while len(self.fcfs_queue)>0:
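# if the next process has not arrived yet at time t, emptyExec() (presumably provided by the Scheduler base class) accounts for the idle slot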
if self.fcfs_queue[0].arrvl_time > self.t:
self.emptyExec()
else:
ejecutando = self.fcfs_queue.popleft()
while ejecutando.timeLeft > 0 :
self.ejecutados_visual+=ejecutando.id
ejecutando.execute(1)
self.t +=1
ejecutando.compl_time = self.t
self.ejecutados.append(ejecutando)
```
#### File: 3/VillanuevaMiguel/algoritmos_deplaneacion.py
```python
import random
import copy
def fifo(procesos_datos):
T=0
T1=0
t_inicial=0
E=0
P=0
resultado=[]
for i in range(len(procesos_datos)):
t_llegada=procesos_datos[i][1]
t_n=procesos_datos[i][2]
T1=((t_inicial - t_llegada)+t_n)
T=T+T1
#print('T1=',T1)
E1=T1-t_n
#print('E1=',E1)
P1=T1/t_n
#print('P1=',P1)
E=E+E1
P=P+P1
t_inicial=t_inicial + t_n
Ttotal=T/len(procesos_datos)
Etotal=E/len(procesos_datos)
Ptotal=P/len(procesos_datos)
letra=procesos_datos[i][0]
if t_n > 0:
letra=letra*t_n
resultado.append(letra)
print("".join(resultado))
print("FIFO: T=",Ttotal," E=",Etotal,"P=",Ptotal)
def rr1(procesos_datos):
proc_final=[]
cadena=''
datos=copy.deepcopy(procesos_datos)
E=0
T=0
P=0
cont=0
condicion=len(datos)
while condicion>0:
for i in range(len(procesos_datos)):
t_llegada1=datos[i][1]
t_n1=datos[i][2]
letra1=datos[i][0]
if t_llegada1 <= cont and t_n1>0:
cadena += letra1
t_n1 -= 1
datos[i][2]=t_n1
cont+=1
if t_n1==0 and letra1 not in proc_final:
t_llegada=procesos_datos[i][1]
t_n=procesos_datos[i][2]
T1=len(cadena)-t_llegada
#print(T1)
T=T1+T
E1=T1-t_n
E=E1+E
P1=T1/t_n
P=P+P1
Ttotal=T/len(procesos_datos)
Etotal=E/len(procesos_datos)
Ptotal=P/len(procesos_datos)
proc_final.append(letra1)
condicion-=1
print(cadena)
print("RR1: T=",Ttotal," E=",Etotal," P=",Ptotal)
def spn(procesos_datos):
espera=[]
proc_final=[]
datos=copy.deepcopy(procesos_datos)
cadena=''
cadena1=''
E=0
T=0
P=0
cont=0
i=0
while i < len(datos):
t_llegada1=datos[i][1]
t_n1=datos[i][2]
letra1=datos[i][0]
if t_llegada1 == cont:
cadena1= letra1*t_n1
cont += t_n1
t_n1 = 0
datos[i][2]=t_n1
cadena+=cadena1
i+=1
elif i ==len(datos)-1:
cadena1= letra1*t_n1
cont+= t_n1
t_n1 = 0
datos[i][2]=t_n1
i+=1
cadena+=cadena1
elif t_n1<=datos[i+1][2]:
cadena1= letra1*t_n1
cont+= t_n1
t_n1 = 0
datos[i][2]=t_n1
i+=1
cadena+=cadena1
elif t_n1>datos[i+1][2]:
cadena1= datos[i+1][0]*datos[i+1][2]
cont+= datos[i+1][2]
datos[i+1][2] = 0
cadena+=cadena1
D1=datos[i+1][0]
D2=datos[i+1][1]
D3=datos[i+1][2]
datos[i+1][0]=letra1
datos[i+1][1]=t_llegada1
datos[i+1][2]=t_n1
i+=1
if t_n1 == 0 and letra1 not in proc_final:
t_llegada=procesos_datos[i-1][1]
t_n=procesos_datos[i-1][2]
T1=len(cadena)-t_llegada
#print(T1)
T=T1+T
E1=T1-t_n
#print("e1=",E1)
E=E1+E
P1=T1/t_n
#print("p1=",P1)
P=P+P1
Ttotal=T/len(procesos_datos)
Etotal=E/len(procesos_datos)
Ptotal=P/len(procesos_datos)
proc_final.append(letra1)
elif D3 == 0 and D1 not in proc_final:
t_llegada=procesos_datos[i][1]
t_n=procesos_datos[i][2]
T1=len(cadena)-t_llegada
#print(T1)
T=T1+T
E1=T1-t_n
#print("e1=",E1)
E=E1+E
P1=T1/t_n
#print("p1=",P1)
P=P+P1
Ttotal=T/len(procesos_datos)
Etotal=E/len(procesos_datos)
Ptotal=P/len(procesos_datos)
proc_final.append(datos[i][2])
print(cadena)
print("SPN: T=",Ttotal," E=",Etotal," P=",Ptotal)
proceso=["A","B","C","D","E"]
procesos_datos=[]
proceso_llegada=0
#procesos_datos=[['A',0, 3], ['B', 1,5],['C',3,2],['D', 9, 5], ['E', 12, 5]]
for i in range(len(proceso)):
proceso_t=random.randint(1,10)
procesos_datos.append([proceso[i],proceso_llegada,proceso_t])
proceso_llegada += random.randint(1,5)
print(procesos_datos)
fifo(procesos_datos)
rr1(procesos_datos)
spn(procesos_datos)
``` |
{
"source": "Jorgelof/DAS_Sistemas",
"score": 4
} |
#### File: Ago-Dic-2020/Parcial 1/Practica1.py
```python
class Matrix:
def __init__(self, matrix_string):
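# parse the matrix string: rows are separated by newlines, values within a row by whitespace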
self.lista = [[int(i) for i in j.split()] for j in matrix_string.splitlines()]
def row(self, fila):
return list(self.lista[fila-1])
def column(self, column):
return [i[column-1] for i in self.lista]
matriz = Matrix("9 8 7 \n5 3 2 \n6 6 7")
print(matriz.lista)
fila=input("fila a solicitar: " )
columna=input("Columna a solicitra: ")
print("Fila ", matriz.row(int(fila)))
print("Columna ", matriz.column(int(columna)))
``` |
{
"source": "jorgelopez1/hdfs",
"score": 2
} |
#### File: helloworld/tests/test_soak.py
```python
import logging
import os
import pytest
import shakedown # required by sdk_utils version checks
import sdk_cmd
import sdk_plan
import sdk_tasks
import sdk_upgrade
import sdk_utils
from tests import config
log = logging.getLogger(__name__)
FRAMEWORK_NAME = "secrets/hello-world"
NUM_HELLO = 2
NUM_WORLD = 3
# check environment first...
if "FRAMEWORK_NAME" in os.environ:
FRAMEWORK_NAME = os.environ["FRAMEWORK_NAME"]
if "NUM_HELLO" in os.environ:
NUM_HELLO = int(os.environ["NUM_HELLO"])
if "NUM_WORLD" in os.environ:
NUM_WORLD = int(os.environ["NUM_WORLD"])
@pytest.mark.soak_upgrade
def test_soak_upgrade_downgrade():
sdk_upgrade.soak_upgrade_downgrade(
config.PACKAGE_NAME,
config.SERVICE_NAME,
config.DEFAULT_TASK_COUNT)
@pytest.mark.soak_secrets_update
@sdk_utils.dcos_1_10_or_higher
def test_soak_secrets_update():
secret_content_alternative = "hello-world-secret-data-alternative"
test_soak_secrets_framework_alive()
sdk_cmd.run_cli("package install --cli dcos-enterprise-cli --yes")
sdk_cmd.run_cli("package install --cli hello-world --yes")
sdk_cmd.run_cli("security secrets update --value={} secrets/secret1".format(secret_content_alternative))
sdk_cmd.run_cli("security secrets update --value={} secrets/secret2".format(secret_content_alternative))
sdk_cmd.run_cli("security secrets update --value={} secrets/secret3".format(secret_content_alternative))
test_soak_secrets_restart_hello0()
# get new task ids - only first pod
hello_tasks = sdk_tasks.get_task_ids(FRAMEWORK_NAME, "hello-0")
world_tasks = sdk_tasks.get_task_ids(FRAMEWORK_NAME, "world-0")
# make sure content is changed
assert secret_content_alternative == task_exec(world_tasks[0], "bash -c 'echo $WORLD_SECRET1_ENV'")
assert secret_content_alternative == task_exec(world_tasks[0], "cat WORLD_SECRET2_FILE")
assert secret_content_alternative == task_exec(world_tasks[0], "cat secrets/secret3")
# make sure content is changed
assert secret_content_alternative == task_exec(hello_tasks[0], "bash -c 'echo $HELLO_SECRET1_ENV'")
assert secret_content_alternative == task_exec(hello_tasks[0], "cat HELLO_SECRET1_FILE")
assert secret_content_alternative == task_exec(hello_tasks[0], "cat HELLO_SECRET2_FILE")
# revert back to some other value
sdk_cmd.run_cli("security secrets update --value=SECRET1 secrets/secret1")
sdk_cmd.run_cli("security secrets update --value=SECRET2 secrets/secret2")
sdk_cmd.run_cli("security secrets update --value=SECRET3 secrets/secret3")
test_soak_secrets_restart_hello0()
@pytest.mark.soak_secrets_alive
@sdk_utils.dcos_1_10_or_higher
def test_soak_secrets_framework_alive():
sdk_plan.wait_for_completed_deployment(FRAMEWORK_NAME)
sdk_tasks.check_running(FRAMEWORK_NAME, NUM_HELLO + NUM_WORLD)
def test_soak_secrets_restart_hello0():
hello_tasks_old = sdk_tasks.get_task_ids(FRAMEWORK_NAME, "hello-0")
world_tasks_old = sdk_tasks.get_task_ids(FRAMEWORK_NAME, "world-0")
# restart pods to retrieve new secret's content
sdk_cmd.svc_cli(config.PACKAGE_NAME, FRAMEWORK_NAME, 'pod restart hello-0')
sdk_cmd.svc_cli(config.PACKAGE_NAME, FRAMEWORK_NAME, 'pod restart world-0')
# wait pod restart to complete
sdk_tasks.check_tasks_updated(FRAMEWORK_NAME, "hello-0", hello_tasks_old)
sdk_tasks.check_tasks_updated(FRAMEWORK_NAME, 'world-0', world_tasks_old)
# wait till it all running
sdk_tasks.check_running(FRAMEWORK_NAME, NUM_HELLO + NUM_WORLD)
def task_exec(task_name, command):
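# Run "dcos task exec <task> <command>" and return the first meaningful line of its output.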
cmd_str = "task exec {} {}".format(task_name, command)
lines = sdk_cmd.run_cli(cmd_str).split('\n')
log.info('dcos %s output: %s', cmd_str, lines)
for i in lines:
# ignore text starting with:
# Overwriting Environment Variable ....
# Overwriting PATH ......
if not i.isspace() and not i.startswith("Overwriting"):
return i
return ""
```
#### File: kafka/tests/test_ports.py
```python
import pytest
import sdk_cmd
import sdk_install
import sdk_marathon
import sdk_tasks
from tests import config
STATIC_PORT_OPTIONS_DICT = {"brokers": {"port": 9092}}
DYNAMIC_PORT_OPTIONS_DICT = {"brokers": {"port": 0}}
@pytest.fixture(scope='module', autouse=True)
def configure_package(configure_security):
try:
sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
yield # let the test session execute
finally:
sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
@pytest.mark.sanity
def test_dynamic_port_comes_online():
sdk_install.install(
config.PACKAGE_NAME,
config.SERVICE_NAME,
config.DEFAULT_BROKER_COUNT,
additional_options=DYNAMIC_PORT_OPTIONS_DICT)
sdk_tasks.check_running(config.SERVICE_NAME, config.DEFAULT_BROKER_COUNT)
sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
@pytest.mark.sanity
def test_static_port_comes_online():
sdk_install.install(
config.PACKAGE_NAME,
config.SERVICE_NAME,
config.DEFAULT_BROKER_COUNT,
additional_options=STATIC_PORT_OPTIONS_DICT)
sdk_tasks.check_running(config.SERVICE_NAME, config.DEFAULT_BROKER_COUNT)
# static config continues to be used in the following tests:
@pytest.mark.sanity
def test_port_static_to_static_port():
sdk_tasks.check_running(config.SERVICE_NAME, config.DEFAULT_BROKER_COUNT)
broker_ids = sdk_tasks.get_task_ids(config.SERVICE_NAME, '{}-'.format(config.DEFAULT_POD_TYPE))
marathon_config = sdk_marathon.get_config(config.SERVICE_NAME)
for broker_id in range(config.DEFAULT_BROKER_COUNT):
result = sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, 'broker get {}'.format(broker_id), json=True)
assert result['port'] == 9092
result = sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, 'endpoints broker', json=True)
assert len(result['address']) == config.DEFAULT_BROKER_COUNT
assert len(result['dns']) == config.DEFAULT_BROKER_COUNT
for port in result['address']:
assert int(port.split(':')[-1]) == 9092
for port in result['dns']:
assert int(port.split(':')[-1]) == 9092
marathon_config['env']['BROKER_PORT'] = '9095'
sdk_marathon.update_app(config.SERVICE_NAME, marathon_config)
sdk_tasks.check_tasks_updated(config.SERVICE_NAME, '{}-'.format(config.DEFAULT_POD_TYPE), broker_ids)
# all tasks are running
sdk_tasks.check_running(config.SERVICE_NAME, config.DEFAULT_BROKER_COUNT)
result = sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, 'endpoints broker', json=True)
assert len(result['address']) == config.DEFAULT_BROKER_COUNT
assert len(result['dns']) == config.DEFAULT_BROKER_COUNT
for port in result['address']:
assert int(port.split(':')[-1]) == 9095
for port in result['dns']:
assert int(port.split(':')[-1]) == 9095
@pytest.mark.sanity
def test_port_static_to_dynamic_port():
sdk_tasks.check_running(config.SERVICE_NAME, config.DEFAULT_BROKER_COUNT)
broker_ids = sdk_tasks.get_task_ids(config.SERVICE_NAME, '{}-'.format(config.DEFAULT_POD_TYPE))
marathon_config = sdk_marathon.get_config(config.SERVICE_NAME)
marathon_config['env']['BROKER_PORT'] = '0'
sdk_marathon.update_app(config.SERVICE_NAME, marathon_config)
sdk_tasks.check_tasks_updated(config.SERVICE_NAME, '{}-'.format(config.DEFAULT_POD_TYPE), broker_ids)
# all tasks are running
sdk_tasks.check_running(config.SERVICE_NAME, config.DEFAULT_BROKER_COUNT)
for broker_id in range(config.DEFAULT_BROKER_COUNT):
result = sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, 'broker get {}'.format(broker_id), json=True)
assert result['port'] != 9092
result = sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, 'endpoints broker', json=True)
assert len(result['address']) == config.DEFAULT_BROKER_COUNT
assert len(result['dns']) == config.DEFAULT_BROKER_COUNT
for port in result['address']:
assert int(port.split(':')[-1]) != 9092
for port in result['dns']:
assert int(port.split(':')[-1]) != 9092
@pytest.mark.sanity
def test_port_dynamic_to_dynamic_port():
sdk_tasks.check_running(config.SERVICE_NAME, config.DEFAULT_BROKER_COUNT)
broker_ids = sdk_tasks.get_task_ids(config.SERVICE_NAME, '{}-'.format(config.DEFAULT_POD_TYPE))
sdk_marathon.bump_cpu_count_config(config.SERVICE_NAME, 'BROKER_CPUS')
sdk_tasks.check_tasks_updated(config.SERVICE_NAME, '{}-'.format(config.DEFAULT_POD_TYPE), broker_ids)
# all tasks are running
sdk_tasks.check_running(config.SERVICE_NAME, config.DEFAULT_BROKER_COUNT)
@pytest.mark.sanity
def test_can_adjust_config_from_dynamic_to_static_port():
sdk_tasks.check_running(config.SERVICE_NAME, config.DEFAULT_BROKER_COUNT)
broker_ids = sdk_tasks.get_task_ids(config.SERVICE_NAME, '{}-'.format(config.DEFAULT_POD_TYPE))
marathon_config = sdk_marathon.get_config(config.SERVICE_NAME)
marathon_config['env']['BROKER_PORT'] = '9092'
sdk_marathon.update_app(config.SERVICE_NAME, marathon_config)
sdk_tasks.check_tasks_updated(config.SERVICE_NAME, '{}-'.format(config.DEFAULT_POD_TYPE), broker_ids)
# all tasks are running
sdk_tasks.check_running(config.SERVICE_NAME, config.DEFAULT_BROKER_COUNT)
for broker_id in range(config.DEFAULT_BROKER_COUNT):
result = sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, 'broker get {}'.format(broker_id), json=True)
assert result['port'] == 9092
result = sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, 'endpoints broker', json=True)
assert len(result['address']) == config.DEFAULT_BROKER_COUNT
assert len(result['dns']) == config.DEFAULT_BROKER_COUNT
for port in result['address']:
assert int(port.split(':')[-1]) == 9092
for port in result['dns']:
assert int(port.split(':')[-1]) == 9092
```
#### File: kafka/tests/test_sanity.py
```python
import urllib
import dcos
import dcos.config
import dcos.http
import pytest
import sdk_cmd
import sdk_hosts
import sdk_install
import sdk_marathon
import sdk_metrics
import sdk_plan
import sdk_tasks
import sdk_upgrade
import sdk_utils
import shakedown
from tests import config, test_utils
@pytest.fixture(scope='module', autouse=True)
def configure_package(configure_security):
try:
foldered_name = sdk_utils.get_foldered_name(config.SERVICE_NAME)
sdk_install.uninstall(config.PACKAGE_NAME, foldered_name)
if shakedown.dcos_version_less_than("1.9"):
# Last beta-kafka release (1.1.25-0.10.1.0-beta) excludes 1.8. Skip upgrade tests with 1.8 and just install
sdk_install.install(
config.PACKAGE_NAME,
foldered_name,
config.DEFAULT_BROKER_COUNT,
additional_options={"service": {"name": foldered_name}})
else:
sdk_upgrade.test_upgrade(
config.PACKAGE_NAME,
foldered_name,
config.DEFAULT_BROKER_COUNT,
additional_options={"service": {"name": foldered_name}})
# wait for brokers to finish registering before starting tests
test_utils.broker_count_check(config.DEFAULT_BROKER_COUNT,
service_name=foldered_name)
yield # let the test session execute
finally:
sdk_install.uninstall(config.PACKAGE_NAME, foldered_name)
# --------- Endpoints -------------
@pytest.mark.smoke
@pytest.mark.sanity
def test_endpoints_address():
foldered_name = sdk_utils.get_foldered_name(config.SERVICE_NAME)
def fun():
ret = sdk_cmd.svc_cli(
config.PACKAGE_NAME, foldered_name,
'endpoints {}'.format(config.DEFAULT_TASK_NAME), json=True)
if len(ret['address']) == config.DEFAULT_BROKER_COUNT:
return ret
return False
endpoints = shakedown.wait_for(fun)
# NOTE: do NOT assert an exact len(endpoints) == _something_ here; keep the check open to extension
assert len(endpoints['address']) == config.DEFAULT_BROKER_COUNT
assert len(endpoints['dns']) == config.DEFAULT_BROKER_COUNT
for i in range(len(endpoints['dns'])):
assert sdk_hosts.autoip_host(foldered_name, 'kafka-{}-broker'.format(i)) in endpoints['dns'][i]
assert endpoints['vip'] == sdk_hosts.vip_host(foldered_name, 'broker', 9092)
@pytest.mark.smoke
@pytest.mark.sanity
def test_endpoints_zookeeper_default():
foldered_name = sdk_utils.get_foldered_name(config.SERVICE_NAME)
zookeeper = sdk_cmd.svc_cli(config.PACKAGE_NAME, foldered_name, 'endpoints zookeeper')
assert zookeeper.rstrip('\n') == 'master.mesos:2181/{}'.format(sdk_utils.get_zk_path(foldered_name))
@pytest.mark.smoke
@pytest.mark.sanity
def test_custom_zookeeper():
foldered_name = sdk_utils.get_foldered_name(config.SERVICE_NAME)
broker_ids = sdk_tasks.get_task_ids(foldered_name, '{}-'.format(config.DEFAULT_POD_TYPE))
# create a topic against the default zk:
test_utils.create_topic(config.DEFAULT_TOPIC_NAME, service_name=foldered_name)
marathon_config = sdk_marathon.get_config(foldered_name)
# should be using default path when this envvar is empty/unset:
assert marathon_config['env']['KAFKA_ZOOKEEPER_URI'] == ''
# use a custom zk path that's WITHIN the 'dcos-service-' path, so that it's automatically cleaned up in uninstall:
zk_path = 'master.mesos:2181/{}/CUSTOMPATH'.format(sdk_utils.get_zk_path(foldered_name))
marathon_config['env']['KAFKA_ZOOKEEPER_URI'] = zk_path
sdk_marathon.update_app(foldered_name, marathon_config)
sdk_tasks.check_tasks_updated(foldered_name, '{}-'.format(config.DEFAULT_POD_TYPE), broker_ids)
sdk_plan.wait_for_completed_deployment(foldered_name)
# wait for brokers to finish registering
test_utils.broker_count_check(config.DEFAULT_BROKER_COUNT, service_name=foldered_name)
zookeeper = sdk_cmd.svc_cli(config.PACKAGE_NAME, foldered_name, 'endpoints zookeeper')
assert zookeeper.rstrip('\n') == zk_path
# topic created earlier against default zk should no longer be present:
topic_list_info = sdk_cmd.svc_cli(config.PACKAGE_NAME, foldered_name, 'topic list', json=True)
test_utils.assert_topic_lists_are_equal_without_automatic_topics([], topic_list_info)
# tests from here continue with the custom ZK path...
# --------- Broker -------------
@pytest.mark.smoke
@pytest.mark.sanity
def test_broker_list():
brokers = sdk_cmd.svc_cli(config.PACKAGE_NAME,
sdk_utils.get_foldered_name(config.SERVICE_NAME), 'broker list', json=True)
assert set(brokers) == set([str(i) for i in range(config.DEFAULT_BROKER_COUNT)])
@pytest.mark.smoke
@pytest.mark.sanity
def test_broker_invalid():
try:
sdk_cmd.svc_cli(
config.PACKAGE_NAME, sdk_utils.get_foldered_name(config.SERVICE_NAME),
'broker get {}'.format(config.DEFAULT_BROKER_COUNT + 1), json=True)
assert False, "Should have failed"
except AssertionError as arg:
raise arg
except:
pass # expected to fail
# --------- Pods -------------
@pytest.mark.smoke
@pytest.mark.sanity
def test_pods_restart():
test_utils.restart_broker_pods(sdk_utils.get_foldered_name(config.SERVICE_NAME))
@pytest.mark.smoke
@pytest.mark.sanity
def test_pod_replace():
test_utils.replace_broker_pod(sdk_utils.get_foldered_name(config.SERVICE_NAME))
# --------- Topics -------------
@pytest.mark.smoke
@pytest.mark.sanity
def test_topic_create():
test_utils.create_topic(config.EPHEMERAL_TOPIC_NAME, sdk_utils.get_foldered_name(config.SERVICE_NAME))
@pytest.mark.smoke
@pytest.mark.sanity
def test_topic_delete():
test_utils.delete_topic(config.EPHEMERAL_TOPIC_NAME, sdk_utils.get_foldered_name(config.SERVICE_NAME))
@pytest.mark.sanity
def test_topic_partition_count():
foldered_name = sdk_utils.get_foldered_name(config.SERVICE_NAME)
sdk_cmd.svc_cli(
config.PACKAGE_NAME, foldered_name,
'topic create {}'.format(config.DEFAULT_TOPIC_NAME), json=True)
topic_info = sdk_cmd.svc_cli(
config.PACKAGE_NAME, foldered_name,
'topic describe {}'.format(config.DEFAULT_TOPIC_NAME), json=True)
assert len(topic_info['partitions']) == config.DEFAULT_PARTITION_COUNT
@pytest.mark.sanity
def test_topic_offsets_increase_with_writes():
foldered_name = sdk_utils.get_foldered_name(config.SERVICE_NAME)
offset_info = sdk_cmd.svc_cli(
config.PACKAGE_NAME, foldered_name,
'topic offsets --time="-1" {}'.format(config.DEFAULT_TOPIC_NAME), json=True)
assert len(offset_info) == config.DEFAULT_PARTITION_COUNT
offsets = {}
for o in offset_info:
assert len(o) == config.DEFAULT_REPLICATION_FACTOR
offsets.update(o)
assert len(offsets) == config.DEFAULT_PARTITION_COUNT
num_messages = 10
write_info = sdk_cmd.svc_cli(
config.PACKAGE_NAME, foldered_name,
'topic producer_test {} {}'.format(config.DEFAULT_TOPIC_NAME, num_messages), json=True)
assert len(write_info) == 1
assert write_info['message'].startswith('Output: {} records sent'.format(num_messages))
offset_info = sdk_cmd.svc_cli(
config.PACKAGE_NAME, foldered_name,
'topic offsets --time="-1" {}'.format(config.DEFAULT_TOPIC_NAME), json=True)
assert len(offset_info) == config.DEFAULT_PARTITION_COUNT
post_write_offsets = {}
for o in offset_info:
assert len(o) == config.DEFAULT_REPLICATION_FACTOR
post_write_offsets.update(o)
assert not offsets == post_write_offsets
@pytest.mark.sanity
def test_decreasing_topic_partitions_fails():
partition_info = sdk_cmd.svc_cli(
config.PACKAGE_NAME, sdk_utils.get_foldered_name(config.SERVICE_NAME),
'topic partitions {} {}'.format(config.DEFAULT_TOPIC_NAME, config.DEFAULT_PARTITION_COUNT - 1), json=True)
assert len(partition_info) == 1
assert partition_info['message'].startswith('Output: WARNING: If partitions are increased')
assert ('The number of partitions for a topic can only be increased' in partition_info['message'])
@pytest.mark.sanity
def test_setting_topic_partitions_to_same_value_fails():
partition_info = sdk_cmd.svc_cli(
config.PACKAGE_NAME, sdk_utils.get_foldered_name(config.SERVICE_NAME),
'topic partitions {} {}'.format(config.DEFAULT_TOPIC_NAME, config.DEFAULT_PARTITION_COUNT), json=True)
assert len(partition_info) == 1
assert partition_info['message'].startswith('Output: WARNING: If partitions are increased')
assert ('The number of partitions for a topic can only be increased' in partition_info['message'])
@pytest.mark.sanity
def test_increasing_topic_partitions_succeeds():
partition_info = sdk_cmd.svc_cli(
config.PACKAGE_NAME, sdk_utils.get_foldered_name(config.SERVICE_NAME),
'topic partitions {} {}'.format(config.DEFAULT_TOPIC_NAME, config.DEFAULT_PARTITION_COUNT + 1), json=True)
assert len(partition_info) == 1
assert partition_info['message'].startswith('Output: WARNING: If partitions are increased')
assert ('The number of partitions for a topic can only be increased' not in partition_info['message'])
@pytest.mark.sanity
def test_no_under_replicated_topics_exist():
partition_info = sdk_cmd.svc_cli(
config.PACKAGE_NAME, sdk_utils.get_foldered_name(config.SERVICE_NAME),
'topic under_replicated_partitions', json=True)
assert len(partition_info) == 1
assert partition_info['message'] == ''
@pytest.mark.sanity
def test_no_unavailable_partitions_exist():
partition_info = sdk_cmd.svc_cli(
config.PACKAGE_NAME, sdk_utils.get_foldered_name(config.SERVICE_NAME),
'topic unavailable_partitions', json=True)
assert len(partition_info) == 1
assert partition_info['message'] == ''
# --------- CLI -------------
@pytest.mark.smoke
@pytest.mark.sanity
def test_help_cli():
sdk_cmd.svc_cli(config.PACKAGE_NAME, sdk_utils.get_foldered_name(config.SERVICE_NAME), 'help')
@pytest.mark.smoke
@pytest.mark.sanity
def test_config_cli():
foldered_name = sdk_utils.get_foldered_name(config.SERVICE_NAME)
configs = sdk_cmd.svc_cli(config.PACKAGE_NAME, foldered_name, 'config list', json=True)
assert len(configs) >= 1 # refrain from breaking this test if earlier tests did a config update
assert sdk_cmd.svc_cli(config.PACKAGE_NAME, foldered_name,
'config show {}'.format(configs[0]), print_output=False) # noisy output
assert sdk_cmd.svc_cli(config.PACKAGE_NAME, foldered_name, 'config target', json=True)
assert sdk_cmd.svc_cli(config.PACKAGE_NAME, foldered_name, 'config target_id', json=True)
@pytest.mark.smoke
@pytest.mark.sanity
def test_plan_cli():
foldered_name = sdk_utils.get_foldered_name(config.SERVICE_NAME)
assert sdk_cmd.svc_cli(config.PACKAGE_NAME, foldered_name, 'plan list', json=True)
assert sdk_cmd.svc_cli(config.PACKAGE_NAME, foldered_name, 'plan show {}'.format(config.DEFAULT_PLAN_NAME))
assert sdk_cmd.svc_cli(config.PACKAGE_NAME, foldered_name,
'plan show --json {}'.format(config.DEFAULT_PLAN_NAME), json=True)
assert sdk_cmd.svc_cli(config.PACKAGE_NAME, foldered_name,
'plan show {} --json'.format(config.DEFAULT_PLAN_NAME), json=True)
assert sdk_cmd.svc_cli(config.PACKAGE_NAME, foldered_name, 'plan force-restart {}'.format(config.DEFAULT_PLAN_NAME))
assert sdk_cmd.svc_cli(config.PACKAGE_NAME, foldered_name,
'plan interrupt {} {}'.format(config.DEFAULT_PLAN_NAME, config.DEFAULT_PHASE_NAME))
assert sdk_cmd.svc_cli(config.PACKAGE_NAME, foldered_name,
'plan continue {} {}'.format(config.DEFAULT_PLAN_NAME, config.DEFAULT_PHASE_NAME))
@pytest.mark.smoke
@pytest.mark.sanity
def test_state_cli():
foldered_name = sdk_utils.get_foldered_name(config.SERVICE_NAME)
assert sdk_cmd.svc_cli(config.PACKAGE_NAME, foldered_name, 'state framework_id', json=True)
assert sdk_cmd.svc_cli(config.PACKAGE_NAME, foldered_name, 'state properties', json=True)
@pytest.mark.smoke
@pytest.mark.sanity
def test_pod_cli():
foldered_name = sdk_utils.get_foldered_name(config.SERVICE_NAME)
assert sdk_cmd.svc_cli(config.PACKAGE_NAME, foldered_name, 'pod list', json=True)
assert sdk_cmd.svc_cli(config.PACKAGE_NAME, foldered_name,
'pod status {}-0'.format(config.DEFAULT_POD_TYPE), json=True)
assert sdk_cmd.svc_cli(config.PACKAGE_NAME, foldered_name,
'pod info {}-0'.format(config.DEFAULT_POD_TYPE), print_output=False) # noisy output
@pytest.mark.sanity
@pytest.mark.metrics
@sdk_utils.dcos_1_9_or_higher
def test_metrics():
expected_metrics = [
"kafka.network.RequestMetrics.ResponseQueueTimeMs.max",
"kafka.socket-server-metrics.io-ratio",
"kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.p95"
]
def expected_metrics_exist(emitted_metrics):
return sdk_metrics.check_metrics_presence(emitted_metrics, expected_metrics)
sdk_metrics.wait_for_service_metrics(
config.PACKAGE_NAME,
sdk_utils.get_foldered_name(config.SERVICE_NAME),
"kafka-0-broker",
config.DEFAULT_KAFKA_TIMEOUT,
expected_metrics_exist
)
```
#### File: hdfs/testing/sdk_utils.py
```python
import functools
import logging
import dcos
import shakedown
import pytest
import os
log = logging.getLogger(__name__)
def list_reserved_resources():
'''Displays the currently reserved resources on all agents via state.json;
Currently for INFINITY-1881, where we believe uninstall may not
always be doing its job correctly.'''
state_json_slaveinfo = dcos.mesos.DCOSClient().get_state_summary()['slaves']
for slave in state_json_slaveinfo:
reserved_resources = slave['reserved_resources']
if reserved_resources == {}:
continue
msg = 'on slaveid=%s hostname=%s reserved resources: %s'
log.info(msg % (slave['id'], slave['hostname'], reserved_resources))
def get_foldered_name(service_name):
# DC/OS 1.9 and earlier don't support "foldered" service names,
# aka marathon group names
if dcos_version_less_than('1.10'):
return service_name
return '/test/integration/' + service_name
def get_zk_path(service_name):
# Foldered services have slashes removed: '/test/integration/foo' => 'test__integration__foo'
return 'dcos-service-{}'.format(service_name.lstrip('/').replace('/', '__'))
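# cached so repeated version checks (e.g. in skipif markers) only query the cluster once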
@functools.lru_cache()
def dcos_version_less_than(version):
return shakedown.dcos_version_less_than(version)
def is_test_failure(pytest_request):
'''Determine if the test run failed using the request object from pytest.
The reports being evaluated are set in conftest.py:pytest_runtest_makereport()
https://docs.pytest.org/en/latest/builtin.html#_pytest.fixtures.FixtureRequest
'''
for report in ('rep_setup', 'rep_call', 'rep_teardown'):
if not hasattr(pytest_request.node, report):
continue
if not getattr(pytest_request.node, report).failed:
continue
return True
return False
def is_open_dcos():
'''Determine if the tests are being run against open DC/OS. This is presently done by
checking the envvar DCOS_ENTERPRISE.'''
return not (os.environ.get('DCOS_ENTERPRISE', 'true').lower() == 'true')
dcos_ee_only = pytest.mark.skipif(
'sdk_utils.is_open_dcos()',
reason="Feature only supported in DC/OS EE.")
# WARNING: Any file that uses these must also "import shakedown" in the same file.
dcos_1_9_or_higher = pytest.mark.skipif(
'sdk_utils.dcos_version_less_than("1.9")',
reason="Feature only supported in DC/OS 1.9 and up")
dcos_1_10_or_higher = pytest.mark.skipif(
'sdk_utils.dcos_version_less_than("1.10")',
reason="Feature only supported in DC/OS 1.10 and up")
``` |
{
"source": "jorgelopezcoronado/VAAR-VSR--CLI",
"score": 3
} |
#### File: VAAR-VSR--CLI/extras/solver.py
```python
import z3
import sys
import emoji
def createSMTInput(values = []):
"""
Our variables that we can abstract from tosca-configs
So the user can use them to create their own validations
"""
variables = ";; ---------------"
variables += ";; VARIABLES"
variables += ";; ---------------"
variables += file('./variables.smt2').read()
variables += "(declare-const vars (Array Int Int))\n"
"""
Values found on the user tosca-conf request,
we assign the values to the corresponding variable
"""
configValues = ""
for i in range(0, len(values)):
configValues += "(assert (= (store vars " + str(i) +" "+ str(values[i]) +") vars))\n"
initVariables = ""
v = variables.strip().split("\n")
v.pop()
for i in range(0, len(v)):
name = v[i].split(' ')[1]
initVariables += "(assert (= " + name +" (select vars " + str(i) + ")))\n"
"""
User-Custom-Validations: specified by the user
"""
userRules = file('./user.smt').read()
blob = variables + configValues + initVariables + userRules
output = open('./output.smt2', mode="w")
output.write(blob)
output.close()
def check(values = []):
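# regenerate the SMT2 input for these values and ask Z3 whether the combined constraints are satisfiable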
createSMTInput(values)
f = z3.parse_smt2_file("./output.smt2")
s = z3.Solver()
s.add(f)
sat = str(s.check())
if sat == "sat":
return { "sat": "sat", "model": s.model() }
else:
return { "sat": "unsat", "model": "" }
def main():
values = []
if len(sys.argv) == 3:
values = [int(sys.argv[1]), int(sys.argv[2])]
print("Input Values\n---------------------")
# + ", ".join(map(lambda x: str(x), values)))
print("total_cpus: " + str(values[0]))
print("mem_size: " + str(values[1]))
# print(check()["sat"])
sat = check()["sat"]
if sat == "unsat":
print emoji.emojize('sat: :x:', use_aliases=True)
print("Is unsat without initializing any variable")
exit(1)
solver = check(values)
if solver["sat"] == "sat":
print emoji.emojize('sat: :white_check_mark:', use_aliases=True)
else:
while solver["sat"] == "unsat" and len(values) > 0:
values.pop()
solver = check(values)
print emoji.emojize('sat: :x:', use_aliases=True)
print "\n" + emoji.emojize(':rotating_light: Suggestion :rotating_light:', use_aliases=True)+"\n---------------------"
for i in range(len(solver["model"])-2, 0, -1):
print solver["model"][i], ": ", solver["model"][solver["model"][i]]
# main()
f = z3.parse_smt2_file("./ouput2.smt2")
s = z3.Solver()
s.add(f)
print s.check()
```
#### File: VAAR-VSR--CLI/lib/preprocessor_types.py
```python
import re
import md5
def dec2Bin(num):
sym = "B"
total = num
if total > 999:
sym = "KB"
total = total / 1000
if total > 999:
sym = "MB"
total = total / 1000
if total > 999:
sym = "GB"
total = total / 1000
if total > 999:
sym = "TB"
total = total / 1000
if total > 999:
sym = "PB"
total = total / 1000
return str(total) + " " + sym
def bin2Dec(rawBin):
rawBin = str(rawBin).upper()
if "MB" in rawBin:
return str(int(rawBin.split("MB")[0]) * (10**6))
if "GB" in rawBin:
return str(int(rawBin.split("GB")[0]) * (10**9))
if "KB" in rawBin:
return str(int(rawBin.split("KB")[0]) * (10**3))
if "TB" in rawBin:
return str(int(rawBin.split("TB")[0]) * (10**12))
if "PB" in rawBin:
return str(int(rawBin.split("PB")[0]) * (10**15))
if "B" in rawBin:
return str(int(rawBin.split("B")[0]))
return rawBin
def IP2Int(ip):
o = map(int, ip.split('.'))
res = (16777216 * o[0]) + (65536 * o[1]) + (256 * o[2]) + o[3]
return str(res)
def Int2IP(ipnum):
o1 = int(ipnum / 16777216) % 256
o2 = int(ipnum / 65536) % 256
o3 = int(ipnum / 256) % 256
o4 = int(ipnum) % 256
return '%(o1)s.%(o2)s.%(o3)s.%(o4)s' % locals()
def preProcessor(line, MainData):
if preProcessBytes(line):
return preProcessBytes(line)
elif preProcessIPs(line):
return preProcessIPs(line)
elif preProcessString(line, MainData):
return preProcessString(line, MainData)
# elif preProcessFloats(line, MainData):
# return preProcessString(line, MainData)
return line
def preProcessIPs(line):
try:
pattern = re.compile(r"([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})")
start = pattern.search(line).start()
end = pattern.search(line).end()
ipRaw = line[start:end]
ipNumber = IP2Int(ipRaw)
return pattern.sub(ipNumber, line)
except:
return False
def preProcessString(line, MainData):
try:
pattern = re.compile(r"\'.+\'")
start = pattern.search(line).start()
end = pattern.search(line).end()
value = line[start+1:end-1]
result = str(int(md5.new(value).hexdigest(), 16))
MainData["stringsHashMap"][result] = value
return pattern.sub(result, line)
except:
return False
def preProcessBytes(line):
# detect bytes
try:
pattern = re.compile(r"(\d)+(\ )?(KB|MB|GB|TB|PB|B)")
start = pattern.search(line).start()
end = pattern.search(line).end()
bytesRaw = line[start:end]
bytesNumber = bin2Dec(bytesRaw)
return pattern.sub(bytesNumber, line)
except:
return False
def toscaRawValueToSMTCorrectType(attributeName, value, MainData):
# if is ip
if len(str(value).split('.')) == 4:
MainData["valueTypes"][attributeName] = "ip"
return IP2Int(value)
# if is memory size 100 GB
sizes = ["B", "KB", "MB", "GB", "TB", "PB"]
for k in sizes:
if k in str(value):
MainData["valueTypes"][attributeName] = "size"
return bin2Dec(value)
# if is true or false
if str(value) is "True" or str(value) is "False":
MainData["valueTypes"][attributeName] = "bool"
return str(int(value))
# if is int/port
try:
MainData["valueTypes"][attributeName] = "int"
return str(int(value))
except:
# if is float/version
# if is string
MainData["valueTypes"][attributeName] = "string"
result = str(int(md5.new(value).hexdigest(), 16))
MainData["stringsHashMap"][result] = value
return result
def isTypeIP(value):
value = value.strip()
parts = value.split('.')
if len(parts) != 4:
return False
# every octet must be a plain decimal number
for part in parts:
if not part.isdigit():
return False
return True
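# Illustrative round trips (inputs are assumptions, not project data):
#
#     bin2Dec("2 GB")       # -> "2000000000"
#     dec2Bin(2000000000)   # -> "2 GB"
#     IP2Int("10.0.0.1")    # -> "167772161"
#     Int2IP(167772161)     # -> "10.0.0.1"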
``` |
{
"source": "jorgeluis098/proyecto_cripto",
"score": 2
} |
#### File: algoritmos/views/sha3_view.py
```python
from django.shortcuts import render
from django.views import View
# For SHA-3 with 384- and 512-bit digests
from Cryptodome.Hash import SHA3_384, SHA3_512
class SHA2_384(View):
template_name = 'dummy.html'
# You can think of the "get" function as a main
def get(self, request, *args, **kwargs):
# code
# code
# code
context = {
# Note: the context holds the variables that will be sent to the web page
# For now we can send nothing to the page and handle everything through the console
# Whatever variables you decide to print at the end are the ones passed in the context to the web page
# For the moment it can be left empty
# The structure is the following
# 'html_variable_name': python_variable_name,
}
return render(request, self.template_name, context)
def funcion_2(self, arg1, arg2):
# code
# code
# code
return
class SHA2_512(View):
template_name = 'dummy.html'
# You can think of the "get" function as a main
def get(self, request, *args, **kwargs):
# code
# code
# code
context = {
# Note: the context holds the variables that will be sent to the web page
# For now we can send nothing to the page and handle everything through the console
# Whatever variables you decide to print at the end are the ones passed in the context to the web page
# For the moment it can be left empty
# The structure is the following
# 'html_variable_name': python_variable_name,
}
return render(request, self.template_name, context)
def funcion_2(self, arg1, arg2):
# code
# code
# code
return
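# Hedged sketch of the hashing step the stub views would perform (the message bytes
# are an assumption; the Cryptodome calls follow its documented API):
#
#     digest_384 = SHA3_384.new(data=b"mensaje").hexdigest()
#     digest_512 = SHA3_512.new(data=b"mensaje").hexdigest()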
``` |
{
"source": "jorgeluis11/profile",
"score": 2
} |
#### File: profile/profile/views.py
```python
from django.shortcuts import render_to_response, HttpResponse, render, redirect, get_list_or_404
from django.template import RequestContext
import json, httplib, urllib
from project.models import Projects
def index(request):
data = {
"projects": get_list_or_404(Projects.objects.all().order_by("-submit_date"))
}
return render(request, "index.html", data)
``` |
{
"source": "jorgeluis11/tpinterview",
"score": 2
} |
#### File: tpinterview/question/views.py
```python
import ast
import reportlab
from django.http import HttpResponse
from django.shortcuts import HttpResponse
from django.shortcuts import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.contrib.auth import authenticate
from django.contrib.auth import login
from django.contrib.auth import logout
from django.contrib.auth.decorators import login_required
from django.template import RequestContext
from django.views.generic import TemplateView
from .models import Answer
from easy_pdf.views import PDFTemplateView
from reportlab.pdfgen import canvas
@login_required
def index(request):
return render_to_response("index.html", {})
@login_required
def languages(request):
# if not request.user.is_authenticated():
# return HttpResponseRedirect('/login')
# else:
return render_to_response("language/language_list.html", {})
@login_required
def languagesDetail(request):
return render_to_response("language/language_detail.html", {})
@login_required
def languagesQuestionList(request):
return render_to_response("language/language_question.html", {})
@login_required
def testList(request):
return render_to_response("test/test_list.html", {})
@login_required
def testCandidatesList(request):
return render_to_response("test/test_candidates_list.html", {})
@login_required
def testCandidatesTestRetrieve(request):
return render_to_response("test/test_candidates_retrieve.html", {})
class HelloPDFView(PDFTemplateView):
template_name = "easy_pdf/test-candidate.html"
def get_context_data(self, **kwargs):
candidate = self.request.GET.get("candidate")
test = self.request.GET.get('test')
answers = Answer.objects.filter(candidate__slug=candidate,
question__test__slug=test).order_by('question__order')
return super(HelloPDFView, self).get_context_data(
answers=answers, **kwargs)
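# Hedged example: this view reads ?candidate=<slug>&test=<slug> from the query
# string, so a URL such as /pdf/?candidate=jane-doe&test=python-basics (path name
# assumed) renders that candidate's answers ordered by question.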
def user_login_redirect(request):
return HttpResponseRedirect('/login')
def user_login(request):
# Like before, obtain the context for the user's request.
context = RequestContext(request)
# If the request is a HTTP POST, try to pull out the relevant information.
if request.method == 'POST':
# Gather the username and password provided by the user.
# This information is obtained from the login form.
username = request.POST['username']
password = request.POST['password']
# Use Django's machinery to attempt to see if the username/password
# combination is valid - a User object is returned if it is.
user = authenticate(username=username, password=password)
# If we have a User object, the details are correct.
# If None (Python's way of representing the absence of a value),
#no user with matching credentials was found.
if user:
# Is the account active? It could have been disabled.
if user.is_active:
# If the account is valid and active, we can log the user in.
# We'll send the user back to the homepage.
login(request, user)
return HttpResponseRedirect('/')
else:
# An inactive account was used - no logging in!
return HttpResponse("Your TP account is disabled.")
else:
# Bad login details were provided. So we can't log the user in.
print "Invalid login details: {0}, {1}".format(username, password)
return render_to_response('login.html', {'error': True}, context)
# The request is not a HTTP POST, so display the login form.
# This scenario would most likely be a HTTP GET.
else:
# No context variables to pass to the template system, hence the
# blank dictionary object...
return render_to_response('login.html', {}, context)
def user_logout(request):
logout(request)
return HttpResponseRedirect('/login')
def insert_question(request):
# Like before, obtain the context for the user's request.
answers = dict(request.POST)
# print answers['{"answers":"text"}']
# print request.POST.post['answers']
for key, value in request.POST.iteritems():
answer = key
print ast.literal_eval(answer)[0]
return HttpResponse(answers)
``` |
{
"source": "jorgeluisc-code2/ecommerce-master2",
"score": 2
} |
#### File: ecommerce-master2/ecom/views.py
```python
from django.shortcuts import render, redirect
from . import forms, models
from django.http import HttpResponseRedirect
from django.core.mail import send_mail
from django.contrib.auth.models import Group
from django.contrib.auth.decorators import login_required, user_passes_test
from django.contrib import messages
from math import ceil
from .models import Product, Orders
from django.http import JsonResponse
from django.contrib.auth.models import User
from django.db.models import Count
from django.db.models import Sum
from django.views.generic import ListView, CreateView, UpdateView, DetailView, View, TemplateView
from rest_framework.authtoken.models import Token
import os
from django.shortcuts import get_object_or_404, redirect, reverse
import json
from django.views import generic
def home_view(request):
products = models.Product.objects.all().order_by('-id')[:12]
categoria = models.Categoria.objects.all()
products_slider = models.Product.objects.all().order_by('-id')
productods_total = models.Product.objects.all()
proveedores = models.Proveedor.objects.all()
allProds = []
cats = {3}
for cat in cats:
prod = models.Product.objects.filter()
n = len(prod)
nSlides = n // 4 + ceil((n / 4) - (n // 4))
allProds.append([prod, range(1, nSlides), nSlides])
params = {'allProds': allProds}
if 'product_ids' in request.COOKIES:
product_ids = request.COOKIES['product_ids']
counter = product_ids.split('|')
product_count_in_cart = len(set(counter))
else:
product_count_in_cart = 0
if request.user.is_authenticated:
return HttpResponseRedirect('afterlogin')
return render(request, 'ecom/index.html',
{'products': products, 'proveedores': proveedores, 'categoria': categoria, 'allProds': allProds,
'products_slider': products_slider,
'product_count_in_cart': product_count_in_cart, 'productods_total': productods_total})
# show the login button for the admin (by sumit)
def adminclick_view(request):
if request.user.is_authenticated:
return HttpResponseRedirect('afterlogin')
return HttpResponseRedirect('adminlogin')
def customer_signup_view(request):
customerForm = forms.CustomerForm()
mydict = {'customerForm': customerForm}
if request.method == 'POST':
usuario = request.POST.get("usuario")
contrasena = request.POST.get("contraseña")
nombres = request.POST.get("nombres")
apellidos = request.POST.get("apellidos")
customerForm = forms.CustomerForm(request.POST, request.FILES)
if customerForm.is_valid():
user = models.User(username=usuario, password=<PASSWORD>, first_name=nombres, last_name=apellidos)
user.set_password(<PASSWORD>)
user.save()
customer = customerForm.save(commit=False)
customer.user = user
customer.save()
my_customer_group = Group.objects.get_or_create(name='CUSTOMER')
my_customer_group[0].user_set.add(user)
t = Token.objects.create(user=user)
t.save()
return HttpResponseRedirect('customerlogin')
return render(request, 'ecom/customersignup.html', context=mydict)
def eleccion_registro_view(request):
if 'product_ids' in request.COOKIES:
product_ids = request.COOKIES['product_ids']
counter = product_ids.split('|')
product_count_in_cart = len(set(counter))
else:
product_count_in_cart = 0
return render(request, 'ecom/eleccion_registro.html', {"product_count_in_cart": product_count_in_cart})
# -----------check whether the user is a customer
def is_customer(user):
return user.groups.filter(name='CUSTOMER').exists()
# -----------check whether the user is a business customer
def is_customer_empresarial(user):
return user.groups.filter(name='CUSTOMEREMPRESARIAL').exists()
# -----------check whether the user is a distributor
def is_distribuidor(user):
return user.groups.filter(name='DISTRIBUIDOR').exists()
# ---------AFTER THE CREDENTIALS ARE ENTERED, WE CHECK WHETHER THE USERNAME AND PASSWORD BELONG TO AN ADMIN, CUSTOMER, DISTRIBUTOR OR BUSINESS CUSTOMER
def afterlogin_view(request):
if is_customer(request.user):
return redirect('customer-home')
elif is_distribuidor(request.user):
return redirect('distribuidor-home')
elif is_customer_empresarial(request.user):
return redirect('customer-empresarial-home')
else:
return redirect('admin-dashboard')
# ---------------------------------------------------------------------------------
# ------------------------ ADMIN-RELATED VIEWS START ------------------------------
# ---------------------------------------------------------------------------------
@login_required(login_url='adminlogin')
def admin_dashboard_view(request):
# for the dashboard cards
customercount = models.Customer.objects.all().count()
productcount = models.Product.objects.all().count()
ordercount = models.Orders.objects.all().count()
categoria = Product.objects.values('categoria').annotate(Count('categoria'))
feedback = models.Feedback.objects.all().count()
feedbacks2 = models.Feedback.objects.all().filter(estado="Pendiente")
# for the recent orders tables
orders = models.Orders.objects.all()
ordered_products = []
ordered_bys = []
for order in orders:
ordered_product = models.Product.objects.all().filter(id=order.product.id)
ordered_by = models.Customer.objects.all().filter(id=order.customer.id)
ordered_products.append(ordered_product)
ordered_bys.append(ordered_by)
mydict = {
'customercount': customercount,
'productcount': productcount,
'ordercount': ordercount,
'distribuidorcount': models.Distribuidor.objects.all().count(),
'categoria': categoria,
'feedback': feedback,
'feedbacks2': feedbacks2,
'data': zip(ordered_products, ordered_bys, orders),
}
return render(request, 'ecom/admin_dashboard.html', context=mydict)
from django.db.models import Func
# Dashboard details --------------------------------------------------
@login_required(login_url='adminlogin')
def admin_dashboard_detalle(request):
camara = Product.objects.filter(categoria__categoria__icontains='camara').count()
pc = Product.objects.filter(categoria__categoria__icontains='PC').count()
impresora = Product.objects.filter(categoria__categoria__icontains='impresora').count()
clientes_registrados = User.objects.all().filter(groups__name='CUSTOMER').count()
# ----------------- USERS - GROUPS -----------------------
total_usuarios = User.objects.all().count()
total_grupos = Group.objects.all().count()
r = Product.objects.all().aggregate(Sum('price'))
t = r.get("price__sum")
# y = ''.join(map(str, r))
# ----------------- ORDERS -----------------------
orders = models.Orders.objects.all().filter(status__contains='pendiente').order_by('-id')[:3]
ordered_products = []
ordered_bys = []
for order in orders:
ordered_product = models.Product.objects.all().filter(id=order.product.id)
ordered_by = models.Customer.objects.all().filter(id=order.customer.id)
ordered_products.append(ordered_product)
ordered_bys.append(ordered_by)
orders2 = models.Orders.objects.all().filter(status__contains='Orden Confirmada').order_by('-id')[:3]
ordered_products2 = []
ordered_bys2 = []
for order2 in orders2:
ordered_product2 = models.Product.objects.all().filter(id=order2.product.id)
ordered_by2 = models.Customer.objects.all().filter(id=order2.customer.id)
ordered_products2.append(ordered_product2)
ordered_bys2.append(ordered_by2)
mydict = {
'camara': camara,
'pc': pc,
'data': zip(ordered_products, ordered_bys, orders),
'data2': zip(ordered_products2, ordered_bys2, orders2),
'impresora': impresora,
'customercount': models.Customer.objects.all().count(),
'productcount': models.Product.objects.all().count(),
'ordercount': models.Orders.objects.all().count(),
'total_grupos': total_grupos,
'total_usuarios': total_usuarios,
'cliente_registrado': clientes_registrados,
'total_ordenes_pendientes': Orders.objects.filter(status__contains='Pendiente').count(),
'total_ordenes_confirmadas': Orders.objects.filter(status__contains='Orden Confirmada').count(),
'total_ordenes_delivery': Orders.objects.filter(status__contains='Fuera para entregar').count(),
'total_ordenes_entregada': Orders.objects.filter(status__contains='Entregada').count(),
'total_productos_precio': t,
'distribuidorcount': models.Distribuidor.objects.all().count(),
}
return render(request, 'ecom/dashboard/dashboard_detalle.html', context=mydict)
###########----------PAYMENTS-----------##############################
# admin: view the payments table
@login_required(login_url='adminlogin')
def admin_pagos_view(request):
pagos = models.Payment.objects.all().order_by('-id')
return render(request, 'ecom/admin/admin_pagos.html', {'pagos': pagos})
class admin_fecha_ordenes(View):
def get(self, request, *args, **kwargs):
return render(request, 'ecom/admin/admin_buscar_fecha_ordenes.html')
def post(self, request, *args, **kwargs):
fecha = request.POST.get("fecha")
fecha2 = request.POST.get("fecha2")
ordenes = Orders.objects.raw(
'select id AS id, order_date, address from ecom_orders where order_date between %s and %s',
[fecha, fecha2])
return render(request, 'ecom/admin/admin_buscar_fecha_ordenes.html',
{"ordenes": ordenes})
# -------------------------------------------------------------------------
# admin: view the customers table
@login_required(login_url='adminlogin')
def view_customer_view(request):
customers = models.Customer.objects.all()
clientesempresarial = models.CustomerEmpresarial.objects.all()
return render(request, 'ecom/view_customer.html',
{'customers': customers, 'clientesempresarial': clientesempresarial})
# admin: delete a customer
@login_required(login_url='adminlogin')
def delete_customer_view(request, pk):
customer = models.Customer.objects.get(id=pk)
user = models.User.objects.get(id=customer.user_id)
user.delete()
customer.delete()
return redirect('view-customer')
# admin: delete a business customer
@login_required(login_url='adminlogin')
def delete_customerempresarial_view(request, pk):
customerempresarial = models.CustomerEmpresarial.objects.get(id=pk)
user = models.User.objects.get(id=customerempresarial.user_id)
user.delete()
customerempresarial.delete()
return redirect('view-customer')
# admin: update a customer
@login_required(login_url='adminlogin')
def update_customer_view(request, pk):
customer = models.Customer.objects.get(id=pk)
user = models.User.objects.get(id=customer.user_id)
userForm = forms.CustomerUserForm(instance=user)
customerForm = forms.CustomerForm(request.FILES, instance=customer)
mydict = {'userForm': userForm, 'customerForm': customerForm}
if request.method == 'POST':
userForm = forms.CustomerUserForm(request.POST, instance=user)
customerForm = forms.CustomerForm(request.POST, instance=customer)
if userForm.is_valid() and customerForm.is_valid():
user = userForm.save()
user.set_password(<PASSWORD>)
user.save()
customerForm.save()
return redirect('view-customer')
return render(request, 'ecom/admin_update_customer.html', context=mydict)
# admin: update a business customer
@login_required(login_url='adminlogin')
def update_customerempresarial_view(request, pk):
customerempresarial = models.CustomerEmpresarial.objects.get(id=pk)
user = models.User.objects.get(id=customerempresarial.user_id)
userempresarialForm = forms.CustomerEmpresarialUserForm(instance=user)
customerempresarialForm = forms.CustomerEmpresarialForm(request.FILES, instance=customerempresarial)
mydict = {'userempresarialForm': userempresarialForm, 'customerempresarialForm': customerempresarialForm}
if request.method == 'POST':
userempresarialForm = forms.CustomerEmpresarialUserForm(request.POST, instance=user)
customerempresarialForm = forms.CustomerEmpresarialForm(request.POST, instance=customerempresarial)
if userempresarialForm.is_valid() and customerempresarialForm.is_valid():
user = userempresarialForm.save()
user.set_password(<PASSWORD>)
user.save()
customerempresarialForm.save()
return redirect('view-customer')
return render(request, 'ecom/admin/admin_update_clienteempresarial.html', context=mydict)
def editProduct(request, pk):
prod = models.Product.objects.get(id=pk)
if request.method == "POST":
if len(request.FILES) != 0:
if len(prod.product_image) > 0:
os.remove(prod.product_image.path)
prod.product_image = request.FILES['product_image']
prod.save()
messages.success(request, "Product Updated Successfully")
return redirect('admin-products')
context = {'prod': prod}
return render(request, 'ecom/admin/admin_actualizar_producto_imagen.html', context)
def editCliente(request, pk):
prod = models.Customer.objects.get(id=pk)
if request.method == "POST":
if len(request.FILES) != 0:
if len(prod.profile_pic) > 0:
os.remove(prod.profile_pic.path)
prod.profile_pic = request.FILES['profile_pic']
prod.address = request.POST.get('address')
prod.mobile = request.POST.get('mobile')
prod.save()
messages.success(request, "Product Updated Successfully")
return redirect('view-customer')
context = {'prod': prod}
return render(request, 'ecom/admin/admin_actualizar_cliente_imagen.html', context)
# admin: view products
@login_required(login_url='adminlogin')
def admin_products_view(request):
products = models.Product.objects.all()
if products.filter(stock__lte=10):
messages.info(request, 'Cuidado el stock de algunos productos ha bajado!')
elif products.filter(stock__gte=15):
messages.success(request, 'Tu stock esta correcto!')
elif products.filter(stock__exact=0):
messages.error(request, 'El stock de tus productos se ha acabado :(')
else:
messages.warning(request, '¡¡Hey!!, uno de tus productos su stock se ha acabado!')
return render(request, 'ecom/admin_products.html', {'products': products})
# admin: add a product via the floating button
@login_required(login_url='adminlogin')
def admin_add_product_view(request):
productForm = forms.ProductForm()
if request.method == 'POST':
productForm = forms.ProductForm(request.POST, request.FILES)
if productForm.is_valid():
productForm.save()
return HttpResponseRedirect('admin-products')
return render(request, 'ecom/admin_add_products.html', {'productForm': productForm})
@login_required(login_url='adminlogin')
def delete_product_view(request, pk):
product = models.Product.objects.get(id=pk)
product.delete()
return redirect('admin-products')
class update_product_view(UpdateView):
model = Product
fields = "__all__"
template_name = 'ecom/admin_update_product.html'
success_url = '/admin-products'
# --------------------------------------------
# ------------- ORDERS ----------------------
# --------------------------------------------
@login_required(login_url='adminlogin')
def admin_view_booking_view(request):
producto = models.Product.objects.all()
orders = models.Orders.objects.all().order_by('-id')
ordered_products = []
ultima_ordenes = []
ordered_bys = []
for order in orders:
ordered_product = models.Product.objects.all().filter(id=order.product.id).order_by('-id')
ordered_by = models.Customer.objects.all().filter(id=order.customer.id).order_by('-id')
ordered_products.append(ordered_product)
ordered_bys.append(ordered_by)
ultima_orden = models.Customer.objects.all().filter(id=order.customer.id).order_by('-id')
ultima_ordenes.append(ultima_orden)
return render(request, 'ecom/admin_view_booking.html',
{'data': zip(ordered_products, ordered_bys, orders), "producto": producto})
@login_required(login_url='adminlogin')
def delete_order_view(request, pk):
order = models.Orders.objects.get(id=pk)
order.delete()
return redirect('admin-view-booking')
# to change the order status (pending, delivered ...)
@login_required(login_url='adminlogin')
def update_order_view(request, pk):
order = models.Orders.objects.get(id=pk)
orderForm = forms.OrderForm(instance=order)
if request.method == 'POST':
orderForm = forms.OrderForm(request.POST, instance=order)
if orderForm.is_valid():
orderForm.save()
return redirect('admin-view-booking')
return render(request, 'ecom/update_order.html', {'orderForm': orderForm})
# admin: view feedback
@login_required(login_url='adminlogin')
def view_feedback_view(request):
feedbacks = models.Feedback.objects.all().filter(estado="Pendiente").order_by('-id')
return render(request, 'ecom/view_feedback.html', {'feedbacks': feedbacks})
@login_required(login_url='adminlogin')
def responder_feedback_view(request, pk):
responderFeedback = forms.ResponderFeedbackForm()
if request.method == 'POST':
responder = forms.ResponderFeedbackForm(request.POST)
if responder.is_valid():
enquiry_x = models.Feedback.objects.get(id=pk)
enquiry_x.descripcion_solucion = responder.cleaned_data['descripcion_solucion']
enquiry_x.estado = "Respondido"
enquiry_x.save()
else:
print("El formulario es invalido")
return HttpResponseRedirect('/view-feedback')
return render(request, 'ecom/admin/admin_responder_feedback.html', {'responderFeedback': responderFeedback})
# ---------------------------------------------------------------------------------
# ------------------------ PUBLIC (ANONYMOUS) CUSTOMER VIEWS START ----
# ---------------------------------------------------------------------------------
class detalle_producto_DetailView(DetailView):
# Product detail
model = Product
def get_context_data(self, **kwargs):
# Call the base implementation first to get a context
context = super().get_context_data(**kwargs)
# Add a QuerySet of all the categories
context['categoria'] = models.Categoria.objects.all()
return context
class detalle_order_DetailView(DetailView):
# To view the order detail
model = Orders
template_name = 'ecom/detalle-orden.html'
def search_view(request):
# whatever the user types in the search box arrives here as the query
query = request.GET['query']
products = models.Product.objects.all().filter(name__icontains=query)
if 'product_ids' in request.COOKIES:
product_ids = request.COOKIES['product_ids']
counter = product_ids.split('|')
product_count_in_cart = len(set(counter))
else:
product_count_in_cart = 0
# the word variable is shown in the HTML when the user clicks the search button
word = "Resultados de la Busqueda :"
if request.user.is_authenticated:
return render(request, 'ecom/customer_home.html',
{'products': products, 'word': word, 'product_count_in_cart': product_count_in_cart})
return render(request, 'ecom/index.html',
{'products': products, 'word': word, 'product_count_in_cart': product_count_in_cart})
def search_view_categorias(request):
# whatever the user types in the search box arrives here as the query
query = request.GET['query']
products = models.Product.objects.all().filter(categoria__categoria__icontains=query)
if 'product_ids' in request.COOKIES:
product_ids = request.COOKIES['product_ids']
counter = product_ids.split('|')
product_count_in_cart = len(set(counter))
else:
product_count_in_cart = 0
# the word variable is shown in the HTML when the user clicks the search button
word = "Resultados de la Busqueda :"
categoria = models.Categoria.objects.all()
proveedores = models.Proveedor.objects.all()
if request.user.is_authenticated:
return render(request, 'ecom/customer_home.html',
{'products': products, 'categoria': categoria, 'proveedores': proveedores, 'word': word,
'product_count_in_cart': product_count_in_cart})
return render(request, 'ecom/index.html',
{'products': products, 'categoria': categoria, 'proveedores': proveedores, 'word': word,
'product_count_in_cart': product_count_in_cart})
def search_view_proveedores(request):
# whatever the user types in the search box arrives here as the query
query = request.GET['query']
products = models.Product.objects.all().filter(proveedor_id=query)
if 'product_ids' in request.COOKIES:
product_ids = request.COOKIES['product_ids']
counter = product_ids.split('|')
product_count_in_cart = len(set(counter))
else:
product_count_in_cart = 0
# the word variable is shown in the HTML when the user clicks the search button
word = "Resultados de la Busqueda :"
categoria = models.Categoria.objects.all()
proveedores = models.Proveedor.objects.all()
if request.user.is_authenticated:
return render(request, 'ecom/customer_home.html',
{'products': products, 'categoria': categoria, 'proveedores': proveedores, 'word': word,
'product_count_in_cart': product_count_in_cart})
return render(request, 'ecom/index.html',
{'products': products, 'categoria': categoria, 'proveedores': proveedores, 'word': word,
'product_count_in_cart': product_count_in_cart})
# anyone can add a product to the cart, no login required
def add_to_cart_view(request, pk):
products = models.Product.objects.all().order_by('-id')[:12]
# for the cart counter, reading the product IDs the customer added from the cookies
if 'product_ids' in request.COOKIES:
product_ids = request.COOKIES['product_ids']
counter = product_ids.split('|')
product_count_in_cart = len(set(counter))
else:
product_count_in_cart = 1
response = render(request, 'ecom/index.html',
{'products': products, 'product_count_in_cart': product_count_in_cart})
# add the product ID to the cookies
if 'product_ids' in request.COOKIES:
product_ids = request.COOKIES['product_ids']
if product_ids == "":
product_ids = str(pk)
else:
product_ids = product_ids + "|" + str(pk)
response.set_cookie('product_ids', product_ids)
else:
response.set_cookie('product_ids', pk)
product = models.Product.objects.get(id=pk)
messages.info(request, product.name + ' Agregado al carro exitosamente!')
return response
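# Illustrative cookie state (values assumed): after adding products 4, 7 and 4
# again, request.COOKIES['product_ids'] == "4|7|4" and the navbar counter is
# len(set("4|7|4".split('|'))) == 2 distinct products.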
# cart checkout
def cart_view(request):
# for the cart counter
if 'product_ids' in request.COOKIES:
product_ids = request.COOKIES['product_ids']
counter = product_ids.split('|')
product_count_in_cart = len(set(counter))
else:
product_count_in_cart = 0
word = "No has agregado ningun producto"
# fetching product details from db whose id is present in cookie
products = None
total = 0
descuento = 0
word = "No has agregado ningun producto"
if 'product_ids' in request.COOKIES:
product_ids = request.COOKIES['product_ids']
if product_ids != "":
product_id_in_cart = product_ids.split('|')
products = models.Product.objects.all().filter(id__in=product_id_in_cart)
productos = models.Product.objects.all().filter(id__in=product_id_in_cart)
# for total price shown in cart
for p in products:
if p.estado == "Agotado":
total = 0
else:
descuento = descuento + p.product_precio_discuento
total = total + p.price - p.product_precio_discuento
else:
word = "No has agregado ningun producto"
else:
word = "No has agregado ningun producto"
return render(request, 'ecom/cart.html',
{'products': products, 'total': total, 'descuento': descuento, 'word': word,
'product_count_in_cart': product_count_in_cart})
def remove_from_cart_view(request, pk):
# for counter in cart
if 'product_ids' in request.COOKIES:
product_ids = request.COOKIES['product_ids']
counter = product_ids.split('|')
product_count_in_cart = len(set(counter))
else:
product_count_in_cart = 0
# removing product id from cookie
total = 0
descuento = 0
if 'product_ids' in request.COOKIES:
product_ids = request.COOKIES['product_ids']
product_id_in_cart = product_ids.split('|')
product_id_in_cart = list(set(product_id_in_cart))
product_id_in_cart.remove(str(pk))
products = models.Product.objects.all().filter(id__in=product_id_in_cart)
# for total price shown in cart after removing product
for p in products:
descuento = descuento + p.product_precio_discuento
total = total + p.price - descuento
# update the cookie value after removing the product id from the cart
value = ""
for i in range(len(product_id_in_cart)):
if i == 0:
value = value + product_id_in_cart[0]
else:
value = value + "|" + product_id_in_cart[i]
response = render(request, 'ecom/cart.html',
{'products': products, 'total': total, 'descuento': descuento,
'product_count_in_cart': product_count_in_cart})
if value == "":
response.delete_cookie('product_ids')
response.set_cookie('product_ids', value)
return response
def añadir_cantidad_cart_view(request, pk, price):
if 'product_ids' in request.COOKIES:
product_ids = request.COOKIES['product_ids']
counter = product_ids.split('|')
product_count_in_cart = len(set(counter))
else:
product_count_in_cart = 0
# append the product id read from the cookie
total = 0
descuento = 0
if 'product_ids' in request.COOKIES:
product_ids = request.COOKIES['product_ids']
product_id_in_cart = product_ids.split('|')
product_id_in_cart = list(set(product_id_in_cart))
product_id_in_cart.append(str(pk))
products = models.Product.objects.all().filter(id__in=product_id_in_cart)
# for total price shown in cart after adding the product
for p in products:
if p.pk == pk:
if p.price == price:
p.price += price
total = total + p.price
else:
total = total + p.price
response = render(request, 'ecom/cart.html',
{'products': products, 'descuento': descuento, 'total': total,
'product_count_in_cart': product_count_in_cart})
response.set_cookie('price', price)
return response
def disminuir_cantidad_cart_view(request, pk, price):
# for counter in cart
if 'product_ids' in request.COOKIES:
product_ids = request.COOKIES['product_ids']
counter = product_ids.split('|')
product_count_in_cart = len(set(counter))
else:
product_count_in_cart = 0
# removing product id from cookie
total = 0
descuento = 0
if 'product_ids' in request.COOKIES:
product_ids = request.COOKIES['product_ids']
product_id_in_cart = product_ids.split('|')
product_id_in_cart = list(set(product_id_in_cart))
product_id_in_cart.remove(str(pk))
products = models.Product.objects.all().filter(id__in=product_id_in_cart)
# for total price shown in cart after removing product
for p in products:
descuento = descuento + p.product_precio_discuento
p.price = price - p.price
total = total + p.price - descuento
return render(request, 'ecom/cart.html',
{'products': products, 'descuento': descuento, 'total': total,
'product_count_in_cart': product_count_in_cart})
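# Note: a class-based send_feedback_view is defined further below in this module;
# being bound last, it is the definition Python keeps under that name at import time.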
def send_feedback_view(request):
feedbackForm = forms.FeedbackForm()
if request.method == 'POST':
feedbackForm = forms.FeedbackForm(request.POST)
if feedbackForm.is_valid():
feedbackForm.save()
return render(request, 'ecom/feedback_sent.html')
return render(request, 'ecom/send_feedback.html', {'feedbackForm': feedbackForm})
# ---------------------------------------------------------------------------------
# ------------------------ CUSTOMER-RELATED VIEWS START ------------------------------
# ---------------------------------------------------------------------------------
@login_required(login_url='customerlogin')
@user_passes_test(is_customer)
def customer_home_view(request):
products = models.Product.objects.all().order_by('-id')[:12]
categoria = models.Categoria.objects.all()
proveedores = models.Proveedor.objects.all()
products_slider = models.Product.objects.all().order_by('-id')
productods_total = models.Product.objects.all()
allProds = []
cats = {3}
for cat in cats:
prod = models.Product.objects.filter()
n = len(prod)
nSlides = n // 4 + ceil((n / 4) - (n // 4))
allProds.append([prod, range(1, nSlides), nSlides])
params = {'allProds': allProds}
if 'product_ids' in request.COOKIES:
product_ids = request.COOKIES['product_ids']
counter = product_ids.split('|')
product_count_in_cart = len(set(counter))
else:
product_count_in_cart = 0
return render(request, 'ecom/customer_home.html',
{'products': products, 'productods_total': productods_total, 'categoria': categoria,
'proveedores': proveedores, 'product_count_in_cart': product_count_in_cart, 'allProds': allProds,
'products_slider': products_slider})
# shipping address before placing the order
@login_required(login_url='customerlogin')
def customer_address_view(request):
# this checks whether there is any product in the cart or not
# if the cart is empty we do not show the address form
product_in_cart = False
if 'product_ids' in request.COOKIES:
product_ids = request.COOKIES['product_ids']
if product_ids != "":
product_in_cart = True
# for the cart counter
if 'product_ids' in request.COOKIES:
product_ids = request.COOKIES['product_ids']
counter = product_ids.split('|')
product_count_in_cart = len(set(counter))
else:
product_count_in_cart = 0
CALLBACK_URL = request.build_absolute_uri(reverse("confirmar_payment"))
addressForm = forms.AddressForm()
if request.method == 'POST':
addressForm = forms.AddressForm(request.POST)
if addressForm.is_valid():
# here we take the address, email and mobile phone at the moment the order is placed
# we do not take them from the customer account table because
# those details may have changed
email = addressForm.cleaned_data['Email']
mobile = addressForm.cleaned_data['Mobile']
address = addressForm.cleaned_data['Address']
dni = addressForm.cleaned_data['Dni']
distrito = addressForm.cleaned_data['Distrito']
localidad = addressForm.cleaned_data['localidad']
# to show the total price on the payment page ... read the IDs from the cookies and then fetch each product price from the db
total = 0
if 'product_ids' in request.COOKIES:
product_ids = request.COOKIES['product_ids']
if product_ids != "":
product_id_in_cart = product_ids.split('|')
products = models.Product.objects.all().filter(id__in=product_id_in_cart)
for p in products:
total = total + p.price
response = render(request, 'ecom/payment.html', {'total': total, 'CALLBACK_URL': CALLBACK_URL})
response.set_cookie('email', email)
response.set_cookie('mobile', mobile)
response.set_cookie('address', address)
response.set_cookie('localidad', localidad)
response.set_cookie('dni', dni)
response.set_cookie('distrito', distrito)
return response
else:
messages.error(request, "el telefono no es correcto")
return render(request, 'ecom/customer_address.html',
{'addressForm': addressForm, 'product_in_cart': product_in_cart,
'product_count_in_cart': product_count_in_cart})
def confirmar_payment(request):
return render(request, 'ecom/payment_success.html')
class payment_success_view(generic.View):
def post(self, request, *args, **kwargs):
# Here we place the order | after a successful payment
# we fetch the customer's mobile, address and email
# we read the product IDs from the cookies and then their details from the database
# then we create the order objects and store them in the db
# after that we delete the cookies, because once the order is placed the cart must be empty
customer = models.Customer.objects.get(user_id=request.user.id)
products = None
kardexs = None
email = None
mobile = None
address = None
localidad = None
distrito = None
dni = None
if 'product_ids' in request.COOKIES:
product_ids = request.COOKIES['product_ids']
if product_ids != "":
product_id_in_cart = product_ids.split('|')
products = models.Product.objects.all().filter(id__in=product_id_in_cart)
kardexs = models.Product.objects.all().filter(id__in=product_id_in_cart)
# here we get the list of products the customer is ordering in this checkout
# these details can change, so we read them at the moment the order is placed ...
if 'email' in request.COOKIES:
email = request.COOKIES['email']
if 'mobile' in request.COOKIES:
mobile = request.COOKIES['mobile']
if 'address' in request.COOKIES:
address = request.COOKIES['address']
if 'localidad' in request.COOKIES:
localidad = request.COOKIES['localidad']
if 'distrito' in request.COOKIES:
distrito = request.COOKIES['distrito']
if 'dni' in request.COOKIES:
dni = request.COOKIES['dni']
# we create one order row per product in the cart
# e.g. if there are 5 items in the cart and we place the order, 5 rows are created in the orders table
# this leaves redundant data in the orders table, but normalizing it would complicate the design
# save the selected products in the orders table
for product in products:
o = models.Orders(customer=customer, product=product, status='Pendiente', email=email,
mobile=mobile, address=address, delivery_zona=localidad,
distribuidor=models.Distribuidor.objects.get(id=1), distrito=distrito,
dni=dni)
o.save()
send_email(email, distrito, address, o.id)
# para guardar los productos seleccionados en la tabla Kardex
for product in kardexs:
k = models.Kardex(ingreso=0, descripcion="Reducción del producto", salida=1,
producto_id=product)
k.save()
product.stock -= 1
product.save()
body = json.loads(request.body)
payment = models.Payment(
order=o,
succesful=True,
raw_response=json.dumps(body),
amount=float(body["purchase_units"][0]["amount"]["value"]),
payment_method='Paypal'
)
payment.save()
# after the order is placed the cookies must be deleted
response = HttpResponseRedirect('confirmar_payment')
response.delete_cookie('product_ids')
response.delete_cookie('email')
response.delete_cookie('mobile')
response.delete_cookie('address')
response.delete_cookie('localidad')
response.delete_cookie('distrito')
response.delete_cookie('dni')
return response
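# Hedged sketch of the PayPal capture payload read above (only the field used is
# shown; structure per the PayPal Orders API, concrete values are assumptions):
#
#     {"purchase_units": [{"amount": {"value": "199.90", "currency_code": "USD"}}]}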
@login_required(login_url='customerlogin')
@user_passes_test(is_customer)
def my_order_view(request):
categorias = models.Categoria.objects.all()
product_in_cart = False
if 'product_ids' in request.COOKIES:
product_ids = request.COOKIES['product_ids']
if product_ids != "":
product_in_cart = True
# for the cart counter
if 'product_ids' in request.COOKIES:
product_ids = request.COOKIES['product_ids']
counter = product_ids.split('|')
product_count_in_cart = len(set(counter))
else:
product_count_in_cart = 0
customer = models.Customer.objects.get(user_id=request.user.id)
orders = models.Orders.objects.all().filter(customer_id=customer).order_by('-id')[:6]
ordered_products = []
for order in orders:
ordered_product = models.Product.objects.all().filter(id=order.product.id)
ordered_products.append(ordered_product)
return render(request, 'ecom/my_order.html',
{'data': zip(ordered_products, orders, ), "product_count_in_cart": product_count_in_cart,
"categoria": categorias})
# --------------to download and print the order invoice (pdf)
import io
from xhtml2pdf import pisa
from django.http import HttpResponse
def render_to_pdf(template_src, context_dict):
template = get_template(template_src)
html = template.render(context_dict)
result = io.BytesIO()
pdf = pisa.pisaDocument(io.BytesIO(html.encode("ISO-8859-1")), result)
if not pdf.err:
return HttpResponse(result.getvalue(), content_type='application/pdf')
return
@login_required(login_url='customerlogin')
@user_passes_test(is_customer)
def download_invoice_view(request, orderID, productID):
order = models.Orders.objects.get(id=orderID)
product = models.Product.objects.get(id=productID)
mydict = {
'orderDate': order.order_date,
'customerName': request.user,
'customerEmail': order.email,
'customerMobile': order.mobile,
'shipmentAddress': order.address,
'orderStatus': order.status,
'productName': product.name,
'productImage': product.product_image,
'productPrice': product.price,
'productDescription': product.description,
}
return render_to_pdf('ecom/download_invoice.html', mydict)
@login_required(login_url='customerlogin')
@user_passes_test(is_customer)
def my_profile_view(request):
customer = models.Customer.objects.get(user_id=request.user.id)
return render(request, 'ecom/my_profile.html', {'customer': customer})
@login_required(login_url='customerlogin')
@user_passes_test(is_customer)
def edit_profile_view(request):
customer = models.Customer.objects.get(user_id=request.user.id)
user = models.User.objects.get(id=customer.user_id)
userForm = forms.CustomerUserForm(instance=user)
customerForm = forms.CustomerForm(request.FILES, instance=customer)
mydict = {'userForm': userForm, 'customerForm': customerForm}
if request.method == 'POST':
userForm = forms.CustomerUserForm(request.POST, instance=user)
customerForm = forms.CustomerForm(request.POST, instance=customer)
if userForm.is_valid() and customerForm.is_valid():
user = userForm.save()
user.set_password(<PASSWORD>)
user.save()
customerForm.save()
return HttpResponseRedirect('my-profile')
return render(request, 'ecom/edit_profile.html', context=mydict)
# ---------------------------------------------------------------------------------
# ------------------------ ABOUT US AND CONTACT US VIEWS START --------------------
# ---------------------------------------------------------------------------------
def aboutus_view(request):
return render(request, 'ecom/aboutus.html')
def contactus_view(request):
sub = forms.ContactusForm()
if request.method == 'POST':
sub = forms.ContactusForm(request.POST)
if sub.is_valid():
email = sub.cleaned_data['Email']
name = sub.cleaned_data['Name']
message = sub.cleaned_data['Message']
send_mail(str(name) + ' || ' + str(email), message, settings.EMAIL_HOST_USER, settings.EMAIL_RECEIVING_USER,
fail_silently=False)
return render(request, 'ecom/contactussuccess.html')
return render(request, 'ecom/contactus.html', {'form': sub})
from django.conf import settings
from django.shortcuts import render
from django.template.loader import get_template
from django.core.mail import EmailMultiAlternatives
def send_email(mail, distrito, address, orden):
context = {'mail': mail, 'distrito': distrito, 'direccion': address, 'orden': orden}
template = get_template('ecom/correo.html')
content = template.render(context)
email = EmailMultiAlternatives(
'Mensaje de Empresa Ikergust',
'Venta realizada',
settings.EMAIL_HOST_USER,
[mail],
)
email.attach_alternative(content, 'text/html')
email.send()
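# Hedged note: send_email relies on Django's SMTP settings (EMAIL_HOST,
# EMAIL_HOST_USER, EMAIL_HOST_PASSWORD, EMAIL_USE_TLS) being configured in
# settings.py, and 'ecom/correo.html' receives {'mail', 'distrito', 'direccion',
# 'orden'} as its template context.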
class send_feedback_view(View):
def get(self, request, *args, **kwargs):
a = Product.objects.all()
product_in_cart = False
if 'product_ids' in request.COOKIES:
product_ids = request.COOKIES['product_ids']
if product_ids != "":
product_in_cart = True
# for the cart counter
if 'product_ids' in request.COOKIES:
product_ids = request.COOKIES['product_ids']
counter = product_ids.split('|')
product_count_in_cart = len(set(counter))
else:
product_count_in_cart = 0
return render(request, "ecom/send_feedback.html", {"a": a, "product_count_in_cart": product_count_in_cart})
def post(self, request, *args, **kwargs):
usuario = request.POST.get("usuario")
name = request.POST.get("nombre")
tipoproblema = request.POST.get("tipoproblema")
feedback = request.POST.get("feedback")
user = models.User.objects.get(id=usuario)
feedback = models.Feedback(user=user, name=name, feedback=feedback,
descripcion_solucion="", estado="Pendiente", tipo=tipoproblema)
feedback.save()
return render(request, "ecom/send_feedback.html")
#####################################
# ADMIN KARDEX ####
#####################################
def kardex_opciones(request):
kardex = models.Kardex.objects.all()
return render(request, 'ecom/admin/admin_opcionKardex.html', {"kardex": kardex})
class buscar_producto_kardex(View):
def get(self, request, *args, **kwargs):
categoria = models.Categoria.objects.all()
return render(request, "ecom/admin/Buscar_producto_kardex.html", {"categoria": categoria})
def post(self, request, *args, **kwargs):
categorias = models.Categoria.objects.all()
productos = models.Product.objects.all()
categoria = request.POST.get("categoria")
cat = models.Categoria.objects.get(id=categoria)
produc = []
for p in productos:
produc = models.Product.objects.all().filter(categoria_id=cat)
return render(request, "ecom/admin/Buscar_producto_kardex.html", {"productos": produc, "categoria": categorias})
def Actualizar_producto_kardex_view(request, pk):
if request.method == "GET":
producto_id = models.Product.objects.get(id=pk)
return render(request, 'ecom/admin/admin_kardex_añadirstock.html', {'producto': producto_id})
elif request.method == "POST":
pk = request.POST.get("id_producto")
stock = request.POST.get("stock")
producto_id = models.Product.objects.get(id=pk)
producto_id.stock = int(stock) + producto_id.stock
producto_id.save()
kardex = models.Kardex(ingreso=stock, descripcion="Aumento del producto", salida=0, producto_id=producto_id)
kardex.save()
categoria = models.Categoria.objects.all()
return render(request, 'ecom/admin/Buscar_producto_kardex.html', {"categoria": categoria})
def kardex_view(request):
kardex = models.Kardex.objects.all().order_by('-id')[:10]
return render(request, 'ecom/admin/admin_kardex.html', {"kardex": kardex})
def admin_distribuidor(request):
ordenes = models.Orders.objects.all().filter(status="Pendiente")
return render(request, 'ecom/admin/admin_distribuidor.html', {"ordenes": ordenes})
def admin_distribuidor_agregar(request):
customerForm = forms.CustomerForm()
mydict = {'customerForm': customerForm}
if request.method == 'POST':
usuario = request.POST.get("usuario")
contrasena = request.POST.get("contraseña")
nombres = request.POST.get("nombres")
apellidos = request.POST.get("apellidos")
customerForm = forms.CustomerForm(request.POST, request.FILES)
if customerForm.is_valid():
user = models.User(username=usuario, password=<PASSWORD>, first_name=nombres, last_name=apellidos)
user.set_password(<PASSWORD>)
user.save()
customer = customerForm.save(commit=False)
customer.user = user
customer.save()
my_customer_group = Group.objects.get_or_create(name='DISTRIBUIDOR')
my_customer_group[0].user_set.add(user)
Token.objects.create(user=user)
return HttpResponseRedirect('customerlogin')
return render(request, 'ecom/customersignup.html', context=mydict)
#####################################
# ADMIN PACKAGES ####
#####################################
def Paquete_opciones(request):
return render(request, 'ecom/admin/admin_opcionpaquetes.html')
#####################################
# ERRORS ####
#####################################
class Error404(TemplateView):
template_name = 'ecom/error404.html'
``` |
{
"source": "jorgeluisrmx/labtools_ThermoCalcCLI",
"score": 3
} |
#### File: thermocalc/controllers/rawtocurve.py
```python
from __future__ import absolute_import
import os
import sys
import argparse
from thermocalc.models import TFurnance, Curve
def parse_sysargv():
descr = """genera un archivo .curve con la distribución térmica de un horno tubular
basado en las mediciones tomadas con un termopar; pares posicion temperatura."""
parser = argparse.ArgumentParser(description=descr)
parser.add_argument('ltubo', help="longitud del tubo en [mm]", type=float)
parser.add_argument('ltermopar', help="longitud del termopar en [mm]", type=float)
parser.add_argument('datafile', help="archivo de datos [x T]", type=str)
return parser.parse_args()
# ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
def raw_to_curve():
# arguments parsing
args = parse_sysargv()
# build the geometric model of the furnace
horno = TFurnance(args.ltubo, args.ltermopar)
# creacion de la curva con los datos del archivo [x T]
curva = Curve()
try:
with open(args.datafile, 'r') as data:
for line in data:
x, y = line.replace('\n', '').split(' ')
curva.add_point(horno.x2R(float(x)), float(y))
# append the symmetric wing to the curve
curva.add_simetry()
except:
print 'Archivo de datos "{}" NO VALIDO'.format(args.datafile)
print sys.exc_info()
return
# generate the .curve file
file_name = args.datafile.split('.')[0] + '.curve'
with open(file_name, 'w') as target:
target.write('{}\n'.format(args.ltubo))
target.write('{}\n'.format(args.ltermopar))
for x, y in curva:
target.write('{},{}\n'.format(x, y))
print ' archivo {} generado con exito'.format(file_name)
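# Hedged CLI sketch (argument values and file name are assumptions):
# running raw_to_curve with "1200 900 perfil.txt" reads x/T pairs from perfil.txt,
# adds the symmetric wing and writes perfil.curve next to it.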
``` |
{
"source": "jorgemarpa/kepler-apertures",
"score": 2
} |
#### File: src/kepler_apertures/EXBAMachine.py
```python
import os
import glob
import warnings
import datetime
import wget
import numpy as np
import pandas as pd
from scipy import sparse
import matplotlib.pyplot as plt
from matplotlib import colors
from matplotlib import patches
from tqdm.auto import tqdm
from astropy.coordinates import SkyCoord, match_coordinates_3d
from astropy.time import Time
from astropy import units
from astropy.io import fits
from astropy.table import Table
from astropy.timeseries import BoxLeastSquares
import lightkurve as lk
from .utils import get_gaia_sources
from . import PACKAGEDIR, DATAOUTDIR
from .version import __version__
class EXBAMachine(object):
"""
Class that works with Kepler's EXBA data, to identify observed sources using Gaia
catalogs, and create light curves from simple aperture photometry.
"""
def __init__(self, channel=53, quarter=5, magnitude_limit=20, gaia_dr=3):
"""
Parameters
----------
channel : int
Channel number of the EXBA image.
quarter : int
Quarter number of the EXBA image.
magnitude_limit : float
Limiting magnitude in g band used when querying Gaia catalogs,
default is 20 mag.
gaia_dr : int
Gaia data release, default is EDR3.
Attributes
----------
quarter : int
Quarter number of the EXBA image.
channel : int
Channel number of the EXBA image.
gaia_dr : int
Gaia data release, default is EDR3.
tpfs : lightkurve.TargetPixelFileCollection
Collection of 4 TPFs that form the full EXBA mask.
time : numpy.ndarray
Data array containing the time values.
cadences : numpy.ndarray
Data array containing the cadence numbers.
row : numpy.ndarray
Data array containing the valid pixel row numbers. Has shape of [n_pixels].
column : numpy.ndarray
Data array containing the valid pixel columns numbers.
Has shape of [n_pixels].
flux : numpy.ndarray
Data array containing the valid image fluxes. Has shape of
[n_times, n_pixels].
flux_err : numpy.ndarray
Data array containing the valid image flux errors. Has shape of
[n_times, n_pixels].
ra : numpy.ndarray
Data array containing the valid RA pixel values. Has shape of [n_pixels].
dec : numpy.ndarray
Data array containing the valid Dec pixel values. Has shape of [n_pixels].
dx : numpy.ndarray
Distance between pixel and source coordinates, units of pixels. Has shape
of [n_sources, n_pixels]
dy : numpy.ndarray
Distance between pixel and source coordinates, units of pixels. Has shape
of [n_sources, n_pixels]
r : numpy.ndarray
Radial distance between pixel and source coordinates (polar coordinates),
in units of pixels.
phi : numpy.ndarray
Angle between pixel and source coordinates (polar coordinates),
in units of radians
n_sources : int
Number of sources in Gaia catalog observed in the EXBA mask.
n_rows : int
Number of rows in the EXBA image.
n_columns : int
Number of columns in the EXBA image.
aperture_mask : numpy.ndarray
Data array with the source aperture masks. Has shape of
[n_sources, n_pixels]
FLFRCSAP : numpy.array
Data array with the completeness metric for every source computed from
the photometric aperture.
CROWDSAP : numpy.array
Data array with the contamination metric for every source computed from
the photometric aperture.
"""
self.quarter = quarter
self.channel = channel
self.gaia_dr = gaia_dr
# load local TPFs files
tpfs_paths = np.sort(
glob.glob(
"%s/data/fits/exba/q%i/ch%02i/*_lpd-targ.fits.gz"
% (DATAOUTDIR, quarter, channel)
)
)
if len(tpfs_paths) == 0:
print("Downloading TPFs for EXBA mask...")
self.download_exba(channel=channel, quarter=quarter)
tpfs_paths = np.sort(
glob.glob(
"%s/data/fits/exba/q%i/ch%02i/*_lpd-targ.fits.gz"
% (DATAOUTDIR, quarter, channel)
)
)
self.tpfs_files = tpfs_paths
tpfs = lk.TargetPixelFileCollection(
[lk.KeplerTargetPixelFile(f) for f in tpfs_paths]
)
self.tpfs = tpfs
self.wcs = tpfs[0].wcs
print(self.tpfs)
# check for same channels and quarter
channels = [tpf.get_header()["CHANNEL"] for tpf in tpfs]
quarters = [tpf.get_header()["QUARTER"] for tpf in tpfs]
self.hdr = tpfs[0].get_header()
if len(set(channels)) != 1 and list(set(channels)) != [channel]:
raise ValueError(
"All TPFs must be from the same channel %s"
% ",".join([str(k) for k in channels])
)
if len(set(quarters)) != 1 and list(set(quarters)) != [quarter]:
raise ValueError(
"All TPFs must be from the same quarter %s"
% ",".join([str(k) for k in quarters])
)
# stitch the channel's strips and parse TPFs
time, cadences, row, col, flux, flux_err, unw = self._parse_TPFs_channel(tpfs)
self.time, self.cadences, flux, flux_err = self._preprocess(
time, cadences, flux, flux_err
)
self.row_2d, self.column_2d, self.flux_2d, self.flux_err_2d = (
row.copy(),
col.copy(),
flux.copy(),
flux_err.copy(),
)
self.row, self.column, self.flux, self.flux_err, self.unw = (
row.ravel(),
col.ravel(),
flux.reshape(flux.shape[0], np.product(flux.shape[1:])),
flux_err.reshape(flux_err.shape[0], np.product(flux_err.shape[1:])),
unw.ravel(),
)
self.ra, self.dec = self._convert_to_wcs(tpfs, self.row, self.column)
# search Gaia sources in the sky
sources = self._do_query(
self.ra,
self.dec,
epoch=self.time[0],
magnitude_limit=magnitude_limit,
load=True,
)
sources["col"], sources["row"] = self.wcs.wcs_world2pix(
sources.ra, sources.dec, 0.0
)
sources["col"] += tpfs[0].column
sources["row"] += tpfs[0].row
self.sources, self.bad_sources = self._clean_source_list(
sources, self.ra, self.dec
)
self.dx, self.dy = np.asarray(
[
np.vstack(
[
self.column - self.sources["col"][idx],
self.row - self.sources["row"][idx],
]
)
for idx in range(len(self.sources))
]
).transpose([1, 0, 2])
self.r = np.hypot(self.dx, self.dy)
self.phi = np.arctan2(self.dy, self.dx)
self.n_sources = self.sources.shape[0]
self.n_rows = self.flux_2d.shape[1]
self.n_columns = self.flux_2d.shape[2]
self.aperture_mask = np.zeros_like(self.dx).astype(bool)
self.FLFRCSAP = np.zeros(self.sources.shape[0])
self.CROWDSAP = np.zeros(self.sources.shape[0])
self.cut = np.zeros(self.sources.shape[0])
def __repr__(self):
q_result = ",".join([str(k) for k in list([self.quarter])])
return "EXBA Patch:\n\t Channel %i, Quarter %s, Gaia DR%i sources %i" % (
self.channel,
q_result,
self.gaia_dr,
len(self.sources),
)
@staticmethod
def download_exba(channel=1, quarter=5):
"""
        Download EXBA FITS files to a dedicated quarter/channel directory.
        It uses an exba_tpfs_info.csv file to map the quarter/channel to the
        corresponding file names in the MAST archive.
Parameters
----------
        channel : int
            Channel number to be downloaded; valid values are between 1 and 84.
        quarter : int
            Quarter number to be downloaded; valid values are between 1 and 17.
"""
url = "https://archive.stsci.edu/missions/kepler/target_pixel_files/1000"
map = pd.read_csv("%s/data/exba_tpfs_info.csv" % (PACKAGEDIR), index_col=0)
file_names = map.query("channel == %i and quarter == %i" % (channel, quarter))
if not os.path.isdir(
"%s/data/fits/exba/q%i/ch%02i" % (DATAOUTDIR, quarter, channel)
):
os.makedirs("%s/data/fits/exba/q%i/ch%02i" % (DATAOUTDIR, quarter, channel))
for i, row in file_names.iterrows():
name = row["file_name"]
kid = row["kepler_id"].split(" ")[-1]
out = "%s/data/fits/exba/q%i/ch%02i/%s" % (
DATAOUTDIR,
quarter,
channel,
name,
)
print("%s/%s/%s" % (url, kid, name))
wget.download("%s/%s/%s" % (url, kid, name), out=out)
return
def _parse_TPFs_channel(self, tpfs):
"""
Function to parse the TPFs containing the EXBA masks (4 per channel) and
tile them.
Parameters
----------
tpfs : list of TPFs or TargetPixelFileCollection
A list of TPFs that contain the 4 EXBA mask per channel.
Returns
-------
times : numpy.ndarray
Data array containing the time values.
cadences : numpy.ndarray
Data array containing the cadence numbers.
row : numpy.ndarray
Data array containing the pixel row numbers.
col : numpy.ndarray
Data array containing the pixel column numbers.
flux : numpy.ndarray
Data array containing the image flux.
flux_err : numpy.ndarray
Data array containing the image flux errors.
"""
cadences = np.array([tpf.cadenceno for tpf in tpfs])
        # check if all TPFs have the same cadences
if not np.all(cadences[1:, :] - cadences[-1:, :] == 0):
raise ValueError("All TPFs must have same time basis")
        # make sure TPFs are sorted in the column direction
tpfs = lk.TargetPixelFileCollection(
[tpfs[i] for i in np.argsort([tpf.column for tpf in tpfs])]
)
# extract times
times = tpfs[0].time.jd
# extract row,column mesh grid
col, row = np.hstack(
[
np.mgrid[
tpf.column : tpf.column + tpf.shape[2],
tpf.row : tpf.row + tpf.shape[1],
]
for tpf in tpfs
]
)
        # extract flux values
flux = np.hstack([tpf.flux.transpose(1, 2, 0) for tpf in tpfs]).transpose(
2, 0, 1
)
flux_err = np.hstack(
[tpf.flux_err.transpose(1, 2, 0) for tpf in tpfs]
).transpose(2, 0, 1)
# bookkeeping of tpf-pixel
unw = np.hstack(
[np.ones(tpf.shape[1:], dtype=np.int) * i for i, tpf in enumerate(tpfs)]
)
return times, cadences[0], row.T, col.T, flux, flux_err, unw
def _preprocess(self, times, cadences, flux, flux_err):
"""
        Function to clean pixels with NaN values and bad cadences. It returns the
        same input arrays, cleaned.
Parameters
----------
times : numpy.ndarray
Data array with the time values.
cadences : numpy.ndarray
Data array with the cadence numbers.
flux : numpy.ndarray
Data array with the image flux.
flux_err : numpy.ndarray
Data array with the image flux errors.
Returns
-------
times : numpy.ndarray
Data array with the time values.
cadences : numpy.ndarray
Data array with the cadence numbers.
flux : numpy.ndarray
Data array with the image flux.
flux_err : numpy.ndarray
Data array with the image flux errors.
"""
# Remove cadences with nan flux
nan_cadences = np.array([np.isnan(im).sum() == 0 for im in flux])
times = times[nan_cadences]
cadences = cadences[nan_cadences]
flux = flux[nan_cadences]
flux_err = flux_err[nan_cadences]
return times, cadences, flux, flux_err
def _convert_to_wcs(self, tpfs, row, col):
"""
Function to convert pixel number to RA and Dec values using the WCS solution
embedded in the TPFs.
Parameters
----------
tpfs : list of TPFs or TargetPixelFileCollection
A list of TPFs that contain the EXBA tiles.
        row : numpy.ndarray
            Data array with the row pixel values to be converted to RA & Dec.
        col : numpy.ndarray
            Data array with the column pixel values to be converted to RA & Dec.
Returns
-------
ra : numpy.ndarray
Right Ascension coordinate obtained from the WCS solution.
dec : numpy.ndarray
Declination coordinate obtained from the WCS solution.
"""
ra, dec = self.wcs.wcs_pix2world(
(col - tpfs[0].column), (row - tpfs[0].row), 0.0
)
return ra, dec
def _do_query(self, ra, dec, epoch=2020, magnitude_limit=20, load=True):
"""
Calculate ra, dec coordinates and search radius to query Gaia catalog.
Parameters
----------
ra : numpy.ndarray
Right ascension coordinate of pixels to do Gaia search
dec : numpy.ndarray
Declination coordinate of pixels to do Gaia search
        epoch : float
            Epoch of observation in Julian Days of the ra, dec coordinates;
            used to propagate proper motions in Gaia.
        magnitude_limit : int
            Limiting magnitude for the query.
        load : boolean
            Whether to load a previously saved query. Set to False to force a new
            query.
Returns
-------
sources : pandas.DataFrame
Catalog with query result
"""
columns = [
"designation",
"ra",
"ra_error",
"dec",
"dec_error",
"pmra",
"pmdec",
"parallax",
"parallax_error",
"phot_g_n_obs",
"phot_g_mean_flux",
"phot_g_mean_flux_error",
"phot_g_mean_mag",
"phot_bp_n_obs",
"phot_bp_mean_flux",
"phot_bp_mean_flux_error",
"phot_bp_mean_mag",
"phot_rp_n_obs",
"phot_rp_mean_flux",
"phot_rp_mean_flux_error",
"phot_rp_mean_mag",
]
file_name = "%s/data/catalogs/exba/%i/channel_%02i_gaiadr%s_xmatch.csv" % (
DATAOUTDIR,
self.quarter,
self.channel,
str(self.gaia_dr),
)
if os.path.isfile(file_name) and load:
print("Loading query from file...")
print(file_name)
sources = pd.read_csv(file_name)
sources = sources.loc[:, columns]
else:
# find the max circle per TPF that contain all pixel data to query Gaia
ra_q = ra.mean()
dec_q = dec.mean()
rad_q = np.hypot(ra - ra_q, dec - dec_q).max() + 10 / 3600
# query Gaia with epoch propagation
sources = get_gaia_sources(
tuple([ra_q]),
tuple([dec_q]),
tuple([rad_q]),
magnitude_limit=magnitude_limit,
epoch=Time(epoch, format="jd").jyear,
dr=self.gaia_dr,
)
sources = sources.loc[:, columns]
if not os.path.isdir(
"%s/data/catalogs/exba/%i" % (DATAOUTDIR, self.quarter)
):
os.makedirs("%s/data/catalogs/exba/%i" % (DATAOUTDIR, self.quarter))
sources.to_csv(file_name)
return sources
def _clean_source_list(self, sources, ra, dec):
"""
        Function to clean sources from the catalog, removing sources outside the
        image coverage (allowing for sources up to 4" outside the mask) and removing
        blended sources (within 2").
Parameters
----------
sources : pandas.DataFrame
            Catalog with sources to be cleaned.
ra : numpy.ndarray
Data array with values of RA for every pixel in the image.
dec : numpy.ndarray
Data array with values of Dec for every pixel in the image.
Returns
-------
sources : pandas.DataFrame
Clean catalog
"""
# find sources on the image
inside = (
(sources.row > self.row.min() - 1.0)
& (sources.row < self.row.max() + 1.0)
& (sources.col > self.column.min() - 1.0)
& (sources.col < self.column.max() + 1.0)
)
# find well separated sources
s_coords = SkyCoord(sources.ra, sources.dec, unit=("deg"))
midx, mdist = match_coordinates_3d(s_coords, s_coords, nthneighbor=2)[:2]
        # remove sources closer than 2" (~0.5 pix)
closest = mdist.arcsec < 2.0
blocs = np.vstack([midx[closest], np.where(closest)[0]])
bmags = np.vstack(
[
sources.phot_g_mean_mag[midx[closest]],
sources.phot_g_mean_mag[np.where(closest)[0]],
]
)
faintest = [blocs[idx][s] for s, idx in enumerate(np.argmax(bmags, axis=0))]
unresolved = np.in1d(np.arange(len(sources)), faintest)
del s_coords, midx, mdist, closest, blocs, bmags
# Keep track of sources that we removed
sources.loc[:, "clean_flag"] = 0
sources.loc[~inside, "clean_flag"] += 2 ** 0 # outside TPF
sources.loc[unresolved, "clean_flag"] += 2 ** 1 # close contaminant
# combine 2 source masks
clean = sources.clean_flag == 0
removed_sources = sources[~clean].reset_index(drop=True)
sources = sources[clean].reset_index(drop=True)
return sources, removed_sources
def do_photometry(self, aperture_mask):
"""
        Function to do aperture photometry on a set of sources. It creates/updates
        class attributes that contain the SAP flux, errors, and aperture masks.
Parameters
----------
aperture_mask : numpy.ndarray
Boolean mask of shape [n_sources, n_pixels] that has the aperture mask
to be used to compute photometry for a set of sources.
"""
sap = np.zeros((self.sources.shape[0], self.flux.shape[0]))
sap_e = np.zeros((self.sources.shape[0], self.flux.shape[0]))
for sidx in tqdm(range(len(aperture_mask)), desc="SAP", leave=True):
sap[sidx, :] = self.flux[:, aperture_mask[sidx]].sum(axis=1)
sap_e[sidx, :] = (
np.power(self.flux_err[:, aperture_mask[sidx]].value, 2).sum(axis=1)
** 0.5
)
self.sap_flux = sap
self.sap_flux_err = sap_e
self.aperture_mask = aperture_mask
self.aperture_mask_2d = aperture_mask.reshape(
self.n_sources, self.n_rows, self.n_columns
)
return
def create_lcs(self, aperture_mask):
"""
        Function to create `lightkurve.LightCurve` objects with the light curves from
        aperture photometry. It creates a class attribute `self.lcs` that is a
        `lk.LightCurveCollection` with the light curves of all input sources.
Parameters
----------
aperture_mask : numpy.ndarray
Boolean mask of shape [n_sources, n_pixels] that has the aperture mask
to be used to compute photometry for a set of sources.
"""
self.do_photometry(aperture_mask)
lcs = []
for idx, s in self.sources.iterrows():
tile = int((s.col - self.tpfs[0].column) / 9)
meta = {
"ORIGIN": "EXBAMachine",
# "APERTURE_MASK": self.aperture_mask_2d[idx],
"VERSION": __version__,
"LABEL": s.designation,
"TARGETID": int(s.designation.split(" ")[-1]),
"MISSION": "Kepler",
"INSTRUME": "Kepler Photometer",
"OBSMODE": "long cadence",
"SEASON": self.tpfs[tile].get_header()["SEASON"],
"EQUINOX": 2000,
"RA": s.ra,
"DEC": s.dec,
"PMRA": s.pmra / 1000 if np.isfinite(s.pmra) else None,
"PMDEC": s.pmdec / 1000 if np.isfinite(s.pmdec) else None,
"PARALLAX": s.parallax if np.isfinite(s.parallax) else None,
"GMAG": s.phot_g_mean_mag if np.isfinite(s.phot_g_mean_mag) else None,
"RPMAG": s.phot_rp_mean_mag
if np.isfinite(s.phot_rp_mean_mag)
else None,
"BPMAG": s.phot_bp_mean_mag
if np.isfinite(s.phot_bp_mean_mag)
else None,
"CHANNEL": self.channel,
"MODULE": self.hdr["MODULE"],
"OUTPUT": self.hdr["OUTPUT"],
"QUARTER": self.quarter,
"CAMPAIGN": "EXBA",
"ROW": np.round(s.row, decimals=4),
"COLUMN": np.round(s.col, decimals=4),
"FLFRCSAP": np.round(self.FLFRCSAP[idx], decimals=6),
"CROWDSAP": np.round(self.CROWDSAP[idx], decimals=6),
"PERCENT": self.cut[idx],
}
lc = lk.LightCurve(
time=self.time * units.d,
flux=self.sap_flux[idx] * (units.electron / units.second),
flux_err=self.sap_flux_err[idx] * (units.electron / units.second),
meta=meta,
# time_format="jd",
# flux_unit="electron/s",
cadenceno=self.cadences,
)
lcs.append(lc)
self.lcs = lk.LightCurveCollection(lcs)
return
def apply_CBV(self, do_under=False, plot=True):
"""
        Applies CBV corrections to all the light curves in `self.lcs`. It optimizes
        the alpha parameter for each correction; if the optimization fails, it uses
        the alpha value calculated for the previous light curve.
It creates class attributes to access the CBV-corrected light curves, and
under/over fitting metrics.
Parameters
----------
do_under : boolean
Compute or not the under-fitting metric for the CBV correction.
plot : boolean
Plot or not CBVcorrector diagnostic figures.
"""
if True:
warnings.filterwarnings("ignore", category=RuntimeWarning)
warnings.filterwarnings("ignore", category=lk.LightkurveWarning)
# Select which CBVs to use in the correction
cbv_type = ["SingleScale"]
# Select which CBV indices to use
# Use the first 8 SingleScale and all Spike CBVS
cbv_indices = [np.arange(1, 9)]
over_fit_m = []
under_fit_m = []
corrected_lcs = []
alpha = 1e-1
self.alpha = np.zeros(len(self.lcs))
# what if I optimize alpha for the first lc, then use that one for the rest?
for i in tqdm(range(len(self.lcs)), desc="Applying CBVs to LCs", leave=True):
lc = self.lcs[i][self.lcs[i].flux_err > 0].remove_outliers(
sigma_upper=5, sigma_lower=1e20
)
cbvcor = lk.correctors.CBVCorrector(lc, interpolate_cbvs=False)
if i % 1 == 0:
print("Optimizing alpha")
try:
cbvcor.correct(
cbv_type=cbv_type,
cbv_indices=cbv_indices,
alpha_bounds=[1e-2, 1e2],
target_over_score=0.9,
target_under_score=0.8,
)
alpha = cbvcor.alpha
if plot:
cbvcor.diagnose()
cbvcor.goodness_metric_scan_plot(
cbv_type=cbv_type, cbv_indices=cbv_indices
)
plt.show()
except (ValueError, TimeoutError):
print(
"Alpha optimization failed, using previous value %.4f" % alpha
)
self.alpha[i] = alpha
cbvcor.correct_gaussian_prior(
cbv_type=cbv_type, cbv_indices=cbv_indices, alpha=alpha
)
over_fit_m.append(cbvcor.over_fitting_metric())
if do_under:
under_fit_m.append(cbvcor.under_fitting_metric())
corrected_lcs.append(cbvcor.corrected_lc)
self.corrected_lcs = lk.LightCurveCollection(corrected_lcs)
self.over_fitting_metrics = np.array(over_fit_m)
if do_under:
self.under_fitting_metrics = np.array(under_fit_m)
return
def image_to_fits(self, path=None, overwrite=False):
"""
        Creates a FITS file that contains the time-averaged image of the EXBA mask
        in an ImageHDU, and the source catalog in a BinTableHDU.
Parameters
----------
path : string
Directory path where to save the FITS file.
overwrite : bool
Overwrite the output file.
Returns
-------
        hdu : astropy.io.fits.HDUList
            An HDU list with the primary header, the EXBA image, and the source
            catalog. Only returned when `path` is None.
"""
primary_hdu = fits.PrimaryHDU(data=None, header=self.tpfs[0].get_header())
phdr = primary_hdu.header
phdr.set("OBJECT", "EXBA mask", "type of image")
phdr.set("RA_OBJ", self.ra.mean())
phdr.set("DEC_OBJ", self.dec.mean())
phdr.set("ROW_0", self.row.min(), "reference pixel value, origin top left")
phdr.set("COL_0", self.column.min(), "reference pixel value, origin top left")
image_hdu = fits.ImageHDU(data=self.flux_2d.mean(axis=0).value)
image_hdu.header["TTYPE1"] = "FLUX"
image_hdu.header["TFORM1"] = "E"
image_hdu.header["TUNIT1"] = "e-/s"
image_hdu.header["DATE"] = (datetime.datetime.now().strftime("%Y-%m-%d"),)
table_hdu = fits.BinTableHDU(data=Table.from_pandas(self.sources))
table_hdu.header["GAIA_DR"] = self.gaia_dr
hdu = fits.HDUList([primary_hdu, image_hdu, table_hdu])
if path is not None:
hdu.writeto(path, overwrite=overwrite, checksum=True)
else:
return hdu
    def lcs_to_fits(self, path=None, overwrite=False):
        """
        Save all the light curves to FITS files, one HDUList per source. Files are
        written to `path` when it is provided (set `overwrite=True` to replace
        existing files); the list of HDULists is returned.
        """
hdu_list = []
for i, lc in enumerate(self.lcs):
# lc.quality = 0
# lc.centroid_col = lc.column
# lc.centroid_row = lc.row
hdu = lc.to_fits(**lc.meta)
hdu[1].header["FLFRCSAP"] = lc.FLFRCSAP
hdu[1].header["CROWDSAP"] = lc.CROWDSAP
hdu = lk.lightcurve._make_aperture_extension(hdu, self.aperture_mask_2d[i])
hdu[2].header["FLFRCSAP"] = lc.FLFRCSAP
hdu[2].header["CROWDSAP"] = lc.CROWDSAP
del hdu[0].header["FLFRCSAP"], hdu[0].header["CROWDSAP"]
if path is not None:
name = "%s/lc_%s.fits" % (path, lc.label.replace(" ", "_"))
hdu.writeto(name, overwrite=overwrite, checksum=True)
hdu_list.append(hdu)
return hdu_list
def plot_image(self, frame=0, sources=True, ax=None):
"""
Function to plot the full EXBA image and the Gaia Sources.
Parameters
----------
frame : int
Frame number. The default is 0, i.e. the first frame.
sources : boolean
Whether to overplot or not the source catalog
        ax : matplotlib.axes
            Matplotlib axis can be provided; if not, one will be created and returned.
        Returns
        -------
        ax : matplotlib.axes
            Matplotlib axis with the figure.
"""
if ax is None:
fig, ax = plt.subplots(1, figsize=(5, 7))
ax = plt.subplot(projection=self.wcs)
ax.set_title("EXBA mask Quarter %i Channel %i" % (self.quarter, self.channel))
pc = ax.pcolormesh(
self.column_2d,
self.row_2d,
self.flux_2d[frame],
shading="auto",
cmap="viridis",
norm=colors.SymLogNorm(linthresh=100, vmin=0, vmax=1000, base=10),
rasterized=True,
)
if sources:
ax.scatter(
self.sources.col,
self.sources.row,
s=20,
facecolors="none",
marker="o",
edgecolors="r",
linewidth=1.5,
label="Gaia Sources",
)
ax.set_xlabel("R.A. [hh:mm:ss]", fontsize=14)
ax.set_ylabel("Dec [deg]", fontsize=14)
cbar = fig.colorbar(pc, fraction=0.1, pad=0.04)
cbar.set_label(label=r"Flux ($e^{-}s^{-1}$)", size=14)
ax.set_aspect("equal", adjustable="box")
return ax
def plot_stamp(self, source_idx=0, aperture_mask=False, ax=None):
"""
Creates a figure with the "stamp" image of a given source and its aperture
mask.
Parameters
----------
source_idx : int
Index of the source in `self.sources` catalog to be plotted.
        aperture_mask : boolean
            Whether to plot the aperture mask.
        ax : matplotlib.axes
            Matplotlib axis can be provided; if not, one will be created and returned.
        Returns
        -------
        ax : matplotlib.axes
            Matplotlib axis with the figure.
"""
if isinstance(source_idx, str):
idx = np.where(self.sources.designation == source_idx)[0][0]
else:
idx = source_idx
if ax is None:
fig, ax = plt.subplots(1)
pc = ax.pcolor(
self.flux_2d[0],
shading="auto",
norm=colors.SymLogNorm(linthresh=50, vmin=3, vmax=5000, base=10),
)
ax.scatter(
self.sources.col - self.column.min() + 0.5,
self.sources.row - self.row.min() + 0.5,
s=20,
facecolors="y",
marker="o",
edgecolors="k",
)
ax.scatter(
self.sources.col.iloc[idx] - self.column.min() + 0.5,
self.sources.row.iloc[idx] - self.row.min() + 0.5,
s=25,
facecolors="r",
marker="o",
edgecolors="r",
)
ax.set_xlabel("Pixels")
ax.set_ylabel("Pixels")
plt.colorbar(pc, label=r"Flux ($e^{-}s^{-1}$)", ax=ax)
ax.set_aspect("equal", adjustable="box")
if aperture_mask:
for i in range(self.n_rows):
for j in range(self.n_columns):
if self.aperture_mask_2d[idx, i, j]:
rect = patches.Rectangle(
xy=(j, i),
width=1,
height=1,
color="red",
fill=False,
hatch="",
lw=1.5,
)
ax.add_patch(rect)
zoom = np.argwhere(self.aperture_mask_2d[idx] == True)
ax.set_ylim(
np.maximum(0, zoom[0, 0] - 5),
np.minimum(zoom[-1, 0] + 5, self.n_rows),
)
ax.set_xlim(
np.maximum(0, zoom[0, -1] - 5),
np.minimum(zoom[-1, -1] + 5, self.n_columns),
)
ax.set_title(
"FLFRCSAP %.2f\nCROWDSAP %.2f"
% (self.FLFRCSAP[idx], self.CROWDSAP[idx]),
bbox=dict(facecolor="white", alpha=1),
)
return ax
def plot_lightcurve(self, source_idx=0, ax=None):
"""
        Creates a figure with the light curve of a given source.
Parameters
----------
source_idx : int
Index of the source in `self.sources` catalog to be plotted.
        ax : matplotlib.axes
            Matplotlib axis can be provided; if not, one will be created and returned.
        Returns
        -------
        ax : matplotlib.axes
            Matplotlib axis with the figure.
"""
if ax is None:
fig, ax = plt.subplots(1, figsize=(9, 3))
if isinstance(source_idx, str):
s = np.where(self.sources.designation == source_idx)[0][0]
else:
s = source_idx
ax.set_title(
"Channel %i Quarter %i Source %s (%i)"
% (self.channel, self.quarter, self.lcs[s].label, s)
)
if hasattr(self, "flatten_lcs"):
self.lcs[s].normalize().plot(label="raw", ax=ax, c="k", alpha=0.4)
self.flatten_lcs[s].plot(label="flatten", ax=ax, c="k", offset=-0.02)
if hasattr(self, "corrected_lcs"):
self.corrected_lcs[s].normalize().plot(
label="CBV", ax=ax, c="tab:blue", offset=+0.04
)
else:
self.lcs[s].plot(label="raw", ax=ax, c="k", alpha=0.4)
if hasattr(self, "corrected_lcs"):
self.corrected_lcs[s].plot(
label="CBV", ax=ax, c="tab:blue", offset=-0.02
)
return ax
```
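The class above bundles the EXBA TPF download, the Gaia cross-match, and the aperture photometry helpers into a single object. A minimal usage sketch follows; the import path and the class name `EXBAMachine` are assumptions inferred from the `ORIGIN` metadata written in `create_lcs`, not something this snippet confirms, so adjust them to the actual package layout.
```python
# Hypothetical usage sketch of the EXBA class above; the import path is an assumption.
from kepler_apertures import EXBAMachine

exba = EXBAMachine(channel=53, quarter=5, magnitude_limit=20, gaia_dr=3)
print(exba)                         # e.g. "EXBA Patch: Channel 53, Quarter 5, ..."

ax = exba.plot_image(sources=True)  # full EXBA stamp with the Gaia sources overlaid
hdul = exba.image_to_fits()         # time-averaged image + source catalog as an HDUList

# Aperture photometry needs a boolean mask of shape [n_sources, n_pixels];
# in practice those masks come from a PRF model (see the KeplerPRF sketch below).
```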
#### File: src/kepler_apertures/KeplerPRF.py
```python
import os
import warnings
import numpy as np
import pandas as pd
from scipy import sparse
from scipy.optimize import minimize_scalar
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from matplotlib import patches
from astropy.io import fits
from . import PACKAGEDIR, DATAOUTDIR
from .utils import _make_A_polar
warnings.filterwarnings("ignore", category=RuntimeWarning)
warnings.filterwarnings("ignore", category=sparse.SparseEfficiencyWarning)
class KeplerPRF(object):
"""
Class to load PRF models computed from FFI, to create photometric apertures
"""
def __init__(
self,
prf_ws: np.array,
n_r_knots: int = 5,
n_phi_knots: int = 15,
rmin: float = 0.25,
rmax: float = 5,
):
"""
        A KeplerPRF object is built by providing the hyperparameters of the spline
        model and the weights of each basis spline. The hyperparameters allow the
        same basis splines to be reconstructed, while the weights are used when
        evaluating the model on new data.
Parameters
        ----------
prf_ws : numpy.ndarray
Weights corresponding to each basis of the design matrix.
rmin : float
The minimum radius for the PRF model to be fit.
rmax : float
The maximum radius for the PRF model to be fit.
n_r_knots : int
Number of radial knots in the spline model.
n_phi_knots : int
Number of azimuthal knots in the spline model.
Attributes
----------
        prf_ws : numpy.ndarray
Weights corresponding to each basis of the design matrix.
rmin : float
The minimum radius for the PRF model to be fit.
rmax : float
The maximum radius for the PRF model to be fit.
n_r_knots : int
Number of radial knots in the spline model.
n_phi_knots : int
Number of azimuthal knots in the spline model.
"""
self.prf_ws = prf_ws
self.rmin = rmin
self.rmax = rmax
self.n_r_knots = n_r_knots
self.n_phi_knots = n_phi_knots
@staticmethod
def load_from_file(
quarter: int = 5,
channel: int = 1,
):
"""
        Loads a PRF model built from Kepler's FFIs for a given quarter and channel.
        Note: the file with the PRF models is a CSV file containing a multi-index
        pandas DataFrame; a FITS version is in development.
Parameters
----------
channel : int
Channel number of the FFI to be used to model the PRF. Valid values are
between 1 and 84.
quarter : int
Number of the quarter that will be used to model the PRF.
Valid values are between 1 and 17.
Returns
-------
KeplerPRF : KeplerPRF
An object with the PRF model ready to be evaluated in new data.
"""
# load PSF model
fname = "%s/data/ffi_prf_models_v0.1.0.csv" % (PACKAGEDIR)
if not os.path.isfile(fname):
raise FileNotFoundError("No PSF files: ", fname)
try:
tab = pd.read_csv(fname, index_col=0, header=[0, 1])
n_r_knots = int(tab.loc[channel, (str(quarter), "n_r_knots")])
n_phi_knots = int(tab.loc[channel, (str(quarter), "n_phi_knots")])
rmin = int(tab.loc[channel, (str(quarter), "rmin")])
rmax = int(tab.loc[channel, (str(quarter), "rmax")])
prf_ws = tab.loc[channel, str(quarter)].iloc[4:].values
except KeyError:
raise IOError(
"Quarter %i and channel %i has no PRF model data" % (quarter, channel)
)
return KeplerPRF(prf_ws, n_r_knots, n_phi_knots, rmin, rmax)
def evaluate_PSF(self, dx, dy):
"""
Function to evaluate the PRF model in a grid of data. THe function returns
a the prediction of the model as normalized flux. The model is evaluated in
pixels up to r < 7 from the location of the source.
Parameters
----------
dx : numpy.ndarray
Distance between pixels (row direction) and source coordinates.
        dy : numpy.ndarray
Distance between pixels (column direction) and source coordinates.
Returns
-------
source_model: scipy.sparse.csr_matrix
            Normalized flux values of the PRF model evaluated on the dx, dy grid.
"""
r = np.hypot(dx, dy)
phi = np.arctan2(dy, dx)
source_mask = r <= np.floor(self.rmax)
phi[phi >= np.pi] = np.pi - 1e-6
try:
dm = _make_A_polar(
phi[source_mask].ravel(),
r[source_mask].ravel(),
rmin=self.rmin,
rmax=self.rmax,
n_r_knots=self.n_r_knots,
n_phi_knots=self.n_phi_knots,
)
except ValueError:
dm = _make_A_polar(
phi[source_mask].ravel(),
r[source_mask].ravel(),
rmin=np.percentile(r[source_mask].ravel(), 1),
rmax=np.percentile(r[source_mask].ravel(), 99),
n_r_knots=self.n_r_knots,
n_phi_knots=self.n_phi_knots,
)
source_model = sparse.csr_matrix(r.shape)
m = 10 ** dm.dot(self.prf_ws)
source_model[source_mask] = m
source_model.eliminate_zeros()
# psf_models = source_model.multiply(1 / source_model.sum(axis=1)).tocsr()
return source_model
def diagnose_metrics(self, psf_models, idx=0, ax=None, plot=True):
"""
Function to evaluate the flux metrics for a single source as a function of
the parameter that controls the aperture size.
        The flux metrics are computed by taking into account the PSF models of
        neighboring sources.
        This function is meant to be used only to generate the diagnostic, or as a
        helper function of `optimize_aperture()` to precalculate the values of the
        metrics and find the optimal aperture in the case of isolated sources, where
        the optimum is the full aperture.
Parameters
----------
psf_models : scipy.sparse.csr_matrix
Sparse matrix with the PSF models of all sources in the field. It has shape
of [n_sources, n_pixels]
idx : int
            Index of the source for which the metrics will be computed. Has to be a
number between 0 and psf_models.shape[0].
ax : matplotlib.axes
Axis to be used to plot the figure
plot : boolean
Plot the metrics values.
Returns
-------
ax : matplotlib.axes
Figure axes
"""
compl, crowd, cut = [], [], []
for p in range(0, 101, 1):
cut.append(p)
mask = (
psf_models[idx] >= np.percentile(psf_models[idx].data, p)
).toarray()[0]
crowd.append(self.compute_CROWDSAP(psf_models, mask, idx))
compl.append(self.compute_FLFRCSAP(psf_models[idx].toarray()[0], mask))
self.compl = np.array(compl)
self.crowd = np.array(crowd)
self.cut = np.array(cut)
if plot:
if ax is None:
fig, ax = plt.subplots(1)
ax.plot(self.cut, self.compl, label=r"FLFRCSAP")
ax.plot(self.cut, self.crowd, label=r"CROWDSAP")
ax.set_xlabel("Percentile")
ax.set_ylabel("Metric")
ax.legend()
return ax
def create_aperture_mask(self, psf_models, percentile=0, idx=None):
"""
        Function to create the aperture mask of a given source for a given aperture
        size. This function can compute the aperture mask for one or for all sources
        available in psf_models.
Parameters
----------
psf_models : scipy.sparse.csr_matrix
Sparse matrix with the PSF models of all sources in the field. It has shape
of [n_sources, n_pixels]
percentile : float
Percentile value that defines the isophote from the distribution of values
in the psf model of the source
idx : int
            Index of the source for which the metrics will be computed. Has to be a
number between 0 and psf_models.shape[0]. If None, then it computes the
apertures for all sources in psf_models.
Returns
-------
mask : numpy.ndarray
Boolean array with the aperture mask.
completeness : numpy.ndarray
Flux metric indicating flux completeness for the selected aperture.
crowdeness : numpy.ndarray
Flux metric indicating flux contamination for the selected aperture.
"""
if idx is not None:
mask = (
psf_models[idx] >= np.percentile(psf_models[idx].data, percentile)
).toarray()[0]
# recompute metrics for optimal mask
complet = self.compute_FLFRCSAP(psf_models[idx].toarray()[0], mask)
crowd = self.compute_CROWDSAP(psf_models, mask, idx)
return mask, complet, crowd
else:
masks, completeness, crowdeness = [], [], []
for idx in range(psf_models.shape[0]):
mask = (
psf_models[idx] >= np.percentile(psf_models[idx].data, percentile)
).toarray()[0]
masks.append(mask)
completeness.append(
self.compute_FLFRCSAP(psf_models[idx].toarray()[0], mask)
)
crowdeness.append(self.compute_CROWDSAP(psf_models, mask, idx))
return np.array(masks), np.array(completeness), np.array(crowdeness)
def optimize_aperture(
self, psf_models, idx=0, target_complet=0.9, target_crowd=0.9, max_iter=100
):
"""
        Function to optimize the aperture mask for a given source. There are two
        special cases:
            * Isolated sources, where the optimal aperture is the full aperture.
            * Optimizing for one single metric.
        In these two cases no actual optimization is performed, and we use the
        results from `diagnose_metrics()`.
        The optimization is done using scipy's bounded Brent algorithm and a custom
        loss function with a Leaky ReLU term to achieve the target value for
        both metrics.
Parameters
----------
psf_models : scipy.sparse.csr_matrix
Sparse matrix with the PSF models of all sources in the field. It has shape
of [n_sources, n_pixels]
        idx : int
            Index of the source for which the aperture will be optimized. Has to be
            a number between 0 and psf_models.shape[0].
target_complet : float
Value of the target completeness metric.
target_crowd : float
Value of the target crowdeness metric.
max_iter : int
            Maximum number of iterations to be performed by the optimizer.
Returns
-------
mask : numpy.ndarray
Boolean array with the aperture mask.
completeness : float
Flux metric indicating flux completeness for the selected aperture.
crowdeness : float
Flux metric indicating flux contamination for the selected aperture.
optimal_percentile : float
Percentile of the normalized flux distribution that defines the isophote.
"""
# Do special cases when optimizing for only one metric
self.diagnose_metrics(psf_models, idx=idx, plot=False)
if target_complet < 0 and target_crowd > 0:
optim_p = self.cut[np.argmax(self.crowd)]
elif target_crowd < 0 and target_complet > 0:
optim_p = self.cut[np.argmax(self.compl)]
# for isolated sources, only need to optimize for completeness, in case of
# asking for 2 metrics
elif target_complet > 0 and target_crowd > 0 and all(self.crowd > 0.99):
optim_p = self.cut[np.argmax(self.compl)]
else:
optim_params = {
"percentile_bounds": [5, 95],
"target_complet": target_complet,
"target_crowd": target_crowd,
"max_iter": max_iter,
"psf_models": psf_models,
"idx": idx,
}
minimize_result = minimize_scalar(
self._goodness_metric_obj_fun,
method="Bounded",
bounds=[5, 95],
options={"maxiter": max_iter, "disp": False},
args=(optim_params),
)
optim_p = minimize_result.x
mask = (
psf_models[idx] >= np.percentile(psf_models[idx].data, optim_p)
).toarray()[0]
# recompute metrics for optimal mask
complet = self.compute_FLFRCSAP(psf_models[idx].toarray()[0], mask)
crowd = self.compute_CROWDSAP(psf_models, mask, idx)
return mask, complet, crowd, optim_p
def _goodness_metric_obj_fun(self, percentile, optim_params):
"""
The objective function to minimize with scipy.optimize.minimize_scalar called
during optimization of the photometric aperture.
Parameters
----------
percentile : int
Percentile of the normalized flux distribution that defines the isophote.
optim_params : dictionary
            Dictionary with the variables needed to evaluate the metric:
psf_models
idx
target_complet
target_crowd
Returns
-------
        penalty : float
            Value of the objective function to be used for optimization.
"""
psf_models = optim_params["psf_models"]
idx = optim_params["idx"]
# Find the value where to cut
cut = np.percentile(psf_models[idx].data, int(percentile))
# create "isophot" mask with current cut
mask = (psf_models[idx] > cut).toarray()[0]
# Do not compute and ignore if target score < 0
if optim_params["target_complet"] > 0:
completMetric = self.compute_FLFRCSAP(psf_models[idx].toarray()[0], mask)
else:
completMetric = 1.0
# Do not compute and ignore if target score < 0
if optim_params["target_crowd"] > 0:
crowdMetric = self.compute_CROWDSAP(psf_models, mask, idx)
else:
crowdMetric = 1.0
# Once we hit the target we want to ease-back on increasing the metric
# However, we don't want to ease-back to zero pressure, that will
        # unconstrain the penalty term and cause the optimizer to run wild.
# So, use a "Leaky ReLU"
# metric' = threshold + (metric - threshold) * leakFactor
leakFactor = 0.01
if (
optim_params["target_complet"] > 0
and completMetric >= optim_params["target_complet"]
):
completMetric = optim_params["target_complet"] + 0.001 * (
completMetric - optim_params["target_complet"]
)
if (
optim_params["target_crowd"] > 0
and crowdMetric >= optim_params["target_crowd"]
):
crowdMetric = optim_params["target_crowd"] + 0.1 * (
crowdMetric - optim_params["target_crowd"]
)
penalty = -(completMetric + 10 * crowdMetric)
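        # Illustrative note (worked example of the penalty above, not part of the
        # optimization itself): with targets of 0.9/0.9, an aperture giving
        # complet = 0.95 and crowd = 0.85 is scored as
        # penalty = -((0.9 + 0.001 * 0.05) + 10 * 0.85) ~= -9.4,
        # so completeness above its target contributes almost nothing while the
        # crowding term keeps being pushed towards its target.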
return penalty
# def plot_mean_PSF(self, ax=None):
# """
# Function to plot the PRF model as created from the FFI. This is only for
# illustration purposes.
#
# Parameters
# ----------
# ax : matplotlib.axes
# Matlotlib axis can be provided, if not one will be created and returned
#
# Returns
# -------
# ax : matplotlib.axes
# Matlotlib axis with the figure
# """
# if not hasattr(self, "x_data"):
# raise AttributeError("Class doesn't have attributes to plot PSF model")
#
# if ax is None:
# fig, ax = plt.subplots(1, 2, figsize=(8, 3))
# vmin = -0.5
# vmax = -3
# cax = ax[0].scatter(
# self.x_data,
# self.y_data,
# c=self.f_data,
# marker=".",
# s=2,
# vmin=vmin,
# vmax=vmax,
# )
# fig.colorbar(cax, ax=ax[0])
# ax[0].set_title("Data mean flux")
# ax[0].set_ylabel("dy")
# ax[0].set_xlabel("dx")
#
# cax = ax[1].scatter(
# self.x_data,
# self.y_data,
# c=self.f_model,
# marker=".",
# s=2,
# vmin=vmin,
# vmax=vmax,
# )
# fig.colorbar(cax, ax=ax[1])
# ax[1].set_title("Average PSF Model")
# ax[1].set_xlabel("dx")
#
# return ax
def plot_aperture(self, flux, mask=None, ax=None, log=False):
"""
Function to plot the photometric aperture for a given source.
Parameters
----------
flux : numpy.ndarray
Data array with the flux image.
mask : numpy.ndarray
Boolean array with the aperture mask
log : boolean
Plot the image in log or linear scale.
        ax : matplotlib.axes
            Matplotlib axis can be provided; if not, one will be created and returned.
        Returns
        -------
        ax : matplotlib.axes
            Matplotlib axis with the figure.
"""
if ax is None:
fig, ax = plt.subplots(1, figsize=(5, 5))
pc = ax.pcolor(
flux,
shading="auto",
norm=colors.LogNorm() if log else None,
)
plt.colorbar(pc, label="", fraction=0.038, ax=ax)
ax.set_aspect("equal", adjustable="box")
ax.set_title("")
if mask is not None:
for i in range(flux.shape[0]):
for j in range(flux.shape[1]):
if mask[i, j]:
rect = patches.Rectangle(
xy=(j, i),
width=1,
height=1,
color="red",
fill=False,
hatch="",
)
ax.add_patch(rect)
zoom = np.argwhere(mask == True)
ax.set_ylim(
np.maximum(0, zoom[0, 0] - 3),
np.minimum(zoom[-1, 0] + 3, flux.shape[0]),
)
ax.set_xlim(
np.maximum(0, zoom[0, -1] - 3),
np.minimum(zoom[-1, -1] + 3, flux.shape[1]),
)
else:
ax.set_xlim(np.argmax(flux))
ax.set_ylim()
return ax
@staticmethod
def compute_FLFRCSAP(psf_model, mask):
"""
Compute fraction of target flux enclosed in the optimal aperture to total flux
for a given source (flux completeness).
Parameters
----------
psf_model: numpy ndarray
Array with the PSF model for the target source. It has shape [n_pixels]
mask: boolean array
Array of boolean indicating the aperture for the target source.
Returns
-------
FLFRCSAP: float
Completeness metric
"""
return psf_model[mask].sum() / psf_model.sum()
@staticmethod
def compute_CROWDSAP(psf_models, mask, idx):
"""
Compute the ratio of target flux relative to flux from all sources within
        the photometric aperture (i.e. 1 - crowdedness).
Parameters
----------
psf_models: numpy ndarray
Array with the PSF models for all targets in the cutout. It has shape
[n_sources, n_pixels].
mask: boolean array
Array of boolean indicating the aperture for the target source.
idx: int
Index of the source to compute the metric. It has to be a number between
0 and psf_models.shape[0].
Returns
-------
CROWDSAP: float
Crowdeness metric
"""
ratio = (
psf_models.multiply(1 / psf_models.sum(axis=0)).tocsr()[idx].toarray()[0]
)
return ratio[mask].sum() / mask.sum()
``` |
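Continuing the sketch above (same hypothetical import path; the method names and signatures are taken from the code shown, but the end-to-end wiring is illustrative only), the PRF model is loaded for the matching quarter/channel, evaluated on the per-source pixel offsets, and turned into aperture masks and light curves:
```python
# Hypothetical sketch combining KeplerPRF with the `exba` object from the
# previous example; the import path is an assumption.
from kepler_apertures import KeplerPRF

prf = KeplerPRF.load_from_file(quarter=5, channel=53)

# `exba.dx` / `exba.dy` have shape [n_sources, n_pixels], as evaluate_PSF expects.
psf_models = prf.evaluate_PSF(exba.dx, exba.dy)

# Either cut every PSF model at a fixed isophote (here the 50th percentile) ...
masks, complet, crowd = prf.create_aperture_mask(psf_models, percentile=50)

# ... or optimize the isophote per source for target completeness/contamination.
mask0, complet0, crowd0, perc0 = prf.optimize_aperture(
    psf_models, idx=0, target_complet=0.9, target_crowd=0.9
)

# Feed the masks back into the EXBA object to extract SAP light curves.
exba.FLFRCSAP, exba.CROWDSAP = complet, crowd
exba.create_lcs(masks)
lcs = exba.lcs                      # lightkurve.LightCurveCollection, one LC per source
```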
{
"source": "jorgemarpa/lightkurve",
"score": 2
} |
#### File: lightkurve/seismology/core.py
```python
import logging
import warnings
import numpy as np
from matplotlib import pyplot as plt
from scipy.signal import find_peaks
from astropy import units as u
from astropy.units import cds
from .. import MPLSTYLE
from . import utils, stellar_estimators
from ..periodogram import SNRPeriodogram
from ..utils import LightkurveWarning, validate_method
from .utils import SeismologyQuantity
# Import the optional Bokeh dependency required by ``interact_echelle``,
# or print a friendly error otherwise.
try:
import bokeh # Import bokeh first so we get an ImportError we can catch
from bokeh.io import show, output_notebook
from bokeh.plotting import figure
from bokeh.models import LogColorMapper, Slider, RangeSlider, Button
from bokeh.layouts import layout, Spacer
except:
    # Nice error will be raised when ``interact_echelle`` is called.
pass
log = logging.getLogger(__name__)
__all__ = ["Seismology"]
class Seismology(object):
"""Enables astroseismic quantities to be estimated from periodograms.
This class provides easy access to methods to estimate numax, deltanu, radius,
mass, and logg, and stores them on its tray for easy diagnostic plotting.
Examples
--------
Download the TESS light curve for HIP 116158:
>>> import lightkurve as lk
>>> lc = lk.search_lightcurve("HIP 116158", sector=2).download() # doctest: +SKIP
>>> lc = lc.normalize().remove_nans().remove_outliers() # doctest: +SKIP
Create a Lomb-Scargle periodogram:
>>> pg = lc.to_periodogram(normalization='psd', minimum_frequency=100, maximum_frequency=800) # doctest: +SKIP
Create a Seismology object and use it to estimate parameters:
>>> seismology = pg.flatten().to_seismology() # doctest: +SKIP
>>> seismology.estimate_numax() # doctest: +SKIP
numax: 415.00 uHz (method: ACF2D)
>>> seismology.estimate_deltanu() # doctest: +SKIP
deltanu: 28.78 uHz (method: ACF2D)
>>> seismology.estimate_radius(teff=5080) # doctest: +SKIP
radius: 2.78 solRad (method: Uncorrected Scaling Relations)
Parameters
----------
periodogram : `~lightkurve.periodogram.Periodogram` object
Periodogram to be analyzed. Must be background-corrected,
e.g. using `periodogram.flatten()`.
"""
periodogram = None
"""The periodogram from which seismological parameters are being extracted."""
def __init__(self, periodogram):
if not isinstance(periodogram, SNRPeriodogram):
warnings.warn(
"Seismology received a periodogram which does not appear "
"to have been background-corrected. Please consider calling "
"`periodogram.flatten()` prior to extracting seismological parameters.",
LightkurveWarning,
)
self.periodogram = periodogram
def __repr__(self):
attrs = np.asarray(["numax", "deltanu", "mass", "radius", "logg"])
tray = np.asarray([hasattr(self, attr) for attr in attrs])
if tray.sum() == 0:
tray_str = " - no values have been computed so far."
else:
tray_str = " - computed values:\n * " + "\n * ".join(
[getattr(self, attr).__repr__() for attr in attrs[tray]]
)
return "Seismology(ID: {}){}".format(self.periodogram.label, tray_str)
@staticmethod
def from_lightcurve(lc, **kwargs):
"""Returns a `Seismology` object given a `LightCurve`."""
log.info(
"Building a Seismology object directly from a light curve "
"uses default periodogram parameters. For further tuneability, "
"create a periodogram object first, using `to_periodogram`."
)
return Seismology(
periodogram=lc.normalize()
.remove_nans()
.fill_gaps()
.to_periodogram(**kwargs)
.flatten()
)
def _validate_numax(self, numax):
"""Raises exception if `numax` is None and `self.numax` is not set."""
if numax is None:
try:
return self.numax
except AttributeError:
raise AttributeError(
"You need to call `Seismology.estimate_numax()` first."
)
return numax
def _validate_deltanu(self, deltanu):
"""Raises exception if `deltanu` is None and `self.deltanu` is not set."""
if deltanu is None:
try:
return self.deltanu
except AttributeError:
raise AttributeError(
"You need to call `Seismology.estimate_deltanu()` first."
)
return deltanu
def _clean_echelle(
self,
deltanu=None,
numax=None,
minimum_frequency=None,
maximum_frequency=None,
smooth_filter_width=0.1,
scale="linear",
):
"""Takes input seismology object and creates the necessary arrays for an echelle
diagram. Validates all the inputs.
Parameters
----------
deltanu : float
Value for the large frequency separation of the seismic mode
frequencies in the periodogram. Assumed to have the same units as
the frequencies, unless given an Astropy unit.
Is assumed to be in the same units as frequency if not given a unit.
numax : float
Value for the frequency of maximum oscillation. If a numax is
passed, a suitable range one FWHM of the mode envelope either side
            of it will be shown. This is overwritten by custom frequency ranges.
Is assumed to be in the same units as frequency if not given a unit.
minimum_frequency : float
The minimum frequency at which to display the echelle
Is assumed to be in the same units as frequency if not given a unit.
maximum_frequency : float
The maximum frequency at which to display the echelle.
Is assumed to be in the same units as frequency if not given a unit.
smooth_filter_width : float
If given a value, will smooth periodogram used to plot the echelle
diagram using the periodogram.smooth(method='boxkernel') method with
a filter width of `smooth_filter_width`. This helps visualise the
echelle diagram. Is assumed to be in the same units as the
periodogram frequency.
ax : `~matplotlib.axes.Axes`
A matplotlib axes object to plot into. If no axes is provided,
a new one will be created.
scale: str
Set z axis to be "linear" or "log". Default is linear.
Returns
-------
ep : np.ndarray
Echelle diagram power
x_f : np.ndarray
frequencies for X axis
y_f : np.ndarray
frequencies for Y axis
"""
if (minimum_frequency is None) & (maximum_frequency is None):
numax = self._validate_numax(numax)
deltanu = self._validate_deltanu(deltanu)
if (not hasattr(numax, "unit")) & (numax is not None):
numax = numax * self.periodogram.frequency.unit
if (not hasattr(deltanu, "unit")) & (deltanu is not None):
deltanu = deltanu * self.periodogram.frequency.unit
if smooth_filter_width:
pgsmooth = self.periodogram.smooth(filter_width=smooth_filter_width)
freq = pgsmooth.frequency # Makes code below more readable below
power = pgsmooth.power # Makes code below more readable below
else:
freq = self.periodogram.frequency # Makes code below more readable
power = self.periodogram.power # Makes code below more readable
fmin = freq[0]
fmax = freq[-1]
# Check for any superfluous input
if (numax is not None) & (
any([a is not None for a in [minimum_frequency, maximum_frequency]])
):
warnings.warn(
"You have passed both a numax and a frequency limit. "
"The frequency limit will override the numax input.",
LightkurveWarning,
)
# Ensure input numax is in the correct units (if there is one)
if numax is not None:
numax = u.Quantity(numax, freq.unit).value
if numax > freq[-1].value:
raise ValueError(
"You can't pass in a numax outside the"
"frequency range of the periodogram."
)
fwhm = utils.get_fwhm(self.periodogram, numax)
fmin = numax - 2 * fwhm
if fmin < freq[0].value:
fmin = freq[0].value
fmax = numax + 2 * fwhm
if fmax > freq[-1].value:
fmax = freq[-1].value
# Set limits and set them in the right units
if minimum_frequency is not None:
fmin = u.Quantity(minimum_frequency, freq.unit).value
if fmin > freq[-1].value:
raise ValueError(
"You can't pass in a limit outside the "
"frequency range of the periodogram."
)
if maximum_frequency is not None:
fmax = u.Quantity(maximum_frequency, freq.unit).value
if fmax > freq[-1].value:
raise ValueError(
"You can't pass in a limit outside the "
"frequency range of the periodogram."
)
# Make sure fmin and fmax are Quantities or code below will break
fmin = u.Quantity(fmin, freq.unit)
fmax = u.Quantity(fmax, freq.unit)
# Add on 1x deltanu so we don't miss off any important range due to rounding
if fmax < freq[-1] - 1.5 * deltanu:
fmax += deltanu
fs = np.median(np.diff(freq))
x0 = int(freq[0] / fs)
ff = freq[int(fmin / fs) - x0 : int(fmax / fs) - x0] # Selected frequency range
pp = power[int(fmin / fs) - x0 : int(fmax / fs) - x0] # Power range
# Reshape the power into n_rows of n_columns
# When modulus ~ zero, deltanu divides into frequency without remainder
mod_zeros = find_peaks(-1.0 * (ff % deltanu))[0]
# The bottom left corner of the plot is the lowest frequency that
# divides into deltanu with almost zero remainder
start = mod_zeros[0]
# The top left corner of the plot is the highest frequency that
# divides into deltanu with almost zero remainder. This index is the
# approximate end, because we fix an integer number of rows and columns
approx_end = mod_zeros[-1]
# The number of rows is the number of times you can partition your
# frequency range into chunks of size deltanu, start and ending at
# frequencies that divide nearly evenly into deltanu
n_rows = len(mod_zeros) - 1
# The number of columns is the total number of frequency points divided
# by the number of rows, floor divided to the nearest integer value
n_columns = int((approx_end - start) / n_rows)
# The exact end point is therefore the ncolumns*nrows away from the start
end = start + n_columns * n_rows
ep = np.reshape(pp[start:end], (n_rows, n_columns))
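        # Illustrative note: e.g. for a periodogram sampled every ~0.1 uHz between
        # roughly 100 and 160 uHz with deltanu = 10 uHz, `ep` has ~5-6 rows (one per
        # deltanu-wide slice) and ~100 columns, so ep[i, j] holds the power at roughly
        # (first folded frequency + 10*i + 0.1*j) uHz, and vertical ridges in the
        # image trace modes of equal radial degree.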
if scale == "log":
ep = np.log10(ep)
        # Reshape the freq into n_rows of n_columns & create arrays
ef = np.reshape(ff[start:end], (n_rows, n_columns))
x_f = (ef[0, :] - ef[0, 0]) % deltanu
y_f = ef[:, 0]
return ep, x_f, y_f
def plot_echelle(
self,
deltanu=None,
numax=None,
minimum_frequency=None,
maximum_frequency=None,
smooth_filter_width=0.1,
scale="linear",
ax=None,
cmap="Blues",
):
"""Plots an echelle diagram of the periodogram by stacking the
periodogram in slices of deltanu.
Modes of equal radial degree should appear approximately vertically aligned.
If no structure is present, you are likely dealing with a faulty deltanu
value or a low signal to noise case.
This method is adapted from work by <NAME> & <NAME>.
Parameters
----------
deltanu : float
Value for the large frequency separation of the seismic mode
frequencies in the periodogram. Assumed to have the same units as
the frequencies, unless given an Astropy unit.
Is assumed to be in the same units as frequency if not given a unit.
numax : float
Value for the frequency of maximum oscillation. If a numax is
passed, a suitable range one FWHM of the mode envelope either side
            of it will be shown. This is overwritten by custom frequency ranges.
Is assumed to be in the same units as frequency if not given a unit.
minimum_frequency : float
The minimum frequency at which to display the echelle
Is assumed to be in the same units as frequency if not given a unit.
maximum_frequency : float
The maximum frequency at which to display the echelle.
Is assumed to be in the same units as frequency if not given a unit.
smooth_filter_width : float
If given a value, will smooth periodogram used to plot the echelle
diagram using the periodogram.smooth(method='boxkernel') method with
a filter width of `smooth_filter_width`. This helps visualise the
echelle diagram. Is assumed to be in the same units as the
periodogram frequency.
scale: str
Set z axis to be "linear" or "log". Default is linear.
cmap : str
The name of the matplotlib colourmap to use in the echelle diagram.
Returns
-------
ax : `~matplotlib.axes.Axes`
The matplotlib axes object.
"""
if (minimum_frequency is None) & (maximum_frequency is None):
numax = self._validate_numax(numax)
deltanu = self._validate_deltanu(deltanu)
if (not hasattr(numax, "unit")) & (numax is not None):
numax = numax * self.periodogram.frequency.unit
if (not hasattr(deltanu, "unit")) & (deltanu is not None):
deltanu = deltanu * self.periodogram.frequency.unit
ep, x_f, y_f = self._clean_echelle(
numax=numax,
deltanu=deltanu,
minimum_frequency=minimum_frequency,
maximum_frequency=maximum_frequency,
smooth_filter_width=smooth_filter_width,
)
# Plot the echelle diagram
with plt.style.context(MPLSTYLE):
if ax is None:
_, ax = plt.subplots()
extent = (x_f[0].value, x_f[-1].value, y_f[0].value, y_f[-1].value)
figsize = plt.rcParams["figure.figsize"]
a = figsize[1] / figsize[0]
b = (extent[3] - extent[2]) / (extent[1] - extent[0])
vmin = np.nanpercentile(ep.value, 1)
vmax = np.nanpercentile(ep.value, 99)
im = ax.imshow(
ep.value,
cmap=cmap,
aspect=a / b,
origin="lower",
extent=extent,
vmin=vmin,
vmax=vmax,
)
cbar = plt.colorbar(im, ax=ax, extend="both", pad=0.01)
if isinstance(self.periodogram, SNRPeriodogram):
ylabel = "Signal to Noise Ratio (SNR)"
elif self.periodogram.power.unit == cds.ppm:
ylabel = "Amplitude [{}]".format(
self.periodogram.power.unit.to_string("latex")
)
else:
ylabel = "Power Spectral Density [{}]".format(
self.periodogram.power.unit.to_string("latex")
)
if scale == "log":
ylabel = "log10(" + ylabel + ")"
cbar.set_label(ylabel)
ax.set_xlabel(r"Frequency mod. {:.2f}".format(deltanu))
ax.set_ylabel(
r"Frequency [{}]".format(
self.periodogram.frequency.unit.to_string("latex")
)
)
ax.set_title("Echelle diagram for {}".format(self.periodogram.label))
return ax
def _make_echelle_elements(
self,
deltanu,
cmap="viridis",
minimum_frequency=None,
maximum_frequency=None,
smooth_filter_width=0.1,
scale="linear",
plot_width=490,
plot_height=340,
title="Echelle",
):
"""Helper function to make the elements of the echelle diagram for bokeh plotting."""
if not hasattr(deltanu, "unit"):
deltanu = deltanu * self.periodogram.frequency.unit
if smooth_filter_width:
pgsmooth = self.periodogram.smooth(filter_width=smooth_filter_width)
freq = pgsmooth.frequency # Makes code below more readable below
else:
freq = self.periodogram.frequency # Makes code below more readable
ep, x_f, y_f = self._clean_echelle(
deltanu=deltanu,
minimum_frequency=minimum_frequency,
maximum_frequency=maximum_frequency,
smooth_filter_width=smooth_filter_width,
scale=scale,
)
fig = figure(
plot_width=plot_width,
plot_height=plot_height,
x_range=(0, 1),
y_range=(y_f[0].value, y_f[-1].value),
title=title,
tools="pan,box_zoom,reset",
toolbar_location="above",
border_fill_color="white",
)
fig.yaxis.axis_label = r"Frequency [{}]".format(freq.unit.to_string())
fig.xaxis.axis_label = r"Frequency / {:.3f} Mod. 1".format(deltanu)
lo, hi = np.nanpercentile(ep.value, [0.1, 99.9])
vlo, vhi = 0.3 * lo, 1.7 * hi
vstep = (lo - hi) / 500
color_mapper = LogColorMapper(palette="RdYlGn10", low=lo, high=hi)
fig.image(
image=[ep.value],
x=0,
y=y_f[0].value,
dw=1,
dh=y_f[-1].value,
color_mapper=color_mapper,
name="img",
)
stretch_slider = RangeSlider(
start=vlo,
end=vhi,
step=vstep,
title="",
value=(lo, hi),
orientation="vertical",
width=10,
height=230,
direction="rtl",
show_value=False,
sizing_mode="fixed",
name="stretch",
)
def stretch_change_callback(attr, old, new):
"""TPF stretch slider callback."""
fig.select("img")[0].glyph.color_mapper.high = new[1]
fig.select("img")[0].glyph.color_mapper.low = new[0]
stretch_slider.on_change("value", stretch_change_callback)
return fig, stretch_slider
def interact_echelle(self, notebook_url="localhost:8888", **kwargs):
"""Display an interactive Jupyter notebook widget showing an Echelle diagram.
This feature only works inside an active Jupyter Notebook, and
requires an optional dependency, ``bokeh`` (v1.0 or later).
This dependency can be installed using e.g. `conda install bokeh`.
Parameters
----------
notebook_url : str
Location of the Jupyter notebook page (default: "localhost:8888")
When showing Bokeh applications, the Bokeh server must be
explicitly configured to allow connections originating from
different URLs. This parameter defaults to the standard notebook
host and port. If you are running on a different location, you
will need to supply this value for the application to display
properly. If no protocol is supplied in the URL, e.g. if it is
of the form "localhost:8888", then "http" will be used.
"""
try:
import bokeh
if bokeh.__version__[0] == "0":
warnings.warn(
"interact() requires Bokeh version 1.0 or later", LightkurveWarning
)
except ImportError:
log.error(
"The interact() tool requires the `bokeh` Python package; "
"you can install bokeh using e.g. `conda install bokeh`."
)
return None
maximum_frequency = kwargs.pop(
"maximum_frequency", self.periodogram.frequency.max().value
)
minimum_frequency = kwargs.pop(
"minimum_frequency", self.periodogram.frequency.min().value
)
if not hasattr(self, "deltanu"):
dnu = SeismologyQuantity(
quantity=self.periodogram.frequency.max() / 30,
name="deltanu",
method="echelle",
)
else:
dnu = self.deltanu
def create_interact_ui(doc):
fig_tpf, stretch_slider = self._make_echelle_elements(
dnu,
maximum_frequency=maximum_frequency,
minimum_frequency=minimum_frequency,
**kwargs
)
maxdnu = self.periodogram.frequency.max().value / 5
# Interactive slider widgets
dnu_slider = Slider(
start=0.01,
end=maxdnu,
value=dnu.value,
step=0.01,
title="Delta Nu",
width=290,
)
r_button = Button(label=">", button_type="default", width=30)
l_button = Button(label="<", button_type="default", width=30)
rr_button = Button(label=">>", button_type="default", width=30)
ll_button = Button(label="<<", button_type="default", width=30)
def update(attr, old, new):
"""Callback to take action when dnu slider changes"""
dnu = SeismologyQuantity(
quantity=dnu_slider.value * u.microhertz,
name="deltanu",
method="echelle",
)
ep, _, _ = self._clean_echelle(
deltanu=dnu,
minimum_frequency=minimum_frequency,
maximum_frequency=maximum_frequency,
**kwargs
)
fig_tpf.select("img")[0].data_source.data["image"] = [ep.value]
fig_tpf.xaxis.axis_label = r"Frequency / {:.3f} Mod. 1".format(dnu)
def go_right_by_one_small():
"""Step forward in time by a single cadence"""
existing_value = dnu_slider.value
if existing_value < maxdnu:
dnu_slider.value = existing_value + 0.002
def go_left_by_one_small():
"""Step back in time by a single cadence"""
existing_value = dnu_slider.value
if existing_value > 0:
dnu_slider.value = existing_value - 0.002
def go_right_by_one():
"""Step forward in time by a single cadence"""
existing_value = dnu_slider.value
if existing_value < maxdnu:
dnu_slider.value = existing_value + 0.01
def go_left_by_one():
"""Step back in time by a single cadence"""
existing_value = dnu_slider.value
if existing_value > 0:
dnu_slider.value = existing_value - 0.01
dnu_slider.on_change("value", update)
r_button.on_click(go_right_by_one_small)
l_button.on_click(go_left_by_one_small)
rr_button.on_click(go_right_by_one)
ll_button.on_click(go_left_by_one)
widgets_and_figures = layout(
[fig_tpf, [Spacer(height=20), stretch_slider]],
[
ll_button,
Spacer(width=30),
l_button,
Spacer(width=25),
dnu_slider,
Spacer(width=30),
r_button,
Spacer(width=23),
rr_button,
],
)
doc.add_root(widgets_and_figures)
output_notebook(verbose=False, hide_banner=True)
return show(create_interact_ui, notebook_url=notebook_url)
def estimate_numax(self, method="acf2d", **kwargs):
"""Returns the frequency of the peak of the seismic oscillation modes envelope.
At present, the only method supported is based on using a
2D autocorrelation function (ACF2D). This method is implemented by the
`~lightkurve.seismology.estimate_numax_acf2d` function which accepts
the parameters `numaxs`, `window_width`, and `spacing`.
For details and literature references, please read the detailed
docstring of this function by typing ``lightkurve.seismology.estimate_numax_acf2d?``
in a Python terminal or notebook.
Parameters
----------
method : str
Method to use. Only ``"acf2d"`` is supported at this time.
Returns
-------
numax : `~lightkurve.seismology.SeismologyQuantity`
Numax of the periodogram, including details on the units and method.
"""
method = validate_method(method, supported_methods=["acf2d"])
if method == "acf2d":
from .numax_estimators import estimate_numax_acf2d
result = estimate_numax_acf2d(self.periodogram, **kwargs)
self.numax = result
return result
def diagnose_numax(self, numax=None):
"""Create diagnostic plots showing how numax was estimated."""
numax = self._validate_numax(numax)
return numax.diagnostics_plot_method(numax, self.periodogram)
def estimate_deltanu(self, method="acf2d", numax=None):
"""Returns the average value of the large frequency spacing, DeltaNu,
of the seismic oscillations of the target.
At present, the only method supported is based on using an
autocorrelation function (ACF2D). This method is implemented by the
`~lightkurve.seismology.estimate_deltanu_acf2d` function which requires
the parameter `numax`. For details and literature references, please
read the detailed docstring of this function by typing
``lightkurve.seismology.estimate_deltanu_acf2d?`` in a Python terminal or notebook.
Parameters
----------
method : str
Method to use. Only ``"acf2d"`` is supported at this time.
Returns
-------
deltanu : `~lightkurve.seismology.SeismologyQuantity`
DeltaNu of the periodogram, including details on the units and method.
"""
method = validate_method(method, supported_methods=["acf2d"])
numax = self._validate_numax(numax)
if method == "acf2d":
from .deltanu_estimators import estimate_deltanu_acf2d
result = estimate_deltanu_acf2d(self.periodogram, numax=numax)
self.deltanu = result
return result
def diagnose_deltanu(self, deltanu=None):
"""Create diagnostic plots showing how deltanu was estimated."""
deltanu = self._validate_deltanu(deltanu)
return deltanu.diagnostics_plot_method(deltanu, self.periodogram)
def estimate_radius(self, teff=None, numax=None, deltanu=None):
"""Returns a stellar radius estimate based on the scaling relations.
The two global observable seismic parameters, numax and deltanu, along with
temperature, scale with fundamental stellar properties (Brown et al. 1991;
Kjeldsen & Bedding 1995). These scaling relations can be rearranged to
calculate a stellar radius as
R = Rsol * (numax/numax_sol)(deltanu/deltanusol)^-2(Teff/Teffsol)^0.5
where R is the radius and Teff is the effective temperature, and the suffix
'sol' indicates a solar value. In this method we use the solar values for
numax and deltanu as given in Huber et al. (2011) and for Teff as given in
Prsa et al. (2016).
This code structure borrows from work done in Bellinger et al. (2019), which
also functions as an accessible explanation of seismic scaling relations.
If no value of effective temperature is given, this function will check the
meta data of the `Periodogram` object used to create the `Seismology` object.
These data will often contain an effective temperature from the Kepler Input
Catalogue (KIC, https://ui.adsabs.harvard.edu/abs/2011AJ....142..112B/abstract),
or from the EPIC or TIC for K2 and TESS respectively. The temperature values in these
catalogues are estimated using photometry, and so have large associated uncertainties
(roughly 200 K, see KIC). For better results, use spectroscopic measurements of
temperature, which are often more precise.
NOTE: These scaling relations are scaled to the Sun, and therefore do not
always produce an entirely accurate result for more evolved stars.
Parameters
----------
numax : float
The frequency of maximum power of the seismic mode envelope. If not
given an astropy unit, assumed to be in units of microhertz.
deltanu : float
The frequency spacing between two consecutive overtones of equal radial
degree. If not given an astropy unit, assumed to be in units of
microhertz.
teff : float
The effective temperature of the star. In units of Kelvin.
numax_err : float
Error on numax. Assumed to be same units as numax
deltanu_err : float
Error on deltanu. Assumed to be same units as deltanu
teff_err : float
Error on Teff. Assumed to be same units as Teff.
Returns
-------
radius : `~lightkurve.seismology.SeismologyQuantity`
Stellar radius estimate.
"""
numax = self._validate_numax(numax)
deltanu = self._validate_deltanu(deltanu)
if teff is None:
teff = self.periodogram.meta.get("TEFF")
if teff is None:
raise ValueError(
"You must provide an effective temperature argument (`teff`) to `estimate_radius`, "
"because the Periodogram object does not contain it in its meta data (i.e. `pg.meta['TEFF']` is missing)."
)
else:
log.info(
"Using value for effective temperature from the Kepler Input Catalogue. "
"These temperature values may sometimes differ significantly from modern estimates."
)
pass
else:
pass
result = stellar_estimators.estimate_radius(numax, deltanu, teff)
self.radius = result
return result
def estimate_mass(self, teff=None, numax=None, deltanu=None):
"""Calculates mass using the asteroseismic scaling relations.
The two global observable seismic parameters, numax and deltanu, along with
temperature, scale with fundamental stellar properties (Brown et al. 1991;
Kjeldsen & Bedding 1995). These scaling relations can be rearranged to
calculate a stellar mass as
M = Msol * (numax/numax_sol)^3(deltanu/deltanusol)^-4(Teff/Teffsol)^1.5
where M is the mass and Teff is the effective temperature, and the suffix
'sol' indicates a solar value. In this method we use the solar values for
numax and deltanu as given in Huber et al. (2011) and for Teff as given in
Prsa et al. (2016).
This code structure borrows from work done in Bellinger et al. (2019), which
also functions as an accessible explanation of seismic scaling relations.
If no value of effective temperature is given, this function will check the
meta data of the `Periodogram` object used to create the `Seismology` object.
These data will often contain an effective temperature from the Kepler Input
Catalogue (KIC, https://ui.adsabs.harvard.edu/abs/2011AJ....142..112B/abstract),
or from the EPIC or TIC for K2 and TESS respectively. The temperature values in these
catalogues are estimated using photometry, and so have large associated uncertainties
(roughly 200 K, see KIC). For better results, use spectroscopic measurements of
temperature, which are often more precise.
NOTE: These scaling relations are scaled to the Sun, and therefore do not
always produce an entirely accurate result for more evolved stars.
Parameters
----------
numax : float
The frequency of maximum power of the seismic mode envelope. If not
given an astropy unit, assumed to be in units of microhertz.
deltanu : float
The frequency spacing between two consecutive overtones of equal radial
degree. If not given an astropy unit, assumed to be in units of
microhertz.
teff : float
The effective temperature of the star. In units of Kelvin.
numax_err : float
Error on numax. Assumed to be same units as numax
deltanu_err : float
Error on deltanu. Assumed to be same units as deltanu
teff_err : float
Error on Teff. Assumed to be same units as Teff.
Returns
-------
mass : `~lightkurve.seismology.SeismologyQuantity`
Stellar mass estimate.
"""
numax = self._validate_numax(numax)
deltanu = self._validate_deltanu(deltanu)
if teff is None:
teff = self.periodogram.meta.get("TEFF")
if teff is None:
raise ValueError(
"You must provide an effective temperature argument (`teff`) to `estimate_mass`, "
"because the Periodogram object does not contain it in its meta data (i.e. `pg.meta['TEFF']` is missing)."
)
else:
log.info(
"Using value for effective temperature from the Kepler Input Catalogue. "
"These temperature values may sometimes differ significantly from modern estimates."
)
pass
else:
pass
result = stellar_estimators.estimate_mass(numax, deltanu, teff)
self.mass = result
return result
def estimate_logg(self, teff=None, numax=None):
"""Calculates the log of the surface gravity using the asteroseismic scaling
relations.
The two global observable seismic parameters, numax and deltanu, along with
temperature, scale with fundamental stellar properties (Brown et al. 1991;
Kjeldsen & Bedding 1995). These scaling relations can be rearranged to
calculate a stellar surface gravity as
g = gsol * (numax/numax_sol)(Teff/Teffsol)^0.5
where g is the surface gravity and Teff is the effective temperature,
and the suffix 'sol' indicates a solar value. In this method we use the
solar values for numax as given in Huber et al. (2011) and for Teff as given
in Prsa et al. (2016). The solar surface gravity is calculated from the
astropy constants for solar mass and radius and does not have an error.
The solar surface gravity is returned as log10(g) with units in dex, as is
common in the astrophysics literature.
This code structure borrows from work done in Bellinger et al. (2019), which
also functions as an accessible explanation of seismic scaling relations.
If no value of effective temperature is given, this function will check the
meta data of the `Periodogram` object used to create the `Seismology` object.
These data will often contain an effective temperature from the Kepler Input
Catalogue (KIC, https://ui.adsabs.harvard.edu/abs/2011AJ....142..112B/abstract),
or from the EPIC or TIC for K2 and TESS respectively. The temperature values in these
catalogues are estimated using photometry, and so have large associated uncertainties
(roughly 200 K, see KIC). For better results, use spectroscopic measurements of
temperature, which are often more precise.
NOTE: These scaling relations are scaled to the Sun, and therefore do not
always produce an entirely accurate result for more evolved stars.
Parameters
----------
numax : float
The frequency of maximum power of the seismic mode envelope. If not
given an astropy unit, assumed to be in units of microhertz.
teff : float
The effective temperature of the star. In units of Kelvin.
numax_err : float
Error on numax. Assumed to be same units as numax
teff_err : float
Error on teff. Assumed to be same units as teff.
Returns
-------
logg : `~lightkurve.seismology.SeismologyQuantity`
Stellar surface gravity estimate.
"""
numax = self._validate_numax(numax)
if teff is None:
teff = self.periodogram.meta.get("TEFF")
if teff is None:
raise ValueError(
"You must provide an effective temperature argument (`teff`) to `estimate_logg`, "
"because the Periodogram object does not contain it in its meta data (i.e. `pg.meta['TEFF']` is missing)."
)
else:
log.info(
"Using value for effective temperature from the Kepler Input Catalogue. "
"These temperature values may sometimes differ significantly from modern estimates."
)
pass
else:
pass
result = stellar_estimators.estimate_logg(numax, teff)
self.logg = result
return result
```
#### File: tests/io/test_kepseismic.py
```python
import pytest
from astropy.io import fits
import numpy as np
from lightkurve.io.kepseismic import read_kepseismic_lightcurve
from lightkurve.io.detect import detect_filetype
@pytest.mark.remote_data
def test_detect_kepseismic():
"""Can we detect the correct format for KEPSEISMIC files?"""
url = "https://archive.stsci.edu/hlsps/kepseismic/001200000/92147/20d-filter/hlsp_kepseismic_kepler_phot_kplr001292147-20d_kepler_v1_cor-filt-inp.fits"
f = fits.open(url)
assert detect_filetype(f) == "KEPSEISMIC"
@pytest.mark.remote_data
def test_read_kepseismic():
"""Can we read KEPSEISMIC files?"""
url = "https://archive.stsci.edu/hlsps/kepseismic/001200000/92147/20d-filter/hlsp_kepseismic_kepler_phot_kplr001292147-20d_kepler_v1_cor-filt-inp.fits"
with fits.open(url, mode="readonly") as hdulist:
fluxes = hdulist[1].data["FLUX"]
lc = read_kepseismic_lightcurve(url)
flux_lc = lc.flux.value
# print(flux_lc, fluxes)
assert np.sum(fluxes) == np.sum(flux_lc)
```
#### File: lightkurve/tests/test_correctors.py
```python
import pytest
@pytest.mark.remote_data
def test_to_corrector():
"""Does the tpf.to_corrector('pld') convenience method work?"""
from lightkurve import KeplerTargetPixelFile
from .test_targetpixelfile import TABBY_TPF
tpf = KeplerTargetPixelFile(TABBY_TPF)
lc = tpf.to_corrector("pld").correct()
assert len(lc.flux) == len(tpf.time)
```
#### File: lightkurve/tests/test_units.py
```python
import pytest
import lightkurve as lk # necessary to enable the units tested below
from astropy import units as u
def test_custom_units():
"""Are ppt, ppm, and percent enabled AstroPy units?"""
u.Unit("ppt") # custom unit defined in lightkurve.units
u.Unit("ppm") # not enabled by default; enabled in lightkurve.units
u.Unit("percent") # standard AstroPy unit
@pytest.mark.remote_data
def test_tasoc_ppm_units():
"""Regression test for #956."""
lc = lk.search_lightcurve('HV 2112', author='TASOC', sector=1, exptime=1800).download()
assert lc['flux_corr'].unit == "ppm"
assert "Unrecognized" not in repr(lc['flux_corr'].unit)
``` |
{
"source": "jorgemarpa/psfmachine",
"score": 2
} |
#### File: src/psfmachine/ffi.py
```python
import os
import logging
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from astropy.io import fits
from astropy.stats import SigmaClip
from astropy.time import Time
from astropy.wcs import WCS
import astropy.units as u
from astropy.stats import sigma_clip
from photutils import Background2D, MedianBackground, BkgZoomInterpolator
# from . import PACKAGEDIR
from .utils import do_tiled_query, _make_A_cartesian, solve_linear_model
from .machine import Machine
from .version import __version__
log = logging.getLogger(__name__)
__all__ = ["FFIMachine"]
class FFIMachine(Machine):
"""
Subclass of Machine for working with FFI data.
"""
def __init__(
self,
time,
flux,
flux_err,
ra,
dec,
sources,
column,
row,
wcs=None,
limit_radius=32.0,
n_r_knots=10,
n_phi_knots=15,
n_time_knots=10,
n_time_points=200,
time_radius=8,
cut_r=6,
rmin=1,
rmax=16,
meta=None,
):
"""
Class to work with FFI data.
Parameters
----------
time: numpy.ndarray
Time values in JD
flux: numpy.ndarray
Flux values at each pixels and times in units of electrons / sec. Has shape
[n_times, n_rows, n_columns]
flux_err: numpy.ndarray
Flux error values at each pixels and times in units of electrons / sec.
Has shape [n_times, n_rows, n_columns]
ra: numpy.ndarray
Right Ascension coordinate of each pixel
dec: numpy.ndarray
Declination coordinate of each pixel
sources: pandas.DataFrame
DataFrame with source present in the images
column: np.ndarray
Data array containing the "columns" of the detector that each pixel is on.
row: np.ndarray
Data array containing the "rows" of the detector that each pixel is on.
wcs : astropy.wcs
World coordinates system solution for the FFI. Used for plotting.
meta : dictionary
Meta data information related to the FFI
Attributes
----------
All attributes inherited from Machine.
meta : dictionary
Meta data information related to the FFI
wcs : astropy.wcs
World coordinates system solution for the FFI. Used for plotting.
flux_2d : numpy.ndarray
2D image representation of the FFI, used for plotting. Has shape [n_times,
image_height, image_width]
image_shape : tuple
Shape of 2D image
"""
self.column = column
self.row = row
self.ra = ra
self.dec = dec
# keep 2d image for easy plotting
self.flux_2d = flux
self.image_shape = flux.shape[1:]
# reshape flux and flux_err as [ntimes, npix]
self.flux = flux.reshape(flux.shape[0], -1)
self.flux_err = flux_err.reshape(flux_err.shape[0], -1)
self.sources = sources
# remove background and mask bright/saturated pixels
# these steps need to be done before `machine` init, so sparse delta
# and flux arrays have the same shape
if not meta["BACKAPP"]:
self._remove_background()
self._mask_pixels()
# init `machine` object
super().__init__(
time,
self.flux,
self.flux_err,
self.ra,
self.dec,
self.sources,
self.column,
self.row,
n_r_knots=n_r_knots,
n_phi_knots=n_phi_knots,
n_time_knots=n_time_knots,
n_time_points=n_time_points,
time_radius=time_radius,
cut_r=cut_r,
rmin=rmin,
rmax=rmax,
# hardcoded to work for Kepler and TESS FFIs
sparse_dist_lim=40 if meta["TELESCOP"] == "Kepler" else 210,
)
self.meta = meta
self.wcs = wcs
def __repr__(self):
return f"FFIMachine (N sources, N times, N pixels): {self.shape}"
@staticmethod
def from_file(
fname,
extension=1,
cutout_size=None,
cutout_origin=[0, 0],
correct_offsets=False,
plot_offsets=False,
**kwargs,
):
"""
Reads data from files and initiates a new object of FFIMachine class.
Parameters
----------
fname : str or list of strings
File name or list of file names of the FFI files.
extension : int
Number of HDU extension to be used, for Kepler FFIs this corresponds to the
channel number. For TESS FFIs, it corresponds to the HDU extension containing
the image data (1).
cutout_size : int
Size of the cutout in pixels, assumed to be squared
cutout_origin : tuple of ints
Origin pixel coordinates where the cutout starts. Follows matrix indexing
correct_offsets : boolean
Check and correct for coordinate offset due to wrong WCS. It is off by
default.
plot_offsets : boolean
Create diagnostic plot for coordinate offset correction.
**kwargs : dictionary
Keyword arguments that defines shape model in a `machine` class object.
See `psfmachine.Machine` for details.
Returns
-------
FFIMachine : Machine object
A Machine class object built from the FFI.
"""
# load FITS files and parse arrays
(
wcs,
time,
flux,
flux_err,
ra,
dec,
column,
row,
metadata,
) = _load_file(fname, extension=extension)
# create cutouts if asked
if cutout_size is not None:
flux, flux_err, ra, dec, column, row = _do_image_cutout(
flux,
flux_err,
ra,
dec,
column,
row,
cutout_size=cutout_size,
cutout_origin=cutout_origin,
)
# hardcoded: the grid size to do the Gaia tiled query. This is different for
# cutouts and full channel. TESS and Kepler also need different grid sizes.
if metadata["TELESCOP"] == "Kepler":
ngrid = (2, 2) if flux.shape[1] <= 500 else (4, 4)
else:
ngrid = (5, 5) if flux.shape[1] < 500 else (10, 10)
# query Gaia and clean sources.
sources = _get_sources(
ra,
dec,
wcs,
magnitude_limit=18 if metadata["TELESCOP"] == "Kepler" else 15,
epoch=time.jyear.mean(),
ngrid=ngrid,
dr=3,
img_limits=[[row.min(), row.max()], [column.min(), column.max()]],
)
# correct coordinate offset if necessary.
if correct_offsets:
ra, dec, sources = _check_coordinate_offsets(
ra,
dec,
row,
column,
flux[0],
sources,
wcs,
plot=plot_offsets,
cutout_size=100,
)
return FFIMachine(
time.jd,
flux,
flux_err,
ra.ravel(),
dec.ravel(),
sources,
column.ravel(),
row.ravel(),
wcs=wcs,
meta=metadata,
**kwargs,
)
def save_shape_model(self, output=None):
"""
Saves the weights of a PRF fit to disk.
Parameters
----------
output : str, None
Output file name. If None, one will be generated.
"""
# assign a file name
if output is None:
output = "./%s_ffi_shape_model_ext%s_q%s.fits" % (
self.meta["MISSION"],
str(self.meta["EXTENSION"]),
str(self.meta["QUARTER"]),
)
log.info(f"File name: {output}")
# create FITS binary table to save the model params
table = fits.BinTableHDU.from_columns(
[fits.Column(name="psf_w", array=self.psf_w, format="D")]
)
# include metadata and descriptions
table.header["object"] = ("PRF shape", "PRF shape parameters")
table.header["datatype"] = ("FFI", "Type of data used to fit shape model")
table.header["origin"] = ("PSFmachine.FFIMachine", "Software of origin")
table.header["version"] = (__version__, "Software version")
table.header["TELESCOP"] = (self.meta["TELESCOP"], "Telescope name")
table.header["mission"] = (self.meta["MISSION"], "Mission name")
table.header["quarter"] = (
self.meta["QUARTER"],
"Quarter/Campaign/Sector of observations",
)
table.header["channel"] = (self.meta["EXTENSION"], "Channel/Camera-CCD output")
table.header["MJD-OBS"] = (self.time[0], "MJD of observation")
table.header["n_rknots"] = (
self.n_r_knots,
"Number of knots for spline basis in radial axis",
)
table.header["n_pknots"] = (
self.n_phi_knots,
"Number of knots for spline basis in angle axis",
)
table.header["rmin"] = (self.rmin, "Minimum value for knot spacing")
table.header["rmax"] = (self.rmax, "Maximum value for knot spacing")
table.header["cut_r"] = (
self.cut_r,
"Radial distance to remove angle dependency",
)
# spline degree is hardcoded in `_make_A_polar` implementation.
table.header["spln_deg"] = (3, "Degree of the spline basis")
table.writeto(output, checksum=True, overwrite=True)
def load_shape_model(self, input=None, plot=False):
"""
Loads a PRF model from disk
Parameters
----------
input : str, None
Input file name. If None, one will be generated.
plot : boolean
Plot the PRF mean model loaded from disk
"""
if input is None:
raise NotImplementedError(
"Loading default model not implemented. Please provide input file."
)
# check if file exists and is the right format
if not os.path.isfile(input):
raise FileNotFoundError("No shape file: %s" % input)
if not input.endswith(".fits"):
# should use a custom exception for wrong file format
raise ValueError("File format not supported. Please provide a FITS file.")
# create source mask and uncontaminated pixel mask
self._get_source_mask()
self._get_uncontaminated_pixel_mask()
# open file
hdu = fits.open(input)
# check if shape parameters are for correct mission, quarter, and channel
if hdu[1].header["MISSION"] != self.meta["MISSION"]:
raise ValueError(
"Wrong shape model: file is for mission '%s',"
% (hdu[1].header["MISSION"])
+ " it should be '%s'." % (self.meta["MISSION"])
)
if hdu[1].header["QUARTER"] != self.meta["QUARTER"]:
raise ValueError(
"Wrong shape model: file is for quarter %i,"
% (hdu[1].header["QUARTER"])
+ " it should be %i." % (self.meta["QUARTER"])
)
if hdu[1].header["CHANNEL"] != self.meta["EXTENSION"]:
raise ValueError(
"Wrong shape model: file is for channel %i,"
% (hdu[1].header["CHANNEL"])
+ " it should be %i." % (self.meta["EXTENSION"])
)
# load model hyperparameters and weights
self.n_r_knots = hdu[1].header["n_rknots"]
self.n_phi_knots = hdu[1].header["n_pknots"]
self.rmin = hdu[1].header["rmin"]
self.rmax = hdu[1].header["rmax"]
self.cut_r = hdu[1].header["cut_r"]
self.psf_w = hdu[1].data["psf_w"]
del hdu
# create mean model, but PRF shapes from FFI are in pixels! and TPFMachine
# work in arcseconds
self._get_mean_model()
# remove background pixels and recreate mean model
self._update_source_mask_remove_bkg_pixels()
if plot:
return self.plot_shape_model()
return
def save_flux_values(self, output=None, format="fits"):
"""
Saves the flux values of all sources to a file. For FITS output files a multi-
extension file is created with each extension containing a single cadence/frame.
Parameters
----------
output : str, None
Output file name. If None, one will be generated.
format : str
Format of the output file. Only FITS is supported for now.
"""
# check if model was fitted
if not hasattr(self, "ws"):
self.fit_model(fit_va=False)
# assign default output file name
if output is None:
output = "./%s_source_catalog_ext%s_q%s_mjd%s.fits" % (
self.meta["MISSION"],
str(self.meta["EXTENSION"]),
str(self.meta["QUARTER"]),
str(self.time[0]),
)
log.info(f"File name: {output}")
primary_hdu = fits.PrimaryHDU()
primary_hdu.header["object"] = ("Photometric Catalog", "Photometry")
primary_hdu.header["origin"] = ("PSFmachine.FFIMachine", "Software of origin")
primary_hdu.header["version"] = (__version__, "Software version")
primary_hdu.header["TELESCOP"] = (self.meta["TELESCOP"], "Telescope")
primary_hdu.header["mission"] = (self.meta["MISSION"], "Mission name")
primary_hdu.header["DCT_TYPE"] = (self.meta["DCT_TYPE"], "Data type")
primary_hdu.header["quarter"] = (
self.meta["QUARTER"],
"Quarter/Campaign/Sector of observations",
)
primary_hdu.header["channel"] = (
self.meta["EXTENSION"],
"Channel/Camera-CCD output",
)
primary_hdu.header["aperture"] = ("PSF", "Type of photometry")
primary_hdu.header["N_OBS"] = (self.time.shape[0], "Number of cadences")
primary_hdu.header["DATSETNM"] = (self.meta["DATSETNM"], "data set name")
primary_hdu.header["RADESYS"] = (
self.meta["RADESYS"],
"reference frame of celestial coordinates",
)
primary_hdu.header["EQUINOX"] = (
self.meta["EQUINOX"],
"equinox of celestial coordinate system",
)
hdul = fits.HDUList([primary_hdu])
# create bin table with photometry
for k in range(self.time.shape[0]):
id_col = fits.Column(
name="gaia_id", array=self.sources.designation, format="29A"
)
ra_col = fits.Column(
name="ra", array=self.sources.ra, format="D", unit="deg"
)
dec_col = fits.Column(
name="dec", array=self.sources.dec, format="D", unit="deg"
)
flux_col = fits.Column(
name="psf_flux", array=self.ws[k, :], format="D", unit="-e/s"
)
flux_err_col = fits.Column(
name="psf_flux_err", array=self.werrs[k, :], format="D", unit="-e/s"
)
table_hdu = fits.BinTableHDU.from_columns(
[id_col, ra_col, dec_col, flux_col, flux_err_col]
)
table_hdu.header["EXTNAME"] = "CATALOG"
table_hdu.header["MJD-OBS"] = (self.time[k], "MJD of observation")
hdul.append(table_hdu)
hdul.writeto(output, checksum=True, overwrite=True)
return
def _remove_background(self, mask=None):
"""
Background removal. It models the background using a median estimator, rejects
flux values with sigma clipping. It modifies the attributes `flux` and
`flux_2d`. The background model is stored in the `background_model` attribute.
Parameters
----------
mask : numpy.ndarray of booleans
Mask to reject pixels containing sources. Default None.
"""
# model background for all cadences
self.background_model = np.array(
[
Background2D(
flux_2d,
mask=mask,
box_size=(64, 50),
filter_size=15,
exclude_percentile=20,
sigma_clip=SigmaClip(sigma=3.0, maxiters=5),
bkg_estimator=MedianBackground(),
interpolator=BkgZoomInterpolator(order=3),
).background
for flux_2d in self.flux_2d
]
)
# subtract background
self.flux_2d -= self.background_model
# flatten flux image
self.flux = self.flux_2d.reshape(self.flux_2d.shape[0], -1)
return
def _saturated_pixels_mask(self, saturation_limit=1.5e5, tolerance=3):
"""
Finds and removes saturated pixels, including bleed columns.
Parameters
----------
saturation_limit : float
Saturation limit at which pixels are removed.
tolerance : float
Number of pixels masked around each saturated pixel to remove bleeding.
Returns
-------
mask : numpy.ndarray
Boolean mask with rejected pixels
"""
# Which pixels are saturated
# this nanpercentile takes forever to compute for a single cadence FFI
# saturated = np.nanpercentile(self.flux, 99, axis=0)
# assume we'll use ffi for 1 single cadence
saturated = np.where(self.flux > saturation_limit)[1]
# Find bad pixels, including allowance for a bleed column.
bad_pixels = np.vstack(
[
np.hstack(
[
self.column[saturated] + idx
for idx in np.arange(-tolerance, tolerance)
]
),
np.hstack(
[self.row[saturated] for idx in np.arange(-tolerance, tolerance)]
),
]
).T
# Find unique row/column combinations
bad_pixels = bad_pixels[
np.unique(["".join(s) for s in bad_pixels.astype(str)], return_index=True)[
1
]
]
# Build a mask of saturated pixels
m = np.zeros(len(self.column), bool)
# this works for FFIs but is slow
for p in bad_pixels:
m |= (self.column == p[0]) & (self.row == p[1])
saturated = (self.flux > saturation_limit)[0]
return m
def _bright_sources_mask(self, magnitude_limit=8, tolerance=30):
"""
Finds and masks pixels with halos produced by bright stars (e.g. < 8 mag).
Parameters
----------
magnitude_limit : float
Magnitude limit at which bright sources are identified.
tolerance : float
Radius limit (in pixels) at which pixels around bright sources are masked.
Returns
-------
mask : numpy.ndarray
Boolean mask with rejected pixels
"""
bright_mask = self.sources["phot_g_mean_mag"] <= magnitude_limit
mask = [
np.hypot(self.column - s.column, self.row - s.row) < tolerance
for _, s in self.sources[bright_mask].iterrows()
]
mask = np.array(mask).sum(axis=0) > 0
return mask
def _mask_pixels(self, pixel_saturation_limit=1.2e5, magnitude_bright_limit=8):
"""
Mask saturated pixels and halo/diffraction pattern from bright sources.
Parameters
----------
pixel_saturation_limit: float
Flux value at which pixels saturate.
magnitude_bright_limit: float
Magnitude limit for sources at which pixels are masked.
"""
# mask saturated pixels.
self.non_sat_pixel_mask = ~self._saturated_pixels_mask(
saturation_limit=pixel_saturation_limit
)
self.non_bright_source_mask = ~self._bright_sources_mask(
magnitude_limit=magnitude_bright_limit
)
good_pixels = self.non_sat_pixel_mask & self.non_bright_source_mask
self.column = self.column[good_pixels]
self.row = self.row[good_pixels]
self.ra = self.ra[good_pixels]
self.dec = self.dec[good_pixels]
self.flux = self.flux[:, good_pixels]
self.flux_err = self.flux_err[:, good_pixels]
return
def residuals(self, plot=False, zoom=False, metric="residuals"):
"""
Get the residuals (model - image) and compute statistics. It creates a model
of the full image using the `mean_model` and the weights computed when fitting
the shape model.
Parameters
----------
plot : bool
Do plotting
zoom : bool
If plot is True then zoom into a section of the image for better
visualization.
metric : string
Type of metric used to plot. Default is "residuals", "chi2" is also
available.
Return
------
fig : matplotlib figure
Figure
"""
if not hasattr(self, "ws"):
self.fit_model(fit_va=False)
# evaluate mean model
ffi_model = self.mean_model.T.dot(self.ws[0])
ffi_model_err = self.mean_model.T.dot(self.werrs[0])
# compute residuals
residuals = ffi_model - self.flux[0]
weighted_chi = (ffi_model - self.flux[0]) ** 2 / ffi_model_err
# mask background
source_mask = ffi_model != 0.0
# rms
self.rms = np.sqrt((residuals[source_mask] ** 2).mean())
self.frac_esidual_median = np.median(
residuals[source_mask] / self.flux[0][source_mask]
)
self.frac_esidual_std = np.std(
residuals[source_mask] / self.flux[0][source_mask]
)
if plot:
fig, ax = plt.subplots(2, 2, figsize=(15, 15))
ax[0, 0].scatter(
self.column,
self.row,
c=self.flux[0],
marker="s",
s=7.5 if zoom else 1,
norm=colors.SymLogNorm(linthresh=500, vmin=0, vmax=5000, base=10),
)
ax[0, 0].set_aspect("equal", adjustable="box")
ax[0, 1].scatter(
self.column,
self.row,
c=ffi_model,
marker="s",
s=7.5 if zoom else 1,
norm=colors.SymLogNorm(linthresh=500, vmin=0, vmax=5000, base=10),
)
ax[0, 1].set_aspect("equal", adjustable="box")
if metric == "residuals":
to_plot = residuals
norm = colors.SymLogNorm(linthresh=500, vmin=-5000, vmax=5000, base=10)
cmap = "RdBu"
elif metric == "chi2":
to_plot = weighted_chi
norm = colors.LogNorm(vmin=1, vmax=5000)
cmap = "viridis"
else:
raise ValueError("wrong type of metric")
cbar = ax[1, 0].scatter(
self.column[source_mask],
self.row[source_mask],
c=to_plot[source_mask],
marker="s",
s=7.5 if zoom else 1,
cmap=cmap,
norm=norm,
)
ax[1, 0].set_aspect("equal", adjustable="box")
plt.colorbar(
cbar, ax=ax[1, 0], label=r"Flux ($e^{-}s^{-1}$)", fraction=0.042
)
ax[1, 1].hist(
residuals[source_mask] / self.flux[0][source_mask],
bins=50,
log=True,
label=(
"RMS (model - data) = %.3f" % self.rms
+ "\nMedian = %.3f" % self.frac_esidual_median
+ "\nSTD = %.3f" % self.frac_esidual_std
),
)
ax[1, 1].legend(loc="best")
ax[0, 0].set_ylabel("Pixel Row Number")
ax[0, 0].set_xlabel("Pixel Column Number")
ax[0, 1].set_xlabel("Pixel Column Number")
ax[1, 0].set_ylabel("Pixel Row Number")
ax[1, 0].set_xlabel("Pixel Column Number")
ax[1, 1].set_xlabel("(model - data) / data")
ax[1, 0].set_title(metric)
if zoom:
ax[0, 0].set_xlim(self.column.min(), self.column.min() + 100)
ax[0, 0].set_ylim(self.row.min(), self.row.min() + 100)
ax[0, 1].set_xlim(self.column.min(), self.column.min() + 100)
ax[0, 1].set_ylim(self.row.min(), self.row.min() + 100)
ax[1, 0].set_xlim(self.column.min(), self.column.min() + 100)
ax[1, 0].set_ylim(self.row.min(), self.row.min() + 100)
return fig
return
def plot_image(self, ax=None, sources=False):
"""
Function to plot the Full Frame Image and Gaia sources.
Parameters
----------
ax : matplotlib.axes
Matplotlib axis can be provided; if not, one will be created and returned.
sources : boolean
Whether to overplot or not the source catalog.
Returns
-------
ax : matplotlib.axes
Matplotlib axis with the figure
"""
if ax is None:
fig, ax = plt.subplots(1, figsize=(10, 10))
ax = plt.subplot(projection=self.wcs)
row_2d, col_2d = np.mgrid[
self.row.min() : self.row.max() + 1,
self.column.min() : self.column.max() + 1,
]
im = ax.pcolormesh(
col_2d,
row_2d,
self.flux_2d[0],
cmap=plt.cm.viridis,
shading="nearest",
# origin="lower",
norm=colors.SymLogNorm(linthresh=200, vmin=0, vmax=2000, base=10),
rasterized=True,
)
plt.colorbar(im, ax=ax, label=r"Flux ($e^{-}s^{-1}$)", fraction=0.042)
ax.set_title(
"%s FFI Ch/CCD %s MJD %f"
% (self.meta["MISSION"], self.meta["EXTENSION"], self.time[0])
)
ax.set_xlabel("R.A. [hh:mm]")
ax.set_ylabel("Decl. [deg]")
ax.grid(True, which="major", axis="both", ls="-", color="w", alpha=0.7)
ax.set_xlim(self.column.min() - 2, self.column.max() + 2)
ax.set_ylim(self.row.min() - 2, self.row.max() + 2)
ax.set_aspect("equal", adjustable="box")
if sources:
ax.scatter(
self.sources.column,
self.sources.row,
facecolors="none",
edgecolors="r",
linewidths=0.5 if self.sources.shape[0] > 1000 else 1,
alpha=0.9,
)
return ax
def plot_pixel_masks(self, ax=None):
"""
Function to plot the mask used to reject saturated and bright pixels.
Parameters
----------
ax : matplotlib.axes
Matplotlib axis can be provided; if not, one will be created and returned.
Returns
-------
ax : matplotlib.axes
Matplotlib axis with the figure.
"""
row_2d, col_2d = np.mgrid[: self.flux_2d.shape[1], : self.flux_2d.shape[2]]
if ax is None:
fig, ax = plt.subplots(1, figsize=(10, 10))
if hasattr(self, "non_bright_source_mask"):
ax.scatter(
col_2d.ravel()[~self.non_bright_source_mask],
row_2d.ravel()[~self.non_bright_source_mask],
c="y",
marker="s",
s=1,
label="bright mask",
)
if hasattr(self, "non_sat_pixel_mask"):
ax.scatter(
col_2d.ravel()[~self.non_sat_pixel_mask],
row_2d.ravel()[~self.non_sat_pixel_mask],
c="r",
marker="s",
s=1,
label="saturated pixels",
)
ax.legend(loc="best")
ax.set_xlabel("Column Pixel Number")
ax.set_ylabel("Row Pixel Number")
ax.set_title("Pixel Mask")
return ax
def _load_file(fname, extension=1):
"""
Helper function to load FFI files and parse data. It parses the FITS files to
extract the image data and metadata. It checks that all files provided in fname
correspond to FFIs from the same mission.
Parameters
----------
fname : string or list of strings
Name of the FFI files
extension : int
Number of HDU extension to use, for Kepler FFIs this corresponds to the channel
Returns
-------
wcs : astropy.wcs
World coordinates system solution for the FFI. Used to convert RA, Dec to pixels
time : numpy.array
Array with time values in MJD
flux_2d : numpy.ndarray
Array with 2D (image) representation of flux values
flux_err_2d : numpy.ndarray
Array with 2D (image) representation of flux errors
ra_2d : numpy.ndarray
Array with 2D (image) representation of flux RA
dec_2d : numpy.ndarray
Array with 2D (image) representation of flux Dec
col_2d : numpy.ndarray
Array with 2D (image) representation of pixel column
row_2d : numpy.ndarray
Array with 2D (image) representation of pixel row
meta : dict
Dictionary with metadata
"""
if not isinstance(fname, list):
fname = np.sort([fname])
imgs = []
times = []
telescopes = []
dct_types = []
quarters = []
extensions = []
for i, f in enumerate(fname):
if not os.path.isfile(f):
raise FileNotFoundError("FFI calibrated fits file does not exist: ", f)
hdul = fits.open(f)
header = hdul[0].header
telescopes.append(header["TELESCOP"])
# kepler
if f.split("/")[-1].startswith("kplr"):
dct_types.append(header["DCT_TYPE"])
quarters.append(header["QUARTER"])
extensions.append(hdul[extension].header["CHANNEL"])
hdr = hdul[extension].header
times.append((hdr["MJDEND"] + hdr["MJDSTART"]) / 2)
imgs.append(hdul[extension].data)
# K2
elif f.split("/")[-1].startswith("ktwo"):
dct_types.append(header["DCT_TYPE"])
quarters.append(header["CAMPAIGN"])
extensions.append(hdul[extension].header["CHANNEL"])
hdr = hdul[extension].header
times.append((hdr["MJDEND"] + hdr["MJDSTART"]) / 2)
imgs.append(hdul[extension].data)
# TESS
elif f.split("/")[-1].startswith("tess"):
dct_types.append(header["CREATOR"].split(" ")[-1].upper())
quarters.append(f.split("/")[-1].split("-")[1])
hdr = hdul[1].header
times.append((hdr["TSTART"] + hdr["TSTOP"]) / 2)
imgs.append(hdul[1].data)
extensions.append("%i.%i" % (hdr["CAMERA"], hdr["CCD"]))
# raise NotImplementedError
else:
raise ValueError("FFI is not from Kepler or TESS.")
if i == 0:
wcs = WCS(hdr)
# check for integrity of files, same telescope, all FFIs and same quarter/campaign
if len(set(telescopes)) != 1:
raise ValueError("All FFIs must be from same telescope")
if len(set(dct_types)) != 1 or "FFI" not in set(dct_types).pop():
raise ValueError("All images must be FFIs")
if len(set(quarters)) != 1:
raise ValueError("All FFIs must be of same quarter/campaign/sector.")
# collect meta data, get everything from one header.
attrs = [
"TELESCOP",
"INSTRUME",
"MISSION",
"DATSETNM",
]
meta = {k: header[k] for k in attrs if k in header.keys()}
attrs = [
"RADESYS",
"EQUINOX",
"BACKAPP",
]
meta.update({k: hdr[k] for k in attrs if k in hdr.keys()})
# we use "EXTENSION" to combine channel/camera keywords and "QUARTERS" to refer to
# Kepler quarters and TESS campaigns
meta.update({"EXTENSION": extensions[0], "QUARTER": quarters[0], "DCT_TYPE": "FFI"})
if "MISSION" not in meta.keys():
meta["MISSION"] = meta["TELESCOP"]
# sort by time in case fnames aren't already sorted
times = Time(times, format="mjd" if meta["TELESCOP"] == "Kepler" else "btjd")
tdx = np.argsort(times)
times = times[tdx]
# remove overscan of image
row_2d, col_2d, flux_2d = _remove_overscan(meta["TELESCOP"], np.array(imgs)[tdx])
# Kepler FFIs have uncertainty maps stored in separate files, so we use Poisson noise as
# the flux error for now.
flux_err_2d = np.sqrt(np.abs(flux_2d))
# convert to RA and Dec
ra, dec = wcs.all_pix2world(np.vstack([col_2d.ravel(), row_2d.ravel()]).T, 0.0).T
# some Kepler Channels/Modules have image data but no WCS (e.g. ch 5-8). If the WCS
# doesn't exist or is wrong, it could produce RA Dec values out of bound.
if ra.min() < 0.0 or ra.max() > 360 or dec.min() < -90 or dec.max() > 90:
raise ValueError("WCS leads to out-of-bounds RA and Dec coordinates.")
ra_2d = ra.reshape(flux_2d.shape[1:])
dec_2d = dec.reshape(flux_2d.shape[1:])
del hdul, header, hdr, imgs, ra, dec
return (
wcs,
times,
flux_2d,
flux_err_2d,
ra_2d,
dec_2d,
col_2d,
row_2d,
meta,
)
def _get_sources(ra, dec, wcs, img_limits=[[0, 0], [0, 0]], **kwargs):
"""
Query Gaia catalog in a tiled manner and clean sources off sensor.
Parameters
----------
ra : numpy.ndarray
Data array with pixel RA values used to create the grid for tiled query and
compute centers and radius of cone search
dec : numpy.ndarray
Data array with pixel Dec values used to create the grid for tiled query and
compute centers and radius of cone search
wcs : astropy.wcs
World coordinates system solution for the FFI. Used to convert RA, Dec to pixels
img_limits : list
Image limits in pixel numbers to remove sources outside the CCD.
**kwargs
Keyword arguments to be passed to `psfmachine.utils.do_tiled_query()`.
Returns
-------
sources : pandas.DataFrame
Data Frame with query result
"""
sources = do_tiled_query(ra, dec, **kwargs)
sources["column"], sources["row"] = wcs.all_world2pix(
sources.loc[:, ["ra", "dec"]].values, 0.0
).T
# remove sources outside the CCD with a tolerance
tolerance = 0
inside = (
(sources.row > img_limits[0][0] - tolerance)
& (sources.row < img_limits[0][1] + tolerance)
& (sources.column > img_limits[1][0] - tolerance)
& (sources.column < img_limits[1][1] + tolerance)
)
sources = sources[inside].reset_index(drop=True)
return sources
def _do_image_cutout(
flux, flux_err, ra, dec, column, row, cutout_size=100, cutout_origin=[0, 0]
):
"""
Creates a cutout of the full image. Return data arrays corresponding to the cutout.
Parameters
----------
flux : numpy.ndarray
Data array with Flux values, correspond to full size image.
flux_err : numpy.ndarray
Data array with Flux errors values, correspond to full size image.
ra : numpy.ndarray
Data array with RA values, correspond to full size image.
dec : numpy.ndarray
Data array with Dec values, correspond to full size image.
column : numpy.ndarray
Data array with pixel column values, correspond to full size image.
row : numpy.ndarray
Data array with pixel row values, correspond to full size image.
cutout_size : int
Size in pixels of the cutout, assumed to be square. Default is 100.
cutout_origin : tuple of ints
Origin of the cutout following matrix indexing. Default is [0 ,0].
Returns
-------
flux : numpy.ndarray
Data array with Flux values of the cutout.
flux_err : numpy.ndarray
Data array with Flux errors values of the cutout.
ra : numpy.ndarray
Data array with RA values of the cutout.
dec : numpy.ndarray
Data array with Dec values of the cutout.
column : numpy.ndarray
Data array with pixel column values of the cutout.
row : numpy.ndarray
Data array with pixel row values of the cutout.
"""
if cutout_size + cutout_origin[0] < np.minimum(*flux.shape[1:]):
column = column[
cutout_origin[0] : cutout_origin[0] + cutout_size,
cutout_origin[1] : cutout_origin[1] + cutout_size,
]
row = row[
cutout_origin[0] : cutout_origin[0] + cutout_size,
cutout_origin[1] : cutout_origin[1] + cutout_size,
]
flux = flux[
:,
cutout_origin[0] : cutout_origin[0] + cutout_size,
cutout_origin[1] : cutout_origin[1] + cutout_size,
]
flux_err = flux_err[
:,
cutout_origin[0] : cutout_origin[0] + cutout_size,
cutout_origin[1] : cutout_origin[1] + cutout_size,
]
ra = ra[
cutout_origin[0] : cutout_origin[0] + cutout_size,
cutout_origin[1] : cutout_origin[1] + cutout_size,
]
dec = dec[
cutout_origin[0] : cutout_origin[0] + cutout_size,
cutout_origin[1] : cutout_origin[1] + cutout_size,
]
else:
raise ValueError("Cutout size is larger than image shape ", flux.shape)
return flux, flux_err, ra, dec, column, row
def _remove_overscan(telescope, imgs):
"""
Removes overscan of the CCD. Return the image data with overscan columns and rows
removed, also return 2D data arrays with pixel columns and row values.
Parameters
----------
telescope : string
Name of the telescope.
imgs : numpy.ndarray
Array of 2D images. Has shape [n_times, image_height, image_width].
Returns
-------
row_2d : numpy.ndarray
Data array with pixel row values
col_2d : numpy.ndarray
Data array with pixel column values
flux_2d : numpy.ndarray
Data array with flux values
"""
if telescope == "Kepler":
# CCD overscan for Kepler
r_min = 20
r_max = 1044
c_min = 12
c_max = 1112
elif telescope == "TESS":
# CCD overscan for TESS
r_min = 0
r_max = 2048
c_min = 45
c_max = 2093
else:
raise TypeError("File is not from Kepler or TESS mission")
# remove overscan
row_2d, col_2d = np.mgrid[: imgs[0].shape[0], : imgs[0].shape[1]]
col_2d = col_2d[r_min:r_max, c_min:c_max]
row_2d = row_2d[r_min:r_max, c_min:c_max]
flux_2d = imgs[:, r_min:r_max, c_min:c_max]
return row_2d, col_2d, flux_2d
def _compute_coordinate_offset(ra, dec, flux, sources, plot=True):
"""
Compute coordinate offsets if the RA Dec of objects in source catalog don't align
with the RA Dec values of the image.
How it works: first compute dra, ddec and radius of each pixel respect to the
objects listed in sources. Then masks out all pixels further than ~25 arcsecs around
each source. It uses spline basis to model the flux as a function of the spatial
coord and find the scene centroid offsets.
Parameters
----------
ra : numpy.ndarray
Data array with pixel RA coordinates.
dec : numpy.ndarray
Data array with pixel Dec coordinates.
flux : numpy.ndarray
Data array with flux values.
sources : pandas DataFrame
Catalog with sources detected in the image.
plot : boolean
Create diagnostic plots.
Returns
-------
ra_offset : float
RA coordinate offset
dec_offset : float
Dec coordinate offset
"""
# diagnostic plot
if plot:
fig, ax = plt.subplots(1, 3, figsize=(15, 4))
ax[0].pcolormesh(
ra,
dec,
flux,
cmap=plt.cm.viridis,
shading="nearest",
norm=colors.SymLogNorm(linthresh=200, vmin=0, vmax=2000, base=10),
rasterized=True,
)
ax[0].scatter(
sources.ra,
sources.dec,
facecolors="none",
edgecolors="r",
linewidths=1,
alpha=0.9,
)
# create a temporal mask of ~25 (6 pix) arcsec around each source
ra, dec, flux = ra.ravel(), dec.ravel(), flux.ravel()
dra, ddec = np.asarray(
[
[
ra - sources["ra"][idx],
dec - sources["dec"][idx],
]
for idx in range(len(sources))
]
).transpose(1, 0, 2)
dra = dra * (u.deg)
ddec = ddec * (u.deg)
r = np.hypot(dra, ddec).to("arcsec")
source_rad = 0.5 * np.log10(sources.phot_g_mean_flux) ** 1.5 + 25
tmp_mask = r.value < source_rad.values[:, None]
flx = np.tile(flux, (sources.shape[0], 1))[tmp_mask]
# design matrix in cartesian coord to model flux(dra, ddec)
A = _make_A_cartesian(
dra.value[tmp_mask],
ddec.value[tmp_mask],
radius=np.percentile(r[tmp_mask], 90) / 3600,
n_knots=8,
)
prior_sigma = np.ones(A.shape[1]) * 10
prior_mu = np.zeros(A.shape[1]) + 10
w = solve_linear_model(
A,
flx,
y_err=np.sqrt(np.abs(flx)),
prior_mu=prior_mu,
prior_sigma=prior_sigma,
)
# iterate to reject outliers from nearby sources using (data - model)
for k in range(3):
bad = sigma_clip(flx - A.dot(w), sigma=3).mask
w = solve_linear_model(
A,
flx,
y_err=np.sqrt(np.abs(flx)),
k=~bad,
prior_mu=prior_mu,
prior_sigma=prior_sigma,
)
# flux model
flx_mdl = A.dot(w)
# mask flux values from model to be used as weights
k = flx_mdl > np.percentile(flx_mdl, 90)
# compute centroid offsets in arcseconds
ra_offset = np.average(dra[tmp_mask][k], weights=np.sqrt(flx_mdl[k])).to("arcsec")
dec_offset = np.average(ddec[tmp_mask][k], weights=np.sqrt(flx_mdl[k])).to("arcsec")
# diagnostic plots
if plot:
ax[1].scatter(
dra[tmp_mask] * 3600,
ddec[tmp_mask] * 3600,
c=np.log10(flx),
s=2,
vmin=2.5,
vmax=3,
)
ax[2].scatter(
dra[tmp_mask][k] * 3600,
ddec[tmp_mask][k] * 3600,
c=np.log10(flx_mdl[k]),
s=2,
)
ax[1].set_xlim(-30, 30)
ax[1].set_ylim(-30, 30)
ax[2].set_xlim(-30, 30)
ax[2].set_ylim(-30, 30)
ax[1].set_xlabel("R.A.")
ax[1].set_ylabel("Dec")
ax[1].set_xlabel(r"$\delta x$")
ax[1].set_ylabel(r"$\delta y$")
ax[2].set_xlabel(r"$\delta x$")
ax[2].set_ylabel(r"$\delta y$")
ax[1].axvline(ra_offset.value, c="r", ls="-")
ax[1].axhline(dec_offset.value, c="r", ls="-")
plt.show()
return ra_offset, dec_offset
def _check_coordinate_offsets(
ra, dec, row, column, flux, sources, wcs, cutout_size=50, plot=False
):
"""
Checks if there is any offset between the pixel coordinates and the Gaia sources
due to wrong WCS. It checks all 4 corners and image center, compute coordinates
offsets and sees if offsets are consistent in all regions.
Parameters
----------
ra : numpy.ndarray
Data array with pixel RA coordinates.
dec : numpy.ndarray
Data array with pixel Dec coordinates.
flux : numpy.ndarray
Data array with flux values.
sources : pandas DataFrame
Catalog with sources detected in the image.
wcs : astropy.wcs
World coordinates system solution for the FFI.
cutout_size : int
Size of the cutouts in each corner and center to be used to compute offsets.
Use larger cutouts for regions with low number of sources detected.
plot : boolean
Create diagnostic plots.
Returns
-------
ra : numpy.ndarray
Data arrays with corrected coordinates.
dec : numpy.ndarray
Data arrays with corrected coordinates.
sources : pandas DataFrame
Catalog with corrected pixel row and column coordinates.
"""
# define cutout origins for corners and image center
cutout_org = [
[0, 0],
[flux.shape[0] - cutout_size, 0],
[0, flux.shape[1] - cutout_size],
[flux.shape[0] - cutout_size, flux.shape[1] - cutout_size],
[(flux.shape[0] - cutout_size) // 2, (flux.shape[1] - cutout_size) // 2],
]
ra_offsets, dec_offsets = [], []
# iterate over cutouts to get offsets
for cdx, c_org in enumerate(cutout_org):
# create cutouts and sources inside
cutout_f = flux[
c_org[0] : c_org[0] + cutout_size, c_org[1] : c_org[1] + cutout_size
]
cutout_ra = ra[
c_org[0] : c_org[0] + cutout_size, c_org[1] : c_org[1] + cutout_size
]
cutout_dec = dec[
c_org[0] : c_org[0] + cutout_size, c_org[1] : c_org[1] + cutout_size
]
cutout_row = row[
c_org[0] : c_org[0] + cutout_size, c_org[1] : c_org[1] + cutout_size
]
cutout_col = column[
c_org[0] : c_org[0] + cutout_size, c_org[1] : c_org[1] + cutout_size
]
inside = (
(sources.row > cutout_row.min())
& (sources.row < cutout_row.max())
& (sources.column > cutout_col.min())
& (sources.column < cutout_col.max())
)
sources_in = sources[inside].reset_index(drop=True)
ra_offset, dec_offset = _compute_coordinate_offset(
cutout_ra, cutout_dec, cutout_f, sources_in, plot=plot
)
ra_offsets.append(ra_offset.value)
dec_offsets.append(dec_offset.value)
ra_offsets = np.asarray(ra_offsets) * u.arcsec
dec_offsets = np.asarray(dec_offsets) * u.arcsec
# diagnostic plot
if plot:
plt.plot(ra_offsets, label="RA offset")
plt.plot(dec_offsets, label="Dec offset")
plt.legend()
plt.xlabel("Cutout number")
plt.ylabel(r"$\delta$ [arcsec]")
plt.show()
# if offsets are > 1 arcsec and all within 1" from each other, then apply offsets
# to source coordinates
if (
(np.abs(ra_offsets.mean()) > 1 * u.arcsec)
and (np.abs(dec_offsets.mean()) > 1 * u.arcsec)
and (np.abs(ra_offsets - ra_offsets.mean()) < 1 * u.arcsec).all()
and (np.abs(dec_offsets - dec_offsets.mean()) < 1 * u.arcsec).all()
):
log.info("All offsets are > 1'' and in the same direction")
# correct the pix coord of sources
sources["column"], sources["row"] = wcs.all_world2pix(
np.array(
[
sources.ra + ra_offsets.mean().to("deg"),
sources.dec + dec_offsets.mean().to("deg"),
]
).T,
0.0,
).T
# correct the ra, dec grid with the offsets
ra -= ra_offsets.mean().to("deg").value
dec -= dec_offsets.mean().to("deg").value
return ra, dec, sources
def buildKeplerPRFDatabase(fnames):
"""Procedure to build the database of Kepler PRF shape models.
Parameters
----------
fnames: list of str
List of filenames for Kepler FFIs.
"""
# This procedure should be stored as part of the module, because it will
# be vital for reproducibility.
# 1. Do some basic checks on FFI files that they are Kepler FFIs, and that
# all 53 are present, all same channel etc.
# 2. Iterate through files
# for fname in fnames:
# f = FFIMachine.from_file(fname, HARD_CODED_PARAMETERS)
# f.build_shape_model()
# f.fit_model()
#
# output = (
# PACKAGEDIR
# + f"src/psfmachine/data/q{quarter}_ch{channel}_{params}.csv"
# )
# f.save_shape_model(output=output)
raise NotImplementedError
``` |
{
"source": "JorgeMartinezG/prism-frontend",
"score": 2
} |
#### File: app/tests/test_calculate.py
```python
from datetime import datetime, timezone
from unittest.mock import patch
from app.kobo import get_form_responses
from app.zonal_stats import calculate_stats
def test_calculate_stats_json_output():
""" Test calculate_stats with geojson_out=False."""
zones = '/app/tests/small_admin_boundaries.json'
geotiff = '/app/tests/raster_sample.tif'
features = calculate_stats(zones, geotiff, geojson_out=False)
assert len(features) == 26
assert True
def test_calculate_stats_geojson_output():
""" Test calculate_stats with geojson_out=True."""
zones = '/app/tests/small_admin_boundaries.json'
geotiff = '/app/tests/raster_sample.tif'
features = calculate_stats(zones, geotiff, geojson_out=True)
assert len(features) == 26
assert features[0]['type'] == 'Feature'
assert True
def test_calculate_stats_with_group_by():
""" Test calculate_stats with a group_by argument."""
zones = '/app/tests/small_admin_boundaries.json'
geotiff = '/app/tests/raster_sample.tif'
features = calculate_stats(
zones, geotiff, group_by='ADM1_PCODE', geojson_out=False)
assert len(features) == 4
assert True
def test_calculate_stats_wfs_polygons():
""" Test calculate_stats with a wfs_response argument."""
zones = '/app/tests/small_admin_boundaries.json'
geotiff = '/app/tests/raster_sample.tif'
wfs_response = {'filter_property_key': 'label', 'path': '/app/tests/wfs_response.json'}
features = calculate_stats(
zones, geotiff, geojson_out=False, wfs_response=wfs_response)
assert len(features) == 5
features = calculate_stats(
zones, geotiff, group_by='ADM1_PCODE', geojson_out=False, wfs_response=wfs_response)
assert len(features) == 2
assert True
@patch('app.kobo.get_responses_from_kobo')
@patch('app.kobo.get_kobo_params')
def test_kobo_response_form(kobo_params, kobo_data):
""" Test form response parsing. """
form_fields = {
'name': 'name',
'datetime': 'date',
'geom': 'geom',
'filters': {
'status': 'Approved'
}
}
kobo_params.return_value = (('test', 'test'), form_fields)
kobo_data_json = [
{
'date': '2019-09-22T21:35:54',
'geom': '21.908012 95.986908 0 0',
'value': '2',
'_validation_status': {'label': 'Approved'},
'username': 'jorge'
},
{
'date': '2021-01-01T10:00:08',
'geom': '21.916222 95.955971 0 0',
'value': '3',
'_validation_status': {'label': 'Approved'},
'username': 'test'
}
]
labels = {'value': 'integer', 'geom': 'geopoint', 'username': 'username', 'date': 'datetime'}
kobo_data.return_value = (kobo_data_json, labels)
begin = datetime(2000, 1, 1).replace(tzinfo=timezone.utc)
end = datetime(2030, 1, 1).replace(tzinfo=timezone.utc)
forms = get_form_responses(begin, end)
assert len(forms) == 2
assert forms[0]['lat'] == 21.908012
assert forms[0]['lon'] == 95.986908
assert forms[0]['value'] == 2
assert forms[0]['status'] == 'Approved'
form_fields = {
'name': 'name',
'datetime': 'date',
'geom': 'geom',
'filters': {
'status': 'Approved',
'username': 'jorge'
}
}
kobo_params.return_value = (('test', 'test'), form_fields)
forms = get_form_responses(begin, end)
assert len(forms) == 1
# Test Filter
begin = datetime(2000, 1, 1).replace(tzinfo=timezone.utc)
end = datetime(2020, 1, 1).replace(tzinfo=timezone.utc)
forms = get_form_responses(begin, end)
assert len(forms) == 1
assert True
```
#### File: api-flask/app/zonal_stats.py
```python
import logging
from collections import defaultdict
from datetime import datetime
from json import dump, load
from urllib.parse import urlencode
from app.caching import cache_file, get_json_file
from app.timer import timed
import rasterio
from rasterstats import zonal_stats
from shapely.geometry import mapping, shape
from shapely.ops import cascaded_union
from werkzeug.exceptions import InternalServerError
logger = logging.getLogger(__name__)
DEFAULT_STATS = ['min', 'max', 'mean', 'median']
def get_wfs_response(wfs_params):
"""
Execute a Web Feature Service (WFS) request against an external OGC server.
The request returns the geospatial features that match the filters passed via the cql_filter param.
https://docs.geoserver.org/stable/en/user/services/wfs/reference.html
"""
cql_filter = []
if 'time' in wfs_params.keys():
from_date = datetime.strptime(wfs_params.get('time'), '%Y-%m-%d')
cql_filter.append('timestamp DURING {}/P1D'.format(from_date.isoformat()))
params = {
'service': 'WFS',
'version': '1.0.0',
'request': 'GetFeature',
'typeName': wfs_params.get('layer_name'),
'outputFormat': 'application/json',
}
if len(cql_filter) > 0:
params['cql_filter'] = ' AND '.join(cql_filter)
wfs_url = '{url}?{params}'.format(url=wfs_params.get('url'), params=urlencode(params))
wfs_response_path = cache_file(url=wfs_url, prefix='wfs')
return dict(filter_property_key=wfs_params['key'], path=wfs_response_path)
def _extract_features_properties(zones):
with open(zones) as json_file:
zones = load(json_file)
return [f['properties'] for f in zones.get('features', [])]
def _group_zones(zones, group_by):
"""Group zones by a key id and merge polygons."""
with open(zones) as json_file:
geojson_data = load(json_file)
features = geojson_data.get('features', [])
grouped_polygons = defaultdict(list)
for zone_feature in features:
grouped_polygons[zone_feature['properties'][group_by]].append(
shape(zone_feature['geometry'])
)
new_features = []
for group_id, polygons in grouped_polygons.items():
new_geometry = mapping(cascaded_union(polygons))
new_features.append(
dict(
type='Feature',
id=group_id,
properties=dict([(group_by, group_id)]),
geometry=dict(
type=new_geometry['type'],
coordinates=new_geometry['coordinates'])
)
)
outjson = dict(type='FeatureCollection', features=new_features)
output_file = '{zones}.{group_by}'.format(zones=zones, group_by=group_by)
with open(output_file, 'w') as outfile:
dump(outjson, outfile)
return output_file
def _create_shapely_geoms(geojson_dict, filter_property_key):
"""
Read and parse geojson dictionary geometries into shapely objects.
Returns a list of tuples, each containing the property value for filter_property_key and the shapely object.
"""
shapely_dicts = []
for f in geojson_dict.get('features'):
if f.get('geometry').get('type') not in ['MultiPolygon', 'Polygon']:
continue
obj_key = f['properties'][filter_property_key]
shapely_dicts.append((obj_key, shape(f.get('geometry'))))
return shapely_dicts
def _get_intersected_polygons(zones_geojson, wfs_geojson, filter_property_key):
"""
Generate polygon intersection between each zone and polygons from wfs response.
This function returns an array of dictionaries
- 'geom' key contains the shapely object which is used for statistics
- 'feature' key is a geojson feature with the intersected geometry
"""
wfs_shapes = _create_shapely_geoms(wfs_geojson, filter_property_key)
intersected_zones = []
for zone in zones_geojson.get('features'):
# Shapely object from zone geojson geometry.
geom = shape(zone.get('geometry'))
# Get geometry intersection between zone and wfs response polygons.
filtered = [(k, geom.intersection(s)) for k, s in wfs_shapes if geom.intersects(s)]
if len(filtered) == 0:
continue
filtered_dict = []
for k, geom in filtered:
properties = zone.get('properties').copy()
# Include property value from wfs_response.
properties[filter_property_key] = k
# Create geojson feature.
feature = {
'type': 'Feature',
'geometry': mapping(geom),
'properties': properties
}
filtered_dict.append({'geom': geom, 'feature': feature})
intersected_zones.append(filtered_dict)
# Flatten.
intersected_zones = [item for sublist in intersected_zones for item in sublist]
return intersected_zones
@timed
def calculate_stats(
zones,
geotiff,
group_by=None,
stats=DEFAULT_STATS,
prefix='stats_',
geojson_out=False,
wfs_response=None
):
"""Calculate stats."""
if group_by:
zones = _group_zones(zones, group_by)
stats_input = zones
if wfs_response:
zones_geojson = get_json_file(zones)
wfs_geojson = get_json_file(wfs_response.get('path'))
zones = _get_intersected_polygons(zones_geojson,
wfs_geojson,
wfs_response.get('filter_property_key'))
# Extract shapely objects to compute stats.
stats_input = [s.get('geom') for s in zones]
prefix = None
try:
stats = zonal_stats(
stats_input,
geotiff,
stats=stats,
prefix=prefix,
geojson_out=geojson_out
)
except rasterio.errors.RasterioError as e:
logger.error(e)
        raise InternalServerError('An error occurred calculating statistics.')
if wfs_response:
zones_features = [z.get('feature') for z in zones]
# Add statistics as feature property fields.
features = [{**z, 'properties': {**z.get('properties'), **s}}
for z, s in zip(zones_features, stats)]
# Return stats as geojson array of features.
return features
if not geojson_out:
feature_properties = _extract_features_properties(zones)
stats = [{**properties, **stat}
for stat, properties in zip(stats, feature_properties)]
return stats
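# Hedged usage sketch (added comment; the file names and the 'ADM1_PCODE' grouping key
# are placeholders, not part of this module):
#
#     features = calculate_stats('zones.geojson', 'rainfall.tif',
#                                group_by='ADM1_PCODE',
#                                stats='min max mean count',
#                                geojson_out=False)
#
# Each returned dict merges the zone's properties with the computed statistics, prefixed
# with 'stats_' via the prefix argument; when a WFS intersection is requested the prefix
# is dropped and the stats are attached to the intersected geojson features instead.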
``` |
{
"source": "jorgemauricio/automatizacion_python",
"score": 3
} |
#### File: jorgemauricio/automatizacion_python/generar_graficas_por_estacion.py
```python
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import requests
import json
def main():
# leer csv
df = pd.read_csv("data/estado1_5.csv")
    # arreglo de variables
array_variables = ["prec",
"tmax",
"tmin",
"tmed",
"velvmax",
"velv",
"dirv",
"radg",
"humr"]
array_titulos = ['Precipitación mm',
"Temperatura máxima ºC",
"Temperatura media ºC",
"Temperatura mínima ºC",
"Velocidad máxima del viento km/h",
"Velocidad promedio del viento km/h",
"Dirección del viento ºAz",
"Radiación global W/m2",
"Humedad relativa"]
# Ciclo de información por estación
for estacion in df['nombre'].unique():
# data frame temporal para guardar la información
df_temporal = df.where(df['nombre'] == estacion).dropna()
# ciclo de información por variable y elemento
for elemento in range(len(array_variables)):
# generar una figura en blanco
fig = plt.figure(figsize=(15,5))
# crear gráfica
axes = fig.add_axes([0.1,0.1,0.8,0.8])
# valor de x
x = np.arange(len(df_temporal['nombre']))
# valor de y
y = df_temporal[array_variables[elemento]]
# mapeo de datos
axes.plot(x,y)
# agregar título a la gráfica
axes.set_title(estacion)
# agregar nomenclatura para eje Y
axes.set_ylabel(array_titulos[elemento])
# agregar nomenclatura para eje X
axes.set_xlabel("Día")
# guardar archivo
nombre_grafica = "graphs/{}_{}.png".format(estacion, array_variables[elemento])
fig.savefig(nombre_grafica, dpi=300)
# limpiar el espacio de trabajo
fig.clf()
if __name__ == '__main__':
main()
``` |
{
"source": "jorgemauricio/becarios",
"score": 4
} |
#### File: becarios/ejercicios/10_examen.py
```python
import random
import time
def main():
horizontal_1 = False
horizontal_2 = False
horizontal_3 = False
vertical_1 = False
vertical_2 = False
vertical_3 = False
diagonal_1 = False
diagonal_2 = False
espacio_1 = 0
espacio_2 = 0
espacio_3 = 0
espacio_4 = 0
espacio_5 = 0
espacio_6 = 0
espacio_7 = 0
espacio_8 = 0
espacio_9 = 0
array_valores = [1,2,3,4,5,6,7,8,9]
dic = {"a1":[0, False],
"a2":[0, False],
"a3":[0, False],
"a4":[0, False],
"a5":[0, False],
"a6":[0, False],
"a7":[0, False],
"a8":[0, False],
"a9":[0, False]}
status = True
contador = 0
while status:
arr = random.sample(array_valores, len(array_valores))
arr_validacion = []
for k,v in dic.items():
if v[1] == False:
arr_validacion.append(k)
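        # Nota (comentario añadido): tal como está, los valores muestreados en `arr` nunca
        # se copian a `dic`, por lo que las sumas de abajo siempre valen 0 y el ciclo no
        # puede terminar. Un posible paso faltante, dejado solo como comentario para no
        # alterar el ejercicio original, sería:
        #     for k, key in enumerate(arr_validacion):
        #         dic[key][0] = arr[k]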
for i in arr_validacion:
if dic[i][1] == False:
if dic["a1"][0] + dic["a2"][0] + dic["a3"][0] == 15:
horizontal_1 = True
dic["a1"][1] = True
dic["a2"][1] = True
dic["a3"][1] = True
if dic["a4"][0] + dic["a5"][0] + dic["a6"][0] == 15:
                    horizontal_2 = True
dic["a4"][1] = True
dic["a5"][1] = True
dic["a6"][1] = True
if dic["a7"][0] + dic["a8"][0] + dic["a9"][0] == 15:
                    horizontal_3 = True
dic["a7"][1] = True
dic["a8"][1] = True
dic["a9"][1] = True
if dic["a1"][0] + dic["a4"][0] + dic["a7"][0] == 15:
vertical_1 = True
dic["a1"][1] = True
dic["a4"][1] = True
dic["a7"][1] = True
if dic["a2"][0] + dic["a5"][0] + dic["a8"][0] == 15:
vertical_2 = True
dic["a2"][1] = True
dic["a5"][1] = True
dic["a8"][1] = True
if dic["a3"][0] + dic["a6"][0] + dic["a9"][0] == 15:
vertical_3 = True
dic["a3"][1] = True
dic["a6"][1] = True
dic["a9"][1] = True
if dic["a1"][0] + dic["a5"][0] + dic["a9"][0] == 15:
diagonal_1 = True
dic["a1"][1] = True
dic["a5"][1] = True
dic["a9"][1] = True
if dic["a7"][0] + dic["a5"][0] + dic["a3"][0] == 15:
diagonal_2 = True
dic["a7"][1] = True
dic["a5"][1] = True
dic["a3"][1] = True
contador += 1
if horizontal_1 and horizontal_2 and horizontal_3 and vertical_1 and vertical_2 and vertical_3 and diagonal_1 and diagonal_2:
status = False
imprimir_cuadro(dic["a1"][0],dic["a2"][0],dic["a3"][0],dic["a4"][0],dic["a5"][0],dic["a6"][0],dic["a7"][0],dic["a8"][0],dic["a9"][0])
print("iteraciones: ", contador)
print("Iteraciones: {}".format(count))
def validacion(a1,a2,a3,a4,a5,a6,a7,a8,a9):
pass
def imprimir_cuadro(a1,a2,a3,a4,a5,a6,a7,a8,a9):
print("\n")
print("{}|{}|{}".format(a1,a2,a3))
print("{}|{}|{}".format(a4,a5,a6))
print("{}|{}|{}".format(a7,a8,a9))
print("\n")
#time.sleep(0.5)
if __name__ == '__main__':
main()
``` |
{
"source": "jorgemauricio/becarios_utna",
"score": 3
} |
#### File: becarios_utna/ejercicios/09_examen.py
```python
def main():
palabra = "parangaricutirimicuaro"
def ordenar_letras(word):
"""
Función que ordena las letras de una palabra
param: word: palabra a la cual se le van a ordenar sus letras
"""
if __name__ == '__main__':
main()
```
#### File: becarios_utna/ejercicios/10_examen.py
```python
import random
import time
def main():
def imprimir_cuadro(a1,a2,a3,a4,a5,a6,a7,a8,a9):
print("\n")
print("{}|{}|{}".format(a1,a2,a3))
print("{}|{}|{}".format(a4,a5,a6))
print("{}|{}|{}".format(a7,a8,a9))
print("\n")
#time.sleep(0.5)
if __name__ == '__main__':
main()
``` |
{
"source": "jorgemauricio/esparrago",
"score": 2
} |
#### File: jorgemauricio/esparrago/generateWindStations.py
```python
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
import numpy as np
from numpy import meshgrid
import pandas as pd
import math
plt.figure(figsize=(24,12))
m = Basemap(projection='mill',llcrnrlat=25,urcrnrlat=33,llcrnrlon=-115,urcrnrlon=-107,resolution='c')
#%% read txt file
data = pd.read_csv('data/db_sonora.csv')
#%% calculate U and V vectors
def calculateU(windSpeed, windDirection):
'''
Generate U vector
'''
return windSpeed * math.cos(windDirection)
def calculateV(windSpeed, windDirection):
'''
Generate V vector
'''
return windSpeed * math.sin(windDirection)
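#%% nota sobre unidades (comentario añadido): math.cos y math.sin esperan radianes. Si
# 'dirv' en el CSV viene en grados azimut (como sugiere el nombre de la columna en otros
# scripts de este mismo conjunto), las llamadas de arriba necesitarían
# math.radians(windDirection); según la convención deseada, también podría aplicar la
# forma meteorológica u = -speed*sin(theta), v = -speed*cos(theta). Se deja sin cambios
# porque las unidades de db_sonora.csv no se muestran aquí.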
#%% cut info
data = data.loc[data['longitud'] > -115.0]
data = data.loc[data['longitud'] < -107.0]
data = data.loc[data['latitud'] > 25.0]
data = data.loc[data['latitud'] < 33.0]
#%% calculate U
data['u'] = data.apply(lambda x: calculateU(x['velv'], x['dirv']), axis=1)
#%% calculate V
data['v'] = data.apply(lambda x: calculateV(x['velv'], x['dirv']), axis=1)
#%% get fecha columns as array
dates = np.array(data['fecha'])
#%% get all the uniques values from fecha
dates = np.unique(dates)
#%% loop
for i in dates:
plt.clf()
dataTemp = data.loc[data['fecha'] == i]
#%% read x, y, value
lons = np.array(dataTemp['longitud'])
lats = np.array(dataTemp['latitud'])
u = np.array(dataTemp['u'])
v = np.array(dataTemp['v'])
speed = np.sqrt(u*u + v*v)
x, y = m(lons, lats)
m.fillcontinents(color='#cc9955', lake_color='aqua', zorder = 0)
m.drawcoastlines(color = '0.15')
m.quiver(x, y, u, v, speed, cmap=plt.cm.autumn)
#%% tempTitle for png file
tempTitleForPNG = 'maps/map_stations_{}.png'.format(i)
plt.savefig(tempTitleForPNG,dpi=300)
print('***** {}'.format(tempTitleForPNG))
``` |
{
"source": "jorgemauricio/get_modis_files_from_nasa",
"score": 3
} |
#### File: jorgemauricio/get_modis_files_from_nasa/pre_procesamiento_evi.py
```python
import time
import os
import re
import pandas as pd
import numpy as np
import datetime
from pyhdf.SD import SD, SDC
# funcion main
def main():
# constantes
DATAFIELD_NAME = "CMG 0.05 Deg 16 days EVI"
LONG_MIN = -118.2360
LONG_MAX = -86.1010
LAT_MIN = 12.3782
LAT_MAX = 33.5791
# lista de archivos a procesar
lista_de_archivos = [x for x in os.listdir("data") if x.endswith(".hdf")]
# ciclo de procesamiento
for archivo in lista_de_archivos:
# init timer
start_time = time.time()
# generar nombres
tipo, fecha, coleccion, produccion, extension = archivo.split(".")
anio = int(fecha[1:5])
dia = int(fecha[5:])
new_fecha = datetime.date(anio,1,1) + datetime.timedelta(dia)
new_fecha = new_fecha.strftime("%Y-%m-%d")
anio, mes, dia = new_fecha.split("-")
# procesamiento hdf
hdf = SD("data/{}".format(archivo), SDC.READ)
# leer el dataset
data2D = hdf.select(DATAFIELD_NAME)
data = data2D[:,:].astype(np.double)
# read attributes
attrs = data2D.attributes(full=1)
lna = attrs['long_name']
long_name = lna[0]
vra = attrs['valid_range']
valid_range = vra[0]
fva = attrs['_FillValue']
_FillValue = fva[0]
ua = attrs['units']
units = ua[0]
# Handle fill value
invalid = data == _FillValue
invalid = np.logical_or(invalid, data < valid_range[0])
invalid = np.logical_or(invalid, data > valid_range[1])
data[invalid] = np.nan
# apply scale factor and offset
data = (data - 0.0) / 10000
# normally we would use the grid metadata to reconstruct the grid, but
# the grid metadata is incorrect
x = np.linspace(-180, 180, 7200)
y = np.linspace(90, -90, 3600)
lon, lat = np.meshgrid(x,y)
# init xi, yi, zi
xi = []
yi = []
zi = []
# ciclo
for i in range(len(lon)):
for j in range(len(lat)):
xi.append(x[i])
yi.append(y[j])
zi.append(data[j,i])
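        # Nota (comentario añadido): ambos ciclos recorren 3600 índices, así que solo se
        # visitan las primeras 3600 de las 7200 muestras de longitud (aprox. -180 a 0 grados).
        # Eso aún cubre la caja de México definida por LONG_MIN/LONG_MAX, pero para cubrir la
        # malla global completa haría falta range(len(x)) / range(len(y)).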
# generar arreglo de datos
arr = np.stack((xi,yi,zi), axis=1)
# columnas para el df
cols = ['lon', 'lat', 'value']
# crear data frame con la informacion del hdf
df = pd.DataFrame(arr, columns=cols)
# delimitar el area de estudio
df = df.where((df['lon'] > LONG_MIN) & (df['lon'] < LONG_MAX)).dropna()
df = df.where((df['lat'] > LAT_MIN) & (df['lat'] < LAT_MAX)).dropna()
# obtener valores de x, y
lons = np.array(df['lon'])
lats = np.array(df['lat'])
# agregar anio, mes y dia al data frame
df['Anio'] = anio
df['Mes'] = mes
df['Dia'] = dia
# titulo archivo
titulo_archivo = "{}-{}-{}_EVI.csv".format(anio, mes, dia)
# exportar df a csv
df.to_csv("processing/{}".format(titulo_archivo), index=False)
# print file
print(titulo_archivo)
# end time
print("Tiempo de procesamiento: ", time.time() - start_time)
# frames
frames = []
# lista de archivos procesados
files = [x for x in os.listdir("processing") if x.endswith("EVI.csv")]
# loop
for file in files:
temp = pd.read_csv("processing/{}".format(file))
frames.append(temp)
# generar un solo archivo
resultado = pd.concat(frames)
# guardar a csv
resultado.to_csv("results/compilado_EVI.csv")
if __name__ == '__main__':
main()
``` |
{
"source": "jorgemauricio/GradosDiasDesarrollo",
"score": 3
} |
#### File: jorgemauricio/GradosDiasDesarrollo/algoritmo.py
```python
import math
import pandas as pd
import numpy as np
### Functions
# Metodo residual
def metodoResidual(tmax, tmin, tbase):
gdd = 0.0
if (tmax > umbralSuperior):
tmax = umbralSuperior
else:
tmax = tmax
if (tmin < umbralInferior):
tmin = umbralInferior
else:
tmin = tmin
gdd = ((tmax + tmin) / 2.0) - tbase
if (gdd < 0):
gdd = 0.0
return gdd
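# Ejemplo numérico (comentario añadido): con umbralSuperior = 30, umbralInferior = 10,
# tbase = 10, tmax = 32 y tmin = 18, tmax se recorta a 30 y
# metodoResidual(32, 18, 10) regresa (30 + 18) / 2 - 10 = 14 grados-día.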
# Metodo triangulo simple
def metodoTrianguloSimple(tmax, tmin):
gdd = 0.0
if (tmin > umbralSuperior and tmax > umbralSuperior):
gdd = umbralSuperior - umbralInferior
elif (tmax < umbralInferior and tmin < umbralInferior):
gdd = 0.0
elif (tmin >= umbralInferior and tmax <= umbralSuperior):
gdd = ((6 * (tmax + tmin - 2.0 * umbralInferior)) / 12)
elif (tmin < umbralInferior and tmax > umbralSuperior):
dti = tmax - umbralInferior
dts = tmax - umbralSuperior
dt = tmax - tmin
gdd = ((6 * pow(dti, 2.0) / dt) - ((6 * pow(dts, 2.0)) / dt)) / 12
elif (tmin < umbralInferior and tmax > umbralInferior and tmax < umbralSuperior):
dti = tmax - umbralInferior
dt = tmax - tmin
gdd = ((6 * (pow(dti, 2.0)) / dt)) / 12
elif (tmin > umbralInferior and tmin < umbralSuperior and tmax > umbralSuperior):
dt = tmax - tmin
dts = tmax - umbralSuperior
        gdd = ((6 * (tmax + tmin - 2.0 * umbralInferior)) / 12) - (((6 * pow(dts, 2.0)) / dt) / 12)
return gdd
# Metodo seno simple
# Subrutina para metodo del seno simple
def sinec(suma, diff, temp1):
twopi = 6.2834
pihlf = 1.5708
d2 = temp1 - suma
d3 = diff * diff
d4 = d2 * d2
d5 = math.sqrt(d3 - d4)
theta = math.atan2(d2, d5)
if (d2 < 0 and theta > 0):
theta = theta - 3.1416
heat = (diff * math.cos(theta) - d2 * (pihlf - theta)) / twopi
return heat
def metodoSenoSimple(tmax, tmin):
gdd = 0.0
if (tmin > umbralSuperior):
gdd = umbralSuperior - umbralInferior
else:
if (tmax <= umbralInferior):
gdd = 0.0
else:
temp1 = 2 * umbralInferior
diff = tmax - tmin
suma = tmax + tmin
if (tmin >= umbralInferior):
gdd = (suma - temp1) / 2
else:
gdd = sinec(suma, diff, temp1)
if (tmax > umbralSuperior):
temp1 = 2 * umbralSuperior
gdd2 = gdd
gdd = sinec(suma, diff, temp1)
gdd = gdd2 - gdd
return gdd
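# Ejemplo numérico (comentario añadido): con umbralInferior = 10 y umbralSuperior = 35,
# metodoSenoSimple(30, 20) cae en la rama "tmin >= umbralInferior" y regresa
# (50 - 20) / 2 = 15, igual que el método residual cuando el día queda completamente
# dentro de los umbrales; la corrección de sinec() solo interviene cuando tmin baja del
# umbral inferior.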
# Leer archivo .csv
data = pd.read_csv('data/datos.csv')
# Solicitar al usuario los umbrales del cultivo
print ("*************************************************************")
print ("***** Programa para calcular grados-dias en Python *****")
print ("***** Metodos: *****")
print ("***** + Residual *****")
print ("***** + Triangulo Simple *****")
print ("***** + Metodo Seno Simple *****")
print ("*************************************************************")
# limites
umbralInferiorText = input("Introduce el umbral inferior: ")
umbralSuperiorText = input("Introduce el umbral superior: ")
tbaseText = input("Introduce la temperatura base: ")
umbralSuperior = float(umbralSuperiorText)
umbralInferior = float(umbralInferiorText)
tbase = int(tbaseText)
# variables
gddTS = 0.0
gddTD = 0.0
gddSS = 0.0
# validacion de umbrales
if (umbralSuperior >= umbralInferior):
data['GDDR'] = data.apply(lambda row: metodoResidual(row['tmax'], row['tmin'], tbase), axis=1)
data['GDDTS'] = data.apply(lambda row: metodoTrianguloSimple(row['tmax'], row['tmin']), axis=1)
data['GDDSS'] = data.apply(lambda row: metodoSenoSimple(row['tmax'], row['tmin']), axis=1)
data.to_csv('data/datos_procesados.csv', sep=',')
else:
print ("Error \nLimite inferior mayor al superior")
``` |
{
"source": "jorgemauricio/InvestigacionAreaFoliar",
"score": 3
} |
#### File: jorgemauricio/InvestigacionAreaFoliar/algoritmoGeneracionDeIndicadores.py
```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
from PIL import Image
#%% estilo de la grafica
plt.style.use('ggplot')
# - - - - - MAIN - - - - -
def main():
# leer archivo de analisis foliar
dataFoliar = pd.read_csv('data/Foliares_Hass.csv')
# estructura de archivo csv de resultados
textToData = "Id,N,P,K,Ca,Mg,S,Fe,Cu,Mn,Zn,B,Mo,Na,Cl,pVerde,pCafeRojo,pCafe,pAmarillo,pAmarilloDorado,pCafeAmarillo\n"
# obtener los archivos de imagen a clasificar
# generar una lista con todas las imagenes
fileList = [x for x in os.listdir('images/train') if x.endswith('.jpg')]
# ciclo for para evaluar cada imagen
for file in fileList:
# declarar dict
dictColores = {}
# determinar los principales colores en las imágenes
# print nombre de la imagen
print("***** Procesando: {}".format(file))
# nombre temporal del archivo a evaluar
nombreTemporalArchivo = "images/train/{}".format(file)
# cargar la imagen
im = Image.open(nombreTemporalArchivo)
pix = im.load()
# tamaño de la imagen
x, y = im.size
# ciclo for para contabilizar los colores
for i in range(x):
for j in range(y):
vR, vG, vB = pix[i, j]
valueL, valueA, valueB = convertirRGBtoLAB(vR, vG, vB)
statusColor = validarArea(valueL, valueA, valueB)
if statusColor:
nombreTemporalClave = "{}/{}/{}".format(valueL, valueA, valueB)
if nombreTemporalClave in dictColores:
dictColores[nombreTemporalClave] += 1
else:
dictColores[nombreTemporalClave] = 1
# dict to DataFrame
data = pd.DataFrame()
data['color'] = dictColores.keys()
data['frecuencia'] = dictColores.values()
# ordenar información
data = data.sort_values(by='frecuencia', ascending = False)
# nombre archivo de colores para cada imagen
nombreTemporalArchivoColores = 'resultados/totalDeColores_{}.csv'.format(file.split('.')[0])
# save to csv
data.to_csv(nombreTemporalArchivoColores, index=False)
# crear columna L
data['L'] = data.apply(lambda x: generarValorL(x['color']), axis=1)
# crear columna L
data['a'] = data.apply(lambda x: generarValorA(x['color']), axis=1)
# crear columna L
data['b'] = data.apply(lambda x: generarValorB(x['color']), axis=1)
# eliminar colores grises
# data = data.loc[(data['a'] <= -5) | (data['a'] >= 5) | (data['b'] <= -5) | (data['b'] >= 5)]
# clasificacion de hoja de acuerdo al porcentaje de color verde
data['clasificacionColor'] = data.apply(lambda x: clasificacionDecolor(x['L'],x['a'],x['b']), axis=1)
# eliminar colores de fondo
data = data.loc[data['clasificacionColor'] != "f"]
# sumatoria de la frecuencia
sumatoriaFrecuencia = data['frecuencia'].sum()
# generar columna de porcentaje
data['porcentaje'] = data['frecuencia'] / sumatoriaFrecuencia * 100
# sumatoria de porcentajes
data['sumatoriaPorcentaje'] = data['porcentaje'].cumsum()
# tomar solo en cuenta el 80-20 de los datos
# data = data.loc[data['sumatoriaPorcentaje'] <= 80]
# nombre archivo de colores clasificados para cada imagen
nombreTemporalArchivoColoresClasificados = 'resultados/totalDecolores_clasificados_{}.csv'.format(file.split('.')[0])
# guardar como csv
data.to_csv(nombreTemporalArchivoColoresClasificados, index=False)
# numero de registros por clasificacion
pVerde = len(np.array(data.loc[data['clasificacionColor'] == 'v']))
pCafeRojo = len(np.array(data.loc[data['clasificacionColor'] == 'cr']))
pCafe = len(np.array(data.loc[data['clasificacionColor'] == 'c']))
pAmarillo = len(np.array(data.loc[data['clasificacionColor'] == 'a']))
pAmarilloDorado = len(np.array(data.loc[data['clasificacionColor'] == 'ag']))
pCafeAmarillo = len(np.array(data.loc[data['clasificacionColor'] == 'ca']))
print(pVerde, pCafeRojo, pCafe, pAmarillo, pAmarilloDorado, pCafeAmarillo)
# numero total de registros
numeroTotalDeRegistros = pVerde + pCafeRojo + pCafe + pAmarillo + pAmarilloDorado + pCafeAmarillo
# numero de registros por clasificacion
pVerde = pVerde / numeroTotalDeRegistros * 100
pCafeRojo = pCafeRojo / numeroTotalDeRegistros * 100
pCafe = pCafe / numeroTotalDeRegistros * 100
pAmarillo = pAmarillo / numeroTotalDeRegistros * 100
pAmarilloDorado = pAmarilloDorado / numeroTotalDeRegistros * 100
pCafeAmarillo = pCafeAmarillo / numeroTotalDeRegistros * 100
# agregar record al texto
dataTemporalFoliar = dataFoliar.loc[dataFoliar['Id'] == int(file.split('.')[0])]
N = np.array(dataTemporalFoliar['N'])
P = np.array(dataTemporalFoliar['P'])
K = np.array(dataTemporalFoliar['K'])
Ca = np.array(dataTemporalFoliar['Ca'])
Mg = np.array(dataTemporalFoliar['Mg'])
S = np.array(dataTemporalFoliar['S'])
Fe = np.array(dataTemporalFoliar['Fe'])
Cu = np.array(dataTemporalFoliar['Cu'])
Mn = np.array(dataTemporalFoliar['Mn'])
Zn = np.array(dataTemporalFoliar['Zn'])
B = np.array(dataTemporalFoliar['B'])
Mo = np.array(dataTemporalFoliar['Mo'])
Na = np.array(dataTemporalFoliar['Na'])
Cl = np.array(dataTemporalFoliar['Cl'])
print('***** N: {}'.format(N[0]))
print('***** pVerde: {}'.format(pVerde))
textToData += "{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{:0.2f},{:0.2f},{:0.2f},{:0.2f},{:0.2f},{:0.2f}\n".format(file.split('.')[0],N[0],P[0],K[0],Ca[0],Mg[0],S[0],Fe[0],Cu[0],Mn[0],Zn[0],B[0],Mo[0],Na[0],Cl[0],pVerde,pCafeRojo,pCafe,pAmarillo,pAmarilloDorado,pCafeAmarillo)
# guardar archivo con los resultados
nombreTemporalDeArchivoFinal = 'data/Foliar_join_porcentajes.csv'
archivoCompiladoFinal = open(nombreTemporalDeArchivoFinal, "w")
archivoCompiladoFinal.write(textToData)
archivoCompiladoFinal.close()
# funcion para determinar el porcentaje de color verde
def clasificacionDecolor(L,a,b):
"""
Determina la clasificacion del color mediante los espectros de color Lab
param: L: valor L
param: a: valor a
param: b: valor b
    regresa v: verde, a: amarillo, ag: amarillo dorado, c: cafe, ca: cafe amarillo, cr: cafe rojo, f: fondo
"""
if L >= 2 and L <= 73 and a >= -64 and a <= -2 and b >= 3 and b <= 72:
return "v"
elif L >= 74 and L <= 99 and a >= -66 and a <= -4 and b >= 5 and b <= 95:
return "a"
elif L >= 41 and L <= 94 and a >= -18 and a <= -10 and b >= 48 and b <= 80:
return "ag"
elif L >= 3 and L <= 67 and a >= 2 and a <= 42 and b >= 4 and b <= 75:
return "c"
elif L >= 10 and L <= 60 and a >= -14 and a <=-5 and b >= 15 and b <= 64:
return "ca"
elif L >= 2 and L <= 19 and a >= 11 and a <= 40 and b >= 4 and b <= 29:
return "cr"
else:
return "f"
# función para generar la columna L
def generarValorL(valor):
"""
Genera el valor de L del string de color
param: valor: string de color
"""
L, a, b = valor.split("/")
return float(L)
# función para generar la columna a
def generarValorA(valor):
    """
    Genera el valor de a del string de color
param: valor: string de color
"""
L, a, b = valor.split("/")
return float(a)
# función para generar la columna b
def generarValorB(valor):
    """
    Genera el valor de b del string de color
param: valor: string de color
"""
L, a, b = valor.split("/")
return float(b)
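# Nota de diseño (comentario añadido): las tres funciones anteriores solo difieren en qué
# componente regresan; una alternativa tentativa sería desempacar una sola vez por fila:
#     L, a, b = (float(v) for v in valor.split("/"))
# y así evitar tres pasadas separadas sobre el DataFrame.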
# Function RGB to Lab
def convertirRGBtoLAB(vr, vg, vb):
"""
Convertir colores del espectro RGB a Lab
param: vr: valor espectro r
param: vg: valor espectro g
param: vb: valor espectro b
"""
r = (vr + 0.0) / 255
g = (vg + 0.0) / 255
b = (vb + 0.0) / 255
if (r > 0.04045):
r = pow((r + 0.055) / 1.055, 2.4)
else:
r = r / 12.92
if (g > 0.04045):
g = pow((g + 0.055) / 1.055, 2.4)
else:
g = g / 12.92
if (b > 0.04045):
b = pow((b + 0.055) / 1.055, 2.4)
else:
b = b / 12.92
r = r * 100.0
g = g * 100.0
b = b * 100.0
var_x = r * 0.4124 + g * 0.3576 + b * 0.1805
var_y = r * 0.2126 + g * 0.7152 + b * 0.0722
var_z = r * 0.0193 + g * 0.1192 + b * 0.9505
var_x = var_x / 95.047
var_y = var_y / 100.00
var_z = var_z / 108.883
if (var_x > 0.008856):
var_x = pow(var_x, (1.0 / 3.0))
else:
var_x = (7.787 * var_x) + (16.0 / 116.0)
if (var_y > 0.008856):
var_y = pow(var_y, (1.0 / 3.0))
else:
var_y = (7.787 * var_y) + (16.0 / 116.0)
if (var_z > 0.008856):
var_z = pow(var_z, (1.0 / 3.0))
else:
var_z = (7.787 * var_z) + (16.0 / 116.0)
var_L = (116.0 * var_y) - 16.0
var_a = 500.0 * (var_x - var_y)
var_b = 200.0 * (var_y - var_z)
if (var_L >= 0 and var_L <= 100 and var_a == 0 and var_b == 0):
return 0.0, 0.0, 0.0
else:
return var_L, var_a, var_b
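# Comprobación rápida (comentario añadido): para el verde puro sRGB,
# convertirRGBtoLAB(0, 255, 0) debería dar aproximadamente L = 87.7, a = -86.2, b = 83.2,
# que coincide con los valores de referencia sRGB -> XYZ (D65) -> CIELAB, por lo que las
# constantes de arriba parecen consistentes.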
def validarArea(vL, vA, vB):
"""
Eliminar puntos grises y fondo
param: vL: valor espectro L
param: vA: valor espectro a
param: vB: valor espectro b
"""
# validate grayscale and mark points
if vL >= 0 and vL <= 100 and vA > -5 and vA < 5 and vB > -5 and vB < 5:
return False
else:
return True
if __name__ == "__main__":
main()
``` |
{
"source": "jorgemauricio/mexico",
"score": 3
} |
#### File: jorgemauricio/mexico/generar_archivos_sonora.py
```python
import pandas as pd
import numpy as np
import os
import time
from time import gmtime, strftime
def main():
# dia actual
#today = strftime("%Y-%m-%d")
today = "2018-10-12"
# variables
arr_variables = ['temp','rh', 'tmax','rain','tsoil010','u','v','dewpoint', 'tmin']
for variable in arr_variables:
titulo_archivo = "/home/jorge/Documents/Research/mexico/data/{}/{}_{}.csv".format(today,today, variable)
# crear dataframe
df = pd.read_csv(titulo_archivo)
# filtrar información
df = df.where((df["lons"] > -115.65) & (df["lons"] < -107.94) & (df["lats"] > 25.4) & (df["lats"] < 33.06)).dropna()
# exportar dataframe a csv
df.to_csv("/home/jorge/Documents/Research/mexico/data/{}/sonora_{}_{}.csv".format(today, today,variable), index=False)
if __name__ == '__main__':
main()
``` |
{
"source": "jorgemauricio/procesamiento_raster",
"score": 3
} |
#### File: procesamiento_raster/algoritmos/preprocesamiento_informacion.py
```python
import pandas as pd
import numpy as np
import os
# main
def main():
# path
path_guardar_archivo = "/home/jorge/Documents/Research/procesamiento_raster/data"
path_leer_archivo = "/media/jorge/backup1/Magali"
# variable de archivos
lista_de_archivos = [x for x in os.listdir(path_leer_archivo) if x.endswith('.xyz')]
for i in lista_de_archivos:
nombre_archivo, extension = i.split(".")
ruta_del_archivo = "{}/{}".format(path_leer_archivo, i)
# leer archivo
data = pd.read_table(ruta_del_archivo, sep="\s+", header=None)
# variable de columnas
cols = ["x","y", nombre_archivo]
# asignar columnas
data.columns = cols
# delimitar valores de x
data = data.loc[data["x"] >= 672496]
# delimitar valores de y
data = data.loc[data["y"] >= 2834425]
# delimitar valores de índice
data = data.loc[data[nombre_archivo] >= 0]
nombre_archivo_exportar = "{}/{}_pp.csv".format(path_guardar_archivo, nombre_archivo)
data.to_csv(nombre_archivo_exportar, index=False)
print(nombre_archivo_exportar)
# if
if __name__ == '__main__':
main()
``` |
{
"source": "jorgemauricio/proyectoCaborca",
"score": 3
} |
#### File: proyectoCaborca/algoritmos/algoritmo.py
```python
import os
import urllib.request
import time
from time import gmtime, strftime
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
from numpy import meshgrid
from scipy.interpolate import griddata as gd
from netCDF4 import Dataset
import numpy as np
import pandas as pd
# programa principal
def main():
# descargar información
print("Iniciar descarga de archivos")
iniciarDescarga()
# procesamiento información
print("Iniciar procesamiento de archivos")
iniciarProcesamiento()
def iniciarDescarga():
# ***** constantes
URL_DESCARGA = "http://satepsanone.nesdis.noaa.gov/pub/FIRE/GBBEPx"
# elementos
arrayElementos = ['bc','co', 'co2','oc','pm25','so2']
# Mac /Users/jorgemauricio/Documents/Research/proyectoCaborca
# Linux /home/jorge/Documents/Research/proyectoCaborca
URL_CARPETA = "/Users/jorgemauricio/Documents/Research/proyectoCaborca"
# fecha actual
fechaActual = strftime("%Y-%m-%d")
# fecha -1
anio, mes, dia = generarDiaAnterior(fechaActual)
# nombre de la ruta para la descarga
rutaDeCarpetaParaDescarga = '{}/data/{}-{:02d}-{:02d}'.format(URL_CARPETA,anio,mes,dia)
# nombre de la ruta para guardar temporales
rutaDeCarpetaParaTemporales = '{}/temp/{}-{:02d}-{:02d}'.format(URL_CARPETA,anio,mes,dia)
# nombre de la ruta para guardar mapas
rutaDeCarpetaParaMapas = '{}/maps/{}-{:02d}-{:02d}'.format(URL_CARPETA,anio,mes,dia)
# nombre de la ruta para shapes
rutaParaArchivosShapes = '{}/shapes/Estados.shp'.format(URL_CARPETA)
# crear carpeta para descarga
if not os.path.exists(rutaDeCarpetaParaDescarga):
os.mkdir(rutaDeCarpetaParaDescarga)
else:
print("***** Carpeta descarga ya existe")
# crear carpeta para guardar mapas
# crear carpeta para descarga
if not os.path.exists(rutaDeCarpetaParaMapas):
os.mkdir(rutaDeCarpetaParaMapas)
else:
print("***** Carpeta mapas ya existe")
# crear carpeta para guardar archivos temporales
if not os.path.exists(rutaDeCarpetaParaTemporales):
os.mkdir(rutaDeCarpetaParaTemporales)
else:
print("***** Carpeta temporales ya existe")
# cambiar a carpeta de descarga
os.chdir(rutaDeCarpetaParaDescarga)
# ciclo de descarga
for i in arrayElementos:
# crear nombre temporal de archivo a descargar
urlDescarga = "{}/GBBEPx.emis_{}.001.{}{:02d}{:02d}.nc".format(URL_DESCARGA,i,anio,mes,dia)
nombreDelArchivo = "GBBEPx.emis_{}.001.{}{:02d}{:02d}.nc".format(i,anio,mes,dia)
print("***** Descarga de archivo: {}".format(nombreDelArchivo))
descargaArchivo(urlDescarga, nombreDelArchivo)
def descargaArchivo(ud, na):
"""
Función que permite la descarga del archivo indicado
param: ud: url de descarga
param: na: nombre del archivo
"""
urllib.request.urlretrieve(ud, na)
def generarDiaAnterior(f):
"""
Función que permite conocer el día anterior para descargar el archivo
param: f: fecha actual
"""
anio, mes, dia = f.split('-')
anio = int(anio)
mes = int(mes)
dia = int(dia)
dia -= 1
if dia == 0:
mes -= 1
if mes == 0:
anio -= 1
mes = 12
        dia = numeroDeDiasEnElMes(mes, anio)
    return (anio, mes, dia)
def numeroDeDiasEnElMes(m, anio):
    """
    Función que permite saber el número de días en un mes
    param: m: mes actual
    param: anio: año, para aplicar la regla simple de año bisiesto
    """
    if m == 2 and anio % 4 == 0:
        return 29
    elif m == 2 and anio % 4 != 0:
        return 28
    elif m == 1 or m == 3 or m == 5 or m == 7 or m == 8 or m == 10 or m == 12:
        return 31
    else:
        return 30
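# Comprobación rápida de la corrección anterior (comentario añadido):
# generarDiaAnterior('2018-03-01') regresa ahora (2018, 2, 28); la regla simple "% 4"
# trata cada cuarto año como bisiesto, suficiente para fechas actuales pero no para
# años de siglo como 2100.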
def iniciarProcesamiento():
# Mac /Users/jorgemauricio/Documents/Research/proyectoCaborca
# Linux /home/jorge/Documents/Research/proyectoCaborca
URL_CARPETA = "/Users/jorgemauricio/Documents/Research/proyectoCaborca"
# ruta para acceder a los archivos shapes# nombre de la ruta para shapes
rutaParaArchivosShapes = '{}/shapes/Estados'.format(URL_CARPETA)
# coordenadas estaciones
dataEstaciones = pd.read_csv("/Users/jorgemauricio/Documents/Research/proyectoCaborca/data/coordenadas_estaciones.csv")
# fecha actual
fechaActual = strftime("%Y-%m-%d")
# fecha -1
anio, mes, dia = generarDiaAnterior(fechaActual)
# nombre de la ruta para la descarga
rutaDeCarpetaParaElProcesamiento = '{}/data/{}-{:02d}-{:02d}'.format(URL_CARPETA,anio,mes,dia)
# constantes
LONG_MIN = -115.65
LONG_MAX = -107.94
LAT_MIN = 25.41
LAT_MAX = 33.06
# archivos a procesar
listaDeArchivos = [x for x in os.listdir(rutaDeCarpetaParaElProcesamiento) if x.endswith('.nc')]
# ciclo de procesamiento
for archivo in listaDeArchivos:
# nombre del archivo
# nombreArchivo = "GBBEPx.emis_so2.001.20180118.nc"
arrayNombreArchivo = archivo.split(".")
arrayComponente = arrayNombreArchivo[1].split("_")
nombreParaMapa = arrayComponente[1]
rutaArchivo = "{}/{}".format(rutaDeCarpetaParaElProcesamiento, archivo)
# leer el archivo netcdf
dataset = Dataset(rutaArchivo)
# generar las arreglos de las variables
biomass = dataset.variables['biomass'][:]
Latitude = dataset.variables['Latitude'][:]
Longitude = dataset.variables['Longitude'][:]
# variable para generar CSV
dataText = "Long,Lat,Biomass\n"
# procesamiento de información
for i in range(Longitude.shape[0]):
for j in range(Latitude.shape[0]):
tempText = "{},{},{}\n".format(Longitude[i], Latitude[j], biomass[0,j,i])
dataText += tempText
# generar archivo temporal csv
fileName = "{}/temp/{}-{:02d}-{:02d}/{}.csv".format(URL_CARPETA, anio, mes, dia, nombreParaMapa)
textFile = open(fileName, "w")
textFile.write(dataText)
textFile.close()
# leer el archivo temporal csv
data = pd.read_csv(fileName)
# limites longitud > -115.65 y < -107.94
data = data.loc[data['Long'] > LONG_MIN]
data = data.loc[data['Long'] < LONG_MAX]
# limites latitud > 25.41 y < 33.06
data = data.loc[data['Lat'] > LAT_MIN]
data = data.loc[data['Lat'] < LAT_MAX]
# ug/m3 a ppm
data['Biomass'] = data['Biomass'] * 10000000000
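        # Nota (comentario añadido): las emisiones de GBBEPx suelen reportarse como flujo
        # (kg m-2 s-1), así que el factor fijo de 1e10 parece más un escalado para graficar
        # que una conversión literal de ug/m3 a ppm; se deja tal cual, pero la etiqueta de
        # la barra de color debe leerse con esa reserva.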
# obtener valores de x, y
lons = np.array(data['Long'])
lats = np.array(data['Lat'])
#%% iniciar la gráfica
plt.clf()
# agregar locación de estaciones
xC = np.array(dataEstaciones['Long'])
yC = np.array(dataEstaciones['Lat'])
m = Basemap(projection='mill',llcrnrlat=LAT_MIN,urcrnrlat=LAT_MAX,llcrnrlon=LONG_MIN,urcrnrlon=LONG_MAX,resolution='h')
# generar lats, lons
x, y = m(lons, lats)
# numero de columnas y filas
numCols = len(x)
numRows = len(y)
# generar xi, yi
xi = np.linspace(x.min(), x.max(), numCols)
yi = np.linspace(y.min(), y.max(), numRows)
# generar el meshgrid
xi, yi = np.meshgrid(xi, yi)
# generar zi
z = np.array(data['Biomass'])
zi = gd((x,y), z, (xi,yi), method='cubic')
# generar clevs
stepVariable = 1
step = (z.max() - z.min()) / 10
# verificar el valor del intervalo
if step <= 1:
stepVariable = 1
clevs = np.linspace(z.min(), z.max() + stepVariable , 10)
#clevs = [1,2,3,4,5,6,7,8,9,10]
# contour plot
cs = m.contourf(xi,yi,zi, clevs, zorder=5, alpha=0.5, cmap='PuBu')
# agregar archivo shape de estados
m.readshapefile(rutaParaArchivosShapes, 'Estados')
# agregar puntos de estaciones
m.scatter(xC, yC, latlon=True,s=1, marker='o', color='r', zorder=25)
# colorbar
cbar = m.colorbar(cs, location='right', pad="5%")
cbar.set_label('pm')
tituloTemporalParaElMapa = "{} {}-{:02d}-{:02d}".format(nombreParaMapa,anio,mes,dia)
plt.title(tituloTemporalParaElMapa)
# Mac /Users/jorgemauricio/Documents/Research/proyectoGranizo/Maps/{}_{}.png
# Linux /home/jorge/Documents/Research/proyectoGranizo/Maps/{}_{}.png
nombreTemporalParaElMapa = "/Users/jorgemauricio/Documents/Research/proyectoCaborca/maps/{}-{:02d}-{:02d}/{}.png".format(anio, mes, dia, nombreParaMapa)
plt.annotate('@2018 INIFAP', xy=(-109,29), xycoords='figure fraction', xytext=(0.45,0.45), color='g', zorder=50)
plt.savefig(nombreTemporalParaElMapa, dpi=300)
print('****** Genereate: {}'.format(nombreTemporalParaElMapa))
if __name__ == '__main__':
main()
```
#### File: proyectoCaborca/algoritmos/descargaInfo.py
```python
import os
import urllib.request
import time
from time import gmtime, strftime
def main():
print("Init")
def iniciarDescarga():
# Constantes
URL_DESCARGA = "http://satepsanone.nesdis.noaa.gov/pub/FIRE/GBBEPx"
arrayElementos = ['bc','co', 'co2','oc','pm25','so2']
# Mac /Users/jorgemauricio/Documents/Research/proyectoCaborca
# Linux /home/jorge/Documents/Research/proyectoCaborca
URL_CARPETA_DATA = "/Users/jorgemauricio/Documents/Research/proyectoCaborca/data"
# fecha actual
fechaPronostico = strftime("%Y-%m-%d")
# fecha - 1
anio, mes, dia = generarDiaAnterior(fechaPronostico)
# nombre de la ruta para la descarga
rutaDeCarpetaParaDescarga = '{}/{}-{:02d}-{:02d}'.format(URL_CARPETA_DATA,anio,mes,dia)
# crear carpeta apra descarga
os.mkdir(rutaDeCarpetaParaDescarga)
# cambiar a carpeta de descarga
os.chdir(rutaDeCarpetaParaDescarga)
# ciclo de descarga
for i in arrayElementos:
# crear nombre temporal de archivo a descargar
urlDescarga = "{}/GBBEPx.emis_{}.001.{}{:02d}{:02d}.nc".format(URL_DESCARGA,i,anio,mes,dia)
nombreDelArchivo = "GBBEPx.emis_{}.001.{}{:02d}{:02d}.nc".format(i,anio,mes,dia)
print("***** Descarga de archivo: {}".format(nombreDelArchivo))
descargaArchivo(urlDescarga, nombreDelArchivo)
def descargaArchivo(ud, na):
"""
Función que permite la descarga del archivo indicado
param: ud: url de descarga
param: na: nombre del archivo
"""
urllib.request.urlretrieve(ud, na)
def generarDiaAnterior(f):
"""
Función que permite conocer el día anterior para descargar el archivo
param: f: fecha actual
"""
anio, mes, dia = f.split('-')
anio = int(anio)
mes = int(mes)
dia = int(dia)
dia -= 1
if dia == 0:
mes -= 1
if mes == 0:
anio -= 1
mes = 12
        dia = numeroDeDiasEnElMes(mes, anio)
    return (anio, mes, dia)
def numeroDeDiasEnElMes(m, anio):
    """
    Función que permite saber el número de días en un mes
    param: m: mes actual
    param: anio: año, para aplicar la regla simple de año bisiesto
    """
    if m == 2 and anio % 4 == 0:
        return 29
    elif m == 2 and anio % 4 != 0:
        return 28
    elif m == 1 or m == 3 or m == 5 or m == 7 or m == 8 or m == 10 or m == 12:
        return 31
    else:
        return 30
``` |
{
"source": "jorgemauricio/proyectoGranizo",
"score": 2
} |
#### File: proyectoGranizo/algoritmos_procesamiento/algoritmo_procesamiento_hdf5_area_influencia.py
```python
import h5py
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
from numpy import meshgrid
from scipy.interpolate import griddata as gd
import os
# programa principal
def main():
# distancia
DISTANCIA_1KM = 0.0043
# obtener coordenadas cañones antigranizo
dataAntigranizo = pd.read_csv("data/Coordenadas_caniones.csv")
#print(dataAntigranizo.head())
# Nombre del archivo
# filename = 'data/3B-HHR-L.MS.MRG.3IMERG.20180101-S000000-E002959.0000.V05B.HDF5'
# obtener archivos
listaDeArchivos = [x for x in os.listdir('data/hdf5') if x.endswith('HDF5')]
# print(listaDeArchivos)
# ciclo de procesamiento de datos
for nombre in listaDeArchivos:
# ruta temporal de archivo
nombreTemporalArchivo = "data/hdf5/{}".format(nombre)
#lectura del hdf5
f = h5py.File(nombreTemporalArchivo, 'r')
# variable temporal para procesar el hdf5
grid = f['Grid']
# arrays de numpy
lon = np.array(grid['lon'])
lat = np.array(grid['lat'])
precipitation = np.array(grid['precipitationCal'])
# crear la variable que guardara el texto
dataText = "Long,Lat,Prec\n"
for i in range(lon.shape[0]):
for j in range(lat.shape[0]):
tempText = "{},{},{}\n".format(lon[i], lat[j], precipitation[i,j])
dataText += tempText
# generar variables extras
nombreEnArray = nombre.split('.')
# fecha y minutos
tempfecha = nombreEnArray[4]
minutos = nombreEnArray[5]
fecha, temp1, temp2 = tempfecha.split('-')
# guardar a CSV
nombreArchivoParaPandas = guardarCSV(dataText, fecha, minutos)
# leer archivo en pandas
data = pd.read_csv(nombreArchivoParaPandas)
# ciclo de área de influencia
# iniciar con el procesamiento
for i in range(0, dataAntigranizo["Nombre"].count()):
# generar variables para la clasificación de información
# Nombre
nombreEstacion = dataAntigranizo.iloc[i]["Nombre"]
# Latitud
latitud = dataAntigranizo.iloc[i]["Lat"]
# Mes
longitud = dataAntigranizo.iloc[i]["Long"]
longitud_min = longitud - (3 * DISTANCIA_1KM)
longitud_max = longitud + (3 * DISTANCIA_1KM)
latitud_min = latitud - (3 * DISTANCIA_1KM)
latitud_max = latitud + (3 * DISTANCIA_1KM)
# limites longitud > -106.49 y < -97.5
data = data.loc[data['Long'] > -106.49]
data = data.loc[data['Long'] < -97.5]
# limites latitud > 17.43 y < 25.23
data = data.loc[data['Lat'] > 17.43]
data = data.loc[data['Lat'] < 25.23]
x = np.array(data['Long'])
y = np.array(data['Lat'])
# numero de columnas y filas
numCols = len(x)
numRows = len(y)
# generar xi, yi
xi = np.linspace(longitud_min, longitud_max, numCols)
            yi = np.linspace(latitud_min, latitud_max, numRows)
# generar el meshgrid
xi, yi = np.meshgrid(xi, yi)
# generar zi
z = np.array(data['Prec'])
zi = gd((x,y), z, (xi,yi), method='cubic')
fileName = 'temp/test_zi.csv'
np.savetxt(fileName, zi, delimiter=",")
break
# print data
print("***** ",nombreEstacion)
# nombre para eliminar el archivo temporal
nombreTemporalArchivoEliminar = 'temp/{}_{}.csv'.format(fecha, minutos)
os.remove(nombreTemporalArchivoEliminar)
print("Eliminar: {}".format(nombreTemporalArchivoEliminar))
# cerrar archivo hdf5
f.close()
def generarNombreDelMapa(nombre):
"""
Función que genera el título del mapa
param: nombre: nombre del archivo
"""
def guardarCSV(variableTexto, fecha, minutos):
"""
Función que permite guardar una viriable de texto a .csv
param: txt: variable de texto a guardar
"""
fileName = 'temp/{}_{}.csv'.format(fecha, minutos)
textFile = open(fileName, "w")
textFile.write(variableTexto)
textFile.close()
return fileName
if __name__ == '__main__':
main()
``` |
{
"source": "jorgemauricio/rainMap",
"score": 2
} |
#### File: jorgemauricio/rainMap/algoritmoRainAgsWithMun.py
```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
from scipy.interpolate import griddata as gd
#%% read csv
data1 = pd.read_table('data/d1.txt', sep=',')
data2 = pd.read_table('data/d2.txt', sep=',')
data3 = pd.read_table('data/d3.txt', sep=',')
data4 = pd.read_table('data/d4.txt', sep=',')
data5 = pd.read_table('data/d5.txt', sep=',')
#%% make one dataFrame
data = data1.filter(items=['Long', 'Lat','Rain'])
data['Rain2'] = data2['Rain']
data['Rain3'] = data3['Rain']
data['Rain4'] = data4['Rain']
data['Rain5'] = data5['Rain']
data['Acum'] = data['Rain'] + data['Rain2'] + data['Rain3'] + data['Rain4'] + data['Rain5']
#%% get values from Ags
data = data.loc[data['Lat'] > 21.0]
data = data.loc[data['Lat'] < 24.0]
data = data.loc[data['Long'] > -104.0]
data = data.loc[data['Long'] < -100.0]
#%% get x and y values
lons = np.array(data['Long'])
lats = np.array(data['Lat'])
#%% set up plot
plt.clf()
#fig = plt.figure(figsize=(48,24))
m = Basemap(projection='mill',llcrnrlat=21.38,urcrnrlat=22.74,llcrnrlon=-103.05,urcrnrlon=-101.21,resolution='h')
#%% generate lats, lons
x, y = m(lons,lats)
#%% number of cols and rows
numcols = len(x)
numrows = len(y)
#%% generate xi, yi
xi = np.linspace(x.min(), x.max(), numcols)
yi = np.linspace(y.min(), y.max(), numrows)
#%% generate meshgrid
xi, yi = np.meshgrid(xi,yi)
#%% genate zi
z = np.array(data['Rain'])
zi = gd((x,y), z, (xi,yi), method='cubic')
#%% generate clevs
def generateClevs(minV, maxV):
arrayValues = []
step = (maxV - minV) / 10
for i in range(10):
rangeOfValue = int(step * i)
arrayValues.append(rangeOfValue)
return arrayValues
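# Nota (comentario añadido): los niveles de arriba siempre empiezan en 0 e ignoran minV,
# lo cual funciona para lluvia; además, el truncado con int() puede producir niveles
# repetidos cuando el rango de datos es menor a ~10 mm, algo que contourf puede rechazar.
# Un esbozo más seguro sería:
#     clevs = np.linspace(z.min(), z.max(), 10)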
clevs = generateClevs(z.min(), z.max())
#%% contour plot
cs = m.contourf(xi,yi,zi, clevs, zorder=4, alpha=0.5, cmap='Spectral_r')
#%% draw map details
m.drawcoastlines()
#m.drawstates(linewidth=0.7)
m.drawcountries()
#%% read municipios shape file
m.readshapefile('shapes/Municipios', 'Municipios')
#m.readshapefile('shapes/Estados', 'Estados')
#m.drawmapscale(22, -103, 23, -102, 100, units='km', fontsize=14, yoffset=None, barstyle='fancy', labelstyle='simple', fillcolor1='w', fillcolor2='#000000',fontcolor='#000000', zorder=5)
#%% colorbar
cbar = m.colorbar(cs, location='right', pad="5%")
cbar.set_label('mm')
plt.title('Precipitación')
plt.savefig('maps/precipitacionCubic2.png', dpi=300, transparent=True)
plt.show()
```
#### File: jorgemauricio/rainMap/algoritmoRainAll.py
```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
from scipy.interpolate import griddata as gd
from time import gmtime, strftime
import time
#%% fecha del pronostico
fechaPronostico = strftime("%Y-%m-%d")
#%% read csv
data1 = pd.read_table('data/d1.txt', sep=',')
data2 = pd.read_table('data/d2.txt', sep=',')
data3 = pd.read_table('data/d3.txt', sep=',')
data4 = pd.read_table('data/d4.txt', sep=',')
data5 = pd.read_table('data/d5.txt', sep=',')
#%% make one dataFrame
data = data1.filter(items=['Long', 'Lat','Rain'])
data['Rain1'] = data1['Rain']
data['Rain2'] = data2['Rain']
data['Rain3'] = data3['Rain']
data['Rain4'] = data4['Rain']
data['Rain5'] = data5['Rain']
data['Acum'] = data['Rain'] + data['Rain2'] + data['Rain3'] + data['Rain4'] + data['Rain5']
#%% get values from Ags
data = data.loc[data['Lat'] > 21.0]
data = data.loc[data['Lat'] < 24.0]
data = data.loc[data['Long'] > -104.0]
data = data.loc[data['Long'] < -100.0]
#%% get x and y values
lons = np.array(data['Long'])
lats = np.array(data['Lat'])
#%% generate arrayFechas
# Generate Days
arrayFechas = []
tanio, tmes, tdia = fechaPronostico.split('-')
anio = int(tanio)
mes = int(tmes)
dia = int(tdia)
for i in range(0,5,1):
if i == 0:
newDiaString = '{}'.format(dia)
if len(newDiaString) == 1:
newDiaString = '0' + newDiaString
newMesString = '{}'.format(mes)
if len(newMesString) == 1:
newMesString = '0' + newMesString
fecha = '{}'.format(anio)+"-"+newMesString+"-"+newDiaString
arrayFechas.append(fecha)
if i > 0:
dia = dia + 1
if mes == 2 and anio % 4 == 0:
diaEnElMes = 29
elif mes == 2 and anio % 4 != 0:
diaEnElMes = 28
elif mes == 1 or mes == 3 or mes == 5 or mes == 7 or mes == 8 or mes == 10 or mes == 12:
diaEnElMes = 31
elif mes == 4 or mes == 6 or mes == 9 or mes == 11:
diaEnElMes = 30
if dia > diaEnElMes:
mes = mes + 1
dia = 1
if mes > 12:
anio = anio + 1
mes = 1
newDiaString = '{}'.format(dia)
if len(newDiaString) == 1:
newDiaString = '0' + newDiaString
newMesString = '{}'.format(mes)
if len(newMesString) == 1:
newMesString = '0' + newMesString
fecha = '{}'.format(anio)+"-"+newMesString+"-"+newDiaString
arrayFechas.append(fecha)
#%% loop diarios
counterFecha = 0
for i in range(1,6,1):
#%% set up plot
plt.clf()
#fig = plt.figure(figsize=(48,24))
m = Basemap(projection='mill',llcrnrlat=21.3,urcrnrlat=23,llcrnrlon=-103.5,urcrnrlon=-101,resolution='h')
#%% generate lats, lons
x, y = m(lons,lats)
#%% number of cols and rows
numcols = len(x)
numrows = len(y)
#%% generate xi, yi
xi = np.linspace(x.min(), x.max(), numcols)
yi = np.linspace(y.min(), y.max(), numrows)
#%% generate meshgrid
xi, yi = np.meshgrid(xi,yi)
#%% genate zi
tempTitleColumn = "Rain{}".format(i)
z = np.array(data[tempTitleColumn])
zi = gd((x,y), z, (xi,yi), method='cubic')
#%% generate clevs
def generateClevs(minV, maxV):
arrayValues = []
step = (maxV - minV) / 10
for i in range(10):
rangeOfValue = int(step * i)
arrayValues.append(rangeOfValue)
return arrayValues
clevs = generateClevs(z.min(), z.max())
#%% contour plot
cs = m.contourf(xi,yi,zi, clevs, zorder=4, alpha=0.5, cmap='Spectral_r')
#%% draw map details
m.drawcoastlines()
#m.drawstates(linewidth=0.7)
m.drawcountries()
#%% read municipios shape file
m.readshapefile('shapes/Municipios', 'Municipios')
#m.readshapefile('shapes/Estados', 'Estados')
#m.drawmapscale(22, -103, 23, -102, 100, units='km', fontsize=14, yoffset=None, barstyle='fancy', labelstyle='simple', fillcolor1='w', fillcolor2='#000000',fontcolor='#000000', zorder=5)
#%% colorbar
cbar = m.colorbar(cs, location='right', pad="5%")
cbar.set_label('mm')
tempMapTitle = "Precipitación acumulada en 24h (mm)\nPronóstico válido para: {}".format(arrayFechas[counterFecha])
plt.title(tempMapTitle)
tempFileName = "maps/{}.png".format(arrayFechas[counterFecha])
plt.annotate('INIFAP (WRF -EMS)', xy=(-102,22), xycoords='data', xytext=(-102,21), color='g')
plt.savefig(tempFileName, dpi=300, transparent=True)
counterFecha += 1
print('****** Genereate: {}'.format(tempFileName))
#%% generate Acum
#%% set up plot
plt.clf()
#fig = plt.figure(figsize=(48,24))
m = Basemap(projection='mill',llcrnrlat=21.3,urcrnrlat=23,llcrnrlon=-103.5,urcrnrlon=-101,resolution='h')
#%% generate lats, lons
x, y = m(lons,lats)
#%% number of cols and rows
numcols = len(x)
numrows = len(y)
#%% generate xi, yi
xi = np.linspace(x.min(), x.max(), numcols)
yi = np.linspace(y.min(), y.max(), numrows)
#%% generate meshgrid
xi, yi = np.meshgrid(xi,yi)
#%% genate zi
z = np.array(data['Acum'])
zi = gd((x,y), z, (xi,yi), method='cubic')
#%% generate clevs
def generateClevs(minV, maxV):
arrayValues = []
step = (maxV - minV) / 10
for i in range(10):
rangeOfValue = int(step * i)
arrayValues.append(rangeOfValue)
return arrayValues
clevs = generateClevs(z.min(), z.max())
#%% contour plot
cs = m.contourf(xi,yi,zi, clevs, zorder=4, alpha=0.5, cmap='Spectral_r')
#%% draw map details
m.drawcoastlines()
#m.drawstates(linewidth=0.7)
m.drawcountries()
#%% read municipios shape file
m.readshapefile('shapes/Municipios', 'Municipios')
#m.readshapefile('shapes/Estados', 'Estados')
#m.drawmapscale(22, -103, 23, -102, 100, units='km', fontsize=14, yoffset=None, barstyle='fancy', labelstyle='simple', fillcolor1='w', fillcolor2='#000000',fontcolor='#000000', zorder=5)
#%% colorbar
cbar = m.colorbar(cs, location='right', pad="5%")
cbar.set_label('mm')
tempMapTitle = "Precipitación acumulada en 24h (mm)\nPronóstico válido para {} al {}".format(arrayFechas[0],arrayFechas[-1])
plt.title(tempMapTitle)
plt.annotate('INIFAP (WRF -EMS)', xy=(-102,22), xycoords='data', xytext=(-102,21), color='g')
plt.savefig("maps/acum.png", dpi=300, transparent=True)
print('****** Genereate: Acum')
``` |
{
"source": "jorgemauricio/ResearchWheatRust",
"score": 3
} |
#### File: jorgemauricio/ResearchWheatRust/algoritmoEstacionesAnalisis.py
```python
import pandas as pd
import numpy as np
import math
import matplotlib.pyplot as plt
def main():
#%% read data stations
dataStations = pd.read_csv('data/db_sonora.csv')
#%% read data indicencia
dataInc = pd.read_csv('data/incidencia_sonora.csv')
#%% generate new data base
textToDataIncidencia = 'lat,long,problem,incidencia,anio,mes,dia,ciclo,tipo\n'
#%% loop for
for row in dataInc.itertuples():
latProcesamiento = getattr(row,'lat')
longProcesamiento = getattr(row, 'long')
problemProcesamiento = getattr(row,'problem')
incidenciaProcesamiento = getattr(row,'incidencia')
cicloProcesamiento = getattr(row,'ciclo')
anio = getattr(row,'anio')
mes = getattr(row,'mes')
dia = getattr(row,'dia')
arrayFechasProcesamiento = generacionDeFechas(anio, mes, dia)
textToDataIncidencia += "{},{},{},{},{},{},{},{},N\n".format(latProcesamiento, longProcesamiento, problemProcesamiento, incidenciaProcesamiento, anio, mes, dia, cicloProcesamiento)
for i in arrayFechasProcesamiento:
tanio, tmes, tdia = i.split("-")
textToDataIncidencia += "{},{},{},0.0,{},{},{},{},G\n".format(latProcesamiento, longProcesamiento, problemProcesamiento, tanio, tmes, tdia, cicloProcesamiento)
#%% from text to data
tempTitle = 'resultados/db_incidencia.csv'
textFile = open(tempTitle, 'w')
textFile.write(textToDataIncidencia)
textFile.close()
#%% read new data incidencia
dataIncidencia = pd.read_csv('resultados/db_incidencia.csv')
#%% generate punto de rocio
dataStations['dpoint'] = dataStations.apply(lambda x: puntoDeRocio(x['humr'], x['tmed']), axis=1)
#%% generate tmidnight
dataStations['tmidnight'] = dataStations['tmax'] - dataStations['tmin']
#%% dataStations to np arrays
latInc = np.array(dataIncidencia['lat'])
longInc = np.array(dataIncidencia['long'])
problemInc = np.array(dataIncidencia['problem'])
incidenciaInc = np.array(dataIncidencia['incidencia'])
cicloInc = np.array(dataIncidencia['ciclo'])
anioInc = np.array(dataIncidencia['anio'])
mesInc = np.array(dataIncidencia['mes'])
diaInc = np.array(dataIncidencia['dia'])
tipoInc = np.array(dataIncidencia['tipo'])
indexInc = np.array(dataIncidencia.index)
#%% create text for data
textToData = "lat,long,problem,incidencia,anio,mes,dia,ciclo,prec,tmax,tmin,tmed,velvmax,velv,dirvvmax,dirv,radg,humr,et,dpoint,tmidnight,condiciones,tipo\n"
#%% loop
for i in range(len(latInc)):
anio = anioInc[i]
mes = mesInc[i]
dia = diaInc[i]
latitud = latInc[i]
longitud = longInc[i]
incidencia = incidenciaInc[i]
ciclo = cicloInc[i]
problema = problemInc[i]
tipo = tipoInc[i]
dataTemp = dataStations.loc[(dataStations['anio'] == anio) & (dataStations['mes'] == mes) & (dataStations['dia'] == dia)]
dataTemp['distancia'] = dataTemp.apply(lambda x: distanciaPuntoAPunto(x['latitud'], latitud, x['longitud'], longitud), axis=1)
distanciaMinima = dataTemp['distancia'].min()
dataTemp = dataTemp.loc[dataTemp['distancia'] == distanciaMinima]
prec = np.array(dataTemp['prec'])
tmax = np.array(dataTemp['tmax'])
tmin = np.array(dataTemp['tmin'])
tmed = np.array(dataTemp['tmed'])
velvmax = np.array(dataTemp['velvmax'])
velv = np.array(dataTemp['velv'])
dirvvmax = np.array(dataTemp['dirvvmax'])
dirv = np.array(dataTemp['dirv'])
radg = np.array(dataTemp['radg'])
humr = np.array(dataTemp['humr'])
et = np.array(dataTemp['et'])
dpoint = np.array(dataTemp['dpoint'])
tmidnight = np.array(dataTemp['tmidnight'])
condicion = validarCondicion(tmed, tmidnight, dpoint)
textToData += "{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}\n".format(latitud, longitud, problema, incidencia, anio, mes, dia, ciclo, prec[0], tmax[0], tmin[0], tmed[0], velvmax[0], velv[0], dirvvmax[0], dirv[0],radg[0],humr[0],et[0], dpoint[0], tmidnight[0], condicion, tipo)
#%% from text to data
tempTitle = 'resultados/db_join_estaciones_10_25.csv'
textFile = open(tempTitle, 'w')
textFile.write(textToData)
textFile.close()
#%% read data
data = pd.read_csv('resultados/db_join_estaciones_10_25.csv')
#%% generar indice Presencia
data['indicePresencia'] = data['condiciones'].shift(-1) + data['condiciones'].shift(-2) + data['condiciones'].shift(-3) + data['condiciones'].shift(-4)
#%% eliminar tados generados
data = data.loc[data['tipo']== 'N']
#%% generar porcentaje de presencia
data['porcentajePresencia'] = data['indicePresencia'] * 0.25
#%% guardar info
data.to_csv('resultados/db_join_estaciones_10_25_pp.csv', index=False)
#%% Longitud del punto
def distanciaPuntoAPunto(lat1, lat2, long1, long2):
"""
Calcula la distancia entre el punto de incidencia y el punto de la estacion
param: lat1: latitud del punto de incidencia
param: lat2: latitud de la estacion
param: long1: longitud del punto de incidencia
param: long2: longitud de la estacion
"""
dX = (lat2 - lat1) ** 2
dY = (long2 - long1) ** 2
return math.sqrt(dX + dY)
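# Nota (comentario añadido): esta es una distancia euclidiana simple en grados decimales,
# usada solo para elegir la estación más cercana, por lo que la falta de proyección no
# afecta el resultado; como referencia aproximada, 0.05 grados es del orden de 5 km en
# las latitudes de Sonora.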
#%% generate punto de rocio
def puntoDeRocio(hr, t):
"""
Calcula el punto de rocio
param: hr: humedad relativa
param: t: temperatura ambiente
"""
pr = (hr / 100.0)**(1/8.0) * (112 + 0.9 * t) + (0.1 * t) - 112
return pr
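# Ejemplo numérico (comentario añadido): puntoDeRocio(80, 20) da aproximadamente 16.4 °C,
# en línea con la aproximación tipo Magnus para 80 % de humedad relativa a 20 °C, así que
# la fórmula simplificada de arriba se comporta razonablemente en el rango usado aquí.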
def generacionDeFechas(anio, mes, dia):
"""
Genera el arreglo de 4 días previos a la fecha de incidencia
param: anio : año de la incidencia
param: mes : mes de la incidencia
param: dia: dia de la incidencia
"""
arrayFechas = []
for i in range(0,4,1):
dia -= 1
        if dia < 1:
            mes = mes - 1
            if mes < 1:
                mes = 12
                anio -= 1
            if mes == 2 and anio % 4 == 0:
                diasEnMes = 29
            elif mes == 2 and anio % 4 != 0:
                diasEnMes = 28
            elif mes == 1 or mes == 3 or mes == 5 or mes == 7 or mes == 8 or mes == 10 or mes == 12:
                diasEnMes = 31
            else:
                diasEnMes = 30
            dia = diasEnMes
fecha = '{}-{}-{}'.format(anio, mes, dia)
arrayFechas.append(fecha)
return arrayFechas
def validarCondicion(tempPro, tempMid, dwpoint):
"""
Calcular si existen condiciones para la presencia de roya
param: tempPro: temperatura promedio
param: tempMid: temperatura nocturna
param: dwpoint: punto de rocio
"""
if (tempPro >= 10 and tempPro <= 25 ) and (tempMid >= 15 and tempMid <= 20) and dwpoint >= 5:
return 1
else:
return 0
if __name__ == "__main__":
main()
``` |
{
"source": "jorgemb/lotimg",
"score": 4
} |
#### File: jorgemb/lotimg/files.py
```python
import os
import os.path
import tkinter
from tkinter import filedialog
from tkinter.filedialog import askdirectory
def get_image_file_names(parent=None, initial_dir=""):
"""
Asks the user to specify image files to convert.
:param parent: TKinter interface
:param initial_dir: Initial directory to be shown to the user.
:return: List of files picked by the user
"""
root = parent
if parent is None:
root = tkinter.Tk()
root.withdraw()
if initial_dir == "":
initial_dir = os.path.expanduser("~")
opts = {"parent": root,
"filetypes": [("Imagenes", "*.jpg;*.bmp;*.gif;*.png;*.tiff;*.tga")],
"initialdir": initial_dir}
all_files = filedialog.askopenfilenames(**opts)
if len(all_files) == 0:
return []
# No longer necessary to split file names
# file_names = split_file_names(all_files)
file_names = list(all_files)
if parent is None:
root.destroy()
del root
return file_names
def get_names_in_path(path_list):
"""
Returns a list with the name of all the archives in a path.
:param path_list: A path list, as returned by getFileNames
:return: A name list
"""
names = list()
for path in path_list:
name = os.path.split(path)[1]
names.append(name)
return names
def split_file_names(names):
"""
Splits file names from the response given by the selection dialog.
:param names: String with names separated with {}
:return: List with paths as string
"""
first = 0
counter = 0
names_list = list()
for letter in names:
if letter == "{":
first = counter
elif letter == "}":
names_list.append(names[first + 1:counter])
counter += 1
return names_list
def get_save_directory(parent=None, initial_dir=""):
"""
Asks the user to choose a directory where the converted images will be saved.
:param parent: Tkinter parent
:param initial_dir: Initial directory to show to the user.
:return: String with the chosen directory.
"""
root = parent
if parent is None:
root = tkinter.Tk()
root.withdraw()
if initial_dir == "":
initial_dir = os.path.expanduser("~")
opts = {"parent": root,
"initialdir": initial_dir}
directory = askdirectory(**opts)
if parent is None:
root.destroy()
del root
return directory
def change_names_with_rule(names, rule):
"""
Changes the names list using the given rule, which is executed as Python code.
:param names: List of original names
:param rule: Rule to apply to each name
:return: List with changed names
"""
changed = 0
if rule is None:
return [], 0
# Compile rule
if isinstance(rule, str):
try:
rule = compile(rule, "<string>", "exec")
except:
print("changeNames - Bad rule")
return [], changed
# Apply rule to each name
modified_names = names[:]
for n in range(len(modified_names)):
try:
local_variables = {"name": modified_names[n], "n": n}
# TODO: Define allowed builtins
exec(rule, {"__builtins__": {'str': str, 'int': int}}, local_variables)
# TODO: Add verifications in order to see if there's a name clash or blank names.
modified_names[n] = local_variables["name"]
changed += 1
except Exception as e:
# Any exception is logged
# TODO: Log exception
pass
return modified_names, changed
def make_naming_rule(name, digits=4):
"""
Compiles a new naming rule in the form: Name####
:param name: Name to use
:param digits: Amount of digits to use
:return: Compiled rule
"""
rule = "name = '%s' + str(n+1).zfill(%d)\n" % (str(name), digits)
return compile_rule(rule)
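# Hedged usage example (added comment): the rule built above rewrites each name completely,
# extension included, e.g.
#     rule = make_naming_rule("IMG_", 4)
#     change_names_with_rule(["a.jpg", "b.jpg"], rule)  # -> (["IMG_0001", "IMG_0002"], 2)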
def compile_rule(rule):
"""
Compiles a rule in string format. Basically checks that is valid python format.
:param rule:
:return:
"""
if len(rule) == 0:
return None
try:
c_rule = compile(rule, "<string>", "exec")
return c_rule
except:
return None
``` |
{
"source": "JorgeMemorandum/PythonTest",
"score": 3
} |
#### File: PythonTest/Python_chatting_application/server.py
```python
import socket
import threading
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((socket.gethostname(), 1023))
print(socket.gethostname())
s.listen(5)
clients = []
nickename = []
def Client_Handler(cli):
while True:
try:
reply = cli.recv(1024).decode("utf-8")
if reply == "QUIT":
index_of_cli = clients.index(cli)
nick = nickename[index_of_cli]
nickename.remove(nick)
clients.remove(cli)
BroadCasating(f"{nick} has left the chat room")
print(f"Disconnected with f{nick}")
break
BroadCasating(reply)
except:
index_of_cli = clients.index(cli)
print(index_of_cli)
nick = nickename[index_of_cli]
nickename.remove(nick)
clients.remove(cli)
BroadCasating(f"{nick} has left the chat room")
print(f"Disconnected with {nick}")
break
def BroadCasating(msg):
for client in clients:
client.send(bytes(msg, "utf-8"))
def recieve():
while True:
client_sckt, addr = s.accept()
print(f"Connection has been established {addr}")
client_sckt.send(bytes("NICK", "utf-8"))
nick = client_sckt.recv(1024).decode("utf-8")
nickename.append(nick)
clients.append(client_sckt)
print(f"{nick} has joined the chat room ")
BroadCasating(f"{nick} has joined the chat room say hi !!!")
        threading.Thread(target=Client_Handler, args=(client_sckt,), daemon=True).start()
recieve()
s.close()
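# Protocol sketch for a matching client (an assumption inferred from the handlers
# above): on connect the server sends "NICK", the client answers with its nickname,
# plain UTF-8 messages are then broadcast to every connected client, and sending
# "QUIT" makes the server drop the connection. A minimal client would roughly do:
#   sock = socket.socket(); sock.connect((host, 1023))
#   if sock.recv(1024).decode("utf-8") == "NICK": sock.send(b"my_nick")
#   ... then recv()/send() in two threads for receiving and typing.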
``` |
{
"source": "jorgemf/kaggle_redefining_cancer_treatment",
"score": 2
} |
#### File: src/d2v/doc2vec_eval_doc_prediction.py
```python
import sys
import numpy as np
import tensorflow as tf
from tensorflow.python.training import training_util
from .. import evaluator, metrics
from ..configuration import *
from .doc2vec_train_doc_prediction import doc2vec_prediction_model
from .doc2vec_train_doc_prediction import DocPredictionDataset
class DocPredictionEval(evaluator.Evaluator):
def __init__(self, dataset, log_dir=DIR_D2V_DOC_LOGDIR):
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
super(DocPredictionEval, self).__init__(checkpoints_dir=log_dir,
output_path=os.path.join(log_dir,
dataset.type),
dataset=dataset,
singular_monitored_session_config=config,
infinite_loop=True)
self.best_loss = -1
def model(self, input_vectors, input_gene, input_variation, output_label, batch_size,
embedding_size=EMBEDDINGS_SIZE,
output_classes=9):
logits, targets = doc2vec_prediction_model(input_vectors, input_gene, input_variation,
output_label, batch_size,
is_training=False, embedding_size=embedding_size,
output_classes=output_classes)
loss = tf.nn.softmax_cross_entropy_with_logits(labels=targets, logits=logits)
self.global_step = training_util.get_or_create_global_step()
global_step_increase = tf.assign_add(self.global_step, 1)
self.accumulated_loss = tf.Variable(0.0, dtype=tf.float32, name='accumulated_loss',
trainable=False)
self.accumulated_loss = tf.assign_add(self.accumulated_loss, tf.reduce_sum(loss))
self.prediction = tf.nn.softmax(logits)
self.metrics = metrics.single_label(self.prediction, targets, moving_average=False)
steps = tf.cast(global_step_increase, dtype=tf.float32)
tf.summary.scalar('loss', self.accumulated_loss / (steps * batch_size))
return None
def create_graph(self, dataset_tensor, batch_size):
input_vectors, input_gene, input_variation, output_label = dataset_tensor
self.batch_size = batch_size
return self.model(input_vectors, input_gene, input_variation, output_label, batch_size)
def step(self, session, graph_data, summary_op):
self.num_steps, self.final_metrics, self.final_loss, summary = \
session.run([self.global_step, self.metrics, self.accumulated_loss, summary_op])
return summary
def after_create_session(self, session, coord):
super(DocPredictionEval, self).after_create_session(session, coord)
def end(self, session):
super(DocPredictionEval, self).end(session)
cm = self.final_metrics['confusion_matrix']
data_size = self.num_steps * self.batch_size
loss = self.final_loss / data_size
print('Loss: {}'.format(loss))
print('Confusion matrix:')
for r in cm:
print('\t'.join([str(x) for x in r]))
if self.best_loss < 0 or loss < self.best_loss:
self.best_loss = loss
self.copy_checkpoint_as_best()
class DocPredictionInference(evaluator.Evaluator):
def __init__(self, dataset, log_dir=DIR_D2V_DOC_LOGDIR):
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
super(DocPredictionInference, self).__init__(checkpoints_dir=log_dir,
output_path=os.path.join(log_dir,
dataset.type),
dataset=dataset,
singular_monitored_session_config=config,
infinite_loop=False)
def model(self, input_vectors, input_gene, input_variation, batch_size,
embedding_size=EMBEDDINGS_SIZE, output_classes=9):
self.global_step = training_util.get_or_create_global_step()
logits, _ = doc2vec_prediction_model(input_vectors, input_gene, input_variation,
None, batch_size,
is_training=False, embedding_size=embedding_size,
output_classes=output_classes)
global_step_increase = tf.assign_add(self.global_step, 1)
with tf.control_dependencies([global_step_increase]):
self.prediction = tf.nn.softmax(logits)
return self.prediction
def end(self, session):
pass
def create_graph(self, dataset_tensor, batch_size):
input_vectors, input_gene, input_variation, _ = dataset_tensor
return self.model(input_vectors, input_gene, input_variation, batch_size)
def after_create_session(self, session, coord):
super(DocPredictionInference, self).after_create_session(session, coord)
print('ID,class1,class2,class3,class4,class5,class6,class7,class8,class9')
def step(self, session, graph_data, summary_op):
step, predictions = session.run([self.global_step, self.prediction])
predictions = predictions[0]
        predictions = [p + 0.01 for p in predictions]  # smooth the predictions so mistakes are penalized less
sum = np.sum(predictions)
predictions = [p / sum for p in predictions]
print('{},{}'.format(step, ','.join(['{:.3f}'.format(x) for x in predictions])))
return None
if __name__ == '__main__':
import logging
logging.getLogger().setLevel(logging.INFO)
if len(sys.argv) > 1 and sys.argv[1] == 'val':
# get validation error
evaluator = DocPredictionEval(dataset=DocPredictionDataset(type='val'),
log_dir=os.path.join(DIR_D2V_DOC_LOGDIR))
evaluator.run()
elif len(sys.argv) > 1 and sys.argv[1] == 'test':
        # generate predictions for the stage 2 test set
evaluator = DocPredictionInference(dataset=DocPredictionDataset(type='stage2_test'),
log_dir=os.path.join(DIR_D2V_DOC_LOGDIR, 'val'))
evaluator.run()
elif len(sys.argv) > 1 and sys.argv[1] == 'train':
        # get training-set error
evaluator = DocPredictionEval(dataset=DocPredictionDataset(type='train'),
log_dir=os.path.join(DIR_D2V_DOC_LOGDIR))
evaluator.run()
```
#### File: src/d2v/doc2vec_train_eval_word_embeds.py
```python
import tensorflow as tf
import csv
import time
from datetime import timedelta
import shutil
import sys
from tensorflow.contrib import layers
from .. import trainer
from .doc2vec_train_word_embeds import Doc2VecDataset
from ..rnn.text_classification_train import _load_embeddings
from tensorflow.python.training import training_util
from ..configuration import *
class Doc2VecTrainerEval(trainer.Trainer):
"""
Helper class to run the training and create the model for the training. See trainer.Trainer for
more details.
"""
def __init__(self, dataset, log_dir=DIR_D2V_EVAL_LOGDIR):
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
super(Doc2VecTrainerEval, self).__init__(log_dir, dataset=dataset,
monitored_training_session_config=config,
log_step_count_steps=1000,
save_summaries_steps=1000)
def model(self,
input_doc, input_words, output_label, batch_size,
vocabulary_size=VOCABULARY_SIZE,
embedding_size=EMBEDDINGS_SIZE,
context_size=D2V_CONTEXT_SIZE,
num_negative_samples=D2V_NEGATIVE_NUM_SAMPLES,
learning_rate_initial=D2V_LEARNING_RATE_INITIAL,
learning_rate_decay=D2V_LEARNING_RATE_DECAY,
learning_rate_decay_steps=D2V_LEARNING_RATE_DECAY_STEPS):
self.global_step = training_util.get_or_create_global_step()
# inputs/outputs
input_doc = tf.reshape(input_doc, [batch_size])
input_words = tf.reshape(input_words, [batch_size, context_size])
output_label = tf.reshape(output_label, [batch_size, 1])
# embeddings
word_embeddings = _load_embeddings(vocabulary_size, embedding_size,
filename_prefix='word_embeddings',
from_dir=DIR_DATA_DOC2VEC)
self.word_embeddings = tf.constant(value=word_embeddings,
shape=[vocabulary_size, embedding_size],
dtype=tf.float32, name='word_embeddings')
self.doc_embeddings = tf.get_variable(shape=[self.dataset.num_docs, embedding_size],
initializer=layers.xavier_initializer(),
dtype=tf.float32, name='doc_embeddings')
words_embed = tf.nn.embedding_lookup(self.word_embeddings, input_words)
        doc_embed = tf.nn.embedding_lookup(self.doc_embeddings, input_doc)
# average the words_embeds
words_embed_average = tf.reduce_mean(words_embed, axis=1)
embed = tf.concat([words_embed_average, doc_embed], axis=1)
# NCE loss
nce_weights = tf.get_variable(shape=[vocabulary_size, embedding_size * 2],
initializer=layers.xavier_initializer(),
dtype=tf.float32, name='nce_weights')
nce_biases = tf.get_variable(shape=[vocabulary_size],
initializer=layers.xavier_initializer(),
dtype=tf.float32, name='nce_biases')
nce_loss = tf.nn.nce_loss(weights=nce_weights, biases=nce_biases,
labels=output_label,
inputs=embed, num_sampled=num_negative_samples,
num_classes=vocabulary_size)
self.loss = tf.reduce_mean(nce_loss)
tf.summary.scalar('loss', self.loss)
# learning rate & optimizer
self.learning_rate = tf.train.exponential_decay(learning_rate_initial, self.global_step,
learning_rate_decay_steps,
learning_rate_decay,
staircase=True, name='learning_rate')
tf.summary.scalar('learning_rate', self.learning_rate)
sgd = tf.train.GradientDescentOptimizer(self.learning_rate)
self.optimizer = sgd.minimize(self.loss, global_step=self.global_step)
return None
def create_graph(self, dataset_tensor, batch_size):
input_doc, input_word, output_label = dataset_tensor
return self.model(input_doc, input_word, output_label, batch_size)
def step(self, session, graph_data):
if self.is_chief:
lr, _, loss, step, self.embeddings_docs = \
session.run([self.learning_rate, self.optimizer, self.loss, self.global_step,
self.doc_embeddings])
if time.time() > self.print_timestamp + 5 * 60:
self.print_timestamp = time.time()
elapsed_time = str(timedelta(seconds=time.time() - self.init_time))
m = 'step: {} loss: {:0.4f} learning_rate = {:0.6f} elapsed seconds: {}'
print(m.format(step, loss, lr, elapsed_time))
current_time = time.time()
embeddings_file = 'doc_eval_embeddings_{}_{}_{}'.format(self.dataset.type,
VOCABULARY_SIZE,
EMBEDDINGS_SIZE)
embeddings_filepath = os.path.join(DIR_DATA_DOC2VEC, embeddings_file)
if not os.path.exists(embeddings_filepath):
self.save_embeddings(self.embeddings_docs)
else:
embeddings_file_timestamp = os.path.getmtime(embeddings_filepath)
# save the embeddings every 30 minutes
if current_time - embeddings_file_timestamp > 30 * 60:
self.save_embeddings(self.embeddings_docs)
else:
session.run([self.optimizer])
def after_create_session(self, session, coord):
self.init_time = time.time()
self.print_timestamp = time.time()
def end(self, session):
if self.is_chief:
self.save_embeddings(self.embeddings_docs)
def save_embeddings(self, doc_embeddings):
print('Saving embeddings in text format...')
embeddings_file = 'doc_eval_embeddings_{}_{}_{}'.format(self.dataset.type,
VOCABULARY_SIZE, EMBEDDINGS_SIZE)
embeddings_filepath = os.path.join(DIR_DATA_DOC2VEC, embeddings_file)
with open(embeddings_filepath, 'w') as f:
writer = csv.writer(f, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
writer.writerows(doc_embeddings)
# copy the embeddings file to the log dir so we can download it from tensorport
if os.path.exists(embeddings_filepath):
shutil.copy(embeddings_filepath, self.log_dir)
if __name__ == '__main__':
import logging
logging.getLogger().setLevel(logging.INFO)
if len(sys.argv) > 1 and sys.argv[1] == 'train_val':
# start the training for eval
trainer = Doc2VecTrainerEval(dataset=Doc2VecDataset(type='val'),
log_dir=os.path.join(DIR_D2V_EVAL_LOGDIR, 'val'))
trainer.run(epochs=D2V_EPOCHS, batch_size=D2V_BATCH_SIZE)
elif len(sys.argv) > 1 and sys.argv[1] == 'train_test':
# start the training for second stage dataset
trainer = Doc2VecTrainerEval(dataset=Doc2VecDataset(type='stage2_test'),
log_dir=os.path.join(DIR_D2V_EVAL_LOGDIR, 'test'))
trainer.run(epochs=D2V_EPOCHS, batch_size=D2V_BATCH_SIZE)
```
#### File: src/rnn/text_classification_dataset.py
```python
import tensorflow as tf
import numpy as np
from itertools import groupby
from ..configuration import *
from ..tf_dataset import TFDataSet
def _padding(arr, pad, token=-1):
len_arr = len(arr)
if len_arr > pad:
return arr[:pad]
elif len_arr < pad:
arr.extend([token] * (pad - len_arr))
return arr
else:
return arr
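# Behaviour sketch of _padding (values assumed for illustration):
#   _padding([1, 2, 3], 5)           -> [1, 2, 3, -1, -1]   (extends in place with `token`)
#   _padding([1, 2, 3, 4, 5, 6], 4)  -> [1, 2, 3, 4]        (truncates to `pad`)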
class TextClassificationDataset(TFDataSet):
"""
Helper class for the dataset. See dataset_filelines.DatasetFilelines for more details.
"""
def __init__(self, type='train', sentence_split=False):
"""
:param str type: type of set, either 'train' or 'test'
:param bool sentence_split: whether to split the doc in sentences or use only words
"""
data_files = os.path.join(DIR_DATA_TEXT_CLASSIFICATION, '{}_set'.format(type))
if type == 'train' or type == 'val':
if sentence_split:
padded_shape = ([None, MAX_WORDS_IN_SENTENCE], [1])
else:
padded_shape = ([None], [1])
padded_values = (-1, -1)
elif type == 'test' or type == 'stage2_test':
if sentence_split:
padded_shape = [None, MAX_WORDS_IN_SENTENCE]
else:
padded_shape = [None]
padded_values = -1
else:
raise ValueError(
'Type can only be train, val, test or stage2_test but it is {}'.format(type))
self.type = type
self.sentence_split = None
if sentence_split:
dict_filename = 'word2vec_dataset_{}_dict'.format(VOCABULARY_SIZE)
dict_filepath = os.path.join(DIR_DATA_WORD2VEC, dict_filename)
with tf.gfile.FastGFile(dict_filepath, 'r') as f:
for line in f:
data = line.split()
symbol = data[0]
if symbol == '.':
self.sentence_split = int(data[1])
break
super(TextClassificationDataset, self).__init__(name=type,
data_files_pattern=data_files,
min_queue_examples=100,
shuffle_size=10000)
# TODO TF <= 1.2.0 have an issue with padding with more than one dimension
# padded_shapes=padded_shape,
# padded_values=padded_values)
def _map(self, example_serialized):
variant_padding = 20
sentence_padding = [-1] * MAX_WORDS_IN_SENTENCE
def _parse_sequence(example_serialized, dataset_type):
example_serialized = example_serialized.split('||')
example_class = example_serialized[0].strip()
example_gene = np.int32(example_serialized[1].strip())
example_variant = list([np.int32(w) for w in example_serialized[2].strip().split()])
sequence = list([np.int32(w) for w in example_serialized[3].strip().split()])
example_variant = _padding(example_variant, variant_padding)
if self.sentence_split is not None:
groups = groupby(sequence, lambda x: x == self.sentence_split)
sequence = list([list(g) for k, g in groups if not k])
for i, sentence in enumerate(sequence):
sentence = _padding(sentence, MAX_WORDS_IN_SENTENCE)
sequence[i] = np.asarray(sentence, dtype=np.int32)
sequence_begin = _padding(sequence, MAX_SENTENCES, token=sentence_padding)
sequence_end = _padding(list(reversed(sequence)), MAX_SENTENCES, token=sentence_padding)
else:
sequence_begin = _padding(sequence, MAX_WORDS)
                sequence_end = _padding(list(reversed(sequence)), MAX_WORDS)
if dataset_type == 'train' or dataset_type == 'val':
# first class is 1, last one is 9
data_sample_class = int(example_class) - 1
return [
np.asarray(sequence_begin, dtype=np.int32),
np.asarray(sequence_end, dtype=np.int32),
np.int32(example_gene),
np.asarray(example_variant, dtype=np.int32),
np.int32(data_sample_class),
]
elif dataset_type == 'test' or dataset_type == 'stage2_test':
return [
np.asarray(sequence_begin, dtype=np.int32),
np.asarray(sequence_end, dtype=np.int32),
np.int32(example_gene),
np.asarray(example_variant, dtype=np.int32),
]
else:
raise ValueError()
if self.type == 'train' or self.type == 'val':
sequence_begin, sequence_end, gene, variant, result_class = \
tf.py_func(lambda x: _parse_sequence(x, self.type), [example_serialized],
[tf.int32, tf.int32, tf.int32, tf.int32, tf.int32], stateful=True)
elif self.type == 'test' or self.type == 'stage2_test':
sequence_begin, sequence_end, gene, variant = \
tf.py_func(lambda x: _parse_sequence(x, self.type), [example_serialized],
[tf.int32, tf.int32, tf.int32, tf.int32], stateful=True)
result_class = None
else:
raise ValueError()
# TODO for TF <= 1.2.0 set shape because of padding
if self.sentence_split is not None:
sequence_begin = tf.reshape(sequence_begin, [MAX_SENTENCES, MAX_WORDS_IN_SENTENCE])
else:
sequence_end = tf.reshape(sequence_end, [MAX_WORDS])
gene = tf.reshape(gene, [1])
variant = tf.reshape(variant, [variant_padding])
if result_class is not None:
result_class = tf.reshape(result_class, [1])
return sequence_begin, sequence_end, gene, variant, result_class
else:
return sequence_begin, sequence_end, gene, variant
```
#### File: src/rnn/text_classification_train.py
```python
import tensorflow as tf
import csv
import time
from datetime import timedelta
import sys
import numpy as np
from tensorflow.python.training import training_util
from tensorflow.contrib import slim
from tensorflow.python.ops import variables as tf_variables
from ..configuration import *
from .. import trainer, evaluator, metrics
from ..task_spec import get_task_spec
from .text_classification_dataset import TextClassificationDataset
def _load_embeddings(vocabulary_size, embeddings_size,
filename_prefix='embeddings', from_dir=DIR_DATA_WORD2VEC):
embeddings = []
embeddings_file = '{}_{}_{}'.format(filename_prefix, vocabulary_size, embeddings_size)
with open(os.path.join(from_dir, embeddings_file), 'r') as file:
reader = csv.reader(file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
for row in reader:
embeddings.append([float(r) for r in row])
return embeddings
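# Assumed on-disk layout of the embeddings file read above: one CSV row of floats per
# word id, so row i is the embedding vector of word id i, e.g. "0.013,-0.241,0.088,..."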
class TextClassificationTrainer(trainer.Trainer):
"""
Helper class to run the training and create the model for the training. See trainer.Trainer for
more details.
"""
def __init__(self, dataset, text_classification_model, log_dir=DIR_TC_LOGDIR,
use_end_sequence=False, task_spec=None, max_steps=None):
self.text_classification_model = text_classification_model
self.use_end_sequence = use_end_sequence
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
super(TextClassificationTrainer, self).__init__(log_dir=log_dir, dataset=dataset,
task_spec=task_spec, max_steps=max_steps,
monitored_training_session_config=config)
def model(self, input_text_begin, input_text_end, gene, variation, expected_labels, batch_size,
vocabulary_size=VOCABULARY_SIZE, embeddings_size=EMBEDDINGS_SIZE, output_classes=9):
# embeddings
embeddings = _load_embeddings(vocabulary_size, embeddings_size)
# global step
self.global_step = training_util.get_or_create_global_step()
# model
with slim.arg_scope(self.text_classification_model.model_arg_scope()):
outputs = self.text_classification_model.model(input_text_begin, input_text_end,
gene, variation, output_classes,
embeddings=embeddings,
batch_size=batch_size)
# loss
targets = self.text_classification_model.targets(expected_labels, output_classes)
self.loss = self.text_classification_model.loss(targets, outputs)
tf.summary.scalar('loss', self.loss)
# learning rate
self.optimizer, self.learning_rate = \
self.text_classification_model.optimize(self.loss, self.global_step)
if self.learning_rate is not None:
tf.summary.scalar('learning_rate', self.learning_rate)
# metrics
self.metrics = metrics.single_label(outputs['prediction'], targets)
# saver to save the model
self.saver = tf.train.Saver()
# check a nan value in the loss
self.loss = tf.check_numerics(self.loss, 'loss is nan')
return None
def create_graph(self, dataset_tensor, batch_size):
input_text_begin, input_text_end, gene, variation, expected_labels = dataset_tensor
if not self.use_end_sequence:
input_text_end = None
return self.model(input_text_begin, input_text_end, gene, variation, expected_labels, batch_size)
def step(self, session, graph_data):
lr, _, loss, step, metrics = \
session.run([self.learning_rate, self.optimizer, self.loss, self.global_step,
self.metrics])
if self.is_chief and time.time() > self.print_timestamp + 5 * 60:
self.print_timestamp = time.time()
elapsed_time = str(timedelta(seconds=time.time() - self.init_time))
m = 'step: {} loss: {:0.4f} learning_rate = {:0.6f} elapsed seconds: {} ' \
'precision: {} recall: {} accuracy: {}'
logging.info(m.format(step, loss, lr, elapsed_time,
metrics['precision'], metrics['recall'], metrics['accuracy']))
def after_create_session(self, session, coord):
self.init_time = time.time()
self.print_timestamp = time.time()
class TextClassificationTest(evaluator.Evaluator):
"""Evaluator for distributed training"""
def __init__(self, dataset, text_classification_model, output_path, log_dir=DIR_TC_LOGDIR,
use_end_sequence=False,max_steps=None):
self.use_end_sequence = use_end_sequence
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
super(TextClassificationTest, self).__init__(checkpoints_dir=log_dir, dataset=dataset,
output_path=output_path, max_steps=max_steps,
singular_monitored_session_config=config)
self.text_classification_model = text_classification_model
self.eval_writer = tf.summary.FileWriter(log_dir)
def model(self, input_text_begin, input_text_end, gene, variation, expected_labels, batch_size,
vocabulary_size=VOCABULARY_SIZE, embeddings_size=EMBEDDINGS_SIZE, output_classes=9):
# embeddings
embeddings = _load_embeddings(vocabulary_size, embeddings_size)
# model
with slim.arg_scope(self.text_classification_model.model_arg_scope()):
outputs = self.text_classification_model.model(input_text_begin, input_text_end,
gene, variation, output_classes,
embeddings=embeddings,
batch_size=batch_size,
training=False)
# loss
targets = self.text_classification_model.targets(expected_labels, output_classes)
loss = self.text_classification_model.loss(targets, outputs)
self.accumulated_loss = tf.Variable(0.0, dtype=tf.float32, name='accumulated_loss',
trainable=False)
self.accumulated_loss = tf.assign_add(self.accumulated_loss, loss)
step = tf.Variable(0, dtype=tf.int32, name='eval_step', trainable=False)
step_increase = tf.assign_add(step, 1)
self.loss = self.accumulated_loss / tf.cast(step_increase, dtype=tf.float32)
tf.summary.scalar('loss', self.loss)
# metrics
self.metrics = metrics.single_label(outputs['prediction'], targets, moving_average=False)
return None
def create_graph(self, dataset_tensor, batch_size):
input_text_begin, input_text_end, gene, variation, expected_labels = dataset_tensor
if not self.use_end_sequence:
input_text_end = None
graph_data = self.model(input_text_begin, input_text_end, gene, variation,
expected_labels, batch_size)
return graph_data
def step(self, session, graph_data, summary_op):
summary, self.loss_result, self.metrics_results = \
session.run([summary_op, self.loss, self.metrics])
return summary
def end(self, session):
super(TextClassificationTest, self).end(session)
chk_step = int(self.lastest_checkpoint.split('-')[-1])
m = 'step: {} loss: {:0.4f} precision: {} recall: {} accuracy: {}'
logging.info(m.format(chk_step, self.loss_result, self.metrics_results['precision'],
self.metrics_results['recall'], self.metrics_results['accuracy']))
def after_create_session(self, session, coord):
# checkpoints_file = os.path.join(self.checkpoints_dir, 'checkpoint')
# alt_checkpoints_dir = '{}_tp'.format(self.checkpoints_dir)
# import glob
# files = glob.glob('{}/model.ckpt-*.data-*'.format(alt_checkpoints_dir))
# chk_step = 0
# for f in files:
# num = f.split('model.ckpt-')[1].split('.')[0]
# num = int(num)
# if chk_step == 0 or num < chk_step:
# chk_step = num
# if chk_step != 0:
# ckpt_files = glob.glob('{}/model.ckpt-{}.data-*'.format(alt_checkpoints_dir, chk_step))
# ckpt_files = [x.split('/')[-1] for x in ckpt_files]
# for f in ckpt_files + ['model.ckpt-{}.index', 'model.ckpt-{}.meta']:
# f = f.format(chk_step)
# os.rename(os.path.join(alt_checkpoints_dir, f), os.path.join(self.checkpoints_dir, f))
# with open(checkpoints_file, 'wb') as f:
# f.write('model_checkpoint_path: "./model.ckpt-{}"\n'.format(chk_step))
# f.write('all_model_checkpoint_paths: "./model.ckpt-{}"\n'.format(chk_step))
super(TextClassificationTest, self).after_create_session(session, coord)
# with open(checkpoints_file, 'wb') as f:
# f.write('model_checkpoint_path: "./model.ckpt-"\n')
# f.write('all_model_checkpoint_paths: "./model.ckpt-"\n')
class TextClassificationEval(evaluator.Evaluator):
"""Evaluator for text classification"""
def __init__(self, dataset, text_classification_model, output_path, log_dir=DIR_TC_LOGDIR,
use_end_sequence=False):
self.use_end_sequence = use_end_sequence
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
super(TextClassificationEval, self).__init__(checkpoints_dir=log_dir,
output_path=output_path,
infinite_loop=False,
singular_monitored_session_config=config)
self.dataset = dataset
self.text_classification_model = text_classification_model
def model(self, input_text_begin, input_text_end, gene, variation, batch_size,
vocabulary_size=VOCABULARY_SIZE, embeddings_size=EMBEDDINGS_SIZE, output_classes=9):
# embeddings
embeddings = _load_embeddings(vocabulary_size, embeddings_size)
# global step
self.global_step = training_util.get_or_create_global_step()
self.global_step = tf.assign_add(self.global_step, 1)
# model
with tf.control_dependencies([self.global_step]):
with slim.arg_scope(self.text_classification_model.model_arg_scope()):
self.outputs = self.text_classification_model.model(input_text_begin, input_text_end,
gene, variation, output_classes,
embeddings=embeddings,
batch_size=batch_size,
training=False)
# restore only the trainable variables
self.saver = tf.train.Saver(var_list=tf_variables.trainable_variables())
return self.outputs
def create_graph(self, dataset_tensor, batch_size):
input_text_begin, input_text_end, gene, variation = dataset_tensor
if not self.use_end_sequence:
input_text_end = None
return self.model(input_text_begin, input_text_end, gene, variation, batch_size)
def after_create_session(self, session, coord):
super(TextClassificationEval, self).after_create_session(session, coord)
print('ID,class1,class2,class3,class4,class5,class6,class7,class8,class9')
def step(self, session, graph_data, summary_op):
step, predictions = session.run([self.global_step, self.outputs['prediction']])
predictions = predictions[0]
        predictions = [p + 0.01 for p in predictions]  # smooth the predictions so mistakes are penalized less
sum = np.sum(predictions)
predictions = [p / sum for p in predictions]
print('{},{}'.format(step, ','.join(['{:.3f}'.format(x) for x in predictions])))
return None
import logging
def main(model, name, sentence_split=False, end_sequence=USE_END_SEQUENCE, batch_size=TC_BATCH_SIZE):
"""
Main method to execute the text_classification models
:param ModelSimple model: object model based on ModelSimple
:param str name: name of the model
    :param bool sentence_split: whether to split the dataset in sentences or not,
only used for hatt model
:param bool end_sequence: whether to use or not the end of the sequences in the models
:param int batch_size: batch size of the models
"""
logging.getLogger().setLevel(logging.INFO)
log_dir = '{}_{}'.format(DIR_TC_LOGDIR, name)
if len(sys.argv) > 1 and sys.argv[1] == 'test':
# execute the test with the train dataset
dataset = TextClassificationDataset(type='train', sentence_split=sentence_split)
tester = TextClassificationTest(dataset=dataset, text_classification_model=model,
log_dir=log_dir,
output_path=os.path.join(log_dir, 'test_trainset'),
use_end_sequence=end_sequence)
tester.run()
elif len(sys.argv) > 1 and sys.argv[1] == 'validate':
dataset = TextClassificationDataset(type='val', sentence_split=sentence_split)
tester = TextClassificationTest(dataset=dataset, text_classification_model=model,
log_dir=log_dir,
output_path=os.path.join(log_dir, 'validate'),
use_end_sequence=end_sequence)
tester.run()
elif len(sys.argv) > 1 and sys.argv[1] == 'eval':
# evaluate the data of the test dataset. We submit this output to kaggle
dataset = TextClassificationDataset(type='test', sentence_split=sentence_split)
evaluator = TextClassificationEval(dataset=dataset, text_classification_model=model,
log_dir=log_dir,
output_path=os.path.join(log_dir, 'test'),
use_end_sequence=end_sequence)
evaluator.run()
elif len(sys.argv) > 1 and sys.argv[1] == 'eval_stage2':
# evaluate the data of the test dataset. We submit this output to kaggle
dataset = TextClassificationDataset(type='stage2_test', sentence_split=sentence_split)
evaluator = TextClassificationEval(dataset=dataset, text_classification_model=model,
log_dir=log_dir,
output_path=os.path.join(log_dir, 'test_stage2'),
use_end_sequence=end_sequence)
evaluator.run()
else:
# training
task_spec = get_task_spec(with_evaluator=USE_LAST_WORKER_FOR_VALIDATION)
if task_spec.join_if_ps():
# join if it is a parameters server and do nothing else
return
with(tf.gfile.Open(os.path.join(DIR_DATA_TEXT_CLASSIFICATION, 'train_set'))) as f:
max_steps = int(TC_EPOCHS * len(f.readlines()) / batch_size)
if task_spec.is_evaluator():
dataset = TextClassificationDataset(type='val', sentence_split=sentence_split)
# evaluator running in the last worker
tester = TextClassificationTest(dataset=dataset, text_classification_model=model,
log_dir=log_dir,
output_path=os.path.join(log_dir, 'val'),
use_end_sequence=end_sequence,
max_steps=max_steps)
tester.run()
else:
dataset = TextClassificationDataset(type='train', sentence_split=sentence_split)
trainer = TextClassificationTrainer(dataset=dataset, text_classification_model=model,
log_dir=log_dir, use_end_sequence=end_sequence,
task_spec=task_spec, max_steps=max_steps)
trainer.run(epochs=TC_EPOCHS, batch_size=batch_size)
```
#### File: src/w2v/word2vec_process_data.py
```python
import re
import io
from ..configuration import *
from ..preprocess_data import load_csv_wikipedia_gen, load_csv_dataset, group_count
def load_word2vec_data(filename, vocabulary_size=VOCABULARY_SIZE):
"""
Loads the word2vec data: the dictionary file with the relation of the word with its int id,
the dataset as a list of list of ids and a dictionary with the frequency of the words in
the dataset.
:param str filename: name of the file with the word2vec dataset, the dictionary file and the
frequency file are generated with the suffixes _dict and _count based on this fiename
:return (Dict[str,int], List[List[int]], Dict[int,float]: a tuple with a dictionary for the
symbols and a list of sentences where each sentence is a list of int and a dictionary with
the frequencies of the words
"""
filename = '{}_{}'.format(filename, vocabulary_size)
filename_dict = '{}_dict'.format(filename)
filename_count = '{}_count'.format(filename)
with open(os.path.join(DIR_DATA_WORD2VEC, filename_dict), 'r') as f:
symbols_dict = { }
for line in f.readlines():
data = line.split()
symbol = data[0]
encoded = int(data[1])
symbols_dict[symbol] = encoded
encoded_text = []
with open(os.path.join(DIR_DATA_WORD2VEC, filename), 'r') as f:
for line in f.readlines():
encoded_text.append([int(word) for word in line.split()])
total_count = 0
with open(os.path.join(DIR_DATA_WORD2VEC, filename_count), 'r') as f:
word_frequency_dict = { }
for line in f.readlines():
line = line.strip()
if len(line) > 0:
data = line.split(' = ')
symbol = symbols_dict[data[0].strip()]
count = int(data[1].strip())
                if symbol in word_frequency_dict:
word_frequency_dict[symbol] += count
else:
word_frequency_dict[symbol] = count
total_count += count
for key in word_frequency_dict.keys():
word_frequency_dict[key] = float(word_frequency_dict[key]) / total_count
return symbols_dict, encoded_text, word_frequency_dict
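# On-disk formats implied by the parsing above (the example lines are illustrative):
#   <filename>_<vocab>_dict  : "symbol id" per line,       e.g. "mutation 42"
#   <filename>_<vocab>       : encoded sentence per line,  e.g. "12 7 42 3 1"
#   <filename>_<vocab>_count : "symbol = count" per line,  e.g. "mutation = 1375"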
def load_or_create_dataset_word2vec(filename, text_samples, vocabulary_size=VOCABULARY_SIZE):
"""
Loads the dataset for word2vec or creates it from the text_samples if the file doesn't exits.
Three files are generated: dictionary file, word frequency file and dataset file. The dataset
file already contains the ids instead of the words. The vocabulary is truncated to fit the
vocabulary size, the less frequent words are transformed into the unknown id (the number 0)
:param str filename: filename prefix of the dataset
:param List[List[str]] text_samples: list of list of words
:param int vocabulary_size: the final size of the vocabulary
:return (Dict[str,int], List[List[int]], Dict[int,float]: a tuple with a dictionary for the
symbols and a list of sentences where each sentence is a list of int and a dictionary with
the frequencies of the words
"""
filename_vocabulary = '{}_{}'.format(filename, vocabulary_size)
filename_dict = '{}_dict'.format(filename_vocabulary)
filename_count = '{}_count'.format(filename_vocabulary)
filename_tsv = '{}.tsv'.format(filename_vocabulary)
if not os.path.exists(os.path.join(DIR_DATA_WORD2VEC, filename_vocabulary)):
text_lines = []
for text_sample in text_samples:
sentences = re.split('\n|\s\.\s', text_sample.lower())
for sentence in sentences:
words = sentence.split()
if len(words) > 0:
words.append('.')
words = list([word.strip().lower() for word in words])
text_lines.append(words)
symbols_count = group_count(text_lines)
symbols_ordered_by_count = sorted(symbols_count.items(), key=lambda x: x[1], reverse=True)
total_symbols = len(symbols_ordered_by_count)
print('Total symbols: {}'.format(total_symbols))
print('Vocabulary size: {}'.format(vocabulary_size))
unknown_symbols = symbols_ordered_by_count[vocabulary_size - 1:]
known_symbols = symbols_ordered_by_count[:vocabulary_size - 1]
symbols_dict = { }
for symbol, _ in unknown_symbols:
symbols_dict[symbol] = 0
counter = 1
for symbol, _ in known_symbols:
symbols_dict[symbol] = counter
counter += 1
encoded_text = []
words_count = 0
for sentence in text_lines:
words_count += len(sentence)
encoded_sentence = []
for word in sentence:
encoded_sentence.append(symbols_dict[word])
if len(encoded_sentence) > 0:
encoded_text.append(encoded_sentence)
print('Total sentences: {}'.format(len(text_lines)))
print('Total words: {}'.format(words_count))
print('words/sentences: {}'.format(float(words_count) / float(len(text_lines))))
with io.open(os.path.join(DIR_DATA_WORD2VEC, filename_dict), 'w', encoding='utf8') as f:
for symbol in sorted(symbols_dict.keys()):
f.write(u'{} {}\n'.format(symbol, symbols_dict[symbol]))
with io.open(os.path.join(DIR_DATA_WORD2VEC, filename_vocabulary), 'w',
encoding='utf8') as f:
for sentence in encoded_text:
f.write(u' '.join(str(word) for word in sentence))
f.write(u'\n')
with io.open(os.path.join(DIR_DATA_WORD2VEC, filename_count), 'w', encoding='utf8') as f:
for symbol, count in symbols_ordered_by_count:
f.write(u'{} = {}\n'.format(symbol, count))
with io.open(os.path.join(DIR_DATA_WORD2VEC, filename_tsv), 'w', encoding='utf8') as f:
f.write(u'word\tcount\tid\n')
f.write(u'_UNKOWN_\t{}\t0\n'.format(len(unknown_symbols)))
pos = 1
for symbol, count in known_symbols:
f.write(u'{}\t{}\t{}\n'.format(symbol, count, pos))
pos += 1
return load_word2vec_data(filename)
if __name__ == '__main__':
import logging
logging.getLogger().setLevel(logging.INFO)
print('Generate text for Word2Vec model... (without using test data)')
train_set = load_csv_dataset('train_set_numbers_parsed')
genes_articles = load_csv_wikipedia_gen('wikipedia_mutations_parsed')
word2vec_text = [s.text for s in genes_articles] + [s.text for s in train_set]
symbols_dict, word2vec_encoded_text, word_frequency = load_or_create_dataset_word2vec(
'word2vec_dataset', word2vec_text)
``` |
{
"source": "jorgemfm27/AutoDepGraph",
"score": 2
} |
#### File: autodepgraph/tests/write_test_graphs.py
```python
import autodepgraph as adg
from autodepgraph.graph_v2 import AutoDepGraph_DAG
import autodepgraph.node as n
import networkx as nx
import os
############################################
# This file generates and saves predefined
# graphs used in the tests.
############################################
test_dir = os.path.join(adg.__path__[0], 'tests', 'test_data')
# # Write graph for test_graph
# nodeA = n.CalibrationNode('A')
# nodeB = n.CalibrationNode('B')
# nodeE = n.CalibrationNode('E')
# nodeF = n.CalibrationNode('F')
# a = g.Graph('new_graph')
# a.add_node(nodeA)
# a.add_node(nodeB)
# a.add_node(nodeE)
# a.add_node(nodeF)
# nodeA.state('good')
# nodeB.state('bad')
# nodeE.state('unknown')
# a.save_graph(os.path.join(test_dir, 'test_graph_new_nodes.yaml'))
# # Write graph for test_visualization
# nodeC = n.CalibrationNode('C')
# nodeD = n.CalibrationNode('D')
# nodeG = n.CalibrationNode('G')
# nodeH = n.CalibrationNode('H')
# a.add_node('C')
# a.add_node('D')
# a.add_node('G')
# a.add_node('H')
# nodeA.state('good')
# nodeB.state('needs calibration')
# nodeD.state('bad')
# nodeE.state('unknown')
# nodeF.state('good')
# nodeC.state('active')
# nodeD.add_parent('C')
# nodeD.add_parent('A')
# nodeE.add_parent('D')
# nodeG.add_parent('F')
# nodeC.add_parent('B')
# nodeB.add_parent('A')
# nodeG.add_parent('D')
# nodeH.add_parent('G')
# for node in a.nodes.values():
# node.calibrate_function('test_calibration_True')
# nodeC.calibrate_function('NotImplementedCalibration')
# nodeF.calibrate_function('NotImplementedCalibration')
# a.save_graph(os.path.join(test_dir, 'test_graph_states.yaml'))
# for node in a.nodes.values():
# node.close()
# a.close()
# def create_rabi_sims_example_graph():
# rmg = g.Graph('Rabi_sims_example_graph')
# nodenames = ['mixer_offset', 'mixer_skewness', 'frequency_spec',
# 'Amplitude_coarse', 'SSRO', 'frequency_ramsey', 'Motzoi',
# 'Amplitude_fine', 'High_fidelity_single_qubit_gates',
# 'High_readout_fidelity', 'Chevron_amp', 'Trotter_chevron',
# 'Photon_meter', 'Wigner_tomography', 'Rabi_simulation']
# for nodename in nodenames:
# rmg.add_node(nodename)
# rmg.nodes[nodename].calibrate_function('test_calibration_True')
# rmg.nodes[nodename].check_function('always_needs_calibration')
# rmg.mixer_skewness.parents(['mixer_offset'])
# rmg.Amplitude_coarse.parents(['frequency_spec',
# 'mixer_offset', 'mixer_skewness'])
# rmg.SSRO.parents(['Amplitude_coarse'])
# rmg.frequency_ramsey.parents(['Amplitude_coarse', 'SSRO',
# 'frequency_spec'])
# rmg.Motzoi.parents(['Amplitude_coarse', 'frequency_ramsey'])
# rmg.Amplitude_fine.parents(['Motzoi'])
# rmg.High_fidelity_single_qubit_gates.parents(
# ['Amplitude_fine', 'Motzoi', 'frequency_ramsey'])
# rmg.High_readout_fidelity.parents(
# ['High_fidelity_single_qubit_gates', 'SSRO'])
# rmg.Chevron_amp.parents(
# ['High_fidelity_single_qubit_gates', 'High_readout_fidelity'])
# rmg.Trotter_chevron.parents(['Chevron_amp'])
# rmg.Photon_meter.parents(['Trotter_chevron'])
# rmg.Wigner_tomography.parents(['Photon_meter'])
# rmg.Rabi_simulation.parents(
# ['Wigner_tomography', 'Photon_meter',
# 'High_fidelity_single_qubit_gates', 'High_readout_fidelity'])
# rmg.save_graph(os.path.join(test_dir, 'rabi_sims_example_graph.yaml'))
# create_rabi_sims_example_graph()
class Qubit():
def __init__(self, name):
self.name = name
class Device():
def __init__(self, name, qubits):
self.name = name
self.qubits = qubits
def create_dep_graph_single_qubit(self):
print(self.name+' DAG')
DAG = AutoDepGraph_DAG(name=self.name+' DAG')
DAG.add_node(self.name+' resonator frequency')
DAG.add_node(self.name+' frequency coarse')
DAG.add_edge(self.name+' frequency coarse',
self.name+' resonator frequency')
DAG.add_node(self.name+' mixer offsets drive')
DAG.add_node(self.name+' mixer skewness drive')
DAG.add_node(self.name+' mixer offsets readout')
DAG.add_node(self.name + ' pulse amplitude coarse')
DAG.add_edge(self.name + ' pulse amplitude coarse',
self.name+' frequency coarse')
DAG.add_edge(self.name + ' pulse amplitude coarse',
self.name+' mixer offsets drive')
DAG.add_edge(self.name + ' pulse amplitude coarse',
self.name+' mixer skewness drive')
DAG.add_edge(self.name + ' pulse amplitude coarse',
self.name+' mixer offsets readout')
DAG.add_node(self.name+' T1')
DAG.add_node(self.name+' T2-echo')
DAG.add_node(self.name+' T2-star')
DAG.add_edge(self.name + ' T1', self.name+' pulse amplitude coarse')
DAG.add_edge(self.name + ' T2-echo', self.name+' pulse amplitude coarse')
DAG.add_edge(self.name + ' T2-star', self.name+' pulse amplitude coarse')
DAG.add_node(self.name+' frequency fine')
DAG.add_edge(self.name + ' frequency fine',
self.name+' pulse amplitude coarse')
DAG.add_node(self.name + ' pulse amplitude med')
DAG.add_edge(self.name + ' pulse amplitude med',
self.name+' frequency fine')
DAG.add_node(self.name + ' optimal weights')
DAG.add_edge(self.name + ' optimal weights',
self.name+' pulse amplitude med')
DAG.add_node(self.name+' gates restless')
DAG.add_edge(self.name + ' gates restless', self.name+' optimal weights')
# easy to implement a check
DAG.add_node(self.name+' frequency fine')
DAG.add_node(self.name+' room temp. dist. corr.')
DAG.add_node(self.name+' cryo dist. corr.')
DAG.add_edge(self.name+' cryo dist. corr.',
self.name+' room temp. dist. corr.')
DAG.add_edge(self.name+' cryo dist. corr.',
self.name+' gates restless')
# DAG.add_node(self.name+' ')
return DAG
def create_dep_graph_device(self, dags):
DAG = nx.compose_all(dags)
DAG.add_node(self.name+' multiplexed readout')
DAG.add_node(self.name+' resonator frequencies coarse')
DAG.add_node('AWG8 MW-staircase')
DAG.add_node('AWG8 Flux-staircase')
DAG.add_node('Chevron q0-q1')
DAG.add_node('Chevron q1-q2')
DAG.add_node('CZ q0-q1')
DAG.add_node('CZ q1-q2')
DAG.add_edge('CZ q0-q1', 'Chevron q0-q1')
DAG.add_edge('CZ q1-q2', 'Chevron q1-q2')
DAG.add_edge('CZ q0-q1', 'q0 cryo dist. corr.')
DAG.add_edge('CZ q0-q1', 'q1 cryo dist. corr.')
DAG.add_edge('CZ q1-q2', 'q1 cryo dist. corr.')
DAG.add_edge('CZ q1-q2', 'q2 cryo dist. corr.')
DAG.add_edge('Chevron q0-q1', 'q0 gates restless')
DAG.add_edge('Chevron q0-q1', 'AWG8 Flux-staircase')
DAG.add_edge('Chevron q0-q1', 'q1 gates restless')
DAG.add_edge('Chevron q0-q1', self.name+' multiplexed readout')
DAG.add_edge('Chevron q1-q2', 'q0 gates restless')
DAG.add_edge('Chevron q1-q2', 'AWG8 Flux-staircase')
DAG.add_edge('Chevron q1-q2', 'q1 gates restless')
DAG.add_edge('Chevron q1-q2', self.name+' multiplexed readout')
for qubit in self.qubits:
q_name = qubit.name
DAG.add_edge(q_name+' room temp. dist. corr.',
'AWG8 Flux-staircase')
DAG.add_edge(self.name+' multiplexed readout',
q_name+' optimal weights')
DAG.add_edge(q_name+' resonator frequency',
self.name+' resonator frequencies coarse')
DAG.add_edge(q_name+' pulse amplitude coarse', 'AWG8 MW-staircase')
return DAG
qubit_names = ['q0', 'q1', 'q2']
qubits = []
dags = []
for qubit_name in qubit_names:
qi = Qubit(qubit_name)
qubits.append(qi)
dep_graph = create_dep_graph_single_qubit(qi)
dags.append(dep_graph)
device = Device('3 qubit device', qubits)
device_dag = create_dep_graph_device(device, dags)
for node, attrs in device_dag.nodes(True):
attrs['calibrate_function'] = 'autodepgraph.node_functions.calibration_functions.test_calibration_True'
device_dag.draw_svg()
fn = os.path.join(test_dir, 'three_qubit_graph.yaml')
nx.readwrite.write_yaml(device_dag, fn)
``` |
{
"source": "jorgeMFS/Defacer",
"score": 3
} |
#### File: src/WRFiles/FileWriter.py
```python
import datetime
import os
import sys
import time
import nibabel as nib
import nrrd
import numpy as np
from pydicom.dataset import Dataset, FileDataset
class FileWriter(object):
def __init__(self, file_name_path, np_array):
self.save_file_path, self.save_file_name = self.path_file(file_name_path)
self.np_array = np_array
self.determine_format()
@staticmethod
def path_file(file_and_path):
path, filename = os.path.split(file_and_path)
return path, filename
def determine_format(self):
_, file_extension = os.path.splitext(self.save_file_name)
if file_extension == '.nrrd':
self.nrrd_writer()
elif file_extension == ".dcm":
self.dcm_writer()
elif file_extension == ".nii":
self.nibabel_writer()
else:
print("not supported file, please try dcm, nifti or nrrd file formats")
sys.exit(0)
def nrrd_writer(self):
os.chdir(self.save_file_path)
nrrd.write(self.save_file_name, self.np_array)
def nibabel_writer(self):
os.chdir(self.save_file_path)
save_img = nib.Nifti1Image(self.np_array, affine=np.eye(4))
nib.save(save_img, filename=self.save_file_name)
def dcm_writer(self):
if self.np_array.ndim == 2:
os.chdir(self.save_file_path)
self.write_dicom(self.np_array, self.save_file_name)
elif self.np_array.ndim == 3:
os.chdir(self.save_file_path)
self.create_multiple_files()
@staticmethod
def write_dicom(pixel_array, dcm_name):
"""
Input:
            pixel_array: 2D numpy ndarray to write as pixel data.
            If pixel_array has more than 2 dimensions, an error is raised.
            dcm_name: string name for the output DICOM file.
"""
file_meta = Dataset()
file_meta.MediaStorageSOPClassUID = 'Secondary Capture Image Storage'
file_meta.MediaStorageSOPInstanceUID = '1.3.6.1.4.1.9590.100.1.1.111165684411017669021768385720736873780'
file_meta.ImplementationClassUID = '1.3.6.1.4.1.9590.100.1.0.100.4.0'
ds = FileDataset(dcm_name, {}, file_meta=file_meta, preamble=b"\0" * 128)
ds.Modality = 'WSD'
ds.ContentDate = str(datetime.date.today()).replace('-', '')
ds.ContentTime = str(time.time()) # milliseconds since the epoch
ds.StudyInstanceUID = '1.3.6.1.4.1.9590.100.1.1.124313977412360175234271287472804872093'
ds.SeriesInstanceUID = '1.3.6.1.4.1.9590.100.1.1.369231118011061003403421859172643143649'
ds.SOPInstanceUID = '1.3.6.1.4.1.9590.100.1.1.111165684411017669021768385720736873780'
ds.SOPClassUID = 'Secondary Capture Image Storage'
        ds.SecondaryCaptureDeviceManufacturer = 'Python 3'
# These are the necessary imaging components of the FileDataset object.
ds.SamplesPerPixel = 1
ds.PhotometricInterpretation = "MONOCHROME2"
ds.PixelRepresentation = 0
ds.HighBit = 15
ds.BitsStored = 16
ds.BitsAllocated = 16
        ds.SmallestImagePixelValue = b'\x00\x00'
        ds.LargestImagePixelValue = b'\xff\xff'
ds.Columns = pixel_array.shape[0]
ds.Rows = pixel_array.shape[1]
if pixel_array.dtype != np.uint16:
pixel_array = pixel_array.astype(np.uint16)
        ds.PixelData = pixel_array.tobytes()
ds.save_as(dcm_name)
return
def create_multiple_files(self):
array = self.np_array
string_file_name, file_extension = os.path.splitext(self.save_file_name)
index = np.where(array.shape == np.min(array.shape))[0]
if index == 0:
for it in range(np.min(array.shape)):
px_array = array[it, :, :]
ss = string_file_name + '_' + str(it) + file_extension
self.write_dicom(pixel_array=px_array, dcm_name=ss)
if index == 1:
print(np.max(array.shape))
for it in range(np.min(array.shape)):
px_array = array[:, it, :]
ss = string_file_name + '_' + str(it) + file_extension
self.write_dicom(pixel_array=px_array, dcm_name=ss)
if index == 2:
for it in range(np.min(array.shape)):
px_array = array[:, :, it]
ss = string_file_name + '_' + str(it) + file_extension
self.write_dicom(pixel_array=px_array, dcm_name=ss)
def main():
x = np.arange(16).reshape(16, 1)
pixel_array = (x + x.T) * 32
pixel_array = np.tile(pixel_array, (16, 16))
file_path = '/home/mikejpeg/IdeaProjects/Defacer/image/results/dicom/pretty.dcm'
FileWriter(file_name_path=file_path, np_array=pixel_array)
return 0
if __name__ == '__main__':
sys.exit(main())
``` |
{
"source": "JorgeMGuimaraes/escalonador",
"score": 3
} |
#### File: JorgeMGuimaraes/escalonador/disco.py
```python
from processo import Processo, estado
estado_disco = {
'idle': 0,
'ocupado': 1
}
## class
class Disco:
def __init__(self, id_disco) -> None:
self.id_disco = id_disco
        # Refactor: replace this state with a check for whether processo is None, as in the processor
self.estado = estado_disco['idle']
self.processo_atual = None
return
def define_estado(self, novo_estado: int) -> None:
self.estado = novo_estado
return
def disponivel(self) -> bool:
return self.processo_atual == None
def gravar(self) -> bool:
if self.disponivel():
return False
return True
def define_processo_atual(self, processo: Processo) -> None:
self.processo_atual = processo
        # TODO: check whether this print can be removed
print(f'Disco {self.id_disco} foi atribuido ao Processo {processo.id_processo}')
return
def pode_agregar(self, processo: Processo) -> bool:
if self.processo_atual is None and processo.discos > 0:
processo.discos -= 1
self.define_processo_atual(processo)
return True
return False
def liberar(self):
self.processo_atual = None
return
```
#### File: JorgeMGuimaraes/escalonador/estado_processo.py
```python
from processo import Processo
from typing import List
## classes
class EstadoProcesso:
def __init__(self, atual: List[Processo], proximo: List[Processo], estado:int, msg_padrao: str) -> None:
self.atual = atual
self.proximo = proximo
self.msg_padrao = msg_padrao
self.estado = estado
return
def neste(self, processo: Processo):
self.proximo.append(processo)
self.atual.remove(processo)
processo.estado = self.estado
return f'Processo {processo.id_processo}: {self.msg_padrao}'
```
#### File: JorgeMGuimaraes/escalonador/sistema_operacional.py
```python
import io
from escalonador import Escalonador
from processo import Processo
from recursos import Recursos
from typing import List
## classes
## definicoes
def processa_entrada(quantum: int) -> List[Processo]:
filename ='entrada'
contador_processos = 0
processos = []
with open(file=filename,mode='r') as arquivo:
for linha in arquivo:
if linha.startswith('#') or linha == '\n' or linha == '':
continue
valores = linha.split(',')
chegada = int(valores[0].strip())
prioridade = int(valores[1].strip())
duracao = int(valores[2].strip())
memoria = int(valores[3].strip())
discos = int(valores[4].strip())
entrada = Processo(
id_processo = contador_processos,
chegada = chegada,
prioridade = prioridade,
duracao = duracao,
memoria = memoria,
discos = discos )
entrada.define_quantum(quantum)
processos.append(entrada)
contador_processos += 1
return processos
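# Example line of the 'entrada' file, assuming the comma-separated order parsed
# above (arrival, priority, duration, memory, disks); lines starting with '#' and
# blank lines are skipped:
#   0, 1, 5, 512, 2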
def imprime_header(quantum: int) -> None:
espacamento = ' '
s = 'Estado Atual:\n'
s += f'Quantum: {quantum}\n'
print(s)
return
def main():
    # TODO: read these values from user input
quantum = 2
contador_quanta = 0
cpus = 4
discos = 4
memoria = 16 * 1024
recursos = Recursos(cpus, discos, memoria)
escalonador = Escalonador(recursos, processa_entrada(quantum))
escalonador.imprime_processos_recebidos()
print()
while escalonador.continuar_processando():
imprime_header(contador_quanta)
escalonador.ativa_prontos_suspensos()
escalonador.executa_leitura_gravacao()
escalonador.reserva_discos()
escalonador.instanciar_processos(contador_quanta)
escalonador.admitir_processos()
escalonador.despachar()
escalonador.processar_threads()
escalonador.atualiza_estado_idle(contador_quanta)
recursos.imprime_processadores()
recursos.imprime_memoria()
recursos.imprime_discos()
escalonador.imprime_log_processos()
escalonador.imprime_andamento(contador_quanta)
contador_quanta += 1
_ = input()
return
## Main program
if __name__ == "__main__": main()
``` |
{
"source": "JorgeMiguelGomes/pimpmypython",
"score": 2
} |
#### File: JorgeMiguelGomes/pimpmypython/app.py
```python
import random
import pandas as pd
import numpy as np
# Import Dash and Dash Bootstrap Components
import dash
import dash_daq as daq
from dash import Input, Output, dcc, html
import dash_bootstrap_components as dbc
# _________________________________________
import plotly.express as px
app = dash.Dash(
external_stylesheets=[dbc.themes.CYBORG],
#suppress_callback_exceptions=True,
meta_tags=[{"name": "viewport", "content": "width=device-width, initial-scale=1"}],
)
app.layout = html.Div(
dbc.Container(
html.Div([
html.H4('PIMP MY PYTHON'),
html.Div(html.H4(id='ip')),
html.Div(html.H2(id='a')),
html.Div(id='b'),
html.Div(id='c'),
html.Div(id='d'),
dcc.Graph(id='graph'),
dcc.Interval(
id='component',
interval=1*1000, # in milliseconds
n_intervals=0
)
])
)
)
@app.callback(
Output(component_id="ip",component_property="children"),
Output(component_id="a",component_property="children"),
Output(component_id="b",component_property="children"),
Output(component_id="c",component_property="children"),
Output(component_id="d",component_property="children"),
Output(component_id="graph",component_property="figure"),
Input(component_id="component",component_property="n_intervals")
)
def update(component):
a = str(random.randint(1, 254))
b = str(random.randint(1, 254))
c = str(random.randint(1, 254))
d = str(random.randint(1, 254))
dot = "."
ip = a + dot + b + dot + c + dot + d
a1 = str(random.randint(1, 1400))
b1 = str(random.randint(1, 700))
c1 = str(random.randint(1, 800))
d1 = str(random.randint(1, 1240))
    fig = px.pie(names=['resistance', 'threat', 'exploit', 'profit'], values=[a1, b1, c1, d1], template='plotly_dark', hole=0.6, color_discrete_sequence=px.colors.sequential.Viridis)
return ip,a,b,c,d,fig
if __name__ == "__main__":
app.run_server(debug=True, port=8888)
# END APP
``` |
{
"source": "jorgemira/Calories",
"score": 3
} |
#### File: calories/deploy/gunicorn.py
```python
import multiprocessing
from abc import ABC
from gunicorn.app.base import BaseApplication
from calories.main import cfg
def number_of_workers():
return (multiprocessing.cpu_count() * 2) + 1
class StandaloneApplication(BaseApplication, ABC):
def __init__(self, app, options=None):
self.options = options or {}
self.application = app
super().__init__()
def load_config(self):
config = {
key: value
for key, value in self.options.items()
if key in self.cfg.settings and value is not None
}
for key, value in config.items():
self.cfg.set(key.lower(), value)
def load(self):
return self.application
def run(app):
options = {
"bind": "%s:%s" % (cfg.ADDRESS, cfg.PORT),
"workers": number_of_workers(),
"keyfile": cfg.KEYFILE, # 'certs/server.key',
"certfile": cfg.CERTFILE, # 'certs/server.crt',
"ca-certs": cfg.CACERTS, # 'certs/ca-crt.pem',
"cert-reqs": 2,
}
return StandaloneApplication(app, options).run()
```
#### File: controller/helpers/__init__.py
```python
class RequestError(Exception):
def __init__(self, message: str, code: int):
self.message = message
self.code = code
class BadRequest(RequestError):
def __init__(self, message: str):
super().__init__(message, 400)
class Unauthorized(RequestError):
def __init__(self, message: str):
super().__init__(message, 401)
class Forbidden(RequestError):
def __init__(self, message: str):
super().__init__(message, 403)
class NotFound(RequestError):
def __init__(self, message: str):
super().__init__(message, 404)
class Conflict(RequestError):
def __init__(self, message: str):
super().__init__(message, 409)
```
#### File: main/controller/meals.py
```python
from flask import abort
from calories.main import logger
from calories.main.controller import ResponseType, RequestBodyType
from calories.main.controller.helpers import RequestError
from calories.main.controller.helpers.auth import is_allowed
from calories.main.controller.helpers.meals import (
get_meals,
get_meal,
crt_meal,
updt_meal,
dlt_meals,
)
from calories.main.models.models import Role
@is_allowed(roles_allowed=[Role.USER], only_allow_self=True)
def read_meals(
user: str,
username: str,
filter_results: str = None,
items_per_page: int = 10,
page_number: int = 1,
) -> ResponseType:
"""Read the list of meals for a given user
:param user: The user that requests the action
:param username: User to return al his meals
:param filter_results: Filter string for the results
:param items_per_page: Number of items of every page, defaults to 10
:param page_number: Page number of the results defaults to 1
"""
try:
data, pagination = get_meals(
username, filter_results, items_per_page, page_number
)
except RequestError as e:
data = pagination = None
logger.warning(e.message)
abort(e.code, e.message)
logger.info(
f"User: '{user}', read meals for user: '{username}',"
f" filter: '{filter_results}', itemsPerPage: '{items_per_page}',"
f" pageNumber: '{page_number}'"
)
return (
{
"status": 200,
"title": "Success",
"detail": f"List of meals succesfully read for user: '{username}'",
"data": data,
"num_pages": pagination.num_pages,
"total_result": pagination.total_results,
},
200,
)
@is_allowed(roles_allowed=[Role.USER], only_allow_self=True)
def read_meal(user: str, username: str, meal_id: int) -> ResponseType:
"""Read a meal that belongs to a user
:param user: The user that requests the action
:param username: Username to read his meal
:param meal_id: Id of the meal
"""
try:
data = get_meal(username, meal_id)
except RequestError as e:
data = None
logger.warning(e.message)
abort(e.code, e.message)
logger.info(f"User: '{user}' read meal: '{meal_id}' of user: '{username}'")
return (
{
"status": 200,
"title": "Success",
"detail": f"Meal: '{meal_id}' of user: '{username}' succesfully read",
"data": data,
},
200,
)
@is_allowed(roles_allowed=[Role.USER], only_allow_self=True)
def create_meal(user: str, username: str, body: RequestBodyType) -> ResponseType:
"""Create a meal
:param user: User that requests the action
:param username: User whose meal is going to be created
:param body: Information about the new meal
:return: A success message if the meal was found or a 400 error
if any parameter was wrong
"""
try:
data = crt_meal(username, body)
except RequestError as e:
data = None
logger.warning(e.message)
abort(e.code, e.message)
logger.info(
f"User: '{user}' created meal: '{data.get('id')}' for user: '{username}'"
)
return (
{
"status": 201,
"title": "Success",
"detail": f"Meal: '{data.get('id')}' of user: '{username}' "
f"succesfully created",
"data": data,
},
201,
)
@is_allowed(roles_allowed=[Role.USER], only_allow_self=True)
def update_meal(
user: str, username: str, meal_id: int, body: RequestBodyType
) -> ResponseType:
"""Update a meal
:param user: User that requests the action
:param username: User whose meal is going to be updated
:param meal_id: Meal id to update
:param body: New body of the updated meal
:return: A success message if the meal was found or a 404 error if either the user or the meal does not exist
"""
try:
data = updt_meal(username, meal_id, body)
except RequestError as e:
data = None
logger.warning(e.message)
abort(e.code, e.message)
logger.info(f"User: '{user}' updated meal: '{meal_id}' for user: '{username}'")
return (
{
"status": 200,
"title": "Success",
"detail": f"Meal: '{meal_id}' of user: '{username}' succesfully updated",
"data": data,
},
200,
)
@is_allowed(roles_allowed=[Role.USER], only_allow_self=True)
def delete_meal(user: str, username: str, meal_id: int) -> ResponseType:
"""Delete a meal
:param user: User that requests the action
:param username: User whose meal is going to be deleted
:param meal_id: Meal id to delete
:return: A success message if the meal was found or a 404 error if either the user
or the meal does not exist
"""
try:
dlt_meals(username, meal_id)
except RequestError as e:
logger.warning(e.message)
abort(e.code, e.message)
logger.info(f"User: '{user}' deleted meal: '{meal_id}' for user: '{username}'")
return (
{
"status": 200,
"title": "Success",
"detail": f"Meal: '{meal_id}' of user: '{username}' succesfully deleted",
"data": None,
},
200,
)
```
#### File: main/models/models.py
```python
from enum import Enum
from sqlalchemy.ext.hybrid import hybrid_property
from werkzeug.security import generate_password_hash
from calories.main import db, ma
class Role(str, Enum):
"""Enum for Role types"""
USER = "USER"
MANAGER = "MANAGER"
ADMIN = "ADMIN"
class User(db.Model):
"""Database Model Class for users"""
__tablename__ = "user"
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(32), unique=True)
_password = db.Column("password", db.String(128))
name = db.Column(db.String(128))
email = db.Column(db.String(128))
role = db.Column(db.Enum(Role))
daily_calories = db.Column(db.Integer)
meals = db.relationship(
"Meal",
backref="user",
cascade="all, delete, delete-orphan",
single_parent=True,
)
@hybrid_property
def password(self):
return self._password
@password.setter
def password(self, plaintext: str):
self._password = generate_password_hash(plaintext)
class Meal(db.Model):
"""Database Model Class for meals"""
__tablename__ = "meal"
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey("user.id"))
date = db.Column(db.Date)
time = db.Column(db.Time)
name = db.Column(db.String(128))
grams = db.Column(db.Integer, default=0)
description = db.Column(db.String)
calories = db.Column(db.Integer, default=0)
under_daily_total = db.Column(db.Boolean, default=True)
class UserSchema(ma.ModelSchema):
class Meta:
model = User
sqla_session = db.session
class MealSchema(ma.ModelSchema):
class Meta:
model = Meal
sqla_session = db.session
```
#### File: main/util/external_apis.py
```python
import requests
from calories.main import cfg, logger
def calories_from_nutritionix(meal: str) -> int:
"""Query Nutritionix API to get the calories information of a meal
:param meal: Name of the meal
:return: The calories of the specified meal
"""
auth = {"appId": cfg.NTX_APP_ID, "appKey": cfg.NTX_API_KEY}
try:
food_info = requests.get(
"/".join([cfg.NTX_BASE_URL, "search", meal]),
params={**auth, "results": "0:1"},
).json()
except (requests.RequestException, ValueError) as e:
logger.warning(
f"Exception happened while trying to get calories for '{meal}': {e} "
)
return 0
if not food_info.get("total_hits", None):
return 0
try:
meal_id = food_info["hits"][0]["fields"]["item_id"]
except LookupError as e:
logger.warning(
f"Exception happened while trying to get calories for '{meal}': {e} "
)
return 0
try:
food_info = requests.get(
"/".join([cfg.NTX_BASE_URL, "item"]), params={**auth, "id": meal_id}
).json()
except (requests.RequestException, ValueError) as e:
logger.warning(
f"Exception happened while trying to get calories for '{meal}': {e} "
)
return 0
logger.info(f"Successfully read calories from Nutrionix API for meal: '{meal}'")
return food_info.get("nf_calories", 0)
```
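As a rough illustration only (not a test that ships with the repository), the sketch below exercises `calories_from_nutritionix` without hitting the real Nutritionix service by patching `requests.get`. The JSON payload shapes are assumptions that mirror the fields the function reads (`total_hits`, `hits[0]["fields"]["item_id"]`, `nf_calories`), and it assumes the `calories` package and its configuration can be imported.
```python
# Minimal sketch: fake both Nutritionix calls made by calories_from_nutritionix.
from unittest import mock
from calories.main.util.external_apis import calories_from_nutritionix
def fake_get(url, params=None):
    response = mock.Mock()
    if url.endswith("/item"):
        # Second call: item detail carrying the calories field the function returns
        response.json.return_value = {"nf_calories": 95}
    else:
        # First call: search result with a single hit and an item id
        response.json.return_value = {
            "total_hits": 1,
            "hits": [{"fields": {"item_id": "abc123"}}],
        }
    return response
with mock.patch("calories.main.util.external_apis.requests.get", side_effect=fake_get):
    assert calories_from_nutritionix("apple") == 95
```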
#### File: main/util/filters.py
```python
import re
from fiql_parser import parse_str_to_expression, FiqlException
from sqlalchemy_filters import apply_filters, apply_pagination
from sqlalchemy_filters.exceptions import FieldNotFound, BadFilterFormat
from werkzeug.exceptions import abort
def to_fiql(filter_spec: str):
"""Transform a filter specification in out format to Fiql"""
transformations = [
(r"\beq\b", r"=="),
(r"\bne\b", r"!="),
(r"\bgt\b", r"=gt="),
(r"\bge\b", r"=ge="),
(r"\blt\b", r"=lt="),
(r"\ble\b", r"=le="),
(r"\band\b", r";"),
(r"\bor\b", r","),
(r'"', r"'"),
(r"\s", r""),
]
for a, b in transformations:
filter_spec = re.sub(a, b, filter_spec, flags=re.I)
return parse_str_to_expression(filter_spec).to_python()
def to_sql_alchemy(filter_spec):
"""Transform a Fiql object into a SQLAlchemy filter expression
:param filter_spec: Fiql object containing the filter
:type filter_spec: dict
:return: The SQLAlchemy filter expression
"""
if None in filter_spec:
raise FiqlException
if isinstance(filter_spec, tuple):
if "'" in filter_spec[2]:
value = filter_spec[2].replace("'", "")
else:
try:
value = int(filter_spec[2])
except ValueError:
value = filter_spec[2]
return {"field": filter_spec[0], "op": filter_spec[1], "value": value}
if isinstance(filter_spec, list):
return {filter_spec[0].lower(): [to_sql_alchemy(e) for e in filter_spec[1:]]}
def apply_filter(
query: str, filter_spec: str = None, page_size: int = 10, page_number: int = 1
):
"""Apply filtering and pagination to any given query
:param query: Query to apply filtering to
:param filter_spec: Filter to apply to the query
:param page_size: Page size used for pagination
:param page_number: Page number used for pagination
:return: The query after applying the filter and pagination options selected, and
the pagination information
:rtype: tuple
"""
if filter_spec:
try:
query = apply_filters(query, to_sql_alchemy(to_fiql(filter_spec)))
except (FiqlException, FieldNotFound, BadFilterFormat):
abort(400, f"Filter '{filter_spec}' is invalid")
query, pagination = apply_pagination(
query, page_number=page_number, page_size=page_size
)
return query, pagination
```
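To make the filter pipeline above more concrete, the sketch below traces one of the filter strings used in the repository's own tests through `to_fiql` and `to_sql_alchemy`. The dict shown in the comment is an approximation of the structure handed to `sqlalchemy-filters`, not captured program output.
```python
# Illustrative trace of the filter helpers defined above.
from calories.main.util.filters import to_fiql, to_sql_alchemy
spec = "username ne 'user1' AND daily_calories gt 1000"
# to_fiql rewrites the operators to FIQL syntax ("!=", "=gt=", ";") and parses
# the expression; to_sql_alchemy then turns the parsed form into the nested
# dict format that sqlalchemy-filters expects, roughly:
# {"and": [{"field": "username", "op": "!=", "value": "user1"},
#          {"field": "daily_calories", "op": ">", "value": 1000}]}
print(to_sql_alchemy(to_fiql(spec)))
```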
#### File: test/controller/__init__.py
```python
import json
import unittest
from typing import Dict, Union, List, Any
from urllib.parse import quote
from calories.test import BaseTestCase
HeaderType = Dict[str, str]
class TestAPI(BaseTestCase):
def setUp(self):
super().setUp()
self.path = 'api'
def delete(self, path: str, headers: HeaderType = None):
return self.client.delete(path, content_type='application/json', headers=headers)
def get(self, path: str, headers: HeaderType = None):
return self.client.get(path, content_type='application/json', headers=headers)
def post(self, path: str, data: HeaderType, headers: HeaderType = None):
return self.client.post(path, data=json.dumps(data), content_type='application/json', headers=headers)
def put(self, path: str, data, headers: HeaderType = None):
return self.client.put(path, data=json.dumps(data), content_type='application/json', headers=headers)
def _login(self, username: str = 'admin', password: str = '<PASSWORD>') -> str:
path = 'api/login'
request_data = {'username': username, 'password': password}
response = self.post(path, request_data)
data = json.loads(response.data.decode())
return data['Authorization']
def _get_headers(self, username: str = 'admin', password: str = '<PASSWORD>') -> HeaderType:
return {'accept': 'application/json', 'Authorization': f'Bearer {self._login(username, password)}'}
def _check_error(self, response: ..., code: int, title: str, detail: str) -> None:
data = json.loads(response.data.decode())
self.assertEqual(data['status'], code)
self.assertEqual(data['title'], title)
self.assertEqual(data['detail'], detail)
self.assertEqual(response.content_type, 'application/problem+json')
self.assertEqual(response.status_code, code)
def _check_succes(self, expected: Any, response: ..., code: int) -> None:
data = json.loads(response.data.decode())
self.assertEqual(data['status'], code)
self.assertEqual(data['title'], 'Success')
self.assertEqual(data['data'], expected)
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.status_code, code)
```
#### File: test/controller/test_users.py
```python
import unittest
from urllib.parse import quote
from calories.test.controller import TestAPI
class TestUsers(TestAPI):
"""Test class for calories.main.controller.users"""
def test_get_all_users_unauthenticated(self):
"""Unauthenticated request"""
path = "/".join([self.path, "users"])
with self.client:
response = self.get(path, None)
self._check_error(
response, 401, "Unauthorized", "No authorization token provided"
)
def test_get_all_users_success_admin(self):
"""Successful request as admin"""
path = "/".join([self.path, "users"])
with self.client:
expected = [
{
"daily_calories": 0,
"email": "<EMAIL>",
"name": "Administrator",
"role": "ADMIN",
"username": "admin",
},
{
"daily_calories": 2000,
"email": "<EMAIL>",
"name": "<NAME>",
"role": "MANAGER",
"username": "manager1",
},
{
"daily_calories": 4000,
"email": "<EMAIL>",
"name": "<NAME>",
"role": "MANAGER",
"username": "manager2",
},
{
"daily_calories": 2500,
"email": "<EMAIL>",
"name": "User 1",
"role": "USER",
"username": "user1",
},
{
"daily_calories": 3000,
"email": "<EMAIL>",
"name": "User 2",
"role": "USER",
"username": "user2",
},
]
response = self.get(path, self._get_headers())
self._check_succes(expected, response, 200)
def test_get_all_users_user_not_allowed(self):
"""User is not allowed to see user list"""
path = "/".join([self.path, "users"])
with self.client:
response = self.get(path, self._get_headers("user1", "pass_user1"))
self._check_error(
response,
403,
"Forbidden",
"User 'user1' belongs to the role 'USER' and is not allowed to perform the action",
)
def test_get_all_users_manager_allowed(self):
"""Manager is allowed to see user list"""
path = "/".join([self.path, "users"])
with self.client:
expected = [
{
"daily_calories": 0,
"email": "<EMAIL>",
"name": "Administrator",
"role": "ADMIN",
"username": "admin",
},
{
"daily_calories": 2000,
"email": "<EMAIL>",
"name": "<NAME>",
"role": "MANAGER",
"username": "manager1",
},
{
"daily_calories": 4000,
"email": "<EMAIL>",
"name": "<NAME>",
"role": "MANAGER",
"username": "manager2",
},
{
"daily_calories": 2500,
"email": "<EMAIL>",
"name": "User 1",
"role": "USER",
"username": "user1",
},
{
"daily_calories": 3000,
"email": "<EMAIL>",
"name": "User 2",
"role": "USER",
"username": "user2",
},
]
response = self.get(path, self._get_headers("manager1", "pass_manager1"))
self._check_succes(expected, response, 200)
def test_get_all_users_manager_allowed_first_page(self):
"""Manager is allowed to see user list, test pagination: first page"""
path = "/".join([self.path, "users"])
with self.client:
expected = [
{
"daily_calories": 0,
"email": "<EMAIL>",
"name": "Administrator",
"role": "ADMIN",
"username": "admin",
},
{
"daily_calories": 2000,
"email": "<EMAIL>",
"name": "<NAME>",
"role": "MANAGER",
"username": "manager1",
},
]
paged_path = path + "?items_per_page=2&page_number=1"
response = self.get(
paged_path, self._get_headers("manager1", "pass_manager1")
)
self._check_succes(expected, response, 200)
def test_get_all_users_manager_allowed_last_page(self):
"""Manager is allowed to see user list, test pagination: last page"""
path = "/".join([self.path, "users"])
with self.client:
expected = [
{
"daily_calories": 3000,
"email": "<EMAIL>",
"name": "<NAME>",
"role": "USER",
"username": "user2",
}
]
paged_path = path + "?items_per_page=2&page_number=3"
response = self.get(
paged_path, self._get_headers("manager1", "pass_manager1")
)
self._check_succes(expected, response, 200)
def test_get_all_users_manager_allowed_beyond_last_page(self):
"""Manager is allowed to see user list, test pagination: beyond last page"""
path = "/".join([self.path, "users"])
with self.client:
expected = []
paged_path = path + "?items_per_page=2&page_number=20"
response = self.get(
paged_path, self._get_headers("manager1", "pass_manager1")
)
self._check_succes(expected, response, 200)
def test_get_all_users_manager_allowed_filtering1(self):
"""Manager is allowed to see user list, filtering 1"""
path = "/".join([self.path, "users"])
with self.client:
expected = [
{
"daily_calories": 3000,
"email": "<EMAIL>",
"name": "User 2",
"role": "USER",
"username": "user2",
}
]
filtered_path = (
path
+ "?filter_results="
+ quote(
"(username ne 'user1') AND "
"((daily_calories gt 2200) AND (daily_calories lt 3500))"
)
)
response = self.get(
filtered_path, self._get_headers("manager1", "pass_manager1")
)
self._check_succes(expected, response, 200)
def test_get_all_users_manager_allowed_filtering2(self):
"""Manager is allowed to see user list, filtering 2"""
path = "/".join([self.path, "users"])
with self.client:
filtered_path = path + "?filter_results=" + quote("wrongfilter")
response = self.get(
filtered_path, self._get_headers("manager1", "pass_manager1")
)
self._check_error(
response, 400, "Bad Request", "Filter 'wrongfilter' is invalid"
)
def test_post_user_unauthenticated(self):
"""Unauthenticated request"""
path = "/".join([self.path, "users"])
with self.client:
request_data = {"username": "admin", "password": "<PASSWORD>"}
response = self.post(path, request_data, None)
self._check_error(
response, 401, "Unauthorized", "No authorization token provided"
)
def test_post_user_missing_parameters(self):
"""Request missing parameters"""
path = "/".join([self.path, "users"])
with self.client:
request_data = {"username": "admin", "password": "<PASSWORD>"}
response = self.post(path, request_data, self._get_headers())
self._check_error(
response, 400, "Bad Request", "'name' is a required property"
)
def test_post_user_correct_request(self):
"""Correct request"""
path = "/".join([self.path, "users"])
with self.client:
request_data = {
"username": "user3",
"name": "User 3",
"email": "<EMAIL>",
"role": "USER",
"daily_calories": 2500,
"password": "<PASSWORD>",
}
expected = request_data.copy()
expected.pop("password")
response = self.post(path, request_data, self._get_headers())
self._check_succes(expected, response, 201)
def test_post_user_username_exists(self):
"""Username exists"""
path = "/".join([self.path, "users"])
with self.client:
request_data = {
"username": "user1",
"name": "User 1",
"email": "<EMAIL>",
"role": "USER",
"daily_calories": 2500,
"password": "<PASSWORD>",
}
response = self.post(path, request_data, self._get_headers())
self._check_error(response, 409, "Conflict", "User 'user1' exists already")
def test_post_user_user_add_user(self):
"""Role user tries to add user"""
path = "/".join([self.path, "users"])
with self.client:
request_data = {
"username": "user4",
"name": "User 4",
"email": "<EMAIL>",
"role": "USER",
"daily_calories": 2500,
"password": "<PASSWORD>",
}
response = self.post(
path, request_data, self._get_headers("user1", "pass_user1")
)
self._check_error(
response,
403,
"Forbidden",
"User 'user1' belongs to the role 'USER' and is not allowed to perform the action",
)
def test_post_user_manager_add_user(self):
"""Manager tries to add a user"""
path = "/".join([self.path, "users"])
with self.client:
request_data = {
"username": "user4",
"name": "<NAME>",
"email": "<EMAIL>",
"role": "USER",
"daily_calories": 2500,
"password": "<PASSWORD>",
}
expected = request_data.copy()
expected.pop("password")
response = self.post(
path, request_data, self._get_headers("manager1", "pass_manager1")
)
self._check_succes(expected, response, 201)
def test_delete_user_unauthenticated(self):
"""Unauthenticated request"""
path = "/".join([self.path, "users"])
with self.client:
response = self.delete("/".join([path, "user1"]), None)
self._check_error(
response, 401, "Unauthorized", "No authorization token provided"
)
def test_delete_user_user_not_allowed(self):
"""User cannot delete other users"""
path = "/".join([self.path, "users"])
with self.client:
response = self.delete(
"/".join([path, "manager1"]), self._get_headers("user1", "pass_user1")
)
self._check_error(
response,
403,
"Forbidden",
"User 'user1' belongs to the role 'USER' and is not allowed to perform the action",
)
def test_delete_user_user_himself(self):
"""User can delete himself"""
path = "/".join([self.path, "users"])
with self.client:
expected = None
response = self.delete(
"/".join([path, "user1"]), self._get_headers("user1", "pass_user1")
)
self._check_succes(expected, response, 200)
def test_delete_user_manager_to_manager(self):
"""Managers cannot delete managers"""
path = "/".join([self.path, "users"])
with self.client:
response = self.delete(
"/".join([path, "manager2"]),
self._get_headers("manager1", "pass_manager1"),
)
self._check_error(
response,
403,
"Forbidden",
"User 'manager1' can only delete users with role USER",
)
def test_delete_user_not_exists(self):
"""Error deleting non existing user"""
path = "/".join([self.path, "users"])
with self.client:
response = self.delete(
"/".join([path, "user4"]),
self._get_headers("manager1", "pass_manager1"),
)
self._check_error(response, 404, "Not Found", "User 'user4' not found")
def test_get_user_unauthenticated(self):
"""Unauthenticated request"""
path = "/".join([self.path, "users"])
with self.client:
response = self.get("/".join([path, "user1"]), None)
self._check_error(
response, 401, "Unauthorized", "No authorization token provided"
)
def test_get_user_user_other(self):
"""User cannot get other users"""
path = "/".join([self.path, "users"])
with self.client:
response = self.get(
"/".join([path, "manager1"]), self._get_headers("user1", "pass_user1")
)
self._check_error(
response,
403,
"Forbidden",
"User 'user1' belongs to the role 'USER' and is not allowed to perform the action",
)
def test_get_user_himself(self):
"""User can get himself request"""
path = "/".join([self.path, "users"])
with self.client:
expected = {
"daily_calories": 2500,
"email": "<EMAIL>",
"name": "User 1",
"role": "USER",
"username": "user1",
}
response = self.get(
"/".join([path, "user1"]), self._get_headers("user1", "pass_user1")
)
self._check_succes(expected, response, 200)
def test_get_user_manager_users(self):
"""User managers can get users"""
path = "/".join([self.path, "users"])
with self.client:
expected = {
"daily_calories": 4000,
"email": "<EMAIL>",
"name": "<NAME>",
"role": "MANAGER",
"username": "manager2",
}
response = self.get(
"/".join([path, "manager2"]),
self._get_headers("manager1", "pass_manager1"),
)
self._check_succes(expected, response, 200)
def test_get_user_error_not_found(self):
"""Error getting non existing user"""
path = "/".join([self.path, "users"])
with self.client:
response = self.get(
"/".join([path, "user3"]),
self._get_headers("manager1", "pass_manager1"),
)
self._check_error(response, 404, "Not Found", "User 'user3' not found")
def test_put_user_unauthenticated(self):
"""Unauthenticated request"""
path = "/".join([self.path, "users"])
with self.client:
request_data = {"username": "admin", "password": "<PASSWORD>"}
response = self.put("/".join([path, "user1"]), request_data, None)
self._check_error(
response, 401, "Unauthorized", "No authorization token provided"
)
def test_put_user_user_other_users(self):
"""User cannot get other users"""
path = "/".join([self.path, "users"])
with self.client:
request_data = {"username": "admin", "password": "<PASSWORD>"}
response = self.put(
"/".join([path, "manager1"]),
request_data,
self._get_headers("user1", "pass_user1"),
)
self._check_error(
response,
403,
"Forbidden",
"User 'user1' belongs to the role 'USER' and is not allowed to perform the action",
)
def test_put_user_user_himself(self):
"""User can put himself"""
path = "/".join([self.path, "users"])
with self.client:
expected = {
"daily_calories": 2500,
"email": "<EMAIL>",
"name": "User 1A",
"role": "USER",
"username": "user1a",
}
request_data = {
"username": "user1a",
"password": "<PASSWORD>",
"name": "User 1A",
}
response = self.put(
"/".join([path, "user1"]),
request_data,
self._get_headers("user1", "pass_user1"),
)
self._check_succes(expected, response, 200)
def test_put_user_manager_user(self):
"""User managers can put users"""
path = "/".join([self.path, "users"])
with self.client:
expected = {
"daily_calories": 2500,
"email": "<EMAIL>",
"name": "User 1",
"role": "USER",
"username": "user1",
}
request_data = {"password": "<PASSWORD>"}
response = self.put(
"/".join([path, "user1"]),
request_data,
self._get_headers("manager1", "pass_manager1"),
)
self._check_succes(expected, response, 200)
def test_put_user_non_existing(self):
"""Error getting non existing user"""
path = "/".join([self.path, "users"])
with self.client:
request_data = {"password": "<PASSWORD>"}
response = self.put(
"/".join([path, "user3"]),
request_data,
self._get_headers("manager1", "pass_manager1"),
)
self._check_error(response, 404, "Not Found", "User 'user3' not found")
if __name__ == "__main__":
unittest.main()
```
#### File: test/util/test_filters.py
```python
import unittest
from sqlalchemy_filters.exceptions import InvalidPage
from werkzeug.exceptions import BadRequest
from calories.main.models.models import User
from calories.main.util.filters import apply_filter
from calories.test import BaseTestCase
class TestFilters(BaseTestCase):
"""Test class for calories.main.util.filters"""
def setUp(self):
super().setUp()
self.users = User.query.order_by(User.username)
def test_filters_eq(self):
"""Test filtering eq operator"""
query, pagination = apply_filter(self.users, "username eq user1")
filtered = query.all()
self.assertEqual(len(filtered), 1)
self.assertEqual(filtered[0].username, "user1")
self.assertEqual(pagination.num_pages, 1)
self.assertEqual(pagination.total_results, 1)
def test_filters_ne(self):
"""Test filtering ne operator"""
query, pagination = apply_filter(self.users, "username ne user1")
filtered = query.all()
self.assertEqual(len(filtered), 4)
self.assertEqual(filtered[0].username, "admin")
self.assertEqual(filtered[1].username, "manager1")
self.assertEqual(filtered[2].username, "manager2")
self.assertEqual(filtered[3].username, "user2")
self.assertEqual(pagination.num_pages, 1)
self.assertEqual(pagination.total_results, 4)
def test_filters_and(self):
"""Test filtering and operator"""
query, pagination = apply_filter(
self.users, "username ne 'user1' AND daily_calories lt 2000"
)
filtered = query.all()
self.assertEqual(len(filtered), 1)
self.assertEqual(filtered[0].username, "admin")
self.assertEqual(pagination.num_pages, 1)
self.assertEqual(pagination.total_results, 1)
def test_filters_complex(self):
"""Test filtering with complex filter"""
query, pagination = apply_filter(
self.users,
"username ne 'user1' AND (daily_calories lt 3000 AND "
"daily_calories gt 1000)",
)
filtered = query.all()
self.assertEqual(len(filtered), 1)
self.assertEqual(filtered[0].username, "manager1")
self.assertEqual(pagination.num_pages, 1)
self.assertEqual(pagination.total_results, 1)
def test_filters_no_results(self):
"""Test filtering returning no results"""
query, pagination = apply_filter(self.users, "username eq 'wronguser'")
filtered = query.all()
self.assertEqual(len(filtered), 0)
self.assertEqual(pagination.num_pages, 0)
self.assertEqual(pagination.total_results, 0)
def test_filters_wrong_field(self):
"""Test filtering providing wrong fields"""
with self.assertRaises(BadRequest):
apply_filter(self.users, "wrongfield eq 'wronguser'")
def test_filters_wrong_filter(self):
"""Test filtering providing wrong filters"""
with self.assertRaises(BadRequest):
apply_filter(self.users, "wrongfilter")
def test_pagination_1_2(self):
"""Test pagination page_number=1 and page_size=2"""
query, pagination = apply_filter(self.users, page_number=1, page_size=2)
filtered = query.all()
self.assertEqual(len(filtered), 2)
self.assertEqual(filtered[0].username, "admin")
self.assertEqual(filtered[1].username, "manager1")
self.assertEqual(pagination.num_pages, 3)
self.assertEqual(pagination.total_results, 5)
def test_pagination_2_2(self):
"""Test pagination page_number=2 and page_size=2"""
query, pagination = apply_filter(self.users, page_number=2, page_size=2)
filtered = query.all()
self.assertEqual(len(filtered), 2)
self.assertEqual(filtered[0].username, "manager2")
self.assertEqual(filtered[1].username, "user1")
self.assertEqual(pagination.num_pages, 3)
self.assertEqual(pagination.total_results, 5)
def test_pagination_3_2(self):
"""Test pagination page_number=3 and page_size=2"""
query, pagination = apply_filter(self.users, page_number=3, page_size=2)
filtered = query.all()
self.assertEqual(len(filtered), 1)
self.assertEqual(filtered[0].username, "user2")
self.assertEqual(pagination.num_pages, 3)
self.assertEqual(pagination.total_results, 5)
def test_pagination_4_2(self):
"""Test pagination page_number=4 and page_size=2"""
query, pagination = apply_filter(self.users, page_number=4, page_size=2)
filtered = query.all()
self.assertEqual(len(filtered), 0)
self.assertEqual(pagination.num_pages, 3)
self.assertEqual(pagination.total_results, 5)
def test_pagination_0_2(self):
"""Test pagination invalid page_number=0 and page_size=2"""
with self.assertRaises(InvalidPage):
apply_filter(self.users, page_number=0, page_size=2)
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "jorgemira/euler-py",
"score": 4
} |
#### File: jorgemira/euler-py/p005.py
```python
from fractions import gcd
RESULT = 232792560
def lcm(num1, num2):
'''Return least common multiple of two numbers'''
return num1 * num2 // gcd(num1, num2)
def lcmm(numbers):
'''Return least common multiple of a list of numbers'''
return reduce(lcm, numbers)
def solve():
'''Main function'''
max_num = 20
    return lcmm(xrange(1, max_num + 1))
if __name__ == '__main__':
print solve()
```
#### File: jorgemira/euler-py/p007.py
```python
from utils import is_prime
RESULT = 104743
def solve():
'''Main function'''
count = 0
num = 1
nth_num = 10001
while count < nth_num:
num += 1
if is_prime(num):
count += 1
return num
if __name__ == '__main__':
print solve()
```
#### File: jorgemira/euler-py/p009.py
```python
RESULT = 31875000
def solve():
'''Main function'''
total = 1000
for num_a in xrange(1, 251):
num_b = num_a + 1
while num_b < (total - num_a - num_b):
num_c = total - num_a - num_b
if num_a**2 + num_b**2 == num_c**2:
return num_a * num_b * num_c
num_b += 1
if __name__ == '__main__':
print solve()
```
#### File: jorgemira/euler-py/p010.py
```python
from utils import eratosthenes2
RESULT = 142913828922
def solve():
'''Main function'''
max_primes = 2000000
return sum(eratosthenes2(max_primes))
if __name__ == '__main__':
print solve()
```
#### File: jorgemira/euler-py/p011.py
```python
from utils import mul, slurp_file
RESULT = 70600674
def solve():
'''Main function'''
grid = []
res = 0
max_adj = 4
text = slurp_file('p011.txt')
for row in text.split('\n'):
grid.append([int(x) for x in row.split(' ')])
len_grid = len(grid)
# Horizontal
for i in xrange(len_grid):
for j in xrange(len_grid - max_adj):
res = max(res, mul([grid[i][j+x] for x in xrange(max_adj)]))
# Vertical
for i in xrange(len_grid):
for j in xrange(len_grid - max_adj):
res = max(res, mul([grid[j+x][i] for x in xrange(max_adj)]))
# Diag 1
for i in xrange(len_grid - max_adj):
for j in xrange(len_grid - max_adj):
res = max(res, mul([grid[i + x][j + x] for x in xrange(max_adj)]))
# Diag 2
for i in xrange(max_adj - 1, len_grid - max_adj):
for j in xrange(len_grid - max_adj):
res = max(res, mul([grid[i - x][j + x] for x in xrange(max_adj)]))
return res
if __name__ == '__main__':
print solve()
```
#### File: jorgemira/euler-py/p015.py
```python
from math import factorial
RESULT = 137846528820
def solve():
'''Main function'''
size = 20
result = factorial(size * 2) / factorial(size)**2
return result
if __name__ == '__main__':
print solve()
```
#### File: jorgemira/euler-py/p029.py
```python
RESULT = 9183
def solve():
'''Main function'''
vals = set()
limit = 100
for i in xrange(2, limit + 1):
for j in xrange(2, limit + 1):
vals.add(i**j)
return len(vals)
if __name__ == '__main__':
print solve()
```
#### File: jorgemira/euler-py/p034.py
```python
from math import factorial
FACTS = [factorial(n) for n in xrange(10)]
RESULT = 40730
def get_digits(num):
'''Generator that yields the digits of a number'''
while num:
yield num % 10
num /= 10
def is_curious(num):
'''Return True if a number is equal to the sum of the factorials of its
digits, false otherwise'''
return num == sum([FACTS[x] for x in get_digits(num)])
def get_limit():
'''Return the upper limit for the problem'''
count = 1
while 10 ** count < count * FACTS[9]:
count += 1
return FACTS[9] * count
def solve():
'''Main function'''
total = 0
limit = get_limit()
for num in xrange(10, limit):
if is_curious(num):
total += num
return total
if __name__ == "__main__":
print solve()
```
#### File: jorgemira/euler-py/p035.py
```python
from utils import is_prime
RESULT = 55
def get_rotations(num):
    '''Return a set with the rotations of a given number'''
result = set()
result.add(num)
str_num = str(num)
for _ in xrange(len(str(num)) - 1):
str_num = str_num[1:] + str_num[:1]
result.add(int(str_num))
return result
def all_odd(num):
    '''Check whether all the digits of a number are odd or not'''
if num == 2:
# 2 is even but prime
return True
else:
return all([int(x) % 2 for x in str(num)])
def solve():
'''Main function'''
max_nums = 1000000
checked = set()
result = 0
for num in xrange(max_nums):
rotations = get_rotations(num)
min_rot = min(rotations)
        if min_rot not in checked:
checked.add(min_rot)
if all_odd(num) and all([is_prime(x) for x in rotations]):
result += len(rotations)
return result
if __name__ == '__main__':
print solve()
```
#### File: jorgemira/euler-py/test_problems.py
```python
import unittest
import sys
from glob import glob
from time import time
class TestProblems(unittest.TestCase):
'''Generic class for testing problems of Project Euler'''
@classmethod
def setUpClass(cls):
'''Set up the list for the test times'''
cls.times = []
@classmethod
def tearDownClass(cls):
'''Print test times'''
print
for test_time in cls.times:
print test_time
def setUp(self):
'''Initialize start_time variable'''
self.start_time = time()
def tearDown(self):
'''Calculate time spent in test and add it to the list'''
time_spent = time() - self.start_time
test_name = self.id().split('.')[-1]
self.times.append("%s: %.3fs" % (test_name, time_spent))
def generate_test(problem):
    '''Dynamically generates a test case for a problem'''
def test_problem(self):
'''Check whether %s returns the expected result'''
prob = __import__(problem)
self.assertEqual(prob.solve(), prob.RESULT)
test_problem.__doc__ %= problem
return test_problem
def get_names(nums):
    '''Return the correct module names from the numbers given'''
files = []
for num in nums:
for _ in xrange(3 - len(num)):
num = '0%s' % num
files.append('p%s' % num)
return files
def get_all():
    '''Return all the file names (without extension) of the problem modules
    in the current directory'''
files = []
for f_name in glob('p???.py'):
files.append(f_name[:4])
return files
def run_tests(tests):
'''Creates and runs test cases for the specified problems'''
problems = []
if not tests or 'all' in tests:
problems = get_all()
else:
problems = get_names(tests)
for problem in sorted(problems):
setattr(TestProblems, 'test_%s' % problem, generate_test(problem))
unittest.main()
if __name__ == '__main__':
sys.argv, args = sys.argv[:1], sys.argv[1:]
run_tests(args)
``` |
{
"source": "jorgemira/gps_tracker",
"score": 3
} |
#### File: client/gps_tracker/jobs.py
```python
from typing import Callable
import schedule
from . import constants as c
from .gpsd import GPSD
from .logging import get_logger
from .server import Server
logger = get_logger(__name__)
def schedule_job(job: Callable, seconds: int) -> None:
"""Clear a previously running job, if exists, and launch it again"""
schedule.clear(job.__name__)
job()
schedule.every(seconds).seconds.do(job).tag(job.__name__)
def post_location_job() -> None:
"""Post unsent location list and then post current location"""
if not Server.token:
Server.login()
try:
location = GPSD.get_location()
except Exception:
logger.exception("Cannot acquire location")
return
if Server.token:
Server.send_unsent_locations()
Server.post_location(location)
else:
Server.append_failed_location(location)
def panic_job() -> None:
"""Check for panic mode and reschedule post_location_job if necesary"""
new_panic = Server.is_panic_mode()
if Server.panic_mode and not new_panic:
logger.info("Disabling panic mode")
schedule_job(post_location_job, c.TIME_NO_PANIC)
elif not Server.panic_mode and new_panic:
logger.info("Enabling panic mode")
schedule_job(post_location_job, c.TIME_PANIC)
Server.panic_mode = new_panic
```
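The module above only defines and (re)schedules jobs; the driver loop lives elsewhere in the client and is not shown here. Below is a minimal sketch, under assumptions, of what such a runner could look like using the same `schedule` package: `PANIC_CHECK_SECONDS` is a hypothetical polling interval standing in for whatever value the real client uses.
```python
# Sketch of a runner loop for the jobs defined above; not taken from the repository.
import time
import schedule
from gps_tracker import constants as c
from gps_tracker.jobs import panic_job, post_location_job, schedule_job
PANIC_CHECK_SECONDS = 30  # assumed polling interval, not from the source
def main() -> None:
    # Start posting locations at the normal (non-panic) rate; panic_job may
    # later reschedule post_location_job at the faster TIME_PANIC interval.
    schedule_job(post_location_job, c.TIME_NO_PANIC)
    schedule.every(PANIC_CHECK_SECONDS).seconds.do(panic_job)
    while True:
        schedule.run_pending()
        time.sleep(1)
if __name__ == "__main__":
    main()
```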
#### File: client/gps_tracker/server.py
```python
import json
import os
from json.decoder import JSONDecodeError
from typing import List, Union
import requests
from requests import RequestException
from . import constants as c
from .location import Location
from .logging import get_logger
logger = get_logger(__name__)
class Server:
token: Union[str, None] = None
panic: bool = False
panic_mode: bool = False
@classmethod
def login(cls) -> None:
"""Get and store authentication token from server"""
contents = {"username": c.USERNAME, "password": <PASSWORD>}
try:
response = requests.post(c.AUTH_URL, json=contents)
if response.status_code != 200:
raise ValueError("Log in response was not successful")
content = json.loads(response.content)
cls.token = content["token"]
except (RequestException, JSONDecodeError, ValueError):
logger.exception("Error logging into server")
cls.token = None
@classmethod
def post_location(cls, location: Location) -> None:
"""Upload a location into the server"""
headers = {"Authorization": f"Token {cls.token}"}
data = location.to_json()
try:
response = requests.post(c.LOCATIONS_URL, json=data, headers=headers)
if response.status_code != 201:
raise ValueError("Location posting response was not successful")
except (RequestException, ValueError):
logger.exception("Error posting location")
cls.append_failed_location(location)
@classmethod
def is_panic_mode(cls) -> bool:
headers = {"Authorization": f"Token {cls.token}"}
try:
response = requests.get(c.PANIC_URL, headers=headers)
content = json.loads(response.content)
return content["panic"]
except (RequestException, JSONDecodeError):
logger.exception("Cannot get panic mode")
return False
@staticmethod
def append_failed_location(location: Location) -> None:
"""Append location into PENDING_FILE"""
try:
with open(c.PENDING_FILE, "a") as file:
file.write(json.dumps(location.to_json()) + "\n")
except IOError:
logger.exception("Cannot append failed location")
@classmethod
def send_unsent_locations(cls) -> None:
"""Iterate through the list of locations that have not been sent and try to send them"""
unsent_locations = cls._get_unsent_locations()
for location in unsent_locations:
cls.post_location(location)
@staticmethod
def _get_unsent_locations() -> List[Location]:
"""Return a list of the locations that have not been sent"""
locations = []
if not os.path.exists(c.PENDING_FILE):
return locations
with open(c.PENDING_FILE) as file:
for line in file:
try:
locations.append(Location.from_json(line))
except JSONDecodeError:
                    logger.warning(f"Error decoding string: '{line}'")
os.remove(c.PENDING_FILE)
return locations
``` |
{
"source": "jorgeMorfinezM/binary_search_algorithms",
"score": 4
} |
#### File: jorgeMorfinezM/binary_search_algorithms/binary_for_search.py
```python
def binary_search(list_data, search_data):
left_index, right_index = 0, len(list_data) - 1
while left_index <= right_index:
pivot_index = (left_index + right_index) // 2
pivot_data = list_data[pivot_index]
if pivot_data == search_data:
return pivot_index
if search_data < pivot_data:
right_index = pivot_index - 1
else:
left_index = pivot_index + 1
    # loop finished, meaning the searched value does not exist in the array
return -1
"""
Tests
"""
# Test with a numeric array
data_list = [1, 2, 3, 10, 50, 80, 120, 150, 500, 1000]
print("Search over the list: ", data_list)
data_search = 500
index_search = binary_search(data_list, data_search)
print("Element {} is at index {}".format(data_search, index_search))
# Test with a string array
data_list = ["Albino", "Bambu", "Becerro", "Contaminacion", "Cortina", "Trampolin"]
print("Search over the list: ", data_list)
data_search = "Cortina"
index_search = binary_search(data_list, data_search)
print("Element {} is at index {}".format(data_search, index_search))
data_chars = ["{", "[", "]", "}", "]"]
print("Search over the list: ", data_chars)
data_search = '['
search = binary_search(data_chars, data_search)
print("Element {} is at index {}".format(data_search, search))
``` |
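For comparison only (not part of the original file), Python's standard `bisect` module can provide the same index-or-minus-one contract on an already sorted list; this is a sketch alongside the hand-rolled implementation above, not a replacement for it.
```python
# Sketch using bisect_left to reproduce the same result contract:
# the index of the element, or -1 when it is absent from the sorted list.
from bisect import bisect_left
def binary_search_bisect(list_data, search_data):
    index = bisect_left(list_data, search_data)
    if index < len(list_data) and list_data[index] == search_data:
        return index
    return -1
print(binary_search_bisect([1, 2, 3, 10, 50, 80], 10))  # 3
print(binary_search_bisect([1, 2, 3, 10, 50, 80], 42))  # -1
```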
{
"source": "jorgeMorfinezM/cargamos_api_test",
"score": 2
} |
#### File: jorgeMorfinezM/cargamos_api_test/app.py
```python
__author__ = "<NAME> (<EMAIL>)"
__copyright__ = "Copyright 2021, <NAME>"
__license__ = ""
__history__ = """ """
__version__ = "1.1.A19.1 ($Rev: 1 $)"
import json
import re
import threading
import time
import uuid
from flask import Flask, jsonify, render_template, json, request
from flask_jwt_extended import JWTManager
from auth_controller.api_authentication import *
from utilities.Utility import Utility as Util
from logger_controller.logger_control import *
from db_controller.database_backend import *
from model.StoreModel import StoreModel
from model.ProductModel import ProductModel
logger = configure_ws_logger()
app = Flask(__name__, static_url_path='/static')
app.config['JWT_SECRET_KEY'] = '<KEY>'
app.config['JWT_BLACKLIST_ENABLED'] = False
app.config['JWT_BLACKLIST_TOKEN_CHECKS'] = ['access', 'refresh']
app.config['JWT_ERROR_MESSAGE_KEY'] = 'message'
app.config['JWT_ACCESS_TOKEN_EXPIRES'] = 3600
app.config['PROPAGATE_EXCEPTIONS'] = True
jwt = JWTManager(app)
# The app is initialized with a background thread to avoid execution problems
# (validation is still missing for the case where a thread is already running)
@app.before_first_request
def activate_job():
def run_job():
while True:
time.sleep(2)
thread = threading.Thread(target=run_job)
thread.start()
# Serves the HTML page that documents the API,
# its methods, and endpoints with the I/O data models
@app.route('/')
def main():
return render_template('api_manage_ecommerce.html')
def get_stock_all_stores_by_product(product_sku):
stock_list = []
stock_in_stores = select_all_stock_in_product(product_sku)
stock_list = json.loads(stock_in_stores)
if stock_list:
logger.info('List Stock in all Stores by SKU: {}: {}: '.format(product_sku, stock_list))
return stock_list
@app.route('/api/ecommerce/stock/total/', methods=['GET', 'OPTIONS'])
@jwt_required
def endpoint_list_stock_all_stores():
headers = request.headers
auth = headers.get('Authorization')
    if not auth or 'Bearer' not in auth:
return request_unauthorized()
else:
if request.method == 'OPTIONS':
headers = {
'Access-Control-Allow-Methods': 'POST, GET, OPTIONS',
'Access-Control-Max-Age': 1000,
'Access-Control-Allow-Headers': 'origin, x-csrftoken, content-type, accept',
}
return '', 200, headers
elif request.method == 'GET':
data = request.get_json(force=True)
product_sku = data['product_sku']
json_data = get_stock_all_stores_by_product(product_sku)
if not product_sku:
return request_conflict()
return json.dumps(json_data)
else:
return not_found()
def get_stock_by_store_by_product(product_sku, store_code):
stock_list = []
stock_in_store = select_stock_in_product(store_code, product_sku)
stock_list = json.loads(stock_in_store)
if stock_list:
logger.info('List Stock in one Store: {} by SKU: {}: {}: '.format(store_code, product_sku, stock_list))
return stock_list
@app.route('/api/ecommerce/stock/detail/', methods=['GET', 'OPTIONS'])
@jwt_required
def endpoint_detailed_stock_by_sku():
headers = request.headers
auth = headers.get('Authorization')
    if not auth or 'Bearer' not in auth:
return request_unauthorized()
else:
if request.method == 'OPTIONS':
headers = {
'Access-Control-Allow-Methods': 'POST, GET, OPTIONS',
'Access-Control-Max-Age': 1000,
'Access-Control-Allow-Headers': 'origin, x-csrftoken, content-type, accept',
}
return '', 200, headers
elif request.method == 'GET':
data = request.get_json(force=True)
product_sku = data['product_sku']
store_code = data['store_code']
json_data = get_stock_by_store_by_product(product_sku, store_code)
if not product_sku:
return request_conflict()
return json.dumps(json_data)
else:
return not_found()
def add_stock_by_store_by_product(stock, product_sku, store_code):
stock_add = []
stock_in_product = update_product_store_stock(stock, product_sku, store_code)
stock_add = json.loads(stock_in_product)
if stock_add:
logger.info('Add Stock: {} in one Product: {} by Store: {}: {}: '.format(stock,
product_sku,
store_code,
stock_add))
return stock_add
@app.route('/api/ecommerce/stock/add/', methods=['POST', 'OPTIONS'])
@jwt_required
def endpoint_update_stock():
headers = request.headers
auth = headers.get('Authorization')
    if not auth or 'Bearer' not in auth:
return request_unauthorized()
else:
if request.method == 'OPTIONS':
headers = {
'Access-Control-Allow-Methods': 'POST, GET, OPTIONS',
'Access-Control-Max-Age': 1000,
'Access-Control-Allow-Headers': 'origin, x-csrftoken, content-type, accept',
}
return '', 200, headers
        elif request.method == 'POST':
data = request.get_json(force=True)
stock = data['stock']
product_sku = data['product_sku']
store_code = data['store_code']
json_data = add_stock_by_store_by_product(stock, product_sku, store_code)
if not product_sku and not store_code and not stock:
return request_conflict()
return json.dumps(json_data)
else:
return not_found()
def manage_store_requested_data(store_data):
store_data_manage = []
store_model_db = StoreModelDb()
try:
store_code = store_data.get("store_code")
store_name = store_data.get("store_name")
        store_street_address = store_data.get("street_address")
        store_external_number = store_data.get("external_number_address")
store_suburb_address = store_data.get("suburb_address")
store_city_address = store_data.get("city_address")
store_country_address = store_data.get("country_address")
store_zippostal_code = store_data.get("zip_postal_code_address")
store_min_inventory = store_data.get("minimum_inventory")
store_obj = StoreModel(store_code, store_name, store_external_number, store_street_address, store_suburb_address,
store_city_address, store_country_address, store_zippostal_code, store_min_inventory)
store_data = store_model_db.manage_store_data(store_obj)
store_data_manage = json.loads(store_data)
if len(store_data_manage) != 0:
logger.info('Response Store Data: %s', str(store_data_manage))
return store_data_manage
except SQLAlchemyError as error:
raise mvc_exc.ConnectionError(
'Can\'t connect to database, verify data connection to "{}".\nOriginal Exception raised: {}'.format(
store_model_db.__tablename__, error
)
)
def get_stores_by_code(store_code):
store_list_data = {}
store_get_list_data = select_by_store_code(store_code)
store_list_data = json.loads(store_get_list_data)
if store_list_data:
logger.info('List Stores data by code: {}: {}: '.format(store_code, store_list_data))
return store_list_data
def update_store_data_endpoint(store_dict_input):
store_updated = dict()
store_updated = update_store_data(store_dict_input)
return store_updated
@app.route('/api/ecommerce/manage/store/', methods=['POST', 'GET', 'PUT', 'DELETE', 'OPTIONS'])
@jwt_required
def endpoint_processing_store_data():
headers = request.headers
auth = headers.get('Authorization')
    if not auth or 'Bearer' not in auth:
return request_unauthorized()
else:
if request.method == 'OPTIONS':
headers = {
'Access-Control-Allow-Methods': 'POST, GET, PUT, DELETE, OPTIONS',
'Access-Control-Max-Age': 1000,
'Access-Control-Allow-Headers': 'origin, x-csrftoken, content-type, accept',
}
return '', 200, headers
elif request.method == 'POST':
data = request.get_json(force=True)
if not data or str(data) is None:
return request_conflict()
logger.info('Data Json Store to Manage on DB: %s', str(data))
json_store_response = manage_store_requested_data(data)
return json.dumps(json_store_response)
elif request.method == 'GET':
data = request.get_json(force=True)
store_code = data['store_code']
json_data = []
json_data = get_stores_by_code(store_code)
logger.info('Stores List data by Code: %s', str(json_data))
if not store_code:
return request_conflict()
return json.dumps(json_data)
elif request.method == 'PUT':
data_store = request.get_json(force=True)
store_code = data_store.get("store_code")
store_name = data_store.get("store_name")
if not data_store:
return request_conflict()
json_data = dict()
json_data = update_store_data_endpoint(data_store)
logger.info('Data to update Store: %s',
"Store code: {0}, Store name: {1}".format(store_code, store_name))
logger.info('Store updated Info: %s', str(json_data))
return json_data
elif request.method == 'DELETE':
data = request.get_json(force=True)
store_code = data['store_code']
logger.info('Store to Delete: %s', 'Store Code: {}'.format(store_code))
json_data = []
            if not store_code or not Util.validate_store_code_syntax(store_code):
return request_conflict()
json_data = delete_store_data(store_code)
logger.info('Store deleted: %s', json_data)
return json.dumps(json_data)
else:
return not_found()
def manage_product_requested_data(product_data):
product_data_manage = []
product_model_db = ProductModelDb()
try:
product_sku = product_data.get("product_sku")
product_unspc = product_data.get("product_unspc")
product_brand = product_data.get("product_brand")
category_id = product_data.get("category_id")
parent_category_id = product_data.get("parent_category_id")
product_uom = product_data.get("unit_of_measure")
product_stock = product_data.get("product_stock")
store_code = product_data.get("product_store_code")
product_name = product_data.get("product_name")
product_title = product_data.get("product_title")
product_long_description = product_data.get("product_long_description")
product_photo = product_data.get("product_photo")
product_price = product_data.get("product_price")
product_tax = product_data.get("product_tax")
product_currency = product_data.get("product_currency")
product_status = product_data.get("product_status")
product_published = product_data.get("product_published")
manage_stock = product_data.get("product_manage_stock")
product_length = product_data.get("product_length")
product_width = product_data.get("product_width")
product_height = product_data.get("product_height")
product_weight = product_data.get("product_weight")
product_obj = ProductModel(product_sku, product_unspc, product_brand, category_id, parent_category_id,
product_uom, product_stock, store_code, product_name, product_title,
product_long_description, product_photo, product_price, product_tax, product_currency,
product_status, product_published, manage_stock, product_length, product_width,
product_height, product_weight)
data_product = product_model_db.manage_product_data(product_obj)
product_data_manage = json.loads(data_product)
if len(product_data_manage) != 0:
logger.info('Response Product Data: %s', str(product_data_manage))
return product_data_manage
except SQLAlchemyError as error:
raise mvc_exc.ConnectionError(
'Can\'t connect to database, verify data connection to "{}".\nOriginal Exception raised: {}'.format(
product_model_db.__tablename__, error
)
)
def get_products_by_sku(product_sku):
product_list_data = {}
product_get_list_data = select_by_product_sku(product_sku)
product_list_data = json.loads(product_get_list_data)
if product_list_data:
logger.info('List Product data by SKU: {}: {}: '.format(product_sku, product_list_data))
return product_list_data
@app.route('/api/ecommerce/manage/product/', methods=['POST', 'GET', 'PUT', 'DELETE', 'OPTIONS'])
@jwt_required
def endpoint_processing_product_data():
headers = request.headers
auth = headers.get('Authorization')
    if not auth or 'Bearer' not in auth:
return request_unauthorized()
else:
if request.method == 'OPTIONS':
headers = {
'Access-Control-Allow-Methods': 'POST, GET, PUT, DELETE, OPTIONS',
'Access-Control-Max-Age': 1000,
'Access-Control-Allow-Headers': 'origin, x-csrftoken, content-type, accept',
}
return '', 200, headers
elif request.method == 'POST':
data = request.get_json(force=True)
if not data or str(data) is None:
return request_conflict()
logger.info('Data Json Store to Manage on DB: %s', str(data))
json_store_response = manage_product_requested_data(data)
return json.dumps(json_store_response)
elif request.method == 'GET':
data = request.get_json(force=True)
product_sku = data['product_sku']
json_data = []
json_data = get_products_by_sku(product_sku)
logger.info('Product List data by SKU: %s', str(json_data))
if not product_sku:
return request_conflict()
return json.dumps(json_data)
elif request.method == 'PUT':
data_store = request.get_json(force=True)
product_sku = data_store.get('product_sku')
product_stock = data_store.get('product_stock')
product_store_code = data_store.get('product_store_code')
product_name = data_store.get('product_name')
if not data_store:
return request_conflict()
json_data = dict()
json_data = update_product_data(data_store)
logger.info('Data to update Product: %s',
"Product SKU: {0}, "
"Product Name: {1}, "
"Product Store Code: {2}, "
"Product Stock: {3}".format(product_sku, product_name, product_store_code, product_stock))
logger.info('Product updated Info: %s', str(json_data))
return json_data
elif request.method == 'DELETE':
data = request.get_json(force=True)
store_code = data['store_code']
product_sku = data['product_sku']
logger.info('Store to Delete: %s', 'Store Code: {}'.format(store_code))
json_data = []
            if not store_code or not Util.validate_store_code_syntax(store_code):
return request_conflict()
json_data = delete_product_data(product_sku, store_code)
logger.info('Product deleted: %s', json_data)
return json.dumps(json_data)
else:
return not_found()
@app.route('/api/ecommerce/authorization/', methods=['POST', 'OPTIONS'])
def get_authentication():
json_token = {}
if request.method == 'OPTIONS':
headers = {
'Access-Control-Allow-Methods': 'POST, GET, OPTIONS',
'Access-Control-Max-Age': 1000,
'Access-Control-Allow-Headers': 'origin, x-csrftoken, content-type, accept',
}
return '', 200, headers
elif request.method == 'POST':
data = request.get_json(force=True)
user_name = data['username']
password = data['password']
rfc = data['rfc_client']
regex_email = r"^[(a-z0-9\_\-\.)]+@[(a-z0-9\_\-\.)]+\.[(a-z)]{2,15}$"
regex_passwd = r"^[(A-Za-z0-9\_\-\.\$\#\&\*)(A-Za-z0-9\_\-\.\$\#\&\*)]+"
regex_rfc = r"^([A-ZÑ&]{3,4})?(?:-?)?(\d{2}(?:0[1-9]|1[0-2])(?:0[1-9]|[12]\d|3[01]))?(?:-?)?([A-Z\d]{2})([A\d])$"
match_email = re.match(regex_email, user_name, re.M | re.I)
match_passwd = re.match(regex_passwd, password, re.M | re.I)
match_rfc = re.match(regex_rfc, rfc, re.M | re.I)
if match_email and match_rfc and match_passwd:
password = <PASSWORD>
json_token = user_registration(user_name, password)
json_token = json.dumps(json_token)
return json_token
else:
return request_conflict()
else:
return not_found()
@app.errorhandler(404)
def not_found(error=None):
message = {
'error_code': 404,
'error_message': 'Page Not Found: ' + request.url,
}
resp = jsonify(message)
resp.status_code = 404
return resp
@app.errorhandler(500)
def server_error(error=None):
message = {
'error_code': 500,
'error_message': 'Server Error: ' + request.url,
}
resp = jsonify(message)
resp.status_code = 500
return resp
@app.errorhandler(401)
def request_unauthorized(error=None):
message = {
'error_code': 401,
'error_message': 'Request Unauthorized: ' + request.url,
}
resp = jsonify(message)
resp.status_code = 401
return resp
@app.errorhandler(409)
def request_conflict(error=None):
message = {
"error_code": 409,
"error_message": 'Request data conflict or Authentication data conflict, please verify it. ' + request.url,
}
resp = jsonify(message)
resp.status_code = 409
return resp
if __name__ == "__main__":
app.debug = True
app.run()
``` |
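As an illustration of how a client might consume the endpoints above, here is a hedged sketch using the `requests` library. The base URL, credentials, SKU/store codes, and especially the `"access_token"` key used to read the token out of the authorization response are assumptions, since `user_registration()` and the deployment details are not shown here.
```python
# Client-side sketch (not part of the service) for the authorization and stock endpoints.
import requests
BASE_URL = "http://localhost:5000/api/ecommerce"  # assumed local deployment
auth_payload = {
    "username": "user@example.com",  # hypothetical credentials
    "password": "S3cret#Pass",
    "rfc_client": "XAXX010101000",
}
auth_response = requests.post(f"{BASE_URL}/authorization/", json=auth_payload)
token = auth_response.json().get("access_token")  # key name is an assumption
headers = {"Authorization": f"Bearer {token}"}
stock_query = {"product_sku": "SKU-0001", "store_code": "ST-01"}  # hypothetical identifiers
detail = requests.get(f"{BASE_URL}/stock/detail/", json=stock_query, headers=headers)
print(detail.json())
```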
{
"source": "jorgeMorfinezM/product_api_test",
"score": 3
} |
#### File: apps/api_authentication/UsersAuthModel.py
```python
__author__ = "<NAME> (<EMAIL>)"
__copyright__ = "Copyright 2021"
__license__ = ""
__history__ = """ User's Model """
__version__ = "1.21.H28.1 ($Rev: 1 $)"
import json
import logging
from datetime import datetime
from sqlalchemy_filters import apply_filters
from sqlalchemy import Column, Boolean, Integer, String, Date, Time, Sequence
from db_controller.database_backend import *
from db_controller import mvc_exceptions as mvc_exc
cfg_db = get_config_settings_db()
USERS_ID_SEQ = Sequence('users_seq') # define sequence explicitly
class UsersAuthModel(Base):
r"""
    Class to represent the User data used to authenticate the API.
    Transactions:
        - Create: Add User data to the database if it does not exist.
        - Update: Update User data in the database if it exists.
        - Delete: Inactivate User data in the database if it exists.
"""
__tablename__ = cfg_db.user_auth_table
user_id = Column('user_id', Integer, USERS_ID_SEQ, primary_key=True, server_default=USERS_ID_SEQ.next_value())
user_name = Column('user_name', String, nullable=False)
password = Column('password_hash', String, nullable=False)
is_active = Column('is_active', Boolean, nullable=False)
is_staff = Column('is_staff', Boolean, nullable=False)
is_superuser = Column('is_superuser', Boolean, nullable=False)
creation_date = Column('creation_date', Date, nullable=False)
last_update_date = Column('last_update_date', Date, nullable=True)
def __init__(self, data_user):
self.user_name = data_user.get('username')
self.password = data_user.get('password')
self.is_active = data_user.get('is_active')
self.is_staff = data_user.get('is_staff')
self.is_superuser = data_user.get('is_superuser')
# self.email = data_user.get('creation_date')
def manage_user_authentication(self, session, data):
try:
user_verification = self.check_if_row_exists(session, data)
# insert validation
if user_verification:
# update method
self.user_update_password(session, data)
else:
# insert
self.insert_data(session, data)
except SQLAlchemyError as e:
logger.exception('An exception was occurred while execute transactions: %s', e)
raise mvc_exc.ItemNotStored(
'Can\'t insert user_id: "{}" with user_name: {} because it\'s not stored in "{}"'.format(
data.get('username'), data.get('is_active'), UsersAuthModel.__tablename__
)
)
    # Transaction to look up a User on the db to authenticate
def check_if_row_exists(self, session, data):
r"""
        Look for a user on the database to validate authentication.
        :param session: The session of the database.
        :param data: The data of User model to validate authentication on the API.
        :return row_exists: Statement data row used to check whether the user exists to authenticate on the API.
"""
row_exists = None
user_id = 0
try:
user_row = self.get_user_by_id(session, data)
if user_row is not None:
user_id = user_row.user_id
else:
user_id = 0
logger.info('User Row object in DB: %s', str(user_row))
row_exists = session.query(UsersAuthModel).filter(UsersAuthModel.user_id == user_id). \
filter(UsersAuthModel.is_active == "true").scalar()
logger.info('Row to data: {}, Exists: %s'.format(data), str(row_exists))
except SQLAlchemyError as exc:
row_exists = None
logger.exception('An exception was occurred while execute transactions: %s', str(str(exc.args) + ':' +
str(exc.code)))
raise mvc_exc.IntegrityError(
'Row not stored in "{}". IntegrityError: {}'.format(data.get('username'),
str(str(exc.args) + ':' + str(exc.code)))
)
finally:
session.close()
return row_exists
def insert_data(self, session, data):
r"""
        Insert a new user into the database for API authentication.
        :param session: The session of the database.
        :param data: The data of User model to store for authentication on the API.
"""
endpoint_response = None
if not self.check_if_row_exists(session, data):
try:
self.creation_date = get_current_date(session)
data['creation_date'] = self.creation_date
new_row = UsersAuthModel(data)
logger.info('New Row User name: %s', str(new_row.user_name))
session.add(new_row)
user_row = self.get_user_by_id(session, data)
logger.info('User ID Inserted: %s', str(user_row.user_id))
session.flush()
data['user_id'] = user_row.user_id
# check insert correct
row_inserted = self.get_one_user(session, data)
logger.info('Data User inserted: %s, Original Data: {}'.format(data), str(row_inserted))
if row_inserted:
logger.info('User inserted is: %s', 'Username: {}, '
'IsActive: {} '
'CreationDate: {}'.format(row_inserted.user_name,
row_inserted.is_active,
row_inserted.creation_date))
endpoint_response = json.dumps({
"Username": row_inserted.user_name,
"Password": <PASSWORD>,
"IsActive": row_inserted.is_active,
"IsStaff": row_inserted.is_staff,
"IsSuperUser": row_inserted.is_superuser,
"CreationDate": row_inserted.creation_date
})
except SQLAlchemyError as exc:
endpoint_response = None
session.rollback()
logger.exception('An exception was occurred while execute transactions: %s', str(str(exc.args) + ':' +
str(exc.code)))
raise mvc_exc.IntegrityError(
'Row not stored in "{}". IntegrityError: {}'.format(data.get('username'),
str(str(exc.args) + ':' + str(exc.code)))
)
finally:
session.close()
return endpoint_response
# Transaction to update user's data on db to authenticate - PUT
def user_update(self, session, data):
r"""
Transaction to update data of a user to authenticate on the API correctly.
:param session: The ciudad name to update password hashed.
:param data: The password hashed to authenticate on the API.
"""
endpoint_response = None
if self.check_if_row_exists(session, data):
try:
user_row = self.get_user_by_id(session, data)
if user_row is not None:
user_id = user_row.user_id
else:
user_id = 0
self.last_update_date = get_current_date(session)
data['last_update_date'] = self.last_update_date
# update row to database
session.query(UsersAuthModel).filter(UsersAuthModel.user_id == user_id). \
update({"user_name": data.get('username'),
"password": data.get('password'),
"is_active": data.get('is_active'),
"is_staff": data.get('is_staff'),
"is_superuser": data.get('is_superuser'),
"last_update_date": data.get('last_update_date')},
synchronize_session='fetch')
session.flush()
# check update correct
row_updated = self.get_one_user(session, data)
logger.info('Data Updated: %s', str(row_updated))
if row_updated:
logger.info('Data User updated')
endpoint_response = json.dumps({
"Username": row_updated.user_name,
"Password": <PASSWORD>,
"IsActive": row_updated.is_active,
"IsStaff": row_updated.is_staff,
"IsSuperUser": row_updated.is_superuser,
"CreationDate": row_updated.creation_date,
"UpdatedDate": row_updated.last_update_date
})
except SQLAlchemyError as exc:
session.rollback()
endpoint_response = None
logger.exception('An exception was occurred while execute transactions: %s',
str(str(exc.args) + ':' +
str(exc.code)))
raise mvc_exc.IntegrityError(
'Row not stored in "{}". IntegrityError: {}'.format(data.get('username'),
str(str(exc.args) + ':' + str(exc.code)))
)
finally:
session.close()
return endpoint_response
# Transaction to update user's password hashed on db to authenticate -
# On manage function to POST User
def user_update_password(self, session, data):
r"""
Transaction to update data of a user to authenticate on the API correctly.
:param session: The ciudad name to update password hashed.
:param data: The password hashed to authenticate on the API.
"""
endpoint_response = None
if self.check_if_row_exists(session, data):
try:
user_row = self.get_user_by_id(session, data)
if user_row is not None:
user_id = user_row.user_id
else:
user_id = 0
self.last_update_date = get_current_date(session)
data['last_update_date'] = self.last_update_date
# update row to database
session.query(UsersAuthModel).filter(UsersAuthModel.user_id == user_id). \
update({"password": data.get('password'),
"last_update_date": data.get('last_update_date')},
synchronize_session='fetch')
session.flush()
# check update correct
row_updated = self.get_one_user(session, data)
logger.info('Data Updated: %s', str(row_updated))
if row_updated:
logger.info('Data User updated')
endpoint_response = json.dumps({
"Username": row_updated.user_name,
"Password": <PASSWORD>,
"IsActive": row_updated.is_active,
"IsStaff": row_updated.is_staff,
"IsSuperUser": row_updated.is_superuser,
"CreationDate": row_updated.creation_date,
"UpdatedDate": row_updated.last_update_date
})
except SQLAlchemyError as exc:
session.rollback()
endpoint_response = None
logger.exception('An exception was occurred while execute transactions: %s',
str(str(exc.args) + ':' +
str(exc.code)))
raise mvc_exc.IntegrityError(
'Row not stored in "{}". IntegrityError: {}'.format(data.get('username'),
str(str(exc.args) + ':' + str(exc.code)))
)
finally:
session.close()
return endpoint_response
# Transaction to delete user's data on db to authenticate - DELETE
def user_inactivate(self, session, data):
r"""
Transaction to inactivate user to authenticate on the API correctly.
:param session: The database object session connect.
:param data: The data dictionary to inactivate on the API.
"""
endpoint_response = None
if self.check_if_row_exists(session, data):
try:
user_row = self.get_user_by_id(session, data)
if user_row is not None:
user_id = user_row.user_id
else:
user_id = 0
self.last_update_date = get_current_date(session)
data['last_update_date'] = self.last_update_date
# update row to database
session.query(UsersAuthModel).filter(UsersAuthModel.user_id == user_id). \
filter(UsersAuthModel.is_active == "true"). \
update({"is_active": "false",
"last_update_date": data.get('last_update_date')},
synchronize_session='fetch')
session.flush()
# check update correct
row_updated = self.get_one_user(session, data)
logger.info('Data Deleted: %s', str(row_updated))
if row_updated:
logger.info('Data User updated')
endpoint_response = json.dumps({
"Username": row_updated.user_name,
"Password": <PASSWORD>,
"IsActive": row_updated.is_active,
"IsStaff": row_updated.is_staff,
"IsSuperUser": row_updated.is_superuser,
"CreationDate": row_updated.creation_date,
"UpdatedDate": row_updated.last_update_date
})
except SQLAlchemyError as exc:
session.rollback()
endpoint_response = None
logger.exception('An exception was occurred while execute transactions: %s',
str(str(exc.args) + ':' +
str(exc.code)))
raise mvc_exc.IntegrityError(
'Row not stored in "{}". IntegrityError: {}'.format(data.get('username'),
str(str(exc.args) + ':' + str(exc.code)))
)
finally:
session.close()
return endpoint_response
@staticmethod
def get_user_by_id(session, data):
row = None
try:
row_exists = session.query(UsersAuthModel).filter(UsersAuthModel.user_name == data.get('username')). \
filter(UsersAuthModel.is_active == data.get('is_active')). \
filter(UsersAuthModel.is_superuser == data.get('is_superuser')).scalar()
if row_exists:
row = session.query(UsersAuthModel).filter(UsersAuthModel.user_name == data.get('username')).\
filter(UsersAuthModel.is_active == data.get('is_active')).\
filter(UsersAuthModel.is_superuser == data.get('is_superuser')).one()
                logger.info('Data User on Db: %s',
                            'Username: {}, Is_Active: {}'.format(row.user_name, row.is_active))
except SQLAlchemyError as exc:
logger.exception('An exception was occurred while execute transactions: %s', str(str(exc.args) + ':' +
str(exc.code)))
raise mvc_exc.ItemNotStored(
'Can\'t read data: "{}" because it\'s not stored in "{}". Row empty: {}'.format(
data.get('username'), UsersAuthModel.__tablename__, str(str(exc.args) + ':' + str(exc.code))
)
)
finally:
session.close()
return row
def get_one_user(self, session, data):
row_user = None
try:
user_row = self.get_user_by_id(session, data)
if session.query(UsersAuthModel).filter(UsersAuthModel.user_id == user_row.user_id).scalar():
row_user = session.query(UsersAuthModel).filter(UsersAuthModel.user_id == user_row.user_id).one()
            if row_user:
                logger.info('Data User on Db: %s',
                            'Username: {}, IsActive: {}, IsSuperUser: {}'.format(row_user.user_name,
                                                                                 row_user.is_active,
                                                                                 row_user.is_superuser))
except SQLAlchemyError as exc:
logger.exception('An exception was occurred while execute transactions: %s', str(str(exc.args) + ':' +
str(exc.code)))
raise mvc_exc.ItemNotStored(
'Can\'t read data: "{}" because it\'s not stored in "{}". Row empty: {}'.format(
data.get('user_id'), UsersAuthModel.__tablename__, str(str(exc.args) + ':' + str(exc.code))
)
)
finally:
session.close()
return row_user
@staticmethod
def get_all_users(session, data):
r"""
Function to retrieve all users registered.
:param session: The database object session connect.
:param data: The data dictionary to inactivate on the API.
"""
all_users = None
user_data = []
page = 1
per_page = 10
email_recipients = []
all_users = session.query(UsersAuthModel).all()
if 'offset' in data.keys() and 'limit' in data.keys():
page = data.get('offset')
per_page = data.get('limit')
all_users = session.query(UsersAuthModel).paginate(page=page, per_page=per_page, error_out=False).all()
for user_rs in all_users:
id_user = user_rs.user_id
username = user_rs.user_name
password = <PASSWORD>
is_active = user_rs.is_active
is_staff = user_rs.is_staff
is_superuser = user_rs.is_superuser
creation_date = user_rs.creation_date
last_update_date = user_rs.last_update_date
user_data += [{
"AuthUser": {
"Id": id_user,
"Username": username,
"Password": password,
"IsActive": is_active,
"IsStaff": is_staff,
"IsSuperuser": is_superuser,
"CreationDate": creation_date,
"LastUpdateDate": last_update_date
}
}]
if bool(is_active) and bool(is_superuser):
email_recipients.append(username)
cfg_app.email_recipients = email_recipients
return json.dumps(user_data)
@staticmethod
def get_users_recipients(session):
r"""
Function to retrieve all usernames registered to generate recipients.
:param session: The database object session connect.
"""
all_users = None
email_recipients = []
all_users = session.query(UsersAuthModel).all()
        for user_rs in all_users:
            username = user_rs.user_name
            if bool(user_rs.is_active) and bool(user_rs.is_superuser):
                email_recipients.append(username)
return email_recipients
    def __repr__(self):
        return "<UsersAuthModel(user_id='%s', user_name='%s', password='%s', is_active='%s', " \
               "is_staff='%s', is_superuser='%s', creation_date='%s', " \
               "last_update_date='%s')>" % (self.user_id, self.user_name, self.password, self.is_active,
                                            self.is_staff, self.is_superuser, self.creation_date,
                                            self.last_update_date)
```
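A short usage sketch for the authentication model above. The password hashing step is an assumption (the model stores whatever string it receives, and Flask-Bcrypt is already pulled in by `database_backend.py`); the import path of the model and the integer flags mirror the test payloads later in this repository, everything else is illustrative.
```python
# Hypothetical sketch: hash the password before delegating to
# manage_user_authentication (insert-or-update). Module path is assumed.
from flask_bcrypt import Bcrypt

from db_controller.database_backend import init_db_connection
from apps.user.UsersAuthModel import UsersAuthModel  # illustrative import path

bcrypt = Bcrypt()


def register_or_refresh_user(username, raw_password):
    conn_db, session_db = init_db_connection()
    payload = {
        "username": username,
        # generate_password_hash returns bytes; decode it for the String column
        "password": bcrypt.generate_password_hash(raw_password).decode('utf-8'),
        "is_active": 1,
        "is_staff": 0,
        "is_superuser": 1,
    }
    user_model = UsersAuthModel(payload)
    # Inserts the row if it does not exist, otherwise updates the stored hash
    return user_model.manage_user_authentication(session_db, payload)
```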
#### File: apps/category/CategoryModel.py
```python
__author__ = "<NAME> (<EMAIL>)"
__copyright__ = "Copyright 2021"
__license__ = ""
__history__ = """ """
__version__ = "1.21.H28.1 ($Rev: 1 $)"
import json
import logging
from sqlalchemy_filters import apply_filters
from sqlalchemy import Column, Numeric, Integer, String, Date, Time, Sequence
from db_controller.database_backend import *
from db_controller import mvc_exceptions as mvc_exc
cfg_db = get_config_settings_db()
CATEGORY_ID_SEQ = Sequence('category_seq') # define sequence explicitly
class CategoryModel(Base):
r"""
Class to instance the data of CategoryModel on the database.
Transactions:
- Create: Add data to the database if not exists.
- Update: Update data to the database if exists.
- Delete: Inactivate data to the database if exists.
- Select: Get data from databases.
"""
__tablename__ = 'category'
category_id = Column('id_category', Integer, CATEGORY_ID_SEQ,
primary_key=True, server_default=CATEGORY_ID_SEQ.next_value())
category_name = Column('name_category', String, nullable=False, index=True)
category_short_description = Column('short_desc_category', String, nullable=False)
category_status = Column('status_category', String, nullable=False, index=True)
creation_date = Column('creation_date', Date, nullable=False)
last_update_date = Column('last_update_date', Date, nullable=True)
def __init__(self, data_category):
self.category_name = data_category.get('nombre_categoria')
self.category_short_description = data_category.get('descrippcion_corta_categoria')
self.category_status = data_category.get('estatus_categoria')
def check_if_row_exists(self, session, data):
"""
Validate if row exists on database from dictionary data
:param session: Session database object
:param data: Dictionary with data to make validation function
:return: row_exists: Object with boolean response from db
"""
row_exists = None
id_category = 0
try:
# for example to check if the insert on db is correct
row_category = self.get_category_id(session, data)
if row_category is not None:
id_category = row_category.category_id
else:
id_category = 0
logger.info('Category Row object in DB: %s', str(id_category))
row_exists = session.query(CategoryModel).filter(CategoryModel.category_id == id_category). \
filter(CategoryModel.category_status == 'activo').scalar()
logger.info('Row to data: {}, Exists: %s'.format(data), str(row_exists))
except SQLAlchemyError as exc:
row_exists = None
logger.exception('An exception was occurred while execute transactions: %s', str(str(exc.args) + ':' +
str(exc.code)))
raise mvc_exc.IntegrityError(
'Row not stored in "{}". IntegrityError: {}'.format(data.get('nombre_categoria'),
str(str(exc.args) + ':' + str(exc.code)))
)
finally:
session.close()
return row_exists
def insert_data(self, session, data):
"""
Function to insert new row on database
:param session: Session database object
:param data: Dictionary to insert new the data containing on the db
:return: endpoint_response
"""
endpoint_response = None
if not self.check_if_row_exists(session, data):
try:
self.creation_date = get_current_date(session)
data['creation_date'] = self.creation_date
new_row = CategoryModel(data)
logger.info('New Row Category: %s', str(new_row.category_name))
session.add(new_row)
row_category = self.get_category_id(session, data)
logger.info('Category ID Inserted: %s', str(row_category.category_id))
session.flush()
data['id_categoria'] = row_category.category_id
# check insert correct
row_inserted = self.get_one_category(session, data)
logger.info('Data Category inserted: %s, Original Data: {}'.format(data), str(row_inserted))
if row_inserted:
endpoint_response = json.dumps({
"id_categoria": str(row_category.category_id),
"nombre_categoria": row_category.category_name,
"descripcion_categoria": row_category.category_short_description,
"estatus_categoria": row_category.category_status
})
except SQLAlchemyError as exc:
endpoint_response = None
session.rollback()
logger.exception('An exception was occurred while execute transactions: %s', str(str(exc.args) + ':' +
str(exc.code)))
raise mvc_exc.IntegrityError(
'Row not stored in "{}". IntegrityError: {}'.format(data.get('nombre_categoria'),
str(str(exc.args) + ':' + str(exc.code)))
)
finally:
session.close()
return endpoint_response
# Transaction to update Category's data on db to authenticate - PUT
def update_data(self, session, data):
r"""
Transaction to update data of a user to authenticate on the API correctly.
:param session: The ciudad name to update password hashed.
:param data: The password hashed to authenticate on the API.
"""
endpoint_response = None
if self.check_if_row_exists(session, data):
try:
row_category = self.get_category_id(session, data)
if row_category is not None:
id_category = row_category.category_id
else:
id_category = 0
self.last_update_date = get_current_date(session)
data['last_update_date'] = self.last_update_date
# update row to database
session.query(CategoryModel).filter(CategoryModel.category_id == id_category). \
update({"category_name": data.get('nombre_categoria'),
"category_short_description": data.get('descrippcion_corta_categoria'),
"category_status": data.get('estatus_categoria'),
"last_update_date": data.get('last_update_date')},
synchronize_session='fetch')
session.flush()
# check update correct
row_updated = self.get_one_category(session, data)
logger.info('Data Updated: %s', str(row_updated))
if row_updated:
logger.info('Data Category updated')
endpoint_response = json.dumps({
"id_categoria": str(row_updated.category_id),
"nombre_categoria": row_updated.category_name,
"descripcion_categoria": row_updated.category_short_description,
"estatus_categoria": row_updated.category_status,
"creation_date": row_updated.creation_date,
"last_update_date": row_updated.last_update_date
})
except SQLAlchemyError as exc:
session.rollback()
endpoint_response = None
logger.exception('An exception was occurred while execute transactions: %s',
str(str(exc.args) + ':' +
str(exc.code)))
raise mvc_exc.IntegrityError(
'Row not stored in "{}". IntegrityError: {}'.format(data.get('nombre_categoria'),
str(str(exc.args) + ':' + str(exc.code)))
)
finally:
session.close()
return endpoint_response
# Transaction to delete category's data on db to authenticate - DELETE
def category_inactivate(self, session, data):
r"""
Transaction to inactivate category data on the API correctly.
:param session: The database object session connect.
:param data: The data dictionary to inactivate on the API.
"""
endpoint_response = None
if self.check_if_row_exists(session, data):
try:
row_category = self.get_category_id(session, data)
if row_category is not None:
id_category = row_category.category_id
else:
id_category = 0
self.last_update_date = get_current_date(session)
data['last_update_date'] = self.last_update_date
# update row to database
session.query(CategoryModel).filter(CategoryModel.category_id == id_category). \
filter(CategoryModel.category_status == "activo"). \
update({"category_status": "inactivo",
"last_update_date": data.get('last_update_date')},
synchronize_session='fetch')
session.flush()
# check delete correct
row_deleted = self.get_one_category(session, data)
logger.info('Data Deleted: %s', str(row_deleted))
if row_deleted:
logger.info('Data Category inactivated')
endpoint_response = json.dumps({
"id_categoria": str(row_deleted.category_id),
"nombre_categoria": row_deleted.category_name,
"descripcion_categoria": row_deleted.category_short_description,
"estatus_categoria": row_deleted.category_status,
"creation_date": row_deleted.creation_date,
"last_update_date": row_deleted.last_update_date
})
except SQLAlchemyError as exc:
session.rollback()
endpoint_response = None
logger.exception('An exception was occurred while execute transactions: %s',
str(str(exc.args) + ':' +
str(exc.code)))
raise mvc_exc.IntegrityError(
'Row not stored in "{}". IntegrityError: {}'.format(data.get('nombre_categoria'),
str(str(exc.args) + ':' + str(exc.code)))
)
finally:
session.close()
return endpoint_response
@staticmethod
def get_category_id(session, data):
"""
Get Category object row registered on database to get the ID
:param session: Database session object
:param data: Dictionary with data to get row
:return: row_category: The row on database registered
"""
row_category = None
try:
row_exists = session.query(CategoryModel).\
filter(CategoryModel.category_name == data.get('nombre_categoria')).scalar()
logger.info('Row Data Category Exists on DB: %s', str(row_exists))
if row_exists:
row_category = session.query(CategoryModel). \
filter(CategoryModel.category_name == data.get('nombre_categoria')). \
filter(CategoryModel.category_status == data.get('estatus_categoria')).one()
logger.info('Row ID Category data from database object: {}'.format(str(row_category)))
except SQLAlchemyError as exc:
logger.exception('An exception was occurred while execute transactions: %s', str(str(exc.args) + ':' +
str(exc.code)))
raise mvc_exc.ItemNotStored(
'Can\'t read data: "{}" because it\'s not stored in "{}". Row empty: {}'.format(
data.get('nombre_categoria'), CategoryModel.__tablename__, str(str(exc.args) + ':' +
str(exc.code))
)
)
finally:
session.close()
return row_category
@staticmethod
def get_one_category(session, data):
row = None
try:
row = session.query(CategoryModel).filter(CategoryModel.category_id == data.get('id_categoria')).one()
if row:
logger.info('Data Categoria on Db: %s',
'Nombre: {}, Estatus: {}'.format(row.category_name,
row.category_status))
except SQLAlchemyError as exc:
row = None
logger.exception('An exception was occurred while execute transactions: %s', str(str(exc.args) + ':' +
str(exc.code)))
raise mvc_exc.ItemNotStored(
'Can\'t read data: "{}" because it\'s not stored in "{}". Row empty: {}'.format(
data.get('nombre_categoria'), CategoryModel.__tablename__, str(str(exc.args) + ':' + str(exc.code))
)
)
finally:
session.close()
return row
@staticmethod
def get_all_categories(session, data):
"""
Get all Categories objects data registered on database.
:param data: Dictionary contains relevant data to filter Query on resultSet DB
:param session: Database session
:return: json.dumps dict
"""
all_categories = None
category_data = []
page = None
per_page = None
all_categories = session.query(CategoryModel).all()
if 'offset' in data.keys() and 'limit' in data.keys():
page = data.get('offset')
            per_page = data.get('limit')
all_categories = session.query(CategoryModel).paginate(page=page, per_page=per_page, error_out=False).all()
for category in all_categories:
category_id = category.category_id
category_name = category.category_name
category_short_desc = category.category_short_description
category_status = category.category_status
category_data += [{
"Categoria": {
"id_categoria": str(category_id),
"nombre_categoria": category_name,
"descripcion_categoria": category_short_desc,
"estatus_categoria": category_status,
"creation_date": category.creation_date,
"last_update_date": category.last_update_date
}
}]
return json.dumps(category_data)
@staticmethod
def get_category_by_filters(session, data, filter_spec):
"""
Get list of States filtered by options by user request
:param session: Database session
:param data: Dictionary contains relevant data to filter Query on resultSet DB
:param filter_spec: List of options defined by user request
:return: json.dumps dict
"""
page = 1
per_page = 10
query_result = None
category_data = []
if 'offset' in data.keys() and 'limit' in data.keys():
page = data.get('offset')
            per_page = data.get('limit')
query_result = session.query(CategoryModel).all()
query = session.query(CategoryModel)
filtered_query = apply_filters(query, filter_spec)
if filter_spec is not None and filtered_query is not None:
query_result = filtered_query.paginate(page=page, per_page=per_page, error_out=False).all()
logger.info('Query filtered resultSet: %s', str(query_result))
for category in query_result:
category_id = category.category_id
category_name = category.category_name
category_short_desc = category.category_short_description
category_status = category.category_status
category_data += [{
"Categoria": {
"id_categoria": str(category_id),
"nombre_categoria": category_name,
"descripcion_categoria": category_short_desc,
"estatus_categoria": category_status,
"creation_date": category.creation_date,
"last_update_date": category.last_update_date
}
}]
return json.dumps(category_data)
def __repr__(self):
return "<CategoryModel(category_id='%s', " \
" category_name='%s', " \
" category_short_description='%s', " \
" category_status='%s'," \
" creation_date='%s', " \
" last_update_date='%s')>" % (self.category_id,
self.category_name,
self.category_short_description,
self.category_status,
self.creation_date,
self.last_update_date)
```
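For reference, the `filter_spec` consumed by `get_category_by_filters` follows the plain sqlalchemy-filters dictionary format. The sketch below shows that format in isolation; the wildcard value and the standalone session are illustrative, only the field names and operators come from the model and view code in this repository.
```python
# Minimal sketch of the sqlalchemy-filters spec used by get_category_by_filters.
from sqlalchemy_filters import apply_filters

from db_controller.database_backend import init_db_connection
from apps.category.CategoryModel import CategoryModel

conn_db, session_db = init_db_connection()

# Each entry names a mapped attribute of CategoryModel, an operator and a value.
filter_spec = [
    {'field': 'category_status', 'op': '==', 'value': 'activo'},
    {'field': 'category_name', 'op': 'ilike', 'value': '%bebidas%'},  # illustrative value
]

filtered_query = apply_filters(session_db.query(CategoryModel), filter_spec)
for category in filtered_query.all():
    print(category.category_name, category.category_status)
```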
#### File: apps/category/view_endpoints.py
```python
__author__ = "<NAME> (<EMAIL>)"
__copyright__ = "Copyright 2021"
__license__ = ""
__history__ = """ """
__version__ = "1.21.H05.1 ($Rev: 2 $)"
from flask import Blueprint, json, request
from flask_jwt_extended import JWTManager, jwt_required
from db_controller.database_backend import *
from .CategoryModel import CategoryModel
from handler_controller.ResponsesHandler import ResponsesHandler as HandlerResponse
from handler_controller.messages import SuccessMsg, ErrorMsg
from logger_controller.logger_control import *
from utilities.Utility import *
cfg_app = get_config_settings_app()
category_api = Blueprint('category_api', __name__)
jwt = JWTManager(category_api)
logger = configure_logger('ws')
@category_api.route('/', methods=['POST', 'PUT', 'GET', 'DELETE'])
@jwt_required
def endpoint_manage_category_data():
conn_db, session_db = init_db_connection()
headers = request.headers
auth = headers.get('Authorization')
    if not auth or 'Bearer' not in auth:
return HandlerResponse.request_unauthorized(ErrorMsg.ERROR_REQUEST_UNAUTHORIZED, auth)
else:
if request.method == 'POST':
data = request.get_json(force=True)
category_model = CategoryModel(data)
if not data or str(data) is None:
return HandlerResponse.request_conflict(ErrorMsg.ERROR_REQUEST_DATA_CONFLICT, data)
logger.info('Data Json Category to Manage on DB: %s', str(data))
category_response = category_model.insert_data(session_db, data)
logger.info('Data Category to Register on DB: %s', str(data))
if not category_response:
return HandlerResponse.response_success(ErrorMsg.ERROR_DATA_NOT_FOUND, category_response)
return HandlerResponse.response_resource_created(SuccessMsg.MSG_CREATED_RECORD, category_response)
elif request.method == 'PUT':
data = request.get_json(force=True)
category_model = CategoryModel(data)
if not data or str(data) is None:
return HandlerResponse.request_conflict(ErrorMsg.ERROR_REQUEST_DATA_CONFLICT, data)
logger.info('Data Json Category to Manage on DB: %s', str(data))
category_response = category_model.update_data(session_db, data)
            logger.info('Data Category to Update on DB: %s', str(data))
if not category_response:
return HandlerResponse.response_success(ErrorMsg.ERROR_DATA_NOT_FOUND, category_response)
return HandlerResponse.response_resource_created(SuccessMsg.MSG_UPDATED_RECORD, category_response)
elif request.method == 'DELETE':
data = request.get_json(force=True)
category_model = CategoryModel(data)
if not data or str(data) is None:
return HandlerResponse.request_conflict(ErrorMsg.ERROR_REQUEST_DATA_CONFLICT, data)
logger.info('Data Json Category to Manage on DB: %s', str(data))
category_response = category_model.category_inactivate(session_db, data)
            logger.info('Data Category to Inactivate on DB: %s', str(data))
if not category_response:
return HandlerResponse.response_success(ErrorMsg.ERROR_DATA_NOT_FOUND, category_response)
return HandlerResponse.response_resource_created(SuccessMsg.MSG_DELETED_RECORD, category_response)
elif request.method == 'GET':
data = dict()
states_on_db = None
data['offset'] = request.args.get('offset', 1)
data['limit'] = request.args.get('limit', 10)
category_model = CategoryModel(data)
states_on_db = category_model.get_all_categories(session_db, data)
if not bool(states_on_db) or not states_on_db or "[]" == states_on_db:
return HandlerResponse.response_success(ErrorMsg.ERROR_DATA_NOT_FOUND, states_on_db)
return HandlerResponse.response_success(SuccessMsg.MSG_GET_RECORD, states_on_db)
else:
return HandlerResponse.request_not_found(ErrorMsg.ERROR_METHOD_NOT_ALLOWED)
@category_api.route('/filter', methods=['GET'])
def get_looking_for_category():
conn_db, session_db = init_db_connection()
data = dict()
query_string = request.query_string.decode('utf-8')
if request.method == 'GET':
state_on_db = None
filter_spec = []
data['offset'] = request.args.get('offset', 1)
data['limit'] = request.args.get('limit', 10)
if 'nombre_categoria' in query_string:
category_name = request.args.get('nombre_categoria')
data['nombre_categoria'] = category_name
filter_spec.append({'field': 'category_name', 'op': 'ilike', 'value': category_name})
# filter_spec.append({'field': 'nombre_estado', 'op': '==', 'value': category_name})
if 'estatus_categoria' in query_string:
category_status = request.args.get('estatus_categoria')
data['estatus_categoria'] = category_status
filter_spec.append({'field': 'category_status', 'op': '==', 'value': category_status})
category_model = CategoryModel(data)
state_on_db = category_model.get_category_by_filters(session_db, data, filter_spec)
if not bool(state_on_db) or not state_on_db or "[]" == state_on_db:
return HandlerResponse.response_success(ErrorMsg.ERROR_DATA_NOT_FOUND, state_on_db)
return HandlerResponse.response_success(SuccessMsg.MSG_GET_RECORD, state_on_db)
else:
return HandlerResponse.request_not_found(ErrorMsg.ERROR_METHOD_NOT_ALLOWED)
```
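A client-side sketch for the endpoints above. The base URL and the way the JWT access token is obtained are assumptions (the blueprint mount point is registered outside this section); the query parameters and JSON keys are the ones actually read by the view and the model.
```python
# Hypothetical client sketch for the category endpoints above.
# BASE_URL and the token value are assumptions; payload keys come from CategoryModel.
import requests

BASE_URL = 'http://localhost:5000/api/v1.0/manager/category'  # assumed mount point
TOKEN = '<access-token-from-the-login-endpoint>'

headers = {
    'Content-Type': 'application/json',
    'Authorization': 'Bearer {}'.format(TOKEN),
}

# Create a category (POST branch of endpoint_manage_category_data)
payload = {
    'nombre_categoria': 'Bebidas',
    'descrippcion_corta_categoria': 'Bebidas frias y calientes',
    'estatus_categoria': 'activo',
}
resp = requests.post(BASE_URL + '/', headers=headers, json=payload)
print(resp.status_code, resp.json())

# Filter categories (the /filter route above does not require the JWT)
params = {'nombre_categoria': 'Bebidas', 'estatus_categoria': 'activo',
          'offset': 1, 'limit': 10}
resp = requests.get(BASE_URL + '/filter', params=params)
print(resp.status_code, resp.json())
```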
#### File: apps/product/ProductModel.py
```python
__author__ = "<NAME> (<EMAIL>)"
__copyright__ = "Copyright 2021"
__license__ = ""
__history__ = """ """
__version__ = "1.21.H05.1 ($Rev: 2 $)"
import json
import logging
from apps.category.CategoryModel import CategoryModel
from sqlalchemy_filters import apply_filters
from sqlalchemy import Column, Numeric, Integer, Float, String, Date, Time, Boolean, Sequence
from db_controller.database_backend import *
from db_controller import mvc_exceptions as mvc_exc
from utilities.Utility import *
cfg_db = get_config_settings_db()
PRODUCT_ID_SEQ = Sequence('product_seq') # define sequence explicitly
class ProductModel(Base):
r"""
    Class to instance the data of ProductModel on the database.
Transactions:
- Insert: Add data to the database if not exists.
        - Select: Get data from the database.
"""
__tablename__ = 'product'
product_id = Column('id_product', Integer, PRODUCT_ID_SEQ,
primary_key=True, server_default=PRODUCT_ID_SEQ.next_value())
product_sku = Column('sku_product', String, nullable=False, index=True)
product_brand = Column('brand_product', String, nullable=False, index=True)
unit_of_measure = Column('uom_product', String, nullable=False, index=True)
product_stock = Column('stock_product', Integer, nullable=False, index=True)
product_name = Column('name_product', String, nullable=False, index=True)
product_title = Column('title_product', String, nullable=False, index=True)
product_long_description = Column('description_product', String, nullable=False)
product_photo = Column('photo_product', String, nullable=False)
product_price = Column('price_product', Float, nullable=False, index=True)
product_tax = Column('tax_product', Float, nullable=False)
product_tax_rate = Column('tax_rate_product', Float, nullable=False)
product_status = Column('status_product', String, nullable=False, index=True)
product_published = Column('published_product', Boolean, nullable=False, index=True)
product_length = Column('length_product', Float, nullable=False)
product_width = Column('width_product', Float, nullable=False)
product_height = Column('height_product', Float, nullable=False)
product_weight = Column('weight_product', Float, nullable=False)
# Flag to notify id user change data on product (On PUT or DELETE)
change_made = Column('change_made', Boolean, nullable=False, index=True)
# Number of times of anonymous user was queried product
product_queried_count = Column('product_queried_count', Integer, nullable=False, index=True)
creation_date = Column('creation_date', Date, nullable=False)
last_update_date = Column('last_update_date', Date, nullable=True)
category_id = Column(
'product_id_category',
Integer,
        ForeignKey('category.id_category', onupdate='CASCADE', ondelete='CASCADE'),
nullable=True,
unique=True
# no need to add index=True, all FKs have indexes
)
id_category = relationship(CategoryModel,
backref=__tablename__)
    parent_category_id = Column('product_id_parent_cat', Integer, nullable=True)
def __init__(self, data_product):
self.product_sku = data_product.get('sku_producto')
self.product_brand = data_product.get('marca_producto')
self.unit_of_measure = data_product.get('unidad_medida_producto')
self.product_stock = data_product.get('inventario_producto')
self.product_name = data_product.get('nombre_producto')
self.product_title = data_product.get('titulo_producto')
self.product_long_description = data_product.get('descripcion_larga')
self.product_photo = data_product.get('url_imagen')
self.product_price = decimal_formatting(data_product.get('precio_unitario'))
self.product_tax = decimal_formatting(data_product.get('costo_impuesto'))
self.product_tax_rate = decimal_formatting(data_product.get('tasa_impuesto'))
self.product_status = data_product.get('estatus_producto')
self.product_published = data_product.get('producto_publicado')
self.product_length = decimal_formatting(data_product.get('volumetria_largo_producto'))
self.product_width = decimal_formatting(data_product.get('volumetria_ancho_producto'))
self.product_height = decimal_formatting(data_product.get('volumetria_alto_producto'))
self.product_weight = decimal_formatting(data_product.get('volumetria_peso_producto'))
self.category_id = data_product.get('id_categoria_producto')
self.parent_category_id = data_product.get('id_categoria_padre_producto')
def check_if_row_exists(self, session, data):
"""
Validate if row exists on database from dictionary data
:param session: Session database object
:param data: Dictionary with data to make validation function
:return: row_exists: Object with boolean response from db
"""
row_exists = None
id_product = 0
try:
# for example to check if the insert on db is correct
row_product = self.get_product_id(session, data)
if row_product is not None:
id_product = row_product.product_id
else:
id_product = 0
logger.info('Product Row object in DB: %s', str(id_product))
row_exists = session.query(ProductModel).filter(ProductModel.product_id == id_product).scalar()
logger.info('Row to data: {}, Exists: %s'.format(data), str(row_exists))
except SQLAlchemyError as exc:
row_exists = None
logger.exception('An exception was occurred while execute transactions: %s', str(str(exc.args) + ':' +
str(exc.code)))
raise mvc_exc.IntegrityError(
'Row not stored in "{}". IntegrityError: {}'.format(data.get('sku_producto'),
str(str(exc.args) + ':' + str(exc.code)))
)
finally:
session.close()
return row_exists
def insert_data(self, session, data):
"""
Function to insert/create new row on database
:param session: Session database object
:param data: Dictionary to insert new the data containing on the db
:return: endpoint_response
"""
endpoint_response = None
if not self.check_if_row_exists(session, data):
try:
self.creation_date = get_current_date(session)
data['creation_date'] = self.creation_date
new_row = ProductModel(data)
logger.info('New Row Product: %s', str(new_row.product_sku))
session.add(new_row)
row_product = self.get_product_id(session, data)
logger.info('Product ID Inserted: %s', str(row_product.product_id))
session.flush()
data['id_product'] = row_product.product_id
# check insert correct
row_inserted = self.get_one_product(session, data)
logger.info('Data Product inserted: %s, Original Data: {}'.format(data), str(row_inserted))
if row_inserted:
endpoint_response = json.dumps({
"sku_producto": row_inserted.product_sku,
"marca_producto": row_inserted.product_brand,
"unidad_medida_producto": row_inserted.unit_of_measure,
"inventario_producto": row_inserted.product_stock,
"nombre_producto": row_inserted.product_name,
"titulo_producto": row_inserted.product_title,
"descripcion_larga": row_inserted.product_long_description,
"url_imagen": row_inserted.product_photo,
"precio_unitario": str(row_inserted.product_price),
"costo_impuesto": str(row_inserted.product_tax),
"tasa_impuesto": str(row_inserted.product_tax_rate),
"estatus_producto": row_inserted.product_status,
"producto_publicado": row_inserted.product_published,
"volumetria_largo_producto": str(row_inserted.product_length),
"volumetria_ancho_producto": str(row_inserted.product_width),
"volumetria_alto_producto": str(row_inserted.product_height),
"volumetria_peso_producto": str(row_inserted.product_weight),
"id_categoria_producto": str(row_inserted.category_id),
"id_categoria_padre_producto": str(row_inserted.parent_category_id),
"fecha_alta": str(row_inserted.creation_date)
})
except SQLAlchemyError as exc:
endpoint_response = None
session.rollback()
logger.exception('An exception was occurred while execute transactions: %s', str(str(exc.args) + ':' +
str(exc.code)))
raise mvc_exc.IntegrityError(
'Row not stored in "{}". IntegrityError: {}'.format(data.get('sku_producto'),
str(str(exc.args) + ':' + str(exc.code)))
)
finally:
session.close()
return endpoint_response
# Transaction to update Product's data on db to authenticate - PUT
def update_data(self, session, data):
r"""
Transaction to update data by a user authenticated on the API correctly.
:param session: The product object db session.
:param data: The dictionary json request on the API to update data values.
"""
endpoint_response = None
if self.check_if_row_exists(session, data):
try:
row_product = self.get_product_id(session, data)
logger.info('Product ID Updated: %s', str(row_product.product_id))
data['id_product'] = row_product.product_id
if row_product is not None:
id_product = row_product.product_id
else:
id_product = 0
self.last_update_date = get_current_date(session)
data['last_update_date'] = self.last_update_date
# update row to database
session.query(ProductModel).filter(ProductModel.product_id == id_product). \
update({"product_sku": data.get('sku_producto'),
"product_brand": data.get('marca_producto'),
"unit_of_measure": data.get('unidad_medida_producto'),
"product_stock": data.get('inventario_producto'),
"product_name": data.get('unidad_medida_producto'),
"product_title": data.get('unidad_medida_producto'),
"product_long_description": data.get('unidad_medida_producto'),
"product_photo": data.get('unidad_medida_producto'),
"product_price": data.get('unidad_medida_producto'),
"product_tax": data.get('unidad_medida_producto'),
"product_tax_rate": data.get('unidad_medida_producto'),
"product_status": data.get('unidad_medida_producto'),
"product_published": data.get('unidad_medida_producto'),
"product_length": data.get('unidad_medida_producto'),
"product_width": data.get('unidad_medida_producto'),
"product_height": data.get('unidad_medida_producto'),
"product_weight": data.get('unidad_medida_producto'),
"category_id": data.get('unidad_medida_producto'),
"parent_category_id": data.get('unidad_medida_producto'),
"change_made": "True",
"last_update_date": data.get('last_update_date')},
synchronize_session='fetch')
session.flush()
# check update correct
row_updated = self.get_one_product(session, data)
logger.info('Data Product updated: %s, Original Data: {}'.format(data), str(row_updated))
if row_updated:
logger.info('Data Product updated')
endpoint_response = json.dumps({
"sku_producto": row_updated.product_sku,
"marca_producto": row_updated.product_brand,
"unidad_medida_producto": row_updated.unit_of_measure,
"inventario_producto": row_updated.product_stock,
"nombre_producto": row_updated.product_name,
"titulo_producto": row_updated.product_title,
"descripcion_larga": row_updated.product_long_description,
"url_imagen": row_updated.product_photo,
"precio_unitario": str(row_updated.product_price),
"costo_impuesto": str(row_updated.product_tax),
"tasa_impuesto": str(row_updated.product_tax_rate),
"estatus_producto": row_updated.product_status,
"producto_publicado": row_updated.product_published,
"volumetria_largo_producto": str(row_updated.product_length),
"volumetria_ancho_producto": str(row_updated.product_width),
"volumetria_alto_producto": str(row_updated.product_height),
"volumetria_peso_producto": str(row_updated.product_weight),
"id_categoria_producto": str(row_updated.category_id),
"id_categoria_padre_producto": str(row_updated.parent_category_id),
"cambio_realizado": bool(row_updated.change_made),
"fecha_alta": str(row_updated.creation_date),
"fecha_actualizacion": str(row_updated.last_update_date)
})
except SQLAlchemyError as exc:
session.rollback()
endpoint_response = None
logger.exception('An exception was occurred while execute transactions: %s',
str(str(exc.args) + ':' +
str(exc.code)))
raise mvc_exc.IntegrityError(
'Row not stored in "{}". IntegrityError: {}'.format(data.get('sku_producto'),
str(str(exc.args) + ':' + str(exc.code)))
)
finally:
session.close()
return endpoint_response
# Transaction to delete Product's data on db - DELETE
def product_inactivate(self, session, data):
r"""
Transaction to inactivate Product data on the API correctly.
:param session: The database object session connect.
:param data: The data dictionary to inactivate register on the API.
"""
endpoint_response = None
if self.check_if_row_exists(session, data):
try:
row_product = self.get_product_id(session, data)
logger.info('Product ID Inactivated: %s', str(row_product.product_id))
data['id_product'] = row_product.product_id
if row_product is not None:
id_product = row_product.product_id
else:
id_product = 0
self.last_update_date = get_current_date(session)
data['last_update_date'] = self.last_update_date
# update row to database
session.query(ProductModel).filter(ProductModel.product_id == id_product). \
filter(ProductModel.product_status == "activo"). \
update({"product_status": "inactivo",
"last_update_date": data.get('last_update_date')},
synchronize_session='fetch')
session.flush()
# check delete correct
row_deleted = self.get_one_product(session, data)
logger.info('Data Deleted: %s', str(row_deleted))
if row_deleted:
logger.info('Data Product inactivated')
endpoint_response = json.dumps({
"sku_producto": row_deleted.product_sku,
"marca_producto": row_deleted.product_brand,
"unidad_medida_producto": row_deleted.unit_of_measure,
"inventario_producto": row_deleted.product_stock,
"nombre_producto": row_deleted.product_name,
"titulo_producto": row_deleted.product_title,
"descripcion_larga": row_deleted.product_long_description,
"url_imagen": row_deleted.product_photo,
"precio_unitario": str(row_deleted.product_price),
"costo_impuesto": str(row_deleted.product_tax),
"tasa_impuesto": str(row_deleted.product_tax_rate),
"estatus_producto": row_deleted.product_status,
"producto_publicado": row_deleted.product_published,
"volumetria_largo_producto": str(row_deleted.product_length),
"volumetria_ancho_producto": str(row_deleted.product_width),
"volumetria_alto_producto": str(row_deleted.product_height),
"volumetria_peso_producto": str(row_deleted.product_weight),
"id_categoria_producto": str(row_deleted.category_id),
"id_categoria_padre_producto": str(row_deleted.parent_category_id),
"creation_date": str(row_deleted.creation_date),
"last_update_date": str(row_deleted.last_update_date)
})
except SQLAlchemyError as exc:
session.rollback()
endpoint_response = None
logger.exception('An exception was occurred while execute transactions: %s',
str(str(exc.args) + ':' +
str(exc.code)))
raise mvc_exc.IntegrityError(
'Row not stored in "{}". IntegrityError: {}'.format(data.get('sku_producto'),
str(str(exc.args) + ':' + str(exc.code)))
)
finally:
session.close()
return endpoint_response
@staticmethod
def get_product_id(session, data):
"""
        Get Product object row registered on database to get the ID
:param session: Database session object
:param data: Dictionary with data to get row
:return: row_product: The row on database registered
"""
row_product = None
try:
row_exists = session.query(ProductModel).\
filter(ProductModel.product_sku == data.get('sku_producto')).\
filter(ProductModel.product_name == data.get('nombre_producto')).\
filter(ProductModel.product_brand == data.get('marca_producto')).scalar()
logger.info('Row Data Producto Exists on DB: %s', str(row_exists))
if row_exists:
row_product = session.query(ProductModel). \
filter(ProductModel.product_sku == data.get('sku_producto')). \
filter(ProductModel.product_name == data.get('nombre_producto')). \
filter(ProductModel.product_brand == data.get('marca_producto')).one()
logger.info('Row ID Product data from database object: {}'.format(str(row_product)))
except SQLAlchemyError as exc:
logger.exception('An exception was occurred while execute transactions: %s', str(str(exc.args) + ':' +
str(exc.code)))
raise mvc_exc.ItemNotStored(
'Can\'t read data: "{}" because it\'s not stored in "{}". Row empty: {}'.format(
data.get('nombre_producto'), ProductModel.__tablename__, str(str(exc.args) + ':' +
str(exc.code))
)
)
finally:
session.close()
return row_product
@staticmethod
def get_one_product(session, data):
row = None
try:
row = session.query(ProductModel). \
filter(ProductModel.product_sku == data.get('sku_producto')). \
filter(ProductModel.product_name == data.get('nombre_producto')). \
filter(ProductModel.product_brand == data.get('marca_producto')).one()
if row:
logger.info('Data Product on Db: %s',
'Nombre: {}, SKU: {}, Marca: {}'.format(row.product_name,
row.product_sku,
row.product_brand))
except SQLAlchemyError as exc:
row = None
logger.exception('An exception was occurred while execute transactions: %s', str(str(exc.args) + ':' +
str(exc.code)))
raise mvc_exc.ItemNotStored(
'Can\'t read data: "{}" because it\'s not stored in "{}". Row empty: {}'.format(
data.get('nombre_producto'), ProductModel.__tablename__, str(str(exc.args) + ':' + str(exc.code))
)
)
finally:
session.close()
return row
@staticmethod
def get_all_products(session, data):
"""
Get all Products objects data registered on database.
:param data: Dictionary contains relevant data to filter Query on resultSet DB
:param session: Database session
:return: json.dumps dict
"""
querying_counter = 0
all_products = None
product_data = []
page = None
per_page = None
all_products = session.query(ProductModel).all()
if 'offset' in data.keys() and 'limit' in data.keys():
page = data.get('offset')
            per_page = data.get('limit')
all_products = session.query(ProductModel).paginate(page=page, per_page=per_page, error_out=False).all()
for product in all_products:
if product.product_queried_count is not None:
querying_counter = int(product.product_queried_count)
querying_counter += 1
data['last_update_date'] = get_current_date(session)
# update row to database
            session.query(ProductModel).filter(ProductModel.product_id == product.product_id). \
update({"product_queried_count": querying_counter,
"last_update_date": data.get('last_update_date')},
synchronize_session='fetch')
session.flush()
product_data += [{
"Product": {
"sku_producto": product.product_sku,
"marca_producto": product.product_brand,
"unidad_medida_producto": product.unit_of_measure,
"inventario_producto": product.product_stock,
"nombre_producto": product.product_name,
"titulo_producto": product.product_title,
"descripcion_larga": product.product_long_description,
"url_imagen": product.product_photo,
"precio_unitario": str(product.product_price),
"costo_impuesto": str(product.product_tax),
"tasa_impuesto": str(product.product_tax_rate),
"estatus_producto": product.product_status,
"producto_publicado": product.product_published,
"volumetria_largo_producto": str(product.product_length),
"volumetria_ancho_producto": str(product.product_width),
"volumetria_alto_producto": str(product.product_height),
"volumetria_peso_producto": str(product.product_weight),
"id_categoria_producto": str(product.category_id),
"id_categoria_padre_producto": str(product.parent_category_id),
"contador_busqueda": str(querying_counter),
"fecha_alta": str(product.creation_date),
"fecha_actualizacion": str(data.get('last_update_date'))
}
}]
return json.dumps(product_data)
@staticmethod
def get_products_by_filters(session, data, filter_spec):
"""
Get list of Products filtered by options by user request
:param session: Database session
:param data: Dictionary contains relevant data to filter Query on resultSet DB
:param filter_spec: List of options defined by user request
:return: json.dumps dict
"""
querying_counter = 0
page = 1
per_page = 10
query_result = None
product_data = []
if 'offset' in data.keys() and 'limit' in data.keys():
page = data.get('offset')
            per_page = data.get('limit')
query_result = session.query(ProductModel).all()
query = session.query(ProductModel)
filtered_query = apply_filters(query, filter_spec)
if filter_spec is not None and filtered_query is not None:
query_result = filtered_query.paginate(page=page, per_page=per_page, error_out=False).all()
logger.info('Query filtered resultSet: %s', str(query_result))
for product in query_result:
if product.product_queried_count is not None:
querying_counter = int(product.product_queried_count)
querying_counter += 1
data['last_update_date'] = get_current_date(session)
# update row to database
            session.query(ProductModel).filter(ProductModel.product_id == product.product_id). \
update({"product_queried_count": querying_counter,
"last_update_date": data.get('last_update_date')},
synchronize_session='fetch')
session.flush()
product_data += [{
"Product": {
"sku_producto": product.product_sku,
"marca_producto": product.product_brand,
"unidad_medida_producto": product.unit_of_measure,
"inventario_producto": product.product_stock,
"nombre_producto": product.product_name,
"titulo_producto": product.product_title,
"descripcion_larga": product.product_long_description,
"url_imagen": product.product_photo,
"precio_unitario": str(product.product_price),
"costo_impuesto": str(product.product_tax),
"tasa_impuesto": str(product.product_tax_rate),
"estatus_producto": product.product_status,
"producto_publicado": product.product_published,
"volumetria_largo_producto": str(product.product_length),
"volumetria_ancho_producto": str(product.product_width),
"volumetria_alto_producto": str(product.product_height),
"volumetria_peso_producto": str(product.product_weight),
"id_categoria_producto": str(product.category_id),
"id_categoria_padre_producto": str(product.parent_category_id),
"contador_busqueda": str(querying_counter),
"fecha_alta": str(product.creation_date),
"fecha_actualizacion": str(data.get('last_update_date'))
}
}]
return json.dumps(product_data)
def __repr__(self):
return "<MunicipioModel(product_id='%s'" \
" product_sku='%s', " \
" product_brand='%s', " \
" unit_of_measure='%s', " \
" product_stock='%s'" \
" product_name='%s'" \
" product_title='%s'" \
" product_long_description='%s'" \
" product_photo='%s'" \
" product_price='%s'" \
" product_tax='%s'" \
" product_tax_rate='%s'" \
" product_status='%s'" \
" product_published='%s'" \
" product_length='%s'" \
" product_width='%s'" \
" product_height='%s'" \
" product_weight='%s'" \
" category_id='%s'" \
" parent_category_id='%s'" \
" creation_date='%s'" \
" last_update_date='%s')>" % (self.product_id,
self.product_sku,
self.product_brand,
self.unit_of_measure,
self.product_stock,
self.product_name,
self.product_title,
self.product_long_description,
self.product_photo,
self.product_price,
self.product_tax,
self.product_tax_rate,
self.product_status,
self.product_published,
self.product_length,
self.product_width,
self.product_height,
self.product_weight,
self.category_id,
self.parent_category_id,
self.creation_date,
self.last_update_date)
```
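The dictionary keys expected by `ProductModel` are the Spanish ones read in `__init__` above. The sketch below builds such a payload and drives `insert_data`; all of the values are illustrative.
```python
# Hypothetical payload sketch for ProductModel.insert_data.
from db_controller.database_backend import init_db_connection
from apps.product.ProductModel import ProductModel

conn_db, session_db = init_db_connection()

product_payload = {
    'sku_producto': 'SKU-0001',
    'marca_producto': 'MarcaDemo',
    'unidad_medida_producto': 'PZA',
    'inventario_producto': 100,
    'nombre_producto': 'Cafe molido 500g',
    'titulo_producto': 'Cafe molido de especialidad',
    'descripcion_larga': 'Cafe molido tostado medio, bolsa de 500 g.',
    'url_imagen': 'https://example.com/cafe.jpg',
    'precio_unitario': 129.90,
    'costo_impuesto': 20.78,
    'tasa_impuesto': 0.16,
    'estatus_producto': 'activo',
    'producto_publicado': True,
    'volumetria_largo_producto': 20.0,
    'volumetria_ancho_producto': 10.0,
    'volumetria_alto_producto': 6.0,
    'volumetria_peso_producto': 0.5,
    'id_categoria_producto': 1,
    'id_categoria_padre_producto': 1,
}

product_model = ProductModel(product_payload)
response_json = product_model.insert_data(session_db, product_payload)
print(response_json)
```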
#### File: product_api_test/db_controller/database_backend.py
```python
__author__ = "<NAME> (<EMAIL>)"
__copyright__ = "Copyright 2021"
__license__ = ""
__history__ = """ Script with functions to manage database back end"""
__version__ = "1.21.H05.1 ($Rev: 2 $)"
import psycopg2
from sqlalchemy import create_engine, ForeignKey
from sqlalchemy_utils import database_exists, create_database
from sqlalchemy.orm import sessionmaker, relationship
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.engine.reflection import Inspector
from db_controller import mvc_exceptions as mvc_exc
from logger_controller.logger_control import *
from utilities.Utility import *
from flask_bcrypt import Bcrypt
Base = declarative_base()
bcrypt = Bcrypt()
cfg_db = get_config_settings_db()
cfg_app = get_config_settings_app()
# logger = configure_logger(cfg_app.log_types[2].__str__())
logger = configure_logger('db')
logging.basicConfig()
logging.getLogger('sqlalchemy.engine').setLevel(logging.DEBUG)
def create_engine_db():
# For Test connection:
engine = create_engine(cfg_db.Development.SQLALCHEMY_DATABASE_URI.__str__(),
client_encoding="utf8",
execution_options={"isolation_level": "REPEATABLE READ"})
if not 'development' == cfg_app.flask_api_env:
engine = create_engine(cfg_db.Production.SQLALCHEMY_DATABASE_URI.__str__(),
client_encoding="utf8",
execution_options={"isolation_level": "REPEATABLE READ"})
logger.info("Engine Created by URL: {}".format(cfg_db.Development.SQLALCHEMY_DATABASE_URI.__str__()))
return engine
def create_database_api(engine_session):
if not database_exists(engine_session.url):
logger.info("Create the Database...")
create_database(engine_session.url, 'utf-8')
logger.info("Database created...")
def create_bd_objects(engine_obj):
inspector = Inspector.from_engine(engine_obj)
table_names = list()
table_names.append(cfg_db.user_auth_table.__str__())
table_names.append(cfg_db.category_table.__str__())
table_names.append(cfg_db.product_table.__str__())
if not inspector.get_table_names():
Base.metadata.create_all(bind=engine_obj)
logger.info("Database objects created...")
else:
for table_name in inspector.get_table_names():
logger.info('Table on database: %s', str(table_name))
if table_name in table_names:
logger.info('Table already created: %s', str(table_name))
else:
# Create tables
Base.metadata.create_all(bind=engine_obj)
logger.info("Database objects created...")
# Base.metadata.create_all(bind=engine_obj)
logger.info("Database objects created...")
def session_to_db(engine_se):
r"""
Get and manage the session connect to the database engine.
:return connection: Object to connect to the database and transact on it.
"""
session = None
connection = None
try:
if engine_se:
session_maker = sessionmaker(bind=engine_se, autocommit=True)
connection = engine_se.connect()
session = session_maker(bind=connection)
logger.info("Connection and Session objects created...")
else:
logger.error("Database not created or some parameters with the connection to the database can't be read")
except mvc_exc.DatabaseError as db_error:
logger.exception("Can not connect to database, verify data connection", db_error, exc_info=True)
raise mvc_exc.ConnectionError(
'Can not connect to database, verify data connection.\nOriginal Exception raised: {}'.format(db_error)
)
return connection, session
def init_db_connection():
engine_db = create_engine_db()
create_database_api(engine_db)
create_bd_objects(engine_db)
connection, session = session_to_db(engine_db)
return connection, session
def scrub(input_string):
"""Clean an input string (to prevent SQL injection).
Parameters
----------
input_string : str
Returns
-------
str
"""
return "".join(k for k in input_string if k.isalnum())
def create_cursor(conn):
r"""
Create an object statement to transact to the database and manage his data.
:param conn: Object to connect to the database.
:return cursor: Object statement to transact to the database with the connection.
"""
try:
cursor = conn.cursor()
except mvc_exc.ConnectionError as conn_error:
logger.exception("Can not create the cursor object, verify database connection", conn_error, exc_info=True)
raise mvc_exc.ConnectionError(
'Can not connect to database, verify data connection.\nOriginal Exception raised: {}'.format(
conn_error
)
)
return cursor
def disconnect_from_db(conn):
r"""
Generate close session to the database through the disconnection of the conn object.
:param conn: Object connector to close session.
"""
if conn is not None:
conn.close()
def close_cursor(cursor):
r"""
Generate close statement to the database through the disconnection of the cursor object.
:param cursor: Object cursor to close statement.
"""
if cursor is not None:
cursor.close()
def get_current_date(session):
# sql_current_date = 'SELECT CURDATE()' # For MySQL
sql_current_date = 'SELECT NOW()' # For PostgreSQL
    current_date = session.execute(sql_current_date).scalar()
logger.info('CurrentDate: %s', current_date)
return current_date
def get_systimestamp_date(session):
last_updated_date_column = session.execute('SELECT systimestamp from dual').scalar()
logger.info('Timestamp from DUAL: %s', last_updated_date_column)
return last_updated_date_column
def get_current_date_from_db(session, conn, cursor):
r"""
Get the current date and hour from the database server to set to the row registered or updated.
:return last_updated_date: The current day with hour to set the date value.
"""
last_updated_date = None
try:
# sql_current_date = 'SELECT CURDATE()' # For MySQL
sql_current_date = 'SELECT NOW()' # For PostgreSQL
cursor.execute(sql_current_date)
result = cursor.fetchone()[0]
print("NOW() :", result)
if result is not None:
# last_updated_date = datetime.datetime.strptime(str(result), "%Y-%m-%d %H:%M:%S")
# last_updated_date = datetime.datetime.strptime(str(result), "%Y-%m-%d %I:%M:%S")
last_updated_date = result
cursor.close()
except SQLAlchemyError as error:
conn.rollback()
logger.exception('An exception occurred while execute transaction: %s', error)
raise SQLAlchemyError(
"A SQL Exception {} occurred while transacting with the database.".format(error)
)
finally:
disconnect_from_db(conn)
return last_updated_date
```
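A minimal sketch of the connection lifecycle exposed by this module: `init_db_connection` builds the engine, creates the database and tables when missing, and hands back a `(connection, session)` pair that the caller should close when done.
```python
# Minimal lifecycle sketch for database_backend.py.
from db_controller.database_backend import (init_db_connection, get_current_date,
                                             disconnect_from_db)

conn_db, session_db = init_db_connection()
try:
    # Any ORM work goes here; as an example, read the server date used to
    # stamp creation_date / last_update_date in the models above.
    print('Server date:', get_current_date(session_db))
finally:
    session_db.close()
    disconnect_from_db(conn_db)
```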
#### File: product_api_test/logger_controller/logger_control.py
```python
import errno
import logging
import os
import sys
from utilities.Utility import *
from datetime import datetime
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
# Possible values to LOGGER: DEBUG, INFO, WARN, ERROR, and CRITICAL
# LOG_LEVEL = logging.DEBUG
# Para LOG file de App principal
def configure_logging(log_name, path_to_log_directory, logger_type):
"""
Configure logger
:param logger_type: The type to write logger and setup on the modules of App
:param log_name: Name of the log file saved
:param path_to_log_directory: Path to directory to write log file in
:return:
"""
cfg = get_config_settings_app()
_date_name = datetime.now().strftime('%Y-%m-%dT%H%M')
log_filename = str(log_name + _date_name + cfg.log_file_extension)
_importer_logger = logging.getLogger(logger_type)
_importer_logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - Module: %(module)s - Line No: %(lineno)s : %(name)s : %(levelname)s - '
'%(message)s')
    # Make sure the log directory exists before the file handler tries to open the log file
    create_directory_if_not_exists(_importer_logger, path_to_log_directory)
    fh = logging.FileHandler(filename=os.path.join(path_to_log_directory, log_filename), mode='a',
                             encoding='utf-8', delay=False)
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
_importer_logger.addHandler(fh)
# For Testing logging - Comment
sh = logging.StreamHandler(sys.stdout)
sh.setLevel(logging.DEBUG)
sh.setFormatter(formatter)
_importer_logger.addHandler(sh)
    return _importer_logger
# Console logger for the main app
def configure_logging_console(logger_type):
"""
Configure logger
    :param logger_type: Logger name used to identify this logger across the app modules
    :return: instance of the configured console logger
"""
_date_name = datetime.now().strftime('%Y-%m-%dT%H%M')
_importer_logger = logging.getLogger(logger_type)
_importer_logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - Module: %(module)s - Line No: %(lineno)s : %(name)s : %(levelname)s - '
'%(message)s')
sh = logging.StreamHandler(sys.stdout)
sh.setLevel(logging.DEBUG)
sh.setFormatter(formatter)
_importer_logger.addHandler(sh)
return _importer_logger
def log_critical_error(logger, exc, message):
"""
    Logs the exception at CRITICAL log level
:param logger: the logger
:param exc: exception to log
:param message: description message to log details of where/why exc occurred
"""
if logger is not None:
logger.critical(message)
logger.critical(exc)
def create_directory_if_not_exists(logger, path):
"""
Creates 'path' if it does not exist
If creation fails, an exception will be thrown
:param logger: the logger
:param path: the path to ensure it exists
"""
try:
os.makedirs(path)
except OSError as exc:
if exc.errno != errno.EEXIST and not os.path.isdir(path):
log_critical_error(logger, exc, 'An error happened trying to create ' + path)
raise
def configure_logger(logger_type):
"""
Declare and validate existence of log directory; create and configure logger object
:return: instance of configured logger object
"""
cfg = get_config_settings_app()
log_name = cfg.log_file_app_name
log_dir = cfg.log_file_save_path
logger = configure_logging(log_name, log_dir, logger_type)
if logger is not None:
return logger
def configure_console_logger(logger_type):
"""
Declare and validate existence of log directory; create and configure logger object
:param logger_type: The type to write logger and setup on the modules of App
:return: instance of configured logger object
"""
logger = configure_logging_console(logger_type)
if logger is not None:
return logger
```
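A minimal usage sketch for these factories. The `"api"` logger name is illustrative, and the import path assumes the package layout shown in the file header; the file-based variant additionally needs the paths configured in `get_config_settings_app()`.
```python
# Console logging needs no settings file, so it is the simplest smoke test.
from logger_controller.logger_control import configure_console_logger

logger = configure_console_logger("api")
logger.info("console logger wired up")

# configure_logger("api") would also write to the timestamped file defined by
# log_file_app_name / log_file_save_path in the application settings.
```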
#### File: product_api_test/tests/UserAuthenticationTest.py
```python
__author__ = "<NAME> (<EMAIL>)"
__copyright__ = "Copyright 2021, <NAME>"
__license__ = ""
__history__ = """ """
__version__ = "1.1.I02.1 ($Rev: 1 $)"
import unittest
import json
from tests.BaseCase import BaseCase
class TestUserLogin(BaseCase):
def test_successful_login(self):
user_name = "<EMAIL>"
password = "<PASSWORD>"
email = "<EMAIL>"
payload = json.dumps({
"username": user_name,
"password": password,
"is_active": 1,
"is_staff": 0,
"is_superuser": 1,
})
# When
response = self.app.post('/api/v1.0/manager/user/login/', headers={"Content-Type": "application/json"},
data=payload)
token_response = json.loads(response.get_data(as_text=True))
# Then
self.assertEqual(str, type(token_response['message_login']))
self.assertEqual(str, type(token_response['access_token']))
self.assertEqual(str, type(token_response['refresh_token']))
self.assertEqual(str, type(token_response['data']))
self.assertEqual(200, response.status_code)
return token_response['access_token']
def test_login_already_existing_user(self):
user_name = "<EMAIL>"
password = "<PASSWORD>"
email = "<EMAIL>"
payload = json.dumps({
"username": user_name,
"password": password,
"is_active": 1,
"is_staff": 1,
"is_superuser": 0,
})
# When
response = self.app.post('/api/v1.0/manager/user/login/', headers={"Content-Type": "application/json"},
data=payload)
login_response = json.loads(response.get_data(as_text=True))
# Then
# self.assertEqual(str, type(login_response['Username']))
# self.assertEqual(str, type(login_response['Password']))
# self.assertEqual(str, type(login_response['IsActive']))
# self.assertEqual(str, type(login_response['IsStaff']))
# self.assertEqual(str, type(login_response['IsSuperUser']))
# self.assertEqual(str, type(login_response['CreationDate']))
self.assertEqual(str, type(login_response['message_login']))
self.assertEqual(str, type(login_response['access_token']))
self.assertEqual(str, type(login_response['refresh_token']))
self.assertEqual(str, type(login_response['data']))
self.assertEqual(200, response.status_code)
def test_login_with_invalid_username(self):
user_name = "<EMAIL>"
password = "<PASSWORD>"
email = "<EMAIL>"
payload = json.dumps({
"username": user_name,
"password": password,
"is_active": 1,
"is_staff": 0,
"is_superuser": 0,
})
# When
response = self.app.post('/api/v1.0/manager/user/login/', headers={"Content-Type": "application/json"},
data=payload)
user_response = json.loads(response.get_data(as_text=True))
# Then
self.assertEqual(str, type(user_response['message_login']))
self.assertEqual(str, type(user_response['data']))
def test_login_with_invalid_password(self):
user_name = "<EMAIL>"
password = "Ñ"
email = "<EMAIL>"
payload = json.dumps({
"username": user_name,
"password": password,
"is_active": 1,
"is_staff": 0,
"is_superuser": 0,
})
# When
response = self.app.post('/api/v1.0/manager/user/login/', headers={"Content-Type": "application/json"},
data=payload)
user_response = json.loads(response.get_data(as_text=True))
# Then
self.assertEqual(str, type(user_response['message_login']))
self.assertEqual(str, type(user_response['data']))
def test_login_get_users(self):
# When
response = self.app.get('/api/v1.0/manager/user/list', headers={"Content-Type": "application/json"})
user_response = json.loads(response.get_data(as_text=True))
# Then
self.assertEqual(str, type(user_response["AuthUser"]['Id']))
self.assertEqual(str, type(user_response["AuthUser"]['Username']))
self.assertEqual(str, type(user_response["AuthUser"]['Password']))
self.assertEqual(str, type(user_response["AuthUser"]['IsActive']))
self.assertEqual(str, type(user_response["AuthUser"]['IsStaff']))
self.assertEqual(str, type(user_response["AuthUser"]['IsSuperUser']))
self.assertEqual(str, type(user_response["AuthUser"]['CreationDate']))
self.assertEqual(str, type(user_response["AuthUser"]['LastUpdateDate']))
self.assertEqual(200, response.status_code)
def test_user_update_data(self):
user_name = "<EMAIL>"
password = "<PASSWORD>"
email = "<EMAIL>"
payload = json.dumps({
"username": user_name,
"password": password,
"is_active": 1,
"is_staff": 0,
"is_superuser": 0,
})
# When
response = self.app.put('/api/v1.0/manager/user/', headers={"Content-Type": "application/json"}, data=payload)
user_response = json.loads(response.get_data(as_text=True))
# Then
self.assertEqual(str, type(user_response['Username']))
self.assertEqual(str, type(user_response['Password']))
self.assertEqual(str, type(user_response['IsActive']))
self.assertEqual(str, type(user_response['IsStaff']))
self.assertEqual(str, type(user_response['IsSuperUser']))
self.assertEqual(str, type(user_response['CreationDate']))
self.assertEqual(str, type(user_response['LastUpdateDate']))
self.assertEqual(200, response.status_code)
def test_user_delete_data(self):
user_name = "<EMAIL>"
password = "<PASSWORD>"
email = "<EMAIL>"
payload = json.dumps({
"username": user_name,
"password": password,
"is_active": 1,
"is_staff": 0,
"is_superuser": 0,
})
# When
response = self.app.delete('/api/v1.0/manager/user/', headers={"Content-Type": "application/json"}, data=payload)
user_response = json.loads(response.get_data(as_text=True))
# Then
self.assertEqual(str, type(user_response['Username']))
self.assertEqual(str, type(user_response['Password']))
self.assertEqual(str, type(user_response['IsActive']))
self.assertEqual(str, type(user_response['IsStaff']))
self.assertEqual(str, type(user_response['IsSuperUser']))
self.assertEqual(str, type(user_response['CreationDate']))
self.assertEqual(str, type(user_response['LastUpdateDate']))
self.assertEqual(200, response.status_code)
```
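These cases extend the shared `BaseCase` fixture (not shown here), so a plain `unittest` runner is enough to execute them. The invocation below is a sketch that assumes the repository root is on `sys.path` so `tests.UserAuthenticationTest` is importable.
```python
import unittest

if __name__ == "__main__":
    suite = unittest.defaultTestLoader.loadTestsFromName("tests.UserAuthenticationTest")
    unittest.TextTestRunner(verbosity=2).run(suite)
```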
#### File: product_api_test/utilities/Utility.py
```python
__author__ = "<NAME> (<EMAIL>)"
__copyright__ = "Copyright 2021"
__license__ = ""
__history__ = """ """
__version__ = "1.21.H05.1 ($Rev: 2 $)"
from settings.settings import *
from datetime import datetime
import pytz
# Defines and returns the configurator for the general system constants
def get_config_settings_app():
"""
    Get the config object that loads the settings configurator.
    :return object: cfg object containing the settings defined in the constants configuration file.
"""
settings_api = AppConstants()
return settings_api
# Defines and returns the configurator for the database constants
def get_config_settings_db():
"""
    Get the config object that loads the database settings configurator.
    :return object: cfg object containing the database settings defined in the constants configuration file.
"""
settings_db = DbConstants()
return settings_db
def decimal_formatting(value):
return ('%.2f' % value).rstrip('0').rstrip('.')
# Converts a date-time value to UTC using the given value and its local timezone
def set_utc_date_data(data_date, timezone_date):
utc_date_convert = ""
utc_hour_convert = ""
date_on_utc = ""
local_date = pytz.timezone(timezone_date)
naive = datetime.strptime(data_date, "%Y-%m-%d %H:%M:%S")
local_dt = local_date.localize(naive, is_dst=None)
utc_dt = local_dt.astimezone(pytz.utc)
print(utc_dt)
date_on_utc = str(utc_dt).split()
utc_date_convert = date_on_utc[0]
utc_hour_convert = date_on_utc[1]
return utc_date_convert, utc_hour_convert
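
# Usage sketch with illustrative values: convert a local Mexico City timestamp
# into the UTC date and time parts returned by set_utc_date_data().
if __name__ == "__main__":
    utc_date, utc_hour = set_utc_date_data("2021-05-01 12:30:00", "America/Mexico_City")
    print(utc_date, utc_hour)  # e.g. 2021-05-01 and 17:30:00+00:00 (CDT is UTC-5)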
``` |
{
"source": "jorgeMorfinezM/TVOrderS3Uploader",
"score": 2
} |
#### File: jorgeMorfinezM/TVOrderS3Uploader/app.py
```python
__author__ = "<NAME> (<EMAIL>)"
__copyright__ = "Copyright 2019, <NAME>"
__license__ = ""
__history__ = """ """
__version__ = "1.20.D10.1.2.1 ($Rev: 10 $)"
from constants.constants import Constants as Const
import fnmatch
import boto3
from ftplib import FTP_TLS
import argparse
from logger_controller.logger_control import *
import time
logger = configure_logger()
# Connects to the B2B FTP
def ftp_orders_b2b_tecnofin_connector():
cfg = get_config_constant_file()
remote_host = cfg['ACCESS_B2B']['HOST']
remote_port = cfg['ACCESS_B2B']['PORT']
remote_username = cfg['ACCESS_B2B']['USERNAME']
remote_password = cfg['ACCESS_B2B']['PASSWORD']
remote_timeout = cfg['ACCESS_B2B']['TIME_OUT']
ftps = FTP_TLS(remote_host)
ftps.set_debuglevel(2)
ftps.set_pasv(True)
ftps.connect(port=remote_port, timeout=remote_timeout)
ftps.login(remote_username, remote_password)
logger.info('FTP Connected to: %s', remote_username+'@'+str(remote_host))
ftps.prot_p()
return ftps
# Connects to the Tecnofin B2C FTP
def ftp_orders_b2c_tecnofin_connector():
cfg = get_config_constant_file()
remote_host = cfg['ACCESS_B2C']['HOST']
remote_port = cfg['ACCESS_B2C']['PORT']
remote_username = cfg['ACCESS_B2C']['USERNAME']
remote_password = cfg['ACCESS_B2C']['PASSWORD']
remote_timeout = cfg['ACCESS_B2C']['TIME_OUT']
ftps = FTP_TLS(remote_host)
ftps.set_debuglevel(2)
ftps.set_pasv(True)
ftps.connect(port=remote_port, timeout=remote_timeout)
ftps.login(remote_username, remote_password)
logger.info('FTP Connected to: %s', remote_username+'@'+str(remote_host))
ftps.prot_p()
return ftps
# Parses every pending order XML file
# so it can be inserted through the Orders API
def parse_xml_pedidos_b2c_tv(order_type):
import os
cfg = get_config_constant_file()
orders_status_list = cfg['ORDERS_STATUS_LIST']
if 'B2C' in order_type:
remote_path = cfg['PATH_ORDERS_B2C']
local_temp_path = cfg['PATH_LOCAL']
pattern = cfg['EXT_ORDERS_TV']
ftps = ftp_orders_b2c_tecnofin_connector()
ftps.cwd(remote_path)
pedidos = ftps.nlst()
# ssh = connect_to_ftp_pedidos()
# sftp = ssh.open_sftp()
# pedidos = sftp.listdir(remote_path)
file_remote = None
file_local = None
for pedido in pedidos:
# print all entries that are files
if fnmatch.fnmatch(pedido, pattern):
file_remote = remote_path + '/' + pedido
file_local = local_temp_path + '/' + pedido
pedido_s3_exists = validate_order_exists_s3(pedido)
logger.info('Pedido Order Exists in S3: %s',
'Pedido: {0} ¿exists?: {1}'.format(pedido,
pedido_s3_exists))
if pedido_s3_exists is False:
logger.info('Server File >>> ' + file_remote + ' : ' + file_local + ' <<< Local File')
ftps.retrbinary('RETR %s' % file_remote, open(file_local, 'wb').write)
# sftp.get(file_remote, file_local) # NOT USE IT
logger.info('Local File Pedido was created: %s', str(file_local))
copy_order_to_aws_s3(pedido)
# If file exists, delete it ##
if os.path.isfile(file_local):
os.remove(file_local)
logger.info('Local File Pedido was deleted: %s', str(file_local))
else: # Show an error ##
logger.error("Error: %s file not found" % file_local)
else:
logger.info('Pedido File: %s', '{0} already exists in Bucket S3!'.format(pedido))
ftps.delete(pedido)
time.sleep(1)
ftps.close()
elif 'B2B' in order_type:
remote_path = cfg['PATH_ORDERS_B2B']
local_temp_path = cfg['PATH_LOCAL']
pattern = cfg['EXT_ORDERS_TV']
ftps = ftp_orders_b2b_tecnofin_connector()
ftps.cwd(remote_path)
pedidos = ftps.nlst()
for pedido in pedidos:
# print all entries that are files
if fnmatch.fnmatch(pedido, pattern):
file_remote = remote_path + '/' + pedido
file_local = local_temp_path + '/' + pedido
pedido_s3_exists = validate_order_exists_s3(pedido)
logger.info('Pedido Order Exists in S3: %s',
'Pedido: {0} ¿exists?: {1}'.format(pedido,
pedido_s3_exists))
if pedido_s3_exists is False:
logger.info('Server File >>> ' + file_remote + ' : ' + file_local + ' <<< Local File')
ftps.retrbinary('RETR %s' % file_remote, open(file_local, 'wb').write)
# sftp.get(file_remote, file_local) # NOT USE IT
logger.info('Local File Pedido was created: %s', str(file_local))
copy_order_to_aws_s3(pedido)
# If file exists, delete it ##
if os.path.isfile(file_local):
os.remove(file_local)
logger.info('Local File Pedido was deleted: %s', str(file_local))
else: # Show an error ##
logger.error("Error: %s file not found" % file_local)
else:
logger.info('Pedido File: %s', '{0} already exists in Bucket S3!'.format(pedido))
ftps.close()
# Connects to the AWS S3 bucket
# and uploads the order file:
def copy_order_to_aws_s3(pedido):
cfg = get_config_constant_file()
bucket_s3_name = cfg['BUCKET_AWS_S3']['S3_NAME']
s3_access_key = cfg['BUCKET_AWS_S3']['ACCESS_KEY']
s3_secret_key = cfg['BUCKET_AWS_S3']['SECRET_KEY']
bucketname = bucket_s3_name
logger.info('Bucket S3 to Upload Order file: %s', bucketname)
logger.info('Order Pedido file to upload: %s', pedido)
s3 = boto3.resource('s3', aws_access_key_id=s3_access_key, aws_secret_access_key=s3_secret_key)
s3.Object(bucketname, pedido).upload_file(Filename=pedido)
logger.info('Order Pedido file uploaded: %s', pedido)
# Connects to AWS S3 to download and read each XML order
def connect_aws_s3():
cfg = get_config_constant_file()
bucket_s3_name = cfg['BUCKET_AWS_S3']['S3_NAME']
s3_access_key = cfg['BUCKET_AWS_S3']['ACCESS_KEY']
s3_secret_key = cfg['BUCKET_AWS_S3']['SECRET_KEY']
bucketname = bucket_s3_name
s3 = boto3.resource('s3', aws_access_key_id=s3_access_key, aws_secret_access_key=s3_secret_key)
bucket_pedidos = s3.Bucket(bucketname)
# s3.Object(bucketname, pedido).upload_file(Filename=pedido)
return bucket_pedidos
def validate_order_exists_s3(pedido_order):
pedido_s3_exists = False
bucket_pedidos = connect_aws_s3()
logger.info('Pedido to validate in S3: %s', str(pedido_order))
for pedido in bucket_pedidos.objects.all():
order_name = pedido.key
logger.info('File Pedido into S3 Bucket: %s', str(order_name))
        if str(pedido_order) in str(order_name):
            pedido_s3_exists = True
            # Stop scanning on the first match; otherwise a later,
            # non-matching key would reset the flag back to False
            break
return pedido_s3_exists
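
# Design note: validate_order_exists_s3() walks every key in the bucket. A
# hedged alternative using the same boto3 resource API is to list only the keys
# that share the order's prefix, which avoids a full scan on large buckets.
def order_exists_s3_by_prefix(pedido_order):
    bucket_pedidos = connect_aws_s3()
    return any(True for _ in bucket_pedidos.objects.filter(Prefix=str(pedido_order)))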
# Defines and returns the configurator for the system constants:
def get_config_constant_file():
"""Contiene la obtencion del objeto config
para setear datos de constantes en archivo
configurador
:rtype: object
"""
# TEST
_constants_file = "constants/constants.yml"
# PROD
# _constants_file = "constants/constants.yml"
cfg = Const.get_constants_file(_constants_file)
return cfg
def main():
pass
parser = argparse.ArgumentParser()
parser.add_argument('--order_type', required=True, type=str,
help="Parametro Tipo de Orden B2C o B2B entre comillas")
args = parser.parse_args()
order_type = args.order_type
logger.info('ORDER_TYPE ARG: %s', str(order_type))
parse_xml_pedidos_b2c_tv(order_type)
if __name__ == "__main__":
pass
main()
``` |
{
"source": "jorgemorgado/puppet-metrics",
"score": 3
} |
#### File: puppet-metrics/bin/genjson.py
```python
__version__ = 1.0
import sys
import re
import json
import argparse
from collections import defaultdict
version = """%(prog)s 1.0 (c)2015 <EMAIL>"""
description = "Generate a JSON output from a cloc/SQLite query input."
parser = argparse.ArgumentParser(description=description)
parser.add_argument('-v', '--version', action='version', version=version)
parser.add_argument('-d', '--debug', action='store_true', dest='debug',
default=False, help='enable debug mode (developers only)')
args = parser.parse_args()
# Disable traceback if not in debug mode
if not args.debug:
sys.tracebacklimit = 0
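
# Expected stdin format: one "path|line_count" record per line, as produced by
# the cloc/SQLite query this script is fed with. The module path below is
# purely illustrative:
#
#   ./common/modules/apache/manifests/init.pp|120
#   ./common/modules/apache/templates/vhost.erb|45
#
# which would be turned into flare nodes such as:
#   {"name": "apache", "children": [{"name": "apache", "size": 120},
#                                   {"name": "apache/vhost.erb", "size": 45}]}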
def main():
modules = defaultdict(list)
pattern_manifest = re.compile(r'^\.\/common\/modules\/(?P<name>.*?)\/manifests\/(?P<file>.*?)\.pp\|(?P<lines>\d+?)$')
pattern_template = re.compile(r'^\.\/common\/modules\/(?P<name>.*?)\/templates\/(?P<file>.*?)\.erb\|(?P<lines>\d+?)$')
for line in sys.stdin:
        # Does it match a manifest file?
match = pattern_manifest.match(line)
if match:
modulename = match.group('name')
filelines = match.group('lines')
if match.group('file') == 'init':
filename = modulename
else:
filename = modulename + '::' + match.group('file').replace('/', '::')
modules[modulename].append([ filename, filelines ])
        # Does it match a template file?
match = pattern_template.match(line)
if match:
modulename = match.group('name')
filelines = match.group('lines')
filename = modulename + '/' + match.group('file') + '.erb'
modules[modulename].append([ filename, filelines ])
flare_children = []
for k, v in modules.iteritems():
#print "Module %s:" % k
children = []
for item in v:
#print "\t", item[0], item[1]
children.append({ "name": item[0], "size": int(item[1]) })
node = { "name": k, "children": children }
#print node
flare_children.append(node)
data = {
"name": "flare",
"children": flare_children
}
# Output JSON data
print json.dumps(data)
if __name__ == "__main__":
try:
sys.exit(main())
except KeyboardInterrupt:
print "Caught Ctrl-C."
        sys.exit(1)
``` |
{
"source": "jorgemorgado/sqlalchemy",
"score": 2
} |
#### File: test/orm/test_cache_key.py
```python
import random
import sqlalchemy as sa
from sqlalchemy import Column
from sqlalchemy import func
from sqlalchemy import inspect
from sqlalchemy import Integer
from sqlalchemy import null
from sqlalchemy import select
from sqlalchemy import Table
from sqlalchemy import testing
from sqlalchemy import text
from sqlalchemy import true
from sqlalchemy import update
from sqlalchemy.orm import aliased
from sqlalchemy.orm import Bundle
from sqlalchemy.orm import defaultload
from sqlalchemy.orm import defer
from sqlalchemy.orm import join as orm_join
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import lazyload
from sqlalchemy.orm import Load
from sqlalchemy.orm import load_only
from sqlalchemy.orm import Query
from sqlalchemy.orm import relationship
from sqlalchemy.orm import selectinload
from sqlalchemy.orm import Session
from sqlalchemy.orm import subqueryload
from sqlalchemy.orm import synonym
from sqlalchemy.orm import with_expression
from sqlalchemy.orm import with_loader_criteria
from sqlalchemy.orm import with_polymorphic
from sqlalchemy.sql.base import CacheableOptions
from sqlalchemy.sql.expression import case
from sqlalchemy.sql.visitors import InternalTraversal
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import ne_
from sqlalchemy.testing.fixtures import fixture_session
from test.orm import _fixtures
from .inheritance import _poly_fixtures
from .test_query import QueryTest
from ..sql.test_compare import CacheKeyFixture
def stmt_20(*elements):
return tuple(
elem._statement_20() if isinstance(elem, Query) else elem
for elem in elements
)
class CacheKeyTest(CacheKeyFixture, _fixtures.FixtureTest):
run_setup_mappers = "once"
run_inserts = None
run_deletes = None
@classmethod
def setup_mappers(cls):
cls._setup_stock_mapping()
def test_mapper_and_aliased(self):
User, Address, Keyword = self.classes("User", "Address", "Keyword")
self._run_cache_key_fixture(
lambda: (inspect(User), inspect(Address), inspect(aliased(User))),
compare_values=True,
)
def test_attributes(self):
User, Address, Keyword = self.classes("User", "Address", "Keyword")
self._run_cache_key_fixture(
lambda: (
User.id,
Address.id,
aliased(User).id,
aliased(User, name="foo").id,
aliased(User, name="bar").id,
User.name,
User.addresses,
Address.email_address,
aliased(User).addresses,
),
compare_values=True,
)
def test_bundles_in_annotations(self):
User = self.classes.User
self._run_cache_key_fixture(
lambda: (
Bundle("mybundle", User.id).__clause_element__(),
Bundle("myotherbundle", User.id).__clause_element__(),
Bundle("mybundle", User.name).__clause_element__(),
Bundle("mybundle", User.id, User.name).__clause_element__(),
),
compare_values=True,
)
def test_bundles_directly(self):
User = self.classes.User
self._run_cache_key_fixture(
lambda: (
Bundle("mybundle", User.id),
Bundle("mybundle", User.id).__clause_element__(),
Bundle("myotherbundle", User.id),
Bundle("mybundle", User.name),
Bundle("mybundle", User.id, User.name),
),
compare_values=True,
)
def test_query_expr(self):
(User,) = self.classes("User")
self._run_cache_key_fixture(
lambda: (
with_expression(User.name, true()),
with_expression(User.name, null()),
with_expression(User.name, func.foobar()),
with_expression(User.name, User.name == "test"),
Load(User).with_expression(User.name, true()),
Load(User).with_expression(User.name, null()),
Load(User).with_expression(User.name, func.foobar()),
Load(User).with_expression(User.name, User.name == "test"),
),
compare_values=True,
)
def test_loader_criteria(self):
User, Address = self.classes("User", "Address")
from sqlalchemy import Column, Integer, String
class Foo(object):
id = Column(Integer)
name = Column(String)
self._run_cache_key_fixture(
lambda: (
with_loader_criteria(User, User.name != "somename"),
with_loader_criteria(User, User.id != 5),
with_loader_criteria(User, lambda cls: cls.id == 10),
with_loader_criteria(Address, Address.id != 5),
with_loader_criteria(Foo, lambda cls: cls.id == 10),
),
compare_values=True,
)
def test_loader_criteria_bound_param_thing(self):
from sqlalchemy import Column, Integer
class Foo(object):
id = Column(Integer)
def go(param):
return with_loader_criteria(Foo, lambda cls: cls.id == param)
g1 = go(10)
g2 = go(20)
ck1 = g1._generate_cache_key()
ck2 = g2._generate_cache_key()
eq_(ck1.key, ck2.key)
eq_(ck1.bindparams[0].key, ck2.bindparams[0].key)
eq_(ck1.bindparams[0].value, 10)
eq_(ck2.bindparams[0].value, 20)
def test_instrumented_attributes(self):
User, Address, Keyword, Order, Item = self.classes(
"User", "Address", "Keyword", "Order", "Item"
)
self._run_cache_key_fixture(
lambda: (
User.addresses,
User.addresses.of_type(aliased(Address)),
User.orders,
User.orders.and_(Order.id != 5),
User.orders.and_(Order.description != "somename"),
),
compare_values=True,
)
def test_unbound_options(self):
User, Address, Keyword, Order, Item = self.classes(
"User", "Address", "Keyword", "Order", "Item"
)
self._run_cache_key_fixture(
lambda: (
joinedload(User.addresses),
joinedload(User.addresses.of_type(aliased(Address))),
joinedload("addresses"),
joinedload(User.orders),
joinedload(User.orders.and_(Order.id != 5)),
joinedload(User.orders.and_(Order.id == 5)),
joinedload(User.orders.and_(Order.description != "somename")),
joinedload(User.orders).selectinload("items"),
joinedload(User.orders).selectinload(Order.items),
defer(User.id),
defer("id"),
defer("*"),
defer(Address.id),
subqueryload(User.orders),
selectinload(User.orders),
joinedload(User.addresses).defer(Address.id),
joinedload(aliased(User).addresses).defer(Address.id),
joinedload(User.addresses).defer("id"),
joinedload(User.orders).joinedload(Order.items),
joinedload(User.orders).subqueryload(Order.items),
subqueryload(User.orders).subqueryload(Order.items),
subqueryload(User.orders)
.subqueryload(Order.items)
.defer(Item.description),
defaultload(User.orders).defaultload(Order.items),
defaultload(User.orders),
),
compare_values=True,
)
def test_unbound_sub_options(self):
"""test #6869"""
User, Address, Keyword, Order, Item = self.classes(
"User", "Address", "Keyword", "Order", "Item"
)
self._run_cache_key_fixture(
lambda: (
joinedload(User.addresses).options(
joinedload(Address.dingaling)
),
joinedload(User.addresses).options(
joinedload(Address.dingaling).options(load_only("name"))
),
joinedload(User.orders).options(
joinedload(Order.items).options(joinedload(Item.keywords))
),
),
compare_values=True,
)
def test_bound_options(self):
User, Address, Keyword, Order, Item = self.classes(
"User", "Address", "Keyword", "Order", "Item"
)
a1 = aliased(Address)
self._run_cache_key_fixture(
lambda: (
Load(User).joinedload(User.addresses),
Load(User).joinedload(
User.addresses.of_type(aliased(Address))
),
Load(User).joinedload(User.orders),
Load(User).joinedload(User.orders.and_(Order.id != 5)),
Load(User).joinedload(
User.orders.and_(Order.description != "somename")
),
Load(User).defer(User.id),
Load(User).subqueryload(User.addresses),
Load(Address).defer(Address.id),
Load(Address).defer("*"),
Load(a1).defer(a1.id),
Load(User).joinedload(User.addresses).defer(Address.id),
Load(User).joinedload(User.orders).joinedload(Order.items),
Load(User).joinedload(User.orders).subqueryload(Order.items),
Load(User).subqueryload(User.orders).subqueryload(Order.items),
Load(User)
.subqueryload(User.orders)
.subqueryload(Order.items)
.defer(Item.description),
Load(User).defaultload(User.orders).defaultload(Order.items),
Load(User).defaultload(User.orders),
Load(Address).raiseload("*"),
Load(Address).raiseload(Address.user),
),
compare_values=True,
)
def test_selects_w_orm_joins(self):
User, Address, Keyword, Order, Item = self.classes(
"User", "Address", "Keyword", "Order", "Item"
)
a1 = aliased(Address)
self._run_cache_key_fixture(
lambda: (
select(User).join(User.addresses),
select(User).join(User.orders),
select(User).join(User.addresses).join(User.orders),
select(User).join(Address, User.addresses),
select(User).join(a1, User.addresses),
select(User).join(User.addresses.of_type(a1)),
select(User).join(
User.addresses.and_(Address.email_address == "foo")
),
select(User)
.join(Address, User.addresses)
.join_from(User, Order),
select(User)
.join(Address, User.addresses)
.join_from(User, User.orders),
select(User.id, Order.id).select_from(
orm_join(User, Order, User.orders)
),
),
compare_values=True,
)
def test_orm_query_w_orm_joins(self):
User, Address, Keyword, Order, Item = self.classes(
"User", "Address", "Keyword", "Order", "Item"
)
a1 = aliased(Address)
self._run_cache_key_fixture(
lambda: stmt_20(
fixture_session().query(User).join(User.addresses),
fixture_session().query(User).join(User.orders),
fixture_session()
.query(User)
.join(User.addresses)
.join(User.orders),
fixture_session()
.query(User)
.join(User.addresses)
.join(Address.dingaling),
fixture_session().query(User).join(Address, User.addresses),
fixture_session().query(User).join(a1, User.addresses),
fixture_session().query(User).join(User.addresses.of_type(a1)),
),
compare_values=True,
)
def test_orm_query_using_with_entities(self):
"""test issue #6503"""
User, Address, Keyword, Order, Item = self.classes(
"User", "Address", "Keyword", "Order", "Item"
)
self._run_cache_key_fixture(
lambda: stmt_20(
fixture_session()
.query(User)
.join(User.addresses)
.with_entities(Address.id),
#
fixture_session().query(Address.id).join(User.addresses),
#
fixture_session()
.query(User)
.options(selectinload(User.addresses))
.with_entities(User.id),
#
fixture_session()
.query(User)
.options(selectinload(User.addresses)),
#
fixture_session().query(User).with_entities(User.id),
#
# here, propagate_attr->orm is Address, entity is Address.id,
# but the join() + with_entities() will log a
# _MemoizedSelectEntities to differentiate
fixture_session()
.query(Address, Order)
.join(Address.dingaling)
.with_entities(Address.id),
#
# same, propagate_attr->orm is Address, entity is Address.id,
# but the join() + with_entities() will log a
# _MemoizedSelectEntities to differentiate
fixture_session()
.query(Address, User)
.join(Address.dingaling)
.with_entities(Address.id),
),
compare_values=True,
)
def test_synonyms(self, registry):
"""test for issue discovered in #7394"""
@registry.mapped
class User2(object):
__table__ = self.tables.users
name_syn = synonym("name")
@registry.mapped
class Address2(object):
__table__ = self.tables.addresses
name_syn = synonym("email_address")
self._run_cache_key_fixture(
lambda: (
User2.id,
User2.name,
User2.name_syn,
Address2.name_syn,
Address2.email_address,
aliased(User2).name_syn,
aliased(User2, name="foo").name_syn,
aliased(User2, name="bar").name_syn,
),
compare_values=True,
)
def test_more_with_entities_sanity_checks(self):
"""test issue #6503"""
User, Address, Keyword, Order, Item = self.classes(
"User", "Address", "Keyword", "Order", "Item"
)
sess = fixture_session()
q1 = (
sess.query(Address, Order)
.with_entities(Address.id)
._statement_20()
)
q2 = (
sess.query(Address, User).with_entities(Address.id)._statement_20()
)
assert not q1._memoized_select_entities
assert not q2._memoized_select_entities
# no joins or options, so q1 and q2 have the same cache key as Order/
# User are discarded. Note Address is first so propagate_attrs->orm is
# Address.
eq_(q1._generate_cache_key(), q2._generate_cache_key())
q3 = sess.query(Order).with_entities(Address.id)._statement_20()
q4 = sess.query(User).with_entities(Address.id)._statement_20()
# with Order/User as lead entity, this affects propagate_attrs->orm
# so keys are different
ne_(q3._generate_cache_key(), q4._generate_cache_key())
# confirm by deleting propagate attrs and memoized key and
# running again
q3._propagate_attrs = None
q4._propagate_attrs = None
del q3.__dict__["_generate_cache_key"]
del q4.__dict__["_generate_cache_key"]
eq_(q3._generate_cache_key(), q4._generate_cache_key())
# once there's a join() or options() prior to with_entities, now they
# are not discarded from the key; Order and User are in the
# _MemoizedSelectEntities
q5 = (
sess.query(Address, Order)
.join(Address.dingaling)
.with_entities(Address.id)
._statement_20()
)
q6 = (
sess.query(Address, User)
.join(Address.dingaling)
.with_entities(Address.id)
._statement_20()
)
assert q5._memoized_select_entities
assert q6._memoized_select_entities
ne_(q5._generate_cache_key(), q6._generate_cache_key())
def test_orm_query_from_statement(self):
User, Address, Keyword, Order, Item = self.classes(
"User", "Address", "Keyword", "Order", "Item"
)
self._run_cache_key_fixture(
lambda: stmt_20(
fixture_session()
.query(User)
.from_statement(text("select * from user")),
select(User).from_statement(text("select * from user")),
fixture_session()
.query(User)
.options(selectinload(User.addresses))
.from_statement(text("select * from user")),
fixture_session()
.query(User)
.options(subqueryload(User.addresses))
.from_statement(text("select * from user")),
fixture_session()
.query(User)
.from_statement(text("select * from user order by id")),
fixture_session()
.query(User.id)
.from_statement(text("select * from user")),
),
compare_values=True,
)
def test_orm_query_basic(self):
User, Address, Keyword, Order, Item = self.classes(
"User", "Address", "Keyword", "Order", "Item"
)
a1 = aliased(Address)
self._run_cache_key_fixture(
lambda: stmt_20(
fixture_session().query(User),
fixture_session().query(User).prefix_with("foo"),
fixture_session().query(User).filter_by(name="ed"),
fixture_session()
.query(User)
.filter_by(name="ed")
.order_by(User.id),
fixture_session()
.query(User)
.filter_by(name="ed")
.order_by(User.name),
fixture_session()
.query(User)
.filter_by(name="ed")
.group_by(User.id),
fixture_session()
.query(User)
.join(User.addresses)
.filter(User.name == "ed"),
fixture_session().query(User).join(User.orders),
fixture_session()
.query(User)
.join(User.orders)
.filter(Order.description == "adsf"),
fixture_session()
.query(User)
.join(User.addresses)
.join(User.orders),
fixture_session().query(User).join(Address, User.addresses),
fixture_session().query(User).join(a1, User.addresses),
fixture_session().query(User).join(User.addresses.of_type(a1)),
fixture_session().query(Address).join(Address.user),
fixture_session().query(User, Address).filter_by(name="ed"),
fixture_session().query(User, a1).filter_by(name="ed"),
),
compare_values=True,
)
def test_options(self):
class MyOpt(CacheableOptions):
_cache_key_traversal = [
("x", InternalTraversal.dp_plain_obj),
("y", InternalTraversal.dp_plain_obj),
]
x = 5
y = ()
self._run_cache_key_fixture(
lambda: (
MyOpt,
MyOpt + {"x": 10},
MyOpt + {"x": 15, "y": ("foo",)},
MyOpt + {"x": 15, "y": ("foo",)} + {"y": ("foo", "bar")},
),
compare_values=True,
)
class PolyCacheKeyTest(CacheKeyFixture, _poly_fixtures._Polymorphic):
run_setup_mappers = "once"
run_inserts = None
run_deletes = None
def test_wp_objects(self):
Person, Manager, Engineer, Boss = self.classes(
"Person", "Manager", "Engineer", "Boss"
)
self._run_cache_key_fixture(
lambda: (
inspect(with_polymorphic(Person, [Manager, Engineer])),
inspect(with_polymorphic(Person, [Manager])),
inspect(with_polymorphic(Person, [Manager, Engineer, Boss])),
inspect(
with_polymorphic(Person, [Manager, Engineer], flat=True)
),
inspect(
with_polymorphic(
Person,
[Manager, Engineer],
select(Person)
.outerjoin(Manager)
.outerjoin(Engineer)
.subquery(),
)
),
),
compare_values=True,
)
def test_wp_queries(self):
Person, Manager, Engineer, Boss = self.classes(
"Person", "Manager", "Engineer", "Boss"
)
def two():
wp = with_polymorphic(Person, [Manager, Engineer])
return fixture_session().query(wp)
def three():
wp = with_polymorphic(Person, [Manager, Engineer])
return fixture_session().query(wp).filter(wp.name == "asdfo")
def three_a():
wp = with_polymorphic(Person, [Manager, Engineer], flat=True)
return fixture_session().query(wp).filter(wp.name == "asdfo")
def five():
subq = (
select(Person)
.outerjoin(Manager)
.outerjoin(Engineer)
.subquery()
)
wp = with_polymorphic(Person, [Manager, Engineer], subq)
return fixture_session().query(wp).filter(wp.name == "asdfo")
self._run_cache_key_fixture(
lambda: stmt_20(two(), three(), three_a(), five()),
compare_values=True,
)
def test_wp_joins(self):
Company, Person, Manager, Engineer, Boss = self.classes(
"Company", "Person", "Manager", "Engineer", "Boss"
)
def one():
return (
fixture_session()
.query(Company)
.join(Company.employees)
.filter(Person.name == "asdf")
)
def two():
wp = with_polymorphic(Person, [Manager, Engineer])
return (
fixture_session()
.query(Company)
.join(Company.employees.of_type(wp))
.filter(wp.name == "asdf")
)
def three():
wp = with_polymorphic(Person, [Manager, Engineer])
return (
fixture_session()
.query(Company)
.join(Company.employees.of_type(wp))
.filter(wp.Engineer.name == "asdf")
)
self._run_cache_key_fixture(
lambda: stmt_20(one(), two(), three()),
compare_values=True,
)
class RoundTripTest(QueryTest, AssertsCompiledSQL):
__dialect__ = "default"
run_setup_mappers = None
@testing.fixture
def plain_fixture(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
Address, back_populates="user", order_by=addresses.c.id
)
},
)
self.mapper_registry.map_imperatively(
Address,
addresses,
properties={
"user": relationship(User, back_populates="addresses")
},
)
return User, Address
def test_subqueryload(self, plain_fixture):
# subqueryload works pretty poorly w/ caching because it has
# to create a new query. previously, baked query went through a
# bunch of hoops to improve upon this and they were found to be
# broken anyway. so subqueryload currently pulls out the original
# query as well as the requested query and works with them at row
# processing time to create its own query. all of which is fairly
# non-performant compared to the selectinloader that has a fixed
# query.
User, Address = plain_fixture
s = Session(testing.db, future=True)
def query(names):
stmt = (
select(User)
.where(User.name.in_(names))
.options(subqueryload(User.addresses))
.order_by(User.id)
)
return s.execute(stmt)
def go1():
r1 = query(["ed"])
eq_(
r1.scalars().all(),
[User(name="ed", addresses=[Address(), Address(), Address()])],
)
def go2():
r1 = query(["ed", "fred"])
eq_(
r1.scalars().all(),
[
User(
name="ed", addresses=[Address(), Address(), Address()]
),
User(name="fred", addresses=[Address()]),
],
)
for i in range(5):
fn = random.choice([go1, go2])
self.assert_sql_count(testing.db, fn, 2)
@testing.combinations((True,), (False,), argnames="use_core")
@testing.combinations((True,), (False,), argnames="arbitrary_element")
@testing.combinations((True,), (False,), argnames="exercise_caching")
def test_column_targeting_core_execute(
self,
plain_fixture,
connection,
use_core,
arbitrary_element,
exercise_caching,
):
"""test that CursorResultSet will do a column rewrite for any core
execute even if the ORM compiled the statement.
This translates the current stmt.selected_columns to the cached
ResultSetMetaData._keymap. The ORM skips this because loading.py
has also cached the selected_columns that are used. But for
an outside-facing Core execute, this has to remain turned on.
Additionally, we want targeting of SQL expressions to work with both
Core and ORM statement executions. So the ORM still has to do some
translation here for these elements to be supported.
"""
User, Address = plain_fixture
user_table = inspect(User).persist_selectable
def go():
my_thing = case((User.id > 9, 1), else_=2)
# include entities in the statement so that we test that
# the column indexing from
# ORM select()._raw_columns -> Core select()._raw_columns is
# translated appropriately
stmt = (
select(User, Address.email_address, my_thing, User.name)
.join(Address)
.where(User.name == "ed")
)
if arbitrary_element:
target, exp = (my_thing, 2)
elif use_core:
target, exp = (user_table.c.name, "ed")
else:
target, exp = (User.name, "ed")
if use_core:
row = connection.execute(stmt).first()
else:
row = Session(connection).execute(stmt).first()
eq_(row._mapping[target], exp)
if exercise_caching:
for i in range(3):
go()
else:
go()
@testing.combinations(
(lazyload, 2),
(joinedload, 1),
(selectinload, 2),
(subqueryload, 2),
argnames="strat,expected_stmt_cache",
)
def test_cache_key_loader_strategies(
self,
plain_fixture,
strat,
expected_stmt_cache,
connection,
):
User, Address = plain_fixture
cache = {}
connection = connection.execution_options(compiled_cache=cache)
sess = Session(connection)
def go():
stmt = (
select(User).where(User.id == 7).options(strat(User.addresses))
)
u1 = sess.execute(stmt).scalars().first()
eq_(u1.addresses, [Address(id=1)])
go()
lc = len(cache)
stmt_entries = [k for k in cache]
eq_(len(stmt_entries), expected_stmt_cache)
for i in range(3):
go()
eq_(len(cache), lc)
class CompositeTest(fixtures.MappedTest):
__dialect__ = "default"
@classmethod
def define_tables(cls, metadata):
Table(
"edges",
metadata,
Column("id", Integer, primary_key=True),
Column("x1", Integer),
Column("y1", Integer),
Column("x2", Integer),
Column("y2", Integer),
)
@classmethod
def setup_mappers(cls):
edges = cls.tables.edges
class Point(cls.Comparable):
def __init__(self, x, y):
self.x = x
self.y = y
def __composite_values__(self):
return [self.x, self.y]
__hash__ = None
def __eq__(self, other):
return (
isinstance(other, Point)
and other.x == self.x
and other.y == self.y
)
def __ne__(self, other):
return not isinstance(other, Point) or not self.__eq__(other)
class Edge(cls.Comparable):
def __init__(self, *args):
if args:
self.start, self.end = args
cls.mapper_registry.map_imperatively(
Edge,
edges,
properties={
"start": sa.orm.composite(Point, edges.c.x1, edges.c.y1),
"end": sa.orm.composite(Point, edges.c.x2, edges.c.y2),
},
)
def test_bulk_update_cache_key(self):
"""test secondary issue located as part of #7209"""
Edge, Point = (self.classes.Edge, self.classes.Point)
stmt = (
update(Edge)
.filter(Edge.start == Point(14, 5))
.values({Edge.end: Point(16, 10)})
)
stmt2 = (
update(Edge)
.filter(Edge.start == Point(14, 5))
.values({Edge.end: Point(17, 8)})
)
eq_(stmt._generate_cache_key(), stmt2._generate_cache_key())
``` |
{
"source": "JorgeMtzVera/prueba_tecnica_ai_engineer",
"score": 2
} |
#### File: JorgeMtzVera/prueba_tecnica_ai_engineer/ThirdAttempt.py
```python
from datetime import timedelta
import airflow
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import PythonOperator
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
import pickle
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import json
import os
from sqlalchemy import create_engine
#Default arguments of the DAG
default_args = {
'owner': 'jorgemtzvera',
'depends_on_past': False,
'start_date': airflow.utils.dates.days_ago(1),
'email': ['<EMAIL>'],
'email_on_failure': True,
'email_on_retry': False,
'retries': 1,
'retry_delay': timedelta(minutes=2)
}
# Define the functions that will be wired into the DAG as tasks
def extract_data():
"""
1. We set our working directory.
2. We choose our relevant variables.
3. Separate them into a training and a validation set.
    4. Save them into .csv files and the PostgreSQL database.
"""
os.chdir("/opt/airflow")
archivo = os.getcwd() + '/train.csv'
bicicletas_df = pd.read_csv(archivo, index_col='datetime', parse_dates=True);
engine = create_engine('postgresql://postgres:[email protected]:5432/test');
columnas_relevantes = ['temp', 'season', 'weather', 'humidity']
parametros_ppales = bicicletas_df[columnas_relevantes]
bicicletas_usadas = bicicletas_df["count"]
parametros_ppales_train, parametros_ppales_test, bicicletas_usadas_train, bicicletas_usadas_test = train_test_split(parametros_ppales, bicicletas_usadas, random_state=21)
parametros_ppales_train.to_csv("parametros_ppales_train.csv")
parametros_ppales_test.to_csv("parametros_ppales_test.csv")
bicicletas_usadas_train.to_csv("bicicletas_usadas_train.csv")
bicicletas_usadas_test.to_csv("bicicletas_usadas_test.csv")
parametros_ppales_train.to_sql("parametros_ppales_train", con = engine,
if_exists = 'append')
parametros_ppales_test.to_sql("parametros_ppales_test", con = engine,
if_exists = 'append')
bicicletas_usadas_train.to_sql("bicicletas_usadas_train", con = engine,
if_exists = 'append')
bicicletas_usadas_test.to_sql("bicicletas_usadas_test", con = engine,
if_exists = 'append')
print("Los archivos fueron guardados correctamente")
def train_model():
"""
1. We load the training data.
2. We obtain linear regression coefficients of a model.
3. Save the model in a pickle file.
"""
os.chdir("/opt/airflow")
archivo_1 = os.getcwd() + '/parametros_ppales_train.csv'
parametros_ppales_train = pd.read_csv(archivo_1, index_col='datetime', parse_dates=True);
archivo_2 = os.getcwd() + '/bicicletas_usadas_train.csv'
bicicletas_usadas_train = pd.read_csv(archivo_2, index_col='datetime', parse_dates=True);
linreg = LinearRegression()
linreg.fit(parametros_ppales_train, bicicletas_usadas_train)
pickle.dump(linreg, open('modelo.p','wb'))
def validation():
"""
1. We load the model from the pickle file.
    2. We load the test sets from the .csv files.
    3. We generate predictions with the model.
    4. We save the predictions to .csv and to the PostgreSQL database.
"""
os.chdir("/opt/airflow")
engine = create_engine('postgresql://postgres:[email protected]:5432/test');
modelo_pred = pickle.load(open('modelo.p','rb'))
archivo_4 = os.getcwd() + '/parametros_ppales_test.csv'
parametros_ppales_test = pd.read_csv(archivo_4, index_col='datetime', parse_dates=True);
archivo_5 = os.getcwd() + '/bicicletas_usadas_test.csv'
bicicletas_usadas_test = pd.read_csv(archivo_5, index_col='datetime', parse_dates=True);
y_pred = modelo_pred.predict(parametros_ppales_test)
df_y_pred = pd.DataFrame(y_pred.tolist(),columns=['Count'])
df_y_pred.to_csv("prediction.csv")
df_y_pred.to_sql("prediction", con = engine, if_exists = 'append')
def plotting():
"""
1. Plot some of the results.
"""
os.chdir("/opt/airflow")
modelo_pred = pickle.load(open('modelo.p','rb'))
archivo_6 = os.getcwd() + '/parametros_ppales_test.csv'
parametros_ppales_test = pd.read_csv(archivo_6, index_col='datetime', parse_dates=True);
archivo_7 = os.getcwd() + '/bicicletas_usadas_test.csv'
bicicletas_usadas_test = pd.read_csv(archivo_7, index_col='datetime', parse_dates=True);
y_pred = modelo_pred.predict(parametros_ppales_test)
df_y_pred = pd.DataFrame(y_pred.tolist(),columns=['Count'])
plt.scatter(parametros_ppales_test['temp'],bicicletas_usadas_test , color='gray',label="Data")
plt.scatter(parametros_ppales_test['temp'], y_pred, color='red',label="Prediction")
plt.xlabel("Temperature (C)")
plt.ylabel("Number of Rides")
plt.legend(loc='upper left')
plt.savefig('PLOT.pdf')
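
# Offline sketch (not an Airflow task): the pickled regression can be loaded and
# queried outside the DAG with the same four feature columns used for training.
# The feature values below are made up for illustration only.
def predict_rides_example():
    model = pickle.load(open('modelo.p', 'rb'))
    sample = pd.DataFrame([{"temp": 25.0, "season": 2, "weather": 1, "humidity": 60}])
    return model.predict(sample)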
# Create the DAG
with DAG(
dag_id="Third_DAG",
default_args=default_args,
    description='Third attempt at using a DAG',
schedule_interval='@hourly',
catchup = False
) as f:
    # Define the tasks
TareaM1 = BashOperator(
task_id ='Start',
bash_command = "pwd"
)
Tarea0 = PythonOperator(
task_id ='Extraction',
python_callable = extract_data
)
Tarea1 = PythonOperator(
task_id ='Training',
python_callable = train_model
)
Tarea2 = PythonOperator(
task_id ='Testing',
python_callable = validation
)
Tarea3 = PythonOperator(
task_id ='Plotting',
python_callable = plotting
)
    # Set the execution order between the tasks
TareaM1 >> Tarea0 >> Tarea1 >> Tarea2 >> Tarea3
``` |
{
"source": "Jorgen1040/discord.py-components",
"score": 3
} |
#### File: discord.py-components/discord_components/client.py
```python
from discord import (
Client,
TextChannel,
Message,
Embed,
AllowedMentions,
InvalidArgument,
User,
File,
)
from discord.ext.commands import Bot, Context as DContext
from discord.http import Route
from discord.abc import Messageable
from functools import wraps
from asyncio import TimeoutError
from typing import Union, List, Callable, Awaitable
from json import dumps
from .button import Button
from .select import Select
from .component import Component
from .context import Context
from .message import ComponentMessage
from .interaction import InteractionEventType
__all__ = ("DiscordComponents",)
class DiscordComponents:
"""discord_components client
Parameters
----------
bot: Union[:class:`discord.Client`, :class:`discord.ext.commands.Bot`]
The bot
change_discord_methods: :class:`bool`
Whether to change the methods of the discord module
If this is enabled, you can just use :class:`await <Messageable>.send`, :class:`await <Context>.send` as :class:`await <DiscordButton>.send_button_msg`, :class:`await <Message>.edit`, as :class:`await <DiscordComponents>.edit_component_msg`
Attributes
----------
bot: Union[:class:`discord.Client`, :class:`discord.ext.commands.Bot`]
The bot
"""
def __init__(self, bot: Union[Client, Bot], change_discord_methods: bool = True):
self.bot = bot
if change_discord_methods:
self.change_discord_methods()
def change_discord_methods(self):
"""A function that change the methods of the discord module"""
async def send_component_msg_prop(ctxorchannel, *args, **kwargs) -> Message:
if isinstance(ctxorchannel, DContext):
return await self.send_component_msg(ctxorchannel.channel, *args, **kwargs)
else:
return await self.send_component_msg(ctxorchannel, *args, **kwargs)
async def edit_component_msg_prop(*args, **kwargs):
return await self.edit_component_msg(*args, **kwargs)
async def wait_for_interact_ctx(ctx, *args, **kwargs):
return await self.wait_for_interact(*args, **kwargs)
Messageable.send = send_component_msg_prop
Message.edit = edit_component_msg_prop
DContext.wait_for_interact = wait_for_interact_ctx
async def send_component_msg(
self,
channel: TextChannel,
content: str = "",
*,
tts: bool = False,
embed: Embed = None,
file: File = None,
allowed_mentions: AllowedMentions = None,
components: List[Union[Component, List[Component]]] = None,
**options,
) -> Message:
"""A function that sends a message with components
:returns: :class:`discord.Message`
Parameters
----------
channel: :class:`discord.Messageable`
The channel to send the message
content: str
The message's content
tts: :class:`bool`
Indicates if the message should be sent using text-to-speech.
embed: :class:`discord.Embed`
The rich embed for the content.
file: :class:`discord.File`
The file to upload.
allowed_mentions: :class:`discord.AllowedMentions`
Controls the mentions being processed in this message. If this is
passed, then the object is merged with :attr:`discord.Client.allowed_mentions`.
The merging behaviour only overrides attributes that have been explicitly passed
to the object, otherwise it uses the attributes set in :attr:`discord.Client.allowed_mentions`.
If no object is passed at all then the defaults given by :attr:`discord.Client.allowed_mentions`
are used instead.
components: List[Union[:class:`~discord_components.Component`, List[:class:`~discord_components.Component`]]]
The components to send.
            If this is a 2-dimensional list, each inner list is rendered as its own action row (line)
"""
state = self.bot._get_state()
if embed:
embed = embed.to_dict()
if allowed_mentions:
if state.allowed_mentions:
allowed_mentions = state.allowed_mentions.merge(allowed_mentions).to_dict()
else:
allowed_mentions = allowed_mentions.to_dict()
else:
allowed_mentions = state.allowed_mentions and state.allowed_mentions.to_dict()
data = {
"content": content,
**self._get_components_json(components),
**options,
"embed": embed,
"allowed_mentions": allowed_mentions,
"tts": tts,
}
if file:
try:
await self.bot.http.request(
Route("POST", f"/channels/{channel.id}/messages"),
form=[
{
"name": "payload_json",
"value": dumps(data, separators=(",", ":"), ensure_ascii=True),
},
{
"name": "file",
"value": file.fp,
"filename": file.filename,
"content_type": "application/octet-stream",
},
],
files=[file],
)
finally:
file.close()
else:
data = await self.bot.http.request(
Route("POST", f"/channels/{channel.id}/messages"), json=data
)
return ComponentMessage(components=components, state=state, channel=channel, data=data)
async def edit_component_msg(
self,
message: ComponentMessage,
content: str = "",
*,
tts: bool = False,
embed: Embed = None,
file: File = None,
allowed_mentions: AllowedMentions = None,
components: List[Union[Component, List[Component]]] = None,
**options,
):
"""A function that edits a message with components
:returns: :class:`discord_components.ComponentMessage`
Parameters
----------
        message: :class:`~discord_components.ComponentMessage`
            The message to edit
content: str
The message's content
tts: :class:`bool`
Indicates if the message should be sent using text-to-speech.
embed: :class:`discord.Embed`
The rich embed for the content.
file: :class:`discord.File`
The file to upload.
allowed_mentions: :class:`discord.AllowedMentions`
Controls the mentions being processed in this message. If this is
passed, then the object is merged with :attr:`discord.Client.allowed_mentions`.
The merging behaviour only overrides attributes that have been explicitly passed
to the object, otherwise it uses the attributes set in :attr:`discord.Client.allowed_mentions`.
If no object is passed at all then the defaults given by :attr:`discord.Client.allowed_mentions`
are used instead.
components: List[Union[:class:`~discord_components.Component`, List[:class:`~discord_components.Component`]]]
The components to send.
            If this is a 2-dimensional list, each inner list is rendered as its own action row (line)
"""
state = self.bot._get_state()
if embed:
embed = embed.to_dict()
if allowed_mentions:
if state.allowed_mentions:
allowed_mentions = state.allowed_mentions.merge(allowed_mentions).to_dict()
else:
allowed_mentions = allowed_mentions.to_dict()
else:
allowed_mentions = state.allowed_mentions and state.allowed_mentions.to_dict()
data = {
"content": content,
**self._get_components_json(components),
**options,
"embed": embed,
"allowed_mentions": allowed_mentions,
"tts": tts,
}
if file:
try:
await self.bot.http.request(
Route("PATCH", f"/channels/{message.channel.id}/messages/{message.id}"),
form=[
{
"name": "payload_json",
"value": dumps(data, separators=(",", ":"), ensure_ascii=True),
},
{
"name": "file",
"value": file.fp,
"filename": file.filename,
"content_type": "application/octet-stream",
},
],
files=[file],
)
finally:
file.close()
else:
await self.bot.http.request(
Route("PATCH", f"/channels/{message.channel.id}/messages/{message.id}"), json=data
)
def _get_components_json(
self, components: List[Union[Component, List[Component]]] = None
) -> dict:
if not components:
return {}
for i in range(len(components)):
if not isinstance(components[i], list):
components[i] = [components[i]]
lines = components
return {
"components": (
[
{
"type": 1,
"components": [component.to_dict() for component in components],
}
for components in lines
]
if lines
else []
),
}
def _get_component_type(self, type: int):
if type == 2:
return Button
elif type == 3:
return Select
def _structured_raw_data(self, raw_data: dict) -> dict:
data = {
"interaction": raw_data["d"]["id"],
"interaction_token": raw_data["d"]["token"],
"raw": raw_data,
}
raw_data = raw_data["d"]
state = self.bot._get_state()
components = []
for line in raw_data["message"]["components"]:
if line["type"] >= 2:
components.append(self._get_component_type(line["type"]).from_json(line))
for btn in line["components"]:
if btn["type"] >= 2:
                    components.append(self._get_component_type(btn["type"]).from_json(btn))
data["message"] = ComponentMessage(
state=state,
channel=self.bot.get_channel(int(raw_data["channel_id"])),
data=raw_data["message"],
components=components,
)
if "member" in raw_data:
userData = raw_data["member"]["user"]
else:
userData = raw_data["user"]
data["user"] = User(state=state, data=userData)
data["custom_id"] = raw_data["data"]["custom_id"]
return data
async def wait_for_interact(
self,
type: str,
check: Callable[[Context], Awaitable[bool]] = None,
timeout: float = None,
) -> Context:
"""A function that waits until a user clicks a button on the message
:returns: :class:`~discord_components.Context`
Parameters
----------
type: :class:`str`
The interaction event type
check: Optional[Callable[[:class:`Context`], Coroutine[:class:`bool`]]]
The wait_for check function
timeout: Optional[:class:`float`]
The wait_for timeout
"""
while True:
res = await self.bot.wait_for("socket_response", check=check, timeout=timeout)
if res["t"] != "INTERACTION_CREATE":
continue
if InteractionEventType[type] != res["d"]["data"]["component_type"]:
continue
break
data = self._structured_raw_data(res)
rescomponent = None
for component in data["message"].components:
if component.id == data["custom_id"]:
rescomponent = component
ctx = Context(
bot=self.bot,
client=self,
user=data["user"],
component=rescomponent,
            raw_data=data["raw"],
message=data["message"],
)
return ctx
async def fetch_component_message(self, message: Message) -> ComponentMessage:
"""Converts a message class to a ComponentMessage class
:returns: :class:`~discord_components.ComponentMessage`
Parameters
----------
message: :class:`discord.Message`
The message to convert
"""
res = await self.bot.http.request(
Route("GET", f"/channels/{message.channel.id}/messages/{message.id}")
)
components = []
for i in res["components"]:
if i["type"] >= 2:
components.append(self._get_component_type(i["type"]).from_json(i))
continue
for j in i["components"]:
if j["type"] < 2:
continue
                components.append(self._get_component_type(j["type"]).from_json(j))
return ComponentMessage(
channel=message.channel, state=self.bot._get_state(), data=res, components=components
)
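
# Usage sketch (hedged): Button and the "button_click" event key live in other
# modules of this package, so the snippet below is illustrative only.
#
#     bot = Bot(command_prefix="!")
#     ddb = DiscordComponents(bot, change_discord_methods=False)
#
#     @bot.command()
#     async def hello(ctx):
#         await ddb.send_component_msg(ctx.channel, "Hi!", components=[Button(label="Click me")])
#         interaction = await ddb.wait_for_interact("button_click", timeout=30.0)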
``` |
{
"source": "Jorgen1040/jassa-bot",
"score": 2
} |
#### File: jassa-bot/src/bot.py
```python
import asyncio
import hashlib
import io
import json
import logging
import os
import random
import stat
import sys
import time
import traceback
from datetime import datetime
from distutils.util import strtobool
from urllib.parse import quote
import colorlog
import discord
import ffmpeg
import requests
import rule34
from bs4 import BeautifulSoup as bs
from discord.ext import commands
from discord_together import DiscordTogether
token = os.environ["BOT_TOKEN"]
ownerid = int(os.environ["OWNER_ID"])
tarkov_key = os.getenv("TARKOV_API")
if os.getenv("ERROR_DM") is not None:
dm = bool(strtobool(os.getenv("ERROR_DM")))
else:
dm = True
prefix = os.getenv("PREFIX", "+")
logging_level = os.getenv("LOGGING_LEVEL", "INFO")
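
# All runtime configuration comes from environment variables; a hypothetical
# minimal launch (placeholder values, not real credentials) could look like:
#   BOT_TOKEN=xxxx OWNER_ID=123456789 PREFIX=+ LOGGING_LEVEL=DEBUG python bot.py
# TARKOV_API and ERROR_DM are optional, as handled above.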
# Disable logging from discord.py
logging.getLogger("discord").setLevel(logging.CRITICAL)
# Set up colorlog
handler = logging.StreamHandler()
handler.setFormatter(colorlog.ColoredFormatter(
fmt="%(log_color)s%(asctime)s [%(levelname)s]: %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
))
logger = logging.getLogger("bot")
logger.setLevel(logging_level)
logger.addHandler(handler)
rule34 = rule34.Sync()
intents = discord.Intents().default()
intents.members = True
bot = commands.Bot(command_prefix=prefix, owner_id=ownerid, intents=intents)
# Emojis :)
ok = "✅"
no = "❌"
nsfw = "🔞"
# TODO: Add a task that selects a random user to change server icon
# TODO: Add pin command (custom pin channel, that works over the 50 cap)
if tarkov_key is not None:
tarkov_market = True
logger.info("Tarkov API enabled. (from https://tarkov-market.com)")
else:
tarkov_market = False
logger.warning("No tarkov-market API key found, price of items won't be available")
# Check for linux and folders
if sys.platform != "linux":
logger.warning("Bot is not made for non Linux installations. Persistence may not work")
try:
if os.path.isdir("/jassa-bot/output/optimized"):
logger.info("All files are correct :). Persistence is enabled")
else:
os.makedirs("/jassa-bot/output/optimized")
logger.info("Made output folders, persistence is now enabled")
# TODO: Merge servers.json and aliases.json
if not os.path.isfile("/jassa-bot/aliases.json"):
logger.info("Missing aliases.json, making file")
with open("/jassa-bot/aliases.json", "x") as f:
f.write("{}")
os.chmod("/jassa-bot/aliases.json", stat.S_IRWXO)
if not os.path.isfile("/jassa-bot/servers.json"):
logger.info("Missing servers.json, making file")
with open("/jassa-bot/servers.json", "x") as f:
f.write("{}")
os.chmod("/jassa-bot/servers.json", stat.S_IRWXO)
except PermissionError as e:
logger.warning(e)
logger.warning("Permission denied for /jassa-bot directory. Persistence will not work!")
@bot.event
async def on_ready():
await bot.change_presence(activity=discord.Game(f"{prefix}jasså"))
bot.togetherControl = await DiscordTogether(token)
logger.info(f"Logged in as {bot.user}")
@bot.event
async def on_command(ctx):
logger.info(f"{ctx.message.author} called {ctx.command}")
@bot.event
async def on_command_error(ctx, error):
await ctx.message.remove_reaction(ok, bot.user)
error = getattr(error, 'original', error)
if isinstance(error, commands.NSFWChannelRequired):
await ctx.message.add_reaction(nsfw)
# Only send meme response in the right discord server
if ctx.guild.id == 461648348622094347:
await ctx.send("IKKE I GENERAL DA! KUN I <#607395883239342080>")
else:
await ctx.send("This command is only available in channels marked NSFW")
elif isinstance(error, commands.NotOwner):
await ctx.message.add_reaction(no)
await ctx.send("You have to be the bot owner to use this command")
elif isinstance(error, commands.NoPrivateMessage):
await ctx.message.add_reaction(no)
await ctx.send("This command is only available in a guild")
elif isinstance(error, commands.MissingPermissions):
await ctx.message.add_reaction(no)
if len(error.missing_perms) > 1:
await ctx.send(f"You are missing the following permissions for this command: `{'`, `'.join(error.missing_perms)}`")
else:
await ctx.send(f"You need the `{error.missing_perms[0]}` permission to use this command")
elif not isinstance(error, commands.CommandNotFound):
# Only error if not already handled
matches = [no, nsfw]
for reaction in ctx.message.reactions:
if any(x in reaction.emoji for x in matches):
return
await ctx.message.add_reaction(no)
await ctx.send("Unknown error")
logger.error(f'"{error}" in {ctx.guild.name}: {ctx.channel.name}')
if dm is True:
owner = bot.get_user(int(ownerid))
        trace = "".join(traceback.format_exception(type(error), error, error.__traceback__))
if "NoneType: None" in trace:
trace = str(error)
if len(trace) < 2000:
await owner.send(f"**Guild:** {ctx.guild.name} **Channel:** {ctx.channel.name} **Time:** {datetime.now().strftime('%d/%m/%Y %H:%M:%S')}\n```\n{trace}\n```")
else:
await owner.send(f"Errored in {ctx.guild.name}, {ctx.channel.name} at {datetime.now().strftime('%d/%m/%Y %H:%M:%S')}")
await owner.send(file=discord.File(io.StringIO(trace), filename="traceback.txt"))
traceback.print_exception(type(error), error, error.__traceback__)
@bot.command(aliases=["pog"])
async def ping(ctx):
ping = round(bot.latency * 1000)
await ctx.send(f"{ping}ms")
logger.info(f"{ping}ms")
@bot.command(aliases=["jasså"])
async def jassa(ctx, args):
await ctx.message.add_reaction(ok)
async with ctx.channel.typing():
name = hashlib.md5(args.encode()).hexdigest()
filename = "/jassa-bot/output/" + name + ".mp4"
optimized = "/jassa-bot/output/optimized/" + name + ".gif"
if os.path.isfile(optimized):
logger.info("Gif exists, sending file")
await ctx.send(file=discord.File(optimized))
else:
start_time = time.time()
logger.info("Making new gif")
# Generate mp4 with text
try:
(
ffmpeg
.input('media/template.mp4')
.drawtext(fontfile="ProximaNova-Semibold.otf", text=args, x=160, y=656, fontsize=32.5, fontcolor="white", enable="between(t,0.5,5)")
.filter('fps', fps=19)
.filter('scale', "400", "trunc(ow/a/2)*2", flags="lanczos")
.output(filename)
.run(quiet=True)
)
except ffmpeg.Error as e:
print('stdout:', e.stdout.decode('utf8'))
print('stderr:', e.stderr.decode('utf8'))
raise e
# Convert mp4 to gif
logger.info("Converting mp4 to gif")
try:
(
ffmpeg
.filter([
ffmpeg.input(filename),
ffmpeg.input("media/palette.png")
],
filter_name="paletteuse",
dither="bayer"
)
.filter("fps", fps=19, round="up")
.output(optimized)
.run(quiet=True)
)
except ffmpeg.Error as e:
print("stdout:", e.stdout.decode("utf8"))
print("stderr:", e.stderr.decode("utf8"))
raise e
logger.info(f"Successfully generated gif with {args} in {time.time()-start_time} seconds")
await ctx.send(file=discord.File(optimized))
@jassa.error
async def jassa_error(ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
await ctx.message.add_reaction(no)
await ctx.send(f"Mangler navn (eller noe annet).\nRiktig bruk: `{prefix}jasså <navn>`")
@bot.command(aliases=["activites", "activity"])
@commands.guild_only()
async def together(ctx, name: str):
if ctx.author.voice is None:
await ctx.message.add_reaction(no)
return await ctx.send("You have to be in a voice channel.")
try:
link = await bot.togetherControl.create_link(ctx.author.voice.channel.id, name)
except discord.InvalidArgument as e:
await ctx.message.add_reaction(no)
return await ctx.send(str(e))
await ctx.send(f"Click the blue link! (Not the Play button)\n{link}")
await ctx.message.add_reaction(ok)
@together.error
async def together_error(ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
await ctx.message.add_reaction(no)
app_list = []
for app in DiscordTogether.default_choices:
app_list.append(f"`{app}`")
await ctx.send(
"Please specify what application you want to use.\n"
"Available applications:\n" + ", ".join(app_list)
)
@bot.command()
@commands.guild_only()
@commands.bot_has_guild_permissions(manage_nicknames=True)
async def setnick(ctx, member: discord.Member, *, nickname: str = None):
old_nick = member.display_name
if nickname is not None and len(nickname) > 32:
await ctx.message.add_reaction(no)
return await ctx.send("Nickname can't be longer than 32 characters")
if member == ctx.author and ctx.author.guild_permissions.manage_nicknames is False:
await ctx.message.add_reaction(no)
return await ctx.send("You can't change your own nickname")
if member == ctx.guild.owner:
await ctx.message.add_reaction(no)
return await ctx.send("You can't change the server owner's name. (Discord doesn't allow it)")
try:
await member.edit(nick=nickname)
except discord.Forbidden:
await ctx.message.add_reaction(no)
return await ctx.send("Missing permissions to change that user's nickname")
await ctx.message.add_reaction(ok)
# Send to log
with open("/jassa-bot/servers.json") as f:
servers = json.load(f)
try:
log_id = servers[str(ctx.guild.id)]["nickname_log_channel"]
log_channel = bot.get_channel(log_id)
except KeyError:
# If no log channel is set, just return
return
# Generate embed
embed = discord.Embed(
description=f"**{ctx.author.mention} changed nickname of {member.mention}**",
timestamp=datetime.utcnow(),
color=discord.Colour.random(seed=member.id)
)
embed.set_author(name=str(ctx.author), icon_url=ctx.author.avatar_url)
embed.add_field(name="Before", value=old_nick, inline=False)
embed.add_field(name="After", value=nickname, inline=False)
await log_channel.send(embed=embed)
@setnick.error
async def setnick_error(ctx, error):
if isinstance(error, commands.MemberNotFound):
await ctx.message.add_reaction(no)
await ctx.send("Unable to find that member")
if isinstance(error, commands.MissingRequiredArgument):
await ctx.message.add_reaction(no)
await ctx.send(f"Missing required argument.\nUsage: `{prefix}setnick <Username/Mention> <Optional: nickname>`")
@bot.command()
@commands.guild_only()
@commands.has_guild_permissions(manage_guild=True)
async def setnicklog(ctx, channel: discord.TextChannel):
if channel.guild != ctx.guild:
await ctx.send("Cannot set log channel outside of server")
return await ctx.message.add_reaction(no)
with open("/jassa-bot/servers.json", "r") as f:
servers = json.load(f)
try:
servers[str(ctx.guild.id)]
except KeyError:
print("Guild ID not already in servers.json, adding it")
servers[str(ctx.guild.id)] = {}
servers[str(ctx.guild.id)]["nickname_log_channel"] = channel.id
with open("/jassa-bot/servers.json", "w") as f:
json.dump(servers, f, indent=4)
await channel.send(f"Successfully set this as the log channel for the `{prefix}setnick` command")
await ctx.message.add_reaction(ok)
@setnicklog.error
async def setnicklog_error(ctx, error):
if isinstance(error, commands.ChannelNotFound):
await ctx.send("Unable to find channel. Please be more specific or use an ID or mention it with #")
await ctx.message.add_reaction(no)
@bot.command(aliases=["shut", "shutyobitchassup", "shutyobitchass", "sybap"])
@commands.guild_only()
async def shutup(ctx):
await ctx.message.add_reaction(ok)
async with ctx.channel.typing():
try:
mention = ctx.message.mentions[0]
except IndexError:
# Get author from previous message if no one is mentioned
history = await ctx.channel.history(limit=2).flatten()
mention = history[1].author
if ctx.message.role_mentions:
await ctx.message.remove_reaction(ok, bot.user)
await ctx.message.add_reaction(no)
return await ctx.send("You mentioned a role. Please mention a user.")
if mention == ctx.message.author:
await ctx.message.remove_reaction(ok, bot.user)
await ctx.message.add_reaction(no)
return await ctx.send("Unable to find a user to mute (mention them)")
if mention.bot:
await ctx.message.remove_reaction(ok, bot.user)
await ctx.message.add_reaction(no)
return await ctx.send("Won't mute a bot ;)")
muted_role = discord.utils.get(ctx.guild.roles, name="Muted")
if muted_role is None:
await ctx.message.remove_reaction(ok, bot.user)
await ctx.message.add_reaction(no)
return await ctx.send("The role `Muted` does not exist. Has it been renamed?")
# Make sure users don't accidentally get muted in VCs
# TODO: Optimize this
# * Disabled due to being WAY TOO SLOW
# channels = ctx.guild.voice_channels
# for channel in channels:
# # ? If user calling command is in a vc with the other, also do vc mute
# await channel.set_permissions(muted_role, speak=True)
await ctx.message.author.add_roles(muted_role)
await mention.add_roles(muted_role)
await ctx.send("https://tenor.com/view/meryl-streep-shut-up-yell-gif-15386483")
await asyncio.sleep(60)
await ctx.message.author.remove_roles(muted_role)
await mention.remove_roles(muted_role)
@bot.command(aliases=["q"])
async def quest(ctx, *, args: str):
# TODO: Detect quests, and give quest objectives
await ctx.message.add_reaction(ok)
query = quote(args)
search_url = "https://escapefromtarkov.gamepedia.com/Special:Search?scope=internal&search=" + query
r = requests.get(search_url)
results = bs(r.text, "html.parser")
# Find the first search result
if results.find("a", class_="unified-search__result__title"):
result = results.find("a", class_="unified-search__result__title").get("href")
r = requests.get(result)
page = bs(r.text, "html.parser")
else:
page = results
# Handle disambiguation pages
if page.find("table", class_="plainlinks ambox ambox-green"):
result = "https://escapefromtarkov.gamepedia.com" + page.find("div", class_="mw-parser-output").find("a").get("href")
r = requests.get(result)
page = bs(r.text, "html.parser")
title = page.find("h1", id="firstHeading").get_text().strip()
if "Search results for" in title:
await ctx.send(f"Unable to find {discord.utils.escape_markdown(args)}, try being more specific.")
return
embed = discord.Embed(title=title, url=r.url)
# Get prices from tarkov-market.com if API key is set
if tarkov_market:
api = requests.get('https://tarkov-market.com/api/v1/item?q=' + title, headers={'x-api-key': tarkov_key})
try:
tarkov_item = api.json()[0]
except IndexError:
# If no results are found, state so
embed.add_field(name="Price", value=f"No results found for {title}")
else:
name = tarkov_item["name"]
price = format(tarkov_item["price"], ",")
avg24h = format(tarkov_item["avg24hPrice"], ",")
per_slot = format(int(tarkov_item["price"] / tarkov_item["slots"]), ",")
market_link = tarkov_item["link"]
trader_name = tarkov_item["traderName"]
trader_price = format(tarkov_item["traderPrice"], ",")
trader_currency = tarkov_item["traderPriceCur"]
# Check if wiki and API name is same, if not display API name to avoid wrong price
if name == title:
name_string = "Price"
else:
name_string = f"Price ({name})"
embed.add_field(name=name_string, value=f"**Current:** {price} ₽\n**Per slot:** {per_slot} ₽\n**24h average:** {avg24h} ₽\n**{trader_name}:** {trader_price} {trader_currency}\n[More info]({market_link})")
if page.find(id="Quests"):
quests = page.find(id="Quests").find_parent("h2").find_next_sibling("ul").find_all("li")
quests_string = ""
for quest in quests:
text = quest.get_text()
for href in quest.find_all("a"):
quest_name = href.get_text()
quest_url = "https://escapefromtarkov.gamepedia.com" + href.get("href")
text = text.replace(quest_name, f"[{quest_name}]({quest_url})")
if "in raid" in text:
text = text.replace("in raid", "**in raid**")
quests_string += text + "\n"
if len(quests_string) > 1024:
embed.add_field(name="Quests", value=f"Too many quests to show, see more [here]({r.url + '#Quests'})", inline=False)
else:
embed.add_field(name="Quests", value=quests_string, inline=False)
if page.find(id="Hideout"):
uses_element = page.find(id="Hideout").find_parent("h2").find_next_sibling()
# If uses_element isn't "ul", there's probably no hideout uses
if uses_element.name == "ul":
uses_string = ""
if uses_element.name == "p":
uses_string = uses_element.text
else:
uses = uses_element.find_all("li")
for use in uses:
uses_string += use.get_text() + "\n"
if len(uses_string) > 1024:
embed.add_field(name="Hideout", value=f"Too many hideout uses to show, see more [here]({r.url + '#Hideout'})", inline=False)
else:
embed.add_field(name="Hideout", value=uses_string, inline=False)
# TODO: Fix formatting for Trading and Crafting embed
# Fix weird formatting for multiple items (both with x amount and + another item)
# Formatting for additional notes (ex. "After completing his task ...")
if page.find(id="Trading"):
# If the Trading tab is empty, skip it
try:
trades = page.find(id="Trading").find_parent("h2").find_next_sibling("table", class_="wikitable").find_all("tr")
except AttributeError:
pass
else:
trades_string = ""
previous_level = ""
for trade in trades:
th = trade.find_all("th")
trader_info = th[2].get_text().strip().split()
trader = trader_info[0]
trader_level = trader_info[1]
barter_in = th[0].get_text().strip()
barter_out = th[4].get_text().strip()
if trader_level != previous_level:
trades_string += f"**{trader} {trader_level}:**\n"
previous_level = trader_level
trades_string += f"{barter_in} -> {barter_out}\n"
if len(trades_string) > 1024:
embed.add_field(name="Trading", value=f"Too many trades to show, see more [here]({r.url + '#Trading'})", inline=False)
else:
embed.add_field(name="Trading", value=trades_string, inline=False)
if page.find(id="Crafting"):
# If the Crafting tab is empty, skip it
try:
crafts = page.find(id="Crafting").find_parent("h2").find_next_sibling("table", class_="wikitable").find_all("tr")
except AttributeError:
pass
else:
crafts_string = ""
previous_station = ""
for craft in crafts:
th = craft.find_all("th")
station = th[2].find("big").get_text()
time = th[2].get_text().strip().replace(station, "")
craft_in = th[0].get_text().strip()
craft_out = th[4].get_text().strip()
if station != previous_station:
crafts_string += f"**{station}:**\n"
previous_station = station
crafts_string += f"{time}: {craft_in} -> {craft_out}\n"
if len(crafts_string) > 1024:
embed.add_field(name="Crafting", value=f"Too many crafts to show, see more [here]({r.url + '#Crafting'})", inline=False)
else:
embed.add_field(name="Crafting", value=crafts_string, inline=False)
# Check for icon
icon = None
if page.find("td", class_="va-infobox-icon"):
icon = page.find("td", class_="va-infobox-icon").find("a", class_="image").get("href")
else:
# TODO: Make it so that it retries until it finds an item
if page.find("td", class_="va-infobox-mainimage-image"):
icon = page.find("td", class_="va-infobox-mainimage-image").find("a", class_="image").get("href")
embed.set_footer(text="This might not be an item")
if icon is not None:
embed.set_thumbnail(url=icon)
await ctx.send(embed=embed)
@quest.error
async def quest_error(ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
await ctx.message.add_reaction(no)
await ctx.send(f"Missing search query. Usage: `{prefix}quest <query>`")
@bot.command(aliases=["map"])
async def maps(ctx, *, args: str):
async with ctx.channel.typing():
query = quote(args)
# TODO: Make this do a search for more reliable results
url = "https://escapefromtarkov.gamepedia.com/wiki/" + query
r = requests.get(url)
results = bs(r.text, "html.parser")
if results.find(id="Maps"):
# Get all maps
maps = results.find(id="Maps").find_parent("h2").find_next_siblings("p")
await ctx.send(f"Maps found for **{args}** ({url}):")
for map_img in maps:
if "Interactive Map" in map_img.text:
# Skip the image if its from an Interactive Map
continue
if map_img.find("a"):
map_url = map_img.find("a").get("href")
await ctx.send(map_url)
await ctx.message.add_reaction(ok)
else:
await ctx.message.add_reaction(no)
await ctx.send(f"Unable to find any maps for **{args}**")
@bot.command()
@commands.has_guild_permissions(administrator=True)
async def vcmute(ctx):
# This command has only been tested with the Vexera Muted role
await ctx.message.add_reaction(ok)
muted_role = discord.utils.get(ctx.guild.roles, name="Muted")
if muted_role is None:
return await ctx.send("The role `Muted` does not exist. Has it been renamed?")
channels = ctx.guild.voice_channels
perm = not channels[0].overwrites_for(muted_role).speak
for channel in channels:
await channel.set_permissions(muted_role, speak=perm)
await ctx.send(f"Set Speak permission for the Muted role to {perm} in {len(channels)} voice channels")
@bot.command(aliases=["mv"])
@commands.has_guild_permissions(move_members=True)
async def moveall(ctx, *, channel: str):
with open("/jassa-bot/aliases.json", "r") as f:
aliases = json.load(f)
try:
channel = aliases[str(ctx.guild.id)][channel]
channel = bot.get_channel(int(channel))
except KeyError:
channel = discord.utils.find(lambda x: x.name == channel, ctx.guild.voice_channels)
if channel is None:
await ctx.message.add_reaction(no)
return await ctx.send("Unable to find channel")
for member in ctx.message.author.voice.channel.members:
await member.move_to(channel)
logger.info(f"Moved {member} to {channel} in {ctx.guild}")
await ctx.message.add_reaction(ok)
@moveall.error
async def moveall_error(ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
await ctx.message.add_reaction(no)
await ctx.send(f"Missing voice channel ID/name to move to. Usage: `{prefix}moveall <vc id/name>`")
if isinstance(error, commands.ChannelNotFound):
await ctx.message.add_reaction(no)
await ctx.send("Unable to find channel")
@bot.command(aliases=["mvalias", "movealias"])
@commands.has_guild_permissions(administrator=True)
async def alias(ctx, alias: str, channel: discord.VoiceChannel = None):
await ctx.message.add_reaction(ok)
with open("/jassa-bot/aliases.json", "r") as f:
aliases = json.load(f)
try:
aliases[str(ctx.guild.id)]
except KeyError:
print("Guild ID not already in list, adding it")
aliases[str(ctx.guild.id)] = {}
if channel is None:
await ctx.send(f"Removed alias for channel ID {aliases[str(ctx.guild.id)][alias]}")
aliases[str(ctx.guild.id)].pop(alias)
else:
alias_list = {}
alias_list[alias] = str(channel.id)
aliases[str(ctx.guild.id)].update(alias_list)
await ctx.send("Added alias for channel")
with open("/jassa-bot/aliases.json", "w") as f:
json.dump(aliases, f, indent=4)
@alias.error
async def alias_error(ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
await ctx.message.add_reaction(no)
await ctx.send(f"Missing alias and/or channel ID. Usage: `{prefix}alias <alias> <channel ID/name in quotes>`")
if isinstance(error, commands.ChannelNotFound):
await ctx.message.add_reaction(no)
await ctx.send("Unable to find channel")
@bot.command(aliases=["lb", "rolelb", "leaderboard"])
async def roleleaderboard(ctx, arg: str = None):
try:
await ctx.message.add_reaction(ok)
if arg is None:
limit = 11
elif arg == "full":
limit = -999999
elif arg == "0":
await ctx.message.add_reaction(no)
await ctx.message.remove_reaction(ok, bot.user)
return await ctx.send("Number must be more than `0`")
else:
limit = int(arg) + 1
members_list = ctx.guild.members
roles = {}
for member in members_list:
roles[member.display_name] = len(member.roles)
sorted_list = {k: v for k, v in sorted(roles.items(), key=lambda item: item[1], reverse=True)}
embed = discord.Embed(colour=discord.Colour.gold())
value_string = ""
role_place = 1
for item in sorted_list.items():
if role_place == limit:
break
username = discord.utils.escape_markdown(item[0], ignore_links=False)
current = f"{role_place}. {username}: {item[1]} roles\n"
if len(value_string) + len(current) >= 1024:
await ctx.send("Too many users, displaying as many as possible")
break
else:
value_string += current
role_place += 1
embed.add_field(name="Role leaderboard", value=value_string)
await ctx.send(embed=embed)
except ValueError:
await ctx.message.add_reaction(no)
await ctx.message.remove_reaction(ok, bot.user)
await ctx.send("Command only accepts either numbers or `full` as arguments")
@bot.command(aliases=["rule34"])
@commands.is_nsfw()
async def r34(ctx, *, tags):
# Check for illegal tags
if ("cub" or "loli" or "shota" or "child" or "underage" or "shotacon") in tags:
await ctx.message.add_reaction(nsfw)
await ctx.send("NEI TOS")
else:
logger.info(f"Rule34: Searching for {tags}")
await ctx.message.add_reaction(ok)
# TODO: Swap to use getImages instead
xml_url = rule34.URLGen(tags + "+-cub -loli -underage -shotacon -shota")
logger.info(f"Got API url for {tags}: {xml_url}")
xml = bs(requests.get(xml_url).text, "lxml")
urls = []
for post in xml.findAll("post"):
file_url = post.attrs["file_url"]
urls += [file_url]
count = len(urls)
count_text = str(count)
if count >= 100:
count_text = "100+"
if count >= 1:
random_url = random.choice(urls)
await ctx.send(f"Found {count_text} results, here is one of them")
await ctx.send(random_url)
logger.info(f"Rule34: Sent {random_url} with tag(s): {tags}")
else:
logger.info(f"Rule34: No posts were found with the tag(s): {tags}")
await ctx.send(f"No posts were found with the tag(s): {tags}")
@r34.error
async def r34_error(ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
await ctx.message.add_reaction(no)
await ctx.send(f"Missing tags to search for.\nUsage: `{prefix}r34/rule34 <tags>` or for multiple tags `{prefix}r34/rule34 <tag1> <tag2> ...`")
@bot.command()
@commands.is_owner()
async def close(ctx):
    await ctx.message.add_reaction("👋")
await bot.close()
bot.run(token)
``` |
{
"source": "jorgen99/smarthus",
"score": 4
} |
#### File: smarthus/events/greenhouse.py
```python
import sqlite3
from sqlite3 import Error
def add_temperature(temperature, db="greenhouse.db"):
    conn = None
    try:
conn = sqlite3.connect(db)
cursor = conn.cursor()
create_table_if_not_exist(cursor)
sql = """ INSERT INTO greenhouse(time, temperature) values(?, ?) """
cursor.execute(sql, temperature)
conn.commit()
except sqlite3.Error as error:
print("Error while talking to sqlite...", error)
finally:
if conn:
conn.close()
def create_table_if_not_exist(cursor):
cursor.execute(
"""CREATE TABLE IF NOT EXISTS greenhouse (
time text NOT NULL,
temperature real
);"""
)
def apa(bepa):
return "Hej {}".format(bepa)
```
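Note that the `INSERT` statement above binds two placeholders, so the `temperature` argument must already be a `(time, temperature)` tuple. A minimal usage sketch (the import path and values are assumptions, not taken from the repo):
```python
from events.greenhouse import add_temperature

# One reading; the tuple supplies both placeholders in order (time, temperature).
add_temperature(("2021-06-01 12:00:00", 21.5), db="greenhouse.db")
```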
#### File: smarthus/events/sensor.py
```python
class Sensor:
def __init__(self, house, unit):
self.house = house
self.unit = unit
def event_is_from_me(self, event):
return self.house == event.house and self.unit == event.unit
``` |
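A short sketch of how `Sensor.event_is_from_me` might be exercised; the event object here is a stand-in with `house`/`unit` attributes, not a class from this repo:
```python
from types import SimpleNamespace

from events.sensor import Sensor

sensor = Sensor(house="12345", unit="1")
event = SimpleNamespace(house="12345", unit="1")
print(sensor.event_is_from_me(event))  # True only when both house and unit match
```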
{
"source": "jorgenavarroortiz/collision_avoidance_resource_allocation_lorawan_device",
"score": 3
} |
#### File: collision_avoidance_resource_allocation_lorawan_device/lib/LoRaAirTimeCalc.py
```python
from network import WLAN, LoRa # For Pycom MicroPython
#import LoRa # For Python3
import struct
from math import ceil
# From atom console (pycom)
#from LoRaAirTimeCalc import *
#airtimetheoretical(20, 10, LoRa.BW_125KHZ, LoRa.CODING_4_5)
# Using Pycom constants for bw and cr, calculates the DR (datarate) in bps
def dataratetheoretical(sf, bw, cr):
if bw == LoRa.BW_125KHZ:
bwv = 125
if bw == LoRa.BW_250KHZ:
bwv = 250
if bw == LoRa.BW_500KHZ:
bwv = 500
crv = 4/(4 + cr)
return 1000*sf*bwv*(crv)/2**sf
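# Worked example (values chosen for illustration): SF7, 125 kHz, CR 4/5 (cr == 1)
# gives 1000 * 7 * 125 * (4/5) / 2**7 = 5468.75 bps, i.e. the familiar ~5.47 kbps of EU868 DR5.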
# Air time (in seconds) theoretical calculation for LoRa-RAW, where there is a default preamble of 8 bytes plus 5 bytes of CRC, etc
def airtimetheoretical(payloadsize, sf, bw, cr):
if bw == LoRa.BW_125KHZ:
bwv = 125
if bw == LoRa.BW_250KHZ:
bwv = 250
if bw == LoRa.BW_500KHZ:
bwv = 500
if sf in [11,12]:
lowDRopt = 1
else:
lowDRopt = 0
tsym = (2**sf)/(bwv*1000)
tpreamble = (8 + 4.25)*tsym # Preamble with 8 bytes
numbersymbolspayload = 8 + max(ceil((8*payloadsize - 4*sf + 28 + 16)/(4*(sf - 2*lowDRopt)))*(4 + cr),0)
tpayload = numbersymbolspayload*tsym
tpacket = tpreamble + tpayload
return tpacket, tpreamble, tpayload, tsym, numbersymbolspayload
```
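As a sanity check on `airtimetheoretical`, a 20-byte payload at SF10, 125 kHz and CR 4/5 comes out at roughly 0.37 s on air (12.25 preamble symbols plus 33 payload symbols of 8.192 ms each, computed by hand from the formula above, not taken from the repo):
```python
from network import LoRa  # Pycom firmware constants
from LoRaAirTimeCalc import airtimetheoretical

tpacket, tpreamble, tpayload, tsym, nsym = airtimetheoretical(20, 10, LoRa.BW_125KHZ, LoRa.CODING_4_5)
print("{:.3f} s on air".format(tpacket))  # ~0.371 s
```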
#### File: jorgenavarroortiz/collision_avoidance_resource_allocation_lorawan_device/main.py
```python
import machine
from network import WLAN
import utime
from network import LoRa
import socket
import time
import ubinascii
import crypto
from LoRaAirTimeCalc import *
import sys
from pycoproc import Pycoproc
from microWebCli import MicroWebCli
## PARAMETERS
# Debug messages
debug = 0
# Fixed channel and spreading factor from CARA initial resource block, for testing...
bFixedChannelAndDR=True
# Time to reconnect to Wi-Fi
wifiRetryTime = 20.0
# OTAA or ABP
# VERY IMPORTANT!!! If ABP is used, make sure that RX2 data rate is set to 5
# and RX2 frequency is set to 869.525 MHz (chirpstack -> device profile ->
# -> join (OTAA/ABP))). Set Class-C confirmed downlink timeout to 5 seconds in
# both cases (chirpstack -> device profile -> class-C)
# IMPORTANT!!!: for ABP, first activate the device (before starting this program)
bOTAA = True
# For OTAA
AppEUI = '<KEY>' # Not used
AppKey = '00000000000000000000000000000001'
# For ABP
DevAddr = '00000001'
NwkSKey = '00000000000000000000000000000001'
AppSKey = '00000000000000000000000000000001'
# Retransmission time for JOINREQ
joinReqRtxTime = 5.0
# First transmission starting on a CARA period (only for debugging)
bFirstTransmissionStartingOnACARAPeriod = False
# Global variables
selectedFreq = 0
selectedDR = 0
## FUNCTIONS
# General functions
def Random():
r = crypto.getrandbits(32)
return ((r[0]<<24)+(r[1]<<16)+(r[2]<<8)+r[3])/4294967295.0
def RandomRange(rfrom, rto):
return Random()*(rto-rfrom)+rfrom
def zfill(s, width):
return '{:0>{w}}'.format(s, w=width)
# Functions related to board
def showBoard(lora):
print("[INFO] Detected board:", sys.platform)
# Expansion board
#pyexp = Pycoproc()
#pid = pyexp.read_product_id()
#if (pid == 61458):
# print("Detected expansion board: PySense")
#elif (pid == 61459):
# print("Detected expansion board: PyTrack")
#else:
# print("Expansion board identifier: ", pid)
## WI-FI MAC address
#print("Device unique ID:", ubinascii.hexlify(machine.unique_id()).upper().decode('utf-8'))
print("[INFO] Wi-Fi MAC: ", ubinascii.hexlify(WLAN().mac()[0]).upper().decode('utf-8'))
## LORAWAN MAC address
print("[INFO] LORAWAN DevEUI:", ubinascii.hexlify(lora.mac()).upper().decode('utf-8'))
# Functions related to Wi-Fi
def connectWiFi():
# Connect to Wi-Fi for synchronizing using NTP
#print("Trying to connect to Wi-Fi network...")
wlan = WLAN(mode=WLAN.STA)
#wlan.connect('ARTEMIS', auth=(WLAN.WPA2, 'wimunet!'))
wifi_count=1
while not wlan.isconnected():
# machine.idle()
print('[INFO] Connecting to Wi-Fi, attempt {}...'.format(wifi_count))
wlan.connect('ARTEMIS', auth=(WLAN.WPA2, 'wimunet!')) #, timeout=5000)
time.sleep(wifiRetryTime)
wifi_count = wifi_count + 1
print('[INFO] WiFi connected')
print(wlan.ifconfig())
# Functions related to synchronization
def synchronizeTime():
# Synchronization
rtc = machine.RTC()
rtc.ntp_sync('pool.ntp.org', update_period=3600)
while not rtc.synced():
machine.idle()
print("[INFO] RTC NTP sync complete")
print(rtc.now())
utime.timezone(7200)
print("[INFO] Local time:", end=" ")
print(utime.localtime())
return rtc
# Functions related to LoRaWAN
def initializeLoRaWAN(randomTimeForJoining):
timeToWaitForJoining = RandomRange(0,randomTimeForJoining)
print("[INFO] Time waiting before joining = {:.1f}".format(timeToWaitForJoining))
time.sleep(timeToWaitForJoining)
if (bOTAA):
# Create an OTAA authentication parameters
app_eui = ubinascii.unhexlify(AppEUI)
app_key = ubinascii.unhexlify(AppKey)
# Join a network using OTAA (Over the Air Activation)
lora.join(activation=LoRa.OTAA, auth=(app_eui, app_key), timeout=0)
else:
# Create an ABP authentication params
dev_addr = struct.unpack(">l", ubinascii.unhexlify(DevAddr))[0]
nwk_swkey = ubinascii.unhexlify(NwkSKey)
app_swkey = ubinascii.unhexlify(AppSKey)
# Join a network using ABP (Activation By Personalization)
lora.join(activation=LoRa.ABP, auth=(dev_addr, nwk_swkey, app_swkey), timeout=0)
# Wait until the module has joined the network
print('[INFO] Not joined yet...')
while not lora.has_joined():
#blink(red, 0.5, 1)
time.sleep(2.5)
print('[INFO] Not joined yet...')
print('[INFO] --- Joined Sucessfully --- ')
# Create a LoRa socket
s = socket.socket(socket.AF_LORA, socket.SOCK_RAW)
# Set the LoRaWAN data rate (DR0...DR5 - the lower DR, the higher SF)
s.setsockopt(socket.SOL_LORA, socket.SO_DR, 5)
# Set CONFIRMED to false
s.setsockopt(socket.SOL_LORA, socket.SO_CONFIRMED, False)
return s
def frequencyForChannel(value):
return (867100000 + value*200000)
def convertDRtoSF(value):
return (12 - value)
def convertSFtoDR(value):
return (12 - value)
def setTransmissionParameters(s, selectedFreq, selectedDR):
if (debug > 0):
print("[DEBUG] Changing frequency to {:d}".format(selectedFreq))
# Add all channels with the selected frequency
for channel in range(0, 15):
lora.add_channel(channel, frequency=selectedFreq, dr_min=0, dr_max=5)
# Set spreading factor
s.setsockopt(socket.SOL_LORA, socket.SO_DR, selectedDR)
if (debug > 0):
print("[DEBUG] Changing DR to {:d} (SF={:d})".format(selectedDR, convertDRtoSF(selectedDR)))
def createResourceBlocksLists(sfMask):
channelsList = []
sfList = []
sfMaskStr = bin(sfMask)[2:]
sfMaskStr = zfill(sfMaskStr, 6)
#print("sfMask = {}".format(bin(sfMask)))
if (debug > 0):
print("[DEBUG] SF mask = {}".format(sfMaskStr))
i = 0
for sfIndex in reversed(range(0, len(sfMaskStr))):
for channel in range(0, 8):
if (sfMaskStr[sfIndex] == "1"):
sfList.append(12-sfIndex)
channelsList.append(frequencyForChannel(channel))
if (debug > 0):
print("[DEBUG] Resource block {:d} with channel {:d} and SF {:d}".format(i, channel, 12-sfIndex))
i = i + 1
return [channelsList, sfList]
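# Worked example (mask value chosen for illustration): sfMask = 3 -> "000011", so only
# SF7 (bit 0) and SF8 (bit 1) are enabled.  That yields 16 resource blocks: blocks 0-7
# use SF7 on 867.1-868.5 MHz (200 kHz steps) and blocks 8-15 use SF8 on the same 8 channels.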
def setDataRate(s, selectedDR):
# Set spreading factor
s.setsockopt(socket.SOL_LORA, socket.SO_DR, selectedDR)
if (debug > 0):
print("[DEBUG] Changing DR to {:d} (SF={:d})".format(selectedDR, convertDRtoSF(selectedDR)))
def generateMessage(messageCounter):
if (messageCounter < 10):
message = "Testing data....." + str(messageCounter)
elif (messageCounter < 100):
message = "Testing data...." + str(messageCounter)
elif (messageCounter < 1000):
message = "Testing data..." + str(messageCounter)
elif (messageCounter < 10000):
message = "Testing data.." + str(messageCounter)
else:
message = "Testing data." + str(messageCounter)
return message
# Functions related to resource blocks
def getCARAParameters():
contentBytes = MicroWebCli.GETRequest('http://192.168.1.205/CARA/joinTime')
randomTimeForJoining = float(contentBytes)
print("[INFO] Random time for joining = {:f}".format(randomTimeForJoining))
# Time between transmissions = fixedTime + rand(randomTime)
contentBytes = MicroWebCli.GETRequest('http://192.168.1.205/CARA/fixedTime')
fixedTime = float(contentBytes)
print("[INFO] Fixed time between LoRaWAN frames = {:f}".format(fixedTime))
contentBytes = MicroWebCli.GETRequest('http://192.168.1.205/CARA/randomTime')
randomTime = float(contentBytes)
print("[INFO] Ramdom time between LoRaWAN frames = {:f}".format(randomTime))
# Duration of each period with same transmission parameters (freq and SF)
# We assume that 48*durationOfPeriod is a divisor of 24h*3600s
contentBytes = MicroWebCli.GETRequest('http://192.168.1.205/CARA/durationOfPeriod')
durationOfPeriod = float(contentBytes)
print("[INFO] Duration of CARA period = {:f}".format(durationOfPeriod))
# Avoid (or not) border effect (transmissions that spread over two periods)
contentBytes = MicroWebCli.GETRequest('http://192.168.1.205/CARA/avoidBorderEffect')
avoidBorderEffect = int(contentBytes)
print("[INFO] Avoid border effect = {:d}".format(avoidBorderEffect))
contentBytes = MicroWebCli.GETRequest('http://192.168.1.205/CARA/borderEffectGuardTime')
borderEffectGuardTime = float(contentBytes)
print("[INFO] Border effect guard time = {:f}".format(borderEffectGuardTime))
contentBytes = MicroWebCli.GETRequest('http://192.168.1.205/CARA/count_v2.php')
countNodes = int(contentBytes)
print("[INFO] Nodes with version 2 counted = {:d}".format(countNodes))
return [randomTimeForJoining, fixedTime, randomTime, durationOfPeriod, avoidBorderEffect, borderEffectGuardTime]
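# Each endpoint above is assumed to return a bare numeric value as plain text
# (e.g. "300.0" for durationOfPeriod), which is why the response body is cast
# directly with float()/int().  The CARA server address 192.168.1.205 is hard-coded.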
def receiveJoinAccept():
# Waiting for JoinAccept message
JoinAcceptReceived = False
# JoinRequest has to be transmitted
timeToRetransmitJoinReq = True
while not JoinAcceptReceived:
if (timeToRetransmitJoinReq):
s.setblocking(True)
s.send("#JOINREQ#")
print("[INFO] #JOINREQ# sent!")
s.setblocking(False)
timeToRetransmitJoinReq = False
timeJoinReq1 = utime.ticks_ms()
#if (bOTAA):
if (True):
data, port = s.recvfrom(64)
else:
data = s.recv(64)
lg = len (data)
if lg > 0:
if (debug > 0):
print("[DEBUG] Downlink Port={:d} Size={:d} Payload={}".format(port, lg, ubinascii.hexlify(data).upper()) )
strDecodedData = ubinascii.a2b_base64(ubinascii.b2a_base64(data)).decode('utf-8')
if (debug > 0):
print("[DEBUG] Downlink Port={:d} Size={:d} Payload={}".format(port, lg, strDecodedData) )
if strDecodedData.find("#JOINACC#") == 0:
print ("[INFO] #JOINACC# received")
JoinAcceptReceived = True
strDecodedDataSplit = strDecodedData.split()
caraEnabled = int(strDecodedDataSplit[1])
if caraEnabled == 1:
print("[INFO] CARA enabled")
initialResourceBlock = int(strDecodedDataSplit[2])
sfMask = int(strDecodedDataSplit[3])
print("[INFO] Initial resource block = {:d}".format(initialResourceBlock))
print("[INFO] SF mask = {:d}".format(sfMask))
#print("SF mask={:d}".format(sfMask))
channelsList, sfList = createResourceBlocksLists(sfMask)
selectedSF = sfList[initialResourceBlock]
selectedDR = convertSFtoDR(selectedSF)
# Remove all the channels (first three are default channels and cannot be removed)
for channel in range(0, 15):
lora.remove_channel(channel)
else:
print("[INFO] CARA disabled, using standard LoRaWAN...")
# If CARA is disabled, the server will use 0...5 as the initial resource block,
# which will be used to define the DR (i.e. the spreading factor) for each node
selectedDR = int(strDecodedDataSplit[2]) #[4])
setDataRate(s, selectedDR)
print("[INFO] Selected DataRate = {:d}".format(selectedDR))
time.sleep(0.1)
timeJoinReq2 = utime.ticks_ms()
timeFromLastJoinReq = utime.ticks_diff(timeJoinReq2,timeJoinReq1)
#print("Time (ms) from last transmission of Join Request = {:.3f}".format(timeFromLastJoinReq))
if (timeFromLastJoinReq > (1000*joinReqRtxTime)):
timeToRetransmitJoinReq = True
return [caraEnabled, initialResourceBlock, sfMask, selectedDR, channelsList, sfList]
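# Based on the parsing above, the #JOINACC# downlink is assumed to be a space-separated
# string of the form:
#   "#JOINACC# 1 <initialResourceBlock> <sfMask>"   when CARA is enabled
#   "#JOINACC# 0 <selectedDR>"                      when CARA is disabled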
def assignmentAlgorithm1(timeNextTransmission, channelsList, sfList, initialResourceBlock):
# THIS ALGORITHM SELECT THE CHANNEL AND SF FOLLOWING A SEQUENTIAL ORDER
if (debug > 0):
print("[DEBUG] Time for next transmission (sec) = {:.3f}".format(timeNextTransmission))
#print("durationOfPeriod = {:.2f}".format(durationOfPeriod))
indexCurrentPeriod = int(timeNextTransmission / durationOfPeriod)
if (debug > 0):
print("[DEBUG] Current period = {:d}".format(indexCurrentPeriod))
noResourceBlocks = len(channelsList)
currentResourceBlock = (indexCurrentPeriod+initialResourceBlock) % noResourceBlocks
selectedFreq = channelsList[currentResourceBlock]
selectedSF = sfList[currentResourceBlock]
selectedDR = convertSFtoDR(selectedSF)
#nextResourceBlock = (indexCurrentPeriod+initialResourceBlock+1) % noResourceBlocks
#selectedFreqForNextPeriod = channelsList[nextResourceBlock]
#selectedSFForNextPeriod = sfList[nextResourceBlock]
#selectedDRForNextPeriod = convertSFtoDR(selectedSFForNextPeriod)
if (debug > 0):
print("[DEBUG] Current resource block = {:d}".format(currentResourceBlock))
print("[DEBUG] Selected frequency = {:d}".format(selectedFreq))
print("[DEBUG] Selected DR = {:d}".format(selectedDR))
#print("Next resource block = {:d}".format(nextResourceBlock))
#print("Selected frequency for next period = {:d}".format(selectedFreqForNextPeriod))
#print("Selected DR for next period = {:d}".format(selectedDRForNextPeriod))
#return [selectedFreq, selectedDR, selectedFreqForNextPeriod, selectedDRForNextPeriod]
return [selectedFreq, selectedDR]
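# Worked example (numbers chosen for illustration): with durationOfPeriod = 300 s and
# 16 resource blocks, a node holding initialResourceBlock = 2 that transmits at
# t = 900 s falls in period 3 and therefore uses resource block (3 + 2) % 16 = 5.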
#def checkBorderEffect(timeNextTransmission, selectedFreq, selectedDR, selectedFreqForNextPeriod, selectedDRForNextPeriod, borderEffectGuardTime, payloadsize):
def checkBorderEffect(timeNextTransmission, selectedFreq, selectedDR, borderEffectGuardTime, payloadsize):
# CHECK TIME OVER AIR TO AVOID BORDER EFFECTS (WAIT FOR NEXT PERIOD IF NECESSARY)
airTime = airtimetheoretical(payloadsize, convertDRtoSF(selectedDR), LoRa.BW_125KHZ, LoRa.CODING_4_5)[0]
limitForThisPeriod = (int(timeNextTransmission / durationOfPeriod)+1) * durationOfPeriod
if (debug > 0):
print("checkBorderEffect: timeNextTransmission={:.2f}, airTime={:.2f}, limitForThisPeriod={:.2f}".format(timeNextTransmission, airTime, limitForThisPeriod))
if ( (timeNextTransmission + airTime + borderEffectGuardTime) > limitForThisPeriod ):
# The transmission has to wait for next period
#timeNextTransmission = limitForThisPeriod + 0.1
# timeNextTransmission = timeNextTransmission + airTime + borderEffectGuardTime + 0.1
# if (debug > 0):
# print("AVOIDING BORDER EFFECT: timeNextTransmission = {:.3f}".format(timeNextTransmission))
borderTransmission = True
else:
# The transmission can be fitted in the current period
# print("The transmission can be fitted in the current period")
borderTransmission = False
# return timeNextTransmission
return borderTransmission
###################
## MAIN FUNCTION ##
###################
# INITIALIZE LORA (LORAWAN mode. Europe = LoRa.EU868)
lora = LoRa(mode=LoRa.LORAWAN, region=LoRa.EU868, public=True, tx_retries=3, device_class=LoRa.CLASS_C, adr=False)
# BOARD INFORMATION
showBoard(lora)
# CONNECT TO WIFI
timeToWaitForWiFi = RandomRange(0,10)
time.sleep(timeToWaitForWiFi)
connectWiFi()
## TIME SYNCHRONIZATION
rtc = synchronizeTime()
# OBTAIN EXPERIMENT PARAMETERS
randomTimeForJoining, fixedTime, randomTime, durationOfPeriod, avoidBorderEffect, borderEffectGuardTime = getCARAParameters()
## LORAWAN (initialize and return a socket)
s = initializeLoRaWAN(randomTimeForJoining)
# Waiting for Join Accept message (from CARA server)
caraEnabled, initialResourceBlock, sfMask, selectedDR, channelsList, sfList = receiveJoinAccept()
# Infinite loop
messageCounter = 0
while True:
message = generateMessage (messageCounter) # Testing data.....01, ...
payloadsize = len(message)
messageCounter = messageCounter + 1
# Time between transmissions
randNo = fixedTime + RandomRange(0,randomTime)
if (debug > 0):
print("[DEBUG] Random number = {:.1f}".format(randNo))
# Initially we assume that there is no border effect (checked later)
borderTransmission = False
if (messageCounter == 1):
year, month, day, hour, minute, second, usecond, nothing = rtc.now()
currentTime = hour*3600 + minute*60 + second + usecond/1000000
if (bFirstTransmissionStartingOnACARAPeriod):
# In order to start (first message) at the beginning of one period... just for testing
limitForThisPeriod = (int(currentTime / durationOfPeriod)+1) * durationOfPeriod
timeNextTransmission = limitForThisPeriod + 1.0
else:
# First packet sent at random time
timeNextTransmission = currentTime + randNo
selectedFreq, selectedDR = assignmentAlgorithm1(timeNextTransmission, channelsList, sfList, initialResourceBlock)
else:
# Not the first packet
if caraEnabled == 1:
# Our algorithm for assigning a frequency and a spreading factor for this transmission
timeNextTransmission = timeLastTransmission + randNo
selectedFreq, selectedDR = assignmentAlgorithm1(timeNextTransmission, channelsList, sfList, initialResourceBlock)
# If border effect has to be avoided
if avoidBorderEffect == 1:
# timeNextTransmission = checkBorderEffect(timeNextTransmission, selectedFreq, selectedDR, borderEffectGuardTime, payloadsize)
# selectedFreq, selectedDR = assignmentAlgorithm1(timeNextTransmission, channelsList, sfList, initialResourceBlock)
borderTransmission = checkBorderEffect(timeNextTransmission, selectedFreq, selectedDR, borderEffectGuardTime, payloadsize)
# FOR TESTING, FIXED CHANNEL AND DR (OBTAINED FROM CARA - #JOINACC# PARAMETERS)...
if bFixedChannelAndDR:
selectedFreq = channelsList[initialResourceBlock]
selectedSF = sfList[initialResourceBlock]
selectedDR = convertSFtoDR(selectedSF)
# Set transmission parameters (frequency and spreading factor)
setTransmissionParameters(s, selectedFreq, selectedDR)
# end if (caraEnabled == 1)
airTime = airtimetheoretical(payloadsize, convertDRtoSF(selectedDR), LoRa.BW_125KHZ, LoRa.CODING_4_5)[0]
year, month, day, hour, minute, second, usecond, nothing = rtc.now()
currentTime = hour*3600 + minute*60 + second + usecond/1000000
#timeToWait = randNo - (currentTime - lastTime) - airTime
timeToWait = timeNextTransmission - currentTime
if (debug > 0):
print("[DEBUG] currentTime = {:.3f}".format(currentTime))
print("[DEBUG] timeNextTransmission = {:.3f}".format(timeNextTransmission))
print("[DEBUG] timeToWait = {:.3f}".format(timeToWait))
print("[DEBUG] airTime = {:.3f}".format(airTime))
timeLastTransmission = timeNextTransmission
if (timeToWait > 0):
print("[INFO] Waiting for next transmission (t={:.3f})...".format(timeNextTransmission))
time.sleep(timeToWait)
else:
print("[INFO] Next transmission starts immediately (time between transmissions too short)!")
if (borderTransmission == False):
s.setblocking(True)
s.send(message)
s.setblocking(False)
year, month, day, hour, minute, second, usecond, nothing = rtc.now()
print("[INFO] Message sent at {:02d}:{:02d}:{:02d}.{:.06d} on {:d} Hz with DR {:d} (air time {:.3f} s)".format(hour, minute, second, usecond, selectedFreq, selectedDR, airTime))
else:
year, month, day, hour, minute, second, usecond, nothing = rtc.now()
print("[INFO] Message not sent at {:02d}:{:02d}:{:02d}.{:.06d} due to border effect".format(hour, minute, second, usecond))
``` |