metadata | text
---|---
{
"source": "jonathan-felicity/codepleateau",
"score": 3
} |
#### File: codepleateau/scripts/pen.py
```python
import os
import shutil
def write(name, type):
    """Generate boilerplate files by copying the template directory for the given type."""
    if type:
        repo = f'{os.path.dirname(__file__)}/repo/{type}'
        shutil.copytree(repo, name)
    else:
        print("No template type given; nothing to generate.")
``` |
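A minimal usage sketch of the `write` helper above; the template name `flask` and target directory are illustrative and assume a matching folder exists under the package's `repo/` directory.
```python
# Illustrative only: assumes scripts/repo/flask exists as a template directory
# and that this is run from the scripts/ directory so `pen` is importable.
from pen import write

write("my_new_app", "flask")   # copies repo/flask into ./my_new_app
write("ignored", None)         # falsy type: prints a message and copies nothing
```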
{
"source": "jonathanfeng/new_horizons",
"score": 3
} |
#### File: app/bin/get_tz.py
```python
import pytz
from datetime import datetime
def get_timezones():
timezones = pytz.common_timezones
tzs = []
for timezone in timezones:
tzs.append([timezone, datetime.now(pytz.timezone(timezone)).strftime('%z')])
return tzs
```
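For reference, a quick way to inspect what `get_timezones` returns: each entry pairs an IANA zone name with its current UTC offset string. The exact offsets depend on when the code is run (daylight saving time).
```python
# Illustrative check of get_timezones(); offsets vary with the current date (DST).
from app.bin.get_tz import get_timezones

for name, offset in get_timezones()[:3]:
    print(name, offset)   # e.g. Africa/Abidjan +0000
```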
#### File: new_horizons/app/routes.py
```python
from flask import render_template, request, send_from_directory
import os
from app import app
from app.bin import find_fish
from app.bin import find_bugs
from app.bin.get_tz import get_timezones
from datetime import datetime
northern_fishes = find_fish.load_northern()
southern_fishes = find_fish.load_southern()
northern_bugs = find_bugs.load_northern()
southern_bugs = find_bugs.load_southern()
print('Fish and bugs loaded.')
@app.route('/')
@app.route('/index')
def index():
return render_template('index.html')
@app.route('/fish', methods=['GET', 'POST'])
def fish():
if request.method == 'POST':
hemisphere = request.form.get('hemisphere', None)
timezone = request.form.get('timezone', None)
        if hemisphere is not None and timezone is not None:
if hemisphere == 'Northern':
fish_list = find_fish.main(timezone, northern_fishes)
else:
fish_list = find_fish.main(timezone, southern_fishes)
return render_template('critter-table.html', hemisphere=hemisphere, timezones=get_timezones(), timezone=timezone, critter_list=fish_list, title='Fish Fetcher', critterType='fish')
return render_template('critter.html', timezones=get_timezones(), title='Fish Fetcher', critterType='fish')
else:
return render_template('critter.html', timezones=get_timezones(), title='Fish Fetcher', critterType='fish')
@app.route('/bugs', methods=['GET', 'POST'])
def bugs():
if request.method == 'POST':
hemisphere = request.form.get('hemisphere', None)
timezone = request.form.get('timezone', None)
        if hemisphere is not None and timezone is not None:
if hemisphere == 'Northern':
bugs_list = find_bugs.main(timezone, northern_bugs)
else:
bugs_list = find_bugs.main(timezone, southern_bugs)
return render_template('critter-table.html', hemisphere=hemisphere, timezones=get_timezones(), timezone=timezone, critter_list=bugs_list, title='Bug Bounties', critterType='bugs')
return render_template('critter.html', timezones=get_timezones(), title='Bug Bounties', critterType='bugs')
else:
return render_template('critter.html', timezones=get_timezones(), title='Bug Bounties', critterType='bugs')
@app.route('/favicon.ico')
def favicon():
return send_from_directory(os.path.join(app.root_path,'static'), 'favicon.ico', mimetype='image/vnd.microsoft.icon')
@app.route('/apple-touch-icon.png')
def apple_touch_icon():
return send_from_directory(os.path.join(app.root_path,'static'), 'apple-touch-icon.png')
@app.route('/android-icon-192x192.png')
def android_icon():
return send_from_directory(os.path.join(app.root_path,'static'), 'android-icon-192x192.png')
``` |
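A hedged smoke test of the `/fish` route above using Flask's test client; the form values are examples only, and the import assumes `app/__init__.py` exposes `app` as implied by the import at the top of `routes.py`.
```python
# Sketch only: exercises the POST branch of /fish with example form values.
from app import app

client = app.test_client()
resp = client.post("/fish", data={"hemisphere": "Northern",
                                  "timezone": "America/New_York"})
print(resp.status_code)   # 200 if the critter-table template renders
```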
{
"source": "JonathanFerraz/home-assistant",
"score": 3
} |
#### File: custom_components/localtuya/sensor.py
```python
import logging
from functools import partial
import voluptuous as vol
from homeassistant.components.sensor import DEVICE_CLASSES, DOMAIN
from homeassistant.const import (
CONF_DEVICE_CLASS,
CONF_UNIT_OF_MEASUREMENT,
STATE_UNKNOWN,
)
from .common import LocalTuyaEntity, async_setup_entry
from .const import CONF_SCALING
_LOGGER = logging.getLogger(__name__)
DEFAULT_PRECISION = 2
def flow_schema(dps):
"""Return schema used in config flow."""
return {
vol.Optional(CONF_UNIT_OF_MEASUREMENT): str,
vol.Optional(CONF_DEVICE_CLASS): vol.In(DEVICE_CLASSES),
vol.Optional(CONF_SCALING): vol.All(
vol.Coerce(float), vol.Range(min=-1000000.0, max=1000000.0)
),
}
class LocaltuyaSensor(LocalTuyaEntity):
"""Representation of a Tuya sensor."""
def __init__(
self,
device,
config_entry,
sensorid,
**kwargs,
):
"""Initialize the Tuya sensor."""
super().__init__(device, config_entry, sensorid, _LOGGER, **kwargs)
self._state = STATE_UNKNOWN
@property
def state(self):
"""Return sensor state."""
return self._state
@property
def device_class(self):
"""Return the class of this device."""
return self._config.get(CONF_DEVICE_CLASS)
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._config.get(CONF_UNIT_OF_MEASUREMENT)
def status_updated(self):
"""Device status was updated."""
state = self.dps(self._dp_id)
scale_factor = self._config.get(CONF_SCALING)
if scale_factor is not None and isinstance(state, (int, float)):
state = round(state * scale_factor, DEFAULT_PRECISION)
self._state = state
async_setup_entry = partial(async_setup_entry, DOMAIN, LocaltuyaSensor, flow_schema)
``` |
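As a worked example of the scaling applied in `status_updated`, a raw Tuya datapoint reporting temperature in tenths of a degree with a configured scaling of 0.1 would be converted like this (values are hypothetical):
```python
# Hypothetical values: a raw DP of 235 with CONF_SCALING = 0.1 becomes 23.5.
raw_dp_value = 235
scale_factor = 0.1
DEFAULT_PRECISION = 2
print(round(raw_dp_value * scale_factor, DEFAULT_PRECISION))  # -> 23.5
```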
{
"source": "jonathanfgarcia/jpfg-logging",
"score": 2
} |
#### File: jpfg/logging/__init__.py
```python
from jpfg.logging.eventlogger import EventLogger
CRITICAL = 50
ERROR = 40
WARNING = 30
INFO = 20
DEBUG = 10
NOTSET = 0
def getEventLogger(name=None, host='localhost', port=24224):
"""Returns an EventLogger for structed event logging.
The optional host and port parameters are used to forward messages to the centralized fluentd
server at the specified `$host:$port`."""
return EventLogger(name, host, port)
def critical(name, event_or_message=None, **kwargs):
"""Emits an event log with level CRITICAL."""
kwargs.update({"__caller_stack": 3})
getEventLogger(name).critical(event_or_message, **kwargs)
def error(name, event_or_message=None, **kwargs):
"""Emits an event log with level ERROR."""
kwargs.update({"__caller_stack": 3})
getEventLogger(name).error(event_or_message, **kwargs)
def warning(name, event_or_message=None, **kwargs):
"""Emits an event log with level WARNING."""
kwargs.update({"__caller_stack": 3})
getEventLogger(name).warning(event_or_message, **kwargs)
def info(name, event_or_message=None, **kwargs):
"""Emits an event log with level INFO."""
kwargs.update({"__caller_stack": 3})
getEventLogger(name).info(event_or_message, **kwargs)
def debug(name, event_or_message=None, **kwargs):
"""Emits an event log with level DEBUG."""
kwargs.update({"__caller_stack": 3})
getEventLogger(name).debug(event_or_message, **kwargs)
``` |
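A usage sketch of the module above; the logger name, fluentd host/port and event fields are placeholders, not values taken from the project.
```python
# Placeholder names and fields; assumes a fluentd forwarder is reachable at localhost:24224.
import jpfg.logging as eventlog

logger = eventlog.getEventLogger("checkout", host="localhost", port=24224)
logger.info("order_placed", order_id=1234, total_cents=4999)

# The one-shot helpers create a logger per call and pass a __caller_stack hint:
eventlog.error("checkout", "payment_failed", order_id=1234, reason="card_declined")
```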
{
"source": "jonathanfiat/ApproxiPong",
"score": 3
} |
#### File: ApproxiPong/graphics/generate_figures_part2.py
```python
import numpy as np
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from matplotlib import patches, lines, gridspec, text
from matplotlib.animation import FFMpegWriter
import common
PART_NUM = 2
def generate_fig1():
fig = Figure(figsize=(6, 4))
canvas = FigureCanvas(fig)
ax = fig.add_axes((0.01, 0.01, 0.98, 0.98))
common.set_ax_params(ax)
ax.axis([0., 1.5, 0., 1.])
r = 0.05
ax.add_patch(patches.FancyBboxPatch(
(0.1 - r, 0.5 - r),
width=2 * r,
height=2 * r,
boxstyle="round,pad=0.01",
facecolor="lightblue"
))
ax.text(0.1, 0.5, "$s$", ha="center", va="center", size="large")
heights = np.linspace(0.8, 0.2, 3)
x = np.linspace(0.3 + r + 0.01, 1.4, 10)
for i in range(3):
h = heights[i]
for j in range(3):
base = h + (j - 1) / 12.
y = base + np.random.uniform(-1., 1., 10) / 30.
y[0] = h + (j - 1) / 24.
ax.add_artist(lines.Line2D(x, y, color="black"))
ax.add_patch(patches.Circle((x[-1], y[-1]), 0.01, color="black"))
ax.add_patch(patches.FancyBboxPatch(
(0.3 - r, h - r),
width=2 * r,
height=2 * r,
boxstyle="round,pad=0.01",
facecolor="lightgreen"
))
ax.text(0.3, h, "$a_{}$".format(i),
ha="center", va="center", size="large")
ax.add_patch(common.arrow_by_start_end(
(0.1 + r + 0.01, 0.5 + r * (1 - i) / 3.), (0.3 - r - 0.01, h),
length_includes_head=True, color="black", head_width=0.02))
common.save_next_fig(PART_NUM, fig)
def generate_fig2():
fig = Figure(figsize=(6, 4))
canvas = FigureCanvas(fig)
ax = fig.add_axes((0.01, 0.01, 0.98, 0.98))
common.set_ax_params(ax)
ax.axis([0., 1.5, 0., 1.])
r = 0.05
ax.add_patch(patches.FancyBboxPatch(
(0.1 - r, 0.5 - r),
width=2 * r,
height=2 * r,
boxstyle="round,pad=0.01",
facecolor="lightblue"
))
ax.text(0.1, 0.5, "$s$", ha="center", va="center", size="large")
heights = np.linspace(0.8, 0.2, 3)
x = np.linspace(0.5 + r/2 + 0.01, 1.4, 10)
for i in range(3):
h = heights[i]
for j in range(3):
h2 = h + (heights[j] - 0.5) / 3.5
ax.add_patch(patches.FancyBboxPatch(
(0.5 - r/2, h2 - r/2),
width=r,
height=r,
boxstyle="round,pad=0.01",
facecolor="lightgreen"
))
ax.text(0.5, h2, "$a_{}$".format(j),
ha="center", va="center", size="small")
ax.add_patch(common.arrow_by_start_end(
(0.3 + r + 0.01, h + r * (1 - j) / 3.),
(0.5 - r/2 - 0.01, h2), length_includes_head=True,
color="black", head_width=0.02))
for k in range(2):
base = h2 + (k - 0.5) / 24.
y = base + np.random.uniform(-1., 1., 10) / 60.
y[0] = h2 + (k - 0.5) / 24.
ax.add_artist(lines.Line2D(x, y, color="black"))
ax.add_patch(patches.Circle((x[-1], y[-1]), 0.01,
color="black"))
ax.add_patch(patches.FancyBboxPatch(
(0.3 - r, h - r),
width=2 * r,
height=2 * r,
boxstyle="round,pad=0.01",
facecolor="lightgreen"
))
ax.text(0.3, h, "$a_{}$".format(i),
ha="center", va="center", size="large")
ax.add_patch(common.arrow_by_start_end(
(0.1 + r + 0.01, 0.5 + r * (1 - i) / 3.),
(0.3 - r - 0.01, h), length_includes_head=True, color="black",
head_width=0.02))
common.save_next_fig(PART_NUM, fig)
class RandomTree:
def __init__(self, degree, parent=None):
self.degree = degree
self.children = [None] * degree
self.active = False
if parent is None:
self.parent = None
self.visitorder = [self]
else:
self.parent = parent
self.visitorder = parent.visitorder
self.visitorder.append(self)
self.patches = {}
def simulate(self, max_depth):
if max_depth:
a = np.random.choice(self.degree)
if self.children[a] is None:
self.children[a] = RandomTree(self.degree, self)
self.children[a].simulate(max_depth - 1)
def set(self):
self.active = True
for child in self.children:
if child is not None:
child.set()
def draw(self, ax, facecolor, alpha=1., text_size="small"):
L = []
L.append(patches.FancyBboxPatch(
self.box_xy,
width=self.width,
height=self.height,
boxstyle="round,pad=0.1",
facecolor=facecolor,
alpha=alpha
))
L.append(text.Text(self.xy[0], self.xy[1], self.text,
ha="center", va="center", size=text_size, alpha=alpha))
if self.parent:
L.append(common.arrow_by_start_end(
self.father_a_xy,
self.a_xy,
length_includes_head=True,
color="black",
head_width=0.1,
alpha=alpha))
for a in L:
ax.add_artist(a)
self.patches[ax] = L
def remove(self, ax):
for a in self.patches[ax]:
a.remove()
del self.patches[ax]
def generate_fig3():
tree = RandomTree(3)
for i in range(10):
tree.simulate(8)
while True:
try:
a = np.random.choice(3)
tree.children[a].set()
break
except AttributeError:
pass
fig1 = Figure(figsize=(16/2, 9/2))
canvas1 = FigureCanvas(fig1)
ax1 = fig1.add_axes((0.01, 0.01, 0.98, 0.98))
fig2 = Figure(figsize=(16/2, 9/2))
canvas2 = FigureCanvas(fig2)
ax2 = fig2.add_axes((0.01, 0.01, 0.98, 0.98))
fig3 = Figure(figsize=(16, 9))
canvas3 = FigureCanvas(fig3)
ax3 = fig3.add_axes((0.01, 0.01, 0.98, 0.98))
for ax in [ax1, ax2, ax3]:
common.set_ax_params(ax)
ax.axis([0., 16., 0., 9.])
r = 0.4
tree.xy = (1., 9. / 2.)
tree.box_xy = (1. - r, 9. / 2. - r)
tree.width = 2 * r
tree.height = 2 * r
tree.text = "$s$"
tree.facecolor1 = "lightblue"
tree.facecolor2 = "lightblue"
tree.alpha = 0.2
tree.connectors = [(1. + r + 0.1, 9. / 2. + j * r / 3) for j in [1, 0, -1]]
X = np.linspace(3., 15., 8)
L = [tree]
for i in range(8):
L2 = []
for n in L:
L2.extend(c for c in n.children if c)
Y = np.linspace(9., 0., len(L2) + 2)[1:-1]
cnt = 0
for n in L:
for j in range(3):
if n.children[j] is not None:
c = n.children[j]
x, y = X[i], Y[cnt]
c.connectors = [(x + r/2 + 0.1, y + k * r / 6)
for k in [1, 0, -1]]
c.xy = (x, y)
c.box_xy = (x - r/2, y - r/2)
c.width = r
c.height = r
c.father_a_xy = n.connectors[j]
c.a_xy = (x - r/2 - 0.1, y)
c.text = "$a_{}$".format(j)
c.facecolor1 = "lightgreen"
if (i==0 and c.active):
c.facecolor2 = "lightblue"
else:
c.facecolor2 = "lightgreen"
c.alpha = 1. if c.active else 0.2
cnt += 1
L = L2
writer = FFMpegWriter()
writer.setup(fig3, "figures/part{}/mcts_movie.mp4".format(PART_NUM))
writer.grab_frame()
reset = False
for c in tree.visitorder:
if reset:
n = c
L = []
while n:
L.append(n)
n = n.parent
else:
L = [c]
for n in L[::-1]:
n.draw(ax3, "red", 1., "xx-large")
writer.grab_frame()
n.remove(ax3)
n.draw(ax3, n.facecolor1, 1., "xx-large")
writer.grab_frame()
c.draw(ax1, c.facecolor1, 1.)
c.draw(ax2, c.facecolor2, c.alpha)
reset = not any(c.children)
writer.finish()
common.save_next_fig(PART_NUM, fig1)
common.save_next_fig(PART_NUM, fig2)
def generate_fig4():
fig = Figure(figsize=(8, 8))
canvas = FigureCanvas(fig)
fig.suptitle("Demonstration of How Descritezation Creates Non-Determinism")
gs = gridspec.GridSpec(2, 3)
ax1 = fig.add_subplot(gs[0, :])
ax2 = fig.add_subplot(gs[1, 0])
ax3 = fig.add_subplot(gs[1, 1])
ax4 = fig.add_subplot(gs[1, 2])
for ax in [ax1, ax2, ax3, ax4]:
ax.set_aspect(1)
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_facecolor((0., 0., 0.))
ax.tick_params("both", bottom="off", left="off")
ax.axis([-1., 1., -1., 1.])
x_axis = lines.Line2D([-1., 1.], [0., 0.], color="red", alpha=0.5,
linestyle="--")
y_axis = lines.Line2D([0., 0.], [-1., 1.], color="red", alpha=0.5,
linestyle="--")
ax.add_artist(x_axis)
ax.add_artist(y_axis)
ax1.set_title("Current Partial State")
c_pos = patches.Rectangle((-1., -1.), 1., 1., color="palevioletred",
alpha=0.7)
a = patches.FancyArrow(-0.5, -0.5, 0.4, 0.4, width=0.01, color="pink")
ax1.add_patch(c_pos)
ax1.add_patch(a)
ax2.set_title("Possible Full State 1")
c_pos = patches.Rectangle((-1., -1.), 1., 1., color="palevioletred",
alpha=0.7)
ball = patches.Circle((-0.8, -0.2), radius=2 * common.BALL_RADIUS,
color=common.BALL_COLOR)
a = patches.FancyArrow(-0.8, -0.2, 0.4, 0.4, width=0.01, color="pink")
ax2.add_patch(c_pos)
ax2.add_patch(a)
ax2.add_patch(ball)
ax3.set_title("Possible Full State 2")
c_pos = patches.Rectangle((-1., -1.), 1., 1., color="palevioletred",
alpha=0.7)
ball = patches.Circle((-0.2, -0.2), radius=2 * common.BALL_RADIUS,
color=common.BALL_COLOR)
a = patches.FancyArrow(-0.2, -0.2, 0.4, 0.4, width=0.01, color="pink")
ax3.add_patch(c_pos)
ax3.add_patch(a)
ax3.add_patch(ball)
ax4.set_title("Possible Full State 3")
c_pos = patches.Rectangle((-1., -1.), 1., 1., color="palevioletred",
alpha=0.7)
ball = patches.Circle((-0.2, -0.8), radius=2 * common.BALL_RADIUS,
color=common.BALL_COLOR)
a = patches.FancyArrow(-0.2, -0.8, 0.4, 0.4, width=0.01, color="pink")
ax4.add_patch(c_pos)
ax4.add_patch(a)
ax4.add_patch(ball)
common.save_next_fig(PART_NUM, fig)
def generate_fig5():
def possible_v(ax, dir1, dir2, size, start):
t1 = np.arctan2(*dir1)
t2 = np.arctan2(*dir2)
a1 = common.arrow_by_start_size_angle(start, size, t1, width=0.01,
color="pink")
a2 = common.arrow_by_start_size_angle(start, size, t2, width=0.01,
color="pink")
arc = patches.Arc(start, size, size, 0.0, np.degrees(t1),
np.degrees(t2), color="pink")
ax.add_patch(a1)
ax.add_patch(a2)
ax.add_patch(arc)
fig = Figure(figsize=(8, 8))
canvas = FigureCanvas(fig)
fig.suptitle("Demonstration of How Descritezation Creates Non-Markovian Models")
gs = gridspec.GridSpec(2, 2)
ax1 = fig.add_subplot(gs[0, :])
ax2 = fig.add_subplot(gs[1, 0])
ax3 = fig.add_subplot(gs[1, 1])
for ax in [ax1, ax2, ax3]:
common.set_ax_params(ax, "black")
ax.axis([-1., 1., -1., 1.])
for v in np.linspace(-1., 1., 5)[1:-1]:
x_axis = lines.Line2D([-1., 1.], [v, v], color="red", alpha=0.5,
linestyle="--")
y_axis = lines.Line2D([v, v], [-1., 1.], color="red", alpha=0.5,
linestyle="--")
ax.add_artist(x_axis)
ax.add_artist(y_axis)
ax1.set_title("Current Partial State")
c_pos = patches.Rectangle((0., 0.), 0.5, 0.5, color="palevioletred",
alpha=0.7)
ax1.add_patch(c_pos)
possible_v(ax1, (0.2, 1.), (1., 0.2), 0.4, (0.25, 0.25))
ax2.set_title("Possible Past 1 &\nImplications on Current State")
c_pos = patches.Rectangle((0., 0.), 0.5, 0.5, color="palevioletred",
alpha=0.7)
p1_pos = patches.Rectangle((-0.5, 0.), 0.5, 0.5, color="palevioletred",
alpha=0.5)
p2_pos = patches.Rectangle((-1., 0.), 0.5, 0.5, color="palevioletred",
alpha=0.3)
ax2.add_patch(c_pos)
ax2.add_patch(p1_pos)
ax2.add_patch(p2_pos)
possible_v(ax2, (0.2, 1.), (1., 1.), 0.4, (0.25, 0.25))
ax3.set_title("Possible Past 2 &\nImplications on Current State")
c_pos = patches.Rectangle((0., 0.), 0.5, 0.5, color="palevioletred",
alpha=0.7)
p1_pos = patches.Rectangle((0., -0.5), 0.5, 0.5, color="palevioletred",
alpha=0.5)
p2_pos = patches.Rectangle((0., -1.), 0.5, 0.5, color="palevioletred",
alpha=0.3)
ax3.add_patch(c_pos)
ax3.add_patch(p1_pos)
ax3.add_patch(p2_pos)
possible_v(ax3, (1., 1.), (1., 0.2), 0.4, (0.25, 0.25))
common.save_next_fig(PART_NUM, fig)
DATA_FILES = [
common.DataFile("deep_value_iteration_follow", "Deep Q Iteration", 1, []),
common.DataFile("deep_value_iteration_random2", "Deep Q Iteration Random2", 1, []),
common.DataFile("deep_value_iteration_random", "Deep Q Iteration Random", 1, []),
common.DataFile("deep_value_iteration_self", "Deep Q Iteration Self", 1, []),
]
if __name__ == "__main__":
generate_fig1()
generate_fig2()
generate_fig3()
generate_fig4()
generate_fig5()
common.fill_data(DATA_FILES)
common.base_plot(DATA_FILES[0], PART_NUM, cut=50, xlabel="Q-Iterations",
jump=10)
# common.compare_plots(DATA_FILES, PART_NUM, xlabel="Q-Iterations", cut=100)
```
#### File: ApproxiPong/graphics/generate_figures_part3.py
```python
import numpy as np
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from matplotlib import patches
import common
PART_NUM = 3
def generate_fig1(width=4):
fig = Figure(figsize=(width, 4))
canvas = FigureCanvas(fig)
ax = fig.add_axes((0.01, 0.01, 0.98, 0.98))
common.set_ax_params(ax)
ax.axis([0., 1., 0., 1.])
for i, w in enumerate([0.1, 0.3, 0.5, 0.7, 0.9]):
ax.add_patch(patches.FancyBboxPatch(
(0.5 - w / 2., 0.5 - w / 2.),
width=w,
height=w,
boxstyle="round,pad=0.01",
facecolor="blue",
edgecolor="None",
alpha=0.3
))
for i, w in enumerate([0.1, 0.3, 0.5, 0.7, 0.9]):
ax.add_patch(patches.FancyBboxPatch(
(0.5 - w / 2., 0.5 - w / 2.),
width=w,
height=w,
boxstyle="round,pad=0.01",
facecolor="None",
edgecolor="black",
))
if i > 0:
ax.text(0.5 - w / 2., 0.5 - w / 2.,
"$\\pi_{}=$MCTS$(\\pi_{})$".format(i, i-1),
ha="left", va="bottom", size="small", color="yellow")
else:
ax.text(0.5 - w / 2., 0.5 - w / 2.,
"$\\pi_0$", ha="left", va="bottom", size="small",
color="yellow")
common.save_next_fig(PART_NUM, fig)
def generate_fig2():
fig = Figure(figsize=(4, 10))
canvas = FigureCanvas(fig)
ax = fig.add_axes((0.01, 0.01, 0.98, 0.98))
common.set_ax_params(ax)
ax.axis([0., 1., 0., 2.5])
w0 = 0.1
w1 = 0.3
H = np.linspace(2.2, 0.3, 5)
for i, h in enumerate(H):
for w in [w0, w1]:
ax.add_patch(patches.FancyBboxPatch(
(0.5 - w / 2., h - w / 2.),
width=w,
height=w,
boxstyle="round,pad=0.01",
facecolor="blue",
edgecolor="None",
alpha=0.3
))
for w in [w0, w1]:
ax.add_patch(patches.FancyBboxPatch(
(0.5 - w / 2., h - w / 2.),
width=w,
height=w,
boxstyle="round,pad=0.01",
facecolor="None",
edgecolor="black"
))
ax.text(0.5 - w0 / 2., h - w0 / 2.,
"$\\pi_{}$".format(i),
ha="left", va="bottom", size="large", color="yellow")
ax.text(0.5 - w1 / 2., h - w1 / 2.,
"MCTS$(\\pi_{})$".format(i),
ha="left", va="bottom", size="large", color="yellow")
if i < 4:
ax.add_artist(common.arrow_by_start_end(
[0.5 - w1 / 2. - 0.01, h - w1 / 2. - 0.01],
[0.5 - w0 / 2. - 0.01, H[i + 1] + w0 / 2. + 0.01],
color="black", width=0.005,
length_includes_head=True,
alpha=0.3))
ax.add_artist(common.arrow_by_start_end(
[0.5 + w1 / 2. + 0.01, h - w1 / 2. - 0.01],
[0.5 + w0 / 2. + 0.01, H[i + 1] + w0 / 2. + 0.01],
color="black", width=0.005,
length_includes_head=True,
alpha=0.3))
common.save_next_fig(PART_NUM, fig)
DATA_FILES = [
common.DataFile("alpha_pong", "Alpha Pong Network", 100, []),
common.DataFile("alpha_pong_mcts", "MCTS(Alpha Pong Network)", 100, []),
common.DataFile("alpha_pong_self", "Alpha Pong Network", 100, []),
common.DataFile("alpha_pong_self_mcts", "MCTS(Alpha Pong Network)", 100, []),
]
if __name__ == "__main__":
generate_fig1()
generate_fig2()
common.fill_data(DATA_FILES)
common.compare_plots(DATA_FILES[:2], PART_NUM,
title="Win Rate of Alpha-Pong-Zero", show_time=True)
common.compare_plots(DATA_FILES[2:], PART_NUM,
title="Win Rate of Alpha-Pong-Zero, trained using self-play", show_time=True)
generate_fig1(8)
```
#### File: pong/learning/deep_p.py
```python
from argparse import ArgumentParser
import numpy as np
import tensorflow as tf
from ..mechanics.pong import Pong
from ..utils.tf_machinery import NeuralNetwork
from ..utils.replay_db import ReplayDB
from ..utils.greenlet_learner import GreenletLearner
from ..utils import common
class DeepP(GreenletLearner):
"""Learn using the Deep-P-Learning algorithm."""
def __init__(self, args):
"""Create an DeepP instance using args."""
super().__init__(args)
self.db = ReplayDB(Pong.STATE_DIM, args.db_size)
self.s = tf.Session()
self.states = tf.placeholder(tf.float32,
shape=(None, Pong.STATE_DIM), name="states")
self.actions = tf.placeholder(tf.int32, (None,), "actions")
self.q_estimation = tf.placeholder(tf.float32, (None,),
name="q_estimation")
self.e = tf.placeholder_with_default(0.0, ())
self.p_network = NeuralNetwork(Pong.STATE_DIM, Pong.NUM_ACTIONS,
[50, 50, 50], session=self.s, name_prefix="", input_=self.states)
self.q_network = NeuralNetwork(Pong.STATE_DIM, Pong.NUM_ACTIONS,
[50, 50, 50], session=self.s, name_prefix="q_", input_=self.states)
max_values = self.q_network.take(self.p_network.output_argmax)
mean_values = tf.reduce_mean(self.q_network.output, axis=1)
self.v_values = (1 - self.e) * max_values + self.e * mean_values
self.q_values = self.q_network.take(self.actions)
self.loss = tf.reduce_mean((self.q_estimation - self.q_values) ** 2)
self.optimizer = tf.train.AdamOptimizer(args.learning_rate)
self.train_op = self.optimizer.minimize(self.loss,
var_list=list(self.q_network.vars()))
self.assign_ops = self.p_network.assign(self.q_network)
self.s.run(tf.global_variables_initializer())
self.s.run(self.assign_ops)
self.nn = self.p_network
self.n_iter = self.nn.load(self.load_path)
self.ed = common.ExplorationDecay(args, self.n_iter)
def iteration(self, results):
"""Perform a single iteration."""
self.db.store_episodes_results(results)
self.q_network.reinit()
samples = self.db.iter_samples(self.args.sample_size,
self.args.learning_iters)
for sample in samples:
if self.args.td1:
feed_dict = {
self.states: sample.s1,
self.actions: sample.a,
self.q_estimation: sample.r
}
else:
v = NeuralNetwork.run_op_in_batches(self.s, self.v_values,
{self.states: sample.s2}, self.args.batch_size,
{self.e: self.ed.epsilon})
q = sample.r + (~sample.done * self.args.gamma * v)
feed_dict = {
self.states: sample.s1,
self.actions: sample.a,
self.q_estimation: q
}
self.q_network.train_in_batches(self.train_op, feed_dict,
self.args.num_batches, self.args.batch_size)
if self.args.td1:
self.db.clear()
self.s.run(self.assign_ops)
self.ed.next()
def decide_actions(self, eval_states, *args):
"""Select actions randomly using current NN parameters."""
return self.nn.predict_exploration(eval_states, self.ed.epsilon)
#===============================================================================
# Main
#===============================================================================
def main(argv):
parser = common.standard_parser("/tmp/Pong/DeepP/", exploration=True,
start=1., end=0.01, frame=1)
parser.add_argument("--db_size", type=int, default=10000000)
parser.add_argument("--gamma", "-g", default=0.999, type=float)
parser.add_argument("--learning_rate", "-qlr", default=0.001, type=float)
parser.add_argument("--num_batches", "-qnb", default=10, type=int)
parser.add_argument("--batch_size", "-qbs", default=1000, type=int)
parser.add_argument("--learning_iters", "-qli", default=1000, type=int)
parser.add_argument("--sample_size", "-qss", default=10000, type=int)
parser.add_argument("--td1", action="store_true")
args = parser.parse_args(argv)
DP = DeepP(args)
DP.learn()
```
#### File: pong/mechanics/pong.py
```python
from enum import IntEnum
import numpy as np
from . import constants as c
class S(IntEnum):
BALL_X = 0
BALL_Y = 1
BALL_VX = 2
BALL_VY = 3
L_Y = 4
L_VY = 5
R_Y = 6
R_VY = 7
l = 4
r = 6
Y = 0
V = 1
def random_sign():
return np.random.choice([-1, 1])
class Pong:
STATE_DIM = 8
NUM_ACTIONS = 3
RANGES = [(-1., 1.), (-1., 1.), (-0.0299999, 0.0299999), (-0.2, 0.2),
(-1., 1.), (-0.2, 0.2), (-1., 1.), (-0.2, 0.2)]
@classmethod
def empty_state(cls):
return np.zeros(cls.STATE_DIM, np.float32)
@classmethod
def flip_state(cls, state):
f_state = state.copy()
f_state[..., [S.BALL_X, S.BALL_VX]] *= -1.
f_state[..., [S.L_Y, S.L_VY, S.R_Y, S.R_VY]] = \
state[..., [S.R_Y, S.R_VY, S.L_Y, S.L_VY]]
return f_state
def __init__(self, max_steps=c.MAX_STEPS, random_positions=False, f=None):
"""
Create a new Pong simulator.
max_steps is the maximum number of steps before a draw. Use
max_steps=None for limitless games.
"""
self.reset(random_positions=random_positions)
if max_steps is None:
self.max_steps = np.inf
else:
self.max_steps = max_steps
self.f = f
def set_state(self, state):
"""Set the internal state of the simulator"""
self.s = state.copy()
self.done = False
self.win = None
self.hit = None
self.miss = None
self.n_steps = 0
def reset(self, random_positions=False):
"""Start the simulator anew"""
self.s = self.empty_state()
if random_positions:
self.s[[S.L_Y, S.R_Y]] = np.random.uniform(
c.BOTTOM + c.HPL,
c.TOP - c.HPL,
2
)
self.score = {"l": 0, "r": 0, "draw": 0}
self.new_episode()
def new_episode(self):
"""Start a new game episode"""
s = self.s
s[S.BALL_X] = 0.
s[S.BALL_Y] = 0.
s[S.BALL_VX] = random_sign() * c.VX
s[S.BALL_VY] = random_sign() * np.random.uniform(c.VY0, c.VY1)
self.set_state(s)
def random_state(self):
"""Set the simulator to a random state."""
s = self.s
s[S.BALL_VX] = random_sign() * c.VX
s[[S.BALL_X, S.BALL_Y, S.L_Y, S.R_Y]] = np.random.uniform(-1., 1., 4)
s[[S.BALL_VY, S.L_VY, S.R_VY]] = np.random.uniform(-0.2, 0.2, 3)
self.set_state(s)
def step_paddle(self, p, a):
"""Perform action a on paddle p"""
s = self.s
s[p + S.V] = (s[p + S.V] + c.DY[a]) * c.FRICTION
s[p + S.Y] = s[p + S.Y] + s[p + S.V]
if s[p + S.Y] + c.HPL >= c.TOP:
s[p + S.Y] = c.TOP - c.HPL
s[p + S.V] = 0.0
if s[p + S.Y] - c.HPL <= c.BOTTOM:
s[p + S.Y] = c.BOTTOM + c.HPL
s[p + S.V] = 0.0
def step_ball(self):
"""
Overly complicated function to move the ball 1 time unit forward.
Deals with the rare cases where the ball hits two edges at the same
time unit.
"""
s = self.s
if s[S.BALL_VX] > 0.:
tt_x = (c.RIGHT - s[S.BALL_X]) / s[S.BALL_VX]
elif s[S.BALL_VX] < 0.:
tt_x = (c.LEFT - s[S.BALL_X]) / s[S.BALL_VX]
else:
tt_x = np.inf
if s[S.BALL_VY] > 0.:
tt_y = (c.TOP - s[S.BALL_Y]) / s[S.BALL_VY]
elif s[S.BALL_VY] < 0.:
tt_y = (c.BOTTOM - s[S.BALL_Y]) / s[S.BALL_VY]
else:
tt_y = np.inf
if (tt_x > 1.) and (tt_y > 1.): # no collision
self.advance_ball(1.)
elif tt_x <= tt_y <= 1.: # collision on X then on Y
self.advance_ball(tt_x)
self.hit_x()
self.advance_ball(tt_y - tt_x)
self.hit_y()
self.advance_ball(1. - tt_y)
elif tt_y < tt_x <= 1.: # collision on Y then on X
self.advance_ball(tt_y)
self.hit_y()
self.advance_ball(tt_x - tt_y)
self.hit_x()
self.advance_ball(1. - tt_x)
elif tt_x <= 1.: # collision on X
self.advance_ball(tt_x)
self.hit_x()
self.advance_ball(1. - tt_x)
elif tt_y <= 1.: # collision on Y
self.advance_ball(tt_y)
self.hit_y()
self.advance_ball(1. - tt_y)
        else:  # should be unreachable given the cases above
            raise RuntimeError("Unexpected ball/edge collision timing")
def advance_ball(self, t):
"""
Move ball t time units, assuming the ball doesn't hit any edge.
"""
s = self.s
s[S.BALL_X] += t * s[S.BALL_VX]
s[S.BALL_Y] += t * s[S.BALL_VY]
def hit_y(self):
"""Handle the case where the ball hits top or bottom."""
self.s[S.BALL_VY] *= -1.
def hit_x(self):
"""Handle the case where the ball hits left or right"""
s = self.s
side = "l" if np.isclose(s[S.BALL_X], c.LEFT) else "r"
p = S[side]
if s[p + S.Y] - c.HPL < s[S.BALL_Y] < s[p + S.Y] + c.HPL:
s[S.BALL_VX] *= -1.
s[S.BALL_VY] += s[p + S.V]
self.hit = side
else:
self.miss = side
self.win = "l" if (side == "r") else "r"
self.score[self.win] += 1
self.done = True
def step(self, l_a, r_a):
"""
Advance the simulator 1 time unit forward.
"""
if self.done:
raise ValueError("Episode done")
self.hit = None
self.miss = None
self.step_paddle(S.l, l_a)
self.step_paddle(S.r, r_a)
self.step_ball()
self.n_steps += 1
if not self.done and self.n_steps >= self.max_steps:
self.score["draw"] += 1
self.done = True
if self.f:
self.s = self.f(self.s)
def fast_set_and_step(self, s, l_a, r_a):
"""
Set the simulator to state s and advance the simulator 1 time unit
forward.
"""
self.s[:] = s
self.hit = None
self.miss = None
self.step_paddle(S.l, l_a)
self.step_paddle(S.r, r_a)
self.step_ball()
def get_state(self):
"""
Get the complete internal state of the game.
"""
return self.s.copy()
def reward(self, side, full=True):
t = self.win if full else self.hit
if t == side:
return 1
elif self.miss == side:
return -1
else:
return 0
def win_string(self):
return self.win if self.win else "draw"
def play(sim, l_pol, r_pol, n_episodes=100):
"""Run n_episodes episodes of Pong using the policies l_pol and r_pol"""
try:
for i in range(n_episodes):
while not sim.done:
state = sim.get_state()
l_a = l_pol.get_action(state)
r_a = r_pol.get_action(state)
sim.step(l_a, r_a)
sim.new_episode()
except KeyboardInterrupt:
pass
return sim.score
```
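To make the simulator's interface concrete, here is a hedged driver that pits two random policies against each other via the `play` helper; `RandomPolicy` is a stand-in for the real policies in `pong.mechanics.policies`, which are not shown here.
```python
# Stand-in policy for illustration; the project's real policies live in pong.mechanics.policies.
import numpy as np
from pong.mechanics.pong import Pong, play

class RandomPolicy:
    def get_action(self, state):
        return np.random.choice(Pong.NUM_ACTIONS)

sim = Pong(max_steps=1000)
score = play(sim, RandomPolicy(), RandomPolicy(), n_episodes=10)
print(score)   # e.g. {'l': 4, 'r': 5, 'draw': 1}
```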
#### File: pong/utils/fast_evaluate.py
```python
import argparse
import shlex
import greenlet as gl
import numpy as np
import tensorflow as tf
from pong.mechanics.pong import Pong, S
from pong.mechanics import policies
from pong.mechanics import constants as c
from pong.utils import common
from pong.utils.tf_machinery import NeuralNetwork
class NNPolicy(policies.PolicyRight):
@classmethod
def create_from_commandline(cls, side, argv):
parser = argparse.ArgumentParser()
parser.add_argument("save_path")
parser.add_argument("--name", "-n", default="NeuralNetwork")
args = parser.parse_args(shlex.split(argv))
return cls(side, **vars(args))
def __init__(self, side, layers=[50, 50, 50], save_path=None,
name="Neural"):
super().__init__(side, name)
self.save_path = save_path
self.g = tf.Graph()
with self.g.as_default():
self.nn = NeuralNetwork(Pong.STATE_DIM, Pong.NUM_ACTIONS, layers)
if save_path is not None:
self.nn.load(save_path)
def get_action_right(self, state, *args):
return gl.getcurrent().parent.switch((self.side, state))
def evaluate_all(self, states):
return self.nn.predict_argmax(states)
class TargetNNPolicy(policies.PolicyRight):
@classmethod
def create_from_commandline(cls, side, argv):
parser = argparse.ArgumentParser()
parser.add_argument("save_path")
parser.add_argument("--name", "-n", default="NeuralNetwork")
args = parser.parse_args(shlex.split(argv))
return cls(side, **vars(args))
def __init__(self, side, layers=[50, 50, 50], save_path=None, name="Neural"):
super().__init__(side, name)
self.save_path = save_path
self.g = tf.Graph()
with self.g.as_default():
self.nn = NeuralNetwork(Pong.STATE_DIM, 1, layers)
if save_path is not None:
self.nn.load(save_path)
def get_action_right(self, state, *args):
return gl.getcurrent().parent.switch((self.side, state))
def evaluate_all(self, states):
y = states[:, S.R_Y]
low = y - 0.5 * c.HPL
high = y + 0.5 * c.HPL
T = self.nn.predict_raw(states)[:, 0]
A = np.zeros(states.shape[0], np.int32)
A[T < low] = c.A_DOWN
A[T > high] = c.A_UP
return A
class FastEvaluate:
def __init__(self, l_pol, r_pol, disc=False):
self.l_pol = l_pol
self.r_pol = r_pol
self.disc = disc
def run_episode(self, *args):
if self.disc:
sim = Pong(random_positions=True, f=self.r_pol.discretization)
else:
sim = Pong(random_positions=True)
while not sim.done:
state = sim.get_state()
l_a = self.l_pol.get_action(state)
r_a = self.r_pol.get_action(state)
sim.step(l_a, r_a)
return sim.win_string()
def run_episodes(self, n):
eval_states = np.zeros((n, Pong.STATE_DIM), np.float32)
threads = []
for i in range(n):
t = gl.greenlet(self.run_episode)
threads.append(t)
A = [None] * n
        alive = np.ones(n, dtype=bool)
while alive.any():
flags = {"l": np.zeros(n, np.bool), "r": np.zeros(n, np.bool)}
for i in range(n):
if alive[i]:
data = threads[i].switch(A[i])
if not threads[i].dead:
side, state = data
eval_states[i] = state
flags[side][i] = True
else:
alive[i] = False
self.score[data] += 1
A = np.zeros(n, np.int32)
if flags["l"].any():
A[flags["l"]] = self.l_pol.evaluate_all(eval_states[flags["l"]])
if flags["r"].any():
A[flags["r"]] = self.r_pol.evaluate_all(eval_states[flags["r"]])
def estimate(self, n):
self.score = {"l": 0, "r": 0, "draw": 0}
self.run_episodes(n)
return self.score
policies.POLICIES["nn"] = NNPolicy
policies.POLICIES["targetnn"] = TargetNNPolicy
```
#### File: pong/utils/one_sided_mcts.py
```python
import numpy as np
from ..mechanics.pong import Pong
class Node:
def __init__(self, state, P, V, c_puct):
self.state = state
self.N = np.zeros(Pong.NUM_ACTIONS, np.int32)
self.W = np.zeros(Pong.NUM_ACTIONS, np.float32)
self.Q = np.zeros(Pong.NUM_ACTIONS, np.float32)
self.child = [None] * Pong.NUM_ACTIONS
self.P = P
self.V = V
self.c_puct = c_puct
def pick_child(self):
U = self.c_puct * self.P * np.sqrt(self.N.sum()) / (1. + self.N)
return (self.Q + U).argmax()
def propogate_reward(self, a, v):
self.N[a] += 1
self.W[a] += v
self.Q[a] = self.W[a] / self.N[a]
def probabilities(self, tau=1.):
if tau == 0.:
p = np.zeros(Pong.NUM_ACTIONS, np.float32)
p[self.N.argmax()] = 1.
return p
else:
p = self.N ** (1. / tau)
return (p / p.sum())
class MCTS:
def __init__(self, sim, l_pol, prior, max_depth, c_puct):
self.l_pol = l_pol
self.sim = sim
self.prior = prior
self.max_depth = max_depth
self.c_puct = c_puct
self.root = self.create_node()
def create_node(self):
state = self.sim.get_state()
P, V = self.prior(state)
return Node(state, P, V, self.c_puct)
def create_child(self, node, a):
l_a = self.l_pol.get_action(node.state)
self.sim.set_state(node.state)
self.sim.step(l_a, a)
if self.sim.win == "r":
node.child[a] = 1
elif self.sim.win == "l":
node.child[a] = -1
else:
node.child[a] = self.create_node()
def select(self):
stack = []
node = self.root
for i in range(self.max_depth):
a = node.pick_child()
stack.append((node, a))
if node.child[a] is None:
self.create_child(node, a)
if node.child[a] in [1, -1]:
v = node.child[a]
break
node = node.child[a]
v = node.V
for node, a in stack:
node.N[a] += 1
node.W[a] += v
node.Q[a] = node.W[a] / node.N[a]
def search(self, num):
for i in range(num):
self.select()
def step(self, a):
if self.root.child[a] is None:
self.create_child(self.root, a)
self.root = self.root.child[a]
def done(self):
return self.root in [-1, 1]
``` |
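A hedged driver for the one-sided MCTS above; `uniform_prior` and `StayPolicy` are stand-ins for the project's prior network and left-paddle policy, which this file does not define.
```python
# Stand-ins for illustration: a uniform prior/value function and a do-nothing left policy.
import numpy as np
from pong.mechanics.pong import Pong
from pong.utils.one_sided_mcts import MCTS

def uniform_prior(state):
    return np.full(Pong.NUM_ACTIONS, 1.0 / Pong.NUM_ACTIONS), 0.0

class StayPolicy:
    def get_action(self, state):
        return 0  # assumed "stay" action index

sim = Pong(random_positions=True)
mcts = MCTS(sim, StayPolicy(), uniform_prior, max_depth=20, c_puct=1.0)
mcts.search(100)                           # run 100 simulated rollouts from the root
probs = mcts.root.probabilities(tau=1.0)   # visit-count distribution over actions
mcts.step(int(probs.argmax()))             # commit the most-visited action
```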
{
"source": "jonathanFielder/reverb-price-finder",
"score": 3
} |
#### File: reverb-price-finder/reverb_price_finder/db_mod.py
```python
import sqlite3 as lite
class Data_Base:
def __init__(self, db):
self.db = db
self.connect()
def connect(self):
self.conn = lite.connect(self.db)
self.cursor = self.conn.cursor()
self.connected = True
def commit(self):
self.conn.commit()
def close(self):
self.conn.commit()
self.conn.close()
self.connected = False
    def c_ex(self, sql):  # cursor execute
        self.cursor.execute(sql)

    def c_ex_it(self, sql_add_q, items):  # cursor execute items
        self.cursor.execute(sql_add_q, items)
def query_all(self, table):
#query database | * means everything
self.cursor.execute("SELECT * FROM {}".format(table))
self.result = self.cursor.fetchall()
return self.result
def query(self, query):
try:
self.cursor.execute(query)
self.result = self.cursor.fetchall()
        except lite.Error:
            print('error executing query; check the statement format')
            return None
else:
return self.result
def main():
pass
if __name__ == '__main__':
main()
``` |
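A hedged example of driving `Data_Base`; the table name, schema and row are invented for illustration.
```python
# Invented table and rows for illustration; the class itself is used unchanged.
from db_mod import Data_Base

db = Data_Base("listings.db")
db.c_ex("CREATE TABLE IF NOT EXISTS guitars (name TEXT, price REAL)")
db.c_ex_it("INSERT INTO guitars VALUES (?, ?)", ("Stratocaster", 799.0))
db.commit()
print(db.query_all("guitars"))       # -> [('Stratocaster', 799.0)]
print(db.query("SELECT name FROM guitars WHERE price < 1000"))
db.close()
```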
{
"source": "jonathanfoster/microdot",
"score": 3
} |
#### File: microdot/microdot-asyncio/microdot_asyncio.py
```python
try:
import uasyncio as asyncio
except ImportError:
import asyncio
from microdot import Microdot as BaseMicrodot
from microdot import print_exception
from microdot import Request as BaseRequest
from microdot import Response as BaseResponse
def _iscoroutine(coro):
return hasattr(coro, 'send') and hasattr(coro, 'throw')
class Request(BaseRequest):
@staticmethod
async def create(stream, client_addr):
# request line
line = (await stream.readline()).strip().decode()
if not line: # pragma: no cover
return None
method, url, http_version = line.split()
http_version = http_version.split('/', 1)[1]
# headers
headers = {}
content_length = 0
while True:
line = (await stream.readline()).strip().decode()
if line == '':
break
header, value = line.split(':', 1)
value = value.strip()
headers[header] = value
if header == 'Content-Length':
content_length = int(value)
# body
body = await stream.read(content_length) \
if content_length else b''
return Request(client_addr, method, url, http_version, headers, body)
class Response(BaseResponse):
async def write(self, stream):
self.complete()
# status code
await stream.awrite('HTTP/1.0 {status_code} {reason}\r\n'.format(
status_code=self.status_code,
reason='OK' if self.status_code == 200 else 'N/A').encode())
# headers
for header, value in self.headers.items():
values = value if isinstance(value, list) else [value]
for value in values:
await stream.awrite('{header}: {value}\r\n'.format(
header=header, value=value).encode())
await stream.awrite(b'\r\n')
# body
if self.body:
if hasattr(self.body, 'read'):
while True:
buf = self.body.read(self.send_file_buffer_size)
if len(buf):
await stream.awrite(buf)
if len(buf) < self.send_file_buffer_size:
break
if hasattr(self.body, 'close'):
self.body.close()
else:
await stream.awrite(self.body)
class Microdot(BaseMicrodot):
def run(self, host='0.0.0.0', port=5000, debug=False):
self.debug = debug
async def serve(reader, writer):
if not hasattr(writer, 'awrite'): # pragma: no cover
# CPython provides the awrite and aclose methods in 3.8+
async def awrite(self, data):
self.write(data)
await self.drain()
async def aclose(self):
self.close()
await self.wait_closed()
from types import MethodType
writer.awrite = MethodType(awrite, writer)
writer.aclose = MethodType(aclose, writer)
await self.dispatch_request(reader, writer)
if self.debug: # pragma: no cover
print('Starting async server on {host}:{port}...'.format(
host=host, port=port))
loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.start_server(serve, host, port))
loop.run_forever()
loop.close() # pragma: no cover
async def dispatch_request(self, reader, writer):
req = await Request.create(reader, writer.get_extra_info('peername'))
if req:
f = self.find_route(req)
try:
res = None
if f:
for handler in self.before_request_handlers:
res = await self._invoke_handler(handler, req)
if res:
break
if res is None:
res = await self._invoke_handler(
f, req, **req.url_args)
if isinstance(res, tuple):
res = Response(*res)
elif not isinstance(res, Response):
res = Response(res)
for handler in self.after_request_handlers:
res = await self._invoke_handler(
handler, req, res) or res
elif 404 in self.error_handlers:
res = await self._invoke_handler(
self.error_handlers[404], req)
else:
res = 'Not found', 404
except Exception as exc:
print_exception(exc)
res = None
if exc.__class__ in self.error_handlers:
try:
res = await self._invoke_handler(
self.error_handlers[exc.__class__], req, exc)
except Exception as exc2: # pragma: no cover
print_exception(exc2)
if res is None:
if 500 in self.error_handlers:
res = await self._invoke_handler(
self.error_handlers[500], req)
else:
res = 'Internal server error', 500
if isinstance(res, tuple):
res = Response(*res)
elif not isinstance(res, Response):
res = Response(res)
await res.write(writer)
await writer.aclose()
if self.debug and req: # pragma: no cover
print('{method} {path} {status_code}'.format(
method=req.method, path=req.path,
status_code=res.status_code))
async def _invoke_handler(self, f_or_coro, *args, **kwargs):
ret = f_or_coro(*args, **kwargs)
if _iscoroutine(ret):
ret = await ret
return ret
redirect = Response.redirect
send_file = Response.send_file
``` |
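A minimal app built on the async `Microdot` class above; the route and greeting are illustrative, and the `route` decorator is assumed to come unchanged from the base `Microdot` class.
```python
# Minimal sketch; assumes the base Microdot class provides the route() decorator.
from microdot_asyncio import Microdot

app = Microdot()

@app.route('/')
async def index(request):
    return 'Hello from microdot-asyncio'

app.run(port=5000, debug=True)   # blocks and serves until interrupted
```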
{
"source": "jonathanfrawley/build",
"score": 2
} |
#### File: build/autolens/run_python.py
```python
import os
import shutil
from distutils.dir_util import copy_tree
import build_util
BUILD_PATH = os.getcwd()
WORKSPACE_PATH = f"{os.getcwd()}/../autolens_workspace"
SCRIPTS_ROOT_PATH = f"{WORKSPACE_PATH}/scripts"
SCRIPTS_NO_RUN = [
"mask.py",
"positions.py",
"lens_light_centre.py",
"scaled_dataset.py",
"tutorial_3_lens_and_source.py",
"tutorial_4_x2_lens_galaxies.py",
"tutorial_5_complex_source.py",
"tutorial_8_model_fit.py",
"tutorial_6_model_fit.py",
"tutorial_searches.py",
"tutorial_6_derived.py",
"hyper_mode.py",
"pipeline.py",
"light_parametric__mass_total__source_inversion.py",
"Emcee.py",
"PySwarms.py",
"Zeus.py",
"EmceePlotter.py",
"PySwarmsPlotter.py",
"ZeusPlotter.py",
"UltraNestPlotter.py",
"DynestyPlotter.py",
]
def main():
copy_tree(f"autolens/configs/default", f"{WORKSPACE_PATH}/config")
os.chdir(WORKSPACE_PATH)
if os.path.exists(f"{WORKSPACE_PATH}/output"):
try:
os.rename(f"{WORKSPACE_PATH}/output", f"{WORKSPACE_PATH}/output_backup")
except OSError:
shutil.rmtree(f"{WORKSPACE_PATH}/output")
if not os.path.exists(f"{WORKSPACE_PATH}/auto_files"):
os.system("git clone https://github.com/Jammy2211/auto_files --depth 1")
if not os.path.exists(f"{WORKSPACE_PATH}/output"):
os.mkdir(f"{WORKSPACE_PATH}/output")
os.system(f"cp -r {WORKSPACE_PATH}/auto_files/autolens/output {WORKSPACE_PATH}")
os.chdir(SCRIPTS_ROOT_PATH)
for folder in [
"howtolens",
# "database"
]:
build_util.execute_scripts_in_folder(
workspace_path=WORKSPACE_PATH,
folder=folder,
root_path=f"{SCRIPTS_ROOT_PATH}/{folder}",
scripts_no_run=SCRIPTS_NO_RUN
)
os.chdir(BUILD_PATH)
copy_tree(f"autolens/configs/test", f"{WORKSPACE_PATH}/config")
for folder in [
# "imaging",
# "interferometer",
# "point_source",
# "misc",
"plot"
]:
build_util.execute_scripts_in_folder(
workspace_path=WORKSPACE_PATH,
folder=folder,
root_path=f"{SCRIPTS_ROOT_PATH}/{folder}",
scripts_no_run=SCRIPTS_NO_RUN
)
shutil.rmtree(f"{WORKSPACE_PATH}/output")
os.rename(f"{WORKSPACE_PATH}/output_backup", f"{WORKSPACE_PATH}/output")
os.chdir(BUILD_PATH)
copy_tree(f"autolens/configs/default", f"{WORKSPACE_PATH}/config")
os.chdir(WORKSPACE_PATH)
os.system(f"git add -f config")
os.chdir(BUILD_PATH)
os.chdir(WORKSPACE_PATH)
shutil.rmtree("auto_files")
if __name__ == "__main__":
main()
``` |
{
"source": "jonathanfrawley/dalle-mini",
"score": 3
} |
#### File: dalle-mini/dalle_mini/backend.py
```python
import requests
from io import BytesIO
import base64
from PIL import Image
class ServiceError(Exception):
def __init__(self, status_code):
self.status_code = status_code
def get_images_from_backend(prompt, backend_url):
r = requests.post(
backend_url,
json={"prompt": prompt}
)
if r.status_code == 200:
images = r.json()["images"]
images = [Image.open(BytesIO(base64.b64decode(img))) for img in images]
return images
else:
raise ServiceError(r.status_code)
```
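A hedged call to the backend client above; the prompt and backend URL are placeholders.
```python
# Placeholder prompt and URL; ServiceError carries the HTTP status on failure.
try:
    images = get_images_from_backend(
        "an armchair in the shape of an avocado",
        backend_url="http://localhost:8000/generate",
    )
    images[0].save("sample_0.png")
except ServiceError as err:
    print("backend returned HTTP", err.status_code)
```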
#### File: dalle-mini/dalle_mini/helpers.py
```python
from PIL import Image, ImageDraw, ImageFont
def captioned_strip(images, caption=None, rows=1):
increased_h = 0 if caption is None else 48
w, h = images[0].size[0], images[0].size[1]
img = Image.new("RGB", (len(images)*w//rows, h*rows + increased_h))
for i, img_ in enumerate(images):
img.paste(img_, (i//rows*w, increased_h + (i % rows) * h))
if caption is not None:
draw = ImageDraw.Draw(img)
font = ImageFont.truetype("/usr/share/fonts/truetype/liberation2/LiberationMono-Bold.ttf", 40)
draw.text((20, 3), caption, (255,255,255), font=font)
return img
```
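A sketch of `captioned_strip` with locally generated placeholder tiles; note the function hard-codes a Liberation Mono font path, so this assumes that font is installed.
```python
# Placeholder tiles; captioned_strip() requires the hard-coded Liberation Mono font to exist.
from PIL import Image

tiles = [Image.new("RGB", (256, 256), color) for color in ("red", "green", "blue")]
strip = captioned_strip(tiles, caption="three placeholder tiles", rows=1)
strip.save("strip.png")
```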
#### File: dev/data/CC3M_downloader.py
```python
import sys
import os
from datetime import datetime
import pandas as pd
import contexttimer
from urllib.request import urlopen
import requests
from PIL import Image
import torch
from torchvision.transforms import functional as TF
from multiprocessing import Pool
from tqdm import tqdm
import logging
# Setup
logging.basicConfig(filename='download.log', filemode='w', level=logging.INFO)
requests.packages.urllib3.disable_warnings(requests.packages.urllib3.exceptions.InsecureRequestWarning)
if len(sys.argv) != 3:
print("Provide .tsv file name & output directory. e.g. python downloader.py Train-GCC-training.tsv training")
exit(1)
# Load data
print(f'Starting to load at {datetime.now().isoformat(timespec="minutes")}')
with contexttimer.Timer(prefix="Loading from tsv"):
df = pd.read_csv(sys.argv[1], delimiter='\t', header=None)
url_to_idx_map = {url: index for index, caption, url in df.itertuples()}
print(f'Loaded {len(url_to_idx_map)} urls')
base_dir = os.path.join(os.getcwd(), sys.argv[2])
def process(item):
url, image_id = item
try:
base_url = os.path.basename(url) # extract base url
stem, ext = os.path.splitext(base_url) # split into stem and extension
filename = f'{image_id:08d}---{stem}.jpg' # create filename
filepath = os.path.join(base_dir, filename) # concat to get filepath
if not os.path.isfile(filepath):
req = requests.get(url, stream=True, timeout=1, verify=False).raw
image = Image.open(req).convert('RGB')
if min(image.size) > 512:
image = TF.resize(image, size=512, interpolation=Image.LANCZOS)
image.save(filepath) # save PIL image
except Exception as e:
logging.info(" ".join(repr(e).splitlines()))
logging.error(url)
list_of_items = list(url_to_idx_map.items())
print(len(list_of_items))
with Pool(128) as p:
r = list(tqdm(p.imap(process, list_of_items), total=len(list_of_items)))
print('DONE')
``` |
{
"source": "jonathanfrawley/mednerf",
"score": 2
} |
#### File: mednerf/graf-main/render_views_xray.py
```python
from ignite.engine import Engine
from ignite.metrics import PSNR, SSIM
from torchvision import transforms
from tqdm import tqdm
from torch.autograd import Variable
from PIL import Image
import sys
import copy
import argparse
import os
import glob
from os import path
import torch
import torch.optim as optim
import torch.nn.functional as F
torch.set_default_tensor_type('torch.cuda.FloatTensor')
sys.path.append('submodules') # needed to make imports work in GAN_stability
from graf.config import get_data, build_models, update_config, get_render_poses
from graf.utils import to_phi, to_theta, save_video
from submodules.GAN_stability.gan_training.checkpoints import CheckpointIO
from submodules.GAN_stability.gan_training.distributions import get_ydist, get_zdist
from submodules.GAN_stability.gan_training.config import (
load_config
)
from submodules.GAN_stability.gan_training import lpips
percept = lpips.PerceptualLoss(model='net-lin', net='alex', use_gpu=True)
# to keep ignite pytorch format
def get_output(metrics_engine, output):
return output[0], output[1]
def get_rays(pose, generator):
return generator.val_ray_sampler(generator.H, generator.W,
generator.focal, pose)[0]
def test(range_phi, render_radius, theta_mean,
z, generator_test, N_samples, iteration):
fps = min(int(72 / 2.), 25) # aim for at least 2 second video
with torch.no_grad():
phi_rot = min(int(range_phi[1] - range_phi[0]), 72) # at least 1 frame per degree
poses_rot = get_render_poses(render_radius, angle_range=range_phi, theta=theta_mean, N=phi_rot)
zrot = z[0].clone().unsqueeze(1).expand(-1, 72, -1).flatten(0, 1)
zrot = zrot.split(batch_size)
samples = len(zrot)
poses_rot = poses_rot.unsqueeze(0) \
.expand(samples, -1, -1, -1).flatten(0, 1)
rays = torch.stack([get_rays(poses_rot[i].to(device), generator_test) for i in range(samples)])
rays = rays.split(batch_size)
rgb, depth = [], []
for z_i, rays_i in tqdm(zip(zrot, rays), total=len(zrot), desc='Create samples...'):
bs = len(z_i)
if rays_i is not None:
rays_i = rays_i.permute(1, 0, 2, 3).flatten(1, 2) # Bx2x(HxW)xC -> 2x(BxHxW)x3
rgb_i, depth_i, _, _ = generator_test(z_i, rays=rays_i)
reshape = lambda x: x.view(bs, generator_test.H, generator_test.W, x.shape[1]).permute(0, 3, 1, 2) # (NxHxW)xC -> NxCxHxW
rgb.append(reshape(rgb_i).cpu())
depth.append(reshape(depth_i).cpu())
rgb = torch.cat(rgb)
depth = torch.cat(depth)
reshape = lambda x: x.view(N_samples, 72, *x.shape[1:])
rgb = reshape(rgb)
for i in range(N_samples):
save_video(rgb[i], os.path.join(eval_dir, 'generated_' + '{:04d}_rgb.mp4'.format(iteration)), as_gif=False, fps=fps)
def reconstruct(args, config_file):
device = torch.device("cuda:0")
_, hwfr, _ = get_data(config_file)
config_file['data']['hwfr'] = hwfr
# Create models
generator, discriminator = build_models(config_file, disc=False)
generator = generator.to(device)
# Test generator, use model average
generator_test = copy.deepcopy(generator)
generator_test.parameters = lambda: generator_test._parameters
generator_test.named_parameters = lambda: generator_test._named_parameters
checkpoint_io.register_modules(**{k + '_test': v for k, v in generator_test.module_dict.items()})
g_optim = optim.RMSprop(generator_test.parameters(), lr=0.0005, alpha=0.99, eps=1e-8)
# Register modules to checkpoint
checkpoint_io.register_modules(
g_optimizer=g_optim
)
generator_test.eval()
# Distributions
    ydist = get_ydist(1, device=device)  # dummy to keep the GAN training structure intact
y = torch.zeros(batch_size)
zdist = get_zdist(config_file['z_dist']['type'], config_file['z_dist']['dim'],
device=device)
# Load checkpoint
model_file = args.model
print('load %s' % os.path.join(checkpoint_dir, model_file))
load_dict = checkpoint_io.load(model_file)
psnr_engine = Engine(get_output)
psnr = PSNR(data_range=2.)
psnr.attach(psnr_engine, "psnr")
ssim_engine = Engine(get_output)
ssim = SSIM(data_range=2.)
ssim.attach(ssim_engine, "ssim")
N_samples = batch_size
N_poses = 1 # corresponds to number of frames
render_radius = config_file['data']['radius']
if isinstance(render_radius, str): # use maximum radius
render_radius = float(render_radius.split(',')[1])
transform_list = [
transforms.Resize((128,128)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
]
trans = transforms.Compose(transform_list)
target_xray = glob.glob(os.path.join(args.xray_img_path, '*.png'))
target_xray = torch.unsqueeze(trans(Image.open(target_xray[0]).convert('RGB')),0)
# target_xray = target_xray.repeat(N_samples,1,1,1)
range_theta = (to_theta(config_file['data']['vmin']), to_theta(config_file['data']['vmax']))
range_phi = (to_phi(0), to_phi(1))
theta_mean = 0.5 * sum(range_theta)
phi_mean = 0.5 * sum(range_phi)
N_phi = min(int(range_phi[1] - range_phi[0]), N_poses) # at least 1 frame per degree
poses = get_render_poses(render_radius, angle_range=range_phi, theta=theta_mean, N=N_phi)
z = zdist.sample((N_samples,))
N_samples, N_frames = len(z), len(poses)
z = Variable(z, requires_grad=True)
z_optim = optim.Adam([z], lr=0.0005, betas=(0., 0.999))
# reshape inputs
z = z.unsqueeze(1).expand(-1, N_poses, -1).flatten(0, 1)
poses = poses.unsqueeze(0) \
.expand(N_samples, -1, -1, -1).flatten(0, 1)
z = z.split(batch_size)
log_rec_loss = 0.
ssim_value = 0.
psnr_value = 0.
for iteration in range(5000):
z_optim.zero_grad()
g_optim.zero_grad()
n_samples = len(z)
rays = torch.stack([get_rays(poses[i].to(device), generator_test) for i in range(n_samples)])
rays = rays.split(batch_size)
rgb, depth = [], []
for z_i, rays_i in tqdm(zip(z, rays), total=len(z), desc='Create samples...'):
bs = len(z_i)
if rays_i is not None:
rays_i = rays_i.permute(1, 0, 2, 3).flatten(1, 2) # Bx2x(HxW)xC -> 2x(BxHxW)x3
rgb_i, depth_i, _, _ = generator_test(z_i, rays=rays_i)
reshape = lambda x: x.view(bs, generator_test.H, generator_test.W, x.shape[1]).permute(0, 3, 1, 2) # (NxHxW)xC -> NxCxHxW
rgb.append(reshape(rgb_i).cpu())
depth.append(reshape(depth_i).cpu())
rgb = torch.cat(rgb)
depth = torch.cat(depth)
reshape = lambda x: x.view(N_samples, N_frames, *x.shape[1:])
xray_recons = reshape(rgb)
nll = z[0]**2 / 2
nll = nll.mean()
rec_loss = 0.3 * percept(F.avg_pool2d(torch.unsqueeze(xray_recons[0][0],0), 2, 2),
F.avg_pool2d(target_xray,2,2)).sum() +\
0.1 * F.mse_loss(torch.unsqueeze(xray_recons[0][0],0), target_xray) +\
0.3 * nll
rec_loss.backward()
z_optim.step()
g_optim.step()
log_rec_loss += rec_loss.item()
data = torch.unsqueeze(torch.stack([xray_recons[0][0].unsqueeze(0),
target_xray],0),0)
psnr_state = psnr_engine.run(data)
psnr_value += psnr_state.metrics['psnr']
ssim_state = ssim_engine.run(data)
ssim_value += ssim_state.metrics['ssim']
print(f"SSIM: ", ssim_value)
print(f"PSNR: ", psnr_value)
print("Reconstruction loss g: %.5f"%(log_rec_loss))
ssim_value = 0.
psnr_value = 0.
log_rec_loss = 0
if iteration % args.save_every == args.save_every - 1:
test(range_phi, render_radius, theta_mean,
z, generator_test, N_samples, iteration)
if psnr_value > args.psnr_stop:
break
if __name__ == "__main__":
# Arguments
parser = argparse.ArgumentParser(
description='Finetune the latent code to reconstruct the CT given an xray projection.'
)
parser.add_argument('config_file', type=str, help='Path to config file.')
parser.add_argument('--xray_img_path', type=str, default='None', help='Path to real xray')
parser.add_argument('--save_dir', type=str, help='Name of dir to save results')
parser.add_argument('--model', type=str, default='model_best.pt', help='model.pt to use for eval')
parser.add_argument("--save_every", default=15, type=int, help="save video of projections every number of iterations")
parser.add_argument("--psnr_stop", default=20, type=float, help="stop at this psnr value")
args, unknown = parser.parse_known_args()
device = torch.device("cuda:0")
config_file = load_config(args.config_file, 'configs/default.yaml')
config_file['data']['fov'] = float(config_file['data']['fov'])
config_file = update_config(config_file, unknown)
batch_size = 1
out_dir = os.path.join(config_file['training']['outdir'], config_file['expname'])
checkpoint_dir = path.join(out_dir, 'chkpts')
eval_dir = os.path.join(out_dir, args.save_dir)
os.makedirs(eval_dir, exist_ok=True)
config_file['training']['nworkers'] = 0
# Logger
checkpoint_io = CheckpointIO(
checkpoint_dir=checkpoint_dir
)
reconstruct(args, config_file)
``` |
{
"source": "jonathanfrawley/PyAutoArray",
"score": 2
} |
#### File: autoarray/dataset/imaging.py
```python
import logging
import numpy as np
import copy
from autoarray import exc
from autoarray.dataset import abstract_dataset, preprocess
from autoarray.mask import mask_2d as msk
from autoarray.structures.arrays.two_d import array_2d
from autoarray.structures.grids.two_d import grid_2d
from autoarray.structures import kernel_2d
from autoarray.operators import convolver
logger = logging.getLogger(__name__)
class SettingsImaging(abstract_dataset.AbstractSettingsDataset):
def __init__(
self,
grid_class=grid_2d.Grid2D,
grid_inversion_class=grid_2d.Grid2D,
sub_size=1,
sub_size_inversion=4,
fractional_accuracy=0.9999,
sub_steps=None,
pixel_scales_interp=None,
signal_to_noise_limit=None,
signal_to_noise_limit_radii=None,
use_normalized_psf=True,
):
"""
        The lens dataset is the collection of data (image, noise-map, PSF), a mask, grid, convolver
        and other utilities that are used for modeling and fitting an image of a strong lens.

        Whilst the image, noise-map, etc. are loaded in 2D, the lens dataset creates reduced 1D arrays of each
        for lens calculations.
Parameters
----------
grid_class : ag.Grid2D
The type of grid used to create the image from the `Galaxy` and `Plane`. The options are `Grid2D`,
`Grid2DIterate` and `Grid2DInterpolate` (see the `Grid2D` documentation for a description of these options).
grid_inversion_class : ag.Grid2D
The type of grid used to create the grid that maps the `Inversion` source pixels to the data's image-pixels.
The options are `Grid2D`, `Grid2DIterate` and `Grid2DInterpolate` (see the `Grid2D` documentation for a
description of these options).
sub_size : int
If the grid and / or grid_inversion use a `Grid2D`, this sets the sub-size used by the `Grid2D`.
fractional_accuracy : float
If the grid and / or grid_inversion use a `Grid2DIterate`, this sets the fractional accuracy it
uses when evaluating functions.
sub_steps : [int]
If the grid and / or grid_inversion use a `Grid2DIterate`, this sets the steps the sub-size is increased by
to meet the fractional accuracy when evaluating functions.
pixel_scales_interp : float or (float, float)
If the grid and / or grid_inversion use a `Grid2DInterpolate`, this sets the resolution of the interpolation
grid.
signal_to_noise_limit : float
            If input, the dataset's noise-map is rescaled such that no pixel has a signal-to-noise above the
            signal-to-noise limit.
psf_shape_2d : (int, int)
The shape of the PSF used for convolving model image generated using analytic light profiles. A smaller \
shape will trim the PSF relative to the input image PSF, giving a faster analysis run-time.
"""
super().__init__(
grid_class=grid_class,
grid_inversion_class=grid_inversion_class,
sub_size=sub_size,
sub_size_inversion=sub_size_inversion,
fractional_accuracy=fractional_accuracy,
sub_steps=sub_steps,
pixel_scales_interp=pixel_scales_interp,
signal_to_noise_limit=signal_to_noise_limit,
signal_to_noise_limit_radii=signal_to_noise_limit_radii,
)
self.use_normalized_psf = use_normalized_psf
class Imaging(abstract_dataset.AbstractDataset):
def __init__(
self,
image: array_2d.Array2D,
noise_map: array_2d.Array2D,
psf: kernel_2d.Kernel2D = None,
settings=SettingsImaging(),
name: str = None,
setup_convolver=False,
):
"""
A class containing the data, noise-map and point spread function of a 2D imaging dataset.
Parameters
----------
image : aa.Array2D
The array of the image data, in units of electrons per second.
noise_map : Array2D
An array describing the RMS standard deviation error in each pixel in units of electrons per second.
psf : aa.Array2D
An array describing the Point Spread Function kernel of the image.
mask: msk.Mask2D
The 2D mask that is applied to the image.
"""
self.unmasked = None
self.setup_convolver = setup_convolver
if setup_convolver and psf is not None:
try:
image.mask.blurring_mask_from_kernel_shape(
kernel_shape_native=psf.shape_native
)
except exc.MaskException:
image = image.padded_before_convolution_from(
kernel_shape=psf.shape_native, mask_pad_value=1
)
noise_map = noise_map.padded_before_convolution_from(
kernel_shape=psf.shape_native, mask_pad_value=1
)
print(
f"The image and noise map of the `Imaging` object have been padded to the dimensions "
f"{image.shape}. This is because the blurring region around its mask, which defines where "
f"PSF flux may be convolved into the masked region, extended beyond the edge of the image. "
f""
f"This can be prevented by using a smaller mask, a smaller PSF kernel size or by manually "
f"padding the image and noise-map yourself."
)
super().__init__(data=image, noise_map=noise_map, settings=settings, name=name)
self.psf_unormalized = psf
if psf is not None:
self.psf_normalized = kernel_2d.Kernel2D.manual_native(
array=psf.native, pixel_scales=psf.pixel_scales, normalize=True
)
if setup_convolver and psf is not None:
self.convolver = convolver.Convolver(mask=self.mask, kernel=self.psf)
self.blurring_grid = self.grid.blurring_grid_from_kernel_shape(
kernel_shape_native=self.psf.shape_native
)
else:
self.convolver = None
self.blurring_grid = None
def __array_finalize__(self, obj):
if isinstance(obj, Imaging):
try:
for key, value in obj.__dict__.items():
setattr(self, key, value)
except AttributeError:
logger.debug(
"Original object in Imaging.__array_finalize__ missing one or more attributes"
)
@property
def psf(self):
if self.settings.use_normalized_psf:
return self.psf_normalized
return self.psf_unormalized
@classmethod
def from_fits(
cls,
image_path,
pixel_scales,
noise_map_path,
image_hdu=0,
noise_map_hdu=0,
psf_path=None,
psf_hdu=0,
name=None,
):
"""Factory for loading the imaging data_type from .fits files, as well as computing properties like the noise-map,
exposure-time map, etc. from the imaging-data.
This factory also includes a number of routines for converting the imaging-data from unit_label not supported by PyAutoLens \
(e.g. adus, electrons) to electrons per second.
Parameters
----------
noise_map_non_constant
name
image_path : str
The path to the image .fits file containing the image (e.g. '/path/to/image.fits')
pixel_scales : float
The size of each pixel in scaled units.
image_hdu : int
The hdu the image is contained in the .fits file specified by *image_path*.
psf_path : str
The path to the psf .fits file containing the psf (e.g. '/path/to/psf.fits')
psf_hdu : int
The hdu the psf is contained in the .fits file specified by *psf_path*.
noise_map_path : str
The path to the noise_map .fits file containing the noise_map (e.g. '/path/to/noise_map.fits')
noise_map_hdu : int
The hdu the noise_map is contained in the .fits file specified by *noise_map_path*.
"""
image = array_2d.Array2D.from_fits(
file_path=image_path, hdu=image_hdu, pixel_scales=pixel_scales
)
noise_map = array_2d.Array2D.from_fits(
file_path=noise_map_path, hdu=noise_map_hdu, pixel_scales=pixel_scales
)
if psf_path is not None:
psf = kernel_2d.Kernel2D.from_fits(
file_path=psf_path,
hdu=psf_hdu,
pixel_scales=pixel_scales,
normalize=False,
)
else:
psf = None
return Imaging(image=image, noise_map=noise_map, psf=psf, name=name)
def apply_mask(self, mask):
if self.image.mask.is_all_false:
unmasked_imaging = self
else:
unmasked_imaging = self.unmasked
image = array_2d.Array2D.manual_mask(
array=unmasked_imaging.image.native, mask=mask.mask_sub_1
)
noise_map = array_2d.Array2D.manual_mask(
array=unmasked_imaging.noise_map.native, mask=mask.mask_sub_1
)
imaging = Imaging(
image=image,
noise_map=noise_map,
psf=self.psf_unormalized,
settings=self.settings,
name=self.name,
setup_convolver=True,
)
imaging.unmasked = unmasked_imaging
return imaging
def apply_settings(self, settings):
return Imaging(
image=self.image,
noise_map=self.noise_map,
psf=self.psf_unormalized,
settings=settings,
name=self.name,
setup_convolver=self.setup_convolver,
)
@property
def shape_native(self):
return self.data.shape_native
@property
def image(self):
return self.data
@property
def pixel_scales(self):
return self.data.pixel_scales
def signal_to_noise_limited_from(self, signal_to_noise_limit, mask=None):
imaging = copy.deepcopy(self)
if mask is None:
mask = msk.Mask2D.unmasked(
shape_native=self.shape_native, pixel_scales=self.pixel_scales
)
noise_map_limit = np.where(
(self.signal_to_noise_map.native > signal_to_noise_limit) & (mask == False),
np.abs(self.image.native) / signal_to_noise_limit,
self.noise_map.native,
)
imaging.noise_map = array_2d.Array2D.manual_mask(
array=noise_map_limit, mask=self.image.mask
)
return imaging
def modify_image_and_noise_map(self, image, noise_map):
imaging = copy.deepcopy(self)
imaging.data = image
imaging.noise_map = noise_map
return imaging
def output_to_fits(
self, image_path, psf_path=None, noise_map_path=None, overwrite=False
):
self.image.output_to_fits(file_path=image_path, overwrite=overwrite)
if self.psf is not None and psf_path is not None:
self.psf.output_to_fits(file_path=psf_path, overwrite=overwrite)
if self.noise_map is not None and noise_map_path is not None:
self.noise_map.output_to_fits(file_path=noise_map_path, overwrite=overwrite)
class AbstractSimulatorImaging:
def __init__(
self,
exposure_time: float,
background_sky_level: float = 0.0,
psf: kernel_2d.Kernel2D = None,
normalize_psf: bool = True,
read_noise: float = None,
add_poisson_noise: bool = True,
noise_if_add_noise_false: float = 0.1,
noise_seed: int = -1,
):
"""A class representing a Imaging observation, using the shape of the image, the pixel scale,
psf, exposure time, etc.
Parameters
----------
psf : Kernel2D
An arrays describing the PSF kernel of the image.
exposure_time : float
The exposure time of the simulated imaging.
background_sky_level : float
The level of the background sky of the simulated imaging.
normalize_psf : bool
If `True`, the PSF kernel is normalized so all values sum to 1.0.
read_noise : float
The level of read-noise added to the simulated imaging by drawing from a Gaussian distribution with
sigma equal to the value `read_noise`.
add_poisson_noise : bool
Whether Poisson noise corresponding to photon count statistics on the imaging observation is added.
noise_if_add_noise_false : float
If noise is not added to the simulated dataset a `noise_map` must still be returned. This value gives
the value of noise assigned to every pixel in the noise-map.
noise_seed : int
The random seed used to add random noise, where -1 corresponds to a random seed every run.
"""
if psf is not None and normalize_psf:
psf = psf.normalized
self.psf = psf
self.exposure_time = exposure_time
self.background_sky_level = background_sky_level
self.read_noise = read_noise
self.add_poisson_noise = add_poisson_noise
self.noise_if_add_noise_false = noise_if_add_noise_false
self.noise_seed = noise_seed
class SimulatorImaging(AbstractSimulatorImaging):
def from_image(self, image: array_2d.Array2D, name: str = None):
"""
Returns a realistic simulated image by applying effects to a plain simulated image.
Parameters
----------
image : array_2d.Array2D
The image before simulation, to which noise addition, PSF convolution, etc. are applied.
"""
exposure_time_map = array_2d.Array2D.full(
fill_value=self.exposure_time,
shape_native=image.shape_native,
pixel_scales=image.pixel_scales,
)
background_sky_map = array_2d.Array2D.full(
fill_value=self.background_sky_level,
shape_native=image.shape_native,
pixel_scales=image.pixel_scales,
)
if self.psf is not None:
psf = self.psf
else:
psf = kernel_2d.Kernel2D.no_blur(pixel_scales=image.pixel_scales)
image = psf.convolved_array_from_array(array=image)
image = image + background_sky_map
if self.add_poisson_noise is True:
image = preprocess.data_eps_with_poisson_noise_added(
data_eps=image,
exposure_time_map=exposure_time_map,
seed=self.noise_seed,
)
noise_map = preprocess.noise_map_from_data_eps_and_exposure_time_map(
data_eps=image, exposure_time_map=exposure_time_map
)
else:
noise_map = array_2d.Array2D.full(
fill_value=self.noise_if_add_noise_false,
shape_native=image.shape_native,
pixel_scales=image.pixel_scales,
)
if np.isnan(noise_map).any():
raise exc.DatasetException(
"The noise-map has NaN values in it. This suggests your exposure time and / or"
"background sky levels are too low, creating signal counts at or close to 0.0."
)
image = image - background_sky_map
mask = msk.Mask2D.unmasked(
shape_native=image.shape_native, pixel_scales=image.pixel_scales
)
image = array_2d.Array2D.manual_mask(array=image, mask=mask)
return Imaging(image=image, psf=self.psf, noise_map=noise_map, name=name)
```
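Editor's note: the sketch below is a hypothetical usage example of the `Imaging` and `SimulatorImaging` classes defined above. It is not part of the PyAutoArray source; it only uses constructors and methods that appear in the file itself (`Array2D.full`, `Kernel2D.no_blur`, `Mask2D.unmasked`, `SimulatorImaging.from_image`, `Imaging.apply_mask`), the import paths mirror the file's own imports, and the shapes, exposure time and pixel scales are arbitrary.
```python
# Hypothetical usage sketch (editor's addition); values and shapes are illustrative.
from autoarray.dataset.imaging import SimulatorImaging
from autoarray.mask import mask_2d as msk
from autoarray.structures.arrays.two_d import array_2d
from autoarray.structures import kernel_2d

# A flat "model" image and a no-blur PSF (both constructors appear in the file above).
image = array_2d.Array2D.full(
    fill_value=1.0, shape_native=(21, 21), pixel_scales=(0.1, 0.1)
)
psf = kernel_2d.Kernel2D.no_blur(pixel_scales=image.pixel_scales)

# Simulate a noisy observation of this image with a 300s exposure and Poisson noise.
simulator = SimulatorImaging(
    exposure_time=300.0, background_sky_level=0.1, psf=psf, noise_seed=1
)
imaging = simulator.from_image(image=image, name="simulated")

# Apply a (fully unmasked) mask, which also sets up the convolver and blurring grid.
mask = msk.Mask2D.unmasked(
    shape_native=imaging.shape_native, pixel_scales=imaging.pixel_scales
)
masked_imaging = imaging.apply_mask(mask=mask)
print(masked_imaging.image.shape_native, masked_imaging.noise_map.shape_native)
```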
#### File: autoarray/mask/mask_2d_util.py
```python
import numpy as np
from skimage.transform import rescale
from typing import Tuple
import warnings
from autoarray import decorator_util
from autoarray import exc
from autoarray.structures.grids.two_d import grid_2d_util
@decorator_util.jit()
def mask_2d_centres_from(
shape_native: Tuple[int, int],
pixel_scales: Tuple[float, float],
centre: (float, float),
) -> (float, float):
"""
Returns the (y,x) scaled central coordinates of a mask from its shape, pixel-scales and centre.
The coordinate system is defined such that the positive y axis is up and positive x axis is right.
Parameters
----------
shape_native : (int, int)
The (y,x) shape of the 2D array the scaled centre is computed for.
pixel_scales : (float, float)
The (y,x) scaled units to pixel units conversion factor of the 2D array.
centre : (float, float)
The (y,x) centre of the 2D mask.
Returns
-------
tuple (float, float)
The (y,x) scaled central coordinates of the input array.
Examples
--------
centres_scaled = centres_from_shape_pixel_scales_and_centre(shape=(5,5), pixel_scales=(0.5, 0.5), centre=(0.0, 0.0))
"""
y_centre_scaled = (float(shape_native[0] - 1) / 2) - (centre[0] / pixel_scales[0])
x_centre_scaled = (float(shape_native[1] - 1) / 2) + (centre[1] / pixel_scales[1])
return (y_centre_scaled, x_centre_scaled)
@decorator_util.jit()
def total_pixels_2d_from(mask_2d: np.ndarray) -> int:
"""
Returns the total number of unmasked pixels in a mask.
Parameters
----------
mask_2d : np.ndarray
A 2D array of bools, where `False` values are unmasked and included when counting pixels.
Returns
-------
int
The total number of pixels that are unmasked.
Examples
--------
mask = np.array([[True, False, True],
[False, False, False]
[True, False, True]])
total_regular_pixels = total_regular_pixels_from_mask(mask=mask)
"""
total_regular_pixels = 0
for y in range(mask_2d.shape[0]):
for x in range(mask_2d.shape[1]):
if not mask_2d[y, x]:
total_regular_pixels += 1
return total_regular_pixels
@decorator_util.jit()
def total_sub_pixels_2d_from(mask_2d: np.ndarray, sub_size: int) -> int:
"""
Returns the total number of sub-pixels in unmasked pixels in a mask.
Parameters
----------
mask_2d : np.ndarray
A 2D array of bools, where `False` values are unmasked and included when counting sub pixels.
sub_size : int
The size of the sub-grid that each pixel of the 2D mask array is divided into.
Returns
-------
int
The total number of sub pixels that are unmasked.
Examples
--------
mask = np.array([[True, False, True],
[False, False, False]
[True, False, True]])
total_sub_pixels = total_sub_pixels_from_mask(mask=mask, sub_size=2)
"""
return total_pixels_2d_from(mask_2d) * sub_size ** 2
@decorator_util.jit()
def total_sparse_pixels_2d_from(
mask_2d: np.ndarray, unmasked_sparse_grid_pixel_centres: np.ndarray
) -> int:
"""Given the full (i.e. without removing pixels which are outside the mask) pixelization grid's pixel
center and the mask, compute the total number of pixels which are within the mask and thus used
by the pixelization grid.
Parameters
-----------
mask_2d : np.ndarray
The mask within which pixelization pixels must be inside
unmasked_sparse_grid_pixel_centres : np.ndarray
The centres of the unmasked pixelization grid pixels.
"""
total_sparse_pixels = 0
for unmasked_sparse_pixel_index in range(
unmasked_sparse_grid_pixel_centres.shape[0]
):
y = unmasked_sparse_grid_pixel_centres[unmasked_sparse_pixel_index, 0]
x = unmasked_sparse_grid_pixel_centres[unmasked_sparse_pixel_index, 1]
if not mask_2d[y, x]:
total_sparse_pixels += 1
return total_sparse_pixels
@decorator_util.jit()
def mask_2d_circular_from(
shape_native: Tuple[int, int],
pixel_scales: Tuple[float, float],
radius: float,
centre: Tuple[float, float] = (0.0, 0.0),
) -> np.ndarray:
"""
Returns a circular mask from the 2D mask array shape and radius of the circle.
This creates a 2D array where all values within the mask radius are unmasked and therefore `False`.
Parameters
----------
shape_native: (int, int)
The (y,x) shape of the mask in units of pixels.
pixel_scales: float
The scaled units to pixel units conversion factor of each pixel.
radius : float
The radius (in scaled units) of the circle within which pixels unmasked.
centre
The centre of the circle used to mask pixels.
Returns
-------
ndarray
The 2D mask array whose central pixels are masked as a circle.
Examples
--------
mask = mask_circular_from_shape_pixel_scale_and_radius(
shape=(10, 10), pixel_scales=0.1, radius=0.5, centre=(0.0, 0.0))
"""
mask_2d = np.full(shape_native, True)
centres_scaled = mask_2d_centres_from(
shape_native=mask_2d.shape, pixel_scales=pixel_scales, centre=centre
)
for y in range(mask_2d.shape[0]):
for x in range(mask_2d.shape[1]):
y_scaled = (y - centres_scaled[0]) * pixel_scales[0]
x_scaled = (x - centres_scaled[1]) * pixel_scales[1]
r_scaled = np.sqrt(x_scaled ** 2 + y_scaled ** 2)
if r_scaled <= radius:
mask_2d[y, x] = False
return mask_2d
@decorator_util.jit()
def mask_2d_circular_annular_from(
shape_native: Tuple[int, int],
pixel_scales: Tuple[float, float],
inner_radius: float,
outer_radius: float,
centre: Tuple[float, float] = (0.0, 0.0),
) -> np.ndarray:
"""
Returns a circular annular mask from an input inner and outer mask radius and shape.
This creates a 2D array where all values within the inner and outer radii are unmasked and therefore `False`.
Parameters
----------
shape_native : (int, int)
The (y,x) shape of the mask in units of pixels.
pixel_scales : (float, float)
The scaled units to pixel units conversion factor of each pixel.
inner_radius : float
The radius (in scaled units) of the inner circle outside of which pixels are unmasked.
outer_radius : float
The radius (in scaled units) of the outer circle within which pixels are unmasked.
centre
The centre of the annulus used to mask pixels.
Returns
-------
ndarray
The 2D mask array whose central pixels are masked as a annulus.
Examples
--------
mask = mask_annnular_from_shape_pixel_scale_and_radius(
shape=(10, 10), pixel_scales=0.1, inner_radius=0.5, outer_radius=1.5, centre=(0.0, 0.0))
"""
mask_2d = np.full(shape_native, True)
centres_scaled = mask_2d_centres_from(
shape_native=mask_2d.shape, pixel_scales=pixel_scales, centre=centre
)
for y in range(mask_2d.shape[0]):
for x in range(mask_2d.shape[1]):
y_scaled = (y - centres_scaled[0]) * pixel_scales[0]
x_scaled = (x - centres_scaled[1]) * pixel_scales[1]
r_scaled = np.sqrt(x_scaled ** 2 + y_scaled ** 2)
if outer_radius >= r_scaled >= inner_radius:
mask_2d[y, x] = False
return mask_2d
@decorator_util.jit()
def mask_2d_circular_anti_annular_from(
shape_native: Tuple[int, int],
pixel_scales: Tuple[float, float],
inner_radius: float,
outer_radius: float,
outer_radius_2_scaled: float,
centre: Tuple[float, float] = (0.0, 0.0),
) -> np.ndarray:
"""
Returns an anti-annular mask from an input inner and outer mask radius and shape. The anti-annular mask is analogous to
the annular mask but inverted, whereby its unmasked values are those outside the annulus.
This creates a 2D array where all values inside the inner radius, or between the outer radius and the second outer radius, are unmasked and therefore `False`.
Parameters
----------
shape_native : (int, int)
The (y,x) shape of the mask in units of pixels.
pixel_scales : (float, float)
The scaled units to pixel units conversion factor of each pixel.
inner_radius : float
The inner radius in scaled units of the annulus within which pixels are `False` and unmasked.
outer_radius : float
The first outer radius in scaled units of the annulus within which pixels are `True` and masked.
outer_radius_2_scaled : float
The second outer radius in scaled units of the annulus within which pixels are `False` and unmasked and
outside of which all entries are `True` and masked.
centre
The centre of the annulus used to mask pixels.
Returns
-------
ndarray
The 2D mask array whose central pixels are masked as a annulus.
Examples
--------
mask = mask_annnular_from_shape_pixel_scale_and_radius(
shape=(10, 10), pixel_scales=0.1, inner_radius=0.5, outer_radius=1.5, centre=(0.0, 0.0))
"""
mask_2d = np.full(shape_native, True)
centres_scaled = mask_2d_centres_from(
shape_native=mask_2d.shape, pixel_scales=pixel_scales, centre=centre
)
for y in range(mask_2d.shape[0]):
for x in range(mask_2d.shape[1]):
y_scaled = (y - centres_scaled[0]) * pixel_scales[0]
x_scaled = (x - centres_scaled[1]) * pixel_scales[1]
r_scaled = np.sqrt(x_scaled ** 2 + y_scaled ** 2)
if (
inner_radius >= r_scaled
or outer_radius_2_scaled >= r_scaled >= outer_radius
):
mask_2d[y, x] = False
return mask_2d
def mask_2d_via_pixel_coordinates_from(
shape_native: Tuple[int, int], pixel_coordinates: [list], buffer: int = 0
) -> np.ndarray:
"""
Returns a mask where all unmasked `False` entries are defined from an input list of list of pixel coordinates.
These may be buffed via an input ``buffer``, whereby all entries are buffed in all 8 neighboring directions by this
amount.
Parameters
----------
shape_native : (int, int)
The (y,x) shape of the mask in units of pixels.
pixel_coordinates : [[int, int]]
The input lists of 2D pixel coordinates where `False` entries are created.
buffer : int
All input ``pixel_coordinates`` are buffed with `False` entries in all 8 neighboring directions by this
amount.
"""
mask_2d = np.full(shape=shape_native, fill_value=True)
for y, x in pixel_coordinates:
mask_2d[y, x] = False
if buffer == 0:
return mask_2d
else:
return buffed_mask_2d_from(mask_2d=mask_2d, buffer=buffer)
@decorator_util.jit()
def elliptical_radius_from(
y_scaled: float, x_scaled: float, angle: float, axis_ratio: float
) -> float:
"""
Returns the elliptical radius of an ellipse from its (y,x) scaled centre, rotation angle `angle` defined in degrees
counter-clockwise from the positive x-axis and its axis-ratio.
This is used by the function `mask_elliptical_from` to determine the radius of every (y,x) coordinate in elliptical
units when deciding if it is within the mask.
Parameters
----------
y_scaled : float
The scaled y coordinate in Cartesian coordinates which is converted to elliptical coordinates.
x_scaled : float
The scaled x coordinate in Cartesian coordinates which is converted to elliptical coordinates.
angle
The rotation angle in degrees counter-clockwise from the positive x-axis
axis_ratio
The axis-ratio of the ellipse (minor axis / major axis).
Returns
-------
float
The radius of the input scaled (y,x) coordinate on the ellipse's elliptical coordinate system.
"""
r_scaled = np.sqrt(x_scaled ** 2 + y_scaled ** 2)
theta_rotated = np.arctan2(y_scaled, x_scaled) + np.radians(angle)
y_scaled_elliptical = r_scaled * np.sin(theta_rotated)
x_scaled_elliptical = r_scaled * np.cos(theta_rotated)
return np.sqrt(
x_scaled_elliptical ** 2.0 + (y_scaled_elliptical / axis_ratio) ** 2.0
)
@decorator_util.jit()
def mask_2d_elliptical_from(
shape_native: Tuple[int, int],
pixel_scales: Tuple[float, float],
major_axis_radius: float,
axis_ratio: float,
angle: float,
centre: Tuple[float, float] = (0.0, 0.0),
) -> np.ndarray:
"""
Returns an elliptical mask from an input major-axis mask radius, axis-ratio, rotational angle, shape and
centre.
This creates a 2D array where all values within the ellipse are unmasked and therefore `False`.
Parameters
----------
shape_native: (int, int)
The (y,x) shape of the mask in units of pixels.
pixel_scales : (float, float)
The scaled units to pixel units conversion factor of each pixel.
major_axis_radius : float
The major-axis (in scaled units) of the ellipse within which pixels are unmasked.
axis_ratio
The axis-ratio of the ellipse within which pixels are unmasked.
angle
The rotation angle of the ellipse within which pixels are unmasked, (counter-clockwise from the positive
x-axis).
centre
The centre of the ellipse used to mask pixels.
Returns
-------
ndarray
The 2D mask array whose central pixels are masked as an ellipse.
Examples
--------
mask = mask_elliptical_from_shape_pixel_scale_and_radius(
shape=(10, 10), pixel_scales=0.1, major_axis_radius=0.5, elliptical_comps=(0.333333, 0.0), centre=(0.0, 0.0))
"""
mask_2d = np.full(shape_native, True)
centres_scaled = mask_2d_centres_from(
shape_native=mask_2d.shape, pixel_scales=pixel_scales, centre=centre
)
for y in range(mask_2d.shape[0]):
for x in range(mask_2d.shape[1]):
y_scaled = (y - centres_scaled[0]) * pixel_scales[0]
x_scaled = (x - centres_scaled[1]) * pixel_scales[1]
r_scaled_elliptical = elliptical_radius_from(
y_scaled, x_scaled, angle, axis_ratio
)
if r_scaled_elliptical <= major_axis_radius:
mask_2d[y, x] = False
return mask_2d
@decorator_util.jit()
def mask_2d_elliptical_annular_from(
shape_native: Tuple[int, int],
pixel_scales: Tuple[float, float],
inner_major_axis_radius: float,
inner_axis_ratio: float,
inner_phi: float,
outer_major_axis_radius: float,
outer_axis_ratio: float,
outer_phi: float,
centre: Tuple[float, float] = (0.0, 0.0),
) -> np.ndarray:
"""
Returns an elliptical annular mask from an input major-axis mask radius, axis-ratio, rotational angle for
both the inner and outer elliptical annuli and a shape and centre for the mask.
This creates a 2D array where all values within the elliptical annuli are unmasked and therefore `False`.
Parameters
----------
shape_native: (int, int)
The (y,x) shape of the mask in units of pixels.
pixel_scales : (float, float)
The scaled units to pixel units conversion factor of each pixel.
inner_major_axis_radius : float
The major-axis (in scaled units) of the inner ellipse within which pixels are masked.
inner_axis_ratio
The axis-ratio of the inner ellipse within which pixels are masked.
inner_phi : float
The rotation angle of the inner ellipse within which pixels are masked, (counter-clockwise from the
positive x-axis).
outer_major_axis_radius : float
The major-axis (in scaled units) of the outer ellipse within which pixels are unmasked.
outer_axis_ratio
The axis-ratio of the outer ellipse within which pixels are unmasked.
outer_phi : float
The rotation angle of the outer ellipse within which pixels are unmasked, (counter-clockwise from the
positive x-axis).
centre
The centre of the elliptical annuli used to mask pixels.
Returns
-------
ndarray
The 2D mask array whose elliptical annuli pixels are masked.
Examples
--------
mask = mask_elliptical_annuli_from_shape_pixel_scale_and_radius(
shape=(10, 10), pixel_scales=0.1,
inner_major_axis_radius=0.5, inner_axis_ratio=0.5, inner_phi=45.0,
outer_major_axis_radius=1.5, outer_axis_ratio=0.8, outer_phi=90.0,
centre=(0.0, 0.0))
"""
mask_2d = np.full(shape_native, True)
centres_scaled = mask_2d_centres_from(
shape_native=mask_2d.shape, pixel_scales=pixel_scales, centre=centre
)
for y in range(mask_2d.shape[0]):
for x in range(mask_2d.shape[1]):
y_scaled = (y - centres_scaled[0]) * pixel_scales[0]
x_scaled = (x - centres_scaled[1]) * pixel_scales[1]
inner_r_scaled_elliptical = elliptical_radius_from(
y_scaled, x_scaled, inner_phi, inner_axis_ratio
)
outer_r_scaled_elliptical = elliptical_radius_from(
y_scaled, x_scaled, outer_phi, outer_axis_ratio
)
if (
inner_r_scaled_elliptical >= inner_major_axis_radius
and outer_r_scaled_elliptical <= outer_major_axis_radius
):
mask_2d[y, x] = False
return mask_2d
@decorator_util.jit()
def blurring_mask_2d_from(
mask_2d: np.ndarray, kernel_shape_native: Tuple[int, int]
) -> np.ndarray:
"""
Returns a blurring mask from an input mask and psf shape.
The blurring mask corresponds to all pixels which are outside of the mask but will have a fraction of their
light blur into the masked region due to PSF convolution. The PSF shape is used to determine which pixels these are.
If a pixel is identified which is outside the 2D dimensions of the input mask, an error is raised and the user
should pad the input mask (and associated images).
Parameters
-----------
mask_2d : np.ndarray
A 2D array of bools, where `False` values are unmasked.
kernel_shape_native : (int, int)
The 2D shape of the PSF which is used to compute the blurring mask.
Returns
-------
ndarray
The 2D blurring mask array whose unmasked values (`False`) correspond to where the mask will have PSF light
blurred into them.
Examples
--------
mask = np.array([[True, True, True],
[True, False, True]
[True, True, True]])
blurring_mask = blurring_mask_from_mask_and_psf_shape(mask=mask)
"""
blurring_mask_2d = np.full(mask_2d.shape, True)
for y in range(mask_2d.shape[0]):
for x in range(mask_2d.shape[1]):
if not mask_2d[y, x]:
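# Editor's note: the y1/x1 loops below span the kernel's half-width either side of the
# unmasked pixel (y, x), so (y + y1, x + x1) visits every pixel the PSF overlaps; any
# such pixel that is masked is added to the blurring mask (set to False).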
for y1 in range(
(-kernel_shape_native[0] + 1) // 2,
(kernel_shape_native[0] + 1) // 2,
):
for x1 in range(
(-kernel_shape_native[1] + 1) // 2,
(kernel_shape_native[1] + 1) // 2,
):
if (
0 <= x + x1 <= mask_2d.shape[1] - 1
and 0 <= y + y1 <= mask_2d.shape[0] - 1
):
if mask_2d[y + y1, x + x1]:
blurring_mask_2d[y + y1, x + x1] = False
else:
raise exc.MaskException(
"setup_blurring_mask extends beyond the sub_size "
"of the mask - pad the datas array before masking"
)
return blurring_mask_2d
@decorator_util.jit()
def mask_2d_via_shape_native_and_native_for_slim(
shape_native: Tuple[int, int], native_for_slim: np.ndarray
) -> np.ndarray:
"""
For a slimmed set of data that was computed by mapping unmasked values from a native 2D array of shape
(total_y_pixels, total_x_pixels), map its slimmed indexes back to the original 2D array to create the
native 2D mask.
This uses an array 'native_for_slim' of shape [total_masked_pixels] where each index gives the native 2D pixel
indexes of the slimmed array's unmasked pixels, for example:
- If native_for_slim[0] = [0,0], the first value of the slimmed array maps to the pixel [0,0] of the native 2D array.
- If native_for_slim[1] = [0,1], the second value of the slimmed array maps to the pixel [0,1] of the native 2D array.
- If native_for_slim[4] = [1,1], the fifth value of the slimmed array maps to the pixel [1,1] of the native 2D array.
Parameters
----------
shape_native : (int, int)
The shape of the 2D array which the pixels are defined on.
native_for_slim : np.ndarray
An array describing the native 2D array index that every slimmed array index maps to.
Returns
-------
ndarray
A 2D mask array where unmasked values are `False`.
Examples
--------
native_for_slim = np.array([[0,1], [1,0], [1,1], [1,2], [2,1]])
mask = mask_from_shape_and_native_for_slim(shape=(3,3), native_for_slim=native_for_slim)
"""
mask = np.ones(shape_native)
for index in range(len(native_for_slim)):
mask[native_for_slim[index, 0], native_for_slim[index, 1]] = False
return mask
@decorator_util.jit()
def check_if_edge_pixel(mask_2d: np.ndarray, y: int, x: int) -> bool:
"""
Checks if an input [y,x] pixel on the input `mask` is an edge-pixel.
An edge pixel is defined as a pixel on the mask which is unmasked (has a `False` value) and for which at least 1 of its 8
direct neighbors is masked (is `True`).
Parameters
----------
mask_2d : np.ndarray
The mask for which the input pixel is checked if it is an edge pixel.
y : int
The y pixel coordinate on the mask that is checked for if it is an edge pixel.
x : int
The x pixel coordinate on the mask that is checked for if it is an edge pixel.
Returns
-------
bool
If `True` the pixel on the mask is an edge pixel, else a `False` is returned because it is not.
"""
if (
mask_2d[y + 1, x]
or mask_2d[y - 1, x]
or mask_2d[y, x + 1]
or mask_2d[y, x - 1]
or mask_2d[y + 1, x + 1]
or mask_2d[y + 1, x - 1]
or mask_2d[y - 1, x + 1]
or mask_2d[y - 1, x - 1]
):
return True
else:
return False
@decorator_util.jit()
def total_edge_pixels_from(mask_2d: np.ndarray) -> int:
"""
Returns the total number of edge-pixels in a mask.
An edge pixel is defined as a pixel on the mask which is unmasked (has a `False` value) and for which at least 1 of its 8
direct neighbors is masked (is `True`).
Parameters
----------
mask_2d : np.ndarray
The mask for which the total number of edge pixels is computed.
Returns
-------
int
The total number of edge pixels.
"""
edge_pixel_total = 0
for y in range(1, mask_2d.shape[0] - 1):
for x in range(1, mask_2d.shape[1] - 1):
if not mask_2d[y, x]:
if check_if_edge_pixel(mask_2d=mask_2d, y=y, x=x):
edge_pixel_total += 1
return edge_pixel_total
@decorator_util.jit()
def edge_1d_indexes_from(mask_2d: np.ndarray) -> np.ndarray:
"""
Returns a 1D array listing all edge pixel indexes in the mask.
An edge pixel is defined as a pixel on the mask which is unmasked (has a `False` value) and for which at least 1 of its 8
direct neighbors is masked (is `True`).
Parameters
----------
mask_2d : np.ndarray
The mask for which the 1D edge pixel indexes are computed.
Returns
-------
np.ndarray
The 1D indexes of all edge pixels on the mask.
"""
edge_pixel_total = total_edge_pixels_from(mask_2d)
edge_pixels = np.zeros(edge_pixel_total)
edge_index = 0
regular_index = 0
for y in range(1, mask_2d.shape[0] - 1):
for x in range(1, mask_2d.shape[1] - 1):
if not mask_2d[y, x]:
if (
mask_2d[y + 1, x]
or mask_2d[y - 1, x]
or mask_2d[y, x + 1]
or mask_2d[y, x - 1]
or mask_2d[y + 1, x + 1]
or mask_2d[y + 1, x - 1]
or mask_2d[y - 1, x + 1]
or mask_2d[y - 1, x - 1]
):
edge_pixels[edge_index] = regular_index
edge_index += 1
regular_index += 1
return edge_pixels
@decorator_util.jit()
def check_if_border_pixel(
mask_2d: np.ndarray, edge_pixel_slim: int, native_to_slim: np.ndarray
) -> bool:
"""
Checks if an input [y,x] pixel on the input `mask` is a border-pixel.
A border pixel is a pixel which:
1) is not fully surrounded by `False` mask values.
2) Can reach the edge of the array without hitting an unmasked pixel in one of four directions (upwards, downwards,
left, right).
The border pixels are thus pixels which are on the exterior edge of the mask. For example, the inner ring of edge
pixels in an annular mask are edge pixels but not border pixels.
Parameters
----------
mask_2d : np.ndarray
The mask for which the input pixel is checked if it is a border pixel.
edge_pixel_slim : int
The edge pixel index in 1D that is checked if it is a border pixel (this 1D index is mapped to 2d via the
array `sub_native_index_for_sub_slim_index_2d`).
native_to_slim : np.ndarray
An array describing the native 2D array index that every slimmed array index maps to.
Returns
-------
bool
If `True` the pixel on the mask is a border pixel, else a `False` is returned because it is not.
"""
edge_pixel_index = int(edge_pixel_slim)
y = int(native_to_slim[edge_pixel_index, 0])
x = int(native_to_slim[edge_pixel_index, 1])
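# Editor's note: each np.sum test below checks whether every pixel between (y, x) and one
# edge of the array (up, right, down, left) is masked (True), i.e. the path to that edge
# crosses no unmasked pixels - the defining property of an exterior border pixel.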
if (
np.sum(mask_2d[0:y, x]) == y
or np.sum(mask_2d[y, x : mask_2d.shape[1]]) == mask_2d.shape[1] - x - 1
or np.sum(mask_2d[y : mask_2d.shape[0], x]) == mask_2d.shape[0] - y - 1
or np.sum(mask_2d[y, 0:x]) == x
):
return True
else:
return False
@decorator_util.jit()
def total_border_pixels_from(mask_2d, edge_pixels, native_to_slim):
"""
Returns the total number of border-pixels in a mask.
A border pixel is a pixel which:
1) is not fully surrounded by `False` mask values.
2) Can reach the edge of the array without hitting an unmasked pixel in one of four directions (upwards, downwards,
left, right).
The border pixels are thus pixels which are on the exterior edge of the mask. For example, the inner ring of edge
pixels in an annular mask are edge pixels but not border pixels.
Parameters
----------
mask_2d : np.ndarray
The mask for which the total number of border pixels is computed.
edge_pixels : np.ndarray
The slim indexes of the mask's edge pixels, each of which is checked if it is a border pixel (these indexes are
mapped to 2D via the array `native_to_slim`).
native_to_slim : np.ndarray
An array describing the 2D array index that every 1D array index maps to.
Returns
-------
int
The total number of border pixels.
"""
border_pixel_total = 0
for i in range(edge_pixels.shape[0]):
if check_if_border_pixel(mask_2d, edge_pixels[i], native_to_slim):
border_pixel_total += 1
return border_pixel_total
@decorator_util.jit()
def border_slim_indexes_from(mask_2d: np.ndarray) -> np.ndarray:
"""
Returns a slim array of shape [total_unmasked_border_pixels] listing all borders pixel indexes in the mask.
A border pixel is a pixel which:
1) is not fully surrounded by `False` mask values.
2) Can reach the edge of the array without hitting an unmasked pixel in one of four directions (upwards, downwards,
left, right).
The border pixels are thus pixels which are on the exterior edge of the mask. For example, the inner ring of edge
pixels in an annular mask are edge pixels but not border pixels.
Parameters
----------
mask_2d : np.ndarray
The mask for which the slimmed border pixel indexes are calculated.
Returns
-------
np.ndarray
The slimmed indexes of all border pixels on the mask.
"""
edge_pixels = edge_1d_indexes_from(mask_2d=mask_2d)
sub_native_index_for_sub_slim_index_2d = native_index_for_slim_index_2d_from(
mask_2d=mask_2d, sub_size=1
)
border_pixel_total = total_border_pixels_from(
mask_2d=mask_2d,
edge_pixels=edge_pixels,
native_to_slim=sub_native_index_for_sub_slim_index_2d,
)
border_pixels = np.zeros(border_pixel_total)
border_pixel_index = 0
for edge_pixel_index in range(edge_pixels.shape[0]):
if check_if_border_pixel(
mask_2d=mask_2d,
edge_pixel_slim=edge_pixels[edge_pixel_index],
native_to_slim=sub_native_index_for_sub_slim_index_2d,
):
border_pixels[border_pixel_index] = edge_pixels[edge_pixel_index]
border_pixel_index += 1
return border_pixels
def sub_border_pixel_slim_indexes_from(
mask_2d: np.ndarray, sub_size: int
) -> np.ndarray:
"""
Returns a slim array of shape [total_unmasked_border_pixels] listing all sub-borders pixel indexes in
the mask.
A border pixel is a pixel which:
1) is not fully surrounded by `False` mask values.
2) Can reach the edge of the array without hitting an unmasked pixel in one of four directions (upwards, downwards,
left, right).
The border pixels are thus pixels which are on the exterior edge of the mask. For example, the inner ring of
edge pixels in an annular mask are edge pixels but not border pixels.
A sub-border pixel is, for a border-pixel, the pixel within that border pixel which is furthest from the origin
of the mask.
Parameters
----------
mask_2d : np.ndarray
The mask for which the 1D border pixel indexes are calculated.
sub_size : int
The size of the sub-grid in each mask pixel.
Returns
-------
np.ndarray
The 1D indexes of all border sub-pixels on the mask.
"""
border_pixels = border_slim_indexes_from(mask_2d=mask_2d)
sub_border_pixels = np.zeros(shape=border_pixels.shape[0])
sub_slim_indexes_for_slim_index = sub_slim_indexes_for_slim_index_via_mask_2d_from(
mask_2d=mask_2d, sub_size=sub_size
)
sub_grid_2d_slim = grid_2d_util.grid_2d_slim_via_mask_from(
mask_2d=mask_2d, pixel_scales=(1.0, 1.0), sub_size=sub_size, origin=(0.0, 0.0)
)
mask_centre = grid_2d_util.grid_2d_centre_from(grid_2d_slim=sub_grid_2d_slim)
for (border_1d_index, border_pixel) in enumerate(border_pixels):
sub_border_pixels_of_border_pixel = sub_slim_indexes_for_slim_index[
int(border_pixel)
]
sub_border_pixels[
border_1d_index
] = grid_2d_util.furthest_grid_2d_slim_index_from(
grid_2d_slim=sub_grid_2d_slim,
slim_indexes=sub_border_pixels_of_border_pixel,
coordinate=mask_centre,
)
return sub_border_pixels
@decorator_util.jit()
def buffed_mask_2d_from(mask_2d: np.ndarray, buffer: int = 1) -> np.ndarray:
"""
Returns a buffed mask from an input mask, where the buffed mask is the input mask but all `False` entries in the
mask are buffed by an integer amount in all 8 surrounding pixels.
Parameters
----------
mask_2d : np.ndarray
The mask whose `False` entries are buffed.
buffer : int
The number of pixels by which each `False` entry is buffed in all 8 directions.
Returns
-------
np.ndarray
The buffed mask.
"""
buffed_mask_2d = mask_2d.copy()
for y in range(mask_2d.shape[0]):
for x in range(mask_2d.shape[1]):
if not mask_2d[y, x]:
for y0 in range(y - buffer, y + 1 + buffer):
for x0 in range(x - buffer, x + 1 + buffer):
if (
y0 >= 0
and x0 >= 0
and y0 <= mask_2d.shape[0] - 1
and x0 <= mask_2d.shape[1] - 1
):
buffed_mask_2d[y0, x0] = False
return buffed_mask_2d
def rescaled_mask_2d_from(mask_2d: np.ndarray, rescale_factor: float) -> np.ndarray:
"""
Returns a rescaled mask from an input mask, where the rescaled mask is the input mask but rescaled to a larger or
smaller size depending on the `rescale_factor`.
For example, a `rescale_factor` of 0.5 would reduce a 10 x 10 mask to a 5 x 5 mask, where the `False` entries
of the 5 x 5 mask correspond to pixels which had at least one `False` entry in their corresponding location on the
10 x 10 mask. A rescale factor of 2.0 would increase the 10 x 10 mask in size to a 20 x 20 mask, with `False`
again wherever the original mask had those entries.
The edge of the rescaled mask is automatically set to all `True` values to prevent border issues.
Parameters
----------
mask_2d : np.ndarray
The mask that is increased or decreased in size via rescaling.
rescale_factor : float
The factor by which the mask is increased in size or decreased in size.
Returns
-------
np.ndarray
The rescaled mask.
"""
warnings.filterwarnings("ignore")
rescaled_mask_2d = rescale(
image=mask_2d,
scale=rescale_factor,
mode="edge",
anti_aliasing=False,
multichannel=False,
)
rescaled_mask_2d[0, :] = 1
rescaled_mask_2d[rescaled_mask_2d.shape[0] - 1, :] = 1
rescaled_mask_2d[:, 0] = 1
rescaled_mask_2d[:, rescaled_mask_2d.shape[1] - 1] = 1
return np.isclose(rescaled_mask_2d, 1)
@decorator_util.jit()
def slim_index_for_sub_slim_index_via_mask_2d_from(
mask_2d: np.ndarray, sub_size: int
) -> np.ndarray:
""" "
For pixels on a native 2D array of shape (total_y_pixels, total_x_pixels), compute a slimmed array which, for
every unmasked pixel on the native 2D array, maps the slimmed sub-pixel indexes to their slimmed pixel indexes.
For example, for a sub-grid size of 2, the following mappings from sub-pixels to 2D array pixels are:
- slim_index_for_sub_slim_index[0] = 0 -> The first sub-pixel maps to the first unmasked pixel on the native 2D array.
- slim_index_for_sub_slim_index[3] = 0 -> The fourth sub-pixel maps to the first unmasked pixel on the native 2D array.
- slim_index_for_sub_slim_index[7] = 1 -> The eighth sub-pixel maps to the second unmasked pixel on the native 2D array.
Parameters
----------
mask_2d : np.ndarray
The mask whose indexes are mapped.
sub_size : int
The sub-size of the grid on the mask, so that the sub-mask indexes can be computed correctly.
Returns
-------
np.ndarray
The array of shape [total_unmasked_pixels*sub_size**2] mapping every unmasked sub-pixel on the native 2D mask
array to its slimmed pixel index.
Examples
--------
mask = np.array([[True, False, True]])
slim_index_for_sub_slim_index = slim_index_for_sub_slim_index_via_mask_2d_from(mask_2d=mask_2d, sub_size=2)
"""
total_sub_pixels = total_sub_pixels_2d_from(mask_2d=mask_2d, sub_size=sub_size)
slim_index_for_sub_slim_index = np.zeros(shape=total_sub_pixels)
slim_index = 0
sub_slim_index = 0
for y in range(mask_2d.shape[0]):
for x in range(mask_2d.shape[1]):
if not mask_2d[y, x]:
for y1 in range(sub_size):
for x1 in range(sub_size):
slim_index_for_sub_slim_index[sub_slim_index] = slim_index
sub_slim_index += 1
slim_index += 1
return slim_index_for_sub_slim_index
def sub_slim_indexes_for_slim_index_via_mask_2d_from(
mask_2d: np.ndarray, sub_size: int
) -> [list]:
""" "
For pixels on a native 2D array of shape (total_y_pixels, total_x_pixels), compute a list of lists which, for every
unmasked pixel giving its slim pixel indexes of its corresponding sub-pixels.
For example, for a sub-grid size of 2, the following mappings from sub-pixels to 2D array pixels are:
- sub_slim_indexes_for_slim_index[0] = [0, 1, 2, 3] -> The first pixel maps to the first 4 subpixels in 1D.
- sub_slim_indexes_for_slim_index[1] = [4, 5, 6, 7] -> The second pixel maps to the next 4 subpixels in 1D.
Parameters
----------
mask_2d : np.ndarray
The mask whose indexes are mapped.
sub_size : int
The sub-size of the grid on the mask, so that the sub-mask indexes can be computed correctly.
Returns
-------
[list]
The lists of the 1D sub-pixel indexes in every unmasked pixel in the mask.
The term 'grid' is used because the grid is defined as the grid of coordinates on the centre of every
pixel on the 2D array. Thus, this array maps sub-pixels on a sub-grid to pixels on a grid.
Examples
--------
mask = ([[True, False, True]])
sub_mask_1d_indexes_for_mask_1d_index = sub_mask_1d_indexes_for_mask_1d_index_from(mask=mask, sub_size=2)
"""
total_pixels = total_pixels_2d_from(mask_2d=mask_2d)
sub_slim_indexes_for_slim_index = [[] for _ in range(total_pixels)]
slim_index_for_sub_slim_indexes = slim_index_for_sub_slim_index_via_mask_2d_from(
mask_2d=mask_2d, sub_size=sub_size
).astype("int")
for sub_slim_index, slim_index in enumerate(slim_index_for_sub_slim_indexes):
sub_slim_indexes_for_slim_index[slim_index].append(sub_slim_index)
return sub_slim_indexes_for_slim_index
@decorator_util.jit()
def sub_slim_index_for_sub_native_index_from(sub_mask_2d: np.ndarray):
"""
Returns a 2D array which maps every `False` entry of a 2D mask to its slimmed (sub-)mask index. Every
`True` entry is given a value of -1.
This is used as a convenience tool for creating structures util between different grids and structures.
For example, if we had a 3x4 mask:
[[False, True, False, False],
[False, True, False, False],
[False, False, False, True]]]
The sub_slim_index_for_sub_native_index array would be:
[[0, -1, 1, 2],
[3, -1, 4, 5],
[6, 7, 8, -1]]
Parameters
----------
sub_mask_2d : np.ndarray
The 2D mask that the util array is created for.
Returns
-------
ndarray
The 2D array mapping 2D mask entries to their 1D masked array indexes.
Examples
--------
mask = np.full(fill_value=False, shape=(9,9))
sub_two_to_one = mask_to_mask_1d_index_from_mask(mask=mask)
"""
sub_slim_index_for_sub_native_index = np.full(
fill_value=-1, shape=sub_mask_2d.shape
)
sub_mask_1d_index = 0
for sub_mask_y in range(sub_mask_2d.shape[0]):
for sub_mask_x in range(sub_mask_2d.shape[1]):
if sub_mask_2d[sub_mask_y, sub_mask_x] == False:
sub_slim_index_for_sub_native_index[
sub_mask_y, sub_mask_x
] = sub_mask_1d_index
sub_mask_1d_index += 1
return sub_slim_index_for_sub_native_index
@decorator_util.jit()
def native_index_for_slim_index_2d_from(
mask_2d: np.ndarray, sub_size: int
) -> np.ndarray:
"""
Returns an array of shape [total_unmasked_pixels*sub_size**2] that maps every unmasked sub-pixel to its
corresponding native 2D pixel using its (y,x) pixel indexes.
For example, for a sub-grid size of 2x2, if pixel [2,5] on the native mask is the first unmasked pixel, its sub-pixels map as follows:
- The first sub-pixel maps to sub_native_index_for_sub_slim_index_2d[0] = [4,10]
- The second sub-pixel maps to sub_native_index_for_sub_slim_index_2d[1] = [4,11]
- The third sub-pixel maps to sub_native_index_for_sub_slim_index_2d[2] = [5,10]
Parameters
-----------
mask_2d : np.ndarray
A 2D array of bools, where `False` values are unmasked.
sub_size : int
The size of the sub-grid in each mask pixel.
Returns
-------
ndarray
An array of shape [total_unmasked_pixels*sub_size**2, 2] that maps every sub-pixel of the slimmed array to its (y,x)
indexes on the native array of shape [total_pixels*sub_size, total_pixels*sub_size].
Examples
--------
mask_2d = np.array([[True, True, True],
[True, False, True]
[True, True, True]])
sub_native_index_for_sub_slim_index_2d = sub_native_index_for_sub_slim_index_via_mask_2d_from(mask_2d=mask_2d, sub_size=1)
"""
total_sub_pixels = total_sub_pixels_2d_from(mask_2d=mask_2d, sub_size=sub_size)
sub_native_index_for_sub_slim_index_2d = np.zeros(shape=(total_sub_pixels, 2))
sub_slim_index = 0
for y in range(mask_2d.shape[0]):
for x in range(mask_2d.shape[1]):
if not mask_2d[y, x]:
for y1 in range(sub_size):
for x1 in range(sub_size):
sub_native_index_for_sub_slim_index_2d[sub_slim_index, :] = (
(y * sub_size) + y1,
(x * sub_size) + x1,
)
sub_slim_index += 1
return sub_native_index_for_sub_slim_index_2d
@decorator_util.jit()
def mask_2d_neighbors_from(mask_2d: np.ndarray) -> np.ndarray:
"""
Returns an array of shape [total_unmasked_pixels] that maps every unmasked pixel to the slim index of a
neighboring unmasked pixel.
Neighbors are chosen to the right of every unmasked pixel, and then down, left and up if there is no unmasked pixel
in each location.
Parameters
-----------
mask_2d : np.ndarray
A 2D array of bools, where `False` values are unmasked.
Returns
-------
ndarray
A slimmed array mapping every unmasked pixel to the slimmed index of a neighboring unmasked pixel.
Examples
--------
mask = np.array(
[
[True, True, True, True],
[True, False, False, True],
[True, False, False, True],
[True, True, True, True],
]
)
mask_neighbors = mask_2d_neighbors_from(mask_2d=mask_2d)
"""
total_pixels = total_pixels_2d_from(mask_2d=mask_2d)
mask_neighbors = -1 * np.ones(shape=total_pixels)
sub_slim_index_for_sub_native_index = sub_slim_index_for_sub_native_index_from(
sub_mask_2d=mask_2d
)
mask_index = 0
for y in range(mask_2d.shape[0]):
for x in range(mask_2d.shape[1]):
if not mask_2d[y, x]:
flag = True
if x + 1 < mask_2d.shape[1]:
if not mask_2d[y, x + 1]:
mask_neighbors[
mask_index
] = sub_slim_index_for_sub_native_index[y, x + 1]
flag = False
if y + 1 < mask_2d.shape[0] and flag:
if not mask_2d[y + 1, x]:
mask_neighbors[
mask_index
] = sub_slim_index_for_sub_native_index[y + 1, x]
flag = False
if x - 1 >= 0 and flag:
if not mask_2d[y, x - 1]:
mask_neighbors[
mask_index
] = sub_slim_index_for_sub_native_index[y, x - 1]
flag = False
if y - 1 >= 0 and flag:
if not mask_2d[y - 1, x]:
mask_neighbors[
mask_index
] = sub_slim_index_for_sub_native_index[y - 1, x]
mask_index += 1
return mask_neighbors
```
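Editor's note: a minimal, hypothetical sketch of how the mask utilities above might be called directly. It assumes the module is importable from the path shown in the file header (`autoarray/mask/mask_2d_util.py`); the shape, pixel scales, radius and kernel size are arbitrary, and only functions defined in the listing above are used.
```python
# Hypothetical usage sketch (editor's addition); values and shapes are illustrative.
import numpy as np

from autoarray.mask import mask_2d_util

# A circular mask of radius 0.8" on a 9 x 9 grid of 0.25" pixels; `False` = unmasked.
mask_2d = mask_2d_util.mask_2d_circular_from(
    shape_native=(9, 9), pixel_scales=(0.25, 0.25), radius=0.8, centre=(0.0, 0.0)
)

print(mask_2d_util.total_pixels_2d_from(mask_2d=mask_2d))                  # unmasked pixels
print(mask_2d_util.total_sub_pixels_2d_from(mask_2d=mask_2d, sub_size=2))  # times sub_size**2

# Masked pixels whose light a 3x3 PSF kernel would blur into the unmasked region.
blurring_mask_2d = mask_2d_util.blurring_mask_2d_from(
    mask_2d=mask_2d, kernel_shape_native=(3, 3)
)
print(int(np.sum(blurring_mask_2d == False)))
```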
#### File: grids/two_d/abstract_grid_2d.py
```python
import numpy as np
from autoarray import decorator_util
from autoarray import exc
from autoarray.geometry import geometry_util
from autoarray.structures import abstract_structure
from autoarray.structures.arrays.two_d import array_2d
from autoarray.structures.grids import abstract_grid
from autoarray.structures.grids.two_d import grid_2d
from autoarray.structures.grids.two_d import grid_2d_irregular
from autoarray.mask import mask_2d as msk
from autoarray.structures.grids.two_d import grid_2d_util
from autoarray.structures.arrays.two_d import array_2d_util
def check_grid_2d(grid_2d):
if grid_2d.shape[-1] != 2:
raise exc.GridException(
"The final dimension of the input grid is not equal to 2 (e.g. the (y,x) coordinates)"
)
if len(grid_2d.shape) < 2 or len(grid_2d.shape) > 3:
raise exc.GridException("The dimensions of the input grid array are not 2 or 3")
def check_grid_2d_and_mask_2d(grid_2d, mask_2d):
if len(grid_2d.shape) == 2:
if grid_2d.shape[0] != mask_2d.sub_pixels_in_mask:
raise exc.GridException(
"The input 1D grid does not have the same number of entries as sub-pixels in"
"the mask."
)
elif len(grid_2d.shape) == 3:
if (grid_2d.shape[0], grid_2d.shape[1]) != mask_2d.sub_shape_native:
raise exc.GridException(
"The input grid is 2D but not the same dimensions as the sub-mask "
"(e.g. the mask 2D shape multipled by its sub size."
)
def convert_grid_2d(grid_2d, mask_2d):
"""
Manual Grid2D functions take as input a list or ndarray which is to be returned as an Grid2D. This function
performs the following and checks and conversions on the input:
1) If the input is a list, convert it to an ndarray.
2) Check that the number of sub-pixels in the array is identical to that of the mask.
3) Map the input ndarray to its `slim` representation.
For a Grid2D, `slim` refers to a 2D NumPy array of shape [total_coordinates, 2] and `native` a 3D NumPy array of
shape [total_y_coordinates, total_x_coordinates, 2]
Parameters
----------
grid_2d : np.ndarray or list
The input structure which is converted to an ndarray if it is a list.
mask_2d : Mask2D
The mask of the output Grid2D.
"""
grid_2d = abstract_grid.convert_grid(grid=grid_2d)
if len(grid_2d.shape) == 2:
return abstract_grid.convert_grid(grid=grid_2d)
return grid_2d_util.grid_2d_slim_from(
grid_2d_native=grid_2d, mask=mask_2d, sub_size=mask_2d.sub_size
)
class AbstractGrid2D(abstract_structure.AbstractStructure2D):
def __array_finalize__(self, obj):
super().__array_finalize__(obj)
if hasattr(obj, "_sub_border_flat_indexes"):
self.mask._sub_border_flat_indexes = obj._sub_border_flat_indexes
@property
def slim(self):
"""
Return a `Grid2D` where the data is stored in its `slim` representation, which is an ndarray of shape
[total_unmasked_pixels * sub_size**2, 2].
If it is already stored in its `slim` representation it is returned as it is. If not, it is mapped from
`native` to `slim` and returned as a new `Grid2D`.
"""
if len(self.shape) == 2:
return self
grid_2d_slim = grid_2d_util.grid_2d_slim_from(
grid_2d_native=self, mask=self.mask, sub_size=self.mask.sub_size
)
return self._new_structure(grid=grid_2d_slim, mask=self.mask)
@property
def native(self):
"""
Return a `Grid2D` where the data is stored in its `native` representation, which is an ndarray of shape
[sub_size*total_y_pixels, sub_size*total_x_pixels, 2].
If it is already stored in its `native` representation it is returned as it is. If not, it is mapped from
`slim` to `native` and returned as a new `Grid2D`.
"""
if len(self.shape) != 2:
return self
grid_2d_native = grid_2d_util.grid_2d_native_from(
grid_2d_slim=self, mask_2d=self.mask, sub_size=self.mask.sub_size
)
return self._new_structure(grid=grid_2d_native, mask=self.mask)
@property
def binned(self) -> "AbstractGrid2D":
"""
Convenience method to access the binned-up grid in its 1D representation, which is a Grid2D stored as an
ndarray of shape [total_unmasked_pixels, 2].
The binning up process converts a grid from (y,x) values where each value is a coordinate on the sub-grid to
(y,x) values where each coordinate is at the centre of its mask pixel (e.g. a grid with a sub_size of 1). This is
performed by taking the mean of all (y,x) values in each pixel.
If the grid is stored in 1D it is returned as is. If it is stored in 2D, it must first be mapped from 2D to 1D.
"""
grid_2d_slim = self.slim
grid_2d_slim_binned_y = np.multiply(
self.mask.sub_fraction,
grid_2d_slim[:, 0].reshape(-1, self.mask.sub_length).sum(axis=1),
)
grid_2d_slim_binned_x = np.multiply(
self.mask.sub_fraction,
grid_2d_slim[:, 1].reshape(-1, self.mask.sub_length).sum(axis=1),
)
return self._new_structure(
grid=np.stack((grid_2d_slim_binned_y, grid_2d_slim_binned_x), axis=-1),
mask=self.mask.mask_sub_1,
)
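# Editor's illustrative sketch (not part of the original class): assuming
# `sub_fraction == 1 / sub_length` with `sub_length == sub_size**2`, the binning above is
# the per-pixel mean of the sub-grid, e.g. in plain NumPy for sub_size=2:
#
#     grid_slim = np.arange(16.0).reshape(8, 2)             # 2 pixels x 4 sub-pixels
#     binned_y = grid_slim[:, 0].reshape(-1, 4).mean(axis=1)
#     binned_x = grid_slim[:, 1].reshape(-1, 4).mean(axis=1)
#     binned = np.stack((binned_y, binned_x), axis=-1)       # shape (2, 2)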
@property
def flipped(self) -> np.ndarray:
"""Return the grid as an ndarray of shape [total_unmasked_pixels, 2] with flipped values such that coordinates
are given as (x,y) values.
This is used to interface with Python libraries that require the grid in (x,y) format."""
return np.fliplr(self)
@property
@array_2d_util.Memoizer()
def in_radians(self):
"""Return the grid as an ndarray where all (y,x) values are converted to Radians.
This grid is used by the interferometer module."""
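# Editor's note: 648000 = 180 * 3600 is the number of arcseconds in pi radians, so
# (value * pi) / 648000 converts arcsecond values to radians (assuming the grid's
# scaled units are arcseconds).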
return (self * np.pi) / 648000.0
def squared_distances_from_coordinate(self, coordinate=(0.0, 0.0)):
"""
Returns the squared distance of every coordinate on the grid from an input coordinate.
Parameters
----------
coordinate : (float, float)
The (y,x) coordinate from which the squared distance of every grid (y,x) coordinate is computed.
"""
squared_distances = np.square(self[:, 0] - coordinate[0]) + np.square(
self[:, 1] - coordinate[1]
)
return array_2d.Array2D.manual_mask(array=squared_distances, mask=self.mask)
def distances_from_coordinate(self, coordinate=(0.0, 0.0)):
"""
Returns the distance of every coordinate on the grid from an input (y,x) coordinate.
Parameters
----------
coordinate : (float, float)
The (y,x) coordinate from which the distance of every grid (y,x) coordinate is computed.
"""
distances = np.sqrt(
self.squared_distances_from_coordinate(coordinate=coordinate)
)
return array_2d.Array2D.manual_mask(array=distances, mask=self.mask)
def grid_2d_radial_projected_from(
self, centre=(0.0, 0.0), angle: float = 0.0
) -> grid_2d_irregular.Grid2DIrregular:
"""
Determine a projected radial grid of points from a 2D region of coordinates defined by an
extent [xmin, xmax, ymin, ymax] and with a (y,x) centre. This functions operates as follows:
1) Given the region defined by the extent [xmin, xmax, ymin, ymax], the algorithm finds the longest 1D distance
of the 4 paths from the (y,x) centre to the edge of the region (e.g. following the positive / negative y and
x axes).
2) Use the pixel-scale corresponding to the direction chosen (e.g. if the positive x-axis was the longest, the
pixel_scale in the x dimension is used).
3) Determine the number of pixels between the centre and the edge of the region using the longest path between
the two chosen above.
4) Create a (y,x) grid of radial points where all points are at the centre's y value = 0.0 and the x values
iterate from the centre in increasing steps of the pixel-scale.
5) Rotate these radial coordinates by the input `angle` clockwise.
A schematic is shown below:
-------------------
| |
|<- - - - ->x | x = centre
| | <-> = longest radial path from centre to extent edge
| |
-------------------
Parameters
----------
extent : np.ndarray
The extent of the grid the radii grid is computed using, with format [xmin, xmax, ymin, ymax]
centre : (float, float)
The (y,x) central coordinate which the radial grid is traced outwards from.
pixel_scales : (float, float)
The (y,x) scaled units to pixel units conversion factor of the 2D mask array.
sub_size : int
The size of the sub-grid that each pixel of the 2D mask array is divided into.
angle
The angle with which the radial coordinates are rotated clockwise.
Returns
-------
grid_2d_irregular.Grid2DIrregular
A radial set of points sampling the longest distance from the centre to the edge of the extent, along the
positive x-axis.
"""
grid_radial_projected_2d = grid_2d_util.grid_scaled_2d_slim_radial_projected_from(
extent=self.extent,
centre=centre,
pixel_scales=self.pixel_scales,
sub_size=self.sub_size,
)
grid_radial_projected_2d = geometry_util.transform_grid_2d_to_reference_frame(
grid_2d=grid_radial_projected_2d, centre=centre, angle=angle
)
grid_radial_projected_2d = geometry_util.transform_grid_2d_from_reference_frame(
grid_2d=grid_radial_projected_2d, centre=centre, angle=0.0
)
return grid_2d_irregular.Grid2DIrregular(grid=grid_radial_projected_2d)
@property
def shape_native_scaled(self) -> (float, float):
"""
The two dimensional shape of the grid in scaled units, computed by taking the minimum and maximum values of
the grid.
"""
return (
np.amax(self[:, 0]) - np.amin(self[:, 0]),
np.amax(self[:, 1]) - np.amin(self[:, 1]),
)
@property
def scaled_maxima(self) -> (float, float):
"""
The maximum values of the grid in scaled coordinates returned as a tuple (y_max, x_max).
"""
return (
self.origin[0] + (self.shape_native_scaled[0] / 2.0),
self.origin[1] + (self.shape_native_scaled[1] / 2.0),
)
@property
def scaled_minima(self) -> (float, float):
"""
The minimum values of the grid in scaled coordinates returned as a tuple (y_min, x_min).
"""
return (
(self.origin[0] - (self.shape_native_scaled[0] / 2.0)),
(self.origin[1] - (self.shape_native_scaled[1] / 2.0)),
)
@property
def extent(self) -> np.ndarray:
"""
The extent of the grid in scaled units returned as an ndarray of the form [x_min, x_max, y_min, y_max].
This follows the format of the extent input parameter in the matplotlib method imshow (and other methods) and
is used for visualization in the plot module.
"""
return np.asarray(
[
self.scaled_minima[1],
self.scaled_maxima[1],
self.scaled_minima[0],
self.scaled_maxima[0],
]
)
def extent_with_buffer(self, buffer=1.0e-8) -> [float, float, float, float]:
"""
The extent of the grid in scaled units returned as a list [x_min, x_max, y_min, y_max], where all values are
buffered such that they extend beyond the grid's own extent.
This follows the format of the extent input parameter in the matplotlib method imshow (and other methods) and
is used for visualization in the plot module.
"""
return [
self.scaled_minima[1] - buffer,
self.scaled_maxima[1] + buffer,
self.scaled_minima[0] - buffer,
self.scaled_maxima[0] + buffer,
]
def padded_grid_from_kernel_shape(self, kernel_shape_native):
"""When the edge pixels of a mask are unmasked and a convolution is to occur, the signal of edge pixels will be
'missing' if the grid is used to evaluate the signal via an analytic function.
To ensure this signal is included, the padded grid is used, which is 'buffered' such that it includes all pixels
whose signal will be convolved into the unmasked pixels given the 2D kernel shape.
Parameters
----------
kernel_shape_native : (int, int)
The 2D shape of the kernel which convolves signal from masked pixels to unmasked pixels.
"""
shape = self.mask.shape
padded_shape = (
shape[0] + kernel_shape_native[0] - 1,
shape[1] + kernel_shape_native[1] - 1,
)
padded_mask = msk.Mask2D.unmasked(
shape_native=padded_shape,
pixel_scales=self.mask.pixel_scales,
sub_size=self.mask.sub_size,
)
return grid_2d.Grid2D.from_mask(mask=padded_mask)
@property
def sub_border_grid(self):
"""
The (y,x) grid of all sub-pixels which are at the border of the mask.
This is NOT all sub-pixels which are in mask pixels at the mask's border, but specifically the sub-pixels
within these border pixels which are at the extreme edge of the border.
"""
return self[self.mask._sub_border_flat_indexes]
def relocated_grid_from_grid(self, grid):
"""
Relocate the coordinates of a grid to the border of this grid if they are outside the border, where the
border is defined as all pixels at the edge of the grid's mask (see *mask._border_1d_indexes*).
This is performed as follows:
1) Use the mean value of the grid's y and x coordinates to determine the origin of the grid.
2) Compute the radial distance of every grid coordinate from the origin.
3) For every coordinate, find its nearest pixel in the border.
4) Determine if it is outside the border, by comparing its radial distance from the origin to its paired \
border pixel's radial distance.
5) If its radial distance is larger, use the ratio of radial distances to move the coordinate to the border \
(if its inside the border, do nothing).
The method can be used on uniform or irregular grids, however for irregular grids the border of the
'image-plane' mask is used to define border pixels.
Parameters
----------
grid : Grid2D
The grid (uniform or irregular) whose pixels are to be relocated to the border edge if outside it.
"""
if len(self.sub_border_grid) == 0:
return grid
return grid_2d.Grid2D(
grid=self.relocated_grid_from_grid_jit(
grid=grid, border_grid=self.sub_border_grid
),
mask=grid.mask,
sub_size=grid.mask.sub_size,
)
def relocated_pixelization_grid_from_pixelization_grid(self, pixelization_grid):
"""
Relocate the coordinates of a pixelization grid to the border of this grid, see the method
*relocated_grid_from_grid* for a full description of grid relocation.
This function operates the same as other grid relocation functions but returns the grid as a
`Grid2DSparse` instance.
Parameters
----------
pixelization_grid : Grid2DSparse
The pixelization grid whose pixels are to be relocated to the border edge if outside it.
"""
if len(self.sub_border_grid) == 0:
return pixelization_grid
return grid_2d.Grid2DSparse(
grid=self.relocated_grid_from_grid_jit(
grid=pixelization_grid, border_grid=self.sub_border_grid
),
sparse_index_for_slim_index=pixelization_grid.sparse_index_for_slim_index,
)
@staticmethod
@decorator_util.jit()
def relocated_grid_from_grid_jit(grid, border_grid):
"""
Relocate the coordinates of a grid to its border if they are outside the border, where the border is
defined as all pixels at the edge of the grid's mask (see *mask._border_1d_indexes*).
This is performed as follows:
1) Use the mean value of the grid's y and x coordinates to determine the origin of the grid.
2) Compute the radial distance of every grid coordinate from the origin.
3) For every coordinate, find its nearest pixel in the border.
4) Determine if it is outside the border, by comparing its radial distance from the origin to its paired \
border pixel's radial distance.
5) If its radial distance is larger, use the ratio of radial distances to move the coordinate to the border \
(if its inside the border, do nothing).
The method can be used on uniform or irregular grids, however for irregular grids the border of the
'image-plane' mask is used to define border pixels.
Parameters
----------
grid : Grid2D
The grid (uniform or irregular) whose pixels are to be relocated to the border edge if outside it.
border_grid : Grid2D
The grid of border (y,x) coordinates.
"""
grid_relocated = np.zeros(grid.shape)
grid_relocated[:, :] = grid[:, :]
border_origin = np.zeros(2)
border_origin[0] = np.mean(border_grid[:, 0])
border_origin[1] = np.mean(border_grid[:, 1])
border_grid_radii = np.sqrt(
np.add(
np.square(np.subtract(border_grid[:, 0], border_origin[0])),
np.square(np.subtract(border_grid[:, 1], border_origin[1])),
)
)
border_min_radii = np.min(border_grid_radii)
grid_radii = np.sqrt(
np.add(
np.square(np.subtract(grid[:, 0], border_origin[0])),
np.square(np.subtract(grid[:, 1], border_origin[1])),
)
)
for pixel_index in range(grid.shape[0]):
if grid_radii[pixel_index] > border_min_radii:
closest_pixel_index = np.argmin(
np.square(grid[pixel_index, 0] - border_grid[:, 0])
+ np.square(grid[pixel_index, 1] - border_grid[:, 1])
)
move_factor = (
border_grid_radii[closest_pixel_index] / grid_radii[pixel_index]
)
if move_factor < 1.0:
grid_relocated[pixel_index, :] = (
move_factor * (grid[pixel_index, :] - border_origin[:])
+ border_origin[:]
)
return grid_relocated
def output_to_fits(self, file_path, overwrite=False):
"""
Output the grid to a .fits file.
Parameters
----------
file_path : str
The path the file is output to, including the filename and the ``.fits`` extension,
e.g. '/path/to/filename.fits'
overwrite : bool
If a file already exists at the path and overwrite=True, it is overwritten; otherwise an error is raised."""
array_2d_util.numpy_array_2d_to_fits(
array_2d=self.native, file_path=file_path, overwrite=overwrite
)
``` |
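The five relocation steps documented in `relocated_grid_from_grid` and `relocated_grid_from_grid_jit` above can be hard to picture from the prose alone, so a minimal standalone sketch is given below. Everything in it is invented for illustration (the function name, the toy diamond-shaped border and the two test coordinates); the real method operates on autoarray `Grid2D` objects and is numba-compiled.
```python
import numpy as np

def relocate_to_border(grid, border_grid):
    """Toy version of the border relocation: both inputs are plain (N, 2) arrays of (y, x) coordinates."""
    # 1) The origin is the mean (y, x) of the border coordinates.
    origin = border_grid.mean(axis=0)
    # 2) Radial distance of every border point and every grid point from that origin.
    border_radii = np.linalg.norm(border_grid - origin, axis=1)
    grid_radii = np.linalg.norm(grid - origin, axis=1)
    border_min_radius = border_radii.min()
    relocated = grid.copy()
    for i in range(grid.shape[0]):
        # Points inside the smallest border radius can never lie outside the border.
        if grid_radii[i] <= border_min_radius:
            continue
        # 3) Find the nearest border point to this coordinate.
        closest = np.argmin(np.sum((border_grid - grid[i]) ** 2, axis=1))
        # 4) + 5) If the point lies beyond its paired border point, shrink it radially onto the border.
        move_factor = border_radii[closest] / grid_radii[i]
        if move_factor < 1.0:
            relocated[i] = move_factor * (grid[i] - origin) + origin
    return relocated

border = np.array([[1.0, 0.0], [0.0, 1.0], [-1.0, 0.0], [0.0, -1.0]])  # diamond-shaped border
grid = np.array([[0.2, 0.2], [3.0, 0.0]])  # the second point lies outside the border
print(relocate_to_border(grid, border))  # -> [[0.2, 0.2], [1.0, 0.0]]
```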
{
"source": "jonathanfrawley/PyAutoArray_copy",
"score": 3
} |
#### File: autoarray/instruments/euclid.py
```python
from autoarray.layout import layout as lo, layout_util
from autoarray.layout import region as reg
from autoarray.structures.arrays.two_d import array_2d
def roe_corner_from(ccd_id, quadrant_id):
row_index = ccd_id[-1]
if (row_index in "123") and (quadrant_id == "E"):
return (1, 0)
elif (row_index in "123") and (quadrant_id == "F"):
return (1, 1)
elif (row_index in "123") and (quadrant_id == "G"):
return (0, 1)
elif (row_index in "123") and (quadrant_id == "H"):
return (0, 0)
elif (row_index in "456") and (quadrant_id == "E"):
return (0, 1)
elif (row_index in "456") and (quadrant_id == "F"):
return (0, 0)
elif (row_index in "456") and (quadrant_id == "G"):
return (1, 0)
elif (row_index in "456") and (quadrant_id == "H"):
return (1, 1)
class Array2DEuclid(array_2d.Array2D):
"""
In the Euclid FPA, the quadrant id ('E', 'F', 'G', 'H') depends on whether the CCD is located
on the left side (rows 1-3) or right side (rows 4-6) of the FPA:
LEFT SIDE ROWS 1-2-3
--------------------
<--------S----------- ---------S----------->
[] [========= 2 =========] [========= 3 =========] [] |
/ [xxxxxxxxxxxxxxxxxxxxx] [xxxxxxxxxxxxxxxxxxxxx] / |
| [xxxxxxxxxxxxxxxxxxxxx] [xxxxxxxxxxxxxxxxxxxxx] | | Direction arctic
P [xxxxxxxxx H xxxxxxxxx] [xxxxxxxxx G xxxxxxxxx] P | clocks an image
| [xxxxxxxxxxxxxxxxxxxxx] [xxxxxxxxxxxxxxxxxxxxx] | | without any rotation
| [xxxxxxxxxxxxxxxxxxxxx] [xxxxxxxxxxxxxxxxxxxxx] | | (e.g. towards row 0
| of the ndarrays)
| [xxxxxxxxxxxxxxxxxxxxx] [xxxxxxxxxxxxxxxxxxxxx] | |
| [xxxxxxxxxxxxxxxxxxxxx] [xxxxxxxxxxxxxxxxxxxxx] | |
P [xxxxxxxxx E xxxxxxxxx] [xxxxxxxxx F xxxxxxxxx] P |
| [xxxxxxxxxxxxxxxxxxxxx] [xxxxxxxxxxxxxxxxxxxxx] | |
[xxxxxxxxxxxxxxxxxxxxx] [xxxxxxxxxxxxxxxxxxxxx] |
[] [========= 0 =========] [========= 1 =========] []
<---------S---------- ----------S----------->
RIGHT SIDE ROWS 4-5-6
---------------------
<--------S----------- ---------S----------->
[] [========= 2 =========] [========= 3 =========] [] |
/ [xxxxxxxxxxxxxxxxxxxxx] [xxxxxxxxxxxxxxxxxxxxx] / |
| [xxxxxxxxxxxxxxxxxxxxx] [xxxxxxxxxxxxxxxxxxxxx] | | Direction arctic
P [xxxxxxxxx F xxxxxxxxx] [xxxxxxxxx E xxxxxxxxx] P | clocks an image
| [xxxxxxxxxxxxxxxxxxxxx] [xxxxxxxxxxxxxxxxxxxxx] | | without any rotation
| [xxxxxxxxxxxxxxxxxxxxx] [xxxxxxxxxxxxxxxxxxxxx] | | (e.g. towards row 0
| of the ndarrays)
| [xxxxxxxxxxxxxxxxxxxxx] [xxxxxxxxxxxxxxxxxxxxx] | |
| [xxxxxxxxxxxxxxxxxxxxx] [xxxxxxxxxxxxxxxxxxxxx] | |
P [xxxxxxxxx G xxxxxxxxx] [xxxxxxxxx H xxxxxxxxx] P |
| [xxxxxxxxxxxxxxxxxxxxx] [xxxxxxxxxxxxxxxxxxxxx] | |
[xxxxxxxxxxxxxxxxxxxxx] [xxxxxxxxxxxxxxxxxxxxx] |
[] [========= 0 =========] [========= 1 =========] []
<---------S---------- ----------S----------->
Therefore, to setup a quadrant image with the correct frame_geometry using its CCD id (from which
we can extract its row number) and quadrant id, we need to first determine if the CCD is on the left / right
side and then use its quadrant id ('E', 'F', 'G' or 'H') to pick the correct quadrant.
"""
@classmethod
def from_fits_header(cls, array, ext_header):
"""
Use an input array of a Euclid quadrant and its corresponding .fits file header to rotate the quadrant to
the correct orientation for arCTIc clocking.
See the docstring of the `Array2DEuclid` class for a complete description of the Euclid FPA, quadrants and
rotations.
"""
ccd_id = ext_header["CCDID"]
quadrant_id = ext_header["QUADID"]
parallel_overscan_size = ext_header.get("PAROVRX", default=None)
if parallel_overscan_size is None:
parallel_overscan_size = 0
serial_overscan_size = ext_header.get("OVRSCANX", default=None)
serial_prescan_size = ext_header.get("PRESCANX", default=None)
serial_size = ext_header.get("NAXIS1", default=None)
parallel_size = ext_header.get("NAXIS2", default=None)
# The prescan / overscan sizes read from the header above are not needed to rotate the array itself;
# they are typically used when building the corresponding `Layout2DEuclid` for this quadrant.
return cls.from_ccd_and_quadrant_id(
array=array, ccd_id=ccd_id, quadrant_id=quadrant_id
)
@classmethod
def from_ccd_and_quadrant_id(cls, array, ccd_id, quadrant_id):
"""
Use an input array of a Euclid quadrant, its ccd_id and quadrant_id to rotate the quadrant to
the correct orientation for arCTIc clocking.
See the docstring of the `Array2DEuclid` class for a complete description of the Euclid FPA, quadrants and
rotations.
"""
row_index = ccd_id[-1]
if (row_index in "123") and (quadrant_id == "E"):
return Array2DEuclid.bottom_left(array_electrons=array)
elif (row_index in "123") and (quadrant_id == "F"):
return Array2DEuclid.bottom_right(array_electrons=array)
elif (row_index in "123") and (quadrant_id == "G"):
return Array2DEuclid.top_right(array_electrons=array)
elif (row_index in "123") and (quadrant_id == "H"):
return Array2DEuclid.top_left(array_electrons=array)
elif (row_index in "456") and (quadrant_id == "E"):
return Array2DEuclid.top_right(array_electrons=array)
elif (row_index in "456") and (quadrant_id == "F"):
return Array2DEuclid.top_left(array_electrons=array)
elif (row_index in "456") and (quadrant_id == "G"):
return Array2DEuclid.bottom_left(array_electrons=array)
elif (row_index in "456") and (quadrant_id == "H"):
return Array2DEuclid.bottom_right(array_electrons=array)
@classmethod
def top_left(cls, array_electrons):
"""
Use an input array of a Euclid quadrant corresponding to the top-left of a Euclid CCD and rotate the quadrant
to the correct orientation for arCTIc clocking.
See the docstring of the `Array2DEuclid` class for a complete description of the Euclid FPA, quadrants and
rotations.
"""
array_electrons = layout_util.rotate_array_from_roe_corner(
array=array_electrons, roe_corner=(0, 0)
)
return cls.manual(array=array_electrons, pixel_scales=0.1)
@classmethod
def top_right(cls, array_electrons):
"""
Use an input array of a Euclid quadrant corresponding to the top-right of a Euclid CCD and rotate the quadrant to
the correct orientation for arCTIc clocking.
See the docstring of the `Array2DEuclid` class for a complete description of the Euclid FPA, quadrants and
rotations.
"""
array_electrons = layout_util.rotate_array_from_roe_corner(
array=array_electrons, roe_corner=(0, 1)
)
return cls.manual(array=array_electrons, pixel_scales=0.1)
@classmethod
def bottom_left(cls, array_electrons):
"""
Use an input array of a Euclid quadrant corresponding to the bottom-left of a Euclid CCD and rotate the
quadrant to the correct orientation for arCTIc clocking.
See the docstring of the `Array2DEuclid` class for a complete description of the Euclid FPA, quadrants and
rotations.
"""
array_electrons = layout_util.rotate_array_from_roe_corner(
array=array_electrons, roe_corner=(1, 0)
)
return cls.manual(array=array_electrons, pixel_scales=0.1)
@classmethod
def bottom_right(cls, array_electrons):
"""
Use an input array of a Euclid quadrant corresponding to the bottom-right of a Euclid CCD and rotate the
quadrant to the correct orientation for arCTIc clocking.
See the docstring of the `Array2DEuclid` class for a complete description of the Euclid FPA, quadrants and
rotations.
"""
array_electrons = layout_util.rotate_array_from_roe_corner(
array=array_electrons, roe_corner=(1, 1)
)
return cls.manual(array=array_electrons, pixel_scales=0.1)
class Layout2DEuclid(lo.Layout2D):
@classmethod
def from_ccd_and_quadrant_id(
cls,
ccd_id,
quadrant_id,
parallel_size=2086,
serial_size=2128,
serial_prescan_size=51,
serial_overscan_size=29,
parallel_overscan_size=20,
):
"""
Use an input array of a Euclid quadrant, its ccd_id and quadrant_id to rotate the quadrant to
the correct orientation for arCTIc clocking.
See the docstring of the `Array2DEuclid` class for a complete description of the Euclid FPA, quadrants and
rotations.
"""
row_index = ccd_id[-1]
if (row_index in "123") and (quadrant_id == "E"):
return Layout2DEuclid.bottom_left(
parallel_size=parallel_size,
serial_size=serial_size,
serial_prescan_size=serial_prescan_size,
serial_overscan_size=serial_overscan_size,
parallel_overscan_size=parallel_overscan_size,
)
elif (row_index in "123") and (quadrant_id == "F"):
return Layout2DEuclid.bottom_right(
parallel_size=parallel_size,
serial_size=serial_size,
serial_prescan_size=serial_prescan_size,
serial_overscan_size=serial_overscan_size,
parallel_overscan_size=parallel_overscan_size,
)
elif (row_index in "123") and (quadrant_id == "G"):
return Layout2DEuclid.top_right(
parallel_size=parallel_size,
serial_size=serial_size,
serial_prescan_size=serial_prescan_size,
serial_overscan_size=serial_overscan_size,
parallel_overscan_size=parallel_overscan_size,
)
elif (row_index in "123") and (quadrant_id == "H"):
return Layout2DEuclid.top_left(
parallel_size=parallel_size,
serial_size=serial_size,
serial_prescan_size=serial_prescan_size,
serial_overscan_size=serial_overscan_size,
parallel_overscan_size=parallel_overscan_size,
)
elif (row_index in "456") and (quadrant_id == "E"):
return Layout2DEuclid.top_right(
parallel_size=parallel_size,
serial_size=serial_size,
serial_prescan_size=serial_prescan_size,
serial_overscan_size=serial_overscan_size,
parallel_overscan_size=parallel_overscan_size,
)
elif (row_index in "456") and (quadrant_id == "F"):
return Layout2DEuclid.top_left(
parallel_size=parallel_size,
serial_size=serial_size,
serial_prescan_size=serial_prescan_size,
serial_overscan_size=serial_overscan_size,
parallel_overscan_size=parallel_overscan_size,
)
elif (row_index in "456") and (quadrant_id == "G"):
return Layout2DEuclid.bottom_left(
parallel_size=parallel_size,
serial_size=serial_size,
serial_prescan_size=serial_prescan_size,
serial_overscan_size=serial_overscan_size,
parallel_overscan_size=parallel_overscan_size,
)
elif (row_index in "456") and (quadrant_id == "H"):
return Layout2DEuclid.bottom_right(
parallel_size=parallel_size,
serial_size=serial_size,
serial_prescan_size=serial_prescan_size,
serial_overscan_size=serial_overscan_size,
parallel_overscan_size=parallel_overscan_size,
)
@classmethod
def top_left(
cls,
parallel_size=2086,
serial_size=2128,
serial_prescan_size=51,
serial_overscan_size=29,
parallel_overscan_size=20,
):
if parallel_overscan_size > 0:
parallel_overscan = reg.Region2D(
(
0,
parallel_overscan_size,
serial_prescan_size,
serial_size - serial_overscan_size,
)
)
else:
parallel_overscan = None
serial_prescan = reg.Region2D((0, parallel_size, 0, serial_prescan_size))
serial_overscan = reg.Region2D(
(
0,
parallel_size - parallel_overscan_size,
serial_size - serial_overscan_size,
serial_size,
)
)
layout_2d = Layout2DEuclid(
shape_2d=(parallel_size, serial_size),
original_roe_corner=(0, 0),
parallel_overscan=parallel_overscan,
serial_prescan=serial_prescan,
serial_overscan=serial_overscan,
)
return layout_2d.new_rotated_from_roe_corner(roe_corner=(0, 0))
@classmethod
def top_right(
cls,
parallel_size=2086,
serial_size=2128,
serial_prescan_size=51,
serial_overscan_size=29,
parallel_overscan_size=20,
):
if parallel_overscan_size > 0:
parallel_overscan = reg.Region2D(
(
0,
parallel_overscan_size,
serial_overscan_size,
serial_size - serial_prescan_size,
)
)
else:
parallel_overscan = None
serial_prescan = reg.Region2D(
(0, parallel_size, serial_size - serial_prescan_size, serial_size)
)
serial_overscan = reg.Region2D(
(0, parallel_size - parallel_overscan_size, 0, serial_overscan_size)
)
layout_2d = Layout2DEuclid(
shape_2d=(parallel_size, serial_size),
original_roe_corner=(0, 1),
parallel_overscan=parallel_overscan,
serial_prescan=serial_prescan,
serial_overscan=serial_overscan,
)
return layout_2d.new_rotated_from_roe_corner(roe_corner=(0, 1))
@classmethod
def bottom_left(
cls,
parallel_size=2086,
serial_size=2128,
serial_prescan_size=51,
serial_overscan_size=29,
parallel_overscan_size=20,
):
if parallel_overscan_size > 0:
parallel_overscan = reg.Region2D(
(
parallel_size - parallel_overscan_size,
parallel_size,
serial_prescan_size,
serial_size - serial_overscan_size,
)
)
else:
parallel_overscan = None
serial_prescan = reg.Region2D((0, parallel_size, 0, serial_prescan_size))
serial_overscan = reg.Region2D(
(
0,
parallel_size - parallel_overscan_size,
serial_size - serial_overscan_size,
serial_size,
)
)
layout_2d = Layout2DEuclid(
shape_2d=(parallel_size, serial_size),
original_roe_corner=(1, 0),
parallel_overscan=parallel_overscan,
serial_prescan=serial_prescan,
serial_overscan=serial_overscan,
)
return layout_2d.new_rotated_from_roe_corner(roe_corner=(1, 0))
@classmethod
def bottom_right(
cls,
parallel_size=2086,
serial_size=2128,
serial_prescan_size=51,
serial_overscan_size=29,
parallel_overscan_size=20,
):
if parallel_overscan_size > 0:
parallel_overscan = reg.Region2D(
(
parallel_size - parallel_overscan_size,
parallel_size,
serial_overscan_size,
serial_size - serial_prescan_size,
)
)
else:
parallel_overscan = None
serial_prescan = reg.Region2D(
(0, parallel_size, serial_size - serial_prescan_size, serial_size)
)
serial_overscan = reg.Region2D(
(0, parallel_size - parallel_overscan_size, 0, serial_overscan_size)
)
layout_2d = Layout2DEuclid(
shape_2d=(parallel_size, serial_size),
original_roe_corner=(1, 1),
parallel_overscan=parallel_overscan,
serial_prescan=serial_prescan,
serial_overscan=serial_overscan,
)
return layout_2d.new_rotated_from_roe_corner(roe_corner=(1, 1))
```
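The eight `if`/`elif` branches in `roe_corner_from` (and the matching branches in the two `from_ccd_and_quadrant_id` factories) encode a small lookup table: the CCD row block (rows 1-3 sit on the left of the FPA, rows 4-6 on the right) together with the quadrant id fixes the read-out-electronics corner. The dictionary restatement below is only a hypothetical cross-check written for this document, not part of the autoarray API, and the `ccd_id` strings are placeholders whose last character encodes the row.
```python
# Compact restatement of the quadrant -> read-out corner mapping, for sanity checking.
ROE_CORNER = {
    ("123", "E"): (1, 0), ("123", "F"): (1, 1), ("123", "G"): (0, 1), ("123", "H"): (0, 0),
    ("456", "E"): (0, 1), ("456", "F"): (0, 0), ("456", "G"): (1, 0), ("456", "H"): (1, 1),
}

def roe_corner_from_dict(ccd_id, quadrant_id):
    row_index = ccd_id[-1]  # last character of the CCD id is its row number
    block = "123" if row_index in "123" else "456"
    return ROE_CORNER[(block, quadrant_id)]

assert roe_corner_from_dict("1-1", "E") == (1, 0)  # left side of the FPA
assert roe_corner_from_dict("4-2", "E") == (0, 1)  # right side mirrors the mapping
```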
#### File: autoarray/inversion/inversions.py
```python
import numpy as np
from autoconf import conf
from autoarray import exc
from autoarray.structures.arrays.two_d import array_2d
from autoarray.structures.grids.two_d import grid_2d
from autoarray.structures.grids.two_d import grid_2d_irregular
from autoarray.structures import visibilities as vis
from autoarray.operators import convolver as conv, transformer as trans
from autoarray.inversion import regularization as reg, mappers, inversion_util
from autoarray.dataset import imaging, interferometer
from autoarray import preloads as pload
from scipy.interpolate import griddata
from scipy import sparse
import pylops
import typing
class SettingsInversion:
def __init__(
self,
use_linear_operators=False,
tolerance=1e-8,
maxiter=250,
check_solution=True,
):
self.use_linear_operators = use_linear_operators
self.tolerance = tolerance
self.maxiter = maxiter
self.check_solution = check_solution
def inversion(
dataset,
mapper: typing.Union[mappers.MapperRectangular, mappers.MapperVoronoi],
regularization,
settings=SettingsInversion(),
):
if isinstance(dataset, imaging.Imaging):
return InversionImagingMatrix.from_data_mapper_and_regularization(
image=dataset.image,
noise_map=dataset.noise_map,
convolver=dataset.convolver,
mapper=mapper,
regularization=regularization,
settings=settings,
)
elif isinstance(dataset, interferometer.Interferometer):
return AbstractInversionInterferometer.from_data_mapper_and_regularization(
visibilities=dataset.visibilities,
noise_map=dataset.noise_map,
transformer=dataset.transformer,
mapper=mapper,
regularization=regularization,
settings=settings,
)
def log_determinant_of_matrix_cholesky(matrix):
"""There are two terms in the inversion's Bayesian log likelihood function which require the log determinant of \
a matrix. These are (Nightingale & Dye 2015, Nightingale, Dye and Massey 2018):
ln[det(F + H)] = ln[det(curvature_reg_matrix)]
ln[det(H)] = ln[det(regularization_matrix)]
The curvature_reg_matrix is positive-definite, which means the above log determinants can be computed \
efficiently (compared to using np.linalg.det) by using a Cholesky decomposition first and summing the log of each \
diagonal term.
Parameters
-----------
matrix : np.ndarray
The positive-definite matrix the log determinant is computed for.
"""
try:
return 2.0 * np.sum(np.log(np.diag(np.linalg.cholesky(matrix))))
except np.linalg.LinAlgError:
raise exc.InversionException()
class AbstractInversion:
def __init__(
self,
noise_map: np.ndarray,
mapper: typing.Union[mappers.MapperRectangular, mappers.MapperVoronoi],
regularization: reg.Regularization,
regularization_matrix: np.ndarray,
reconstruction: np.ndarray,
settings: SettingsInversion,
):
self.noise_map = noise_map
self.mapper = mapper
self.regularization = regularization
self.regularization_matrix = regularization_matrix
self.reconstruction = reconstruction
self.settings = settings
def interpolated_reconstructed_data_from_shape_native(self, shape_native=None):
return self.interpolated_values_from_shape_native(
values=self.reconstruction, shape_native=shape_native
)
def interpolated_errors_from_shape_native(self, shape_native=None):
return self.interpolated_values_from_shape_native(
values=self.errors, shape_native=shape_native
)
def interpolated_values_from_shape_native(self, values, shape_native=None):
if shape_native is not None:
grid = grid_2d.Grid2D.bounding_box(
bounding_box=self.mapper.source_pixelization_grid.extent,
shape_native=shape_native,
buffer_around_corners=False,
)
elif (
conf.instance["general"]["inversion"]["interpolated_grid_shape"]
in "image_grid"
):
grid = self.mapper.source_grid_slim
elif (
conf.instance["general"]["inversion"]["interpolated_grid_shape"]
in "source_grid"
):
dimension = int(np.sqrt(self.mapper.pixels))
shape_native = (dimension, dimension)
grid = grid_2d.Grid2D.bounding_box(
bounding_box=self.mapper.source_pixelization_grid.extent,
shape_native=shape_native,
buffer_around_corners=False,
)
else:
raise exc.InversionException(
"In the genenal.ini config file a valid option was not found for the"
"interpolated_grid_shape. Must be {image_grid, source_grid}"
)
interpolated_reconstruction = griddata(
points=self.mapper.source_pixelization_grid,
values=values,
xi=grid.binned.native,
method="linear",
)
interpolated_reconstruction[np.isnan(interpolated_reconstruction)] = 0.0
return array_2d.Array2D.manual(
array=interpolated_reconstruction, pixel_scales=grid.pixel_scales
)
@property
def regularization_term(self):
"""
Returns the regularization term of an inversion. This term represents the sum of the difference in flux \
between every pair of neighboring pixels. This is computed as:
s_T * H * s = solution_vector.T * regularization_matrix * solution_vector
The term is referred to as *G_l* in Warren & Dye 2003, Nightingale & Dye 2015.
The above works include the regularization_matrix coefficient (lambda) in this calculation. In PyAutoLens, \
this is already in the regularization matrix and thus implicitly included in the matrix multiplication.
"""
return np.matmul(
self.reconstruction.T,
np.matmul(self.regularization_matrix, self.reconstruction),
)
@property
def log_det_regularization_matrix_term(self):
return log_determinant_of_matrix_cholesky(self.regularization_matrix)
@property
def brightest_reconstruction_pixel(self):
return np.argmax(self.reconstruction)
@property
def brightest_reconstruction_pixel_centre(self):
return grid_2d_irregular.Grid2DIrregular(
grid=[
self.mapper.source_pixelization_grid[
self.brightest_reconstruction_pixel
]
]
)
@property
def mapped_reconstructed_image(self):
raise NotImplementedError()
@property
def residual_map(self):
raise NotImplementedError()
@property
def normalized_residual_map(self):
raise NotImplementedError()
@property
def chi_squared_map(self):
raise NotImplementedError()
@property
def regularization_weight_list(self):
return self.regularization.regularization_weight_list_from_mapper(
mapper=self.mapper
)
class AbstractInversionMatrix:
def __init__(
self,
curvature_reg_matrix: np.ndarray,
curvature_matrix: np.ndarray,
regularization_matrix: np.ndarray,
):
self.curvature_matrix = curvature_matrix
self.curvature_reg_matrix = curvature_reg_matrix
self.regularization_matrix = regularization_matrix
@property
def log_det_curvature_reg_matrix_term(self):
return log_determinant_of_matrix_cholesky(self.curvature_reg_matrix)
@property
def errors_with_covariance(self):
return np.linalg.inv(self.curvature_reg_matrix)
@property
def errors(self):
return np.diagonal(self.errors_with_covariance)
class InversionImagingMatrix(AbstractInversion, AbstractInversionMatrix):
def __init__(
self,
image: array_2d.Array2D,
noise_map: array_2d.Array2D,
convolver: conv.Convolver,
mapper: typing.Union[mappers.MapperRectangular, mappers.MapperVoronoi],
regularization: reg.Regularization,
blurred_mapping_matrix: np.ndarray,
curvature_matrix: np.ndarray,
regularization_matrix: np.ndarray,
curvature_reg_matrix: np.ndarray,
reconstruction: np.ndarray,
settings: SettingsInversion,
):
""" An inversion, which given an input image and noise-map reconstructs the image using a linear inversion, \
including a convolution that accounts for blurring.
The inversion uses a 2D pixelization to perform the reconstruction by mapping each pixelization pixel to a \
set of image pixels via a mapper. The reconstructed pixelization is smoothed via a regularization scheme to \
prevent over-fitting noise.
Parameters
-----------
image_1d : np.ndarray
Flattened 1D array of the observed image the inversion is fitting.
noise_map : np.ndarray
Flattened 1D array of the noise-map used by the inversion during the fit.
convolver : imaging.convolution.Convolver
The convolver used to blur the mapping matrix with the PSF.
mapper : inversion.mappers.Mapper
The mapping between the image-pixels (via its sub-grid) and pixelization pixels.
regularization : inversion.regularization.Regularization
The regularization scheme applied to smooth the pixelization used to reconstruct the image for the \
inversion
Attributes
-----------
blurred_mapping_matrix : np.ndarray
The matrix representing the blurred mappings between the image's sub-grid of pixels and the pixelization \
pixels.
regularization_matrix : np.ndarray
The matrix defining how the pixelization's pixels are regularized with one another for smoothing (H).
curvature_matrix : np.ndarray
The curvature_matrix between each pixelization pixel and all other pixelization pixels (F).
curvature_reg_matrix : np.ndarray
The curvature_matrix + regularization matrix.
solution_vector : np.ndarray
The vector containing the reconstructed fit to the data.
"""
super(InversionImagingMatrix, self).__init__(
noise_map=noise_map,
mapper=mapper,
regularization=regularization,
regularization_matrix=regularization_matrix,
reconstruction=reconstruction,
settings=settings,
)
AbstractInversionMatrix.__init__(
self=self,
curvature_matrix=curvature_matrix,
curvature_reg_matrix=curvature_reg_matrix,
regularization_matrix=regularization_matrix,
)
self.image = image
self.convolver = convolver
self.blurred_mapping_matrix = blurred_mapping_matrix
@classmethod
def from_data_mapper_and_regularization(
cls,
image: array_2d.Array2D,
noise_map: array_2d.Array2D,
convolver: conv.Convolver,
mapper: typing.Union[mappers.MapperRectangular, mappers.MapperVoronoi],
regularization: reg.Regularization,
settings=SettingsInversion(),
preloads=pload.Preloads(),
):
if preloads.blurred_mapping_matrix is None:
blurred_mapping_matrix = convolver.convolve_mapping_matrix(
mapping_matrix=mapper.mapping_matrix
)
else:
blurred_mapping_matrix = preloads.blurred_mapping_matrix
data_vector = inversion_util.data_vector_via_blurred_mapping_matrix_from(
blurred_mapping_matrix=blurred_mapping_matrix,
image=image,
noise_map=noise_map,
)
if preloads.curvature_matrix_sparse_preload is None:
curvature_matrix = inversion_util.curvature_matrix_via_mapping_matrix_from(
mapping_matrix=blurred_mapping_matrix, noise_map=noise_map
)
else:
curvature_matrix = inversion_util.curvature_matrix_via_sparse_preload_from(
mapping_matrix=blurred_mapping_matrix,
noise_map=noise_map,
curvature_matrix_sparse_preload=preloads.curvature_matrix_sparse_preload.astype(
"int"
),
curvature_matrix_preload_counts=preloads.curvature_matrix_preload_counts.astype(
"int"
),
)
regularization_matrix = regularization.regularization_matrix_from_mapper(
mapper=mapper
)
curvature_reg_matrix = np.add(curvature_matrix, regularization_matrix)
try:
values = np.linalg.solve(curvature_reg_matrix, data_vector)
except np.linalg.LinAlgError:
raise exc.InversionException()
if settings.check_solution:
if np.isclose(a=values[0], b=values[1], atol=1e-4).all():
if np.isclose(a=values[0], b=values, atol=1e-4).all():
raise exc.InversionException()
return InversionImagingMatrix(
image=image,
noise_map=noise_map,
convolver=convolver,
mapper=mapper,
regularization=regularization,
blurred_mapping_matrix=blurred_mapping_matrix,
curvature_matrix=curvature_matrix,
regularization_matrix=regularization_matrix,
curvature_reg_matrix=curvature_reg_matrix,
reconstruction=values,
settings=settings,
)
@property
def mapped_reconstructed_image(self):
reconstructed_image = inversion_util.mapped_reconstructed_data_from(
mapping_matrix=self.blurred_mapping_matrix,
reconstruction=self.reconstruction,
)
return array_2d.Array2D(
array=reconstructed_image, mask=self.mapper.source_grid_slim.mask.mask_sub_1
)
@property
def residual_map(self):
return inversion_util.inversion_residual_map_from(
pixelization_values=self.reconstruction,
data=self.image,
slim_index_for_sub_slim_index=self.mapper.source_grid_slim.mask._slim_index_for_sub_slim_index,
all_sub_slim_indexes_for_pixelization_index=self.mapper.all_sub_slim_indexes_for_pixelization_index,
)
@property
def normalized_residual_map(self):
return inversion_util.inversion_normalized_residual_map_from(
pixelization_values=self.reconstruction,
data=self.image,
noise_map_1d=self.noise_map,
slim_index_for_sub_slim_index=self.mapper.source_grid_slim.mask._slim_index_for_sub_slim_index,
all_sub_slim_indexes_for_pixelization_index=self.mapper.all_sub_slim_indexes_for_pixelization_index,
)
@property
def chi_squared_map(self):
return inversion_util.inversion_chi_squared_map_from(
pixelization_values=self.reconstruction,
data=self.image,
noise_map_1d=self.noise_map,
slim_index_for_sub_slim_index=self.mapper.source_grid_slim.mask._slim_index_for_sub_slim_index,
all_sub_slim_indexes_for_pixelization_index=self.mapper.all_sub_slim_indexes_for_pixelization_index,
)
@property
def curvature_matrix_sparse_preload(self):
curvature_matrix_sparse_preload, curvature_matrix_preload_counts = inversion_util.curvature_matrix_sparse_preload_via_mapping_matrix_from(
mapping_matrix=self.blurred_mapping_matrix
)
return curvature_matrix_sparse_preload
@property
def curvature_matrix_preload_counts(self):
curvature_matrix_sparse_preload, curvature_matrix_preload_counts = inversion_util.curvature_matrix_sparse_preload_via_mapping_matrix_from(
mapping_matrix=self.blurred_mapping_matrix
)
return curvature_matrix_preload_counts
class AbstractInversionInterferometer(AbstractInversion):
def __init__(
self,
visibilities: vis.Visibilities,
noise_map: vis.VisibilitiesNoiseMap,
transformer: trans.TransformerNUFFT,
mapper: typing.Union[mappers.MapperRectangular, mappers.MapperVoronoi],
regularization: reg.Regularization,
regularization_matrix: np.ndarray,
reconstruction: np.ndarray,
settings: SettingsInversion,
):
super(AbstractInversionInterferometer, self).__init__(
noise_map=noise_map,
mapper=mapper,
regularization=regularization,
regularization_matrix=regularization_matrix,
reconstruction=reconstruction,
settings=settings,
)
self.visibilities = visibilities
self.transformer = transformer
@classmethod
def from_data_mapper_and_regularization(
cls,
visibilities: vis.Visibilities,
noise_map: vis.VisibilitiesNoiseMap,
transformer: trans.TransformerNUFFT,
mapper: typing.Union[mappers.MapperRectangular, mappers.MapperVoronoi],
regularization: reg.Regularization,
settings=SettingsInversion(use_linear_operators=True),
):
if not settings.use_linear_operators:
return InversionInterferometerMatrix.from_data_mapper_and_regularization(
visibilities=visibilities,
noise_map=noise_map,
transformer=transformer,
mapper=mapper,
regularization=regularization,
settings=settings,
)
else:
return InversionInterferometerLinearOperator.from_data_mapper_and_regularization(
visibilities=visibilities,
noise_map=noise_map,
transformer=transformer,
mapper=mapper,
regularization=regularization,
settings=settings,
)
@property
def mapped_reconstructed_image(self):
mapped_reconstructed_image = inversion_util.mapped_reconstructed_data_from(
mapping_matrix=self.mapper.mapping_matrix,
reconstruction=self.reconstruction,
)
return array_2d.Array2D(
array=mapped_reconstructed_image,
mask=self.mapper.source_grid_slim.mask.mask_sub_1,
)
@property
def residual_map(self):
return None
@property
def normalized_residual_map(self):
return None
@property
def chi_squared_map(self):
return None
class InversionInterferometerMatrix(
AbstractInversionInterferometer, AbstractInversionMatrix
):
def __init__(
self,
visibilities: vis.Visibilities,
noise_map: vis.VisibilitiesNoiseMap,
transformer: trans.TransformerNUFFT,
mapper: typing.Union[mappers.MapperRectangular, mappers.MapperVoronoi],
regularization: reg.Regularization,
regularization_matrix: np.ndarray,
reconstruction: np.ndarray,
transformed_mapping_matrix: np.ndarray,
curvature_matrix: np.ndarray,
curvature_reg_matrix: np.ndarray,
settings: SettingsInversion,
):
""" An inversion, which given an input image and noise-map reconstructs the image using a linear inversion, \
including a convolution that accounts for blurring.
The inversion uses a 2D pixelization to perform the reconstruction by mapping each pixelization pixel to a \
set of image pixels via a mapper. The reconstructed pixelization is smoothed via a regularization scheme to \
prevent over-fitting noise.
Parameters
-----------
image_1d : np.ndarray
Flattened 1D array of the observed image the inversion is fitting.
noise_map : np.ndarray
Flattened 1D array of the noise-map used by the inversion during the fit.
convolver : imaging.convolution.Convolver
The convolver used to blur the mapping matrix with the PSF.
mapper : inversion.mappers.Mapper
The mapping between the image-pixels (via its sub-grid) and pixelization pixels.
regularization : inversion.regularization.Regularization
The regularization scheme applied to smooth the pixelization used to reconstruct the image for the \
inversion
Attributes
-----------
blurred_mapping_matrix : np.ndarray
The matrix representing the blurred mappings between the image's sub-grid of pixels and the pixelization \
pixels.
regularization_matrix : np.ndarray
The matrix defining how the pixelization's pixels are regularized with one another for smoothing (H).
curvature_matrix : np.ndarray
The curvature_matrix between each pixelization pixel and all other pixelization pixels (F).
curvature_reg_matrix : np.ndarray
The curvature_matrix + regularization matrix.
solution_vector : np.ndarray
The vector containing the reconstructed fit to the data.
"""
super(InversionInterferometerMatrix, self).__init__(
visibilities=visibilities,
noise_map=noise_map,
transformer=transformer,
mapper=mapper,
regularization=regularization,
regularization_matrix=regularization_matrix,
reconstruction=reconstruction,
settings=settings,
)
AbstractInversionMatrix.__init__(
self=self,
curvature_matrix=curvature_matrix,
curvature_reg_matrix=curvature_reg_matrix,
regularization_matrix=regularization_matrix,
)
self.curvature_reg_matrix = curvature_reg_matrix
self.transformed_mapping_matrix = transformed_mapping_matrix
@classmethod
def from_data_mapper_and_regularization(
cls,
visibilities: vis.Visibilities,
noise_map: vis.VisibilitiesNoiseMap,
transformer: trans.TransformerNUFFT,
mapper: typing.Union[mappers.MapperRectangular, mappers.MapperVoronoi],
regularization: reg.Regularization,
settings=SettingsInversion(),
):
transformed_mapping_matrix = transformer.transformed_mapping_matrix_from_mapping_matrix(
mapping_matrix=mapper.mapping_matrix
)
data_vector = inversion_util.data_vector_via_transformed_mapping_matrix_from(
transformed_mapping_matrix=transformed_mapping_matrix,
visibilities=visibilities,
noise_map=noise_map,
)
real_curvature_matrix = inversion_util.curvature_matrix_via_mapping_matrix_from(
mapping_matrix=transformed_mapping_matrix.real, noise_map=noise_map.real
)
imag_curvature_matrix = inversion_util.curvature_matrix_via_mapping_matrix_from(
mapping_matrix=transformed_mapping_matrix.imag, noise_map=noise_map.imag
)
regularization_matrix = regularization.regularization_matrix_from_mapper(
mapper=mapper
)
curvature_matrix = np.add(real_curvature_matrix, imag_curvature_matrix)
curvature_reg_matrix = np.add(curvature_matrix, regularization_matrix)
try:
values = np.linalg.solve(curvature_reg_matrix, data_vector)
except np.linalg.LinAlgError:
raise exc.InversionException()
if settings.check_solution:
if np.isclose(a=values[0], b=values[1], atol=1e-4).all():
if np.isclose(a=values[0], b=values, atol=1e-4).all():
raise exc.InversionException()
return InversionInterferometerMatrix(
visibilities=visibilities,
noise_map=noise_map,
transformer=transformer,
mapper=mapper,
curvature_matrix=curvature_matrix,
regularization=regularization,
transformed_mapping_matrix=transformed_mapping_matrix,
regularization_matrix=regularization_matrix,
curvature_reg_matrix=curvature_reg_matrix,
reconstruction=values,
settings=settings,
)
@property
def mapped_reconstructed_visibilities(self):
visibilities = inversion_util.mapped_reconstructed_visibilities_from(
transformed_mapping_matrix=self.transformed_mapping_matrix,
reconstruction=self.reconstruction,
)
return vis.Visibilities(visibilities=visibilities)
class InversionInterferometerLinearOperator(AbstractInversionInterferometer):
def __init__(
self,
visibilities: vis.Visibilities,
noise_map: vis.VisibilitiesNoiseMap,
transformer: trans.TransformerNUFFT,
mapper: typing.Union[mappers.MapperRectangular, mappers.MapperVoronoi],
regularization: reg.Regularization,
regularization_matrix: np.ndarray,
reconstruction: np.ndarray,
log_det_curvature_reg_matrix_term: float,
settings: SettingsInversion,
):
""" An inversion, which given an input image and noise-map reconstructs the image using a linear inversion, \
including a convolution that accounts for blurring.
The inversion uses a 2D pixelization to perform the reconstruction by mapping each pixelization pixel to a \
set of image pixels via a mapper. The reconstructed pixelization is smoothed via a regularization scheme to \
prevent over-fitting noise.
Parameters
-----------
image_1d : np.ndarray
Flattened 1D array of the observed image the inversion is fitting.
noise_map : np.ndarray
Flattened 1D array of the noise-map used by the inversion during the fit.
convolver : imaging.convolution.Convolver
The convolver used to blur the mapping matrix with the PSF.
mapper : inversion.mappers.Mapper
The mapping between the image-pixels (via its sub-grid) and pixelization pixels.
regularization : inversion.regularization.Regularization
The regularization scheme applied to smooth the pixelization used to reconstruct the image for the \
inversion
Attributes
-----------
blurred_mapping_matrix : np.ndarray
The matrix representing the blurred mappings between the image's sub-grid of pixels and the pixelization \
pixels.
regularization_matrix : np.ndarray
The matrix defining how the pixelization's pixels are regularized with one another for smoothing (H).
curvature_matrix : np.ndarray
The curvature_matrix between each pixelization pixel and all other pixelization pixels (F).
curvature_reg_matrix : np.ndarray
The curvature_matrix + regularization matrix.
solution_vector : np.ndarray
The vector containing the reconstructed fit to the data.
"""
self._log_det_curvature_reg_matrix_term = log_det_curvature_reg_matrix_term
super(InversionInterferometerLinearOperator, self).__init__(
visibilities=visibilities,
noise_map=noise_map,
transformer=transformer,
mapper=mapper,
regularization=regularization,
regularization_matrix=regularization_matrix,
reconstruction=reconstruction,
settings=settings,
)
@classmethod
def from_data_mapper_and_regularization(
cls,
visibilities: vis.Visibilities,
noise_map: vis.VisibilitiesNoiseMap,
transformer: trans.TransformerNUFFT,
mapper: typing.Union[mappers.MapperRectangular, mappers.MapperVoronoi],
regularization: reg.Regularization,
settings=SettingsInversion(),
):
regularization_matrix = regularization.regularization_matrix_from_mapper(
mapper=mapper
)
Aop = pylops.MatrixMult(sparse.bsr_matrix(mapper.mapping_matrix))
Fop = transformer
Op = Fop * Aop
curvature_matrix_approx = np.multiply(
np.sum(noise_map.weight_list_ordered_1d),
mapper.mapping_matrix.T @ mapper.mapping_matrix,
)
preconditioner_matrix = np.add(curvature_matrix_approx, regularization_matrix)
preconditioner_inverse_matrix = np.linalg.inv(preconditioner_matrix)
MOp = pylops.MatrixMult(sparse.bsr_matrix(preconditioner_inverse_matrix))
log_det_curvature_reg_matrix_term = 2.0 * np.sum(
np.log(np.diag(np.linalg.cholesky(preconditioner_matrix)))
)
reconstruction = pylops.NormalEquationsInversion(
Op=Op,
Regs=None,
epsNRs=[1.0],
data=visibilities.ordered_1d,
Weight=pylops.Diagonal(diag=noise_map.weight_list_ordered_1d),
NRegs=[pylops.MatrixMult(sparse.bsr_matrix(regularization_matrix))],
M=MOp,
tol=settings.tolerance,
atol=settings.tolerance,
**dict(maxiter=settings.maxiter),
)
return InversionInterferometerLinearOperator(
visibilities=visibilities,
noise_map=noise_map,
transformer=transformer,
mapper=mapper,
regularization=regularization,
regularization_matrix=regularization_matrix,
reconstruction=np.real(reconstruction),
settings=settings,
log_det_curvature_reg_matrix_term=log_det_curvature_reg_matrix_term,
)
@property
def log_det_curvature_reg_matrix_term(self):
return self._log_det_curvature_reg_matrix_term
@property
def mapped_reconstructed_visibilities(self):
return self.transformer.visibilities_from_image(
image=self.mapped_reconstructed_image
)
@property
def errors(self):
return None
```
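Stripped of the mapper, convolver and transformer plumbing, the matrix-based inversions above reduce to solving the normal equations (F + H) s = D for the reconstruction s and evaluating ln det(F + H) through a Cholesky factorisation, exactly as `log_determinant_of_matrix_cholesky` does. The sketch below uses tiny made-up stand-ins for the curvature matrix, regularization matrix and data vector; none of these values come from an autoarray call.
```python
import numpy as np

F = np.array([[2.0, 0.5], [0.5, 1.5]])  # toy curvature matrix (F)
H = 0.1 * np.eye(2)                     # toy regularization matrix (H)
D = np.array([1.0, 2.0])                # toy data vector (D)

curvature_reg = F + H
reconstruction = np.linalg.solve(curvature_reg, D)  # s = (F + H)^-1 D

# ln det(A) via Cholesky: A = L L^T, det(A) = prod(diag(L))^2, so
# ln det(A) = 2 * sum(ln(diag(L))). This is the trick used above.
log_det = 2.0 * np.sum(np.log(np.diag(np.linalg.cholesky(curvature_reg))))

print(reconstruction, log_det)
```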
#### File: autoarray/inversion/regularization.py
```python
import numpy as np
import pylops
from autoarray.inversion import regularization_util
class Regularization:
def __init__(self):
""" Abstract base class for a regularization-scheme, which is applied to a pixelization to enforce a \
smooth-source solution and prevent over-fitting noise in the data. This is achieved by computing a \
'regularization term' - which is the sum of differences in reconstructed flux between every set of neighboring \
pixels. This regularization term is added to the solution's chi-squared as a penalty term. This affects \
a pixelization in the following ways:
1) The regularization matrix (see below) is added to the curvature matrix used by the inversion to \
linearly invert and fit the data. Thus, it changes the pixelization in a linear manner, ensuring that \
the minimum chi-squared solution is achieved accounting for the penalty term.
2) The log likelihood of the pixelization's fit to the data changes from L = -0.5 * (chi^2 + noise_normalization) \
to L = -0.5 * (chi^2 + coefficients * regularization_term + noise_normalization). The regularization \
coefficient is a 'hyper-parameter' which determines how strongly we smooth the pixelization's reconstruction.
The value of the coefficients(s) is set using the Bayesian framework of (Suyu 2006) and this \
is described further in the (*inversion.Inversion* class).
The regularization matrix, H, is calculated by defining a set of B matrices which describe how the \
pixels neighbor one another. For example, lets take a 3x3 square grid:
______
I0I1I2I
I3I4I5I
I6I7I8I
^^^^^^^
We want to regularize this grid such that each pixel is regularized with the pixel to its right and below it \
(provided there are pixels in that direction). This means that:
- pixel 0 is regularized with pixel 1 (to the right) and pixel 3 (below).
- pixel 1 is regularized with pixel 2 (to the right) and pixel 4 (below),
- Pixel 2 is only regularized with pixel 5, as there is no pixel to its right.
- and so on.
We make two 9 x 9 B matrices, which describe regularization in each direction (i.e. rightwards and downwards). \
We simply put a -1 and 1 in each row of a pixel index where it has a neighbor, where the value 1 goes in the \
column of its neighbor's index. Thus, the B matrix describing neighboring pixels to their right looks like:
B_x = [-1, 1, 0, 0, 0, 0, 0, 0, 0] # [0->1]
[ 0, -1, 1, 0, 0, 0, 0, 0, 0] # [1->2]
[ 0, 0, -1, 0, 0, 0, 0, 0, 0] # [] NOTE - no pixel neighbor.
[ 0, 0, 0, -1, 1, 0, 0, 0, 0] # [3->4]
[ 0, 0, 0, 0, -1, 1, 0, 0, 0] # [4->5]
[ 0, 0, 0, 0, 0, -1, 0, 0, 0] # [] NOTE - no pixel neighbor.
[ 0, 0, 0, 0, 0, 0, -1, 1, 0] # [6->7]
[ 0, 0, 0, 0, 0, 0, 0, -1, 1] # [7->8]
[ 0, 0, 0, 0, 0, 0, 0, 0, -1] # [] NOTE - no pixel neighbor.
We now make another B matrix for the regularization downwards:
B_y = [-1, 0, 0, 1, 0, 0, 0, 0, 0] # [0->3]
[ 0, -1, 0, 0, 1, 0, 0, 0, 0] # [1->4]
[ 0, 0, -1, 0, 0, 1, 0, 0, 0] # [2->5]
[ 0, 0, 0, -1, 0, 0, 1, 0, 0] # [3->6]
[ 0, 0, 0, 0, -1, 0, 0, 1, 0] # [4->7]
[ 0, 0, 0, 0, 0, -1, 0, 0, 1] # [5->8]
[ 0, 0, 0, 0, 0, 0, -1, 0, 0] # [] NOTE - no pixel neighbor.
[ 0, 0, 0, 0, 0, 0, 0, -1, 0] # [] NOTE - no pixel neighbor.
[ 0, 0, 0, 0, 0, 0, 0, 0, -1] # [] NOTE - no pixel neighbor.
After making the B matrices that represent our pixel neighbors, we can compute the regularization matrix, H, \
of each direction as H = B.T * B (matrix multiplication).
E.g.
H_x = B_x.T * B_x
H_y = B_y.T * B_y
H = H_x + H_y
Whilst the example above used a square-grid with regularization to the right and downwards, this matrix \
formalism can be extended to describe regularization in more directions (e.g. upwards, to the left).
It can also describe irregular pixelizations, e.g. an irregular Voronoi pixelization, where a B matrix is \
computed for every shared Voronoi vertex of each Voronoi pixel. The number of B matrices is now equal to the \
number of Voronoi vertices in the pixel with the most Voronoi vertices. However, we describe below a scheme to \
compute this solution more efficiently.
### COMBINING B MATRICES ###
The B matrices above each had the -1's going down the diagonal. This is not necessary, and it is valid to put \
each pixel pairing anywhere. So, if we had a 4x4 B matrix, where:
- pixel 0 regularizes with pixel 1
- pixel 2 regularizes with pixel 3
- pixel 3 regularizes with pixel 0
We can still set this up as one matrix (even though the pixel 0 comes up twice):
B = [-1, 1, 0 , 0] # [0->1]
[ 0, 0, 0 , 0] # We can skip rows by making them all zeros.
[ 0, 0, -1 , 1] # [2->3]
[ 1, 0, 0 ,-1] # [3->0] This is valid!
So, for a Voronoi pixelzation, we don't have to make the same number of B matrices as Voronoi vertices, \
we can combine them into fewer B matrices as above.
# SKIPPING THE B MATRIX CALCULATION #
In fact, going through the rigmarole of computing and multiplying B matrices like this is unnecessary. It is \
more computationally efficient to directly compute H. This is possible, provided you know all of the \
neighboring pixel pairs (which, by definition, you need to know to set up the B matrices anyway). Thus, the \
'regularization_matrix_from_pixel_neighbors' functions in this module directly compute H from the pixel \
neighbors.
# POSITIVE DEFINITE MATRIX #
The regularization matrix must be positive-definite, as the Bayesian framework of Suyu 2006 requires that we \
use its determinant in the calculation.
Parameters
-----------
shape : (int, int)
The dimensions of the rectangular grid of pixels (x_pixels, y_pixel)
coefficients : (float,)
The regularization_matrix coefficients used to smooth the pix reconstructed_inversion_image.
"""
def regularization_weight_list_from_mapper(self, mapper):
raise NotImplementedError
def regularization_matrix_from_mapper(self, mapper):
raise NotImplementedError
class Constant(Regularization):
def __init__(self, coefficient=1.0):
"""A instance-regularization scheme (regularization is described in the `Regularization` class above).
For the instance regularization_matrix scheme, there is only 1 regularization coefficient that is applied to \
all neighboring pixels. This means that we when write B, we only need to regularize pixels in one direction \
(e.g. pixel 0 regularizes pixel 1, but NOT visa versa). For example:
B = [-1, 1] [0->1]
[0, -1] 1 does not regularize with 0
A small numerical value of 1.0e-8 is added to all elements in a constant regularization matrix, to ensure that \
it is positive definite.
Parameters
-----------
coefficient : (float,)
The regularization coefficient which controls the degree of smooth of the inversion reconstruction.
"""
self.coefficient = coefficient
super(Constant, self).__init__()
def regularization_weight_list_from_mapper(self, mapper):
regularization_weight_list = self.coefficient * np.ones(mapper.pixels)
return mapper.reconstruction_from(solution_vector=regularization_weight_list)
def regularization_matrix_from_mapper(self, mapper):
return regularization_util.constant_regularization_matrix_from(
coefficient=self.coefficient,
pixel_neighbors=mapper.source_pixelization_grid.pixel_neighbors,
pixel_neighbors_size=mapper.source_pixelization_grid.pixel_neighbors_size,
)
class AdaptiveBrightness(Regularization):
def __init__(self, inner_coefficient=1.0, outer_coefficient=1.0, signal_scale=1.0):
""" A instance-regularization scheme (regularization is described in the `Regularization` class above).
For the weighted regularization scheme, each pixel is given an 'effective regularization weight', which is \
applied when each set of pixel neighbors are regularized with one another. The motivation of this is that \
different regions of a pixelization require different levels of regularization (e.g., high smoothing where the \
no signal is present and less smoothing where it is, see (Nightingale, Dye and Massey 2018)).
Unlike the constant regularization scheme, neighboring pixels must now be regularized with one another \
in both directions (e.g. if pixel 0 regularizes pixel 1, pixel 1 must also regularize pixel 0). For example:
B = [-1, 1] [0->1]
[-1, -1] 1 now also regularizes 0
For a constant regularization coefficient this would NOT produce a positive-definite matrix. However, for
the weighted scheme, it does!
The regularization weights change the B matrix as shown below - we simply multiply each pixel's effective \
regularization weight by each row of B it has a -1 in, so:
regularization_weight_list = [1, 2, 3, 4]
B = [-1, 1, 0 ,0] # [0->1]
[0, -2, 2 ,0] # [1->2]
[0, 0, -3 ,3] # [2->3]
[4, 0, 0 ,-4] # [3->0]
If our -1's weren't down the diagonal this would look like:
B = [4, 0, 0 ,-4] # [3->0]
[0, -2, 2 ,0] # [1->2]
[-1, 1, 0 ,0] # [0->1]
[0, 0, -3 ,3] # [2->3] This is valid!
Parameters
-----------
coefficients : (float, float)
The regularization coefficients which controls the degree of smoothing of the inversion reconstruction in \
high and low signal regions of the reconstruction.
signal_scale : float
A factor which controls how rapidly the smoothness of regularization varies from high signal regions to \
low signal regions.
"""
super(AdaptiveBrightness, self).__init__()
self.inner_coefficient = inner_coefficient
self.outer_coefficient = outer_coefficient
self.signal_scale = signal_scale
def regularization_weight_list_from_mapper(self, mapper):
pixel_signals = mapper.pixel_signals_from_signal_scale(
signal_scale=self.signal_scale
)
return regularization_util.adaptive_regularization_weight_list_from(
inner_coefficient=self.inner_coefficient,
outer_coefficient=self.outer_coefficient,
pixel_signals=pixel_signals,
)
def regularization_matrix_from_mapper(self, mapper):
regularization_weight_list = self.regularization_weight_list_from_mapper(
mapper=mapper
)
return regularization_util.weighted_regularization_matrix_from(
regularization_weight_list=regularization_weight_list,
pixel_neighbors=mapper.source_pixelization_grid.pixel_neighbors,
pixel_neighbors_size=mapper.source_pixelization_grid.pixel_neighbors_size,
)
class RegularizationLop(pylops.LinearOperator):
def __init__(self, regularization_matrix):
self.regularization_matrix = regularization_matrix
self.pixels = regularization_matrix.shape[0]
self.dims = self.pixels
self.shape = (self.pixels, self.pixels)
self.dtype = regularization_matrix.dtype  # pylops operators require an explicit dtype; take it from the input matrix
self.explicit = False
def _matvec(self, x):
return np.dot(self.regularization_matrix, x)
def _rmatvec(self, x):
return np.dot(self.regularization_matrix.T, x)
```
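The `Regularization` docstring above builds H from B matrices for a 3x3 square grid; the snippet below reproduces that construction numerically and confirms that the resulting H is positive definite, as the Bayesian evidence calculation requires. It is a sketch of the documented example only; autoarray computes H directly from the pixel-neighbor lists rather than by forming B matrices.
```python
import numpy as np

n = 9  # 3x3 grid of pixels, indexed 0..8 row by row
B_x = np.zeros((n, n))  # regularization with the pixel to the right
B_y = np.zeros((n, n))  # regularization with the pixel below
for i in range(n):
    if i % 3 != 2:  # a pixel to the right exists
        B_x[i, i], B_x[i, i + 1] = -1.0, 1.0
    else:
        B_x[i, i] = -1.0  # no right neighbor: -1 on the diagonal only
    if i < 6:  # a pixel below exists
        B_y[i, i], B_y[i, i + 3] = -1.0, 1.0
    else:
        B_y[i, i] = -1.0  # no neighbor below

H = B_x.T @ B_x + B_y.T @ B_y  # H = H_x + H_y, as in the docstring
print(np.all(np.linalg.eigvalsh(H) > 0))  # True: H is positive definite
```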
#### File: autoarray/mask/mask_2d.py
```python
import logging
import copy
import numpy as np
from autoarray import exc
from autoarray.mask import abstract_mask, mask_2d_util
from autoarray.structures.grids.two_d import grid_2d_util
from autoarray.geometry import geometry_util
from autoarray.structures.arrays.two_d import array_2d_util
from autoarray.structures.arrays.two_d import array_2d
from autoarray.structures.grids.two_d import grid_2d
logging.basicConfig()
logger = logging.getLogger(__name__)
class AbstractMask2D(abstract_mask.AbstractMask):
# noinspection PyUnusedLocal
def __new__(
cls,
mask: np.ndarray,
pixel_scales: (float, float),
sub_size: int = 1,
origin: (float, float) = (0.0, 0.0),
*args,
**kwargs
):
"""
A 2D mask, representing a uniform rectangular grid of neighboring rectangular pixels.
When applied to 2D data it extracts or masks the unmasked image pixels corresponding to mask entries that are
`False` (or 0).
The mask defines the geometry of the 2D uniform grid of pixels for the 2D data structure it is paired with,
for example the grid's `pixel_scales` and (y,x) `origin`. The 2D uniform grid may also be sub-gridded,
whereby every pixel is sub-divided into a uniform grid of sub-pixels which are all used to perform
calculations more accurately.
The mask includes tools to map the 2D data structure between 2D representations (that include all data-points
irrespective of if they are masked or not) and 1D data structures (that only contain the unmasked data).
Parameters
----------
mask: np.ndarray
The `ndarray` of shape [total_y_pixels, total_x_pixels] containing the `bool`'s representing the
`mask`, where `False` signifies an entry is unmasked and used in calculations.
pixel_scales: (float, float) or float
The (y,x) scaled units to pixel units conversion factors of every pixel. If this is input as a `float`,
it is converted to a (float, float) structure.
origin : (float, float)
The (y,x) scaled units origin of the mask's coordinate system.
"""
obj = abstract_mask.AbstractMask.__new__(
cls=cls,
mask=mask,
pixel_scales=pixel_scales,
sub_size=sub_size,
origin=origin,
)
return obj
def __array_finalize__(self, obj):
super().__array_finalize__(obj=obj)
if isinstance(obj, AbstractMask2D):
pass
else:
self.origin = (0.0, 0.0)
@property
def shape_native(self):
return self.shape
@property
def sub_shape_native(self):
try:
return (self.shape[0] * self.sub_size, self.shape[1] * self.sub_size)
        except AttributeError:
            # If `sub_size` is not yet set, fall back to the un-subgridded shape.
            return self.shape
@property
def sub_mask(self):
sub_shape = (self.shape[0] * self.sub_size, self.shape[1] * self.sub_size)
return mask_2d_util.mask_2d_via_shape_native_and_native_for_slim(
shape_native=sub_shape,
native_for_slim=self._sub_mask_index_for_sub_mask_1d_index,
).astype("bool")
def rescaled_mask_from_rescale_factor(self, rescale_factor):
rescaled_mask = mask_2d_util.rescaled_mask_2d_from(
mask_2d=self, rescale_factor=rescale_factor
)
return Mask2D(
mask=rescaled_mask,
pixel_scales=self.pixel_scales,
sub_size=self.sub_size,
origin=self.origin,
)
@property
def mask_sub_1(self):
"""
Returns the mask on the same scaled coordinate system but with a sub-grid of `sub_size`.
"""
return Mask2D(
mask=self, sub_size=1, pixel_scales=self.pixel_scales, origin=self.origin
)
def resized_mask_from_new_shape(self, new_shape, pad_value: int = 0.0):
"""resized the array to a new shape and at a new origin.
Parameters
-----------
new_shape : (int, int)
The new two-dimensional shape of the array.
"""
mask = copy.deepcopy(self)
resized_mask = array_2d_util.resized_array_2d_from_array_2d(
array_2d=mask, resized_shape=new_shape, pad_value=pad_value
).astype("bool")
return Mask2D(
mask=resized_mask,
pixel_scales=self.pixel_scales,
sub_size=self.sub_size,
origin=self.origin,
)
def trimmed_array_from_padded_array_and_image_shape(
self, padded_array, image_shape
):
"""
Map a padded 1D array of values to its original 2D array, trimming all edge values.
Parameters
-----------
padded_array : np.ndarray
A 1D array of values which were computed using a padded grid
"""
pad_size_0 = self.shape[0] - image_shape[0]
pad_size_1 = self.shape[1] - image_shape[1]
trimmed_array = padded_array.binned.native[
pad_size_0 // 2 : self.shape[0] - pad_size_0 // 2,
pad_size_1 // 2 : self.shape[1] - pad_size_1 // 2,
]
return array_2d.Array2D.manual(
array=trimmed_array,
pixel_scales=self.pixel_scales,
sub_size=1,
origin=self.origin,
)
def unmasked_blurred_array_from_padded_array_psf_and_image_shape(
self, padded_array, psf, image_shape
):
"""
For a padded grid and psf, compute an unmasked blurred image from an unmasked unblurred image.
This relies on using the lens dataset's padded-grid, which is a grid of (y,x) coordinates which extends over
the entire image as opposed to just the masked region.
Parameters
----------
psf : aa.Kernel2D
The PSF of the image used for convolution.
unmasked_image_1d : np.ndarray
The 1D unmasked image which is blurred.
"""
blurred_image = psf.convolved_array_from_array(array=padded_array)
return self.trimmed_array_from_padded_array_and_image_shape(
padded_array=blurred_image, image_shape=image_shape
)
def output_to_fits(self, file_path, overwrite=False):
"""
Write the 2D Mask to a .fits file.
Before outputting a NumPy array, the array may be flipped upside-down using np.flipud depending on the project
config files. This is for Astronomy projects so that structures appear the same orientation as `.fits` files
loaded in DS9.
Parameters
----------
file_path : str
The full path of the file that is output, including the file name and `.fits` extension.
overwrite : bool
If `True` and a file already exists with the input file_path the .fits file is overwritten. If `False`, an
error is raised.
Returns
-------
None
Examples
--------
mask = Mask2D(mask=np.full(shape=(5,5), fill_value=False))
mask.output_to_fits(file_path='/path/to/file/filename.fits', overwrite=True)
"""
array_2d_util.numpy_array_2d_to_fits(
array_2d=self.astype("float"), file_path=file_path, overwrite=overwrite
)
@property
def shape_native_scaled(self):
return (
float(self.pixel_scales[0] * self.shape[0]),
float(self.pixel_scales[1] * self.shape[1]),
)
@property
def central_pixel_coordinates(self):
return geometry_util.central_pixel_coordinates_2d_from(
shape_native=self.shape_native
)
@property
def central_scaled_coordinates(self):
return geometry_util.central_scaled_coordinate_2d_from(
shape_native=self.shape_native,
pixel_scales=self.pixel_scales,
origin=self.origin,
)
def pixel_coordinates_2d_from(self, scaled_coordinates_2d):
return geometry_util.pixel_coordinates_2d_from(
scaled_coordinates_2d=scaled_coordinates_2d,
shape_native=self.shape,
pixel_scales=self.pixel_scales,
origins=self.origin,
)
def scaled_coordinates_2d_from(self, pixel_coordinates_2d):
return geometry_util.scaled_coordinates_2d_from(
pixel_coordinates_2d=pixel_coordinates_2d,
shape_native=self.shape,
pixel_scales=self.pixel_scales,
origins=self.origin,
)
@property
@array_2d_util.Memoizer()
def mask_centre(self):
return grid_2d_util.grid_2d_centre_from(grid_2d_slim=self.masked_grid_sub_1)
@property
def scaled_maxima(self):
return (
(self.shape_native_scaled[0] / 2.0) + self.origin[0],
(self.shape_native_scaled[1] / 2.0) + self.origin[1],
)
@property
def scaled_minima(self):
return (
(-(self.shape_native_scaled[0] / 2.0)) + self.origin[0],
(-(self.shape_native_scaled[1] / 2.0)) + self.origin[1],
)
@property
def extent(self):
return np.array(
[
self.scaled_minima[1],
self.scaled_maxima[1],
self.scaled_minima[0],
self.scaled_maxima[0],
]
)
@property
def edge_buffed_mask(self):
edge_buffed_mask = mask_2d_util.buffed_mask_2d_from(mask_2d=self).astype("bool")
return Mask2D(
mask=edge_buffed_mask,
pixel_scales=self.pixel_scales,
sub_size=self.sub_size,
origin=self.origin,
)
@property
def unmasked_grid_sub_1(self):
"""
The scaled-grid of (y,x) coordinates of every pixel.
        This is defined from the top-left corner, such that the first pixel at location [0, 0] will have a negative x \
        value and a positive y value in scaled units.
"""
grid_slim = grid_2d_util.grid_2d_slim_via_shape_native_from(
shape_native=self.shape,
pixel_scales=self.pixel_scales,
sub_size=1,
origin=self.origin,
)
return grid_2d.Grid2D(grid=grid_slim, mask=self.unmasked_mask.mask_sub_1)
@property
def masked_grid(self):
sub_grid_1d = grid_2d_util.grid_2d_slim_via_mask_from(
mask_2d=self,
pixel_scales=self.pixel_scales,
sub_size=self.sub_size,
origin=self.origin,
)
return grid_2d.Grid2D(grid=sub_grid_1d, mask=self.edge_mask.mask_sub_1)
@property
def masked_grid_sub_1(self):
grid_slim = grid_2d_util.grid_2d_slim_via_mask_from(
mask_2d=self, pixel_scales=self.pixel_scales, sub_size=1, origin=self.origin
)
return grid_2d.Grid2D(grid=grid_slim, mask=self.mask_sub_1)
@property
def edge_grid_sub_1(self):
"""
        The (y,x) scaled coordinates of the mask's edge pixels, where an edge pixel is any unmasked pixel on the
        mask's edge (next to at least one pixel with a `True` value).
"""
edge_grid_1d = self.masked_grid_sub_1[self._edge_1d_indexes]
return grid_2d.Grid2D(grid=edge_grid_1d, mask=self.edge_mask.mask_sub_1)
@property
def border_grid_1d(self):
"""
        The (y,x) coordinates of the mask's border pixels, where a border pixel is any unmasked pixel on an
exterior edge (e.g. next to at least one pixel with a `True` value but not central pixels like those within \
an annulus mask).
"""
return self.masked_grid[self._sub_border_flat_indexes]
@property
def border_grid_sub_1(self):
"""
        The (y,x) coordinates of the mask's border pixels, where a border pixel is any unmasked pixel on an
exterior edge (e.g. next to at least one pixel with a `True` value but not central pixels like those within \
an annulus mask).
"""
border_grid_1d = self.masked_grid_sub_1[self._border_1d_indexes]
return grid_2d.Grid2D(grid=border_grid_1d, mask=self.border_mask.mask_sub_1)
def grid_pixels_from_grid_scaled_1d(self, grid_scaled_1d):
"""
Convert a grid of (y,x) scaled coordinates to a grid of (y,x) pixel values. Pixel coordinates are \
returned as floats such that they include the decimal offset from each pixel's top-left corner.
The pixel coordinate origin is at the top left corner of the grid, such that the pixel [0,0] corresponds to \
highest y scaled coordinate value and lowest x scaled coordinate.
The scaled coordinate origin is defined by the class attribute origin, and coordinates are shifted to this \
origin before computing their 1D grid pixel indexes.
Parameters
----------
grid_scaled_1d: np.ndarray
A grid of (y,x) coordinates in scaled units.
"""
grid_pixels_1d = grid_2d_util.grid_pixels_2d_slim_from(
grid_scaled_2d_slim=grid_scaled_1d,
shape_native=self.shape,
pixel_scales=self.pixel_scales,
origin=self.origin,
)
return grid_2d.Grid2D(grid=grid_pixels_1d, mask=self.mask_sub_1)
def grid_pixel_centres_from_grid_scaled_1d(self, grid_scaled_1d):
"""
Convert a grid of (y,x) scaled coordinates to a grid of (y,x) pixel values. Pixel coordinates are \
returned as integers such that they map directly to the pixel they are contained within.
The pixel coordinate origin is at the top left corner of the grid, such that the pixel [0,0] corresponds to \
        the highest y scaled coordinate value and lowest x scaled coordinate.
The scaled coordinate origin is defined by the class attribute origin, and coordinates are shifted to this \
origin before computing their 1D grid pixel indexes.
Parameters
----------
grid_scaled_1d: np.ndarray
The grid of (y,x) coordinates in scaled units.
"""
grid_pixel_centres_1d = grid_2d_util.grid_pixel_centres_2d_slim_from(
grid_scaled_2d_slim=grid_scaled_1d,
shape_native=self.shape,
pixel_scales=self.pixel_scales,
origin=self.origin,
).astype("int")
return grid_2d.Grid2D(
grid=grid_pixel_centres_1d, mask=self.edge_mask.mask_sub_1
)
def grid_pixel_indexes_from_grid_scaled_1d(self, grid_scaled_1d):
"""
Convert a grid of (y,x) scaled coordinates to a grid of (y,x) pixel 1D indexes. Pixel coordinates are \
returned as integers such that they are the pixel from the top-left of the 2D grid going rights and then \
downwards.
For example:
- The pixel at the top-left, whose 2D index is [0,0], corresponds to 1D index 0.
        - The fifth pixel on the top row, whose 2D index is [0,4], corresponds to 1D index 4.
        - The first pixel on the second row, whose 2D index is [1,0], has 1D index 10 if a row has 10 pixels.
The scaled coordinate origin is defined by the class attribute origin, and coordinates are shifted to this \
origin before computing their 1D grid pixel indexes.
Parameters
----------
grid_scaled_1d: np.ndarray
The grid of (y,x) coordinates in scaled units.
"""
grid_pixel_indexes_1d = grid_2d_util.grid_pixel_indexes_2d_slim_from(
grid_scaled_2d_slim=grid_scaled_1d,
shape_native=self.shape,
pixel_scales=self.pixel_scales,
origin=self.origin,
).astype("int")
return array_2d.Array2D(
array=grid_pixel_indexes_1d, mask=self.edge_mask.mask_sub_1
)
def grid_scaled_from_grid_pixels_1d(self, grid_pixels_1d):
"""
Convert a grid of (y,x) pixel coordinates to a grid of (y,x) scaled values.
The pixel coordinate origin is at the top left corner of the grid, such that the pixel [0,0] corresponds to \
        the highest y scaled coordinate value and lowest x scaled coordinate.
The scaled coordinate origin is defined by the class attribute origin, and coordinates are shifted to this \
origin before computing their 1D grid pixel indexes.
Parameters
----------
grid_pixels_1d : np.ndarray
The grid of (y,x) coordinates in pixels.
"""
grid_scaled_1d = grid_2d_util.grid_scaled_2d_slim_from(
grid_pixels_2d_slim=grid_pixels_1d,
shape_native=self.shape,
pixel_scales=self.pixel_scales,
origin=self.origin,
)
return grid_2d.Grid2D(grid=grid_scaled_1d, mask=self.edge_mask.mask_sub_1)
def grid_scaled_from_grid_pixels_1d_for_marching_squares(
self, grid_pixels_1d, shape_native
):
grid_scaled_1d = grid_2d_util.grid_scaled_2d_slim_from(
grid_pixels_2d_slim=grid_pixels_1d,
shape_native=shape_native,
pixel_scales=(
self.pixel_scales[0] / self.sub_size,
self.pixel_scales[1] / self.sub_size,
),
origin=self.origin,
)
grid_scaled_1d[:, 0] -= self.pixel_scales[0] / (2.0 * self.sub_size)
grid_scaled_1d[:, 1] += self.pixel_scales[1] / (2.0 * self.sub_size)
return grid_2d.Grid2D(grid=grid_scaled_1d, mask=self.edge_mask.mask_sub_1)
@property
def _sub_native_index_for_sub_slim_index(self):
"""A 1D array of mappings between every unmasked pixel and its 2D pixel coordinates."""
return mask_2d_util.native_index_for_slim_index_2d_from(
mask_2d=self, sub_size=1
).astype("int")
@property
def _edge_1d_indexes(self):
"""
        The indices of the mask's edge pixels, where an edge pixel is any unmasked pixel on its edge \
(next to at least one pixel with a `True` value).
"""
return mask_2d_util.edge_1d_indexes_from(mask_2d=self).astype("int")
@property
def _edge_2d_indexes(self):
"""
        The indices of the mask's edge pixels, where an edge pixel is any unmasked pixel on its edge \
(next to at least one pixel with a `True` value).
"""
return self._sub_native_index_for_sub_slim_index[self._edge_1d_indexes].astype(
"int"
)
@property
def _border_1d_indexes(self):
"""
        The indices of the mask's border pixels, where a border pixel is any unmasked pixel on an
exterior edge (e.g. next to at least one pixel with a `True` value but not central pixels like those within
an annulus mask).
"""
return mask_2d_util.border_slim_indexes_from(mask_2d=self).astype("int")
@property
def _border_2d_indexes(self):
"""The indicies of the mask's border pixels, where a border pixel is any unmasked pixel on an
exterior edge (e.g. next to at least one pixel with a `True` value but not central pixels like those within
an annulus mask).
"""
return self._sub_native_index_for_sub_slim_index[
self._border_1d_indexes
].astype("int")
@property
def _sub_border_flat_indexes(self):
"""The indicies of the mask's border pixels, where a border pixel is any unmasked pixel on an
exterior edge (e.g. next to at least one pixel with a `True` value but not central pixels like those within
an annulus mask).
"""
return mask_2d_util.sub_border_pixel_slim_indexes_from(
mask_2d=self, sub_size=self.sub_size
).astype("int")
@array_2d_util.Memoizer()
def blurring_mask_from_kernel_shape(self, kernel_shape_native):
"""
Returns a blurring mask, which represents all masked pixels whose light will be blurred into unmasked
pixels via PSF convolution (see grid.Grid2D.blurring_grid_from_mask_and_psf_shape).
Parameters
----------
kernel_shape_native : (int, int)
The shape of the psf which defines the blurring region (e.g. the shape of the PSF)
"""
if kernel_shape_native[0] % 2 == 0 or kernel_shape_native[1] % 2 == 0:
raise exc.MaskException("psf_size of exterior region must be odd")
blurring_mask = mask_2d_util.blurring_mask_2d_from(
mask_2d=self, kernel_shape_native=kernel_shape_native
)
return Mask2D(
mask=blurring_mask,
sub_size=1,
pixel_scales=self.pixel_scales,
origin=self.origin,
)
@property
def unmasked_mask(self):
"""
        Returns a `Mask2D` of the same shape, pixel scales, sub-size and origin as this mask in which every pixel
        is unmasked (`False`).
"""
return Mask2D.unmasked(
shape_native=self.shape_native,
sub_size=self.sub_size,
pixel_scales=self.pixel_scales,
origin=self.origin,
)
@property
def edge_mask(self):
"""
        Returns a `Mask2D` in which only this mask's edge pixels are unmasked, where an edge pixel is any unmasked
        pixel next to at least one pixel with a `True` value.
"""
mask = np.full(fill_value=True, shape=self.shape)
mask[self._edge_2d_indexes[:, 0], self._edge_2d_indexes[:, 1]] = False
return Mask2D(
mask=mask,
sub_size=self.sub_size,
pixel_scales=self.pixel_scales,
origin=self.origin,
)
@property
def border_mask(self):
"""
        Returns a `Mask2D` in which only this mask's border pixels are unmasked, where a border pixel is any
        unmasked pixel on an exterior edge (e.g. next to at least one pixel with a `True` value but not central
        pixels like those within an annulus mask).
"""
mask = np.full(fill_value=True, shape=self.shape)
mask[self._border_2d_indexes[:, 0], self._border_2d_indexes[:, 1]] = False
return Mask2D(
mask=mask,
sub_size=self.sub_size,
pixel_scales=self.pixel_scales,
origin=self.origin,
)
@property
def _sub_mask_index_for_sub_mask_1d_index(self):
"""
A 1D array of mappings between every unmasked sub pixel and its 2D sub-pixel coordinates.
"""
return mask_2d_util.native_index_for_slim_index_2d_from(
mask_2d=self, sub_size=self.sub_size
).astype("int")
@property
@array_2d_util.Memoizer()
def _slim_index_for_sub_slim_index(self):
"""
        The mapping between every sub-pixel and its host pixel.
For example:
- sub_to_pixel[8] = 2 - The ninth sub-pixel is within the 3rd pixel.
- sub_to_pixel[20] = 4 - The twenty first sub-pixel is within the 5th pixel.
"""
return mask_2d_util.slim_index_for_sub_slim_index_via_mask_2d_from(
mask_2d=self, sub_size=self.sub_size
).astype("int")
@property
def zoom_centre(self):
extraction_grid_1d = self.grid_pixels_from_grid_scaled_1d(
grid_scaled_1d=self.masked_grid_sub_1.slim
)
y_pixels_max = np.max(extraction_grid_1d[:, 0])
y_pixels_min = np.min(extraction_grid_1d[:, 0])
x_pixels_max = np.max(extraction_grid_1d[:, 1])
x_pixels_min = np.min(extraction_grid_1d[:, 1])
return (
((y_pixels_max + y_pixels_min - 1.0) / 2.0),
((x_pixels_max + x_pixels_min - 1.0) / 2.0),
)
@property
def zoom_offset_pixels(self):
if self.pixel_scales is None:
return self.central_pixel_coordinates
return (
self.zoom_centre[0] - self.central_pixel_coordinates[0],
self.zoom_centre[1] - self.central_pixel_coordinates[1],
)
@property
def zoom_offset_scaled(self):
return (
-self.pixel_scales[0] * self.zoom_offset_pixels[0],
self.pixel_scales[1] * self.zoom_offset_pixels[1],
)
@property
def zoom_region(self):
"""
The zoomed rectangular region corresponding to the square encompassing all unmasked values. This zoomed
        extraction region is a square, even if the mask is rectangular.
This is used to zoom in on the region of an image that is used in an analysis for visualization.
"""
# Have to convert mask to bool for invert function to work.
where = np.array(np.where(np.invert(self.astype("bool"))))
y0, x0 = np.amin(where, axis=1)
y1, x1 = np.amax(where, axis=1)
ylength = y1 - y0
xlength = x1 - x0
if ylength > xlength:
length_difference = ylength - xlength
x1 += int(length_difference / 2.0)
x0 -= int(length_difference / 2.0)
elif xlength > ylength:
length_difference = xlength - ylength
y1 += int(length_difference / 2.0)
y0 -= int(length_difference / 2.0)
return [y0, y1 + 1, x0, x1 + 1]
@property
def zoom_shape_native(self):
region = self.zoom_region
return (region[1] - region[0], region[3] - region[2])
@property
def zoom_mask_unmasked(self):
""" The scaled-grid of (y,x) coordinates of every pixel.
This is defined from the top-left corner, such that the first pixel at location [0, 0] will have a negative x \
value y value in scaled units.
"""
return Mask2D.unmasked(
shape_native=self.zoom_shape_native,
pixel_scales=self.pixel_scales,
sub_size=self.sub_size,
origin=self.zoom_offset_scaled,
)
class Mask2D(AbstractMask2D):
@classmethod
def manual(
cls,
mask: np.ndarray or list,
pixel_scales: (float, float),
sub_size: int = 1,
origin: (float, float) = (0.0, 0.0),
invert: bool = False,
) -> "Mask2D":
"""
Returns a Mask2D (see `AbstractMask2D.__new__`) by inputting the array values in 2D, for example:
mask=np.array([[False, False],
[True, False]])
mask=[[False, False],
[True, False]]
Parameters
----------
mask : np.ndarray or list
The `bool` values of the mask input as an `np.ndarray` of shape [total_y_pixels, total_x_pixels] or a
list of lists.
pixel_scales: (float, float) or float
The (y,x) scaled units to pixel units conversion factors of every pixel. If this is input as a `float`,
it is converted to a (float, float) structure.
sub_size : int
The size (sub_size x sub_size) of each unmasked pixels sub-array.
origin : (float, float)
The (y,x) scaled units origin of the mask's coordinate system.
invert : bool
If `True`, the `bool`'s of the input `mask` are inverted, for example `False`'s become `True`
            and vice versa.
"""
if type(mask) is list:
mask = np.asarray(mask).astype("bool")
if invert:
mask = np.invert(mask)
pixel_scales = geometry_util.convert_pixel_scales_2d(pixel_scales=pixel_scales)
if len(mask.shape) != 2:
raise exc.MaskException("The input mask is not a two dimensional array")
return cls(
mask=mask, pixel_scales=pixel_scales, sub_size=sub_size, origin=origin
)
@classmethod
def unmasked(
cls,
shape_native: (int, int),
pixel_scales: (float, float),
sub_size: int = 1,
origin: (float, float) = (0.0, 0.0),
invert: bool = False,
) -> "Mask2D":
"""
Create a mask where all pixels are `False` and therefore unmasked.
Parameters
----------
shape_native : (int, int)
The 2D shape of the mask that is created.
pixel_scales: (float, float) or float
The (y,x) scaled units to pixel units conversion factors of every pixel. If this is input as a `float`,
it is converted to a (float, float) structure.
sub_size : int
The size (sub_size x sub_size) of each unmasked pixels sub-array.
origin : (float, float)
The (y,x) scaled units origin of the mask's coordinate system.
invert : bool
If `True`, the `bool`'s of the input `mask` are inverted, for example `False`'s become `True`
            and vice versa.
"""
return cls.manual(
mask=np.full(shape=shape_native, fill_value=False),
pixel_scales=pixel_scales,
sub_size=sub_size,
origin=origin,
invert=invert,
)
@classmethod
def circular(
cls,
shape_native: (int, int),
radius: float,
pixel_scales: (float, float),
sub_size: int = 1,
origin: (float, float) = (0.0, 0.0),
centre: (float, float) = (0.0, 0.0),
invert: bool = False,
) -> "Mask2D":
"""
Returns a Mask2D (see *Mask2D.__new__*) where all `False` entries are within a circle of input radius.
The `radius` and `centre` are both input in scaled units.
Parameters
----------
shape_native : (int, int)
The (y,x) shape of the mask in units of pixels.
radius : float
The radius in scaled units of the circle within which pixels are `False` and unmasked.
pixel_scales: (float, float) or float
The (y,x) scaled units to pixel units conversion factors of every pixel. If this is input as a `float`,
it is converted to a (float, float) structure.
sub_size : int
The size (sub_size x sub_size) of each unmasked pixels sub-array.
origin : (float, float)
The (y,x) scaled units origin of the mask's coordinate system.
centre: (float, float)
The (y,x) scaled units centre of the circle used to mask pixels.
invert : bool
If `True`, the `bool`'s of the input `mask` are inverted, for example `False`'s become `True`
            and vice versa.
"""
if type(pixel_scales) is not tuple:
            if isinstance(pixel_scales, (float, int)):
pixel_scales = (float(pixel_scales), float(pixel_scales))
mask = mask_2d_util.mask_2d_circular_from(
shape_native=shape_native,
pixel_scales=pixel_scales,
radius=radius,
centre=centre,
)
return cls.manual(
mask=mask,
pixel_scales=pixel_scales,
sub_size=sub_size,
origin=origin,
invert=invert,
)
@classmethod
def circular_annular(
cls,
shape_native: (int, int),
inner_radius: float,
outer_radius: float,
pixel_scales: (float, float),
sub_size: int = 1,
origin: (float, float) = (0.0, 0.0),
centre: (float, float) = (0.0, 0.0),
invert: bool = False,
) -> "Mask2D":
"""
Returns a Mask2D (see *Mask2D.__new__*) where all `False` entries are within an annulus of input
inner radius and outer radius.
The `inner_radius`, `outer_radius` and `centre` are all input in scaled units.
Parameters
----------
shape_native : (int, int)
The (y,x) shape of the mask in units of pixels.
inner_radius : float
The inner radius in scaled units of the annulus within which pixels are `False` and unmasked.
outer_radius : float
The outer radius in scaled units of the annulus within which pixels are `False` and unmasked.
pixel_scales: (float, float) or float
The (y,x) scaled units to pixel units conversion factors of every pixel. If this is input as a `float`,
it is converted to a (float, float) structure.
sub_size : int
The size (sub_size x sub_size) of each unmasked pixels sub-array.
origin : (float, float)
The (y,x) scaled units origin of the mask's coordinate system.
centre: (float, float)
The (y,x) scaled units centre of the annulus used to mask pixels.
invert : bool
If `True`, the `bool`'s of the input `mask` are inverted, for example `False`'s become `True`
            and vice versa.
"""
if type(pixel_scales) is not tuple:
            if isinstance(pixel_scales, (float, int)):
pixel_scales = (float(pixel_scales), float(pixel_scales))
mask = mask_2d_util.mask_2d_circular_annular_from(
shape_native=shape_native,
pixel_scales=pixel_scales,
inner_radius=inner_radius,
outer_radius=outer_radius,
centre=centre,
)
return cls.manual(
mask=mask,
pixel_scales=pixel_scales,
sub_size=sub_size,
origin=origin,
invert=invert,
)
@classmethod
def circular_anti_annular(
cls,
shape_native: (int, int),
inner_radius: float,
outer_radius: float,
outer_radius_2: float,
pixel_scales: (float, float),
sub_size: int = 1,
origin: (float, float) = (0.0, 0.0),
centre: (float, float) = (0.0, 0.0),
invert: bool = False,
) -> "Mask2D":
"""
Returns a Mask2D (see *Mask2D.__new__*) where all `False` entries are within an inner circle and second
outer circle, forming an inverse annulus.
The `inner_radius`, `outer_radius`, `outer_radius_2` and `centre` are all input in scaled units.
Parameters
----------
shape_native : (int, int)
The (y,x) shape of the mask in units of pixels.
inner_radius : float
The inner radius in scaled units of the annulus within which pixels are `False` and unmasked.
outer_radius : float
The first outer radius in scaled units of the annulus within which pixels are `True` and masked.
outer_radius_2 : float
The second outer radius in scaled units of the annulus within which pixels are `False` and unmasked and
outside of which all entries are `True` and masked.
pixel_scales: (float, float) or float
The (y,x) scaled units to pixel units conversion factors of every pixel. If this is input as a `float`,
it is converted to a (float, float) structure.
sub_size : int
The size (sub_size x sub_size) of each unmasked pixels sub-array.
origin : (float, float)
The (y,x) scaled units origin of the mask's coordinate system.
centre: (float, float)
The (y,x) scaled units centre of the anti-annulus used to mask pixels.
invert : bool
If `True`, the `bool`'s of the input `mask` are inverted, for example `False`'s become `True`
            and vice versa.
"""
if type(pixel_scales) is not tuple:
            if isinstance(pixel_scales, (float, int)):
pixel_scales = (float(pixel_scales), float(pixel_scales))
mask = mask_2d_util.mask_2d_circular_anti_annular_from(
shape_native=shape_native,
pixel_scales=pixel_scales,
inner_radius=inner_radius,
outer_radius=outer_radius,
outer_radius_2_scaled=outer_radius_2,
centre=centre,
)
return cls.manual(
mask=mask,
pixel_scales=pixel_scales,
sub_size=sub_size,
origin=origin,
invert=invert,
)
@classmethod
def elliptical(
cls,
shape_native: (int, int),
major_axis_radius: float,
axis_ratio: float,
angle: float,
pixel_scales: (float, float),
sub_size: int = 1,
origin: (float, float) = (0.0, 0.0),
centre: (float, float) = (0.0, 0.0),
invert: bool = False,
) -> "Mask2D":
"""
Returns a Mask2D (see *Mask2D.__new__*) where all `False` entries are within an ellipse.
        The `major_axis_radius` and `centre` are both input in scaled units.
Parameters
----------
shape_native : (int, int)
The (y,x) shape of the mask in units of pixels.
major_axis_radius : float
The major-axis in scaled units of the ellipse within which pixels are unmasked.
axis_ratio : float
The axis-ratio of the ellipse within which pixels are unmasked.
angle : float
The rotation angle of the ellipse within which pixels are unmasked, (counter-clockwise from the positive \
x-axis).
pixel_scales: (float, float) or float
The (y,x) scaled units to pixel units conversion factors of every pixel. If this is input as a `float`,
it is converted to a (float, float) structure.
sub_size : int
The size (sub_size x sub_size) of each unmasked pixels sub-array.
origin : (float, float)
The (y,x) scaled units origin of the mask's coordinate system.
centre: (float, float)
            The (y,x) scaled units centre of the ellipse used to mask pixels.
invert : bool
If `True`, the `bool`'s of the input `mask` are inverted, for example `False`'s become `True`
            and vice versa.
"""
if type(pixel_scales) is not tuple:
            if isinstance(pixel_scales, (float, int)):
pixel_scales = (float(pixel_scales), float(pixel_scales))
mask = mask_2d_util.mask_2d_elliptical_from(
shape_native=shape_native,
pixel_scales=pixel_scales,
major_axis_radius=major_axis_radius,
axis_ratio=axis_ratio,
angle=angle,
centre=centre,
)
return cls.manual(
mask=mask,
pixel_scales=pixel_scales,
sub_size=sub_size,
origin=origin,
invert=invert,
)
@classmethod
def elliptical_annular(
cls,
shape_native: (int, int),
inner_major_axis_radius: float,
inner_axis_ratio: float,
inner_phi: float,
outer_major_axis_radius: float,
outer_axis_ratio: float,
outer_phi: float,
pixel_scales: (float, float),
sub_size: int = 1,
origin: (float, float) = (0.0, 0.0),
centre: (float, float) = (0.0, 0.0),
invert: bool = False,
) -> "Mask2D":
"""
Returns a Mask2D (see *Mask2D.__new__*) where all `False` entries are within an elliptical annulus of input
inner and outer scaled major-axis and centre.
The `outer_major_axis_radius`, `inner_major_axis_radius` and `centre` are all input in scaled units.
Parameters
----------
        shape_native : (int, int)
The (y,x) shape of the mask in units of pixels.
pixel_scales : (float, float)
The scaled units to pixel units conversion factor of each pixel.
inner_major_axis_radius : float
The major-axis in scaled units of the inner ellipse within which pixels are masked.
inner_axis_ratio : float
The axis-ratio of the inner ellipse within which pixels are masked.
inner_phi : float
The rotation angle of the inner ellipse within which pixels are masked, (counter-clockwise from the \
positive x-axis).
outer_major_axis_radius : float
The major-axis in scaled units of the outer ellipse within which pixels are unmasked.
outer_axis_ratio : float
The axis-ratio of the outer ellipse within which pixels are unmasked.
outer_phi : float
The rotation angle of the outer ellipse within which pixels are unmasked, (counter-clockwise from the \
positive x-axis).
sub_size : int
The size (sub_size x sub_size) of each unmasked pixels sub-array.
origin : (float, float)
The (y,x) scaled units origin of the mask's coordinate system.
centre: (float, float)
The (y,x) scaled units centre of the elliptical annuli used to mask pixels.
invert : bool
If `True`, the `bool`'s of the input `mask` are inverted, for example `False`'s become `True`
            and vice versa.
"""
if type(pixel_scales) is not tuple:
            if isinstance(pixel_scales, (float, int)):
pixel_scales = (float(pixel_scales), float(pixel_scales))
mask = mask_2d_util.mask_2d_elliptical_annular_from(
shape_native=shape_native,
pixel_scales=pixel_scales,
inner_major_axis_radius=inner_major_axis_radius,
inner_axis_ratio=inner_axis_ratio,
inner_phi=inner_phi,
outer_major_axis_radius=outer_major_axis_radius,
outer_axis_ratio=outer_axis_ratio,
outer_phi=outer_phi,
centre=centre,
)
return cls.manual(
mask=mask,
pixel_scales=pixel_scales,
sub_size=sub_size,
origin=origin,
invert=invert,
)
@classmethod
def from_pixel_coordinates(
cls,
shape_native: (int, int),
pixel_coordinates: [[int, int]],
pixel_scales: (float, float),
sub_size: int = 1,
origin: (float, float) = (0.0, 0.0),
buffer: int = 0,
invert: bool = False,
) -> "Mask2D":
"""
Returns a Mask2D (see *Mask2D.__new__*) where all `False` entries are defined from an input list of list of
pixel coordinates.
        These may be buffed via an input `buffer`, whereby all entries are expanded with `False` values in all 8
        neighboring directions by this amount.
Parameters
----------
        shape_native : (int, int)
The (y,x) shape of the mask in units of pixels.
pixel_coordinates : [[int, int]]
The input lists of 2D pixel coordinates where `False` entries are created.
pixel_scales : (float, float)
The scaled units to pixel units conversion factor of each pixel.
sub_size : int
The size (sub_size x sub_size) of each unmasked pixels sub-array.
origin : (float, float)
The (y,x) scaled units origin of the mask's coordinate system.
buffer : int
All input `pixel_coordinates` are buffed with `False` entries in all 8 neighboring directions by this
amount.
invert : bool
If `True`, the `bool`'s of the input `mask` are inverted, for example `False`'s become `True`
            and vice versa.
"""
mask = mask_2d_util.mask_2d_via_pixel_coordinates_from(
shape_native=shape_native,
pixel_coordinates=pixel_coordinates,
buffer=buffer,
)
return cls.manual(
mask=mask,
pixel_scales=pixel_scales,
sub_size=sub_size,
origin=origin,
invert=invert,
)
@classmethod
def from_fits(
cls,
file_path: str,
pixel_scales: (float, float),
hdu: int = 0,
sub_size: int = 1,
origin: (float, float) = (0.0, 0.0),
resized_mask_shape: (int, int) = None,
) -> "Mask2D":
"""
        Loads the mask from a .fits file.
Parameters
----------
file_path : str
The full path of the fits file.
hdu : int
            The HDU number in the fits file containing the mask.
pixel_scales : float or (float, float)
The scaled units to pixel units conversion factor of each pixel.
sub_size : int
The size (sub_size x sub_size) of each unmasked pixels sub-array.
origin : (float, float)
The (y,x) scaled units origin of the mask's coordinate system.
"""
if type(pixel_scales) is not tuple:
            if isinstance(pixel_scales, (float, int)):
pixel_scales = (float(pixel_scales), float(pixel_scales))
mask = cls(
array_2d_util.numpy_array_2d_from_fits(file_path=file_path, hdu=hdu),
pixel_scales=pixel_scales,
sub_size=sub_size,
origin=origin,
)
if resized_mask_shape is not None:
mask = mask.resized_mask_from_new_shape(new_shape=resized_mask_shape)
return mask
```
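A short usage sketch (not part of the module) of the `Mask2D` factories and geometry properties defined above. It assumes the package is installed so `autoarray.mask.mask_2d` is importable, and that the working directory is writable for the `.fits` round trip.
```python
from autoarray.mask.mask_2d import Mask2D

# A circular region of unmasked (`False`) pixels at the centre of a 7x7 grid.
mask = Mask2D.circular(shape_native=(7, 7), radius=1.0, pixel_scales=0.5)

print(mask.shape_native)              # (7, 7)
print(mask.shape_native_scaled)       # (3.5, 3.5)
print(mask.central_pixel_coordinates)
print(mask.zoom_region)               # [y0, y1+1, x0, x1+1] bounds of the unmasked square

# Round-trip the mask through a .fits file using the methods defined above.
mask.output_to_fits(file_path="mask.fits", overwrite=True)
mask_loaded = Mask2D.from_fits(file_path="mask.fits", pixel_scales=0.5)
```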
#### File: autoarray/mock/fixtures.py
```python
import numpy as np
from autoarray.mask import mask_1d
from autoarray.mask import mask_2d
from autoarray.structures.arrays.two_d import array_2d
from autoarray.structures.grids.one_d import grid_1d
from autoarray.structures.grids.two_d import grid_2d
from autoarray.structures.grids.two_d import grid_2d_iterate
from autoarray.structures.grids.two_d import grid_2d_irregular
from autoarray.structures.grids.two_d import grid_2d_pixelization
from autoarray.layout import layout as lo
from autoarray.structures import kernel_2d
from autoarray.structures import visibilities as vis
from autoarray.dataset import imaging
from autoarray.dataset import interferometer
from autoarray.operators import convolver
from autoarray.operators import transformer
from autoarray.fit import fit
from autoarray.inversion import regularization as reg
from autoarray.inversion import mappers
from autoarray.inversion import inversions
def make_mask_1d_7():
mask = np.array([True, True, False, False, False, True, True])
return mask_1d.Mask1D.manual(mask=mask, pixel_scales=(1.0,), sub_size=1)
def make_sub_mask_1d_7():
mask = np.array([True, True, False, False, False, True, True])
return mask_1d.Mask1D.manual(mask=mask, pixel_scales=(1.0,), sub_size=2)
def make_mask_2d_7x7():
mask = np.array(
[
[True, True, True, True, True, True, True],
[True, True, True, True, True, True, True],
[True, True, False, False, False, True, True],
[True, True, False, False, False, True, True],
[True, True, False, False, False, True, True],
[True, True, True, True, True, True, True],
[True, True, True, True, True, True, True],
]
)
return mask_2d.Mask2D.manual(mask=mask, pixel_scales=(1.0, 1.0), sub_size=1)
def make_sub_mask_2d_7x7():
mask = np.array(
[
[True, True, True, True, True, True, True],
[True, True, True, True, True, True, True],
[True, True, False, False, False, True, True],
[True, True, False, False, False, True, True],
[True, True, False, False, False, True, True],
[True, True, True, True, True, True, True],
[True, True, True, True, True, True, True],
]
)
return mask_2d.Mask2D.manual(mask=mask, sub_size=2, pixel_scales=(1.0, 1.0))
def make_mask_2d_7x7_1_pix():
mask = np.array(
[
[True, True, True, True, True, True, True],
[True, True, True, True, True, True, True],
[True, True, True, True, True, True, True],
[True, True, True, False, True, True, True],
[True, True, True, True, True, True, True],
[True, True, True, True, True, True, True],
[True, True, True, True, True, True, True],
]
)
return mask_2d.Mask2D.manual(mask=mask, pixel_scales=(1.0, 1.0))
def make_blurring_mask_2d_7x7():
blurring_mask = np.array(
[
[True, True, True, True, True, True, True],
[True, False, False, False, False, False, True],
[True, False, True, True, True, False, True],
[True, False, True, True, True, False, True],
[True, False, True, True, True, False, True],
[True, False, False, False, False, False, True],
[True, True, True, True, True, True, True],
]
)
return mask_2d.Mask2D.manual(mask=blurring_mask, pixel_scales=(1.0, 1.0))
### arrays ###
def make_array_2d_7x7():
return array_2d.Array2D.ones(shape_native=(7, 7), pixel_scales=(1.0, 1.0))
def make_layout_2d_7x7():
return lo.Layout2D(
shape_2d=(7, 7),
original_roe_corner=(1, 0),
serial_overscan=(0, 6, 6, 7),
serial_prescan=(0, 7, 0, 1),
parallel_overscan=(6, 7, 1, 6),
)
def make_array_2d_layout_7x7():
return array_2d.Array2D.ones(
shape_native=(7, 7), pixel_scales=(1.0, 1.0), layout=make_layout_2d_7x7()
)
# GRIDS #
def make_grid_1d_7():
return grid_1d.Grid1D.from_mask(mask=make_mask_1d_7())
def make_sub_grid_1d_7():
return grid_1d.Grid1D.from_mask(mask=make_sub_mask_1d_7())
def make_grid_2d_7x7():
return grid_2d.Grid2D.from_mask(mask=make_mask_2d_7x7())
def make_sub_grid_2d_7x7():
return grid_2d.Grid2D.from_mask(mask=make_sub_mask_2d_7x7())
def make_grid_2d_iterate_7x7():
return grid_2d_iterate.Grid2DIterate.from_mask(
mask=make_mask_2d_7x7(), fractional_accuracy=0.9999, sub_steps=[2, 4, 8, 16]
)
def make_sub_grid_2d_7x7_simple():
sub_grid_2d_7x7 = make_sub_grid_2d_7x7()
sub_grid_2d_7x7[0] = np.array([1.0, 1.0])
sub_grid_2d_7x7[1] = np.array([1.0, 0.0])
sub_grid_2d_7x7[2] = np.array([1.0, 1.0])
sub_grid_2d_7x7[3] = np.array([1.0, 0.0])
return sub_grid_2d_7x7
def make_blurring_grid_2d_7x7():
return grid_2d.Grid2D.from_mask(mask=make_blurring_mask_2d_7x7())
# CONVOLVERS #
def make_convolver_7x7():
return convolver.Convolver(mask=make_mask_2d_7x7(), kernel=make_psf_3x3())
def make_image_7x7():
return array_2d.Array2D.ones(shape_native=(7, 7), pixel_scales=(1.0, 1.0))
def make_psf_3x3():
return kernel_2d.Kernel2D.ones(shape_native=(3, 3), pixel_scales=(1.0, 1.0))
def make_psf_no_blur_3x3():
return kernel_2d.Kernel2D.no_blur(pixel_scales=(1.0, 1.0))
def make_noise_map_7x7():
return array_2d.Array2D.full(
fill_value=2.0, shape_native=(7, 7), pixel_scales=(1.0, 1.0)
)
def make_grid_2d_irregular_7x7():
return grid_2d_irregular.Grid2DIrregular(grid=[(0.1, 0.1), (0.2, 0.2)])
def make_grid_2d_irregular_7x7_list():
return [
grid_2d_irregular.Grid2DIrregular(grid=[(0.1, 0.1), (0.2, 0.2)]),
grid_2d_irregular.Grid2DIrregular(grid=[(0.3, 0.3)]),
]
def make_imaging_7x7():
return imaging.Imaging(
image=make_image_7x7(),
psf=make_psf_3x3(),
noise_map=make_noise_map_7x7(),
name="mock_imaging_7x7",
)
def make_imaging_no_blur_7x7():
return imaging.Imaging(
image=make_image_7x7(),
psf=make_psf_no_blur_3x3(),
noise_map=make_noise_map_7x7(),
name="mock_imaging_7x7",
)
def make_visibilities_7():
visibilities = vis.Visibilities.full(shape_slim=(7,), fill_value=1.0)
visibilities[6] = -1.0 - 1.0j
return visibilities
def make_visibilities_noise_map_7():
return vis.VisibilitiesNoiseMap.full(shape_slim=(7,), fill_value=2.0)
def make_uv_wavelengths_7x2():
return np.array(
[
[-55636.4609375, 171376.90625],
[-6903.21923828, 51155.578125],
[-63488.4140625, 4141.28369141],
[55502.828125, 47016.7265625],
[54160.75390625, -99354.1796875],
[-9327.66308594, -95212.90625],
[0.0, 0.0],
]
)
def make_interferometer_7():
return interferometer.Interferometer(
visibilities=make_visibilities_7(),
noise_map=make_visibilities_noise_map_7(),
uv_wavelengths=make_uv_wavelengths_7x2(),
real_space_mask=make_sub_mask_2d_7x7(),
settings=interferometer.SettingsInterferometer(
grid_class=grid_2d.Grid2D,
sub_size=1,
transformer_class=transformer.TransformerDFT,
),
)
def make_interferometer_7_grid():
return interferometer.Interferometer(
visibilities=make_visibilities_7(),
noise_map=make_visibilities_noise_map_7(),
uv_wavelengths=make_uv_wavelengths_7x2(),
real_space_mask=make_sub_mask_2d_7x7(),
settings=interferometer.SettingsInterferometer(
sub_size=1, transformer_class=transformer.TransformerDFT
),
)
def make_interferometer_7_lop():
return interferometer.Interferometer(
visibilities=make_visibilities_7(),
noise_map=make_visibilities_noise_map_7(),
uv_wavelengths=make_uv_wavelengths_7x2(),
real_space_mask=make_mask_2d_7x7(),
settings=interferometer.SettingsInterferometer(
sub_size_inversion=1, transformer_class=transformer.TransformerNUFFT
),
)
def make_transformer_7x7_7():
return transformer.TransformerDFT(
uv_wavelengths=make_uv_wavelengths_7x2(), real_space_mask=make_mask_2d_7x7()
)
### MASKED DATA ###
def make_masked_imaging_7x7():
imaging_7x7 = make_imaging_7x7()
return imaging_7x7.apply_mask(mask=make_sub_mask_2d_7x7())
def make_masked_imaging_no_blur_7x7():
imaging_7x7 = make_imaging_no_blur_7x7()
return imaging_7x7.apply_mask(mask=make_sub_mask_2d_7x7())
def make_imaging_fit_x1_plane_7x7():
return fit.FitImaging(
imaging=make_masked_imaging_7x7(),
model_image=5.0 * make_masked_imaging_7x7().image,
use_mask_in_fit=False,
)
def make_fit_interferometer_7():
interferometer_7 = make_interferometer_7()
fit_interferometer = fit.FitInterferometer(
interferometer=interferometer_7,
model_visibilities=5.0 * interferometer_7.visibilities,
use_mask_in_fit=False,
)
fit_interferometer.dataset = interferometer_7
return fit_interferometer
def make_rectangular_pixelization_grid_3x3():
return grid_2d_pixelization.Grid2DRectangular.overlay_grid(
grid=make_grid_2d_7x7(), shape_native=(3, 3)
)
def make_rectangular_mapper_7x7_3x3():
return mappers.mapper(
source_grid_slim=make_grid_2d_7x7(),
source_pixelization_grid=make_rectangular_pixelization_grid_3x3(),
)
def make_voronoi_pixelization_grid_9():
grid_9 = grid_2d.Grid2D.manual_slim(
grid=[
[0.6, -0.3],
[0.5, -0.8],
[0.2, 0.1],
[0.0, 0.5],
[-0.3, -0.8],
[-0.6, -0.5],
[-0.4, -1.1],
[-1.2, 0.8],
[-1.5, 0.9],
],
shape_native=(3, 3),
pixel_scales=1.0,
)
return grid_2d_pixelization.Grid2DVoronoi(
grid=grid_9,
nearest_pixelization_index_for_slim_index=np.zeros(
shape=make_grid_2d_7x7().shape_slim, dtype="int"
),
)
def make_voronoi_mapper_9_3x3():
return mappers.mapper(
source_grid_slim=make_grid_2d_7x7(),
source_pixelization_grid=make_voronoi_pixelization_grid_9(),
data_pixelization_grid=grid_2d.Grid2D.uniform(
shape_native=(2, 2), pixel_scales=0.1
),
)
def make_rectangular_inversion_7x7_3x3():
regularization = reg.Constant(coefficient=1.0)
return inversions.inversion(
dataset=make_masked_imaging_7x7(),
mapper=make_rectangular_mapper_7x7_3x3(),
regularization=regularization,
)
def make_voronoi_inversion_9_3x3():
regularization = reg.Constant(coefficient=1.0)
return inversions.inversion(
dataset=make_masked_imaging_7x7(),
mapper=make_voronoi_mapper_9_3x3(),
regularization=regularization,
)
### EUCLID DATA ####
def make_euclid_data():
return np.zeros((2086, 2128))
### ACS DATA ####
def make_acs_ccd():
return np.zeros((2068, 4144))
def make_acs_quadrant():
return np.zeros((2068, 2072))
```
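These `make_*` helpers are plain factory functions; in the test suite they are typically wrapped as pytest fixtures. The wrapping below is a sketch (not the project's actual conftest), written as if it sits alongside the fixtures module so `make_mask_2d_7x7` is in scope.
```python
import pytest


@pytest.fixture(name="mask_2d_7x7")
def fixture_mask_2d_7x7():
    return make_mask_2d_7x7()


def test__mask_fixture_geometry(mask_2d_7x7):
    # The 7x7 fixture has a 3x3 block of unmasked (`False`) pixels at its centre.
    assert mask_2d_7x7.shape_native == (7, 7)
    assert mask_2d_7x7.pixel_scales == (1.0, 1.0)
```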
#### File: structures/grids/abstract_grid.py
```python
import numpy as np
def convert_grid(grid):
if type(grid) is list:
grid = np.asarray(grid)
return grid
```
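`convert_grid` is a small normalisation helper: list input is promoted to a NumPy array, while an existing array passes through unchanged. A quick illustration, written as if appended to the module above:
```python
grid_list = [[1.0, 1.0], [2.0, 2.0]]
grid_arr = convert_grid(grid_list)

print(type(grid_arr))                      # <class 'numpy.ndarray'>
print(convert_grid(grid_arr) is grid_arr)  # True: arrays are returned as-is
```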
#### File: structures/grids/grid_decorators.py
```python
import numpy as np
from functools import wraps
from autoconf import conf
from autoarray.structures.grids.one_d import abstract_grid_1d
from autoarray.structures.grids.two_d import grid_2d
from autoarray.structures.grids.two_d import grid_2d_interpolate
from autoarray.structures.grids.two_d import grid_2d_iterate
from autoarray.structures.grids.two_d import grid_2d_irregular
from autoarray.structures.arrays.one_d import array_1d
from autoarray.structures.arrays import values
from autoarray import exc
from typing import Union
def grid_1d_to_structure(func):
"""
Homogenize the inputs and outputs of functions that take 2D grids of (y,x) coordinates that return the results
as a NumPy array.
Parameters
----------
func : (obj, grid, *args, **kwargs) -> Object
A function which computes a set of values from a 2D grid of (y,x) coordinates.
Returns
-------
    A function that can accept cartesian or transformed coordinates
"""
@wraps(func)
def wrapper(
obj, grid, *args, **kwargs
) -> Union[array_1d.Array1D, values.ValuesIrregular]:
"""
This decorator homogenizes the input of a "grid_like" 2D structure (`Grid2D`, `Grid2DIterate`,
`Grid2DInterpolate`, `Grid2DIrregular` or `AbstractGrid1D`) into a function. It allows these classes to be
interchangeably input into a function, such that the grid is used to evaluate the function at every (y,x)
coordinates of the grid using specific functionality of the input grid.
The grid_like objects `Grid2D` and `Grid2DIrregular` are input into the function as a slimmed 2D NumPy array
of shape [total_coordinates, 2] where the second dimension stores the (y,x) values. If a `Grid2DIterate` is
input, the function is evaluated using the appropriate iterated_*_from_func* function.
The outputs of the function are converted from a 1D or 2D NumPy Array2D to an `Array2D`, `Grid2D`,
`ValuesIrregular` or `Grid2DIrregular` objects, whichever is applicable as follows:
- If the function returns (y,x) coordinates at every input point, the returned results are a `Grid2D`
or `Grid2DIrregular` structure, the same structure as the input.
- If the function returns scalar values at every input point and a `Grid2D` is input, the returned results are
an `Array2D` structure which uses the same dimensions and mask as the `Grid2D`.
- If the function returns scalar values at every input point and `Grid2DIrregular` are input, the returned
results are a `ValuesIrregular` object with structure resembling that of the `Grid2DIrregular`.
If the input array is not a `Grid2D` structure (e.g. it is a 2D NumPy array) the output is a NumPy array.
Parameters
----------
obj : object
An object whose function uses grid_like inputs to compute quantities at every coordinate on the grid.
grid : Grid2D or Grid2DIrregular
A grid_like object of (y,x) coordinates on which the function values are evaluated.
Returns
-------
The function values evaluated on the grid with the same structure as the input grid_like object.
"""
centre = (0.0, 0.0)
if hasattr(obj, "centre"):
if obj.centre is not None:
centre = obj.centre
angle = 0.0
if hasattr(obj, "angle"):
if obj.angle is not None:
angle = obj.angle + 90.0
if (
isinstance(grid, grid_2d.Grid2D)
or isinstance(grid, grid_2d_iterate.Grid2DIterate)
or isinstance(grid, grid_2d_interpolate.Grid2DInterpolate)
):
grid_2d_projected = grid.grid_2d_radial_projected_from(
centre=centre, angle=angle
)
result = func(obj, grid_2d_projected, *args, **kwargs)
return array_1d.Array1D.manual_slim(
array=result, pixel_scales=grid.pixel_scale
)
elif isinstance(grid, grid_2d_irregular.Grid2DIrregular):
result = func(obj, grid, *args, **kwargs)
return grid.structure_2d_from_result(result=result)
elif isinstance(grid, abstract_grid_1d.AbstractGrid1D):
grid_2d_radial = grid.project_to_radial_grid_2d(angle=angle)
result = func(obj, grid_2d_radial, *args, **kwargs)
return array_1d.Array1D.manual_slim(
array=result, pixel_scales=grid.pixel_scale
)
raise exc.GridException(
"You cannot input a NumPy array to a `quantity_1d_from_grid` method."
)
return wrapper
def grid_1d_output_structure(func):
"""
Homogenize the inputs and outputs of functions that take 2D grids of (y,x) coordinates that return the results
as a NumPy array.
Parameters
----------
func : (obj, grid, *args, **kwargs) -> Object
A function which computes a set of values from a 2D grid of (y,x) coordinates.
Returns
-------
    A function that can accept cartesian or transformed coordinates
"""
@wraps(func)
def wrapper(
obj, grid, *args, **kwargs
) -> Union[array_1d.Array1D, values.ValuesIrregular]:
"""
This decorator homogenizes the input of a "grid_like" 2D structure (`Grid2D`, `Grid2DIterate`,
`Grid2DInterpolate`, `Grid2DIrregular` or `AbstractGrid1D`) into a function. It allows these classes to be
interchangeably input into a function, such that the grid is used to evaluate the function at every (y,x)
coordinates of the grid using specific functionality of the input grid.
The grid_like objects `Grid2D` and `Grid2DIrregular` are input into the function as a slimmed 2D NumPy array
of shape [total_coordinates, 2] where the second dimension stores the (y,x) values. If a `Grid2DIterate` is
input, the function is evaluated using the appropriate iterated_*_from_func* function.
The outputs of the function are converted from a 1D or 2D NumPy Array2D to an `Array2D`, `Grid2D`,
`ValuesIrregular` or `Grid2DIrregular` objects, whichever is applicable as follows:
- If the function returns (y,x) coordinates at every input point, the returned results are a `Grid2D`
or `Grid2DIrregular` structure, the same structure as the input.
- If the function returns scalar values at every input point and a `Grid2D` is input, the returned results are
an `Array2D` structure which uses the same dimensions and mask as the `Grid2D`.
- If the function returns scalar values at every input point and `Grid2DIrregular` are input, the returned
results are a `ValuesIrregular` object with structure resembling that of the `Grid2DIrregular`.
If the input array is not a `Grid2D` structure (e.g. it is a 2D NumPy array) the output is a NumPy array.
Parameters
----------
obj : object
An object whose function uses grid_like inputs to compute quantities at every coordinate on the grid.
grid : Grid2D or Grid2DIrregular
A grid_like object of (y,x) coordinates on which the function values are evaluated.
Returns
-------
The function values evaluated on the grid with the same structure as the input grid_like object.
"""
result = func(obj, grid, *args, **kwargs)
if (
isinstance(grid, grid_2d.Grid2D)
or isinstance(grid, grid_2d_iterate.Grid2DIterate)
or isinstance(grid, grid_2d_interpolate.Grid2DInterpolate)
):
return array_1d.Array1D.manual_slim(
array=result, pixel_scales=grid.pixel_scale
)
elif isinstance(grid, grid_2d_irregular.Grid2DIrregular):
return grid.structure_2d_from_result(result=result)
elif isinstance(grid, abstract_grid_1d.AbstractGrid1D):
return array_1d.Array1D.manual_slim(
array=result, pixel_scales=grid.pixel_scale
)
raise exc.GridException(
"You cannot input a NumPy array to a `quantity_1d_from_grid` method."
)
return wrapper
def grid_2d_to_structure(func):
"""
Homogenize the inputs and outputs of functions that take 2D grids of (y,x) coordinates that return the results
as a NumPy array.
Parameters
----------
func : (obj, grid, *args, **kwargs) -> Object
A function which computes a set of values from a 2D grid of (y,x) coordinates.
Returns
-------
    A function that can accept cartesian or transformed coordinates
"""
@wraps(func)
def wrapper(obj, grid, *args, **kwargs):
"""
This decorator homogenizes the input of a "grid_like" 2D structure (`Grid2D`, `Grid2DIterate`,
`Grid2DInterpolate`, `Grid2DIrregular` or `AbstractGrid1D`) into a function. It allows these classes to be
interchangeably input into a function, such that the grid is used to evaluate the function at every (y,x)
coordinates of the grid using specific functionality of the input grid.
The grid_like objects `Grid2D` and `Grid2DIrregular` are input into the function as a slimmed 2D NumPy array
of shape [total_coordinates, 2] where the second dimension stores the (y,x) values. If a `Grid2DIterate` is
input, the function is evaluated using the appropriate iterated_*_from_func* function.
The outputs of the function are converted from a 1D or 2D NumPy Array2D to an `Array2D`, `Grid2D`,
`ValuesIrregular` or `Grid2DIrregular` objects, whichever is applicable as follows:
- If the function returns (y,x) coordinates at every input point, the returned results are a `Grid2D`
or `Grid2DIrregular` structure, the same structure as the input.
- If the function returns scalar values at every input point and a `Grid2D` is input, the returned results are
an `Array2D` structure which uses the same dimensions and mask as the `Grid2D`.
- If the function returns scalar values at every input point and `Grid2DIrregular` are input, the returned
results are a `ValuesIrregular` object with structure resembling that of the `Grid2DIrregular`.
If the input array is not a `Grid2D` structure (e.g. it is a 2D NumPy array) the output is a NumPy array.
Parameters
----------
obj : object
An object whose function uses grid_like inputs to compute quantities at every coordinate on the grid.
grid : Grid2D or Grid2DIrregular
A grid_like object of (y,x) coordinates on which the function values are evaluated.
Returns
-------
The function values evaluated on the grid with the same structure as the input grid_like object.
"""
if isinstance(grid, grid_2d_iterate.Grid2DIterate):
return grid.iterated_result_from_func(func=func, cls=obj)
elif isinstance(grid, grid_2d_interpolate.Grid2DInterpolate):
return grid.result_from_func(func=func, cls=obj)
elif isinstance(grid, grid_2d_irregular.Grid2DIrregular):
result = func(obj, grid, *args, **kwargs)
return grid.structure_2d_from_result(result=result)
elif isinstance(grid, grid_2d.Grid2D):
result = func(obj, grid, *args, **kwargs)
return grid.structure_2d_from_result(result=result)
elif isinstance(grid, abstract_grid_1d.AbstractGrid1D):
grid_2d_radial = grid.project_to_radial_grid_2d()
result = func(obj, grid_2d_radial, *args, **kwargs)
return grid.structure_2d_from_result(result=result)
if not isinstance(grid, grid_2d_irregular.Grid2DIrregular) and not isinstance(
grid, grid_2d.Grid2D
):
return func(obj, grid, *args, **kwargs)
return wrapper
def grid_2d_to_structure_list(func):
"""
Homogenize the inputs and outputs of functions that take 2D grids of (y,x) coordinates and return the results as
a list of NumPy arrays.
Parameters
----------
func : (obj, grid, *args, **kwargs) -> Object
A function which computes a set of values from a 2D grid of (y,x) coordinates.
Returns
-------
    A function that can accept cartesian or transformed coordinates
"""
@wraps(func)
def wrapper(obj, grid, *args, **kwargs):
"""
This decorator serves the same purpose as the `grid_2d_to_structure` decorator, but it deals with functions whose
output is a list of results as opposed to a single NumPy array. It simply iterates over these lists to perform
the same conversions as `grid_2d_to_structure`.
Parameters
----------
obj : object
An object whose function uses grid_like inputs to compute quantities at every coordinate on the grid.
grid : Grid2D or Grid2DIrregular
A grid_like object of (y,x) coordinates on which the function values are evaluated.
Returns
-------
The function values evaluated on the grid with the same structure as the input grid_like object in a list
of NumPy arrays.
"""
if isinstance(grid, grid_2d_iterate.Grid2DIterate):
mask = grid.mask.mask_new_sub_size_from(
mask=grid.mask, sub_size=max(grid.sub_steps)
)
grid_compute = grid_2d.Grid2D.from_mask(mask=mask)
result_list = func(obj, grid_compute, *args, **kwargs)
result_list = [
grid_compute.structure_2d_from_result(result=result)
for result in result_list
]
result_list = [result.binned for result in result_list]
return grid.grid.structure_2d_list_from_result_list(result_list=result_list)
elif isinstance(grid, grid_2d_interpolate.Grid2DInterpolate):
return func(obj, grid, *args, **kwargs)
elif isinstance(grid, grid_2d_irregular.Grid2DIrregular):
result_list = func(obj, grid, *args, **kwargs)
return grid.structure_2d_list_from_result_list(result_list=result_list)
elif isinstance(grid, grid_2d.Grid2D):
result_list = func(obj, grid, *args, **kwargs)
return grid.structure_2d_list_from_result_list(result_list=result_list)
elif isinstance(grid, abstract_grid_1d.AbstractGrid1D):
grid_2d_radial = grid.project_to_radial_grid_2d()
result_list = func(obj, grid_2d_radial, *args, **kwargs)
return grid.structure_2d_list_from_result_list(result_list=result_list)
if not isinstance(grid, grid_2d_irregular.Grid2DIrregular) and not isinstance(
grid, grid_2d.Grid2D
):
return func(obj, grid, *args, **kwargs)
return wrapper
def transform(func):
"""Checks whether the input Grid2D of (y,x) coordinates have previously been transformed. If they have not \
been transformed then they are transformed.
Parameters
----------
func : (profile, grid *args, **kwargs) -> Object
A function where the input grid is the grid whose coordinates are transformed.
Returns
-------
    A function that can accept cartesian or transformed coordinates
"""
@wraps(func)
def wrapper(cls, grid, *args, **kwargs):
"""
Parameters
----------
cls : Profile
The class that owns the function.
grid : grid_like
The (y, x) coordinates in the original reference frame of the grid.
Returns
-------
A grid_like object whose coordinates may be transformed.
"""
if not isinstance(
grid,
(
grid_2d.Grid2DTransformed,
grid_2d.Grid2DTransformedNumpy,
grid_2d_irregular.Grid2DIrregularTransformed,
),
):
result = func(
cls, cls.transform_grid_to_reference_frame(grid), *args, **kwargs
)
return result
else:
return func(cls, grid, *args, **kwargs)
return wrapper
def relocate_to_radial_minimum(func):
""" Checks whether any coordinates in the grid are radially near (0.0, 0.0), which can lead to numerical faults in \
the evaluation of a function (e.g. numerical integration reaching a singularity at (0.0, 0.0)). If any coordinates
are radially within the the radial minimum threshold, their (y,x) coordinates are shifted to that value to ensure
they are evaluated at that coordinate.
The value the (y,x) coordinates are rounded to is set in the 'radial_min.ini' config.
Parameters
----------
func : (profile, *args, **kwargs) -> Object
A function that takes a grid of coordinates which may have a singularity as (0.0, 0.0)
Returns
-------
A function that can accept Cartesian or transformed coordinates
"""
@wraps(func)
def wrapper(cls, grid, *args, **kwargs):
"""
Parameters
----------
cls : Profile
The class that owns the function.
grid : grid_like
The (y, x) coordinates which are to be radially moved from (0.0, 0.0).
Returns
-------
The grid_like object whose coordinates are radially moved from (0.0, 0.0).
"""
grid_radial_minimum = conf.instance["grids"]["radial_minimum"][
"radial_minimum"
][cls.__class__.__name__]
with np.errstate(all="ignore"): # Division by zero fixed via isnan
grid_radii = cls.grid_to_grid_radii(grid=grid)
grid_radial_scale = np.where(
grid_radii < grid_radial_minimum, grid_radial_minimum / grid_radii, 1.0
)
grid = np.multiply(grid, grid_radial_scale[:, None])
grid[np.isnan(grid)] = grid_radial_minimum
return func(cls, grid, *args, **kwargs)
return wrapper
```
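The `transform` and `relocate_to_radial_minimum` decorators above share a pattern: inspect the incoming grid, normalise it if needed, then delegate to the wrapped function. Below is a minimal, self-contained sketch of that pattern for the radial-minimum case. The `ToyProfile` class, its `one_over_radius_from` method and the hard-coded `1.0e-8` threshold are illustrative stand-ins (the real value comes from config), not part of the library.

```python
from functools import wraps

import numpy as np


def relocate_to_radial_minimum_sketch(func):
    """Illustrative decorator: nudge coordinates near (0.0, 0.0) out to a minimum radius."""

    @wraps(func)
    def wrapper(cls, grid, *args, **kwargs):
        grid_radial_minimum = 1.0e-8  # stand-in for the value read from the config
        with np.errstate(all="ignore"):  # division by zero handled via isnan below
            grid_radii = np.sqrt(np.sum(grid ** 2.0, axis=1))
            grid_radial_scale = np.where(
                grid_radii < grid_radial_minimum, grid_radial_minimum / grid_radii, 1.0
            )
            grid = np.multiply(grid, grid_radial_scale[:, None])
        grid[np.isnan(grid)] = grid_radial_minimum
        return func(cls, grid, *args, **kwargs)

    return wrapper


class ToyProfile:
    @relocate_to_radial_minimum_sketch
    def one_over_radius_from(self, grid):
        return 1.0 / np.sqrt(np.sum(grid ** 2.0, axis=1))


grid = np.array([[0.0, 0.0], [1.0, 1.0]])
print(ToyProfile().one_over_radius_from(grid=grid))  # finite values, no singularity at the origin
```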
#### File: test_autoarray/dataset/test_imaging.py
```python
import os
from os import path
import numpy as np
import pytest
import shutil
import autoarray as aa
test_data_dir = path.join(
"{}".format(path.dirname(path.realpath(__file__))), "files", "imaging"
)
class TestImaging:
def test__psf_and_mask_hit_edge__automatically_pads_image_and_noise_map(self):
image = aa.Array2D.ones(shape_native=(3, 3), pixel_scales=1.0)
noise_map = aa.Array2D.ones(shape_native=(3, 3), pixel_scales=1.0)
psf = aa.Kernel2D.ones(shape_native=(3, 3), pixel_scales=1.0)
imaging = aa.Imaging(
image=image, noise_map=noise_map, psf=psf, setup_convolver=False
)
assert imaging.image.shape_native == (3, 3)
assert imaging.noise_map.shape_native == (3, 3)
imaging = aa.Imaging(
image=image, noise_map=noise_map, psf=psf, setup_convolver=True
)
assert imaging.image.shape_native == (5, 5)
assert imaging.noise_map.shape_native == (5, 5)
assert imaging.image.mask[0, 0] == True
assert imaging.image.mask[1, 1] == False
def test__new_imaging_with_signal_to_noise_limit__limit_above_max_signal_to_noise__signal_to_noise_map_unchanged(
self,
):
image = aa.Array2D.full(fill_value=20.0, shape_native=(2, 2), pixel_scales=1.0)
image[3] = 5.0
noise_map_array = aa.Array2D.full(
fill_value=5.0, shape_native=(2, 2), pixel_scales=1.0
)
noise_map_array[3] = 2.0
imaging = aa.Imaging(
image=image,
psf=aa.Kernel2D.zeros(shape_native=(3, 3), pixel_scales=1.0),
noise_map=noise_map_array,
)
imaging = imaging.signal_to_noise_limited_from(signal_to_noise_limit=100.0)
assert (imaging.image == np.array([20.0, 20.0, 20.0, 5.0])).all()
assert (imaging.noise_map == np.array([5.0, 5.0, 5.0, 2.0])).all()
assert (imaging.signal_to_noise_map == np.array([4.0, 4.0, 4.0, 2.5])).all()
def test__new_imaging_with_signal_to_noise_limit_below_max_signal_to_noise__signal_to_noise_map_capped_to_limit(
self,
):
image = aa.Array2D.full(fill_value=20.0, shape_native=(2, 2), pixel_scales=1.0)
image[3] = 5.0
noise_map_array = aa.Array2D.full(
fill_value=5.0, shape_native=(2, 2), pixel_scales=1.0
)
noise_map_array[3] = 2.0
imaging = aa.Imaging(
image=image,
psf=aa.Kernel2D.zeros(shape_native=(3, 3), pixel_scales=1.0),
noise_map=noise_map_array,
)
imaging_capped = imaging.signal_to_noise_limited_from(signal_to_noise_limit=2.0)
assert (
imaging_capped.image.native == np.array([[20.0, 20.0], [20.0, 5.0]])
).all()
assert (
imaging_capped.noise_map.native == np.array([[10.0, 10.0], [10.0, 2.5]])
).all()
assert (
imaging_capped.signal_to_noise_map.native
== np.array([[2.0, 2.0], [2.0, 2.0]])
).all()
def test__new_imaging_with_signal_to_noise_limit__include_mask_to_only_increase_centre_values(
self,
):
image = aa.Array2D.full(fill_value=20.0, shape_native=(2, 2), pixel_scales=1.0)
image[2] = 5.0
image[3] = 5.0
noise_map_array = aa.Array2D.full(
fill_value=5.0, shape_native=(2, 2), pixel_scales=1.0
)
noise_map_array[2] = 2.0
noise_map_array[3] = 2.0
mask = aa.Mask2D.manual(mask=[[True, False], [False, True]], pixel_scales=1.0)
imaging = aa.Imaging(
image=image,
psf=aa.Kernel2D.zeros(shape_native=(3, 3), pixel_scales=1.0),
noise_map=noise_map_array,
)
imaging_capped = imaging.signal_to_noise_limited_from(
signal_to_noise_limit=2.0, mask=mask
)
assert (
imaging_capped.image.native == np.array([[20.0, 20.0], [5.0, 5.0]])
).all()
assert (
imaging_capped.noise_map.native == np.array([[5.0, 10.0], [2.5, 2.0]])
).all()
assert (
imaging_capped.signal_to_noise_map.native
== np.array([[4.0, 2.0], [2.0, 2.5]])
).all()
def test__from_fits__loads_arrays_and_psf_is_normalized(self):
imaging = aa.Imaging.from_fits(
pixel_scales=0.1,
image_path=path.join(test_data_dir, "3x3_ones.fits"),
psf_path=path.join(test_data_dir, "3x3_twos.fits"),
noise_map_path=path.join(test_data_dir, "3x3_threes.fits"),
)
assert (imaging.image.native == np.ones((3, 3))).all()
assert (imaging.psf.native == (1.0 / 9.0) * np.ones((3, 3))).all()
assert (imaging.noise_map.native == 3.0 * np.ones((3, 3))).all()
assert imaging.pixel_scales == (0.1, 0.1)
assert imaging.psf.mask.pixel_scales == (0.1, 0.1)
assert imaging.noise_map.mask.pixel_scales == (0.1, 0.1)
def test__from_fits__all_files_in_one_fits__load_using_different_hdus(self):
imaging = aa.Imaging.from_fits(
pixel_scales=0.1,
image_path=path.join(test_data_dir, "3x3_multiple_hdu.fits"),
image_hdu=0,
psf_path=path.join(test_data_dir, "3x3_multiple_hdu.fits"),
psf_hdu=1,
noise_map_path=path.join(test_data_dir, "3x3_multiple_hdu.fits"),
noise_map_hdu=2,
)
assert (imaging.image.native == np.ones((3, 3))).all()
assert (imaging.psf.native == (1.0 / 9.0) * np.ones((3, 3))).all()
assert (imaging.noise_map.native == 3.0 * np.ones((3, 3))).all()
assert imaging.pixel_scales == (0.1, 0.1)
assert imaging.psf.mask.pixel_scales == (0.1, 0.1)
assert imaging.noise_map.mask.pixel_scales == (0.1, 0.1)
def test__output_to_fits__outputs_all_imaging_arrays(self):
imaging = aa.Imaging.from_fits(
pixel_scales=0.1,
image_path=path.join(test_data_dir, "3x3_ones.fits"),
psf_path=path.join(test_data_dir, "3x3_twos.fits"),
noise_map_path=path.join(test_data_dir, "3x3_threes.fits"),
)
output_data_dir = path.join(
"{}".format(os.path.dirname(os.path.realpath(__file__))),
"files",
"array",
"output_test",
)
if os.path.exists(output_data_dir):
shutil.rmtree(output_data_dir)
os.makedirs(output_data_dir)
imaging.output_to_fits(
image_path=path.join(output_data_dir, "image.fits"),
psf_path=path.join(output_data_dir, "psf.fits"),
noise_map_path=path.join(output_data_dir, "noise_map.fits"),
)
imaging = aa.Imaging.from_fits(
pixel_scales=0.1,
image_path=path.join(output_data_dir, "image.fits"),
psf_path=path.join(output_data_dir, "psf.fits"),
noise_map_path=path.join(output_data_dir, "noise_map.fits"),
)
assert (imaging.image.native == np.ones((3, 3))).all()
assert (imaging.psf.native == (1.0 / 9.0) * np.ones((3, 3))).all()
assert (imaging.noise_map.native == 3.0 * np.ones((3, 3))).all()
assert imaging.pixel_scales == (0.1, 0.1)
assert imaging.psf.mask.pixel_scales == (0.1, 0.1)
assert imaging.noise_map.mask.pixel_scales == (0.1, 0.1)
class TestImagingApplyMask:
def test__apply_mask__masks_dataset(self, imaging_7x7, sub_mask_2d_7x7):
masked_imaging_7x7 = imaging_7x7.apply_mask(mask=sub_mask_2d_7x7)
assert (masked_imaging_7x7.image.slim == np.ones(9)).all()
assert (
masked_imaging_7x7.image.native
== np.ones((7, 7)) * np.invert(sub_mask_2d_7x7)
).all()
assert (masked_imaging_7x7.noise_map.slim == 2.0 * np.ones(9)).all()
assert (
masked_imaging_7x7.noise_map.native
== 2.0 * np.ones((7, 7)) * np.invert(sub_mask_2d_7x7)
).all()
assert (masked_imaging_7x7.psf.slim == (1.0 / 9.0) * np.ones(9)).all()
assert (masked_imaging_7x7.psf.native == (1.0 / 9.0) * np.ones((3, 3))).all()
def test__grid(
self,
imaging_7x7,
sub_mask_2d_7x7,
grid_2d_7x7,
sub_grid_2d_7x7,
blurring_grid_2d_7x7,
grid_2d_iterate_7x7,
):
masked_imaging_7x7 = imaging_7x7.apply_mask(mask=sub_mask_2d_7x7)
masked_imaging_7x7 = masked_imaging_7x7.apply_settings(
settings=aa.SettingsImaging(grid_class=aa.Grid2D, sub_size=2)
)
assert isinstance(masked_imaging_7x7.grid, aa.Grid2D)
assert (masked_imaging_7x7.grid.binned == grid_2d_7x7).all()
assert (masked_imaging_7x7.grid.slim == sub_grid_2d_7x7).all()
assert isinstance(masked_imaging_7x7.blurring_grid, aa.Grid2D)
assert (masked_imaging_7x7.blurring_grid.slim == blurring_grid_2d_7x7).all()
masked_imaging_7x7 = imaging_7x7.apply_mask(mask=sub_mask_2d_7x7)
masked_imaging_7x7 = masked_imaging_7x7.apply_settings(
settings=aa.SettingsImaging(grid_class=aa.Grid2DIterate)
)
assert isinstance(masked_imaging_7x7.grid, aa.Grid2DIterate)
assert (masked_imaging_7x7.grid.binned == grid_2d_iterate_7x7).all()
assert isinstance(masked_imaging_7x7.blurring_grid, aa.Grid2DIterate)
assert (masked_imaging_7x7.blurring_grid.slim == blurring_grid_2d_7x7).all()
masked_imaging_7x7 = imaging_7x7.apply_mask(mask=sub_mask_2d_7x7)
masked_imaging_7x7 = masked_imaging_7x7.apply_settings(
settings=aa.SettingsImaging(
grid_class=aa.Grid2DInterpolate, pixel_scales_interp=1.0, sub_size=2
)
)
grid = aa.Grid2DInterpolate.from_mask(
mask=sub_mask_2d_7x7, pixel_scales_interp=1.0
)
blurring_grid = grid.blurring_grid_from_kernel_shape(kernel_shape_native=(3, 3))
assert isinstance(masked_imaging_7x7.grid, aa.Grid2DInterpolate)
assert (masked_imaging_7x7.grid == grid).all()
assert (masked_imaging_7x7.grid.vtx == grid.vtx).all()
assert (masked_imaging_7x7.grid.wts == grid.wts).all()
assert isinstance(masked_imaging_7x7.blurring_grid, aa.Grid2DInterpolate)
assert (masked_imaging_7x7.blurring_grid == blurring_grid).all()
assert (masked_imaging_7x7.blurring_grid.vtx == blurring_grid.vtx).all()
assert (masked_imaging_7x7.blurring_grid.wts == blurring_grid.wts).all()
def test__psf_and_convolvers(self, imaging_7x7, sub_mask_2d_7x7):
masked_imaging_7x7 = imaging_7x7.apply_mask(mask=sub_mask_2d_7x7)
assert type(masked_imaging_7x7.psf) == aa.Kernel2D
assert type(masked_imaging_7x7.convolver) == aa.Convolver
def test__masked_imaging__uses_signal_to_noise_limit_and_radii(
self, imaging_7x7, mask_2d_7x7
):
masked_imaging_7x7 = imaging_7x7.apply_mask(mask=mask_2d_7x7)
masked_imaging_7x7 = masked_imaging_7x7.apply_settings(
settings=aa.SettingsImaging(grid_class=aa.Grid2D, signal_to_noise_limit=0.1)
)
imaging_snr_limit = imaging_7x7.signal_to_noise_limited_from(
signal_to_noise_limit=0.1
)
assert (
masked_imaging_7x7.image.native
== imaging_snr_limit.image.native * np.invert(mask_2d_7x7)
).all()
assert (
masked_imaging_7x7.noise_map.native
== imaging_snr_limit.noise_map.native * np.invert(mask_2d_7x7)
).all()
masked_imaging_7x7 = imaging_7x7.apply_mask(mask=mask_2d_7x7)
masked_imaging_7x7 = masked_imaging_7x7.apply_settings(
settings=aa.SettingsImaging(
grid_class=aa.Grid2D,
signal_to_noise_limit=0.1,
signal_to_noise_limit_radii=1.0,
)
)
assert (
masked_imaging_7x7.noise_map.native[3, 3]
!= imaging_7x7.noise_map.native[3, 3]
)
assert masked_imaging_7x7.noise_map.native[3, 3] == 10.0
assert masked_imaging_7x7.noise_map.native[3, 4] == 10.0
assert masked_imaging_7x7.noise_map.native[4, 4] == 2.0
def test__different_imaging_without_mock_objects__customize_constructor_inputs(
self,
):
psf = aa.Kernel2D.ones(shape_native=(7, 7), pixel_scales=3.0)
imaging = aa.Imaging(
image=aa.Array2D.ones(shape_native=(19, 19), pixel_scales=3.0),
psf=psf,
noise_map=aa.Array2D.full(
fill_value=2.0, shape_native=(19, 19), pixel_scales=3.0
),
)
mask = aa.Mask2D.unmasked(
shape_native=(19, 19), pixel_scales=1.0, invert=True, sub_size=8
)
mask[9, 9] = False
masked_imaging = imaging.apply_mask(mask=mask)
assert masked_imaging.psf.native == pytest.approx(
(1.0 / 49.0) * np.ones((7, 7)), 1.0e-4
)
assert masked_imaging.convolver.kernel.shape_native == (7, 7)
assert (masked_imaging.image == np.array([1.0])).all()
assert (masked_imaging.noise_map == np.array([2.0])).all()
def test__modified_image_and_noise_map(
self, image_7x7, noise_map_7x7, imaging_7x7, sub_mask_2d_7x7
):
masked_imaging_7x7 = imaging_7x7.apply_mask(mask=sub_mask_2d_7x7)
image_7x7[0] = 10.0
noise_map_7x7[0] = 11.0
noise_map_7x7[0] = 11.0
masked_imaging_7x7 = masked_imaging_7x7.modify_noise_map(
noise_map=noise_map_7x7
)
assert masked_imaging_7x7.noise_map[0] == 11.0
masked_imaging_7x7 = masked_imaging_7x7.modify_image_and_noise_map(
image=image_7x7, noise_map=noise_map_7x7
)
assert masked_imaging_7x7.image.slim[0] == 10.0
assert masked_imaging_7x7.image.native[0, 0] == 10.0
assert masked_imaging_7x7.noise_map.slim[0] == 11.0
assert masked_imaging_7x7.noise_map.native[0, 0] == 11.0
class TestSimulatorImaging:
def test__from_image__all_features_off(self):
image = aa.Array2D.manual_native(
array=np.array([[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]]),
pixel_scales=0.1,
)
simulator = aa.SimulatorImaging(exposure_time=1.0, add_poisson_noise=False)
imaging = simulator.from_image(image=image)
assert (
imaging.image.native
== np.array([[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]])
).all()
assert imaging.pixel_scales == (0.1, 0.1)
def test__from_image__noise_off___noise_map_is_noise_value(self):
image = aa.Array2D.manual_native(
array=np.array([[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]]),
pixel_scales=1.0,
)
simulator = aa.SimulatorImaging(
exposure_time=1.0,
add_poisson_noise=False,
noise_if_add_noise_false=0.2,
noise_seed=1,
)
imaging = simulator.from_image(image=image)
assert (
imaging.image.native
== np.array([[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]])
).all()
assert (imaging.noise_map.native == 0.2 * np.ones((3, 3))).all()
def test__from_image__psf_blurs_image_with_edge_trimming(self):
image = aa.Array2D.manual_native(
array=np.array([[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]]),
pixel_scales=1.0,
)
psf = aa.Kernel2D.manual_native(
array=np.array([[0.0, 1.0, 0.0], [1.0, 2.0, 1.0], [0.0, 1.0, 0.0]]),
pixel_scales=1.0,
)
simulator = aa.SimulatorImaging(
exposure_time=1.0, psf=psf, add_poisson_noise=False, normalize_psf=False
)
imaging = simulator.from_image(image=image)
assert (
imaging.image.native
== np.array([[0.0, 1.0, 0.0], [1.0, 2.0, 1.0], [0.0, 1.0, 0.0]])
).all()
def test__setup_with_noise(self):
image = aa.Array2D.manual_native(
array=np.array([[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]]),
pixel_scales=1.0,
)
simulator = aa.SimulatorImaging(
exposure_time=20.0, add_poisson_noise=True, noise_seed=1
)
imaging = simulator.from_image(image=image)
assert imaging.image.native == pytest.approx(
np.array([[0.0, 0.0, 0.0], [0.0, 1.05, 0.0], [0.0, 0.0, 0.0]]), 1e-2
)
# Because the value is 1.05, the estimated Poisson noise_map_1d is:
# sqrt((1.05 * 20))/20 = 0.2291
assert imaging.noise_map.native == pytest.approx(
np.array([[0.0, 0.0, 0.0], [0.0, 0.2291, 0.0], [0.0, 0.0, 0.0]]), 1e-2
)
def test__from_image__background_sky_on__noise_on_so_background_adds_noise_to_image(
self,
):
image = aa.Array2D.manual_native(
array=np.array([[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]]),
pixel_scales=1.0,
)
simulator = aa.SimulatorImaging(
exposure_time=1.0,
background_sky_level=16.0,
add_poisson_noise=True,
noise_seed=1,
)
imaging = simulator.from_image(image=image)
assert (
imaging.image.native
== np.array([[1.0, 5.0, 4.0], [1.0, 2.0, 1.0], [5.0, 2.0, 7.0]])
).all()
assert imaging.noise_map.native[0, 0] == pytest.approx(4.12310, 1.0e-4)
def test__from_image__psf_and_noise__noise_added_to_blurred_image(self):
image = aa.Array2D.manual_native(
array=np.array([[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]]),
pixel_scales=1.0,
)
psf = aa.Kernel2D.manual_native(
array=np.array([[0.0, 1.0, 0.0], [1.0, 2.0, 1.0], [0.0, 1.0, 0.0]]),
pixel_scales=1.0,
)
simulator = aa.SimulatorImaging(
exposure_time=20.0,
psf=psf,
add_poisson_noise=True,
noise_seed=1,
normalize_psf=False,
)
imaging = simulator.from_image(image=image)
assert imaging.image.native == pytest.approx(
np.array([[0.0, 1.05, 0.0], [1.3, 2.35, 1.05], [0.0, 1.05, 0.0]]), 1e-2
)
```
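The `signal_to_noise_limited_from` tests above encode a simple rule: wherever the signal-to-noise ratio exceeds the limit, the noise map is scaled up so the ratio equals the limit, while the image is left untouched. The NumPy sketch below illustrates that expected behaviour with the same numbers as the tests; it is an illustration, not the library implementation.

```python
import numpy as np


def noise_map_capped_to_signal_to_noise_limit(image, noise_map, limit):
    """Return a noise map whose signal-to-noise never exceeds `limit`."""
    signal_to_noise = image / noise_map
    return np.where(signal_to_noise > limit, image / limit, noise_map)


image = np.array([20.0, 20.0, 20.0, 5.0])
noise_map = np.array([5.0, 5.0, 5.0, 2.0])

capped = noise_map_capped_to_signal_to_noise_limit(image, noise_map, limit=2.0)
print(capped)          # [10.  10.  10.   2.5]
print(image / capped)  # [2. 2. 2. 2.]
```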
#### File: test_autoarray/instruments/test_euclid.py
```python
import os
import numpy as np
import autoarray as aa
path = "{}/".format(os.path.dirname(os.path.realpath(__file__)))
class TestArray2DEuclid:
def test__euclid_array_for_four_quandrants__loads_data_and_dimensions(
self, euclid_data
):
euclid_array = aa.euclid.Array2DEuclid.top_left(array_electrons=euclid_data)
assert euclid_array.shape_native == (2086, 2128)
assert (euclid_array.native == np.zeros((2086, 2128))).all()
euclid_array = aa.euclid.Array2DEuclid.top_right(array_electrons=euclid_data)
assert euclid_array.shape_native == (2086, 2128)
assert (euclid_array.native == np.zeros((2086, 2128))).all()
euclid_array = aa.euclid.Array2DEuclid.bottom_left(array_electrons=euclid_data)
assert euclid_array.shape_native == (2086, 2128)
assert (euclid_array.native == np.zeros((2086, 2128))).all()
euclid_array = aa.euclid.Array2DEuclid.bottom_right(array_electrons=euclid_data)
assert euclid_array.shape_native == (2086, 2128)
assert (euclid_array.native == np.zeros((2086, 2128))).all()
class TestLayout2DEuclid:
def test__euclid_layout_for_four_quandrants__loads_data_and_dimensions(
self, euclid_data
):
layout = aa.euclid.Layout2DEuclid.top_left(
parallel_size=2086,
serial_size=2128,
serial_prescan_size=51,
serial_overscan_size=29,
parallel_overscan_size=20,
)
assert layout.original_roe_corner == (0, 0)
assert layout.shape_2d == (2086, 2128)
assert layout.parallel_overscan == (2066, 2086, 51, 2099)
assert layout.serial_prescan == (0, 2086, 0, 51)
assert layout.serial_overscan == (20, 2086, 2099, 2128)
layout = aa.euclid.Layout2DEuclid.top_left(
parallel_size=2086,
serial_size=2128,
serial_prescan_size=41,
serial_overscan_size=10,
parallel_overscan_size=15,
)
assert layout.original_roe_corner == (0, 0)
assert layout.shape_2d == (2086, 2128)
assert layout.parallel_overscan == (2071, 2086, 41, 2118)
assert layout.serial_prescan == (0, 2086, 0, 41)
assert layout.serial_overscan == (15, 2086, 2118, 2128)
layout = aa.euclid.Layout2DEuclid.top_right(
parallel_size=2086,
serial_size=2128,
serial_prescan_size=51,
serial_overscan_size=29,
parallel_overscan_size=20,
)
assert layout.original_roe_corner == (0, 1)
assert layout.shape_2d == (2086, 2128)
assert layout.parallel_overscan == (2066, 2086, 51, 2099)
assert layout.serial_prescan == (0, 2086, 0, 51)
assert layout.serial_overscan == (20, 2086, 2099, 2128)
layout = aa.euclid.Layout2DEuclid.top_right(
parallel_size=2086,
serial_size=2128,
serial_prescan_size=41,
serial_overscan_size=10,
parallel_overscan_size=15,
)
assert layout.original_roe_corner == (0, 1)
assert layout.shape_2d == (2086, 2128)
assert layout.parallel_overscan == (2071, 2086, 41, 2118)
assert layout.serial_prescan == (0, 2086, 0, 41)
assert layout.serial_overscan == (15, 2086, 2118, 2128)
layout = aa.euclid.Layout2DEuclid.bottom_left(
parallel_size=2086,
serial_size=2128,
serial_prescan_size=51,
serial_overscan_size=29,
parallel_overscan_size=20,
)
assert layout.original_roe_corner == (1, 0)
assert layout.shape_2d == (2086, 2128)
assert layout.parallel_overscan == (2066, 2086, 51, 2099)
assert layout.serial_prescan == (0, 2086, 0, 51)
assert layout.serial_overscan == (0, 2066, 2099, 2128)
layout = aa.euclid.Layout2DEuclid.bottom_left(
parallel_size=2086,
serial_size=2128,
serial_prescan_size=41,
serial_overscan_size=10,
parallel_overscan_size=15,
)
assert layout.original_roe_corner == (1, 0)
assert layout.shape_2d == (2086, 2128)
assert layout.parallel_overscan == (2071, 2086, 41, 2118)
assert layout.serial_prescan == (0, 2086, 0, 41)
assert layout.serial_overscan == (0, 2071, 2118, 2128)
layout = aa.euclid.Layout2DEuclid.bottom_right(
parallel_size=2086,
serial_size=2128,
serial_prescan_size=51,
serial_overscan_size=29,
parallel_overscan_size=20,
)
assert layout.original_roe_corner == (1, 1)
assert layout.shape_2d == (2086, 2128)
assert layout.parallel_overscan == (2066, 2086, 51, 2099)
assert layout.serial_prescan == (0, 2086, 0, 51)
assert layout.serial_overscan == (0, 2066, 2099, 2128)
layout = aa.euclid.Layout2DEuclid.bottom_right(
parallel_size=2086,
serial_size=2128,
serial_prescan_size=41,
serial_overscan_size=10,
parallel_overscan_size=15,
)
assert layout.original_roe_corner == (1, 1)
assert layout.shape_2d == (2086, 2128)
assert layout.parallel_overscan == (2071, 2086, 41, 2118)
assert layout.serial_prescan == (0, 2086, 0, 41)
assert layout.serial_overscan == (0, 2071, 2118, 2128)
def test__left_side__chooses_correct_layout_given_input(self, euclid_data):
layout = aa.euclid.Layout2DEuclid.from_ccd_and_quadrant_id(
ccd_id="text1", quadrant_id="E"
)
assert layout.original_roe_corner == (1, 0)
layout = aa.euclid.Layout2DEuclid.from_ccd_and_quadrant_id(
ccd_id="text2", quadrant_id="E"
)
assert layout.original_roe_corner == (1, 0)
layout = aa.euclid.Layout2DEuclid.from_ccd_and_quadrant_id(
ccd_id="text3", quadrant_id="E"
)
assert layout.original_roe_corner == (1, 0)
layout = aa.euclid.Layout2DEuclid.from_ccd_and_quadrant_id(
ccd_id="text1", quadrant_id="F"
)
assert layout.original_roe_corner == (1, 1)
layout = aa.euclid.Layout2DEuclid.from_ccd_and_quadrant_id(
ccd_id="text2", quadrant_id="F"
)
assert layout.original_roe_corner == (1, 1)
layout = aa.euclid.Layout2DEuclid.from_ccd_and_quadrant_id(
ccd_id="text3", quadrant_id="F"
)
assert layout.original_roe_corner == (1, 1)
layout = aa.euclid.Layout2DEuclid.from_ccd_and_quadrant_id(
ccd_id="text1", quadrant_id="G"
)
assert layout.original_roe_corner == (0, 1)
layout = aa.euclid.Layout2DEuclid.from_ccd_and_quadrant_id(
ccd_id="text2", quadrant_id="G"
)
assert layout.original_roe_corner == (0, 1)
layout = aa.euclid.Layout2DEuclid.from_ccd_and_quadrant_id(
ccd_id="text3", quadrant_id="G"
)
assert layout.original_roe_corner == (0, 1)
layout = aa.euclid.Layout2DEuclid.from_ccd_and_quadrant_id(
ccd_id="text1", quadrant_id="H"
)
assert layout.original_roe_corner == (0, 0)
layout = aa.euclid.Layout2DEuclid.from_ccd_and_quadrant_id(
ccd_id="text2", quadrant_id="H"
)
assert layout.original_roe_corner == (0, 0)
layout = aa.euclid.Layout2DEuclid.from_ccd_and_quadrant_id(
ccd_id="text3", quadrant_id="H"
)
assert layout.original_roe_corner == (0, 0)
def test__right_side__chooses_correct_layout_given_input(self, euclid_data):
layout = aa.euclid.Layout2DEuclid.from_ccd_and_quadrant_id(
ccd_id="text4", quadrant_id="E"
)
assert layout.original_roe_corner == (0, 1)
layout = aa.euclid.Layout2DEuclid.from_ccd_and_quadrant_id(
ccd_id="text5", quadrant_id="E"
)
assert layout.original_roe_corner == (0, 1)
layout = aa.euclid.Layout2DEuclid.from_ccd_and_quadrant_id(
ccd_id="text6", quadrant_id="E"
)
assert layout.original_roe_corner == (0, 1)
layout = aa.euclid.Layout2DEuclid.from_ccd_and_quadrant_id(
ccd_id="text4", quadrant_id="F"
)
assert layout.original_roe_corner == (0, 0)
layout = aa.euclid.Layout2DEuclid.from_ccd_and_quadrant_id(
ccd_id="text5", quadrant_id="F"
)
assert layout.original_roe_corner == (0, 0)
layout = aa.euclid.Layout2DEuclid.from_ccd_and_quadrant_id(
ccd_id="text6", quadrant_id="F"
)
assert layout.original_roe_corner == (0, 0)
layout = aa.euclid.Layout2DEuclid.from_ccd_and_quadrant_id(
ccd_id="text4", quadrant_id="G"
)
assert layout.original_roe_corner == (1, 0)
layout = aa.euclid.Layout2DEuclid.from_ccd_and_quadrant_id(
ccd_id="text5", quadrant_id="G"
)
assert layout.original_roe_corner == (1, 0)
layout = aa.euclid.Layout2DEuclid.from_ccd_and_quadrant_id(
ccd_id="text6", quadrant_id="G"
)
assert layout.original_roe_corner == (1, 0)
layout = aa.euclid.Layout2DEuclid.from_ccd_and_quadrant_id(
ccd_id="text4", quadrant_id="H"
)
assert layout.original_roe_corner == (1, 1)
layout = aa.euclid.Layout2DEuclid.from_ccd_and_quadrant_id(
ccd_id="text5", quadrant_id="H"
)
assert layout.original_roe_corner == (1, 1)
layout = aa.euclid.Layout2DEuclid.from_ccd_and_quadrant_id(
ccd_id="text6", quadrant_id="H"
)
assert layout.original_roe_corner == (1, 1)
```
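The expected regions in the tests above are consistent with simple bookkeeping of the quadrant geometry: the serial prescan occupies the first `serial_prescan_size` columns, the parallel overscan the last `parallel_overscan_size` rows (between the prescan and serial overscan columns), and the serial overscan the last `serial_overscan_size` columns (excluding the parallel overscan rows). The sketch below reproduces the bottom-left expectations from those relations; it is inferred from the test values, not taken from the library source.

```python
def bottom_left_regions(parallel_size, serial_size, serial_prescan_size,
                        serial_overscan_size, parallel_overscan_size):
    # Regions as (y0, y1, x0, x1) tuples, inferred from the expected test values.
    parallel_overscan = (parallel_size - parallel_overscan_size, parallel_size,
                         serial_prescan_size, serial_size - serial_overscan_size)
    serial_prescan = (0, parallel_size, 0, serial_prescan_size)
    serial_overscan = (0, parallel_size - parallel_overscan_size,
                       serial_size - serial_overscan_size, serial_size)
    return parallel_overscan, serial_prescan, serial_overscan


print(bottom_left_regions(2086, 2128, 51, 29, 20))
# ((2066, 2086, 51, 2099), (0, 2086, 0, 51), (0, 2066, 2099, 2128))
```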
#### File: test_autoarray/inversion/test_pixelizations.py
```python
import autoarray as aa
import numpy as np
class TestSettingsPixelization:
def test__settings_with_is_stochastic_true(self):
settings = aa.SettingsPixelization(is_stochastic=False)
settings = settings.settings_with_is_stochastic_true()
assert settings.is_stochastic is True
settings = aa.SettingsPixelization(is_stochastic=True)
settings = settings.settings_with_is_stochastic_true()
assert settings.is_stochastic is True
class TestRectangular:
def test__pixelization_grid_returns_none_as_not_used(self, sub_grid_2d_7x7):
pixelization = aa.pix.Rectangular(shape=(3, 3))
assert pixelization.sparse_grid_from_grid(grid=sub_grid_2d_7x7) == None
class TestVoronoiMagnification:
def test__number_of_pixels_setup_correct(self):
pixelization = aa.pix.VoronoiMagnification(shape=(3, 3))
assert pixelization.shape == (3, 3)
def test__pixelization_grid_returns_same_as_computed_from_grids_module(
self, sub_grid_2d_7x7
):
pixelization = aa.pix.VoronoiMagnification(shape=(3, 3))
sparse_grid = pixelization.sparse_grid_from_grid(grid=sub_grid_2d_7x7)
pixelization_grid = aa.Grid2DVoronoi(
grid=sparse_grid,
nearest_pixelization_index_for_slim_index=sparse_grid.sparse_index_for_slim_index,
)
assert (pixelization_grid == sparse_grid).all()
assert (
pixelization_grid.nearest_pixelization_index_for_slim_index
== sparse_grid.sparse_index_for_slim_index
).all()
class TestVoronoiBrightness:
def test__hyper_image_doesnt_use_min_and_max_weight_map_uses_floor_and_power(self):
hyper_image = np.array([0.0, 1.0, 0.0])
pixelization = aa.pix.VoronoiBrightnessImage(
pixels=5, weight_floor=0.0, weight_power=0.0
)
weight_map = pixelization.weight_map_from_hyper_image(hyper_image=hyper_image)
assert (weight_map == np.ones(3)).all()
pixelization = aa.pix.VoronoiBrightnessImage(
pixels=5, weight_floor=0.0, weight_power=1.0
)
weight_map = pixelization.weight_map_from_hyper_image(hyper_image=hyper_image)
assert (weight_map == np.array([0.0, 1.0, 0.0])).all()
pixelization = aa.pix.VoronoiBrightnessImage(
pixels=5, weight_floor=1.0, weight_power=1.0
)
weight_map = pixelization.weight_map_from_hyper_image(hyper_image=hyper_image)
assert (weight_map == np.array([1.0, 2.0, 1.0])).all()
pixelization = aa.pix.VoronoiBrightnessImage(
pixels=5, weight_floor=1.0, weight_power=2.0
)
weight_map = pixelization.weight_map_from_hyper_image(hyper_image=hyper_image)
assert (weight_map == np.array([1.0, 4.0, 1.0])).all()
def test__hyper_image_uses_min_and_max__weight_map_uses_floor_and_power(self):
hyper_image = np.array([-1.0, 1.0, 3.0])
pixelization = aa.pix.VoronoiBrightnessImage(
pixels=5, weight_floor=0.0, weight_power=1.0
)
weight_map = pixelization.weight_map_from_hyper_image(hyper_image=hyper_image)
assert (weight_map == np.array([0.0, 0.5, 1.0])).all()
pixelization = aa.pix.VoronoiBrightnessImage(
pixels=5, weight_floor=0.0, weight_power=2.0
)
weight_map = pixelization.weight_map_from_hyper_image(hyper_image=hyper_image)
assert (weight_map == np.array([0.0, 0.25, 1.0])).all()
pixelization = aa.pix.VoronoiBrightnessImage(
pixels=5, weight_floor=1.0, weight_power=1.0
)
weight_map = pixelization.weight_map_from_hyper_image(hyper_image=hyper_image)
assert (weight_map == np.array([3.0, 3.5, 4.0])).all()
def test__pixelization_grid_returns_same_as_computed_from_grids_module(
self, sub_grid_2d_7x7
):
pixelization = aa.pix.VoronoiBrightnessImage(
pixels=6, weight_floor=0.1, weight_power=2.0
)
hyper_image = np.array([0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0])
weight_map = pixelization.weight_map_from_hyper_image(hyper_image=hyper_image)
sparse_grid = aa.Grid2DSparse.from_total_pixels_grid_and_weight_map(
total_pixels=pixelization.pixels,
grid=sub_grid_2d_7x7,
weight_map=weight_map,
seed=1,
)
pixelization_grid = aa.Grid2DVoronoi(
grid=sparse_grid,
nearest_pixelization_index_for_slim_index=sparse_grid.sparse_index_for_slim_index,
)
assert (pixelization_grid == sparse_grid).all()
assert (
pixelization_grid.nearest_pixelization_index_for_slim_index
== sparse_grid.sparse_index_for_slim_index
).all()
class TestRegression:
def test__grid_is_relocated_via_border(self, sub_grid_2d_7x7):
pixelization = aa.pix.VoronoiMagnification(shape=(3, 3))
mask = aa.Mask2D.circular(
shape_native=(60, 60),
radius=1.0,
pixel_scales=(0.1, 0.1),
centre=(1.0, 1.0),
sub_size=1,
)
grid = aa.Grid2D.from_mask(mask=mask)
sparse_grid = pixelization.sparse_grid_from_grid(grid=grid)
grid[8, 0] = 100.0
mapper = pixelization.mapper_from_grid_and_sparse_grid(
grid=grid,
sparse_grid=sparse_grid,
settings=aa.SettingsPixelization(use_border=True),
)
assert grid[8, 0] != mapper.source_grid_slim[8, 0]
assert mapper.source_grid_slim[8, 0] < 5.0
grid[0, 0] = 0.0
sparse_grid[0, 0] = 100.0
mapper = pixelization.mapper_from_grid_and_sparse_grid(
grid=grid,
sparse_grid=sparse_grid,
settings=aa.SettingsPixelization(use_border=True),
)
assert sparse_grid[0, 0] != mapper.source_pixelization_grid[0, 0]
assert mapper.source_pixelization_grid[0, 0] < 5.0
```
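The weight-map expectations in `TestVoronoiBrightness` above are all reproduced by normalising the hyper image to the unit interval, adding `weight_floor` times the hyper image's maximum, and raising the result to `weight_power`. The sketch below is inferred from those expected values; it illustrates the behaviour the tests encode and is not the library's implementation (constant hyper images, where max equals min, are not handled here).

```python
import numpy as np


def weight_map_sketch(hyper_image, weight_floor, weight_power):
    # Normalise to [0, 1], offset by floor * max(hyper), then apply the power.
    normalized = (hyper_image - np.min(hyper_image)) / (
        np.max(hyper_image) - np.min(hyper_image)
    )
    return (normalized + weight_floor * np.max(hyper_image)) ** weight_power


hyper_image = np.array([-1.0, 1.0, 3.0])
print(weight_map_sketch(hyper_image, weight_floor=1.0, weight_power=1.0))
# matches the expected [3.0, 3.5, 4.0]
```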
#### File: test_autoarray/inversion/test_pixelization_util.py
```python
import autoarray as aa
import numpy as np
import scipy.spatial
class TestRectangular:
def test__rectangular_neighbors_from(self):
# I0I1I2I
# I3I4I5I
# I6I7I8I
(
pixel_neighbors,
pixel_neighbors_size,
) = aa.util.pixelization.rectangular_neighbors_from(shape_native=(3, 3))
assert (pixel_neighbors[0] == [1, 3, -1, -1]).all()
assert (pixel_neighbors[1] == [0, 2, 4, -1]).all()
assert (pixel_neighbors[2] == [1, 5, -1, -1]).all()
assert (pixel_neighbors[3] == [0, 4, 6, -1]).all()
assert (pixel_neighbors[4] == [1, 3, 5, 7]).all()
assert (pixel_neighbors[5] == [2, 4, 8, -1]).all()
assert (pixel_neighbors[6] == [3, 7, -1, -1]).all()
assert (pixel_neighbors[7] == [4, 6, 8, -1]).all()
assert (pixel_neighbors[8] == [5, 7, -1, -1]).all()
assert (pixel_neighbors_size == np.array([2, 3, 2, 3, 4, 3, 2, 3, 2])).all()
# I0I1I 2I 3I
# I4I5I 6I 7I
# I8I9I10I11I
(
pixel_neighbors,
pixel_neighbors_size,
) = aa.util.pixelization.rectangular_neighbors_from(shape_native=(3, 4))
assert (pixel_neighbors[0] == [1, 4, -1, -1]).all()
assert (pixel_neighbors[1] == [0, 2, 5, -1]).all()
assert (pixel_neighbors[2] == [1, 3, 6, -1]).all()
assert (pixel_neighbors[3] == [2, 7, -1, -1]).all()
assert (pixel_neighbors[4] == [0, 5, 8, -1]).all()
assert (pixel_neighbors[5] == [1, 4, 6, 9]).all()
assert (pixel_neighbors[6] == [2, 5, 7, 10]).all()
assert (pixel_neighbors[7] == [3, 6, 11, -1]).all()
assert (pixel_neighbors[8] == [4, 9, -1, -1]).all()
assert (pixel_neighbors[9] == [5, 8, 10, -1]).all()
assert (pixel_neighbors[10] == [6, 9, 11, -1]).all()
assert (pixel_neighbors[11] == [7, 10, -1, -1]).all()
assert (
pixel_neighbors_size == np.array([2, 3, 3, 2, 3, 4, 4, 3, 2, 3, 3, 2])
).all()
# I0I 1I 2I
# I3I 4I 5I
# I6I 7I 8I
# I9I10I11I
(
pixel_neighbors,
pixel_neighbors_size,
) = aa.util.pixelization.rectangular_neighbors_from(shape_native=(4, 3))
assert (pixel_neighbors[0] == [1, 3, -1, -1]).all()
assert (pixel_neighbors[1] == [0, 2, 4, -1]).all()
assert (pixel_neighbors[2] == [1, 5, -1, -1]).all()
assert (pixel_neighbors[3] == [0, 4, 6, -1]).all()
assert (pixel_neighbors[4] == [1, 3, 5, 7]).all()
assert (pixel_neighbors[5] == [2, 4, 8, -1]).all()
assert (pixel_neighbors[6] == [3, 7, 9, -1]).all()
assert (pixel_neighbors[7] == [4, 6, 8, 10]).all()
assert (pixel_neighbors[8] == [5, 7, 11, -1]).all()
assert (pixel_neighbors[9] == [6, 10, -1, -1]).all()
assert (pixel_neighbors[10] == [7, 9, 11, -1]).all()
assert (pixel_neighbors[11] == [8, 10, -1, -1]).all()
assert (
pixel_neighbors_size == np.array([2, 3, 2, 3, 4, 3, 3, 4, 3, 2, 3, 2])
).all()
# I0 I 1I 2I 3I
# I4 I 5I 6I 7I
# I8 I 9I10I11I
# I12I13I14I15I
(
pixel_neighbors,
pixel_neighbors_size,
) = aa.util.pixelization.rectangular_neighbors_from(shape_native=(4, 4))
assert (pixel_neighbors[0] == [1, 4, -1, -1]).all()
assert (pixel_neighbors[1] == [0, 2, 5, -1]).all()
assert (pixel_neighbors[2] == [1, 3, 6, -1]).all()
assert (pixel_neighbors[3] == [2, 7, -1, -1]).all()
assert (pixel_neighbors[4] == [0, 5, 8, -1]).all()
assert (pixel_neighbors[5] == [1, 4, 6, 9]).all()
assert (pixel_neighbors[6] == [2, 5, 7, 10]).all()
assert (pixel_neighbors[7] == [3, 6, 11, -1]).all()
assert (pixel_neighbors[8] == [4, 9, 12, -1]).all()
assert (pixel_neighbors[9] == [5, 8, 10, 13]).all()
assert (pixel_neighbors[10] == [6, 9, 11, 14]).all()
assert (pixel_neighbors[11] == [7, 10, 15, -1]).all()
assert (pixel_neighbors[12] == [8, 13, -1, -1]).all()
assert (pixel_neighbors[13] == [9, 12, 14, -1]).all()
assert (pixel_neighbors[14] == [10, 13, 15, -1]).all()
assert (pixel_neighbors[15] == [11, 14, -1, -1]).all()
assert (
pixel_neighbors_size
== np.array([2, 3, 3, 2, 3, 4, 4, 3, 3, 4, 4, 3, 2, 3, 3, 2])
).all()
class TestVoronoi:
def test__voronoi_neighbors_from(self):
points = np.array(
[[1.0, -1.0], [1.0, 1.0], [0.0, 0.0], [-1.0, -1.0], [-1.0, 1.0]]
)
voronoi = scipy.spatial.Voronoi(points, qhull_options="Qbb Qc Qx Qm")
(
pixel_neighbors,
pixel_neighbors_size,
) = aa.util.pixelization.voronoi_neighbors_from(
pixels=5, ridge_points=np.array(voronoi.ridge_points)
)
assert set(pixel_neighbors[0]) == {1, 2, 3, -1}
assert set(pixel_neighbors[1]) == {0, 2, 4, -1}
assert set(pixel_neighbors[2]) == {0, 1, 3, 4}
assert set(pixel_neighbors[3]) == {0, 2, 4, -1}
assert set(pixel_neighbors[4]) == {1, 2, 3, -1}
assert (pixel_neighbors_size == np.array([3, 3, 4, 3, 3])).all()
# 9 points in a square - makes a square (this is the example in the scipy documentation page)
points = np.array(
[
[2.0, 0.0],
[2.0, 1.0],
[2.0, 2.0],
[1.0, 0.0],
[1.0, 1.0],
[1.0, 2.0],
[0.0, 0.0],
[0.0, 1.0],
[0.0, 2.0],
]
)
voronoi = scipy.spatial.Voronoi(points, qhull_options="Qbb Qc Qx Qm")
(
pixel_neighbors,
pixel_neighbors_size,
) = aa.util.pixelization.voronoi_neighbors_from(
pixels=9, ridge_points=np.array(voronoi.ridge_points)
)
assert set(pixel_neighbors[0]) == {1, 3, -1, -1}
assert set(pixel_neighbors[1]) == {0, 2, 4, -1}
assert set(pixel_neighbors[2]) == {1, 5, -1, -1}
assert set(pixel_neighbors[3]) == {0, 4, 6, -1}
assert set(pixel_neighbors[4]) == {1, 3, 5, 7}
assert set(pixel_neighbors[5]) == {2, 4, 8, -1}
assert set(pixel_neighbors[6]) == {3, 7, -1, -1}
assert set(pixel_neighbors[7]) == {4, 6, 8, -1}
assert set(pixel_neighbors[8]) == {5, 7, -1, -1}
assert (pixel_neighbors_size == np.array([2, 3, 2, 3, 4, 3, 2, 3, 2])).all()
```
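The `rectangular_neighbors_from` expectations above follow the usual 4-connectivity of a rectangular mesh, with `-1` padding the unused neighbour slots. A short, self-contained sketch that reproduces the 3x3 case is shown below; the ordering (above, left, right, below) matches these test values, but the sketch is illustrative rather than the library routine.

```python
import numpy as np


def rectangular_neighbors_sketch(shape_native):
    rows, cols = shape_native
    pixels = rows * cols
    neighbors = -1 * np.ones(shape=(pixels, 4), dtype="int")
    neighbors_sizes = np.zeros(pixels, dtype="int")
    for pixel in range(pixels):
        row, col = divmod(pixel, cols)
        adjacent = []
        if row > 0:
            adjacent.append(pixel - cols)  # pixel above
        if col > 0:
            adjacent.append(pixel - 1)     # pixel to the left
        if col < cols - 1:
            adjacent.append(pixel + 1)     # pixel to the right
        if row < rows - 1:
            adjacent.append(pixel + cols)  # pixel below
        neighbors[pixel, : len(adjacent)] = adjacent
        neighbors_sizes[pixel] = len(adjacent)
    return neighbors, neighbors_sizes


neighbors, sizes = rectangular_neighbors_sketch(shape_native=(3, 3))
print(neighbors[4])  # [1 3 5 7]
print(sizes)         # [2 3 2 3 4 3 2 3 2]
```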
#### File: test_autoarray/layout/test_layout_util.py
```python
import numpy as np
import pytest
import autoarray as aa
class TestRotations:
def test__rotate_array__all_4_rotations_with_rotation_back(self):
arr = np.array([[0.0, 1.0, 0.0], [1.0, 2.0, 0.0], [0.0, 0.0, 0.0]])
arr_bl = aa.util.layout.rotate_array_from_roe_corner(
array=arr, roe_corner=(1, 0)
)
assert arr_bl == pytest.approx(
np.array([[0.0, 1.0, 0.0], [1.0, 2.0, 0.0], [0.0, 0.0, 0.0]]), 1.0e-4
)
arr_bl = aa.util.layout.rotate_array_from_roe_corner(
array=arr_bl, roe_corner=(1, 0)
)
assert arr_bl == pytest.approx(np.array(arr), 1.0e-4)
arr_br = aa.util.layout.rotate_array_from_roe_corner(
array=arr, roe_corner=(1, 1)
)
assert arr_br == pytest.approx(
np.array([[0.0, 1.0, 0.0], [0.0, 2.0, 1.0], [0.0, 0.0, 0.0]]), 1.0e-4
)
arr_br = aa.util.layout.rotate_array_from_roe_corner(
array=arr_br, roe_corner=(1, 1)
)
assert arr_br == pytest.approx(np.array(arr), 1.0e-4)
arr_tl = aa.util.layout.rotate_array_from_roe_corner(
array=arr, roe_corner=(0, 0)
)
assert arr_tl == pytest.approx(
np.array([[0.0, 0.0, 0.0], [1.0, 2.0, 0.0], [0.0, 1.0, 0.0]]), 1.0e-4
)
arr_tl = aa.util.layout.rotate_array_from_roe_corner(
array=arr_tl, roe_corner=(0, 0)
)
assert arr_tl == pytest.approx(np.array(arr), 1.0e-4)
arr_tr = aa.util.layout.rotate_array_from_roe_corner(
array=arr, roe_corner=(0, 1)
)
assert arr_tr == pytest.approx(
np.array([[0.0, 0.0, 0.0], [0.0, 2.0, 1.0], [0.0, 1.0, 0.0]]), 1.0e-4
)
arr_tr = aa.util.layout.rotate_array_from_roe_corner(
array=arr_tr, roe_corner=(0, 1)
)
assert arr_tr == pytest.approx(np.array(arr), 1.0e-4)
def test__rotate_region__all_4_rotations_with_rotation_back(self):
region = (0, 2, 1, 3)
shape_native = (8, 10)
region_bl = aa.util.layout.rotate_region_from_roe_corner(
region=region, shape_native=shape_native, roe_corner=(1, 0)
)
assert region_bl == (0, 2, 1, 3)
region_bl = aa.util.layout.rotate_region_from_roe_corner(
region=region_bl, shape_native=shape_native, roe_corner=(1, 0)
)
assert region_bl == (0, 2, 1, 3)
region_br = aa.util.layout.rotate_region_from_roe_corner(
region=region, shape_native=shape_native, roe_corner=(1, 1)
)
assert region_br == (0, 2, 7, 9)
region_br = aa.util.layout.rotate_region_from_roe_corner(
region=region_br, shape_native=shape_native, roe_corner=(1, 1)
)
assert region_br == (0, 2, 1, 3)
region_tl = aa.util.layout.rotate_region_from_roe_corner(
region=region, shape_native=shape_native, roe_corner=(0, 0)
)
assert region_tl == (6, 8, 1, 3)
region_tl = aa.util.layout.rotate_region_from_roe_corner(
region=region_tl, shape_native=shape_native, roe_corner=(0, 0)
)
assert region_tl == (0, 2, 1, 3)
region_tr = aa.util.layout.rotate_region_from_roe_corner(
region=region, shape_native=shape_native, roe_corner=(0, 1)
)
assert region_tr == (6, 8, 7, 9)
region_tr = aa.util.layout.rotate_region_from_roe_corner(
region=region_tr, shape_native=shape_native, roe_corner=(0, 1)
)
assert region_tr == (0, 2, 1, 3)
# def test__rotate_pattern_ci__all_4_rotations_with_rotation_back(self):
#
# pattern = pattern_ci.PatternCIUniform(
# regions=[(0, 1, 1, 2), (0, 2, 0, 2)], normalization=10.0
# )
#
# shape_native = (2, 2)
#
# pattern_bl = aa.util.layout.rotate_pattern_ci_from_roe_corner(
# pattern_ci=pattern, shape_native=shape_native, roe_corner=(1, 0)
# )
#
# assert pattern_bl.regions == [(0, 1, 1, 2), (0, 2, 0, 2)]
# assert pattern_bl.normalization == 10.0
#
# pattern_bl = aa.util.layout.rotate_pattern_ci_from_roe_corner(
# pattern_ci=pattern_bl, shape_native=shape_native, roe_corner=(1, 0)
# )
#
# assert pattern_bl.regions == [(0, 1, 1, 2), (0, 2, 0, 2)]
#
# pattern_br = aa.util.layout.rotate_pattern_ci_from_roe_corner(
# pattern_ci=pattern, shape_native=shape_native, roe_corner=(1, 1)
# )
#
# assert pattern_br.regions == [(0, 1, 0, 1), (0, 2, 0, 2)]
#
# pattern_br = aa.util.layout.rotate_pattern_ci_from_roe_corner(
# pattern_ci=pattern_br, shape_native=shape_native, roe_corner=(1, 1)
# )
#
# assert pattern_br.regions == [(0, 1, 1, 2), (0, 2, 0, 2)]
#
# pattern_tl = aa.util.layout.rotate_pattern_ci_from_roe_corner(
# pattern_ci=pattern, shape_native=shape_native, roe_corner=(0, 0)
# )
#
# assert pattern_tl.regions == [(1, 2, 1, 2), (0, 2, 0, 2)]
#
# pattern_tl = aa.util.layout.rotate_pattern_ci_from_roe_corner(
# pattern_ci=pattern_tl, shape_native=shape_native, roe_corner=(0, 0)
# )
#
# assert pattern_tl.regions == [(0, 1, 1, 2), (0, 2, 0, 2)]
#
# pattern_tr = aa.util.layout.rotate_pattern_ci_from_roe_corner(
# pattern_ci=pattern, shape_native=shape_native, roe_corner=(0, 1)
# )
#
# assert pattern_tr.regions == [(1, 2, 0, 1), (0, 2, 0, 2)]
#
# pattern_tr = aa.util.layout.rotate_pattern_ci_from_roe_corner(
# pattern_ci=pattern_tr, shape_native=shape_native, roe_corner=(0, 1)
# )
#
# assert pattern_tr.regions == [(0, 1, 1, 2), (0, 2, 0, 2)]
class TestRegionAfterExtraction:
def test__simple_test_cases(self):
region = aa.util.layout.region_after_extraction(
original_region=(2, 4, 2, 4), extraction_region=(0, 6, 0, 6)
)
assert region == (2, 4, 2, 4)
region = aa.util.layout.region_after_extraction(
original_region=(2, 4, 2, 4), extraction_region=(3, 5, 3, 5)
)
assert region == (0, 1, 0, 1)
region = aa.util.layout.region_after_extraction(
original_region=(2, 4, 2, 4), extraction_region=(2, 5, 2, 5)
)
assert region == (0, 2, 0, 2)
region = aa.util.layout.region_after_extraction(
original_region=(2, 4, 2, 4), extraction_region=(0, 3, 0, 3)
)
assert region == (2, 3, 2, 3)
def test__regions_do_not_overlap__returns_none(self):
region = aa.util.layout.region_after_extraction(
original_region=(2, 4, 2, 4), extraction_region=(0, 6, 0, 1)
)
assert region == None
region = aa.util.layout.region_after_extraction(
original_region=(2, 4, 2, 4), extraction_region=(0, 1, 0, 6)
)
assert region == None
region = aa.util.layout.region_after_extraction(
original_region=(2, 4, 2, 4), extraction_region=(0, 1, 0, 1)
)
assert region == None
region = aa.util.layout.region_after_extraction(
original_region=None, extraction_region=(0, 6, 0, 1)
)
assert region == None
class Testx0x1AfterExtraction:
def test__case_1__original_region_at_0__1d_extracted_region_is_fully_within_original_region(
self,
):
x0, x1 = aa.util.layout.x0x1_after_extraction(x0o=0, x1o=6, x0e=2, x1e=4)
assert x0 == 0
assert x1 == 2
x0, x1 = aa.util.layout.x0x1_after_extraction(x0o=0, x1o=6, x0e=3, x1e=5)
assert x0 == 0
assert x1 == 2
x0, x1 = aa.util.layout.x0x1_after_extraction(x0o=0, x1o=6, x0e=4, x1e=6)
assert x0 == 0
assert x1 == 2
x0, x1 = aa.util.layout.x0x1_after_extraction(x0o=0, x1o=6, x0e=5, x1e=6)
assert x0 == 0
assert x1 == 1
x0, x1 = aa.util.layout.x0x1_after_extraction(x0o=0, x1o=6, x0e=2, x1e=5)
assert x0 == 0
assert x1 == 3
def test__case_2__original_region_offset_from_0__1d_extracted_region_is_fully_within_original_region(
self,
):
x0, x1 = aa.util.layout.x0x1_after_extraction(x0o=2, x1o=6, x0e=2, x1e=4)
assert x0 == 0
assert x1 == 2
x0, x1 = aa.util.layout.x0x1_after_extraction(x0o=2, x1o=6, x0e=3, x1e=5)
assert x0 == 0
assert x1 == 2
x0, x1 = aa.util.layout.x0x1_after_extraction(x0o=2, x1o=6, x0e=4, x1e=6)
assert x0 == 0
assert x1 == 2
x0, x1 = aa.util.layout.x0x1_after_extraction(x0o=2, x1o=6, x0e=5, x1e=6)
assert x0 == 0
assert x1 == 1
x0, x1 = aa.util.layout.x0x1_after_extraction(x0o=2, x1o=6, x0e=2, x1e=5)
assert x0 == 0
assert x1 == 3
def test__case_3__original_region_offset_from_0__1d_extracted_region_partly_overlaps_to_left_original_region(
self,
):
x0, x1 = aa.util.layout.x0x1_after_extraction(x0o=2, x1o=6, x0e=1, x1e=3)
assert x0 == 1
assert x1 == 2
x0, x1 = aa.util.layout.x0x1_after_extraction(x0o=2, x1o=6, x0e=1, x1e=4)
assert x0 == 1
assert x1 == 3
x0, x1 = aa.util.layout.x0x1_after_extraction(x0o=2, x1o=6, x0e=0, x1e=3)
assert x0 == 2
assert x1 == 3
x0, x1 = aa.util.layout.x0x1_after_extraction(x0o=2, x1o=6, x0e=0, x1e=5)
assert x0 == 2
assert x1 == 5
def test__case_4__original_region_offset_from_0__1d_extracted_region_partly_overlaps_to_right_original_region(
self,
):
x0, x1 = aa.util.layout.x0x1_after_extraction(x0o=2, x1o=6, x0e=5, x1e=7)
assert x0 == 0
assert x1 == 1
x0, x1 = aa.util.layout.x0x1_after_extraction(x0o=2, x1o=6, x0e=5, x1e=8)
assert x0 == 0
assert x1 == 1
x0, x1 = aa.util.layout.x0x1_after_extraction(x0o=2, x1o=6, x0e=4, x1e=7)
assert x0 == 0
assert x1 == 2
x0, x1 = aa.util.layout.x0x1_after_extraction(x0o=2, x1o=6, x0e=2, x1e=8)
assert x0 == 0
assert x1 == 4
def test__case_5__extraction_region_over_full_original_region(self):
x0, x1 = aa.util.layout.x0x1_after_extraction(x0o=2, x1o=6, x0e=0, x1e=8)
assert x0 == 2
assert x1 == 6
x0, x1 = aa.util.layout.x0x1_after_extraction(x0o=2, x1o=6, x0e=0, x1e=7)
assert x0 == 2
assert x1 == 6
x0, x1 = aa.util.layout.x0x1_after_extraction(x0o=2, x1o=6, x0e=1, x1e=8)
assert x0 == 1
assert x1 == 5
def test__case_6__extraction_region_misses_original_region(self):
x0, x1 = aa.util.layout.x0x1_after_extraction(x0o=2, x1o=6, x0e=7, x1e=8)
assert x0 == None
assert x1 == None
x0, x1 = aa.util.layout.x0x1_after_extraction(x0o=2, x1o=6, x0e=6, x1e=8)
assert x0 == None
assert x1 == None
x0, x1 = aa.util.layout.x0x1_after_extraction(x0o=2, x1o=6, x0e=0, x1e=1)
assert x0 == None
assert x1 == None
x0, x1 = aa.util.layout.x0x1_after_extraction(x0o=2, x1o=6, x0e=0, x1e=2)
assert x0 == None
assert x1 == None
```
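`x0x1_after_extraction` in the tests above is an interval-overlap calculation: intersect the original 1D region with the extraction window and express the result in the extracted frame, returning `None` when the two do not overlap. A sketch of that logic, checked against the cases above, is given below (illustrative only).

```python
def x0x1_after_extraction_sketch(x0o, x1o, x0e, x1e):
    """Overlap of the original region [x0o, x1o) with the extraction
    window [x0e, x1e), expressed in the extracted frame."""
    x0 = max(x0o, x0e) - x0e
    x1 = min(x1o, x1e) - x0e
    if x1 <= x0:  # no overlap
        return None, None
    return x0, x1


print(x0x1_after_extraction_sketch(x0o=2, x1o=6, x0e=1, x1e=4))  # (1, 3)
print(x0x1_after_extraction_sketch(x0o=2, x1o=6, x0e=7, x1e=8))  # (None, None)
```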
#### File: test_autoarray/mask/test_mask_1d_util.py
```python
from autoarray import exc
from autoarray import util
import numpy as np
import pytest
class TestMask1D:
def test__total_image_pixels_1d_from(self):
mask_1d = np.array([False, True, False, False, False, True])
assert util.mask_1d.total_pixels_1d_from(mask_1d=mask_1d) == 4
def test__total_sub_pixels_1d_from(self):
mask_1d = np.array([False, True, False, False, False, True])
assert util.mask_1d.total_sub_pixels_1d_from(mask_1d=mask_1d, sub_size=2) == 8
def test__sub_native_index_for_sub_slim_index_1d_from(self):
mask_1d = np.array([False, False, False, False])
sub_native_index_for_sub_slim_index_1d = util.mask_1d.native_index_for_slim_index_1d_from(
mask_1d=mask_1d, sub_size=1
)
assert (sub_native_index_for_sub_slim_index_1d == np.array([0, 1, 2, 3])).all()
mask_1d = np.array([False, False, True, False, False])
sub_native_index_for_sub_slim_index_1d = util.mask_1d.native_index_for_slim_index_1d_from(
mask_1d=mask_1d, sub_size=1
)
assert (sub_native_index_for_sub_slim_index_1d == np.array([0, 1, 3, 4])).all()
mask_1d = np.array([True, False, False, True, False, False])
sub_native_index_for_sub_slim_index_1d = util.mask_1d.native_index_for_slim_index_1d_from(
mask_1d=mask_1d, sub_size=1
)
assert (sub_native_index_for_sub_slim_index_1d == np.array([1, 2, 4, 5])).all()
mask_1d = np.array([True, False, False, True, False, False])
sub_native_index_for_sub_slim_index_1d = util.mask_1d.native_index_for_slim_index_1d_from(
mask_1d=mask_1d, sub_size=2
)
assert (
sub_native_index_for_sub_slim_index_1d
== np.array([2, 3, 4, 5, 8, 9, 10, 11])
).all()
```
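`native_index_for_slim_index_1d_from` in the tests above maps every unmasked (slim) sub-pixel back to its index in the native 1D array: each unmasked native pixel contributes `sub_size` consecutive sub-pixel indices starting at `native_index * sub_size`. A small sketch of that mapping, matching the expectations above, follows (illustrative, not the library code).

```python
import numpy as np


def native_index_for_slim_index_1d_sketch(mask_1d, sub_size):
    indexes = []
    for native_index, masked in enumerate(mask_1d):
        if not masked:  # only unmasked pixels contribute sub-pixels
            indexes.extend(
                native_index * sub_size + sub for sub in range(sub_size)
            )
    return np.array(indexes)


mask_1d = np.array([True, False, False, True, False, False])
print(native_index_for_slim_index_1d_sketch(mask_1d, sub_size=2))
# [2 3 4 5 8 9 10 11]
```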
#### File: plot/mat_wrap/test_visuals.py
```python
import autoarray.plot as aplt
class TestAbstractVisuals:
def test__add_visuals_together__replaces_nones(self):
visuals_1 = aplt.Visuals2D(mask=1)
visuals_0 = aplt.Visuals2D(border=10)
visuals = visuals_0 + visuals_1
assert visuals.mask == 1
assert visuals.border == 10
assert visuals_1.mask == 1
assert visuals_1.border == 10
assert visuals_0.border == 10
assert visuals_0.mask == None
visuals_0 = aplt.Visuals2D(mask=1)
visuals_1 = aplt.Visuals2D(mask=2)
visuals = visuals_1 + visuals_0
assert visuals.mask == 1
assert visuals.border == None
assert visuals_1.mask == 2
```
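The `__add__` behaviour exercised above amounts to: fill every `None` attribute of the right-hand operand with the corresponding attribute of the left-hand operand, then return that (mutated) right-hand operand, so attributes already set on the right take priority. A toy sketch consistent with those assertions is shown below; the `ToyVisuals` class and its two attributes are illustrative, not the library API.

```python
class ToyVisuals:
    def __init__(self, mask=None, border=None):
        self.mask = mask
        self.border = border

    def __add__(self, other):
        # Fill the right-hand operand's missing attributes from the left-hand one.
        for name in ("mask", "border"):
            if getattr(other, name) is None:
                setattr(other, name, getattr(self, name))
        return other


visuals_0 = ToyVisuals(border=10)
visuals_1 = ToyVisuals(mask=1)
visuals = visuals_0 + visuals_1
print(visuals.mask, visuals.border)  # 1 10
```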
#### File: test_autoarray/plot/test_fit_imaging_plotters.py
```python
import autoarray as aa
import autoarray.plot as aplt
import pytest
from os import path
directory = path.dirname(path.realpath(__file__))
@pytest.fixture(name="plot_path")
def make_plot_path_setup():
return path.join(
"{}".format(path.dirname(path.realpath(__file__))),
"files",
"plots",
"fit_imaging",
)
def test__visuals_in_constructor_use_imaging_and_include(fit_imaging_7x7):
visuals_2d = aplt.Visuals2D(origin=(1.0, 1.0), vector_field=2)
include = aplt.Include2D(origin=True, mask=True, border=True)
fit_imaging_plotter = aplt.FitImagingPlotter(
fit=fit_imaging_7x7, visuals_2d=visuals_2d, include_2d=include
)
assert fit_imaging_plotter.visuals_2d.origin == (1.0, 1.0)
assert fit_imaging_plotter.visuals_with_include_2d.origin == (1.0, 1.0)
assert (
fit_imaging_plotter.visuals_with_include_2d.mask == fit_imaging_7x7.image.mask
).all()
assert (
fit_imaging_plotter.visuals_with_include_2d.border
== fit_imaging_7x7.image.mask.border_grid_sub_1.binned
).all()
assert fit_imaging_plotter.visuals_with_include_2d.vector_field == 2
include = aplt.Include2D(origin=False, mask=False, border=False)
fit_imaging_plotter = aplt.FitImagingPlotter(
fit=fit_imaging_7x7, visuals_2d=visuals_2d, include_2d=include
)
assert fit_imaging_plotter.visuals_with_include_2d.origin == (1.0, 1.0)
assert fit_imaging_plotter.visuals_with_include_2d.mask == None
assert fit_imaging_plotter.visuals_with_include_2d.border == None
assert fit_imaging_plotter.visuals_with_include_2d.vector_field == 2
def test__fit_quantities_are_output(fit_imaging_7x7, plot_path, plot_patch):
fit_imaging_plotter = aplt.FitImagingPlotter(
fit=fit_imaging_7x7,
include_2d=aplt.Include2D(origin=True, mask=True, border=True),
mat_plot_2d=aplt.MatPlot2D(output=aplt.Output(path=plot_path, format="png")),
)
fit_imaging_plotter.figures_2d(
image=True,
noise_map=True,
signal_to_noise_map=True,
model_image=True,
residual_map=True,
normalized_residual_map=True,
chi_squared_map=True,
)
assert path.join(plot_path, "image_2d.png") in plot_patch.paths
assert path.join(plot_path, "noise_map.png") in plot_patch.paths
assert path.join(plot_path, "signal_to_noise_map.png") in plot_patch.paths
assert path.join(plot_path, "model_image.png") in plot_patch.paths
assert path.join(plot_path, "residual_map.png") in plot_patch.paths
assert path.join(plot_path, "normalized_residual_map.png") in plot_patch.paths
assert path.join(plot_path, "chi_squared_map.png") in plot_patch.paths
plot_patch.paths = []
fit_imaging_plotter.figures_2d(
image=True,
noise_map=False,
signal_to_noise_map=False,
model_image=True,
chi_squared_map=True,
)
assert path.join(plot_path, "image_2d.png") in plot_patch.paths
assert path.join(plot_path, "noise_map.png") not in plot_patch.paths
assert path.join(plot_path, "signal_to_noise_map.png") not in plot_patch.paths
assert path.join(plot_path, "model_image.png") in plot_patch.paths
assert path.join(plot_path, "residual_map.png") not in plot_patch.paths
assert path.join(plot_path, "normalized_residual_map.png") not in plot_patch.paths
assert path.join(plot_path, "chi_squared_map.png") in plot_patch.paths
def test__fit_sub_plot(fit_imaging_7x7, plot_path, plot_patch):
fit_imaging_plotter = aplt.FitImagingPlotter(
fit=fit_imaging_7x7,
include_2d=aplt.Include2D(origin=True, mask=True, border=True),
mat_plot_2d=aplt.MatPlot2D(output=aplt.Output(path=plot_path, format="png")),
)
fit_imaging_plotter.subplot_fit_imaging()
assert path.join(plot_path, "subplot_fit_imaging.png") in plot_patch.paths
def test__output_as_fits__correct_output_format(
fit_imaging_7x7, grid_2d_irregular_7x7_list, mask_2d_7x7, plot_path, plot_patch
):
fit_imaging_plotter = aplt.FitImagingPlotter(
fit=fit_imaging_7x7,
include_2d=aplt.Include2D(origin=True, mask=True, border=True),
mat_plot_2d=aplt.MatPlot2D(output=aplt.Output(path=plot_path, format="fits")),
)
fit_imaging_plotter.figures_2d(image=True)
image_from_plot = aa.util.array_2d.numpy_array_2d_from_fits(
file_path=path.join(plot_path, "image.fits"), hdu=0
)
assert image_from_plot.shape == (5, 5)
```
#### File: test_autoarray/plot/test_interferometer_plotters.py
```python
from os import path
import pytest
import autoarray.plot as aplt
directory = path.dirname(path.realpath(__file__))
@pytest.fixture(name="plot_path")
def make_plot_path_setup():
return path.join(
"{}".format(path.dirname(path.realpath(__file__))),
"files",
"plots",
"interferometer",
)
def test__individual_attributes_are_output(interferometer_7, plot_path, plot_patch):
interferometer_plotter = aplt.InterferometerPlotter(
interferometer=interferometer_7,
mat_plot_1d=aplt.MatPlot1D(output=aplt.Output(path=plot_path, format="png")),
mat_plot_2d=aplt.MatPlot2D(output=aplt.Output(path=plot_path, format="png")),
)
interferometer_plotter.figures_2d(
visibilities=True,
noise_map=True,
u_wavelengths=True,
v_wavelengths=True,
uv_wavelengths=True,
amplitudes_vs_uv_distances=True,
phases_vs_uv_distances=True,
dirty_image=True,
dirty_noise_map=True,
dirty_signal_to_noise_map=True,
dirty_inverse_noise_map=True,
)
assert path.join(plot_path, "visibilities.png") in plot_patch.paths
assert path.join(plot_path, "noise_map.png") in plot_patch.paths
assert path.join(plot_path, "u_wavelengths.png") in plot_patch.paths
assert path.join(plot_path, "v_wavelengths.png") in plot_patch.paths
assert path.join(plot_path, "uv_wavelengths.png") in plot_patch.paths
assert path.join(plot_path, "amplitudes_vs_uv_distances.png") in plot_patch.paths
assert path.join(plot_path, "phases_vs_uv_distances.png") in plot_patch.paths
assert path.join(plot_path, "dirty_image_2d.png") in plot_patch.paths
assert path.join(plot_path, "dirty_noise_map_2d.png") in plot_patch.paths
assert path.join(plot_path, "dirty_signal_to_noise_map_2d.png") in plot_patch.paths
assert path.join(plot_path, "dirty_inverse_noise_map_2d.png") in plot_patch.paths
plot_patch.paths = []
interferometer_plotter.figures_2d(
visibilities=True,
u_wavelengths=False,
v_wavelengths=True,
amplitudes_vs_uv_distances=True,
)
assert path.join(plot_path, "visibilities.png") in plot_patch.paths
assert not path.join(plot_path, "u_wavelengths.png") in plot_patch.paths
assert path.join(plot_path, "v_wavelengths.png") in plot_patch.paths
assert path.join(plot_path, "amplitudes_vs_uv_distances.png") in plot_patch.paths
assert path.join(plot_path, "phases_vs_uv_distances.png") not in plot_patch.paths
def test__subplots_are_output(interferometer_7, plot_path, plot_patch):
interferometer_plotter = aplt.InterferometerPlotter(
interferometer=interferometer_7,
mat_plot_1d=aplt.MatPlot1D(output=aplt.Output(path=plot_path, format="png")),
mat_plot_2d=aplt.MatPlot2D(output=aplt.Output(path=plot_path, format="png")),
)
interferometer_plotter.subplot_interferometer()
assert path.join(plot_path, "subplot_interferometer.png") in plot_patch.paths
interferometer_plotter.subplot_dirty_images()
assert path.join(plot_path, "subplot_dirty_images.png") in plot_patch.paths
```
#### File: test_autoarray/plot/test_structure_plotters.py
```python
import autoarray as aa
import autoarray.plot as aplt
from os import path
import pytest
import numpy as np
import shutil
directory = path.dirname(path.realpath(__file__))
@pytest.fixture(name="plot_path")
def make_plot_path_setup():
return path.join(
"{}".format(path.dirname(path.realpath(__file__))), "files", "structures"
)
class TestYX1DPlotter:
def test__plot_yx_line__works_with_all_extras_included(self, plot_path, plot_patch):
visuals_1d = aplt.Visuals1D(vertical_line=1.0)
mat_plot_1d = aplt.MatPlot1D(
yx_plot=aplt.YXPlot(plot_axis_type="loglog", c="k"),
vertical_line_axvline=aplt.AXVLine(c="k"),
output=aplt.Output(path=plot_path, filename="yx_1", format="png"),
)
yx_1d_plotter = aplt.YX1DPlotter(
y=np.array([1.0, 2.0, 3.0]),
x=np.array([0.5, 1.0, 1.5]),
mat_plot_1d=mat_plot_1d,
visuals_1d=visuals_1d,
)
yx_1d_plotter.figure_1d()
assert path.join(plot_path, "yx_1.png") in plot_patch.paths
class TestArray2DPlotter:
def test___visuals_in_constructor_use_array_and_include(self, array_2d_7x7):
visuals_2d = aplt.Visuals2D(origin=(1.0, 1.0), vector_field=2)
include = aplt.Include2D(origin=True, mask=True, border=True)
array_plotter = aplt.Array2DPlotter(
array=array_2d_7x7, visuals_2d=visuals_2d, include_2d=include
)
assert array_plotter.visuals_2d.origin == (1.0, 1.0)
assert array_plotter.visuals_with_include_2d.origin == (1.0, 1.0)
assert array_plotter.visuals_2d.mask == None
assert (array_plotter.visuals_with_include_2d.mask == array_2d_7x7.mask).all()
assert array_plotter.visuals_2d.border == None
assert (
array_plotter.visuals_with_include_2d.border
== array_2d_7x7.mask.border_grid_sub_1.binned
).all()
assert array_plotter.visuals_2d.vector_field == 2
assert array_plotter.visuals_with_include_2d.vector_field == 2
include = aplt.Include2D(origin=False, mask=False, border=False)
array_plotter = aplt.Array2DPlotter(
array=array_2d_7x7, visuals_2d=visuals_2d, include_2d=include
)
assert array_plotter.visuals_with_include_2d.origin == (1.0, 1.0)
assert array_plotter.visuals_with_include_2d.mask == None
assert array_plotter.visuals_with_include_2d.border == None
assert array_plotter.visuals_with_include_2d.vector_field == 2
def test__works_with_all_extras_included(
self,
array_2d_7x7,
mask_2d_7x7,
grid_2d_7x7,
grid_2d_irregular_7x7_list,
plot_path,
plot_patch,
):
array_plotter = aplt.Array2DPlotter(
array=array_2d_7x7,
mat_plot_2d=aplt.MatPlot2D(
output=aplt.Output(path=plot_path, filename="array1", format="png")
),
)
array_plotter.figure_2d()
assert path.join(plot_path, "array1.png") in plot_patch.paths
array_plotter = aplt.Array2DPlotter(
array=array_2d_7x7,
include_2d=aplt.Include2D(origin=True, mask=True, border=True),
mat_plot_2d=aplt.MatPlot2D(
output=aplt.Output(path=plot_path, filename="array2", format="png")
),
)
array_plotter.figure_2d()
assert path.join(plot_path, "array2.png") in plot_patch.paths
visuals_2d = aplt.Visuals2D(
origin=grid_2d_irregular_7x7_list,
mask=mask_2d_7x7,
border=mask_2d_7x7.border_grid_sub_1.binned,
grid=grid_2d_7x7,
positions=grid_2d_irregular_7x7_list,
# lines=grid_2d_irregular_7x7_list,
array_overlay=array_2d_7x7,
)
array_plotter = aplt.Array2DPlotter(
array=array_2d_7x7,
visuals_2d=visuals_2d,
mat_plot_2d=aplt.MatPlot2D(
output=aplt.Output(path=plot_path, filename="array3", format="png")
),
)
array_plotter.figure_2d()
assert path.join(plot_path, "array3.png") in plot_patch.paths
def test__fits_files_output_correctly(self, array_2d_7x7, plot_path):
plot_path = path.join(plot_path, "fits")
array_plotter = aplt.Array2DPlotter(
array=array_2d_7x7,
mat_plot_2d=aplt.MatPlot2D(
output=aplt.Output(path=plot_path, filename="array", format="fits")
),
)
if path.exists(plot_path):
shutil.rmtree(plot_path)
array_plotter.figure_2d()
arr = aa.util.array_2d.numpy_array_2d_from_fits(
file_path=path.join(plot_path, "array.fits"), hdu=0
)
assert (arr == array_2d_7x7.native).all()
class TestGrid2DPlotter:
def test___visuals_in_constructor_use_grid_and_include(self, grid_2d_7x7):
visuals_2d = aplt.Visuals2D(origin=(1.0, 1.0), vector_field=2)
include = aplt.Include2D(origin=True)
grid_plotter = aplt.Grid2DPlotter(
grid=grid_2d_7x7, visuals_2d=visuals_2d, include_2d=include
)
assert grid_plotter.visuals_2d.origin == (1.0, 1.0)
assert grid_plotter.visuals_with_include_2d.origin == (1.0, 1.0)
include = aplt.Include2D(origin=False)
grid_plotter = aplt.Grid2DPlotter(
grid=grid_2d_7x7, visuals_2d=visuals_2d, include_2d=include
)
assert grid_plotter.visuals_with_include_2d.origin == (1.0, 1.0)
assert grid_plotter.visuals_with_include_2d.vector_field == 2
def test__works_with_all_extras_included(
self,
array_2d_7x7,
grid_2d_7x7,
mask_2d_7x7,
grid_2d_irregular_7x7_list,
plot_path,
plot_patch,
):
grid_plotter = aplt.Grid2DPlotter(
grid=grid_2d_7x7,
visuals_2d=aplt.Visuals2D(indexes=[0, 1, 2]),
mat_plot_2d=aplt.MatPlot2D(
output=aplt.Output(path=plot_path, filename="grid1", format="png")
),
)
color_array = np.linspace(start=0.0, stop=1.0, num=grid_2d_7x7.shape_slim)
grid_plotter.figure_2d(color_array=color_array)
assert path.join(plot_path, "grid1.png") in plot_patch.paths
grid_plotter = aplt.Grid2DPlotter(
grid=grid_2d_7x7,
visuals_2d=aplt.Visuals2D(indexes=[0, 1, 2]),
mat_plot_2d=aplt.MatPlot2D(
output=aplt.Output(path=plot_path, filename="grid2", format="png")
),
include_2d=aplt.Include2D(origin=True, mask=True, border=True),
)
grid_plotter.figure_2d(color_array=color_array)
assert path.join(plot_path, "grid2.png") in plot_patch.paths
visuals_2d = aplt.Visuals2D(
origin=grid_2d_irregular_7x7_list,
mask=mask_2d_7x7,
border=mask_2d_7x7.border_grid_sub_1.binned,
grid=grid_2d_7x7,
positions=grid_2d_irregular_7x7_list,
lines=grid_2d_irregular_7x7_list,
array_overlay=array_2d_7x7,
indexes=[0, 1, 2],
)
grid_plotter = aplt.Grid2DPlotter(
grid=grid_2d_7x7,
mat_plot_2d=aplt.MatPlot2D(
output=aplt.Output(path=plot_path, filename="grid3", format="png")
),
visuals_2d=visuals_2d,
)
grid_plotter.figure_2d(color_array=color_array)
assert path.join(plot_path, "grid3.png") in plot_patch.paths
class TestMapperPlotter:
def test__visuals_for_data_from_rectangular_mapper(
self, rectangular_mapper_7x7_3x3
):
include = aplt.Include2D(
origin=True, mask=True, mapper_data_pixelization_grid=True, border=True
)
mapper_plotter = aplt.MapperPlotter(
mapper=rectangular_mapper_7x7_3x3, include_2d=include
)
assert mapper_plotter.visuals_data_with_include_2d.origin.in_list == [
(0.0, 0.0)
]
assert (
mapper_plotter.visuals_data_with_include_2d.mask
== rectangular_mapper_7x7_3x3.source_grid_slim.mask
).all()
assert mapper_plotter.visuals_data_with_include_2d.grid == None
# assert visuals.border == (0, 2)
include = aplt.Include2D(
origin=False, mask=False, mapper_data_pixelization_grid=False, border=False
)
mapper_plotter = aplt.MapperPlotter(
mapper=rectangular_mapper_7x7_3x3, include_2d=include
)
assert mapper_plotter.visuals_data_with_include_2d.origin == None
assert mapper_plotter.visuals_data_with_include_2d.mask == None
assert mapper_plotter.visuals_data_with_include_2d.grid == None
assert mapper_plotter.visuals_data_with_include_2d.border == None
def test__visuals_for_data_from_voronoi_mapper(self, voronoi_mapper_9_3x3):
include = aplt.Include2D(
origin=True, mask=True, mapper_data_pixelization_grid=True, border=True
)
mapper_plotter = aplt.MapperPlotter(
mapper=voronoi_mapper_9_3x3, include_2d=include
)
assert mapper_plotter.visuals_data_with_include_2d.origin.in_list == [
(0.0, 0.0)
]
assert (
mapper_plotter.visuals_data_with_include_2d.mask
== voronoi_mapper_9_3x3.source_grid_slim.mask
).all()
assert (
mapper_plotter.visuals_data_with_include_2d.pixelization_grid
== aa.Grid2D.uniform(shape_native=(2, 2), pixel_scales=0.1)
).all()
# assert visuals.border.shape == (0, 2)
include = aplt.Include2D(
origin=False, mask=False, mapper_data_pixelization_grid=False, border=False
)
mapper_plotter = aplt.MapperPlotter(
mapper=voronoi_mapper_9_3x3, include_2d=include
)
assert mapper_plotter.visuals_data_with_include_2d.origin == None
assert mapper_plotter.visuals_data_with_include_2d.mask == None
assert mapper_plotter.visuals_data_with_include_2d.grid == None
assert mapper_plotter.visuals_data_with_include_2d.pixelization_grid == None
assert mapper_plotter.visuals_data_with_include_2d.border == None
def test__visuals_for_source_from_rectangular_mapper(
self, rectangular_mapper_7x7_3x3
):
include = aplt.Include2D(
origin=True,
mapper_source_grid_slim=True,
mapper_source_pixelization_grid=True,
border=True,
)
mapper_plotter = aplt.MapperPlotter(
mapper=rectangular_mapper_7x7_3x3, include_2d=include
)
assert mapper_plotter.visuals_2d.origin == None
assert mapper_plotter.visuals_source_with_include_2d.origin.in_list == [
(0.0, 0.0)
]
assert (
mapper_plotter.visuals_source_with_include_2d.grid
== rectangular_mapper_7x7_3x3.source_grid_slim
).all()
assert (
mapper_plotter.visuals_source_with_include_2d.pixelization_grid
== rectangular_mapper_7x7_3x3.source_pixelization_grid
).all()
assert (
mapper_plotter.visuals_source_with_include_2d.border
== rectangular_mapper_7x7_3x3.source_grid_slim.sub_border_grid
).all()
include = aplt.Include2D(
origin=False,
border=False,
mapper_source_grid_slim=False,
mapper_source_pixelization_grid=False,
)
mapper_plotter = aplt.MapperPlotter(
mapper=rectangular_mapper_7x7_3x3, include_2d=include
)
assert mapper_plotter.visuals_source_with_include_2d.origin == None
assert mapper_plotter.visuals_source_with_include_2d.grid == None
assert mapper_plotter.visuals_source_with_include_2d.pixelization_grid == None
assert mapper_plotter.visuals_source_with_include_2d.border == None
def test__visuals_for_source_from_voronoi_mapper(self, voronoi_mapper_9_3x3):
include = aplt.Include2D(
origin=True,
border=True,
mapper_source_grid_slim=True,
mapper_source_pixelization_grid=True,
)
mapper_plotter = aplt.MapperPlotter(
mapper=voronoi_mapper_9_3x3, include_2d=include
)
assert mapper_plotter.visuals_2d.origin == None
assert mapper_plotter.visuals_source_with_include_2d.origin.in_list == [
(0.0, 0.0)
]
assert (
mapper_plotter.visuals_source_with_include_2d.grid
== voronoi_mapper_9_3x3.source_grid_slim
).all()
assert (
mapper_plotter.visuals_source_with_include_2d.pixelization_grid
== voronoi_mapper_9_3x3.source_pixelization_grid
).all()
assert (
mapper_plotter.visuals_source_with_include_2d.border
== voronoi_mapper_9_3x3.source_grid_slim.sub_border_grid
).all()
include = aplt.Include2D(
origin=False, border=False, mapper_source_pixelization_grid=False
)
mapper_plotter = aplt.MapperPlotter(
mapper=voronoi_mapper_9_3x3, include_2d=include
)
assert mapper_plotter.visuals_source_with_include_2d.origin == None
assert mapper_plotter.visuals_source_with_include_2d.grid == None
assert mapper_plotter.visuals_source_with_include_2d.border == None
def test__plot_rectangular_mapper__works_with_all_extras_included(
self, rectangular_mapper_7x7_3x3, plot_path, plot_patch
):
mapper_plotter = aplt.MapperPlotter(
mapper=rectangular_mapper_7x7_3x3,
visuals_2d=aplt.Visuals2D(
indexes=[[(0, 0), (0, 1)], [(1, 2)]], pixelization_indexes=[[0, 1], [2]]
),
mat_plot_2d=aplt.MatPlot2D(
output=aplt.Output(path=plot_path, filename="mapper1", format="png")
),
)
mapper_plotter.figure_2d()
assert path.join(plot_path, "mapper1.png") in plot_patch.paths
mapper_plotter = aplt.MapperPlotter(
mapper=rectangular_mapper_7x7_3x3,
visuals_2d=aplt.Visuals2D(
indexes=[[(0, 0), (0, 1)], [(1, 2)]], pixelization_indexes=[[0, 1], [2]]
),
mat_plot_2d=aplt.MatPlot2D(
output=aplt.Output(path=plot_path, filename="mapper2", format="png")
),
)
mapper_plotter.figure_2d()
assert path.join(plot_path, "mapper2.png") in plot_patch.paths
mapper_plotter = aplt.MapperPlotter(
mapper=rectangular_mapper_7x7_3x3,
visuals_2d=aplt.Visuals2D(
indexes=[[(0, 0), (0, 1)], [(1, 2)]], pixelization_indexes=[[0, 1], [2]]
),
mat_plot_2d=aplt.MatPlot2D(
output=aplt.Output(path=plot_path, filename="mapper3", format="png")
),
include_2d=aplt.Include2D(
origin=True, mapper_source_pixelization_grid=True
),
)
mapper_plotter.figure_2d()
assert path.join(plot_path, "mapper3.png") in plot_patch.paths
def test__plot_voronoi_mapper__works_with_all_extras_included(
self, voronoi_mapper_9_3x3, plot_path, plot_patch
):
mapper_plotter = aplt.MapperPlotter(
mapper=voronoi_mapper_9_3x3,
visuals_2d=aplt.Visuals2D(
indexes=[[(0, 0), (0, 1)], [(1, 2)]], pixelization_indexes=[[0, 1], [2]]
),
mat_plot_2d=aplt.MatPlot2D(
output=aplt.Output(path=plot_path, filename="mapper1", format="png")
),
)
mapper_plotter.figure_2d()
assert path.join(plot_path, "mapper1.png") in plot_patch.paths
mapper_plotter = aplt.MapperPlotter(
visuals_2d=aplt.Visuals2D(
indexes=[[(0, 0), (0, 1)], [(1, 2)]], pixelization_indexes=[[0, 1], [2]]
),
mapper=voronoi_mapper_9_3x3,
mat_plot_2d=aplt.MatPlot2D(
output=aplt.Output(path=plot_path, filename="mapper2", format="png")
),
)
mapper_plotter.figure_2d()
assert path.join(plot_path, "mapper2.png") in plot_patch.paths
mapper_plotter = aplt.MapperPlotter(
visuals_2d=aplt.Visuals2D(
indexes=[[(0, 0), (0, 1)], [(1, 2)]], pixelization_indexes=[[0, 1], [2]]
),
mapper=voronoi_mapper_9_3x3,
mat_plot_2d=aplt.MatPlot2D(
output=aplt.Output(path=plot_path, filename="mapper3", format="png")
),
)
mapper_plotter.figure_2d()
assert path.join(plot_path, "mapper3.png") in plot_patch.paths
def test__image_and_mapper_subplot_is_output_for_all_mappers(
self,
imaging_7x7,
rectangular_mapper_7x7_3x3,
voronoi_mapper_9_3x3,
plot_path,
plot_patch,
):
mapper_plotter = aplt.MapperPlotter(
mapper=rectangular_mapper_7x7_3x3,
visuals_2d=aplt.Visuals2D(
indexes=[[(0, 0), (0, 1)], [(1, 2)]], pixelization_indexes=[[0, 1], [2]]
),
mat_plot_2d=aplt.MatPlot2D(
output=aplt.Output(path=plot_path, format="png")
),
include_2d=aplt.Include2D(mapper_source_pixelization_grid=True),
)
mapper_plotter.subplot_image_and_mapper(image=imaging_7x7.image)
assert path.join(plot_path, "subplot_image_and_mapper.png") in plot_patch.paths
mapper_plotter.subplot_image_and_mapper(image=imaging_7x7.image)
assert path.join(plot_path, "subplot_image_and_mapper.png") in plot_patch.paths
```
#### File: grids/one_d/test_grid_1d.py
```python
from os import path
import numpy as np
import pytest
import autoarray as aa
test_grid_dir = path.join(
"{}".format(path.dirname(path.realpath(__file__))), "files", "grids"
)
class TestAPI:
def test__manual(self):
grid_1d = aa.Grid1D.manual_native(
grid=[1.0, 2.0, 3.0, 4.0], pixel_scales=1.0, sub_size=2
)
assert type(grid_1d) == aa.Grid1D
assert (grid_1d.native == np.array([1.0, 2.0, 3.0, 4.0])).all()
assert (grid_1d.slim == np.array([1.0, 2.0, 3.0, 4.0])).all()
assert (grid_1d.binned.native == np.array([1.5, 3.5])).all()
assert (grid_1d.binned == np.array([1.5, 3.5])).all()
assert grid_1d.pixel_scales == (1.0,)
assert grid_1d.origin == (0.0,)
grid_1d = aa.Grid1D.manual_slim(
grid=[1.0, 2.0, 3.0, 4.0], pixel_scales=1.0, sub_size=2
)
assert type(grid_1d) == aa.Grid1D
assert (grid_1d.native == np.array([1.0, 2.0, 3.0, 4.0])).all()
assert (grid_1d.slim == np.array([1.0, 2.0, 3.0, 4.0])).all()
assert (grid_1d.binned.native == np.array([1.5, 3.5])).all()
assert (grid_1d.binned == np.array([1.5, 3.5])).all()
assert grid_1d.pixel_scales == (1.0,)
assert grid_1d.origin == (0.0,)
grid_1d = aa.Grid1D.manual_slim(
grid=[1.0, 2.0, 3.0, 4.0], pixel_scales=1.0, sub_size=2, origin=(1.0,)
)
assert type(grid_1d) == aa.Grid1D
assert (grid_1d.native == np.array([1.0, 2.0, 3.0, 4.0])).all()
assert (grid_1d.slim == np.array([1.0, 2.0, 3.0, 4.0])).all()
assert (grid_1d.binned.native == np.array([1.5, 3.5])).all()
assert (grid_1d.binned == np.array([1.5, 3.5])).all()
assert grid_1d.pixel_scales == (1.0,)
assert grid_1d.origin == (1.0,)
def test__manual_mask(self):
mask = aa.Mask1D.unmasked(shape_slim=(2,), pixel_scales=1.0, sub_size=2)
grid = aa.Grid1D.manual_mask(grid=[1.0, 2.0, 3.0, 4.0], mask=mask)
assert type(grid) == aa.Grid1D
assert (grid.native == np.array([1.0, 2.0, 3.0, 4.0])).all()
assert (grid.slim == np.array([1.0, 2.0, 3.0, 4.0])).all()
assert (grid.binned.native == np.array([1.5, 3.5])).all()
assert (grid.binned == np.array([1.5, 3.5])).all()
assert grid.pixel_scales == (1.0,)
assert grid.origin == (0.0,)
mask = aa.Mask1D.manual(mask=[True, False, False], pixel_scales=1.0, sub_size=2)
grid = aa.Grid1D.manual_mask(grid=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0], mask=mask)
assert type(grid) == aa.Grid1D
assert (grid.native == np.array([0.0, 0.0, 3.0, 4.0, 5.0, 6.0])).all()
assert (grid.slim == np.array([3.0, 4.0, 5.0, 6.0])).all()
assert (grid.binned.native == np.array([0.0, 3.5, 5.5])).all()
assert (grid.binned == np.array([3.5, 5.5])).all()
assert grid.pixel_scales == (1.0,)
assert grid.origin == (0.0,)
assert (grid.slim.native == np.array([0.0, 0.0, 3.0, 4.0, 5.0, 6.0])).all()
assert (grid.native.slim == np.array([3.0, 4.0, 5.0, 6.0])).all()
def test__from_mask(self):
mask = aa.Mask1D.unmasked(shape_slim=(4,), pixel_scales=1.0, sub_size=1)
grid = aa.Grid1D.from_mask(mask=mask)
assert type(grid) == aa.Grid1D
assert (grid.native == np.array([-1.5, -0.5, 0.5, 1.5])).all()
assert (grid.slim == np.array([-1.5, -0.5, 0.5, 1.5])).all()
assert (grid.binned.native == np.array([-1.5, -0.5, 0.5, 1.5])).all()
assert (grid.binned == np.array([-1.5, -0.5, 0.5, 1.5])).all()
assert grid.pixel_scales == (1.0,)
assert grid.origin == (0.0,)
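        # With sub_size=2 each unmasked pixel is split into two sub-pixels (here the
        # pixel centred at 0.5 yields sub-pixel coordinates 0.25 and 0.75); masked
        # entries are zero-filled in the native representation and binning recovers
        # the pixel-centre value.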
mask = aa.Mask1D.manual(mask=[True, False], pixel_scales=1.0, sub_size=2)
grid = aa.Grid1D.from_mask(mask=mask)
assert type(grid) == aa.Grid1D
assert (grid.native == np.array([0.0, 0.0, 0.25, 0.75])).all()
assert (grid.slim == np.array([0.25, 0.75])).all()
assert (grid.binned.native == np.array([0.0, 0.5])).all()
assert (grid.binned == np.array([0.5])).all()
assert grid.pixel_scales == (1.0,)
assert grid.origin == (0.0,)
mask = aa.Mask1D.manual(
mask=[True, False, False, False], pixel_scales=1.0, sub_size=1
)
grid = aa.Grid1D.from_mask(mask=mask)
assert type(grid) == aa.Grid1D
assert (grid.native == np.array([0.0, -0.5, 0.5, 1.5])).all()
assert (grid.slim == np.array([-0.5, 0.5, 1.5])).all()
assert (grid.binned.native == np.array([0.0, -0.5, 0.5, 1.5])).all()
assert (grid.binned == np.array([-0.5, 0.5, 1.5])).all()
assert grid.pixel_scales == (1.0,)
assert grid.origin == (0.0,)
def test__uniform(self):
grid_1d = aa.Grid1D.uniform(
shape_native=(2,), pixel_scales=1.0, sub_size=1, origin=(0.0,)
)
assert type(grid_1d) == aa.Grid1D
assert (grid_1d.native == np.array([-0.5, 0.5])).all()
assert (grid_1d.slim == np.array([-0.5, 0.5])).all()
assert (grid_1d.binned.native == np.array([-0.5, 0.5])).all()
assert grid_1d.pixel_scales == (1.0,)
assert grid_1d.origin == (0.0,)
grid_1d = aa.Grid1D.uniform(
shape_native=(2,), pixel_scales=1.0, sub_size=2, origin=(0.0,)
)
assert type(grid_1d) == aa.Grid1D
assert (grid_1d.native == np.array([-0.75, -0.25, 0.25, 0.75])).all()
assert (grid_1d.slim == np.array([-0.75, -0.25, 0.25, 0.75])).all()
assert (grid_1d.binned.native == np.array([-0.5, 0.5])).all()
assert grid_1d.pixel_scales == (1.0,)
assert grid_1d.origin == (0.0,)
grid_1d = aa.Grid1D.uniform(
shape_native=(2,), pixel_scales=1.0, sub_size=1, origin=(1.0,)
)
assert type(grid_1d) == aa.Grid1D
assert (grid_1d.native == np.array([0.5, 1.5])).all()
assert (grid_1d.slim == np.array([0.5, 1.5])).all()
assert (grid_1d.binned.native == np.array([0.5, 1.5])).all()
assert grid_1d.pixel_scales == (1.0,)
assert grid_1d.origin == (1.0,)
def test__uniform_from_zero(self):
grid_1d = aa.Grid1D.uniform_from_zero(
shape_native=(2,), pixel_scales=1.0, sub_size=1
)
assert type(grid_1d) == aa.Grid1D
assert (grid_1d.native == np.array([0.0, 1.0])).all()
assert (grid_1d.slim == np.array([0.0, 1.0])).all()
assert (grid_1d.binned.native == np.array([0.0, 1.0])).all()
assert grid_1d.pixel_scales == (1.0,)
assert grid_1d.origin == (0.0,)
grid_1d = aa.Grid1D.uniform_from_zero(
shape_native=(3,), pixel_scales=1.5, sub_size=1
)
assert type(grid_1d) == aa.Grid1D
assert (grid_1d.native == np.array([0.0, 1.5, 3.0])).all()
assert (grid_1d.slim == np.array([0.0, 1.5, 3.0])).all()
assert (grid_1d.binned.native == np.array([0.0, 1.5, 3.0])).all()
assert grid_1d.pixel_scales == (1.5,)
assert grid_1d.origin == (0.0,)
grid_1d = aa.Grid1D.uniform_from_zero(
shape_native=(3,), pixel_scales=1.5, sub_size=2
)
assert type(grid_1d) == aa.Grid1D
assert (grid_1d.native == np.array([0.0, 0.75, 1.5, 2.25, 3.0, 3.75])).all()
assert (grid_1d.slim == np.array([0, 0.75, 1.5, 2.25, 3.0, 3.75])).all()
assert (grid_1d.binned.native == np.array([0.375, 1.875, 3.375])).all()
assert grid_1d.pixel_scales == (1.5,)
assert grid_1d.origin == (0.0,)
def test__recursive_shape_storage(self):
mask = aa.Mask1D.unmasked(shape_slim=(2,), pixel_scales=1.0, sub_size=2)
grid = aa.Grid1D.manual_mask(grid=[1.0, 2.0, 3.0, 4.0], mask=mask)
assert (grid.slim.native.slim == np.array([1.0, 2.0, 3.0, 4.0])).all()
assert (grid.native.slim.native == np.array([1.0, 2.0, 3.0, 4.0])).all()
mask = aa.Mask1D.manual(mask=[True, False, False], pixel_scales=1.0, sub_size=2)
grid = aa.Grid1D.manual_mask(grid=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0], mask=mask)
assert (grid.slim.native.slim == np.array([3.0, 4.0, 5.0, 6.0])).all()
assert (
grid.native.slim.native == np.array([0.0, 0.0, 3.0, 4.0, 5.0, 6.0])
).all()
class TestGrid1D:
def test__grid_2d_with_other_value_out(self):
grid_1d = aa.Grid1D.manual_native(
grid=[1.0, 2.0, 3.0, 4.0], pixel_scales=1.0, sub_size=1
)
grid_2d = grid_1d.project_to_radial_grid_2d(angle=0.0)
assert type(grid_2d) == aa.Grid2DIrregular
assert grid_2d.slim == pytest.approx(
np.array([[0.0, 1.0], [0.0, 2.0], [0.0, 3.0], [0.0, 4.0]]), 1.0e-4
)
grid_2d = grid_1d.project_to_radial_grid_2d(angle=90.0)
assert grid_2d.slim == pytest.approx(
np.array([[-1.0, 0.0], [-2.0, 0.0], [-3.0, 0.0], [-4.0, 0.0]]), 1.0e-4
)
grid_2d = grid_1d.project_to_radial_grid_2d(angle=45.0)
assert grid_2d.slim == pytest.approx(
np.array(
[
[-0.5 * np.sqrt(2), 0.5 * np.sqrt(2)],
[-1.0 * np.sqrt(2), 1.0 * np.sqrt(2)],
[-1.5 * np.sqrt(2), 1.5 * np.sqrt(2)],
[-2.0 * np.sqrt(2), 2.0 * np.sqrt(2)],
]
),
1.0e-4,
)
def test__structure_2d_from_result__maps_numpy_array_to__auto_array_or_grid(self):
mask = np.array([True, False, False, True])
mask = aa.Mask1D.manual(mask=mask, pixel_scales=(1.0,), sub_size=1)
grid_1d = aa.Grid1D.from_mask(mask=mask)
result = grid_1d.structure_2d_from_result(result=np.array([1.0, 2.0]))
assert isinstance(result, aa.Array1D)
assert (result.native == np.array([0.0, 1.0, 2.0, 0.0])).all()
result = grid_1d.structure_2d_from_result(
result=np.array([[1.0, 1.0], [2.0, 2.0]])
)
assert isinstance(result, aa.Grid2D)
assert (
result.native
== np.array([[[0.0, 0.0], [1.0, 1.0], [2.0, 2.0], [0.0, 0.0]]])
).all()
def test__structure_2d_list_from_result_list__maps_list_to_auto_arrays_or_grids(
self
):
mask = np.array([True, False, False, True])
mask = aa.Mask1D.manual(mask=mask, pixel_scales=(1.0,), sub_size=1)
grid_1d = aa.Grid1D.from_mask(mask=mask)
result = grid_1d.structure_2d_list_from_result_list(
result_list=[np.array([1.0, 2.0])]
)
assert isinstance(result[0], aa.Array1D)
assert (result[0].native == np.array([0.0, 1.0, 2.0, 0.0])).all()
result = grid_1d.structure_2d_list_from_result_list(
result_list=[np.array([[1.0, 1.0], [2.0, 2.0]])]
)
assert isinstance(result[0], aa.Grid2D)
assert (
result[0].native
== np.array([[[0.0, 0.0], [1.0, 1.0], [2.0, 2.0], [0.0, 0.0]]])
).all()
```
#### File: grids/two_d/test_grid_2d_interpolate.py
```python
import numpy as np
import pytest
import autoarray as aa
from autoarray.mock.mock import ndarray_1d_from_grid, ndarray_2d_from_grid
class TestObj:
def test__blurring_grid_from_mask__compare_to_array_util(self):
mask = np.array(
[
[True, True, True, True, True, True, True, True, True],
[True, True, True, True, True, True, True, True, True],
[True, True, True, True, True, True, True, True, True],
[True, True, True, True, True, True, True, True, True],
[True, True, True, True, False, True, True, True, True],
[True, True, True, True, True, True, True, True, True],
[True, True, True, True, True, True, True, True, True],
[True, True, True, True, True, True, True, True, True],
[True, True, True, True, True, True, True, True, True],
]
)
mask = aa.Mask2D.manual(mask=mask, pixel_scales=(2.0, 2.0), sub_size=2)
blurring_mask_util = aa.util.mask_2d.blurring_mask_2d_from(
mask_2d=mask, kernel_shape_native=(3, 5)
)
blurring_grid_util = aa.util.grid_2d.grid_2d_slim_via_mask_from(
mask_2d=blurring_mask_util, pixel_scales=(2.0, 2.0), sub_size=1
)
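        # The blurring grid holds the coordinates of masked pixels whose flux a
        # (3, 5) kernel would blur into the unmasked pixel; the Grid2DInterpolate
        # result below is checked against this util-level calculation.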
grid = aa.Grid2DInterpolate.from_mask(mask=mask, pixel_scales_interp=0.1)
blurring_grid = grid.blurring_grid_from_kernel_shape(kernel_shape_native=(3, 5))
assert isinstance(blurring_grid, aa.Grid2DInterpolate)
assert len(blurring_grid.shape) == 2
assert blurring_grid == pytest.approx(blurring_grid_util, 1e-4)
assert blurring_grid.pixel_scales == (2.0, 2.0)
assert blurring_grid.pixel_scales_interp == (0.1, 0.1)
def test__blurring_grid_from_kernel_shape__compare_to_array_util(self):
mask = np.array(
[
[True, True, True, True, True, True, True, True, True],
[True, True, True, True, True, True, True, True, True],
[True, True, False, True, True, True, False, True, True],
[True, True, True, True, True, True, True, True, True],
[True, True, True, True, True, True, True, True, True],
[True, True, True, True, True, True, True, True, True],
[True, True, False, True, True, True, False, True, True],
[True, True, True, True, True, True, True, True, True],
[True, True, True, True, True, True, True, True, True],
]
)
mask = aa.Mask2D.manual(mask=mask, pixel_scales=(2.0, 2.0))
blurring_mask_util = aa.util.mask_2d.blurring_mask_2d_from(
mask_2d=mask, kernel_shape_native=(3, 5)
)
blurring_grid_util = aa.util.grid_2d.grid_2d_slim_via_mask_from(
mask_2d=blurring_mask_util, pixel_scales=(2.0, 2.0), sub_size=1
)
mask = aa.Mask2D.manual(mask=mask, pixel_scales=(2.0, 2.0))
blurring_grid = aa.Grid2DInterpolate.blurring_grid_from_mask_and_kernel_shape(
mask=mask, kernel_shape_native=(3, 5), pixel_scales_interp=0.1
)
assert isinstance(blurring_grid, aa.Grid2DInterpolate)
assert len(blurring_grid.shape) == 2
assert blurring_grid == pytest.approx(blurring_grid_util, 1e-4)
assert blurring_grid.pixel_scales == (2.0, 2.0)
assert blurring_grid.pixel_scales_interp == (0.1, 0.1)
def test__padded_grid_from_kernel_shape(self):
grid = aa.Grid2DInterpolate.uniform(
shape_native=(4, 4), pixel_scales=3.0, pixel_scales_interp=0.1
)
padded_grid = grid.padded_grid_from_kernel_shape(kernel_shape_native=(3, 3))
assert isinstance(padded_grid, aa.Grid2DInterpolate)
assert padded_grid.pixel_scales_interp == (0.1, 0.1)
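        # Padding a (4, 4) grid with a (3, 3) kernel produces a (6, 6) grid, so the
        # padded grid's interpolation vertices and weights should match those of a
        # fresh unmasked (6, 6) Grid2DInterpolate built below.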
mask = aa.Mask2D.unmasked(
shape_native=(6, 6), pixel_scales=(3.0, 3.0), sub_size=1
)
grid = aa.Grid2DInterpolate.from_mask(mask=mask, pixel_scales_interp=0.1)
assert isinstance(padded_grid, aa.Grid2DInterpolate)
assert padded_grid.pixel_scales_interp == (0.1, 0.1)
assert (padded_grid.vtx == grid.vtx).all()
assert (padded_grid.wts == grid.wts).all()
mask = aa.Mask2D.manual(
mask=np.full((2, 5), False), pixel_scales=(8.0, 8.0), sub_size=4
)
grid = aa.Grid2DInterpolate.from_mask(mask=mask, pixel_scales_interp=0.2)
padded_grid = grid.padded_grid_from_kernel_shape(kernel_shape_native=(5, 5))
padded_grid_util = aa.util.grid_2d.grid_2d_slim_via_mask_from(
mask_2d=np.full((6, 9), False), pixel_scales=(8.0, 8.0), sub_size=4
)
assert isinstance(padded_grid, aa.Grid2DInterpolate)
assert padded_grid.pixel_scales_interp == (0.2, 0.2)
assert isinstance(padded_grid, aa.Grid2DInterpolate)
assert padded_grid.shape == (864, 2)
assert (padded_grid.mask == np.full(fill_value=False, shape=(6, 9))).all()
assert padded_grid == pytest.approx(padded_grid_util, 1e-4)
class TestInterpolatedResult:
def test__function_returns_binary_ndarray_1d__returns_interpolated_array(self):
# noinspection PyUnusedLocal
class MockInterpolateClass:
def func(self, profile, grid):
result = np.zeros(grid.shape[0])
result[0] = 1
return result
mask = aa.Mask2D.unmasked(
shape_native=(3, 3), pixel_scales=(1.0, 1.0), sub_size=1
)
grid = aa.Grid2DInterpolate.from_mask(mask=mask, pixel_scales_interp=0.5)
cls = MockInterpolateClass()
interp_array = grid.result_from_func(func=cls.func, cls=MockInterpolateClass())
assert isinstance(interp_array, aa.Array2D)
assert interp_array.ndim == 1
assert interp_array.shape == (9,)
assert (interp_array != np.array([[1, 0, 0, 0, 0, 0, 0, 0, 0]])).any()
    def test__function_is_false_in_config__does_not_use_interpolation(self):
# noinspection PyUnusedLocal
class MockInterpolateClass:
def func_off(self, profile, grid):
result = np.zeros(grid.shape[0])
result[0] = 1
return result
mask = aa.Mask2D.unmasked(
shape_native=(3, 3), pixel_scales=(1.0, 1.0), sub_size=1
)
grid = aa.Grid2DInterpolate.from_mask(mask=mask, pixel_scales_interp=0.5)
cls = MockInterpolateClass()
arr = grid.result_from_func(func=cls.func_off, cls=MockInterpolateClass())
assert isinstance(arr, aa.Array2D)
assert arr.ndim == 1
assert arr.shape == (9,)
assert (arr == np.array([[1, 0, 0, 0, 0, 0, 0, 0, 0]])).any()
def test__function_returns_binary_ndarray_2d__returns_interpolated_grid(self):
# noinspection PyUnusedLocal
class MockInterpolateClass:
def func(self, profile, grid):
result = np.zeros((grid.shape[0], 2))
result[0, :] = 1
return result
mask = aa.Mask2D.unmasked(
shape_native=(3, 3), pixel_scales=(1.0, 1.0), sub_size=1
)
grid = aa.Grid2DInterpolate.from_mask(mask=mask, pixel_scales_interp=0.5)
cls = MockInterpolateClass()
interp_grid = grid.result_from_func(func=cls.func, cls=MockInterpolateClass())
assert isinstance(interp_grid, aa.Grid2D)
assert interp_grid.ndim == 2
assert interp_grid.shape == (9, 2)
assert (
interp_grid
!= np.array(
np.array(
[
[1, 1],
[0, 0],
[0, 0],
[0, 0],
[0, 0],
[0, 0],
[0, 0],
[0, 0],
[0, 0],
]
)
)
).any()
def test__function_returns_ndarray_1d__interpolation_used_and_accurate(self):
# noinspection PyUnusedLocal
class MockInterpolateObj:
def ndarray_1d_from_grid(self, profile, grid):
return ndarray_1d_from_grid(profile=profile, grid=grid)
cls = MockInterpolateObj()
mask = aa.Mask2D.circular_annular(
shape_native=(20, 20),
pixel_scales=(1.0, 1.0),
sub_size=1,
inner_radius=3.0,
outer_radius=8.0,
)
grid = aa.Grid2D.from_mask(mask=mask)
true_array = ndarray_1d_from_grid(profile=None, grid=grid)
grid = aa.Grid2DInterpolate.from_mask(mask=mask, pixel_scales_interp=1.0)
interpolated_array = grid.result_from_func(
func=cls.ndarray_1d_from_grid, cls=MockInterpolateObj()
)
assert interpolated_array.shape[0] == mask.pixels_in_mask
assert (true_array == interpolated_array).all()
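        # With pixel_scales_interp=1.0 the interpolation grid coincides with the
        # pixel centres of the (1.0, 1.0) mask, so the result is exact; the finer
        # 0.1 interpolation grid below no longer coincides, giving small (< 1e-3)
        # interpolation errors.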
grid = aa.Grid2DInterpolate.from_mask(mask=mask, pixel_scales_interp=0.1)
interpolated_array = grid.result_from_func(
func=cls.ndarray_1d_from_grid, cls=MockInterpolateObj()
)
assert interpolated_array.shape[0] == mask.pixels_in_mask
assert true_array[0] != interpolated_array[0]
assert np.max(true_array - interpolated_array) < 0.001
mask = aa.Mask2D.circular_annular(
shape_native=(28, 28),
pixel_scales=(1.0, 1.0),
sub_size=1,
inner_radius=3.0,
outer_radius=8.0,
centre=(3.0, 3.0),
)
grid = aa.Grid2D.from_mask(mask=mask)
true_array = ndarray_1d_from_grid(profile=None, grid=grid)
grid = aa.Grid2DInterpolate.from_mask(mask=mask, pixel_scales_interp=0.1)
interpolated_array = grid.result_from_func(
func=cls.ndarray_1d_from_grid, cls=MockInterpolateObj()
)
assert interpolated_array.shape[0] == mask.pixels_in_mask
assert true_array[0] != interpolated_array[0]
assert np.max(true_array - interpolated_array) < 0.001
def test__function_returns_ndarray_2d__interpolation_used_and_accurate(self):
# noinspection PyUnusedLocal
class MockInterpolateObj:
def ndarray_2d_from_grid(self, profile, grid):
return ndarray_2d_from_grid(profile=profile, grid=grid)
cls = MockInterpolateObj()
mask = aa.Mask2D.circular_annular(
shape_native=(20, 20),
pixel_scales=(1.0, 1.0),
sub_size=1,
inner_radius=3.0,
outer_radius=8.0,
)
grid = aa.Grid2D.from_mask(mask=mask)
true_grid = ndarray_2d_from_grid(profile=None, grid=grid)
grid = aa.Grid2DInterpolate.from_mask(mask=mask, pixel_scales_interp=1.0)
interpolated_grid = grid.result_from_func(
func=cls.ndarray_2d_from_grid, cls=MockInterpolateObj()
)
assert interpolated_grid.shape[0] == mask.pixels_in_mask
assert interpolated_grid.shape[1] == 2
assert (true_grid == interpolated_grid).all()
grid = aa.Grid2DInterpolate.from_mask(mask=mask, pixel_scales_interp=0.1)
interpolated_grid = grid.result_from_func(
func=cls.ndarray_2d_from_grid, cls=MockInterpolateObj()
)
assert interpolated_grid.shape[0] == mask.pixels_in_mask
assert interpolated_grid.shape[1] == 2
assert true_grid[0, 0] != interpolated_grid[0, 0]
assert np.max(true_grid[:, 0] - interpolated_grid[:, 0]) < 0.001
assert np.max(true_grid[:, 1] - interpolated_grid[:, 1]) < 0.001
mask = aa.Mask2D.circular_annular(
shape_native=(28, 28),
pixel_scales=(1.0, 1.0),
sub_size=1,
inner_radius=3.0,
outer_radius=8.0,
centre=(3.0, 3.0),
)
grid = aa.Grid2D.from_mask(mask=mask)
true_grid = ndarray_2d_from_grid(profile=None, grid=grid)
grid = aa.Grid2DInterpolate.from_mask(mask=mask, pixel_scales_interp=0.1)
interpolated_grid = grid.result_from_func(
func=cls.ndarray_2d_from_grid, cls=MockInterpolateObj()
)
assert interpolated_grid.shape[0] == mask.pixels_in_mask
assert interpolated_grid.shape[1] == 2
assert true_grid[0, 0] != interpolated_grid[0, 0]
assert np.max(true_grid[:, 0] - interpolated_grid[:, 0]) < 0.01
assert np.max(true_grid[:, 1] - interpolated_grid[:, 1]) < 0.01
class TestAPI:
def test__manual_slim(self):
grid = aa.Grid2DInterpolate.manual_slim(
grid=[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]],
shape_native=(2, 2),
pixel_scales=1.0,
pixel_scales_interp=0.1,
origin=(0.0, 1.0),
)
assert type(grid) == aa.Grid2DInterpolate
assert type(grid.slim) == aa.Grid2DInterpolate
assert type(grid.native) == aa.Grid2DInterpolate
assert (
grid == np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]])
).all()
assert (
grid.native
== np.array([[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]])
).all()
assert (
grid.slim == np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]])
).all()
assert grid.pixel_scales == (1.0, 1.0)
assert grid.pixel_scales_interp == (0.1, 0.1)
assert grid.origin == (0.0, 1.0)
grid = aa.Grid2DInterpolate.manual_slim(
grid=[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]],
shape_native=(1, 1),
pixel_scales=1.0,
pixel_scales_interp=0.1,
sub_size=2,
origin=(0.0, 1.0),
)
assert type(grid) == aa.Grid2DInterpolate
assert type(grid.slim) == aa.Grid2DInterpolate
assert type(grid.native) == aa.Grid2DInterpolate
assert (
grid == np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]])
).all()
assert (
grid.native
== np.array([[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]])
).all()
assert (
grid.slim == np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]])
).all()
assert (grid.binned.native == np.array([[[4.0, 5.0]]])).all()
assert (grid.binned == np.array([[4.0, 5.0]])).all()
assert grid.pixel_scales == (1.0, 1.0)
assert grid.pixel_scales_interp == (0.1, 0.1)
assert grid.origin == (0.0, 1.0)
assert grid.sub_size == 2
def test__from_mask(self):
mask = np.array(
[
[True, True, False, False],
[True, False, True, True],
[True, True, False, False],
]
)
mask = aa.Mask2D.manual(mask=mask, pixel_scales=(2.0, 2.0), sub_size=1)
grid_via_util = aa.util.grid_2d.grid_2d_slim_via_mask_from(
mask_2d=mask, sub_size=1, pixel_scales=(2.0, 2.0)
)
grid = aa.Grid2DInterpolate.from_mask(mask=mask, pixel_scales_interp=0.1)
assert type(grid) == aa.Grid2DInterpolate
assert type(grid.slim) == aa.Grid2DInterpolate
assert type(grid.native) == aa.Grid2DInterpolate
assert grid == pytest.approx(grid_via_util, 1e-4)
assert grid.pixel_scales_interp == (0.1, 0.1)
assert grid.sub_size == 1
grid_via_util = aa.util.grid_2d.grid_2d_via_mask_from(
mask_2d=mask, sub_size=1, pixel_scales=(2.0, 2.0)
)
mask = np.array(
[
[True, True, False, False],
[True, False, True, True],
[True, True, False, False],
]
)
mask = aa.Mask2D.manual(mask=mask, pixel_scales=(2.0, 2.0), sub_size=2)
grid_via_util = aa.util.grid_2d.grid_2d_slim_via_mask_from(
mask_2d=mask, sub_size=2, pixel_scales=(2.0, 2.0)
)
grid = aa.Grid2DInterpolate.from_mask(mask=mask, pixel_scales_interp=0.1)
assert type(grid) == aa.Grid2DInterpolate
assert type(grid.slim) == aa.Grid2DInterpolate
assert type(grid.native) == aa.Grid2DInterpolate
assert grid == pytest.approx(grid_via_util, 1e-4)
assert grid.pixel_scales_interp == (0.1, 0.1)
assert grid.sub_size == 2
def test__uniform(self):
grid = aa.Grid2DInterpolate.uniform(
shape_native=(2, 2), pixel_scales=2.0, pixel_scales_interp=0.1
)
assert type(grid) == aa.Grid2DInterpolate
assert type(grid.slim) == aa.Grid2DInterpolate
assert type(grid.native) == aa.Grid2DInterpolate
assert (
grid == np.array([[1.0, -1.0], [1.0, 1.0], [-1.0, -1.0], [-1.0, 1.0]])
).all()
assert (
grid.native
== np.array([[[1.0, -1.0], [1.0, 1.0]], [[-1.0, -1.0], [-1.0, 1.0]]])
).all()
assert (
grid.slim == np.array([[1.0, -1.0], [1.0, 1.0], [-1.0, -1.0], [-1.0, 1.0]])
).all()
assert grid.pixel_scales == (2.0, 2.0)
assert grid.pixel_scales_interp == (0.1, 0.1)
assert grid.origin == (0.0, 0.0)
grid = aa.Grid2DInterpolate.uniform(
shape_native=(2, 1), pixel_scales=1.0, pixel_scales_interp=0.2, sub_size=2
)
assert type(grid) == aa.Grid2DInterpolate
assert type(grid.slim) == aa.Grid2DInterpolate
assert type(grid.native) == aa.Grid2DInterpolate
assert (
grid.native
== np.array(
[
[[0.75, -0.25], [0.75, 0.25]],
[[0.25, -0.25], [0.25, 0.25]],
[[-0.25, -0.25], [-0.25, 0.25]],
[[-0.75, -0.25], [-0.75, 0.25]],
]
)
).all()
assert (
grid.slim
== np.array(
[
[0.75, -0.25],
[0.75, 0.25],
[0.25, -0.25],
[0.25, 0.25],
[-0.25, -0.25],
[-0.25, 0.25],
[-0.75, -0.25],
[-0.75, 0.25],
]
)
).all()
assert (grid.binned.native == np.array([[[0.5, 0.0]], [[-0.5, 0.0]]])).all()
assert (grid.binned == np.array([[0.5, 0.0], [-0.5, 0.0]])).all()
assert grid.pixel_scales == (1.0, 1.0)
assert grid.pixel_scales_interp == (0.2, 0.2)
assert grid.origin == (0.0, 0.0)
assert grid.sub_size == 2
```
#### File: test_autoarray/structures/test_kernel_2d.py
```python
from os import path
import numpy as np
import pytest
from astropy import units
from astropy.modeling import functional_models
from astropy.coordinates import Angle
import autoarray as aa
from autoarray import exc
test_data_dir = path.join("{}".format(path.dirname(path.realpath(__file__))), "files")
class TestAPI:
def test__manual__input_kernel__all_attributes_correct_including_data_inheritance(
self,
):
kernel = aa.Kernel2D.ones(
shape_native=(3, 3), pixel_scales=1.0, normalize=False
)
assert kernel.shape_native == (3, 3)
assert (kernel.native == np.ones((3, 3))).all()
assert kernel.pixel_scales == (1.0, 1.0)
assert kernel.origin == (0.0, 0.0)
kernel = aa.Kernel2D.ones(
shape_native=(4, 3), pixel_scales=1.0, normalize=False
)
assert kernel.shape_native == (4, 3)
assert (kernel.native == np.ones((4, 3))).all()
assert kernel.pixel_scales == (1.0, 1.0)
assert kernel.origin == (0.0, 0.0)
def test__full_kernel_is_set_of_full_values(self):
kernel = aa.Kernel2D.full(fill_value=3.0, shape_native=(3, 3), pixel_scales=1.0)
assert kernel.shape_native == (3, 3)
assert (kernel.native == 3.0 * np.ones((3, 3))).all()
assert kernel.pixel_scales == (1.0, 1.0)
assert kernel.origin == (0.0, 0.0)
def test__ones_zeros__kernel_is_set_of_full_values(self):
kernel = aa.Kernel2D.ones(shape_native=(3, 3), pixel_scales=1.0)
assert kernel.shape_native == (3, 3)
assert (kernel.native == np.ones((3, 3))).all()
assert kernel.pixel_scales == (1.0, 1.0)
assert kernel.origin == (0.0, 0.0)
kernel = aa.Kernel2D.zeros(shape_native=(3, 3), pixel_scales=1.0)
assert kernel.shape_native == (3, 3)
assert (kernel.native == np.zeros((3, 3))).all()
assert kernel.pixel_scales == (1.0, 1.0)
assert kernel.origin == (0.0, 0.0)
    def test__from_fits__input_kernel_3x2__all_attributes_correct_including_data_inheritance(
self,
):
kernel = aa.Kernel2D.from_fits(
file_path=path.join(test_data_dir, "3x2_ones.fits"), hdu=0, pixel_scales=1.0
)
assert (kernel.native == np.ones((3, 2))).all()
kernel = aa.Kernel2D.from_fits(
file_path=path.join(test_data_dir, "3x2_twos.fits"), hdu=0, pixel_scales=1.0
)
assert (kernel.native == 2.0 * np.ones((3, 2))).all()
def test__no_blur__correct_kernel(self):
kernel = aa.Kernel2D.no_blur(pixel_scales=1.0)
assert (kernel.native == np.array([[1.0]])).all()
assert kernel.pixel_scales == (1.0, 1.0)
kernel = aa.Kernel2D.no_blur(pixel_scales=2.0)
assert (kernel.native == np.array([[1.0]])).all()
assert kernel.pixel_scales == (2.0, 2.0)
class TestNormalize:
def test__input_is_already_normalized__no_change(self):
kernel_data = np.ones((3, 3)) / 9.0
kernel = aa.Kernel2D.manual_native(
array=kernel_data, pixel_scales=1.0, normalize=True
)
assert kernel.native == pytest.approx(kernel_data, 1e-3)
def test__input_is_above_normalization_so_is_normalized(self):
kernel_data = np.ones((3, 3))
kernel = aa.Kernel2D.manual_native(
array=kernel_data, pixel_scales=1.0, normalize=True
)
assert kernel.native == pytest.approx(np.ones((3, 3)) / 9.0, 1e-3)
kernel = aa.Kernel2D.manual_native(
array=kernel_data, pixel_scales=1.0, normalize=False
)
kernel = kernel.normalized
assert kernel.native == pytest.approx(np.ones((3, 3)) / 9.0, 1e-3)
    def test__same_as_above__normalize_false_does_not_normalize(self):
kernel_data = np.ones((3, 3))
kernel = aa.Kernel2D.manual_native(
array=kernel_data, pixel_scales=1.0, normalize=False
)
assert kernel.native == pytest.approx(np.ones((3, 3)), 1e-3)
class TestBinnedUp:
def test__kernel_is_even_x_even__rescaled_to_odd_x_odd__no_use_of_dimension_trimming(
self,
):
array_2d = np.ones((6, 6))
kernel = aa.Kernel2D.manual_native(
array=array_2d, pixel_scales=1.0, normalize=False
)
kernel = kernel.rescaled_with_odd_dimensions_from_rescale_factor(
rescale_factor=0.5, normalize=True
)
assert kernel.pixel_scales == (2.0, 2.0)
assert (kernel.native == (1.0 / 9.0) * np.ones((3, 3))).all()
array_2d = np.ones((9, 9))
kernel = aa.Kernel2D.manual_native(
array=array_2d, pixel_scales=1.0, normalize=False
)
kernel = kernel.rescaled_with_odd_dimensions_from_rescale_factor(
rescale_factor=0.333333333333333, normalize=True
)
assert kernel.pixel_scales == (3.0, 3.0)
assert (kernel.native == (1.0 / 9.0) * np.ones((3, 3))).all()
array_2d = np.ones((18, 6))
kernel = aa.Kernel2D.manual_native(
array=array_2d, pixel_scales=1.0, normalize=False
)
kernel = kernel.rescaled_with_odd_dimensions_from_rescale_factor(
rescale_factor=0.5, normalize=True
)
assert kernel.pixel_scales == (2.0, 2.0)
assert (kernel.native == (1.0 / 27.0) * np.ones((9, 3))).all()
array_2d = np.ones((6, 18))
kernel = aa.Kernel2D.manual_native(
array=array_2d, pixel_scales=1.0, normalize=False
)
kernel = kernel.rescaled_with_odd_dimensions_from_rescale_factor(
rescale_factor=0.5, normalize=True
)
assert kernel.pixel_scales == (2.0, 2.0)
assert (kernel.native == (1.0 / 27.0) * np.ones((3, 9))).all()
def test__kernel_is_even_x_even_after_binning_up__resized_to_odd_x_odd_with_shape_plus_one(
self,
):
kernel = aa.Kernel2D.ones(
shape_native=(2, 2), pixel_scales=1.0, normalize=False
)
kernel = kernel.rescaled_with_odd_dimensions_from_rescale_factor(
rescale_factor=2.0, normalize=True
)
assert kernel.pixel_scales == (0.4, 0.4)
assert (kernel.native == (1.0 / 25.0) * np.ones((5, 5))).all()
kernel = aa.Kernel2D.ones(
shape_native=(40, 40), pixel_scales=1.0, normalize=False
)
kernel = kernel.rescaled_with_odd_dimensions_from_rescale_factor(
rescale_factor=0.1, normalize=True
)
assert kernel.pixel_scales == (8.0, 8.0)
assert (kernel.native == (1.0 / 25.0) * np.ones((5, 5))).all()
kernel = aa.Kernel2D.ones(
shape_native=(2, 4), pixel_scales=1.0, normalize=False
)
kernel = kernel.rescaled_with_odd_dimensions_from_rescale_factor(
rescale_factor=2.0, normalize=True
)
assert kernel.pixel_scales[0] == pytest.approx(0.4, 1.0e-4)
assert kernel.pixel_scales[1] == pytest.approx(0.4444444, 1.0e-4)
assert (kernel.native == (1.0 / 45.0) * np.ones((5, 9))).all()
kernel = aa.Kernel2D.ones(
shape_native=(4, 2), pixel_scales=1.0, normalize=False
)
kernel = kernel.rescaled_with_odd_dimensions_from_rescale_factor(
rescale_factor=2.0, normalize=True
)
assert kernel.pixel_scales[0] == pytest.approx(0.4444444, 1.0e-4)
assert kernel.pixel_scales[1] == pytest.approx(0.4, 1.0e-4)
assert (kernel.native == (1.0 / 45.0) * np.ones((9, 5))).all()
def test__kernel_is_odd_and_even_after_binning_up__resized_to_odd_and_odd_with_shape_plus_one(
self,
):
kernel = aa.Kernel2D.ones(
shape_native=(6, 4), pixel_scales=1.0, normalize=False
)
kernel = kernel.rescaled_with_odd_dimensions_from_rescale_factor(
rescale_factor=0.5, normalize=True
)
assert kernel.pixel_scales == pytest.approx((2.0, 1.3333333333), 1.0e-4)
assert (kernel.native == (1.0 / 9.0) * np.ones((3, 3))).all()
kernel = aa.Kernel2D.ones(
shape_native=(9, 12), pixel_scales=1.0, normalize=False
)
kernel = kernel.rescaled_with_odd_dimensions_from_rescale_factor(
rescale_factor=0.33333333333, normalize=True
)
assert kernel.pixel_scales == pytest.approx((3.0, 2.4), 1.0e-4)
assert (kernel.native == (1.0 / 15.0) * np.ones((3, 5))).all()
kernel = aa.Kernel2D.ones(
shape_native=(4, 6), pixel_scales=1.0, normalize=False
)
kernel = kernel.rescaled_with_odd_dimensions_from_rescale_factor(
rescale_factor=0.5, normalize=True
)
assert kernel.pixel_scales == pytest.approx((1.33333333333, 2.0), 1.0e-4)
assert (kernel.native == (1.0 / 9.0) * np.ones((3, 3))).all()
kernel = aa.Kernel2D.ones(
shape_native=(12, 9), pixel_scales=1.0, normalize=False
)
kernel = kernel.rescaled_with_odd_dimensions_from_rescale_factor(
rescale_factor=0.33333333333, normalize=True
)
assert kernel.pixel_scales == pytest.approx((2.4, 3.0), 1.0e-4)
assert (kernel.native == (1.0 / 15.0) * np.ones((5, 3))).all()
class TestConvolve:
def test__kernel_is_not_odd_x_odd__raises_error(self):
kernel = aa.Kernel2D.manual_native(
array=[[0.0, 1.0], [1.0, 2.0]], pixel_scales=1.0
)
with pytest.raises(exc.KernelException):
kernel.convolved_array_from_array(np.ones((5, 5)))
def test__image_is_3x3_central_value_of_one__kernel_is_cross__blurred_image_becomes_cross(
self,
):
image = aa.Array2D.manual_native(
array=[[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]], pixel_scales=1.0
)
kernel = aa.Kernel2D.manual_native(
array=[[0.0, 1.0, 0.0], [1.0, 2.0, 1.0], [0.0, 1.0, 0.0]], pixel_scales=1.0
)
blurred_image = kernel.convolved_array_from_array(image)
assert (blurred_image == kernel).all()
def test__image_is_4x4_central_value_of_one__kernel_is_cross__blurred_image_becomes_cross(
self,
):
image = aa.Array2D.manual_native(
array=[
[0.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
],
pixel_scales=1.0,
)
kernel = aa.Kernel2D.manual_native(
array=[[0.0, 1.0, 0.0], [1.0, 2.0, 1.0], [0.0, 1.0, 0.0]], pixel_scales=1.0
)
blurred_image = kernel.convolved_array_from_array(array=image)
assert (
blurred_image.native
== np.array(
[
[0.0, 1.0, 0.0, 0.0],
[1.0, 2.0, 1.0, 0.0],
[0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
]
)
).all()
def test__image_is_4x3_central_value_of_one__kernel_is_cross__blurred_image_becomes_cross(
self,
):
image = aa.Array2D.manual_native(
array=[[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
pixel_scales=1.0,
)
kernel = aa.Kernel2D.manual_native(
array=[[0.0, 1.0, 0.0], [1.0, 2.0, 1.0], [0.0, 1.0, 0.0]], pixel_scales=1.0
)
blurred_image = kernel.convolved_array_from_array(image)
assert (
blurred_image.native
== np.array(
[[0.0, 1.0, 0.0], [1.0, 2.0, 1.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]]
)
).all()
def test__image_is_3x4_central_value_of_one__kernel_is_cross__blurred_image_becomes_cross(
self,
):
image = aa.Array2D.manual_native(
array=[[0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]],
pixel_scales=1.0,
)
kernel = aa.Kernel2D.manual_native(
array=[[0.0, 1.0, 0.0], [1.0, 2.0, 1.0], [0.0, 1.0, 0.0]], pixel_scales=1.0
)
blurred_image = kernel.convolved_array_from_array(image)
assert (
blurred_image.native
== np.array(
[[0.0, 1.0, 0.0, 0.0], [1.0, 2.0, 1.0, 0.0], [0.0, 1.0, 0.0, 0.0]]
)
).all()
def test__image_is_4x4_has_two_central_values__kernel_is_asymmetric__blurred_image_follows_convolution(
self,
):
image = aa.Array2D.manual_native(
array=[
[0.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
],
pixel_scales=1.0,
)
kernel = aa.Kernel2D.manual_native(
array=[[1.0, 1.0, 1.0], [2.0, 2.0, 1.0], [1.0, 3.0, 3.0]], pixel_scales=1.0
)
blurred_image = kernel.convolved_array_from_array(image)
assert (
blurred_image.native
== np.array(
[
[1.0, 1.0, 1.0, 0.0],
[2.0, 3.0, 2.0, 1.0],
[1.0, 5.0, 5.0, 1.0],
[0.0, 1.0, 3.0, 3.0],
]
)
).all()
def test__image_is_4x4_values_are_on_edge__kernel_is_asymmetric__blurring_does_not_account_for_edge_effects(
self,
):
image = aa.Array2D.manual_native(
[
[0.0, 0.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 0.0],
],
pixel_scales=1.0,
)
kernel = aa.Kernel2D.manual_native(
array=[[1.0, 1.0, 1.0], [2.0, 2.0, 1.0], [1.0, 3.0, 3.0]], pixel_scales=1.0
)
blurred_image = kernel.convolved_array_from_array(image)
assert (
blurred_image.native
== np.array(
[
[1.0, 1.0, 0.0, 0.0],
[2.0, 1.0, 1.0, 1.0],
[3.0, 3.0, 2.0, 2.0],
[0.0, 0.0, 1.0, 3.0],
]
)
).all()
def test__image_is_4x4_values_are_on_corner__kernel_is_asymmetric__blurring_does_not_account_for_edge_effects(
self,
):
image = aa.Array2D.manual_native(
array=[
[1.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0],
],
pixel_scales=1.0,
)
kernel = aa.Kernel2D.manual_native(
array=[[1.0, 1.0, 1.0], [2.0, 2.0, 1.0], [1.0, 3.0, 3.0]], pixel_scales=1.0
)
blurred_image = kernel.convolved_array_from_array(image)
assert (
blurred_image.native
== np.array(
[
[2.0, 1.0, 0.0, 0.0],
[3.0, 3.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 2.0, 2.0],
]
)
).all()
class TestFromGaussian:
def test__identical_to_gaussian_light_profile(self):
kernel = aa.Kernel2D.from_gaussian(
shape_native=(3, 3),
pixel_scales=1.0,
centre=(0.1, 0.1),
axis_ratio=0.9,
angle=45.0,
sigma=1.0,
normalize=True,
)
assert kernel.native == pytest.approx(
np.array(
[
[0.06281, 0.13647, 0.0970],
[0.11173, 0.21589, 0.136477],
[0.065026, 0.11173, 0.06281],
]
),
1.0e-3,
)
class TestFromAlmaGaussian:
def test__identical_to_astropy_gaussian_model__circular_no_rotation(self):
pixel_scales = 0.1
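        # The stddev values below start as FITS-header FWHM beam widths in degrees;
        # they are converted to a Gaussian sigma in pixels via deg -> arcsec ->
        # pixels, using the standard relation FWHM = 2 * sqrt(2 * ln 2) * sigma.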
x_stddev = (
2.0e-5
* (units.deg).to(units.arcsec)
/ pixel_scales
/ (2.0 * np.sqrt(2.0 * np.log(2.0)))
)
y_stddev = (
2.0e-5
* (units.deg).to(units.arcsec)
/ pixel_scales
/ (2.0 * np.sqrt(2.0 * np.log(2.0)))
)
gaussian_astropy = functional_models.Gaussian2D(
amplitude=1.0,
x_mean=2.0,
y_mean=2.0,
x_stddev=x_stddev,
y_stddev=y_stddev,
theta=0.0,
)
shape = (5, 5)
y, x = np.mgrid[0 : shape[1], 0 : shape[0]]
kernel_astropy = gaussian_astropy(x, y)
kernel_astropy /= np.sum(kernel_astropy)
kernel = aa.Kernel2D.from_as_gaussian_via_alma_fits_header_parameters(
shape_native=shape,
pixel_scales=pixel_scales,
y_stddev=2.0e-5,
x_stddev=2.0e-5,
theta=0.0,
normalize=True,
)
assert kernel_astropy == pytest.approx(kernel.native, 1e-4)
def test__identical_to_astropy_gaussian_model__circular_no_rotation_different_pixel_scale(
self,
):
pixel_scales = 0.02
x_stddev = (
2.0e-5
* (units.deg).to(units.arcsec)
/ pixel_scales
/ (2.0 * np.sqrt(2.0 * np.log(2.0)))
)
y_stddev = (
2.0e-5
* (units.deg).to(units.arcsec)
/ pixel_scales
/ (2.0 * np.sqrt(2.0 * np.log(2.0)))
)
gaussian_astropy = functional_models.Gaussian2D(
amplitude=1.0,
x_mean=2.0,
y_mean=2.0,
x_stddev=x_stddev,
y_stddev=y_stddev,
theta=0.0,
)
shape = (5, 5)
y, x = np.mgrid[0 : shape[1], 0 : shape[0]]
kernel_astropy = gaussian_astropy(x, y)
kernel_astropy /= np.sum(kernel_astropy)
kernel = aa.Kernel2D.from_as_gaussian_via_alma_fits_header_parameters(
shape_native=shape,
pixel_scales=pixel_scales,
y_stddev=2.0e-5,
x_stddev=2.0e-5,
theta=0.0,
normalize=True,
)
assert kernel_astropy == pytest.approx(kernel.native, 1e-4)
def test__identical_to_astropy_gaussian_model__include_ellipticity_from_x_and_y_stddev(
self,
):
pixel_scales = 0.1
x_stddev = (
1.0e-5
* (units.deg).to(units.arcsec)
/ pixel_scales
/ (2.0 * np.sqrt(2.0 * np.log(2.0)))
)
y_stddev = (
2.0e-5
* (units.deg).to(units.arcsec)
/ pixel_scales
/ (2.0 * np.sqrt(2.0 * np.log(2.0)))
)
theta_deg = 0.0
theta = Angle(theta_deg, "deg").radian
gaussian_astropy = functional_models.Gaussian2D(
amplitude=1.0,
x_mean=2.0,
y_mean=2.0,
x_stddev=x_stddev,
y_stddev=y_stddev,
theta=theta,
)
shape = (5, 5)
y, x = np.mgrid[0 : shape[1], 0 : shape[0]]
kernel_astropy = gaussian_astropy(x, y)
kernel_astropy /= np.sum(kernel_astropy)
kernel = aa.Kernel2D.from_as_gaussian_via_alma_fits_header_parameters(
shape_native=shape,
pixel_scales=pixel_scales,
y_stddev=2.0e-5,
x_stddev=1.0e-5,
theta=theta_deg,
normalize=True,
)
assert kernel_astropy == pytest.approx(kernel.native, 1e-4)
def test__identical_to_astropy_gaussian_model__include_different_ellipticity_from_x_and_y_stddev(
self,
):
pixel_scales = 0.1
x_stddev = (
3.0e-5
* (units.deg).to(units.arcsec)
/ pixel_scales
/ (2.0 * np.sqrt(2.0 * np.log(2.0)))
)
y_stddev = (
2.0e-5
* (units.deg).to(units.arcsec)
/ pixel_scales
/ (2.0 * np.sqrt(2.0 * np.log(2.0)))
)
theta_deg = 0.0
theta = Angle(theta_deg, "deg").radian
gaussian_astropy = functional_models.Gaussian2D(
amplitude=1.0,
x_mean=2.0,
y_mean=2.0,
x_stddev=x_stddev,
y_stddev=y_stddev,
theta=theta,
)
shape = (5, 5)
y, x = np.mgrid[0 : shape[1], 0 : shape[0]]
kernel_astropy = gaussian_astropy(x, y)
kernel_astropy /= np.sum(kernel_astropy)
kernel = aa.Kernel2D.from_as_gaussian_via_alma_fits_header_parameters(
shape_native=shape,
pixel_scales=pixel_scales,
y_stddev=2.0e-5,
x_stddev=3.0e-5,
theta=theta_deg,
normalize=True,
)
assert kernel_astropy == pytest.approx(kernel.native, 1e-4)
def test__identical_to_astropy_gaussian_model__include_rotation_angle_30(self):
pixel_scales = 0.1
x_stddev = (
1.0e-5
* (units.deg).to(units.arcsec)
/ pixel_scales
/ (2.0 * np.sqrt(2.0 * np.log(2.0)))
)
y_stddev = (
2.0e-5
* (units.deg).to(units.arcsec)
/ pixel_scales
/ (2.0 * np.sqrt(2.0 * np.log(2.0)))
)
theta_deg = 30.0
theta = Angle(theta_deg, "deg").radian
gaussian_astropy = functional_models.Gaussian2D(
amplitude=1.0,
x_mean=1.0,
y_mean=1.0,
x_stddev=x_stddev,
y_stddev=y_stddev,
theta=theta,
)
shape = (3, 3)
y, x = np.mgrid[0 : shape[1], 0 : shape[0]]
kernel_astropy = gaussian_astropy(x, y)
kernel_astropy /= np.sum(kernel_astropy)
kernel = aa.Kernel2D.from_as_gaussian_via_alma_fits_header_parameters(
shape_native=shape,
pixel_scales=pixel_scales,
y_stddev=2.0e-5,
x_stddev=1.0e-5,
theta=theta_deg,
normalize=True,
)
assert kernel_astropy == pytest.approx(kernel.native, 1e-4)
def test__identical_to_astropy_gaussian_model__include_rotation_angle_230(self):
pixel_scales = 0.1
x_stddev = (
1.0e-5
* (units.deg).to(units.arcsec)
/ pixel_scales
/ (2.0 * np.sqrt(2.0 * np.log(2.0)))
)
y_stddev = (
2.0e-5
* (units.deg).to(units.arcsec)
/ pixel_scales
/ (2.0 * np.sqrt(2.0 * np.log(2.0)))
)
theta_deg = 230.0
theta = Angle(theta_deg, "deg").radian
gaussian_astropy = functional_models.Gaussian2D(
amplitude=1.0,
x_mean=1.0,
y_mean=1.0,
x_stddev=x_stddev,
y_stddev=y_stddev,
theta=theta,
)
shape = (3, 3)
y, x = np.mgrid[0 : shape[1], 0 : shape[0]]
kernel_astropy = gaussian_astropy(x, y)
kernel_astropy /= np.sum(kernel_astropy)
kernel = aa.Kernel2D.from_as_gaussian_via_alma_fits_header_parameters(
shape_native=shape,
pixel_scales=pixel_scales,
y_stddev=2.0e-5,
x_stddev=1.0e-5,
theta=theta_deg,
normalize=True,
)
assert kernel_astropy == pytest.approx(kernel.native, 1e-4)
``` |
{
"source": "jonathanfrawley/PyAutoBuild",
"score": 2
} |
#### File: jonathanfrawley/PyAutoBuild/build_util.py
```python
import glob
import datetime
import os
import re
import subprocess
TIMEOUT_SECS = 60
BUILD_PATH = os.getcwd()
def py_to_notebook(filename):
if filename == "temp.py":
return
# print(f'py_to_notebook: {filename}')
subprocess.run(
[
"python3",
f"{BUILD_PATH}/add_notebook_quotes/add_notebook_quotes.py",
filename,
"temp.py",
],
check=True,
)
subprocess.run(
["ipynb-py-convert", "temp.py", f'{filename.split(".py")[0]}.ipynb'], check=True
)
os.remove("temp.py")
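# Illustrative usage (not part of the original module; the filename is hypothetical
# and the add_notebook_quotes helper plus the ipynb-py-convert tool must be
# available on the system):
#
#   py_to_notebook("example.py")   # writes example.ipynb alongside the script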
def uncomment_jupyter_magic(f):
with open(f, "r") as sources:
lines = sources.readlines()
with open(f, "w") as sources:
for line in lines:
line = re.sub(r"# %matplotlib", "%matplotlib", line)
line = re.sub(r"# from pyproj", "from pyproj", line)
line = re.sub(r"# workspace_path", "workspace_path", line)
line = re.sub(r"# %cd", "%cd", line)
line = re.sub(r"# print\(f", "print(f", line)
sources.write(line)
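# Example of the rewriting performed above (illustrative): a notebook-source line
# such as "# %matplotlib inline" becomes "%matplotlib inline", re-enabling the
# Jupyter magics and workspace setup that are commented out in the plain .py script.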
def exexcute_notebooks_in_folder(ROOT_PATH, NOTEBOOKS_NO_RUN=None):
NOTEBOOKS_NO_RUN = NOTEBOOKS_NO_RUN or []
os.chdir(ROOT_PATH)
for x in [t[0] for t in os.walk(".")]:
notebooks_path = f"{ROOT_PATH}/{x}"
os.chdir(notebooks_path)
for f in glob.glob(f"*.ipynb"):
run_notebook = True
for no_run in NOTEBOOKS_NO_RUN:
if no_run in f:
run_notebook = False
if run_notebook:
execute_notebook(f)
def execute_notebook(f):
print(f"Running <{f}> at {datetime.datetime.now().isoformat()}")
try:
subprocess.run(
["jupyter", "nbconvert", "--to", "notebook", "--execute", "--output", f, f],
check=True,
timeout=TIMEOUT_SECS,
)
except subprocess.TimeoutExpired:
pass
# subprocess.run(['jupyter', 'nbconvert', '--to', 'notebook', '--execute', f'{f}'], check=True)
def execute_script(f):
args = ['python3', f]
print(f'Running <{args}>')
try:
subprocess.run(
args,
check=True,
timeout=TIMEOUT_SECS,
)
except subprocess.TimeoutExpired:
pass
# subprocess.run(['jupyter', 'nbconvert', '--to', 'notebook', '--execute', f'{f}'], check=True)
def execute_scripts_in_folder(workspace_path, folder, root_path, scripts_no_run=None):
scripts_no_run = scripts_no_run or []
os.chdir(root_path)
for script_dir in [t[0] for t in os.walk(".")]:
scripts_path = f"{root_path}/{script_dir}"
os.chdir(scripts_path)
files = glob.glob(f"*.py")
os.chdir(workspace_path)
for f in files:
run_script = True
for no_run in scripts_no_run:
if no_run in f:
run_script = False
if run_script:
execute_script(os.path.join('scripts', folder, script_dir, f))
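# Minimal usage sketch (illustrative; the workspace paths and the "howtofit" folder
# name are assumptions, not taken from the original repository): run every script
# under <workspace>/scripts/howtofit, skipping any whose filename contains "no_run".
#
#   execute_scripts_in_folder(
#       workspace_path="/path/to/workspace",
#       folder="howtofit",
#       root_path="/path/to/workspace/scripts/howtofit",
#       scripts_no_run=["no_run"],
#   )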
``` |
{
"source": "jonathanfrawley/PyAutoConf",
"score": 2
} |
#### File: PyAutoConf/test_autoconf/conftest.py
```python
import pathlib
import pytest
from autoconf import conf
@pytest.fixture(
name="files_directory"
)
def make_files_directory():
return pathlib.Path(
__file__
).parent / "files"
@pytest.fixture(
name="config"
)
def make_config(files_directory):
return conf.Config(
files_directory / "config",
files_directory / "default",
)
```
#### File: test_autoconf/json_prior/test_json_config.py
```python
import pytest
import autoconf as aconf
from autoconf.mock.mock_real import SphProfile
@pytest.fixture(name="geometry_profile_path")
def make_geometry_profile_path():
return ["autoconf", "mock", "mock_real", "SphProfile"]
def test_path_for_class(geometry_profile_path):
assert aconf.path_for_class(SphProfile) == geometry_profile_path
@pytest.mark.parametrize(
"config_dict, paths",
[
(
{
"autoconf.mock.mock_real.SphProfile": "test",
"autoconf.mock.mock_real.Other": "toast",
},
["autoconf.mock.mock_real.SphProfile", "autoconf.mock.mock_real.Other"],
),
(
{"autoconf.mock.mock_real": {"SphProfile": "test", "Other": "toast"}},
[
"autoconf.mock.mock_real",
"autoconf.mock.mock_real.SphProfile",
"autoconf.mock.mock_real.Other",
],
),
(
{"autoconf": {"mock": {"mock_real": {"SphProfile": "test", "Other": "toast"}}}},
[
"autoconf",
"autoconf.mock",
"autoconf.mock.mock_real",
"autoconf.mock.mock_real.SphProfile",
"autoconf.mock.mock_real.Other",
],
),
(
{"autoconf": {"mock": {"mock_real.SphProfile": "test", "mock_real.Other": "toast"}}},
[
"autoconf",
"autoconf.mock",
"autoconf.mock.mock_real.SphProfile",
"autoconf.mock.mock_real.Other",
],
),
({"SphProfile": "test", "Other": "toast"}, ["SphProfile", "Other"]),
(
{"mock_real.SphProfile": "test", "mock_real.Other": "toast"},
["mock_real.SphProfile", "mock_real.Other"],
),
(
{"mock_real": {"SphProfile": "test", "Other": "toast"}},
["mock_real", "mock_real.SphProfile", "mock_real.Other"],
),
],
)
def test_paths(config_dict, paths):
config = aconf.JSONPriorConfig(config_dict)
assert config.paths == paths
@pytest.mark.parametrize(
"config_dict",
[
{
"autoconf.mock.mock_real.SphProfile": "test",
"autoconf.mock.mock_real.Other": "toast",
},
{"autoconf.mock.mock_real": {"SphProfile": "test", "Other": "toast"}},
{"autoconf":{"mock": {"mock_real": {"SphProfile": "test", "Other": "toast"}}}},
{"autoconf":{"mock": {"mock_real.SphProfile": "test", "mock_real.Other": "toast"}}},
{"SphProfile": "test", "Other": "toast"},
{"mock_real": {"SphProfile": "test", "Other": "toast"}},
],
)
def test_config_for_path(geometry_profile_path, config_dict):
config = aconf.JSONPriorConfig(config_dict)
assert config(geometry_profile_path) == "test"
assert config(["autoconf", "mock", "mock_real", "Other"]) == "toast"
def test_path_double():
config = aconf.JSONPriorConfig({"mock_real": {"SphProfile": "test"}})
assert config(["something", "mock_real", "mock_real", "SphProfile"]) == "test"
``` |
{
"source": "jonathanfrawley/PyAutoFit",
"score": 3
} |
#### File: database/aggregator/aggregator.py
```python
from typing import Optional, List, Union
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, Session
from autofit.database import query as q
from .scrape import scrape_directory
from .. import model as m
from ..query.query import AbstractQuery, Attribute
class NullPredicate(AbstractQuery):
@property
def fit_query(self) -> str:
return "SELECT id FROM fit"
def __and__(self, other):
return other
class Query:
"""
API for creating a query on the best fit instance
"""
@staticmethod
def for_name(name: str) -> q.Q:
"""
Create a query for fits based on the name of a
top level instance attribute
Parameters
----------
name
The name of the attribute. e.g. galaxies
Returns
-------
A query generating object
"""
return q.Q(name)
def __getattr__(self, name):
return self.for_name(name)
class FitQuery(Query):
"""
API for creating a query on the attributes of a fit,
such as:
name
unique_tag
path_prefix
is_complete
is_grid_search
"""
@staticmethod
def for_name(name: str) -> Union[
AbstractQuery,
Attribute
]:
"""
Create a query based on some attribute of the Fit.
Parameters
----------
name
The name of an attribute of the Fit class
Returns
-------
A query based on an attribute
Examples
--------
        aggregator.search.name == 'example name'
"""
if name not in m.fit_attributes:
raise AttributeError(
f"Fit has no attribute {name}"
)
if m.fit_attributes[
name
].type.python_type == bool:
return q.BA(name)
return q.A(name)
class Aggregator:
def __init__(
self,
session: Session,
filename: Optional[str] = None,
predicate: AbstractQuery = NullPredicate(),
offset=0,
limit=None
):
"""
Query results from an intermediary SQLite database.
Results can be scraped from a directory structure and stored in the database.
Parameters
----------
session
A session for communicating with the database.
        filename
            The path of the database file, if the aggregator was created from one.
"""
self.session = session
self.filename = filename
self._fits = None
self._predicate = predicate
self._offset = offset
self._limit = limit
def __iter__(self):
return iter(
self.fits
)
@property
def search(self) -> FitQuery:
"""
An object facilitating queries on fit attributes such as:
name
unique_tag
path_prefix
is_complete
is_grid_search
"""
return FitQuery()
@property
def info(self):
"""
Query info associated with the fit in the info dictionary
"""
return q.AnonymousInfo()
def values(self, name: str) -> list:
"""
Retrieve the value associated with each fit with the given
parameter name
Parameters
----------
name
The name of some pickle, such as 'samples'
Returns
-------
A list of objects, one for each fit
"""
return [
fit[name]
for fit
in self
]
def __len__(self):
return len(self.fits)
def __eq__(self, other):
if isinstance(other, list):
return self.fits == other
return super().__eq__(other)
@property
def fits(self) -> List[m.Fit]:
"""
Lazily query the database for a list of Fit objects that
match the aggregator's predicate.
"""
if self._fits is None:
self._fits = self._fits_for_query(
self._predicate.fit_query
)
return self._fits
def map(self, func):
for fit in self.fits:
yield func(fit)
def __repr__(self):
return f"<{self.__class__.__name__} {self.filename} {len(self)}>"
def __getattr__(self, name: str) -> Union[AbstractQuery, q.A]:
"""
Facilitates query construction. If the Fit class has an
attribute with the given name then a predicate is generated
based on that attribute. Otherwise the query is assumed to
apply to the best fit instance.
Parameters
----------
name
The name of an attribute of the Fit class or the model
Returns
-------
A query
"""
return Query.for_name(name)
def __call__(self, predicate) -> "Aggregator":
"""
Concise query syntax
"""
return self.query(predicate)
def query(self, predicate: AbstractQuery) -> "Aggregator":
# noinspection PyUnresolvedReferences
"""
Apply a query on the model.
Parameters
----------
predicate
A predicate constructed to express which models should be included.
Returns
-------
A list of objects that match the predicate
Examples
--------
>>>
>>> aggregator = Aggregator.from_database(
>>> "my_database.sqlite"
>>> )
>>>
>>> lens = aggregator.galaxies.lens
>>>
>>> aggregator.filter((lens.bulge == EllSersicCore) & (lens.disk == EllSersic))
>>> aggregator.filter((lens.bulge == EllSersicCore) | (lens.disk == EllSersic))
"""
return self._new_with(
predicate=self._predicate & predicate
)
def _new_with(
self,
**kwargs
):
kwargs = {
"session": self.session,
"filename": self.filename,
"predicate": self._predicate,
**kwargs
}
return Aggregator(
**kwargs
)
def children(self) -> "Aggregator":
"""
An aggregator comprising the children of the fits encapsulated
by this aggregator. This is used to query children in a grid search.
"""
return Aggregator(
session=self.session,
filename=self.filename,
predicate=q.ChildQuery(
self._predicate
)
)
def __getitem__(self, item):
offset = self._offset
limit = self._limit
if isinstance(
item, int
):
return self.fits[item]
elif isinstance(
item, slice
):
if item.start is not None:
if item.start >= 0:
offset += item.start
else:
offset = len(self) + item.start
if item.stop is not None:
if item.stop >= 0:
limit = len(self) - item.stop - offset
else:
limit = len(self) + item.stop
return self._new_with(
offset=offset,
limit=limit
)
def _fits_for_query(
self,
query: str
) -> List[m.Fit]:
"""
Execute a raw SQL query and return a Fit object
for each Fit id returned by the query
Parameters
----------
query
A SQL query that selects ids from the fit table
Returns
-------
A list of fit objects, one for each id returned by the
query
"""
fit_ids = {
row[0]
for row
in self.session.execute(
query
)
}
return self.session.query(
m.Fit
).filter(
m.Fit.id.in_(
fit_ids
)
).offset(
self._offset
).limit(
self._limit
).all()
def add_directory(
self,
directory: str,
auto_commit=True
):
"""
Recursively search a directory for autofit results
and add them to this database.
Any pickles found in the pickles file are implicitly added
to the fit object.
Warnings
--------
If a directory is added twice then that will result in
duplicate entries in the database.
Parameters
----------
auto_commit
If True the session is committed writing the new objects
to the database
directory
A directory containing autofit results embedded in a
file structure
"""
for fit in scrape_directory(
directory
):
self.session.add(
fit
)
if auto_commit:
self.session.commit()
@classmethod
def from_database(
cls,
filename: str,
completed_only: bool = False
) -> "Aggregator":
"""
Create an instance from a sqlite database file.
If no file exists then one is created with the schema of the database.
Parameters
----------
        completed_only
            If True, only fits from searches that have completed are returned.
filename
The name of the database file.
Returns
-------
An aggregator connected to the database specified by the file.
"""
engine = create_engine(
f'sqlite:///{filename}'
)
session = sessionmaker(
bind=engine
)()
m.Base.metadata.create_all(
engine
)
aggregator = Aggregator(
session,
filename
)
if completed_only:
return aggregator(
aggregator.search.is_complete
)
return aggregator
```
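A hedged sketch of driving the aggregator above end to end. The database and output paths are illustrative, and the import path is inferred from the file location shown in the header rather than quoted from a documented public API.
```python
from autofit.database.aggregator.aggregator import Aggregator

# "results.sqlite" and "output" are assumed paths for this sketch.
agg = Aggregator.from_database("results.sqlite")
agg.add_directory("output")                        # scrape a results tree into the database

complete = agg(agg.search.is_complete)             # predicate on a Fit attribute
named = complete(complete.search.name == "example name")

samples_list = complete.values("samples")          # one pickled object per matching fit
first_ten = complete[:10]                          # slicing returns a new Aggregator
children = complete.children()                     # grid-search children, if any
```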
#### File: database/model/prior.py
```python
from typing import Union
from sqlalchemy import Column, Integer, ForeignKey
from autofit import mapper as af
from .model import Object
class CollectionPriorModel(Object):
"""
A collection
"""
__tablename__ = "collection_prior_model"
id = Column(
Integer,
ForeignKey(
"object.id"
),
primary_key=True,
)
__mapper_args__ = {
'polymorphic_identity': 'collection_prior_model'
}
@classmethod
def _from_object(
cls,
source: Union[
af.CollectionPriorModel,
list,
dict
]
):
instance = cls()
if not isinstance(
source,
af.CollectionPriorModel
):
source = af.CollectionPriorModel(
source
)
instance._add_children(
source.items()
)
instance.cls = af.CollectionPriorModel
return instance
class PriorModel(Object):
"""
A prior model
"""
__tablename__ = "prior_model"
id = Column(
Integer,
ForeignKey(
"object.id"
),
primary_key=True,
)
__mapper_args__ = {
'polymorphic_identity': 'prior_model'
}
@classmethod
def _from_object(
cls,
model: af.PriorModel,
):
instance = cls()
instance.cls = model.cls
instance._add_children(model.items())
return instance
def _make_instance(self):
instance = object.__new__(af.PriorModel)
instance.cls = self.cls
instance._assertions = []
return instance
class Prior(Object):
"""
A prior
"""
__tablename__ = "prior"
id = Column(
Integer,
ForeignKey(
"object.id"
),
primary_key=True,
)
__mapper_args__ = {
'polymorphic_identity': 'prior'
}
@classmethod
def _from_object(
cls,
model: af.Prior
):
instance = cls()
instance.cls = type(model)
instance._add_children(model.__dict__.items())
return instance
```
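A hedged round-trip sketch of the rows above, mirroring the serialisation tests further down in this document; the `db` and mock import paths are taken from those tests and assumed to re-export these classes.
```python
import autofit as af
from autofit import database as db
from autofit.mock import mock as m

# A PriorModel maps onto the PriorModel row above; calling the row rebuilds it.
row = db.Object.from_object(af.PriorModel(m.Gaussian))
assert isinstance(row, db.PriorModel)
assert row.cls is m.Gaussian

rebuilt = row()
assert rebuilt.cls is m.Gaussian
assert len(rebuilt.priors) == 3                  # one Prior row per child prior

# Plain dicts and lists are wrapped as CollectionPriorModel rows.
collection_row = db.Object.from_object({"model": af.PriorModel(m.Gaussian)})
assert isinstance(collection_row, db.CollectionPriorModel)
```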
#### File: autofit/graphical/declarative.py
```python
from abc import ABC, abstractmethod
from typing import Callable, cast, Set, List, Dict, Optional
import numpy as np
from autofit import ModelInstance, Analysis, DirectoryPaths
from autofit.graphical.expectation_propagation import AbstractFactorOptimiser
from autofit.graphical.expectation_propagation import EPMeanField
from autofit.graphical.expectation_propagation import EPOptimiser
from autofit.graphical.factor_graphs.factor import Factor
from autofit.graphical.factor_graphs.graph import FactorGraph
from autofit.graphical.messages import NormalMessage
from autofit.mapper.prior.prior import Prior
from autofit.mapper.prior_model.collection import CollectionPriorModel
from autofit.mapper.prior_model.prior_model import PriorModel, AbstractPriorModel
class AbstractModelFactor(Analysis, ABC):
@property
@abstractmethod
def model_factors(self) -> List["ModelFactor"]:
"""
A list of factors that comprise a PriorModel and corresponding fitness function
"""
def freeze(self):
for model_factor in self.model_factors:
model_factor.freeze()
@property
def priors(self) -> Set[Prior]:
"""
A set of all priors encompassed by the contained likelihood models
"""
return {
prior
for model
in self.model_factors
for prior
in model.prior_model.priors
}
@property
def prior_factors(self) -> List[Factor]:
"""
A list of factors that act as priors on latent variables. One factor exists
for each unique prior.
"""
return [
Factor(
cast(
Callable,
prior
),
x=prior
)
for prior
in self.priors
]
@property
def message_dict(self) -> Dict[Prior, NormalMessage]:
"""
Dictionary mapping priors to messages.
TODO: should support more than just GaussianPriors/NormalMessages
"""
return {
prior: NormalMessage.from_prior(
prior
)
for prior
in self.priors
}
@property
def graph(self) -> FactorGraph:
"""
The complete graph made by combining all factors and priors
"""
return cast(
FactorGraph,
np.prod(
[
model
for model
in self.model_factors
] + self.prior_factors
)
)
def mean_field_approximation(self) -> EPMeanField:
"""
Returns a EPMeanField of the factor graph
"""
return EPMeanField.from_approx_dists(
self.graph,
self.message_dict
)
def _make_ep_optimiser(
self,
optimiser: AbstractFactorOptimiser
) -> EPOptimiser:
return EPOptimiser(
self.graph,
default_optimiser=optimiser,
factor_optimisers={
factor: factor.optimiser
for factor in self.model_factors
if factor.optimiser is not None
}
)
def optimise(
self,
optimiser:
AbstractFactorOptimiser
) -> CollectionPriorModel:
"""
Use an EP Optimiser to optimise the graph associated with this collection
of factors and create a Collection to represent the results.
Parameters
----------
optimiser
An optimiser that acts on graphs
Returns
-------
A collection of prior models
"""
self.freeze()
opt = self._make_ep_optimiser(
optimiser
)
updated_model = opt.run(
self.mean_field_approximation()
)
collection = CollectionPriorModel([
factor.prior_model
for factor
in self.model_factors
])
arguments = {
prior: updated_model.mean_field[
prior
].as_prior()
for prior
in collection.priors
}
return collection.gaussian_prior_model_for_arguments(
arguments
)
def visualize(
self,
paths: DirectoryPaths,
instance: ModelInstance,
during_analysis: bool
):
"""
Visualise the instances provided using each factor.
Instances in the ModelInstance must have the same order as the factors.
Parameters
----------
paths
Object describing where data should be saved to
instance
A collection of instances, each corresponding to a factor
during_analysis
Is this visualisation during analysis?
"""
for model_factor, instance in zip(
self.model_factors,
instance
):
model_factor.visualize(
paths,
instance,
during_analysis
)
def log_likelihood_function(
self,
instance: ModelInstance
) -> float:
"""
Compute the combined likelihood of each factor from a collection of instances
with the same ordering as the factors.
Parameters
----------
instance
A collection of instances, one corresponding to each factor
Returns
-------
The combined likelihood of all factors
"""
likelihood = abs(
self.model_factors[0].analysis.log_likelihood_function(
instance[0]
)
)
for model_factor, instance_ in zip(
self.model_factors[1:],
instance[1:]
):
likelihood *= abs(
model_factor.analysis.log_likelihood_function(
instance_
)
)
return -likelihood
@property
def global_prior_model(self) -> CollectionPriorModel:
"""
A collection of prior models, with one model for each factor.
"""
return CollectionPriorModel([
model_factor.prior_model
for model_factor
in self.model_factors
])
class ModelFactor(Factor, AbstractModelFactor):
def __init__(
self,
prior_model: AbstractPriorModel,
analysis: Analysis,
optimiser: Optional[AbstractFactorOptimiser] = None
):
"""
A factor in the graph that actually computes the likelihood of a model
given values for each variable that model contains
Parameters
----------
prior_model
A model with some dimensionality
analysis
A class that implements a function which evaluates how well an
instance of the model fits some data
optimiser
A custom optimiser that will be used to fit this factor specifically
instead of the default optimiser
"""
self.prior_model = prior_model
self.analysis = analysis
self.optimiser = optimiser
prior_variable_dict = {
prior.name: prior
for prior
in prior_model.priors
}
def _factor(
**kwargs: np.ndarray
) -> float:
"""
Returns an instance of the prior model and evaluates it, forming
a factor.
Parameters
----------
kwargs
Arguments with names that are unique for each prior.
Returns
-------
Calculated likelihood
"""
arguments = dict()
for name, array in kwargs.items():
prior_id = int(name.split("_")[1])
prior = prior_model.prior_with_id(
prior_id
)
arguments[prior] = array
instance = prior_model.instance_for_arguments(
arguments
)
return analysis.log_likelihood_function(
instance
)
super().__init__(
_factor,
**prior_variable_dict
)
def freeze(self):
self.prior_model.freeze()
@property
def model_factors(self) -> List["ModelFactor"]:
return [self]
def optimise(self, optimiser) -> PriorModel:
"""
Optimise this factor on its own returning a PriorModel
representing the final state of the messages.
Parameters
----------
optimiser
Returns
-------
A PriorModel representing the optimised factor
"""
return super().optimise(
optimiser
)[0]
class FactorGraphModel(AbstractModelFactor):
def __init__(self, *model_factors: ModelFactor):
"""
A collection of factors that describe models, which can be
used to create a graph and messages.
If the models have shared priors then the graph has shared variables
Parameters
----------
        model_factors
            The factors, one per likelihood model, that make up the graph.
"""
self._model_factors = model_factors
@property
def model_factors(self):
return self._model_factors
```
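A hedged sketch of how the declarative classes above compose. The toy analysis and data are assumptions, and the optimiser is left abstract: any `AbstractFactorOptimiser` implementation could be passed to `optimise`.
```python
import autofit as af
from autofit.graphical.declarative import ModelFactor, FactorGraphModel
from autofit.mock import mock as m


class ToyAnalysis(af.Analysis):
    """Illustrative analysis scoring a Gaussian's centre against toy data."""

    def __init__(self, data):
        self.data = data

    def log_likelihood_function(self, instance):
        return -sum((d - instance.centre) ** 2.0 for d in self.data)


model = af.PriorModel(m.Gaussian)

# Two factors built from the same PriorModel share its priors, so the
# combined factor graph has shared variables between them.
factor_a = ModelFactor(model, ToyAnalysis(data=[0.9, 1.1]))
factor_b = ModelFactor(model, ToyAnalysis(data=[1.0, 1.2]))

graph_model = FactorGraphModel(factor_a, factor_b)
# graph_model.optimise(optimiser) would then run expectation propagation over
# the combined graph, given an AbstractFactorOptimiser instance.
```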
#### File: mapper/prior/prior.py
```python
import inspect
import math
import sys
from abc import ABC, abstractmethod
from typing import Union, Tuple
import numpy as np
from scipy import stats
from scipy.special import erfcinv
from autoconf import conf
from autofit import exc
from autofit.mapper.model_object import ModelObject
from autofit.mapper.prior.arithmetic import ArithmeticMixin
from autofit.mapper.prior.deferred import DeferredArgument
from autofit.mapper.prior_model.attribute_pair import (
cast_collection,
PriorNameValue,
InstanceNameValue,
)
from autofit.mapper.variable import Variable
class WidthModifier:
def __init__(self, value):
self.value = float(value)
@classmethod
def name_of_class(cls) -> str:
"""
A string name for the class, with the prior suffix removed.
"""
return cls.__name__.replace("WidthModifier", "")
@classmethod
def from_dict(cls, width_modifier_dict):
return width_modifier_type_dict[width_modifier_dict["type"]](
value=width_modifier_dict["value"]
)
@property
def dict(self):
return {"type": self.name_of_class(), "value": self.value}
@staticmethod
def for_class_and_attribute_name(cls, attribute_name):
prior_dict = conf.instance.prior_config.for_class_and_suffix_path(
cls, [attribute_name, "width_modifier"]
)
return WidthModifier.from_dict(prior_dict)
def __eq__(self, other):
return self.__class__ is other.__class__ and self.value == other.value
class Limits:
@staticmethod
def for_class_and_attributes_name(cls, attribute_name):
limit_dict = conf.instance.prior_config.for_class_and_suffix_path(
cls, [attribute_name, "gaussian_limits"]
)
return limit_dict["lower"], limit_dict["upper"]
class RelativeWidthModifier(WidthModifier):
def __call__(self, mean):
return self.value * mean
class AbsoluteWidthModifier(WidthModifier):
def __call__(self, _):
return self.value
class TuplePrior(ModelObject):
"""
A prior comprising one or more priors in a tuple
"""
@property
@cast_collection(PriorNameValue)
def prior_tuples(self):
"""
Returns
-------
priors: [(String, Prior)]
A list of priors contained in this tuple
"""
return list(filter(lambda t: isinstance(t[1], Prior), self.__dict__.items()))
@property
def unique_prior_tuples(self):
return self.prior_tuples
@property
@cast_collection(InstanceNameValue)
def instance_tuples(self):
"""
Returns
-------
instances: [(String, instance)]
A list of instances
"""
return list(
sorted(
filter(lambda t: isinstance(t[1], float), self.__dict__.items()),
key=lambda tup: tup[0],
)
)
def value_for_arguments(self, arguments):
"""
Parameters
----------
arguments: {Prior: float}
A dictionary of arguments
Returns
-------
tuple: (float,...)
A tuple of float values
"""
def convert(tup):
if hasattr(tup, "prior"):
return arguments[tup.prior]
return tup.instance
return tuple(
map(
convert,
sorted(
self.prior_tuples + self.instance_tuples, key=lambda tup: tup.name
),
)
)
def gaussian_tuple_prior_for_arguments(self, arguments):
"""
Parameters
----------
arguments: {Prior: float}
A dictionary of arguments
Returns
-------
tuple_prior: TuplePrior
A new tuple prior with gaussian priors
"""
tuple_prior = TuplePrior()
for prior_tuple in self.prior_tuples:
setattr(tuple_prior, prior_tuple.name, arguments[prior_tuple.prior])
return tuple_prior
def __getitem__(self, item):
return self.prior_tuples[item][1]
class Prior(Variable, ABC, ArithmeticMixin):
def __init__(self, lower_limit=0.0, upper_limit=1.0):
"""
        An object used to map a unit value to an attribute value for a specific
class attribute.
Parameters
----------
lower_limit: Float
The lowest value this prior can return
upper_limit: Float
The highest value this prior can return
"""
super().__init__()
self.lower_limit = float(lower_limit)
self.upper_limit = float(upper_limit)
if self.lower_limit >= self.upper_limit:
raise exc.PriorException(
"The upper limit of a prior must be greater than its lower limit"
)
def assert_within_limits(self, value):
if not (self.lower_limit <= value <= self.upper_limit):
raise exc.PriorLimitException(
"The physical value {} for a prior "
"was not within its limits {}, {}".format(
value, self.lower_limit, self.upper_limit
)
)
@staticmethod
def for_class_and_attribute_name(cls, attribute_name):
prior_dict = conf.instance.prior_config.for_class_and_suffix_path(
cls, [attribute_name]
)
return Prior.from_dict(prior_dict)
@property
def width(self):
return self.upper_limit - self.lower_limit
@abstractmethod
def value_for(self, unit: float) -> float:
"""
Return a physical value for a value between 0 and 1 with the transformation
described by this prior.
Parameters
----------
unit
A hypercube value between 0 and 1.
Returns
-------
A physical value.
"""
def instance_for_arguments(self, arguments):
return arguments[self]
def __eq__(self, other):
try:
return self.id == other.id
except AttributeError:
return False
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self.id)
def __repr__(self):
return "<{} id={} lower_limit={} upper_limit={}>".format(
self.__class__.__name__, self.id, self.lower_limit, self.upper_limit
)
@classmethod
def from_dict(cls, prior_dict: dict) -> Union["Prior", DeferredArgument]:
"""
Returns a prior from a JSON representation.
Parameters
----------
prior_dict : dict
A dictionary representation of a prior including a type (e.g. Uniform) and all constructor arguments.
Returns
-------
An instance of a child of this class.
"""
if prior_dict["type"] == "Constant":
return prior_dict["value"]
if prior_dict["type"] == "Deferred":
return DeferredArgument()
# noinspection PyProtectedMember
return prior_type_dict[prior_dict["type"]](
**{
key: value
for key, value in prior_dict.items()
if key not in ("type", "width_modifier", "gaussian_limits")
}
)
@property
def dict(self) -> dict:
"""
A dictionary representation of this prior
"""
prior_dict = {
"lower_limit": self.lower_limit,
"upper_limit": self.upper_limit,
"type": self.name_of_class(),
}
return prior_dict
@classmethod
def name_of_class(cls) -> str:
"""
A string name for the class, with the prior suffix removed.
"""
return cls.__name__.replace("Prior", "")
@property
def limits(self) -> Tuple[float, float]:
return self.lower_limit, self.upper_limit
class GaussianPrior(Prior):
"""A prior with a gaussian distribution"""
__name__ = "gaussian_prior"
def __init__(self, mean, sigma, lower_limit=-math.inf, upper_limit=math.inf):
super().__init__(lower_limit, upper_limit)
self.mean = float(mean)
self.sigma = float(sigma)
self._log_pdf = None
@property
def logpdf(self):
if self._log_pdf is None:
norm = stats.norm(
loc=self.mean,
scale=self.sigma
)
self._log_pdf = norm.logpdf
return self._log_pdf
def __call__(self, x):
return self.logpdf(x)
def value_for(self, unit):
"""
Parameters
----------
unit: Float
A unit hypercube value between 0 and 1
Returns
-------
value: Float
A value for the attribute biased to the gaussian distribution
"""
return self.mean + (self.sigma * math.sqrt(2) * erfcinv(2.0 * (1.0 - unit)))
def log_prior_from_value(self, value):
"""
Returns the log prior of a physical value, so the log likelihood of a model evaluation can be converted to a
posterior as log_prior + log_likelihood.
This is used by Emcee in the log likelihood function evaluation.
Parameters
----------
value : float
The physical value of this prior's corresponding parameter in a `NonLinearSearch` sample."""
return (value - self.mean) ** 2.0 / (2 * self.sigma ** 2.0)
def __str__(self):
"""The line of text describing this prior for the model_mapper.info file"""
return (
"GaussianPrior, mean = " + str(self.mean) + ", sigma = " + str(self.sigma)
)
def __repr__(self):
return (
"<GaussianPrior id={} mean={} sigma={} "
"lower_limit={} upper_limit={}>".format(
self.id, self.mean, self.sigma, self.lower_limit, self.upper_limit
)
)
@property
def dict(self) -> dict:
"""
A dictionary representation of this prior
"""
prior_dict = super().dict
return {**prior_dict, "mean": self.mean, "sigma": self.sigma}
class UniformPrior(Prior):
"""A prior with a uniform distribution between a lower and upper limit"""
def value_for(self, unit):
"""
Parameters
----------
unit: Float
A unit hypercube value between 0 and 1
Returns
-------
value: Float
A value for the attribute between the upper and lower limits
"""
return self.lower_limit + unit * (self.upper_limit - self.lower_limit)
def log_prior_from_value(self, value):
"""
Returns the log prior of a physical value, so the log likelihood of a model evaluation can be converted to a
posterior as log_prior + log_likelihood.
This is used by Emcee in the log likelihood function evaluation.
NOTE: For a UniformPrior this is always zero, provided the value is between the lower and upper limit. Given
        that this is checked when the instance is made (in the *instance_from_vector* function), we can simply return
zero in this function.
Parameters
----------
value : float
The physical value of this prior's corresponding parameter in a `NonLinearSearch` sample."""
return 0.0
@property
def mean(self):
return self.lower_limit + (self.upper_limit - self.lower_limit) / 2
@mean.setter
def mean(self, new_value):
difference = new_value - self.mean
self.lower_limit += difference
self.upper_limit += difference
def __str__(self):
"""The line of text describing this prior for the model_mapper.info file"""
return (
"UniformPrior, lower_limit = "
+ str(self.lower_limit)
+ ", upper_limit = "
+ str(self.upper_limit)
)
class LogUniformPrior(UniformPrior):
"""A prior with a uniform distribution between a lower and upper limit"""
def __init__(self, lower_limit=1e-6, upper_limit=1.0):
"""
        An object used to map a unit value to an attribute value for a specific
class attribute.
Parameters
----------
lower_limit: Float
The lowest value this prior can return
upper_limit: Float
The highest value this prior can return
"""
super().__init__(lower_limit=lower_limit, upper_limit=upper_limit)
if (self.lower_limit <= 0.0):
raise exc.PriorException(
"The lower limit of a LogUniformPrior cannot be zero or negative."
)
def value_for(self, unit):
"""
Parameters
----------
unit: Float
A unit hypercube value between 0 and 1
Returns
-------
value: Float
A value for the attribute between the upper and lower limits
"""
return 10.0 ** (
np.log10(self.lower_limit)
+ unit * (np.log10(self.upper_limit) - np.log10(self.lower_limit))
)
def log_prior_from_value(self, value):
"""
Returns the log prior of a physical value, so the log likelihood of a model evaluation can be converted to a
posterior as log_prior + log_likelihood.
This is used by Emcee in the log likelihood function evaluation.
Parameters
----------
value : float
The physical value of this prior's corresponding parameter in a `NonLinearSearch` sample."""
return 1.0 / value
def __str__(self):
"""The line of text describing this prior for the model_mapper.info file"""
return (
"LogUniformPrior, lower_limit = "
+ str(self.lower_limit)
+ ", upper_limit = "
+ str(self.upper_limit)
)
def make_type_dict(cls):
return {
obj.name_of_class(): obj
for _, obj in inspect.getmembers(sys.modules[__name__])
if (inspect.isclass(obj) and issubclass(obj, cls) and obj != Prior)
}
prior_type_dict = make_type_dict(Prior)
width_modifier_type_dict = make_type_dict(WidthModifier)
```
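A hedged sketch of the unit-to-physical mapping and the dict round trip defined above; the import path follows the `autofit.mapper.prior.prior` module referenced by the grid-search code elsewhere in this document.
```python
from autofit.mapper.prior.prior import (
    GaussianPrior,
    LogUniformPrior,
    Prior,
    UniformPrior,
)

# value_for maps a unit hypercube value in [0, 1] to a physical value.
uniform = UniformPrior(lower_limit=0.0, upper_limit=10.0)
assert uniform.value_for(0.5) == 5.0

gaussian = GaussianPrior(mean=1.0, sigma=2.0)
assert abs(gaussian.value_for(0.5) - 1.0) < 1e-12   # the median is the mean

log_uniform = LogUniformPrior(lower_limit=1e-3, upper_limit=1.0)
assert abs(log_uniform.value_for(0.5) - 10.0 ** -1.5) < 1e-10

# Priors round-trip through their dict representation via prior_type_dict.
rebuilt = Prior.from_dict(uniform.dict)
assert isinstance(rebuilt, UniformPrior)
assert rebuilt.limits == (0.0, 10.0)
```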
#### File: grid/grid_search/__init__.py
```python
import copy
from os import path
from typing import List, Tuple, Union
from autofit import exc
from autofit.mapper.prior import prior as p
from autofit.non_linear.parallel import Process
from .job import Job
from .result import GridSearchResult
class GridSearch:
def __init__(self, search, number_of_steps=4, number_of_cores=1):
"""
Performs a non linear optimiser search for each square in a grid. The dimensionality of the search depends on
        the number of distinct priors passed to the fit function. (1 / step_size) ^ no_dimensions optimisations are
        performed in total, one for each square of the grid.
Parameters
----------
number_of_steps: int
The number of steps to go in each direction
search: class
The class of the search that is run at each step
"""
self.paths = search.paths
self.number_of_cores = number_of_cores or 1
if self.number_of_cores == 1:
self.parallel = False
else:
self.parallel = True
self.number_of_steps = number_of_steps
self.search = search
self.prior_passer = search.prior_passer
@property
def step_size(self):
"""
Returns
-------
step_size: float
The size of a step in any given dimension in hyper space.
"""
return 1 / self.number_of_steps
def make_physical_lists(self, grid_priors) -> List[List[float]]:
lists = self.make_lists(grid_priors)
return [
[prior.value_for(value) for prior, value in zip(grid_priors, l)]
for l in lists
]
def make_lists(self, grid_priors):
"""
Produces a list of lists of floats, where each list of floats represents the values in each dimension for one
step of the grid search.
Parameters
----------
grid_priors: [p.Prior]
A list of priors that are to be searched using the grid search.
Returns
-------
lists: [[float]]
"""
return make_lists(
len(grid_priors), step_size=self.step_size, centre_steps=False
)
def make_arguments(self, values, grid_priors):
arguments = {}
for value, grid_prior in zip(values, grid_priors):
if (
float("-inf") == grid_prior.lower_limit
or float("inf") == grid_prior.upper_limit
):
raise exc.PriorException(
"Priors passed to the grid search must have definite limits"
)
lower_limit = grid_prior.lower_limit + value * grid_prior.width
upper_limit = (
grid_prior.lower_limit
+ (value + self.step_size) * grid_prior.width
)
prior = p.UniformPrior(lower_limit=lower_limit, upper_limit=upper_limit)
arguments[grid_prior] = prior
return arguments
def model_mappers(self, model, grid_priors):
grid_priors = list(set(grid_priors))
lists = self.make_lists(grid_priors)
for values in lists:
arguments = self.make_arguments(values, grid_priors)
yield model.mapper_from_partial_prior_arguments(arguments)
def fit(self, model, analysis, grid_priors):
"""
Fit an analysis with a set of grid priors. The grid priors are priors associated with the model mapper
of this instance that are replaced by uniform priors for each step of the grid search.
Parameters
----------
model
analysis: autofit.non_linear.non_linear.Analysis
An analysis used to determine the fitness of a given model instance
grid_priors: [p.Prior]
A list of priors to be substituted for uniform priors across the grid.
Returns
-------
result: GridSearchResult
An object that comprises the results from each individual fit
"""
func = self.fit_parallel if self.parallel else self.fit_sequential
return func(
model=model,
analysis=analysis,
grid_priors=grid_priors
)
def fit_parallel(self, model, analysis, grid_priors):
"""
Perform the grid search in parallel, with all the optimisation for each grid square being performed on a
different process.
Parameters
----------
analysis
An analysis
grid_priors
Priors describing the position in the grid
Returns
-------
result: GridSearchResult
The result of the grid search
"""
grid_priors = list(set(grid_priors))
results = []
lists = self.make_lists(grid_priors)
physical_lists = self.make_physical_lists(grid_priors)
results_list = [
["index"]
+ list(map(model.name_for_prior, grid_priors))
+ ["likelihood_merit"]
]
for result in Process.run_jobs(
self.make_jobs(
model,
analysis,
grid_priors
),
self.number_of_cores
):
results.append(result)
results = sorted(results)
results_list.append(result.result_list_row)
self.write_results(results_list)
return GridSearchResult(
[
result.result
for result
in results
],
lists,
physical_lists
)
def fit_sequential(self, model, analysis, grid_priors):
"""
Perform the grid search sequentially, with all the optimisation for each grid square being performed on the
same process.
Parameters
----------
analysis
An analysis
grid_priors
Priors describing the position in the grid
Returns
-------
result: GridSearchResult
The result of the grid search
"""
grid_priors = list(sorted(set(grid_priors), key=lambda prior: prior.id))
results = []
lists = self.make_lists(grid_priors)
physical_lists = self.make_physical_lists(grid_priors)
results_list = [
["index"]
+ list(map(model.name_for_prior, grid_priors))
+ ["max_log_likelihood"]
]
for job in self.make_jobs(
model,
analysis,
grid_priors
):
result = job.perform()
results.append(result.result)
results_list.append(result.result_list_row)
self.write_results(results_list)
return GridSearchResult(results, lists, physical_lists)
def make_jobs(self, model, analysis, grid_priors):
grid_priors = list(set(grid_priors))
lists = self.make_lists(grid_priors)
jobs = list()
for index, values in enumerate(lists):
jobs.append(
self.job_for_analysis_grid_priors_and_values(
analysis=copy.deepcopy(analysis),
model=model,
grid_priors=grid_priors,
values=values,
index=index,
)
)
return jobs
def write_results(self, results_list):
with open(path.join(self.paths.output_path, "results"), "w+") as f:
f.write(
"\n".join(
map(
lambda ls: ", ".join(
map(
lambda value: "{:.2f}".format(value)
if isinstance(value, float)
else str(value),
ls,
)
),
results_list,
)
)
)
def job_for_analysis_grid_priors_and_values(
self, model, analysis, grid_priors, values, index
):
self.paths.model = model
self.paths.search = self
arguments = self.make_arguments(values=values, grid_priors=grid_priors)
model = model.mapper_from_partial_prior_arguments(arguments=arguments)
labels = []
for prior in sorted(arguments.values(), key=lambda pr: pr.id):
labels.append(
"{}_{:.2f}_{:.2f}".format(
model.name_for_prior(prior), prior.lower_limit, prior.upper_limit
)
)
name_path = path.join(
self.paths.name,
self.paths.identifier,
"_".join(labels),
)
search_instance = self.search_instance(name_path=name_path)
search_instance.paths.model = model
return Job(
search_instance=search_instance,
model=model,
analysis=analysis,
arguments=arguments,
index=index,
)
def search_instance(self, name_path):
search_instance = self.search.copy_with_paths(
self.paths.create_child(
name=name_path,
path_prefix=self.paths.path_prefix,
is_identifier_in_paths=False
)
)
for key, value in self.__dict__.items():
if key not in ("model", "instance", "paths"):
try:
setattr(search_instance, key, value)
except AttributeError:
pass
return search_instance
def grid(fitness_function, no_dimensions, step_size):
"""
    Grid search using a fitness function over a given number of dimensions and a given step size between inclusive
limits of 0 and 1.
Parameters
----------
fitness_function: function
A function that takes a tuple of floats as an argument
no_dimensions: int
The number of dimensions of the grid search
step_size: float
The step size of the grid search
Returns
-------
best_arguments: tuple[float]
The tuple of arguments that gave the highest fitness
"""
best_fitness = float("-inf")
best_arguments = None
for arguments in make_lists(no_dimensions, step_size):
fitness = fitness_function(tuple(arguments))
if fitness > best_fitness:
best_fitness = fitness
best_arguments = tuple(arguments)
return best_arguments
def make_lists(
no_dimensions: int,
step_size: Union[Tuple[float], float],
centre_steps=True
):
"""
    Returns a list of lists of floats covering every combination across no_dimensions of points spaced by step_size
between 0 and 1 inclusive.
Parameters
----------
no_dimensions
The number of dimensions, that is the length of the lists
step_size
The step size. This can be a float or a tuple with the same number of dimensions
    centre_steps
        If True, return the centre of each cell rather than its lower corner.
Returns
-------
lists: [[float]]
A list of lists
"""
if isinstance(step_size, float):
step_size = tuple(
step_size
for _
in range(no_dimensions)
)
if no_dimensions == 0:
return [[]]
sub_lists = make_lists(
no_dimensions - 1,
step_size[1:],
centre_steps=centre_steps
)
step_size = step_size[0]
return [
[
step_size * value + (
0.5 * step_size
if centre_steps
else 0)
] + sub_list
for value in range(int((1 / step_size)))
for sub_list in sub_lists
]
```
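A hedged sketch of the grid geometry produced by `make_lists`; the import path is inferred from the `autofit.non_linear.grid.grid_search` package layout used elsewhere in this document.
```python
from autofit.non_linear.grid.grid_search import make_lists

# With two dimensions and a step size of 0.5 there are 2 ** 2 grid cells.
# centre_steps=False yields the lower corner of each cell, which is what
# GridSearch.make_lists above requests.
assert make_lists(2, step_size=0.5, centre_steps=False) == [
    [0.0, 0.0],
    [0.0, 0.5],
    [0.5, 0.0],
    [0.5, 0.5],
]

# centre_steps=True (the default) yields cell centres instead.
assert make_lists(1, step_size=0.25) == [[0.125], [0.375], [0.625], [0.875]]
```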
#### File: grid/grid_search/result.py
```python
from typing import List
import numpy as np
from autofit import exc
from autofit.mapper import model_mapper as mm
from autofit.non_linear.result import Result
class GridSearchResult:
def __init__(
self,
results: List[Result],
lower_limit_lists: List[List[float]],
physical_lower_limits_lists: List[List[float]],
):
"""
The result of a grid search.
Parameters
----------
results
The results of the non linear optimizations performed at each grid step
lower_limit_lists
A list of lists of values representing the lower bounds of the grid searched values at each step
physical_lower_limits_lists
A list of lists of values representing the lower physical bounds of the grid search values
at each step.
"""
self.lower_limit_lists = lower_limit_lists
self.physical_lower_limits_lists = physical_lower_limits_lists
self.results = results
self.no_dimensions = len(self.lower_limit_lists[0])
self.no_steps = len(self.lower_limit_lists)
self.side_length = int(self.no_steps ** (1 / self.no_dimensions))
def __getattr__(self, item: str) -> object:
"""
We default to getting attributes from the best result. This allows promises to reference best results.
"""
return getattr(self.best_result, item)
def __getstate__(self):
return self.__dict__
def __setstate__(self, state):
self.__dict__.update(state)
@property
def shape(self):
return tuple([
self.side_length
for _ in range(
self.no_dimensions
)
])
@property
def best_result(self):
"""
The best result of the grid search. That is, the result output by the non linear search that had the highest
maximum figure of merit.
Returns
-------
best_result: Result
"""
best_result = None
for result in self.results:
if (
best_result is None
or result.log_likelihood > best_result.log_likelihood
):
best_result = result
return best_result
@property
def best_model(self):
"""
Returns
-------
best_model: mm.ModelMapper
The model mapper instance associated with the highest figure of merit from the grid search
"""
return self.best_result.model
@property
def all_models(self):
"""
Returns
-------
all_models: [mm.ModelMapper]
All model mapper instances used in the grid search
"""
return [result.model for result in self.results]
@property
def physical_step_sizes(self):
physical_step_sizes = []
# TODO : Make this work for all dimensions in a less ugly way.
for dim in range(self.no_dimensions):
values = [value[dim] for value in self.physical_lower_limits_lists]
diff = [abs(values[n] - values[n - 1]) for n in range(1, len(values))]
if dim == 0:
physical_step_sizes.append(np.max(diff))
elif dim == 1:
physical_step_sizes.append(np.min(diff))
else:
raise exc.GridSearchException(
"This feature does not support > 2 dimensions"
)
return tuple(physical_step_sizes)
@property
def physical_centres_lists(self):
return [
[
lower_limit[dim] + self.physical_step_sizes[dim] / 2
for dim in range(self.no_dimensions)
]
for lower_limit in self.physical_lower_limits_lists
]
@property
def physical_upper_limits_lists(self):
return [
[
lower_limit[dim] + self.physical_step_sizes[dim]
for dim in range(self.no_dimensions)
]
for lower_limit in self.physical_lower_limits_lists
]
@property
def results_reshaped(self):
"""
Returns
-------
likelihood_merit_array: np.ndarray
            An array of figures of merit. This array has the same dimensionality as the grid search, with the value in
each entry being the figure of merit taken from the optimization performed at that point.
"""
return np.reshape(
np.array([result for result in self.results]),
tuple(self.side_length for _ in range(self.no_dimensions)),
)
@property
def max_log_likelihood_values(self):
"""
Returns
-------
likelihood_merit_array: np.ndarray
            An array of figures of merit. This array has the same dimensionality as the grid search, with the value in
each entry being the figure of merit taken from the optimization performed at that point.
"""
return np.reshape(
np.array([result.log_likelihood for result in self.results]),
tuple(self.side_length for _ in range(self.no_dimensions)),
)
@property
def log_evidence_values(self):
"""
Returns
-------
likelihood_merit_array: np.ndarray
            An array of figures of merit. This array has the same dimensionality as the grid search, with the value in
each entry being the figure of merit taken from the optimization performed at that point.
"""
return np.reshape(
np.array([result.samples.log_evidence for result in self.results]),
tuple(self.side_length for _ in range(self.no_dimensions)),
)
``` |
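A minimal construction of the result class above, mirroring the unpickling test near the end of this document; the placeholder `Result` with `samples=None` comes from that test rather than from a real search.
```python
import autofit as af

# A one-step, one-dimensional grid: the derived geometry follows directly.
result = af.GridSearchResult(
    [af.Result(samples=None, previous_model=None)],
    lower_limit_lists=[[0.0]],
    physical_lower_limits_lists=[[0.0]],
)
assert result.no_dimensions == 1
assert result.no_steps == 1
assert result.side_length == 1
assert result.shape == (1,)
```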
{
"source": "jonathanfrawley/PyAutoFit_copy",
"score": 3
} |
#### File: autofit/database/model.py
```python
import importlib
import re
from typing import List, Tuple, Any, Iterable, Union, ItemsView
from sqlalchemy import Column, Integer, String, ForeignKey
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
import autofit as af
Base = declarative_base()
_schema_version = 1
class Object(Base):
__tablename__ = "object"
type = Column(
String
)
id = Column(
Integer,
primary_key=True,
)
parent_id = Column(
Integer,
ForeignKey(
"object.id"
)
)
parent = relationship(
"Object",
uselist=False,
remote_side=[id]
)
children: List["Object"] = relationship(
"Object",
uselist=True,
)
name = Column(String)
__mapper_args__ = {
'polymorphic_identity': 'object',
'polymorphic_on': type
}
# noinspection PyProtectedMember
@classmethod
def from_object(
cls,
source,
name=None
):
"""
Create a database object for an object in a model.
The specific database class used depends on the type of
the object.
Parameters
----------
source
A model
name
The name of the object wrt its parent
Returns
-------
An instance of a concrete child of this class
"""
if source is None:
from .instance import NoneInstance
instance = NoneInstance()
elif isinstance(source, af.PriorModel):
from .prior import PriorModel
instance = PriorModel._from_object(
source
)
elif isinstance(source, af.Prior):
from .prior import Prior
instance = Prior._from_object(
source
)
elif isinstance(source, (float, int)):
from .instance import Value
instance = Value._from_object(
source
)
elif isinstance(source, (af.CollectionPriorModel, dict, list)):
from .prior import CollectionPriorModel
instance = CollectionPriorModel._from_object(
source
)
elif isinstance(source, str):
from .instance import StringValue
instance = StringValue._from_object(
source
)
else:
from .instance import Instance
instance = Instance._from_object(
source
)
instance.name = name
return instance
def _make_instance(self) -> object:
"""
Create the real instance for this object
"""
return self.cls()
def __call__(self):
"""
Create the real instance for this object, with child
attributes attached
"""
instance = self._make_instance()
for child in self.children:
setattr(
instance,
child.name,
child()
)
return instance
def _add_children(
self,
items: Union[
ItemsView[str, Any],
Iterable[Tuple[str, Any]]
]
):
"""
Add database representations of child attributes
Parameters
----------
items
Attributes such as floats or priors that are associated
with the real object
"""
for key, value in items:
child = Object.from_object(
value,
name=key
)
self.children.append(
child
)
class_path = Column(
String
)
@property
def _class_path_array(self) -> List[str]:
"""
A list of strings describing the module and class of the
real object represented here
"""
return self.class_path.split(".")
@property
def _class_name(self) -> str:
"""
The name of the real class
"""
return self._class_path_array[-1]
@property
def _module_path(self) -> str:
"""
The path of the module containing the real class
"""
return ".".join(self._class_path_array[:-1])
@property
def _module(self):
"""
The module containing the real class
"""
return importlib.import_module(
self._module_path
)
@property
def cls(self) -> type:
"""
The class of the real object
"""
return getattr(
self._module,
self._class_name
)
@cls.setter
def cls(self, cls: type):
self.class_path = get_class_path(cls)
def get_class_path(cls: type) -> str:
"""
The full import path of the type
"""
return re.search("'(.*)'", str(cls))[1]
```
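A hedged sketch of the dispatch and class-path machinery above; the import paths mirror the serialisation tests that follow, and the mock `Gaussian` class comes from those tests.
```python
import autofit as af
from autofit.database.model import Object, get_class_path
from autofit.mock import mock as m

# The class_path column stores the full import path; the cls property
# re-imports the class from it when the row is turned back into an object.
row = Object.from_object(af.PriorModel(m.Gaussian))
assert row.class_path == get_class_path(m.Gaussian)
assert row.cls is m.Gaussian

# from_object dispatches on the source type; None gets its own row class.
assert Object.from_object(None)() is None
```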
#### File: non_linear/grid/grid_search.py
```python
import copy
from os import path
from typing import List, Tuple, Union
import numpy as np
from autoconf import conf
from autofit import exc
from autofit.mapper import model_mapper as mm
from autofit.mapper.prior import prior as p
from autofit.non_linear.abstract_search import Result
from autofit.non_linear.parallel import AbstractJob, Process, AbstractJobResult
from autofit.non_linear.paths import Paths
class GridSearchResult:
def __init__(
self,
results: List[Result],
lower_limit_lists: List[List[float]],
physical_lower_limits_lists: List[List[float]],
):
"""
The result of a grid search.
Parameters
----------
results
The results of the non linear optimizations performed at each grid step
lower_limit_lists
A list of lists of values representing the lower bounds of the grid searched values at each step
physical_lower_limits_lists
A list of lists of values representing the lower physical bounds of the grid search values
at each step.
"""
self.lower_limit_lists = lower_limit_lists
self.physical_lower_limits_lists = physical_lower_limits_lists
self.results = results
self.no_dimensions = len(self.lower_limit_lists[0])
self.no_steps = len(self.lower_limit_lists)
self.side_length = int(self.no_steps ** (1 / self.no_dimensions))
def __getattr__(self, item: str) -> object:
"""
We default to getting attributes from the best result. This allows promises to reference best results.
"""
return getattr(self.best_result, item)
def __getstate__(self):
return self.__dict__
def __setstate__(self, state):
self.__dict__.update(state)
@property
def shape(self):
return tuple([
self.side_length
for _ in range(
self.no_dimensions
)
])
@property
def best_result(self):
"""
The best result of the grid search. That is, the result output by the non linear search that had the highest
maximum figure of merit.
Returns
-------
best_result: Result
"""
best_result = None
for result in self.results:
if (
best_result is None
or result.log_likelihood > best_result.log_likelihood
):
best_result = result
return best_result
@property
def best_model(self):
"""
Returns
-------
best_model: mm.ModelMapper
The model mapper instance associated with the highest figure of merit from the grid search
"""
return self.best_result.model
@property
def all_models(self):
"""
Returns
-------
all_models: [mm.ModelMapper]
All model mapper instances used in the grid search
"""
return [result.model for result in self.results]
@property
def physical_step_sizes(self):
physical_step_sizes = []
# TODO : Make this work for all dimensions in a less ugly way.
for dim in range(self.no_dimensions):
values = [value[dim] for value in self.physical_lower_limits_lists]
diff = [abs(values[n] - values[n - 1]) for n in range(1, len(values))]
if dim == 0:
physical_step_sizes.append(np.max(diff))
elif dim == 1:
physical_step_sizes.append(np.min(diff))
else:
raise exc.GridSearchException(
"This feature does not support > 2 dimensions"
)
return tuple(physical_step_sizes)
@property
def physical_centres_lists(self):
return [
[
lower_limit[dim] + self.physical_step_sizes[dim] / 2
for dim in range(self.no_dimensions)
]
for lower_limit in self.physical_lower_limits_lists
]
@property
def physical_upper_limits_lists(self):
return [
[
lower_limit[dim] + self.physical_step_sizes[dim]
for dim in range(self.no_dimensions)
]
for lower_limit in self.physical_lower_limits_lists
]
@property
def results_reshaped(self):
"""
Returns
-------
likelihood_merit_array: np.ndarray
            An array of figures of merit. This array has the same dimensionality as the grid search, with the value in
each entry being the figure of merit taken from the optimization performed at that point.
"""
return np.reshape(
np.array([result for result in self.results]),
tuple(self.side_length for _ in range(self.no_dimensions)),
)
@property
def max_log_likelihood_values(self):
"""
Returns
-------
likelihood_merit_array: np.ndarray
            An array of figures of merit. This array has the same dimensionality as the grid search, with the value in
each entry being the figure of merit taken from the optimization performed at that point.
"""
return np.reshape(
np.array([result.log_likelihood for result in self.results]),
tuple(self.side_length for _ in range(self.no_dimensions)),
)
@property
def log_evidence_values(self):
"""
Returns
-------
likelihood_merit_array: np.ndarray
            An array of figures of merit. This array has the same dimensionality as the grid search, with the value in
each entry being the figure of merit taken from the optimization performed at that point.
"""
return np.reshape(
np.array([result.samples.log_evidence for result in self.results]),
tuple(self.side_length for _ in range(self.no_dimensions)),
)
class GridSearch:
# TODO: this should be using paths
def __init__(self, search, paths=None, number_of_steps=4, parallel=False):
"""
Performs a non linear optimiser search for each square in a grid. The dimensionality of the search depends on
        the number of distinct priors passed to the fit function. (1 / step_size) ^ no_dimensions optimisations are
        performed in total, one for each square of the grid.
Parameters
----------
number_of_steps: int
The number of steps to go in each direction
search: class
The class of the search that is run at each step
"""
if paths is None:
self.paths = search.paths
else:
self.paths = paths
self.parallel = parallel
self.number_of_cores = conf.instance["non_linear"]["GridSearch"]["general"]["number_of_cores"]
self.number_of_steps = number_of_steps
self.search = search
@property
def hyper_step_size(self):
"""
Returns
-------
hyper_step_size: float
The size of a step in any given dimension in hyper space.
"""
return 1 / self.number_of_steps
def make_physical_lists(self, grid_priors) -> List[List[float]]:
lists = self.make_lists(grid_priors)
return [
[prior.value_for(value) for prior, value in zip(grid_priors, l)]
for l in lists
]
def make_lists(self, grid_priors):
"""
Produces a list of lists of floats, where each list of floats represents the values in each dimension for one
step of the grid search.
Parameters
----------
grid_priors: [p.Prior]
A list of priors that are to be searched using the grid search.
Returns
-------
lists: [[float]]
"""
return make_lists(
len(grid_priors), step_size=self.hyper_step_size, centre_steps=False
)
def make_arguments(self, values, grid_priors):
arguments = {}
for value, grid_prior in zip(values, grid_priors):
if (
float("-inf") == grid_prior.lower_limit
or float("inf") == grid_prior.upper_limit
):
raise exc.PriorException(
"Priors passed to the grid search must have definite limits"
)
lower_limit = grid_prior.lower_limit + value * grid_prior.width
upper_limit = (
grid_prior.lower_limit
+ (value + self.hyper_step_size) * grid_prior.width
)
prior = p.UniformPrior(lower_limit=lower_limit, upper_limit=upper_limit)
arguments[grid_prior] = prior
return arguments
def model_mappers(self, model, grid_priors):
grid_priors = list(set(grid_priors))
lists = self.make_lists(grid_priors)
for values in lists:
arguments = self.make_arguments(values, grid_priors)
yield model.mapper_from_partial_prior_arguments(arguments)
def fit(self, model, analysis, grid_priors):
"""
Fit an analysis with a set of grid priors. The grid priors are priors associated with the model mapper
of this instance that are replaced by uniform priors for each step of the grid search.
Parameters
----------
model
analysis: autofit.non_linear.non_linear.Analysis
An analysis used to determine the fitness of a given model instance
grid_priors: [p.Prior]
A list of priors to be substituted for uniform priors across the grid.
Returns
-------
result: GridSearchResult
An object that comprises the results from each individual fit
"""
func = self.fit_parallel if self.parallel else self.fit_sequential
return func(
model=model,
analysis=analysis,
grid_priors=grid_priors
)
def fit_parallel(self, model, analysis, grid_priors):
"""
Perform the grid search in parallel, with all the optimisation for each grid square being performed on a
different process.
Parameters
----------
analysis
An analysis
grid_priors
Priors describing the position in the grid
Returns
-------
result: GridSearchResult
The result of the grid search
"""
grid_priors = list(set(grid_priors))
results = []
lists = self.make_lists(grid_priors)
physical_lists = self.make_physical_lists(grid_priors)
results_list = [
["index"]
+ list(map(model.name_for_prior, grid_priors))
+ ["likelihood_merit"]
]
jobs = list()
for index, values in enumerate(lists):
jobs.append(
self.job_for_analysis_grid_priors_and_values(
analysis=copy.deepcopy(analysis),
model=model,
grid_priors=grid_priors,
values=values,
index=index,
)
)
for result in Process.run_jobs(
jobs,
self.number_of_cores
):
results.append(result)
results = sorted(results)
results_list.append(result.result_list_row)
self.write_results(results_list)
return GridSearchResult(
[
result.result
for result
in results
],
lists,
physical_lists
)
def fit_sequential(self, model, analysis, grid_priors):
"""
Perform the grid search sequentially, with all the optimisation for each grid square being performed on the
same process.
Parameters
----------
analysis
An analysis
grid_priors
Priors describing the position in the grid
Returns
-------
result: GridSearchResult
The result of the grid search
"""
grid_priors = list(sorted(set(grid_priors), key=lambda prior: prior.id))
results = []
lists = self.make_lists(grid_priors)
physical_lists = self.make_physical_lists(grid_priors)
results_list = [
["index"]
+ list(map(model.name_for_prior, grid_priors))
+ ["max_log_likelihood"]
]
for index, values in enumerate(lists):
job = self.job_for_analysis_grid_priors_and_values(
analysis=analysis,
model=model,
grid_priors=grid_priors,
values=values,
index=index,
)
result = job.perform()
results.append(result.result)
results_list.append(result.result_list_row)
self.write_results(results_list)
return GridSearchResult(results, lists, physical_lists)
def write_results(self, results_list):
with open(path.join(self.paths.output_path, "results"), "w+") as f:
f.write(
"\n".join(
map(
lambda ls: ", ".join(
map(
lambda value: "{:.2f}".format(value)
if isinstance(value, float)
else str(value),
ls,
)
),
results_list,
)
)
)
def job_for_analysis_grid_priors_and_values(
self, model, analysis, grid_priors, values, index
):
arguments = self.make_arguments(values=values, grid_priors=grid_priors)
model = model.mapper_from_partial_prior_arguments(arguments=arguments)
labels = []
for prior in sorted(arguments.values(), key=lambda pr: pr.id):
labels.append(
"{}_{:.2f}_{:.2f}".format(
model.name_for_prior(prior), prior.lower_limit, prior.upper_limit
)
)
name_path = path.join(
self.paths.name,
self.paths.tag,
self.paths.non_linear_tag,
"_".join(labels),
)
search_instance = self.search_instance(name_path=name_path)
return Job(
search_instance=search_instance,
model=model,
analysis=analysis,
arguments=arguments,
index=index,
)
def search_instance(self, name_path):
search_instance = self.search.copy_with_paths(
Paths(
name=name_path,
tag=self.paths.tag,
path_prefix=self.paths.path_prefix,
remove_files=self.paths.remove_files,
)
)
for key, value in self.__dict__.items():
if key not in ("model", "instance", "paths"):
try:
setattr(search_instance, key, value)
except AttributeError:
pass
return search_instance
class JobResult(AbstractJobResult):
def __init__(self, result, result_list_row, number):
"""
The result of a job
Parameters
----------
result
The result of a grid search
result_list_row
A row in the result list
"""
super().__init__(number)
self.result = result
self.result_list_row = result_list_row
class Job(AbstractJob):
def __init__(self, search_instance, model, analysis, arguments, index):
"""
A job to be performed in parallel.
Parameters
----------
search_instance
An instance of an optimiser
analysis
An analysis
arguments
The grid search arguments
"""
super().__init__()
self.search_instance = search_instance
self.analysis = analysis
self.model = model
self.arguments = arguments
self.index = index
def perform(self):
result = self.search_instance.fit(model=self.model, analysis=self.analysis)
result_list_row = [
self.index,
*[prior.lower_limit for prior in self.arguments.values()],
result.log_likelihood,
]
return JobResult(result, result_list_row, self.number)
def grid(fitness_function, no_dimensions, step_size):
"""
    Grid search using a fitness function over a given number of dimensions and a given step size between inclusive
limits of 0 and 1.
Parameters
----------
fitness_function: function
A function that takes a tuple of floats as an argument
no_dimensions: int
The number of dimensions of the grid search
step_size: float
The step size of the grid search
Returns
-------
best_arguments: tuple[float]
The tuple of arguments that gave the highest fitness
"""
best_fitness = float("-inf")
best_arguments = None
for arguments in make_lists(no_dimensions, step_size):
fitness = fitness_function(tuple(arguments))
if fitness > best_fitness:
best_fitness = fitness
best_arguments = tuple(arguments)
return best_arguments
def make_lists(
no_dimensions: int,
step_size: Union[Tuple[float], float],
centre_steps=True
):
"""
    Returns a list of lists of floats covering every combination across no_dimensions of points spaced by step_size
between 0 and 1 inclusive.
Parameters
----------
no_dimensions
The number of dimensions, that is the length of the lists
step_size
The step size. This can be a float or a tuple with the same number of dimensions
    centre_steps
        If True, return the centre of each cell rather than its lower corner.
Returns
-------
lists: [[float]]
A list of lists
"""
if isinstance(step_size, float):
step_size = tuple(
step_size
for _
in range(no_dimensions)
)
if no_dimensions == 0:
return [[]]
sub_lists = make_lists(
no_dimensions - 1,
step_size[1:],
centre_steps=centre_steps
)
step_size = step_size[0]
return [
[
step_size * value + (
0.5 * step_size
if centre_steps
else 0)
] + sub_list
for value in range(int((1 / step_size)))
for sub_list in sub_lists
]
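# Worked example added for illustration (not part of the original source):
# make_lists(2, 0.5) with the default centre_steps=True places points at the
# centre of each step, giving
#   [[0.25, 0.25], [0.25, 0.75], [0.75, 0.25], [0.75, 0.75]]
# while centre_steps=False places them at the lower edge of each step, giving
#   [[0.0, 0.0], [0.0, 0.5], [0.5, 0.0], [0.5, 0.5]]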
```
#### File: unit/database/test_serialize.py
```python
import pytest
import autofit as af
from autofit import database as db
from autofit.mock import mock as m
@pytest.fixture(
name="model"
)
def make_model():
return af.PriorModel(
m.Gaussian
)
@pytest.fixture(
name="serialized_model"
)
def make_serialized_model(model):
return db.Object.from_object(
model
)
@pytest.fixture(
name="collection"
)
def make_collection(model):
return af.CollectionPriorModel(
model=model
)
@pytest.fixture(
name="serialized_collection"
)
def make_serialized_collection(collection):
return db.Object.from_object(
collection
)
class TestInstance:
def test_serialize(self):
serialized_instance = db.Object.from_object(
m.Gaussian()
)
assert len(
serialized_instance.children
) == 3
class TestModel:
def test_serialize(
self,
serialized_model
):
assert isinstance(
serialized_model, db.PriorModel
)
assert serialized_model.cls is m.Gaussian
def test_deserialize(
self,
serialized_model
):
assert serialized_model().cls is m.Gaussian
class TestPriors:
def test_serialize(
self,
serialized_model,
):
assert len(serialized_model.children) == 3
def test_deserialize(
self,
serialized_model
):
model = serialized_model()
assert len(model.priors) == 3
assert isinstance(
model.centre,
af.UniformPrior
)
class TestCollection:
def test_serialize(
self,
serialized_collection
):
assert isinstance(
serialized_collection,
db.CollectionPriorModel
)
child, = serialized_collection.children
assert len(child.children) == 3
def test_deserialize(
self,
serialized_collection
):
collection = serialized_collection()
assert len(collection) == 1
assert isinstance(
collection.model,
af.PriorModel
)
def test_none():
assert db.Object.from_object(None)() is None
def test_commit(session):
model = af.PriorModel(
m.Gaussian
)
serialized = db.Object.from_object(model)
session.add(serialized)
session.commit()
```
#### File: non_linear/grid/test_optimizer_grid_search.py
```python
import pickle
import pytest
import autofit as af
from autofit import exc
from autofit.mock import mock
from autofit.mock.mock import MockAnalysis
@pytest.fixture(name="mapper")
def make_mapper():
mapper = af.ModelMapper()
mapper.component = mock.MockClassx2Tuple
return mapper
@pytest.fixture(name="grid_search")
def make_grid_search(mapper):
return af.SearchGridSearch(
paths=af.Paths(name=""), number_of_steps=10, search=af.MockSearch()
)
def test_unpickle_result():
result = af.GridSearchResult(
[af.Result(samples=None, previous_model=None)],
lower_limit_lists=[[1]],
physical_lower_limits_lists=[[1]],
)
result = pickle.loads(pickle.dumps(result))
assert result is not None
class TestGridSearchablePriors:
def test_generated_models(self, grid_search, mapper):
mappers = list(
grid_search.model_mappers(
mapper,
grid_priors=[
mapper.component.one_tuple.one_tuple_0,
mapper.component.one_tuple.one_tuple_1,
],
)
)
assert len(mappers) == 100
assert mappers[0].component.one_tuple.one_tuple_0.lower_limit == 0.0
assert mappers[0].component.one_tuple.one_tuple_0.upper_limit == 0.1
assert mappers[0].component.one_tuple.one_tuple_1.lower_limit == 0.0
assert mappers[0].component.one_tuple.one_tuple_1.upper_limit == 0.2
assert mappers[-1].component.one_tuple.one_tuple_0.lower_limit == 0.9
assert mappers[-1].component.one_tuple.one_tuple_0.upper_limit == 1.0
assert mappers[-1].component.one_tuple.one_tuple_1.lower_limit == 1.8
assert mappers[-1].component.one_tuple.one_tuple_1.upper_limit == 2.0
def test_non_grid_searched_dimensions(self, mapper):
grid_search = af.SearchGridSearch(
paths=af.Paths(name=""), number_of_steps=10, search=af.MockSearch()
)
mappers = list(
grid_search.model_mappers(
mapper, grid_priors=[mapper.component.one_tuple.one_tuple_0]
)
)
assert len(mappers) == 10
assert mappers[0].component.one_tuple.one_tuple_0.lower_limit == 0.0
assert mappers[0].component.one_tuple.one_tuple_0.upper_limit == 0.1
assert mappers[0].component.one_tuple.one_tuple_1.lower_limit == 0.0
assert mappers[0].component.one_tuple.one_tuple_1.upper_limit == 2.0
assert mappers[-1].component.one_tuple.one_tuple_0.lower_limit == 0.9
assert mappers[-1].component.one_tuple.one_tuple_0.upper_limit == 1.0
assert mappers[-1].component.one_tuple.one_tuple_1.lower_limit == 0.0
assert mappers[-1].component.one_tuple.one_tuple_1.upper_limit == 2.0
def test_tied_priors(self, grid_search, mapper):
mapper.component.one_tuple.one_tuple_0 = mapper.component.one_tuple.one_tuple_1
mappers = list(
grid_search.model_mappers(
grid_priors=[
mapper.component.one_tuple.one_tuple_0,
mapper.component.one_tuple.one_tuple_1,
],
model=mapper,
)
)
assert len(mappers) == 10
assert mappers[0].component.one_tuple.one_tuple_0.lower_limit == 0.0
assert mappers[0].component.one_tuple.one_tuple_0.upper_limit == 0.2
assert mappers[0].component.one_tuple.one_tuple_1.lower_limit == 0.0
assert mappers[0].component.one_tuple.one_tuple_1.upper_limit == 0.2
assert mappers[-1].component.one_tuple.one_tuple_0.lower_limit == 1.8
assert mappers[-1].component.one_tuple.one_tuple_0.upper_limit == 2.0
assert mappers[-1].component.one_tuple.one_tuple_1.lower_limit == 1.8
assert mappers[-1].component.one_tuple.one_tuple_1.upper_limit == 2.0
for mapper in mappers:
assert (
mapper.component.one_tuple.one_tuple_0
== mapper.component.one_tuple.one_tuple_1
)
def test_different_prior_width(self, grid_search, mapper):
mapper.component.one_tuple.one_tuple_0 = af.UniformPrior(0.0, 2.0)
mappers = list(
grid_search.model_mappers(
grid_priors=[mapper.component.one_tuple.one_tuple_0], model=mapper
)
)
assert len(mappers) == 10
assert mappers[0].component.one_tuple.one_tuple_0.lower_limit == 0.0
assert mappers[0].component.one_tuple.one_tuple_0.upper_limit == 0.2
assert mappers[-1].component.one_tuple.one_tuple_0.lower_limit == 1.8
assert mappers[-1].component.one_tuple.one_tuple_0.upper_limit == 2.0
mapper.component.one_tuple.one_tuple_0 = af.UniformPrior(1.0, 1.5)
mappers = list(
grid_search.model_mappers(
mapper, grid_priors=[mapper.component.one_tuple.one_tuple_0]
)
)
assert len(mappers) == 10
assert mappers[0].component.one_tuple.one_tuple_0.lower_limit == 1.0
assert mappers[0].component.one_tuple.one_tuple_0.upper_limit == 1.05
assert mappers[-1].component.one_tuple.one_tuple_0.lower_limit == 1.45
assert mappers[-1].component.one_tuple.one_tuple_0.upper_limit == 1.5
def test_raises_exception_for_bad_limits(self, grid_search, mapper):
mapper.component.one_tuple.one_tuple_0 = af.GaussianPrior(
0.0, 2.0, lower_limit=float("-inf"), upper_limit=float("inf")
)
with pytest.raises(exc.PriorException):
list(
grid_search.make_arguments(
[[0, 1]], grid_priors=[mapper.component.one_tuple.one_tuple_0]
)
)
@pytest.fixture(name="grid_search_05")
def make_grid_search_05():
return af.SearchGridSearch(
search=MockOptimizer(), number_of_steps=2, paths=af.Paths(name="sample_name")
)
class MockOptimizer(af.MockSearch):
init_args = list()
def __init__(self, paths=None):
super().__init__(paths=paths or af.Paths(), fit_fast=False)
self.init_args.append(paths)
@pytest.fixture(autouse=True)
def empty_args():
MockOptimizer.init_args = list()
class TestGridNLOBehaviour:
def test_results(self, grid_search_05, mapper):
result = grid_search_05.fit(
model=mapper,
analysis=MockAnalysis(),
grid_priors=[
mapper.component.one_tuple.one_tuple_0,
mapper.component.one_tuple.one_tuple_1,
],
)
assert len(result.results) == 4
assert result.no_dimensions == 2
grid_search = af.SearchGridSearch(
search=MockOptimizer(),
number_of_steps=10,
paths=af.Paths(name="sample_name"),
)
result = grid_search.fit(
model=mapper,
analysis=MockAnalysis(),
grid_priors=[
mapper.component.one_tuple.one_tuple_0,
mapper.component.one_tuple.one_tuple_1,
],
)
assert len(result.results) == 100
assert result.no_dimensions == 2
assert result.max_log_likelihood_values.shape == (10, 10)
# def test_results_parallel(self, mapper, container):
# grid_search = af.SearchGridSearch(
# search=container.MockOptimizer,
# number_of_steps=10,
# paths=af.Paths(name="sample_name"),
# parallel=True,
# )
# result = grid_search.fit(
# container.MockAnalysis(),
# mapper,
# [mapper.component.one_tuple.one_tuple_0, mapper.component.one_tuple.one_tuple_1],
# )
#
# assert len(result.results) == 100
# assert result.no_dimensions == 2
# assert result.likelihood_merit_array.shape == (10, 10)
# def test_generated_models_with_instances(self, grid_search, container, mapper):
# instance_component = mock.MockClassx2Tuple()
# mapper.instance_component = instance_component
#
# analysis = container.MockAnalysis()
#
# grid_search.fit(analysis, mapper, [mapper.component.one_tuple.one_tuple_0])
#
# for instance in container.fit_instances:
# assert isinstance(instance.component, mock.MockClassx2Tuple)
# assert instance.instance_component == instance_component
#
# def test_generated_models_with_instance_attributes(
# self, grid_search, mapper, container
# ):
# instance = 2.0
# mapper.component.one_tuple.one_tuple_1 = instance
#
# analysis = container.MockAnalysis()
#
# grid_search.fit(analysis, mapper, [mapper.component.one_tuple.one_tuple_0])
#
# assert len(container.fit_instances) > 0
#
# for instance in container.fit_instances:
# assert isinstance(instance.component, mock.MockClassx2Tuple)
# # noinspection PyUnresolvedReferences
# assert instance.component.centre[1] == 2
def test_passes_attributes(self):
grid_search = af.SearchGridSearch(
paths=af.Paths(name=""), number_of_steps=10, search=af.DynestyStatic()
)
grid_search.n_live_points = 20
grid_search.sampling_efficiency = 0.3
search = grid_search.search_instance("name_path")
assert search.n_live_points is grid_search.n_live_points
assert grid_search.paths.path != search.paths.path
assert grid_search.paths.output_path != search.paths.output_path
class MockResult:
def __init__(self, log_likelihood):
self.log_likelihood = log_likelihood
self.model = log_likelihood
@pytest.fixture(name="grid_search_result")
def make_grid_search_result():
one = MockResult(1)
two = MockResult(2)
# noinspection PyTypeChecker
return af.GridSearchResult([one, two], [[1], [2]], [[1], [2]])
class TestGridSearchResult:
def test_best_result(self, grid_search_result):
assert grid_search_result.best_result.log_likelihood == 2
def test_attributes(self, grid_search_result):
assert grid_search_result.model == 2
def test_best_model(self, grid_search_result):
assert grid_search_result.best_model == 2
def test_all_models(self, grid_search_result):
assert grid_search_result.all_models == [1, 2]
def test__result_derived_properties(self):
lower_limit_lists = [[0.0, 0.0], [0.0, 0.5], [0.5, 0.0], [0.5, 0.5]]
physical_lower_limits_lists = [
[-2.0, -3.0],
[-2.0, 0.0],
[0.0, -3.0],
[0.0, 0.0],
]
grid_search_result = af.GridSearchResult(
results=None,
physical_lower_limits_lists=physical_lower_limits_lists,
lower_limit_lists=lower_limit_lists,
)
print(grid_search_result)
assert grid_search_result.shape == (2, 2)
assert grid_search_result.physical_step_sizes == (2.0, 3.0)
assert grid_search_result.physical_centres_lists == [
[-1.0, -1.5],
[-1.0, 1.5],
[1.0, -1.5],
[1.0, 1.5],
]
assert grid_search_result.physical_upper_limits_lists == [
[0.0, 0.0],
[0.0, 3.0],
[2.0, 0.0],
[2.0, 3.0],
]
``` |
{
"source": "jonathanfrawley/PyAutoFit",
"score": 3
} |
#### File: query/internal/test_new_model.py
```python
from autofit.database import query as q
class TestCombination:
def test_simple(
self,
less_than,
greater_than,
simple_and
):
assert q.Q(
"a",
less_than
) & q.Q(
"a",
greater_than
) == simple_and
def test_second_level(
self,
less_than,
greater_than,
second_level
):
first = q.Q("a", less_than)
second = q.Q(
'a',
q.Q('b', greater_than)
)
assert first & second == second_level
def test_complicated(
self,
less_than,
greater_than
):
first = q.Q(
"a",
q.Q(
"b",
q.And(
q.Q(
"c",
less_than
),
greater_than
)
)
)
second = q.Q(
"a",
q.Q(
"b",
q.Q(
"c",
greater_than
)
)
)
combined = q.Q(
"a",
q.Q(
"b",
q.And(
q.Q(
"c",
q.And(
less_than,
greater_than
)
),
greater_than
)
)
)
assert first & second == combined
```
#### File: non_linear/grid/conftest.py
```python
import pytest
import autofit as af
from autofit.mock import mock
@pytest.fixture(name="mapper")
def make_mapper():
return af.Collection(
component=af.Model(
mock.MockClassx2Tuple
)
)
@pytest.fixture(name="grid_search")
def make_grid_search(mapper):
search = af.SearchGridSearch(
number_of_steps=10, search=af.MockSearch()
)
search.paths = af.DirectoryPaths(name="")
return search
``` |
{
"source": "jonathanfrawley/PyAutoGalaxy",
"score": 2
} |
#### File: PyAutoGalaxy/autogalaxy/lensing.py
```python
import numpy as np
from autoarray.structures.arrays.two_d import array_2d
from autoarray.structures.grids.two_d import grid_2d
from autoarray.structures.grids.two_d import grid_2d_irregular
from skimage import measure
from functools import wraps
def precompute_jacobian(func):
@wraps(func)
def wrapper(lensing_obj, grid, jacobian=None):
if jacobian is None:
jacobian = lensing_obj.jacobian_from_grid(grid=grid)
return func(lensing_obj, grid, jacobian)
return wrapper
def evaluation_grid(func):
@wraps(func)
def wrapper(lensing_obj, grid, pixel_scale=0.05):
if hasattr(grid, "is_evaluation_grid"):
if grid.is_evaluation_grid:
return func(lensing_obj, grid, pixel_scale)
pixel_scale_ratio = grid.pixel_scale / pixel_scale
zoom_shape_native = grid.mask.zoom_shape_native
shape_native = (
int(pixel_scale_ratio * zoom_shape_native[0]),
int(pixel_scale_ratio * zoom_shape_native[1]),
)
grid = grid_2d.Grid2D.uniform(
shape_native=shape_native,
pixel_scales=(pixel_scale, pixel_scale),
origin=grid.mask.zoom_offset_scaled,
)
grid.is_evaluation_grid = True
return func(lensing_obj, grid, pixel_scale)
return wrapper
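# Editorial note: `evaluation_grid` wraps the critical-curve and caustic methods
# below so that, unless the input grid is already flagged as an evaluation grid,
# the wrapped quantity is computed on a uniform grid zoomed in on the masked
# region at the requested `pixel_scale`, rather than on the (typically coarser)
# data grid.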
class LensingObject:
centre = None
angle = None
def convergence_func(self, grid_radius):
raise NotImplementedError
def convergence_1d_from_grid(self, grid):
raise NotImplementedError
def convergence_2d_from_grid(self, grid):
raise NotImplementedError
def potential_func(self, u, y, x):
raise NotImplementedError
def potential_1d_from_grid(self, grid):
raise NotImplementedError
def potential_2d_from_grid(self, grid):
raise NotImplementedError
def deflections_2d_from_grid(self, grid):
raise NotImplementedError
def mass_integral(self, x):
"""Routine to integrate an elliptical light profiles - set axis ratio to 1 to compute the luminosity within a \
circle"""
return 2 * np.pi * x * self.convergence_func(grid_radius=x)
def deflection_magnitudes_from_grid(self, grid):
deflections = self.deflections_2d_from_grid(grid=grid)
return deflections.distances_from_coordinate(coordinate=(0.0, 0.0))
def deflections_2d_via_potential_2d_from_grid(self, grid):
potential = self.potential_2d_from_grid(grid=grid)
deflections_y_2d = np.gradient(potential.native, grid.native[:, 0, 0], axis=0)
deflections_x_2d = np.gradient(potential.native, grid.native[0, :, 1], axis=1)
return grid_2d.Grid2D.manual_mask(
grid=np.stack((deflections_y_2d, deflections_x_2d), axis=-1), mask=grid.mask
)
def jacobian_from_grid(self, grid):
deflections = self.deflections_2d_from_grid(grid=grid)
a11 = array_2d.Array2D.manual_mask(
array=1.0
- np.gradient(deflections.native[:, :, 1], grid.native[0, :, 1], axis=1),
mask=grid.mask,
)
a12 = array_2d.Array2D.manual_mask(
array=-1.0
* np.gradient(deflections.native[:, :, 1], grid.native[:, 0, 0], axis=0),
mask=grid.mask,
)
a21 = array_2d.Array2D.manual_mask(
array=-1.0
* np.gradient(deflections.native[:, :, 0], grid.native[0, :, 1], axis=1),
mask=grid.mask,
)
a22 = array_2d.Array2D.manual_mask(
array=1
- np.gradient(deflections.native[:, :, 0], grid.native[:, 0, 0], axis=0),
mask=grid.mask,
)
return [[a11, a12], [a21, a22]]
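    # Editorial note: the 2x2 matrix returned above is the lensing Jacobian
    # A_ij = delta_ij - d(alpha_i)/d(x_j), built from numerical derivatives of
    # the deflection-angle field. The methods below read off the standard
    # decomposition kappa = 1 - (A_11 + A_22) / 2 and the shear magnitude
    # gamma = sqrt(gamma_1^2 + gamma_2^2), and the tangential / radial
    # eigenvalues 1 - kappa - gamma and 1 - kappa + gamma are later used to
    # trace the critical curves.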
@precompute_jacobian
def convergence_via_jacobian_from_grid(self, grid, jacobian=None):
convergence = 1 - 0.5 * (jacobian[0][0] + jacobian[1][1])
return array_2d.Array2D(array=convergence, mask=grid.mask)
@precompute_jacobian
def shear_via_jacobian_from_grid(self, grid, jacobian=None):
shear_y = -0.5 * (jacobian[0][1] + jacobian[1][0])
shear_x = 0.5 * (jacobian[1][1] - jacobian[0][0])
return array_2d.Array2D(
array=(shear_x ** 2 + shear_y ** 2) ** 0.5, mask=grid.mask
)
@precompute_jacobian
def tangential_eigen_value_from_grid(self, grid, jacobian=None):
convergence = self.convergence_via_jacobian_from_grid(
grid=grid, jacobian=jacobian
)
shear = self.shear_via_jacobian_from_grid(grid=grid, jacobian=jacobian)
return array_2d.Array2D(array=1 - convergence - shear, mask=grid.mask)
@precompute_jacobian
def radial_eigen_value_from_grid(self, grid, jacobian=None):
convergence = self.convergence_via_jacobian_from_grid(
grid=grid, jacobian=jacobian
)
shear = self.shear_via_jacobian_from_grid(grid=grid, jacobian=jacobian)
return array_2d.Array2D(array=1 - convergence + shear, mask=grid.mask)
def magnification_2d_from_grid(self, grid):
jacobian = self.jacobian_from_grid(grid=grid)
det_jacobian = jacobian[0][0] * jacobian[1][1] - jacobian[0][1] * jacobian[1][0]
return array_2d.Array2D(array=1 / det_jacobian, mask=grid.mask)
def hessian_from_grid(self, grid, buffer=0.01, deflections_func=None):
if deflections_func is None:
deflections_func = self.deflections_2d_from_grid
grid_shift_y_up = np.zeros(grid.shape)
grid_shift_y_up[:, 0] = grid[:, 0] + buffer
grid_shift_y_up[:, 1] = grid[:, 1]
grid_shift_y_down = np.zeros(grid.shape)
grid_shift_y_down[:, 0] = grid[:, 0] - buffer
grid_shift_y_down[:, 1] = grid[:, 1]
grid_shift_x_left = np.zeros(grid.shape)
grid_shift_x_left[:, 0] = grid[:, 0]
grid_shift_x_left[:, 1] = grid[:, 1] - buffer
grid_shift_x_right = np.zeros(grid.shape)
grid_shift_x_right[:, 0] = grid[:, 0]
grid_shift_x_right[:, 1] = grid[:, 1] + buffer
deflections_up = deflections_func(grid=grid_shift_y_up)
deflections_down = deflections_func(grid=grid_shift_y_down)
deflections_left = deflections_func(grid=grid_shift_x_left)
deflections_right = deflections_func(grid=grid_shift_x_right)
hessian_yy = 0.5 * (deflections_up[:, 0] - deflections_down[:, 0]) / buffer
hessian_xy = 0.5 * (deflections_up[:, 1] - deflections_down[:, 1]) / buffer
hessian_yx = 0.5 * (deflections_right[:, 0] - deflections_left[:, 0]) / buffer
hessian_xx = 0.5 * (deflections_right[:, 1] - deflections_left[:, 1]) / buffer
return hessian_yy, hessian_xy, hessian_yx, hessian_xx
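    # Editorial note: the terms above are central finite differences of the
    # deflection angles over grids shifted by +/- `buffer`, e.g.
    # hessian_yy ~ d(alpha_y)/dy. Since the deflection field is the gradient of
    # the lensing potential, these approximate the potential's second
    # derivatives, which is why the convergence, shear and magnification
    # methods below can be evaluated from them directly.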
def convergence_via_hessian_from_grid(self, grid, buffer=0.01):
hessian_yy, hessian_xy, hessian_yx, hessian_xx = self.hessian_from_grid(
grid=grid, buffer=buffer
)
return grid.values_from_array_slim(array_slim=0.5 * (hessian_yy + hessian_xx))
def shear_via_hessian_from_grid(self, grid, buffer=0.01):
hessian_yy, hessian_xy, hessian_yx, hessian_xx = self.hessian_from_grid(
grid=grid, buffer=buffer
)
shear_y = 0.5 * (hessian_xx - hessian_yy)
shear_x = hessian_xy
return grid.values_from_array_slim(
array_slim=(shear_x ** 2 + shear_y ** 2) ** 0.5
)
def magnification_via_hessian_from_grid(
self, grid, buffer=0.01, deflections_func=None
):
hessian_yy, hessian_xy, hessian_yx, hessian_xx = self.hessian_from_grid(
grid=grid, buffer=buffer, deflections_func=deflections_func
)
det_A = (1 - hessian_xx) * (1 - hessian_yy) - hessian_xy * hessian_yx
return grid.values_from_array_slim(array_slim=1.0 / det_A)
@evaluation_grid
def tangential_critical_curve_from_grid(self, grid, pixel_scale=0.05):
tangential_eigen_values = self.tangential_eigen_value_from_grid(grid=grid)
tangential_critical_curve_indices = measure.find_contours(
tangential_eigen_values.native, 0
)
if len(tangential_critical_curve_indices) == 0:
return []
tangential_critical_curve = grid.mask.grid_scaled_from_grid_pixels_1d_for_marching_squares(
grid_pixels_1d=tangential_critical_curve_indices[0],
shape_native=tangential_eigen_values.sub_shape_native,
)
try:
return grid_2d_irregular.Grid2DIrregular(tangential_critical_curve)
except IndexError:
return []
@evaluation_grid
def radial_critical_curve_from_grid(self, grid, pixel_scale=0.05):
radial_eigen_values = self.radial_eigen_value_from_grid(grid=grid)
radial_critical_curve_indices = measure.find_contours(
radial_eigen_values.native, 0
)
if len(radial_critical_curve_indices) == 0:
return []
radial_critical_curve = grid.mask.grid_scaled_from_grid_pixels_1d_for_marching_squares(
grid_pixels_1d=radial_critical_curve_indices[0],
shape_native=radial_eigen_values.sub_shape_native,
)
try:
return grid_2d_irregular.Grid2DIrregular(radial_critical_curve)
except IndexError:
return []
@evaluation_grid
def critical_curves_from_grid(self, grid, pixel_scale=0.05):
try:
return grid_2d_irregular.Grid2DIrregular(
[
self.tangential_critical_curve_from_grid(
grid=grid, pixel_scale=pixel_scale
),
self.radial_critical_curve_from_grid(
grid=grid, pixel_scale=pixel_scale
),
]
)
except (IndexError, ValueError):
return []
@evaluation_grid
def tangential_caustic_from_grid(self, grid, pixel_scale=0.05):
tangential_critical_curve = self.tangential_critical_curve_from_grid(
grid=grid, pixel_scale=pixel_scale
)
if len(tangential_critical_curve) == 0:
return []
deflections_critical_curve = self.deflections_2d_from_grid(
grid=tangential_critical_curve
)
return tangential_critical_curve - deflections_critical_curve
@evaluation_grid
def radial_caustic_from_grid(self, grid, pixel_scale=0.05):
radial_critical_curve = self.radial_critical_curve_from_grid(
grid=grid, pixel_scale=pixel_scale
)
if len(radial_critical_curve) == 0:
return []
deflections_critical_curve = self.deflections_2d_from_grid(
grid=radial_critical_curve
)
return radial_critical_curve - deflections_critical_curve
@evaluation_grid
def caustics_from_grid(self, grid, pixel_scale=0.05):
try:
return grid_2d_irregular.Grid2DIrregular(
[
self.tangential_caustic_from_grid(
grid=grid, pixel_scale=pixel_scale
),
self.radial_caustic_from_grid(grid=grid, pixel_scale=pixel_scale),
]
)
except (IndexError, ValueError):
return []
@evaluation_grid
def area_within_tangential_critical_curve_from_grid(self, grid, pixel_scale=0.05):
tangential_critical_curve = self.tangential_critical_curve_from_grid(
grid=grid, pixel_scale=pixel_scale
)
x, y = tangential_critical_curve[:, 0], tangential_critical_curve[:, 1]
return np.abs(0.5 * np.sum(y[:-1] * np.diff(x) - x[:-1] * np.diff(y)))
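    # Editorial note: the area above is the shoelace formula applied to the
    # vertices of the tangential critical curve; the Einstein radius below is
    # then the radius of the circle enclosing the same area, sqrt(area / pi).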
@evaluation_grid
def einstein_radius_from_grid(self, grid, pixel_scale=0.05):
try:
return np.sqrt(
self.area_within_tangential_critical_curve_from_grid(
grid=grid, pixel_scale=pixel_scale
)
/ np.pi
)
except TypeError:
raise TypeError("The grid input was unable to estimate the Einstein Radius")
@evaluation_grid
def einstein_mass_angular_from_grid(self, grid, pixel_scale=0.05):
return np.pi * (
self.einstein_radius_from_grid(grid=grid, pixel_scale=pixel_scale) ** 2
)
```
#### File: autogalaxy/plot/fit_interferometer_plotters.py
```python
from autoarray.plot import inversion_plotters, fit_interferometer_plotters
from autogalaxy.plot.mat_wrap import lensing_mat_plot, lensing_include, lensing_visuals
from autogalaxy.plot import plane_plotters
from autogalaxy.fit import fit_interferometer
class FitInterferometerPlotter(
fit_interferometer_plotters.AbstractFitInterferometerPlotter
):
def __init__(
self,
fit: fit_interferometer.FitInterferometer,
mat_plot_1d: lensing_mat_plot.MatPlot1D = lensing_mat_plot.MatPlot1D(),
visuals_1d: lensing_visuals.Visuals1D = lensing_visuals.Visuals1D(),
include_1d: lensing_include.Include1D = lensing_include.Include1D(),
mat_plot_2d: lensing_mat_plot.MatPlot2D = lensing_mat_plot.MatPlot2D(),
visuals_2d: lensing_visuals.Visuals2D = lensing_visuals.Visuals2D(),
include_2d: lensing_include.Include2D = lensing_include.Include2D(),
):
super().__init__(
fit=fit,
mat_plot_1d=mat_plot_1d,
include_1d=include_1d,
visuals_1d=visuals_1d,
mat_plot_2d=mat_plot_2d,
include_2d=include_2d,
visuals_2d=visuals_2d,
)
@property
def plane(self):
return self.fit.plane
@property
def visuals_with_include_2d(self):
visuals_2d = super(FitInterferometerPlotter, self).visuals_with_include_2d
return visuals_2d + visuals_2d.__class__()
def plane_plotter_from(self, plane):
return plane_plotters.PlanePlotter(
plane=plane,
grid=self.fit.interferometer.grid,
mat_plot_2d=self.mat_plot_2d,
visuals_2d=self.visuals_with_include_2d,
include_2d=self.include_2d,
)
@property
def inversion_plotter(self):
return inversion_plotters.InversionPlotter(
inversion=self.fit.inversion,
mat_plot_2d=self.mat_plot_2d,
visuals_2d=self.visuals_with_include_2d,
include_2d=self.include_2d,
)
def subplot_fit_real_space(self):
if self.fit.inversion is None:
plane_plotter = self.plane_plotter_from(plane=self.plane)
plane_plotter.subplot(
image=True, plane_image=True, auto_filename="subplot_fit_real_space"
)
        else:
self.inversion_plotter.subplot(
reconstructed_image=True,
reconstruction=True,
auto_filename="subplot_fit_real_space",
)
``` |
{
"source": "jonathanfrawley/PyAutoGalaxy_copy",
"score": 2
} |
#### File: autogalaxy/dataset/imaging.py
```python
import copy
import numpy as np
from autoarray.structures import arrays
from autoarray.structures import grids
from autoarray.structures import kernel
from autoarray.dataset import imaging
from autogalaxy.plane import plane as pl
class SettingsMaskedImaging(imaging.SettingsMaskedImaging):
def __init__(
self,
grid_class=grids.Grid2D,
grid_inversion_class=grids.Grid2D,
sub_size=2,
sub_size_inversion=2,
fractional_accuracy=0.9999,
sub_steps=None,
pixel_scales_interp=None,
bin_up_factor=None,
signal_to_noise_limit=None,
psf_shape_2d=None,
renormalize_psf=True,
):
"""
        The lens dataset is the collection of data (image, noise-map, PSF), a mask, grid, convolver \
and other utilities that are used for modeling and fitting an image of a strong lens.
Whilst the image, noise-map, etc. are loaded in 2D, the lens dataset creates reduced 1D arrays of each \
for lens calculations.
Parameters
----------
grid_class : ag.Grid2D
The type of grid used to create the image from the `Galaxy` and `Plane`. The options are `Grid2D`,
`Grid2DIterate` and `Grid2DInterpolate` (see the `Grid2D` documentation for a description of these options).
grid_inversion_class : ag.Grid2D
The type of grid used to create the grid that maps the `Inversion` source pixels to the data's image-pixels.
The options are `Grid2D`, `Grid2DIterate` and `Grid2DInterpolate` (see the `Grid2D` documentation for a
description of these options).
sub_size : int
If the grid and / or grid_inversion use a `Grid2D`, this sets the sub-size used by the `Grid2D`.
fractional_accuracy : float
If the grid and / or grid_inversion use a `Grid2DIterate`, this sets the fractional accuracy it
uses when evaluating functions.
sub_steps : [int]
If the grid and / or grid_inversion use a `Grid2DIterate`, this sets the steps the sub-size is increased by
to meet the fractional accuracy when evaluating functions.
pixel_scales_interp : float or (float, float)
If the grid and / or grid_inversion use a `Grid2DInterpolate`, this sets the resolution of the interpolation
grid.
signal_to_noise_limit : float
            If input, the dataset's noise-map is rescaled such that no pixel has a signal-to-noise above the
            signal-to-noise limit.
"""
super().__init__(
grid_class=grid_class,
grid_inversion_class=grid_inversion_class,
sub_size=sub_size,
sub_size_inversion=sub_size_inversion,
fractional_accuracy=fractional_accuracy,
sub_steps=sub_steps,
pixel_scales_interp=pixel_scales_interp,
bin_up_factor=bin_up_factor,
signal_to_noise_limit=signal_to_noise_limit,
psf_shape_2d=psf_shape_2d,
renormalize_psf=renormalize_psf,
)
class MaskedImaging(imaging.MaskedImaging):
def __init__(self, imaging, mask, settings=SettingsMaskedImaging()):
"""
The lens dataset is the collection of data (image, noise-map, PSF), a mask, grid, convolver \
and other utilities that are used for modeling and fitting an image of a strong lens.
Whilst the image, noise-map, etc. are loaded in 2D, the lens dataset creates reduced 1D arrays of each \
for lens calculations.
Parameters
----------
imaging: im.Imaging
The imaging data all in 2D (the image, noise-map, PSF, etc.)
mask: msk.Mask2D
The 2D mask that is applied to the image.
sub_size : int
            The size of the sub-grid used for each lens SubGrid. E.g. a value of 2 grids each image-pixel on a 2x2 \
sub-grid.
psf_shape_2d : (int, int)
            The shape of the PSF used for convolving the model image generated using analytic light profiles. A smaller \
            shape will trim the PSF relative to the input image PSF, giving a faster analysis run-time.
        positions : [[]]
            Lists of image-pixel coordinates (arc-seconds) that map close to one another in the source-plane(s), \
            used to speed up the non-linear sampling.
pixel_scales_interp : float
If `True`, expensive to compute mass profile deflection angles will be computed on a sparse grid and \
interpolated to the grid, sub and blurring grids.
"""
super(MaskedImaging, self).__init__(
imaging=imaging, mask=mask, settings=settings
)
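# Usage sketch added for illustration (not part of the original file): `imaging`
# and `mask` stand in for an Imaging dataset and a Mask2D created elsewhere.
#
#   settings = SettingsMaskedImaging(sub_size=2, psf_shape_2d=(21, 21))
#   masked_imaging = MaskedImaging(imaging=imaging, mask=mask, settings=settings)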
class SimulatorImaging(imaging.SimulatorImaging):
def __init__(
self,
exposure_time: float,
background_sky_level: float = 0.0,
psf: kernel.Kernel2D = None,
renormalize_psf: bool = True,
read_noise: float = None,
add_poisson_noise: bool = True,
noise_if_add_noise_false: float = 0.1,
noise_seed: int = -1,
):
"""A class representing a Imaging observation, using the shape of the image, the pixel scale,
psf, exposure time, etc.
Parameters
----------
psf : Kernel2D
An arrays describing the PSF kernel of the image.
exposure_time : float
The exposure time of the simulated imaging.
background_sky_level : float
The level of the background sky of the simulated imaging.
renormalize_psf : bool
If `True`, the PSF kernel is renormalized so all values sum to 1.0.
read_noise : float
The level of read-noise added to the simulated imaging by drawing from a Gaussian distribution with
sigma equal to the value `read_noise`.
add_poisson_noise : bool
Whether Poisson noise corresponding to photon count statistics on the imaging observation is added.
noise_if_add_noise_false : float
If noise is not added to the simulated dataset a `noise_map` must still be returned. This value gives
the value of noise assigned to every pixel in the noise-map.
noise_seed : int
The random seed used to add random noise, where -1 corresponds to a random seed every run.
"""
super(SimulatorImaging, self).__init__(
psf=psf,
exposure_time=exposure_time,
background_sky_level=background_sky_level,
renormalize_psf=renormalize_psf,
read_noise=read_noise,
add_poisson_noise=add_poisson_noise,
noise_if_add_noise_false=noise_if_add_noise_false,
noise_seed=noise_seed,
)
def from_plane_and_grid(self, plane, grid, name=None):
"""
Returns a realistic simulated image by applying effects to a plain simulated image.
        Parameters
        ----------
        plane
            The plane (a collection of galaxies) whose image is simulated.
        grid
            The (y,x) grid of coordinates the plane's image is evaluated on.
        name
            An optional name for the simulated dataset.
        """
image = plane.padded_image_from_grid_and_psf_shape(
grid=grid, psf_shape_2d=self.psf.shape_native
)
imaging = self.from_image(image=image.slim_binned, name=name)
return imaging.trimmed_after_convolution_from(
kernel_shape=self.psf.shape_native
)
def from_galaxies_and_grid(self, galaxies, grid, name=None):
"""Simulate imaging data for this data, as follows:
1) Setup the image-plane grid of the Imaging arrays, which defines the coordinates used for the ray-tracing.
2) Use this grid and the lens and source galaxies to setup a plane, which generates the image of \
the simulated imaging data.
3) Simulate the imaging data, using a special image which ensures edge-effects don't
        degrade simulation of the telescope optics (e.g. the PSF convolution).
4) Plot the image using Matplotlib, if the plot_imaging bool is True.
5) Output the dataset to .fits format if a dataset_path and data_name are specified. Otherwise, return the simulated \
imaging data instance."""
plane = pl.Plane(
redshift=float(np.mean([galaxy.redshift for galaxy in galaxies])),
galaxies=galaxies,
)
return self.from_plane_and_grid(plane=plane, grid=grid, name=name)
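# Usage sketch added for illustration (not part of the original file): `psf`,
# `galaxies` and `grid` stand in for objects created elsewhere.
#
#   simulator = SimulatorImaging(exposure_time=300.0, psf=psf, add_poisson_noise=True)
#   imaging = simulator.from_galaxies_and_grid(galaxies=galaxies, grid=grid)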
```
#### File: autogalaxy/dataset/interferometer.py
```python
import numpy as np
from autoarray.structures import grids
from autoarray.dataset import interferometer
from autoarray.operators import transformer
from autogalaxy.plane import plane as pl
class SettingsMaskedInterferometer(interferometer.SettingsMaskedInterferometer):
def __init__(
self,
grid_class=grids.Grid2D,
grid_inversion_class=grids.Grid2D,
sub_size=2,
sub_size_inversion=2,
fractional_accuracy=0.9999,
sub_steps=None,
pixel_scales_interp=None,
signal_to_noise_limit=None,
transformer_class=transformer.TransformerNUFFT,
):
"""
        The lens dataset is the collection of data (image, noise-map), a mask, grid, convolver \
and other utilities that are used for modeling and fitting an image of a strong lens.
Whilst the image, noise-map, etc. are loaded in 2D, the lens dataset creates reduced 1D arrays of each \
for lens calculations.
Parameters
----------
grid_class : ag.Grid2D
The type of grid used to create the image from the `Galaxy` and `Plane`. The options are `Grid2D`,
`Grid2DIterate` and `Grid2DInterpolate` (see the `Grid2D` documentation for a description of these options).
grid_inversion_class : ag.Grid2D
The type of grid used to create the grid that maps the `Inversion` source pixels to the data's image-pixels.
The options are `Grid2D`, `Grid2DIterate` and `Grid2DInterpolate` (see the `Grid2D` documentation for a
description of these options).
sub_size : int
If the grid and / or grid_inversion use a `Grid2D`, this sets the sub-size used by the `Grid2D`.
fractional_accuracy : float
If the grid and / or grid_inversion use a `Grid2DIterate`, this sets the fractional accuracy it
uses when evaluating functions.
sub_steps : [int]
If the grid and / or grid_inversion use a `Grid2DIterate`, this sets the steps the sub-size is increased by
to meet the fractional accuracy when evaluating functions.
pixel_scales_interp : float or (float, float)
If the grid and / or grid_inversion use a `Grid2DInterpolate`, this sets the resolution of the interpolation
grid.
signal_to_noise_limit : float
            If input, the dataset's noise-map is rescaled such that no pixel has a signal-to-noise above the
            signal-to-noise limit.
"""
super().__init__(
grid_class=grid_class,
grid_inversion_class=grid_inversion_class,
sub_size=sub_size,
sub_size_inversion=sub_size_inversion,
fractional_accuracy=fractional_accuracy,
sub_steps=sub_steps,
pixel_scales_interp=pixel_scales_interp,
signal_to_noise_limit=signal_to_noise_limit,
transformer_class=transformer_class,
)
class MaskedInterferometer(interferometer.MaskedInterferometer):
def __init__(
self,
interferometer,
visibilities_mask,
real_space_mask,
settings=SettingsMaskedInterferometer(),
):
"""
The lens dataset is the collection of data (image, noise-map), a mask, grid, convolver \
and other utilities that are used for modeling and fitting an image of a strong lens.
Whilst the image, noise-map, etc. are loaded in 2D, the lens dataset creates reduced 1D arrays of each \
for lens calculations.
Parameters
----------
imaging: im.Imaging
The imaging data all in 2D (the image, noise-map, etc.)
real_space_mask: msk.Mask2D
The 2D mask that is applied to the image.
sub_size : int
            The size of the sub-grid used for each lens SubGrid. E.g. a value of 2 grids each image-pixel on a 2x2 \
sub-grid.
positions : [[]]
            Lists of image-pixel coordinates (arc-seconds) that map close to one another in the source-plane(s), \
used to speed up the non-linear sampling.
pixel_scales_interp : float
If `True`, expensive to compute mass profile deflection angles will be computed on a sparse grid and \
interpolated to the grid, sub and blurring grids.
"""
super(MaskedInterferometer, self).__init__(
interferometer=interferometer,
visibilities_mask=visibilities_mask,
real_space_mask=real_space_mask,
settings=settings,
)
class SimulatorInterferometer(interferometer.SimulatorInterferometer):
def __init__(
self,
uv_wavelengths,
exposure_time: float,
background_sky_level: float = 0.0,
transformer_class=transformer.TransformerDFT,
noise_sigma=0.1,
noise_if_add_noise_false=0.1,
noise_seed=-1,
):
"""A class representing a Imaging observation, using the shape of the image, the pixel scale,
psf, exposure time, etc.
Parameters
----------
shape_native : (int, int)
            The shape of the observation. Note that we do not simulate a full Imaging frame (e.g. 2000 x 2000 pixels for \
Hubble imaging), but instead just a cut-out around the strong lens.
pixel_scales : float
The size of each pixel in arc seconds.
psf : PSF
An arrays describing the PSF kernel of the image.
exposure_time_map : float
The exposure time of an observation using this data.
background_sky_map : float
            The level of the background sky of an observation using this data.
"""
super(SimulatorInterferometer, self).__init__(
uv_wavelengths=uv_wavelengths,
exposure_time=exposure_time,
background_sky_level=background_sky_level,
transformer_class=transformer_class,
noise_sigma=noise_sigma,
noise_if_add_noise_false=noise_if_add_noise_false,
noise_seed=noise_seed,
)
def from_plane_and_grid(self, plane, grid, name=None):
"""
Returns a realistic simulated image by applying effects to a plain simulated image.
Parameters
----------
        plane
            The plane (a collection of galaxies) whose image is simulated.
        grid
            The (y,x) grid of real-space coordinates the plane's image is evaluated on.
        name
            An optional name for the simulated dataset.
"""
image = plane.image_from_grid(grid=grid)
return self.from_image(image=image.slim_binned, name=name)
def from_galaxies_and_grid(self, galaxies, grid, name=None):
"""Simulate imaging data for this data, as follows:
1) Setup the image-plane grid of the Imaging arrays, which defines the coordinates used for the ray-tracing.
2) Use this grid and the lens and source galaxies to setup a plane, which generates the image of \
the simulated imaging data.
3) Simulate the imaging data, using a special image which ensures edge-effects don't
        degrade simulation of the telescope optics (e.g. the PSF convolution).
4) Plot the image using Matplotlib, if the plot_imaging bool is True.
5) Output the dataset to .fits format if a dataset_path and data_name are specified. Otherwise, return the simulated \
imaging data instance."""
plane = pl.Plane(
redshift=float(np.mean([galaxy.redshift for galaxy in galaxies])),
galaxies=galaxies,
)
return self.from_plane_and_grid(plane=plane, grid=grid, name=name)
```
#### File: autogalaxy/galaxy/galaxy_model.py
```python
import inspect
from autofit.mapper.prior_model.prior_model import PriorModel
from autogalaxy.galaxy import Galaxy
from autogalaxy.profiles import light_profiles
from autogalaxy.profiles import mass_profiles
def is_light_profile_class(cls):
"""
Parameters
----------
cls
Some object
Returns
-------
bool: is_light_profile_class
True if cls is a class that inherits from light profile
"""
return inspect.isclass(cls) and issubclass(cls, light_profiles.LightProfile)
def is_mass_profile_class(cls):
"""
Parameters
----------
cls
Some object
Returns
-------
bool: is_mass_profile_class
True if cls is a class that inherits from mass profile
"""
return inspect.isclass(cls) and issubclass(cls, mass_profiles.MassProfile)
class GalaxyModel(PriorModel):
"""
@DynamicAttrs
"""
def __init__(
self,
redshift,
pixelization=None,
regularization=None,
hyper_galaxy=None,
**kwargs
):
"""Class to produce Galaxy instances from sets of profile classes and other model-fitting attributes (e.g. \
        pixelizations, regularization schemes, hyper-galaxies) using the model mapper.
Parameters
----------
light_profile_classes: [LightProfile]
The `LightProfile` classes for which model light profile instances are generated for this galaxy model.
mass_profile_classes: [MassProfile]
The `MassProfile` classes for which model mass profile instances are generated for this galaxy model.
redshift : float or Type[g.Redshift]
The redshift of this model galaxy.
model_redshift : bool
If `True`, the galaxy redshift will be treated as a free-parameter that is fitted for by the non-linear \
search.
pixelization : Pixelization
            The pixelization used to reconstruct the galaxy light and fit the observed data if using an inversion.
        regularization : Regularization
            The regularization scheme used to regularize the reconstruction of the galaxy light when fitting the observed \
            data if using an inversion.
        hyper_galaxy : HyperGalaxy
            A model hyper-galaxy used for scaling the observed data's noise_map.
"""
super().__init__(
Galaxy,
redshift=redshift,
pixelization=pixelization,
regularization=regularization,
hyper_galaxy=hyper_galaxy,
**kwargs
)
profile_models = []
for name, prior_model in self.prior_model_tuples:
cls = prior_model.cls
if is_mass_profile_class(cls) or is_light_profile_class(cls):
profile_models.append(prior_model)
if pixelization is not None and regularization is None:
raise AssertionError(
"If the galaxy prior has a pixelization, it must also have a "
"regularization."
)
if pixelization is None and regularization is not None:
raise AssertionError(
"If the galaxy prior has a regularization, it must also have a "
"pixelization."
)
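# Usage sketch added for illustration (not part of the original file): profile
# classes are passed as keyword arguments and become free model components, and
# a pixelization must always be paired with a regularization scheme. The profile
# class names below assume the standard PyAutoGalaxy light and mass profiles.
#
#   galaxy_model = GalaxyModel(
#       redshift=0.5,
#       bulge=light_profiles.EllipticalSersic,
#       mass=mass_profiles.EllipticalIsothermal,
#   )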
```
#### File: autogalaxy/galaxy/masked_galaxy_data.py
```python
from autoarray.dataset import abstract_dataset
from autoarray.structures import arrays, grids
from autogalaxy import exc
class MaskedGalaxyDataset:
def __init__(
self,
galaxy_data,
mask,
grid_class=grids.Grid2D,
fractional_accuracy=0.9999,
sub_steps=None,
pixel_scales_interp=None,
use_image=False,
use_convergence=False,
use_potential=False,
use_deflections_y=False,
use_deflections_x=False,
):
""" A galaxy-fit data is a collection of fit data components which are used to fit a galaxy to another galaxy. \
This is where a component of a galaxy's light profiles (e.g. image) or mass profiles (e.g. surface \
density, potential or deflection angles) are fitted to one another.
This is primarily performed for automatic prior linking, as a means to efficiently link the priors of a galaxy \
using one inferred parametrization of light or mass profiles to a new galaxy with a different parametrization \
of light or mass profiles.
This omits a number of the fit data components typically used when fitting an image (e.g. the observed image, PSF, \
exposure time map), but still has a number of the other components (e.g. an effective noise_map, grid_stacks).
Parameters
----------
galaxy_data : GalaxyData
The collection of data about the galaxy (image of its profile map, noise-map, etc.) that is fitted.
mask: aa.AbstractMask
            The 2D mask that is applied to the image fit data.
sub_size : int
The size of the sub-grid used for computing the SubGrid (see imaging.masks.SubGrid).
Attributes
----------
noise_map_1d : np.ndarray
The masked 1D arrays of the noise_map
grid_stacks : imaging.masks.GridStack
            Grids of (y,x) Cartesian coordinates which map over the masked 1D fit data array's pixels (includes a \
            grid, sub-grid, etc.)
"""
self.mask = mask
self.galaxy_data = galaxy_data
self.pixel_scales = galaxy_data.pixel_scales
self.image = arrays.Array2D.manual_mask(
array=galaxy_data.image.native_binned, mask=mask.mask_sub_1
)
self.noise_map = arrays.Array2D.manual_mask(
array=galaxy_data.noise_map.native_binned, mask=mask.mask_sub_1
)
self.signal_to_noise_map = self.image / self.noise_map
self.sub_size = mask.sub_size
self.grid = abstract_dataset.grid_from_mask_and_grid_class(
mask=mask,
grid_class=grid_class,
fractional_accuracy=fractional_accuracy,
sub_steps=sub_steps,
pixel_scales_interp=pixel_scales_interp,
)
if all(
not element
for element in [
use_image,
use_convergence,
use_potential,
use_deflections_y,
use_deflections_x,
]
):
raise exc.GalaxyException(
"The galaxy fit data has not been supplied with a use_ method."
)
if (
sum(
[
use_image,
use_convergence,
use_potential,
use_deflections_y,
use_deflections_x,
]
)
> 1
):
raise exc.GalaxyException(
"The galaxy fit data has not been supplied with multiple use_ methods, only supply "
"one."
)
self.use_image = use_image
self.use_convergence = use_convergence
self.use_potential = use_potential
self.use_deflections_y = use_deflections_y
self.use_deflections_x = use_deflections_x
def profile_quantity_from_galaxies(self, galaxies):
if self.use_image:
image = sum(map(lambda g: g.image_from_grid(grid=self.grid), galaxies))
return arrays.Array2D.manual_mask(array=image, mask=self.mask)
elif self.use_convergence:
convergence = sum(
map(lambda g: g.convergence_from_grid(grid=self.grid), galaxies)
)
return arrays.Array2D.manual_mask(array=convergence, mask=self.mask)
elif self.use_potential:
potential = sum(
map(lambda g: g.potential_from_grid(grid=self.grid), galaxies)
)
return arrays.Array2D.manual_mask(array=potential, mask=self.mask)
elif self.use_deflections_y:
deflections = sum(
map(lambda g: g.deflections_from_grid(grid=self.grid), galaxies)
)
return arrays.Array2D.manual_mask(
array=deflections[:, 0], mask=self.grid.mask
)
elif self.use_deflections_x:
deflections = sum(
map(lambda g: g.deflections_from_grid(grid=self.grid), galaxies)
)
return arrays.Array2D.manual_mask(
array=deflections[:, 1], mask=self.grid.mask
)
@property
def data(self):
return self.image
```
#### File: phase/abstract/result.py
```python
import autofit as af
from autogalaxy.galaxy import galaxy as g
class Result(af.Result):
def __init__(
self, samples, previous_model, analysis, search, use_as_hyper_dataset=False
):
"""
The results of a `NonLinearSearch` performed by a phase.
Parameters
----------
samples : af.Samples
A class containing the samples of the non-linear search, including methods to get the maximum log
likelihood model, errors, etc.
previous_model : af.ModelMapper
The model used in this result model-fit.
analysis : Analysis
The Analysis class used by this model-fit to fit the model to the data.
search : af.NonLinearSearch
The `NonLinearSearch` search used by this model fit.
use_as_hyper_dataset : bool
            Whether this result's phase contains hyper phases, allowing it to be used as a hyper dataset.
"""
super().__init__(samples=samples, previous_model=previous_model, search=search)
self.analysis = analysis
self.use_as_hyper_dataset = use_as_hyper_dataset
@property
def max_log_likelihood_plane(self):
instance = self.analysis.associate_hyper_images(instance=self.instance)
return self.analysis.plane_for_instance(instance=instance)
@property
def path_galaxy_tuples(self) -> [(str, g.Galaxy)]:
"""
Tuples associating the names of galaxies with instances from the best fit
"""
return self.instance.path_instance_tuples_for_class(cls=g.Galaxy)
```
#### File: phase/dataset/analysis.py
```python
import autofit as af
from autogalaxy.pipeline.phase.abstract import analysis as abstract_analysis
from autogalaxy.galaxy import galaxy as g
from autogalaxy.plane import plane as pl
import numpy as np
import pickle
import dill
def last_result_with_use_as_hyper_dataset(results):
if results is not None:
if results.last is not None:
for index, result in enumerate(reversed(results)):
if hasattr(result, "use_as_hyper_dataset"):
if result.use_as_hyper_dataset:
return result
class Analysis(abstract_analysis.Analysis):
def __init__(self, masked_dataset, cosmology, settings, results):
super().__init__(cosmology=cosmology, settings=settings)
self.masked_dataset = masked_dataset
result = last_result_with_use_as_hyper_dataset(results=results)
if result is not None:
self.hyper_galaxy_image_path_dict = result.hyper_galaxy_image_path_dict
self.hyper_model_image = result.hyper_model_image
else:
self.hyper_galaxy_image_path_dict = None
self.hyper_model_image = None
def hyper_image_sky_for_instance(self, instance):
if hasattr(instance, "hyper_image_sky"):
return instance.hyper_image_sky
def hyper_background_noise_for_instance(self, instance):
if hasattr(instance, "hyper_background_noise"):
return instance.hyper_background_noise
def plane_for_instance(self, instance):
return pl.Plane(galaxies=instance.galaxies)
def associate_hyper_images(self, instance: af.ModelInstance) -> af.ModelInstance:
"""
Takes images from the last result, if there is one, and associates them with galaxies in this phase
where full-path galaxy names match.
If the galaxy collection has a different name then an association is not made.
e.g.
galaxies.lens will match with:
galaxies.lens
but not with:
galaxies.lens
galaxies.source
Parameters
----------
instance
A model instance with 0 or more galaxies in its tree
Returns
-------
instance
The input instance with images associated with galaxies where possible.
"""
if self.hyper_galaxy_image_path_dict is not None:
for galaxy_path, galaxy in instance.path_instance_tuples_for_class(
g.Galaxy
):
if galaxy_path in self.hyper_galaxy_image_path_dict:
galaxy.hyper_model_image = self.hyper_model_image
galaxy.hyper_galaxy_image = self.hyper_galaxy_image_path_dict[
galaxy_path
]
return instance
def save_attributes_for_aggregator(self, paths: af.Paths):
self.save_dataset(paths=paths)
self.save_mask(paths=paths)
self.save_settings(paths=paths)
self.save_attributes(paths=paths)
def save_dataset(self, paths: af.Paths):
"""
Save the dataset associated with the phase
"""
with open(f"{paths.pickle_path}/dataset.pickle", "wb") as f:
pickle.dump(self.masked_dataset.dataset, f)
def save_mask(self, paths: af.Paths):
"""
Save the mask associated with the phase
"""
with open(f"{paths.pickle_path}/mask.pickle", "wb") as f:
dill.dump(self.masked_dataset.mask, f)
def make_attributes(self):
raise NotImplementedError
def save_attributes(self, paths: af.Paths):
attributes = self.make_attributes()
with open(f"{paths.pickle_path}/attributes.pickle", "wb+") as f:
pickle.dump(attributes, f)
```
#### File: phase/dataset/result.py
```python
from autogalaxy.pipeline.phase import abstract
class Result(abstract.result.Result):
@property
def max_log_likelihood_fit(self):
hyper_image_sky = self.analysis.hyper_image_sky_for_instance(
instance=self.instance
)
hyper_background_noise = self.analysis.hyper_background_noise_for_instance(
instance=self.instance
)
return self.analysis.fit_positions_for_tracer(
plane=self.max_log_likelihood_plane,
hyper_image_sky=hyper_image_sky,
hyper_background_noise=hyper_background_noise,
)
@property
def mask(self):
return self.max_log_likelihood_fit.mask
@property
def masked_dataset(self):
return self.max_log_likelihood_fit.masked_dataset
@property
def pixelization(self):
for galaxy in self.max_log_likelihood_fit.galaxies:
if galaxy.pixelization is not None:
return galaxy.pixelization
```
#### File: phase/imaging/phase.py
```python
from os import path
from astropy import cosmology as cosmo
import autofit as af
from autogalaxy.dataset import imaging
from autogalaxy.pipeline.phase import dataset
from autoarray.inversion import pixelizations as pix
from autoarray.inversion import regularization as reg
from autogalaxy.pipeline.phase.imaging.analysis import Analysis
from autogalaxy.pipeline.phase.imaging.result import Result
from autogalaxy.pipeline.phase.settings import SettingsPhaseImaging
class PhaseImaging(dataset.PhaseDataset):
galaxies = af.PhaseProperty("galaxies")
hyper_image_sky = af.PhaseProperty("hyper_image_sky")
hyper_background_noise = af.PhaseProperty("hyper_background_noise")
Analysis = Analysis
Result = Result
def __init__(
self,
*,
search,
galaxies=None,
hyper_image_sky=None,
hyper_background_noise=None,
settings=SettingsPhaseImaging(),
cosmology=cosmo.Planck15,
use_as_hyper_dataset=False
):
"""
        A phase in a lens pipeline. Uses the chosen non-linear search to fit the models and hyper-galaxies
        passed to it.
Parameters
----------
search: class
The class of a non_linear search
sub_size: int
The side length of the subgrid
"""
super().__init__(
search=search,
galaxies=galaxies,
settings=settings,
cosmology=cosmology,
use_as_hyper_dataset=use_as_hyper_dataset,
)
self.hyper_image_sky = hyper_image_sky
self.hyper_background_noise = hyper_background_noise
self.is_hyper_phase = False
def make_analysis(self, dataset, mask, results=None):
"""
        Returns a lens object. Also calls the prior passing and masked_imaging modifying functions to allow child
classes to change the behaviour of the phase.
Parameters
----------
mask: Mask2D
The default masks passed in by the pipeline
dataset: im.Imaging
            An imaging dataset that has been masked
results: autofit.tools.pipeline.ResultsCollection
The result from the previous phase
Returns
-------
lens : Analysis
            A lens object that the `NonLinearSearch` calls to determine the fit of a set of values
"""
masked_imaging = imaging.MaskedImaging(
imaging=dataset, mask=mask, settings=self.settings.settings_masked_imaging
)
self.output_phase_info()
return self.Analysis(
masked_imaging=masked_imaging,
settings=self.settings,
cosmology=self.cosmology,
results=results,
)
def output_phase_info(self):
file_phase_info = path.join(self.search.paths.output_path, "phase.info")
with open(file_phase_info, "w") as phase_info:
phase_info.write("Optimizer = {} \n".format(type(self.search).__name__))
phase_info.write(
"Sub-grid size = {} \n".format(
self.settings.settings_masked_imaging.sub_size
)
)
phase_info.write(
"PSF shape = {} \n".format(
self.settings.settings_masked_imaging.psf_shape_2d
)
)
phase_info.write("Cosmology = {} \n".format(self.cosmology))
phase_info.close()
class PhaseAttributes:
def __init__(self, cosmology, hyper_model_image, hyper_galaxy_image_path_dict):
self.cosmology = cosmology
self.hyper_model_image = hyper_model_image
self.hyper_galaxy_image_path_dict = hyper_galaxy_image_path_dict
```
#### File: pipeline/phase/phase_galaxy.py
```python
import autofit as af
from astropy import cosmology as cosmo
from autogalaxy.galaxy import fit_galaxy
from autogalaxy.galaxy import masked_galaxy_data
from autogalaxy.pipeline import visualizer
from autogalaxy.pipeline.phase import abstract
class Analysis(af.Analysis):
def __init__(self, cosmology, results):
self.cosmology = cosmology
self.results = results
self.visualizer = visualizer.Visualizer()
# noinspection PyAbstractClass
class AnalysisSingle(Analysis):
def __init__(self, galaxy_data, cosmology, results=None):
super().__init__(cosmology=cosmology, results=results)
self.galaxy_data = galaxy_data
def log_likelihood_function(self, instance):
fit = self.fit_for_instance(instance=instance)
return fit.figure_of_merit
def visualize(self, paths, instance, during_analysis):
fit = self.fit_for_instance(instance=instance)
self.visualizer.visualize_galaxy_fit_subplot(fit)
if during_analysis:
self.visualizer.visualize_fit_individuals(fit)
else:
if self.visualizer.plot_plane_all_at_end_png:
self.visualizer.visualize_fit_individuals(
fit=fit, plot_all=True, image_format="png"
)
if self.visualizer.plot_plane_all_at_end_fits:
self.visualizer.visualize_fit_individuals(
fit=fit, plot_all=True, image_format="fits", path_suffix="/fits/"
)
return fit
def fit_for_instance(self, instance):
"""
Determine the fit of a lens galaxy and source galaxy to the masked_imaging in
this lens.
Parameters
----------
instance
A model instance with attributes
Returns
-------
fit: Fit
            A fit object indicating how well this model fits the masked galaxy dataset.
"""
return fit_galaxy.FitGalaxy(
masked_galaxy_dataset=self.galaxy_data, model_galaxies=instance.galaxies
)
# noinspection PyAbstractClass
class AnalysisDeflections(Analysis):
def __init__(self, galaxy_data_y, galaxy_data_x, cosmology, results=None):
super().__init__(cosmology=cosmology, results=results)
self.galaxy_data_y = galaxy_data_y
self.galaxy_data_x = galaxy_data_x
def log_likelihood_function(self, instance):
fit_y, fit_x = self.fit_for_instance(instance=instance)
return fit_y.figure_of_merit + fit_x.figure_of_merit
def visualize(self, paths, instance, during_analysis):
fit_y, fit_x = self.fit_for_instance(instance=instance)
if self.visualizer.plot_subplot_galaxy_fit:
self.visualizer.visualize_galaxy_fit_subplot(fit_y, path_suffix="/fit_y_")
self.visualizer.visualize_galaxy_fit_subplot(fit_x, path_suffix="/fit_x_")
if during_analysis:
self.visualizer.visualize_fit_individuals(fit_y, path_suffix="/fit_y")
self.visualizer.visualize_fit_individuals(fit_x, path_suffix="/fit_x")
else:
if self.visualizer.plot_plane_all_at_end_png:
self.visualizer.visualize_fit_individuals(
fit_y, path_suffix="/fits/fit_y", plot_all=True
)
self.visualizer.visualize_fit_individuals(
fit_x, path_suffix="/fits/fit_x", plot_all=True
)
if self.visualizer.plot_plane_all_at_end_fits:
self.visualizer.visualize_fit_individuals(
fit_y, path_suffix="/fits/fit_y", plot_all=True, image_format="fits"
)
self.visualizer.visualize_fit_individuals(
fit_x, path_suffix="/fits/fit_x", plot_all=True, image_format="fits"
)
return fit_y, fit_x
def fit_for_instance(self, instance):
fit_y = fit_galaxy.FitGalaxy(
masked_galaxy_dataset=self.galaxy_data_y, model_galaxies=instance.galaxies
)
fit_x = fit_galaxy.FitGalaxy(
masked_galaxy_dataset=self.galaxy_data_x, model_galaxies=instance.galaxies
)
return fit_y, fit_x
class PhaseGalaxy(abstract.AbstractPhase):
galaxies = af.PhaseProperty("galaxies")
Analysis = Analysis
def __init__(
self,
name,
search,
galaxies=None,
use_image=False,
use_convergence=False,
use_potential=False,
use_deflections=False,
sub_size=2,
pixel_scales_interp=None,
cosmology=cosmo.Planck15,
):
"""
        A phase in a lens pipeline. Uses the set non_linear search to try to fit
        the models and hyper_galaxies passed to it.
Parameters
----------
search: class
The class of a non_linear search
sub_size: int
The side length of the subgrid
"""
super(PhaseGalaxy, self).__init__(name=name, search=search)
self.cosmology = cosmology
self.use_image = use_image
self.use_convergence = use_convergence
self.use_potential = use_potential
self.use_deflections = use_deflections
self.galaxies = galaxies
self.sub_size = sub_size
self.pixel_scales_interp = pixel_scales_interp
    def run(self, galaxy_data, mask, info=None, pickle_files=None, results=None):
"""
Run this phase.
Parameters
----------
galaxy_data
mask: Mask2D
            The default mask passed in by the pipeline
results: autofit.tools.pipeline.ResultsCollection
An object describing the results of the last phase or None if no phase has
been executed
Returns
-------
result: AbstractPhase.Result
A result object comprising the best fit model and other hyper_galaxies.
"""
analysis = self.make_analysis(
galaxy_data=galaxy_data, results=results, mask=mask
)
self.save_metadata(galaxy_data.name)
self.model = self.model.populate(results)
result = self.run_analysis(
analysis=analysis, info=info, pickle_files=pickle_files
)
return self.make_result(result, analysis)
def make_analysis(self, galaxy_data, mask, results=None):
"""
        Returns a lens object. Also calls the prior passing and masked_imaging modifying
functions to allow child classes to change the behaviour of the phase.
Parameters
----------
galaxy_data
mask: Mask2D
            The default mask passed in by the pipeline
results: autofit.tools.pipeline.ResultsCollection
The result from the previous phase
Returns
-------
        lens: Analysis
            A lens object that the `NonLinearSearch` calls to determine the fit of a
            set of values
"""
if self.use_image or self.use_convergence or self.use_potential:
galaxy_data = masked_galaxy_data.MaskedGalaxyDataset(
galaxy_data=galaxy_data[0],
mask=mask,
pixel_scales_interp=self.pixel_scales_interp,
use_image=self.use_image,
use_convergence=self.use_convergence,
use_potential=self.use_potential,
use_deflections_y=self.use_deflections,
use_deflections_x=self.use_deflections,
)
return AnalysisSingle(
galaxy_data=galaxy_data, cosmology=self.cosmology, results=results
)
elif self.use_deflections:
galaxy_data_y = masked_galaxy_data.MaskedGalaxyDataset(
galaxy_data=galaxy_data[0],
mask=mask,
pixel_scales_interp=self.pixel_scales_interp,
use_image=self.use_image,
use_convergence=self.use_convergence,
use_potential=self.use_potential,
use_deflections_y=self.use_deflections,
use_deflections_x=False,
)
galaxy_data_x = masked_galaxy_data.MaskedGalaxyDataset(
galaxy_data=galaxy_data[1],
mask=mask,
pixel_scales_interp=self.pixel_scales_interp,
use_image=self.use_image,
use_convergence=self.use_convergence,
use_potential=self.use_potential,
use_deflections_y=False,
use_deflections_x=self.use_deflections,
)
return AnalysisDeflections(
galaxy_data_y=galaxy_data_y,
galaxy_data_x=galaxy_data_x,
cosmology=self.cosmology,
results=results,
)
# noinspection PyAbstractClass
```
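The `make_analysis` method above dispatches on the `use_image` / `use_convergence` / `use_potential` / `use_deflections` flags to decide whether a single masked galaxy dataset or a (y, x) pair of deflection datasets is fitted. The following self-contained sketch mirrors that branch structure with stand-in classes (not the real autogalaxy ones), purely to illustrate the dispatch:
```python
# Stand-in classes only; this mirrors the branch structure of
# PhaseGalaxy.make_analysis above, not the real autogalaxy implementation.
class AnalysisSingleStub:
    def __init__(self, dataset):
        self.dataset = dataset


class AnalysisDeflectionsStub:
    def __init__(self, dataset_y, dataset_x):
        self.dataset_y = dataset_y
        self.dataset_x = dataset_x


def make_analysis_stub(
    galaxy_data, use_image=False, use_convergence=False, use_potential=False, use_deflections=False
):
    # image / convergence / potential all map onto a single masked dataset
    if use_image or use_convergence or use_potential:
        return AnalysisSingleStub(dataset=galaxy_data[0])
    # deflections are fitted as a (y, x) pair of datasets
    elif use_deflections:
        return AnalysisDeflectionsStub(dataset_y=galaxy_data[0], dataset_x=galaxy_data[1])


print(type(make_analysis_stub(["image"], use_image=True)).__name__)                   # AnalysisSingleStub
print(type(make_analysis_stub(["defl_y", "defl_x"], use_deflections=True)).__name__)  # AnalysisDeflectionsStub
```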
#### File: plot/plotters/galaxy_plotters.py
```python
from autoarray.plot.mat_wrap import mat_plot as mp
from autogalaxy.plot.plotters import lensing_obj_plotter
from autogalaxy.plot.mat_wrap import lensing_mat_plot, lensing_include, lensing_visuals
from autogalaxy.plot.plotters import light_profile_plotters, mass_profile_plotters
from autogalaxy.profiles import light_profiles
class GalaxyPlotter(lensing_obj_plotter.LensingObjPlotter):
def __init__(
self,
galaxy,
grid,
mat_plot_1d: lensing_mat_plot.MatPlot1D = lensing_mat_plot.MatPlot1D(),
visuals_1d: lensing_visuals.Visuals1D = lensing_visuals.Visuals1D(),
include_1d: lensing_include.Include1D = lensing_include.Include1D(),
mat_plot_2d: lensing_mat_plot.MatPlot2D = lensing_mat_plot.MatPlot2D(),
visuals_2d: lensing_visuals.Visuals2D = lensing_visuals.Visuals2D(),
include_2d: lensing_include.Include2D = lensing_include.Include2D(),
):
super().__init__(
lensing_obj=galaxy,
grid=grid,
mat_plot_2d=mat_plot_2d,
include_2d=include_2d,
visuals_2d=visuals_2d,
mat_plot_1d=mat_plot_1d,
include_1d=include_1d,
visuals_1d=visuals_1d,
)
@property
def galaxy(self):
return self.lensing_obj
@property
def visuals_with_include_2d(self) -> "vis.Visuals2D":
"""
Extracts from a `Structure` attributes that can be plotted and return them in a `Visuals` object.
Only attributes with `True` entries in the `Include` object are extracted for plotting.
From an `AbstractStructure` the following attributes can be extracted for plotting:
- origin: the (y,x) origin of the structure's coordinate system.
- mask: the mask of the structure.
- border: the border of the structure's mask.
Parameters
----------
structure : abstract_structure.AbstractStructure
The structure whose attributes are extracted for plotting.
Returns
-------
vis.Visuals2D
The collection of attributes that can be plotted by a `Plotter2D` object.
"""
visuals_2d = super(GalaxyPlotter, self).visuals_with_include_2d
return visuals_2d + visuals_2d.__class__(
light_profile_centres=self.extract_2d(
"light_profile_centres",
self.lensing_obj.extract_attribute(
cls=light_profiles.LightProfile, name="centre"
),
)
)
def light_profile_plotter_from(self, light_profile):
return light_profile_plotters.LightProfilePlotter(
light_profile=light_profile,
grid=self.grid,
mat_plot_2d=self.mat_plot_2d,
visuals_2d=self.visuals_2d,
include_2d=self.include_2d,
mat_plot_1d=self.mat_plot_1d,
visuals_1d=self.visuals_1d,
include_1d=self.include_1d,
)
def mass_profile_plotter_from(self, mass_profile):
return mass_profile_plotters.MassProfilePlotter(
mass_profile=mass_profile,
grid=self.grid,
mat_plot_2d=self.mat_plot_2d,
visuals_2d=self.visuals_2d,
include_2d=self.include_2d,
mat_plot_1d=self.mat_plot_1d,
visuals_1d=self.visuals_1d,
include_1d=self.include_1d,
)
def figures(
self,
image=False,
convergence=False,
potential=False,
deflections_y=False,
deflections_x=False,
magnification=False,
contribution_map=False,
):
if image:
self.mat_plot_2d.plot_array(
array=self.galaxy.image_from_grid(grid=self.grid),
visuals_2d=self.visuals_with_include_2d,
auto_labels=mp.AutoLabels(title="Image", filename="image"),
)
super().figures(
convergence=convergence,
potential=potential,
deflections_y=deflections_y,
deflections_x=deflections_x,
magnification=magnification,
)
if contribution_map:
self.mat_plot_2d.plot_array(
array=self.galaxy.contribution_map,
visuals_2d=self.visuals_with_include_2d,
auto_labels=mp.AutoLabels(
title="Contribution Map", filename="contribution_map"
),
)
def subplot_of_light_profiles(self, image=False):
light_profile_plotters = [
self.light_profile_plotter_from(light_profile)
for light_profile in self.galaxy.light_profiles
]
if image:
self.subplot_of_plotters_figure(
plotters=light_profile_plotters, name="image"
)
def subplot_of_mass_profiles(
self,
convergence=False,
potential=False,
deflections_y=False,
deflections_x=False,
):
mass_profile_plotters = [
self.mass_profile_plotter_from(mass_profile)
for mass_profile in self.galaxy.mass_profiles
]
if convergence:
self.subplot_of_plotters_figure(
plotters=mass_profile_plotters, name="convergence"
)
if potential:
self.subplot_of_plotters_figure(
plotters=mass_profile_plotters, name="potential"
)
if deflections_y:
self.subplot_of_plotters_figure(
plotters=mass_profile_plotters, name="deflections_y"
)
if deflections_x:
self.subplot_of_plotters_figure(
plotters=mass_profile_plotters, name="deflections_x"
)
```
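A hedged usage sketch of the `GalaxyPlotter` defined above. The galaxy and profile constructors mirror those used in the test file later in this document; the `ag.Grid2D.uniform` call is an assumption about the grid API and may need adjusting to the installed autogalaxy/autoarray version:
```python
# Sketch only: assumes autogalaxy is installed and that Grid2D.uniform accepts
# shape_native / pixel_scales keywords (an assumption, not taken from the file above).
import autogalaxy as ag
from autogalaxy.plot.plotters.galaxy_plotters import GalaxyPlotter

galaxy = ag.Galaxy(
    redshift=0.5,
    light_profile=ag.lp.EllipticalSersic(intensity=1.0),
    mass_profile=ag.mp.SphericalIsothermal(einstein_radius=1.0),
)
grid = ag.Grid2D.uniform(shape_native=(100, 100), pixel_scales=0.05)

plotter = GalaxyPlotter(galaxy=galaxy, grid=grid)
plotter.figures(image=True, convergence=True)        # individual figures of the galaxy
plotter.subplot_of_light_profiles(image=True)         # one panel per light profile
plotter.subplot_of_mass_profiles(convergence=True)    # one panel per mass profile
```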
#### File: plot/plotters/mass_profile_plotters.py
```python
from autoarray.structures import grids
from autogalaxy.plot.plotters import lensing_obj_plotter
from autogalaxy.plot.mat_wrap import lensing_mat_plot, lensing_include, lensing_visuals
from autogalaxy.profiles.mass_profiles import mass_profiles as mp
class MassProfilePlotter(lensing_obj_plotter.LensingObjPlotter):
def __init__(
self,
mass_profile: mp.MassProfile,
grid: grids.Grid2D,
mat_plot_1d: lensing_mat_plot.MatPlot1D = lensing_mat_plot.MatPlot1D(),
visuals_1d: lensing_visuals.Visuals1D = lensing_visuals.Visuals1D(),
include_1d: lensing_include.Include1D = lensing_include.Include1D(),
mat_plot_2d: lensing_mat_plot.MatPlot2D = lensing_mat_plot.MatPlot2D(),
visuals_2d: lensing_visuals.Visuals2D = lensing_visuals.Visuals2D(),
include_2d: lensing_include.Include2D = lensing_include.Include2D(),
):
super().__init__(
lensing_obj=mass_profile,
grid=grid,
mat_plot_2d=mat_plot_2d,
include_2d=include_2d,
visuals_2d=visuals_2d,
mat_plot_1d=mat_plot_1d,
include_1d=include_1d,
visuals_1d=visuals_1d,
)
@property
def mass_profile(self):
return self.lensing_obj
```
#### File: profiles/mass_profiles/mass_sheets.py
```python
import numpy as np
from autoarray.structures import grids
from autogalaxy.profiles import geometry_profiles
from autogalaxy.profiles import mass_profiles as mp
from autogalaxy import convert
import typing
from scipy.interpolate import griddata
from autogalaxy import exc
class MassSheet(geometry_profiles.SphericalProfile, mp.MassProfile):
def __init__(
self, centre: typing.Tuple[float, float] = (0.0, 0.0), kappa: float = 0.0
):
"""
Represents a mass-sheet
Parameters
----------
centre: (float, float)
The (y,x) arc-second coordinates of the profile centre.
kappa : float
The magnitude of the convergence of the mass-sheet.
"""
super(MassSheet, self).__init__(centre=centre)
self.kappa = kappa
def convergence_func(self, grid_radius):
return 0.0
@grids.grid_like_to_structure
def convergence_from_grid(self, grid):
return np.full(shape=grid.shape[0], fill_value=self.kappa)
@grids.grid_like_to_structure
def potential_from_grid(self, grid):
return np.zeros(shape=grid.shape[0])
@grids.grid_like_to_structure
@grids.transform
@grids.relocate_to_radial_minimum
def deflections_from_grid(self, grid):
grid_radii = self.grid_to_grid_radii(grid=grid)
return self.grid_to_grid_cartesian(grid=grid, radius=self.kappa * grid_radii)
# noinspection PyAbstractClass
class ExternalShear(geometry_profiles.EllipticalProfile, mp.MassProfile):
def __init__(self, elliptical_comps: typing.Tuple[float, float] = (0.0, 0.0)):
"""
An `ExternalShear` term, to model the line-of-sight contribution of other galaxies / satellites.
        The shear angle phi is defined in the direction of stretching of the image. Therefore, if an object located
        outside the lens is responsible for the shear, it will be offset 90 degrees from the value of phi.
        Parameters
        ----------
        elliptical_comps : (float, float)
            The first and second ellipticity components of the shear field, which are converted internally to its
            magnitude (gamma) and rotation angle (phi).
"""
super(ExternalShear, self).__init__(
centre=(0.0, 0.0), elliptical_comps=elliptical_comps
)
magnitude, phi = convert.shear_magnitude_and_phi_from(
elliptical_comps=elliptical_comps
)
self.magnitude = magnitude
self.phi = phi
def convergence_func(self, grid_radius):
return 0.0
def average_convergence_of_1_radius(self):
return 0.0
@grids.grid_like_to_structure
def convergence_from_grid(self, grid):
return np.zeros(shape=grid.shape[0])
@grids.grid_like_to_structure
def potential_from_grid(self, grid):
return np.zeros(shape=grid.shape[0])
@grids.grid_like_to_structure
@grids.transform
@grids.relocate_to_radial_minimum
def deflections_from_grid(self, grid):
"""
Calculate the deflection angles at a given set of arc-second gridded coordinates.
Parameters
----------
grid : aa.Grid2D
The grid of (y,x) arc-second coordinates the deflection angles are computed on.
"""
deflection_y = -np.multiply(self.magnitude, grid[:, 0])
deflection_x = np.multiply(self.magnitude, grid[:, 1])
return self.rotate_grid_from_profile(np.vstack((deflection_y, deflection_x)).T)
class InputDeflections(mp.MassProfile):
def __init__(
self,
deflections_y,
deflections_x,
image_plane_grid,
preload_grid=None,
preload_blurring_grid=None,
# normalization_scale: float = 1.0,
):
"""
Represents a known deflection angle map (e.g. from an already performed lens model or particle simulation
of a mass distribution) which can be used for model fitting.
        The image-plane grid of the deflection angles is used to align an input grid to the input deflections, so that
a new deflection angle map can be computed via interpolation using the scipy.interpolate.griddata method.
A normalization scale can be included, which scales the overall normalization of the deflection angle map
interpolated by a multiplicative factor.
Parameters
----------
deflections_y : aa.Array2D
The input array of the y components of the deflection angles.
deflections_x : aa.Array2D
The input array of the x components of the deflection angles.
image_plane_grid : aa.Grid2D
The image-plane grid from which the deflection angles are defined.
        preload_grid : aa.Grid2D
            A grid whose deflection angles are computed in advance. If this is input, repeated calls with the same
            grid skip the interpolation and reuse the precomputed values, speeding up the calculation.
        normalization_scale : float
            The calculated deflection angles are multiplied by this factor, scaling their values up and down.
"""
super().__init__()
self.deflections_y = deflections_y
self.deflections_x = deflections_x
self.image_plane_grid = image_plane_grid
self.centre = image_plane_grid.origin
self.preload_grid = preload_grid
self.preload_deflections = None
self.preload_blurring_grid = preload_blurring_grid
self.preload_blurring_deflections = None
if self.preload_grid is not None:
self.normalization_scale = 1.0
self.preload_deflections = self.deflections_from_grid(grid=preload_grid)
if self.preload_blurring_grid is not None:
self.normalization_scale = 1.0
self.preload_blurring_deflections = self.deflections_from_grid(
grid=preload_blurring_grid
)
self.normalization_scale = 1.0 # normalization_scale
@grids.grid_like_to_structure
def convergence_from_grid(self, grid):
return self.convergence_via_jacobian_from_grid(grid=grid)
@grids.grid_like_to_structure
def potential_from_grid(self, grid):
return np.zeros(shape=grid.shape[0])
@grids.grid_like_to_structure
def deflections_from_grid(self, grid):
if self.preload_grid is not None and self.preload_deflections is not None:
if grid.sub_shape_slim == self.preload_grid.sub_shape_slim:
if np.allclose(grid, self.preload_grid, 1e-8):
return self.normalization_scale * self.preload_deflections
if (
self.preload_blurring_grid is not None
and self.preload_blurring_deflections is not None
):
if grid.sub_shape_slim == self.preload_blurring_grid.sub_shape_slim:
if np.allclose(grid, self.preload_blurring_grid, 1e-8):
return self.normalization_scale * self.preload_blurring_deflections
deflections_y = self.normalization_scale * griddata(
points=self.image_plane_grid, values=self.deflections_y, xi=grid
)
deflections_x = self.normalization_scale * griddata(
points=self.image_plane_grid, values=self.deflections_x, xi=grid
)
if np.isnan(deflections_y).any() or np.isnan(deflections_x).any():
raise exc.ProfileException(
"The grid input into the DefectionsInput.deflections_from_grid() method has (y,x)"
"coodinates extending beyond the input image_plane_grid."
""
"Update the image_plane_grid to include deflection angles reaching to larger"
"radii or reduce the input grid. "
)
return np.stack((deflections_y, deflections_x), axis=-1)
```
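A minimal numpy sketch of the deflection-angle arithmetic implemented above, outside the autogalaxy grid decorators: a `MassSheet` deflects radially with magnitude `kappa * r` (i.e. `kappa * (y - yc, x - xc)` in cartesian form), and an `ExternalShear` produces `(-gamma * y, +gamma * x)` before the rotation by the shear angle:
```python
import numpy as np


def mass_sheet_deflections(grid, kappa, centre=(0.0, 0.0)):
    # grid is an (N, 2) array of (y, x) coordinates; a radial deflection of
    # magnitude kappa * r is simply kappa * (y - yc, x - xc) in cartesian form.
    return kappa * (grid - np.asarray(centre))


def external_shear_deflections(grid, magnitude):
    # Before rotation by the shear angle phi, the deflections are (-gamma * y, +gamma * x),
    # matching ExternalShear.deflections_from_grid above.
    deflection_y = -magnitude * grid[:, 0]
    deflection_x = magnitude * grid[:, 1]
    return np.stack((deflection_y, deflection_x), axis=-1)


grid = np.array([[1.0, 0.0], [0.0, 2.0]])
print(mass_sheet_deflections(grid, kappa=0.5))          # [[0.5 0. ] [0.  1. ]]
print(external_shear_deflections(grid, magnitude=0.1))  # [[-0.1  0. ] [ 0.   0.2]]
```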
#### File: autogalaxy/profiles/point_sources.py
```python
import numpy as np
from autoarray.structures import grids
import typing
class PointSource:
def __init__(self, centre: typing.Tuple[float, float] = (0.0, 0.0)):
self.centre = centre
class PointSourceFlux(PointSource):
def __init__(
self, centre: typing.Tuple[float, float] = (0.0, 0.0), flux: float = 0.1
):
super().__init__(centre=centre)
self.flux = flux
```
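The point-source profiles above only store a centre and, for `PointSourceFlux`, a flux. A quick hedged usage sketch, assuming the module is importable under the path shown in the header:
```python
# Assumes the module path matches the file header above.
from autogalaxy.profiles.point_sources import PointSourceFlux

source = PointSourceFlux(centre=(0.1, -0.2), flux=0.3)
print(source.centre, source.flux)  # (0.1, -0.2) 0.3
```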
#### File: unit/fit/test_fit.py
```python
import numpy as np
import pytest
import autogalaxy as ag
from autoarray.inversion import inversions
from autogalaxy.mock.mock import MockLightProfile
class MockFitImaging:
def __init__(self, model_images_of_galaxies):
self.model_images_of_galaxies = model_images_of_galaxies
class TestFitImaging:
class TestLikelihood:
def test__1x2_image__no_psf_blurring__plane_fits_data_with_chi_sq_5(self):
# The image plane image generated by the galaxy is [1.0, 1.0]
# Thus the chi squared is 4.0**2.0 + 3.0**2.0 = 25.0
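            # In general: chi_squared = sum((data - model)**2 / noise**2),
            # noise_normalization = sum(log(2 * pi * noise**2)) over the unmasked pixels, and
            # log_likelihood = -0.5 * (chi_squared + noise_normalization), which the assertions
            # at the end of this test check for residuals [4.0, 3.0] and unit noise.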
psf = ag.Kernel2D.manual_native(
array=[[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]],
pixel_scales=1.0,
)
imaging = ag.Imaging(
image=5.0 * ag.Array2D.ones(shape_native=(3, 4), pixel_scales=1.0),
psf=psf,
noise_map=ag.Array2D.ones(shape_native=(3, 4), pixel_scales=1.0),
)
imaging.image[6] = 4.0
mask = ag.Mask2D.manual(
mask=[
[True, True, True, True],
[True, False, False, True],
[True, True, True, True],
],
pixel_scales=1.0,
)
masked_imaging_7x7 = ag.MaskedImaging(
imaging=imaging,
mask=mask,
settings=ag.SettingsMaskedImaging(grid_class=ag.Grid2D, sub_size=1),
)
# Setup as a ray trace instance, using a light profile for the galaxy
g0 = ag.Galaxy(
redshift=0.5, light_profile=MockLightProfile(value=1.0, size=2)
)
plane = ag.Plane(galaxies=[g0])
fit = ag.FitImaging(masked_imaging=masked_imaging_7x7, plane=plane)
assert (
fit.mask
== np.array(
[
[True, True, True, True],
[True, False, False, True],
[True, True, True, True],
]
)
).all()
assert (
fit.image.native
== np.array(
[[0.0, 0.0, 0.0, 0.0], [0.0, 5.0, 4.0, 0.0], [0.0, 0.0, 0.0, 0.0]]
)
).all()
assert (
fit.noise_map.native
== np.array(
[[0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 1.0, 0.0], [0.0, 0.0, 0.0, 0.0]]
)
).all()
assert (
fit.model_image.native
== np.array(
[[0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 1.0, 0.0], [0.0, 0.0, 0.0, 0.0]]
)
).all()
assert (
fit.residual_map.native
== np.array(
[[0.0, 0.0, 0.0, 0.0], [0.0, 4.0, 3.0, 0.0], [0.0, 0.0, 0.0, 0.0]]
)
).all()
assert (
fit.normalized_residual_map.native
== np.array(
[[0.0, 0.0, 0.0, 0.0], [0.0, 4.0, 3.0, 0.0], [0.0, 0.0, 0.0, 0.0]]
)
).all()
assert (
fit.chi_squared_map.native
== np.array(
[[0.0, 0.0, 0.0, 0.0], [0.0, 16.0, 9.0, 0.0], [0.0, 0.0, 0.0, 0.0]]
)
).all()
assert fit.chi_squared == 25.0
assert fit.reduced_chi_squared == 25.0 / 2.0
assert fit.noise_normalization == (2.0 * np.log(2 * np.pi * 1.0 ** 2.0))
assert fit.log_likelihood == -0.5 * (
25.0 + 2.0 * np.log(2 * np.pi * 1.0 ** 2.0)
)
def test__1x2_image__include_psf_blurring__plane_fits_data_with_chi_sq_4(self):
            # This PSF changes the blurred image plane image from [1.0, 1.0] to [1.0, 4.0]
            # Thus, the chi squared is 4.0**2.0 + 0.0**2.0 = 16.0
psf = ag.Kernel2D.manual_native(
array=[[0.0, 0.0, 0.0], [0.0, 1.0, 3.0], [0.0, 0.0, 0.0]],
pixel_scales=1.0,
renormalize=False,
)
imaging = ag.Imaging(
image=5.0 * ag.Array2D.ones(shape_native=(3, 4), pixel_scales=1.0),
psf=psf,
noise_map=ag.Array2D.ones(shape_native=(3, 4), pixel_scales=1.0),
)
imaging.image[6] = 4.0
mask = ag.Mask2D.manual(
mask=[
[True, True, True, True],
[True, False, False, True],
[True, True, True, True],
],
pixel_scales=1.0,
)
masked_imaging_7x7 = ag.MaskedImaging(
imaging=imaging,
mask=mask,
settings=ag.SettingsMaskedImaging(
grid_class=ag.Grid2D, renormalize_psf=False, sub_size=1
),
)
# Setup as a ray trace instance, using a light profile for the galaxy
g0 = ag.Galaxy(
redshift=0.5, light_profile=MockLightProfile(value=1.0, size=2)
)
plane = ag.Plane(galaxies=[g0])
fit = ag.FitImaging(masked_imaging=masked_imaging_7x7, plane=plane)
assert (
fit.mask
== np.array(
[
[True, True, True, True],
[True, False, False, True],
[True, True, True, True],
]
)
).all()
assert (
fit.image.native
== np.array(
[[0.0, 0.0, 0.0, 0.0], [0.0, 5.0, 4.0, 0.0], [0.0, 0.0, 0.0, 0.0]]
)
).all()
assert (
fit.noise_map.native
== np.array(
[[0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 1.0, 0.0], [0.0, 0.0, 0.0, 0.0]]
)
).all()
assert (
fit.model_image.native
== np.array(
[[0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 4.0, 0.0], [0.0, 0.0, 0.0, 0.0]]
)
).all()
assert (
fit.residual_map.native
== np.array(
[[0.0, 0.0, 0.0, 0.0], [0.0, 4.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]]
)
).all()
assert (
fit.normalized_residual_map.native
== np.array(
[[0.0, 0.0, 0.0, 0.0], [0.0, 4.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]]
)
).all()
assert (
fit.chi_squared_map.native
== np.array(
[[0.0, 0.0, 0.0, 0.0], [0.0, 16.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]]
)
).all()
assert fit.chi_squared == 16.0
assert fit.reduced_chi_squared == 16.0 / 2.0
assert fit.noise_normalization == (2.0 * np.log(2 * np.pi * 1.0 ** 2.0))
assert fit.log_likelihood == -0.5 * (
16.0 + 2.0 * np.log(2 * np.pi * 1.0 ** 2.0)
)
def test__hyper_galaxy_changes_noise_above_from_1_to_2__reflected_in_likelihood(
self,
):
            # This PSF changes the blurred image plane image from [1.0, 1.0] to [1.0, 4.0]
            # Thus, without hyper noise scaling the chi squared would be 4.0**2.0 + 0.0**2.0 = 16.0
            # The hyper_galaxies galaxy increases the noise in both pixels by 1.0, to 2.0.
            # This reduces the chi squared from 16.0 to 4.0
psf = ag.Kernel2D.manual_native(
array=[[0.0, 0.0, 0.0], [0.0, 1.0, 3.0], [0.0, 0.0, 0.0]],
pixel_scales=1.0,
)
imaging = ag.Imaging(
image=5.0 * ag.Array2D.ones(shape_native=(3, 4), pixel_scales=1.0),
psf=psf,
noise_map=ag.Array2D.ones(shape_native=(3, 4), pixel_scales=1.0),
)
imaging.image[6] = 4.0
mask = ag.Mask2D.manual(
mask=[
[True, True, True, True],
[True, False, False, True],
[True, True, True, True],
],
pixel_scales=1.0,
)
masked_imaging_7x7 = ag.MaskedImaging(
imaging=imaging,
mask=mask,
settings=ag.SettingsMaskedImaging(
grid_class=ag.Grid2D, renormalize_psf=False, sub_size=1
),
)
# Setup as a ray trace instance, using a light profile for the galaxy
g0 = ag.Galaxy(
redshift=0.5,
light_profile=MockLightProfile(value=1.0, size=2),
hyper_galaxy=ag.HyperGalaxy(
contribution_factor=1.0, noise_factor=1.0, noise_power=1.0
),
hyper_model_image=ag.Array2D.ones(
shape_native=(1, 2), pixel_scales=1.0
),
hyper_galaxy_image=ag.Array2D.ones(
shape_native=(1, 2), pixel_scales=1.0
),
hyper_minimum_value=0.0,
)
plane = ag.Plane(galaxies=[g0])
fit = ag.FitImaging(masked_imaging=masked_imaging_7x7, plane=plane)
assert (
fit.noise_map.native
== np.array(
[[0.0, 0.0, 0.0, 0.0], [0.0, 2.0, 2.0, 0.0], [0.0, 0.0, 0.0, 0.0]]
)
).all()
assert (
fit.chi_squared_map.native
== np.array(
[[0.0, 0.0, 0.0, 0.0], [0.0, 4.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]]
)
).all()
assert fit.chi_squared == 4.0
assert fit.reduced_chi_squared == 4.0 / 2.0
assert fit.noise_normalization == (2.0 * np.log(2 * np.pi * 2.0 ** 2.0))
assert fit.log_likelihood == -0.5 * (
4.0 + 2.0 * np.log(2 * np.pi * 2.0 ** 2.0)
)
def test__hyper_image_changes_background_sky__reflected_in_likelihood(self):
psf = ag.Kernel2D.manual_native(
array=[[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]],
pixel_scales=1.0,
)
imaging = ag.Imaging(
image=ag.Array2D.full(
fill_value=4.0, shape_native=(3, 4), pixel_scales=1.0
),
psf=psf,
noise_map=ag.Array2D.ones(shape_native=(3, 4), pixel_scales=1.0),
)
imaging.image[5] = 5.0
mask = ag.Mask2D.manual(
mask=[
[True, True, True, True],
[True, False, False, True],
[True, True, True, True],
],
pixel_scales=1.0,
)
masked_imaging_7x7 = ag.MaskedImaging(
imaging=imaging,
mask=mask,
settings=ag.SettingsMaskedImaging(grid_class=ag.Grid2D, sub_size=1),
)
# Setup as a ray trace instance, using a light profile for the galaxy
g0 = ag.Galaxy(
redshift=0.5, light_profile=MockLightProfile(value=1.0, size=2)
)
plane = ag.Plane(galaxies=[g0])
hyper_image_sky = ag.hyper_data.HyperImageSky(sky_scale=1.0)
fit = ag.FitImaging(
masked_imaging=masked_imaging_7x7,
plane=plane,
hyper_image_sky=hyper_image_sky,
)
assert (
fit.mask
== np.array(
[
[True, True, True, True],
[True, False, False, True],
[True, True, True, True],
]
)
).all()
assert (
fit.image.native
== np.array(
[[0.0, 0.0, 0.0, 0.0], [0.0, 6.0, 5.0, 0.0], [0.0, 0.0, 0.0, 0.0]]
)
).all()
assert (
fit.chi_squared_map.native
== np.array(
[[0.0, 0.0, 0.0, 0.0], [0.0, 25.0, 16.0, 0.0], [0.0, 0.0, 0.0, 0.0]]
)
).all()
assert fit.chi_squared == 41.0
assert fit.reduced_chi_squared == 41.0 / 2.0
assert fit.noise_normalization == (2.0 * np.log(2 * np.pi * 1.0 ** 2.0))
assert fit.log_likelihood == -0.5 * (
41.0 + 2.0 * np.log(2 * np.pi * 1.0 ** 2.0)
)
def test__hyper_background_changes_background_noise_map__reflected_in_likelihood(
self,
):
psf = ag.Kernel2D.manual_native(
array=[[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]],
pixel_scales=1.0,
)
imaging = ag.Imaging(
image=5.0 * ag.Array2D.ones(shape_native=(3, 4), pixel_scales=1.0),
psf=psf,
noise_map=ag.Array2D.ones(shape_native=(3, 4), pixel_scales=1.0),
)
imaging.image[6] = 4.0
mask = ag.Mask2D.manual(
mask=[
[True, True, True, True],
[True, False, False, True],
[True, True, True, True],
],
pixel_scales=1.0,
)
masked_imaging_7x7 = ag.MaskedImaging(
imaging=imaging,
mask=mask,
settings=ag.SettingsMaskedImaging(grid_class=ag.Grid2D, sub_size=1),
)
# Setup as a ray trace instance, using a light profile for the galaxy
g0 = ag.Galaxy(
redshift=0.5, light_profile=MockLightProfile(value=1.0, size=2)
)
plane = ag.Plane(galaxies=[g0])
hyper_background_noise = ag.hyper_data.HyperBackgroundNoise(noise_scale=1.0)
fit = ag.FitImaging(
masked_imaging=masked_imaging_7x7,
plane=plane,
hyper_background_noise=hyper_background_noise,
)
assert (
fit.noise_map.native
== np.array(
[[0.0, 0.0, 0.0, 0.0], [0.0, 2.0, 2.0, 0.0], [0.0, 0.0, 0.0, 0.0]]
)
).all()
assert fit.chi_squared == 6.25
assert fit.reduced_chi_squared == 6.25 / 2.0
assert fit.noise_normalization == (2.0 * np.log(2 * np.pi * 2.0 ** 2.0))
assert fit.log_likelihood == -0.5 * (
6.25 + 2.0 * np.log(2 * np.pi * 2.0 ** 2.0)
)
def test__hyper_galaxy_changes_noise_above_hyper_noise_limit__rounded_down_to_limit(
self,
):
            # This PSF changes the blurred image plane image from [1.0, 1.0] to [1.0, 4.0]
            # The hyper_galaxies galaxy has a very large noise_factor, so the scaled noise map is
            # rounded down to the hyper-noise limit of 1.0e8 in both pixels.
psf = ag.Kernel2D.manual_native(
array=[[0.0, 0.0, 0.0], [0.0, 1.0, 3.0], [0.0, 0.0, 0.0]],
pixel_scales=1.0,
)
imaging = ag.Imaging(
image=5.0 * ag.Array2D.ones(shape_native=(3, 4), pixel_scales=1.0),
psf=psf,
noise_map=ag.Array2D.ones(shape_native=(3, 4), pixel_scales=1.0),
)
imaging.image[6] = 4.0
mask = ag.Mask2D.manual(
mask=[
[True, True, True, True],
[True, False, False, True],
[True, True, True, True],
],
pixel_scales=1.0,
)
masked_imaging_7x7 = ag.MaskedImaging(
imaging=imaging,
mask=mask,
settings=ag.SettingsMaskedImaging(
grid_class=ag.Grid2D, renormalize_psf=False, sub_size=1
),
)
# Setup as a ray trace instance, using a light profile for the galaxy
g0 = ag.Galaxy(
redshift=0.5,
light_profile=MockLightProfile(value=1.0, size=2),
hyper_galaxy=ag.HyperGalaxy(
contribution_factor=1.0, noise_factor=1.0e9, noise_power=1.0
),
hyper_model_image=ag.Array2D.ones(
shape_native=(1, 2), pixel_scales=1.0
),
hyper_galaxy_image=ag.Array2D.ones(
shape_native=(1, 2), pixel_scales=1.0
),
hyper_minimum_value=0.0,
)
plane = ag.Plane(galaxies=[g0])
fit = ag.FitImaging(masked_imaging=masked_imaging_7x7, plane=plane)
assert (
fit.noise_map.native
== np.array(
[
[0.0, 0.0, 0.0, 0.0],
[0.0, 1.0e8, 1.0e8, 0.0],
[0.0, 0.0, 0.0, 0.0],
]
)
).all()
class TestCompareToManualProfilesOnly:
def test___all_fit_quantities__no_hyper_methods(self, masked_imaging_7x7):
g0 = ag.Galaxy(
redshift=0.5,
light_profile=ag.lp.EllipticalSersic(intensity=1.0),
mass_profile=ag.mp.SphericalIsothermal(einstein_radius=1.0),
)
g1 = ag.Galaxy(
redshift=1.0, light_profile=ag.lp.EllipticalSersic(intensity=1.0)
)
plane = ag.Plane(redshift=0.75, galaxies=[g0, g1])
fit = ag.FitImaging(masked_imaging=masked_imaging_7x7, plane=plane)
assert masked_imaging_7x7.noise_map.native == pytest.approx(
fit.noise_map.native
)
model_image = plane.blurred_image_from_grid_and_convolver(
grid=masked_imaging_7x7.grid,
convolver=masked_imaging_7x7.convolver,
blurring_grid=masked_imaging_7x7.blurring_grid,
)
assert model_image.native == pytest.approx(fit.model_image.native)
residual_map = ag.util.fit.residual_map_from(
data=masked_imaging_7x7.image, model_data=model_image
)
assert residual_map.native == pytest.approx(fit.residual_map.native)
normalized_residual_map = ag.util.fit.normalized_residual_map_from(
residual_map=residual_map, noise_map=masked_imaging_7x7.noise_map
)
assert normalized_residual_map.native == pytest.approx(
fit.normalized_residual_map.native
)
chi_squared_map = ag.util.fit.chi_squared_map_from(
residual_map=residual_map, noise_map=masked_imaging_7x7.noise_map
)
assert chi_squared_map.native == pytest.approx(fit.chi_squared_map.native)
chi_squared = ag.util.fit.chi_squared_from(chi_squared_map=chi_squared_map)
noise_normalization = ag.util.fit.noise_normalization_from(
noise_map=masked_imaging_7x7.noise_map
)
log_likelihood = ag.util.fit.log_likelihood_from(
chi_squared=chi_squared, noise_normalization=noise_normalization
)
assert log_likelihood == pytest.approx(fit.log_likelihood, 1e-4)
assert log_likelihood == fit.figure_of_merit
def test___fit_galaxy_model_image_dict__corresponds_to_blurred_galaxy_images(
self, masked_imaging_7x7
):
g0 = ag.Galaxy(
redshift=0.5,
light_profile=ag.lp.EllipticalSersic(intensity=1.0),
mass_profile=ag.mp.SphericalIsothermal(einstein_radius=1.0),
)
g1 = ag.Galaxy(
redshift=1.0, light_profile=ag.lp.EllipticalSersic(intensity=1.0)
)
g2 = ag.Galaxy(redshift=1.0)
plane = ag.Plane(redshift=0.75, galaxies=[g0, g1, g2])
fit = ag.FitImaging(masked_imaging=masked_imaging_7x7, plane=plane)
g0_blurred_image = g0.blurred_image_from_grid_and_convolver(
grid=masked_imaging_7x7.grid,
blurring_grid=masked_imaging_7x7.blurring_grid,
convolver=masked_imaging_7x7.convolver,
)
g1_blurred_image = g1.blurred_image_from_grid_and_convolver(
grid=masked_imaging_7x7.grid,
blurring_grid=masked_imaging_7x7.blurring_grid,
convolver=masked_imaging_7x7.convolver,
)
assert fit.galaxy_model_image_dict[g0] == pytest.approx(
g0_blurred_image, 1.0e-4
)
assert fit.galaxy_model_image_dict[g1] == pytest.approx(
g1_blurred_image, 1.0e-4
)
assert (fit.galaxy_model_image_dict[g2].slim == np.zeros(9)).all()
assert fit.model_image.native == pytest.approx(
fit.galaxy_model_image_dict[g0].native
+ fit.galaxy_model_image_dict[g1].native,
1.0e-4,
)
def test___all_fit_quantities__including_hyper_methods(
self, masked_imaging_7x7
):
hyper_image_sky = ag.hyper_data.HyperImageSky(sky_scale=1.0)
hyper_background_noise = ag.hyper_data.HyperBackgroundNoise(noise_scale=1.0)
image = hyper_image_sky.hyper_image_from_image(
image=masked_imaging_7x7.image
)
g0 = ag.Galaxy(
redshift=0.5,
light_profile=ag.lp.EllipticalSersic(intensity=1.0),
mass_profile=ag.mp.SphericalIsothermal(einstein_radius=1.0),
hyper_galaxy=ag.HyperGalaxy(
contribution_factor=1.0, noise_factor=1.0, noise_power=1.0
),
hyper_model_image=np.ones(9),
hyper_galaxy_image=np.ones(9),
hyper_minimum_value=0.0,
)
g1 = ag.Galaxy(
redshift=1.0, light_profile=ag.lp.EllipticalSersic(intensity=1.0)
)
plane = ag.Plane(redshift=0.75, galaxies=[g0, g1])
fit = ag.FitImaging(
masked_imaging=masked_imaging_7x7,
plane=plane,
hyper_image_sky=hyper_image_sky,
hyper_background_noise=hyper_background_noise,
)
hyper_noise_map_background = hyper_background_noise.hyper_noise_map_from_noise_map(
noise_map=masked_imaging_7x7.noise_map
)
hyper_noise = plane.hyper_noise_map_from_noise_map(
noise_map=masked_imaging_7x7.noise_map
)
hyper_noise_map = hyper_noise_map_background + hyper_noise
assert hyper_noise_map.native == pytest.approx(fit.noise_map.native)
model_image = plane.blurred_image_from_grid_and_convolver(
grid=masked_imaging_7x7.grid,
convolver=masked_imaging_7x7.convolver,
blurring_grid=masked_imaging_7x7.blurring_grid,
)
assert model_image.native == pytest.approx(fit.model_image.native)
residual_map = ag.util.fit.residual_map_from(
data=image, model_data=model_image
)
assert residual_map.native == pytest.approx(fit.residual_map.native)
normalized_residual_map = ag.util.fit.normalized_residual_map_from(
residual_map=residual_map, noise_map=hyper_noise_map
)
assert normalized_residual_map.native == pytest.approx(
fit.normalized_residual_map.native
)
chi_squared_map = ag.util.fit.chi_squared_map_from(
residual_map=residual_map, noise_map=hyper_noise_map
)
assert chi_squared_map.native == pytest.approx(fit.chi_squared_map.native)
chi_squared = ag.util.fit.chi_squared_from(chi_squared_map=chi_squared_map)
noise_normalization = ag.util.fit.noise_normalization_from(
noise_map=hyper_noise_map
)
log_likelihood = ag.util.fit.log_likelihood_from(
chi_squared=chi_squared, noise_normalization=noise_normalization
)
assert log_likelihood == pytest.approx(fit.log_likelihood, 1e-4)
assert log_likelihood == fit.figure_of_merit
fit = ag.FitImaging(
masked_imaging=masked_imaging_7x7,
plane=plane,
hyper_image_sky=hyper_image_sky,
hyper_background_noise=hyper_background_noise,
use_hyper_scalings=False,
)
assert fit.image == pytest.approx(masked_imaging_7x7.image, 1.0e-4)
assert fit.noise_map == pytest.approx(masked_imaging_7x7.noise_map, 1.0e-4)
def test___blurred_and_model_images_of_galaxies_and_unmasked_blurred_image_properties(
self, masked_imaging_7x7
):
g0 = ag.Galaxy(
redshift=0.5,
light_profile=ag.lp.EllipticalSersic(intensity=1.0),
mass_profile=ag.mp.SphericalIsothermal(einstein_radius=1.0),
)
g1 = ag.Galaxy(
redshift=1.0, light_profile=ag.lp.EllipticalSersic(intensity=1.0)
)
plane = ag.Plane(redshift=0.75, galaxies=[g0, g1])
fit = ag.FitImaging(masked_imaging=masked_imaging_7x7, plane=plane)
blurred_images_of_galaxies = plane.blurred_images_of_galaxies_from_grid_and_convolver(
grid=masked_imaging_7x7.grid,
convolver=masked_imaging_7x7.convolver,
blurring_grid=masked_imaging_7x7.blurring_grid,
)
assert blurred_images_of_galaxies[0].native == pytest.approx(
fit.model_images_of_galaxies[0].native, 1.0e-4
)
assert blurred_images_of_galaxies[1].native == pytest.approx(
fit.model_images_of_galaxies[1].native, 1.0e-4
)
unmasked_blurred_image = plane.unmasked_blurred_image_from_grid_and_psf(
grid=masked_imaging_7x7.grid, psf=masked_imaging_7x7.psf
)
assert (unmasked_blurred_image == fit.unmasked_blurred_image).all()
unmasked_blurred_image_of_galaxies = plane.unmasked_blurred_image_of_galaxies_from_grid_and_psf(
grid=masked_imaging_7x7.grid, psf=masked_imaging_7x7.psf
)
assert (
unmasked_blurred_image_of_galaxies[0]
== fit.unmasked_blurred_image_of_galaxies[0]
).all()
assert (
unmasked_blurred_image_of_galaxies[1]
== fit.unmasked_blurred_image_of_galaxies[1]
).all()
class TestCompareToManualInversionOnly:
def test___all_quantities__no_hyper_methods(self, masked_imaging_7x7):
            # Corrupt one coordinate of the regular grid, so the test fails unless the inversion grid is used.
masked_imaging_7x7.grid[0, 0] = -100.0
pix = ag.pix.Rectangular(shape=(3, 3))
reg = ag.reg.Constant(coefficient=1.0)
g0 = ag.Galaxy(redshift=0.5, pixelization=pix, regularization=reg)
plane = ag.Plane(galaxies=[ag.Galaxy(redshift=0.5), g0])
fit = ag.FitImaging(masked_imaging=masked_imaging_7x7, plane=plane)
mapper = pix.mapper_from_grid_and_sparse_grid(
grid=masked_imaging_7x7.grid_inversion, sparse_grid=None
)
inversion = inversions.InversionImagingMatrix.from_data_mapper_and_regularization(
mapper=mapper,
regularization=reg,
image=masked_imaging_7x7.image,
noise_map=masked_imaging_7x7.noise_map,
convolver=masked_imaging_7x7.convolver,
)
assert inversion.mapped_reconstructed_image.native == pytest.approx(
fit.model_image.native, 1.0e-4
)
residual_map = ag.util.fit.residual_map_from(
data=masked_imaging_7x7.image,
model_data=inversion.mapped_reconstructed_image,
)
assert residual_map.native == pytest.approx(fit.residual_map.native, 1.0e-4)
normalized_residual_map = ag.util.fit.normalized_residual_map_from(
residual_map=residual_map, noise_map=masked_imaging_7x7.noise_map
)
assert normalized_residual_map.native == pytest.approx(
fit.normalized_residual_map.native, 1.0e-4
)
chi_squared_map = ag.util.fit.chi_squared_map_from(
residual_map=residual_map, noise_map=masked_imaging_7x7.noise_map
)
assert chi_squared_map.native == pytest.approx(
fit.chi_squared_map.native, 1.0e-4
)
chi_squared = ag.util.fit.chi_squared_from(chi_squared_map=chi_squared_map)
noise_normalization = ag.util.fit.noise_normalization_from(
noise_map=masked_imaging_7x7.noise_map
)
log_likelihood = ag.util.fit.log_likelihood_from(
chi_squared=chi_squared, noise_normalization=noise_normalization
)
assert log_likelihood == pytest.approx(fit.log_likelihood, 1e-4)
log_likelihood_with_regularization = ag.util.fit.log_likelihood_with_regularization_from(
chi_squared=chi_squared,
regularization_term=inversion.regularization_term,
noise_normalization=noise_normalization,
)
assert log_likelihood_with_regularization == pytest.approx(
fit.log_likelihood_with_regularization, 1e-4
)
log_evidence = ag.util.fit.log_evidence_from(
chi_squared=chi_squared,
regularization_term=inversion.regularization_term,
log_curvature_regularization_term=inversion.log_det_curvature_reg_matrix_term,
log_regularization_term=inversion.log_det_regularization_matrix_term,
noise_normalization=noise_normalization,
)
assert log_evidence == fit.log_evidence
assert log_evidence == fit.figure_of_merit
def test___fit_galaxy_model_image_dict__has_inversion_mapped_reconstructed_image(
self, masked_imaging_7x7
):
pix = ag.pix.Rectangular(shape=(3, 3))
reg = ag.reg.Constant(coefficient=1.0)
g0 = ag.Galaxy(redshift=0.5)
g1 = ag.Galaxy(redshift=1.0, pixelization=pix, regularization=reg)
plane = ag.Plane(redshift=0.75, galaxies=[g0, g1])
fit = ag.FitImaging(masked_imaging=masked_imaging_7x7, plane=plane)
mapper = pix.mapper_from_grid_and_sparse_grid(
grid=masked_imaging_7x7.grid, sparse_grid=None
)
inversion = inversions.InversionImagingMatrix.from_data_mapper_and_regularization(
mapper=mapper,
regularization=reg,
image=masked_imaging_7x7.image,
noise_map=masked_imaging_7x7.noise_map,
convolver=masked_imaging_7x7.convolver,
)
assert (fit.galaxy_model_image_dict[g0] == np.zeros(9)).all()
assert fit.galaxy_model_image_dict[g1].native == pytest.approx(
inversion.mapped_reconstructed_image.native, 1.0e-4
)
assert fit.model_image.native == pytest.approx(
fit.galaxy_model_image_dict[g1].native, 1.0e-4
)
def test___all_fit_quantities__include_hyper_methods(self, masked_imaging_7x7):
hyper_image_sky = ag.hyper_data.HyperImageSky(sky_scale=1.0)
hyper_background_noise = ag.hyper_data.HyperBackgroundNoise(noise_scale=1.0)
image = hyper_image_sky.hyper_image_from_image(
image=masked_imaging_7x7.image
)
hyper_noise_map_background = hyper_background_noise.hyper_noise_map_from_noise_map(
noise_map=masked_imaging_7x7.noise_map
)
pix = ag.pix.Rectangular(shape=(3, 3))
reg = ag.reg.Constant(coefficient=1.0)
g0 = ag.Galaxy(
redshift=0.5,
pixelization=pix,
regularization=reg,
hyper_galaxy=ag.HyperGalaxy(
contribution_factor=1.0, noise_factor=1.0, noise_power=1.0
),
hyper_model_image=np.ones(9),
hyper_galaxy_image=np.ones(9),
hyper_minimum_value=0.0,
)
plane = ag.Plane(galaxies=[ag.Galaxy(redshift=0.5), g0])
fit = ag.FitImaging(
masked_imaging=masked_imaging_7x7,
plane=plane,
hyper_image_sky=hyper_image_sky,
hyper_background_noise=hyper_background_noise,
)
hyper_noise = plane.hyper_noise_map_from_noise_map(
noise_map=masked_imaging_7x7.noise_map
)
hyper_noise_map = hyper_noise_map_background + hyper_noise
assert hyper_noise_map.native == pytest.approx(fit.noise_map.native)
mapper = pix.mapper_from_grid_and_sparse_grid(
grid=masked_imaging_7x7.grid,
settings=ag.SettingsPixelization(use_border=False),
)
inversion = inversions.InversionImagingMatrix.from_data_mapper_and_regularization(
mapper=mapper,
regularization=reg,
image=image,
noise_map=hyper_noise_map,
convolver=masked_imaging_7x7.convolver,
)
assert inversion.mapped_reconstructed_image.native == pytest.approx(
fit.model_image.native, 1.0e-4
)
residual_map = ag.util.fit.residual_map_from(
data=image, model_data=inversion.mapped_reconstructed_image
)
assert residual_map.native == pytest.approx(fit.residual_map.native)
normalized_residual_map = ag.util.fit.normalized_residual_map_from(
residual_map=residual_map, noise_map=hyper_noise_map
)
assert normalized_residual_map.native == pytest.approx(
fit.normalized_residual_map.native
)
chi_squared_map = ag.util.fit.chi_squared_map_from(
residual_map=residual_map, noise_map=hyper_noise_map
)
assert chi_squared_map.native == pytest.approx(fit.chi_squared_map.native)
chi_squared = ag.util.fit.chi_squared_from(chi_squared_map=chi_squared_map)
noise_normalization = ag.util.fit.noise_normalization_from(
noise_map=hyper_noise_map
)
log_likelihood = ag.util.fit.log_likelihood_from(
chi_squared=chi_squared, noise_normalization=noise_normalization
)
assert log_likelihood == pytest.approx(fit.log_likelihood, 1e-4)
log_likelihood_with_regularization = ag.util.fit.log_likelihood_with_regularization_from(
chi_squared=chi_squared,
regularization_term=inversion.regularization_term,
noise_normalization=noise_normalization,
)
assert log_likelihood_with_regularization == pytest.approx(
fit.log_likelihood_with_regularization, 1e-4
)
log_evidence = ag.util.fit.log_evidence_from(
chi_squared=chi_squared,
regularization_term=inversion.regularization_term,
log_curvature_regularization_term=inversion.log_det_curvature_reg_matrix_term,
log_regularization_term=inversion.log_det_regularization_matrix_term,
noise_normalization=noise_normalization,
)
assert log_evidence == fit.log_evidence
assert log_evidence == fit.figure_of_merit
def test___blurred_and_model_images_of_galaxies_and_unmasked_blurred_image_properties(
self, masked_imaging_7x7
):
pix = ag.pix.Rectangular(shape=(3, 3))
reg = ag.reg.Constant(coefficient=1.0)
g0 = ag.Galaxy(redshift=1.0, pixelization=pix, regularization=reg)
plane = ag.Plane(redshift=0.75, galaxies=[ag.Galaxy(redshift=0.5), g0])
fit = ag.FitImaging(masked_imaging=masked_imaging_7x7, plane=plane)
mapper = pix.mapper_from_grid_and_sparse_grid(
grid=masked_imaging_7x7.grid,
settings=ag.SettingsPixelization(use_border=False),
)
inversion = inversions.InversionImagingMatrix.from_data_mapper_and_regularization(
mapper=mapper,
regularization=reg,
image=masked_imaging_7x7.image,
noise_map=masked_imaging_7x7.noise_map,
convolver=masked_imaging_7x7.convolver,
)
assert (fit.model_images_of_galaxies[0].native == np.zeros((7, 7))).all()
assert inversion.mapped_reconstructed_image.native == pytest.approx(
fit.model_images_of_galaxies[1].native, 1.0e-4
)
class TestCompareToManualProfilesAndInversion:
def test___all_fit_quantities__no_hyper_methods(self, masked_imaging_7x7):
galaxy_light = ag.Galaxy(
redshift=0.5, light_profile=ag.lp.EllipticalSersic(intensity=1.0)
)
pix = ag.pix.Rectangular(shape=(3, 3))
reg = ag.reg.Constant(coefficient=1.0)
galaxy_pix = ag.Galaxy(redshift=1.0, pixelization=pix, regularization=reg)
plane = ag.Plane(redshift=0.75, galaxies=[galaxy_light, galaxy_pix])
fit = ag.FitImaging(masked_imaging=masked_imaging_7x7, plane=plane)
blurred_image = plane.blurred_image_from_grid_and_convolver(
grid=masked_imaging_7x7.grid,
convolver=masked_imaging_7x7.convolver,
blurring_grid=masked_imaging_7x7.blurring_grid,
)
assert blurred_image.native == pytest.approx(fit.blurred_image.native)
profile_subtracted_image = masked_imaging_7x7.image - blurred_image
assert profile_subtracted_image.native == pytest.approx(
fit.profile_subtracted_image.native
)
mapper = pix.mapper_from_grid_and_sparse_grid(
grid=masked_imaging_7x7.grid,
settings=ag.SettingsPixelization(use_border=False),
)
inversion = inversions.InversionImagingMatrix.from_data_mapper_and_regularization(
image=profile_subtracted_image,
noise_map=masked_imaging_7x7.noise_map,
convolver=masked_imaging_7x7.convolver,
mapper=mapper,
regularization=reg,
)
model_image = blurred_image + inversion.mapped_reconstructed_image
assert model_image.native == pytest.approx(fit.model_image.native)
residual_map = ag.util.fit.residual_map_from(
data=masked_imaging_7x7.image, model_data=model_image
)
assert residual_map.native == pytest.approx(fit.residual_map.native)
normalized_residual_map = ag.util.fit.normalized_residual_map_from(
residual_map=residual_map, noise_map=masked_imaging_7x7.noise_map
)
assert normalized_residual_map.native == pytest.approx(
fit.normalized_residual_map.native
)
chi_squared_map = ag.util.fit.chi_squared_map_from(
residual_map=residual_map, noise_map=masked_imaging_7x7.noise_map
)
assert chi_squared_map.native == pytest.approx(fit.chi_squared_map.native)
chi_squared = ag.util.fit.chi_squared_from(chi_squared_map=chi_squared_map)
noise_normalization = ag.util.fit.noise_normalization_from(
noise_map=masked_imaging_7x7.noise_map
)
log_likelihood = ag.util.fit.log_likelihood_from(
chi_squared=chi_squared, noise_normalization=noise_normalization
)
assert log_likelihood == pytest.approx(fit.log_likelihood, 1e-4)
log_likelihood_with_regularization = ag.util.fit.log_likelihood_with_regularization_from(
chi_squared=chi_squared,
regularization_term=inversion.regularization_term,
noise_normalization=noise_normalization,
)
assert log_likelihood_with_regularization == pytest.approx(
fit.log_likelihood_with_regularization, 1e-4
)
log_evidence = ag.util.fit.log_evidence_from(
chi_squared=chi_squared,
regularization_term=inversion.regularization_term,
log_curvature_regularization_term=inversion.log_det_curvature_reg_matrix_term,
log_regularization_term=inversion.log_det_regularization_matrix_term,
noise_normalization=noise_normalization,
)
assert log_evidence == fit.log_evidence
assert log_evidence == fit.figure_of_merit
def test___fit_galaxy_model_image_dict__has_blurred_images_and_inversion_mapped_reconstructed_image(
self, masked_imaging_7x7
):
g0 = ag.Galaxy(
redshift=0.5, light_profile=ag.lp.EllipticalSersic(intensity=1.0)
)
g1 = ag.Galaxy(
redshift=0.5, light_profile=ag.lp.EllipticalSersic(intensity=2.0)
)
g2 = ag.Galaxy(redshift=0.5)
pix = ag.pix.Rectangular(shape=(3, 3))
reg = ag.reg.Constant(coefficient=1.0)
galaxy_pix = ag.Galaxy(redshift=1.0, pixelization=pix, regularization=reg)
plane = ag.Plane(redshift=0.75, galaxies=[g0, g1, g2, galaxy_pix])
masked_imaging_7x7.image[0] = 3.0
fit = ag.FitImaging(masked_imaging=masked_imaging_7x7, plane=plane)
g0_blurred_image = g0.blurred_image_from_grid_and_convolver(
grid=masked_imaging_7x7.grid,
convolver=masked_imaging_7x7.convolver,
blurring_grid=masked_imaging_7x7.blurring_grid,
)
g1_blurred_image = g1.blurred_image_from_grid_and_convolver(
grid=masked_imaging_7x7.grid,
convolver=masked_imaging_7x7.convolver,
blurring_grid=masked_imaging_7x7.blurring_grid,
)
blurred_image = g0_blurred_image + g1_blurred_image
profile_subtracted_image = masked_imaging_7x7.image - blurred_image
mapper = pix.mapper_from_grid_and_sparse_grid(
grid=masked_imaging_7x7.grid,
settings=ag.SettingsPixelization(use_border=False),
)
inversion = inversions.InversionImagingMatrix.from_data_mapper_and_regularization(
image=profile_subtracted_image,
noise_map=masked_imaging_7x7.noise_map,
convolver=masked_imaging_7x7.convolver,
mapper=mapper,
regularization=reg,
)
assert (fit.galaxy_model_image_dict[g2] == np.zeros(9)).all()
assert fit.galaxy_model_image_dict[g0].native == pytest.approx(
g0_blurred_image.native, 1.0e-4
)
assert fit.galaxy_model_image_dict[g1].native == pytest.approx(
g1_blurred_image.native, 1.0e-4
)
assert fit.galaxy_model_image_dict[galaxy_pix].native == pytest.approx(
inversion.mapped_reconstructed_image.native, 1.0e-4
)
assert fit.model_image.native == pytest.approx(
fit.galaxy_model_image_dict[g0].native
+ fit.galaxy_model_image_dict[g1].native
+ inversion.mapped_reconstructed_image.native,
1.0e-4,
)
def test___all_fit_quantities__include_hyper_methods(self, masked_imaging_7x7):
hyper_image_sky = ag.hyper_data.HyperImageSky(sky_scale=1.0)
hyper_background_noise = ag.hyper_data.HyperBackgroundNoise(noise_scale=1.0)
image = hyper_image_sky.hyper_image_from_image(
image=masked_imaging_7x7.image
)
hyper_noise_map_background = hyper_background_noise.hyper_noise_map_from_noise_map(
noise_map=masked_imaging_7x7.noise_map
)
galaxy_light = ag.Galaxy(
redshift=0.5,
light_profile=ag.lp.EllipticalSersic(intensity=1.0),
hyper_galaxy=ag.HyperGalaxy(
contribution_factor=1.0, noise_factor=1.0, noise_power=1.0
),
hyper_model_image=ag.Array2D.ones(
shape_native=(3, 3), pixel_scales=1.0
),
hyper_galaxy_image=ag.Array2D.ones(
shape_native=(3, 3), pixel_scales=1.0
),
hyper_minimum_value=0.0,
)
pix = ag.pix.Rectangular(shape=(3, 3))
reg = ag.reg.Constant(coefficient=1.0)
galaxy_pix = ag.Galaxy(redshift=1.0, pixelization=pix, regularization=reg)
plane = ag.Plane(redshift=0.75, galaxies=[galaxy_light, galaxy_pix])
fit = ag.FitImaging(
masked_imaging=masked_imaging_7x7,
plane=plane,
hyper_image_sky=hyper_image_sky,
hyper_background_noise=hyper_background_noise,
)
hyper_noise = plane.hyper_noise_map_from_noise_map(
noise_map=masked_imaging_7x7.noise_map
)
hyper_noise_map = hyper_noise_map_background + hyper_noise
assert hyper_noise_map.native == pytest.approx(fit.noise_map.native, 1.0e-4)
blurred_image = plane.blurred_image_from_grid_and_convolver(
grid=masked_imaging_7x7.grid,
convolver=masked_imaging_7x7.convolver,
blurring_grid=masked_imaging_7x7.blurring_grid,
)
assert blurred_image.native == pytest.approx(fit.blurred_image.native)
profile_subtracted_image = image - blurred_image
assert profile_subtracted_image.native == pytest.approx(
fit.profile_subtracted_image.native
)
mapper = pix.mapper_from_grid_and_sparse_grid(
grid=masked_imaging_7x7.grid,
settings=ag.SettingsPixelization(use_border=False),
)
inversion = inversions.InversionImagingMatrix.from_data_mapper_and_regularization(
image=profile_subtracted_image,
noise_map=hyper_noise_map,
convolver=masked_imaging_7x7.convolver,
mapper=mapper,
regularization=reg,
)
model_image = blurred_image + inversion.mapped_reconstructed_image
assert model_image.native == pytest.approx(fit.model_image.native, 1.0e-4)
residual_map = ag.util.fit.residual_map_from(
data=image, model_data=model_image
)
assert residual_map.native == pytest.approx(fit.residual_map.native, 1.0e-4)
normalized_residual_map = ag.util.fit.normalized_residual_map_from(
residual_map=residual_map, noise_map=hyper_noise_map
)
assert normalized_residual_map.native == pytest.approx(
fit.normalized_residual_map.native, 1.0e-4
)
chi_squared_map = ag.util.fit.chi_squared_map_from(
residual_map=residual_map, noise_map=hyper_noise_map
)
assert chi_squared_map.native == pytest.approx(
fit.chi_squared_map.native, 1.0e-4
)
chi_squared = ag.util.fit.chi_squared_from(chi_squared_map=chi_squared_map)
noise_normalization = ag.util.fit.noise_normalization_from(
noise_map=hyper_noise_map
)
log_likelihood = ag.util.fit.log_likelihood_from(
chi_squared=chi_squared, noise_normalization=noise_normalization
)
assert log_likelihood == pytest.approx(fit.log_likelihood, 1e-4)
log_likelihood_with_regularization = ag.util.fit.log_likelihood_with_regularization_from(
chi_squared=chi_squared,
regularization_term=inversion.regularization_term,
noise_normalization=noise_normalization,
)
assert log_likelihood_with_regularization == pytest.approx(
fit.log_likelihood_with_regularization, 1e-4
)
log_evidence = ag.util.fit.log_evidence_from(
chi_squared=chi_squared,
regularization_term=inversion.regularization_term,
log_curvature_regularization_term=inversion.log_det_curvature_reg_matrix_term,
log_regularization_term=inversion.log_det_regularization_matrix_term,
noise_normalization=noise_normalization,
)
assert log_evidence == fit.log_evidence
assert log_evidence == fit.figure_of_merit
def test___blurred_and_model_images_of_galaxies_and_unmasked_blurred_image_properties(
self, masked_imaging_7x7
):
galaxy_light = ag.Galaxy(
redshift=0.5, light_profile=ag.lp.EllipticalSersic(intensity=1.0)
)
pix = ag.pix.Rectangular(shape=(3, 3))
reg = ag.reg.Constant(coefficient=1.0)
galaxy_pix = ag.Galaxy(redshift=1.0, pixelization=pix, regularization=reg)
plane = ag.Plane(redshift=0.75, galaxies=[galaxy_light, galaxy_pix])
fit = ag.FitImaging(masked_imaging=masked_imaging_7x7, plane=plane)
blurred_image = plane.blurred_image_from_grid_and_convolver(
grid=masked_imaging_7x7.grid,
convolver=masked_imaging_7x7.convolver,
blurring_grid=masked_imaging_7x7.blurring_grid,
)
profile_subtracted_image = masked_imaging_7x7.image - blurred_image
mapper = pix.mapper_from_grid_and_sparse_grid(
grid=masked_imaging_7x7.grid,
settings=ag.SettingsPixelization(use_border=False),
)
inversion = inversions.InversionImagingMatrix.from_data_mapper_and_regularization(
image=profile_subtracted_image,
noise_map=masked_imaging_7x7.noise_map,
convolver=masked_imaging_7x7.convolver,
mapper=mapper,
regularization=reg,
)
assert blurred_image.native == pytest.approx(
fit.model_images_of_galaxies[0].native, 1.0e-4
)
assert inversion.mapped_reconstructed_image.native == pytest.approx(
fit.model_images_of_galaxies[1].native, 1.0e-4
)
class TestAttributes:
def test__subtracted_images_of_galaxies(self, masked_imaging_no_blur_7x7):
g0 = ag.Galaxy(redshift=0.5, light_profile=MockLightProfile(value=1.0))
g1 = ag.Galaxy(redshift=1.0, light_profile=MockLightProfile(value=2.0))
g2 = ag.Galaxy(redshift=1.0, light_profile=MockLightProfile(value=3.0))
plane = ag.Plane(redshift=0.75, galaxies=[g0, g1, g2])
fit = ag.FitImaging(masked_imaging=masked_imaging_no_blur_7x7, plane=plane)
assert fit.subtracted_images_of_galaxies[0].slim[0] == -4.0
assert fit.subtracted_images_of_galaxies[1].slim[0] == -3.0
assert fit.subtracted_images_of_galaxies[2].slim[0] == -2.0
g0 = ag.Galaxy(redshift=0.5, light_profile=MockLightProfile(value=1.0))
g1 = ag.Galaxy(redshift=0.5)
g2 = ag.Galaxy(redshift=1.0, light_profile=MockLightProfile(value=3.0))
plane = ag.Plane(redshift=0.75, galaxies=[g0, g1, g2])
fit = ag.FitImaging(masked_imaging=masked_imaging_no_blur_7x7, plane=plane)
assert fit.subtracted_images_of_galaxies[0].slim[0] == -2.0
assert fit.subtracted_images_of_galaxies[1].slim[0] == -3.0
assert fit.subtracted_images_of_galaxies[2].slim[0] == 0.0
class TestFitInterferometer:
class TestLikelihood:
def test__1x2_image__1x2_visibilities__simple_fourier_transform(self):
# The image plane image generated by the galaxy is [1.0, 1.0]
# Thus the chi squared is 4.0**2.0 + 3.0**2.0 = 25.0
interferometer = ag.Interferometer(
visibilities=ag.Visibilities.full(fill_value=5.0, shape_slim=(1,)),
noise_map=ag.Visibilities.ones(shape_slim=(1,)),
uv_wavelengths=np.array([[0.0, 0.0]]),
)
interferometer.visibilities[0] = 5.0 + 4.0j
visibilities_mask = np.full(fill_value=False, shape=(1,))
real_space_mask = ag.Mask2D.manual(
mask=[
[True, True, True, True],
[True, False, False, True],
[True, True, True, True],
],
pixel_scales=1.0,
)
masked_interferometer = ag.MaskedInterferometer(
interferometer=interferometer,
visibilities_mask=visibilities_mask,
real_space_mask=real_space_mask,
settings=ag.SettingsMaskedInterferometer(
grid_class=ag.Grid2D,
sub_size=1,
transformer_class=ag.TransformerDFT,
),
)
# Setup as a ray trace instance, using a light profile for the galaxy
g0 = ag.Galaxy(
redshift=0.5, light_profile=MockLightProfile(value=1.0, size=2)
)
plane = ag.Plane(galaxies=[g0])
fit = ag.FitInterferometer(
masked_interferometer=masked_interferometer, plane=plane
)
assert (fit.visibilities_mask == np.array([False])).all()
assert (fit.visibilities.slim == np.array([5.0 + 4.0j])).all()
assert (fit.noise_map.slim == np.array([1.0 + 1.0j])).all()
assert (fit.model_visibilities.slim == np.array([2.0 + 0.0j])).all()
assert (fit.residual_map.slim == np.array([3.0 + 4.0j])).all()
assert (fit.normalized_residual_map.slim == np.array([3.0 + 4.0j])).all()
assert (fit.chi_squared_map.slim == np.array([9.0 + 16.0j])).all()
assert fit.chi_squared == 25.0
assert fit.noise_normalization == (2.0 * np.log(2 * np.pi * 1.0 ** 2.0))
assert fit.log_likelihood == -0.5 * (
25.0 + 2.0 * np.log(2 * np.pi * 1.0 ** 2.0)
)
def test__hyper_background_changes_background_sky__reflected_in_likelihood(
self,
):
uv_wavelengths = np.array([[1.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
interferometer = ag.Interferometer(
visibilities=ag.Visibilities.full(fill_value=5.0, shape_slim=(3,)),
noise_map=ag.Visibilities.full(fill_value=2.0, shape_slim=(3,)),
uv_wavelengths=uv_wavelengths,
)
visibilities_mask = np.full(fill_value=False, shape=(1,))
real_space_mask = ag.Mask2D.manual(
mask=[
[True, True, True, True, True],
[True, False, False, False, True],
[True, True, True, True, True],
],
pixel_scales=1.0,
)
masked_interferometer = ag.MaskedInterferometer(
interferometer=interferometer,
visibilities_mask=visibilities_mask,
real_space_mask=real_space_mask,
settings=ag.SettingsMaskedInterferometer(
grid_class=ag.Grid2D, sub_size=1
),
)
# Setup as a ray trace instance, using a light profile for the galaxy
g0 = ag.Galaxy(
redshift=0.5, light_profile=MockLightProfile(value=1.0, size=2)
)
plane = ag.Plane(galaxies=[g0])
hyper_background_noise = ag.hyper_data.HyperBackgroundNoise(noise_scale=1.0)
fit = ag.FitInterferometer(
masked_interferometer=masked_interferometer,
plane=plane,
hyper_background_noise=hyper_background_noise,
)
assert (
fit.visibilities.slim == np.array([5.0 + 5.0j, 5.0 + 5.0j, 5.0 + 5.0j])
).all()
assert (
fit.noise_map.slim == np.array([3.0 + 3.0j, 3.0 + 3.0j, 3.0 + 3.0j])
).all()
class TestCompareToManualProfilesOnly:
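        # These tests recompute every fit quantity by hand (model visibilities, residuals,
        # chi-squared, log likelihood) with the ag.util.fit functions and check that each
        # matches the corresponding FitInterferometer property.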
def test___all_fit_quantities__no_hyper_methods(self, masked_interferometer_7):
g0 = ag.Galaxy(
redshift=0.5,
light_profile=ag.lp.EllipticalSersic(intensity=1.0),
mass_profile=ag.mp.SphericalIsothermal(einstein_radius=1.0),
)
g1 = ag.Galaxy(
redshift=1.0, light_profile=ag.lp.EllipticalSersic(intensity=1.0)
)
plane = ag.Plane(redshift=0.75, galaxies=[g0, g1])
fit = ag.FitInterferometer(
masked_interferometer=masked_interferometer_7, plane=plane
)
assert masked_interferometer_7.noise_map == pytest.approx(fit.noise_map)
model_visibilities = plane.profile_visibilities_from_grid_and_transformer(
grid=masked_interferometer_7.grid,
transformer=masked_interferometer_7.transformer,
)
assert model_visibilities == pytest.approx(fit.model_visibilities, 1e-4)
residual_map = ag.util.fit.residual_map_from(
data=masked_interferometer_7.visibilities, model_data=model_visibilities
)
assert residual_map == pytest.approx(fit.residual_map, 1e-4)
normalized_residual_map = ag.util.fit.normalized_residual_map_complex_from(
residual_map=residual_map, noise_map=masked_interferometer_7.noise_map
)
assert normalized_residual_map == pytest.approx(
fit.normalized_residual_map, 1e-4
)
chi_squared_map = ag.util.fit.chi_squared_map_complex_from(
residual_map=residual_map, noise_map=masked_interferometer_7.noise_map
)
assert chi_squared_map == pytest.approx(fit.chi_squared_map, 1e-4)
chi_squared = ag.util.fit.chi_squared_complex_from(
chi_squared_map=fit.chi_squared_map
)
noise_normalization = ag.util.fit.noise_normalization_complex_from(
noise_map=masked_interferometer_7.noise_map
)
log_likelihood = ag.util.fit.log_likelihood_from(
chi_squared=chi_squared, noise_normalization=noise_normalization
)
assert log_likelihood == pytest.approx(fit.log_likelihood, 1e-4)
assert log_likelihood == fit.figure_of_merit
def test___fit_galaxy_model_image_dict__corresponds_to_profile_galaxy_images(
self, masked_interferometer_7
):
g0 = ag.Galaxy(
redshift=0.5,
light_profile=ag.lp.EllipticalSersic(intensity=1.0),
mass_profile=ag.mp.SphericalIsothermal(einstein_radius=1.0),
)
g1 = ag.Galaxy(
redshift=1.0, light_profile=ag.lp.EllipticalSersic(intensity=1.0)
)
plane = ag.Plane(redshift=0.75, galaxies=[g0, g1])
fit = ag.FitInterferometer(
masked_interferometer=masked_interferometer_7, plane=plane
)
g0_image = g0.image_from_grid(grid=masked_interferometer_7.grid)
g1_image = g1.image_from_grid(grid=masked_interferometer_7.grid)
assert fit.galaxy_model_image_dict[g0].slim == pytest.approx(
g0_image, 1.0e-4
)
assert fit.galaxy_model_image_dict[g1].slim == pytest.approx(
g1_image, 1.0e-4
)
def test___fit_galaxy_visibilities_dict__corresponds_to_galaxy_visibilities(
self, masked_interferometer_7
):
g0 = ag.Galaxy(
redshift=0.5,
light_profile=ag.lp.EllipticalSersic(intensity=1.0),
mass_profile=ag.mp.SphericalIsothermal(einstein_radius=1.0),
)
g1 = ag.Galaxy(
redshift=1.0, light_profile=ag.lp.EllipticalSersic(intensity=1.0)
)
plane = ag.Plane(redshift=0.75, galaxies=[g0, g1])
fit = ag.FitInterferometer(
masked_interferometer=masked_interferometer_7, plane=plane
)
g0_profile_visibilities = g0.profile_visibilities_from_grid_and_transformer(
grid=masked_interferometer_7.grid,
transformer=masked_interferometer_7.transformer,
)
g1_profile_visibilities = g1.profile_visibilities_from_grid_and_transformer(
grid=masked_interferometer_7.grid,
transformer=masked_interferometer_7.transformer,
)
assert fit.galaxy_model_visibilities_dict[g0].slim == pytest.approx(
g0_profile_visibilities, 1.0e-4
)
assert fit.galaxy_model_visibilities_dict[g1].slim == pytest.approx(
g1_profile_visibilities, 1.0e-4
)
assert fit.model_visibilities.slim == pytest.approx(
fit.galaxy_model_visibilities_dict[g0].slim
+ fit.galaxy_model_visibilities_dict[g1].slim,
1.0e-4,
)
def test___all_fit_quantities__hyper_background_noise(
self, masked_interferometer_7
):
hyper_background_noise = ag.hyper_data.HyperBackgroundNoise(noise_scale=1.0)
hyper_noise_map = hyper_background_noise.hyper_noise_map_from_complex_noise_map(
noise_map=masked_interferometer_7.noise_map
)
g0 = ag.Galaxy(
redshift=0.5,
light_profile=ag.lp.EllipticalSersic(intensity=1.0),
mass_profile=ag.mp.SphericalIsothermal(einstein_radius=1.0),
)
g1 = ag.Galaxy(
redshift=1.0, light_profile=ag.lp.EllipticalSersic(intensity=1.0)
)
plane = ag.Plane(redshift=0.75, galaxies=[g0, g1])
fit = ag.FitInterferometer(
masked_interferometer=masked_interferometer_7,
plane=plane,
hyper_background_noise=hyper_background_noise,
)
assert hyper_noise_map.slim == pytest.approx(fit.noise_map.slim)
fit = ag.FitInterferometer(
masked_interferometer=masked_interferometer_7,
plane=plane,
hyper_background_noise=hyper_background_noise,
use_hyper_scalings=False,
)
assert fit.noise_map == pytest.approx(
masked_interferometer_7.noise_map, 1.0e-4
)
assert fit.noise_map != pytest.approx(hyper_noise_map.slim, 1.0e-4)
class TestCompareToManualInversionOnly:
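        # As above, but for fits that use only an inversion: the Rectangular mapper and
        # InversionInterferometerMatrix are built manually and compared to the fit's inversion.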
def test___all_fit_quantities__no_hyper_methods(self, masked_interferometer_7):
            # Corrupt the first coordinate of the regular grid so the test would fail if it,
            # rather than the inversion grid, were used for the inversion.
masked_interferometer_7.grid[0, 0] = -100.0
pix = ag.pix.Rectangular(shape=(3, 3))
reg = ag.reg.Constant(coefficient=0.01)
g0 = ag.Galaxy(redshift=0.5, pixelization=pix, regularization=reg)
plane = ag.Plane(galaxies=[ag.Galaxy(redshift=0.5), g0])
fit = ag.FitInterferometer(
masked_interferometer=masked_interferometer_7, plane=plane
)
mapper = pix.mapper_from_grid_and_sparse_grid(
grid=masked_interferometer_7.grid_inversion, sparse_grid=None
)
inversion = inversions.InversionInterferometerMatrix.from_data_mapper_and_regularization(
mapper=mapper,
regularization=reg,
visibilities=masked_interferometer_7.visibilities,
noise_map=masked_interferometer_7.noise_map,
transformer=masked_interferometer_7.transformer,
)
assert inversion.mapped_reconstructed_visibilities == pytest.approx(
fit.model_visibilities, 1.0e-4
)
residual_map = ag.util.fit.residual_map_from(
data=masked_interferometer_7.visibilities,
model_data=inversion.mapped_reconstructed_visibilities,
)
assert residual_map.slim == pytest.approx(fit.residual_map.slim, 1.0e-4)
normalized_residual_map = ag.util.fit.normalized_residual_map_complex_from(
residual_map=residual_map, noise_map=masked_interferometer_7.noise_map
)
assert normalized_residual_map.slim == pytest.approx(
fit.normalized_residual_map.slim, 1.0e-4
)
chi_squared_map = ag.util.fit.chi_squared_map_complex_from(
residual_map=residual_map, noise_map=masked_interferometer_7.noise_map
)
assert chi_squared_map.slim == pytest.approx(
fit.chi_squared_map.slim, 1.0e-4
)
chi_squared = ag.util.fit.chi_squared_complex_from(
chi_squared_map=chi_squared_map
)
noise_normalization = ag.util.fit.noise_normalization_complex_from(
noise_map=masked_interferometer_7.noise_map
)
log_likelihood = ag.util.fit.log_likelihood_from(
chi_squared=chi_squared, noise_normalization=noise_normalization
)
assert log_likelihood == pytest.approx(fit.log_likelihood, 1e-4)
log_likelihood_with_regularization = ag.util.fit.log_likelihood_with_regularization_from(
chi_squared=chi_squared,
regularization_term=inversion.regularization_term,
noise_normalization=noise_normalization,
)
assert log_likelihood_with_regularization == pytest.approx(
fit.log_likelihood_with_regularization, 1e-4
)
log_evidence = ag.util.fit.log_evidence_from(
chi_squared=chi_squared,
regularization_term=inversion.regularization_term,
log_curvature_regularization_term=inversion.log_det_curvature_reg_matrix_term,
log_regularization_term=inversion.log_det_regularization_matrix_term,
noise_normalization=noise_normalization,
)
assert log_evidence == fit.log_evidence
assert log_evidence == fit.figure_of_merit
mapped_reconstructed_image = ag.util.inversion.mapped_reconstructed_data_from(
mapping_matrix=fit.inversion.mapper.mapping_matrix,
reconstruction=fit.inversion.reconstruction,
)
assert (
fit.inversion.mapped_reconstructed_image.slim
== mapped_reconstructed_image
).all()
def test___fit_galaxy_model_image_dict__images_and_inversion_mapped_reconstructed_image(
self, masked_interferometer_7
):
pix = ag.pix.Rectangular(shape=(3, 3))
reg = ag.reg.Constant(coefficient=1.0)
g0 = ag.Galaxy(redshift=0.5)
g1 = ag.Galaxy(redshift=1.0, pixelization=pix, regularization=reg)
plane = ag.Plane(redshift=0.75, galaxies=[g0, g1])
fit = ag.FitInterferometer(
masked_interferometer=masked_interferometer_7, plane=plane
)
mapper = pix.mapper_from_grid_and_sparse_grid(
grid=masked_interferometer_7.grid, sparse_grid=None
)
inversion = inversions.InversionInterferometerMatrix.from_data_mapper_and_regularization(
mapper=mapper,
regularization=reg,
visibilities=masked_interferometer_7.visibilities,
noise_map=masked_interferometer_7.noise_map,
transformer=masked_interferometer_7.transformer,
)
assert (fit.galaxy_model_image_dict[g0].native == np.zeros((7, 7))).all()
assert fit.galaxy_model_image_dict[g1].slim == pytest.approx(
inversion.mapped_reconstructed_image.slim, 1.0e-4
)
def test___fit_galaxy_model_visibilities_dict__has_inversion_mapped_reconstructed_visibilities(
self, masked_interferometer_7
):
pix = ag.pix.Rectangular(shape=(3, 3))
reg = ag.reg.Constant(coefficient=1.0)
g0 = ag.Galaxy(redshift=0.5)
g1 = ag.Galaxy(redshift=1.0, pixelization=pix, regularization=reg)
plane = ag.Plane(redshift=0.75, galaxies=[g0, g1])
fit = ag.FitInterferometer(
masked_interferometer=masked_interferometer_7, plane=plane
)
mapper = pix.mapper_from_grid_and_sparse_grid(
grid=masked_interferometer_7.grid, sparse_grid=None
)
inversion = inversions.InversionInterferometerMatrix.from_data_mapper_and_regularization(
mapper=mapper,
regularization=reg,
visibilities=masked_interferometer_7.visibilities,
noise_map=masked_interferometer_7.noise_map,
transformer=masked_interferometer_7.transformer,
)
assert (
fit.galaxy_model_visibilities_dict[g0] == 0.0 + 0.0j * np.zeros((7,))
).all()
assert fit.galaxy_model_visibilities_dict[g1].slim == pytest.approx(
inversion.mapped_reconstructed_visibilities.slim, 1.0e-4
)
assert fit.model_visibilities.slim == pytest.approx(
fit.galaxy_model_visibilities_dict[g1].slim, 1.0e-4
)
def test___all_fit_quantities__hyper_background_noise(
self, masked_interferometer_7
):
hyper_background_noise = ag.hyper_data.HyperBackgroundNoise(noise_scale=1.0)
hyper_noise_map = hyper_background_noise.hyper_noise_map_from_complex_noise_map(
noise_map=masked_interferometer_7.noise_map
)
pix = ag.pix.Rectangular(shape=(3, 3))
reg = ag.reg.Constant(coefficient=0.01)
g0 = ag.Galaxy(redshift=0.5, pixelization=pix, regularization=reg)
plane = ag.Plane(galaxies=[ag.Galaxy(redshift=0.5), g0])
fit = ag.FitInterferometer(
masked_interferometer=masked_interferometer_7,
plane=plane,
hyper_background_noise=hyper_background_noise,
)
assert hyper_noise_map.slim == pytest.approx(
fit.inversion.noise_map, 1.0e-4
)
assert hyper_noise_map.slim == pytest.approx(fit.noise_map.slim)
def test___all_fit_quantities__uses_linear_operator_inversion(
self, masked_interferometer_7_lop
):
            # Corrupt the first coordinate of the regular grid so the test would fail if it,
            # rather than the inversion grid, were used for the inversion.
masked_interferometer_7_lop.grid[0, 0] = -100.0
pix = ag.pix.Rectangular(shape=(3, 3))
reg = ag.reg.Constant(coefficient=0.01)
g0 = ag.Galaxy(redshift=0.5, pixelization=pix, regularization=reg)
plane = ag.Plane(galaxies=[ag.Galaxy(redshift=0.5), g0])
fit = ag.FitInterferometer(
masked_interferometer=masked_interferometer_7_lop,
plane=plane,
settings_inversion=ag.SettingsInversion(use_linear_operators=True),
)
mapper = pix.mapper_from_grid_and_sparse_grid(
grid=masked_interferometer_7_lop.grid_inversion, sparse_grid=None
)
inversion = inversions.InversionInterferometerLinearOperator.from_data_mapper_and_regularization(
mapper=mapper,
regularization=reg,
visibilities=masked_interferometer_7_lop.visibilities,
noise_map=masked_interferometer_7_lop.noise_map,
transformer=masked_interferometer_7_lop.transformer,
settings=ag.SettingsInversion(use_linear_operators=True),
)
assert inversion.mapped_reconstructed_visibilities == pytest.approx(
fit.model_visibilities, 1.0e-4
)
residual_map = ag.util.fit.residual_map_from(
data=masked_interferometer_7_lop.visibilities,
model_data=inversion.mapped_reconstructed_visibilities,
)
assert residual_map.slim == pytest.approx(fit.residual_map.slim, 1.0e-4)
normalized_residual_map = ag.util.fit.normalized_residual_map_complex_from(
residual_map=residual_map,
noise_map=masked_interferometer_7_lop.noise_map,
)
assert normalized_residual_map.slim == pytest.approx(
fit.normalized_residual_map.slim, 1.0e-4
)
chi_squared_map = ag.util.fit.chi_squared_map_complex_from(
residual_map=residual_map,
noise_map=masked_interferometer_7_lop.noise_map,
)
assert chi_squared_map.slim == pytest.approx(
fit.chi_squared_map.slim, 1.0e-4
)
chi_squared = ag.util.fit.chi_squared_complex_from(
chi_squared_map=chi_squared_map
)
noise_normalization = ag.util.fit.noise_normalization_complex_from(
noise_map=masked_interferometer_7_lop.noise_map
)
log_likelihood = ag.util.fit.log_likelihood_from(
chi_squared=chi_squared, noise_normalization=noise_normalization
)
assert log_likelihood == pytest.approx(fit.log_likelihood, 1e-4)
log_likelihood_with_regularization = ag.util.fit.log_likelihood_with_regularization_from(
chi_squared=chi_squared,
regularization_term=inversion.regularization_term,
noise_normalization=noise_normalization,
)
assert log_likelihood_with_regularization == pytest.approx(
fit.log_likelihood_with_regularization, 1e-4
)
log_evidence = ag.util.fit.log_evidence_from(
chi_squared=chi_squared,
regularization_term=inversion.regularization_term,
log_curvature_regularization_term=inversion.log_det_curvature_reg_matrix_term,
log_regularization_term=inversion.log_det_regularization_matrix_term,
noise_normalization=noise_normalization,
)
assert log_evidence == fit.log_evidence
assert log_evidence == fit.figure_of_merit
mapped_reconstructed_image = ag.util.inversion.mapped_reconstructed_data_from(
mapping_matrix=fit.inversion.mapper.mapping_matrix,
reconstruction=fit.inversion.reconstruction,
)
assert (
fit.inversion.mapped_reconstructed_image.slim
== mapped_reconstructed_image
).all()
class TestCompareToManualProfilesAndInversion:
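        # Fits combining light profiles and an inversion: the model visibilities are the profile
        # visibilities plus the inversion's mapped reconstructed visibilities of the
        # profile-subtracted data.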
def test___all_fit_quantities__no_hyper_methods(self, masked_interferometer_7):
galaxy_light = ag.Galaxy(
redshift=0.5, light_profile=ag.lp.EllipticalSersic(intensity=1.0)
)
pix = ag.pix.Rectangular(shape=(3, 3))
reg = ag.reg.Constant(coefficient=1.0)
galaxy_pix = ag.Galaxy(redshift=1.0, pixelization=pix, regularization=reg)
plane = ag.Plane(redshift=0.75, galaxies=[galaxy_light, galaxy_pix])
fit = ag.FitInterferometer(
masked_interferometer=masked_interferometer_7, plane=plane
)
profile_visibilities = plane.profile_visibilities_from_grid_and_transformer(
grid=masked_interferometer_7.grid,
transformer=masked_interferometer_7.transformer,
)
assert profile_visibilities.slim == pytest.approx(
fit.profile_visibilities.slim
)
profile_subtracted_visibilities = (
masked_interferometer_7.visibilities - profile_visibilities
)
assert profile_subtracted_visibilities.slim == pytest.approx(
fit.profile_subtracted_visibilities.slim
)
mapper = pix.mapper_from_grid_and_sparse_grid(
grid=masked_interferometer_7.grid,
settings=ag.SettingsPixelization(use_border=False),
)
inversion = inversions.InversionInterferometerMatrix.from_data_mapper_and_regularization(
visibilities=profile_subtracted_visibilities,
noise_map=masked_interferometer_7.noise_map,
transformer=masked_interferometer_7.transformer,
mapper=mapper,
regularization=reg,
)
model_visibilities = (
profile_visibilities + inversion.mapped_reconstructed_visibilities
)
assert model_visibilities.slim == pytest.approx(fit.model_visibilities.slim)
residual_map = ag.util.fit.residual_map_from(
data=masked_interferometer_7.visibilities, model_data=model_visibilities
)
assert residual_map.slim == pytest.approx(fit.residual_map.slim)
normalized_residual_map = ag.util.fit.normalized_residual_map_complex_from(
residual_map=residual_map, noise_map=masked_interferometer_7.noise_map
)
assert normalized_residual_map.slim == pytest.approx(
fit.normalized_residual_map.slim
)
chi_squared_map = ag.util.fit.chi_squared_map_complex_from(
residual_map=residual_map, noise_map=masked_interferometer_7.noise_map
)
assert chi_squared_map.slim == pytest.approx(fit.chi_squared_map.slim)
chi_squared = ag.util.fit.chi_squared_complex_from(
chi_squared_map=chi_squared_map
)
noise_normalization = ag.util.fit.noise_normalization_complex_from(
noise_map=masked_interferometer_7.noise_map
)
log_likelihood = ag.util.fit.log_likelihood_from(
chi_squared=chi_squared, noise_normalization=noise_normalization
)
assert log_likelihood == pytest.approx(fit.log_likelihood, 1e-4)
log_likelihood_with_regularization = ag.util.fit.log_likelihood_with_regularization_from(
chi_squared=chi_squared,
regularization_term=inversion.regularization_term,
noise_normalization=noise_normalization,
)
assert log_likelihood_with_regularization == pytest.approx(
fit.log_likelihood_with_regularization, 1e-4
)
log_evidence = ag.util.fit.log_evidence_from(
chi_squared=chi_squared,
regularization_term=inversion.regularization_term,
log_curvature_regularization_term=inversion.log_det_curvature_reg_matrix_term,
log_regularization_term=inversion.log_det_regularization_matrix_term,
noise_normalization=noise_normalization,
)
assert log_evidence == fit.log_evidence
assert log_evidence == fit.figure_of_merit
mapped_reconstructed_image = ag.util.inversion.mapped_reconstructed_data_from(
mapping_matrix=fit.inversion.mapper.mapping_matrix,
reconstruction=fit.inversion.reconstruction,
)
assert (
fit.inversion.mapped_reconstructed_image.slim
== mapped_reconstructed_image
).all()
        def test___fit_galaxy_model_image_dict__has_image_and_inversion_mapped_reconstructed_image(
self, masked_interferometer_7
):
g0 = ag.Galaxy(
redshift=0.5, light_profile=ag.lp.EllipticalSersic(intensity=1.0)
)
g1 = ag.Galaxy(
redshift=0.5, light_profile=ag.lp.EllipticalSersic(intensity=2.0)
)
pix = ag.pix.Rectangular(shape=(3, 3))
reg = ag.reg.Constant(coefficient=1.0)
galaxy_pix = ag.Galaxy(redshift=1.0, pixelization=pix, regularization=reg)
plane = ag.Plane(redshift=0.75, galaxies=[g0, g1, galaxy_pix])
fit = ag.FitInterferometer(
masked_interferometer=masked_interferometer_7, plane=plane
)
g0_visibilities = g0.profile_visibilities_from_grid_and_transformer(
grid=masked_interferometer_7.grid,
transformer=masked_interferometer_7.transformer,
)
g1_visibilities = g1.profile_visibilities_from_grid_and_transformer(
grid=masked_interferometer_7.grid,
transformer=masked_interferometer_7.transformer,
)
profile_visibilities = g0_visibilities + g1_visibilities
profile_subtracted_visibilities = (
masked_interferometer_7.visibilities - profile_visibilities
)
mapper = pix.mapper_from_grid_and_sparse_grid(
grid=masked_interferometer_7.grid,
settings=ag.SettingsPixelization(use_border=False),
)
inversion = inversions.InversionInterferometerMatrix.from_data_mapper_and_regularization(
visibilities=profile_subtracted_visibilities,
noise_map=masked_interferometer_7.noise_map,
transformer=masked_interferometer_7.transformer,
mapper=mapper,
regularization=reg,
)
g0_image = g0.image_from_grid(grid=masked_interferometer_7.grid)
g1_image = g1.image_from_grid(grid=masked_interferometer_7.grid)
assert fit.galaxy_model_image_dict[g0].slim == pytest.approx(
g0_image.slim, 1.0e-4
)
assert fit.galaxy_model_image_dict[g1].slim == pytest.approx(
g1_image.slim, 1.0e-4
)
assert fit.galaxy_model_image_dict[galaxy_pix].slim == pytest.approx(
inversion.mapped_reconstructed_image.slim, 1.0e-4
)
        def test___fit_galaxy_model_visibilities_dict__has_profile_visibilities_and_inversion_mapped_reconstructed_visibilities(
self, masked_interferometer_7
):
g0 = ag.Galaxy(
redshift=0.5, light_profile=ag.lp.EllipticalSersic(intensity=1.0)
)
g1 = ag.Galaxy(
redshift=0.5, light_profile=ag.lp.EllipticalSersic(intensity=2.0)
)
g2 = ag.Galaxy(redshift=0.5)
pix = ag.pix.Rectangular(shape=(3, 3))
reg = ag.reg.Constant(coefficient=1.0)
galaxy_pix = ag.Galaxy(redshift=1.0, pixelization=pix, regularization=reg)
plane = ag.Plane(redshift=0.75, galaxies=[g0, g1, g2, galaxy_pix])
fit = ag.FitInterferometer(
masked_interferometer=masked_interferometer_7, plane=plane
)
g0_visibilities = g0.profile_visibilities_from_grid_and_transformer(
grid=masked_interferometer_7.grid,
transformer=masked_interferometer_7.transformer,
)
g1_visibilities = g1.profile_visibilities_from_grid_and_transformer(
grid=masked_interferometer_7.grid,
transformer=masked_interferometer_7.transformer,
)
profile_visibilities = g0_visibilities + g1_visibilities
profile_subtracted_visibilities = (
masked_interferometer_7.visibilities - profile_visibilities
)
mapper = pix.mapper_from_grid_and_sparse_grid(
grid=masked_interferometer_7.grid,
settings=ag.SettingsPixelization(use_border=False),
)
inversion = inversions.InversionInterferometerMatrix.from_data_mapper_and_regularization(
visibilities=profile_subtracted_visibilities,
noise_map=masked_interferometer_7.noise_map,
transformer=masked_interferometer_7.transformer,
mapper=mapper,
regularization=reg,
)
assert (
fit.galaxy_model_visibilities_dict[g2] == 0.0 + 0.0j * np.zeros((7,))
).all()
assert fit.galaxy_model_visibilities_dict[g0].slim == pytest.approx(
g0_visibilities.slim, 1.0e-4
)
assert fit.galaxy_model_visibilities_dict[g1].slim == pytest.approx(
g1_visibilities.slim, 1.0e-4
)
assert fit.galaxy_model_visibilities_dict[galaxy_pix].slim == pytest.approx(
inversion.mapped_reconstructed_visibilities.slim, 1.0e-4
)
assert fit.model_visibilities.slim == pytest.approx(
fit.galaxy_model_visibilities_dict[g0].slim
+ fit.galaxy_model_visibilities_dict[g1].slim
+ inversion.mapped_reconstructed_visibilities.slim,
1.0e-4,
)
def test___all_fit_quantities__hyper_background_noise(
self, masked_interferometer_7
):
hyper_background_noise = ag.hyper_data.HyperBackgroundNoise(noise_scale=1.0)
hyper_noise_map = hyper_background_noise.hyper_noise_map_from_complex_noise_map(
noise_map=masked_interferometer_7.noise_map
)
galaxy_light = ag.Galaxy(
redshift=0.5, light_profile=ag.lp.EllipticalSersic(intensity=1.0)
)
pix = ag.pix.Rectangular(shape=(3, 3))
reg = ag.reg.Constant(coefficient=1.0)
galaxy_pix = ag.Galaxy(redshift=1.0, pixelization=pix, regularization=reg)
plane = ag.Plane(redshift=0.75, galaxies=[galaxy_light, galaxy_pix])
fit = ag.FitInterferometer(
masked_interferometer=masked_interferometer_7,
plane=plane,
hyper_background_noise=hyper_background_noise,
)
assert hyper_noise_map.slim == pytest.approx(
fit.inversion.noise_map, 1.0e-4
)
assert hyper_noise_map.slim == pytest.approx(fit.noise_map.slim)
```
#### File: unit/galaxy/test_galaxy_model.py
```python
import os
from autoconf import conf
import autofit as af
import autogalaxy as ag
import pytest
class MockPriorModel:
def __init__(self, name, cls):
self.name = name
self.cls = cls
self.centre = "origin for {}".format(name)
self.elliptical_comps = "elliptical_comps for {}".format(name)
class MockModelMapper:
def __init__(self):
self.classes = {}
def add_class(self, name, cls):
self.classes[name] = cls
return MockPriorModel(name, cls)
class MockModelInstance:
pass
@pytest.fixture(name="mass_and_light")
def make_profile():
return ag.lmp.EllipticalSersicRadialGradient()
@pytest.fixture(name="mapper")
def make_mapper():
return af.ModelMapper()
@pytest.fixture(name="galaxy_model_2")
def make_galaxy_model_2(mapper,):
galaxy_model_2 = ag.GalaxyModel(
redshift=ag.Redshift,
light_profile=ag.lp.EllipticalDevVaucouleurs,
mass_profile=ag.mp.EllipticalCoredIsothermal,
)
mapper.galaxy_2 = galaxy_model_2
return galaxy_model_2
@pytest.fixture(name="galaxy_model")
def make_galaxy_model(mapper,):
galaxy_model_1 = ag.GalaxyModel(
redshift=ag.Redshift,
light_profile=ag.lp.EllipticalDevVaucouleurs,
mass_profile=ag.mp.EllipticalCoredIsothermal,
)
mapper.galaxy_1 = galaxy_model_1
return galaxy_model_1
class TestMassAndLightProfiles:
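    # A single light-and-mass profile (ag.lmp) instance is shared, so it appears in both
    # galaxy.light_profiles and galaxy.mass_profiles.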
def test_make_galaxy_from_instance_profile(self, mass_and_light):
prior = ag.GalaxyModel(redshift=0.5, profile=mass_and_light)
galaxy = prior.instance_for_arguments({})
assert galaxy.light_profiles[0] == mass_and_light
assert galaxy.mass_profiles[0] == mass_and_light
def test_make_galaxy_from_model_profile(self):
galaxy_model = ag.GalaxyModel(redshift=0.5, profile=ag.lmp.EllipticalSersic)
arguments = {
galaxy_model.profile.centre.centre_0: 1.0,
galaxy_model.profile.centre.centre_1: 0.2,
galaxy_model.profile.elliptical_comps.elliptical_comps_0: 0.4,
galaxy_model.profile.elliptical_comps.elliptical_comps_1: 0.5,
galaxy_model.profile.intensity: 0.6,
galaxy_model.profile.effective_radius: 0.7,
galaxy_model.profile.sersic_index: 0.8,
galaxy_model.profile.mass_to_light_ratio: 0.5,
}
galaxy = galaxy_model.instance_for_arguments(arguments)
assert galaxy.light_profiles[0] == galaxy.mass_profiles[0]
assert isinstance(galaxy.light_profiles[0], ag.lmp.EllipticalSersic)
assert galaxy.mass_profiles[0].centre == (1.0, 0.2)
assert galaxy.mass_profiles[0].elliptical_comps == (0.4, 0.5)
assert galaxy.mass_profiles[0].intensity == 0.6
assert galaxy.mass_profiles[0].effective_radius == 0.7
assert galaxy.mass_profiles[0].sersic_index == 0.8
assert galaxy.mass_profiles[0].mass_to_light_ratio == 0.5
class TestGalaxyModel:
def test_init_to_model_mapper(self, mapper):
mapper.galaxy_1 = ag.GalaxyModel(
redshift=ag.Redshift,
light_profile=ag.lp.EllipticalDevVaucouleurs,
mass_profile=ag.mp.EllipticalCoredIsothermal,
)
print(mapper.galaxy_1.redshift)
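        # 13 priors: presumably 1 for the redshift plus 6 each for the EllipticalDevVaucouleurs
        # light profile and the EllipticalCoredIsothermal mass profile.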
assert len(mapper.prior_tuples_ordered_by_id) == 13
def test_multiple_galaxies(self, mapper):
mapper.galaxy_1 = ag.GalaxyModel(
redshift=ag.Redshift,
light_profile=ag.lp.EllipticalDevVaucouleurs,
mass_profile=ag.mp.EllipticalCoredIsothermal,
)
mapper.galaxy_2 = ag.GalaxyModel(
redshift=ag.Redshift,
light_profile=ag.lp.EllipticalDevVaucouleurs,
mass_profile=ag.mp.EllipticalCoredIsothermal,
)
assert len(mapper.prior_model_tuples) == 2
class TestNamedProfiles:
def test_get_prior_model(self):
galaxy_model = ag.GalaxyModel(
redshift=ag.Redshift,
light_profile=ag.lp.EllipticalSersic,
mass_profile=ag.mp.EllipticalSersic,
)
assert isinstance(galaxy_model.light_profile, af.PriorModel)
assert isinstance(galaxy_model.mass_profile, af.PriorModel)
def test_set_prior_model(self):
mapper = af.ModelMapper()
galaxy_model = ag.GalaxyModel(
redshift=ag.Redshift,
light_profile=ag.lp.EllipticalSersic,
mass_profile=ag.mp.EllipticalSersic,
)
mapper.galaxy = galaxy_model
        assert len(mapper.prior_tuples_ordered_by_id) == 16
        galaxy_model.light_profile = af.PriorModel(ag.lp.LightProfile)
        assert len(mapper.prior_tuples_ordered_by_id) == 9
class TestResultForArguments:
def test_simple_instance_for_arguments(self):
galaxy_model = ag.GalaxyModel(redshift=ag.Redshift)
arguments = {galaxy_model.redshift.redshift: 0.5}
galaxy = galaxy_model.instance_for_arguments(arguments)
assert galaxy.redshift == 0.5
def test_complicated_instance_for_arguments(self):
galaxy_model = ag.GalaxyModel(
redshift=ag.Redshift,
light_profile=ag.lp.EllipticalSersic,
mass_profile=ag.mp.SphericalIsothermal,
)
galaxy_model.light_profile.centre = galaxy_model.mass_profile.centre
arguments = {
galaxy_model.redshift.redshift: 0.5,
galaxy_model.mass_profile.centre.centre_0: 0.9,
galaxy_model.mass_profile.centre.centre_1: 0.3,
galaxy_model.mass_profile.einstein_radius: 0.3,
galaxy_model.light_profile.elliptical_comps.elliptical_comps_0: 0.5,
galaxy_model.light_profile.elliptical_comps.elliptical_comps_1: 0.6,
galaxy_model.light_profile.intensity: 0.6,
galaxy_model.light_profile.effective_radius: 0.7,
galaxy_model.light_profile.sersic_index: 0.5,
}
galaxy = galaxy_model.instance_for_arguments(arguments)
assert galaxy.light_profiles[0].centre[0] == 0.9
assert galaxy.light_profiles[0].centre[1] == 0.3
def test_gaussian_prior_model_for_arguments(self):
galaxy_model = ag.GalaxyModel(
redshift=ag.Redshift,
light_profile=ag.lp.EllipticalSersic,
mass_profile=ag.mp.SphericalIsothermal,
)
galaxy_model.light_profile.centre = galaxy_model.mass_profile.centre
redshift_prior = af.GaussianPrior(1, 1)
einstein_radius_prior = af.GaussianPrior(4, 1)
intensity_prior = af.GaussianPrior(7, 1)
arguments = {
galaxy_model.redshift.redshift: redshift_prior,
galaxy_model.mass_profile.centre.centre_0: af.GaussianPrior(2, 1),
galaxy_model.mass_profile.centre.centre_1: af.GaussianPrior(3, 1),
galaxy_model.mass_profile.einstein_radius: einstein_radius_prior,
galaxy_model.light_profile.elliptical_comps.elliptical_comps_0: af.GaussianPrior(
6, 1
),
galaxy_model.light_profile.elliptical_comps.elliptical_comps_1: af.GaussianPrior(
6, 1
),
galaxy_model.light_profile.intensity: intensity_prior,
galaxy_model.light_profile.effective_radius: af.GaussianPrior(8, 1),
galaxy_model.light_profile.sersic_index: af.GaussianPrior(9, 1),
}
gaussian_galaxy_model_model = galaxy_model.gaussian_prior_model_for_arguments(
arguments
)
assert gaussian_galaxy_model_model.redshift.redshift == redshift_prior
assert (
gaussian_galaxy_model_model.mass_profile.einstein_radius
== einstein_radius_prior
)
assert gaussian_galaxy_model_model.light_profile.intensity == intensity_prior
class TestPixelization:
def test_pixelization(self):
galaxy_model = ag.GalaxyModel(
redshift=ag.Redshift,
pixelization=ag.pix.Rectangular,
regularization=ag.reg.Constant,
)
arguments = {
galaxy_model.redshift.redshift: 2.0,
galaxy_model.pixelization.shape_0: 24.0,
galaxy_model.pixelization.shape_1: 23.0,
galaxy_model.regularization.coefficient: 0.5,
}
galaxy = galaxy_model.instance_for_arguments(arguments)
assert galaxy.pixelization.shape[0] == 24
assert galaxy.pixelization.shape[1] == 23
def test_fixed_pixelization(self):
galaxy_model = ag.GalaxyModel(
redshift=ag.Redshift,
pixelization=ag.pix.Rectangular(),
regularization=ag.reg.Constant(),
)
arguments = {galaxy_model.redshift.redshift: 2.0}
galaxy = galaxy_model.instance_for_arguments(arguments)
assert galaxy.pixelization.shape[0] == 3
assert galaxy.pixelization.shape[1] == 3
    def test__if_no_regularization_raises_error(self):
with pytest.raises(AssertionError):
ag.GalaxyModel(redshift=ag.Redshift, pixelization=ag.pix.Voronoi)
class TestRegularization:
def test_regularization(self):
galaxy_model = ag.GalaxyModel(
redshift=ag.Redshift,
pixelization=ag.pix.Rectangular,
regularization=ag.reg.Constant,
)
arguments = {
galaxy_model.redshift.redshift: 2.0,
galaxy_model.pixelization.shape_0: 24.0,
galaxy_model.pixelization.shape_1: 23.0,
galaxy_model.regularization.coefficient: 0.5,
}
galaxy = galaxy_model.instance_for_arguments(arguments)
assert galaxy.regularization.coefficient == 0.5
def test_fixed_regularization(self):
galaxy_model = ag.GalaxyModel(
redshift=ag.Redshift,
pixelization=ag.pix.Voronoi(),
regularization=ag.reg.Constant(),
)
arguments = {galaxy_model.redshift.redshift: 2.0}
galaxy = galaxy_model.instance_for_arguments(arguments)
assert galaxy.regularization.coefficient == 1.0
def test__if_no_pixelization_raises_error(self):
with pytest.raises(AssertionError):
ag.GalaxyModel(redshift=ag.Redshift, regularization=ag.reg.Constant)
class TestHyperGalaxy:
def test_hyper_galaxy(self,):
galaxy_model = ag.GalaxyModel(redshift=ag.Redshift, hyper_galaxy=ag.HyperGalaxy)
arguments = {
galaxy_model.redshift.redshift: 0.2,
galaxy_model.hyper_galaxy.contribution_factor: 1,
galaxy_model.hyper_galaxy.noise_factor: 2,
galaxy_model.hyper_galaxy.noise_power: 1.5,
}
galaxy = galaxy_model.instance_for_arguments(arguments)
assert galaxy.hyper_galaxy.contribution_factor == 1
assert galaxy.hyper_galaxy.noise_factor == 2
assert galaxy.hyper_galaxy.noise_power == 1.5
assert galaxy.hyper_galaxy_image is None
def test_fixed_hyper_galaxy(self,):
galaxy_model = ag.GalaxyModel(
redshift=ag.Redshift, hyper_galaxy=ag.HyperGalaxy()
)
arguments = {galaxy_model.redshift.redshift: 2.0}
galaxy = galaxy_model.instance_for_arguments(arguments)
assert galaxy.hyper_galaxy.contribution_factor == 0.0
assert galaxy.hyper_galaxy.noise_factor == 0.0
assert galaxy.hyper_galaxy.noise_power == 1.0
assert galaxy.hyper_galaxy_image is None
class TestFixedProfiles:
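    # Profiles passed as instances (rather than classes) are fixed: they add no free parameters
    # but still appear on the resulting galaxy.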
def test_fixed_light(self):
galaxy_model = ag.GalaxyModel(
redshift=ag.Redshift, light_profile=ag.lp.EllipticalSersic()
)
arguments = {galaxy_model.redshift.redshift: 2.0}
galaxy = galaxy_model.instance_for_arguments(arguments)
assert len(galaxy.light_profiles) == 1
def test_fixed_mass(self):
galaxy_model = ag.GalaxyModel(
            redshift=ag.Redshift, mass_profile=ag.mp.SphericalNFW()
)
arguments = {galaxy_model.redshift.redshift: 2.0}
galaxy = galaxy_model.instance_for_arguments(arguments)
assert len(galaxy.mass_profiles) == 1
def test_fixed_and_model(self):
galaxy_model = ag.GalaxyModel(
redshift=ag.Redshift,
mass_profile=ag.mp.SphericalNFW(),
light_profile=ag.lp.EllipticalSersic(),
model_light=ag.lp.EllipticalSersic,
)
arguments = {
galaxy_model.redshift.redshift: 0.2,
galaxy_model.model_light.elliptical_comps.elliptical_comps_0: 0.5,
galaxy_model.model_light.elliptical_comps.elliptical_comps_1: 0.6,
galaxy_model.model_light.intensity: 0.6,
galaxy_model.model_light.effective_radius: 0.7,
galaxy_model.model_light.sersic_index: 0.8,
galaxy_model.model_light.centre.centre_0: 0,
galaxy_model.model_light.centre.centre_1: 0,
}
galaxy = galaxy_model.instance_for_arguments(arguments)
assert len(galaxy.light_profiles) == 2
assert len(galaxy.mass_profiles) == 1
class TestRedshift:
def test_set_redshift_class(self):
galaxy_model = ag.GalaxyModel(redshift=ag.Redshift)
galaxy_model.redshift = ag.Redshift(3)
assert galaxy_model.redshift == 3
def test_set_redshift_float(self):
galaxy_model = ag.GalaxyModel(redshift=ag.Redshift)
galaxy_model.redshift = 3
# noinspection PyUnresolvedReferences
assert galaxy_model.redshift == 3
def test_set_redshift_instance(self):
galaxy_model = ag.GalaxyModel(redshift=ag.Redshift)
galaxy_model.redshift = 3
# noinspection PyUnresolvedReferences
assert galaxy_model.redshift == 3
@pytest.fixture(name="galaxy")
def make_galaxy():
return ag.Galaxy(
redshift=3,
sersic=ag.lp.EllipticalSersic(),
exponential=ag.lp.EllipticalExponential(),
spherical=ag.mp.SphericalIsothermal(),
)
```
#### File: phase/dataset/test_analysis_dataset.py
```python
from os import path
import autofit as af
import autogalaxy as ag
import numpy as np
import pytest
from astropy import cosmology as cosmo
from autogalaxy.mock import mock
pytestmark = pytest.mark.filterwarnings(
"ignore:Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of "
"`arr[seq]`. In the future this will be interpreted as an arrays index, `arr[np.arrays(seq)]`, which will result "
"either in an error or a different result."
)
directory = path.dirname(path.realpath(__file__))
class TestHyperMethods:
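    # associate_hyper_images should attach the hyper galaxy images from the results'
    # hyper_galaxy_image_path_dict (and the hyper model image) to the matching galaxies of the
    # model instance.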
def test__associate_images(self, masked_imaging_7x7):
galaxies = af.ModelInstance()
galaxies.galaxy = ag.Galaxy(redshift=0.5)
galaxies.source = ag.Galaxy(redshift=1.0)
instance = af.ModelInstance()
instance.galaxies = galaxies
hyper_galaxy_image_path_dict = {
("galaxies", "galaxy"): ag.Array2D.ones(
shape_native=(3, 3), pixel_scales=1.0
),
("galaxies", "source"): ag.Array2D.full(
fill_value=2.0, shape_native=(3, 3), pixel_scales=1.0
),
}
results = mock.MockResults(
instance=instance,
hyper_galaxy_image_path_dict=hyper_galaxy_image_path_dict,
hyper_model_image=ag.Array2D.full(
fill_value=3.0, shape_native=(3, 3), pixel_scales=1.0
),
use_as_hyper_dataset=True,
)
analysis = ag.PhaseImaging.Analysis(
masked_imaging=masked_imaging_7x7,
settings=ag.SettingsPhaseImaging(),
results=results,
cosmology=cosmo.Planck15,
)
instance = analysis.associate_hyper_images(instance=instance)
assert instance.galaxies.galaxy.hyper_galaxy_image.native == pytest.approx(
np.ones((3, 3)), 1.0e-4
)
assert instance.galaxies.source.hyper_galaxy_image.native == pytest.approx(
2.0 * np.ones((3, 3)), 1.0e-4
)
assert instance.galaxies.galaxy.hyper_model_image.native == pytest.approx(
3.0 * np.ones((3, 3)), 1.0e-4
)
assert instance.galaxies.source.hyper_model_image.native == pytest.approx(
3.0 * np.ones((3, 3)), 1.0e-4
)
```
#### File: phase/dataset/test_phase_dataset.py
```python
from os import path
import pytest
import autogalaxy as ag
from autogalaxy.mock import mock
pytestmark = pytest.mark.filterwarnings(
"ignore:Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of "
"`arr[seq]`. In the future this will be interpreted as an arrays index, `arr[np.arrays(seq)]`, which will result "
"either in an error or a different result."
)
directory = path.dirname(path.realpath(__file__))
class TestPhase:
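    # extend_with_hyper_phase returns a HyperPhase when there are hyper components (image sky,
    # background noise or an inversion) to optimise, and the original phase otherwise.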
def test__extend_with_hyper_phase(self):
phase_with_hyper_sky = ag.PhaseImaging(search=mock.MockSearch())
setup_hyper = ag.SetupHyper(
hyper_image_sky=ag.hyper_data.HyperImageSky,
hyper_search_with_inversion=mock.MockSearch("test_phase"),
)
phase_extended = phase_with_hyper_sky.extend_with_hyper_phase(
setup_hyper=setup_hyper
)
assert isinstance(phase_extended, ag.HyperPhase)
assert phase_extended.hyper_image_sky is ag.hyper_data.HyperImageSky
phase_with_hyper_sky = ag.PhaseImaging(search=mock.MockSearch())
phase_extended = phase_with_hyper_sky.extend_with_hyper_phase(
setup_hyper=ag.SetupHyper(
hyper_background_noise=ag.hyper_data.HyperBackgroundNoise,
hyper_search_with_inversion=mock.MockSearch("test_phase"),
)
)
assert isinstance(phase_extended, ag.HyperPhase)
phase_with_pixelization = ag.PhaseImaging(
galaxies=dict(
source=ag.GalaxyModel(
redshift=0.5,
pixelization=ag.pix.Rectangular,
regularization=ag.reg.Constant,
)
),
search=mock.MockSearch(),
)
phase_extended = phase_with_pixelization.extend_with_hyper_phase(
setup_hyper=ag.SetupHyper(
hyper_search_with_inversion=mock.MockSearch("test_phase")
)
)
assert isinstance(phase_extended, ag.HyperPhase)
    def test__extend_with_hyper_phase__does_not_extend_if_no_hyper_components_to_optimize(
self
):
phase_no_pixelization = ag.PhaseImaging(search=mock.MockSearch("test_phase"))
phase_extended = phase_no_pixelization.extend_with_hyper_phase(
setup_hyper=ag.SetupHyper()
)
assert phase_extended == phase_no_pixelization
phase_no_pixelization = ag.PhaseImaging(search=mock.MockSearch("test_phase"))
phase_extended = phase_no_pixelization.extend_with_hyper_phase(
setup_hyper=ag.SetupHyper(hyper_image_sky=ag.hyper_data.HyperImageSky),
include_hyper_image_sky=False,
)
assert phase_extended == phase_no_pixelization
class TestMakeAnalysis:
def test__mask_input_uses_mask(self, phase_imaging_7x7, imaging_7x7):
# If an input mask is supplied we use mask input.
mask_input = ag.Mask2D.circular(
shape_native=imaging_7x7.shape_native,
pixel_scales=1.0,
sub_size=1,
radius=1.5,
)
analysis = phase_imaging_7x7.make_analysis(
dataset=imaging_7x7, mask=mask_input, results=mock.MockResults()
)
assert (analysis.masked_imaging.mask == mask_input).all()
assert analysis.masked_imaging.mask.pixel_scales == mask_input.pixel_scales
def test__mask_changes_sub_size_depending_on_phase_attribute(
self, phase_imaging_7x7, imaging_7x7
):
# If an input mask is supplied we use mask input.
mask_input = ag.Mask2D.circular(
shape_native=imaging_7x7.shape_native,
pixel_scales=1,
sub_size=1,
radius=1.5,
)
phase_imaging_7x7 = ag.PhaseImaging(
search=mock.MockSearch("test_phase"),
settings=ag.SettingsPhaseImaging(
settings_masked_imaging=ag.SettingsMaskedImaging(sub_size=1)
),
)
analysis = phase_imaging_7x7.make_analysis(
dataset=imaging_7x7, mask=mask_input, results=mock.MockResults()
)
assert (analysis.masked_imaging.mask == mask_input).all()
assert analysis.masked_imaging.mask.sub_size == 1
assert analysis.masked_imaging.mask.pixel_scales == mask_input.pixel_scales
phase_imaging_7x7 = ag.PhaseImaging(
search=mock.MockSearch("test_phase"),
settings=ag.SettingsPhaseImaging(
settings_masked_imaging=ag.SettingsMaskedImaging(sub_size=2)
),
)
analysis = phase_imaging_7x7.make_analysis(
dataset=imaging_7x7, mask=mask_input, results=mock.MockResults()
)
assert (analysis.masked_imaging.mask == mask_input).all()
assert analysis.masked_imaging.mask.sub_size == 2
assert analysis.masked_imaging.mask.pixel_scales == mask_input.pixel_scales
class TestPhasePickle:
# noinspection PyTypeChecker
def test_assertion_failure(self, imaging_7x7, mask_7x7):
phase_imaging_7x7 = ag.PhaseImaging(
galaxies=dict(
galaxy=ag.Galaxy(light=ag.lp.EllipticalLightProfile, redshift=1)
),
search=mock.MockSearch("name"),
)
result = phase_imaging_7x7.run(dataset=imaging_7x7, mask=mask_7x7, results=None)
assert result is not None
phase_imaging_7x7 = ag.PhaseImaging(
galaxies=dict(
galaxy=ag.Galaxy(light=ag.lp.EllipticalLightProfile, redshift=1)
),
search=mock.MockSearch("name"),
)
result = phase_imaging_7x7.run(dataset=imaging_7x7, mask=mask_7x7, results=None)
assert result is not None
```
#### File: phase/imaging/test_analysis_imaging.py
```python
from os import path
import autofit as af
import autogalaxy as ag
import pytest
from astropy import cosmology as cosmo
from autogalaxy.fit.fit import FitImaging
from autogalaxy.mock import mock
pytestmark = pytest.mark.filterwarnings(
"ignore:Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of "
"`arr[seq]`. In the future this will be interpreted as an arrays index, `arr[np.arrays(seq)]`, which will result "
"either in an error or a different result."
)
directory = path.dirname(path.realpath(__file__))
class TestFit:
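    # These tests check that the figure of merit returned by the analysis'
    # log_likelihood_function matches the log likelihood of a FitImaging built manually from the
    # same masked imaging and plane.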
def test__fit_using_imaging(self, imaging_7x7, mask_7x7, samples_with_result):
phase_imaging_7x7 = ag.PhaseImaging(
galaxies=dict(
galaxy=ag.GalaxyModel(redshift=0.5, light=ag.lp.EllipticalSersic),
source=ag.GalaxyModel(redshift=1.0, light=ag.lp.EllipticalSersic),
),
search=mock.MockSearch(samples=samples_with_result, name="test_phase"),
)
result = phase_imaging_7x7.run(
dataset=imaging_7x7, mask=mask_7x7, results=mock.MockResults()
)
assert isinstance(result.instance.galaxies[0], ag.Galaxy)
        assert isinstance(result.instance.galaxies[1], ag.Galaxy)
def test__figure_of_merit__matches_correct_fit_given_galaxy_profiles(
self, imaging_7x7, mask_7x7
):
galaxy = ag.Galaxy(redshift=0.5, light=ag.lp.EllipticalSersic(intensity=0.1))
phase_imaging_7x7 = ag.PhaseImaging(
galaxies=dict(galaxy=galaxy),
settings=ag.SettingsPhaseImaging(
settings_masked_imaging=ag.SettingsMaskedImaging(sub_size=1)
),
search=mock.MockSearch(name="test_phase"),
)
analysis = phase_imaging_7x7.make_analysis(
dataset=imaging_7x7, mask=mask_7x7, results=mock.MockResults()
)
instance = phase_imaging_7x7.model.instance_from_unit_vector([])
fit_figure_of_merit = analysis.log_likelihood_function(instance=instance)
masked_imaging = ag.MaskedImaging(
imaging=imaging_7x7,
mask=mask_7x7,
settings=ag.SettingsMaskedImaging(sub_size=1),
)
plane = analysis.plane_for_instance(instance=instance)
fit = ag.FitImaging(masked_imaging=masked_imaging, plane=plane)
assert fit.log_likelihood == fit_figure_of_merit
def test__figure_of_merit__includes_hyper_image_and_noise__matches_fit(
self, imaging_7x7, mask_7x7
):
hyper_image_sky = ag.hyper_data.HyperImageSky(sky_scale=1.0)
hyper_background_noise = ag.hyper_data.HyperBackgroundNoise(noise_scale=1.0)
        galaxy = ag.Galaxy(redshift=0.5, light=ag.lp.EllipticalSersic(intensity=0.1))
        phase_imaging_7x7 = ag.PhaseImaging(
            galaxies=dict(galaxy=galaxy),
hyper_image_sky=hyper_image_sky,
hyper_background_noise=hyper_background_noise,
settings=ag.SettingsPhaseImaging(
settings_masked_imaging=ag.SettingsMaskedImaging(sub_size=4)
),
search=mock.MockSearch(name="test_phase"),
)
analysis = phase_imaging_7x7.make_analysis(
dataset=imaging_7x7, mask=mask_7x7, results=mock.MockResults()
)
instance = phase_imaging_7x7.model.instance_from_unit_vector([])
fit_figure_of_merit = analysis.log_likelihood_function(instance=instance)
assert analysis.masked_imaging.mask.sub_size == 4
masked_imaging = ag.MaskedImaging(
imaging=imaging_7x7,
mask=mask_7x7,
settings=ag.SettingsMaskedImaging(sub_size=4),
)
plane = analysis.plane_for_instance(instance=instance)
fit = FitImaging(
masked_imaging=masked_imaging,
plane=plane,
hyper_image_sky=hyper_image_sky,
hyper_background_noise=hyper_background_noise,
)
assert fit.log_likelihood == fit_figure_of_merit
def test__uses_hyper_fit_correctly(self, masked_imaging_7x7):
galaxies = af.ModelInstance()
galaxies.galaxy = ag.Galaxy(
redshift=0.5,
light=ag.lp.EllipticalSersic(intensity=1.0),
mass=ag.mp.SphericalIsothermal,
)
galaxies.source = ag.Galaxy(redshift=1.0, light=ag.lp.EllipticalSersic())
instance = af.ModelInstance()
instance.galaxies = galaxies
galaxy_hyper_image = ag.Array2D.ones(shape_native=(3, 3), pixel_scales=0.1)
galaxy_hyper_image[4] = 10.0
hyper_model_image = ag.Array2D.full(
fill_value=0.5, shape_native=(3, 3), pixel_scales=0.1
)
hyper_galaxy_image_path_dict = {("galaxies", "galaxy"): galaxy_hyper_image}
results = mock.MockResults(
use_as_hyper_dataset=True,
hyper_galaxy_image_path_dict=hyper_galaxy_image_path_dict,
hyper_model_image=hyper_model_image,
)
analysis = ag.PhaseImaging.Analysis(
masked_imaging=masked_imaging_7x7,
settings=ag.SettingsPhaseImaging(),
results=results,
cosmology=cosmo.Planck15,
)
hyper_galaxy = ag.HyperGalaxy(
contribution_factor=1.0, noise_factor=1.0, noise_power=1.0
)
instance.galaxies.galaxy.hyper_galaxy = hyper_galaxy
fit_likelihood = analysis.log_likelihood_function(instance=instance)
g0 = ag.Galaxy(
redshift=0.5,
light_profile=instance.galaxies.galaxy.light,
mass_profile=instance.galaxies.galaxy.mass,
hyper_galaxy=hyper_galaxy,
hyper_model_image=hyper_model_image,
hyper_galaxy_image=galaxy_hyper_image,
hyper_minimum_value=0.0,
)
g1 = ag.Galaxy(redshift=1.0, light_profile=instance.galaxies.source.light)
plane = ag.Plane(galaxies=[g0, g1])
fit = FitImaging(masked_imaging=masked_imaging_7x7, plane=plane)
assert (fit.plane.galaxies[0].hyper_galaxy_image == galaxy_hyper_image).all()
assert fit_likelihood == fit.log_likelihood
```
#### File: phase/imaging/test_phase_imaging.py
```python
from os import path
import numpy as np
import pytest
import autofit as af
import autogalaxy as ag
from autogalaxy.mock import mock
pytestmark = pytest.mark.filterwarnings(
"ignore:Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of "
"`arr[seq]`. In the future this will be interpreted as an arrays index, `arr[np.arrays(seq)]`, which will result "
"either in an error or a different result."
)
directory = path.dirname(path.realpath(__file__))
class TestMakeAnalysis:
def test__masks_image_and_noise_map_correctly(
self, phase_imaging_7x7, imaging_7x7, mask_7x7
):
analysis = phase_imaging_7x7.make_analysis(
dataset=imaging_7x7, mask=mask_7x7, results=mock.MockResults()
)
assert (
analysis.masked_imaging.image.native
== imaging_7x7.image.native * np.invert(mask_7x7)
).all()
assert (
analysis.masked_imaging.noise_map.native
== imaging_7x7.noise_map.native * np.invert(mask_7x7)
).all()
def test___phase_info_is_made(self, phase_imaging_7x7, imaging_7x7, mask_7x7):
phase_imaging_7x7.make_analysis(
dataset=imaging_7x7, mask=mask_7x7, results=mock.MockResults()
)
file_phase_info = path.join(
phase_imaging_7x7.search.paths.output_path, "phase.info"
)
phase_info = open(file_phase_info, "r")
search = phase_info.readline()
sub_size = phase_info.readline()
psf_shape_2d = phase_info.readline()
cosmology = phase_info.readline()
phase_info.close()
assert search == "Optimizer = MockSearch \n"
assert sub_size == "Sub-grid size = 2 \n"
assert psf_shape_2d == "PSF shape = None \n"
assert (
cosmology
== 'Cosmology = FlatLambdaCDM(name="Planck15", H0=67.7 km / (Mpc s), Om0=0.307, Tcmb0=2.725 K, '
"Neff=3.05, m_nu=[0. 0. 0.06] eV, Ob0=0.0486) \n"
)
def test__masked_imaging__settings_inputs_are_used_in_masked_imaging(
self, imaging_7x7, mask_7x7
):
phase_imaging_7x7 = ag.PhaseImaging(
settings=ag.SettingsPhaseImaging(
settings_masked_imaging=ag.SettingsMaskedImaging(
grid_class=ag.Grid2D,
grid_inversion_class=ag.Grid2D,
sub_size=3,
signal_to_noise_limit=1.0,
bin_up_factor=2,
psf_shape_2d=(3, 3),
)
),
search=mock.MockSearch("phase_imaging_7x7"),
)
assert phase_imaging_7x7.settings.settings_masked_imaging.sub_size == 3
assert (
phase_imaging_7x7.settings.settings_masked_imaging.signal_to_noise_limit
== 1.0
)
assert phase_imaging_7x7.settings.settings_masked_imaging.bin_up_factor == 2
assert phase_imaging_7x7.settings.settings_masked_imaging.psf_shape_2d == (3, 3)
analysis = phase_imaging_7x7.make_analysis(
dataset=imaging_7x7, mask=mask_7x7, results=mock.MockResults()
)
assert isinstance(analysis.masked_dataset.grid, ag.Grid2D)
assert isinstance(analysis.masked_dataset.grid_inversion, ag.Grid2D)
phase_imaging_7x7 = ag.PhaseImaging(
settings=ag.SettingsPhaseImaging(
settings_masked_imaging=ag.SettingsMaskedImaging(
grid_class=ag.Grid2DIterate,
sub_size=3,
fractional_accuracy=0.99,
sub_steps=[2],
)
),
search=mock.MockSearch("phase_imaging_7x7"),
)
analysis = phase_imaging_7x7.make_analysis(
dataset=imaging_7x7, mask=mask_7x7, results=mock.MockResults()
)
assert isinstance(analysis.masked_dataset.grid, ag.Grid2DIterate)
assert analysis.masked_dataset.grid.sub_size == 1
assert analysis.masked_dataset.grid.fractional_accuracy == 0.99
assert analysis.masked_dataset.grid.sub_steps == [2]
def test__masked_imaging__uses_signal_to_noise_limit(
self, imaging_7x7, mask_7x7_1_pix
):
imaging_snr_limit = imaging_7x7.signal_to_noise_limited_from(
signal_to_noise_limit=1.0
)
phase_imaging_7x7 = ag.PhaseImaging(
settings=ag.SettingsPhaseImaging(
settings_masked_imaging=ag.SettingsMaskedImaging(
signal_to_noise_limit=1.0
)
),
search=mock.MockSearch("phase_imaging_7x7"),
)
analysis = phase_imaging_7x7.make_analysis(
dataset=imaging_7x7, mask=mask_7x7_1_pix, results=mock.MockResults()
)
assert (
analysis.masked_dataset.image.native
== imaging_snr_limit.image.native * np.invert(mask_7x7_1_pix)
).all()
assert (
analysis.masked_dataset.noise_map.native
== imaging_snr_limit.noise_map.native * np.invert(mask_7x7_1_pix)
).all()
def test__masked_imaging__uses_bin_up_factor(self, imaging_7x7, mask_7x7_1_pix):
binned_up_imaging = imaging_7x7.binned_up_from(bin_up_factor=2)
binned_up_mask = mask_7x7_1_pix.binned_mask_from_bin_up_factor(bin_up_factor=2)
phase_imaging_7x7 = ag.PhaseImaging(
settings=ag.SettingsPhaseImaging(
settings_masked_imaging=ag.SettingsMaskedImaging(bin_up_factor=2)
),
search=mock.MockSearch("phase_imaging_7x7"),
)
analysis = phase_imaging_7x7.make_analysis(
dataset=imaging_7x7, mask=mask_7x7_1_pix, results=mock.MockResults()
)
assert (
analysis.masked_dataset.image.native
== binned_up_imaging.image.native * np.invert(binned_up_mask)
).all()
assert (
analysis.masked_dataset.psf == (1.0 / 9.0) * binned_up_imaging.psf
).all()
assert (
analysis.masked_dataset.noise_map.native
== binned_up_imaging.noise_map.native * np.invert(binned_up_mask)
).all()
assert (analysis.masked_dataset.mask == binned_up_mask).all()
class TestHyperMethods:
def test__phase_can_receive_hyper_image_and_noise_maps(self):
phase_imaging_7x7 = ag.PhaseImaging(
galaxies=dict(
galaxy=ag.GalaxyModel(redshift=ag.Redshift),
galaxy1=ag.GalaxyModel(redshift=ag.Redshift),
),
hyper_image_sky=ag.hyper_data.HyperImageSky,
hyper_background_noise=ag.hyper_data.HyperBackgroundNoise,
search=mock.MockSearch("test_phase"),
)
instance = phase_imaging_7x7.model.instance_from_vector([0.1, 0.2, 0.3, 0.4])
assert instance.galaxies[0].redshift == 0.1
assert instance.galaxies[1].redshift == 0.2
assert instance.hyper_image_sky.sky_scale == 0.3
assert instance.hyper_background_noise.noise_scale == 0.4
def test__phase_is_extended_with_hyper_phases__sets_up_hyper_dataset_from_results(
self, imaging_7x7, mask_7x7
):
galaxies = af.ModelInstance()
galaxies.galaxy = ag.Galaxy(redshift=0.5)
galaxies.source = ag.Galaxy(redshift=1.0)
instance = af.ModelInstance()
instance.galaxies = galaxies
hyper_galaxy_image_path_dict = {
("galaxies", "galaxy"): ag.Array2D.ones(
shape_native=(3, 3), pixel_scales=1.0
),
("galaxies", "source"): ag.Array2D.full(
fill_value=2.0, shape_native=(3, 3), pixel_scales=1.0
),
}
results = mock.MockResults(
hyper_galaxy_image_path_dict=hyper_galaxy_image_path_dict,
hyper_model_image=ag.Array2D.full(
fill_value=3.0, shape_native=(3, 3), pixel_scales=1.0
),
mask=mask_7x7,
use_as_hyper_dataset=True,
)
phase_imaging_7x7 = ag.PhaseImaging(
galaxies=dict(
galaxy=ag.GalaxyModel(redshift=0.5, hyper_galaxy=ag.HyperGalaxy)
),
search=mock.MockSearch("test_phase"),
)
analysis = phase_imaging_7x7.make_analysis(
dataset=imaging_7x7, mask=mask_7x7, results=results
)
assert (
analysis.hyper_galaxy_image_path_dict[("galaxies", "galaxy")].native
== np.ones((3, 3))
).all()
assert (
analysis.hyper_galaxy_image_path_dict[("galaxies", "source")].native
== 2.0 * np.ones((3, 3))
).all()
assert (analysis.hyper_model_image.native == 3.0 * np.ones((3, 3))).all()
```
#### File: phase/imaging/test_result_imaging.py
```python
import autofit as af
import autogalaxy as ag
import numpy as np
from astropy import cosmology as cosmo
from autogalaxy.mock import mock
class TestImagePassing:
def test___image_dict(self, masked_imaging_7x7):
galaxies = af.ModelInstance()
galaxies.galaxy = ag.Galaxy(redshift=0.5)
galaxies.source = ag.Galaxy(redshift=1.0)
instance = af.ModelInstance()
instance.galaxies = galaxies
analysis = ag.PhaseImaging.Analysis(
masked_imaging=masked_imaging_7x7,
settings=ag.SettingsPhaseImaging(),
results=mock.MockResults(),
cosmology=cosmo.Planck15,
)
result = ag.PhaseImaging.Result(
samples=mock.MockSamples(max_log_likelihood_instance=instance),
previous_model=af.ModelMapper(),
analysis=analysis,
search=None,
)
image_dict = result.image_galaxy_dict
assert isinstance(image_dict[("galaxies", "galaxy")], np.ndarray)
assert isinstance(image_dict[("galaxies", "source")], np.ndarray)
result.instance.galaxies.light = ag.Galaxy(redshift=0.5)
image_dict = result.image_galaxy_dict
assert (image_dict[("galaxies", "galaxy")].native == np.zeros((7, 7))).all()
assert isinstance(image_dict[("galaxies", "source")], np.ndarray)
```
#### File: plot/plotters/test_fit_imaging_plotters.py
```python
from os import path
import pytest
import autogalaxy.plot as aplt
directory = path.dirname(path.realpath(__file__))
@pytest.fixture(name="plot_path")
def make_fit_imaging_plotter_setup():
return path.join(
"{}".format(path.dirname(path.realpath(__file__))), "files", "plots", "fit"
)
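# plot_patch is assumed to record the paths of every figure output during a test, so the
# assertions below check which .png files were (and were not) produced.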
def test__fit_individuals__source_and_galaxy__dependent_on_input(
masked_imaging_fit_x2_galaxy_7x7, include_2d_all, plot_path, plot_patch
):
fit_imaging_plotter = aplt.FitImagingPlotter(
fit=masked_imaging_fit_x2_galaxy_7x7,
include_2d=include_2d_all,
mat_plot_2d=aplt.MatPlot2D(output=aplt.Output(plot_path, format="png")),
)
fit_imaging_plotter.figures(
image=True,
noise_map=False,
signal_to_noise_map=False,
model_image=True,
chi_squared_map=True,
)
assert path.join(plot_path, "image.png") in plot_patch.paths
assert path.join(plot_path, "noise_map.png") not in plot_patch.paths
assert path.join(plot_path, "signal_to_noise_map.png") not in plot_patch.paths
assert path.join(plot_path, "model_image.png") in plot_patch.paths
assert path.join(plot_path, "residual_map.png") not in plot_patch.paths
assert path.join(plot_path, "normalized_residual_map.png") not in plot_patch.paths
assert path.join(plot_path, "chi_squared_map.png") in plot_patch.paths
def test__figures_of_galaxies(
masked_imaging_fit_x2_galaxy_7x7, include_2d_all, plot_path, plot_patch
):
fit_imaging_plotter = aplt.FitImagingPlotter(
fit=masked_imaging_fit_x2_galaxy_7x7,
include_2d=include_2d_all,
mat_plot_2d=aplt.MatPlot2D(output=aplt.Output(plot_path, format="png")),
)
fit_imaging_plotter.figures_of_galaxies(subtracted_image=True)
assert path.join(plot_path, "subtracted_image_of_galaxy_0.png") in plot_patch.paths
assert path.join(plot_path, "subtracted_image_of_galaxy_1.png") in plot_patch.paths
fit_imaging_plotter.figures_of_galaxies(model_image=True)
assert path.join(plot_path, "model_image_of_galaxy_0.png") in plot_patch.paths
assert path.join(plot_path, "model_image_of_galaxy_1.png") in plot_patch.paths
plot_patch.paths = []
fit_imaging_plotter.figures_of_galaxies(subtracted_image=True, galaxy_index=0)
assert path.join(plot_path, "subtracted_image_of_galaxy_0.png") in plot_patch.paths
assert (
path.join(plot_path, "subtracted_image_of_galaxy_1.png") not in plot_patch.paths
)
fit_imaging_plotter.figures_of_galaxies(model_image=True, galaxy_index=1)
assert path.join(plot_path, "model_image_of_galaxy_0.png") not in plot_patch.paths
assert path.join(plot_path, "model_image_of_galaxy_1.png") in plot_patch.paths
def test__subplot_of_galaxy(
masked_imaging_fit_x2_galaxy_7x7, include_2d_all, plot_path, plot_patch
):
fit_imaging_plotter = aplt.FitImagingPlotter(
fit=masked_imaging_fit_x2_galaxy_7x7,
include_2d=include_2d_all,
mat_plot_2d=aplt.MatPlot2D(output=aplt.Output(plot_path, format="png")),
)
fit_imaging_plotter.subplots_of_galaxies()
assert path.join(plot_path, "subplot_of_galaxy_0.png") in plot_patch.paths
assert path.join(plot_path, "subplot_of_galaxy_1.png") in plot_patch.paths
plot_patch.paths = []
fit_imaging_plotter.subplots_of_galaxies(galaxy_index=0)
assert path.join(plot_path, "subplot_of_galaxy_0.png") in plot_patch.paths
assert path.join(plot_path, "subplot_of_galaxy_1.png") not in plot_patch.paths
```
#### File: plot/plotters/test_galaxy_plotters.py
```python
from os import path
import autogalaxy as ag
import autogalaxy.plot as aplt
import pytest
directory = path.dirname(path.realpath(__file__))
@pytest.fixture(name="plot_path")
def make_galaxy_plotter_setup():
return path.join(
"{}".format(path.dirname(path.realpath(__file__))), "files", "plots", "galaxy"
)
def test__visuals_with_include_2d(gal_x1_lp_x1_mp, grid_7x7):
visuals_2d = aplt.Visuals2D(vector_field=2)
include = aplt.Include2D(
origin=True,
mask=True,
border=True,
light_profile_centres=True,
mass_profile_centres=True,
critical_curves=True,
caustics=True,
)
galaxy_plotter = aplt.GalaxyPlotter(
galaxy=gal_x1_lp_x1_mp, grid=grid_7x7, visuals_2d=visuals_2d, include_2d=include
)
assert galaxy_plotter.visuals_with_include_2d.origin.in_list == [(0.0, 0.0)]
assert (galaxy_plotter.visuals_with_include_2d.mask == grid_7x7.mask).all()
assert (
galaxy_plotter.visuals_with_include_2d.border
== grid_7x7.mask.border_grid_sub_1.slim_binned
).all()
assert galaxy_plotter.visuals_with_include_2d.light_profile_centres.in_list == [
gal_x1_lp_x1_mp.light_profile_0.centre
]
assert galaxy_plotter.visuals_with_include_2d.mass_profile_centres.in_list == [
gal_x1_lp_x1_mp.mass_profile_0.centre
]
assert galaxy_plotter.visuals_with_include_2d.vector_field == 2
include = aplt.Include2D(origin=False, mask=False, border=False)
galaxy_plotter = aplt.GalaxyPlotter(
galaxy=gal_x1_lp_x1_mp, grid=grid_7x7, visuals_2d=visuals_2d, include_2d=include
)
assert galaxy_plotter.visuals_with_include_2d.origin == None
assert galaxy_plotter.visuals_with_include_2d.mask == None
assert galaxy_plotter.visuals_with_include_2d.border == None
assert galaxy_plotter.visuals_with_include_2d.vector_field == 2
def test__individual_images_are_output(
gal_x1_lp_x1_mp,
sub_grid_7x7,
mask_7x7,
grid_irregular_7x7_list,
include_2d_all,
plot_path,
plot_patch,
):
galaxy_plotter = aplt.GalaxyPlotter(
galaxy=gal_x1_lp_x1_mp,
grid=sub_grid_7x7,
include_2d=include_2d_all,
mat_plot_2d=aplt.MatPlot2D(output=aplt.Output(plot_path, format="png")),
)
galaxy_plotter.figures(image=True, convergence=True)
assert path.join(plot_path, "image.png") in plot_patch.paths
assert path.join(plot_path, "convergence.png") in plot_patch.paths
gal_x1_lp_x1_mp.hyper_galaxy = ag.HyperGalaxy()
gal_x1_lp_x1_mp.hyper_model_image = ag.Array2D.ones(
shape_native=(7, 7), pixel_scales=0.1
)
gal_x1_lp_x1_mp.hyper_galaxy_image = ag.Array2D.ones(
shape_native=(7, 7), pixel_scales=0.1
)
galaxy_plotter.figures(contribution_map=True)
assert path.join(plot_path, "contribution_map.png") in plot_patch.paths
def test__subplots_galaxy_quantities__all_are_output(
gal_x1_lp_x1_mp,
sub_grid_7x7,
grid_irregular_7x7_list,
include_2d_all,
plot_path,
plot_patch,
):
galaxy_plotter = aplt.GalaxyPlotter(
galaxy=gal_x1_lp_x1_mp,
grid=sub_grid_7x7,
include_2d=include_2d_all,
mat_plot_2d=aplt.MatPlot2D(output=aplt.Output(plot_path, format="png")),
)
galaxy_plotter.subplot_of_light_profiles(image=True)
assert path.join(plot_path, "subplot_image.png") in plot_patch.paths
galaxy_plotter.subplot_of_mass_profiles(
convergence=True, potential=True, deflections_y=True, deflections_x=True
)
assert path.join(plot_path, "subplot_convergence.png") in plot_patch.paths
assert path.join(plot_path, "subplot_potential.png") in plot_patch.paths
assert path.join(plot_path, "subplot_deflections_y.png") in plot_patch.paths
assert path.join(plot_path, "subplot_deflections_x.png") in plot_patch.paths
```
#### File: profiles/mass_profiles/test_mass_profiles.py
```python
import math
import autogalaxy as ag
from autogalaxy import exc
import numpy as np
import pytest
def mass_within_radius_of_profile_from_grid_calculation(radius, profile):
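    """
    Brute-force check of the analytic mass integrals: evaluate the convergence on a
    40 x 40 grid of (x, y) points and sum convergence * pixel area over every point
    whose elliptical radius lies inside `radius` (a simple Riemann sum).
    """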
mass_total = 0.0
xs = np.linspace(-radius * 1.5, radius * 1.5, 40)
ys = np.linspace(-radius * 1.5, radius * 1.5, 40)
edge = xs[1] - xs[0]
area = edge ** 2
for x in xs:
for y in ys:
eta = profile.grid_to_elliptical_radii(grid=np.array([[x, y]]))
if eta < radius:
mass_total += profile.convergence_func(eta) * area
return mass_total
class TestMassWithin:
def test__compare_to_analytic_and_grid_calculations(self):
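        # For a singular isothermal sphere the projected (angular) mass enclosed within
        # radius R is M(R) = pi * einstein_radius * R, which is the analytic value the
        # assertions below compare against.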
sis = ag.mp.SphericalIsothermal(einstein_radius=2.0)
mass = sis.mass_angular_within_circle(radius=2.0)
assert math.pi * sis.einstein_radius * 2.0 == pytest.approx(mass, 1e-3)
sis = ag.mp.SphericalIsothermal(einstein_radius=4.0)
mass = sis.mass_angular_within_circle(radius=4.0)
assert math.pi * sis.einstein_radius * 4.0 == pytest.approx(mass, 1e-3)
sis = ag.mp.SphericalIsothermal(einstein_radius=2.0)
mass_grid = mass_within_radius_of_profile_from_grid_calculation(
radius=1.0, profile=sis
)
mass = sis.mass_angular_within_circle(radius=1.0)
assert mass_grid == pytest.approx(mass, 0.02)
class TestRadiusAverageConvergenceOne:
def test__radius_of_average_convergence(self):
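        # `average_convergence_of_1_radius` is the radius within which the mean enclosed
        # convergence equals 1; for isothermal profiles this is the Einstein radius, so
        # the property should simply recover `einstein_radius`.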
sis = ag.mp.SphericalIsothermal(centre=(0.0, 0.0), einstein_radius=2.0)
assert sis.average_convergence_of_1_radius == pytest.approx(2.0, 1e-4)
sie = ag.mp.EllipticalIsothermal(
centre=(0.0, 0.0), einstein_radius=1.0, elliptical_comps=(0.0, 0.111111)
)
assert sie.average_convergence_of_1_radius == pytest.approx(1.0, 1e-4)
sie = ag.mp.EllipticalIsothermal(
centre=(0.0, 0.0), einstein_radius=3.0, elliptical_comps=(0.0, 0.333333)
)
assert sie.average_convergence_of_1_radius == pytest.approx(3.0, 1e-4)
sie = ag.mp.EllipticalIsothermal(
centre=(0.0, 0.0), einstein_radius=8.0, elliptical_comps=(0.0, 0.666666)
)
assert sie.average_convergence_of_1_radius == pytest.approx(8.0, 1e-4)
class TestDensityBetweenAnnuli:
def test__circular_annuli__sis__analyic_density_agrees(self):
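        # Using M(R) = pi * einstein_radius * R for the SIS, the mean surface density
        # between two circular annuli is (M_outer - M_inner) / annulus area, which is
        # the analytic value compared against below.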
einstein_radius = 1.0
sis = ag.mp.SphericalIsothermal(
centre=(0.0, 0.0), einstein_radius=einstein_radius
)
inner_annuli_radius = 2.0
outer_annuli_radius = 3.0
inner_mass = math.pi * einstein_radius * inner_annuli_radius
outer_mass = math.pi * einstein_radius * outer_annuli_radius
density_between_annuli = sis.density_between_circular_annuli(
inner_annuli_radius=inner_annuli_radius,
outer_annuli_radius=outer_annuli_radius,
)
annuli_area = (np.pi * outer_annuli_radius ** 2.0) - (
np.pi * inner_annuli_radius ** 2.0
)
assert (outer_mass - inner_mass) / annuli_area == pytest.approx(
density_between_annuli, 1e-4
)
def test__circular_annuli__nfw_profile__compare_to_manual_mass(self):
nfw = ag.mp.EllipticalNFW(
centre=(0.0, 0.0), elliptical_comps=(0.111111, 0.0), kappa_s=1.0
)
inner_mass = nfw.mass_angular_within_circle(radius=1.0)
outer_mass = nfw.mass_angular_within_circle(radius=2.0)
density_between_annuli = nfw.density_between_circular_annuli(
inner_annuli_radius=1.0, outer_annuli_radius=2.0
)
annuli_area = (np.pi * 2.0 ** 2.0) - (np.pi * 1.0 ** 2.0)
assert (outer_mass - inner_mass) / annuli_area == pytest.approx(
density_between_annuli, 1e-4
)
class TestNormalizationEinstienRadius:
def test__mass_angular_from_normalization_and_radius(self):
sis = ag.mp.SphericalIsothermal(centre=(0.0, 0.0), einstein_radius=2.0)
mass_angular_from_normalization = sis.mass_angular_from_normalization_and_radius(
normalization=1.0, radius=2.0
)
assert mass_angular_from_normalization == pytest.approx(2.0 * np.pi, 1.0e-2)
mass_angular_from_normalization = sis.mass_angular_from_normalization_and_radius(
normalization=1.0, radius=4.0
)
assert mass_angular_from_normalization == pytest.approx(4.0 * np.pi, 1.0e-2)
nfw = ag.mp.SphericalNFW(centre=(0.0, 0.0), kappa_s=1.0, scale_radius=1.0)
mass_angular_from_normalization = nfw.mass_angular_from_normalization_and_radius(
normalization=2.0, radius=2.0
)
assert mass_angular_from_normalization == pytest.approx(15.19525, 1.0e-4)
sersic = ag.mp.SphericalSersic(
centre=(0.0, 0.0),
intensity=1.0,
effective_radius=1.0,
sersic_index=3.0,
mass_to_light_ratio=1.0,
)
mass_angular_from_normalization = sersic.mass_angular_from_normalization_and_radius(
normalization=2.0, radius=2.0
)
sersic = ag.mp.SphericalSersic(
centre=(0.0, 0.0),
intensity=1.0,
effective_radius=1.0,
sersic_index=3.0,
mass_to_light_ratio=2.0,
)
assert mass_angular_from_normalization == pytest.approx(28.32431, 1.0e-4)
mass_angular_from_normalization = sersic.mass_angular_from_normalization_and_radius(
normalization=0.1, radius=2.0
)
assert mass_angular_from_normalization == pytest.approx(1.416215, 1.0e-2)
def test__normalization_from_mass_angular_and_radius(self):
sersic = ag.mp.SphericalIsothermal(centre=(0.0, 0.0), einstein_radius=2.0)
normalization = sersic.normalization_from_mass_angular_and_radius(
mass_angular=5.0,
radius=2.0,
normalization_min=0.5,
normalization_max=3.0,
bins=5,
)
assert normalization == pytest.approx(0.79577, 1.0e-2)
nfw = ag.mp.SphericalNFW(centre=(0.0, 0.0), kappa_s=3.0, scale_radius=1.0)
normalization = nfw.normalization_from_mass_angular_and_radius(
mass_angular=6.35829,
radius=2.0,
normalization_min=0.5,
normalization_max=3.0,
bins=5,
)
assert normalization == pytest.approx(0.83687, 1.0e-2)
sersic = ag.mp.SphericalSersic(
centre=(0.0, 0.0),
intensity=1.0,
effective_radius=1.0,
sersic_index=3.0,
mass_to_light_ratio=1.0,
)
normalization = sersic.normalization_from_mass_angular_and_radius(
mass_angular=2.15403,
radius=2.0,
normalization_min=0.01,
normalization_max=30.0,
bins=5,
)
sersic = sersic.with_new_normalization(normalization=normalization)
assert normalization == pytest.approx(0.152097, 1.0e-2)
with pytest.raises(exc.ProfileException):
sersic.normalization_from_mass_angular_and_radius(
mass_angular=1.0,
radius=2.0,
normalization_min=1e-4,
normalization_max=1e-3,
bins=2,
)
def test__einstein_radius_from_normalization(self):
sis = ag.mp.SphericalIsothermal(centre=(0.0, 0.0), einstein_radius=2.0)
einstein_radius_from_normalization = sis.einstein_radius_from_normalization(
normalization=1.0
)
assert einstein_radius_from_normalization == pytest.approx(1.0, 1.0e-2)
nfw = ag.mp.SphericalNFW(centre=(0.0, 0.0), kappa_s=1.0, scale_radius=1.0)
einstein_radius_from_normalization = nfw.einstein_radius_from_normalization(
normalization=2.0
)
assert einstein_radius_from_normalization == pytest.approx(2.35829, 1.0e-4)
sersic = ag.mp.SphericalSersic(
centre=(0.0, 0.0),
intensity=1.0,
effective_radius=1.0,
sersic_index=3.0,
mass_to_light_ratio=1.0,
)
einstein_radius_from_normalization = sersic.einstein_radius_from_normalization(
normalization=1.0
)
einstein_radius_from_profile = sersic.average_convergence_of_1_radius
assert einstein_radius_from_normalization == pytest.approx(
einstein_radius_from_profile, 1.0e-4
)
einstein_radius_from_normalization = sersic.einstein_radius_from_normalization(
normalization=0.1
)
assert einstein_radius_from_normalization == pytest.approx(0.381544, 1.0e-2)
einstein_radius_from_normalization = sersic.einstein_radius_from_normalization(
normalization=1e-4
)
assert einstein_radius_from_normalization == None
einstein_radius_from_normalization = sersic.einstein_radius_from_normalization(
normalization=1e9
)
assert einstein_radius_from_normalization == None
def test__normalization_from_einstein_radius(self):
sersic = ag.mp.SphericalIsothermal(centre=(0.0, 0.0), einstein_radius=2.0)
normalization = sersic.normalization_from_einstein_radius(
einstein_radius=1.0, normalization_min=0.5, normalization_max=3.0, bins=5
)
assert normalization == pytest.approx(1.0, 1.0e-2)
nfw = ag.mp.SphericalNFW(centre=(0.0, 0.0), kappa_s=3.0, scale_radius=1.0)
normalization = nfw.normalization_from_einstein_radius(
einstein_radius=2.35829,
normalization_min=0.5,
normalization_max=3.0,
bins=5,
)
assert normalization == pytest.approx(2.0, 1.0e-2)
sersic = ag.mp.SphericalSersic(
centre=(0.0, 0.0),
intensity=1.0,
effective_radius=1.0,
sersic_index=3.0,
mass_to_light_ratio=1.0,
)
normalization = sersic.normalization_from_einstein_radius(
einstein_radius=2.15403,
normalization_min=0.01,
normalization_max=30.0,
bins=5,
)
assert normalization == pytest.approx(1.0, 1.0e-2)
with pytest.raises(exc.ProfileException):
sersic.normalization_from_einstein_radius(
einstein_radius=1.0,
normalization_min=1e-4,
normalization_max=1e-3,
bins=2,
)
class TestExtractObject:
def test__extract_works(self):
sis = ag.mp.SphericalIsothermal(centre=(0.0, 0.0), einstein_radius=2.0)
einstein_radii = sis.extract_attribute(
cls=ag.mp.MassProfile, name="einstein_radius"
)
assert einstein_radii.in_list[0] == 2.0
centres = sis.extract_attribute(cls=ag.mp.MassProfile, name="centre")
assert centres.in_list[0] == (0.0, 0.0)
assert (
sis.extract_attribute(cls=ag.mp.MassProfile, name="einstein_radiu") == None
)
sis.extract_attribute(cls=ag.lp.LightProfile, name="einstein_radius")
class TestRegression:
def test__centre_of_profile_in_right_place(self):
grid = ag.Grid2D.uniform(shape_native=(7, 7), pixel_scales=1.0)
mass_profile = ag.mp.EllipticalIsothermal(
centre=(2.0, 1.0), einstein_radius=1.0
)
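        # On a 7x7 grid with pixel_scales=1.0 centred on the origin, scaled coordinates
        # run from y=+3 (row 0) to y=-3 (row 6) and x=-3 (column 0) to x=+3 (column 6),
        # so the profile centre (y, x) = (2.0, 1.0) falls in pixel (1, 4) - the expected
        # location of the convergence peak and potential minimum.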
convergence = mass_profile.convergence_from_grid(grid=grid)
max_indexes = np.unravel_index(
convergence.native.argmax(), convergence.shape_native
)
assert max_indexes == (1, 4)
potential = mass_profile.potential_from_grid(grid=grid)
max_indexes = np.unravel_index(
potential.native.argmin(), potential.shape_native
)
assert max_indexes == (1, 4)
deflections = mass_profile.deflections_from_grid(grid=grid)
assert deflections.native[1, 4, 0] > 0
assert deflections.native[2, 4, 0] < 0
assert deflections.native[1, 4, 1] > 0
assert deflections.native[1, 3, 1] < 0
mass_profile = ag.mp.SphericalIsothermal(centre=(2.0, 1.0), einstein_radius=1.0)
convergence = mass_profile.convergence_from_grid(grid=grid)
max_indexes = np.unravel_index(
convergence.native.argmax(), convergence.shape_native
)
assert max_indexes == (1, 4)
mass_profile = ag.mp.SphericalIsothermal(centre=(2.0, 1.0), einstein_radius=1.0)
potential = mass_profile.potential_from_grid(grid=grid)
max_indexes = np.unravel_index(
potential.native.argmin(), potential.shape_native
)
assert max_indexes == (1, 4)
deflections = mass_profile.deflections_from_grid(grid=grid)
assert deflections.native[1, 4, 0] > 0
assert deflections.native[2, 4, 0] < 0
assert deflections.native[1, 4, 1] > 0
assert deflections.native[1, 3, 1] < 0
grid = ag.Grid2DIterate.uniform(
shape_native=(7, 7),
pixel_scales=1.0,
fractional_accuracy=0.99,
sub_steps=[2, 4],
)
mass_profile = ag.mp.EllipticalIsothermal(
centre=(2.0, 1.0), einstein_radius=1.0
)
convergence = mass_profile.convergence_from_grid(grid=grid)
max_indexes = np.unravel_index(
convergence.native.argmax(), convergence.shape_native
)
assert max_indexes == (1, 4)
potential = mass_profile.potential_from_grid(grid=grid)
max_indexes = np.unravel_index(
potential.native.argmin(), potential.shape_native
)
assert max_indexes == (1, 4)
deflections = mass_profile.deflections_from_grid(grid=grid)
assert deflections.native[1, 4, 0] >= 0
assert deflections.native[2, 4, 0] <= 0
assert deflections.native[1, 4, 1] >= 0
assert deflections.native[1, 3, 1] <= 0
mass_profile = ag.mp.SphericalIsothermal(centre=(2.0, 1.0), einstein_radius=1.0)
convergence = mass_profile.convergence_from_grid(grid=grid)
max_indexes = np.unravel_index(
convergence.native.argmax(), convergence.shape_native
)
assert max_indexes == (1, 4)
potential = mass_profile.potential_from_grid(grid=grid)
max_indexes = np.unravel_index(
potential.native.argmin(), potential.shape_native
)
assert max_indexes == (1, 4)
deflections = mass_profile.deflections_from_grid(grid=grid)
assert deflections.native[1, 4, 0] >= 0
assert deflections.native[2, 4, 0] <= 0
assert deflections.native[1, 4, 1] >= 0
assert deflections.native[1, 3, 1] <= 0
class TestDecorators:
def test__grid_iterate_in__iterates_grid_result_correctly(self, gal_x1_mp):
mask = ag.Mask2D.manual(
mask=[
[True, True, True, True, True],
[True, False, False, False, True],
[True, False, False, False, True],
[True, False, False, False, True],
[True, True, True, True, True],
],
pixel_scales=(1.0, 1.0),
)
grid = ag.Grid2DIterate.from_mask(
mask=mask, fractional_accuracy=1.0, sub_steps=[2]
)
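        # fractional_accuracy=1.0 forces the iterated grid to refine up to the final
        # sub-grid size in sub_steps ([2]), so the deflections should match a manual
        # sub_size=2 calculation binned back onto the image grid.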
mass_profile = ag.mp.EllipticalIsothermal(
centre=(0.08, 0.08), einstein_radius=1.0
)
deflections = mass_profile.deflections_from_grid(grid=grid)
mask_sub_2 = mask.mask_new_sub_size_from_mask(mask=mask, sub_size=2)
grid_sub_2 = ag.Grid2D.from_mask(mask=mask_sub_2)
deflections_sub_2 = mass_profile.deflections_from_grid(
grid=grid_sub_2
).slim_binned
assert deflections == pytest.approx(deflections_sub_2, 1.0e-6)
grid = ag.Grid2DIterate.from_mask(
mask=mask, fractional_accuracy=0.99, sub_steps=[2, 4, 8]
)
mass_profile = ag.mp.EllipticalIsothermal(
centre=(0.08, 0.08), einstein_radius=1.0
)
deflections = mass_profile.deflections_from_grid(grid=grid)
mask_sub_4 = mask.mask_new_sub_size_from_mask(mask=mask, sub_size=4)
grid_sub_4 = ag.Grid2D.from_mask(mask=mask_sub_4)
deflections_sub_4 = mass_profile.deflections_from_grid(
grid=grid_sub_4
).slim_binned
assert deflections[0, 0] == deflections_sub_4[0, 0]
mask_sub_8 = mask.mask_new_sub_size_from_mask(mask=mask, sub_size=8)
grid_sub_8 = ag.Grid2D.from_mask(mask=mask_sub_8)
deflections_sub_8 = mass_profile.deflections_from_grid(
grid=grid_sub_8
).slim_binned
assert deflections[4, 0] == deflections_sub_8[4, 0]
def test__grid_interpolate_in__convergence__interpolates_based_on_intepolate_config(
self,
):
# `False` in interpolate.ini
mask = ag.Mask2D.manual(
mask=[
[True, True, True, True, True],
[True, False, False, False, True],
[True, False, False, False, True],
[True, False, False, False, True],
[True, True, True, True, True],
],
pixel_scales=(1.0, 1.0),
)
grid = ag.Grid2D.from_mask(mask=mask)
grid_interpolate = ag.Grid2DInterpolate.from_mask(
mask=mask, pixel_scales_interp=0.1
)
mass_profile = ag.mp.EllipticalIsothermal(einstein_radius=1.0)
convergence = mass_profile.convergence_from_grid(grid=grid)
convergence_no_interpolate = mass_profile.convergence_from_grid(
grid=grid_interpolate
)
assert (convergence == convergence_no_interpolate).all()
        # `True` in interpolate.ini
mass_profile = ag.mp.SphericalIsothermal(einstein_radius=1.0)
convergence = mass_profile.convergence_from_grid(grid=grid)
convergence_interpolate = mass_profile.convergence_from_grid(
grid=grid_interpolate
)
assert (convergence != convergence_interpolate).all()
array_interp = mass_profile.convergence_from_grid(
grid=grid_interpolate.grid_interp
)
interpolated_array = grid_interpolate.interpolated_array_from_array_interp(
array_interp=array_interp
)
assert (convergence_interpolate == interpolated_array).all()
def test__grid_interpolate_in__potential__interpolates_based_on_intepolate_config(
self,
):
# `False` in interpolate.ini
mask = ag.Mask2D.manual(
mask=[
[True, True, True, True, True],
[True, False, False, False, True],
[True, False, False, False, True],
[True, False, False, False, True],
[True, True, True, True, True],
],
pixel_scales=(1.0, 1.0),
)
grid = ag.Grid2D.from_mask(mask=mask)
grid_interpolate = ag.Grid2DInterpolate.from_mask(
mask=mask, pixel_scales_interp=0.1
)
mass_profile = ag.mp.EllipticalIsothermal(einstein_radius=1.0)
potential = mass_profile.potential_from_grid(grid=grid)
potential_no_interpolate = mass_profile.potential_from_grid(
grid=grid_interpolate
)
assert (potential == potential_no_interpolate).all()
        # `True` in interpolate.ini
mass_profile = ag.mp.SphericalIsothermal(einstein_radius=1.0)
potential = mass_profile.potential_from_grid(grid=grid)
potential_interpolate = mass_profile.potential_from_grid(grid=grid_interpolate)
assert (potential != potential_interpolate).all()
array_interp = mass_profile.potential_from_grid(
grid=grid_interpolate.grid_interp
)
interpolated_array = grid_interpolate.interpolated_array_from_array_interp(
array_interp=array_interp
)
assert (potential_interpolate == interpolated_array).all()
def test__grid_interpolate_in__deflections__interpolates_based_on_intepolate_config(
self,
):
# `False` in interpolate.ini
mask = ag.Mask2D.manual(
mask=[
[True, True, True, True, True],
[True, False, False, False, True],
[True, False, False, False, True],
[True, False, False, False, True],
[True, True, True, True, True],
],
pixel_scales=(1.0, 1.0),
)
grid = ag.Grid2D.from_mask(mask=mask)
grid_interpolate = ag.Grid2DInterpolate.from_mask(
mask=mask, pixel_scales_interp=0.1
)
mass_profile = ag.mp.EllipticalIsothermal(einstein_radius=1.0)
deflections = mass_profile.deflections_from_grid(grid=grid)
deflections_no_interpolate = mass_profile.deflections_from_grid(
grid=grid_interpolate
)
assert (deflections == deflections_no_interpolate).all()
        # `True` in interpolate.ini
mass_profile = ag.mp.SphericalIsothermal(einstein_radius=1.0)
deflections_interpolate = mass_profile.deflections_from_grid(
grid=grid_interpolate
)
grid_interp = mass_profile.deflections_from_grid(
grid=grid_interpolate.grid_interp
)
interpolated_grid = grid_interpolate.interpolated_grid_from_grid_interp(
grid_interp=grid_interp
)
assert (deflections_interpolate == interpolated_grid).all()
```
{
    "source": "jonathanfrawley/PyAutoGalaxy",
    "score": 2
}
#### File: test_autogalaxy/analysis/test_aggregator.py
```python
from os import path
import os
import pytest
import shutil
from autoconf import conf
import autofit as af
import autogalaxy as ag
from autogalaxy.mock import mock
directory = path.dirname(path.realpath(__file__))
@pytest.fixture(name="path")
def make_path():
return path.join("{}".format(path.dirname(path.realpath(__file__))), "files")
@pytest.fixture(name="samples")
def make_samples():
galaxy_0 = ag.Galaxy(redshift=0.5, light=ag.lp.EllSersic(centre=(0.0, 1.0)))
galaxy_1 = ag.Galaxy(redshift=1.0, light=ag.lp.EllSersic())
plane = ag.Plane(galaxies=[galaxy_0, galaxy_1])
return mock.MockSamples(max_log_likelihood_instance=plane)
@pytest.fixture(name="model")
def make_model():
return af.Collection(
galaxies=af.Collection(
galaxy=af.Model(ag.Galaxy, redshift=0.5, light=ag.lp.EllSersic),
source=af.Model(ag.Galaxy, redshift=1.0, light=ag.lp.EllSersic),
)
)
def clean(database_file, result_path):
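    """Remove the test's sqlite database file and output directory, if present, so each run starts clean."""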
if path.exists(database_file):
os.remove(database_file)
if path.exists(result_path):
shutil.rmtree(result_path)
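# Each test below follows the same pattern: a MockSearch writes results for a simple
# model to an output directory, an af.Aggregator is built from an sqlite file plus that
# directory, and the ag.agg generator classes rebuild planes, datasets and fits from the
# stored results before the outputs are cleaned up again.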
def test__plane_generator_from_aggregator(masked_imaging_7x7, samples, model):
path_prefix = "aggregator_plane_gen"
database_file = path.join(conf.instance.output_path, "plane.sqlite")
result_path = path.join(conf.instance.output_path, path_prefix)
clean(database_file=database_file, result_path=result_path)
search = mock.MockSearch(samples=samples)
search.paths = af.DirectoryPaths(path_prefix=path_prefix)
analysis = ag.AnalysisImaging(dataset=masked_imaging_7x7)
search.fit(model=model, analysis=analysis)
agg = af.Aggregator.from_database(filename=database_file)
agg.add_directory(directory=result_path)
plane_gen = ag.agg.Plane(aggregator=agg)
for plane in plane_gen:
assert plane.galaxies[0].redshift == 0.5
assert plane.galaxies[0].light.centre == (0.0, 1.0)
assert plane.galaxies[1].redshift == 1.0
clean(database_file=database_file, result_path=result_path)
#
def test__imaging_generator_from_aggregator(imaging_7x7, mask_2d_7x7, samples, model):
path_prefix = "aggregator_imaging_gen"
database_file = path.join(conf.instance.output_path, "imaging.sqlite")
result_path = path.join(conf.instance.output_path, path_prefix)
clean(database_file=database_file, result_path=result_path)
masked_imaging_7x7 = imaging_7x7.apply_mask(mask=mask_2d_7x7)
masked_imaging_7x7 = masked_imaging_7x7.apply_settings(
settings=ag.SettingsImaging(
grid_class=ag.Grid2DIterate,
grid_inversion_class=ag.Grid2DIterate,
fractional_accuracy=0.5,
sub_steps=[2],
)
)
search = mock.MockSearch(samples=samples)
search.paths = af.DirectoryPaths(path_prefix=path_prefix)
analysis = ag.AnalysisImaging(dataset=masked_imaging_7x7)
search.fit(model=model, analysis=analysis)
agg = af.Aggregator.from_database(filename=database_file)
agg.add_directory(directory=result_path)
imaging_gen = ag.agg.Imaging(aggregator=agg)
for imaging in imaging_gen:
assert (imaging.image == masked_imaging_7x7.image).all()
assert isinstance(imaging.grid, ag.Grid2DIterate)
assert isinstance(imaging.grid_inversion, ag.Grid2DIterate)
assert imaging.grid.sub_steps == [2]
assert imaging.grid.fractional_accuracy == 0.5
clean(database_file=database_file, result_path=result_path)
def test__fit_imaging_generator_from_aggregator(masked_imaging_7x7, samples, model):
path_prefix = "aggregator_fit_imaging_gen"
database_file = path.join(conf.instance.output_path, "fit_imaging.sqlite")
result_path = path.join(conf.instance.output_path, path_prefix)
clean(database_file=database_file, result_path=result_path)
search = mock.MockSearch(samples=samples)
search.paths = af.DirectoryPaths(path_prefix=path_prefix)
analysis = ag.AnalysisImaging(dataset=masked_imaging_7x7)
search.fit(model=model, analysis=analysis)
agg = af.Aggregator.from_database(filename=database_file)
agg.add_directory(directory=result_path)
fit_imaging_gen = ag.agg.FitImaging(aggregator=agg)
for fit_imaging in fit_imaging_gen:
assert (fit_imaging.image == masked_imaging_7x7.image).all()
clean(database_file=database_file, result_path=result_path)
def test__interferometer_generator_from_aggregator(
visibilities_7,
visibilities_noise_map_7,
uv_wavelengths_7x2,
mask_2d_7x7,
samples,
model,
):
path_prefix = "aggregator_interferometer"
database_file = path.join(conf.instance.output_path, "interferometer.sqlite")
result_path = path.join(conf.instance.output_path, path_prefix)
clean(database_file=database_file, result_path=result_path)
interferometer_7 = ag.Interferometer(
visibilities=visibilities_7,
noise_map=visibilities_noise_map_7,
uv_wavelengths=uv_wavelengths_7x2,
real_space_mask=mask_2d_7x7,
settings=ag.SettingsInterferometer(
grid_class=ag.Grid2DIterate,
grid_inversion_class=ag.Grid2DIterate,
fractional_accuracy=0.5,
sub_steps=[2],
transformer_class=ag.TransformerDFT,
),
)
search = mock.MockSearch(samples=samples)
search.paths = af.DirectoryPaths(path_prefix=path_prefix)
analysis = ag.AnalysisInterferometer(dataset=interferometer_7)
search.fit(model=model, analysis=analysis)
agg = af.Aggregator.from_database(filename=database_file)
agg.add_directory(directory=result_path)
interferometer_gen = ag.agg.Interferometer(aggregator=agg)
for interferometer in interferometer_gen:
assert (interferometer.visibilities == interferometer_7.visibilities).all()
assert (interferometer.real_space_mask == mask_2d_7x7).all()
assert isinstance(interferometer.grid, ag.Grid2DIterate)
assert isinstance(interferometer.grid_inversion, ag.Grid2DIterate)
assert interferometer.grid.sub_steps == [2]
assert interferometer.grid.fractional_accuracy == 0.5
assert isinstance(interferometer.transformer, ag.TransformerDFT)
clean(database_file=database_file, result_path=result_path)
def test__fit_interferometer_generator_from_aggregator(
interferometer_7, mask_2d_7x7, samples, model
):
path_prefix = "aggregator_fit_interferometer"
database_file = path.join(conf.instance.output_path, "fit_interferometer.sqlite")
result_path = path.join(conf.instance.output_path, path_prefix)
clean(database_file=database_file, result_path=result_path)
search = mock.MockSearch(samples=samples)
search.paths = af.DirectoryPaths(path_prefix=path_prefix)
analysis = ag.AnalysisInterferometer(dataset=interferometer_7)
search.fit(model=model, analysis=analysis)
agg = af.Aggregator.from_database(filename=database_file)
agg.add_directory(directory=result_path)
fit_interferometer_gen = ag.agg.FitInterferometer(aggregator=agg)
for fit_interferometer in fit_interferometer_gen:
assert (
fit_interferometer.interferometer.visibilities
== interferometer_7.visibilities
).all()
assert (fit_interferometer.interferometer.real_space_mask == mask_2d_7x7).all()
clean(database_file=database_file, result_path=result_path)
```
#### File: test_autogalaxy/plane/test_plane.py
```python
import autogalaxy as ag
import numpy as np
import pytest
from autogalaxy import exc
from autogalaxy.plane import plane
from skimage import measure
from autogalaxy.mock import mock
def critical_curve_via_magnification_from_plane_and_grid(plane, grid):
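    """
    Trace critical curves numerically: compute the plane's 2D magnification map, invert
    it, and use skimage's marching-squares contour finder to locate the contours where
    the inverse magnification crosses zero, converting the resulting pixel coordinates
    back into scaled coordinates on the grid.
    """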
magnification = plane.magnification_2d_from_grid(grid=grid)
inverse_magnification = 1 / magnification
critical_curves_indices = measure.find_contours(inverse_magnification.native, 0)
no_critical_curves = len(critical_curves_indices)
contours = []
critical_curves = []
for jj in np.arange(no_critical_curves):
contours.append(critical_curves_indices[jj])
contour_x, contour_y = contours[jj].T
pixel_coord = np.stack((contour_x, contour_y), axis=-1)
critical_curve = grid.mask.grid_scaled_from_grid_pixels_1d_for_marching_squares(
grid_pixels_1d=pixel_coord, shape_native=magnification.sub_shape_native
)
        critical_curve = np.array(critical_curve)
critical_curves.append(critical_curve)
return critical_curves
def caustics_via_magnification_from_plane_and_grid(plane, grid):
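    """
    Map each numerically-traced critical curve into the source plane via the lens
    equation (caustic = critical curve - deflection angles evaluated on the curve),
    giving reference caustics for the tests.
    """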
caustics = []
critical_curves = critical_curve_via_magnification_from_plane_and_grid(
plane=plane, grid=grid
)
for i in range(len(critical_curves)):
critical_curve = critical_curves[i]
deflections_1d = plane.deflections_2d_from_grid(grid=critical_curve)
caustic = critical_curve - deflections_1d
caustics.append(caustic)
return caustics
class TestAbstractPlane:
class TestProperties:
def test__point_dict(self, ps_0, ps_1):
plane = ag.Plane(galaxies=[ag.Galaxy(redshift=0.5)], redshift=None)
assert plane.point_dict == {}
plane = ag.Plane(
galaxies=[ag.Galaxy(redshift=0.5, point_0=ps_0)], redshift=None
)
assert plane.point_dict == {"point_0": ps_0}
plane = ag.Plane(
galaxies=[ag.Galaxy(redshift=0.5, point_0=ps_0, point_1=ps_1)],
redshift=None,
)
assert plane.point_dict == {"point_0": ps_0, "point_1": ps_1}
plane = ag.Plane(
galaxies=[
ag.Galaxy(redshift=0.5, point_0=ps_0, point_1=ps_1),
ag.Galaxy(redshift=0.5, point_2=ps_0),
],
redshift=None,
)
assert plane.point_dict == {
"point_0": ps_0,
"point_1": ps_1,
"point_2": ps_0,
}
def test__has_light_profile(self):
plane = ag.Plane(galaxies=[ag.Galaxy(redshift=0.5)], redshift=None)
assert plane.has_light_profile is False
plane = ag.Plane(
galaxies=[ag.Galaxy(redshift=0.5, light_profile=ag.lp.LightProfile())],
redshift=None,
)
assert plane.has_light_profile is True
plane = ag.Plane(
galaxies=[
ag.Galaxy(redshift=0.5, light_profile=ag.lp.LightProfile()),
ag.Galaxy(redshift=0.5),
],
redshift=None,
)
assert plane.has_light_profile is True
def test__has_mass_profile(self):
plane = ag.Plane(galaxies=[ag.Galaxy(redshift=0.5)], redshift=None)
assert plane.has_mass_profile is False
plane = ag.Plane(
galaxies=[ag.Galaxy(redshift=0.5, mass_profile=ag.mp.MassProfile())],
redshift=None,
)
assert plane.has_mass_profile is True
plane = ag.Plane(
galaxies=[
ag.Galaxy(redshift=0.5, mass_profile=ag.mp.MassProfile()),
ag.Galaxy(redshift=0.5),
],
redshift=None,
)
assert plane.has_mass_profile is True
def test__has_pixelization(self):
plane = ag.Plane(galaxies=[ag.Galaxy(redshift=0.5)], redshift=None)
assert plane.has_pixelization is False
galaxy_pix = ag.Galaxy(
redshift=0.5,
pixelization=ag.pix.Pixelization(),
regularization=ag.reg.Regularization(),
)
plane = ag.Plane(galaxies=[galaxy_pix], redshift=None)
assert plane.has_pixelization is True
plane = ag.Plane(
galaxies=[galaxy_pix, ag.Galaxy(redshift=0.5)], redshift=None
)
assert plane.has_pixelization is True
def test__has_regularization(self):
plane = ag.Plane(galaxies=[ag.Galaxy(redshift=0.5)], redshift=None)
assert plane.has_regularization is False
galaxy_pix = ag.Galaxy(
redshift=0.5,
pixelization=ag.pix.Pixelization(),
regularization=ag.reg.Regularization(),
)
plane = ag.Plane(galaxies=[galaxy_pix], redshift=None)
assert plane.has_regularization is True
plane = ag.Plane(
galaxies=[galaxy_pix, ag.Galaxy(redshift=0.5)], redshift=None
)
assert plane.has_regularization is True
def test__has_hyper_galaxy(self):
plane = ag.Plane(galaxies=[ag.Galaxy(redshift=0.5)], redshift=None)
assert plane.has_hyper_galaxy is False
galaxy = ag.Galaxy(redshift=0.5, hyper_galaxy=ag.HyperGalaxy())
plane = ag.Plane(galaxies=[galaxy], redshift=None)
assert plane.has_hyper_galaxy is True
plane = ag.Plane(galaxies=[galaxy, ag.Galaxy(redshift=0.5)], redshift=None)
assert plane.has_hyper_galaxy is True
def test__mass_profiles(self):
plane = ag.Plane(galaxies=[ag.Galaxy(redshift=0.5)], redshift=None)
assert plane.mass_profiles == []
sis_0 = ag.mp.SphIsothermal(einstein_radius=1.0)
sis_1 = ag.mp.SphIsothermal(einstein_radius=2.0)
sis_2 = ag.mp.SphIsothermal(einstein_radius=3.0)
plane = ag.Plane(
galaxies=[ag.Galaxy(redshift=0.5, mass_profile=sis_0)], redshift=None
)
assert plane.mass_profiles == [sis_0]
plane = ag.Plane(
galaxies=[
ag.Galaxy(redshift=0.5, mass_profile_0=sis_0, mass_profile_1=sis_1),
ag.Galaxy(redshift=0.5, mass_profile_0=sis_2, mass_profile_1=sis_1),
],
redshift=None,
)
assert plane.mass_profiles == [sis_0, sis_1, sis_2, sis_1]
def test__hyper_image_of_galaxy_with_pixelization(self):
galaxy_pix = ag.Galaxy(
redshift=0.5,
pixelization=ag.pix.Pixelization(),
regularization=ag.reg.Regularization(),
)
plane = ag.Plane(galaxies=[galaxy_pix], redshift=None)
assert plane.hyper_galaxy_image_of_galaxy_with_pixelization is None
galaxy_pix = ag.Galaxy(
redshift=0.5,
pixelization=ag.pix.Pixelization(),
regularization=ag.reg.Regularization(),
hyper_galaxy_image=1,
)
plane = ag.Plane(
galaxies=[galaxy_pix, ag.Galaxy(redshift=0.5)], redshift=None
)
assert plane.hyper_galaxy_image_of_galaxy_with_pixelization == 1
plane = ag.Plane(galaxies=[ag.Galaxy(redshift=0.5)], redshift=None)
assert plane.hyper_galaxy_image_of_galaxy_with_pixelization is None
class TestPixelization:
def test__no_galaxies_with_pixelizations_in_plane__returns_none(self):
galaxy_no_pix = ag.Galaxy(redshift=0.5)
plane = ag.Plane(galaxies=[galaxy_no_pix], redshift=None)
assert plane.pixelization is None
def test__1_galaxy_in_plane__it_has_pixelization__returns_mapper(self):
galaxy_pix = ag.Galaxy(
redshift=0.5,
pixelization=mock.MockPixelization(value=1),
regularization=mock.MockRegularization(matrix_shape=(1, 1)),
)
plane = ag.Plane(galaxies=[galaxy_pix], redshift=None)
assert plane.pixelization.value == 1
galaxy_pix = ag.Galaxy(
redshift=0.5,
pixelization=mock.MockPixelization(value=2),
regularization=mock.MockRegularization(matrix_shape=(2, 2)),
)
galaxy_no_pix = ag.Galaxy(redshift=0.5)
plane = ag.Plane(galaxies=[galaxy_no_pix, galaxy_pix], redshift=None)
assert plane.pixelization.value == 2
def test__2_galaxies_in_plane__both_have_pixelization__raises_error(self):
galaxy_pix_0 = ag.Galaxy(
redshift=0.5,
pixelization=mock.MockPixelization(value=1),
regularization=mock.MockRegularization(matrix_shape=(1, 1)),
)
galaxy_pix_1 = ag.Galaxy(
redshift=0.5,
pixelization=mock.MockPixelization(value=2),
regularization=mock.MockRegularization(matrix_shape=(1, 1)),
)
plane = ag.Plane(galaxies=[galaxy_pix_0, galaxy_pix_1], redshift=None)
with pytest.raises(exc.PixelizationException):
print(plane.pixelization)
class TestRegularization:
def test__no_galaxies_with_regularizations_in_plane__returns_none(self):
galaxy_no_pix = ag.Galaxy(redshift=0.5)
plane = ag.Plane(galaxies=[galaxy_no_pix], redshift=None)
assert plane.regularization is None
def test__1_galaxy_in_plane__it_has_regularization__returns_regularization(
self,
):
galaxy_reg = ag.Galaxy(
redshift=0.5,
pixelization=mock.MockPixelization(value=1),
regularization=mock.MockRegularization(matrix_shape=(1, 1)),
)
plane = ag.Plane(galaxies=[galaxy_reg], redshift=None)
assert plane.regularization.shape == (1, 1)
galaxy_reg = ag.Galaxy(
redshift=0.5,
pixelization=mock.MockPixelization(value=1),
regularization=mock.MockRegularization(matrix_shape=(2, 2)),
)
galaxy_no_reg = ag.Galaxy(redshift=0.5)
plane = ag.Plane(galaxies=[galaxy_no_reg, galaxy_reg], redshift=None)
assert plane.regularization.shape == (2, 2)
def test__2_galaxies_in_plane__both_have_regularization__raises_error(self):
galaxy_reg_0 = ag.Galaxy(
redshift=0.5,
pixelization=mock.MockPixelization(value=1),
regularization=mock.MockRegularization(matrix_shape=(1, 1)),
)
galaxy_reg_1 = ag.Galaxy(
redshift=0.5,
pixelization=mock.MockPixelization(value=2),
regularization=mock.MockRegularization(matrix_shape=(1, 1)),
)
plane = ag.Plane(galaxies=[galaxy_reg_0, galaxy_reg_1], redshift=None)
with pytest.raises(exc.PixelizationException):
print(plane.regularization)
class TestAbstractPlaneProfiles:
class TestProfileImage:
def test__image_2d_from_grid__same_as_its_light_image(
self, sub_grid_2d_7x7, gal_x1_lp
):
light_profile = gal_x1_lp.light_profiles[0]
lp_image = light_profile.image_2d_from_grid(grid=sub_grid_2d_7x7)
# Perform sub gridding average manually
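            # sub_grid_2d_7x7 uses sub_size=2, so each image pixel is covered by 4 sub-pixels;
            # the binned pixel value is the mean of those 4 sub-pixel evaluations.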
lp_image_pixel_0 = (
lp_image[0] + lp_image[1] + lp_image[2] + lp_image[3]
) / 4
lp_image_pixel_1 = (
lp_image[4] + lp_image[5] + lp_image[6] + lp_image[7]
) / 4
plane = ag.Plane(galaxies=[gal_x1_lp], redshift=None)
image = plane.image_2d_from_grid(grid=sub_grid_2d_7x7)
assert (image.binned[0] == lp_image_pixel_0).all()
assert (image.binned[1] == lp_image_pixel_1).all()
assert (image == lp_image).all()
def test__image_2d_from_grid__same_as_its_galaxy_image(
self, sub_grid_2d_7x7, gal_x1_lp
):
galaxy_image = gal_x1_lp.image_2d_from_grid(grid=sub_grid_2d_7x7)
plane = ag.Plane(galaxies=[gal_x1_lp], redshift=None)
image = plane.image_2d_from_grid(grid=sub_grid_2d_7x7)
assert image == pytest.approx(galaxy_image, 1.0e-4)
def test__image_from_positions__same_as_galaxy_image_with_conversions(
self, grid_2d_irregular_7x7, gal_x1_lp
):
galaxy_image = gal_x1_lp.image_2d_from_grid(grid=grid_2d_irregular_7x7)
plane = ag.Plane(galaxies=[gal_x1_lp], redshift=None)
image = plane.image_2d_from_grid(grid=grid_2d_irregular_7x7)
assert image.in_list[0] == pytest.approx(galaxy_image.in_list[0], 1.0e-4)
def test__images_of_galaxies(self, sub_grid_2d_7x7):
# Overwrite one value so intensity in each pixel is different
sub_grid_2d_7x7[5] = np.array([2.0, 2.0])
g0 = ag.Galaxy(redshift=0.5, light_profile=ag.lp.EllSersic(intensity=1.0))
g1 = ag.Galaxy(redshift=0.5, light_profile=ag.lp.EllSersic(intensity=2.0))
lp0 = g0.light_profiles[0]
lp1 = g1.light_profiles[0]
lp0_image = lp0.image_2d_from_grid(grid=sub_grid_2d_7x7)
lp1_image = lp1.image_2d_from_grid(grid=sub_grid_2d_7x7)
# Perform sub gridding average manually
lp0_image_pixel_0 = (
lp0_image[0] + lp0_image[1] + lp0_image[2] + lp0_image[3]
) / 4
lp0_image_pixel_1 = (
lp0_image[4] + lp0_image[5] + lp0_image[6] + lp0_image[7]
) / 4
lp1_image_pixel_0 = (
lp1_image[0] + lp1_image[1] + lp1_image[2] + lp1_image[3]
) / 4
lp1_image_pixel_1 = (
lp1_image[4] + lp1_image[5] + lp1_image[6] + lp1_image[7]
) / 4
plane = ag.Plane(galaxies=[g0, g1], redshift=None)
image = plane.image_2d_from_grid(grid=sub_grid_2d_7x7)
assert image.binned[0] == pytest.approx(
lp0_image_pixel_0 + lp1_image_pixel_0, 1.0e-4
)
assert image.binned[1] == pytest.approx(
lp0_image_pixel_1 + lp1_image_pixel_1, 1.0e-4
)
image_of_galaxies = plane.images_of_galaxies_from_grid(grid=sub_grid_2d_7x7)
assert image_of_galaxies[0].binned[0] == lp0_image_pixel_0
assert image_of_galaxies[0].binned[1] == lp0_image_pixel_1
assert image_of_galaxies[1].binned[0] == lp1_image_pixel_0
assert image_of_galaxies[1].binned[1] == lp1_image_pixel_1
def test__same_as_above__use_multiple_galaxies(self, sub_grid_2d_7x7):
# Overwrite one value so intensity in each pixel is different
sub_grid_2d_7x7[5] = np.array([2.0, 2.0])
g0 = ag.Galaxy(redshift=0.5, light_profile=ag.lp.EllSersic(intensity=1.0))
g1 = ag.Galaxy(redshift=0.5, light_profile=ag.lp.EllSersic(intensity=2.0))
g0_image = g0.image_2d_from_grid(grid=sub_grid_2d_7x7)
g1_image = g1.image_2d_from_grid(grid=sub_grid_2d_7x7)
plane = ag.Plane(galaxies=[g0, g1], redshift=None)
image = plane.image_2d_from_grid(grid=sub_grid_2d_7x7)
assert image == pytest.approx(g0_image + g1_image, 1.0e-4)
def test__same_as_above__grid_is_positions(self):
# Overwrite one value so intensity in each pixel is different
positions = ag.Grid2DIrregular(grid=[(2.0, 2.0), (3.0, 3.0)])
g0 = ag.Galaxy(redshift=0.5, light_profile=ag.lp.EllSersic(intensity=1.0))
g1 = ag.Galaxy(redshift=0.5, light_profile=ag.lp.EllSersic(intensity=2.0))
g0_image = g0.image_2d_from_grid(grid=positions)
g1_image = g1.image_2d_from_grid(grid=positions)
plane = ag.Plane(galaxies=[g0, g1], redshift=None)
image = plane.image_2d_from_grid(grid=positions)
assert image.in_list[0] == pytest.approx(
g0_image.in_list[0] + g1_image.in_list[0], 1.0e-4
)
assert image.in_list[1] == pytest.approx(
g0_image.in_list[1] + g1_image.in_list[1], 1.0e-4
)
def test__plane_has_no_galaxies__image_is_zeros_size_of_ungalaxyed_grid(
self, sub_grid_2d_7x7
):
plane = ag.Plane(galaxies=[], redshift=0.5)
image = plane.image_2d_from_grid(grid=sub_grid_2d_7x7)
assert image.shape_native == (7, 7)
assert (image[0] == 0.0).all()
assert (image[1] == 0.0).all()
def test__x1_plane__padded_image__compare_to_galaxy_images_using_padded_grid_stack(
self, sub_grid_2d_7x7
):
padded_grid = sub_grid_2d_7x7.padded_grid_from_kernel_shape(
kernel_shape_native=(3, 3)
)
g0 = ag.Galaxy(redshift=0.5, light_profile=ag.lp.EllSersic(intensity=1.0))
g1 = ag.Galaxy(redshift=0.5, light_profile=ag.lp.EllSersic(intensity=2.0))
g2 = ag.Galaxy(redshift=0.5, light_profile=ag.lp.EllSersic(intensity=3.0))
padded_g0_image = g0.image_2d_from_grid(grid=padded_grid)
padded_g1_image = g1.image_2d_from_grid(grid=padded_grid)
padded_g2_image = g2.image_2d_from_grid(grid=padded_grid)
plane = ag.Plane(galaxies=[g0, g1, g2])
padded_plane_image = plane.padded_image_2d_from_grid_and_psf_shape(
grid=sub_grid_2d_7x7, psf_shape_2d=(3, 3)
)
assert padded_plane_image.shape_native == (9, 9)
assert padded_plane_image == pytest.approx(
padded_g0_image + padded_g1_image + padded_g2_image, 1.0e-4
)
def test__galaxy_image_dict_from_grid(self, sub_grid_2d_7x7):
g0 = ag.Galaxy(redshift=0.5, light_profile=ag.lp.EllSersic(intensity=1.0))
g1 = ag.Galaxy(
redshift=0.5,
mass_profile=ag.mp.SphIsothermal(einstein_radius=1.0),
light_profile=ag.lp.EllSersic(intensity=2.0),
)
g2 = ag.Galaxy(redshift=0.5, light_profile=ag.lp.EllSersic(intensity=3.0))
g0_image = g0.image_2d_from_grid(grid=sub_grid_2d_7x7)
g1_image = g1.image_2d_from_grid(grid=sub_grid_2d_7x7)
g2_image = g2.image_2d_from_grid(grid=sub_grid_2d_7x7)
plane = ag.Plane(redshift=-0.75, galaxies=[g1, g0, g2])
image_1d_dict = plane.galaxy_image_dict_from_grid(grid=sub_grid_2d_7x7)
assert (image_1d_dict[g0].slim == g0_image).all()
assert (image_1d_dict[g1].slim == g1_image).all()
assert (image_1d_dict[g2].slim == g2_image).all()
image_dict = plane.galaxy_image_dict_from_grid(grid=sub_grid_2d_7x7)
assert (image_dict[g0].native == g0_image.native).all()
assert (image_dict[g1].native == g1_image.native).all()
assert (image_dict[g2].native == g2_image.native).all()
class TestConvergence:
def test__convergence_same_as_multiple_galaxies__include_reshape_mapping(
self, sub_grid_2d_7x7
):
# The *ungalaxyed* sub-grid must be used to compute the convergence. This changes the subgrid to ensure this
# is the case.
sub_grid_2d_7x7[5] = np.array([5.0, 2.0])
g0 = ag.Galaxy(
redshift=0.5,
mass_profile=ag.mp.SphIsothermal(
einstein_radius=1.0, centre=(1.0, 0.0)
),
)
g1 = ag.Galaxy(
redshift=0.5,
mass_profile=ag.mp.SphIsothermal(
einstein_radius=2.0, centre=(1.0, 1.0)
),
)
mp0 = g0.mass_profiles[0]
mp1 = g1.mass_profiles[0]
mp0_sub_convergence = mp0.convergence_2d_from_grid(grid=sub_grid_2d_7x7)
mp1_sub_convergence = mp1.convergence_2d_from_grid(grid=sub_grid_2d_7x7)
mp_sub_convergence = mp0_sub_convergence + mp1_sub_convergence
# Perform sub gridding average manually
mp_convergence_pixel_0 = (
mp_sub_convergence[0]
+ mp_sub_convergence[1]
+ mp_sub_convergence[2]
+ mp_sub_convergence[3]
) / 4
mp_convergence_pixel_1 = (
mp_sub_convergence[4]
+ mp_sub_convergence[5]
+ mp_sub_convergence[6]
+ mp_sub_convergence[7]
) / 4
plane = ag.Plane(galaxies=[g0, g1], redshift=None)
convergence = plane.convergence_2d_from_grid(grid=sub_grid_2d_7x7)
assert convergence.binned.native[2, 2] == pytest.approx(
mp_convergence_pixel_0, 1.0e-4
)
assert convergence.binned.native[2, 3] == pytest.approx(
mp_convergence_pixel_1, 1.0e-4
)
def test__same_as_above_galaxies___use_galaxy_to_compute_convergence(
self, sub_grid_2d_7x7
):
g0 = ag.Galaxy(
redshift=0.5, mass_profile=ag.mp.SphIsothermal(einstein_radius=1.0)
)
g1 = ag.Galaxy(
redshift=0.5, mass_profile=ag.mp.SphIsothermal(einstein_radius=2.0)
)
g0_convergence = g0.convergence_2d_from_grid(grid=sub_grid_2d_7x7)
g1_convergence = g1.convergence_2d_from_grid(grid=sub_grid_2d_7x7)
plane = ag.Plane(galaxies=[g0, g1], redshift=None)
convergence = plane.convergence_2d_from_grid(grid=sub_grid_2d_7x7)
assert convergence == pytest.approx(g0_convergence + g1_convergence, 1.0e-8)
def test__convergence_2d_from_grid_as_positions(self, grid_2d_irregular_7x7):
g0 = ag.Galaxy(
redshift=0.5, mass_profile=ag.mp.SphIsothermal(einstein_radius=1.0)
)
g0_convergence = g0.convergence_2d_from_grid(grid=grid_2d_irregular_7x7)
plane = ag.Plane(galaxies=[g0], redshift=None)
convergence = plane.convergence_2d_from_grid(grid=grid_2d_irregular_7x7)
assert convergence.in_list[0] == pytest.approx(
g0_convergence.in_list[0], 1.0e-8
)
def test__plane_has_no_galaxies__convergence_is_zeros_size_of_reshaped_sub_array(
self, sub_grid_2d_7x7
):
plane = ag.Plane(galaxies=[], redshift=0.5)
convergence = plane.convergence_2d_from_grid(grid=sub_grid_2d_7x7)
assert convergence.sub_shape_slim == sub_grid_2d_7x7.sub_shape_slim
convergence = plane.convergence_2d_from_grid(grid=sub_grid_2d_7x7)
assert convergence.sub_shape_native == (14, 14)
convergence = plane.convergence_2d_from_grid(grid=sub_grid_2d_7x7)
assert convergence.shape_native == (7, 7)
class TestPotential:
def test__potential_same_as_multiple_galaxies__include_reshape_mapping(
self, sub_grid_2d_7x7
):
# The *ungalaxyed* sub-grid must be used to compute the potential. This changes the subgrid to ensure this
# is the case.
sub_grid_2d_7x7[5] = np.array([5.0, 2.0])
g0 = ag.Galaxy(
redshift=0.5,
mass_profile=ag.mp.SphIsothermal(
einstein_radius=1.0, centre=(1.0, 0.0)
),
)
g1 = ag.Galaxy(
redshift=0.5,
mass_profile=ag.mp.SphIsothermal(
einstein_radius=2.0, centre=(1.0, 1.0)
),
)
mp0 = g0.mass_profiles[0]
mp1 = g1.mass_profiles[0]
mp0_sub_potential = mp0.potential_2d_from_grid(grid=sub_grid_2d_7x7)
mp1_sub_potential = mp1.potential_2d_from_grid(grid=sub_grid_2d_7x7)
mp_sub_potential = mp0_sub_potential + mp1_sub_potential
# Perform sub gridding average manually
mp_potential_pixel_0 = (
mp_sub_potential[0]
+ mp_sub_potential[1]
+ mp_sub_potential[2]
+ mp_sub_potential[3]
) / 4
mp_potential_pixel_1 = (
mp_sub_potential[4]
+ mp_sub_potential[5]
+ mp_sub_potential[6]
+ mp_sub_potential[7]
) / 4
plane = ag.Plane(galaxies=[g0, g1], redshift=None)
potential = plane.potential_2d_from_grid(grid=sub_grid_2d_7x7)
assert potential.binned.native[2, 2] == pytest.approx(
mp_potential_pixel_0, 1.0e-4
)
assert potential.binned.native[2, 3] == pytest.approx(
mp_potential_pixel_1, 1.0e-4
)
def test__same_as_above_galaxies___use_galaxy_to_compute_potential(
self, sub_grid_2d_7x7
):
g0 = ag.Galaxy(
redshift=0.5, mass_profile=ag.mp.SphIsothermal(einstein_radius=1.0)
)
g1 = ag.Galaxy(
redshift=0.5, mass_profile=ag.mp.SphIsothermal(einstein_radius=2.0)
)
g0_potential = g0.potential_2d_from_grid(grid=sub_grid_2d_7x7)
g1_potential = g1.potential_2d_from_grid(grid=sub_grid_2d_7x7)
plane = ag.Plane(galaxies=[g0, g1], redshift=None)
potential = plane.potential_2d_from_grid(grid=sub_grid_2d_7x7)
assert potential == pytest.approx(g0_potential + g1_potential, 1.0e-8)
def test__potential_2d_from_grid_as_positions(self, grid_2d_irregular_7x7):
g0 = ag.Galaxy(
redshift=0.5, mass_profile=ag.mp.SphIsothermal(einstein_radius=1.0)
)
g0_potential = g0.potential_2d_from_grid(grid=grid_2d_irregular_7x7)
plane = ag.Plane(galaxies=[g0], redshift=None)
potential = plane.potential_2d_from_grid(grid=grid_2d_irregular_7x7)
assert potential.in_list[0] == pytest.approx(
g0_potential.in_list[0], 1.0e-8
)
def test__plane_has_no_galaxies__potential_is_zeros_size_of_reshaped_sub_array(
self, sub_grid_2d_7x7
):
plane = ag.Plane(galaxies=[], redshift=0.5)
potential = plane.potential_2d_from_grid(grid=sub_grid_2d_7x7)
assert potential.sub_shape_slim == sub_grid_2d_7x7.sub_shape_slim
potential = plane.potential_2d_from_grid(grid=sub_grid_2d_7x7)
assert potential.sub_shape_native == (14, 14)
potential = plane.potential_2d_from_grid(grid=sub_grid_2d_7x7)
assert potential.shape_native == (7, 7)
class TestDeflections:
def test__deflections_from_plane__same_as_the_galaxy_mass_profiles(
self, sub_grid_2d_7x7
):
# Overwrite one value so intensity in each pixel is different
sub_grid_2d_7x7[5] = np.array([2.0, 2.0])
g0 = ag.Galaxy(
redshift=0.5, mass_profile=ag.mp.SphIsothermal(einstein_radius=1.0)
)
g1 = ag.Galaxy(
redshift=0.5, mass_profile=ag.mp.SphIsothermal(einstein_radius=2.0)
)
mp0 = g0.mass_profiles[0]
mp1 = g1.mass_profiles[0]
mp0_image = mp0.deflections_2d_from_grid(grid=sub_grid_2d_7x7)
mp1_image = mp1.deflections_2d_from_grid(grid=sub_grid_2d_7x7)
# Perform sub gridding average manually
mp0_image_pixel_0x = (
mp0_image[0, 0] + mp0_image[1, 0] + mp0_image[2, 0] + mp0_image[3, 0]
) / 4
mp0_image_pixel_1x = (
mp0_image[4, 0] + mp0_image[5, 0] + mp0_image[6, 0] + mp0_image[7, 0]
) / 4
mp0_image_pixel_0y = (
mp0_image[0, 1] + mp0_image[1, 1] + mp0_image[2, 1] + mp0_image[3, 1]
) / 4
mp0_image_pixel_1y = (
mp0_image[4, 1] + mp0_image[5, 1] + mp0_image[6, 1] + mp0_image[7, 1]
) / 4
mp1_image_pixel_0x = (
mp1_image[0, 0] + mp1_image[1, 0] + mp1_image[2, 0] + mp1_image[3, 0]
) / 4
mp1_image_pixel_1x = (
mp1_image[4, 0] + mp1_image[5, 0] + mp1_image[6, 0] + mp1_image[7, 0]
) / 4
mp1_image_pixel_0y = (
mp1_image[0, 1] + mp1_image[1, 1] + mp1_image[2, 1] + mp1_image[3, 1]
) / 4
mp1_image_pixel_1y = (
mp1_image[4, 1] + mp1_image[5, 1] + mp1_image[6, 1] + mp1_image[7, 1]
) / 4
plane = ag.Plane(galaxies=[g0, g1], redshift=None)
deflections = plane.deflections_2d_from_grid(grid=sub_grid_2d_7x7)
assert deflections.binned[0, 0] == pytest.approx(
mp0_image_pixel_0x + mp1_image_pixel_0x, 1.0e-4
)
assert deflections.binned[1, 0] == pytest.approx(
mp0_image_pixel_1x + mp1_image_pixel_1x, 1.0e-4
)
assert deflections.binned[0, 1] == pytest.approx(
mp0_image_pixel_0y + mp1_image_pixel_0y, 1.0e-4
)
assert deflections.binned[1, 1] == pytest.approx(
mp0_image_pixel_1y + mp1_image_pixel_1y, 1.0e-4
)
def test__deflections_same_as_its_galaxy___use_multiple_galaxies(
self, sub_grid_2d_7x7
):
# Overwrite one value so intensity in each pixel is different
sub_grid_2d_7x7[5] = np.array([2.0, 2.0])
g0 = ag.Galaxy(
redshift=0.5, mass_profile=ag.mp.SphIsothermal(einstein_radius=1.0)
)
g1 = ag.Galaxy(
redshift=0.5, mass_profile=ag.mp.SphIsothermal(einstein_radius=2.0)
)
g0_deflections = g0.deflections_2d_from_grid(grid=sub_grid_2d_7x7)
g1_deflections = g1.deflections_2d_from_grid(grid=sub_grid_2d_7x7)
plane = ag.Plane(galaxies=[g0, g1], redshift=None)
deflections = plane.deflections_2d_from_grid(grid=sub_grid_2d_7x7)
assert deflections == pytest.approx(g0_deflections + g1_deflections, 1.0e-4)
def test__deflections_2d_from_grid_as_positions(self, grid_2d_irregular_7x7):
g0 = ag.Galaxy(
redshift=0.5, mass_profile=ag.mp.SphIsothermal(einstein_radius=1.0)
)
g0_deflections = g0.deflections_2d_from_grid(grid=grid_2d_irregular_7x7)
plane = ag.Plane(galaxies=[g0], redshift=None)
deflections = plane.deflections_2d_from_grid(grid=grid_2d_irregular_7x7)
assert deflections.in_list[0][0] == pytest.approx(
g0_deflections.in_list[0][0], 1.0e-8
)
assert deflections.in_list[0][1] == pytest.approx(
g0_deflections.in_list[0][1], 1.0e-8
)
def test__deflections_numerics__x2_galaxy_in_plane__or_galaxy_x2_sis__deflections_double(
self, grid_2d_7x7, gal_x1_mp, gal_x2_mp
):
plane = ag.Plane(galaxies=[gal_x2_mp], redshift=None)
deflections = plane.deflections_2d_from_grid(grid=grid_2d_7x7)
assert deflections[0:2] == pytest.approx(
np.array([[3.0 * 0.707, -3.0 * 0.707], [3.0, 0.0]]), 1e-3
)
plane = ag.Plane(galaxies=[gal_x1_mp, gal_x1_mp], redshift=None)
deflections = plane.deflections_2d_from_grid(grid=grid_2d_7x7)
assert deflections[0:2] == pytest.approx(
np.array([[2.0 * 0.707, -2.0 * 0.707], [2.0, 0.0]]), 1e-3
)
def test__plane_has_no_galaxies__deflections_are_zeros_size_of_ungalaxyed_grid(
self, sub_grid_2d_7x7
):
plane = ag.Plane(redshift=0.5, galaxies=[])
deflections = plane.deflections_2d_from_grid(grid=sub_grid_2d_7x7)
assert deflections.shape_native == (7, 7)
assert (deflections.binned[0, 0] == 0.0).all()
assert (deflections.binned[0, 1] == 0.0).all()
assert (deflections.binned[1, 0] == 0.0).all()
assert (deflections.binned[0] == 0.0).all()
class TestLensingObject:
def test__correct_einstein_mass_caclulated_for_multiple_mass_profiles__means_all_innherited_methods_work(
self,
):
grid = ag.Grid2D.uniform(shape_native=(50, 50), pixel_scales=0.15)
sis_0 = ag.mp.SphIsothermal(centre=(0.0, 0.0), einstein_radius=0.2)
sis_1 = ag.mp.SphIsothermal(centre=(0.0, 0.0), einstein_radius=0.4)
sis_2 = ag.mp.SphIsothermal(centre=(0.0, 0.0), einstein_radius=0.6)
sis_3 = ag.mp.SphIsothermal(centre=(0.0, 0.0), einstein_radius=0.8)
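            # Concentric SIS profiles deflect by their Einstein radii, which therefore add:
            # 0.2 + 0.4 + 0.6 + 0.8 = 2.0. The angular Einstein mass of an SIS is
            # pi * einstein_radius**2, giving the expected value of pi * 2**2 asserted below.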
galaxy_0 = ag.Galaxy(
mass_profile_0=sis_0, mass_profile_1=sis_1, redshift=0.5
)
galaxy_1 = ag.Galaxy(
mass_profile_0=sis_2, mass_profile_1=sis_3, redshift=0.5
)
plane = ag.Plane(galaxies=[galaxy_0, galaxy_1])
einstein_mass = plane.einstein_mass_angular_from_grid(grid=grid)
assert einstein_mass == pytest.approx(np.pi * 2.0 ** 2.0, 1.0e-1)
class TestAbstractPlaneData:
class TestBlurredImagePlaneImage:
def test__blurred_image_2d_from_grid_and_psf(
self, sub_grid_2d_7x7, blurring_grid_2d_7x7, psf_3x3, convolver_7x7
):
g0 = ag.Galaxy(redshift=0.5, light_profile=ag.lp.EllSersic(intensity=1.0))
g1 = ag.Galaxy(redshift=1.0, light_profile=ag.lp.EllSersic(intensity=2.0))
blurred_g0_image = g0.blurred_image_2d_from_grid_and_convolver(
grid=sub_grid_2d_7x7,
blurring_grid=blurring_grid_2d_7x7,
convolver=convolver_7x7,
)
blurred_g1_image = g1.blurred_image_2d_from_grid_and_convolver(
grid=sub_grid_2d_7x7,
blurring_grid=blurring_grid_2d_7x7,
convolver=convolver_7x7,
)
plane = ag.Plane(redshift=0.5, galaxies=[g0, g1])
blurred_image = plane.blurred_image_2d_from_grid_and_psf(
grid=sub_grid_2d_7x7, blurring_grid=blurring_grid_2d_7x7, psf=psf_3x3
)
assert blurred_image.slim == pytest.approx(
blurred_g0_image.slim + blurred_g1_image.slim, 1.0e-4
)
assert blurred_image.native == pytest.approx(
blurred_g0_image.native + blurred_g1_image.native, 1.0e-4
)
def test__blurred_image_of_galaxies_from_grid_and_psf(
self, sub_grid_2d_7x7, blurring_grid_2d_7x7, psf_3x3
):
g0 = ag.Galaxy(redshift=0.5, light_profile=ag.lp.EllSersic(intensity=1.0))
g1 = ag.Galaxy(redshift=1.0, light_profile=ag.lp.EllSersic(intensity=2.0))
blurred_g0_image = g0.blurred_image_2d_from_grid_and_psf(
grid=sub_grid_2d_7x7, blurring_grid=blurring_grid_2d_7x7, psf=psf_3x3
)
blurred_g1_image = g1.blurred_image_2d_from_grid_and_psf(
grid=sub_grid_2d_7x7, blurring_grid=blurring_grid_2d_7x7, psf=psf_3x3
)
plane = ag.Plane(redshift=0.5, galaxies=[g0, g1])
blurred_images_of_galaxies = plane.blurred_images_of_galaxies_from_grid_and_psf(
grid=sub_grid_2d_7x7, blurring_grid=blurring_grid_2d_7x7, psf=psf_3x3
)
assert blurred_g0_image.shape_slim == 9
assert blurred_images_of_galaxies[0].slim == pytest.approx(
blurred_g0_image.slim, 1.0e-4
)
assert blurred_g1_image.shape_slim == 9
assert blurred_images_of_galaxies[1].slim == pytest.approx(
blurred_g1_image.slim, 1.0e-4
)
assert blurred_images_of_galaxies[0].native == pytest.approx(
blurred_g0_image.native, 1.0e-4
)
assert blurred_images_of_galaxies[1].native == pytest.approx(
blurred_g1_image.native, 1.0e-4
)
def test__blurred_image_2d_from_grid_and_convolver(
self, sub_grid_2d_7x7, blurring_grid_2d_7x7, convolver_7x7
):
g0 = ag.Galaxy(redshift=0.5, light_profile=ag.lp.EllSersic(intensity=1.0))
g1 = ag.Galaxy(redshift=1.0, light_profile=ag.lp.EllSersic(intensity=2.0))
blurred_g0_image = g0.blurred_image_2d_from_grid_and_convolver(
grid=sub_grid_2d_7x7,
convolver=convolver_7x7,
blurring_grid=blurring_grid_2d_7x7,
)
blurred_g1_image = g1.blurred_image_2d_from_grid_and_convolver(
grid=sub_grid_2d_7x7,
convolver=convolver_7x7,
blurring_grid=blurring_grid_2d_7x7,
)
plane = ag.Plane(redshift=0.5, galaxies=[g0, g1])
blurred_image = plane.blurred_image_2d_from_grid_and_convolver(
grid=sub_grid_2d_7x7,
convolver=convolver_7x7,
blurring_grid=blurring_grid_2d_7x7,
)
assert blurred_image.slim == pytest.approx(
blurred_g0_image.slim + blurred_g1_image.slim, 1.0e-4
)
assert blurred_image.native == pytest.approx(
blurred_g0_image.native + blurred_g1_image.native, 1.0e-4
)
def test__blurred_image_of_galaxies_from_grid_and_convolver(
self, sub_grid_2d_7x7, blurring_grid_2d_7x7, convolver_7x7
):
g0 = ag.Galaxy(redshift=0.5, light_profile=ag.lp.EllSersic(intensity=1.0))
g1 = ag.Galaxy(redshift=1.0, light_profile=ag.lp.EllSersic(intensity=2.0))
blurred_g0_image = g0.blurred_image_2d_from_grid_and_convolver(
grid=sub_grid_2d_7x7,
convolver=convolver_7x7,
blurring_grid=blurring_grid_2d_7x7,
)
blurred_g1_image = g1.blurred_image_2d_from_grid_and_convolver(
grid=sub_grid_2d_7x7,
convolver=convolver_7x7,
blurring_grid=blurring_grid_2d_7x7,
)
plane = ag.Plane(redshift=0.5, galaxies=[g0, g1])
blurred_images_of_galaxies = plane.blurred_images_of_galaxies_from_grid_and_convolver(
grid=sub_grid_2d_7x7,
blurring_grid=blurring_grid_2d_7x7,
convolver=convolver_7x7,
)
assert blurred_g0_image.shape_slim == 9
assert blurred_images_of_galaxies[0].slim == pytest.approx(
blurred_g0_image.slim, 1.0e-4
)
assert blurred_g1_image.shape_slim == 9
assert blurred_images_of_galaxies[1].slim == pytest.approx(
blurred_g1_image.slim, 1.0e-4
)
assert blurred_images_of_galaxies[0].native == pytest.approx(
blurred_g0_image.native, 1.0e-4
)
assert blurred_images_of_galaxies[1].native == pytest.approx(
blurred_g1_image.native, 1.0e-4
)
def test__galaxy_blurred_image_dict_from_grid_and_convolver(
self, sub_grid_2d_7x7, blurring_grid_2d_7x7, convolver_7x7
):
g0 = ag.Galaxy(redshift=0.5, light_profile=ag.lp.EllSersic(intensity=1.0))
g1 = ag.Galaxy(
redshift=0.5,
mass_profile=ag.mp.SphIsothermal(einstein_radius=1.0),
light_profile=ag.lp.EllSersic(intensity=2.0),
)
g2 = ag.Galaxy(redshift=0.5, light_profile=ag.lp.EllSersic(intensity=3.0))
g0_blurred_image = g0.blurred_image_2d_from_grid_and_convolver(
grid=sub_grid_2d_7x7,
convolver=convolver_7x7,
blurring_grid=blurring_grid_2d_7x7,
)
g1_blurred_image = g1.blurred_image_2d_from_grid_and_convolver(
grid=sub_grid_2d_7x7,
convolver=convolver_7x7,
blurring_grid=blurring_grid_2d_7x7,
)
g2_blurred_image = g2.blurred_image_2d_from_grid_and_convolver(
grid=sub_grid_2d_7x7,
convolver=convolver_7x7,
blurring_grid=blurring_grid_2d_7x7,
)
plane = ag.Plane(redshift=-0.75, galaxies=[g1, g0, g2])
blurred_image_dict = plane.galaxy_blurred_image_dict_from_grid_and_convolver(
grid=sub_grid_2d_7x7,
convolver=convolver_7x7,
blurring_grid=blurring_grid_2d_7x7,
)
assert (blurred_image_dict[g0].slim == g0_blurred_image.slim).all()
assert (blurred_image_dict[g1].slim == g1_blurred_image.slim).all()
assert (blurred_image_dict[g2].slim == g2_blurred_image.slim).all()
class TestUnmaskedBlurredProfileImages:
def test__unmasked_images_of_plane_planes_and_galaxies(self):
psf = ag.Kernel2D.manual_native(
array=(np.array([[0.0, 3.0, 0.0], [0.0, 1.0, 2.0], [0.0, 0.0, 0.0]])),
pixel_scales=1.0,
)
mask = ag.Mask2D.manual(
mask=[[True, True, True], [True, False, True], [True, True, True]],
pixel_scales=1.0,
sub_size=1,
)
grid = ag.Grid2D.from_mask(mask=mask)
g0 = ag.Galaxy(redshift=0.5, light_profile=ag.lp.EllSersic(intensity=0.1))
g1 = ag.Galaxy(redshift=1.0, light_profile=ag.lp.EllSersic(intensity=0.2))
plane = ag.Plane(redshift=0.75, galaxies=[g0, g1])
padded_grid = grid.padded_grid_from_kernel_shape(
kernel_shape_native=psf.shape_native
)
manual_blurred_image_0 = plane.images_of_galaxies_from_grid(
grid=padded_grid
)[0]
manual_blurred_image_0 = psf.convolved_array_from_array(
array=manual_blurred_image_0
)
manual_blurred_image_1 = plane.images_of_galaxies_from_grid(
grid=padded_grid
)[1]
manual_blurred_image_1 = psf.convolved_array_from_array(
array=manual_blurred_image_1
)
unmasked_blurred_image = plane.unmasked_blurred_image_2d_from_grid_and_psf(
grid=grid, psf=psf
)
assert unmasked_blurred_image.native == pytest.approx(
manual_blurred_image_0.binned.native[1:4, 1:4]
+ manual_blurred_image_1.binned.native[1:4, 1:4],
1.0e-4,
)
unmasked_blurred_image_of_galaxies = plane.unmasked_blurred_image_of_galaxies_from_grid_and_psf(
grid=grid, psf=psf
)
assert unmasked_blurred_image_of_galaxies[0].native == pytest.approx(
manual_blurred_image_0.binned.native[1:4, 1:4], 1.0e-4
)
assert unmasked_blurred_image_of_galaxies[1].native == pytest.approx(
manual_blurred_image_1.binned.native[1:4, 1:4], 1.0e-4
)
class TestVisibilities:
def test__visibilities_from_grid_and_transformer(
self, sub_grid_2d_7x7, transformer_7x7_7
):
g0 = ag.Galaxy(redshift=0.5, light_profile=ag.lp.EllSersic(intensity=1.0))
image = g0.image_2d_from_grid(grid=sub_grid_2d_7x7)
visibilities = transformer_7x7_7.visibilities_from_image(image=image)
plane = ag.Plane(redshift=0.5, galaxies=[g0])
plane_visibilities = plane.profile_visibilities_from_grid_and_transformer(
grid=sub_grid_2d_7x7, transformer=transformer_7x7_7
)
assert (visibilities == plane_visibilities).all()
g1 = ag.Galaxy(redshift=0.5, light_profile=ag.lp.EllSersic(intensity=2.0))
image = g0.image_2d_from_grid(grid=sub_grid_2d_7x7) + g1.image_2d_from_grid(
grid=sub_grid_2d_7x7
)
visibilities = transformer_7x7_7.visibilities_from_image(image=image)
plane = ag.Plane(redshift=0.5, galaxies=[g0, g1])
plane_visibilities = plane.profile_visibilities_from_grid_and_transformer(
grid=sub_grid_2d_7x7, transformer=transformer_7x7_7
)
assert visibilities == pytest.approx(plane_visibilities, 1.0e-4)
def test__visibilities_from_grid_and_transformer__plane_has_no_galaxies__returns_zeros(
self, sub_grid_2d_7x7, transformer_7x7_7
):
plane = ag.Plane(redshift=0.5, galaxies=[])
plane_visibilities = plane.profile_visibilities_from_grid_and_transformer(
grid=sub_grid_2d_7x7, transformer=transformer_7x7_7
)
assert (plane_visibilities.slim == 0.0 + 0.0j * np.zeros((7,))).all()
def test__visibilities_of_galaxies_from_grid_and_transformer(
self, sub_grid_2d_7x7, transformer_7x7_7
):
g0 = ag.Galaxy(redshift=0.5, light_profile=ag.lp.EllSersic(intensity=1.0))
g1 = ag.Galaxy(redshift=0.5, light_profile=ag.lp.EllSersic(intensity=2.0))
g0_image = g0.image_2d_from_grid(grid=sub_grid_2d_7x7)
g1_image = g1.image_2d_from_grid(grid=sub_grid_2d_7x7)
g0_visibilities = transformer_7x7_7.visibilities_from_image(image=g0_image)
g1_visibilities = transformer_7x7_7.visibilities_from_image(image=g1_image)
plane = ag.Plane(redshift=0.5, galaxies=[g0, g1])
plane_visibilities_of_galaxies = plane.profile_visibilities_of_galaxies_from_grid_and_transformer(
grid=sub_grid_2d_7x7, transformer=transformer_7x7_7
)
assert (g0_visibilities == plane_visibilities_of_galaxies[0]).all()
assert (g1_visibilities == plane_visibilities_of_galaxies[1]).all()
plane_visibilities = plane.profile_visibilities_from_grid_and_transformer(
grid=sub_grid_2d_7x7, transformer=transformer_7x7_7
)
assert sum(plane_visibilities_of_galaxies) == pytest.approx(
plane_visibilities, 1.0e-4
)
def test__galaxy_visibilities_dict_from_grid_and_transformer(
self, sub_grid_2d_7x7, transformer_7x7_7
):
g0 = ag.Galaxy(redshift=0.5, light_profile=ag.lp.EllSersic(intensity=1.0))
g1 = ag.Galaxy(
redshift=0.5,
mass_profile=ag.mp.SphIsothermal(einstein_radius=1.0),
light_profile=ag.lp.EllSersic(intensity=2.0),
)
g2 = ag.Galaxy(redshift=0.5, light_profile=ag.lp.EllSersic(intensity=3.0))
g3 = ag.Galaxy(redshift=1.0, light_profile=ag.lp.EllSersic(intensity=5.0))
g0_visibilities = g0.profile_visibilities_from_grid_and_transformer(
grid=sub_grid_2d_7x7, transformer=transformer_7x7_7
)
g1_visibilities = g1.profile_visibilities_from_grid_and_transformer(
grid=sub_grid_2d_7x7, transformer=transformer_7x7_7
)
g2_visibilities = g2.profile_visibilities_from_grid_and_transformer(
grid=sub_grid_2d_7x7, transformer=transformer_7x7_7
)
plane = ag.Plane(redshift=-0.75, galaxies=[g1, g0, g2])
visibilities_dict = plane.galaxy_profile_visibilities_dict_from_grid_and_transformer(
grid=sub_grid_2d_7x7, transformer=transformer_7x7_7
)
assert (visibilities_dict[g0] == g0_visibilities).all()
assert (visibilities_dict[g1] == g1_visibilities).all()
assert (visibilities_dict[g2] == g2_visibilities).all()
class TestGrid2DIrregular:
def test__no_galaxies_with_pixelizations_in_plane__returns_none(
self, sub_grid_2d_7x7
):
galaxy_no_pix = ag.Galaxy(redshift=0.5)
plane = ag.Plane(galaxies=[galaxy_no_pix], redshift=0.5)
sparse_grid = plane.sparse_image_plane_grid_from_grid(grid=sub_grid_2d_7x7)
assert sparse_grid is None
def test__1_galaxy_in_plane__it_has_pixelization__returns_sparse_grid(
self, sub_grid_2d_7x7
):
galaxy_pix = ag.Galaxy(
redshift=0.5,
pixelization=mock.MockPixelization(value=1, grid=[[1.0, 1.0]]),
regularization=mock.MockRegularization(matrix_shape=(1, 1)),
)
plane = ag.Plane(galaxies=[galaxy_pix], redshift=0.5)
sparse_grid = plane.sparse_image_plane_grid_from_grid(grid=sub_grid_2d_7x7)
assert (sparse_grid == np.array([[1.0, 1.0]])).all()
def test__1_galaxy_in_plane__it_has_pixelization_and_hyper_image_returns_sparse_grid_and_uses_hyper_image(
self, sub_grid_2d_7x7
):
            # In the MockPixelization class the grid is returned if the hyper image is None; otherwise
            # grid * hyper image is returned.
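            # Here hyper_galaxy_image=2, so the expected sparse grid is [[1.0, 1.0]] * 2 = [[2.0, 2.0]], as
            # asserted below.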
galaxy_pix = ag.Galaxy(
redshift=0.5,
pixelization=mock.MockPixelization(
value=1, grid=np.array([[1.0, 1.0]])
),
regularization=mock.MockRegularization(matrix_shape=(1, 1)),
hyper_galaxy_image=2,
)
plane = ag.Plane(galaxies=[galaxy_pix], redshift=0.5)
sparse_grid = plane.sparse_image_plane_grid_from_grid(grid=sub_grid_2d_7x7)
assert (sparse_grid == np.array([[2.0, 2.0]])).all()
class TestMapper:
def test__no_galaxies_with_pixelizations_in_plane__returns_none(
self, sub_grid_2d_7x7
):
galaxy_no_pix = ag.Galaxy(redshift=0.5)
plane = ag.Plane(galaxies=[galaxy_no_pix], redshift=0.5)
mapper = plane.mapper_from_grid_and_sparse_grid(
grid=sub_grid_2d_7x7, sparse_grid=sub_grid_2d_7x7
)
assert mapper is None
def test__1_galaxy_in_plane__it_has_pixelization__returns_mapper(
self, sub_grid_2d_7x7
):
galaxy_pix = ag.Galaxy(
redshift=0.5,
pixelization=mock.MockPixelization(value=1),
regularization=mock.MockRegularization(matrix_shape=(1, 1)),
)
plane = ag.Plane(galaxies=[galaxy_pix], redshift=0.5)
mapper = plane.mapper_from_grid_and_sparse_grid(
grid=sub_grid_2d_7x7, sparse_grid=sub_grid_2d_7x7
)
assert mapper == 1
galaxy_pix = ag.Galaxy(
redshift=0.5,
pixelization=mock.MockPixelization(value=1),
regularization=mock.MockRegularization(matrix_shape=(1, 1)),
)
galaxy_no_pix = ag.Galaxy(redshift=0.5)
plane = ag.Plane(galaxies=[galaxy_no_pix, galaxy_pix], redshift=0.5)
mapper = plane.mapper_from_grid_and_sparse_grid(
grid=sub_grid_2d_7x7, sparse_grid=sub_grid_2d_7x7
)
assert mapper == 1
def test__inversion_use_border_is_false__still_returns_mapper(
self, sub_grid_2d_7x7
):
galaxy_pix = ag.Galaxy(
redshift=0.5,
pixelization=mock.MockPixelization(value=1),
regularization=mock.MockRegularization(matrix_shape=(1, 1)),
)
galaxy_no_pix = ag.Galaxy(redshift=0.5)
plane = ag.Plane(galaxies=[galaxy_no_pix, galaxy_pix], redshift=0.5)
mapper = plane.mapper_from_grid_and_sparse_grid(
grid=sub_grid_2d_7x7,
sparse_grid=sub_grid_2d_7x7,
settings_pixelization=ag.SettingsPixelization(use_border=False),
)
assert mapper == 1
def test__2_galaxies_in_plane__both_have_pixelization__raises_error(
self, sub_grid_2d_7x7
):
galaxy_pix_0 = ag.Galaxy(
redshift=0.5,
pixelization=mock.MockPixelization(value=1),
regularization=mock.MockRegularization(matrix_shape=(1, 1)),
)
galaxy_pix_1 = ag.Galaxy(
redshift=0.5,
pixelization=mock.MockPixelization(value=2),
regularization=mock.MockRegularization(matrix_shape=(1, 1)),
)
plane = ag.Plane(galaxies=[galaxy_pix_0, galaxy_pix_1], redshift=None)
with pytest.raises(exc.PixelizationException):
plane.mapper_from_grid_and_sparse_grid(
grid=sub_grid_2d_7x7,
sparse_grid=sub_grid_2d_7x7,
settings_pixelization=ag.SettingsPixelization(use_border=False),
)
class TestInversion:
def test__x1_inversion_imaging_in_plane__performs_inversion_correctly(
self, sub_grid_2d_7x7, masked_imaging_7x7
):
pix = ag.pix.Rectangular(shape=(3, 3))
reg = ag.reg.Constant(coefficient=0.0)
g0 = ag.Galaxy(redshift=0.5, pixelization=pix, regularization=reg)
plane = ag.Plane(galaxies=[ag.Galaxy(redshift=0.5), g0])
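            # With a regularization coefficient of 0.0 the smoothing term vanishes, so the rectangular-pixelization
            # inversion should reproduce the masked image almost exactly, which the approx assert below checks.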
inversion = plane.inversion_imaging_from_grid_and_data(
grid=sub_grid_2d_7x7,
image=masked_imaging_7x7.image,
noise_map=masked_imaging_7x7.noise_map,
convolver=masked_imaging_7x7.convolver,
settings_pixelization=ag.SettingsPixelization(use_border=False),
)
assert inversion.mapped_reconstructed_image == pytest.approx(
masked_imaging_7x7.image, 1.0e-2
)
def test__x1_inversion_interferometer_in_plane__performs_inversion_correctly(
self, sub_grid_2d_7x7, interferometer_7
):
interferometer_7.data = ag.Visibilities.ones(shape_slim=(7,))
pix = ag.pix.Rectangular(shape=(7, 7))
reg = ag.reg.Constant(coefficient=0.0)
g0 = ag.Galaxy(redshift=0.5, pixelization=pix, regularization=reg)
plane = ag.Plane(galaxies=[ag.Galaxy(redshift=0.5), g0])
inversion = plane.inversion_interferometer_from_grid_and_data(
grid=sub_grid_2d_7x7,
visibilities=interferometer_7.visibilities,
noise_map=interferometer_7.noise_map,
transformer=interferometer_7.transformer,
settings_pixelization=ag.SettingsPixelization(use_border=False),
settings_inversion=ag.SettingsInversion(use_linear_operators=False),
)
assert inversion.mapped_reconstructed_visibilities.real == pytest.approx(
interferometer_7.visibilities.real, 1.0e-2
)
class TestPlaneImage:
def test__3x3_grid__extracts_max_min_coordinates__ignores_other_coordinates_more_central(
self, sub_grid_2d_7x7
):
sub_grid_2d_7x7[1] = np.array([2.0, 2.0])
galaxy = ag.Galaxy(redshift=0.5, light=ag.lp.EllSersic(intensity=1.0))
plane = ag.Plane(galaxies=[galaxy], redshift=None)
plane_image_from_func = ag.plane.plane.plane_util.plane_image_of_galaxies_from(
shape=(7, 7),
grid=sub_grid_2d_7x7.mask.unmasked_grid_sub_1,
galaxies=[galaxy],
)
plane_image_from_plane = plane.plane_image_2d_from_grid(
grid=sub_grid_2d_7x7
)
assert (plane_image_from_func.array == plane_image_from_plane.array).all()
def test__ensure_index_of_plane_image_has_negative_arcseconds_at_start(self,):
            # The grid coordinates -2.0 -> 2.0 mean a plane of shape (5, 5) has arc-second coordinates running over
            # -1.6, -0.8, 0.0, 0.8, 1.6. The centre (1.6, -1.6) of the first galaxy's light profile means its
            # brightest pixel should be index 0 of the 1D (slim) grid and (0, 0) of the 2D plane image.
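            # Grounded in the asserts below, the centre-to-brightest-pixel mapping is: (1.6, -1.6) -> (0, 0),
            # (1.6, 1.6) -> (0, 4), (-1.6, -1.6) -> (4, 0) and (-1.6, 1.6) -> (4, 4).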
mask = ag.Mask2D.unmasked(shape_native=(5, 5), pixel_scales=1.0, sub_size=1)
grid = ag.Grid2D.from_mask(mask=mask)
g0 = ag.Galaxy(
redshift=0.5,
light_profile=ag.lp.EllSersic(centre=(1.6, -1.6), intensity=1.0),
)
plane = ag.Plane(galaxies=[g0], redshift=None)
plane_image = plane.plane_image_2d_from_grid(grid=grid)
assert plane_image.array.shape_native == (5, 5)
assert np.unravel_index(
plane_image.array.native.argmax(), plane_image.array.native.shape
) == (0, 0)
g0 = ag.Galaxy(
redshift=0.5,
light_profile=ag.lp.EllSersic(centre=(1.6, 1.6), intensity=1.0),
)
plane = ag.Plane(galaxies=[g0], redshift=None)
plane_image = plane.plane_image_2d_from_grid(grid=grid)
assert np.unravel_index(
plane_image.array.native.argmax(), plane_image.array.native.shape
) == (0, 4)
g0 = ag.Galaxy(
redshift=0.5,
light_profile=ag.lp.EllSersic(centre=(-1.6, -1.6), intensity=1.0),
)
plane = ag.Plane(galaxies=[g0], redshift=None)
plane_image = plane.plane_image_2d_from_grid(grid=grid)
assert np.unravel_index(
plane_image.array.native.argmax(), plane_image.array.native.shape
) == (4, 0)
g0 = ag.Galaxy(
redshift=0.5,
light_profile=ag.lp.EllSersic(centre=(-1.6, 1.6), intensity=1.0),
)
plane = ag.Plane(galaxies=[g0], redshift=None)
plane_image = plane.plane_image_2d_from_grid(grid=grid)
assert np.unravel_index(
plane_image.array.native.argmax(), plane_image.array.native.shape
) == (4, 4)
class TestContributionMaps:
def test__x2_hyper_galaxy__use_numerical_values_for_noise_scaling(self):
hyper_galaxy_0 = ag.HyperGalaxy(
contribution_factor=0.0, noise_factor=0.0, noise_power=1.0
)
hyper_galaxy_1 = ag.HyperGalaxy(
contribution_factor=1.0, noise_factor=0.0, noise_power=1.0
)
hyper_model_image = ag.Array2D.manual_native(
array=[[0.5, 1.0, 1.5]], pixel_scales=1.0
)
hyper_galaxy_image_0 = ag.Array2D.manual_native(
array=[[0.5, 1.0, 1.5]], pixel_scales=1.0
)
hyper_galaxy_image_1 = ag.Array2D.manual_native(
array=[[0.5, 1.0, 1.5]], pixel_scales=1.0
)
galaxy_0 = ag.Galaxy(
redshift=0.5,
hyper_galaxy=hyper_galaxy_0,
hyper_model_image=hyper_model_image,
hyper_galaxy_image=hyper_galaxy_image_0,
)
galaxy_1 = ag.Galaxy(
redshift=0.5,
hyper_galaxy=hyper_galaxy_1,
hyper_model_image=hyper_model_image,
hyper_galaxy_image=hyper_galaxy_image_1,
)
plane = ag.Plane(redshift=0.5, galaxies=[galaxy_0, galaxy_1])
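            # A worked check, assuming the usual HyperGalaxy definition where the contribution map is
            # hyper_galaxy_image / (hyper_model_image + contribution_factor), normalized by its maximum:
            # galaxy_0 (factor 0.0) gives [1, 1, 1]; galaxy_1 (factor 1.0) gives [1/3, 1/2, 3/5] / (3/5)
            # = [5/9, 5/6, 1], matching the asserts below.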
assert (
plane.contribution_maps_of_galaxies[0].native
== np.array([[1.0, 1.0, 1.0]])
).all()
assert (
plane.contribution_maps_of_galaxies[1].native
== np.array([[5.0 / 9.0, (1.0 / 2.0) / (1.5 / 2.5), 1.0]])
).all()
def test__contribution_maps_are_same_as_hyper_galaxy_calculation(self):
hyper_model_image = ag.Array2D.manual_native(
[[2.0, 4.0, 10.0]], pixel_scales=1.0
)
hyper_galaxy_image = ag.Array2D.manual_native(
[[1.0, 5.0, 8.0]], pixel_scales=1.0
)
hyper_galaxy_0 = ag.HyperGalaxy(contribution_factor=5.0)
hyper_galaxy_1 = ag.HyperGalaxy(contribution_factor=10.0)
contribution_map_0 = hyper_galaxy_0.contribution_map_from_hyper_images(
hyper_model_image=hyper_model_image,
hyper_galaxy_image=hyper_galaxy_image,
)
contribution_map_1 = hyper_galaxy_1.contribution_map_from_hyper_images(
hyper_model_image=hyper_model_image,
hyper_galaxy_image=hyper_galaxy_image,
)
galaxy_0 = ag.Galaxy(
redshift=0.5,
hyper_galaxy=hyper_galaxy_0,
hyper_model_image=hyper_model_image,
hyper_galaxy_image=hyper_galaxy_image,
)
galaxy_1 = ag.Galaxy(
redshift=0.5,
hyper_galaxy=hyper_galaxy_1,
hyper_model_image=hyper_model_image,
hyper_galaxy_image=hyper_galaxy_image,
)
plane = ag.Plane(redshift=0.5, galaxies=[galaxy_0])
assert (
plane.contribution_maps_of_galaxies[0].slim == contribution_map_0
).all()
plane = ag.Plane(redshift=0.5, galaxies=[galaxy_1])
assert (
plane.contribution_maps_of_galaxies[0].slim == contribution_map_1
).all()
plane = ag.Plane(redshift=0.5, galaxies=[galaxy_1, galaxy_0])
assert (
plane.contribution_maps_of_galaxies[0].slim == contribution_map_1
).all()
assert (
plane.contribution_maps_of_galaxies[1].slim == contribution_map_0
).all()
        def test__contribution_maps_are_none_for_galaxy_without_hyper_galaxy(self):
hyper_model_image = ag.Array2D.manual_native(
[[2.0, 4.0, 10.0]], pixel_scales=1.0
)
hyper_galaxy_image = ag.Array2D.manual_native(
[[1.0, 5.0, 8.0]], pixel_scales=1.0
)
hyper_galaxy = ag.HyperGalaxy(contribution_factor=5.0)
contribution_map = hyper_galaxy.contribution_map_from_hyper_images(
hyper_model_image=hyper_model_image,
hyper_galaxy_image=hyper_galaxy_image,
)
galaxy = ag.Galaxy(
redshift=0.5,
hyper_galaxy=hyper_galaxy,
hyper_model_image=hyper_model_image,
hyper_galaxy_image=hyper_galaxy_image,
)
plane = ag.Plane(
redshift=0.5,
galaxies=[galaxy, ag.Galaxy(redshift=0.5), ag.Galaxy(redshift=0.5)],
)
assert (
plane.contribution_maps_of_galaxies[0].slim == contribution_map
).all()
            assert plane.contribution_maps_of_galaxies[1] is None
            assert plane.contribution_maps_of_galaxies[2] is None
def test__contribution_map_is_sum_of_galaxy_contribution_maps__handles_nones_correctly(
self,
):
hyper_galaxy_0 = ag.HyperGalaxy(
contribution_factor=0.0, noise_factor=0.0, noise_power=1.0
)
hyper_galaxy_1 = ag.HyperGalaxy(
contribution_factor=1.0, noise_factor=0.0, noise_power=1.0
)
hyper_model_image = ag.Array2D.manual_native(
array=[[0.5, 1.0, 1.5]], pixel_scales=1.0
)
hyper_galaxy_image_0 = ag.Array2D.manual_native(
array=[[0.5, 1.0, 1.5]], pixel_scales=1.0
)
hyper_galaxy_image_1 = ag.Array2D.manual_native(
array=[[0.5, 1.0, 1.5]], pixel_scales=1.0
)
galaxy_0 = ag.Galaxy(
redshift=0.5,
hyper_galaxy=hyper_galaxy_0,
hyper_model_image=hyper_model_image,
hyper_galaxy_image=hyper_galaxy_image_0,
)
galaxy_1 = ag.Galaxy(
redshift=0.5,
hyper_galaxy=hyper_galaxy_1,
hyper_model_image=hyper_model_image,
hyper_galaxy_image=hyper_galaxy_image_1,
)
plane = ag.Plane(redshift=0.5, galaxies=[galaxy_0, galaxy_1])
assert (
sum(plane.contribution_maps_of_galaxies) == plane.contribution_map
).all()
galaxy_1 = ag.Galaxy(redshift=0.5)
plane = ag.Plane(redshift=0.5, galaxies=[galaxy_0, galaxy_1])
assert (galaxy_0.contribution_map == plane.contribution_map).all()
galaxy_0 = ag.Galaxy(redshift=0.5)
plane = ag.Plane(redshift=0.5, galaxies=[galaxy_0, galaxy_1])
            assert plane.contribution_map is None
class TestHyperNoiseMap:
def test__x2_hyper_galaxy__use_numerical_values_of_hyper_noise_map_scaling(
self,
):
noise_map = ag.Array2D.manual_native(
array=[[1.0, 2.0, 3.0]], pixel_scales=1.0
)
hyper_galaxy_0 = ag.HyperGalaxy(
contribution_factor=0.0, noise_factor=1.0, noise_power=1.0
)
hyper_galaxy_1 = ag.HyperGalaxy(
contribution_factor=3.0, noise_factor=1.0, noise_power=2.0
)
hyper_model_image = ag.Array2D.manual_native(
array=[[0.5, 1.0, 1.5]], pixel_scales=1.0
)
hyper_galaxy_image_0 = ag.Array2D.manual_native(
array=[[0.0, 1.0, 1.5]], pixel_scales=1.0
)
hyper_galaxy_image_1 = ag.Array2D.manual_native(
array=[[1.0, 1.0, 1.5]], pixel_scales=1.0
)
galaxy_0 = ag.Galaxy(
redshift=0.5,
hyper_galaxy=hyper_galaxy_0,
hyper_model_image=hyper_model_image,
hyper_galaxy_image=hyper_galaxy_image_0,
)
galaxy_1 = ag.Galaxy(
redshift=0.5,
hyper_galaxy=hyper_galaxy_1,
hyper_model_image=hyper_model_image,
hyper_galaxy_image=hyper_galaxy_image_1,
)
plane = ag.Plane(redshift=0.5, galaxies=[galaxy_0, galaxy_1])
hyper_noise_maps = plane.hyper_noise_maps_of_galaxies_from_noise_map(
noise_map=noise_map
)
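            # A worked check, assuming hyper noise scaling of the form
            # noise_factor * (noise_map * contribution_map) ** noise_power: galaxy_0's contribution map is
            # [0, 1, 1], giving [0, 2, 3]; galaxy_1's is ~[0.857, 0.75, 1.0], giving
            # ~[0.735, (2 * 0.75) ** 2, 3 ** 2], matching the asserts below.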
assert (hyper_noise_maps[0].slim == np.array([0.0, 2.0, 3.0])).all()
assert hyper_noise_maps[1].slim == pytest.approx(
np.array([0.73468, (2.0 * 0.75) ** 2.0, 3.0 ** 2.0]), 1.0e-4
)
def test__hyper_noise_maps_are_same_as_hyper_galaxy_calculation(self):
noise_map = ag.Array2D.manual_native(
array=[[5.0, 3.0, 1.0]], pixel_scales=1.0
)
hyper_model_image = ag.Array2D.manual_native(
array=[[2.0, 4.0, 10.0]], pixel_scales=1.0
)
hyper_galaxy_image = ag.Array2D.manual_native(
array=[[1.0, 5.0, 8.0]], pixel_scales=1.0
)
hyper_galaxy_0 = ag.HyperGalaxy(contribution_factor=5.0)
hyper_galaxy_1 = ag.HyperGalaxy(contribution_factor=10.0)
contribution_map_0 = hyper_galaxy_0.contribution_map_from_hyper_images(
hyper_model_image=hyper_model_image,
hyper_galaxy_image=hyper_galaxy_image,
)
contribution_map_1 = hyper_galaxy_1.contribution_map_from_hyper_images(
hyper_model_image=hyper_model_image,
hyper_galaxy_image=hyper_galaxy_image,
)
hyper_noise_map_0 = hyper_galaxy_0.hyper_noise_map_from_contribution_map(
noise_map=noise_map, contribution_map=contribution_map_0
)
hyper_noise_map_1 = hyper_galaxy_1.hyper_noise_map_from_contribution_map(
noise_map=noise_map, contribution_map=contribution_map_1
)
galaxy_0 = ag.Galaxy(
redshift=0.5,
hyper_galaxy=hyper_galaxy_0,
hyper_model_image=hyper_model_image,
hyper_galaxy_image=hyper_galaxy_image,
)
galaxy_1 = ag.Galaxy(
redshift=0.5,
hyper_galaxy=hyper_galaxy_1,
hyper_model_image=hyper_model_image,
hyper_galaxy_image=hyper_galaxy_image,
)
plane = ag.Plane(redshift=0.5, galaxies=[galaxy_0])
hyper_noise_maps = plane.hyper_noise_maps_of_galaxies_from_noise_map(
noise_map=noise_map
)
assert (hyper_noise_maps[0].slim == hyper_noise_map_0).all()
plane = ag.Plane(redshift=0.5, galaxies=[galaxy_1])
hyper_noise_maps = plane.hyper_noise_maps_of_galaxies_from_noise_map(
noise_map=noise_map
)
assert (hyper_noise_maps[0].slim == hyper_noise_map_1).all()
plane = ag.Plane(redshift=0.5, galaxies=[galaxy_1, galaxy_0])
hyper_noise_maps = plane.hyper_noise_maps_of_galaxies_from_noise_map(
noise_map=noise_map
)
assert (hyper_noise_maps[0].slim == hyper_noise_map_1).all()
assert (hyper_noise_maps[1].slim == hyper_noise_map_0).all()
def test__hyper_noise_maps_are_none_for_galaxy_without_hyper_galaxy(self):
noise_map = ag.Array2D.manual_native(
array=[[5.0, 3.0, 1.0]], pixel_scales=1.0
)
hyper_model_image = ag.Array2D.manual_native(
array=[[2.0, 4.0, 10.0]], pixel_scales=1.0
)
hyper_galaxy_image = ag.Array2D.manual_native(
array=[[1.0, 5.0, 8.0]], pixel_scales=1.0
)
hyper_galaxy_0 = ag.HyperGalaxy(contribution_factor=5.0)
hyper_galaxy_1 = ag.HyperGalaxy(contribution_factor=10.0)
contribution_map_0 = hyper_galaxy_0.contribution_map_from_hyper_images(
hyper_model_image=hyper_model_image,
hyper_galaxy_image=hyper_galaxy_image,
)
contribution_map_1 = hyper_galaxy_1.contribution_map_from_hyper_images(
hyper_model_image=hyper_model_image,
hyper_galaxy_image=hyper_galaxy_image,
)
hyper_noise_map_0 = hyper_galaxy_0.hyper_noise_map_from_contribution_map(
noise_map=noise_map, contribution_map=contribution_map_0
)
hyper_noise_map_1 = hyper_galaxy_1.hyper_noise_map_from_contribution_map(
noise_map=noise_map, contribution_map=contribution_map_1
)
galaxy_0 = ag.Galaxy(
redshift=0.5,
hyper_galaxy=hyper_galaxy_0,
hyper_model_image=hyper_model_image,
hyper_galaxy_image=hyper_galaxy_image,
)
galaxy_1 = ag.Galaxy(
redshift=0.5,
hyper_galaxy=hyper_galaxy_1,
hyper_model_image=hyper_model_image,
hyper_galaxy_image=hyper_galaxy_image,
)
plane = ag.Plane(redshift=0.5, galaxies=[galaxy_0, ag.Galaxy(redshift=0.5)])
hyper_noise_maps = plane.hyper_noise_maps_of_galaxies_from_noise_map(
noise_map=noise_map
)
assert (hyper_noise_maps[0].slim == hyper_noise_map_0).all()
assert (hyper_noise_maps[1].slim == np.zeros(shape=(3, 1))).all()
plane = ag.Plane(redshift=0.5, galaxies=[ag.Galaxy(redshift=0.5), galaxy_1])
hyper_noise_maps = plane.hyper_noise_maps_of_galaxies_from_noise_map(
noise_map=noise_map
)
assert (hyper_noise_maps[0].slim == np.zeros(shape=(3, 1))).all()
assert (hyper_noise_maps[1].slim == hyper_noise_map_1).all()
plane = ag.Plane(
redshift=0.5,
galaxies=[
ag.Galaxy(redshift=0.5),
galaxy_1,
galaxy_0,
ag.Galaxy(redshift=0.5),
],
)
hyper_noise_maps = plane.hyper_noise_maps_of_galaxies_from_noise_map(
noise_map=noise_map
)
assert (hyper_noise_maps[0].slim == np.zeros(shape=(3, 1))).all()
assert (hyper_noise_maps[1].slim == hyper_noise_map_1).all()
assert (hyper_noise_maps[2].slim == hyper_noise_map_0).all()
assert (hyper_noise_maps[3].slim == np.zeros(shape=(3, 1))).all()
def test__hyper_noise_map_from_noise_map__is_sum_of_galaxy_hyper_noise_maps__filters_nones(
self,
):
noise_map = ag.Array2D.manual_native(
array=[[5.0, 3.0, 1.0]], pixel_scales=1.0
)
hyper_model_image = ag.Array2D.manual_native(
array=[[2.0, 4.0, 10.0]], pixel_scales=1.0
)
hyper_galaxy_image = ag.Array2D.manual_native(
array=[[1.0, 5.0, 8.0]], pixel_scales=1.0
)
hyper_galaxy_0 = ag.HyperGalaxy(contribution_factor=5.0)
hyper_galaxy_1 = ag.HyperGalaxy(contribution_factor=10.0)
contribution_map_0 = hyper_galaxy_0.contribution_map_from_hyper_images(
hyper_model_image=hyper_model_image,
hyper_galaxy_image=hyper_galaxy_image,
)
contribution_map_1 = hyper_galaxy_1.contribution_map_from_hyper_images(
hyper_model_image=hyper_model_image,
hyper_galaxy_image=hyper_galaxy_image,
)
hyper_noise_map_0 = hyper_galaxy_0.hyper_noise_map_from_contribution_map(
noise_map=noise_map, contribution_map=contribution_map_0
)
hyper_noise_map_1 = hyper_galaxy_1.hyper_noise_map_from_contribution_map(
noise_map=noise_map, contribution_map=contribution_map_1
)
galaxy_0 = ag.Galaxy(
redshift=0.5,
hyper_galaxy=hyper_galaxy_0,
hyper_model_image=hyper_model_image,
hyper_galaxy_image=hyper_galaxy_image,
)
galaxy_1 = ag.Galaxy(
redshift=0.5,
hyper_galaxy=hyper_galaxy_1,
hyper_model_image=hyper_model_image,
hyper_galaxy_image=hyper_galaxy_image,
)
plane = ag.Plane(redshift=0.5, galaxies=[galaxy_0])
hyper_noise_map = plane.hyper_noise_map_from_noise_map(noise_map=noise_map)
assert (hyper_noise_map.slim == hyper_noise_map_0).all()
plane = ag.Plane(redshift=0.5, galaxies=[galaxy_1])
hyper_noise_map = plane.hyper_noise_map_from_noise_map(noise_map=noise_map)
assert (hyper_noise_map.slim == hyper_noise_map_1).all()
plane = ag.Plane(redshift=0.5, galaxies=[galaxy_1, galaxy_0])
hyper_noise_map = plane.hyper_noise_map_from_noise_map(noise_map=noise_map)
assert (hyper_noise_map.slim == hyper_noise_map_0 + hyper_noise_map_1).all()
plane = ag.Plane(
redshift=0.5,
galaxies=[
ag.Galaxy(redshift=0.5),
galaxy_1,
galaxy_0,
ag.Galaxy(redshift=0.5),
],
)
hyper_noise_map = plane.hyper_noise_map_from_noise_map(noise_map=noise_map)
assert (hyper_noise_map.slim == hyper_noise_map_0 + hyper_noise_map_1).all()
def test__plane_has_no_hyper_galaxies__hyper_noise_map_function_returns_none(
self,
):
noise_map = ag.Array2D.manual_native(
array=[[5.0, 3.0, 1.0]], pixel_scales=1.0
)
plane = ag.Plane(redshift=0.5, galaxies=[ag.Galaxy(redshift=0.5)])
hyper_noise_map = plane.hyper_noise_map_from_noise_map(noise_map=noise_map)
assert (hyper_noise_map == np.zeros((3, 1))).all()
class TestPlane:
class TestTracedGrid:
def test__traced_grid_same_as_manual_deflections_calc_via_galaxy___use_multiple_galaxies(
self, sub_grid_2d_7x7
):
            # Overwrite one value so the deflections in each pixel are different
sub_grid_2d_7x7[5] = np.array([2.0, 2.0])
g0 = ag.Galaxy(
redshift=0.5, mass_profile=ag.mp.SphIsothermal(einstein_radius=1.0)
)
g1 = ag.Galaxy(
redshift=0.5, mass_profile=ag.mp.SphIsothermal(einstein_radius=2.0)
)
g0_deflections = g0.deflections_2d_from_grid(grid=sub_grid_2d_7x7)
g1_deflections = g1.deflections_2d_from_grid(grid=sub_grid_2d_7x7)
traced_grid = sub_grid_2d_7x7 - (g0_deflections + g1_deflections)
plane = ag.Plane(galaxies=[g0, g1], redshift=None)
plane_traced_grid = plane.traced_grid_from_grid(grid=sub_grid_2d_7x7)
assert plane_traced_grid == pytest.approx(traced_grid, 1.0e-4)
def test__traced_grid_numerics__uses_deflections__x2_sis_galaxies(
self, sub_grid_2d_7x7_simple, gal_x1_mp
):
plane = ag.Plane(galaxies=[gal_x1_mp, gal_x1_mp], redshift=None)
traced_grid = plane.traced_grid_from_grid(grid=sub_grid_2d_7x7_simple)
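            # A SphIsothermal with einstein_radius=1.0 deflects (1.0, 1.0) by (1/sqrt(2), 1/sqrt(2)) ~ (0.707, 0.707)
            # and (1.0, 0.0) by (1.0, 0.0); with two identical galaxies the deflections sum, which is what the
            # pytest.approx values below encode.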
assert traced_grid[0] == pytest.approx(
np.array([1.0 - 2.0 * 0.707, 1.0 - 2.0 * 0.707]), 1e-3
)
assert traced_grid[1] == pytest.approx(np.array([-1.0, 0.0]), 1e-3)
assert traced_grid[2] == pytest.approx(
np.array([1.0 - 2.0 * 0.707, 1.0 - 2.0 * 0.707]), 1e-3
)
assert traced_grid[3] == pytest.approx(np.array([-1.0, 0.0]), 1e-3)
def test__traced_grid__grid_is_positions__uses_deflections__x2_sis_galaxies(
self, gal_x1_mp
):
positions = ag.Grid2DIrregular(grid=[(1.0, 1.0), (1.0, 0.0)])
plane = ag.Plane(galaxies=[gal_x1_mp, gal_x1_mp], redshift=None)
traced_grid = plane.traced_grid_from_grid(grid=positions)
assert traced_grid.in_list[0] == pytest.approx(
(1.0 - 2.0 * 0.707, 1.0 - 2.0 * 0.707), 1e-3
)
assert traced_grid.in_list[1] == pytest.approx((-1.0, 0.0), 1e-3)
def test__plane_has_no_galaxies__traced_grid_is_input_grid_of_sub_grid_2d_7x7(
self, sub_grid_2d_7x7
):
plane = ag.Plane(galaxies=[], redshift=1.0)
traced_grid = plane.traced_grid_from_grid(grid=sub_grid_2d_7x7)
assert (traced_grid == sub_grid_2d_7x7).all()
class TestGalaxies:
def test__no_galaxies__raises_exception_if_no_plane_redshift_input(self):
plane = ag.Plane(galaxies=[], redshift=0.5)
assert plane.redshift == 0.5
with pytest.raises(exc.PlaneException):
ag.Plane(galaxies=[])
def test__galaxy_redshifts_gives_list_of_redshifts(self):
g0 = ag.Galaxy(redshift=1.0)
g1 = ag.Galaxy(redshift=1.0)
g2 = ag.Galaxy(redshift=1.0)
plane = ag.Plane(galaxies=[g0, g1, g2])
assert plane.redshift == 1.0
assert plane.galaxy_redshifts == [1.0, 1.0, 1.0]
class TestExtractAttribute:
def test__extract_attribute(self):
g0 = ag.Galaxy(
redshift=0.5, mp_0=mock.MockMassProfile(value=0.9, value1=(1.0, 1.0))
)
g1 = ag.Galaxy(
redshift=0.5, mp_0=mock.MockMassProfile(value=0.8, value1=(2.0, 2.0))
)
g2 = ag.Galaxy(
redshift=0.5,
mp_0=mock.MockMassProfile(value=0.7),
mp_1=mock.MockMassProfile(value=0.6),
)
plane = ag.Plane(galaxies=[ag.Galaxy(redshift=0.5)], redshift=None)
values = plane.extract_attribute(cls=ag.mp.MassProfile, attr_name="value")
            assert values is None
plane = ag.Plane(galaxies=[g0, g1], redshift=None)
values = plane.extract_attribute(cls=ag.mp.MassProfile, attr_name="value1")
assert values.in_list == [(1.0, 1.0), (2.0, 2.0)]
plane = ag.Plane(
galaxies=[g0, ag.Galaxy(redshift=0.5), g1, ag.Galaxy(redshift=0.5), g2],
redshift=None,
)
values = plane.extract_attribute(cls=ag.mp.MassProfile, attr_name="value")
assert values.in_list == [0.9, 0.8, 0.7, 0.6]
plane.extract_attribute(cls=ag.mp.MassProfile, attr_name="incorrect_value")
def test__extract_attributes_of_galaxies(self):
g0 = ag.Galaxy(
redshift=0.5, mp_0=mock.MockMassProfile(value=0.9, value1=(1.0, 1.0))
)
g1 = ag.Galaxy(
redshift=0.5, mp_0=mock.MockMassProfile(value=0.8, value1=(2.0, 2.0))
)
g2 = ag.Galaxy(
redshift=0.5,
mp_0=mock.MockMassProfile(value=0.7),
mp_1=mock.MockMassProfile(value=0.6),
)
plane = ag.Plane(galaxies=[ag.Galaxy(redshift=0.5)], redshift=None)
values = plane.extract_attributes_of_galaxies(
cls=ag.mp.MassProfile, attr_name="value"
)
assert values == [None]
plane = ag.Plane(galaxies=[g0, g1], redshift=None)
values = plane.extract_attributes_of_galaxies(
cls=ag.mp.MassProfile, attr_name="value1"
)
assert values[0].in_list == [(1.0, 1.0)]
assert values[1].in_list == [(2.0, 2.0)]
plane = ag.Plane(
galaxies=[g0, ag.Galaxy(redshift=0.5), g1, ag.Galaxy(redshift=0.5), g2],
redshift=None,
)
values = plane.extract_attributes_of_galaxies(
cls=ag.mp.MassProfile, attr_name="value", filter_nones=False
)
assert values[0].in_list == [0.9]
            assert values[1] is None
            assert values[2].in_list == [0.8]
            assert values[3] is None
assert values[4].in_list == [0.7, 0.6]
values = plane.extract_attributes_of_galaxies(
cls=ag.mp.MassProfile, attr_name="value", filter_nones=True
)
assert values[0].in_list == [0.9]
assert values[1].in_list == [0.8]
assert values[2].in_list == [0.7, 0.6]
plane.extract_attributes_of_galaxies(
cls=ag.mp.MassProfile, attr_name="incorrect_value", filter_nones=True
)
class TestDecorators:
def test__grid_iterate_in__iterates_grid_correctly(self, gal_x1_lp):
mask = ag.Mask2D.manual(
mask=[
[True, True, True, True, True],
[True, False, False, False, True],
[True, False, False, False, True],
[True, False, False, False, True],
[True, True, True, True, True],
],
pixel_scales=(1.0, 1.0),
origin=(0.001, 0.001),
)
grid = ag.Grid2DIterate.from_mask(
mask=mask, fractional_accuracy=1.0, sub_steps=[2]
)
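            # With fractional_accuracy=1.0 the iterated evaluation can never terminate early, so it always refines
            # to the highest sub_steps value (2 here); the image should therefore equal a plain sub_size=2 binned
            # evaluation, which the assert below checks.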
plane = ag.Plane(galaxies=[gal_x1_lp], redshift=None)
image = plane.image_2d_from_grid(grid=grid)
mask_sub_2 = mask.mask_new_sub_size_from(mask=mask, sub_size=2)
grid_sub_2 = ag.Grid2D.from_mask(mask=mask_sub_2)
image_sub_2 = plane.image_2d_from_grid(grid=grid_sub_2).binned
assert (image == image_sub_2).all()
grid = ag.Grid2DIterate.from_mask(
mask=mask, fractional_accuracy=0.95, sub_steps=[2, 4, 8]
)
galaxy = ag.Galaxy(
redshift=0.5, light=ag.lp.EllSersic(centre=(0.08, 0.08), intensity=1.0)
)
plane = ag.Plane(galaxies=[galaxy])
image = plane.image_2d_from_grid(grid=grid)
mask_sub_4 = mask.mask_new_sub_size_from(mask=mask, sub_size=4)
grid_sub_4 = ag.Grid2D.from_mask(mask=mask_sub_4)
image_sub_4 = plane.image_2d_from_grid(grid=grid_sub_4).binned
assert image[0] == image_sub_4[0]
mask_sub_8 = mask.mask_new_sub_size_from(mask=mask, sub_size=8)
grid_sub_8 = ag.Grid2D.from_mask(mask=mask_sub_8)
image_sub_8 = plane.image_2d_from_grid(grid=grid_sub_8).binned
assert image[4] == image_sub_8[4]
def test__grid_iterate_in__iterates_grid_result_correctly(self, gal_x1_mp):
mask = ag.Mask2D.manual(
mask=[
[True, True, True, True, True],
[True, False, False, False, True],
[True, False, False, False, True],
[True, False, False, False, True],
[True, True, True, True, True],
],
pixel_scales=(1.0, 1.0),
)
grid = ag.Grid2DIterate.from_mask(
mask=mask, fractional_accuracy=1.0, sub_steps=[2]
)
galaxy = ag.Galaxy(
redshift=0.5,
mass=ag.mp.EllIsothermal(centre=(0.08, 0.08), einstein_radius=1.0),
)
plane = ag.Plane(galaxies=[galaxy], redshift=None)
deflections = plane.deflections_2d_from_grid(grid=grid)
mask_sub_2 = mask.mask_new_sub_size_from(mask=mask, sub_size=2)
grid_sub_2 = ag.Grid2D.from_mask(mask=mask_sub_2)
deflections_sub_2 = galaxy.deflections_2d_from_grid(grid=grid_sub_2).binned
assert (deflections == deflections_sub_2).all()
grid = ag.Grid2DIterate.from_mask(
mask=mask, fractional_accuracy=0.99, sub_steps=[2, 4, 8]
)
galaxy = ag.Galaxy(
redshift=0.5,
mass=ag.mp.EllIsothermal(centre=(0.08, 0.08), einstein_radius=1.0),
)
plane = ag.Plane(galaxies=[galaxy], redshift=None)
deflections = plane.deflections_2d_from_grid(grid=grid)
mask_sub_4 = mask.mask_new_sub_size_from(mask=mask, sub_size=4)
grid_sub_4 = ag.Grid2D.from_mask(mask=mask_sub_4)
deflections_sub_4 = galaxy.deflections_2d_from_grid(grid=grid_sub_4).binned
assert deflections[0, 0] == deflections_sub_4[0, 0]
mask_sub_8 = mask.mask_new_sub_size_from(mask=mask, sub_size=8)
grid_sub_8 = ag.Grid2D.from_mask(mask=mask_sub_8)
deflections_sub_8 = galaxy.deflections_2d_from_grid(grid=grid_sub_8).binned
assert deflections[4, 0] == deflections_sub_8[4, 0]
        def test__grid_interp_in__interps_based_on_interpolate_config(self):
# `False` in interpolate.ini
mask = ag.Mask2D.manual(
mask=[
[True, True, True, True, True],
[True, False, False, False, True],
[True, False, False, False, True],
[True, False, False, False, True],
[True, True, True, True, True],
],
pixel_scales=(1.0, 1.0),
)
grid = ag.Grid2D.from_mask(mask=mask)
grid_interp = ag.Grid2DInterpolate.from_mask(mask=mask, pixel_scales_interp=0.1)
light_profile = ag.lp.EllSersic(intensity=1.0)
light_profile_interp = ag.lp.SphSersic(intensity=1.0)
image_no_interp = light_profile.image_2d_from_grid(grid=grid)
array_interp = light_profile.image_2d_from_grid(grid=grid_interp.grid_interp)
image_interp = grid_interp.interpolated_array_from_array_interp(
array_interp=array_interp
)
galaxy = ag.Galaxy(
redshift=0.5, light=light_profile_interp, light_0=light_profile
)
plane = ag.Plane(galaxies=[galaxy])
image = plane.image_2d_from_grid(grid=grid_interp)
assert (image == image_no_interp + image_interp).all()
mass_profile = ag.mp.EllIsothermal(einstein_radius=1.0)
mass_profile_interp = ag.mp.SphIsothermal(einstein_radius=1.0)
convergence_no_interp = mass_profile.convergence_2d_from_grid(grid=grid)
array_interp = mass_profile_interp.convergence_2d_from_grid(
grid=grid_interp.grid_interp
)
convergence_interp = grid_interp.interpolated_array_from_array_interp(
array_interp=array_interp
)
galaxy = ag.Galaxy(redshift=0.5, mass=mass_profile_interp, mass_0=mass_profile)
plane = ag.Plane(galaxies=[galaxy])
convergence = plane.convergence_2d_from_grid(grid=grid_interp)
assert (convergence == convergence_no_interp + convergence_interp).all()
potential_no_interp = mass_profile.potential_2d_from_grid(grid=grid)
array_interp = mass_profile_interp.potential_2d_from_grid(
grid=grid_interp.grid_interp
)
potential_interp = grid_interp.interpolated_array_from_array_interp(
array_interp=array_interp
)
galaxy = ag.Galaxy(redshift=0.5, mass=mass_profile_interp, mass_0=mass_profile)
plane = ag.Plane(galaxies=[galaxy])
potential = plane.potential_2d_from_grid(grid=grid_interp)
assert (potential == potential_no_interp + potential_interp).all()
deflections_no_interp = mass_profile.deflections_2d_from_grid(grid=grid)
grid_interp_0 = mass_profile_interp.deflections_2d_from_grid(
grid=grid_interp.grid_interp
)
deflections_interp = grid_interp.interpolated_grid_from_grid_interp(
grid_interp=grid_interp_0
)
galaxy = ag.Galaxy(redshift=0.5, mass=mass_profile_interp, mass_0=mass_profile)
plane = ag.Plane(galaxies=[galaxy])
deflections = plane.deflections_2d_from_grid(grid=grid_interp)
assert (deflections == deflections_no_interp + deflections_interp).all()
class TestRegression:
def test__centre_of_profile_in_right_place(self):
grid = ag.Grid2D.uniform(shape_native=(7, 7), pixel_scales=1.0)
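            # For a 7x7 uniform grid with pixel_scales=1.0 centred on the origin, y runs from +3.0 (row 0) to -3.0
            # (row 6) and x from -3.0 (column 0) to +3.0 (column 6), so the profile centre (2.0, 1.0) sits at native
            # index (1, 4), which is where the convergence maximum and potential minimum are asserted below.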
galaxy = ag.Galaxy(
redshift=0.5,
mass=ag.mp.EllIsothermal(centre=(2.0, 1.0), einstein_radius=1.0),
mass_0=ag.mp.EllIsothermal(centre=(2.0, 1.0), einstein_radius=1.0),
)
plane = ag.Plane(galaxies=[galaxy])
convergence = plane.convergence_2d_from_grid(grid=grid)
max_indexes = np.unravel_index(
convergence.native.argmax(), convergence.shape_native
)
assert max_indexes == (1, 4)
potential = plane.potential_2d_from_grid(grid=grid)
max_indexes = np.unravel_index(
potential.native.argmin(), potential.shape_native
)
assert max_indexes == (1, 4)
deflections = plane.deflections_2d_from_grid(grid=grid)
assert deflections.native[1, 4, 0] > 0
assert deflections.native[2, 4, 0] < 0
assert deflections.native[1, 4, 1] > 0
assert deflections.native[1, 3, 1] < 0
galaxy = ag.Galaxy(
redshift=0.5,
mass=ag.mp.SphIsothermal(centre=(2.0, 1.0), einstein_radius=1.0),
mass_0=ag.mp.SphIsothermal(centre=(2.0, 1.0), einstein_radius=1.0),
)
plane = ag.Plane(galaxies=[galaxy])
convergence = plane.convergence_2d_from_grid(grid=grid)
max_indexes = np.unravel_index(
convergence.native.argmax(), convergence.shape_native
)
assert max_indexes == (1, 4)
potential = plane.potential_2d_from_grid(grid=grid)
max_indexes = np.unravel_index(
potential.native.argmin(), potential.shape_native
)
assert max_indexes == (1, 4)
deflections = plane.deflections_2d_from_grid(grid=grid)
assert deflections.native[1, 4, 0] > 0
assert deflections.native[2, 4, 0] < 0
assert deflections.native[1, 4, 1] > 0
assert deflections.native[1, 3, 1] < 0
grid = ag.Grid2DIterate.uniform(
shape_native=(7, 7),
pixel_scales=1.0,
fractional_accuracy=0.99,
sub_steps=[2, 4],
)
galaxy = ag.Galaxy(
redshift=0.5,
mass=ag.mp.EllIsothermal(centre=(2.0, 1.0), einstein_radius=1.0),
mass_0=ag.mp.EllIsothermal(centre=(2.0, 1.0), einstein_radius=1.0),
)
plane = ag.Plane(galaxies=[galaxy])
convergence = plane.convergence_2d_from_grid(grid=grid)
max_indexes = np.unravel_index(
convergence.native.argmax(), convergence.shape_native
)
assert max_indexes == (1, 4)
potential = plane.potential_2d_from_grid(grid=grid)
max_indexes = np.unravel_index(
potential.native.argmin(), potential.shape_native
)
assert max_indexes == (1, 4)
deflections = plane.deflections_2d_from_grid(grid=grid)
assert deflections.native[1, 4, 0] >= 0
assert deflections.native[2, 4, 0] <= 0
assert deflections.native[1, 4, 1] >= 0
assert deflections.native[1, 3, 1] <= 0
galaxy = ag.Galaxy(
redshift=0.5,
mass=ag.mp.SphIsothermal(centre=(2.0, 1.0), einstein_radius=1.0),
)
plane = ag.Plane(galaxies=[galaxy])
convergence = plane.convergence_2d_from_grid(grid=grid)
max_indexes = np.unravel_index(
convergence.native.argmax(), convergence.shape_native
)
assert max_indexes == (1, 4)
potential = plane.potential_2d_from_grid(grid=grid)
max_indexes = np.unravel_index(
potential.native.argmin(), potential.shape_native
)
assert max_indexes == (1, 4)
deflections = plane.deflections_2d_from_grid(grid=grid)
assert deflections.native[1, 4, 0] >= 0
assert deflections.native[2, 4, 0] <= 0
assert deflections.native[1, 4, 1] >= 0
assert deflections.native[1, 3, 1] <= 0
```
#### File: plot/mat_wrap/test_lensing_mat_obj.py
```python
from os import path
import autogalaxy.plot as aplt
class TestLensingMatObj:
def test__all_load_from_config_correctly(self):
light_profile_centres_scatter = aplt.LightProfileCentresScatter()
assert light_profile_centres_scatter.config_dict["s"] == 1
mass_profile_centres_scatter = aplt.MassProfileCentresScatter()
assert mass_profile_centres_scatter.config_dict["s"] == 2
multiple_images_scatter = aplt.MultipleImagesScatter()
assert multiple_images_scatter.config_dict["s"] == 3
critical_curves_plot = aplt.CriticalCurvesPlot()
assert critical_curves_plot.config_dict["width"] == 4
caustics_plot = aplt.CausticsPlot()
assert caustics_plot.config_dict["width"] == 5
``` |
{
"source": "jonathanfrawley/PyAutoLens_copy",
"score": 2
} |
#### File: autolens/mock/fixtures.py
```python
import autolens as al
from autogalaxy.mock.fixtures import *
from autofit.mock.mock import MockSearch
from autolens.mock.mock import MockPositionsSolver
def make_masked_imaging_7x7():
return al.MaskedImaging(
imaging=make_imaging_7x7(),
mask=make_sub_mask_7x7(),
settings=al.SettingsMaskedImaging(sub_size=1),
)
def make_masked_interferometer_7():
return al.MaskedInterferometer(
interferometer=make_interferometer_7(),
visibilities_mask=make_visibilities_mask_7(),
real_space_mask=make_mask_7x7(),
settings=al.SettingsMaskedInterferometer(
sub_size=1, transformer_class=al.TransformerNUFFT
),
)
def make_masked_interferometer_7_grid():
return al.MaskedInterferometer(
interferometer=make_interferometer_7(),
visibilities_mask=make_visibilities_mask_7(),
real_space_mask=make_mask_7x7(),
settings=al.SettingsMaskedInterferometer(
grid_class=al.Grid2D, sub_size=1, transformer_class=aa.TransformerDFT
),
)
def make_positions_x2():
return al.Grid2DIrregular(grid=[(1.0, 1.0), (2.0, 2.0)])
def make_positions_noise_map_x2():
return al.ValuesIrregular(values=[1.0, 1.0])
def make_fluxes_x2():
return al.ValuesIrregular(values=[1.0, 2.0])
def make_fluxes_noise_map_x2():
return al.ValuesIrregular(values=[1.0, 1.0])
def make_tracer_x1_plane_7x7():
return al.Tracer.from_galaxies(galaxies=[make_gal_x1_lp()])
def make_tracer_x2_plane_7x7():
source_gal_x1_lp = al.Galaxy(redshift=1.0, light_profile_0=make_lp_0())
return al.Tracer.from_galaxies(
galaxies=[make_gal_x1_mp(), make_gal_x1_lp(), source_gal_x1_lp]
)
def make_tracer_x2_plane_inversion_7x7():
source_gal_inversion = al.Galaxy(
redshift=1.0,
pixelization=al.pix.Rectangular(),
regularization=al.reg.Constant(),
)
return al.Tracer.from_galaxies(
galaxies=[make_gal_x1_mp(), make_gal_x1_lp(), source_gal_inversion]
)
def make_masked_imaging_fit_x1_plane_7x7():
return al.FitImaging(
masked_imaging=make_masked_imaging_7x7(), tracer=make_tracer_x1_plane_7x7()
)
def make_masked_imaging_fit_x2_plane_7x7():
return al.FitImaging(
masked_imaging=make_masked_imaging_7x7(), tracer=make_tracer_x2_plane_7x7()
)
def make_masked_imaging_fit_x2_plane_inversion_7x7():
return al.FitImaging(
masked_imaging=make_masked_imaging_7x7(),
tracer=make_tracer_x2_plane_inversion_7x7(),
)
def make_masked_interferometer_fit_x1_plane_7x7():
return al.FitInterferometer(
masked_interferometer=make_masked_interferometer_7(),
tracer=make_tracer_x1_plane_7x7(),
)
def make_masked_interferometer_fit_x2_plane_7x7():
return al.FitInterferometer(
masked_interferometer=make_masked_interferometer_7(),
tracer=make_tracer_x2_plane_7x7(),
)
def make_masked_interferometer_fit_x2_plane_inversion_7x7():
return al.FitInterferometer(
masked_interferometer=make_masked_interferometer_7(),
tracer=make_tracer_x2_plane_inversion_7x7(),
)
def make_phase_imaging_7x7():
return al.PhaseImaging(search=MockSearch(name="test_phase"))
def make_phase_interferometer_7():
return al.PhaseInterferometer(
search=MockSearch(name="test_phase"), real_space_mask=make_mask_7x7()
)
def make_phase_positions_x2():
return al.PhasePointSource(
positions_solver=MockPositionsSolver(model_positions=make_positions_x2()),
search=MockSearch(name="test_phase"),
)
```
#### File: autolens/mock/mock.py
```python
import autofit as af
from autofit.mock.mock import MockSearch, MockSamples
from autogalaxy.mock.mock import MockLightProfile, MockMassProfile
class MockResult(af.MockResult):
def __init__(
self,
samples=None,
instance=None,
model=None,
analysis=None,
search=None,
mask=None,
model_image=None,
max_log_likelihood_tracer=None,
max_log_likelihood_fit=None,
hyper_galaxy_image_path_dict=None,
hyper_model_image=None,
hyper_galaxy_visibilities_path_dict=None,
hyper_model_visibilities=None,
pixelization=None,
positions=None,
updated_positions=None,
updated_positions_threshold=None,
stochastic_log_evidences=None,
use_as_hyper_dataset=False,
):
super().__init__(
samples=samples,
instance=instance,
model=model,
analysis=analysis,
search=search,
)
self.previous_model = model
self.gaussian_tuples = None
self.mask = None
self.positions = None
self.mask = mask
self.hyper_galaxy_image_path_dict = hyper_galaxy_image_path_dict
self.hyper_model_image = hyper_model_image
self.hyper_galaxy_visibilities_path_dict = hyper_galaxy_visibilities_path_dict
self.hyper_model_visibilities = hyper_model_visibilities
self.model_image = model_image
self.unmasked_model_image = model_image
self.max_log_likelihood_tracer = max_log_likelihood_tracer
self.max_log_likelihood_fit = max_log_likelihood_fit
self.pixelization = pixelization
self.use_as_hyper_dataset = use_as_hyper_dataset
self.positions = positions
self.updated_positions = (
updated_positions if updated_positions is not None else []
)
self.updated_positions_threshold = updated_positions_threshold
self._stochastic_log_evidences = stochastic_log_evidences
def stochastic_log_evidences(self):
return self._stochastic_log_evidences
@property
def image_plane_multiple_image_positions_of_source_plane_centres(self):
return self.updated_positions
class MockResults(af.ResultsCollection):
def __init__(
self,
samples=None,
instance=None,
model=None,
analysis=None,
search=None,
mask=None,
model_image=None,
max_log_likelihood_tracer=None,
hyper_galaxy_image_path_dict=None,
hyper_model_image=None,
hyper_galaxy_visibilities_path_dict=None,
hyper_model_visibilities=None,
pixelization=None,
positions=None,
updated_positions=None,
updated_positions_threshold=None,
stochastic_log_evidences=None,
use_as_hyper_dataset=False,
):
"""
A collection of results from previous phases. Results can be obtained using an index or the name of the phase
from whence they came.
"""
super().__init__()
result = MockResult(
samples=samples,
instance=instance,
model=model,
analysis=analysis,
search=search,
mask=mask,
model_image=model_image,
max_log_likelihood_tracer=max_log_likelihood_tracer,
hyper_galaxy_image_path_dict=hyper_galaxy_image_path_dict,
hyper_model_image=hyper_model_image,
hyper_galaxy_visibilities_path_dict=hyper_galaxy_visibilities_path_dict,
hyper_model_visibilities=hyper_model_visibilities,
pixelization=pixelization,
positions=positions,
updated_positions=updated_positions,
updated_positions_threshold=updated_positions_threshold,
stochastic_log_evidences=stochastic_log_evidences,
use_as_hyper_dataset=use_as_hyper_dataset,
)
self.__result_list = [result]
@property
def last(self):
"""
The result of the last phase
"""
if len(self.__result_list) > 0:
return self.__result_list[-1]
return None
def __getitem__(self, item):
"""
Get the result of a previous phase by index
Parameters
----------
item: int
The index of the result
Returns
-------
result: Result
The result of a previous phase
"""
return self.__result_list[item]
def __len__(self):
return len(self.__result_list)
class MockFit:
def __init__(self, grid):
self.grid = grid
class MockTracer:
def __init__(
self,
traced_grid=None,
attribute=None,
magnification=None,
flux_hack=None,
einstein_radius=None,
einstein_mass=None,
):
self.positions = traced_grid
self.magnification = magnification
self.attribute = attribute
self.flux_hack = flux_hack
self.einstein_radius = einstein_radius
self.einstein_mass = einstein_mass
@property
def has_mass_profile(self):
return True
def extract_attribute(self, cls, name):
return [self.attribute]
def traced_grids_of_planes_from_grid(self, grid, plane_index_limit=None):
return [self.positions]
def magnification_via_hessian_from_grid(self, grid):
return self.magnification
def einstein_radius_from_grid(self, grid):
return self.einstein_radius
def einstein_mass_angular_from_grid(self, grid):
return self.einstein_mass
class MockPositionsSolver:
def __init__(self, model_positions):
self.model_positions = model_positions
def solve(self, lensing_obj, source_plane_coordinate):
return self.model_positions
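# A minimal usage sketch (illustrative only): the solver ignores the lensing object and simply returns the
# positions it was constructed with, e.g.
#   solver = MockPositionsSolver(model_positions=[(1.0, 1.0), (2.0, 2.0)])
#   solver.solve(lensing_obj=None, source_plane_coordinate=(0.0, 0.0))  # returns the positions passed in above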
```
#### File: phase/dataset/analysis.py
```python
import autofit as af
from autolens.lens import ray_tracing
from autolens.pipeline import visualizer as vis
from os import path
import pickle
from typing import List
import json
import numpy as np
class Analysis:
def plane_for_instance(self, instance):
raise NotImplementedError()
def tracer_for_instance(self, instance):
return ray_tracing.Tracer.from_galaxies(
galaxies=instance.galaxies, cosmology=self.cosmology
)
def stochastic_log_evidences_for_instance(self, instance) -> List[float]:
raise NotImplementedError()
def save_stochastic_outputs(self, paths: af.Paths, samples: af.OptimizerSamples):
stochastic_log_evidences_json_file = path.join(
paths.output_path, "stochastic_log_evidences.json"
)
stochastic_log_evidences_pickle_file = path.join(
paths.pickle_path, "stochastic_log_evidences.pickle"
)
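        # Re-use stochastic log evidences from a previous run if the JSON output already exists; otherwise
        # recompute them from the maximum log likelihood instance and write both the JSON and pickle outputs.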
try:
with open(stochastic_log_evidences_json_file, "r") as f:
stochastic_log_evidences = np.asarray(json.load(f))
except FileNotFoundError:
instance = samples.max_log_likelihood_instance
stochastic_log_evidences = self.stochastic_log_evidences_for_instance(
instance=instance
)
if stochastic_log_evidences is None:
return
with open(stochastic_log_evidences_json_file, "w") as outfile:
json.dump(
[float(evidence) for evidence in stochastic_log_evidences], outfile
)
with open(stochastic_log_evidences_pickle_file, "wb") as f:
pickle.dump(stochastic_log_evidences, f)
visualizer = vis.Visualizer(visualize_path=paths.image_path)
visualizer.visualize_stochastic_histogram(
log_evidences=stochastic_log_evidences,
max_log_evidence=np.max(samples.log_likelihoods),
histogram_bins=self.settings.settings_lens.stochastic_histogram_bins,
)
```
#### File: unit/aggregator/test_aggregator.py
```python
from os import path
import autofit as af
import autolens as al
import pytest
from autolens.mock import mock
directory = path.dirname(path.realpath(__file__))
@pytest.fixture(name="path")
def make_path():
return path.join("{}".format(path.dirname(path.realpath(__file__))), "files")
@pytest.fixture(name="samples")
def make_samples():
galaxy_0 = al.Galaxy(redshift=0.5, light=al.lp.EllipticalSersic(centre=(0.0, 1.0)))
galaxy_1 = al.Galaxy(redshift=1.0, light=al.lp.EllipticalSersic())
tracer = al.Tracer.from_galaxies(galaxies=[galaxy_0, galaxy_1])
return mock.MockSamples(max_log_likelihood_instance=tracer)
def test__tracer_generator_from_aggregator(imaging_7x7, mask_7x7, samples):
phase_imaging_7x7 = al.PhaseImaging(
galaxies=dict(
lens=al.GalaxyModel(redshift=0.5, light=al.lp.EllipticalSersic),
source=al.GalaxyModel(redshift=1.0, light=al.lp.EllipticalSersic),
),
search=mock.MockSearch("test_phase_aggregator", samples=samples),
)
phase_imaging_7x7.run(
dataset=imaging_7x7, mask=mask_7x7, results=mock.MockResults(samples=samples)
)
agg = af.Aggregator(directory=phase_imaging_7x7.paths.output_path)
tracer_gen = al.agg.Tracer(aggregator=agg)
for tracer in tracer_gen:
assert tracer.galaxies[0].redshift == 0.5
assert tracer.galaxies[0].light.centre == (0.0, 1.0)
assert tracer.galaxies[1].redshift == 1.0
def test__masked_imaging_generator_from_aggregator(imaging_7x7, mask_7x7, samples):
phase_imaging_7x7 = al.PhaseImaging(
galaxies=dict(
lens=al.GalaxyModel(redshift=0.5, light=al.lp.EllipticalSersic),
source=al.GalaxyModel(redshift=1.0, light=al.lp.EllipticalSersic),
),
settings=al.SettingsPhaseImaging(
settings_masked_imaging=al.SettingsMaskedImaging(
grid_class=al.Grid2DIterate,
grid_inversion_class=al.Grid2DInterpolate,
fractional_accuracy=0.5,
sub_steps=[2],
pixel_scales_interp=0.1,
)
),
search=mock.MockSearch("test_phase_aggregator", samples=samples),
)
phase_imaging_7x7.run(
dataset=imaging_7x7, mask=mask_7x7, results=mock.MockResults(samples=samples)
)
agg = af.Aggregator(directory=phase_imaging_7x7.paths.output_path)
masked_imaging_gen = al.agg.MaskedImaging(aggregator=agg)
for masked_imaging in masked_imaging_gen:
assert (masked_imaging.imaging.image == imaging_7x7.image).all()
assert isinstance(masked_imaging.grid, al.Grid2DIterate)
assert isinstance(masked_imaging.grid_inversion, al.Grid2DInterpolate)
assert masked_imaging.grid.sub_steps == [2]
assert masked_imaging.grid.fractional_accuracy == 0.5
assert masked_imaging.grid_inversion.pixel_scales_interp == (0.1, 0.1)
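    # The grid classes and their settings (fractional accuracy, sub steps, interpolation pixel scale) are recovered
    # from the aggregator, i.e. the phase settings are serialized alongside its output, as the assertions above check.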
def test__fit_imaging_generator_from_aggregator(imaging_7x7, mask_7x7, samples):
phase_imaging_7x7 = al.PhaseImaging(
galaxies=dict(
lens=al.GalaxyModel(redshift=0.5, light=al.lp.EllipticalSersic),
source=al.GalaxyModel(redshift=1.0, light=al.lp.EllipticalSersic),
),
search=mock.MockSearch("test_phase_aggregator", samples=samples),
)
phase_imaging_7x7.run(
dataset=imaging_7x7, mask=mask_7x7, results=mock.MockResults(samples=samples)
)
agg = af.Aggregator(directory=phase_imaging_7x7.paths.output_path)
fit_imaging_gen = al.agg.FitImaging(aggregator=agg)
for fit_imaging in fit_imaging_gen:
assert (fit_imaging.masked_imaging.imaging.image == imaging_7x7.image).all()
def test__masked_interferometer_generator_from_aggregator(
interferometer_7, visibilities_mask_7, mask_7x7, samples
):
phase_interferometer_7x7 = al.PhaseInterferometer(
galaxies=dict(
lens=al.GalaxyModel(redshift=0.5, light=al.lp.EllipticalSersic),
source=al.GalaxyModel(redshift=1.0, light=al.lp.EllipticalSersic),
),
settings=al.SettingsPhaseInterferometer(
settings_masked_interferometer=al.SettingsMaskedInterferometer(
transformer_class=al.TransformerDFT,
grid_class=al.Grid2DIterate,
grid_inversion_class=al.Grid2DInterpolate,
fractional_accuracy=0.5,
sub_steps=[2],
pixel_scales_interp=0.1,
)
),
search=mock.MockSearch("test_phase_aggregator", samples=samples),
real_space_mask=mask_7x7,
)
phase_interferometer_7x7.run(
dataset=interferometer_7,
mask=visibilities_mask_7,
results=mock.MockResults(samples=samples),
)
agg = af.Aggregator(directory=phase_interferometer_7x7.paths.output_path)
masked_interferometer_gen = al.agg.MaskedInterferometer(aggregator=agg)
for masked_interferometer in masked_interferometer_gen:
assert (
masked_interferometer.interferometer.visibilities
== interferometer_7.visibilities
).all()
assert (masked_interferometer.real_space_mask == mask_7x7).all()
assert isinstance(masked_interferometer.grid, al.Grid2DIterate)
assert isinstance(masked_interferometer.grid_inversion, al.Grid2DInterpolate)
assert masked_interferometer.grid.sub_steps == [2]
assert masked_interferometer.grid.fractional_accuracy == 0.5
assert masked_interferometer.grid_inversion.pixel_scales_interp == (0.1, 0.1)
assert isinstance(masked_interferometer.transformer, al.TransformerDFT)
def test__fit_interferometer_generator_from_aggregator(
interferometer_7, visibilities_mask_7, mask_7x7, samples
):
phase_interferometer_7x7 = al.PhaseInterferometer(
galaxies=dict(
lens=al.GalaxyModel(redshift=0.5, light=al.lp.EllipticalSersic),
source=al.GalaxyModel(redshift=1.0, light=al.lp.EllipticalSersic),
),
search=mock.MockSearch("test_phase_aggregator", samples=samples),
real_space_mask=mask_7x7,
)
phase_interferometer_7x7.run(
dataset=interferometer_7,
mask=visibilities_mask_7,
results=mock.MockResults(samples=samples),
)
agg = af.Aggregator(directory=phase_interferometer_7x7.paths.output_path)
fit_interferometer_gen = al.agg.FitInterferometer(aggregator=agg)
for fit_interferometer in fit_interferometer_gen:
assert (
fit_interferometer.masked_interferometer.interferometer.visibilities
== interferometer_7.visibilities
).all()
assert (
fit_interferometer.masked_interferometer.real_space_mask == mask_7x7
).all()
class MockResult:
def __init__(self, log_likelihood):
self.log_likelihood = log_likelihood
self.log_evidence_values = log_likelihood
self.model = log_likelihood
class MockAggregator:
def __init__(self, grid_search_result):
self.grid_search_result = grid_search_result
@property
def grid_search_results(self):
return iter([self.grid_search_result])
def values(self, str):
return self.grid_search_results
# def test__results_array_from_results_file(path):
#
# results = [
# MockResult(log_likelihood=1.0),
# MockResult(log_likelihood=2.0),
# MockResult(log_likelihood=3.0),
# MockResult(log_likelihood=4.0),
# ]
#
# lower_limit_lists = [[0.0, 0.0], [0.0, 0.5], [0.5, 0.0], [0.5, 0.5]]
# physical_lower_limits_lists = [[-1.0, -1.0], [-1.0, 0.0], [0.0, -1.0], [0.0, 0.0]]
#
# grid_search_result = af.GridSearchResult(
# results=results,
# physical_lower_limits_lists=physical_lower_limits_lists,
# lower_limit_lists=lower_limit_lists,
# )
#
# aggregator = MockAggregator(grid_search_result=grid_search_result)
#
# array = al.agg.grid_search_result_as_array(aggregator=aggregator)
#
# assert array.native == pytest.approx(np.array([[3.0, 2.0], [1.0, 4.0]]), 1.0e4)
# assert array.pixel_scales == (1.0, 1.0)
# def test__results_array_from_real_grid_search_pickle(path):
#
# with open("{}/{}.pickle".format(path, "grid_search_result"), "rb") as f:
# grid_search_result = pickle.load(f)
#
# array = al.agg.grid_search_log_evidences_as_array_from_grid_search_result(
# grid_search_result=grid_search_result
# )
#
# print(array.native)
#
# array = al.agg.grid_search_subhalo_masses_as_array_from_grid_search_result(
# grid_search_result=grid_search_result
# )
#
# print(array.native)
#
# array = al.agg.grid_search_subhalo_centres_as_array_from_grid_search_result(
# grid_search_result=grid_search_result
# )
#
# print(array)
```
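A minimal sketch of the load-and-iterate pattern these aggregator tests exercise: point an `af.Aggregator` at a phase's output directory and wrap it in one of the `al.agg` generators. The directory path below is a placeholder, and only calls already used in the tests above are assumed.

```python
import autofit as af
import autolens as al

# Hypothetical output directory of a completed phase.
agg = af.Aggregator(directory="output/test_phase_aggregator")

# Each al.agg wrapper yields one reconstructed object per fit in the directory.
for masked_imaging in al.agg.MaskedImaging(aggregator=agg):
    # The masked dataset keeps a reference to the imaging it was built from.
    image = masked_imaging.imaging.image

for fit_imaging in al.agg.FitImaging(aggregator=agg):
    # Fits expose their masked dataset, so the original image is still reachable.
    image = fit_imaging.masked_imaging.imaging.image
```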
#### File: phase/dataset/test_phase_dataset.py
```python
from os import path
import numpy as np
import pytest
import autofit as af
from autofit.mapper.prior.prior import TuplePrior
import autolens as al
from autolens.mock import mock
pytestmark = pytest.mark.filterwarnings(
"ignore:Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of "
"`arr[seq]`. In the future this will be interpreted as an arrays index, `arr[np.arrays(seq)]`, which will result "
"either in an error or a different result."
)
directory = path.dirname(path.realpath(__file__))
class TestMakeAnalysis:
def test__positions_are_input__are_used_in_analysis(
self, image_7x7, noise_map_7x7, mask_7x7
):
# If position threshold is input (not None) and positions are input, make the positions part of the lens dataset.
imaging_7x7 = al.Imaging(
image=image_7x7,
noise_map=noise_map_7x7,
positions=al.Grid2DIrregular([(1.0, 1.0), (2.0, 2.0)]),
)
phase_imaging_7x7 = al.PhaseImaging(
search=mock.MockSearch("test_phase"),
settings=al.SettingsPhaseImaging(
settings_lens=al.SettingsLens(positions_threshold=0.2)
),
)
phase_imaging_7x7.modify_dataset(
dataset=imaging_7x7, results=mock.MockResults()
)
phase_imaging_7x7.modify_settings(
dataset=imaging_7x7, results=mock.MockResults()
)
analysis = phase_imaging_7x7.make_analysis(
dataset=imaging_7x7, mask=mask_7x7, results=mock.MockResults()
)
assert (
analysis.masked_dataset.positions.in_list[0] == np.array([1.0, 1.0])
).all()
assert (
analysis.masked_dataset.positions.in_list[1] == np.array([2.0, 2.0])
).all()
assert analysis.settings.settings_lens.positions_threshold == 0.2
        # If position threshold is input (not None) but no positions are supplied, raise an error
# with pytest.raises(exc.PhaseException):
# imaging_7x7 = al.Imaging(
# image=image_7x7, noise_map=noise_map_7x7, positions=None
# )
#
# phase_imaging_7x7 = al.PhaseImaging(
# search=mock.MockSearch("test_phase"),
# settings=al.SettingsPhaseImaging(
# settings_lens=al.SettingsLens(positions_threshold=0.2)
# ),
# )
#
# phase_imaging_7x7.modify_dataset(
# dataset=imaging_7x7, results=mock.MockResults()
# )
# phase_imaging_7x7.modify_settings(
# dataset=imaging_7x7, results=mock.MockResults()
# )
def test__auto_einstein_radius_is_used__einstein_radius_used_in_analysis(
self, imaging_7x7, mask_7x7
):
# If position threshold is input (not None) and positions are input, make the positions part of the lens dataset.
phase_imaging_7x7 = al.PhaseImaging(
search=mock.MockSearch("test_phase"),
settings=al.SettingsPhaseImaging(
settings_lens=al.SettingsLens(auto_einstein_radius_factor=None)
),
)
tracer = mock.MockTracer(einstein_radius=2.0)
phase_imaging_7x7.modify_settings(
dataset=imaging_7x7,
results=mock.MockResults(max_log_likelihood_tracer=tracer),
)
analysis = phase_imaging_7x7.make_analysis(
dataset=imaging_7x7, mask=mask_7x7, results=mock.MockResults()
)
assert analysis.settings.settings_lens.einstein_radius_estimate == None
phase_imaging_7x7 = al.PhaseImaging(
search=mock.MockSearch("test_phase"),
settings=al.SettingsPhaseImaging(
settings_lens=al.SettingsLens(auto_einstein_radius_factor=1.0)
),
)
tracer = mock.MockTracer(einstein_radius=2.0)
phase_imaging_7x7.modify_settings(
dataset=imaging_7x7,
results=mock.MockResults(max_log_likelihood_tracer=tracer),
)
analysis = phase_imaging_7x7.make_analysis(
dataset=imaging_7x7,
mask=mask_7x7,
results=mock.MockResults(max_log_likelihood_tracer=tracer),
)
assert analysis.settings.settings_lens.einstein_radius_estimate == 2.0
def test__use_border__determines_if_border_pixel_relocation_is_used(
self, imaging_7x7, mask_7x7
):
# noinspection PyTypeChecker
lens_galaxy = al.Galaxy(
redshift=0.5, mass=al.mp.SphericalIsothermal(einstein_radius=100.0)
)
source_galaxy = al.Galaxy(
redshift=1.0,
pixelization=al.pix.Rectangular(shape=(3, 3)),
regularization=al.reg.Constant(coefficient=1.0),
)
phase_imaging_7x7 = al.PhaseImaging(
galaxies=[lens_galaxy, source_galaxy],
settings=al.SettingsPhaseImaging(
settings_masked_imaging=al.SettingsMaskedImaging(
grid_inversion_class=al.Grid2D
),
settings_pixelization=al.SettingsPixelization(use_border=True),
),
search=mock.MockSearch("test_phase"),
)
analysis = phase_imaging_7x7.make_analysis(
dataset=imaging_7x7, mask=mask_7x7, results=mock.MockResults()
)
analysis.masked_dataset.grid_inversion[4] = np.array([[500.0, 0.0]])
instance = phase_imaging_7x7.model.instance_from_unit_vector([])
tracer = analysis.tracer_for_instance(instance=instance)
fit = analysis.masked_imaging_fit_for_tracer(
tracer=tracer, hyper_image_sky=None, hyper_background_noise=None
)
assert fit.inversion.mapper.source_grid_slim[4][0] == pytest.approx(
97.19584, 1.0e-2
)
assert fit.inversion.mapper.source_grid_slim[4][1] == pytest.approx(
-3.699999, 1.0e-2
)
phase_imaging_7x7 = al.PhaseImaging(
galaxies=[lens_galaxy, source_galaxy],
settings=al.SettingsPhaseImaging(
settings_masked_imaging=al.SettingsMaskedImaging(
grid_inversion_class=al.Grid2D
),
settings_pixelization=al.SettingsPixelization(use_border=False),
),
search=mock.MockSearch("test_phase"),
)
analysis = phase_imaging_7x7.make_analysis(
dataset=imaging_7x7, mask=mask_7x7, results=mock.MockResults()
)
analysis.masked_dataset.grid_inversion[4] = np.array([300.0, 0.0])
instance = phase_imaging_7x7.model.instance_from_unit_vector([])
tracer = analysis.tracer_for_instance(instance=instance)
fit = analysis.masked_imaging_fit_for_tracer(
tracer=tracer, hyper_image_sky=None, hyper_background_noise=None
)
assert fit.inversion.mapper.source_grid_slim[4][0] == pytest.approx(
200.0, 1.0e-4
)
class TestAutoPositions:
def test__updates_correct_using_factor(
self, imaging_7x7, image_7x7, noise_map_7x7, mask_7x7
):
tracer = al.Tracer.from_galaxies(
galaxies=[al.Galaxy(redshift=0.5), al.Galaxy(redshift=1.0)]
)
# Auto positioning is OFF, so use input positions + threshold.
imaging_7x7 = al.Imaging(
image=image_7x7,
noise_map=noise_map_7x7,
positions=al.Grid2DIrregular([(1.0, 1.0)]),
)
phase_imaging_7x7 = al.PhaseImaging(
search=mock.MockSearch("test_phase"),
settings=al.SettingsPhaseImaging(
settings_lens=al.SettingsLens(positions_threshold=0.1)
),
)
results = mock.MockResults(max_log_likelihood_tracer=tracer)
phase_imaging_7x7.modify_dataset(dataset=imaging_7x7, results=results)
analysis = phase_imaging_7x7.make_analysis(
dataset=imaging_7x7, mask=mask_7x7, results=results
)
assert analysis.masked_dataset.positions.in_list == [(1.0, 1.0)]
# Auto positioning is ON, but there are no previous results, so use input positions.
imaging_7x7 = al.Imaging(
image=image_7x7,
noise_map=noise_map_7x7,
positions=al.Grid2DIrregular([(1.0, 1.0)]),
)
phase_imaging_7x7 = al.PhaseImaging(
search=mock.MockSearch("test_phase"),
settings=al.SettingsPhaseImaging(
settings_lens=al.SettingsLens(
positions_threshold=0.2, auto_positions_factor=2.0
)
),
)
phase_imaging_7x7.modify_dataset(dataset=imaging_7x7, results=results)
analysis = phase_imaging_7x7.make_analysis(
dataset=imaging_7x7, mask=mask_7x7, results=results
)
assert analysis.masked_dataset.positions.in_list == [(1.0, 1.0)]
# Auto positioning is ON, there are previous results so use their new positions and threshold (which is
# multiplied by the auto_positions_factor). However, only one set of positions is computed from the previous
        # result, so the input positions are used.
imaging_7x7 = al.Imaging(
image=image_7x7,
noise_map=noise_map_7x7,
positions=al.Grid2DIrregular([(1.0, 1.0)]),
)
phase_imaging_7x7 = al.PhaseImaging(
search=mock.MockSearch("test_phase"),
settings=al.SettingsPhaseImaging(
settings_lens=al.SettingsLens(
positions_threshold=0.2, auto_positions_factor=2.0
)
),
)
results = mock.MockResults(
max_log_likelihood_tracer=tracer,
updated_positions=al.Grid2DIrregular(grid=[(2.0, 2.0)]),
updated_positions_threshold=0.3,
)
phase_imaging_7x7.modify_dataset(dataset=imaging_7x7, results=results)
analysis = phase_imaging_7x7.make_analysis(
dataset=imaging_7x7, mask=mask_7x7, results=results
)
assert analysis.masked_dataset.positions.in_list == [(1.0, 1.0)]
# Auto positioning is ON, but the tracer only has a single plane and thus no lensing, so use input positions.
imaging_7x7 = al.Imaging(
image=image_7x7,
noise_map=noise_map_7x7,
positions=al.Grid2DIrregular([(1.0, 1.0)]),
)
phase_imaging_7x7 = al.PhaseImaging(
search=mock.MockSearch("test_phase"),
settings=al.SettingsPhaseImaging(
settings_lens=al.SettingsLens(
positions_threshold=0.2, auto_positions_factor=1.0
)
),
)
tracer_x1_plane = al.Tracer.from_galaxies(galaxies=[al.Galaxy(redshift=0.5)])
results = mock.MockResults(
max_log_likelihood_tracer=tracer_x1_plane,
updated_positions=al.Grid2DIrregular(grid=[(2.0, 2.0), (3.0, 3.0)]),
updated_positions_threshold=0.3,
)
phase_imaging_7x7.modify_dataset(dataset=imaging_7x7, results=results)
analysis = phase_imaging_7x7.make_analysis(
dataset=imaging_7x7, mask=mask_7x7, results=results
)
assert analysis.masked_dataset.positions.in_list == [(1.0, 1.0)]
# Auto positioning is ON, there are previous results so use their new positions and threshold (which is
# multiplied by the auto_positions_factor). Multiple positions are available so these are now used.
imaging_7x7 = al.Imaging(
image=image_7x7,
noise_map=noise_map_7x7,
positions=al.Grid2DIrregular([(1.0, 1.0)]),
)
phase_imaging_7x7 = al.PhaseImaging(
search=mock.MockSearch("test_phase"),
settings=al.SettingsPhaseImaging(
settings_lens=al.SettingsLens(
positions_threshold=0.2, auto_positions_factor=2.0
)
),
)
results = mock.MockResults(
max_log_likelihood_tracer=tracer,
updated_positions=al.Grid2DIrregular(grid=[(2.0, 2.0), (3.0, 3.0)]),
updated_positions_threshold=0.3,
)
phase_imaging_7x7.modify_dataset(dataset=imaging_7x7, results=results)
analysis = phase_imaging_7x7.make_analysis(
dataset=imaging_7x7, mask=mask_7x7, results=results
)
assert analysis.masked_dataset.positions.in_list == [(2.0, 2.0), (3.0, 3.0)]
# Auto positioning is Off, but there are previous results with updated positions relative to the input
# positions, so use those with their positions threshold.
imaging_7x7 = al.Imaging(
image=image_7x7,
noise_map=noise_map_7x7,
positions=al.Grid2DIrregular([(2.0, 2.0)]),
)
phase_imaging_7x7 = al.PhaseImaging(
search=mock.MockSearch("test_phase"),
settings=al.SettingsPhaseImaging(
settings_lens=al.SettingsLens(positions_threshold=0.1)
),
)
results = mock.MockResults(
max_log_likelihood_tracer=tracer,
positions=al.Grid2DIrregular(grid=[(3.0, 3.0), (4.0, 4.0)]),
updated_positions_threshold=0.3,
)
phase_imaging_7x7.modify_dataset(dataset=imaging_7x7, results=results)
analysis = phase_imaging_7x7.make_analysis(
dataset=imaging_7x7, mask=mask_7x7, results=results
)
assert analysis.masked_dataset.positions.in_list == [(3.0, 3.0), (4.0, 4.0)]
def test__uses_auto_update_factor(self, image_7x7, noise_map_7x7, mask_7x7):
tracer = al.Tracer.from_galaxies(
galaxies=[al.Galaxy(redshift=0.5), al.Galaxy(redshift=1.0)]
)
# Auto positioning is OFF, so use input positions + threshold.
imaging_7x7 = al.Imaging(
image=image_7x7,
noise_map=noise_map_7x7,
positions=al.Grid2DIrregular([(1.0, 1.0)]),
)
phase_imaging_7x7 = al.PhaseImaging(
search=mock.MockSearch("test_phase"),
settings=al.SettingsPhaseImaging(
settings_lens=al.SettingsLens(positions_threshold=0.1)
),
)
results = mock.MockResults(max_log_likelihood_tracer=tracer)
phase_imaging_7x7.modify_dataset(dataset=imaging_7x7, results=results)
phase_imaging_7x7.modify_settings(dataset=imaging_7x7, results=results)
analysis = phase_imaging_7x7.make_analysis(
dataset=imaging_7x7, mask=mask_7x7, results=results
)
assert analysis.settings.settings_lens.positions_threshold == 0.1
        # Auto positioning is ON, but there are no previous results, so use the separation of the positions x the positions factor.
imaging_7x7 = al.Imaging(
image=image_7x7,
noise_map=noise_map_7x7,
positions=al.Grid2DIrregular([(1.0, 0.0), (-1.0, 0.0)]),
)
phase_imaging_7x7 = al.PhaseImaging(
search=mock.MockSearch("test_phase"),
settings=al.SettingsPhaseImaging(
settings_lens=al.SettingsLens(
positions_threshold=0.1, auto_positions_factor=1.0
)
),
)
results = mock.MockResults(max_log_likelihood_tracer=tracer)
phase_imaging_7x7.modify_dataset(dataset=imaging_7x7, results=results)
phase_imaging_7x7.modify_settings(dataset=imaging_7x7, results=results)
analysis = phase_imaging_7x7.make_analysis(
dataset=imaging_7x7, mask=mask_7x7, results=results
)
assert analysis.settings.settings_lens.positions_threshold == 2.0
# Auto position is ON, and same as above but with a factor of 3.0 which increases the threshold.
imaging_7x7 = al.Imaging(
image=image_7x7,
noise_map=noise_map_7x7,
positions=al.Grid2DIrregular([(1.0, 0.0), (-1.0, 0.0)]),
)
phase_imaging_7x7 = al.PhaseImaging(
search=mock.MockSearch("test_phase"),
settings=al.SettingsPhaseImaging(
settings_lens=al.SettingsLens(
positions_threshold=0.2, auto_positions_factor=3.0
)
),
)
results = mock.MockResults(
max_log_likelihood_tracer=tracer, updated_positions_threshold=0.2
)
phase_imaging_7x7.modify_dataset(dataset=imaging_7x7, results=results)
phase_imaging_7x7.modify_settings(dataset=imaging_7x7, results=results)
analysis = phase_imaging_7x7.make_analysis(
dataset=imaging_7x7, mask=mask_7x7, results=results
)
assert analysis.settings.settings_lens.positions_threshold == 6.0
# Auto position is ON, and same as above but with a minimum auto positions threshold that rounds the value up.
imaging_7x7 = al.Imaging(
image=image_7x7,
noise_map=noise_map_7x7,
positions=al.Grid2DIrregular([(1.0, 0.0), (-1.0, 0.0)]),
)
phase_imaging_7x7 = al.PhaseImaging(
search=mock.MockSearch("test_phase"),
settings=al.SettingsPhaseImaging(
settings_lens=al.SettingsLens(
positions_threshold=0.2,
auto_positions_factor=3.0,
auto_positions_minimum_threshold=10.0,
)
),
)
results = mock.MockResults(
max_log_likelihood_tracer=tracer, updated_positions_threshold=0.2
)
phase_imaging_7x7.modify_dataset(dataset=imaging_7x7, results=results)
phase_imaging_7x7.modify_settings(dataset=imaging_7x7, results=results)
analysis = phase_imaging_7x7.make_analysis(
dataset=imaging_7x7, mask=mask_7x7, results=results
)
assert analysis.settings.settings_lens.positions_threshold == 10.0
# Auto positioning is ON, but positions are None and it cannot find new positions so no threshold.
imaging_7x7 = al.Imaging(
image=image_7x7, noise_map=noise_map_7x7, positions=None
)
phase_imaging_7x7 = al.PhaseImaging(
search=mock.MockSearch("test_phase"),
settings=al.SettingsPhaseImaging(
settings_lens=al.SettingsLens(auto_positions_factor=1.0)
),
)
results = mock.MockResults(max_log_likelihood_tracer=tracer)
phase_imaging_7x7.modify_dataset(dataset=imaging_7x7, results=results)
phase_imaging_7x7.modify_settings(dataset=imaging_7x7, results=results)
analysis = phase_imaging_7x7.make_analysis(
dataset=imaging_7x7, mask=mask_7x7, results=results
)
assert analysis.settings.settings_lens.positions_threshold == None
class TestExtensions:
def test__extend_with_stochastic_phase__sets_up_model_correctly(self, mask_7x7):
galaxies = af.ModelInstance()
galaxies.lens = al.Galaxy(
redshift=0.5,
light=al.lp.SphericalSersic(),
mass=al.mp.SphericalIsothermal(),
)
galaxies.source = al.Galaxy(
redshift=1.0,
pixelization=al.pix.VoronoiBrightnessImage(),
regularization=al.reg.AdaptiveBrightness(),
)
phase = al.PhaseImaging(
search=mock.MockSearch(),
galaxies=af.CollectionPriorModel(lens=al.GalaxyModel(redshift=0.5)),
)
phase_extended = phase.extend_with_stochastic_phase()
model = phase_extended.make_model(instance=galaxies)
assert isinstance(model.lens.mass.centre, TuplePrior)
assert isinstance(model.lens.light.intensity, float)
assert isinstance(model.source.pixelization.pixels, int)
assert isinstance(model.source.regularization.inner_coefficient, float)
phase_extended = phase.extend_with_stochastic_phase(include_lens_light=True)
model = phase_extended.make_model(instance=galaxies)
assert isinstance(model.lens.mass.centre, TuplePrior)
assert isinstance(model.lens.light.intensity, af.UniformPrior)
assert isinstance(model.source.pixelization.pixels, int)
assert isinstance(model.source.regularization.inner_coefficient, float)
phase_extended = phase.extend_with_stochastic_phase(include_pixelization=True)
model = phase_extended.make_model(instance=galaxies)
assert isinstance(model.lens.mass.centre, TuplePrior)
assert isinstance(model.lens.light.intensity, float)
assert isinstance(model.source.pixelization.pixels, af.UniformPrior)
assert not isinstance(
model.source.regularization.inner_coefficient, af.UniformPrior
)
phase_extended = phase.extend_with_stochastic_phase(include_regularization=True)
model = phase_extended.make_model(instance=galaxies)
assert isinstance(model.lens.mass.centre, TuplePrior)
assert isinstance(model.lens.light.intensity, float)
assert isinstance(model.source.pixelization.pixels, int)
assert isinstance(
model.source.regularization.inner_coefficient, af.UniformPrior
)
phase = al.PhaseInterferometer(
search=mock.MockSearch(),
real_space_mask=mask_7x7,
galaxies=af.CollectionPriorModel(lens=al.GalaxyModel(redshift=0.5)),
)
phase_extended = phase.extend_with_stochastic_phase()
model = phase_extended.make_model(instance=galaxies)
assert isinstance(model.lens.mass.centre, TuplePrior)
assert isinstance(model.lens.light.intensity, float)
assert isinstance(model.source.pixelization.pixels, int)
assert isinstance(model.source.regularization.inner_coefficient, float)
```
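For orientation, the configuration pattern the auto-positions tests repeat is sketched below. The arrays are tiny stand-ins built with the same `al.Array2D.ones` call used elsewhere in these tests, and the numeric settings are arbitrary; treat this as an illustrative sketch rather than a recommended setup.

```python
import autolens as al

# Tiny stand-in data (the real tests use 7x7 fixtures).
image = al.Array2D.ones(shape_native=(7, 7), pixel_scales=0.1)
noise_map = al.Array2D.ones(shape_native=(7, 7), pixel_scales=0.1)

# Positions ride along with the dataset as a Grid2DIrregular.
imaging = al.Imaging(
    image=image,
    noise_map=noise_map,
    positions=al.Grid2DIrregular([(1.0, 1.0)]),
)

# The positions threshold and its automatic update factor live in SettingsLens,
# which reaches the phase through SettingsPhaseImaging.
settings = al.SettingsPhaseImaging(
    settings_lens=al.SettingsLens(positions_threshold=0.2, auto_positions_factor=2.0)
)
```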
#### File: phase/imaging/test_analysis_imaging.py
```python
from os import path
import autofit as af
import autolens as al
from autolens import exc
import pytest
from astropy import cosmology as cosmo
from autolens.fit.fit import FitImaging
from autolens.mock import mock
import numpy as np
pytestmark = pytest.mark.filterwarnings(
"ignore:Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of "
"`arr[seq]`. In the future this will be interpreted as an arrays index, `arr[np.arrays(seq)]`, which will result "
"either in an error or a different result."
)
directory = path.dirname(path.realpath(__file__))
class TestLogLikelihoodFunction:
def test__positions_do_not_trace_within_threshold__raises_exception(
self, phase_imaging_7x7, imaging_7x7, mask_7x7
):
imaging_7x7.positions = al.Grid2DIrregular([(1.0, 100.0), (200.0, 2.0)])
phase_imaging_7x7 = al.PhaseImaging(
galaxies=dict(
lens=al.Galaxy(redshift=0.5, mass=al.mp.SphericalIsothermal()),
source=al.Galaxy(redshift=1.0),
),
settings=al.SettingsPhaseImaging(
settings_lens=al.SettingsLens(positions_threshold=0.01)
),
search=mock.MockSearch(),
)
analysis = phase_imaging_7x7.make_analysis(
dataset=imaging_7x7, mask=mask_7x7, results=mock.MockResults()
)
instance = phase_imaging_7x7.model.instance_from_unit_vector([])
with pytest.raises(exc.RayTracingException):
analysis.log_likelihood_function(instance=instance)
class TestFit:
def test__fit_using_imaging(self, imaging_7x7, mask_7x7, samples_with_result):
phase_imaging_7x7 = al.PhaseImaging(
galaxies=dict(
lens=al.GalaxyModel(redshift=0.5, light=al.lp.EllipticalSersic),
source=al.GalaxyModel(redshift=1.0, light=al.lp.EllipticalSersic),
),
search=mock.MockSearch(samples=samples_with_result),
)
result = phase_imaging_7x7.run(
dataset=imaging_7x7, mask=mask_7x7, results=mock.MockResults()
)
assert isinstance(result.instance.galaxies[0], al.Galaxy)
assert isinstance(result.instance.galaxies[0], al.Galaxy)
def test__figure_of_merit__matches_correct_fit_given_galaxy_profiles(
self, imaging_7x7, mask_7x7
):
lens_galaxy = al.Galaxy(
redshift=0.5, light=al.lp.EllipticalSersic(intensity=0.1)
)
phase_imaging_7x7 = al.PhaseImaging(
galaxies=dict(lens=lens_galaxy),
settings=al.SettingsPhaseImaging(
settings_masked_imaging=al.SettingsMaskedImaging(sub_size=1)
),
search=mock.MockSearch(),
)
analysis = phase_imaging_7x7.make_analysis(
dataset=imaging_7x7, mask=mask_7x7, results=mock.MockResults()
)
instance = phase_imaging_7x7.model.instance_from_unit_vector([])
fit_figure_of_merit = analysis.log_likelihood_function(instance=instance)
masked_imaging = al.MaskedImaging(
imaging=imaging_7x7,
mask=mask_7x7,
settings=al.SettingsMaskedImaging(sub_size=1),
)
tracer = analysis.tracer_for_instance(instance=instance)
fit = al.FitImaging(masked_imaging=masked_imaging, tracer=tracer)
assert fit.log_likelihood == fit_figure_of_merit
def test__figure_of_merit__includes_hyper_image_and_noise__matches_fit(
self, imaging_7x7, mask_7x7
):
hyper_image_sky = al.hyper_data.HyperImageSky(sky_scale=1.0)
hyper_background_noise = al.hyper_data.HyperBackgroundNoise(noise_scale=1.0)
lens_galaxy = al.Galaxy(
redshift=0.5, light=al.lp.EllipticalSersic(intensity=0.1)
)
phase_imaging_7x7 = al.PhaseImaging(
galaxies=dict(lens=lens_galaxy),
hyper_image_sky=hyper_image_sky,
hyper_background_noise=hyper_background_noise,
settings=al.SettingsPhaseImaging(
settings_masked_imaging=al.SettingsMaskedImaging(sub_size=4)
),
search=mock.MockSearch(),
)
analysis = phase_imaging_7x7.make_analysis(
dataset=imaging_7x7, mask=mask_7x7, results=mock.MockResults()
)
instance = phase_imaging_7x7.model.instance_from_unit_vector([])
fit_figure_of_merit = analysis.log_likelihood_function(instance=instance)
assert analysis.masked_dataset.mask.sub_size == 4
masked_imaging = al.MaskedImaging(
imaging=imaging_7x7,
mask=mask_7x7,
settings=al.SettingsMaskedImaging(sub_size=4),
)
tracer = analysis.tracer_for_instance(instance=instance)
fit = FitImaging(
masked_imaging=masked_imaging,
tracer=tracer,
hyper_image_sky=hyper_image_sky,
hyper_background_noise=hyper_background_noise,
)
assert fit.log_likelihood == fit_figure_of_merit
def test__uses_hyper_fit_correctly(self, masked_imaging_7x7):
galaxies = af.ModelInstance()
galaxies.lens = al.Galaxy(
redshift=0.5,
light=al.lp.EllipticalSersic(intensity=1.0),
mass=al.mp.SphericalIsothermal,
)
galaxies.source = al.Galaxy(redshift=1.0, light=al.lp.EllipticalSersic())
instance = af.ModelInstance()
instance.galaxies = galaxies
lens_hyper_image = al.Array2D.ones(shape_native=(3, 3), pixel_scales=0.1)
lens_hyper_image[4] = 10.0
hyper_model_image = al.Array2D.full(
fill_value=0.5, shape_native=(3, 3), pixel_scales=0.1
)
hyper_galaxy_image_path_dict = {("galaxies", "lens"): lens_hyper_image}
results = mock.MockResults(
use_as_hyper_dataset=True,
hyper_galaxy_image_path_dict=hyper_galaxy_image_path_dict,
hyper_model_image=hyper_model_image,
)
analysis = al.PhaseImaging.Analysis(
masked_imaging=masked_imaging_7x7,
settings=al.SettingsPhaseImaging(),
results=results,
cosmology=cosmo.Planck15,
)
hyper_galaxy = al.HyperGalaxy(
contribution_factor=1.0, noise_factor=1.0, noise_power=1.0
)
instance.galaxies.lens.hyper_galaxy = hyper_galaxy
fit_likelihood = analysis.log_likelihood_function(instance=instance)
g0 = al.Galaxy(
redshift=0.5,
light_profile=instance.galaxies.lens.light,
mass_profile=instance.galaxies.lens.mass,
hyper_galaxy=hyper_galaxy,
hyper_model_image=hyper_model_image,
hyper_galaxy_image=lens_hyper_image,
hyper_minimum_value=0.0,
)
g1 = al.Galaxy(redshift=1.0, light_profile=instance.galaxies.source.light)
tracer = al.Tracer.from_galaxies(galaxies=[g0, g1])
fit = FitImaging(masked_imaging=masked_imaging_7x7, tracer=tracer)
assert (fit.tracer.galaxies[0].hyper_galaxy_image == lens_hyper_image).all()
assert fit_likelihood == fit.log_likelihood
def test__figure_of_merit__with_stochastic_likelihood_resamples_matches_galaxy_profiles(
self, masked_imaging_7x7
):
galaxies = af.ModelInstance()
galaxies.lens = al.Galaxy(
redshift=0.5, mass=al.mp.SphericalIsothermal(einstein_radius=1.2)
)
galaxies.source = al.Galaxy(
redshift=1.0,
pixelization=al.pix.VoronoiBrightnessImage(pixels=5),
regularization=al.reg.Constant(),
)
instance = af.ModelInstance()
instance.galaxies = galaxies
lens_hyper_image = al.Array2D.ones(shape_native=(3, 3), pixel_scales=0.1)
lens_hyper_image[4] = 10.0
source_hyper_image = al.Array2D.ones(shape_native=(3, 3), pixel_scales=0.1)
source_hyper_image[4] = 10.0
hyper_model_image = al.Array2D.full(
fill_value=0.5, shape_native=(3, 3), pixel_scales=0.1
)
hyper_galaxy_image_path_dict = {
("galaxies", "lens"): lens_hyper_image,
("galaxies", "source"): source_hyper_image,
}
results = mock.MockResults(
use_as_hyper_dataset=True,
hyper_galaxy_image_path_dict=hyper_galaxy_image_path_dict,
hyper_model_image=hyper_model_image,
)
analysis = al.PhaseImaging.Analysis(
masked_imaging=masked_imaging_7x7,
settings=al.SettingsPhaseImaging(
settings_lens=al.SettingsLens(stochastic_likelihood_resamples=2)
),
results=results,
cosmology=cosmo.Planck15,
)
fit_figure_of_merit = analysis.log_likelihood_function(instance=instance)
# tracer = analysis.tracer_for_instance(instance=instance)
# settings_pixelization = al.SettingsPixelization(kmeans_seed=1)
#
# fit_0 = al.FitImaging(
# masked_imaging=masked_imaging_7x7,
# tracer=tracer,
# settings_pixelization=settings_pixelization,
# settings_inversion=analysis.settings.settings_inversion
# )
#
# settings_pixelization = al.SettingsPixelization(kmeans_seed=2)
#
# fit_1 = al.FitImaging(
# masked_imaging=masked_imaging_7x7,
# tracer=tracer,
# settings_pixelization=settings_pixelization,
# settings_inversion=analysis.settings.settings_inversion
# )
# assert fit_figure_of_merit == pytest.approx(
# np.mean([-22.947017744853934, -29.10665765185219]), 1.0e-8
# )
def test__stochastic_histogram_for_instance(self, masked_imaging_7x7):
galaxies = af.ModelInstance()
galaxies.lens = al.Galaxy(
redshift=0.5, mass=al.mp.SphericalIsothermal(einstein_radius=1.2)
)
galaxies.source = al.Galaxy(
redshift=1.0,
pixelization=al.pix.VoronoiBrightnessImage(pixels=5),
regularization=al.reg.Constant(),
)
instance = af.ModelInstance()
instance.galaxies = galaxies
lens_hyper_image = al.Array2D.ones(shape_native=(3, 3), pixel_scales=0.1)
lens_hyper_image[4] = 10.0
source_hyper_image = al.Array2D.ones(shape_native=(3, 3), pixel_scales=0.1)
source_hyper_image[4] = 10.0
hyper_model_image = al.Array2D.full(
fill_value=0.5, shape_native=(3, 3), pixel_scales=0.1
)
hyper_galaxy_image_path_dict = {
("galaxies", "lens"): lens_hyper_image,
("galaxies", "source"): source_hyper_image,
}
results = mock.MockResults(
use_as_hyper_dataset=True,
hyper_galaxy_image_path_dict=hyper_galaxy_image_path_dict,
hyper_model_image=hyper_model_image,
)
analysis = al.PhaseImaging.Analysis(
masked_imaging=masked_imaging_7x7,
settings=al.SettingsPhaseImaging(
settings_lens=al.SettingsLens(stochastic_samples=2)
),
results=results,
cosmology=cosmo.Planck15,
)
log_evidences = analysis.stochastic_log_evidences_for_instance(
instance=instance
)
assert len(log_evidences) == 2
assert log_evidences[0] != log_evidences[1]
``` |
{
"source": "JonathanFrederick/job-hunt",
"score": 3
} |
#### File: JonathanFrederick/job-hunt/company_scripts.py
```python
from selenium import webdriver
from companies.red_hat import red_hat
from app import db
from models import Company
def print_result(info):
"""Takes in a dictionary with keys for 'company', 'title', 'url',
and 'description' and prints them neatly to the terminal"""
for key in ['company', 'title', 'url', 'description']:
assert key in info.keys(), \
"The key '{}' is not in the dictionary".format(key)
assert isinstance(info[key], str), \
"The value at '{}' is not a string".format(key)
print('{} - {}'.format(info['company'], info['title']))
print(info['url'])
print(info['description'])
def main():
driver = webdriver.Firefox()
company_dict = {
"Red Hat": red_hat,
}
interesting_companies = db.session.query(Company) \
.filter(Company.interest == True)
for comp in interesting_companies:
company_dict[comp.name](driver)
driver.close()
# print_result({'company': 'comp',
# 'title': 'title',
# 'url': 'url.com',
# 'description': 'things and stuff'})
if __name__ == "__main__":
main()
```
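The `company_dict` in `main` is the extension point here: each company name stored in the database maps to a scraper function that receives the shared Selenium driver. A hypothetical second scraper would be registered like this (the `acme` module, function and URL are invented for illustration):

```python
# companies/acme.py (hypothetical scraper module)
def acme(driver):
    # Use the shared driver to open the careers page and collect postings.
    driver.get("https://example.com/careers")
    # ... parse the listings and hand each one to print_result(...) ...

# company_scripts.py
company_dict = {
    "Red Hat": red_hat,
    "Acme": acme,  # new scrapers only need to be added to this mapping
}
```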
#### File: migrations/versions/bb9f97cf66d7_.py
```python
revision = '<KEY>'
down_revision = '<KEY>'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from sqlalchemy.types import String
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('companies',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=24), nullable=True),
sa.Column('interest', sa.Boolean(), nullable=True),
sa.Column('careers_url', sa.String(), nullable=True),
sa.Column('last_checked', sa.DateTime(), nullable=True),
sa.Column('keywords', postgresql.ARRAY(String(length=32)), nullable=True),
sa.Column('locations', postgresql.ARRAY(String(length=32)), nullable=True),
sa.Column('departments', postgresql.ARRAY(String(length=16)), nullable=True),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('companies')
### end Alembic commands ###
```
#### File: migrations/versions/c8161cd56b29_.py
```python
revision = '<KEY>'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('listings',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('url', sa.String(), nullable=True),
sa.Column('company', sa.String(length=32), nullable=True),
sa.Column('title', sa.String(length=128), nullable=True),
sa.Column('status', sa.String(length=10), nullable=True),
sa.Column('scraped_dt', sa.DateTime(), nullable=True),
sa.Column('last_seen', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('listings')
### end Alembic commands ###
```
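Neither revision script is imported directly; Alembic discovers them through their `revision`/`down_revision` identifiers (redacted to `<KEY>` here) and applies them in order. A minimal programmatic invocation is sketched below, assuming an `alembic.ini` is configured for this project — the repository may instead drive this through Flask-Migrate, so treat it as illustrative only.

```python
from alembic import command
from alembic.config import Config

# Location of the Alembic configuration file is an assumption.
alembic_cfg = Config("alembic.ini")

# Apply all pending revisions (creates the listings and companies tables).
command.upgrade(alembic_cfg, "head")

# Roll back the most recent revision if needed.
command.downgrade(alembic_cfg, "-1")
```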
#### File: JonathanFrederick/job-hunt/models.py
```python
from app import db
from sqlalchemy.dialects.postgresql import ARRAY
from sqlalchemy.types import String
from datetime import datetime
class Listing(db.Model):
__tablename__ = 'listings'
id = db.Column(db.Integer, primary_key=True)
url = db.Column(db.String())
company = db.Column(db.String(32))
title = db.Column(db.String(128))
status = db.Column(db.String(10))
scraped_dt = db.Column(db.DateTime())
last_seen = db.Column(db.DateTime())
def seen_now(self):
"""Function for updating an entry on successive sightings"""
self.last_seen = datetime.now()
def __init__(self, url, company, title):
self.url = url
self.company = company
self.title = title
self.status = "NEW"
self.scraped_dt = datetime.now()
self.last_seen = datetime.now()
def __repr__(self):
return '{} : {}'.format(self.company, self.title)
class Company(db.Model):
__tablename__ = 'companies'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(24))
interest = db.Column(db.Boolean())
careers_url = db.Column(db.String())
last_checked = db.Column(db.DateTime())
keywords = db.Column(ARRAY(String(32)))
locations = db.Column(ARRAY(String(32)))
departments = db.Column(ARRAY(String(16)))
def __init__(self, name, careers_url, keywords, locations, departments):
self.name = name
self.interest = True
self.careers_url = careers_url
self.last_checked = datetime.now()
self.keywords = keywords
self.locations = locations
self.departments = departments
def __repr__(self):
        return '{} ({})'.format(self.name, self.interest)
``` |
{
"source": "JonathanFrederick/pyquest",
"score": 3
} |
#### File: JonathanFrederick/pyquest/models.py
```python
from random import choice
class Player():
def __init__(self, name="<NAME>"):
self.name = name
self.level = 1
self.exp = 0
def get_class(self, p_class):
p_class = p_class.lower()
if p_class in ['warrior', 'mage', 'archer']:
self.p_class = p_class
return True
else:
return False
class Room():
def __init__(self):
self.floor = choice(['chest', 'tree', 'rock'])
``` |
{
"source": "JonathanFromm/HackerspaceTemplatePackage",
"score": 2
} |
#### File: hackerspace_specific/noisebridge_sf_ca_us/flaschentaschen.py
```python
import requests
def showText(text):
try:
requests.post('http://pegasus.noise:4444/api/text', {'text': text})
except:
        print("Couldn't talk to Flaschentaschen. Make sure to deactivate your VPN connection and be in the local Noisebridge network.")
```
#### File: hackerspace_specific/noisebridge_sf_ca_us/marry.py
```python
import requests
import random
facts = [
'Every Tuesday before the General Meeting we have an extended tour at Noisebridge. Learn all kinds of exciting secrets about Noisebridge and how to give a great tour. Ask Ryan for more details',
'The door bell makes different sounds, depending on if someone rings downstairs or upstairs',
    'You can control what I say. Just visit pegasus.noise:5000 and enter a text',
'You can display text, images and videos on Flaschentaschen. Just visit pegasus.noise:9000 and enter a text or upload an image or video',
'We have a library with all kinds of awesome books!',
'Hackerspaces exist all over the world. And the movement actually started in Germany. See a map of all hackerspaces on hackerspaces.org',
    "Everyone can organize events at Noisebridge. In fact it's a great way to introduce more people to Noisebridge",
    'Every Tuesday evening we have our weekly General Meeting, where we talk about Noisebridge, announcements and more. In case you want to organize an event at Noisebridge, this is also a great chance to announce your event.'
]
def speak(text, intro='Did you know?'):
try:
# make marry speak
parts = text.split('. ')
if intro:
requests.get('http://pegasus.noise:5000?text='+intro)
for part in parts:
requests.get('http://pegasus.noise:5000?text=' +
part.replace('.', ' dot ').replace(':', ' colon '))
except:
        print("Couldn't talk to marry. Make sure to deactivate your VPN connection and be in the local Noisebridge network.")
def interestingFacts():
entry_num = random.randint(0, len(facts)-1)
speak(facts[entry_num])
def weeklyMeetingReminder():
speak('Attention attention everyone. The Weekly General Meeting happens in 10 minutes in the Hackatorium. Please join us')
```
#### File: management/commands/publish_events.py
```python
from django.core.management.base import BaseCommand
from hackerspace.models import Event
from hackerspace.log import log
class Command(BaseCommand):
help = "Publish new events after 24hour"
def handle(self, *args, **options):
log('publish_events.py')
Event.objects.QUERYSET__not_approved().QUERYSET__older_then_24h().publish()
```
#### File: hackerspace/templatetags/secrets.py
```python
from django.template import Library
from getKey import BOOLEAN__key_exists, STR__get_key
register = Library()
@register.filter
def key_exists(str_key_name):
return BOOLEAN__key_exists(str_key_name)
@register.filter
def get_key(str_key_name):
return STR__get_key(str_key_name)
```
#### File: hackerspace/templatetags/translate.py
```python
from django.template import Library
register = Library()
@register.filter
def landingpage(text, language):
from django.template.loader import get_template
try:
return get_template('translations/landingpage/'+language+'.html').render({
'word': text
})
except:
try:
return get_template('translations/landingpage/english.html').render({
'word': text
})
except:
return text
@register.filter
def donate(text, language):
from django.template.loader import get_template
try:
return get_template('translations/donate/'+language+'.html').render({
'word': text
})
except:
try:
return get_template('translations/donate/english.html').render({
'word': text
})
except:
return text
@register.filter
def menu(text, language):
from django.template.loader import get_template
try:
return get_template('translations/menu/'+language+'.html').render({
'word': text
})
except:
try:
return get_template('translations/menu/english.html').render({
'word': text
})
except:
return text
@register.filter
def values(text, language):
from django.template.loader import get_template
try:
return get_template('translations/values/'+language+'.html').render({
'word': text
})
except:
try:
return get_template('translations/values/english.html').render({
'word': text
})
except:
return text
@register.filter
def events(text, language):
from django.template.loader import get_template
try:
return get_template('translations/events/'+language+'.html').render({
'word': text
})
except:
try:
return get_template('translations/events/english.html').render({
'word': text
})
except:
return text
@register.filter
def photos(text, language):
from django.template.loader import get_template
try:
return get_template('translations/photos/'+language+'.html').render({
'word': text
})
except:
try:
return get_template('translations/photos/english.html').render({
'word': text
})
except:
return text
``` |
{
"source": "JonathanFung13/portfolio-analysis",
"score": 3
} |
#### File: JonathanFung13/portfolio-analysis/ml_fund_manager.py
```python
import forecaster as fc
import optimizer as opt
import trader as td
import datetime as dt
import utilities as util
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def calc_start_date(end_date=dt.datetime(2017,1,1), data_size=12):
return end_date-dt.timedelta(weeks=int(data_size * 52/12))
def run_today(start_date=dt.datetime(2015,1,1), end_date=dt.datetime(2017,1,1), n_days=21, data_size=12,
myport=['AAPL', 'GOOG'], allocations=[0.5,0.5],
train_size=0.7, max_k=50, max_trade_size=0.1, gen_plot=False, verbose=False, savelogs=False):
"""
:param start_date: Beginning of time period
:param end_date: End of time period
:param n_days: Number of days into the future to predict the daily returns of a fund
:param data_size: The number of months of data to use in the machine learning model.
:param myport: The funds available in your portfolio
:param allocations: The percentage of your portfolio invested in the funds
    :param train_size: The percentage of data used for training the ML model, with the remainder used for testing.
:param max_k: Maximum number of neighbors used in kNN
:param max_trade_size: The maximum percentage of your portfolio permitted to be traded in any one transaction.
:param gen_plot: Boolean to see if you want to plot results
:param verbose: Boolean to print out information during execution of application.
    :return: Tuple of the new allocations and the trade date
"""
start_date = calc_start_date(end_date, data_size)#end_date - dt.timedelta(weeks=int(data_size * 52/12))
#print('start:', start_date, 'end:', end_date)
if verbose: print('-'*20 + '\nFORECAST\n' + '-'*20)
forecast = fc.forecast(start_date, end_date, symbols=myport, train_size=train_size,
n_days=n_days, max_k=max_k, gen_plot=gen_plot, verbose=verbose, savelogs=savelogs)
if verbose: print('\n'+'-'*20 + '\nOPTIMIZE\n' + '-'*20)
target_allocations = opt.optimize_return(forecast, myport, allocations, gen_plot=gen_plot, verbose=verbose, savelogs=savelogs)
if verbose: print('\n' + '-'*20 + '\nORDERS\n' + '-'*20)
trade_date = forecast.index.max()
orders = td.create_orders(myport, allocations, target_allocations, trade_date=trade_date,max_trade_size=max_trade_size, verbose=verbose, savelogs=savelogs)
if verbose: print(orders)
new_allocations = allocations.copy()
for i in range(orders.shape[0]):
# fix this code so that the correct allocations are updated!
index = myport.index(orders.loc[i, 'Symbol'])
#symbol = orders.loc[i, 'Symbol']
if orders.loc[i, 'Action'] == 'SELL':
new_allocations[index] -= orders.loc[i, 'Quantity']
else:
new_allocations[index] += orders.loc[i, 'Quantity']
adr_current, vol_current, sr_current, pv_current = util.compute_returns(forecast, allocations=allocations)
adr_target, vol_target, sr_target, pv_target = util.compute_returns(forecast, allocations=target_allocations)
adr_new, vol_new, sr_new, pv_new = util.compute_returns(forecast, allocations=new_allocations)
if verbose:
print("Portfolios:", "Current", "Target","New")
print("Daily return: %.5f %.5f %.5f" % (adr_current, adr_target, adr_new))
print("Daily Risk: %.5f %.5f %.5f" % (vol_current, vol_target, vol_new))
print("Sharpe Ratio: %.5f %.5f %.5f" % (sr_current, sr_target, sr_new))
print("Return vs Risk: %.5f %.5f %.5f" % (adr_current/vol_current, adr_target/vol_target, adr_new/vol_new))
print("\nALLOCATIONS\n" + "-" * 40)
print("Symbol", "Current", "Target", 'New')
for i, symbol in enumerate(myport):
print("%s %.3f %.3f %.3f" %
(symbol, allocations[i], target_allocations[i], new_allocations[i]))
# Compare daily portfolio value with SPY using a normalized plot
if gen_plot:
fig, ax = plt.subplots()
ax.scatter(vol_current, adr_current, c='green', s=15, alpha=0.5) # Current portfolio
ax.scatter(vol_target, adr_target, c='red', s=15, alpha=0.5) # ef
ax.scatter(vol_new, adr_new, c='black', s=25, alpha=0.75) # ef
ax.set_xlabel('St. Dev. Daily Returns')
ax.set_ylabel('Mean Daily Returns')
#ax.set_xlim(min(vol)/1.5, max(vol)*1.5)
#ax.set_ylim(min(adr)/1.5, max(adr)*1.5)
ax.grid()
ax.grid(linestyle=':')
fig.tight_layout()
plt.show()
# add code to plot here
df_temp = pd.concat([pv_current, pv_target, pv_new], keys=['Current', 'Target', 'New'], axis=1)
df_temp = df_temp / df_temp.ix[0, :]
util.plot_data(df_temp, 'Forecasted Daily portfolio value and SPY', 'Date-21', 'Normalized Price')
if False: # meh was going to plot portfolio values for the last year but trying something else now
prior_prices = util.load_data(myport, start_date, end_date)
prior_prices.fillna(method='ffill', inplace=True)
prior_prices.fillna(method='bfill', inplace=True)
#prices_SPY = prior_prices['SPY'] # SPY prices, for benchmark comparison
prior_prices = prior_prices[myport] # prices of portfolio symbols
forecast_prices = forecast * prior_prices
time_span = pd.date_range(forecast.index.min(), end_date + dt.timedelta(days=n_days*2))
forecast_prices = forecast_prices.reindex(time_span)
forecast_prices = forecast_prices.shift(periods=n_days*2)
forecast_prices = forecast_prices.dropna()
forecast_prices = pd.concat([prior_prices, forecast_prices], axis=0)
adr_current, vol_current, sr_current, pv_current = util.compute_returns(forecast_prices, allocations=allocations)
adr_target, vol_target, sr_target, pv_target = util.compute_returns(forecast_prices, allocations=target_allocations)
adr_new, vol_new, sr_new, pv_new = util.compute_returns(forecast_prices, allocations=new_allocations)
df_temp = pd.concat([pv_current, pv_target, pv_new], keys=['Current', 'Target', 'New'], axis=1)
df_temp = df_temp / df_temp.ix[0, :]
util.plot_data(df_temp, 'Daily portfolio value and SPY', 'Date', 'Normalized Price')
return new_allocations, trade_date
def test_experiment_one(n_days=21, data_size=12, train_size=0.7, max_k=50, max_trade_size=0.1,
years_to_go_back=2, initial_investment=10000, gen_plot=False, verbose=False, savelogs=False):
today = dt.date.today()
yr = today.year - years_to_go_back
mo = today.month - 1 # Just temporary, take out 1 when data download is fixed.
da = today.day - 1
start_date = dt.datetime(yr, mo, da)
end_date = dt.datetime(yr + 1, mo, da)
adr = [None] * 12
vol = [None] * 12
sr = [None] * 12
myport = ['AAPL', 'GLD']
myalloc = [0.5,0.5]
# Portfolio values for Holding the Same Allocation (conservative case)
actual_prices = util.load_data(myport, start_date, end_date)
actual_prices.fillna(method='ffill', inplace=True)
actual_prices.fillna(method='bfill', inplace=True)
prices_SPY = actual_prices['SPY']
actual_prices = actual_prices[myport]
adr_cons, vol_cons, sharpe_cons, pv_cons = util.compute_returns(actual_prices, myalloc, sf=252.0, rfr=0.0)
# Portfolio values with monthly optimization using hindsight (best possible case)
# Portfolio values for Machine Learner
ml_allocs = []
ml_trade_dates = []
for i in range(int(252/n_days)):
temp = round(i*52*n_days/252)
test_date = start_date + dt.timedelta(weeks=round(i*52*n_days/252))
#print(i, temp, test_date)
if verbose: print(('EXPERIMENT %i - %s') % (i, str(test_date.strftime("%m/%d/%Y"))))
myalloc, trade_date = run_today(end_date=test_date, n_days=n_days, data_size=data_size,
myport=myport, allocations=myalloc,
train_size=train_size, max_k=max_k,
max_trade_size=max_trade_size, gen_plot=gen_plot, verbose=verbose, savelogs=savelogs)
ml_allocs.append(myalloc)
ml_trade_dates.append(trade_date)
ml_allocations = pd.DataFrame(data=ml_allocs, index=ml_trade_dates, columns=myport)
all_dates = actual_prices.index
#ml_allocaations = ml_allocaations.reindex(all_dates, method='ffill')
actual_prices['Cash'] = 1.0
ml_holdings = pd.DataFrame(data=0.0, index=all_dates, columns=myport)
ml_holdings['Cash'] = 0.0
ml_holdings.ix[0,'Cash'] = initial_investment
values = ml_holdings * actual_prices
porvals = values.sum(axis=1)
for index, allocation in ml_allocations.iterrows():
if index < ml_holdings.index.min():
index = ml_holdings.index.min()
#else:
# index = ml_holdings.index.get_loc(tdate, method='ffill')
tomorrow = ml_holdings.index.get_loc(index) + 1
for symbol in myport:
ml_holdings.loc[tomorrow:, symbol] = porvals.loc[index] * allocation[symbol] / actual_prices.loc[index,symbol]
values = ml_holdings * actual_prices
porvals = values.sum(axis=1)
if gen_plot:
# add code to plot here
df_temp = pd.concat([pv_cons, porvals, prices_SPY], keys=['Conservative', 'ML', 'SPY'],
axis=1)
df_temp = df_temp / df_temp.ix[0, :]
util.plot_data(df_temp, 'Daily portfolio value and SPY', 'Date', 'Normalized Price')
ret_cons = (pv_cons[-1] / pv_cons[0]) - 1
ret_porvals = (porvals[-1] / porvals[0]) - 1
ret_SPY = (prices_SPY[-1] / prices_SPY[0]) - 1
return ret_cons, ret_porvals, ret_SPY
if __name__ == "__main__":
work = True
initial_investment = 10000 # dollars invested from start
today = dt.date.today()
yr = today.year
mo = today.month # Just temporary, take out 1 when data download is fixed.
da = today.day - 1
end_date = dt.datetime(yr, mo, da)
if work:
print("Running ML Fund Manager")
myport, allocations = util.verify_allocations()
n_days = 21 # How long the forecast should look out
data_size = 3 # Number of months of data to use for Machine Learning
train_size = 0.70 # Percentage of data used for training, rest is test
max_k = 50 # Maximum value of k for kNN
max_trade_size= 0.25 # Maximum amount of allocation allowed in a trade
run_today(end_date=end_date, n_days=n_days, data_size=data_size,
myport=myport, allocations=allocations,
train_size=train_size, max_k=max_k,
max_trade_size=max_trade_size, gen_plot=True, verbose=True)
else:
print("Testing ML Fund Manager")
n_days = 21 # How long the forecast should look out
data_size = 12 # Number of months of data to use for Machine Learning
train_size = 0.70 # Percentage of data used for training, rest is test
max_k = 5 # Maximum value of k for kNN
max_trade_size = 0.10 # Maximum amount of allocation allowed in a trade
years_to_go_back = 3
n_days = [7, 14, 21] # How long the forecast should look out
data_size = [3, 6, 12] #, 18] # Number of months of data to use for Machine Learning
train_size = [0.6, 0.7, 0.8] # Percentage of data used for training, rest is test
max_k = [5, 10, 15] #, 20, 25] # Maximum value of k for kNN
max_trade_size = [0.10, 0.20, 0.30] #, 0.40] # Maximum amount of allocation allowed in a trade
years_to_go_back = [2, 1]
r_cons = []
r_porvals = []
r_SPY = []
nd = []
ds = []
ts = []
mk = []
maxt = []
yrs = []
nums = []
exp_no = 0
for year in years_to_go_back:
for mts in max_trade_size:
for k in max_k:
for t in train_size:
for d in data_size:
for n in n_days:
#print(n, d, t, k, mts, year)
one, two, three = test_experiment_one(n_days=n, data_size=d, train_size=t, max_k=k,
max_trade_size=mts, years_to_go_back=year,
gen_plot=False, verbose=False, savelogs=False)
exp_no += 1
print(exp_no, one, two, three, n, d, t, k, mts, year)
nums.append(exp_no)
r_cons.append(one)
r_porvals.append(two)
r_SPY.append(three)
nd.append(n)
ds.append(d)
ts.append(t)
mk.append(k)
maxt.append(mts)
yrs.append(year)
results = pd.DataFrame(data=list(zip(r_cons, r_porvals, r_SPY, nd, ds, ts, mk, maxt, yrs)),
columns=['cons_return', 'ml_return', 'spy_return', 'forecast', 'months_of_data',
'train_size', 'maxk', 'max_trade', 'yrs_lookback'],
index=nums)
util.save_df_as_csv(results,'results','%s_results' % (dt.date.today().strftime("%Y_%m_%d")), indexname='exp_num')
#test_experiment_one(n_days=n_days, data_size=data_size, train_size=train_size, max_k=max_k,
# max_trade_size=max_trade_size, years_to_go_back=years_to_go_back, gen_plot=False)
```
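A quick sanity check of the date arithmetic this module leans on: `calc_start_date` converts `data_size` months into whole weeks (`int(data_size * 52 / 12)`) before stepping back from `end_date`, so 12 months becomes 52 weeks and 3 months becomes 13. The dates below are illustrative only.

```python
import datetime as dt

end = dt.datetime(2017, 1, 1)

# 12 months -> int(12 * 52 / 12) = 52 weeks
print(end - dt.timedelta(weeks=52))  # 2016-01-03 00:00:00

# 3 months -> int(3 * 52 / 12) = 13 weeks
print(end - dt.timedelta(weeks=13))  # 2016-10-02 00:00:00
```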
#### File: JonathanFung13/portfolio-analysis/optimizer.py
```python
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import datetime as dt
import scipy.optimize as spo
import utilities as util
import sklearn.model_selection as ms
from sklearn.neighbors import KNeighborsClassifier as knnC
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import SelectFromModel
from sklearn.neighbors import KNeighborsRegressor
from sklearn.grid_search import GridSearchCV
import time
import csv
import os
"""
Read forecast, calculate daily prices, then calculate the portfolio stats and use efficient frontier to increase returns
"""
def optimize_return(forecasts, symbols=['AAPL', 'GOOG'],
allocations=[0.5,0.5], rfr=0.0, sf=252.0, gen_plot=False, verbose=False, savelogs=False):
"""
    Plot return versus risk for the current allocations as well as a large set of randomly
    generated allocations, and return the allocation judged optimal (an average of the
    max-return, lowest-risk and max-Sharpe-ratio candidates).
    :param forecasts: DataFrame of forecasted prices for the portfolio symbols
    :param symbols: The funds available in the portfolio
    :param allocations: The current percentage of the portfolio invested in each fund
    :param rfr: Risk-free rate used when computing the Sharpe ratio
    :param sf: Sampling frequency per year (252 trading days)
    :param gen_plot: Boolean to plot risk versus return for the candidate allocations
    :param verbose: Boolean to print out information during execution
    :param savelogs: Boolean to save the target allocations to a log file
    :return: The recommended target allocations
"""
# Get statistics for current allocations
adr_curr, vol_curr, sr_curr, pv_curr = util.compute_returns(forecasts, allocations=allocations, rfr=rfr, sf=sf)
# Generate n random allocations
num_allocations = 2000
iallocations = [None] * num_allocations
for i in range(num_allocations):
weights = np.random.rand(len(symbols))
iallocations[i] = weights / sum(weights)
# Generate allocations for 100% in each of the available funds
for i in range(len(symbols)):
temp_alloc = [0.0] * len(symbols)
temp_alloc[i] = 1.0
iallocations.append(temp_alloc)
num_allocations += len(symbols)
adr = [None] * num_allocations
vol = [None] * num_allocations
sr = [None] * num_allocations
risk_at_max = 100.0
max_return = -100.0
sr_max = -100.0
#adr, vol, sr = map(compute_returns(), iallocations)
for i, allocs in enumerate(iallocations):
adr[i], vol[i], sr[i], pv_i = util.compute_returns(forecasts, allocations=iallocations[i], rfr=rfr, sf=sf)
# Logic attempt number 3 for optimizing portfolio: max Sharpe ratio
if sr[i] > sr_max:
sr_max = sr[i]
allocations_ef3 = iallocations[i]
# Logic attempt number 1 for optimizing portfolio: max return
if adr[i] > max_return:
max_return = adr[i]
risk_at_max = vol[i]
allocations_ef1 = iallocations[i]
allocations_ef2 = iallocations[i]
risk_ef = risk_at_max
temp_return = adr_curr
temp_vol = vol_curr
for i, ireturn in enumerate(adr):
# Logic attempt number 1 for optimizing portfolio: 90% of max return with lower risk
if ireturn > (0.9 * max_return) and vol[i] < risk_ef and False:
risk_ef = vol[i]
allocations_ef1 = iallocations[i]
# Logic attempt number 2 for optimizing portfolio: lowest risk with at least same return as current allocation
if ireturn > adr_curr and vol[i] < temp_vol:
allocations_ef2 = iallocations[i]
temp_vol = vol[i]
allocations_ef4 = np.sum([allocations_ef1, allocations_ef2, allocations_ef3], axis=0)
allocations_ef4 = np.round(allocations_ef4 / 3, decimals=3)
if verbose or gen_plot:
adr_ef1, vol_ef1, sr_ef1, pv_ef1 = util.compute_returns(forecasts, allocations=allocations_ef1, rfr=rfr, sf=sf)
adr_ef2, vol_ef2, sr_ef2, pv_ef2 = util.compute_returns(forecasts, allocations=allocations_ef2, rfr=rfr, sf=sf)
adr_ef3, vol_ef3, sr_ef3, pv_ef3 = util.compute_returns(forecasts, allocations=allocations_ef3, rfr=rfr, sf=sf)
adr_ef4, vol_ef4, sr_ef4, pv_ef4 = util.compute_returns(forecasts, allocations=allocations_ef4, rfr=rfr, sf=sf)
if verbose and False: # not going to print these out from here anymore
print("Portfolios:", "Current", "Efficient")
print("Daily return: %.5f %.5f %.5f %.5f %.5f" % (adr_curr, adr_ef1, adr_ef2, adr_ef3, adr_ef4))
print("Daily Risk: %.5f %.5f %.5f %.5f %.5f" % (vol_curr, vol_ef1, vol_ef2, vol_ef3, vol_ef4))
print("Sharpe Ratio: %.5f %.5f %.5f %.5f %.5f" % (sr_curr, sr_ef1, sr_ef2, sr_ef3, sr_ef4))
print("Return vs Risk: %.5f %.5f %.5f %.5f %.5f" % (adr_curr/vol_curr, adr_ef1/vol_ef1, adr_ef2/vol_ef2,
adr_ef3/vol_ef3, adr_ef4/vol_ef4))
print("\nALLOCATIONS\n" + "-" * 40)
print("", "Current", "Efficient")
for i, symbol in enumerate(symbols):
print("%s %.3f %.3f %.3f %.3f %.3f" %
(symbol, allocations[i], allocations_ef1[i], allocations_ef2[i], allocations_ef3[i], allocations_ef4[i]))
# Compare daily portfolio value with SPY using a normalized plot
if gen_plot:
fig, ax = plt.subplots()
ax.scatter(vol, adr, c='blue', s=5, alpha=0.1)
ax.scatter(vol_curr, adr_curr, c='green', s=35, alpha=0.75) # Current portfolio
ax.scatter(vol_ef1, adr_ef1, c='red', s=35, alpha=0.5) # ef
ax.scatter(vol_ef2, adr_ef2, c='red', s=35, alpha=0.5) # ef
ax.scatter(vol_ef3, adr_ef3, c='red', s=35, alpha=0.5) # ef
ax.scatter(vol_ef4, adr_ef4, c='black', s=25, alpha=0.75) # ef
ax.set_xlabel('St. Dev. Daily Returns')
ax.set_ylabel('Mean Daily Returns')
ax.set_xlim(min(vol)/1.5, max(vol)*1.5)
ax.set_ylim(min(adr)/1.5, max(adr)*1.5)
ax.grid()
ax.grid(linestyle=':')
fig.tight_layout()
plt.show()
# plt.plot(risk, returns, 'o', markersize=5)
# plt.plot(sddr, adr, 'g+') # Current portfolio
# plt.plot(sddr_opt, adr_opt, 'b+') # spo optimized
# plt.plot(risk_at_max, max_return, 'r+') # ef
# add code to plot here
# df_temp = pd.concat([port_val, port_val_opt, port_val_ef, prices_SPY], keys=['Portfolio', 'Optimized', 'EF','SPY'], axis=1)
# df_temp = df_temp / df_temp.ix[0, :]
# plot_data(df_temp, 'Daily portfolio value and SPY', 'Date', 'Normalized Price')
#
# # Add code here to properly compute end value
# ev = investment * (1+cr)
if savelogs:
target_allocations = pd.DataFrame(data=allocations_ef4, index=symbols, columns=['Allocations']) #, index=)
target_allocations.index.name = 'Symbol'
util.save_df_as_csv(target_allocations, 'logs', 'target', 'Symbol')
return allocations_ef4
if __name__ == "__main__":
print("Run ml_fund_manager.py instead")
``` |
{
"source": "JonathanGailliez/azure-sdk-for-python",
"score": 2
} |
#### File: authoring/models/application_info_response_py3.py
```python
from msrest.serialization import Model
class ApplicationInfoResponse(Model):
"""Response containing the Application Info.
:param id: The ID (GUID) of the application.
:type id: str
:param name: The name of the application.
:type name: str
:param description: The description of the application.
:type description: str
:param culture: The culture of the application. E.g.: en-us.
:type culture: str
:param usage_scenario: Defines the scenario for the new application.
Optional. E.g.: IoT.
:type usage_scenario: str
:param domain: The domain for the new application. Optional. E.g.: Comics.
:type domain: str
:param versions_count: Amount of model versions within the application.
:type versions_count: int
    :param created_date_time: The application's creation timestamp.
    :type created_date_time: str
    :param endpoints: The runtime endpoint URLs for this application.
    :type endpoints: object
:param endpoint_hits_count: Number of calls made to this endpoint.
:type endpoint_hits_count: int
:param active_version: The version ID currently marked as active.
:type active_version: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'culture': {'key': 'culture', 'type': 'str'},
'usage_scenario': {'key': 'usageScenario', 'type': 'str'},
'domain': {'key': 'domain', 'type': 'str'},
'versions_count': {'key': 'versionsCount', 'type': 'int'},
'created_date_time': {'key': 'createdDateTime', 'type': 'str'},
'endpoints': {'key': 'endpoints', 'type': 'object'},
'endpoint_hits_count': {'key': 'endpointHitsCount', 'type': 'int'},
'active_version': {'key': 'activeVersion', 'type': 'str'},
}
def __init__(self, *, id: str=None, name: str=None, description: str=None, culture: str=None, usage_scenario: str=None, domain: str=None, versions_count: int=None, created_date_time: str=None, endpoints=None, endpoint_hits_count: int=None, active_version: str=None, **kwargs) -> None:
super(ApplicationInfoResponse, self).__init__(**kwargs)
self.id = id
self.name = name
self.description = description
self.culture = culture
self.usage_scenario = usage_scenario
self.domain = domain
self.versions_count = versions_count
self.created_date_time = created_date_time
self.endpoints = endpoints
self.endpoint_hits_count = endpoint_hits_count
self.active_version = active_version
```
#### File: authoring/models/application_settings.py
```python
from msrest.serialization import Model
class ApplicationSettings(Model):
"""The application settings.
All required parameters must be populated in order to send to Azure.
:param id: Required. The application ID.
:type id: str
:param is_public: Required. Setting your application as public allows
other people to use your application's endpoint using their own keys.
:type is_public: bool
"""
_validation = {
'id': {'required': True},
'is_public': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'is_public': {'key': 'public', 'type': 'bool'},
}
def __init__(self, **kwargs):
super(ApplicationSettings, self).__init__(**kwargs)
self.id = kwargs.get('id', None)
self.is_public = kwargs.get('is_public', None)
```
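A construction sketch for the model above, assuming the published authoring models package; the GUID is a made-up placeholder. Both fields are required per the _validation map, and is_public serializes to the 'public' JSON key:
```python
from azure.cognitiveservices.language.luis.authoring.models import ApplicationSettings

# Hypothetical application ID; both fields are required by the model's validation rules.
settings = ApplicationSettings(id="df67dcdb-c37d-46af-88e1-8b97951ca1c2", is_public=True)
print(settings.id, settings.is_public)
```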
#### File: authoring/models/entity_label_object_py3.py
```python
from msrest.serialization import Model
class EntityLabelObject(Model):
"""Defines the entity type and position of the extracted entity within the
example.
All required parameters must be populated in order to send to Azure.
:param entity_name: Required. The entity type.
:type entity_name: str
:param start_char_index: Required. The index within the utterance where
the extracted entity starts.
:type start_char_index: int
:param end_char_index: Required. The index within the utterance where the
extracted entity ends.
:type end_char_index: int
"""
_validation = {
'entity_name': {'required': True},
'start_char_index': {'required': True},
'end_char_index': {'required': True},
}
_attribute_map = {
'entity_name': {'key': 'entityName', 'type': 'str'},
'start_char_index': {'key': 'startCharIndex', 'type': 'int'},
'end_char_index': {'key': 'endCharIndex', 'type': 'int'},
}
def __init__(self, *, entity_name: str, start_char_index: int, end_char_index: int, **kwargs) -> None:
super(EntityLabelObject, self).__init__(**kwargs)
self.entity_name = entity_name
self.start_char_index = start_char_index
self.end_char_index = end_char_index
```
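As a usage sketch, labeling the word "Seattle" in a sample utterance uses zero-based character indices into that string; the entity name is a made-up example and the end index is assumed to be inclusive here:
```python
from azure.cognitiveservices.language.luis.authoring.models import EntityLabelObject

utterance = "book a flight to Seattle"

# "Seattle" spans characters 17 through 23; "Location" is a hypothetical entity type.
label = EntityLabelObject(entity_name="Location", start_char_index=17, end_char_index=23)
print(utterance[label.start_char_index:label.end_char_index + 1])  # "Seattle", assuming an inclusive end index
```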
#### File: authoring/models/pattern_feature_info.py
```python
from .feature_info_object import FeatureInfoObject
class PatternFeatureInfo(FeatureInfoObject):
"""Pattern feature.
:param id: A six-digit ID used for Features.
:type id: int
:param name: The name of the Feature.
:type name: str
:param is_active: Indicates if the feature is enabled.
:type is_active: bool
:param pattern: The Regular Expression to match.
:type pattern: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'int'},
'name': {'key': 'name', 'type': 'str'},
'is_active': {'key': 'isActive', 'type': 'bool'},
'pattern': {'key': 'pattern', 'type': 'str'},
}
def __init__(self, **kwargs):
super(PatternFeatureInfo, self).__init__(**kwargs)
self.pattern = kwargs.get('pattern', None)
```
#### File: authoring/models/prebuilt_domain_model_create_object_py3.py
```python
from msrest.serialization import Model
class PrebuiltDomainModelCreateObject(Model):
"""A model object containing the name of the custom prebuilt intent or entity
and the name of the domain to which this model belongs.
:param domain_name: The domain name.
:type domain_name: str
:param model_name: The intent name or entity name.
:type model_name: str
"""
_attribute_map = {
'domain_name': {'key': 'domainName', 'type': 'str'},
'model_name': {'key': 'modelName', 'type': 'str'},
}
def __init__(self, *, domain_name: str=None, model_name: str=None, **kwargs) -> None:
super(PrebuiltDomainModelCreateObject, self).__init__(**kwargs)
self.domain_name = domain_name
self.model_name = model_name
```
#### File: authoring/models/prebuilt_domain_py3.py
```python
from msrest.serialization import Model
class PrebuiltDomain(Model):
"""Prebuilt Domain.
:param name:
:type name: str
:param culture:
:type culture: str
:param description:
:type description: str
:param examples:
:type examples: str
:param intents:
:type intents:
list[~azure.cognitiveservices.language.luis.authoring.models.PrebuiltDomainItem]
:param entities:
:type entities:
list[~azure.cognitiveservices.language.luis.authoring.models.PrebuiltDomainItem]
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'culture': {'key': 'culture', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'examples': {'key': 'examples', 'type': 'str'},
'intents': {'key': 'intents', 'type': '[PrebuiltDomainItem]'},
'entities': {'key': 'entities', 'type': '[PrebuiltDomainItem]'},
}
def __init__(self, *, name: str=None, culture: str=None, description: str=None, examples: str=None, intents=None, entities=None, **kwargs) -> None:
super(PrebuiltDomain, self).__init__(**kwargs)
self.name = name
self.culture = culture
self.description = description
self.examples = examples
self.intents = intents
self.entities = entities
```
#### File: authoring/models/version_info_py3.py
```python
from msrest.serialization import Model
class VersionInfo(Model):
"""Object model of an application version.
All required parameters must be populated in order to send to Azure.
:param version: Required. The version ID. E.g.: "0.1"
:type version: str
:param created_date_time: The version's creation timestamp.
:type created_date_time: datetime
:param last_modified_date_time: Timestamp of the last update.
:type last_modified_date_time: datetime
:param last_trained_date_time: Timestamp of the last time the model was
trained.
:type last_trained_date_time: datetime
:param last_published_date_time: Timestamp when was last published.
:type last_published_date_time: datetime
:param endpoint_url: The Runtime endpoint URL for this model version.
:type endpoint_url: str
:param assigned_endpoint_key: The endpoint key.
:type assigned_endpoint_key: dict[str, str]
:param external_api_keys: External keys.
:type external_api_keys: object
:param intents_count: Number of intents in this model.
:type intents_count: int
:param entities_count: Number of entities in this model.
:type entities_count: int
:param endpoint_hits_count: Number of calls made to this endpoint.
:type endpoint_hits_count: int
:param training_status: Required. The current training status. Possible
values include: 'NeedsTraining', 'InProgress', 'Trained'
:type training_status: str or
~azure.cognitiveservices.language.luis.authoring.models.TrainingStatus
"""
_validation = {
'version': {'required': True},
'training_status': {'required': True},
}
_attribute_map = {
'version': {'key': 'version', 'type': 'str'},
'created_date_time': {'key': 'createdDateTime', 'type': 'iso-8601'},
'last_modified_date_time': {'key': 'lastModifiedDateTime', 'type': 'iso-8601'},
'last_trained_date_time': {'key': 'lastTrainedDateTime', 'type': 'iso-8601'},
'last_published_date_time': {'key': 'lastPublishedDateTime', 'type': 'iso-8601'},
'endpoint_url': {'key': 'endpointUrl', 'type': 'str'},
'assigned_endpoint_key': {'key': 'assignedEndpointKey', 'type': '{str}'},
'external_api_keys': {'key': 'externalApiKeys', 'type': 'object'},
'intents_count': {'key': 'intentsCount', 'type': 'int'},
'entities_count': {'key': 'entitiesCount', 'type': 'int'},
'endpoint_hits_count': {'key': 'endpointHitsCount', 'type': 'int'},
'training_status': {'key': 'trainingStatus', 'type': 'TrainingStatus'},
}
def __init__(self, *, version: str, training_status, created_date_time=None, last_modified_date_time=None, last_trained_date_time=None, last_published_date_time=None, endpoint_url: str=None, assigned_endpoint_key=None, external_api_keys=None, intents_count: int=None, entities_count: int=None, endpoint_hits_count: int=None, **kwargs) -> None:
super(VersionInfo, self).__init__(**kwargs)
self.version = version
self.created_date_time = created_date_time
self.last_modified_date_time = last_modified_date_time
self.last_trained_date_time = last_trained_date_time
self.last_published_date_time = last_published_date_time
self.endpoint_url = endpoint_url
self.assigned_endpoint_key = assigned_endpoint_key
self.external_api_keys = external_api_keys
self.intents_count = intents_count
self.entities_count = entities_count
self.endpoint_hits_count = endpoint_hits_count
self.training_status = training_status
```
#### File: runtime/models/entity_with_resolution.py
```python
from .entity_model import EntityModel
class EntityWithResolution(EntityModel):
"""EntityWithResolution.
All required parameters must be populated in order to send to Azure.
    :param additional_properties: Unmatched properties from the message are
     deserialized to this collection
:type additional_properties: dict[str, object]
:param entity: Required. Name of the entity, as defined in LUIS.
:type entity: str
:param type: Required. Type of the entity, as defined in LUIS.
:type type: str
:param start_index: Required. The position of the first character of the
matched entity within the utterance.
:type start_index: int
:param end_index: Required. The position of the last character of the
matched entity within the utterance.
:type end_index: int
:param resolution: Required. Resolution values for pre-built LUIS
entities.
:type resolution: object
"""
_validation = {
'entity': {'required': True},
'type': {'required': True},
'start_index': {'required': True},
'end_index': {'required': True},
'resolution': {'required': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'entity': {'key': 'entity', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'start_index': {'key': 'startIndex', 'type': 'int'},
'end_index': {'key': 'endIndex', 'type': 'int'},
'resolution': {'key': 'resolution', 'type': 'object'},
}
def __init__(self, **kwargs):
super(EntityWithResolution, self).__init__(**kwargs)
self.resolution = kwargs.get('resolution', None)
```
#### File: runtime/operations/prediction_operations.py
```python
from msrest.pipeline import ClientRawResponse
from .. import models
class PredictionOperations(object):
"""PredictionOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
def resolve(
self, app_id, query, timezone_offset=None, verbose=None, staging=None, spell_check=None, bing_spell_check_subscription_key=None, log=None, custom_headers=None, raw=False, **operation_config):
"""Gets predictions for a given utterance, in the form of intents and
entities. The current maximum query size is 500 characters.
:param app_id: The LUIS application ID (Guid).
:type app_id: str
:param query: The utterance to predict.
:type query: str
:param timezone_offset: The timezone offset for the location of the
request.
:type timezone_offset: float
:param verbose: If true, return all intents instead of just the top
scoring intent.
:type verbose: bool
:param staging: Use the staging endpoint slot.
:type staging: bool
:param spell_check: Enable spell checking.
:type spell_check: bool
:param bing_spell_check_subscription_key: The subscription key to use
when enabling bing spell check
:type bing_spell_check_subscription_key: str
:param log: Log query (default is true)
:type log: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: LuisResult or ClientRawResponse if raw=true
:rtype:
~azure.cognitiveservices.language.luis.runtime.models.LuisResult or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`APIErrorException<azure.cognitiveservices.language.luis.runtime.models.APIErrorException>`
"""
# Construct URL
url = self.resolve.metadata['url']
path_format_arguments = {
'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True),
'appId': self._serialize.url("app_id", app_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if timezone_offset is not None:
query_parameters['timezoneOffset'] = self._serialize.query("timezone_offset", timezone_offset, 'float')
if verbose is not None:
query_parameters['verbose'] = self._serialize.query("verbose", verbose, 'bool')
if staging is not None:
query_parameters['staging'] = self._serialize.query("staging", staging, 'bool')
if spell_check is not None:
query_parameters['spellCheck'] = self._serialize.query("spell_check", spell_check, 'bool')
if bing_spell_check_subscription_key is not None:
query_parameters['bing-spell-check-subscription-key'] = self._serialize.query("bing_spell_check_subscription_key", bing_spell_check_subscription_key, 'str')
if log is not None:
query_parameters['log'] = self._serialize.query("log", log, 'bool')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(query, 'str')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.APIErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('LuisResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
resolve.metadata = {'url': '/apps/{appId}'}
```
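A minimal calling sketch for the operation above, assuming the LUISRuntimeClient wrapper from the same package and CognitiveServicesCredentials from msrest; the endpoint, subscription key and application ID are placeholders:
```python
from msrest.authentication import CognitiveServicesCredentials
from azure.cognitiveservices.language.luis.runtime import LUISRuntimeClient

# Placeholder endpoint and subscription key.
client = LUISRuntimeClient(
    endpoint="https://westus.api.cognitive.microsoft.com",
    credentials=CognitiveServicesCredentials("<subscription-key>"),
)

# PredictionOperations is exposed on the client as `prediction`.
result = client.prediction.resolve(
    app_id="<app-guid>",
    query="turn on the kitchen lights",
    verbose=True,
)
print(result.top_scoring_intent.intent, result.top_scoring_intent.score)
```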
#### File: spellcheck/models/spelling_flagged_token.py
```python
from msrest.serialization import Model
class SpellingFlaggedToken(Model):
"""SpellingFlaggedToken.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:param offset: Required.
:type offset: int
:param token: Required.
:type token: str
:param type: Required. Possible values include: 'UnknownToken',
     'RepeatedToken'. Default value: "UnknownToken".
:type type: str or
~azure.cognitiveservices.language.spellcheck.models.ErrorType
:ivar suggestions:
:vartype suggestions:
list[~azure.cognitiveservices.language.spellcheck.models.SpellingTokenSuggestion]
:ivar ping_url_suffix:
:vartype ping_url_suffix: str
"""
_validation = {
'offset': {'required': True},
'token': {'required': True},
'type': {'required': True},
'suggestions': {'readonly': True},
'ping_url_suffix': {'readonly': True},
}
_attribute_map = {
'offset': {'key': 'offset', 'type': 'int'},
'token': {'key': 'token', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'suggestions': {'key': 'suggestions', 'type': '[SpellingTokenSuggestion]'},
'ping_url_suffix': {'key': 'pingUrlSuffix', 'type': 'str'},
}
def __init__(self, **kwargs):
super(SpellingFlaggedToken, self).__init__(**kwargs)
self.offset = kwargs.get('offset', None)
self.token = kwargs.get('token', None)
self.type = kwargs.get('type', "UnknownToken")
self.suggestions = None
self.ping_url_suffix = None
```
#### File: textanalytics/models/sentiment_batch_result_item.py
```python
from msrest.serialization import Model
class SentimentBatchResultItem(Model):
"""SentimentBatchResultItem.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar score: A decimal number between 0 and 1 denoting the sentiment of
the document. A score above 0.7 usually refers to a positive document
while a score below 0.3 normally has a negative connotation. Mid values
refer to neutral text.
:vartype score: float
:ivar id: Unique document identifier.
:vartype id: str
"""
_validation = {
'score': {'readonly': True},
'id': {'readonly': True},
}
_attribute_map = {
'score': {'key': 'score', 'type': 'float'},
'id': {'key': 'id', 'type': 'str'},
}
def __init__(self):
super(SentimentBatchResultItem, self).__init__()
self.score = None
self.id = None
```
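The score thresholds described in the docstring (roughly above 0.7 positive, below 0.3 negative, neutral in between) can be applied with a small helper; the cutoffs simply restate the documented guidance and are heuristics, not API behaviour:
```python
def label_sentiment(score: float) -> str:
    # Buckets follow the guidance in the docstring above.
    if score > 0.7:
        return "positive"
    if score < 0.3:
        return "negative"
    return "neutral"

print(label_sentiment(0.91), label_sentiment(0.12), label_sentiment(0.5))
# prints: positive negative neutral
```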
#### File: language/textanalytics/text_analytics_api.py
```python
from msrest.service_client import ServiceClient
from msrest import Configuration, Serializer, Deserializer
from .version import VERSION
from msrest.pipeline import ClientRawResponse
from . import models
class TextAnalyticsAPIConfiguration(Configuration):
"""Configuration for TextAnalyticsAPI
Note that all parameters used to create this instance are saved as instance
attributes.
:param azure_region: Supported Azure regions for Cognitive Services
endpoints. Possible values include: 'westus', 'westeurope',
'southeastasia', 'eastus2', 'westcentralus', 'westus2', 'eastus',
'southcentralus', 'northeurope', 'eastasia', 'australiaeast',
'brazilsouth'
:type azure_region: str or
~azure.cognitiveservices.language.textanalytics.models.AzureRegions
:param credentials: Subscription credentials which uniquely identify
client subscription.
:type credentials: None
"""
def __init__(
self, azure_region, credentials):
if azure_region is None:
raise ValueError("Parameter 'azure_region' must not be None.")
if credentials is None:
raise ValueError("Parameter 'credentials' must not be None.")
base_url = 'https://{AzureRegion}.api.cognitive.microsoft.com/text/analytics'
super(TextAnalyticsAPIConfiguration, self).__init__(base_url)
self.add_user_agent('azure-cognitiveservices-language-textanalytics/{}'.format(VERSION))
self.azure_region = azure_region
self.credentials = credentials
class TextAnalyticsAPI(object):
"""The Text Analytics API is a suite of text analytics web services built with best-in-class Microsoft machine learning algorithms. The API can be used to analyze unstructured text for tasks such as sentiment analysis, key phrase extraction and language detection. No training data is needed to use this API; just bring your text data. This API uses advanced natural language processing techniques to deliver best in class predictions. Further documentation can be found in https://docs.microsoft.com/en-us/azure/cognitive-services/text-analytics/overview
:ivar config: Configuration for client.
:vartype config: TextAnalyticsAPIConfiguration
:param azure_region: Supported Azure regions for Cognitive Services
endpoints. Possible values include: 'westus', 'westeurope',
'southeastasia', 'eastus2', 'westcentralus', 'westus2', 'eastus',
'southcentralus', 'northeurope', 'eastasia', 'australiaeast',
'brazilsouth'
:type azure_region: str or
~azure.cognitiveservices.language.textanalytics.models.AzureRegions
:param credentials: Subscription credentials which uniquely identify
client subscription.
:type credentials: None
"""
def __init__(
self, azure_region, credentials):
self.config = TextAnalyticsAPIConfiguration(azure_region, credentials)
self._client = ServiceClient(self.config.credentials, self.config)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self.api_version = 'v2.0'
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
def key_phrases(
self, documents=None, custom_headers=None, raw=False, **operation_config):
"""The API returns a list of strings denoting the key talking points in
the input text.
We employ techniques from Microsoft Office's sophisticated Natural
Language Processing toolkit. See the <a
href="https://docs.microsoft.com/en-us/azure/cognitive-services/text-analytics/overview#supported-languages">Text
Analytics Documentation</a> for details about the languages that are
supported by key phrase extraction.
:param documents:
:type documents:
list[~azure.cognitiveservices.language.textanalytics.models.MultiLanguageInput]
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: KeyPhraseBatchResult or ClientRawResponse if raw=true
:rtype:
~azure.cognitiveservices.language.textanalytics.models.KeyPhraseBatchResult
or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseException<azure.cognitiveservices.language.textanalytics.models.ErrorResponseException>`
"""
input = models.MultiLanguageBatchInput(documents=documents)
# Construct URL
url = '/v2.0/keyPhrases'
path_format_arguments = {
'AzureRegion': self._serialize.url("self.config.azure_region", self.config.azure_region, 'AzureRegions', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(input, 'MultiLanguageBatchInput')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('KeyPhraseBatchResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def detect_language(
self, documents=None, custom_headers=None, raw=False, **operation_config):
"""The API returns the detected language and a numeric score between 0 and
1.
Scores close to 1 indicate 100% certainty that the identified language
is true. A total of 120 languages are supported.
:param documents:
:type documents:
list[~azure.cognitiveservices.language.textanalytics.models.Input]
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: LanguageBatchResult or ClientRawResponse if raw=true
:rtype:
~azure.cognitiveservices.language.textanalytics.models.LanguageBatchResult
or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseException<azure.cognitiveservices.language.textanalytics.models.ErrorResponseException>`
"""
input = models.BatchInput(documents=documents)
# Construct URL
url = '/v2.0/languages'
path_format_arguments = {
'AzureRegion': self._serialize.url("self.config.azure_region", self.config.azure_region, 'AzureRegions', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(input, 'BatchInput')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('LanguageBatchResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def sentiment(
self, documents=None, custom_headers=None, raw=False, **operation_config):
"""The API returns a numeric score between 0 and 1.
Scores close to 1 indicate positive sentiment, while scores close to 0
indicate negative sentiment. Sentiment score is generated using
classification techniques. The input features to the classifier include
n-grams, features generated from part-of-speech tags, and word
embeddings. See the <a
href="https://docs.microsoft.com/en-us/azure/cognitive-services/text-analytics/overview#supported-languages">Text
Analytics Documentation</a> for details about the languages that are
supported by sentiment analysis.
:param documents:
:type documents:
list[~azure.cognitiveservices.language.textanalytics.models.MultiLanguageInput]
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: SentimentBatchResult or ClientRawResponse if raw=true
:rtype:
~azure.cognitiveservices.language.textanalytics.models.SentimentBatchResult
or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseException<azure.cognitiveservices.language.textanalytics.models.ErrorResponseException>`
"""
input = models.MultiLanguageBatchInput(documents=documents)
# Construct URL
url = '/v2.0/sentiment'
path_format_arguments = {
'AzureRegion': self._serialize.url("self.config.azure_region", self.config.azure_region, 'AzureRegions', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(input, 'MultiLanguageBatchInput')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('SentimentBatchResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
```
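A minimal usage sketch for the client above, assuming CognitiveServicesCredentials from msrest and the MultiLanguageInput model referenced in the docstrings; the region, subscription key and document text are placeholders:
```python
from msrest.authentication import CognitiveServicesCredentials
from azure.cognitiveservices.language.textanalytics import TextAnalyticsAPI
from azure.cognitiveservices.language.textanalytics.models import MultiLanguageInput

# Placeholder region and subscription key.
client = TextAnalyticsAPI("westus", CognitiveServicesCredentials("<subscription-key>"))

documents = [
    MultiLanguageInput(id="1", language="en", text="The hotel was clean and the staff were friendly."),
    MultiLanguageInput(id="2", language="en", text="The flight was delayed for five hours."),
]

result = client.sentiment(documents=documents)
for doc in result.documents:
    print(doc.id, doc.score)
```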
#### File: websearch/models/time_zone_time_zone_information.py
```python
from msrest.serialization import Model
class TimeZoneTimeZoneInformation(Model):
"""Defines a date and time for a geographical location.
All required parameters must be populated in order to send to Azure.
    :param location: Required. The name of the geographical location. For
     example, County; City; City, State; City, State, Country; or Time Zone.
:type location: str
    :param time: Required. The date and time specified in the form
     YYYY-MM-DDThh:mm:ss.ssssssZ.
:type time: str
:param utc_offset: Required. The offset from UTC. For example, UTC-7.
:type utc_offset: str
"""
_validation = {
'location': {'required': True},
'time': {'required': True},
'utc_offset': {'required': True},
}
_attribute_map = {
'location': {'key': 'location', 'type': 'str'},
'time': {'key': 'time', 'type': 'str'},
'utc_offset': {'key': 'utcOffset', 'type': 'str'},
}
def __init__(self, **kwargs):
super(TimeZoneTimeZoneInformation, self).__init__(**kwargs)
self.location = kwargs.get('location', None)
self.time = kwargs.get('time', None)
self.utc_offset = kwargs.get('utc_offset', None)
```
#### File: computervision/models/image_description_details_py3.py
```python
from msrest.serialization import Model
class ImageDescriptionDetails(Model):
"""A collection of content tags, along with a list of captions sorted by
confidence level, and image metadata.
:param tags: A collection of image tags.
:type tags: list[str]
:param captions: A list of captions, sorted by confidence level.
:type captions:
list[~azure.cognitiveservices.vision.computervision.models.ImageCaption]
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '[str]'},
'captions': {'key': 'captions', 'type': '[ImageCaption]'},
}
def __init__(self, *, tags=None, captions=None, **kwargs) -> None:
super(ImageDescriptionDetails, self).__init__(**kwargs)
self.tags = tags
self.captions = captions
```
#### File: computervision/models/ocr_line.py
```python
from msrest.serialization import Model
class OcrLine(Model):
"""An object describing a single recognized line of text.
:param bounding_box: Bounding box of a recognized line. The four integers
represent the x-coordinate of the left edge, the y-coordinate of the top
edge, width, and height of the bounding box, in the coordinate system of
the input image, after it has been rotated around its center according to
the detected text angle (see textAngle property), with the origin at the
top-left corner, and the y-axis pointing down.
:type bounding_box: str
:param words: An array of objects, where each object represents a
recognized word.
:type words:
list[~azure.cognitiveservices.vision.computervision.models.OcrWord]
"""
_attribute_map = {
'bounding_box': {'key': 'boundingBox', 'type': 'str'},
'words': {'key': 'words', 'type': '[OcrWord]'},
}
def __init__(self, **kwargs):
super(OcrLine, self).__init__(**kwargs)
self.bounding_box = kwargs.get('bounding_box', None)
self.words = kwargs.get('words', None)
```
#### File: training/models/image_tag_create_summary.py
```python
from msrest.serialization import Model
class ImageTagCreateSummary(Model):
"""ImageTagCreateSummary.
:param created:
:type created:
list[~azure.cognitiveservices.vision.customvision.training.models.ImageTagCreateEntry]
:param duplicated:
:type duplicated:
list[~azure.cognitiveservices.vision.customvision.training.models.ImageTagCreateEntry]
:param exceeded:
:type exceeded:
list[~azure.cognitiveservices.vision.customvision.training.models.ImageTagCreateEntry]
"""
_attribute_map = {
'created': {'key': 'created', 'type': '[ImageTagCreateEntry]'},
'duplicated': {'key': 'duplicated', 'type': '[ImageTagCreateEntry]'},
'exceeded': {'key': 'exceeded', 'type': '[ImageTagCreateEntry]'},
}
def __init__(self, **kwargs):
super(ImageTagCreateSummary, self).__init__(**kwargs)
self.created = kwargs.get('created', None)
self.duplicated = kwargs.get('duplicated', None)
self.exceeded = kwargs.get('exceeded', None)
```
#### File: face/models/exposure.py
```python
from msrest.serialization import Model
class Exposure(Model):
"""Properties describing exposure level of the image.
:param exposure_level: An enum value indicating level of exposure.
Possible values include: 'UnderExposure', 'GoodExposure', 'OverExposure'
:type exposure_level: str or
~azure.cognitiveservices.vision.face.models.ExposureLevel
    :param value: A number indicating the level of exposure, ranging from 0
     to 1. [0, 0.25) is under exposure. [0.25, 0.75) is good exposure. [0.75,
     1] is over exposure.
:type value: float
"""
_attribute_map = {
'exposure_level': {'key': 'exposureLevel', 'type': 'ExposureLevel'},
'value': {'key': 'value', 'type': 'float'},
}
def __init__(self, **kwargs):
super(Exposure, self).__init__(**kwargs)
self.exposure_level = kwargs.get('exposure_level', None)
self.value = kwargs.get('value', None)
```
#### File: face/models/update_face_request.py
```python
from msrest.serialization import Model
class UpdateFaceRequest(Model):
"""Request to update face data.
:param user_data: User-provided data attached to the face. The size limit
is 1KB.
:type user_data: str
"""
_validation = {
'user_data': {'max_length': 1024},
}
_attribute_map = {
'user_data': {'key': 'userData', 'type': 'str'},
}
def __init__(self, **kwargs):
super(UpdateFaceRequest, self).__init__(**kwargs)
self.user_data = kwargs.get('user_data', None)
```
#### File: face/operations/face_operations.py
```python
from msrest.pipeline import ClientRawResponse
from .. import models
class FaceOperations(object):
"""FaceOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
def find_similar(
self, face_id, face_list_id=None, large_face_list_id=None, face_ids=None, max_num_of_candidates_returned=20, mode="matchPerson", custom_headers=None, raw=False, **operation_config):
"""Given query face's faceId, find the similar-looking faces from a faceId
array, a face list or a large face list.
:param face_id: FaceId of the query face. User needs to call Face -
Detect first to get a valid faceId. Note that this faceId is not
persisted and will expire 24 hours after the detection call
:type face_id: str
:param face_list_id: An existing user-specified unique candidate face
list, created in Face List - Create a Face List. Face list contains a
set of persistedFaceIds which are persisted and will never expire.
Parameter faceListId, largeFaceListId and faceIds should not be
         provided at the same time.
:type face_list_id: str
:param large_face_list_id: An existing user-specified unique candidate
large face list, created in LargeFaceList - Create. Large face list
contains a set of persistedFaceIds which are persisted and will never
expire. Parameter faceListId, largeFaceListId and faceIds should not
be provided at the same time.
:type large_face_list_id: str
:param face_ids: An array of candidate faceIds. All of them are
created by Face - Detect and the faceIds will expire 24 hours after
the detection call. The number of faceIds is limited to 1000.
Parameter faceListId, largeFaceListId and faceIds should not be
provided at the same time.
:type face_ids: list[str]
:param max_num_of_candidates_returned: The number of top similar faces
returned. The valid range is [1, 1000].
:type max_num_of_candidates_returned: int
:param mode: Similar face searching mode. It can be "matchPerson" or
"matchFace". Possible values include: 'matchPerson', 'matchFace'
:type mode: str or
~azure.cognitiveservices.vision.face.models.FindSimilarMatchMode
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: list or ClientRawResponse if raw=true
:rtype: list[~azure.cognitiveservices.vision.face.models.SimilarFace]
or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`APIErrorException<azure.cognitiveservices.vision.face.models.APIErrorException>`
"""
body = models.FindSimilarRequest(face_id=face_id, face_list_id=face_list_id, large_face_list_id=large_face_list_id, face_ids=face_ids, max_num_of_candidates_returned=max_num_of_candidates_returned, mode=mode)
# Construct URL
url = self.find_similar.metadata['url']
path_format_arguments = {
'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(body, 'FindSimilarRequest')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.APIErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('[SimilarFace]', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
find_similar.metadata = {'url': '/findsimilars'}
def group(
self, face_ids, custom_headers=None, raw=False, **operation_config):
"""Divide candidate faces into groups based on face similarity.
:param face_ids: Array of candidate faceId created by Face - Detect.
The maximum is 1000 faces
:type face_ids: list[str]
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: GroupResult or ClientRawResponse if raw=true
:rtype: ~azure.cognitiveservices.vision.face.models.GroupResult or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`APIErrorException<azure.cognitiveservices.vision.face.models.APIErrorException>`
"""
body = models.GroupRequest(face_ids=face_ids)
# Construct URL
url = self.group.metadata['url']
path_format_arguments = {
'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(body, 'GroupRequest')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.APIErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('GroupResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
group.metadata = {'url': '/group'}
def identify(
self, face_ids, person_group_id=None, large_person_group_id=None, max_num_of_candidates_returned=1, confidence_threshold=None, custom_headers=None, raw=False, **operation_config):
"""1-to-many identification to find the closest matches of the specific
query person face from a person group or large person group.
:param face_ids: Array of query faces faceIds, created by the Face -
Detect. Each of the faces are identified independently. The valid
number of faceIds is between [1, 10].
:type face_ids: list[str]
:param person_group_id: PersonGroupId of the target person group,
created by PersonGroup - Create. Parameter personGroupId and
largePersonGroupId should not be provided at the same time.
:type person_group_id: str
:param large_person_group_id: LargePersonGroupId of the target large
person group, created by LargePersonGroup - Create. Parameter
personGroupId and largePersonGroupId should not be provided at the
same time.
:type large_person_group_id: str
:param max_num_of_candidates_returned: The range of
maxNumOfCandidatesReturned is between 1 and 5 (default is 1).
:type max_num_of_candidates_returned: int
:param confidence_threshold: Confidence threshold of identification,
used to judge whether one face belong to one person. The range of
confidenceThreshold is [0, 1] (default specified by algorithm).
:type confidence_threshold: float
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: list or ClientRawResponse if raw=true
:rtype:
list[~azure.cognitiveservices.vision.face.models.IdentifyResult] or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`APIErrorException<azure.cognitiveservices.vision.face.models.APIErrorException>`
"""
body = models.IdentifyRequest(face_ids=face_ids, person_group_id=person_group_id, large_person_group_id=large_person_group_id, max_num_of_candidates_returned=max_num_of_candidates_returned, confidence_threshold=confidence_threshold)
# Construct URL
url = self.identify.metadata['url']
path_format_arguments = {
'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(body, 'IdentifyRequest')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.APIErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('[IdentifyResult]', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
identify.metadata = {'url': '/identify'}
def verify_face_to_face(
self, face_id1, face_id2, custom_headers=None, raw=False, **operation_config):
"""Verify whether two faces belong to a same person or whether one face
belongs to a person.
:param face_id1: FaceId of the first face, comes from Face - Detect
:type face_id1: str
:param face_id2: FaceId of the second face, comes from Face - Detect
:type face_id2: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: VerifyResult or ClientRawResponse if raw=true
:rtype: ~azure.cognitiveservices.vision.face.models.VerifyResult or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`APIErrorException<azure.cognitiveservices.vision.face.models.APIErrorException>`
"""
body = models.VerifyFaceToFaceRequest(face_id1=face_id1, face_id2=face_id2)
# Construct URL
url = self.verify_face_to_face.metadata['url']
path_format_arguments = {
'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(body, 'VerifyFaceToFaceRequest')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.APIErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VerifyResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
verify_face_to_face.metadata = {'url': '/verify'}
def detect_with_url(
self, url, return_face_id=True, return_face_landmarks=False, return_face_attributes=None, custom_headers=None, raw=False, **operation_config):
"""Detect human faces in an image and returns face locations, and
optionally with faceIds, landmarks, and attributes.
:param url: Publicly reachable URL of an image
:type url: str
:param return_face_id: A value indicating whether the operation should
return faceIds of detected faces.
:type return_face_id: bool
:param return_face_landmarks: A value indicating whether the operation
should return landmarks of the detected faces.
:type return_face_landmarks: bool
:param return_face_attributes: Analyze and return the one or more
specified face attributes in the comma-separated string like
"returnFaceAttributes=age,gender". Supported face attributes include
age, gender, headPose, smile, facialHair, glasses and emotion. Note
that each face attribute analysis has additional computational and
time cost.
:type return_face_attributes: list[str or
~azure.cognitiveservices.vision.face.models.FaceAttributeType]
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: list or ClientRawResponse if raw=true
:rtype: list[~azure.cognitiveservices.vision.face.models.DetectedFace]
or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`APIErrorException<azure.cognitiveservices.vision.face.models.APIErrorException>`
"""
image_url = models.ImageUrl(url=url)
# Construct URL
url = self.detect_with_url.metadata['url']
path_format_arguments = {
'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if return_face_id is not None:
query_parameters['returnFaceId'] = self._serialize.query("return_face_id", return_face_id, 'bool')
if return_face_landmarks is not None:
query_parameters['returnFaceLandmarks'] = self._serialize.query("return_face_landmarks", return_face_landmarks, 'bool')
if return_face_attributes is not None:
query_parameters['returnFaceAttributes'] = self._serialize.query("return_face_attributes", return_face_attributes, '[FaceAttributeType]', div=',')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(image_url, 'ImageUrl')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.APIErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('[DetectedFace]', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
detect_with_url.metadata = {'url': '/detect'}
def verify_face_to_person(
self, face_id, person_id, person_group_id=None, large_person_group_id=None, custom_headers=None, raw=False, **operation_config):
"""Verify whether two faces belong to a same person. Compares a face Id
with a Person Id.
:param face_id: FaceId of the face, comes from Face - Detect
:type face_id: str
:param person_id: Specify a certain person in a person group or a
large person group. personId is created in PersonGroup Person - Create
or LargePersonGroup Person - Create.
:type person_id: str
:param person_group_id: Using existing personGroupId and personId for
fast loading a specified person. personGroupId is created in
PersonGroup - Create. Parameter personGroupId and largePersonGroupId
should not be provided at the same time.
:type person_group_id: str
:param large_person_group_id: Using existing largePersonGroupId and
personId for fast loading a specified person. largePersonGroupId is
created in LargePersonGroup - Create. Parameter personGroupId and
largePersonGroupId should not be provided at the same time.
:type large_person_group_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: VerifyResult or ClientRawResponse if raw=true
:rtype: ~azure.cognitiveservices.vision.face.models.VerifyResult or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`APIErrorException<azure.cognitiveservices.vision.face.models.APIErrorException>`
"""
body = models.VerifyFaceToPersonRequest(face_id=face_id, person_group_id=person_group_id, large_person_group_id=large_person_group_id, person_id=person_id)
# Construct URL
url = self.verify_face_to_person.metadata['url']
path_format_arguments = {
'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(body, 'VerifyFaceToPersonRequest')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.APIErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VerifyResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
verify_face_to_person.metadata = {'url': '/verify'}
def detect_with_stream(
self, image, return_face_id=True, return_face_landmarks=False, return_face_attributes=None, custom_headers=None, raw=False, callback=None, **operation_config):
"""Detect human faces in an image and returns face locations, and
optionally with faceIds, landmarks, and attributes.
:param image: An image stream.
:type image: Generator
:param return_face_id: A value indicating whether the operation should
return faceIds of detected faces.
:type return_face_id: bool
:param return_face_landmarks: A value indicating whether the operation
should return landmarks of the detected faces.
:type return_face_landmarks: bool
:param return_face_attributes: Analyze and return the one or more
specified face attributes in the comma-separated string like
"returnFaceAttributes=age,gender". Supported face attributes include
age, gender, headPose, smile, facialHair, glasses and emotion. Note
that each face attribute analysis has additional computational and
time cost.
:type return_face_attributes: list[str or
~azure.cognitiveservices.vision.face.models.FaceAttributeType]
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param callback: When specified, will be called with each chunk of
data that is streamed. The callback should take two arguments, the
bytes of the current chunk of data and the response object. If the
data is uploading, response will be None.
:type callback: Callable[Bytes, response=None]
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: list or ClientRawResponse if raw=true
:rtype: list[~azure.cognitiveservices.vision.face.models.DetectedFace]
or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`APIErrorException<azure.cognitiveservices.vision.face.models.APIErrorException>`
"""
# Construct URL
url = self.detect_with_stream.metadata['url']
path_format_arguments = {
'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if return_face_id is not None:
query_parameters['returnFaceId'] = self._serialize.query("return_face_id", return_face_id, 'bool')
if return_face_landmarks is not None:
query_parameters['returnFaceLandmarks'] = self._serialize.query("return_face_landmarks", return_face_landmarks, 'bool')
if return_face_attributes is not None:
query_parameters['returnFaceAttributes'] = self._serialize.query("return_face_attributes", return_face_attributes, '[FaceAttributeType]', div=',')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/octet-stream'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._client.stream_upload(image, callback)
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.APIErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('[DetectedFace]', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
detect_with_stream.metadata = {'url': '/detect'}
```
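A minimal end-to-end sketch for the operations above, assuming the FaceClient wrapper from the same package; the endpoint, subscription key and image URLs are placeholders:
```python
from msrest.authentication import CognitiveServicesCredentials
from azure.cognitiveservices.vision.face import FaceClient

# Placeholder endpoint and subscription key.
client = FaceClient(
    endpoint="https://westus.api.cognitive.microsoft.com",
    credentials=CognitiveServicesCredentials("<subscription-key>"),
)

# Detect faces in two images (placeholder URLs), then verify whether the first
# detected face in each image belongs to the same person.
faces_a = client.face.detect_with_url(url="https://example.com/person-a.jpg")
faces_b = client.face.detect_with_url(url="https://example.com/person-b.jpg")

if faces_a and faces_b:
    result = client.face.verify_face_to_face(face_id1=faces_a[0].face_id, face_id2=faces_b[0].face_id)
    print(result.is_identical, result.confidence)
```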
#### File: v2016_10_01/models/sas_definition_bundle_py3.py
```python
from msrest.serialization import Model
class SasDefinitionBundle(Model):
"""A SAS definition bundle consists of key vault SAS definition details plus
its attributes.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: The SAS definition id.
:vartype id: str
:ivar secret_id: Storage account SAS definition secret id.
:vartype secret_id: str
:ivar parameters: The SAS definition metadata in the form of key-value
pairs.
:vartype parameters: dict[str, str]
:ivar attributes: The SAS definition attributes.
:vartype attributes:
~azure.keyvault.v2016_10_01.models.SasDefinitionAttributes
:ivar tags: Application specific metadata in the form of key-value pairs
:vartype tags: dict[str, str]
"""
_validation = {
'id': {'readonly': True},
'secret_id': {'readonly': True},
'parameters': {'readonly': True},
'attributes': {'readonly': True},
'tags': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'secret_id': {'key': 'sid', 'type': 'str'},
'parameters': {'key': 'parameters', 'type': '{str}'},
'attributes': {'key': 'attributes', 'type': 'SasDefinitionAttributes'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(self, **kwargs) -> None:
super(SasDefinitionBundle, self).__init__(**kwargs)
self.id = None
self.secret_id = None
self.parameters = None
self.attributes = None
self.tags = None
```
#### File: v7_0/models/certificate_operation_update_parameter.py
```python
from msrest.serialization import Model
class CertificateOperationUpdateParameter(Model):
"""The certificate operation update parameters.
All required parameters must be populated in order to send to Azure.
:param cancellation_requested: Required. Indicates if cancellation was
requested on the certificate operation.
:type cancellation_requested: bool
"""
_validation = {
'cancellation_requested': {'required': True},
}
_attribute_map = {
'cancellation_requested': {'key': 'cancellation_requested', 'type': 'bool'},
}
def __init__(self, **kwargs):
super(CertificateOperationUpdateParameter, self).__init__(**kwargs)
self.cancellation_requested = kwargs.get('cancellation_requested', None)
```
#### File: v7_0/models/deleted_storage_bundle_py3.py
```python
from .storage_bundle_py3 import StorageBundle
class DeletedStorageBundle(StorageBundle):
"""A deleted storage account bundle consisting of its previous id, attributes
and its tags, as well as information on when it will be purged.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: The storage account id.
:vartype id: str
:ivar resource_id: The storage account resource id.
:vartype resource_id: str
:ivar active_key_name: The current active storage account key name.
:vartype active_key_name: str
:ivar auto_regenerate_key: whether keyvault should manage the storage
account for the user.
:vartype auto_regenerate_key: bool
:ivar regeneration_period: The key regeneration time duration specified in
ISO-8601 format.
:vartype regeneration_period: str
:ivar attributes: The storage account attributes.
:vartype attributes: ~azure.keyvault.v7_0.models.StorageAccountAttributes
:ivar tags: Application specific metadata in the form of key-value pairs
:vartype tags: dict[str, str]
:param recovery_id: The url of the recovery object, used to identify and
recover the deleted storage account.
:type recovery_id: str
:ivar scheduled_purge_date: The time when the storage account is scheduled
to be purged, in UTC
:vartype scheduled_purge_date: datetime
:ivar deleted_date: The time when the storage account was deleted, in UTC
:vartype deleted_date: datetime
"""
_validation = {
'id': {'readonly': True},
'resource_id': {'readonly': True},
'active_key_name': {'readonly': True},
'auto_regenerate_key': {'readonly': True},
'regeneration_period': {'readonly': True},
'attributes': {'readonly': True},
'tags': {'readonly': True},
'scheduled_purge_date': {'readonly': True},
'deleted_date': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'resource_id': {'key': 'resourceId', 'type': 'str'},
'active_key_name': {'key': 'activeKeyName', 'type': 'str'},
'auto_regenerate_key': {'key': 'autoRegenerateKey', 'type': 'bool'},
'regeneration_period': {'key': 'regenerationPeriod', 'type': 'str'},
'attributes': {'key': 'attributes', 'type': 'StorageAccountAttributes'},
'tags': {'key': 'tags', 'type': '{str}'},
'recovery_id': {'key': 'recoveryId', 'type': 'str'},
'scheduled_purge_date': {'key': 'scheduledPurgeDate', 'type': 'unix-time'},
'deleted_date': {'key': 'deletedDate', 'type': 'unix-time'},
}
def __init__(self, *, recovery_id: str=None, **kwargs) -> None:
super(DeletedStorageBundle, self).__init__(**kwargs)
self.recovery_id = recovery_id
self.scheduled_purge_date = None
self.deleted_date = None
```
#### File: v7_0/models/json_web_key_py3.py
```python
from msrest.serialization import Model
class JsonWebKey(Model):
"""As of http://tools.ietf.org/html/draft-ietf-jose-json-web-key-18.
:param kid: Key identifier.
:type kid: str
:param kty: JsonWebKey Key Type (kty), as defined in
https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-40.
Possible values include: 'EC', 'EC-HSM', 'RSA', 'RSA-HSM', 'oct'
:type kty: str or ~azure.keyvault.v7_0.models.JsonWebKeyType
:param key_ops:
:type key_ops: list[str]
:param n: RSA modulus.
:type n: bytes
:param e: RSA public exponent.
:type e: bytes
:param d: RSA private exponent, or the D component of an EC private key.
:type d: bytes
:param dp: RSA private key parameter.
:type dp: bytes
:param dq: RSA private key parameter.
:type dq: bytes
:param qi: RSA private key parameter.
:type qi: bytes
:param p: RSA secret prime.
:type p: bytes
:param q: RSA secret prime, with p < q.
:type q: bytes
:param k: Symmetric key.
:type k: bytes
:param t: HSM Token, used with 'Bring Your Own Key'.
:type t: bytes
:param crv: Elliptic curve name. For valid values, see
JsonWebKeyCurveName. Possible values include: 'P-256', 'P-384', 'P-521',
'P-256K'
:type crv: str or ~azure.keyvault.v7_0.models.JsonWebKeyCurveName
:param x: X component of an EC public key.
:type x: bytes
:param y: Y component of an EC public key.
:type y: bytes
"""
_attribute_map = {
'kid': {'key': 'kid', 'type': 'str'},
'kty': {'key': 'kty', 'type': 'str'},
'key_ops': {'key': 'key_ops', 'type': '[str]'},
'n': {'key': 'n', 'type': 'base64'},
'e': {'key': 'e', 'type': 'base64'},
'd': {'key': 'd', 'type': 'base64'},
'dp': {'key': 'dp', 'type': 'base64'},
'dq': {'key': 'dq', 'type': 'base64'},
'qi': {'key': 'qi', 'type': 'base64'},
'p': {'key': 'p', 'type': 'base64'},
'q': {'key': 'q', 'type': 'base64'},
'k': {'key': 'k', 'type': 'base64'},
't': {'key': 'key_hsm', 'type': 'base64'},
'crv': {'key': 'crv', 'type': 'str'},
'x': {'key': 'x', 'type': 'base64'},
'y': {'key': 'y', 'type': 'base64'},
}
def __init__(self, *, kid: str=None, kty=None, key_ops=None, n: bytes=None, e: bytes=None, d: bytes=None, dp: bytes=None, dq: bytes=None, qi: bytes=None, p: bytes=None, q: bytes=None, k: bytes=None, t: bytes=None, crv=None, x: bytes=None, y: bytes=None, **kwargs) -> None:
super(JsonWebKey, self).__init__(**kwargs)
self.kid = kid
self.kty = kty
self.key_ops = key_ops
self.n = n
self.e = e
self.d = d
self.dp = dp
self.dq = dq
self.qi = qi
self.p = p
self.q = q
self.k = k
self.t = t
self.crv = crv
self.x = x
self.y = y
```
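A sketch of building a JsonWebKey for an RSA public key, using only fields declared in the attribute map above. The modulus and exponent bytes are dummy values, and the import path is assumed from the `~azure.keyvault.v7_0.models` references in the docstring:
```python
from azure.keyvault.v7_0.models import JsonWebKey

# Dummy RSA public material; real values would come from a generated key pair.
modulus = b"\x00" * 256                    # 2048-bit modulus placeholder
exponent = (65537).to_bytes(3, "big")      # common public exponent

jwk = JsonWebKey(
    kty="RSA",                             # a JsonWebKeyType value
    key_ops=["encrypt", "decrypt", "sign", "verify"],
    n=modulus,                             # serialized as base64 per the attribute map
    e=exponent,
)
print(jwk.kty, jwk.key_ops)
```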
#### File: v7_0/models/key_properties_py3.py
```python
from msrest.serialization import Model
class KeyProperties(Model):
"""Properties of the key pair backing a certificate.
:param exportable: Indicates if the private key can be exported.
:type exportable: bool
:param key_type: The type of key pair to be used for the certificate.
Possible values include: 'EC', 'EC-HSM', 'RSA', 'RSA-HSM', 'oct'
:type key_type: str or ~azure.keyvault.v7_0.models.JsonWebKeyType
:param key_size: The key size in bits. For example: 2048, 3072, or 4096
for RSA.
:type key_size: int
:param reuse_key: Indicates if the same key pair will be used on
certificate renewal.
:type reuse_key: bool
:param curve: Elliptic curve name. For valid values, see
JsonWebKeyCurveName. Possible values include: 'P-256', 'P-384', 'P-521',
'P-256K'
:type curve: str or ~azure.keyvault.v7_0.models.JsonWebKeyCurveName
"""
_attribute_map = {
'exportable': {'key': 'exportable', 'type': 'bool'},
'key_type': {'key': 'kty', 'type': 'str'},
'key_size': {'key': 'key_size', 'type': 'int'},
'reuse_key': {'key': 'reuse_key', 'type': 'bool'},
'curve': {'key': 'crv', 'type': 'str'},
}
def __init__(self, *, exportable: bool=None, key_type=None, key_size: int=None, reuse_key: bool=None, curve=None, **kwargs) -> None:
super(KeyProperties, self).__init__(**kwargs)
self.exportable = exportable
self.key_type = key_type
self.key_size = key_size
self.reuse_key = reuse_key
self.curve = curve
```
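A construction sketch for the key properties backing a certificate; only fields visible in the model are used, and attaching this to a certificate policy is outside the snippet:
```python
from azure.keyvault.v7_0.models import KeyProperties

# Exportable 2048-bit RSA key, regenerated (not reused) on certificate renewal.
key_props = KeyProperties(
    exportable=True,
    key_type="RSA",
    key_size=2048,
    reuse_key=False,
)
print(key_props.key_type, key_props.key_size)
```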
#### File: v7_0/models/storage_restore_parameters.py
```python
from msrest.serialization import Model
class StorageRestoreParameters(Model):
"""The secret restore parameters.
All required parameters must be populated in order to send to Azure.
:param storage_bundle_backup: Required. The backup blob associated with a
storage account.
:type storage_bundle_backup: bytes
"""
_validation = {
'storage_bundle_backup': {'required': True},
}
_attribute_map = {
'storage_bundle_backup': {'key': 'value', 'type': 'base64'},
}
def __init__(self, **kwargs):
super(StorageRestoreParameters, self).__init__(**kwargs)
self.storage_bundle_backup = kwargs.get('storage_bundle_backup', None)
```
#### File: azure-keyvault/tests/test_secrets.py
```python
from devtools_testutils import AzureMgmtTestCase, ResourceGroupPreparer
from keyvault_preparer import KeyVaultPreparer
from keyvault_testcase import KeyvaultTestCase
from azure.keyvault import KeyVaultId, KeyVaultClient, KeyVaultAuthentication, AccessToken
import copy
from dateutil import parser as date_parse
import time
import unittest
class KeyVaultSecretTest(KeyvaultTestCase):
def _validate_secret_bundle(self, bundle, vault, secret_name, secret_value):
prefix = '{}secrets/{}/'.format(vault, secret_name)
id = bundle.id
self.assertTrue(id.index(prefix) == 0,
"String should start with '{}', but value is '{}'".format(prefix, id))
self.assertEqual(bundle.value, secret_value,
"value should be '{}', but is '{}'".format(secret_value, bundle.value))
self.assertTrue(bundle.attributes.created and bundle.attributes.updated,
'Missing required date attributes.')
def _validate_secret_list(self, secrets, expected):
for secret in secrets:
if secret.id in expected.keys():
attributes = expected[secret.id]
self.assertEqual(attributes, secret.attributes)
del expected[secret.id]
@ResourceGroupPreparer()
@KeyVaultPreparer()
def test_secret_crud_operations(self, vault, **kwargs):
self.assertIsNotNone(vault)
vault_uri = vault.properties.vault_uri
secret_name = 'crud-secret'
secret_value = self.get_resource_name('crud_secret_value')
# create secret
secret_bundle = self.client.set_secret(vault_uri, secret_name, secret_value)
self._validate_secret_bundle(secret_bundle, vault_uri, secret_name, secret_value)
created_bundle = secret_bundle
secret_id = KeyVaultId.parse_secret_id(created_bundle.id)
# get secret without version
self.assertEqual(created_bundle, self.client.get_secret(secret_id.vault, secret_id.name, ''))
# get secret with version
self.assertEqual(created_bundle, self.client.get_secret(secret_id.vault, secret_id.name, secret_id.version))
def _update_secret(secret_uri):
updating_bundle = copy.deepcopy(created_bundle)
updating_bundle.content_type = 'text/plain'
updating_bundle.attributes.expires = date_parse.parse('2050-02-02T08:00:00.000Z')
updating_bundle.tags = {'foo': 'updated tag'}
sid = KeyVaultId.parse_secret_id(secret_uri)
secret_bundle = self.client.update_secret(
sid.vault, sid.name, sid.version, updating_bundle.content_type, updating_bundle.attributes,
updating_bundle.tags)
self.assertEqual(updating_bundle.tags, secret_bundle.tags)
self.assertEqual(updating_bundle.id, secret_bundle.id)
self.assertNotEqual(str(updating_bundle.attributes.updated), str(secret_bundle.attributes.updated))
return secret_bundle
# update secret without version
secret_bundle = _update_secret(secret_id.base_id)
# update secret with version
secret_bundle = _update_secret(secret_id.id)
# delete secret
self.client.delete_secret(secret_id.vault, secret_id.name)
# get secret returns not found
try:
self.client.get_secret(secret_id.vault, secret_id.name, '')
except Exception as ex:
if not hasattr(ex, 'message') or 'not found' not in ex.message.lower():
raise ex
@ResourceGroupPreparer()
@KeyVaultPreparer()
def test_secret_list(self, vault, **kwargs):
self.assertIsNotNone(vault)
vault_uri = vault.properties.vault_uri
max_secrets = self.list_test_size
expected = {}
# create many secrets
for x in range(0, max_secrets):
secret_name = 'sec{}'.format(x)
secret_value = self.get_resource_name('secVal{}'.format(x))
secret_bundle = None
error_count = 0
while not secret_bundle:
try:
secret_bundle = self.client.set_secret(vault_uri, secret_name, secret_value)
sid = KeyVaultId.parse_secret_id(secret_bundle.id).base_id.strip('/')
expected[sid] = secret_bundle.attributes
except Exception as ex:
if hasattr(ex, 'message') and 'Throttled' in ex.message:
error_count += 1
time.sleep(2.5 * error_count)
continue
else:
raise ex
# list secrets
result = list(self.client.get_secrets(vault_uri, self.list_test_size))
self._validate_secret_list(result, expected)
@ResourceGroupPreparer()
@KeyVaultPreparer()
def test_list_versions(self, vault, **kwargs):
self.assertIsNotNone(vault)
vault_uri = vault.properties.vault_uri
secret_name = self.get_resource_name('sec')
secret_value = self.get_resource_name('secVal')
max_secrets = self.list_test_size
expected = {}
# create many secret versions
for x in range(0, max_secrets):
secret_bundle = None
error_count = 0
while not secret_bundle:
try:
secret_bundle = self.client.set_secret(vault_uri, secret_name, secret_value)
sid = KeyVaultId.parse_secret_id(secret_bundle.id).id.strip('/')
expected[sid] = secret_bundle.attributes
except Exception as ex:
if hasattr(ex, 'message') and 'Throttled' in ex.message:
error_count += 1
time.sleep(2.5 * error_count)
continue
else:
raise ex
# list secret versions
self._validate_secret_list(list(self.client.get_secret_versions(vault_uri, secret_name)), expected)
@ResourceGroupPreparer()
@KeyVaultPreparer()
def test_backup_restore(self, vault, **kwargs):
self.assertIsNotNone(vault)
vault_uri = vault.properties.vault_uri
secret_name = self.get_resource_name('secbak')
secret_value = self.get_resource_name('secVal')
# create secret
created_bundle = self.client.set_secret(vault_uri, secret_name, secret_value)
secret_id = KeyVaultId.parse_secret_id(created_bundle.id)
# backup secret
secret_backup = self.client.backup_secret(secret_id.vault, secret_id.name).value
# delete secret
self.client.delete_secret(secret_id.vault, secret_id.name)
# restore secret
self.assertEqual(created_bundle.attributes, self.client.restore_secret(vault_uri, secret_backup).attributes)
@ResourceGroupPreparer()
@KeyVaultPreparer(enable_soft_delete=True)
def test_recover_purge(self, vault, **kwargs):
self.assertIsNotNone(vault)
vault_uri = vault.properties.vault_uri
secrets = {}
# create secrets to recover
for i in range(0, self.list_test_size):
secret_name = self.get_resource_name('secrec{}'.format(str(i)))
secret_value = self.get_resource_name('secval{}'.format((str(i))))
secrets[secret_name] = self.client.set_secret(vault_uri, secret_name, secret_value)
# create secrets to purge
for i in range(0, self.list_test_size):
secret_name = self.get_resource_name('secprg{}'.format(str(i)))
secret_value = self.get_resource_name('secval{}'.format((str(i))))
secrets[secret_name] = self.client.set_secret(vault_uri, secret_name, secret_value)
# delete all secrets
for secret_name in secrets.keys():
self.client.delete_secret(vault_uri, secret_name)
if not self.is_playback():
time.sleep(20)
# validate all our deleted secrets are returned by get_deleted_secrets
deleted = [KeyVaultId.parse_secret_id(s.id).name for s in self.client.get_deleted_secrets(vault_uri)]
self.assertTrue(all(s in deleted for s in secrets.keys()))
# recover select secrets
for secret_name in [s for s in secrets.keys() if s.startswith('secrec')]:
self.client.recover_deleted_secret(vault_uri, secret_name)
# purge select secrets
for secret_name in [s for s in secrets.keys() if s.startswith('secprg')]:
self.client.purge_deleted_secret(vault_uri, secret_name)
if not self.is_playback():
time.sleep(20)
# validate none of our deleted secrets are returned by get_deleted_secrets
deleted = [KeyVaultId.parse_secret_id(s.id).name for s in self.client.get_deleted_secrets(vault_uri)]
self.assertTrue(not any(s in deleted for s in secrets.keys()))
# validate the recovered secrets
expected = {k: v for k, v in secrets.items() if k.startswith('secrec')}
actual = {k: self.client.get_secret(vault_uri, k, KeyVaultId.version_none) for k in expected.keys()}
self.assertEqual(len(set(expected.keys()) & set(actual.keys())), len(expected))
```
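The set-secret loops in `test_secret_list` and `test_list_versions` above repeat the same throttling retry inline. A standalone sketch of that pattern as a helper, kept separate from the test file; the linear 2.5-second back-off per failure matches the loops above:
```python
import time

def set_secret_with_retry(client, vault_uri, name, value, backoff=2.5):
    """Call client.set_secret, sleeping and retrying when throttled.

    Mirrors the inline while-loops in the tests above: any exception whose
    message contains 'Throttled' triggers a retry with a growing sleep;
    anything else is re-raised.
    """
    error_count = 0
    while True:
        try:
            return client.set_secret(vault_uri, name, value)
        except Exception as ex:
            if hasattr(ex, "message") and "Throttled" in ex.message:
                error_count += 1
                time.sleep(backoff * error_count)
            else:
                raise
```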
#### File: alertsmanagement/models/alerts_summary_group_item.py
```python
from msrest.serialization import Model
class AlertsSummaryGroupItem(Model):
"""Alerts summary group item.
:param name: Value of the aggregated field
:type name: str
:param count: Count of the aggregated field
:type count: int
:param groupedby: Name of the field aggregated
:type groupedby: str
:param values: List of the items
:type values:
list[~azure.mgmt.alertsmanagement.models.AlertsSummaryGroupItem]
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'count': {'key': 'count', 'type': 'int'},
'groupedby': {'key': 'groupedby', 'type': 'str'},
'values': {'key': 'values', 'type': '[AlertsSummaryGroupItem]'},
}
def __init__(self, **kwargs):
super(AlertsSummaryGroupItem, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.count = kwargs.get('count', None)
self.groupedby = kwargs.get('groupedby', None)
self.values = kwargs.get('values', None)
```
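Because `values` holds further `AlertsSummaryGroupItem` instances, an alerts summary is a tree. A small generic traversal sketch (not part of the SDK) that sums the leaf counts:
```python
def total_alert_count(item):
    """Recursively sum `count` over an AlertsSummaryGroupItem tree.

    Leaf items (no nested `values`) contribute their own count; group items
    are flattened by descending into their children.
    """
    if not item.values:
        return item.count or 0
    return sum(total_alert_count(child) for child in item.values)
```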
#### File: alertsmanagement/models/essentials_py3.py
```python
from msrest.serialization import Model
class Essentials(Model):
"""This object contains normalized fields across different monitor service and
also contains state related fields.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar severity: Severity of alert Sev0 being highest and Sev3 being
lowest. Possible values include: 'Sev0', 'Sev1', 'Sev2', 'Sev3', 'Sev4'
:vartype severity: str or ~azure.mgmt.alertsmanagement.models.Severity
:ivar signal_type: Log based alert or metric based alert. Possible values
include: 'Metric', 'Log', 'Unknown'
:vartype signal_type: str or
~azure.mgmt.alertsmanagement.models.SignalType
:ivar alert_state: Alert object state, which is modified by the user.
Possible values include: 'New', 'Acknowledged', 'Closed'
:vartype alert_state: str or
~azure.mgmt.alertsmanagement.models.AlertState
:ivar monitor_condition: Represents rule condition(Fired/Resolved)
     maintained by the monitor service depending on the state of the alert.
Possible values include: 'Fired', 'Resolved'
:vartype monitor_condition: str or
~azure.mgmt.alertsmanagement.models.MonitorCondition
:param target_resource: Target ARM resource, on which alert got created.
:type target_resource: str
:param target_resource_name: Name of the target ARM resource name, on
which alert got created.
:type target_resource_name: str
:param target_resource_group: Resource group of target ARM resource, on
which alert got created.
:type target_resource_group: str
:param target_resource_type: Resource type of target ARM resource, on
which alert got created.
:type target_resource_type: str
:ivar monitor_service: Monitor service on which the rule(monitor) is set.
Possible values include: 'Application Insights', 'ActivityLog
Administrative', 'ActivityLog Security', 'ActivityLog Recommendation',
'ActivityLog Policy', 'ActivityLog Autoscale', 'Log Analytics', 'Nagios',
'Platform', 'SCOM', 'ServiceHealth', 'SmartDetector', 'VM Insights',
'Zabbix'
:vartype monitor_service: str or
~azure.mgmt.alertsmanagement.models.MonitorService
:ivar alert_rule: Rule(monitor) which fired alert instance. Depending on
the monitor service, this would be ARM id or name of the rule.
:vartype alert_rule: str
:ivar source_created_id: Unique Id created by monitor service for each
alert instance. This could be used to track the issue at the monitor
service, in case of Nagios, Zabbix, SCOM etc.
:vartype source_created_id: str
:ivar smart_group_id: Unique Id of the smart group
:vartype smart_group_id: str
    :ivar smart_grouping_reason: Verbose reason describing why this alert
     instance is added to a smart group
:vartype smart_grouping_reason: str
:ivar start_date_time: Creation time(ISO-8601 format) of alert instance.
:vartype start_date_time: datetime
:ivar last_modified_date_time: Last modification time(ISO-8601 format) of
alert instance.
:vartype last_modified_date_time: datetime
:ivar monitor_condition_resolved_date_time: Resolved time(ISO-8601 format)
of alert instance. This will be updated when monitor service resolves the
     alert instance because the rule condition is no longer met.
:vartype monitor_condition_resolved_date_time: datetime
:ivar last_modified_user_name: User who last modified the alert, in case
of monitor service updates user would be 'system', otherwise name of the
user.
:vartype last_modified_user_name: str
"""
_validation = {
'severity': {'readonly': True},
'signal_type': {'readonly': True},
'alert_state': {'readonly': True},
'monitor_condition': {'readonly': True},
'monitor_service': {'readonly': True},
'alert_rule': {'readonly': True},
'source_created_id': {'readonly': True},
'smart_group_id': {'readonly': True},
'smart_grouping_reason': {'readonly': True},
'start_date_time': {'readonly': True},
'last_modified_date_time': {'readonly': True},
'monitor_condition_resolved_date_time': {'readonly': True},
'last_modified_user_name': {'readonly': True},
}
_attribute_map = {
'severity': {'key': 'severity', 'type': 'str'},
'signal_type': {'key': 'signalType', 'type': 'str'},
'alert_state': {'key': 'alertState', 'type': 'str'},
'monitor_condition': {'key': 'monitorCondition', 'type': 'str'},
'target_resource': {'key': 'targetResource', 'type': 'str'},
'target_resource_name': {'key': 'targetResourceName', 'type': 'str'},
'target_resource_group': {'key': 'targetResourceGroup', 'type': 'str'},
'target_resource_type': {'key': 'targetResourceType', 'type': 'str'},
'monitor_service': {'key': 'monitorService', 'type': 'str'},
'alert_rule': {'key': 'alertRule', 'type': 'str'},
'source_created_id': {'key': 'sourceCreatedId', 'type': 'str'},
'smart_group_id': {'key': 'smartGroupId', 'type': 'str'},
'smart_grouping_reason': {'key': 'smartGroupingReason', 'type': 'str'},
'start_date_time': {'key': 'startDateTime', 'type': 'iso-8601'},
'last_modified_date_time': {'key': 'lastModifiedDateTime', 'type': 'iso-8601'},
'monitor_condition_resolved_date_time': {'key': 'monitorConditionResolvedDateTime', 'type': 'iso-8601'},
'last_modified_user_name': {'key': 'lastModifiedUserName', 'type': 'str'},
}
def __init__(self, *, target_resource: str=None, target_resource_name: str=None, target_resource_group: str=None, target_resource_type: str=None, **kwargs) -> None:
super(Essentials, self).__init__(**kwargs)
self.severity = None
self.signal_type = None
self.alert_state = None
self.monitor_condition = None
self.target_resource = target_resource
self.target_resource_name = target_resource_name
self.target_resource_group = target_resource_group
self.target_resource_type = target_resource_type
self.monitor_service = None
self.alert_rule = None
self.source_created_id = None
self.smart_group_id = None
self.smart_grouping_reason = None
self.start_date_time = None
self.last_modified_date_time = None
self.monitor_condition_resolved_date_time = None
self.last_modified_user_name = None
```
#### File: alertsmanagement/models/smart_group_modification_item.py
```python
from msrest.serialization import Model
class SmartGroupModificationItem(Model):
"""smartGroup modification item.
:param modification_event: Reason for the modification. Possible values
include: 'SmartGroupCreated', 'StateChange', 'AlertAdded', 'AlertRemoved'
:type modification_event: str or
~azure.mgmt.alertsmanagement.models.SmartGroupModificationEvent
:param old_value: Old value
:type old_value: str
:param new_value: New value
:type new_value: str
:param modified_at: Modified date and time
:type modified_at: str
:param modified_by: Modified user details (Principal client name)
:type modified_by: str
:param comments: Modification comments
:type comments: str
:param description: Description of the modification
:type description: str
"""
_attribute_map = {
'modification_event': {'key': 'modificationEvent', 'type': 'SmartGroupModificationEvent'},
'old_value': {'key': 'oldValue', 'type': 'str'},
'new_value': {'key': 'newValue', 'type': 'str'},
'modified_at': {'key': 'modifiedAt', 'type': 'str'},
'modified_by': {'key': 'modifiedBy', 'type': 'str'},
'comments': {'key': 'comments', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(self, **kwargs):
super(SmartGroupModificationItem, self).__init__(**kwargs)
self.modification_event = kwargs.get('modification_event', None)
self.old_value = kwargs.get('old_value', None)
self.new_value = kwargs.get('new_value', None)
self.modified_at = kwargs.get('modified_at', None)
self.modified_by = kwargs.get('modified_by', None)
self.comments = kwargs.get('comments', None)
self.description = kwargs.get('description', None)
```
#### File: mgmt/authorization/authorization_management_client.py
```python
from msrest.service_client import SDKClient
from msrest import Serializer, Deserializer
from msrestazure import AzureConfiguration
from azure.profiles import KnownProfiles, ProfileDefinition
from azure.profiles.multiapiclient import MultiApiClientMixin
from .version import VERSION
class AuthorizationManagementClientConfiguration(AzureConfiguration):
"""Configuration for AuthorizationManagementClient
Note that all parameters used to create this instance are saved as instance
attributes.
:param credentials: Credentials needed for the client to connect to Azure.
:type credentials: :mod:`A msrestazure Credentials
object<msrestazure.azure_active_directory>`
:param subscription_id: The ID of the target subscription.
:type subscription_id: str
:param str base_url: Service URL
"""
def __init__(
self, credentials, subscription_id, base_url=None):
if credentials is None:
raise ValueError("Parameter 'credentials' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
if not base_url:
base_url = 'https://management.azure.com'
super(AuthorizationManagementClientConfiguration, self).__init__(base_url)
self.add_user_agent('azure-mgmt-authorization/{}'.format(VERSION))
self.add_user_agent('Azure-SDK-For-Python')
self.credentials = credentials
self.subscription_id = subscription_id
class AuthorizationManagementClient(MultiApiClientMixin, SDKClient):
"""Role based access control provides you a way to apply granular level policy administration down to individual resources or resource groups. These operations enable you to manage role definitions and role assignments. A role definition describes the set of actions that can be performed on resources. A role assignment grants access to Azure Active Directory users.
    This package contains multiple API versions, to help you deal with all Azure clouds
(Azure Stack, Azure Government, Azure China, etc.).
By default, uses latest API version available on public Azure.
    For production, you should stick to a particular api-version and/or profile.
The profile sets a mapping between the operation group and an API version.
The api-version parameter sets the default API version if the operation
group is not described in the profile.
:ivar config: Configuration for client.
:vartype config: AuthorizationManagementClientConfiguration
:param credentials: Credentials needed for the client to connect to Azure.
:type credentials: :mod:`A msrestazure Credentials
object<msrestazure.azure_active_directory>`
:param subscription_id: Subscription credentials which uniquely identify
Microsoft Azure subscription. The subscription ID forms part of the URI
for every service call.
:type subscription_id: str
:param str api_version: API version to use if no profile is provided, or if
missing in profile.
:param str base_url: Service URL
:param profile: A profile definition, from KnownProfiles to dict.
:type profile: azure.profiles.KnownProfiles
"""
DEFAULT_API_VERSION='2018-01-01-preview'
_PROFILE_TAG = "azure.mgmt.authorization.AuthorizationManagementClient"
LATEST_PROFILE = ProfileDefinition({
_PROFILE_TAG: {
'classic_administrators': '2015-06-01',
'deny_assignments': '2018-07-01-preview',
'role_assignments': '2018-09-01-preview',
None: DEFAULT_API_VERSION
}},
_PROFILE_TAG + " latest"
)
def __init__(self, credentials, subscription_id, api_version=None, base_url=None, profile=KnownProfiles.default):
self.config = AuthorizationManagementClientConfiguration(credentials, subscription_id, base_url)
super(AuthorizationManagementClient, self).__init__(
credentials,
self.config,
api_version=api_version,
profile=profile
)
############ Generated from here ############
@classmethod
def _models_dict(cls, api_version):
return {k: v for k, v in cls.models(api_version).__dict__.items() if isinstance(v, type)}
@classmethod
def models(cls, api_version=DEFAULT_API_VERSION):
"""Module depends on the API version:
* 2015-06-01: :mod:`v2015_06_01.models<azure.mgmt.authorization.v2015_06_01.models>`
* 2015-07-01: :mod:`v2015_07_01.models<azure.mgmt.authorization.v2015_07_01.models>`
* 2018-01-01-preview: :mod:`v2018_01_01_preview.models<azure.mgmt.authorization.v2018_01_01_preview.models>`
* 2018-07-01-preview: :mod:`v2018_07_01_preview.models<azure.mgmt.authorization.v2018_07_01_preview.models>`
* 2018-09-01-preview: :mod:`v2018_09_01_preview.models<azure.mgmt.authorization.v2018_09_01_preview.models>`
"""
if api_version == '2015-06-01':
from .v2015_06_01 import models
return models
elif api_version == '2015-07-01':
from .v2015_07_01 import models
return models
elif api_version == '2018-01-01-preview':
from .v2018_01_01_preview import models
return models
elif api_version == '2018-07-01-preview':
from .v2018_07_01_preview import models
return models
elif api_version == '2018-09-01-preview':
from .v2018_09_01_preview import models
return models
raise NotImplementedError("APIVersion {} is not available".format(api_version))
@property
def classic_administrators(self):
"""Instance depends on the API version:
* 2015-06-01: :class:`ClassicAdministratorsOperations<azure.mgmt.authorization.v2015_06_01.operations.ClassicAdministratorsOperations>`
"""
api_version = self._get_api_version('classic_administrators')
if api_version == '2015-06-01':
from .v2015_06_01.operations import ClassicAdministratorsOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def deny_assignments(self):
"""Instance depends on the API version:
* 2018-07-01-preview: :class:`DenyAssignmentsOperations<azure.mgmt.authorization.v2018_07_01_preview.operations.DenyAssignmentsOperations>`
"""
api_version = self._get_api_version('deny_assignments')
if api_version == '2018-07-01-preview':
from .v2018_07_01_preview.operations import DenyAssignmentsOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def permissions(self):
"""Instance depends on the API version:
* 2015-07-01: :class:`PermissionsOperations<azure.mgmt.authorization.v2015_07_01.operations.PermissionsOperations>`
* 2018-01-01-preview: :class:`PermissionsOperations<azure.mgmt.authorization.v2018_01_01_preview.operations.PermissionsOperations>`
"""
api_version = self._get_api_version('permissions')
if api_version == '2015-07-01':
from .v2015_07_01.operations import PermissionsOperations as OperationClass
elif api_version == '2018-01-01-preview':
from .v2018_01_01_preview.operations import PermissionsOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def provider_operations_metadata(self):
"""Instance depends on the API version:
* 2015-07-01: :class:`ProviderOperationsMetadataOperations<azure.mgmt.authorization.v2015_07_01.operations.ProviderOperationsMetadataOperations>`
* 2018-01-01-preview: :class:`ProviderOperationsMetadataOperations<azure.mgmt.authorization.v2018_01_01_preview.operations.ProviderOperationsMetadataOperations>`
"""
api_version = self._get_api_version('provider_operations_metadata')
if api_version == '2015-07-01':
from .v2015_07_01.operations import ProviderOperationsMetadataOperations as OperationClass
elif api_version == '2018-01-01-preview':
from .v2018_01_01_preview.operations import ProviderOperationsMetadataOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def role_assignments(self):
"""Instance depends on the API version:
* 2015-07-01: :class:`RoleAssignmentsOperations<azure.mgmt.authorization.v2015_07_01.operations.RoleAssignmentsOperations>`
* 2018-01-01-preview: :class:`RoleAssignmentsOperations<azure.mgmt.authorization.v2018_01_01_preview.operations.RoleAssignmentsOperations>`
* 2018-09-01-preview: :class:`RoleAssignmentsOperations<azure.mgmt.authorization.v2018_09_01_preview.operations.RoleAssignmentsOperations>`
"""
api_version = self._get_api_version('role_assignments')
if api_version == '2015-07-01':
from .v2015_07_01.operations import RoleAssignmentsOperations as OperationClass
elif api_version == '2018-01-01-preview':
from .v2018_01_01_preview.operations import RoleAssignmentsOperations as OperationClass
elif api_version == '2018-09-01-preview':
from .v2018_09_01_preview.operations import RoleAssignmentsOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def role_definitions(self):
"""Instance depends on the API version:
* 2015-07-01: :class:`RoleDefinitionsOperations<azure.mgmt.authorization.v2015_07_01.operations.RoleDefinitionsOperations>`
* 2018-01-01-preview: :class:`RoleDefinitionsOperations<azure.mgmt.authorization.v2018_01_01_preview.operations.RoleDefinitionsOperations>`
"""
api_version = self._get_api_version('role_definitions')
if api_version == '2015-07-01':
from .v2015_07_01.operations import RoleDefinitionsOperations as OperationClass
elif api_version == '2018-01-01-preview':
from .v2018_01_01_preview.operations import RoleDefinitionsOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
```
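A minimal instantiation sketch for the multi-API client, pinning a default api-version instead of relying on LATEST_PROFILE. The credential object is a placeholder, ServicePrincipalCredentials is just one msrestazure credential type the constructor accepts, and the subscription-level `role_assignments.list()` call is an assumption about that operations class:
```python
from msrestazure.azure_active_directory import ServicePrincipalCredentials
from azure.mgmt.authorization import AuthorizationManagementClient

# Placeholders for the service principal and subscription.
credentials = ServicePrincipalCredentials(
    client_id="<app-id>", secret="<secret>", tenant="<tenant-id>")

# Pin the default api-version; the properties above show which versions
# each operation group supports.
client = AuthorizationManagementClient(
    credentials, "<subscription-id>", api_version="2018-01-01-preview")

for assignment in client.role_assignments.list():
    print(assignment.id)
```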
#### File: v2018_07_01_preview/models/deny_assignment_filter.py
```python
from msrest.serialization import Model
class DenyAssignmentFilter(Model):
"""Deny Assignments filter.
:param deny_assignment_name: Return deny assignment with specified name.
:type deny_assignment_name: str
:param principal_id: Return all deny assignments where the specified
principal is listed in the principals list of deny assignments.
:type principal_id: str
:param gdpr_export_principal_id: Return all deny assignments where the
specified principal is listed either in the principals list or exclude
principals list of deny assignments.
:type gdpr_export_principal_id: str
"""
_attribute_map = {
'deny_assignment_name': {'key': 'denyAssignmentName', 'type': 'str'},
'principal_id': {'key': 'principalId', 'type': 'str'},
'gdpr_export_principal_id': {'key': 'gdprExportPrincipalId', 'type': 'str'},
}
def __init__(self, **kwargs):
super(DenyAssignmentFilter, self).__init__(**kwargs)
self.deny_assignment_name = kwargs.get('deny_assignment_name', None)
self.principal_id = kwargs.get('principal_id', None)
self.gdpr_export_principal_id = kwargs.get('gdpr_export_principal_id', None)
```
#### File: batchai/models/app_insights_reference.py
```python
from msrest.serialization import Model
class AppInsightsReference(Model):
"""Specifies Azure Application Insights information for performance counters
reporting.
All required parameters must be populated in order to send to Azure.
:param component: Required. Specifies the Azure Application Insights
component resource id.
:type component: ~azure.mgmt.batchai.models.ResourceId
:param instrumentation_key: Value of the Azure Application Insights
instrumentation key.
:type instrumentation_key: str
    :param instrumentation_key_secret_reference: Specifies the KeyVault Store
     and Secret which contain the Azure Application Insights instrumentation
     key. One of instrumentationKey or instrumentationKeySecretReference must
     be specified.
:type instrumentation_key_secret_reference:
~azure.mgmt.batchai.models.KeyVaultSecretReference
"""
_validation = {
'component': {'required': True},
}
_attribute_map = {
'component': {'key': 'component', 'type': 'ResourceId'},
'instrumentation_key': {'key': 'instrumentationKey', 'type': 'str'},
'instrumentation_key_secret_reference': {'key': 'instrumentationKeySecretReference', 'type': 'KeyVaultSecretReference'},
}
def __init__(self, **kwargs):
super(AppInsightsReference, self).__init__(**kwargs)
self.component = kwargs.get('component', None)
self.instrumentation_key = kwargs.get('instrumentation_key', None)
self.instrumentation_key_secret_reference = kwargs.get('instrumentation_key_secret_reference', None)
```
#### File: batchai/models/azure_storage_credentials_info_py3.py
```python
from msrest.serialization import Model
class AzureStorageCredentialsInfo(Model):
"""Credentials to access Azure File Share.
:param account_key: Storage account key. One of accountKey or
accountKeySecretReference must be specified.
:type account_key: str
:param account_key_secret_reference: Specifies the location of the storage
account key, which is a Key Vault Secret. Users can store their secrets in
Azure KeyVault and pass it to the Batch AI Service to integrate with
KeyVault. One of accountKey or accountKeySecretReference must be
specified.
:type account_key_secret_reference:
~azure.mgmt.batchai.models.KeyVaultSecretReference
"""
_attribute_map = {
'account_key': {'key': 'accountKey', 'type': 'str'},
'account_key_secret_reference': {'key': 'accountKeySecretReference', 'type': 'KeyVaultSecretReference'},
}
def __init__(self, *, account_key: str=None, account_key_secret_reference=None, **kwargs) -> None:
super(AzureStorageCredentialsInfo, self).__init__(**kwargs)
self.account_key = account_key
self.account_key_secret_reference = account_key_secret_reference
```
#### File: batchai/models/file_server_reference.py
```python
from msrest.serialization import Model
class FileServerReference(Model):
"""Provides required information, for the service to be able to mount Azure
FileShare on the cluster nodes.
All required parameters must be populated in order to send to Azure.
:param file_server: Required. Reference to the file server resource.
:type file_server: ~azure.mgmt.batchai.models.ResourceId
:param source_directory: Specifies the source directory in File Server
that needs to be mounted. If this property is not specified, the entire
File Server will be mounted.
:type source_directory: str
:param relative_mount_path: Required. Specifies the relative path on the
compute node where the File Server will be mounted. Note that all cluster
level file servers will be mounted under $AZ_BATCHAI_MOUNT_ROOT location
     and job level file servers will be mounted under
$AZ_BATCHAI_JOB_MOUNT_ROOT.
:type relative_mount_path: str
:param mount_options: Specifies the mount options for File Server.
:type mount_options: str
"""
_validation = {
'file_server': {'required': True},
'relative_mount_path': {'required': True},
}
_attribute_map = {
'file_server': {'key': 'fileServer', 'type': 'ResourceId'},
'source_directory': {'key': 'sourceDirectory', 'type': 'str'},
'relative_mount_path': {'key': 'relativeMountPath', 'type': 'str'},
'mount_options': {'key': 'mountOptions', 'type': 'str'},
}
def __init__(self, **kwargs):
super(FileServerReference, self).__init__(**kwargs)
self.file_server = kwargs.get('file_server', None)
self.source_directory = kwargs.get('source_directory', None)
self.relative_mount_path = kwargs.get('relative_mount_path', None)
self.mount_options = kwargs.get('mount_options', None)
```
#### File: batchai/models/job_properties_execution_info_py3.py
```python
from msrest.serialization import Model
class JobPropertiesExecutionInfo(Model):
"""Contains information about the execution of a job in the Azure Batch
service.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar start_time: The time at which the job started running. 'Running'
corresponds to the running state. If the job has been restarted or
retried, this is the most recent time at which the job started running.
     This property is present only for jobs that are in the running or completed
state.
:vartype start_time: datetime
:ivar end_time: The time at which the job completed. This property is only
returned if the job is in completed state.
:vartype end_time: datetime
:ivar exit_code: The exit code of the job. This property is only returned
if the job is in completed state.
:vartype exit_code: int
:ivar errors: Contains details of various errors encountered by the
service during job execution.
:vartype errors: list[~azure.mgmt.batchai.models.BatchAIError]
"""
_validation = {
'start_time': {'readonly': True},
'end_time': {'readonly': True},
'exit_code': {'readonly': True},
'errors': {'readonly': True},
}
_attribute_map = {
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
'exit_code': {'key': 'exitCode', 'type': 'int'},
'errors': {'key': 'errors', 'type': '[BatchAIError]'},
}
def __init__(self, **kwargs) -> None:
super(JobPropertiesExecutionInfo, self).__init__(**kwargs)
self.start_time = None
self.end_time = None
self.exit_code = None
self.errors = None
```
#### File: batchai/models/manual_scale_settings.py
```python
from msrest.serialization import Model
class ManualScaleSettings(Model):
"""Manual scale settings for the cluster.
All required parameters must be populated in order to send to Azure.
:param target_node_count: Required. The desired number of compute nodes in
the Cluster. Default is 0. If autoScaleSettings are not specified, then
the Cluster starts with this target. Default value: 0 .
:type target_node_count: int
:param node_deallocation_option: Determines what to do with the job(s)
running on compute node if the Cluster size is decreasing. The default
value is requeue. Possible values include: 'requeue', 'terminate',
'waitforjobcompletion'. Default value: "requeue" .
:type node_deallocation_option: str or
~azure.mgmt.batchai.models.DeallocationOption
"""
_validation = {
'target_node_count': {'required': True},
}
_attribute_map = {
'target_node_count': {'key': 'targetNodeCount', 'type': 'int'},
'node_deallocation_option': {'key': 'nodeDeallocationOption', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ManualScaleSettings, self).__init__(**kwargs)
self.target_node_count = kwargs.get('target_node_count', 0)
self.node_deallocation_option = kwargs.get('node_deallocation_option', "requeue")
```
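A construction sketch for a fixed-size cluster. Omitting the kwargs would fall back to the defaults shown in `__init__` (0 nodes, "requeue"); the import path follows the `~azure.mgmt.batchai.models` references used throughout these docstrings:
```python
from azure.mgmt.batchai.models import ManualScaleSettings

# Four nodes; requeue running jobs if the cluster is later scaled down.
scale = ManualScaleSettings(target_node_count=4,
                            node_deallocation_option="requeue")
print(scale.target_node_count, scale.node_deallocation_option)
```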
#### File: batchai/models/node_state_counts_py3.py
```python
from msrest.serialization import Model
class NodeStateCounts(Model):
"""Counts of various compute node states on the cluster.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar idle_node_count: Number of compute nodes in idle state.
:vartype idle_node_count: int
:ivar running_node_count: Number of compute nodes which are running jobs.
:vartype running_node_count: int
:ivar preparing_node_count: Number of compute nodes which are being
prepared.
:vartype preparing_node_count: int
:ivar unusable_node_count: Number of compute nodes which are unusable.
:vartype unusable_node_count: int
:ivar leaving_node_count: Number of compute nodes which are leaving the
cluster.
:vartype leaving_node_count: int
"""
_validation = {
'idle_node_count': {'readonly': True},
'running_node_count': {'readonly': True},
'preparing_node_count': {'readonly': True},
'unusable_node_count': {'readonly': True},
'leaving_node_count': {'readonly': True},
}
_attribute_map = {
'idle_node_count': {'key': 'idleNodeCount', 'type': 'int'},
'running_node_count': {'key': 'runningNodeCount', 'type': 'int'},
'preparing_node_count': {'key': 'preparingNodeCount', 'type': 'int'},
'unusable_node_count': {'key': 'unusableNodeCount', 'type': 'int'},
'leaving_node_count': {'key': 'leavingNodeCount', 'type': 'int'},
}
def __init__(self, **kwargs) -> None:
super(NodeStateCounts, self).__init__(**kwargs)
self.idle_node_count = None
self.running_node_count = None
self.preparing_node_count = None
self.unusable_node_count = None
self.leaving_node_count = None
```
#### File: batchai/models/remote_login_information.py
```python
from msrest.serialization import Model
class RemoteLoginInformation(Model):
"""Contains remote login details to SSH/RDP to a compute node in cluster.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar node_id: Id of the compute node
:vartype node_id: str
:ivar ip_address: ip address
:vartype ip_address: str
:ivar port: port number.
:vartype port: float
"""
_validation = {
'node_id': {'readonly': True},
'ip_address': {'readonly': True},
'port': {'readonly': True},
}
_attribute_map = {
'node_id': {'key': 'nodeId', 'type': 'str'},
'ip_address': {'key': 'ipAddress', 'type': 'str'},
'port': {'key': 'port', 'type': 'float'},
}
def __init__(self, **kwargs):
super(RemoteLoginInformation, self).__init__(**kwargs)
self.node_id = None
self.ip_address = None
self.port = None
```
#### File: batchai/models/ssh_configuration_py3.py
```python
from msrest.serialization import Model
class SshConfiguration(Model):
"""SSH configuration settings for the VM.
All required parameters must be populated in order to send to Azure.
:param public_ips_to_allow: List of source IP ranges to allow SSH
     connection to a node. The default value is '*', which matches all source
     IPs. A maximum of 400 IP ranges can be specified.
:type public_ips_to_allow: list[str]
:param user_account_settings: Required. Settings for user account to be
created on a node.
:type user_account_settings:
~azure.mgmt.batchai.models.UserAccountSettings
"""
_validation = {
'user_account_settings': {'required': True},
}
_attribute_map = {
'public_ips_to_allow': {'key': 'publicIPsToAllow', 'type': '[str]'},
'user_account_settings': {'key': 'userAccountSettings', 'type': 'UserAccountSettings'},
}
def __init__(self, *, user_account_settings, public_ips_to_allow=None, **kwargs) -> None:
super(SshConfiguration, self).__init__(**kwargs)
self.public_ips_to_allow = public_ips_to_allow
self.user_account_settings = user_account_settings
```
#### File: batchai/models/unmanaged_file_system_reference_py3.py
```python
from msrest.serialization import Model
class UnmanagedFileSystemReference(Model):
"""Details of the file system to mount on the compute cluster nodes.
All required parameters must be populated in order to send to Azure.
:param mount_command: Required. Command used to mount the unmanaged file
system.
:type mount_command: str
:param relative_mount_path: Required. Specifies the relative path on the
compute cluster node where the file system will be mounted. Note that all
     cluster level unmanaged file systems will be mounted under
     $AZ_BATCHAI_MOUNT_ROOT location and job level unmanaged file systems will
be mounted under $AZ_BATCHAI_JOB_MOUNT_ROOT.
:type relative_mount_path: str
"""
_validation = {
'mount_command': {'required': True},
'relative_mount_path': {'required': True},
}
_attribute_map = {
'mount_command': {'key': 'mountCommand', 'type': 'str'},
'relative_mount_path': {'key': 'relativeMountPath', 'type': 'str'},
}
def __init__(self, *, mount_command: str, relative_mount_path: str, **kwargs) -> None:
super(UnmanagedFileSystemReference, self).__init__(**kwargs)
self.mount_command = mount_command
self.relative_mount_path = relative_mount_path
```
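A sketch of describing an NFS mount for compute nodes; the mount command and server address are illustrative placeholders, and the relative path ends up under $AZ_BATCHAI_MOUNT_ROOT as the docstring notes:
```python
from azure.mgmt.batchai.models import UnmanagedFileSystemReference

# Illustrative NFS mount; the server address and export path are placeholders.
nfs_ref = UnmanagedFileSystemReference(
    mount_command="mount -t nfs 10.0.0.4:/exports/data",
    relative_mount_path="nfs",   # mounted at $AZ_BATCHAI_MOUNT_ROOT/nfs
)
```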
#### File: cdn/models/geo_filter_py3.py
```python
from msrest.serialization import Model
class GeoFilter(Model):
"""Rules defining user's geo access within a CDN endpoint.
All required parameters must be populated in order to send to Azure.
:param relative_path: Required. Relative path applicable to geo filter.
(e.g. '/mypictures', '/mypicture/kitty.jpg', and etc.)
:type relative_path: str
:param action: Required. Action of the geo filter, i.e. allow or block
access. Possible values include: 'Block', 'Allow'
:type action: str or ~azure.mgmt.cdn.models.GeoFilterActions
:param country_codes: Required. Two letter country codes defining user
country access in a geo filter, e.g. AU, MX, US.
:type country_codes: list[str]
"""
_validation = {
'relative_path': {'required': True},
'action': {'required': True},
'country_codes': {'required': True},
}
_attribute_map = {
'relative_path': {'key': 'relativePath', 'type': 'str'},
'action': {'key': 'action', 'type': 'GeoFilterActions'},
'country_codes': {'key': 'countryCodes', 'type': '[str]'},
}
def __init__(self, *, relative_path: str, action, country_codes, **kwargs) -> None:
super(GeoFilter, self).__init__(**kwargs)
self.relative_path = relative_path
self.action = action
self.country_codes = country_codes
```
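A construction sketch reusing the example values from the docstring. Whether `action` may be passed as a plain string rather than a `GeoFilterActions` enum member depends on msrest's enum handling, so the string form here is an assumption:
```python
from azure.mgmt.cdn.models import GeoFilter

# Allow access to /mypictures only from the listed countries.
geo_filter = GeoFilter(
    relative_path="/mypictures",
    action="Allow",                    # a GeoFilterActions value
    country_codes=["AU", "MX", "US"],
)
```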
#### File: v2015_06_15/models/os_disk_image_py3.py
```python
from msrest.serialization import Model
class OSDiskImage(Model):
"""Contains the os disk image information.
All required parameters must be populated in order to send to Azure.
:param operating_system: Required. The operating system of the
osDiskImage. Possible values include: 'Windows', 'Linux'
:type operating_system: str or
~azure.mgmt.compute.v2015_06_15.models.OperatingSystemTypes
"""
_validation = {
'operating_system': {'required': True},
}
_attribute_map = {
'operating_system': {'key': 'operatingSystem', 'type': 'OperatingSystemTypes'},
}
def __init__(self, *, operating_system, **kwargs) -> None:
super(OSDiskImage, self).__init__(**kwargs)
self.operating_system = operating_system
```
#### File: v2017_03_30/models/snapshot.py
```python
from .resource import Resource
class Snapshot(Resource):
"""Snapshot resource.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id
:vartype id: str
:ivar name: Resource name
:vartype name: str
:ivar type: Resource type
:vartype type: str
:param location: Required. Resource location
:type location: str
:param tags: Resource tags
:type tags: dict[str, str]
:ivar managed_by: Unused. Always Null.
:vartype managed_by: str
:param sku:
:type sku: ~azure.mgmt.compute.v2017_03_30.models.DiskSku
:ivar time_created: The time when the disk was created.
:vartype time_created: datetime
:param os_type: The Operating System type. Possible values include:
'Windows', 'Linux'
:type os_type: str or
~azure.mgmt.compute.v2017_03_30.models.OperatingSystemTypes
:param creation_data: Required. Disk source information. CreationData
information cannot be changed after the disk has been created.
:type creation_data: ~azure.mgmt.compute.v2017_03_30.models.CreationData
:param disk_size_gb: If creationData.createOption is Empty, this field is
mandatory and it indicates the size of the VHD to create. If this field is
present for updates or creation with other options, it indicates a resize.
Resizes are only allowed if the disk is not attached to a running VM, and
can only increase the disk's size.
:type disk_size_gb: int
:param encryption_settings: Encryption settings for disk or snapshot
:type encryption_settings:
~azure.mgmt.compute.v2017_03_30.models.EncryptionSettings
:ivar provisioning_state: The disk provisioning state.
:vartype provisioning_state: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'managed_by': {'readonly': True},
'time_created': {'readonly': True},
'creation_data': {'required': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'managed_by': {'key': 'managedBy', 'type': 'str'},
'sku': {'key': 'sku', 'type': 'DiskSku'},
'time_created': {'key': 'properties.timeCreated', 'type': 'iso-8601'},
'os_type': {'key': 'properties.osType', 'type': 'OperatingSystemTypes'},
'creation_data': {'key': 'properties.creationData', 'type': 'CreationData'},
'disk_size_gb': {'key': 'properties.diskSizeGB', 'type': 'int'},
'encryption_settings': {'key': 'properties.encryptionSettings', 'type': 'EncryptionSettings'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
}
def __init__(self, **kwargs):
super(Snapshot, self).__init__(**kwargs)
self.managed_by = None
self.sku = kwargs.get('sku', None)
self.time_created = None
self.os_type = kwargs.get('os_type', None)
self.creation_data = kwargs.get('creation_data', None)
self.disk_size_gb = kwargs.get('disk_size_gb', None)
self.encryption_settings = kwargs.get('encryption_settings', None)
self.provisioning_state = None
```
#### File: v2017_03_30/models/virtual_machine_extension_instance_view_py3.py
```python
from msrest.serialization import Model
class VirtualMachineExtensionInstanceView(Model):
"""The instance view of a virtual machine extension.
:param name: The virtual machine extension name.
:type name: str
:param type: Specifies the type of the extension; an example is
"CustomScriptExtension".
:type type: str
:param type_handler_version: Specifies the version of the script handler.
:type type_handler_version: str
:param substatuses: The resource status information.
:type substatuses:
list[~azure.mgmt.compute.v2017_03_30.models.InstanceViewStatus]
:param statuses: The resource status information.
:type statuses:
list[~azure.mgmt.compute.v2017_03_30.models.InstanceViewStatus]
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'type_handler_version': {'key': 'typeHandlerVersion', 'type': 'str'},
'substatuses': {'key': 'substatuses', 'type': '[InstanceViewStatus]'},
'statuses': {'key': 'statuses', 'type': '[InstanceViewStatus]'},
}
def __init__(self, *, name: str=None, type: str=None, type_handler_version: str=None, substatuses=None, statuses=None, **kwargs) -> None:
super(VirtualMachineExtensionInstanceView, self).__init__(**kwargs)
self.name = name
self.type = type
self.type_handler_version = type_handler_version
self.substatuses = substatuses
self.statuses = statuses
```
#### File: v2018_06_01/models/run_command_input.py
```python
from msrest.serialization import Model
class RunCommandInput(Model):
"""Capture Virtual Machine parameters.
All required parameters must be populated in order to send to Azure.
:param command_id: Required. The run command id.
:type command_id: str
:param script: Optional. The script to be executed. When this value is
given, the given script will override the default script of the command.
:type script: list[str]
:param parameters: The run command parameters.
:type parameters:
list[~azure.mgmt.compute.v2018_06_01.models.RunCommandInputParameter]
"""
_validation = {
'command_id': {'required': True},
}
_attribute_map = {
'command_id': {'key': 'commandId', 'type': 'str'},
'script': {'key': 'script', 'type': '[str]'},
'parameters': {'key': 'parameters', 'type': '[RunCommandInputParameter]'},
}
def __init__(self, **kwargs):
super(RunCommandInput, self).__init__(**kwargs)
self.command_id = kwargs.get('command_id', None)
self.script = kwargs.get('script', None)
self.parameters = kwargs.get('parameters', None)
```
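A sketch of a run-command payload. "RunShellScript" is a commonly documented built-in command id for Linux VMs but is an assumption here; the script lines simply override the command's default script as described above:
```python
from azure.mgmt.compute.v2018_06_01.models import RunCommandInput

run_command = RunCommandInput(
    command_id="RunShellScript",               # assumed built-in command id
    script=["echo hello from run command"],    # overrides the default script
)
```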
#### File: v2018_06_01/models/virtual_machine_scale_set_vm_instance_view.py
```python
from msrest.serialization import Model
class VirtualMachineScaleSetVMInstanceView(Model):
"""The instance view of a virtual machine scale set VM.
Variables are only populated by the server, and will be ignored when
sending a request.
:param platform_update_domain: The Update Domain count.
:type platform_update_domain: int
:param platform_fault_domain: The Fault Domain count.
:type platform_fault_domain: int
:param rdp_thumb_print: The Remote desktop certificate thumbprint.
:type rdp_thumb_print: str
:param vm_agent: The VM Agent running on the virtual machine.
:type vm_agent:
~azure.mgmt.compute.v2018_06_01.models.VirtualMachineAgentInstanceView
:param maintenance_redeploy_status: The Maintenance Operation status on
the virtual machine.
:type maintenance_redeploy_status:
~azure.mgmt.compute.v2018_06_01.models.MaintenanceRedeployStatus
:param disks: The disks information.
:type disks: list[~azure.mgmt.compute.v2018_06_01.models.DiskInstanceView]
:param extensions: The extensions information.
:type extensions:
list[~azure.mgmt.compute.v2018_06_01.models.VirtualMachineExtensionInstanceView]
:ivar vm_health: The health status for the VM.
:vartype vm_health:
~azure.mgmt.compute.v2018_06_01.models.VirtualMachineHealthStatus
:param boot_diagnostics: Boot Diagnostics is a debugging feature which
allows you to view Console Output and Screenshot to diagnose VM status.
<br><br> You can easily view the output of your console log. <br><br>
Azure also enables you to see a screenshot of the VM from the hypervisor.
:type boot_diagnostics:
~azure.mgmt.compute.v2018_06_01.models.BootDiagnosticsInstanceView
:param statuses: The resource status information.
:type statuses:
list[~azure.mgmt.compute.v2018_06_01.models.InstanceViewStatus]
:param placement_group_id: The placement group in which the VM is running.
If the VM is deallocated it will not have a placementGroupId.
:type placement_group_id: str
"""
_validation = {
'vm_health': {'readonly': True},
}
_attribute_map = {
'platform_update_domain': {'key': 'platformUpdateDomain', 'type': 'int'},
'platform_fault_domain': {'key': 'platformFaultDomain', 'type': 'int'},
'rdp_thumb_print': {'key': 'rdpThumbPrint', 'type': 'str'},
'vm_agent': {'key': 'vmAgent', 'type': 'VirtualMachineAgentInstanceView'},
'maintenance_redeploy_status': {'key': 'maintenanceRedeployStatus', 'type': 'MaintenanceRedeployStatus'},
'disks': {'key': 'disks', 'type': '[DiskInstanceView]'},
'extensions': {'key': 'extensions', 'type': '[VirtualMachineExtensionInstanceView]'},
'vm_health': {'key': 'vmHealth', 'type': 'VirtualMachineHealthStatus'},
'boot_diagnostics': {'key': 'bootDiagnostics', 'type': 'BootDiagnosticsInstanceView'},
'statuses': {'key': 'statuses', 'type': '[InstanceViewStatus]'},
'placement_group_id': {'key': 'placementGroupId', 'type': 'str'},
}
def __init__(self, **kwargs):
super(VirtualMachineScaleSetVMInstanceView, self).__init__(**kwargs)
self.platform_update_domain = kwargs.get('platform_update_domain', None)
self.platform_fault_domain = kwargs.get('platform_fault_domain', None)
self.rdp_thumb_print = kwargs.get('rdp_thumb_print', None)
self.vm_agent = kwargs.get('vm_agent', None)
self.maintenance_redeploy_status = kwargs.get('maintenance_redeploy_status', None)
self.disks = kwargs.get('disks', None)
self.extensions = kwargs.get('extensions', None)
self.vm_health = None
self.boot_diagnostics = kwargs.get('boot_diagnostics', None)
self.statuses = kwargs.get('statuses', None)
self.placement_group_id = kwargs.get('placement_group_id', None)
```
#### File: consumption/models/reservation_summaries.py
```python
from .resource import Resource
class ReservationSummaries(Resource):
"""reservation summaries resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:ivar tags: Resource tags.
:vartype tags: dict[str, str]
:ivar reservation_order_id: The reservation order ID is the identifier for
a reservation purchase. Each reservation order ID represents a single
purchase transaction. A reservation order contains reservations. The
reservation order specifies the VM size and region for the reservations.
:vartype reservation_order_id: str
:ivar reservation_id: The reservation ID is the identifier of a
reservation within a reservation order. Each reservation is the grouping
for applying the benefit scope and also specifies the number of instances
to which the reservation benefit can be applied.
:vartype reservation_id: str
:ivar sku_name: This is the ARM Sku name. It can be used to join with the
servicetype field in additionalinfo in usage records.
:vartype sku_name: str
:ivar reserved_hours: This is the total hours reserved. E.g. if
a reservation for 1 instance was made at 1 PM, this will be 11 hours for
that day and 24 hours on subsequent days.
:vartype reserved_hours: decimal.Decimal
:ivar usage_date: Data corresponding to the utilization record. If the
grain of data is monthly, it will be first day of month.
:vartype usage_date: datetime
:ivar used_hours: Total used hours by the reservation
:vartype used_hours: decimal.Decimal
:ivar min_utilization_percentage: This is the minimum hourly utilization
in the usage time (day or month). E.g. if usage record corresponds to
12/10/2017 and utilization on that day was 10% during hours 4 and 5, this
field will return 10% for that day.
:vartype min_utilization_percentage: decimal.Decimal
:ivar avg_utilization_percentage: This is average utilization for the
entire time range. (day or month depending on the grain)
:vartype avg_utilization_percentage: decimal.Decimal
:ivar max_utilization_percentage: This is the maximum hourly utilization
in the usage time (day or month). E.g. if usage record corresponds to
12/10/2017 and utilization on that day was 100% during hours 4 and 5, this
field will return 100% for that day.
:vartype max_utilization_percentage: decimal.Decimal
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'tags': {'readonly': True},
'reservation_order_id': {'readonly': True},
'reservation_id': {'readonly': True},
'sku_name': {'readonly': True},
'reserved_hours': {'readonly': True},
'usage_date': {'readonly': True},
'used_hours': {'readonly': True},
'min_utilization_percentage': {'readonly': True},
'avg_utilization_percentage': {'readonly': True},
'max_utilization_percentage': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'reservation_order_id': {'key': 'properties.reservationOrderId', 'type': 'str'},
'reservation_id': {'key': 'properties.reservationId', 'type': 'str'},
'sku_name': {'key': 'properties.skuName', 'type': 'str'},
'reserved_hours': {'key': 'properties.reservedHours', 'type': 'decimal'},
'usage_date': {'key': 'properties.usageDate', 'type': 'iso-8601'},
'used_hours': {'key': 'properties.usedHours', 'type': 'decimal'},
'min_utilization_percentage': {'key': 'properties.minUtilizationPercentage', 'type': 'decimal'},
'avg_utilization_percentage': {'key': 'properties.avgUtilizationPercentage', 'type': 'decimal'},
'max_utilization_percentage': {'key': 'properties.maxUtilizationPercentage', 'type': 'decimal'},
}
def __init__(self):
super(ReservationSummaries, self).__init__()
self.reservation_order_id = None
self.reservation_id = None
self.sku_name = None
self.reserved_hours = None
self.usage_date = None
self.used_hours = None
self.min_utilization_percentage = None
self.avg_utilization_percentage = None
self.max_utilization_percentage = None
```
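Every field on this model is read-only, so client code never populates it directly; values only appear on instances deserialized from a Consumption API response. A small consumer-side sketch, assuming the import path the docstring references:
```python
from azure.mgmt.consumption.models import ReservationSummaries

# A locally constructed instance has all server-populated fields set to None.
summary = ReservationSummaries()
print(summary.reservation_order_id)  # None
print(summary.reserved_hours)        # None until filled from a service response
```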
#### File: v2018_02_01_preview/models/registry_policies.py
```python
from msrest.serialization import Model
class RegistryPolicies(Model):
"""An object that represents policies for a container registry.
:param quarantine_policy: An object that represents quarantine policy for
a container registry.
:type quarantine_policy:
~azure.mgmt.containerregistry.v2018_02_01_preview.models.QuarantinePolicy
:param trust_policy: An object that represents content trust policy for a
container registry.
:type trust_policy:
~azure.mgmt.containerregistry.v2018_02_01_preview.models.TrustPolicy
"""
_attribute_map = {
'quarantine_policy': {'key': 'quarantinePolicy', 'type': 'QuarantinePolicy'},
'trust_policy': {'key': 'trustPolicy', 'type': 'TrustPolicy'},
}
def __init__(self, **kwargs):
super(RegistryPolicies, self).__init__(**kwargs)
self.quarantine_policy = kwargs.get('quarantine_policy', None)
self.trust_policy = kwargs.get('trust_policy', None)
```
#### File: v2018_09_01/models/custom_registry_credentials_py3.py
```python
from msrest.serialization import Model
class CustomRegistryCredentials(Model):
"""Describes the credentials that will be used to access a custom registry
during a run.
:param user_name: The username for logging into the custom registry.
:type user_name:
~azure.mgmt.containerregistry.v2018_09_01.models.SecretObject
:param password: The password for logging into the custom registry. The
password is a secret
object that allows multiple ways of providing the value for it.
:type password:
~azure.mgmt.containerregistry.v2018_09_01.models.SecretObject
"""
_attribute_map = {
'user_name': {'key': 'userName', 'type': 'SecretObject'},
'password': {'key': 'password', 'type': 'SecretObject'},
}
def __init__(self, *, user_name=None, password=None, **kwargs) -> None:
super(CustomRegistryCredentials, self).__init__(**kwargs)
self.user_name = user_name
self.password = password
```
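A hedged sketch of building these credentials. `SecretObject` is referenced by the docstring but not defined in this file, so the `value`/`type` fields used below are assumptions about its shape rather than something this block guarantees.
```python
from azure.mgmt.containerregistry.v2018_09_01.models import (
    CustomRegistryCredentials,
    SecretObject,
)

# Assumed SecretObject shape: an inline value marked as 'Opaque'.
creds = CustomRegistryCredentials(
    user_name=SecretObject(value='registry-user', type='Opaque'),
    password=SecretObject(value='registry-password', type='Opaque'),
)
print(creds.user_name.value)
```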
#### File: v2018_09_01/models/request_py3.py
```python
from msrest.serialization import Model
class Request(Model):
"""The request that generated the event.
:param id: The ID of the request that initiated the event.
:type id: str
:param addr: The IP or hostname and possibly port of the client connection
that initiated the event. This is the RemoteAddr from the standard http
request.
:type addr: str
:param host: The externally accessible hostname of the registry instance,
as specified by the http host header on incoming requests.
:type host: str
:param method: The request method that generated the event.
:type method: str
:param useragent: The user agent header of the request.
:type useragent: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'addr': {'key': 'addr', 'type': 'str'},
'host': {'key': 'host', 'type': 'str'},
'method': {'key': 'method', 'type': 'str'},
'useragent': {'key': 'useragent', 'type': 'str'},
}
def __init__(self, *, id: str=None, addr: str=None, host: str=None, method: str=None, useragent: str=None, **kwargs) -> None:
super(Request, self).__init__(**kwargs)
self.id = id
self.addr = addr
self.host = host
self.method = method
self.useragent = useragent
```
#### File: v2018_09_01/models/source_upload_definition.py
```python
from msrest.serialization import Model
class SourceUploadDefinition(Model):
"""The properties of a response to source upload request.
:param upload_url: The URL where the client can upload the source.
:type upload_url: str
:param relative_path: The relative path to the source. This is used to
submit the subsequent queue build request.
:type relative_path: str
"""
_attribute_map = {
'upload_url': {'key': 'uploadUrl', 'type': 'str'},
'relative_path': {'key': 'relativePath', 'type': 'str'},
}
def __init__(self, **kwargs):
super(SourceUploadDefinition, self).__init__(**kwargs)
self.upload_url = kwargs.get('upload_url', None)
self.relative_path = kwargs.get('relative_path', None)
```
#### File: cosmosdb/models/database_account_py3.py
```python
from .resource_py3 import Resource
class DatabaseAccount(Resource):
"""An Azure Cosmos DB database account.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The unique resource identifier of the database account.
:vartype id: str
:ivar name: The name of the database account.
:vartype name: str
:ivar type: The type of Azure resource.
:vartype type: str
:param location: Required. The location of the resource group to which the
resource belongs.
:type location: str
:param tags:
:type tags: dict[str, str]
:param kind: Indicates the type of database account. This can only be set
at database account creation. Possible values include: 'GlobalDocumentDB',
'MongoDB', 'Parse'. Default value: "GlobalDocumentDB".
:type kind: str or ~azure.mgmt.cosmosdb.models.DatabaseAccountKind
:param provisioning_state:
:type provisioning_state: str
:ivar document_endpoint: The connection endpoint for the Cosmos DB
database account.
:vartype document_endpoint: str
:ivar database_account_offer_type: The offer type for the Cosmos DB
database account. Default value: Standard. Possible values include:
'Standard'
:vartype database_account_offer_type: str or
~azure.mgmt.cosmosdb.models.DatabaseAccountOfferType
:param ip_range_filter: Cosmos DB Firewall Support: This value specifies
the set of IP addresses or IP address ranges in CIDR form to be included
as the allowed list of client IPs for a given database account. IP
addresses/ranges must be comma separated and must not contain any spaces.
:type ip_range_filter: str
:param is_virtual_network_filter_enabled: Flag to indicate whether to
enable/disable Virtual Network ACL rules.
:type is_virtual_network_filter_enabled: bool
:param enable_automatic_failover: Enables automatic failover of the write
region in the rare event that the region is unavailable due to an outage.
Automatic failover will result in a new write region for the account and
is chosen based on the failover priorities configured for the account.
:type enable_automatic_failover: bool
:param consistency_policy: The consistency policy for the Cosmos DB
database account.
:type consistency_policy: ~azure.mgmt.cosmosdb.models.ConsistencyPolicy
:param capabilities: List of Cosmos DB capabilities for the account
:type capabilities: list[~azure.mgmt.cosmosdb.models.Capability]
:ivar write_locations: An array that contains the write location for the
Cosmos DB account.
:vartype write_locations: list[~azure.mgmt.cosmosdb.models.Location]
:ivar read_locations: An array that contains the read locations enabled
for the Cosmos DB account.
:vartype read_locations: list[~azure.mgmt.cosmosdb.models.Location]
:ivar failover_policies: An array that contains the regions ordered by
their failover priorities.
:vartype failover_policies:
list[~azure.mgmt.cosmosdb.models.FailoverPolicy]
:param virtual_network_rules: List of Virtual Network ACL rules configured
for the Cosmos DB account.
:type virtual_network_rules:
list[~azure.mgmt.cosmosdb.models.VirtualNetworkRule]
:param enable_multiple_write_locations: Enables the account to write in
multiple locations
:type enable_multiple_write_locations: bool
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'document_endpoint': {'readonly': True},
'database_account_offer_type': {'readonly': True},
'write_locations': {'readonly': True},
'read_locations': {'readonly': True},
'failover_policies': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'kind': {'key': 'kind', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'document_endpoint': {'key': 'properties.documentEndpoint', 'type': 'str'},
'database_account_offer_type': {'key': 'properties.databaseAccountOfferType', 'type': 'DatabaseAccountOfferType'},
'ip_range_filter': {'key': 'properties.ipRangeFilter', 'type': 'str'},
'is_virtual_network_filter_enabled': {'key': 'properties.isVirtualNetworkFilterEnabled', 'type': 'bool'},
'enable_automatic_failover': {'key': 'properties.enableAutomaticFailover', 'type': 'bool'},
'consistency_policy': {'key': 'properties.consistencyPolicy', 'type': 'ConsistencyPolicy'},
'capabilities': {'key': 'properties.capabilities', 'type': '[Capability]'},
'write_locations': {'key': 'properties.writeLocations', 'type': '[Location]'},
'read_locations': {'key': 'properties.readLocations', 'type': '[Location]'},
'failover_policies': {'key': 'properties.failoverPolicies', 'type': '[FailoverPolicy]'},
'virtual_network_rules': {'key': 'properties.virtualNetworkRules', 'type': '[VirtualNetworkRule]'},
'enable_multiple_write_locations': {'key': 'properties.enableMultipleWriteLocations', 'type': 'bool'},
}
def __init__(self, *, location: str, tags=None, kind="GlobalDocumentDB", provisioning_state: str=None, ip_range_filter: str=None, is_virtual_network_filter_enabled: bool=None, enable_automatic_failover: bool=None, consistency_policy=None, capabilities=None, virtual_network_rules=None, enable_multiple_write_locations: bool=None, **kwargs) -> None:
super(DatabaseAccount, self).__init__(location=location, tags=tags, **kwargs)
self.kind = kind
self.provisioning_state = provisioning_state
self.document_endpoint = None
self.database_account_offer_type = None
self.ip_range_filter = ip_range_filter
self.is_virtual_network_filter_enabled = is_virtual_network_filter_enabled
self.enable_automatic_failover = enable_automatic_failover
self.consistency_policy = consistency_policy
self.capabilities = capabilities
self.write_locations = None
self.read_locations = None
self.failover_policies = None
self.virtual_network_rules = virtual_network_rules
self.enable_multiple_write_locations = enable_multiple_write_locations
```
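Since this is the Python 3 variant with keyword-only parameters, `location` must be supplied by name, and the read-only fields stay `None` until the service fills them in. A minimal construction sketch with illustrative values:
```python
from azure.mgmt.cosmosdb.models import DatabaseAccount

account = DatabaseAccount(
    location='westus2',                        # required
    kind='MongoDB',                            # overrides the 'GlobalDocumentDB' default
    enable_automatic_failover=True,
    ip_range_filter='10.0.0.0/24,10.1.0.1',
)
print(account.kind)               # 'MongoDB'
print(account.document_endpoint)  # None: read-only, populated by the service
```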
#### File: datafactory/models/azure_databricks_linked_service.py
```python
from .linked_service import LinkedService
class AzureDatabricksLinkedService(LinkedService):
"""Azure Databricks linked service.
All required parameters must be populated in order to send to Azure.
:param additional_properties: Unmatched properties from the message are
deserialized to this collection.
:type additional_properties: dict[str, object]
:param connect_via: The integration runtime reference.
:type connect_via:
~azure.mgmt.datafactory.models.IntegrationRuntimeReference
:param description: Linked service description.
:type description: str
:param parameters: Parameters for linked service.
:type parameters: dict[str,
~azure.mgmt.datafactory.models.ParameterSpecification]
:param annotations: List of tags that can be used for describing the
Dataset.
:type annotations: list[object]
:param type: Required. Constant filled by server.
:type type: str
:param domain: Required. <REGION>.azuredatabricks.net, domain name of your
Databricks deployment. Type: string (or Expression with resultType
string).
:type domain: object
:param access_token: Required. Access token for databricks REST API. Refer
to https://docs.azuredatabricks.net/api/latest/authentication.html. Type:
string (or Expression with resultType string).
:type access_token: ~azure.mgmt.datafactory.models.SecretBase
:param existing_cluster_id: The id of an existing cluster that will be
used for all runs of this job. Type: string (or Expression with resultType
string).
:type existing_cluster_id: object
:param new_cluster_version: The Spark version of new cluster. Type: string
(or Expression with resultType string).
:type new_cluster_version: object
:param new_cluster_num_of_worker: Number of worker nodes that new cluster
should have. A string formatted Int32, like '1' means numOfWorker is 1 or
'1:10' means auto-scale from 1 as min and 10 as max. Type: string (or
Expression with resultType string).
:type new_cluster_num_of_worker: object
:param new_cluster_node_type: The node types of new cluster. Type: string
(or Expression with resultType string).
:type new_cluster_node_type: object
:param new_cluster_spark_conf: A set of optional, user-specified Spark
configuration key-value pairs.
:type new_cluster_spark_conf: dict[str, object]
:param new_cluster_spark_env_vars: A set of optional, user-specified Spark
environment variables key-value pairs.
:type new_cluster_spark_env_vars: dict[str, object]
:param new_cluster_custom_tags: Additional tags for cluster resources.
:type new_cluster_custom_tags: dict[str, object]
:param encrypted_credential: The encrypted credential used for
authentication. Credentials are encrypted using the integration runtime
credential manager. Type: string (or Expression with resultType string).
:type encrypted_credential: object
"""
_validation = {
'type': {'required': True},
'domain': {'required': True},
'access_token': {'required': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'connect_via': {'key': 'connectVia', 'type': 'IntegrationRuntimeReference'},
'description': {'key': 'description', 'type': 'str'},
'parameters': {'key': 'parameters', 'type': '{ParameterSpecification}'},
'annotations': {'key': 'annotations', 'type': '[object]'},
'type': {'key': 'type', 'type': 'str'},
'domain': {'key': 'typeProperties.domain', 'type': 'object'},
'access_token': {'key': 'typeProperties.accessToken', 'type': 'SecretBase'},
'existing_cluster_id': {'key': 'typeProperties.existingClusterId', 'type': 'object'},
'new_cluster_version': {'key': 'typeProperties.newClusterVersion', 'type': 'object'},
'new_cluster_num_of_worker': {'key': 'typeProperties.newClusterNumOfWorker', 'type': 'object'},
'new_cluster_node_type': {'key': 'typeProperties.newClusterNodeType', 'type': 'object'},
'new_cluster_spark_conf': {'key': 'typeProperties.newClusterSparkConf', 'type': '{object}'},
'new_cluster_spark_env_vars': {'key': 'typeProperties.newClusterSparkEnvVars', 'type': '{object}'},
'new_cluster_custom_tags': {'key': 'typeProperties.newClusterCustomTags', 'type': '{object}'},
'encrypted_credential': {'key': 'typeProperties.encryptedCredential', 'type': 'object'},
}
def __init__(self, **kwargs):
super(AzureDatabricksLinkedService, self).__init__(**kwargs)
self.domain = kwargs.get('domain', None)
self.access_token = kwargs.get('access_token', None)
self.existing_cluster_id = kwargs.get('existing_cluster_id', None)
self.new_cluster_version = kwargs.get('new_cluster_version', None)
self.new_cluster_num_of_worker = kwargs.get('new_cluster_num_of_worker', None)
self.new_cluster_node_type = kwargs.get('new_cluster_node_type', None)
self.new_cluster_spark_conf = kwargs.get('new_cluster_spark_conf', None)
self.new_cluster_spark_env_vars = kwargs.get('new_cluster_spark_env_vars', None)
self.new_cluster_custom_tags = kwargs.get('new_cluster_custom_tags', None)
self.encrypted_credential = kwargs.get('encrypted_credential', None)
self.type = 'AzureDatabricks'
```
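A sketch of how this linked service might be assembled. `SecureString` is a common concrete `SecretBase` in `azure.mgmt.datafactory.models`, but it is not defined in this file, so treat its use here as an assumption; the domain and token are placeholders.
```python
from azure.mgmt.datafactory.models import AzureDatabricksLinkedService, SecureString

linked_service = AzureDatabricksLinkedService(
    domain='https://westus2.azuredatabricks.net',        # placeholder workspace domain
    access_token=SecureString(value='<databricks-token>'),
    existing_cluster_id='0123-456789-example',
)
print(linked_service.type)  # 'AzureDatabricks', set unconditionally by __init__
```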
#### File: datafactory/models/rerun_tumbling_window_trigger.py
```python
from .trigger import Trigger
class RerunTumblingWindowTrigger(Trigger):
"""Trigger that schedules pipeline reruns for all fixed time interval windows
from a requested start time to requested end time.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:param additional_properties: Unmatched properties from the message are
deserialized to this collection.
:type additional_properties: dict[str, object]
:param description: Trigger description.
:type description: str
:ivar runtime_state: Indicates if trigger is running or not. Updated when
Start/Stop APIs are called on the Trigger. Possible values include:
'Started', 'Stopped', 'Disabled'
:vartype runtime_state: str or
~azure.mgmt.datafactory.models.TriggerRuntimeState
:param type: Required. Constant filled by server.
:type type: str
:param parent_trigger: The parent trigger reference.
:type parent_trigger: object
:param requested_start_time: Required. The start time for the time period
for which restatement is initiated. Only UTC time is currently supported.
:type requested_start_time: datetime
:param requested_end_time: Required. The end time for the time period for
which restatement is initiated. Only UTC time is currently supported.
:type requested_end_time: datetime
:param max_concurrency: Required. The max number of parallel time windows
(ready for execution) for which a rerun is triggered.
:type max_concurrency: int
"""
_validation = {
'runtime_state': {'readonly': True},
'type': {'required': True},
'requested_start_time': {'required': True},
'requested_end_time': {'required': True},
'max_concurrency': {'required': True, 'maximum': 50, 'minimum': 1},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'description': {'key': 'description', 'type': 'str'},
'runtime_state': {'key': 'runtimeState', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'parent_trigger': {'key': 'typeProperties.parentTrigger', 'type': 'object'},
'requested_start_time': {'key': 'typeProperties.requestedStartTime', 'type': 'iso-8601'},
'requested_end_time': {'key': 'typeProperties.requestedEndTime', 'type': 'iso-8601'},
'max_concurrency': {'key': 'typeProperties.maxConcurrency', 'type': 'int'},
}
def __init__(self, **kwargs):
super(RerunTumblingWindowTrigger, self).__init__(**kwargs)
self.parent_trigger = kwargs.get('parent_trigger', None)
self.requested_start_time = kwargs.get('requested_start_time', None)
self.requested_end_time = kwargs.get('requested_end_time', None)
self.max_concurrency = kwargs.get('max_concurrency', None)
self.type = 'RerunTumblingWindowTrigger'
```
#### File: datafactory/models/retry_policy.py
```python
from msrest.serialization import Model
class RetryPolicy(Model):
"""Execution policy for an activity.
:param count: Maximum ordinary retry attempts. Default is 0. Type: integer
(or Expression with resultType integer), minimum: 0.
:type count: object
:param interval_in_seconds: Interval between retries in seconds. Default
is 30.
:type interval_in_seconds: int
"""
_validation = {
'interval_in_seconds': {'maximum': 86400, 'minimum': 30},
}
_attribute_map = {
'count': {'key': 'count', 'type': 'object'},
'interval_in_seconds': {'key': 'intervalInSeconds', 'type': 'int'},
}
def __init__(self, **kwargs):
super(RetryPolicy, self).__init__(**kwargs)
self.count = kwargs.get('count', None)
self.interval_in_seconds = kwargs.get('interval_in_seconds', None)
```
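A small usage sketch for the policy above. The 30 to 86400 second bound on `interval_in_seconds` comes from the `_validation` map and is enforced by msrest when the model is validated or serialized; the values below are illustrative.
```python
from azure.mgmt.datafactory.models import RetryPolicy

# count is typed as 'object' so it can also carry a Data Factory expression.
policy = RetryPolicy(count=3, interval_in_seconds=60)
print(policy.count, policy.interval_in_seconds)
```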
#### File: datafactory/models/sftp_server_linked_service.py
```python
from .linked_service import LinkedService
class SftpServerLinkedService(LinkedService):
"""A linked service for an SSH File Transfer Protocol (SFTP) server. .
All required parameters must be populated in order to send to Azure.
:param additional_properties: Unmatched properties from the message are
deserialized to this collection.
:type additional_properties: dict[str, object]
:param connect_via: The integration runtime reference.
:type connect_via:
~azure.mgmt.datafactory.models.IntegrationRuntimeReference
:param description: Linked service description.
:type description: str
:param parameters: Parameters for linked service.
:type parameters: dict[str,
~azure.mgmt.datafactory.models.ParameterSpecification]
:param annotations: List of tags that can be used for describing the
Dataset.
:type annotations: list[object]
:param type: Required. Constant filled by server.
:type type: str
:param host: Required. The SFTP server host name. Type: string (or
Expression with resultType string).
:type host: object
:param port: The TCP port number that the SFTP server uses to listen for
client connections. Default value is 22. Type: integer (or Expression with
resultType integer), minimum: 0.
:type port: object
:param authentication_type: The authentication type to be used to connect
to the SFTP server. Possible values include: 'Basic', 'SshPublicKey'
:type authentication_type: str or
~azure.mgmt.datafactory.models.SftpAuthenticationType
:param user_name: The username used to log on to the SFTP server. Type:
string (or Expression with resultType string).
:type user_name: object
:param password: Password to log on to the SFTP server for Basic
authentication.
:type password: ~azure.mgmt.datafactory.models.SecretBase
:param encrypted_credential: The encrypted credential used for
authentication. Credentials are encrypted using the integration runtime
credential manager. Type: string (or Expression with resultType string).
:type encrypted_credential: object
:param private_key_path: The SSH private key file path for SshPublicKey
authentication. Only valid for on-premises copy. For on-premises copy with
SshPublicKey authentication, either PrivateKeyPath or PrivateKeyContent
should be specified. SSH private key should be OpenSSH format. Type:
string (or Expression with resultType string).
:type private_key_path: object
:param private_key_content: Base64 encoded SSH private key content for
SshPublicKey authentication. For on-premises copy with SshPublicKey
authentication, either PrivateKeyPath or PrivateKeyContent should be
specified. SSH private key should be OpenSSH format.
:type private_key_content: ~azure.mgmt.datafactory.models.SecretBase
:param pass_phrase: The password to decrypt the SSH private key if the SSH
private key is encrypted.
:type pass_phrase: ~azure.mgmt.datafactory.models.SecretBase
:param skip_host_key_validation: If true, skip the SSH host key
validation. Default value is false. Type: boolean (or Expression with
resultType boolean).
:type skip_host_key_validation: object
:param host_key_fingerprint: The host key fingerprint of the SFTP server.
When SkipHostKeyValidation is false, HostKeyFingerprint should be
specified. Type: string (or Expression with resultType string).
:type host_key_fingerprint: object
"""
_validation = {
'type': {'required': True},
'host': {'required': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'connect_via': {'key': 'connectVia', 'type': 'IntegrationRuntimeReference'},
'description': {'key': 'description', 'type': 'str'},
'parameters': {'key': 'parameters', 'type': '{ParameterSpecification}'},
'annotations': {'key': 'annotations', 'type': '[object]'},
'type': {'key': 'type', 'type': 'str'},
'host': {'key': 'typeProperties.host', 'type': 'object'},
'port': {'key': 'typeProperties.port', 'type': 'object'},
'authentication_type': {'key': 'typeProperties.authenticationType', 'type': 'str'},
'user_name': {'key': 'typeProperties.userName', 'type': 'object'},
'password': {'key': 'typeProperties.password', 'type': 'SecretBase'},
'encrypted_credential': {'key': 'typeProperties.encryptedCredential', 'type': 'object'},
'private_key_path': {'key': 'typeProperties.privateKeyPath', 'type': 'object'},
'private_key_content': {'key': 'typeProperties.privateKeyContent', 'type': 'SecretBase'},
'pass_phrase': {'key': 'typeProperties.passPhrase', 'type': 'SecretBase'},
'skip_host_key_validation': {'key': 'typeProperties.skipHostKeyValidation', 'type': 'object'},
'host_key_fingerprint': {'key': 'typeProperties.hostKeyFingerprint', 'type': 'object'},
}
def __init__(self, **kwargs):
super(SftpServerLinkedService, self).__init__(**kwargs)
self.host = kwargs.get('host', None)
self.port = kwargs.get('port', None)
self.authentication_type = kwargs.get('authentication_type', None)
self.user_name = kwargs.get('user_name', None)
self.password = kwargs.get('password', None)
self.encrypted_credential = kwargs.get('encrypted_credential', None)
self.private_key_path = kwargs.get('private_key_path', None)
self.private_key_content = kwargs.get('private_key_content', None)
self.pass_phrase = kwargs.get('pass_phrase', None)
self.skip_host_key_validation = kwargs.get('skip_host_key_validation', None)
self.host_key_fingerprint = kwargs.get('host_key_fingerprint', None)
self.type = 'Sftp'
```
#### File: datamigration/models/mongo_db_throttling_settings_py3.py
```python
from msrest.serialization import Model
class MongoDbThrottlingSettings(Model):
"""Specifies resource limits for the migration.
:param min_free_cpu: The percentage of CPU time that the migrator will try
to avoid using, from 0 to 100
:type min_free_cpu: int
:param min_free_memory_mb: The number of megabytes of RAM that the
migrator will try to avoid using
:type min_free_memory_mb: int
:param max_parallelism: The maximum number of work items (e.g. collection
copies) that will be processed in parallel
:type max_parallelism: int
"""
_attribute_map = {
'min_free_cpu': {'key': 'minFreeCpu', 'type': 'int'},
'min_free_memory_mb': {'key': 'minFreeMemoryMb', 'type': 'int'},
'max_parallelism': {'key': 'maxParallelism', 'type': 'int'},
}
def __init__(self, *, min_free_cpu: int=None, min_free_memory_mb: int=None, max_parallelism: int=None, **kwargs) -> None:
super(MongoDbThrottlingSettings, self).__init__(**kwargs)
self.min_free_cpu = min_free_cpu
self.min_free_memory_mb = min_free_memory_mb
self.max_parallelism = max_parallelism
```
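A short construction sketch with illustrative limits, assuming the class is imported from `azure.mgmt.datamigration.models` as the docstring references:
```python
from azure.mgmt.datamigration.models import MongoDbThrottlingSettings

# Keep roughly a quarter of the CPU and 1 GiB of RAM free; copy 4 collections at a time.
throttling = MongoDbThrottlingSettings(
    min_free_cpu=25,
    min_free_memory_mb=1024,
    max_parallelism=4,
)
print(throttling.max_parallelism)
```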
#### File: devtestlabs/models/artifact_source_py3.py
```python
from .resource_py3 import Resource
class ArtifactSource(Resource):
"""Properties of an artifact source.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: The identifier of the resource.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
:param location: The location of the resource.
:type location: str
:param tags: The tags of the resource.
:type tags: dict[str, str]
:param display_name: The artifact source's display name.
:type display_name: str
:param uri: The artifact source's URI.
:type uri: str
:param source_type: The artifact source's type. Possible values include:
'VsoGit', 'GitHub'
:type source_type: str or ~azure.mgmt.devtestlabs.models.SourceControlType
:param folder_path: The folder containing artifacts.
:type folder_path: str
:param arm_template_folder_path: The folder containing Azure Resource
Manager templates.
:type arm_template_folder_path: str
:param branch_ref: The artifact source's branch reference.
:type branch_ref: str
:param security_token: The security token to authenticate to the artifact
source.
:type security_token: str
:param status: Indicates if the artifact source is enabled (values:
Enabled, Disabled). Possible values include: 'Enabled', 'Disabled'
:type status: str or ~azure.mgmt.devtestlabs.models.EnableStatus
:ivar created_date: The artifact source's creation date.
:vartype created_date: datetime
:ivar provisioning_state: The provisioning status of the resource.
:vartype provisioning_state: str
:ivar unique_identifier: The unique immutable identifier of a resource
(Guid).
:vartype unique_identifier: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'created_date': {'readonly': True},
'provisioning_state': {'readonly': True},
'unique_identifier': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'display_name': {'key': 'properties.displayName', 'type': 'str'},
'uri': {'key': 'properties.uri', 'type': 'str'},
'source_type': {'key': 'properties.sourceType', 'type': 'str'},
'folder_path': {'key': 'properties.folderPath', 'type': 'str'},
'arm_template_folder_path': {'key': 'properties.armTemplateFolderPath', 'type': 'str'},
'branch_ref': {'key': 'properties.branchRef', 'type': 'str'},
'security_token': {'key': 'properties.securityToken', 'type': 'str'},
'status': {'key': 'properties.status', 'type': 'str'},
'created_date': {'key': 'properties.createdDate', 'type': 'iso-8601'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'unique_identifier': {'key': 'properties.uniqueIdentifier', 'type': 'str'},
}
def __init__(self, *, location: str=None, tags=None, display_name: str=None, uri: str=None, source_type=None, folder_path: str=None, arm_template_folder_path: str=None, branch_ref: str=None, security_token: str=None, status=None, **kwargs) -> None:
super(ArtifactSource, self).__init__(location=location, tags=tags, **kwargs)
self.display_name = display_name
self.uri = uri
self.source_type = source_type
self.folder_path = folder_path
self.arm_template_folder_path = arm_template_folder_path
self.branch_ref = branch_ref
self.security_token = security_token
self.status = status
self.created_date = None
self.provisioning_state = None
self.unique_identifier = None
```
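A hedged sketch of a GitHub-backed artifact source built from the keyword-only constructor above; every value is a placeholder, and the read-only fields remain `None` on a locally built instance.
```python
from azure.mgmt.devtestlabs.models import ArtifactSource

source = ArtifactSource(
    location='westus2',
    display_name='Example artifacts',
    uri='https://github.com/Azure/azure-devtestlab.git',
    source_type='GitHub',
    folder_path='/Artifacts',
    branch_ref='master',
    security_token='<personal-access-token>',
    status='Enabled',
)
print(source.provisioning_state)  # None: read-only, set by the service
```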
#### File: devtestlabs/models/lab_announcement_properties_fragment_py3.py
```python
from msrest.serialization import Model
class LabAnnouncementPropertiesFragment(Model):
"""Properties of a lab's announcement banner.
:param title: The plain text title for the lab announcement
:type title: str
:param markdown: The markdown text (if any) that this lab displays in the
UI. If left empty/null, nothing will be shown.
:type markdown: str
:param enabled: Is the lab announcement active/enabled at this time?
Possible values include: 'Enabled', 'Disabled'
:type enabled: str or ~azure.mgmt.devtestlabs.models.EnableStatus
:param expiration_date: The time at which the announcement expires (null
for never)
:type expiration_date: datetime
:param expired: Has this announcement expired?
:type expired: bool
"""
_attribute_map = {
'title': {'key': 'title', 'type': 'str'},
'markdown': {'key': 'markdown', 'type': 'str'},
'enabled': {'key': 'enabled', 'type': 'str'},
'expiration_date': {'key': 'expirationDate', 'type': 'iso-8601'},
'expired': {'key': 'expired', 'type': 'bool'},
}
def __init__(self, *, title: str=None, markdown: str=None, enabled=None, expiration_date=None, expired: bool=None, **kwargs) -> None:
super(LabAnnouncementPropertiesFragment, self).__init__(**kwargs)
self.title = title
self.markdown = markdown
self.enabled = enabled
self.expiration_date = expiration_date
self.expired = expired
```
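A brief sketch of an announcement fragment that expires in a week; the title and markdown are illustrative.
```python
from datetime import datetime, timedelta
from azure.mgmt.devtestlabs.models import LabAnnouncementPropertiesFragment

announcement = LabAnnouncementPropertiesFragment(
    title='Maintenance window',
    markdown='The lab will be **unavailable** on Saturday night.',
    enabled='Enabled',
    expiration_date=datetime.utcnow() + timedelta(days=7),
    expired=False,
)
print(announcement.title)
```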
#### File: devtestlabs/models/lab_announcement_properties_py3.py
```python
from msrest.serialization import Model
class LabAnnouncementProperties(Model):
"""Properties of a lab's announcement banner.
Variables are only populated by the server, and will be ignored when
sending a request.
:param title: The plain text title for the lab announcement
:type title: str
:param markdown: The markdown text (if any) that this lab displays in the
UI. If left empty/null, nothing will be shown.
:type markdown: str
:param enabled: Is the lab announcement active/enabled at this time?
Possible values include: 'Enabled', 'Disabled'
:type enabled: str or ~azure.mgmt.devtestlabs.models.EnableStatus
:param expiration_date: The time at which the announcement expires (null
for never)
:type expiration_date: datetime
:param expired: Has this announcement expired?
:type expired: bool
:ivar provisioning_state: The provisioning status of the resource.
:vartype provisioning_state: str
:ivar unique_identifier: The unique immutable identifier of a resource
(Guid).
:vartype unique_identifier: str
"""
_validation = {
'provisioning_state': {'readonly': True},
'unique_identifier': {'readonly': True},
}
_attribute_map = {
'title': {'key': 'title', 'type': 'str'},
'markdown': {'key': 'markdown', 'type': 'str'},
'enabled': {'key': 'enabled', 'type': 'str'},
'expiration_date': {'key': 'expirationDate', 'type': 'iso-8601'},
'expired': {'key': 'expired', 'type': 'bool'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'unique_identifier': {'key': 'uniqueIdentifier', 'type': 'str'},
}
def __init__(self, *, title: str=None, markdown: str=None, enabled=None, expiration_date=None, expired: bool=None, **kwargs) -> None:
super(LabAnnouncementProperties, self).__init__(**kwargs)
self.title = title
self.markdown = markdown
self.enabled = enabled
self.expiration_date = expiration_date
self.expired = expired
self.provisioning_state = None
self.unique_identifier = None
```
#### File: devtestlabs/models/lab_py3.py
```python
from .resource_py3 import Resource
class Lab(Resource):
"""A lab.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: The identifier of the resource.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
:param location: The location of the resource.
:type location: str
:param tags: The tags of the resource.
:type tags: dict[str, str]
:ivar default_storage_account: The lab's default storage account.
:vartype default_storage_account: str
:ivar default_premium_storage_account: The lab's default premium storage
account.
:vartype default_premium_storage_account: str
:ivar artifacts_storage_account: The lab's artifact storage account.
:vartype artifacts_storage_account: str
:ivar premium_data_disk_storage_account: The lab's premium data disk
storage account.
:vartype premium_data_disk_storage_account: str
:ivar vault_name: The lab's Key vault.
:vartype vault_name: str
:param lab_storage_type: Type of storage used by the lab. It can be either
Premium or Standard. Default is Premium. Possible values include:
'Standard', 'Premium'
:type lab_storage_type: str or ~azure.mgmt.devtestlabs.models.StorageType
:param mandatory_artifacts_resource_ids_linux: The ordered list of
artifact resource IDs that should be applied on all Linux VM creations by
default, prior to the artifacts specified by the user.
:type mandatory_artifacts_resource_ids_linux: list[str]
:param mandatory_artifacts_resource_ids_windows: The ordered list of
artifact resource IDs that should be applied on all Windows VM creations
by default, prior to the artifacts specified by the user.
:type mandatory_artifacts_resource_ids_windows: list[str]
:ivar created_date: The creation date of the lab.
:vartype created_date: datetime
:param premium_data_disks: The setting to enable usage of premium data
disks.
When its value is 'Enabled', creation of standard or premium data disks is
allowed.
When its value is 'Disabled', only creation of standard data disks is
allowed. Possible values include: 'Disabled', 'Enabled'
:type premium_data_disks: str or
~azure.mgmt.devtestlabs.models.PremiumDataDisk
:param environment_permission: The access rights to be granted to the user
when provisioning an environment. Possible values include: 'Reader',
'Contributor'
:type environment_permission: str or
~azure.mgmt.devtestlabs.models.EnvironmentPermission
:param announcement: The properties of any lab announcement associated
with this lab
:type announcement:
~azure.mgmt.devtestlabs.models.LabAnnouncementProperties
:param support: The properties of any lab support message associated with
this lab
:type support: ~azure.mgmt.devtestlabs.models.LabSupportProperties
:ivar vm_creation_resource_group: The resource group in which lab virtual
machines will be created.
:vartype vm_creation_resource_group: str
:ivar public_ip_id: The public IP address for the lab's load balancer.
:vartype public_ip_id: str
:ivar load_balancer_id: The load balancer used for lab VMs that use a
shared IP address.
:vartype load_balancer_id: str
:ivar network_security_group_id: The Network Security Group attached to
the lab VMs Network interfaces to restrict open ports.
:vartype network_security_group_id: str
:param extended_properties: Extended properties of the lab used for
experimental features
:type extended_properties: dict[str, str]
:ivar provisioning_state: The provisioning status of the resource.
:vartype provisioning_state: str
:ivar unique_identifier: The unique immutable identifier of a resource
(Guid).
:vartype unique_identifier: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'default_storage_account': {'readonly': True},
'default_premium_storage_account': {'readonly': True},
'artifacts_storage_account': {'readonly': True},
'premium_data_disk_storage_account': {'readonly': True},
'vault_name': {'readonly': True},
'created_date': {'readonly': True},
'vm_creation_resource_group': {'readonly': True},
'public_ip_id': {'readonly': True},
'load_balancer_id': {'readonly': True},
'network_security_group_id': {'readonly': True},
'provisioning_state': {'readonly': True},
'unique_identifier': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'default_storage_account': {'key': 'properties.defaultStorageAccount', 'type': 'str'},
'default_premium_storage_account': {'key': 'properties.defaultPremiumStorageAccount', 'type': 'str'},
'artifacts_storage_account': {'key': 'properties.artifactsStorageAccount', 'type': 'str'},
'premium_data_disk_storage_account': {'key': 'properties.premiumDataDiskStorageAccount', 'type': 'str'},
'vault_name': {'key': 'properties.vaultName', 'type': 'str'},
'lab_storage_type': {'key': 'properties.labStorageType', 'type': 'str'},
'mandatory_artifacts_resource_ids_linux': {'key': 'properties.mandatoryArtifactsResourceIdsLinux', 'type': '[str]'},
'mandatory_artifacts_resource_ids_windows': {'key': 'properties.mandatoryArtifactsResourceIdsWindows', 'type': '[str]'},
'created_date': {'key': 'properties.createdDate', 'type': 'iso-8601'},
'premium_data_disks': {'key': 'properties.premiumDataDisks', 'type': 'str'},
'environment_permission': {'key': 'properties.environmentPermission', 'type': 'str'},
'announcement': {'key': 'properties.announcement', 'type': 'LabAnnouncementProperties'},
'support': {'key': 'properties.support', 'type': 'LabSupportProperties'},
'vm_creation_resource_group': {'key': 'properties.vmCreationResourceGroup', 'type': 'str'},
'public_ip_id': {'key': 'properties.publicIpId', 'type': 'str'},
'load_balancer_id': {'key': 'properties.loadBalancerId', 'type': 'str'},
'network_security_group_id': {'key': 'properties.networkSecurityGroupId', 'type': 'str'},
'extended_properties': {'key': 'properties.extendedProperties', 'type': '{str}'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'unique_identifier': {'key': 'properties.uniqueIdentifier', 'type': 'str'},
}
def __init__(self, *, location: str=None, tags=None, lab_storage_type=None, mandatory_artifacts_resource_ids_linux=None, mandatory_artifacts_resource_ids_windows=None, premium_data_disks=None, environment_permission=None, announcement=None, support=None, extended_properties=None, **kwargs) -> None:
super(Lab, self).__init__(location=location, tags=tags, **kwargs)
self.default_storage_account = None
self.default_premium_storage_account = None
self.artifacts_storage_account = None
self.premium_data_disk_storage_account = None
self.vault_name = None
self.lab_storage_type = lab_storage_type
self.mandatory_artifacts_resource_ids_linux = mandatory_artifacts_resource_ids_linux
self.mandatory_artifacts_resource_ids_windows = mandatory_artifacts_resource_ids_windows
self.created_date = None
self.premium_data_disks = premium_data_disks
self.environment_permission = environment_permission
self.announcement = announcement
self.support = support
self.vm_creation_resource_group = None
self.public_ip_id = None
self.load_balancer_id = None
self.network_security_group_id = None
self.extended_properties = extended_properties
self.provisioning_state = None
self.unique_identifier = None
```
#### File: devtestlabs/models/lab_support_properties_fragment.py
```python
from msrest.serialization import Model
class LabSupportPropertiesFragment(Model):
"""Properties of a lab's support banner.
:param enabled: Is the lab support banner active/enabled at this time?
Possible values include: 'Enabled', 'Disabled'
:type enabled: str or ~azure.mgmt.devtestlabs.models.EnableStatus
:param markdown: The markdown text (if any) that this lab displays in the
UI. If left empty/null, nothing will be shown.
:type markdown: str
"""
_attribute_map = {
'enabled': {'key': 'enabled', 'type': 'str'},
'markdown': {'key': 'markdown', 'type': 'str'},
}
def __init__(self, **kwargs):
super(LabSupportPropertiesFragment, self).__init__(**kwargs)
self.enabled = kwargs.get('enabled', None)
self.markdown = kwargs.get('markdown', None)
```
#### File: devtestlabs/models/schedule_creation_parameter_py3.py
```python
from msrest.serialization import Model
class ScheduleCreationParameter(Model):
"""Properties for creating a schedule.
:param status: The status of the schedule (i.e. Enabled, Disabled).
Possible values include: 'Enabled', 'Disabled'
:type status: str or ~azure.mgmt.devtestlabs.models.EnableStatus
:param task_type: The task type of the schedule (e.g. LabVmsShutdownTask,
LabVmAutoStart).
:type task_type: str
:param weekly_recurrence: If the schedule will occur only some days of the
week, specify the weekly recurrence.
:type weekly_recurrence: ~azure.mgmt.devtestlabs.models.WeekDetails
:param daily_recurrence: If the schedule will occur once each day of the
week, specify the daily recurrence.
:type daily_recurrence: ~azure.mgmt.devtestlabs.models.DayDetails
:param hourly_recurrence: If the schedule will occur multiple times a day,
specify the hourly recurrence.
:type hourly_recurrence: ~azure.mgmt.devtestlabs.models.HourDetails
:param time_zone_id: The time zone ID (e.g. Pacific Standard time).
:type time_zone_id: str
:param notification_settings: Notification settings.
:type notification_settings:
~azure.mgmt.devtestlabs.models.NotificationSettings
:param target_resource_id: The resource ID to which the schedule belongs
:type target_resource_id: str
:param name: The name of the virtual machine or environment
:type name: str
:param location: The location of the new virtual machine or environment
:type location: str
:param tags: The tags of the resource.
:type tags: dict[str, str]
"""
_attribute_map = {
'status': {'key': 'properties.status', 'type': 'str'},
'task_type': {'key': 'properties.taskType', 'type': 'str'},
'weekly_recurrence': {'key': 'properties.weeklyRecurrence', 'type': 'WeekDetails'},
'daily_recurrence': {'key': 'properties.dailyRecurrence', 'type': 'DayDetails'},
'hourly_recurrence': {'key': 'properties.hourlyRecurrence', 'type': 'HourDetails'},
'time_zone_id': {'key': 'properties.timeZoneId', 'type': 'str'},
'notification_settings': {'key': 'properties.notificationSettings', 'type': 'NotificationSettings'},
'target_resource_id': {'key': 'properties.targetResourceId', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(self, *, status=None, task_type: str=None, weekly_recurrence=None, daily_recurrence=None, hourly_recurrence=None, time_zone_id: str=None, notification_settings=None, target_resource_id: str=None, name: str=None, location: str=None, tags=None, **kwargs) -> None:
super(ScheduleCreationParameter, self).__init__(**kwargs)
self.status = status
self.task_type = task_type
self.weekly_recurrence = weekly_recurrence
self.daily_recurrence = daily_recurrence
self.hourly_recurrence = hourly_recurrence
self.time_zone_id = time_zone_id
self.notification_settings = notification_settings
self.target_resource_id = target_resource_id
self.name = name
self.location = location
self.tags = tags
```
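A sketch of an auto-shutdown schedule using the task type the docstring mentions. The `properties.*` keys in `_attribute_map` mean msrest nests these fields under a `properties` object when the model is serialized; the values below are illustrative.
```python
from azure.mgmt.devtestlabs.models import ScheduleCreationParameter

schedule = ScheduleCreationParameter(
    name='LabVmsShutdown',
    status='Enabled',
    task_type='LabVmsShutdownTask',
    time_zone_id='Pacific Standard Time',
)
print(schedule.task_type)
```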
#### File: devtestlabs/models/virtual_network_py3.py
```python
from .resource_py3 import Resource
class VirtualNetwork(Resource):
"""A virtual network.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: The identifier of the resource.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
:param location: The location of the resource.
:type location: str
:param tags: The tags of the resource.
:type tags: dict[str, str]
:param allowed_subnets: The allowed subnets of the virtual network.
:type allowed_subnets: list[~azure.mgmt.devtestlabs.models.Subnet]
:param description: The description of the virtual network.
:type description: str
:param external_provider_resource_id: The Microsoft.Network resource
identifier of the virtual network.
:type external_provider_resource_id: str
:ivar external_subnets: The external subnet properties.
:vartype external_subnets:
list[~azure.mgmt.devtestlabs.models.ExternalSubnet]
:param subnet_overrides: The subnet overrides of the virtual network.
:type subnet_overrides:
list[~azure.mgmt.devtestlabs.models.SubnetOverride]
:ivar created_date: The creation date of the virtual network.
:vartype created_date: datetime
:ivar provisioning_state: The provisioning status of the resource.
:vartype provisioning_state: str
:ivar unique_identifier: The unique immutable identifier of a resource
(Guid).
:vartype unique_identifier: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'external_subnets': {'readonly': True},
'created_date': {'readonly': True},
'provisioning_state': {'readonly': True},
'unique_identifier': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'allowed_subnets': {'key': 'properties.allowedSubnets', 'type': '[Subnet]'},
'description': {'key': 'properties.description', 'type': 'str'},
'external_provider_resource_id': {'key': 'properties.externalProviderResourceId', 'type': 'str'},
'external_subnets': {'key': 'properties.externalSubnets', 'type': '[ExternalSubnet]'},
'subnet_overrides': {'key': 'properties.subnetOverrides', 'type': '[SubnetOverride]'},
'created_date': {'key': 'properties.createdDate', 'type': 'iso-8601'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'unique_identifier': {'key': 'properties.uniqueIdentifier', 'type': 'str'},
}
def __init__(self, *, location: str=None, tags=None, allowed_subnets=None, description: str=None, external_provider_resource_id: str=None, subnet_overrides=None, **kwargs) -> None:
super(VirtualNetwork, self).__init__(location=location, tags=tags, **kwargs)
self.allowed_subnets = allowed_subnets
self.description = description
self.external_provider_resource_id = external_provider_resource_id
self.external_subnets = None
self.subnet_overrides = subnet_overrides
self.created_date = None
self.provisioning_state = None
self.unique_identifier = None
```
#### File: hanaonazure/models/network_profile.py
```python
from msrest.serialization import Model
class NetworkProfile(Model):
"""Specifies the network settings for the HANA instance disks.
Variables are only populated by the server, and will be ignored when
sending a request.
:param network_interfaces: Specifies the network interfaces for the HANA
instance.
:type network_interfaces: list[~azure.mgmt.hanaonazure.models.IpAddress]
:ivar circuit_id: Specifies the circuit id for connecting to express
route.
:vartype circuit_id: str
"""
_validation = {
'circuit_id': {'readonly': True},
}
_attribute_map = {
'network_interfaces': {'key': 'networkInterfaces', 'type': '[IpAddress]'},
'circuit_id': {'key': 'circuitId', 'type': 'str'},
}
def __init__(self, **kwargs):
super(NetworkProfile, self).__init__(**kwargs)
self.network_interfaces = kwargs.get('network_interfaces', None)
self.circuit_id = None
```
#### File: hdinsight/models/cluster_get_properties_py3.py
```python
from msrest.serialization import Model
class ClusterGetProperties(Model):
"""The properties of cluster.
All required parameters must be populated in order to send to Azure.
:param cluster_version: The version of the cluster.
:type cluster_version: str
:param os_type: The type of operating system. Possible values include:
'Windows', 'Linux'
:type os_type: str or ~azure.mgmt.hdinsight.models.OSType
:param tier: The cluster tier. Possible values include: 'Standard',
'Premium'
:type tier: str or ~azure.mgmt.hdinsight.models.Tier
:param cluster_definition: Required. The cluster definition.
:type cluster_definition: ~azure.mgmt.hdinsight.models.ClusterDefinition
:param security_profile: The security profile.
:type security_profile: ~azure.mgmt.hdinsight.models.SecurityProfile
:param compute_profile: The compute profile.
:type compute_profile: ~azure.mgmt.hdinsight.models.ComputeProfile
:param provisioning_state: The provisioning state, which only appears in
the response. Possible values include: 'InProgress', 'Failed',
'Succeeded', 'Canceled', 'Deleting'
:type provisioning_state: str or
~azure.mgmt.hdinsight.models.HDInsightClusterProvisioningState
:param created_date: The date on which the cluster was created.
:type created_date: str
:param cluster_state: The state of the cluster.
:type cluster_state: str
:param quota_info: The quota information.
:type quota_info: ~azure.mgmt.hdinsight.models.QuotaInfo
:param errors: The list of errors.
:type errors: list[~azure.mgmt.hdinsight.models.Errors]
:param connectivity_endpoints: The list of connectivity endpoints.
:type connectivity_endpoints:
list[~azure.mgmt.hdinsight.models.ConnectivityEndpoint]
:param disk_encryption_properties: The disk encryption properties.
:type disk_encryption_properties:
~azure.mgmt.hdinsight.models.DiskEncryptionProperties
"""
_validation = {
'cluster_definition': {'required': True},
}
_attribute_map = {
'cluster_version': {'key': 'clusterVersion', 'type': 'str'},
'os_type': {'key': 'osType', 'type': 'OSType'},
'tier': {'key': 'tier', 'type': 'Tier'},
'cluster_definition': {'key': 'clusterDefinition', 'type': 'ClusterDefinition'},
'security_profile': {'key': 'securityProfile', 'type': 'SecurityProfile'},
'compute_profile': {'key': 'computeProfile', 'type': 'ComputeProfile'},
'provisioning_state': {'key': 'provisioningState', 'type': 'HDInsightClusterProvisioningState'},
'created_date': {'key': 'createdDate', 'type': 'str'},
'cluster_state': {'key': 'clusterState', 'type': 'str'},
'quota_info': {'key': 'quotaInfo', 'type': 'QuotaInfo'},
'errors': {'key': 'errors', 'type': '[Errors]'},
'connectivity_endpoints': {'key': 'connectivityEndpoints', 'type': '[ConnectivityEndpoint]'},
'disk_encryption_properties': {'key': 'diskEncryptionProperties', 'type': 'DiskEncryptionProperties'},
}
def __init__(self, *, cluster_definition, cluster_version: str=None, os_type=None, tier=None, security_profile=None, compute_profile=None, provisioning_state=None, created_date: str=None, cluster_state: str=None, quota_info=None, errors=None, connectivity_endpoints=None, disk_encryption_properties=None, **kwargs) -> None:
super(ClusterGetProperties, self).__init__(**kwargs)
self.cluster_version = cluster_version
self.os_type = os_type
self.tier = tier
self.cluster_definition = cluster_definition
self.security_profile = security_profile
self.compute_profile = compute_profile
self.provisioning_state = provisioning_state
self.created_date = created_date
self.cluster_state = cluster_state
self.quota_info = quota_info
self.errors = errors
self.connectivity_endpoints = connectivity_endpoints
self.disk_encryption_properties = disk_encryption_properties
```
#### File: hdinsight/models/compute_profile.py
```python
from msrest.serialization import Model
class ComputeProfile(Model):
"""Describes the compute profile.
:param roles: The list of roles in the cluster.
:type roles: list[~azure.mgmt.hdinsight.models.Role]
"""
_attribute_map = {
'roles': {'key': 'roles', 'type': '[Role]'},
}
def __init__(self, **kwargs):
super(ComputeProfile, self).__init__(**kwargs)
self.roles = kwargs.get('roles', None)
```
#### File: hdinsight/models/execute_script_action_parameters_py3.py
```python
from msrest.serialization import Model
class ExecuteScriptActionParameters(Model):
"""The parameters for the script actions to execute on a running cluster.
All required parameters must be populated in order to send to Azure.
:param script_actions: The list of run time script actions.
:type script_actions:
list[~azure.mgmt.hdinsight.models.RuntimeScriptAction]
:param persist_on_success: Required. Gets or sets if the scripts needs to
be persisted.
:type persist_on_success: bool
"""
_validation = {
'persist_on_success': {'required': True},
}
_attribute_map = {
'script_actions': {'key': 'scriptActions', 'type': '[RuntimeScriptAction]'},
'persist_on_success': {'key': 'persistOnSuccess', 'type': 'bool'},
}
def __init__(self, *, persist_on_success: bool, script_actions=None, **kwargs) -> None:
super(ExecuteScriptActionParameters, self).__init__(**kwargs)
self.script_actions = script_actions
self.persist_on_success = persist_on_success
```
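A minimal usage sketch for the model above; only the required `persist_on_success` flag is set, since the fields of `RuntimeScriptAction` are not shown in this file.
```python
from azure.mgmt.hdinsight.models import ExecuteScriptActionParameters

# persist_on_success is the only required field; script_actions can be
# filled with RuntimeScriptAction instances when needed.
params = ExecuteScriptActionParameters(persist_on_success=True)
```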
#### File: hdinsight/models/script_action_execution_summary_py3.py
```python
from msrest.serialization import Model
class ScriptActionExecutionSummary(Model):
"""The execution summary of a script action.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar status: The status of script action execution.
:vartype status: str
:ivar instance_count: The instance count for a given script action
execution status.
:vartype instance_count: int
"""
_validation = {
'status': {'readonly': True},
'instance_count': {'readonly': True},
}
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
'instance_count': {'key': 'instanceCount', 'type': 'int'},
}
def __init__(self, **kwargs) -> None:
super(ScriptActionExecutionSummary, self).__init__(**kwargs)
self.status = None
self.instance_count = None
```
#### File: iotcentral/models/app_py3.py
```python
from .resource_py3 import Resource
class App(Resource):
"""The IoT Central application.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The ARM resource identifier.
:vartype id: str
:ivar name: The ARM resource name.
:vartype name: str
:ivar type: The resource type.
:vartype type: str
:param location: Required. The resource location.
:type location: str
:param tags: The resource tags.
:type tags: dict[str, str]
:ivar application_id: The ID of the application.
:vartype application_id: str
:param display_name: The display name of the application.
:type display_name: str
:param subdomain: The subdomain of the application.
:type subdomain: str
:param template: The ID of the application template, which is a blueprint
that defines the characteristics and behaviors of an application.
Optional; if not specified, defaults to a blank blueprint and allows the
application to be defined from scratch.
:type template: str
:param sku: Required. A valid instance SKU.
:type sku: ~azure.mgmt.iotcentral.models.AppSkuInfo
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True, 'pattern': r'^(?![0-9]+$)(?!-)[a-zA-Z0-9-]{2,99}[a-zA-Z0-9]$'},
'type': {'readonly': True},
'location': {'required': True},
'application_id': {'readonly': True},
'display_name': {'pattern': r'^.{1,200}$'},
'subdomain': {'pattern': r'^[a-z0-9-]{1,63}$'},
'sku': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'application_id': {'key': 'properties.applicationId', 'type': 'str'},
'display_name': {'key': 'properties.displayName', 'type': 'str'},
'subdomain': {'key': 'properties.subdomain', 'type': 'str'},
'template': {'key': 'properties.template', 'type': 'str'},
'sku': {'key': 'sku', 'type': 'AppSkuInfo'},
}
def __init__(self, *, location: str, sku, tags=None, display_name: str=None, subdomain: str=None, template: str=None, **kwargs) -> None:
super(App, self).__init__(location=location, tags=tags, **kwargs)
self.application_id = None
self.display_name = display_name
self.subdomain = subdomain
self.template = template
self.sku = sku
```
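A hedged construction sketch; `AppSkuInfo` is assumed here to accept a SKU `name` argument (its definition is not in this file), and the region and SKU values are placeholders.
```python
from azure.mgmt.iotcentral.models import App, AppSkuInfo

# location and sku are required; display_name and subdomain must match
# the validation patterns listed above.
app = App(
    location='westus',                # placeholder region
    sku=AppSkuInfo(name='ST1'),       # assumed AppSkuInfo signature
    display_name='my-iot-app',
    subdomain='my-iot-app',
)
```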
#### File: iothub/models/export_devices_request.py
```python
from msrest.serialization import Model
class ExportDevicesRequest(Model):
"""Use to provide parameters when requesting an export of all devices in the
IoT hub.
All required parameters must be populated in order to send to Azure.
:param export_blob_container_uri: Required. The export blob container URI.
:type export_blob_container_uri: str
:param exclude_keys: Required. The value indicating whether keys should be
excluded during export.
:type exclude_keys: bool
"""
_validation = {
'export_blob_container_uri': {'required': True},
'exclude_keys': {'required': True},
}
_attribute_map = {
'export_blob_container_uri': {'key': 'exportBlobContainerUri', 'type': 'str'},
'exclude_keys': {'key': 'excludeKeys', 'type': 'bool'},
}
def __init__(self, **kwargs):
super(ExportDevicesRequest, self).__init__(**kwargs)
self.export_blob_container_uri = kwargs.get('export_blob_container_uri', None)
self.exclude_keys = kwargs.get('exclude_keys', None)
```
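A short construction sketch; the container URI is a placeholder SAS URI.
```python
from azure.mgmt.iothub.models import ExportDevicesRequest

request = ExportDevicesRequest(
    export_blob_container_uri='https://mystorage.blob.core.windows.net/exports?sv=...',  # placeholder SAS URI
    exclude_keys=True,  # do not export device keys
)
```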
#### File: iothub/models/ip_filter_rule.py
```python
from msrest.serialization import Model
class IpFilterRule(Model):
"""The IP filter rules for the IoT hub.
All required parameters must be populated in order to send to Azure.
:param filter_name: Required. The name of the IP filter rule.
:type filter_name: str
:param action: Required. The desired action for requests captured by this
rule. Possible values include: 'Accept', 'Reject'
:type action: str or ~azure.mgmt.iothub.models.IpFilterActionType
:param ip_mask: Required. A string that contains the IP address range in
CIDR notation for the rule.
:type ip_mask: str
"""
_validation = {
'filter_name': {'required': True},
'action': {'required': True},
'ip_mask': {'required': True},
}
_attribute_map = {
'filter_name': {'key': 'filterName', 'type': 'str'},
'action': {'key': 'action', 'type': 'IpFilterActionType'},
'ip_mask': {'key': 'ipMask', 'type': 'str'},
}
def __init__(self, **kwargs):
super(IpFilterRule, self).__init__(**kwargs)
self.filter_name = kwargs.get('filter_name', None)
self.action = kwargs.get('action', None)
self.ip_mask = kwargs.get('ip_mask', None)
```
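A brief sketch showing how the rule above might be populated; the rule name and CIDR range are placeholders.
```python
from azure.mgmt.iothub.models import IpFilterRule

rule = IpFilterRule(
    filter_name='allow-office',     # placeholder rule name
    action='Accept',                # or 'Reject'
    ip_mask='192.168.100.0/22',     # CIDR range to match
)
```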
#### File: iothub/models/operations_monitoring_properties_py3.py
```python
from msrest.serialization import Model
class OperationsMonitoringProperties(Model):
"""The operations monitoring properties for the IoT hub. The possible keys to
the dictionary are Connections, DeviceTelemetry, C2DCommands,
DeviceIdentityOperations, FileUploadOperations, Routes, D2CTwinOperations,
C2DTwinOperations, TwinQueries, JobsOperations, DirectMethods.
:param events:
:type events: dict[str, str or
~azure.mgmt.iothub.models.OperationMonitoringLevel]
"""
_attribute_map = {
'events': {'key': 'events', 'type': '{str}'},
}
def __init__(self, *, events=None, **kwargs) -> None:
super(OperationsMonitoringProperties, self).__init__(**kwargs)
self.events = events
```
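A sketch of the events dictionary; the keys come from the category list in the docstring, while the level strings are assumed values of `OperationMonitoringLevel`.
```python
from azure.mgmt.iothub.models import OperationsMonitoringProperties

props = OperationsMonitoringProperties(
    events={
        'Connections': 'Information',   # assumed OperationMonitoringLevel value
        'DeviceTelemetry': 'Error',     # assumed OperationMonitoringLevel value
    }
)
```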
#### File: iothub/models/shared_access_signature_authorization_rule.py
```python
from msrest.serialization import Model
class SharedAccessSignatureAuthorizationRule(Model):
"""The properties of an IoT hub shared access policy.
All required parameters must be populated in order to send to Azure.
:param key_name: Required. The name of the shared access policy.
:type key_name: str
:param primary_key: The primary key.
:type primary_key: str
:param secondary_key: The secondary key.
:type secondary_key: str
:param rights: Required. The permissions assigned to the shared access
policy. Possible values include: 'RegistryRead', 'RegistryWrite',
'ServiceConnect', 'DeviceConnect', 'RegistryRead, RegistryWrite',
'RegistryRead, ServiceConnect', 'RegistryRead, DeviceConnect',
'RegistryWrite, ServiceConnect', 'RegistryWrite, DeviceConnect',
'ServiceConnect, DeviceConnect', 'RegistryRead, RegistryWrite,
ServiceConnect', 'RegistryRead, RegistryWrite, DeviceConnect',
'RegistryRead, ServiceConnect, DeviceConnect', 'RegistryWrite,
ServiceConnect, DeviceConnect', 'RegistryRead, RegistryWrite,
ServiceConnect, DeviceConnect'
:type rights: str or ~azure.mgmt.iothub.models.AccessRights
"""
_validation = {
'key_name': {'required': True},
'rights': {'required': True},
}
_attribute_map = {
'key_name': {'key': 'keyName', 'type': 'str'},
'primary_key': {'key': 'primaryKey', 'type': 'str'},
'secondary_key': {'key': 'secondaryKey', 'type': 'str'},
'rights': {'key': 'rights', 'type': 'AccessRights'},
}
def __init__(self, **kwargs):
super(SharedAccessSignatureAuthorizationRule, self).__init__(**kwargs)
self.key_name = kwargs.get('key_name', None)
self.primary_key = kwargs.get('primary_key', None)
self.secondary_key = kwargs.get('secondary_key', None)
self.rights = kwargs.get('rights', None)
```
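A minimal sketch; the keys are omitted so the service can generate them, and the rights string is one of the combinations listed in the docstring.
```python
from azure.mgmt.iothub.models import SharedAccessSignatureAuthorizationRule

policy = SharedAccessSignatureAuthorizationRule(
    key_name='device-policy',               # placeholder policy name
    rights='RegistryRead, DeviceConnect',   # one of the listed AccessRights combinations
)
```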
#### File: iothub/models/test_all_routes_result.py
```python
from msrest.serialization import Model
class TestAllRoutesResult(Model):
"""Result of testing all routes.
:param routes: JSON-serialized array of matched routes
:type routes: list[~azure.mgmt.iothub.models.MatchedRoute]
"""
_attribute_map = {
'routes': {'key': 'routes', 'type': '[MatchedRoute]'},
}
def __init__(self, **kwargs):
super(TestAllRoutesResult, self).__init__(**kwargs)
self.routes = kwargs.get('routes', None)
```
#### File: iothub/models/user_subscription_quota.py
```python
from msrest.serialization import Model
class UserSubscriptionQuota(Model):
"""User subscription quota response.
:param id: IotHub type id
:type id: str
:param type: Response type
:type type: str
:param unit: Unit of IotHub type
:type unit: str
:param current_value: Current number of IotHub type
:type current_value: int
:param limit: Numerical limit on IotHub type
:type limit: int
:param name: IotHub type
:type name: ~azure.mgmt.iothub.models.Name
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'unit': {'key': 'unit', 'type': 'str'},
'current_value': {'key': 'currentValue', 'type': 'int'},
'limit': {'key': 'limit', 'type': 'int'},
'name': {'key': 'name', 'type': 'Name'},
}
def __init__(self, **kwargs):
super(UserSubscriptionQuota, self).__init__(**kwargs)
self.id = kwargs.get('id', None)
self.type = kwargs.get('type', None)
self.unit = kwargs.get('unit', None)
self.current_value = kwargs.get('current_value', None)
self.limit = kwargs.get('limit', None)
self.name = kwargs.get('name', None)
```
#### File: v2016_10_01/models/vault_properties.py
```python
from msrest.serialization import Model
class VaultProperties(Model):
"""Properties of the vault.
All required parameters must be populated in order to send to Azure.
:param tenant_id: Required. The Azure Active Directory tenant ID that
should be used for authenticating requests to the key vault.
:type tenant_id: str
:param sku: Required. SKU details
:type sku: ~azure.mgmt.keyvault.v2016_10_01.models.Sku
:param access_policies: An array of 0 to 16 identities that have access to
the key vault. All identities in the array must use the same tenant ID as
the key vault's tenant ID.
:type access_policies:
list[~azure.mgmt.keyvault.v2016_10_01.models.AccessPolicyEntry]
:param vault_uri: The URI of the vault for performing operations on keys
and secrets.
:type vault_uri: str
:param enabled_for_deployment: Property to specify whether Azure Virtual
Machines are permitted to retrieve certificates stored as secrets from the
key vault.
:type enabled_for_deployment: bool
:param enabled_for_disk_encryption: Property to specify whether Azure Disk
Encryption is permitted to retrieve secrets from the vault and unwrap
keys.
:type enabled_for_disk_encryption: bool
:param enabled_for_template_deployment: Property to specify whether Azure
Resource Manager is permitted to retrieve secrets from the key vault.
:type enabled_for_template_deployment: bool
:param enable_soft_delete: Property specifying whether recoverable
deletion is enabled for this key vault. Setting this property to true
activates the soft delete feature, whereby vaults or vault entities can be
recovered after deletion. Enabling this functionality is irreversible -
that is, the property does not accept false as its value.
:type enable_soft_delete: bool
:param create_mode: The vault's create mode to indicate whether the vault
needs to be recovered or not. Possible values include: 'recover', 'default'
:type create_mode: str or
~azure.mgmt.keyvault.v2016_10_01.models.CreateMode
:param enable_purge_protection: Property specifying whether protection
against purge is enabled for this vault. Setting this property to true
activates protection against purge for this vault and its content - only
the Key Vault service may initiate a hard, irrecoverable deletion. The
setting is effective only if soft delete is also enabled. Enabling this
functionality is irreversible - that is, the property does not accept
false as its value.
:type enable_purge_protection: bool
"""
_validation = {
'tenant_id': {'required': True},
'sku': {'required': True},
}
_attribute_map = {
'tenant_id': {'key': 'tenantId', 'type': 'str'},
'sku': {'key': 'sku', 'type': 'Sku'},
'access_policies': {'key': 'accessPolicies', 'type': '[AccessPolicyEntry]'},
'vault_uri': {'key': 'vaultUri', 'type': 'str'},
'enabled_for_deployment': {'key': 'enabledForDeployment', 'type': 'bool'},
'enabled_for_disk_encryption': {'key': 'enabledForDiskEncryption', 'type': 'bool'},
'enabled_for_template_deployment': {'key': 'enabledForTemplateDeployment', 'type': 'bool'},
'enable_soft_delete': {'key': 'enableSoftDelete', 'type': 'bool'},
'create_mode': {'key': 'createMode', 'type': 'CreateMode'},
'enable_purge_protection': {'key': 'enablePurgeProtection', 'type': 'bool'},
}
def __init__(self, **kwargs):
super(VaultProperties, self).__init__(**kwargs)
self.tenant_id = kwargs.get('tenant_id', None)
self.sku = kwargs.get('sku', None)
self.access_policies = kwargs.get('access_policies', None)
self.vault_uri = kwargs.get('vault_uri', None)
self.enabled_for_deployment = kwargs.get('enabled_for_deployment', None)
self.enabled_for_disk_encryption = kwargs.get('enabled_for_disk_encryption', None)
self.enabled_for_template_deployment = kwargs.get('enabled_for_template_deployment', None)
self.enable_soft_delete = kwargs.get('enable_soft_delete', None)
self.create_mode = kwargs.get('create_mode', None)
self.enable_purge_protection = kwargs.get('enable_purge_protection', None)
```
#### File: azure-mgmt-keyvault/tests/test_vaults.py
```python
from devtools_testutils import AzureMgmtTestCase, ResourceGroupPreparer
import azure.mgmt.keyvault.models
class MgmtKeyVaultTest(AzureMgmtTestCase):
def setUp(self):
super(MgmtKeyVaultTest, self).setUp()
self.keyvault_client = self.create_mgmt_client(
azure.mgmt.keyvault.KeyVaultManagementClient
)
@ResourceGroupPreparer()
def test_vaults_operations(self, resource_group, **kwargs):
account_name = self.get_resource_name('pykv')
vault = self.keyvault_client.vaults.create_or_update(
resource_group.name,
account_name,
{
'location': self.region,
'properties': {
'sku': {
'name': 'standard'
},
# Fake random GUID
'tenant_id': '6819f86e-5d41-47b0-9297-334f33d7922d',
'access_policies': []
}
}
).result()
self.assertEqual(vault.name, account_name)
vault = self.keyvault_client.vaults.get(
resource_group.name,
account_name
)
self.assertEqual(vault.name, account_name)
vaults = list(self.keyvault_client.vaults.list_by_resource_group(resource_group.name))
self.assertEqual(len(vaults), 1)
self.assertIsInstance(vaults[0], azure.mgmt.keyvault.models.Vault)
self.assertEqual(vaults[0].name, account_name)
vaults = list(self.keyvault_client.vaults.list())
self.assertGreater(len(vaults), 0)
self.assertTrue(all(isinstance(v, azure.mgmt.keyvault.models.Resource) for v in vaults))
self.keyvault_client.vaults.delete(
resource_group.name,
account_name
)
```
#### File: media/models/asset_filter_py3.py
```python
from .proxy_resource_py3 import ProxyResource
class AssetFilter(ProxyResource):
"""An Asset Filter.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Fully qualified resource ID for the resource.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
:param presentation_time_range: The presentation time range.
:type presentation_time_range:
~azure.mgmt.media.models.PresentationTimeRange
:param first_quality: The first quality.
:type first_quality: ~azure.mgmt.media.models.FirstQuality
:param tracks: The tracks selection conditions.
:type tracks: list[~azure.mgmt.media.models.FilterTrackSelection]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'presentation_time_range': {'key': 'properties.presentationTimeRange', 'type': 'PresentationTimeRange'},
'first_quality': {'key': 'properties.firstQuality', 'type': 'FirstQuality'},
'tracks': {'key': 'properties.tracks', 'type': '[FilterTrackSelection]'},
}
def __init__(self, *, presentation_time_range=None, first_quality=None, tracks=None, **kwargs) -> None:
super(AssetFilter, self).__init__(**kwargs)
self.presentation_time_range = presentation_time_range
self.first_quality = first_quality
self.tracks = tracks
```
#### File: media/models/content_key_policy_option.py
```python
from msrest.serialization import Model
class ContentKeyPolicyOption(Model):
"""Represents a policy option.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar policy_option_id: The legacy Policy Option ID.
:vartype policy_option_id: str
:param name: The Policy Option description.
:type name: str
:param configuration: Required. The key delivery configuration.
:type configuration:
~azure.mgmt.media.models.ContentKeyPolicyConfiguration
:param restriction: Required. The requirements that must be met to deliver
keys with this configuration
:type restriction: ~azure.mgmt.media.models.ContentKeyPolicyRestriction
"""
_validation = {
'policy_option_id': {'readonly': True},
'configuration': {'required': True},
'restriction': {'required': True},
}
_attribute_map = {
'policy_option_id': {'key': 'policyOptionId', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'configuration': {'key': 'configuration', 'type': 'ContentKeyPolicyConfiguration'},
'restriction': {'key': 'restriction', 'type': 'ContentKeyPolicyRestriction'},
}
def __init__(self, **kwargs):
super(ContentKeyPolicyOption, self).__init__(**kwargs)
self.policy_option_id = None
self.name = kwargs.get('name', None)
self.configuration = kwargs.get('configuration', None)
self.restriction = kwargs.get('restriction', None)
```
#### File: media/models/format_py3.py
```python
from msrest.serialization import Model
class Format(Model):
"""Base class for output.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: ImageFormat, MultiBitrateFormat
All required parameters must be populated in order to send to Azure.
:param filename_pattern: The pattern of the file names for the generated
output files. The following macros are supported in the file name:
{Basename} - The base name of the input video {Extension} - The
appropriate extension for this format. {Label} - The label assigned to the
codec/layer. {Index} - A unique index for thumbnails. Only applicable to
thumbnails. {Bitrate} - The audio/video bitrate. Not applicable to
thumbnails. {Codec} - The type of the audio/video codec. Any unsubstituted
macros will be collapsed and removed from the filename.
:type filename_pattern: str
:param odatatype: Required. Constant filled by server.
:type odatatype: str
"""
_validation = {
'odatatype': {'required': True},
}
_attribute_map = {
'filename_pattern': {'key': 'filenamePattern', 'type': 'str'},
'odatatype': {'key': '@odata\\.type', 'type': 'str'},
}
_subtype_map = {
'odatatype': {'#Microsoft.Media.ImageFormat': 'ImageFormat', '#Microsoft.Media.MultiBitrateFormat': 'MultiBitrateFormat'}
}
def __init__(self, *, filename_pattern: str=None, **kwargs) -> None:
super(Format, self).__init__(**kwargs)
self.filename_pattern = filename_pattern
self.odatatype = None
```
#### File: media/models/h264_video.py
```python
from .video import Video
class H264Video(Video):
"""Describes all the properties for encoding a video with the H.264 codec.
All required parameters must be populated in order to send to Azure.
:param label: An optional label for the codec. The label can be used to
control muxing behavior.
:type label: str
:param odatatype: Required. Constant filled by server.
:type odatatype: str
:param key_frame_interval: The distance between two key frames, thereby
defining a group of pictures (GOP). The value should be a non-zero integer
in the range [1, 30] seconds, specified in ISO 8601 format. The default is
2 seconds (PT2S).
:type key_frame_interval: timedelta
:param stretch_mode: The resizing mode - how the input video will be
resized to fit the desired output resolution(s). Default is AutoSize.
Possible values include: 'None', 'AutoSize', 'AutoFit'
:type stretch_mode: str or ~azure.mgmt.media.models.StretchMode
:param scene_change_detection: Whether or not the encoder should insert
key frames at scene changes. If not specified, the default is false. This
flag should be set to true only when the encoder is being configured to
produce a single output video.
:type scene_change_detection: bool
:param complexity: Tells the encoder how to choose its encoding settings.
The default value is Balanced. Possible values include: 'Speed',
'Balanced', 'Quality'
:type complexity: str or ~azure.mgmt.media.models.H264Complexity
:param layers: The collection of output H.264 layers to be produced by the
encoder.
:type layers: list[~azure.mgmt.media.models.H264Layer]
"""
_validation = {
'odatatype': {'required': True},
}
_attribute_map = {
'label': {'key': 'label', 'type': 'str'},
'odatatype': {'key': '@odata\\.type', 'type': 'str'},
'key_frame_interval': {'key': 'keyFrameInterval', 'type': 'duration'},
'stretch_mode': {'key': 'stretchMode', 'type': 'str'},
'scene_change_detection': {'key': 'sceneChangeDetection', 'type': 'bool'},
'complexity': {'key': 'complexity', 'type': 'str'},
'layers': {'key': 'layers', 'type': '[H264Layer]'},
}
def __init__(self, **kwargs):
super(H264Video, self).__init__(**kwargs)
self.scene_change_detection = kwargs.get('scene_change_detection', None)
self.complexity = kwargs.get('complexity', None)
self.layers = kwargs.get('layers', None)
self.odatatype = '#Microsoft.Media.H264Video'
```
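A small sketch limited to the fields handled by the constructor shown above; `H264Layer` definitions are omitted because that model is not in this file.
```python
from azure.mgmt.media.models import H264Video

codec = H264Video(
    complexity='Balanced',          # 'Speed', 'Balanced' or 'Quality'
    scene_change_detection=False,
)
# The discriminator is set by the constructor itself:
assert codec.odatatype == '#Microsoft.Media.H264Video'
```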
#### File: media/models/layer_py3.py
```python
from msrest.serialization import Model
class Layer(Model):
"""The encoder can be configured to produce video and/or images (thumbnails)
at different resolutions, by specifying a layer for each desired
resolution. A layer represents the properties for the video or image at a
resolution.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: VideoLayer, JpgLayer, PngLayer
All required parameters must be populated in order to send to Azure.
:param width: The width of the output video for this layer. The value can
be absolute (in pixels) or relative (in percentage). For example 50% means
the output video has half as many pixels in width as the input.
:type width: str
:param height: The height of the output video for this layer. The value
can be absolute (in pixels) or relative (in percentage). For example 50%
means the output video has half as many pixels in height as the input.
:type height: str
:param label: The alphanumeric label for this layer, which can be used in
multiplexing different video and audio layers, or in naming the output
file.
:type label: str
:param odatatype: Required. Constant filled by server.
:type odatatype: str
"""
_validation = {
'odatatype': {'required': True},
}
_attribute_map = {
'width': {'key': 'width', 'type': 'str'},
'height': {'key': 'height', 'type': 'str'},
'label': {'key': 'label', 'type': 'str'},
'odatatype': {'key': '@odata\\.type', 'type': 'str'},
}
_subtype_map = {
'odatatype': {'#Microsoft.Media.VideoLayer': 'VideoLayer', '#Microsoft.Media.JpgLayer': 'JpgLayer', '#Microsoft.Media.PngLayer': 'PngLayer'}
}
def __init__(self, *, width: str=None, height: str=None, label: str=None, **kwargs) -> None:
super(Layer, self).__init__(**kwargs)
self.width = width
self.height = height
self.label = label
self.odatatype = None
```
#### File: media/models/list_container_sas_input.py
```python
from msrest.serialization import Model
class ListContainerSasInput(Model):
"""The parameters to the list SAS request.
:param permissions: The permissions to set on the SAS URL. Possible values
include: 'Read', 'ReadWrite', 'ReadWriteDelete'
:type permissions: str or
~azure.mgmt.media.models.AssetContainerPermission
:param expiry_time: The SAS URL expiration time. This must be less than
24 hours from the current time.
:type expiry_time: datetime
"""
_attribute_map = {
'permissions': {'key': 'permissions', 'type': 'str'},
'expiry_time': {'key': 'expiryTime', 'type': 'iso-8601'},
}
def __init__(self, **kwargs):
super(ListContainerSasInput, self).__init__(**kwargs)
self.permissions = kwargs.get('permissions', None)
self.expiry_time = kwargs.get('expiry_time', None)
```
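A short sketch; the expiry is kept well under the 24-hour limit mentioned in the docstring.
```python
from datetime import datetime, timedelta

from azure.mgmt.media.models import ListContainerSasInput

sas_input = ListContainerSasInput(
    permissions='ReadWrite',                             # 'Read', 'ReadWrite' or 'ReadWriteDelete'
    expiry_time=datetime.utcnow() + timedelta(hours=4),  # must stay below 24 hours
)
```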
#### File: media/models/list_streaming_locators_response_py3.py
```python
from msrest.serialization import Model
class ListStreamingLocatorsResponse(Model):
"""The Streaming Locators associated with this Asset.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar streaming_locators: The list of Streaming Locators.
:vartype streaming_locators:
list[~azure.mgmt.media.models.AssetStreamingLocator]
"""
_validation = {
'streaming_locators': {'readonly': True},
}
_attribute_map = {
'streaming_locators': {'key': 'streamingLocators', 'type': '[AssetStreamingLocator]'},
}
def __init__(self, **kwargs) -> None:
super(ListStreamingLocatorsResponse, self).__init__(**kwargs)
self.streaming_locators = None
```
#### File: media/models/live_event_endpoint_py3.py
```python
from msrest.serialization import Model
class LiveEventEndpoint(Model):
"""The Live Event endpoint.
:param protocol: The endpoint protocol.
:type protocol: str
:param url: The endpoint URL.
:type url: str
"""
_attribute_map = {
'protocol': {'key': 'protocol', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
}
def __init__(self, *, protocol: str=None, url: str=None, **kwargs) -> None:
super(LiveEventEndpoint, self).__init__(**kwargs)
self.protocol = protocol
self.url = url
```
#### File: media/models/odata_error.py
```python
from msrest.serialization import Model
class ODataError(Model):
"""Information about an error.
:param code: A language-independent error name.
:type code: str
:param message: The error message.
:type message: str
:param target: The target of the error (for example, the name of the
property in error).
:type target: str
:param details: The error details.
:type details: list[~azure.mgmt.media.models.ODataError]
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'details': {'key': 'details', 'type': '[ODataError]'},
}
def __init__(self, **kwargs):
super(ODataError, self).__init__(**kwargs)
self.code = kwargs.get('code', None)
self.message = kwargs.get('message', None)
self.target = kwargs.get('target', None)
self.details = kwargs.get('details', None)
```
#### File: media/models/streaming_endpoint_access_control.py
```python
from msrest.serialization import Model
class StreamingEndpointAccessControl(Model):
"""StreamingEndpoint access control definition.
:param akamai: The access control of Akamai
:type akamai: ~azure.mgmt.media.models.AkamaiAccessControl
:param ip: The IP access control of the StreamingEndpoint.
:type ip: ~azure.mgmt.media.models.IPAccessControl
"""
_attribute_map = {
'akamai': {'key': 'akamai', 'type': 'AkamaiAccessControl'},
'ip': {'key': 'ip', 'type': 'IPAccessControl'},
}
def __init__(self, **kwargs):
super(StreamingEndpointAccessControl, self).__init__(**kwargs)
self.akamai = kwargs.get('akamai', None)
self.ip = kwargs.get('ip', None)
```
#### File: media/models/streaming_policy_content_key.py
```python
from msrest.serialization import Model
class StreamingPolicyContentKey(Model):
"""Class to specify properties of content key.
:param label: Label can be used to specify Content Key when creating a
Streaming Locator
:type label: str
:param policy_name: Policy used by Content Key
:type policy_name: str
:param tracks: Tracks which use this content key
:type tracks: list[~azure.mgmt.media.models.TrackSelection]
"""
_attribute_map = {
'label': {'key': 'label', 'type': 'str'},
'policy_name': {'key': 'policyName', 'type': 'str'},
'tracks': {'key': 'tracks', 'type': '[TrackSelection]'},
}
def __init__(self, **kwargs):
super(StreamingPolicyContentKey, self).__init__(**kwargs)
self.label = kwargs.get('label', None)
self.policy_name = kwargs.get('policy_name', None)
self.tracks = kwargs.get('tracks', None)
```
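A minimal sketch; `tracks` is left out because `TrackSelection` is not shown in this file, and the label and policy names are placeholders.
```python
from azure.mgmt.media.models import StreamingPolicyContentKey

content_key = StreamingPolicyContentKey(
    label='cencDefaultKey',            # placeholder label
    policy_name='myContentKeyPolicy',  # placeholder Content Key Policy name
)
```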
#### File: media/models/subscription_media_service_py3.py
```python
from .tracked_resource_py3 import TrackedResource
class SubscriptionMediaService(TrackedResource):
"""A Media Services account.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Fully qualified resource ID for the resource.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param location: The Azure Region of the resource.
:type location: str
:ivar media_service_id: The Media Services account ID.
:vartype media_service_id: str
:param storage_accounts: The storage accounts for this resource.
:type storage_accounts: list[~azure.mgmt.media.models.StorageAccount]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'media_service_id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'location': {'key': 'location', 'type': 'str'},
'media_service_id': {'key': 'properties.mediaServiceId', 'type': 'str'},
'storage_accounts': {'key': 'properties.storageAccounts', 'type': '[StorageAccount]'},
}
def __init__(self, *, tags=None, location: str=None, storage_accounts=None, **kwargs) -> None:
super(SubscriptionMediaService, self).__init__(tags=tags, location=location, **kwargs)
self.media_service_id = None
self.storage_accounts = storage_accounts
```
#### File: monitor/models/automation_runbook_receiver.py
```python
from msrest.serialization import Model
class AutomationRunbookReceiver(Model):
"""The Azure Automation Runbook notification receiver.
All required parameters must be populated in order to send to Azure.
:param automation_account_id: Required. The Azure automation account Id
which holds this runbook and authenticate to Azure resource.
:type automation_account_id: str
:param runbook_name: Required. The name for this runbook.
:type runbook_name: str
:param webhook_resource_id: Required. The resource id for webhook linked
to this runbook.
:type webhook_resource_id: str
:param is_global_runbook: Required. Indicates whether this instance is
global runbook.
:type is_global_runbook: bool
:param name: Indicates name of the webhook.
:type name: str
:param service_uri: The URI where webhooks should be sent.
:type service_uri: str
"""
_validation = {
'automation_account_id': {'required': True},
'runbook_name': {'required': True},
'webhook_resource_id': {'required': True},
'is_global_runbook': {'required': True},
}
_attribute_map = {
'automation_account_id': {'key': 'automationAccountId', 'type': 'str'},
'runbook_name': {'key': 'runbookName', 'type': 'str'},
'webhook_resource_id': {'key': 'webhookResourceId', 'type': 'str'},
'is_global_runbook': {'key': 'isGlobalRunbook', 'type': 'bool'},
'name': {'key': 'name', 'type': 'str'},
'service_uri': {'key': 'serviceUri', 'type': 'str'},
}
def __init__(self, **kwargs):
super(AutomationRunbookReceiver, self).__init__(**kwargs)
self.automation_account_id = kwargs.get('automation_account_id', None)
self.runbook_name = kwargs.get('runbook_name', None)
self.webhook_resource_id = kwargs.get('webhook_resource_id', None)
self.is_global_runbook = kwargs.get('is_global_runbook', None)
self.name = kwargs.get('name', None)
self.service_uri = kwargs.get('service_uri', None)
```
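A hedged sketch with placeholder ARM resource IDs; only the required fields plus a webhook name are set.
```python
from azure.mgmt.monitor.models import AutomationRunbookReceiver

receiver = AutomationRunbookReceiver(
    automation_account_id='/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.Automation/automationAccounts/<account>',  # placeholder
    runbook_name='RestartVm',
    webhook_resource_id='/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.Automation/automationAccounts/<account>/webhooks/<webhook>',  # placeholder
    is_global_runbook=False,
    name='restart-vm-webhook',
)
```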
#### File: monitor/models/http_request_info_py3.py
```python
from msrest.serialization import Model
class HttpRequestInfo(Model):
"""The Http request info.
:param client_request_id: the client request id.
:type client_request_id: str
:param client_ip_address: the client Ip Address
:type client_ip_address: str
:param method: the Http request method.
:type method: str
:param uri: the Uri.
:type uri: str
"""
_attribute_map = {
'client_request_id': {'key': 'clientRequestId', 'type': 'str'},
'client_ip_address': {'key': 'clientIpAddress', 'type': 'str'},
'method': {'key': 'method', 'type': 'str'},
'uri': {'key': 'uri', 'type': 'str'},
}
def __init__(self, *, client_request_id: str=None, client_ip_address: str=None, method: str=None, uri: str=None, **kwargs) -> None:
super(HttpRequestInfo, self).__init__(**kwargs)
self.client_request_id = client_request_id
self.client_ip_address = client_ip_address
self.method = method
self.uri = uri
```
#### File: monitor/models/multi_metric_criteria.py
```python
from msrest.serialization import Model
class MultiMetricCriteria(Model):
"""The types of conditions for a multi resource alert.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: MetricCriteria
All required parameters must be populated in order to send to Azure.
:param additional_properties: Unmatched properties from the message are
deserialized to this collection
:type additional_properties: dict[str, object]
:param criterion_type: Required. Constant filled by server.
:type criterion_type: str
"""
_validation = {
'criterion_type': {'required': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'criterion_type': {'key': 'criterionType', 'type': 'str'},
}
_subtype_map = {
'criterion_type': {'StaticThresholdCriterion': 'MetricCriteria'}
}
def __init__(self, **kwargs):
super(MultiMetricCriteria, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.criterion_type = None
```
#### File: monitor/models/scale_rule_py3.py
```python
from msrest.serialization import Model
class ScaleRule(Model):
"""A rule that provide the triggers and parameters for the scaling action.
All required parameters must be populated in order to send to Azure.
:param metric_trigger: Required. the trigger that results in a scaling
action.
:type metric_trigger: ~azure.mgmt.monitor.models.MetricTrigger
:param scale_action: Required. the parameters for the scaling action.
:type scale_action: ~azure.mgmt.monitor.models.ScaleAction
"""
_validation = {
'metric_trigger': {'required': True},
'scale_action': {'required': True},
}
_attribute_map = {
'metric_trigger': {'key': 'metricTrigger', 'type': 'MetricTrigger'},
'scale_action': {'key': 'scaleAction', 'type': 'ScaleAction'},
}
def __init__(self, *, metric_trigger, scale_action, **kwargs) -> None:
super(ScaleRule, self).__init__(**kwargs)
self.metric_trigger = metric_trigger
self.scale_action = scale_action
```
#### File: monitor/models/vm_insights_onboarding_status_py3.py
```python
from .proxy_resource_py3 import ProxyResource
class VMInsightsOnboardingStatus(ProxyResource):
"""VM Insights onboarding status for a resource.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Azure resource Id
:vartype id: str
:ivar name: Azure resource name
:vartype name: str
:ivar type: Azure resource type
:vartype type: str
:param resource_id: Required. Azure Resource Manager identifier of the
resource whose onboarding status is being represented.
:type resource_id: str
:param onboarding_status: Required. The onboarding status for the
resource. Note that a higher-level scope, e.g., resource group or
subscription, is considered onboarded if at least one resource under it is
onboarded. Possible values include: 'onboarded', 'notOnboarded', 'unknown'
:type onboarding_status: str or
~azure.mgmt.monitor.models.OnboardingStatus
:param data_status: Required. The status of VM Insights data from the
resource. When reported as `present` the data array will contain
information about the data containers to which data for the specified
resource is being routed. Possible values include: 'present', 'notPresent'
:type data_status: str or ~azure.mgmt.monitor.models.DataStatus
:param data: Containers that currently store VM Insights data for the
specified resource.
:type data: list[~azure.mgmt.monitor.models.DataContainer]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'resource_id': {'required': True},
'onboarding_status': {'required': True},
'data_status': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'resource_id': {'key': 'properties.resourceId', 'type': 'str'},
'onboarding_status': {'key': 'properties.onboardingStatus', 'type': 'str'},
'data_status': {'key': 'properties.dataStatus', 'type': 'str'},
'data': {'key': 'properties.data', 'type': '[DataContainer]'},
}
def __init__(self, *, resource_id: str, onboarding_status, data_status, data=None, **kwargs) -> None:
super(VMInsightsOnboardingStatus, self).__init__(**kwargs)
self.resource_id = resource_id
self.onboarding_status = onboarding_status
self.data_status = data_status
self.data = data
```
#### File: v2017_03_01/models/tunnel_connection_health_py3.py
```python
from msrest.serialization import Model
class TunnelConnectionHealth(Model):
"""VirtualNetworkGatewayConnection properties.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar tunnel: Tunnel name.
:vartype tunnel: str
:ivar connection_status: Virtual network Gateway connection status.
Possible values include: 'Unknown', 'Connecting', 'Connected',
'NotConnected'
:vartype connection_status: str or
~azure.mgmt.network.v2017_03_01.models.VirtualNetworkGatewayConnectionStatus
:ivar ingress_bytes_transferred: The Ingress Bytes Transferred in this
connection
:vartype ingress_bytes_transferred: long
:ivar egress_bytes_transferred: The Egress Bytes Transferred in this
connection
:vartype egress_bytes_transferred: long
:ivar last_connection_established_utc_time: The time at which connection
was established in Utc format.
:vartype last_connection_established_utc_time: str
"""
_validation = {
'tunnel': {'readonly': True},
'connection_status': {'readonly': True},
'ingress_bytes_transferred': {'readonly': True},
'egress_bytes_transferred': {'readonly': True},
'last_connection_established_utc_time': {'readonly': True},
}
_attribute_map = {
'tunnel': {'key': 'tunnel', 'type': 'str'},
'connection_status': {'key': 'connectionStatus', 'type': 'str'},
'ingress_bytes_transferred': {'key': 'ingressBytesTransferred', 'type': 'long'},
'egress_bytes_transferred': {'key': 'egressBytesTransferred', 'type': 'long'},
'last_connection_established_utc_time': {'key': 'lastConnectionEstablishedUtcTime', 'type': 'str'},
}
def __init__(self, **kwargs) -> None:
super(TunnelConnectionHealth, self).__init__(**kwargs)
self.tunnel = None
self.connection_status = None
self.ingress_bytes_transferred = None
self.egress_bytes_transferred = None
self.last_connection_established_utc_time = None
```
#### File: v2017_10_01/models/connectivity_hop_py3.py
```python
from msrest.serialization import Model
class ConnectivityHop(Model):
"""Information about a hop between the source and the destination.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar type: The type of the hop.
:vartype type: str
:ivar id: The ID of the hop.
:vartype id: str
:ivar address: The IP address of the hop.
:vartype address: str
:ivar resource_id: The ID of the resource corresponding to this hop.
:vartype resource_id: str
:ivar next_hop_ids: List of next hop identifiers.
:vartype next_hop_ids: list[str]
:ivar issues: List of issues.
:vartype issues:
list[~azure.mgmt.network.v2017_10_01.models.ConnectivityIssue]
"""
_validation = {
'type': {'readonly': True},
'id': {'readonly': True},
'address': {'readonly': True},
'resource_id': {'readonly': True},
'next_hop_ids': {'readonly': True},
'issues': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'address': {'key': 'address', 'type': 'str'},
'resource_id': {'key': 'resourceId', 'type': 'str'},
'next_hop_ids': {'key': 'nextHopIds', 'type': '[str]'},
'issues': {'key': 'issues', 'type': '[ConnectivityIssue]'},
}
def __init__(self, **kwargs) -> None:
super(ConnectivityHop, self).__init__(**kwargs)
self.type = None
self.id = None
self.address = None
self.resource_id = None
self.next_hop_ids = None
self.issues = None
```
#### File: v2017_10_01/models/virtual_network_gateway_connection_list_entity.py
```python
from .resource import Resource
class VirtualNetworkGatewayConnectionListEntity(Resource):
"""A common class for general resource information.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:param id: Resource ID.
:type id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param authorization_key: The authorizationKey.
:type authorization_key: str
:param virtual_network_gateway1: Required. The reference to virtual
network gateway resource.
:type virtual_network_gateway1:
~azure.mgmt.network.v2017_10_01.models.VirtualNetworkConnectionGatewayReference
:param virtual_network_gateway2: The reference to virtual network gateway
resource.
:type virtual_network_gateway2:
~azure.mgmt.network.v2017_10_01.models.VirtualNetworkConnectionGatewayReference
:param local_network_gateway2: The reference to local network gateway
resource.
:type local_network_gateway2:
~azure.mgmt.network.v2017_10_01.models.VirtualNetworkConnectionGatewayReference
:param connection_type: Required. Gateway connection type. Possible values
are: 'IPsec', 'Vnet2Vnet', 'ExpressRoute', and 'VPNClient'. Possible values
include: 'IPsec', 'Vnet2Vnet', 'ExpressRoute', 'VPNClient'
:type connection_type: str or
~azure.mgmt.network.v2017_10_01.models.VirtualNetworkGatewayConnectionType
:param routing_weight: The routing weight.
:type routing_weight: int
:param shared_key: The IPSec shared key.
:type shared_key: str
:ivar connection_status: Virtual network Gateway connection status.
Possible values are 'Unknown', 'Connecting', 'Connected' and
'NotConnected'. Possible values include: 'Unknown', 'Connecting',
'Connected', 'NotConnected'
:vartype connection_status: str or
~azure.mgmt.network.v2017_10_01.models.VirtualNetworkGatewayConnectionStatus
:ivar tunnel_connection_status: Collection of all tunnels' connection
health status.
:vartype tunnel_connection_status:
list[~azure.mgmt.network.v2017_10_01.models.TunnelConnectionHealth]
:ivar egress_bytes_transferred: The egress bytes transferred in this
connection.
:vartype egress_bytes_transferred: long
:ivar ingress_bytes_transferred: The ingress bytes transferred in this
connection.
:vartype ingress_bytes_transferred: long
:param peer: The reference to peerings resource.
:type peer: ~azure.mgmt.network.v2017_10_01.models.SubResource
:param enable_bgp: EnableBgp flag
:type enable_bgp: bool
:param use_policy_based_traffic_selectors: Enable policy-based traffic
selectors.
:type use_policy_based_traffic_selectors: bool
:param ipsec_policies: The IPSec Policies to be considered by this
connection.
:type ipsec_policies:
list[~azure.mgmt.network.v2017_10_01.models.IpsecPolicy]
:param resource_guid: The resource GUID property of the
VirtualNetworkGatewayConnection resource.
:type resource_guid: str
:ivar provisioning_state: The provisioning state of the
VirtualNetworkGatewayConnection resource. Possible values are: 'Updating',
'Deleting', and 'Failed'.
:vartype provisioning_state: str
:param etag: Gets a unique read-only string that changes whenever the
resource is updated.
:type etag: str
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
'virtual_network_gateway1': {'required': True},
'connection_type': {'required': True},
'connection_status': {'readonly': True},
'tunnel_connection_status': {'readonly': True},
'egress_bytes_transferred': {'readonly': True},
'ingress_bytes_transferred': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'authorization_key': {'key': 'properties.authorizationKey', 'type': 'str'},
'virtual_network_gateway1': {'key': 'properties.virtualNetworkGateway1', 'type': 'VirtualNetworkConnectionGatewayReference'},
'virtual_network_gateway2': {'key': 'properties.virtualNetworkGateway2', 'type': 'VirtualNetworkConnectionGatewayReference'},
'local_network_gateway2': {'key': 'properties.localNetworkGateway2', 'type': 'VirtualNetworkConnectionGatewayReference'},
'connection_type': {'key': 'properties.connectionType', 'type': 'str'},
'routing_weight': {'key': 'properties.routingWeight', 'type': 'int'},
'shared_key': {'key': 'properties.sharedKey', 'type': 'str'},
'connection_status': {'key': 'properties.connectionStatus', 'type': 'str'},
'tunnel_connection_status': {'key': 'properties.tunnelConnectionStatus', 'type': '[TunnelConnectionHealth]'},
'egress_bytes_transferred': {'key': 'properties.egressBytesTransferred', 'type': 'long'},
'ingress_bytes_transferred': {'key': 'properties.ingressBytesTransferred', 'type': 'long'},
'peer': {'key': 'properties.peer', 'type': 'SubResource'},
'enable_bgp': {'key': 'properties.enableBgp', 'type': 'bool'},
'use_policy_based_traffic_selectors': {'key': 'properties.usePolicyBasedTrafficSelectors', 'type': 'bool'},
'ipsec_policies': {'key': 'properties.ipsecPolicies', 'type': '[IpsecPolicy]'},
'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, **kwargs):
super(VirtualNetworkGatewayConnectionListEntity, self).__init__(**kwargs)
self.authorization_key = kwargs.get('authorization_key', None)
self.virtual_network_gateway1 = kwargs.get('virtual_network_gateway1', None)
self.virtual_network_gateway2 = kwargs.get('virtual_network_gateway2', None)
self.local_network_gateway2 = kwargs.get('local_network_gateway2', None)
self.connection_type = kwargs.get('connection_type', None)
self.routing_weight = kwargs.get('routing_weight', None)
self.shared_key = kwargs.get('shared_key', None)
self.connection_status = None
self.tunnel_connection_status = None
self.egress_bytes_transferred = None
self.ingress_bytes_transferred = None
self.peer = kwargs.get('peer', None)
self.enable_bgp = kwargs.get('enable_bgp', None)
self.use_policy_based_traffic_selectors = kwargs.get('use_policy_based_traffic_selectors', None)
self.ipsec_policies = kwargs.get('ipsec_policies', None)
self.resource_guid = kwargs.get('resource_guid', None)
self.provisioning_state = None
self.etag = kwargs.get('etag', None)
```
#### File: v2017_11_01/models/application_gateway_available_ssl_options.py
```python
from .resource import Resource
class ApplicationGatewayAvailableSslOptions(Resource):
"""Response for ApplicationGatewayAvailableSslOptions API service call.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param predefined_policies: List of available Ssl predefined policy.
:type predefined_policies:
list[~azure.mgmt.network.v2017_11_01.models.SubResource]
:param default_policy: Name of the Ssl predefined policy applied by
default to application gateway. Possible values include:
'AppGwSslPolicy20150501', 'AppGwSslPolicy20170401',
'AppGwSslPolicy20170401S'
:type default_policy: str or
~azure.mgmt.network.v2017_11_01.models.ApplicationGatewaySslPolicyName
:param available_cipher_suites: List of available Ssl cipher suites.
:type available_cipher_suites: list[str or
~azure.mgmt.network.v2017_11_01.models.ApplicationGatewaySslCipherSuite]
:param available_protocols: List of available Ssl protocols.
:type available_protocols: list[str or
~azure.mgmt.network.v2017_11_01.models.ApplicationGatewaySslProtocol]
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'predefined_policies': {'key': 'properties.predefinedPolicies', 'type': '[SubResource]'},
'default_policy': {'key': 'properties.defaultPolicy', 'type': 'str'},
'available_cipher_suites': {'key': 'properties.availableCipherSuites', 'type': '[str]'},
'available_protocols': {'key': 'properties.availableProtocols', 'type': '[str]'},
}
def __init__(self, **kwargs):
super(ApplicationGatewayAvailableSslOptions, self).__init__(**kwargs)
self.predefined_policies = kwargs.get('predefined_policies', None)
self.default_policy = kwargs.get('default_policy', None)
self.available_cipher_suites = kwargs.get('available_cipher_suites', None)
self.available_protocols = kwargs.get('available_protocols', None)
```
#### File: v2017_11_01/models/express_route_circuit.py
```python
from .resource import Resource
class ExpressRouteCircuit(Resource):
"""ExpressRouteCircuit resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param sku: The SKU.
:type sku: ~azure.mgmt.network.v2017_11_01.models.ExpressRouteCircuitSku
:param allow_classic_operations: Allow classic operations
:type allow_classic_operations: bool
:param circuit_provisioning_state: The CircuitProvisioningState of the
resource.
:type circuit_provisioning_state: str
:param service_provider_provisioning_state: The
ServiceProviderProvisioningState of the resource. Possible values
are 'NotProvisioned', 'Provisioning', 'Provisioned', and 'Deprovisioning'.
Possible values include: 'NotProvisioned', 'Provisioning', 'Provisioned',
'Deprovisioning'
:type service_provider_provisioning_state: str or
~azure.mgmt.network.v2017_11_01.models.ServiceProviderProvisioningState
:param authorizations: The list of authorizations.
:type authorizations:
list[~azure.mgmt.network.v2017_11_01.models.ExpressRouteCircuitAuthorization]
:param peerings: The list of peerings.
:type peerings:
list[~azure.mgmt.network.v2017_11_01.models.ExpressRouteCircuitPeering]
:param service_key: The ServiceKey.
:type service_key: str
:param service_provider_notes: The ServiceProviderNotes.
:type service_provider_notes: str
:param service_provider_properties: The ServiceProviderProperties.
:type service_provider_properties:
~azure.mgmt.network.v2017_11_01.models.ExpressRouteCircuitServiceProviderProperties
:param provisioning_state: Gets the provisioning state of the public IP
resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param gateway_manager_etag: The GatewayManager Etag.
:type gateway_manager_etag: str
:ivar etag: Gets a unique read-only string that changes whenever the
resource is updated.
:vartype etag: str
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
'etag': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'sku': {'key': 'sku', 'type': 'ExpressRouteCircuitSku'},
'allow_classic_operations': {'key': 'properties.allowClassicOperations', 'type': 'bool'},
'circuit_provisioning_state': {'key': 'properties.circuitProvisioningState', 'type': 'str'},
'service_provider_provisioning_state': {'key': 'properties.serviceProviderProvisioningState', 'type': 'str'},
'authorizations': {'key': 'properties.authorizations', 'type': '[ExpressRouteCircuitAuthorization]'},
'peerings': {'key': 'properties.peerings', 'type': '[ExpressRouteCircuitPeering]'},
'service_key': {'key': 'properties.serviceKey', 'type': 'str'},
'service_provider_notes': {'key': 'properties.serviceProviderNotes', 'type': 'str'},
'service_provider_properties': {'key': 'properties.serviceProviderProperties', 'type': 'ExpressRouteCircuitServiceProviderProperties'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'gateway_manager_etag': {'key': 'properties.gatewayManagerEtag', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ExpressRouteCircuit, self).__init__(**kwargs)
self.sku = kwargs.get('sku', None)
self.allow_classic_operations = kwargs.get('allow_classic_operations', None)
self.circuit_provisioning_state = kwargs.get('circuit_provisioning_state', None)
self.service_provider_provisioning_state = kwargs.get('service_provider_provisioning_state', None)
self.authorizations = kwargs.get('authorizations', None)
self.peerings = kwargs.get('peerings', None)
self.service_key = kwargs.get('service_key', None)
self.service_provider_notes = kwargs.get('service_provider_notes', None)
self.service_provider_properties = kwargs.get('service_provider_properties', None)
self.provisioning_state = kwargs.get('provisioning_state', None)
self.gateway_manager_etag = kwargs.get('gateway_manager_etag', None)
self.etag = None
```
#### File: v2018_07_01/models/ddos_protection_plan.py
```python
from msrest.serialization import Model
class DdosProtectionPlan(Model):
"""A DDoS protection plan in a resource group.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource ID.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: Resource tags.
:type tags: dict[str, str]
:ivar resource_guid: The resource GUID property of the DDoS protection
plan resource. It uniquely identifies the resource, even if the user
changes its name or migrates the resource across subscriptions or resource
groups.
:vartype resource_guid: str
:ivar provisioning_state: The provisioning state of the DDoS protection
plan resource. Possible values are: 'Succeeded', 'Updating', 'Deleting',
and 'Failed'.
:vartype provisioning_state: str
:ivar virtual_networks: The list of virtual networks associated with the
DDoS protection plan resource. This list is read-only.
:vartype virtual_networks:
list[~azure.mgmt.network.v2018_07_01.models.SubResource]
:ivar etag: A unique read-only string that changes whenever the resource
is updated.
:vartype etag: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'resource_guid': {'readonly': True},
'provisioning_state': {'readonly': True},
'virtual_networks': {'readonly': True},
'etag': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'virtual_networks': {'key': 'properties.virtualNetworks', 'type': '[SubResource]'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, **kwargs):
super(DdosProtectionPlan, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.location = kwargs.get('location', None)
self.tags = kwargs.get('tags', None)
self.resource_guid = None
self.provisioning_state = None
self.virtual_networks = None
self.etag = None
```
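A minimal construction sketch (editorial addition, not part of the generated file); it assumes azure-mgmt-network is installed and that the versioned models package re-exports the class, as published builds do:
```python
from azure.mgmt.network.v2018_07_01.models import DdosProtectionPlan

# Only location and tags are client-settable; the rest is populated by the server.
plan = DdosProtectionPlan(location="westus2", tags={"env": "prod"})

print(plan.location, plan.tags)     # westus2 {'env': 'prod'}
print(plan.provisioning_state)      # None until the resource is returned by Azure
```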
#### File: v2018_07_01/models/inbound_nat_pool_py3.py
```python
from .sub_resource_py3 import SubResource
class InboundNatPool(SubResource):
"""Inbound NAT pool of the load balancer.
All required parameters must be populated in order to send to Azure.
:param id: Resource ID.
:type id: str
:param frontend_ip_configuration: A reference to frontend IP addresses.
:type frontend_ip_configuration:
~azure.mgmt.network.v2018_07_01.models.SubResource
:param protocol: Required. Possible values include: 'Udp', 'Tcp', 'All'
:type protocol: str or
~azure.mgmt.network.v2018_07_01.models.TransportProtocol
:param frontend_port_range_start: Required. The first port number in the
range of external ports that will be used to provide Inbound Nat to NICs
associated with a load balancer. Acceptable values range between 1 and
65534.
:type frontend_port_range_start: int
:param frontend_port_range_end: Required. The last port number in the
range of external ports that will be used to provide Inbound Nat to NICs
associated with a load balancer. Acceptable values range between 1 and
65535.
:type frontend_port_range_end: int
:param backend_port: Required. The port used for internal connections on
the endpoint. Acceptable values are between 1 and 65535.
:type backend_port: int
:param idle_timeout_in_minutes: The timeout for the TCP idle connection.
The value can be set between 4 and 30 minutes. The default value is 4
minutes. This element is only used when the protocol is set to TCP.
:type idle_timeout_in_minutes: int
:param enable_floating_ip: Configures a virtual machine's endpoint for the
floating IP capability required to configure a SQL AlwaysOn Availability
Group. This setting is required when using the SQL AlwaysOn Availability
Groups in SQL server. This setting can't be changed after you create the
endpoint.
:type enable_floating_ip: bool
:param enable_tcp_reset: Receive bidirectional TCP Reset on TCP flow idle
timeout or unexpected connection termination. This element is only used
when the protocol is set to TCP.
:type enable_tcp_reset: bool
:param provisioning_state: Gets the provisioning state of the PublicIP
resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param name: The name of the resource that is unique within a resource
group. This name can be used to access the resource.
:type name: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
"""
_validation = {
'protocol': {'required': True},
'frontend_port_range_start': {'required': True},
'frontend_port_range_end': {'required': True},
'backend_port': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'frontend_ip_configuration': {'key': 'properties.frontendIPConfiguration', 'type': 'SubResource'},
'protocol': {'key': 'properties.protocol', 'type': 'str'},
'frontend_port_range_start': {'key': 'properties.frontendPortRangeStart', 'type': 'int'},
'frontend_port_range_end': {'key': 'properties.frontendPortRangeEnd', 'type': 'int'},
'backend_port': {'key': 'properties.backendPort', 'type': 'int'},
'idle_timeout_in_minutes': {'key': 'properties.idleTimeoutInMinutes', 'type': 'int'},
'enable_floating_ip': {'key': 'properties.enableFloatingIP', 'type': 'bool'},
'enable_tcp_reset': {'key': 'properties.enableTcpReset', 'type': 'bool'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, *, protocol, frontend_port_range_start: int, frontend_port_range_end: int, backend_port: int, id: str=None, frontend_ip_configuration=None, idle_timeout_in_minutes: int=None, enable_floating_ip: bool=None, enable_tcp_reset: bool=None, provisioning_state: str=None, name: str=None, etag: str=None, **kwargs) -> None:
super(InboundNatPool, self).__init__(id=id, **kwargs)
self.frontend_ip_configuration = frontend_ip_configuration
self.protocol = protocol
self.frontend_port_range_start = frontend_port_range_start
self.frontend_port_range_end = frontend_port_range_end
self.backend_port = backend_port
self.idle_timeout_in_minutes = idle_timeout_in_minutes
self.enable_floating_ip = enable_floating_ip
self.enable_tcp_reset = enable_tcp_reset
self.provisioning_state = provisioning_state
self.name = name
self.etag = etag
```
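A hedged usage sketch for the keyword-only constructor above; the import path and the SubResource helper are assumptions based on how the generated package is normally laid out:
```python
from azure.mgmt.network.v2018_07_01.models import InboundNatPool, SubResource

# All four required parameters must be supplied; protocol accepts the enum or its string form.
nat_pool = InboundNatPool(
    name="ssh-nat-pool",
    protocol="Tcp",
    frontend_port_range_start=50000,
    frontend_port_range_end=50119,
    backend_port=22,
    frontend_ip_configuration=SubResource(id="<frontend-ip-configuration-resource-id>"),
)
```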
#### File: v2018_08_01/models/application_gateway_web_application_firewall_configuration.py
```python
from msrest.serialization import Model
class ApplicationGatewayWebApplicationFirewallConfiguration(Model):
"""Application gateway web application firewall configuration.
All required parameters must be populated in order to send to Azure.
:param enabled: Required. Whether the web application firewall is enabled
or not.
:type enabled: bool
:param firewall_mode: Required. Web application firewall mode. Possible
values include: 'Detection', 'Prevention'
:type firewall_mode: str or
~azure.mgmt.network.v2018_08_01.models.ApplicationGatewayFirewallMode
:param rule_set_type: Required. The type of the web application firewall
rule set. Possible values are: 'OWASP'.
:type rule_set_type: str
:param rule_set_version: Required. The version of the rule set type.
:type rule_set_version: str
:param disabled_rule_groups: The disabled rule groups.
:type disabled_rule_groups:
list[~azure.mgmt.network.v2018_08_01.models.ApplicationGatewayFirewallDisabledRuleGroup]
:param request_body_check: Whether allow WAF to check request Body.
:type request_body_check: bool
:param max_request_body_size: Maximum request body size for WAF.
:type max_request_body_size: int
:param max_request_body_size_in_kb: Maximum request body size in Kb for
WAF.
:type max_request_body_size_in_kb: int
:param file_upload_limit_in_mb: Maximum file upload size in Mb for WAF.
:type file_upload_limit_in_mb: int
:param exclusions: The exclusion list.
:type exclusions:
list[~azure.mgmt.network.v2018_08_01.models.ApplicationGatewayFirewallExclusion]
"""
_validation = {
'enabled': {'required': True},
'firewall_mode': {'required': True},
'rule_set_type': {'required': True},
'rule_set_version': {'required': True},
'max_request_body_size': {'maximum': 128, 'minimum': 8},
'max_request_body_size_in_kb': {'maximum': 128, 'minimum': 8},
'file_upload_limit_in_mb': {'maximum': 500, 'minimum': 0},
}
_attribute_map = {
'enabled': {'key': 'enabled', 'type': 'bool'},
'firewall_mode': {'key': 'firewallMode', 'type': 'str'},
'rule_set_type': {'key': 'ruleSetType', 'type': 'str'},
'rule_set_version': {'key': 'ruleSetVersion', 'type': 'str'},
'disabled_rule_groups': {'key': 'disabledRuleGroups', 'type': '[ApplicationGatewayFirewallDisabledRuleGroup]'},
'request_body_check': {'key': 'requestBodyCheck', 'type': 'bool'},
'max_request_body_size': {'key': 'maxRequestBodySize', 'type': 'int'},
'max_request_body_size_in_kb': {'key': 'maxRequestBodySizeInKb', 'type': 'int'},
'file_upload_limit_in_mb': {'key': 'fileUploadLimitInMb', 'type': 'int'},
'exclusions': {'key': 'exclusions', 'type': '[ApplicationGatewayFirewallExclusion]'},
}
def __init__(self, **kwargs):
super(ApplicationGatewayWebApplicationFirewallConfiguration, self).__init__(**kwargs)
self.enabled = kwargs.get('enabled', None)
self.firewall_mode = kwargs.get('firewall_mode', None)
self.rule_set_type = kwargs.get('rule_set_type', None)
self.rule_set_version = kwargs.get('rule_set_version', None)
self.disabled_rule_groups = kwargs.get('disabled_rule_groups', None)
self.request_body_check = kwargs.get('request_body_check', None)
self.max_request_body_size = kwargs.get('max_request_body_size', None)
self.max_request_body_size_in_kb = kwargs.get('max_request_body_size_in_kb', None)
self.file_upload_limit_in_mb = kwargs.get('file_upload_limit_in_mb', None)
self.exclusions = kwargs.get('exclusions', None)
```
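An illustrative configuration sketch (import path assumed); note the `_validation` bounds on the body-size and upload-limit fields:
```python
from azure.mgmt.network.v2018_08_01.models import (
    ApplicationGatewayWebApplicationFirewallConfiguration,
)

waf_config = ApplicationGatewayWebApplicationFirewallConfiguration(
    enabled=True,
    firewall_mode="Prevention",
    rule_set_type="OWASP",
    rule_set_version="3.0",
    request_body_check=True,
    max_request_body_size_in_kb=128,   # validation allows 8-128
    file_upload_limit_in_mb=100,       # validation allows 0-500
)
```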
#### File: v2018_10_01/models/application_gateway_custom_error.py
```python
from msrest.serialization import Model
class ApplicationGatewayCustomError(Model):
"""Customer error of an application gateway.
:param status_code: Status code of the application gateway customer error.
Possible values include: 'HttpStatus403', 'HttpStatus502'
:type status_code: str or
~azure.mgmt.network.v2018_10_01.models.ApplicationGatewayCustomErrorStatusCode
:param custom_error_page_url: Error page URL of the application gateway
customer error.
:type custom_error_page_url: str
"""
_attribute_map = {
'status_code': {'key': 'statusCode', 'type': 'str'},
'custom_error_page_url': {'key': 'customErrorPageUrl', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ApplicationGatewayCustomError, self).__init__(**kwargs)
self.status_code = kwargs.get('status_code', None)
self.custom_error_page_url = kwargs.get('custom_error_page_url', None)
```
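A short construction sketch (illustrative; the error-page URL is a placeholder):
```python
from azure.mgmt.network.v2018_10_01.models import ApplicationGatewayCustomError

custom_error = ApplicationGatewayCustomError(
    status_code="HttpStatus502",
    custom_error_page_url="https://example.blob.core.windows.net/errors/502.html",
)
```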
#### File: v2018_10_01/models/express_route_cross_connection_py3.py
```python
from .resource_py3 import Resource
class ExpressRouteCrossConnection(Resource):
"""ExpressRouteCrossConnection resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: Resource tags.
:type tags: dict[str, str]
:ivar primary_azure_port: The name of the primary port.
:vartype primary_azure_port: str
:ivar secondary_azure_port: The name of the secondary port.
:vartype secondary_azure_port: str
:ivar s_tag: The identifier of the circuit traffic.
:vartype s_tag: int
:param peering_location: The peering location of the ExpressRoute circuit.
:type peering_location: str
:param bandwidth_in_mbps: The circuit bandwidth In Mbps.
:type bandwidth_in_mbps: int
:param express_route_circuit: The ExpressRouteCircuit
:type express_route_circuit:
~azure.mgmt.network.v2018_10_01.models.ExpressRouteCircuitReference
:param service_provider_provisioning_state: The provisioning state of the
circuit in the connectivity provider system. Possible values are
'NotProvisioned', 'Provisioning', 'Provisioned'. Possible values include:
'NotProvisioned', 'Provisioning', 'Provisioned', 'Deprovisioning'
:type service_provider_provisioning_state: str or
~azure.mgmt.network.v2018_10_01.models.ServiceProviderProvisioningState
:param service_provider_notes: Additional read only notes set by the
connectivity provider.
:type service_provider_notes: str
:ivar provisioning_state: Gets the provisioning state of the public IP
resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:vartype provisioning_state: str
:param peerings: The list of peerings.
:type peerings:
list[~azure.mgmt.network.v2018_10_01.models.ExpressRouteCrossConnectionPeering]
:ivar etag: Gets a unique read-only string that changes whenever the
resource is updated.
:vartype etag: str
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
'primary_azure_port': {'readonly': True},
'secondary_azure_port': {'readonly': True},
's_tag': {'readonly': True},
'provisioning_state': {'readonly': True},
'etag': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'primary_azure_port': {'key': 'properties.primaryAzurePort', 'type': 'str'},
'secondary_azure_port': {'key': 'properties.secondaryAzurePort', 'type': 'str'},
's_tag': {'key': 'properties.sTag', 'type': 'int'},
'peering_location': {'key': 'properties.peeringLocation', 'type': 'str'},
'bandwidth_in_mbps': {'key': 'properties.bandwidthInMbps', 'type': 'int'},
'express_route_circuit': {'key': 'properties.expressRouteCircuit', 'type': 'ExpressRouteCircuitReference'},
'service_provider_provisioning_state': {'key': 'properties.serviceProviderProvisioningState', 'type': 'str'},
'service_provider_notes': {'key': 'properties.serviceProviderNotes', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'peerings': {'key': 'properties.peerings', 'type': '[ExpressRouteCrossConnectionPeering]'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, *, id: str=None, location: str=None, tags=None, peering_location: str=None, bandwidth_in_mbps: int=None, express_route_circuit=None, service_provider_provisioning_state=None, service_provider_notes: str=None, peerings=None, **kwargs) -> None:
super(ExpressRouteCrossConnection, self).__init__(id=id, location=location, tags=tags, **kwargs)
self.primary_azure_port = None
self.secondary_azure_port = None
self.s_tag = None
self.peering_location = peering_location
self.bandwidth_in_mbps = bandwidth_in_mbps
self.express_route_circuit = express_route_circuit
self.service_provider_provisioning_state = service_provider_provisioning_state
self.service_provider_notes = service_provider_notes
self.provisioning_state = None
self.peerings = peerings
self.etag = None
```
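A construction sketch for the writable fields (import path assumed); server-populated members such as primary_azure_port, s_tag, provisioning_state, and etag stay None on a locally built instance:
```python
from azure.mgmt.network.v2018_10_01.models import ExpressRouteCrossConnection

cross_connection = ExpressRouteCrossConnection(
    location="westus2",
    peering_location="Silicon Valley",
    bandwidth_in_mbps=1000,
    service_provider_provisioning_state="NotProvisioned",
)
```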
#### File: v2018_10_01/models/network_configuration_diagnostic_result.py
```python
from msrest.serialization import Model
class NetworkConfigurationDiagnosticResult(Model):
"""Network configuration diagnostic result corresponded to provided traffic
query.
:param profile:
:type profile:
~azure.mgmt.network.v2018_10_01.models.NetworkConfigurationDiagnosticProfile
:param network_security_group_result:
:type network_security_group_result:
~azure.mgmt.network.v2018_10_01.models.NetworkSecurityGroupResult
"""
_attribute_map = {
'profile': {'key': 'profile', 'type': 'NetworkConfigurationDiagnosticProfile'},
'network_security_group_result': {'key': 'networkSecurityGroupResult', 'type': 'NetworkSecurityGroupResult'},
}
def __init__(self, **kwargs):
super(NetworkConfigurationDiagnosticResult, self).__init__(**kwargs)
self.profile = kwargs.get('profile', None)
self.network_security_group_result = kwargs.get('network_security_group_result', None)
```
#### File: v2018_10_01/models/next_hop_result_py3.py
```python
from msrest.serialization import Model
class NextHopResult(Model):
"""The information about next hop from the specified VM.
:param next_hop_type: Next hop type. Possible values include: 'Internet',
'VirtualAppliance', 'VirtualNetworkGateway', 'VnetLocal',
'HyperNetGateway', 'None'
:type next_hop_type: str or
~azure.mgmt.network.v2018_10_01.models.NextHopType
:param next_hop_ip_address: Next hop IP Address
:type next_hop_ip_address: str
:param route_table_id: The resource identifier for the route table
associated with the route being returned. If the route being returned does
not correspond to any user created routes then this field will be the
string 'System Route'.
:type route_table_id: str
"""
_attribute_map = {
'next_hop_type': {'key': 'nextHopType', 'type': 'str'},
'next_hop_ip_address': {'key': 'nextHopIpAddress', 'type': 'str'},
'route_table_id': {'key': 'routeTableId', 'type': 'str'},
}
def __init__(self, *, next_hop_type=None, next_hop_ip_address: str=None, route_table_id: str=None, **kwargs) -> None:
super(NextHopResult, self).__init__(**kwargs)
self.next_hop_type = next_hop_type
self.next_hop_ip_address = next_hop_ip_address
self.route_table_id = route_table_id
```
#### File: v2018_10_01/models/virtual_wan_security_providers.py
```python
from msrest.serialization import Model
class VirtualWanSecurityProviders(Model):
"""Collection of SecurityProviders.
:param supported_providers:
:type supported_providers:
list[~azure.mgmt.network.v2018_10_01.models.VirtualWanSecurityProvider]
"""
_attribute_map = {
'supported_providers': {'key': 'supportedProviders', 'type': '[VirtualWanSecurityProvider]'},
}
def __init__(self, **kwargs):
super(VirtualWanSecurityProviders, self).__init__(**kwargs)
self.supported_providers = kwargs.get('supported_providers', None)
```
#### File: v2018_11_01/models/azure_firewall_application_rule.py
```python
from msrest.serialization import Model
class AzureFirewallApplicationRule(Model):
"""Properties of an application rule.
:param name: Name of the application rule.
:type name: str
:param description: Description of the rule.
:type description: str
:param source_addresses: List of source IP addresses for this rule.
:type source_addresses: list[str]
:param protocols: Array of ApplicationRuleProtocols.
:type protocols:
list[~azure.mgmt.network.v2018_11_01.models.AzureFirewallApplicationRuleProtocol]
:param target_fqdns: List of FQDNs for this rule.
:type target_fqdns: list[str]
:param fqdn_tags: List of FQDN Tags for this rule.
:type fqdn_tags: list[str]
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'source_addresses': {'key': 'sourceAddresses', 'type': '[str]'},
'protocols': {'key': 'protocols', 'type': '[AzureFirewallApplicationRuleProtocol]'},
'target_fqdns': {'key': 'targetFqdns', 'type': '[str]'},
'fqdn_tags': {'key': 'fqdnTags', 'type': '[str]'},
}
def __init__(self, **kwargs):
super(AzureFirewallApplicationRule, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.description = kwargs.get('description', None)
self.source_addresses = kwargs.get('source_addresses', None)
self.protocols = kwargs.get('protocols', None)
self.target_fqdns = kwargs.get('target_fqdns', None)
self.fqdn_tags = kwargs.get('fqdn_tags', None)
```
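A hedged example of an application rule; the AzureFirewallApplicationRuleProtocol helper and its protocol_type/port parameters are assumptions drawn from the surrounding generated models:
```python
from azure.mgmt.network.v2018_11_01.models import (
    AzureFirewallApplicationRule,
    AzureFirewallApplicationRuleProtocol,
)

rule = AzureFirewallApplicationRule(
    name="allow-windows-update",
    description="Allow outbound Windows Update traffic",
    source_addresses=["10.0.0.0/24"],
    protocols=[AzureFirewallApplicationRuleProtocol(protocol_type="Https", port=443)],
    fqdn_tags=["WindowsUpdate"],
)
```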
#### File: v2018_11_01/models/connection_monitor_result.py
```python
from msrest.serialization import Model
class ConnectionMonitorResult(Model):
"""Information about the connection monitor.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar name: Name of the connection monitor.
:vartype name: str
:ivar id: ID of the connection monitor.
:vartype id: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
:ivar type: Connection monitor type.
:vartype type: str
:param location: Connection monitor location.
:type location: str
:param tags: Connection monitor tags.
:type tags: dict[str, str]
:param source: Required.
:type source:
~azure.mgmt.network.v2018_11_01.models.ConnectionMonitorSource
:param destination: Required.
:type destination:
~azure.mgmt.network.v2018_11_01.models.ConnectionMonitorDestination
:param auto_start: Determines if the connection monitor will start
automatically once created. Default value: True .
:type auto_start: bool
:param monitoring_interval_in_seconds: Monitoring interval in seconds.
Default value: 60 .
:type monitoring_interval_in_seconds: int
:param provisioning_state: The provisioning state of the connection
monitor. Possible values include: 'Succeeded', 'Updating', 'Deleting',
'Failed'
:type provisioning_state: str or
~azure.mgmt.network.v2018_11_01.models.ProvisioningState
:param start_time: The date and time when the connection monitor was
started.
:type start_time: datetime
:param monitoring_status: The monitoring status of the connection monitor.
:type monitoring_status: str
"""
_validation = {
'name': {'readonly': True},
'id': {'readonly': True},
'type': {'readonly': True},
'source': {'required': True},
'destination': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'source': {'key': 'properties.source', 'type': 'ConnectionMonitorSource'},
'destination': {'key': 'properties.destination', 'type': 'ConnectionMonitorDestination'},
'auto_start': {'key': 'properties.autoStart', 'type': 'bool'},
'monitoring_interval_in_seconds': {'key': 'properties.monitoringIntervalInSeconds', 'type': 'int'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'start_time': {'key': 'properties.startTime', 'type': 'iso-8601'},
'monitoring_status': {'key': 'properties.monitoringStatus', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ConnectionMonitorResult, self).__init__(**kwargs)
self.name = None
self.id = None
self.etag = kwargs.get('etag', None)
self.type = None
self.location = kwargs.get('location', None)
self.tags = kwargs.get('tags', None)
self.source = kwargs.get('source', None)
self.destination = kwargs.get('destination', None)
self.auto_start = kwargs.get('auto_start', True)
self.monitoring_interval_in_seconds = kwargs.get('monitoring_interval_in_seconds', 60)
self.provisioning_state = kwargs.get('provisioning_state', None)
self.start_time = kwargs.get('start_time', None)
self.monitoring_status = kwargs.get('monitoring_status', None)
```
#### File: v2018_12_01/models/application_gateway_firewall_exclusion_py3.py
```python
from msrest.serialization import Model
class ApplicationGatewayFirewallExclusion(Model):
"""Allow to exclude some variable satisfy the condition for the WAF check.
All required parameters must be populated in order to send to Azure.
:param match_variable: Required. The variable to be excluded.
:type match_variable: str
:param selector_match_operator: Required. When matchVariable is a
collection, the operator applied to the selector to determine which
elements in the collection this exclusion applies to.
:type selector_match_operator: str
:param selector: Required. When matchVariable is a collection, the selector
value that identifies which elements in the collection this exclusion
applies to.
:type selector: str
"""
_validation = {
'match_variable': {'required': True},
'selector_match_operator': {'required': True},
'selector': {'required': True},
}
_attribute_map = {
'match_variable': {'key': 'matchVariable', 'type': 'str'},
'selector_match_operator': {'key': 'selectorMatchOperator', 'type': 'str'},
'selector': {'key': 'selector', 'type': 'str'},
}
def __init__(self, *, match_variable: str, selector_match_operator: str, selector: str, **kwargs) -> None:
super(ApplicationGatewayFirewallExclusion, self).__init__(**kwargs)
self.match_variable = match_variable
self.selector_match_operator = selector_match_operator
self.selector = selector
```
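A minimal sketch showing all three required fields (import path assumed):
```python
from azure.mgmt.network.v2018_12_01.models import ApplicationGatewayFirewallExclusion

# Skip WAF evaluation for request headers whose names start with "x-internal-".
exclusion = ApplicationGatewayFirewallExclusion(
    match_variable="RequestHeaderNames",
    selector_match_operator="StartsWith",
    selector="x-internal-",
)
```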
#### File: v2018_12_01/models/express_route_connection_py3.py
```python
from .sub_resource_py3 import SubResource
class ExpressRouteConnection(SubResource):
"""ExpressRouteConnection resource.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:param id: Resource ID.
:type id: str
:ivar provisioning_state: The provisioning state of the resource. Possible
values include: 'Succeeded', 'Updating', 'Deleting', 'Failed'
:vartype provisioning_state: str or
~azure.mgmt.network.v2018_12_01.models.ProvisioningState
:param express_route_circuit_peering: Required. The ExpressRoute circuit
peering.
:type express_route_circuit_peering:
~azure.mgmt.network.v2018_12_01.models.ExpressRouteCircuitPeeringId
:param authorization_key: Authorization key to establish the connection.
:type authorization_key: str
:param routing_weight: The routing weight associated to the connection.
:type routing_weight: int
:param name: Required. The name of the resource.
:type name: str
"""
_validation = {
'provisioning_state': {'readonly': True},
'express_route_circuit_peering': {'required': True},
'name': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'express_route_circuit_peering': {'key': 'properties.expressRouteCircuitPeering', 'type': 'ExpressRouteCircuitPeeringId'},
'authorization_key': {'key': 'properties.authorizationKey', 'type': 'str'},
'routing_weight': {'key': 'properties.routingWeight', 'type': 'int'},
'name': {'key': 'name', 'type': 'str'},
}
def __init__(self, *, express_route_circuit_peering, name: str, id: str=None, authorization_key: str=None, routing_weight: int=None, **kwargs) -> None:
super(ExpressRouteConnection, self).__init__(id=id, **kwargs)
self.provisioning_state = None
self.express_route_circuit_peering = express_route_circuit_peering
self.authorization_key = authorization_key
self.routing_weight = routing_weight
self.name = name
```
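An illustrative sketch of the two required parameters; ExpressRouteCircuitPeeringId taking an id keyword is an assumption based on the generated models, and the resource id is a placeholder:
```python
from azure.mgmt.network.v2018_12_01.models import (
    ExpressRouteConnection,
    ExpressRouteCircuitPeeringId,
)

connection = ExpressRouteConnection(
    name="er-connection-1",
    express_route_circuit_peering=ExpressRouteCircuitPeeringId(
        id="<express-route-circuit-peering-resource-id>"
    ),
    routing_weight=10,
)
```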
#### File: v2018_12_01/models/vpn_connection_py3.py
```python
from .sub_resource_py3 import SubResource
class VpnConnection(SubResource):
"""VpnConnection Resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:param remote_vpn_site: Id of the connected vpn site.
:type remote_vpn_site: ~azure.mgmt.network.v2018_12_01.models.SubResource
:param routing_weight: routing weight for vpn connection.
:type routing_weight: int
:param connection_status: The connection status. Possible values include:
'Unknown', 'Connecting', 'Connected', 'NotConnected'
:type connection_status: str or
~azure.mgmt.network.v2018_12_01.models.VpnConnectionStatus
:param vpn_connection_protocol_type: Connection protocol used for this
connection. Possible values include: 'IKEv2', 'IKEv1'
:type vpn_connection_protocol_type: str or
~azure.mgmt.network.v2018_12_01.models.VirtualNetworkGatewayConnectionProtocol
:ivar ingress_bytes_transferred: Ingress bytes transferred.
:vartype ingress_bytes_transferred: long
:ivar egress_bytes_transferred: Egress bytes transferred.
:vartype egress_bytes_transferred: long
:param connection_bandwidth: Expected bandwidth in MBPS.
:type connection_bandwidth: int
:param shared_key: SharedKey for the vpn connection.
:type shared_key: str
:param enable_bgp: EnableBgp flag
:type enable_bgp: bool
:param ipsec_policies: The IPSec Policies to be considered by this
connection.
:type ipsec_policies:
list[~azure.mgmt.network.v2018_12_01.models.IpsecPolicy]
:param enable_rate_limiting: EnableRateLimiting flag
:type enable_rate_limiting: bool
:param enable_internet_security: Enable internet security
:type enable_internet_security: bool
:param provisioning_state: The provisioning state of the resource.
Possible values include: 'Succeeded', 'Updating', 'Deleting', 'Failed'
:type provisioning_state: str or
~azure.mgmt.network.v2018_12_01.models.ProvisioningState
:param name: The name of the resource that is unique within a resource
group. This name can be used to access the resource.
:type name: str
:ivar etag: Gets a unique read-only string that changes whenever the
resource is updated.
:vartype etag: str
"""
_validation = {
'ingress_bytes_transferred': {'readonly': True},
'egress_bytes_transferred': {'readonly': True},
'etag': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'remote_vpn_site': {'key': 'properties.remoteVpnSite', 'type': 'SubResource'},
'routing_weight': {'key': 'properties.routingWeight', 'type': 'int'},
'connection_status': {'key': 'properties.connectionStatus', 'type': 'str'},
'vpn_connection_protocol_type': {'key': 'properties.vpnConnectionProtocolType', 'type': 'str'},
'ingress_bytes_transferred': {'key': 'properties.ingressBytesTransferred', 'type': 'long'},
'egress_bytes_transferred': {'key': 'properties.egressBytesTransferred', 'type': 'long'},
'connection_bandwidth': {'key': 'properties.connectionBandwidth', 'type': 'int'},
'shared_key': {'key': 'properties.sharedKey', 'type': 'str'},
'enable_bgp': {'key': 'properties.enableBgp', 'type': 'bool'},
'ipsec_policies': {'key': 'properties.ipsecPolicies', 'type': '[IpsecPolicy]'},
'enable_rate_limiting': {'key': 'properties.enableRateLimiting', 'type': 'bool'},
'enable_internet_security': {'key': 'properties.enableInternetSecurity', 'type': 'bool'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, *, id: str=None, remote_vpn_site=None, routing_weight: int=None, connection_status=None, vpn_connection_protocol_type=None, connection_bandwidth: int=None, shared_key: str=None, enable_bgp: bool=None, ipsec_policies=None, enable_rate_limiting: bool=None, enable_internet_security: bool=None, provisioning_state=None, name: str=None, **kwargs) -> None:
super(VpnConnection, self).__init__(id=id, **kwargs)
self.remote_vpn_site = remote_vpn_site
self.routing_weight = routing_weight
self.connection_status = connection_status
self.vpn_connection_protocol_type = vpn_connection_protocol_type
self.ingress_bytes_transferred = None
self.egress_bytes_transferred = None
self.connection_bandwidth = connection_bandwidth
self.shared_key = shared_key
self.enable_bgp = enable_bgp
self.ipsec_policies = ipsec_policies
self.enable_rate_limiting = enable_rate_limiting
self.enable_internet_security = enable_internet_security
self.provisioning_state = provisioning_state
self.name = name
self.etag = None
```
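A construction sketch for the writable fields (import path assumed; ids and keys are placeholders):
```python
from azure.mgmt.network.v2018_12_01.models import VpnConnection, SubResource

vpn_connection = VpnConnection(
    name="branch-to-hub",
    remote_vpn_site=SubResource(id="<vpn-site-resource-id>"),
    routing_weight=0,
    shared_key="<pre-shared-key>",
    enable_bgp=False,
    enable_internet_security=False,
)
# Ingress/egress byte counters and etag are read-only and remain None on a local instance.
```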
#### File: notificationhubs/operations/notification_hubs_operations.py
```python
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class NotificationHubsOperations(object):
"""NotificationHubsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client Api Version. Constant value: "2017-04-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2017-04-01"
self.config = config
def check_notification_hub_availability(
self, resource_group_name, namespace_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Checks the availability of the given notificationHub in a namespace.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param parameters: The notificationHub name.
:type parameters:
~azure.mgmt.notificationhubs.models.CheckAvailabilityParameters
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: CheckAvailabilityResult or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.notificationhubs.models.CheckAvailabilityResult or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.check_notification_hub_availability.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'CheckAvailabilityParameters')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('CheckAvailabilityResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
check_notification_hub_availability.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NotificationHubs/namespaces/{namespaceName}/checkNotificationHubAvailability'}
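# Illustrative call sketch (editorial comment, not generated code). It assumes
# `client` is an authenticated NotificationHubsManagementClient and that
# CheckAvailabilityParameters accepts a `name` keyword:
#
#   from azure.mgmt.notificationhubs.models import CheckAvailabilityParameters
#   result = client.notification_hubs.check_notification_hub_availability(
#       'my-resource-group', 'my-namespace',
#       CheckAvailabilityParameters(name='my-hub'))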
def create_or_update(
self, resource_group_name, namespace_name, notification_hub_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Creates/Update a NotificationHub in a namespace.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param notification_hub_name: The notification hub name.
:type notification_hub_name: str
:param parameters: Parameters supplied to the create/update a
NotificationHub Resource.
:type parameters:
~azure.mgmt.notificationhubs.models.NotificationHubCreateOrUpdateParameters
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: NotificationHubResource or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.notificationhubs.models.NotificationHubResource or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.create_or_update.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str'),
'notificationHubName': self._serialize.url("notification_hub_name", notification_hub_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'NotificationHubCreateOrUpdateParameters')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('NotificationHubResource', response)
if response.status_code == 201:
deserialized = self._deserialize('NotificationHubResource', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NotificationHubs/namespaces/{namespaceName}/notificationHubs/{notificationHubName}'}
def delete(
self, resource_group_name, namespace_name, notification_hub_name, custom_headers=None, raw=False, **operation_config):
"""Deletes a notification hub associated with a namespace.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param notification_hub_name: The notification hub name.
:type notification_hub_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.delete.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str'),
'notificationHubName': self._serialize.url("notification_hub_name", notification_hub_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NotificationHubs/namespaces/{namespaceName}/notificationHubs/{notificationHubName}'}
def get(
self, resource_group_name, namespace_name, notification_hub_name, custom_headers=None, raw=False, **operation_config):
"""Lists the notification hubs associated with a namespace.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param notification_hub_name: The notification hub name.
:type notification_hub_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: NotificationHubResource or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.notificationhubs.models.NotificationHubResource or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str'),
'notificationHubName': self._serialize.url("notification_hub_name", notification_hub_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('NotificationHubResource', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NotificationHubs/namespaces/{namespaceName}/notificationHubs/{notificationHubName}'}
def create_or_update_authorization_rule(
self, resource_group_name, namespace_name, notification_hub_name, authorization_rule_name, properties, custom_headers=None, raw=False, **operation_config):
"""Creates/Updates an authorization rule for a NotificationHub.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param notification_hub_name: The notification hub name.
:type notification_hub_name: str
:param authorization_rule_name: Authorization Rule Name.
:type authorization_rule_name: str
:param properties: Properties of the Namespace AuthorizationRules.
:type properties:
~azure.mgmt.notificationhubs.models.SharedAccessAuthorizationRuleProperties
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: SharedAccessAuthorizationRuleResource or ClientRawResponse if
raw=true
:rtype:
~azure.mgmt.notificationhubs.models.SharedAccessAuthorizationRuleResource
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
parameters = models.SharedAccessAuthorizationRuleCreateOrUpdateParameters(properties=properties)
# Construct URL
url = self.create_or_update_authorization_rule.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str'),
'notificationHubName': self._serialize.url("notification_hub_name", notification_hub_name, 'str'),
'authorizationRuleName': self._serialize.url("authorization_rule_name", authorization_rule_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'SharedAccessAuthorizationRuleCreateOrUpdateParameters')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('SharedAccessAuthorizationRuleResource', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
create_or_update_authorization_rule.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NotificationHubs/namespaces/{namespaceName}/notificationHubs/{notificationHubName}/AuthorizationRules/{authorizationRuleName}'}
def delete_authorization_rule(
self, resource_group_name, namespace_name, notification_hub_name, authorization_rule_name, custom_headers=None, raw=False, **operation_config):
"""Deletes a notificationHub authorization rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param notification_hub_name: The notification hub name.
:type notification_hub_name: str
:param authorization_rule_name: Authorization Rule Name.
:type authorization_rule_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.delete_authorization_rule.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str'),
'notificationHubName': self._serialize.url("notification_hub_name", notification_hub_name, 'str'),
'authorizationRuleName': self._serialize.url("authorization_rule_name", authorization_rule_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
delete_authorization_rule.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NotificationHubs/namespaces/{namespaceName}/notificationHubs/{notificationHubName}/AuthorizationRules/{authorizationRuleName}'}
def get_authorization_rule(
self, resource_group_name, namespace_name, notification_hub_name, authorization_rule_name, custom_headers=None, raw=False, **operation_config):
"""Gets an authorization rule for a NotificationHub by name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param namespace_name: The namespace name
:type namespace_name: str
:param notification_hub_name: The notification hub name.
:type notification_hub_name: str
:param authorization_rule_name: authorization rule name.
:type authorization_rule_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: SharedAccessAuthorizationRuleResource or ClientRawResponse if
raw=true
:rtype:
~azure.mgmt.notificationhubs.models.SharedAccessAuthorizationRuleResource
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get_authorization_rule.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str'),
'notificationHubName': self._serialize.url("notification_hub_name", notification_hub_name, 'str'),
'authorizationRuleName': self._serialize.url("authorization_rule_name", authorization_rule_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('SharedAccessAuthorizationRuleResource', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_authorization_rule.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NotificationHubs/namespaces/{namespaceName}/notificationHubs/{notificationHubName}/AuthorizationRules/{authorizationRuleName}'}
def list(
self, resource_group_name, namespace_name, custom_headers=None, raw=False, **operation_config):
"""Lists the notification hubs associated with a namespace.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of NotificationHubResource
:rtype:
~azure.mgmt.notificationhubs.models.NotificationHubResourcePaged[~azure.mgmt.notificationhubs.models.NotificationHubResource]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.NotificationHubResourcePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.NotificationHubResourcePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NotificationHubs/namespaces/{namespaceName}/notificationHubs'}
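# Usage note (illustrative): the paged object returned by list() is iterated
# lazily, e.g. `for hub in client.notification_hubs.list('my-rg', 'my-namespace')`,
# fetching follow-up pages via the service's nextLink as needed.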
def list_authorization_rules(
self, resource_group_name, namespace_name, notification_hub_name, custom_headers=None, raw=False, **operation_config):
"""Gets the authorization rules for a NotificationHub.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param namespace_name: The namespace name
:type namespace_name: str
:param notification_hub_name: The notification hub name.
:type notification_hub_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of
SharedAccessAuthorizationRuleResource
:rtype:
~azure.mgmt.notificationhubs.models.SharedAccessAuthorizationRuleResourcePaged[~azure.mgmt.notificationhubs.models.SharedAccessAuthorizationRuleResource]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_authorization_rules.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str'),
'notificationHubName': self._serialize.url("notification_hub_name", notification_hub_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.SharedAccessAuthorizationRuleResourcePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.SharedAccessAuthorizationRuleResourcePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_authorization_rules.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NotificationHubs/namespaces/{namespaceName}/notificationHubs/{notificationHubName}/AuthorizationRules'}
def list_keys(
self, resource_group_name, namespace_name, notification_hub_name, authorization_rule_name, custom_headers=None, raw=False, **operation_config):
"""Gets the Primary and Secondary ConnectionStrings to the NotificationHub
.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param notification_hub_name: The notification hub name.
:type notification_hub_name: str
        :param authorization_rule_name: The authorization rule name.
:type authorization_rule_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ResourceListKeys or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.notificationhubs.models.ResourceListKeys or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.list_keys.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str'),
'notificationHubName': self._serialize.url("notification_hub_name", notification_hub_name, 'str'),
'authorizationRuleName': self._serialize.url("authorization_rule_name", authorization_rule_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ResourceListKeys', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
list_keys.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NotificationHubs/namespaces/{namespaceName}/notificationHubs/{notificationHubName}/AuthorizationRules/{authorizationRuleName}/listKeys'}
def regenerate_keys(
self, resource_group_name, namespace_name, notification_hub_name, authorization_rule_name, policy_key=None, custom_headers=None, raw=False, **operation_config):
"""Regenerates the Primary/Secondary Keys to the NotificationHub
Authorization Rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param notification_hub_name: The notification hub name.
:type notification_hub_name: str
        :param authorization_rule_name: The authorization rule name.
:type authorization_rule_name: str
:param policy_key: Name of the key that has to be regenerated for the
Namespace/Notification Hub Authorization Rule. The value can be
Primary Key/Secondary Key.
:type policy_key: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ResourceListKeys or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.notificationhubs.models.ResourceListKeys or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
parameters = models.PolicykeyResource(policy_key=policy_key)
# Construct URL
url = self.regenerate_keys.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str'),
'notificationHubName': self._serialize.url("notification_hub_name", notification_hub_name, 'str'),
'authorizationRuleName': self._serialize.url("authorization_rule_name", authorization_rule_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'PolicykeyResource')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ResourceListKeys', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
regenerate_keys.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NotificationHubs/namespaces/{namespaceName}/notificationHubs/{notificationHubName}/AuthorizationRules/{authorizationRuleName}/regenerateKeys'}
def get_pns_credentials(
self, resource_group_name, namespace_name, notification_hub_name, custom_headers=None, raw=False, **operation_config):
"""Lists the PNS Credentials associated with a notification hub .
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param notification_hub_name: The notification hub name.
:type notification_hub_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: PnsCredentialsResource or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.notificationhubs.models.PnsCredentialsResource or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get_pns_credentials.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str'),
'notificationHubName': self._serialize.url("notification_hub_name", notification_hub_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PnsCredentialsResource', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_pns_credentials.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NotificationHubs/namespaces/{namespaceName}/notificationHubs/{notificationHubName}/pnsCredentials'}
```
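A minimal usage sketch for the operations above. It assumes the surrounding operations class is exposed as `notification_hubs` on a `NotificationHubsManagementClient`, and every credential value, resource name, and the `DefaultFullSharedAccessSignature` rule below is a placeholder rather than something defined in this file.
```python
# Hedged sketch: client construction and all identifiers are assumptions.
from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.notificationhubs import NotificationHubsManagementClient

credentials = ServicePrincipalCredentials(
    client_id="<app-id>", secret="<secret>", tenant="<tenant-id>")
client = NotificationHubsManagementClient(credentials, "<subscription-id>")

# Fetch the connection strings for an authorization rule, then roll its primary key.
keys = client.notification_hubs.list_keys(
    "my-resource-group", "my-namespace", "my-hub", "DefaultFullSharedAccessSignature")
print(keys.primary_connection_string)  # attribute of ResourceListKeys (assumed)
client.notification_hubs.regenerate_keys(
    "my-resource-group", "my-namespace", "my-hub",
    "DefaultFullSharedAccessSignature", policy_key="Primary Key")
```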
#### File: recoveryservices/models/resource_certificate_and_acs_details.py
```python
from .resource_certificate_details import ResourceCertificateDetails
class ResourceCertificateAndAcsDetails(ResourceCertificateDetails):
"""Certificate details representing the Vault credentials for ACS.
All required parameters must be populated in order to send to Azure.
:param certificate: The base64 encoded certificate raw data string.
:type certificate: bytearray
    :param friendly_name: Certificate friendly name.
:type friendly_name: str
:param issuer: Certificate issuer.
:type issuer: str
:param resource_id: Resource ID of the vault.
:type resource_id: long
:param subject: Certificate Subject Name.
:type subject: str
:param thumbprint: Certificate thumbprint.
:type thumbprint: str
:param valid_from: Certificate Validity start Date time.
:type valid_from: datetime
:param valid_to: Certificate Validity End Date time.
:type valid_to: datetime
:param auth_type: Required. Constant filled by server.
:type auth_type: str
:param global_acs_namespace: Required. ACS namespace name - tenant for our
service.
:type global_acs_namespace: str
:param global_acs_host_name: Required. Acs mgmt host name to connect to.
:type global_acs_host_name: str
:param global_acs_rp_realm: Required. Global ACS namespace RP realm.
:type global_acs_rp_realm: str
"""
_validation = {
'auth_type': {'required': True},
'global_acs_namespace': {'required': True},
'global_acs_host_name': {'required': True},
'global_acs_rp_realm': {'required': True},
}
_attribute_map = {
'certificate': {'key': 'certificate', 'type': 'bytearray'},
'friendly_name': {'key': 'friendlyName', 'type': 'str'},
'issuer': {'key': 'issuer', 'type': 'str'},
'resource_id': {'key': 'resourceId', 'type': 'long'},
'subject': {'key': 'subject', 'type': 'str'},
'thumbprint': {'key': 'thumbprint', 'type': 'str'},
'valid_from': {'key': 'validFrom', 'type': 'iso-8601'},
'valid_to': {'key': 'validTo', 'type': 'iso-8601'},
'auth_type': {'key': 'authType', 'type': 'str'},
'global_acs_namespace': {'key': 'globalAcsNamespace', 'type': 'str'},
'global_acs_host_name': {'key': 'globalAcsHostName', 'type': 'str'},
'global_acs_rp_realm': {'key': 'globalAcsRPRealm', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ResourceCertificateAndAcsDetails, self).__init__(**kwargs)
self.global_acs_namespace = kwargs.get('global_acs_namespace', None)
self.global_acs_host_name = kwargs.get('global_acs_host_name', None)
self.global_acs_rp_realm = kwargs.get('global_acs_rp_realm', None)
self.auth_type = 'AccessControlService'
```
#### File: recoveryservicesbackup/models/azure_backup_server_engine.py
```python
from .backup_engine_base import BackupEngineBase
class AzureBackupServerEngine(BackupEngineBase):
"""Backup engine type when Azure Backup Server is used to manage the backups.
All required parameters must be populated in order to send to Azure.
:param friendly_name: Friendly name of the backup engine.
:type friendly_name: str
:param backup_management_type: Type of backup management for the backup
engine. Possible values include: 'Invalid', 'AzureIaasVM', 'MAB', 'DPM',
'AzureBackupServer', 'AzureSql', 'AzureStorage', 'AzureWorkload',
'DefaultBackup'
:type backup_management_type: str or
~azure.mgmt.recoveryservicesbackup.models.BackupManagementType
:param registration_status: Registration status of the backup engine with
the Recovery Services Vault.
:type registration_status: str
:param backup_engine_state: Status of the backup engine with the Recovery
     Services Vault (Active/Deleting/DeleteFailed).
:type backup_engine_state: str
:param health_status: Backup status of the backup engine.
:type health_status: str
    :param can_re_register: Flag indicating whether the backup engine can be
     re-registered once it is already registered.
:type can_re_register: bool
:param backup_engine_id: ID of the backup engine.
:type backup_engine_id: str
:param dpm_version: Backup engine version
:type dpm_version: str
:param azure_backup_agent_version: Backup agent version
:type azure_backup_agent_version: str
:param is_azure_backup_agent_upgrade_available: To check if backup agent
upgrade available
:type is_azure_backup_agent_upgrade_available: bool
:param is_dpm_upgrade_available: To check if backup engine upgrade
available
:type is_dpm_upgrade_available: bool
    :param extended_info: Extended info of the backup engine
:type extended_info:
~azure.mgmt.recoveryservicesbackup.models.BackupEngineExtendedInfo
:param backup_engine_type: Required. Constant filled by server.
:type backup_engine_type: str
"""
_validation = {
'backup_engine_type': {'required': True},
}
_attribute_map = {
'friendly_name': {'key': 'friendlyName', 'type': 'str'},
'backup_management_type': {'key': 'backupManagementType', 'type': 'str'},
'registration_status': {'key': 'registrationStatus', 'type': 'str'},
'backup_engine_state': {'key': 'backupEngineState', 'type': 'str'},
'health_status': {'key': 'healthStatus', 'type': 'str'},
'can_re_register': {'key': 'canReRegister', 'type': 'bool'},
'backup_engine_id': {'key': 'backupEngineId', 'type': 'str'},
'dpm_version': {'key': 'dpmVersion', 'type': 'str'},
'azure_backup_agent_version': {'key': 'azureBackupAgentVersion', 'type': 'str'},
'is_azure_backup_agent_upgrade_available': {'key': 'isAzureBackupAgentUpgradeAvailable', 'type': 'bool'},
'is_dpm_upgrade_available': {'key': 'isDpmUpgradeAvailable', 'type': 'bool'},
'extended_info': {'key': 'extendedInfo', 'type': 'BackupEngineExtendedInfo'},
'backup_engine_type': {'key': 'backupEngineType', 'type': 'str'},
}
def __init__(self, **kwargs):
super(AzureBackupServerEngine, self).__init__(**kwargs)
self.backup_engine_type = 'AzureBackupServerEngine'
```
#### File: recoveryservicesbackup/models/azure_iaa_svm_job_task_details_py3.py
```python
from msrest.serialization import Model
class AzureIaaSVMJobTaskDetails(Model):
"""Azure IaaS VM workload-specific job task details.
:param task_id: The task display name.
:type task_id: str
:param start_time: The start time.
:type start_time: datetime
:param end_time: The end time.
:type end_time: datetime
:param instance_id: The instanceId.
:type instance_id: str
:param duration: Time elapsed for task.
:type duration: timedelta
:param status: The status.
:type status: str
:param progress_percentage: Progress of the task.
:type progress_percentage: float
"""
_attribute_map = {
'task_id': {'key': 'taskId', 'type': 'str'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
'instance_id': {'key': 'instanceId', 'type': 'str'},
'duration': {'key': 'duration', 'type': 'duration'},
'status': {'key': 'status', 'type': 'str'},
'progress_percentage': {'key': 'progressPercentage', 'type': 'float'},
}
def __init__(self, *, task_id: str=None, start_time=None, end_time=None, instance_id: str=None, duration=None, status: str=None, progress_percentage: float=None, **kwargs) -> None:
super(AzureIaaSVMJobTaskDetails, self).__init__(**kwargs)
self.task_id = task_id
self.start_time = start_time
self.end_time = end_time
self.instance_id = instance_id
self.duration = duration
self.status = status
self.progress_percentage = progress_percentage
```
#### File: recoveryservicesbackup/models/azure_workload_container_extended_info.py
```python
from msrest.serialization import Model
class AzureWorkloadContainerExtendedInfo(Model):
"""Extended information of the container.
    :param host_server_name: Host OS name for a standalone container, or
     cluster name for a distributed container.
:type host_server_name: str
:param inquiry_info: Inquiry Status for the container.
:type inquiry_info: ~azure.mgmt.recoveryservicesbackup.models.InquiryInfo
:param nodes_list: List of the nodes in case of distributed container.
:type nodes_list:
list[~azure.mgmt.recoveryservicesbackup.models.DistributedNodesInfo]
"""
_attribute_map = {
'host_server_name': {'key': 'hostServerName', 'type': 'str'},
'inquiry_info': {'key': 'inquiryInfo', 'type': 'InquiryInfo'},
'nodes_list': {'key': 'nodesList', 'type': '[DistributedNodesInfo]'},
}
def __init__(self, **kwargs):
super(AzureWorkloadContainerExtendedInfo, self).__init__(**kwargs)
self.host_server_name = kwargs.get('host_server_name', None)
self.inquiry_info = kwargs.get('inquiry_info', None)
self.nodes_list = kwargs.get('nodes_list', None)
```
#### File: recoveryservicesbackup/models/bms_container_query_object.py
```python
from msrest.serialization import Model
class BMSContainerQueryObject(Model):
"""The query filters that can be used with the list containers API.
All required parameters must be populated in order to send to Azure.
:param backup_management_type: Required. Backup management type for this
container. Possible values include: 'Invalid', 'AzureIaasVM', 'MAB',
'DPM', 'AzureBackupServer', 'AzureSql', 'AzureStorage', 'AzureWorkload',
'DefaultBackup'
:type backup_management_type: str or
~azure.mgmt.recoveryservicesbackup.models.BackupManagementType
:param container_type: Type of container for filter. Possible values
include: 'Invalid', 'Unknown', 'IaasVMContainer',
'IaasVMServiceContainer', 'DPMContainer', 'AzureBackupServerContainer',
'MABContainer', 'Cluster', 'AzureSqlContainer', 'Windows', 'VCenter',
'VMAppContainer', 'SQLAGWorkLoadContainer', 'StorageContainer',
'GenericContainer', 'SqlCluster', 'ExchangeDAG', 'SharepointFarm',
'HyperVCluster', 'WindowsClient'
:type container_type: str or
~azure.mgmt.recoveryservicesbackup.models.ContainerType
:param backup_engine_name: Backup engine name
:type backup_engine_name: str
:param fabric_name: Fabric name for filter
:type fabric_name: str
:param status: Status of registration of this container with the Recovery
Services Vault.
:type status: str
:param friendly_name: Friendly name of this container.
:type friendly_name: str
"""
_validation = {
'backup_management_type': {'required': True},
}
_attribute_map = {
'backup_management_type': {'key': 'backupManagementType', 'type': 'str'},
'container_type': {'key': 'containerType', 'type': 'str'},
'backup_engine_name': {'key': 'backupEngineName', 'type': 'str'},
'fabric_name': {'key': 'fabricName', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'friendly_name': {'key': 'friendlyName', 'type': 'str'},
}
def __init__(self, **kwargs):
super(BMSContainerQueryObject, self).__init__(**kwargs)
self.backup_management_type = kwargs.get('backup_management_type', None)
self.container_type = kwargs.get('container_type', None)
self.backup_engine_name = kwargs.get('backup_engine_name', None)
self.fabric_name = kwargs.get('fabric_name', None)
self.status = kwargs.get('status', None)
self.friendly_name = kwargs.get('friendly_name', None)
```
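A short construction sketch for the query object above; only `backup_management_type` is required, and the status value shown is a placeholder string, not one taken from this file.
```python
from azure.mgmt.recoveryservicesbackup.models import BMSContainerQueryObject

# Narrow the container listing to IaaS VM containers; the extra field only filters further.
query = BMSContainerQueryObject(
    backup_management_type="AzureIaasVM",  # required
    status="Registered",                   # placeholder status string
)
```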
#### File: recoveryservicesbackup/models/client_script_for_connect_py3.py
```python
from msrest.serialization import Model
class ClientScriptForConnect(Model):
"""Client script details for file / folder restore.
:param script_content: File content of the client script for file / folder
restore.
:type script_content: str
:param script_extension: File extension of the client script for file /
folder restore - .ps1 , .sh , etc.
:type script_extension: str
:param os_type: OS type - Windows, Linux etc. for which this file / folder
restore client script works.
:type os_type: str
:param url: URL of Executable from where to source the content. If this is
not null then ScriptContent should not be used
:type url: str
    :param script_name_suffix: Mandatory suffix that should be added to the
     name of the script that is given for download to the user.
     If it is null or empty, ignore it.
:type script_name_suffix: str
"""
_attribute_map = {
'script_content': {'key': 'scriptContent', 'type': 'str'},
'script_extension': {'key': 'scriptExtension', 'type': 'str'},
'os_type': {'key': 'osType', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
'script_name_suffix': {'key': 'scriptNameSuffix', 'type': 'str'},
}
def __init__(self, *, script_content: str=None, script_extension: str=None, os_type: str=None, url: str=None, script_name_suffix: str=None, **kwargs) -> None:
super(ClientScriptForConnect, self).__init__(**kwargs)
self.script_content = script_content
self.script_extension = script_extension
self.os_type = os_type
self.url = url
self.script_name_suffix = script_name_suffix
```
#### File: recoveryservicesbackup/models/daily_retention_format.py
```python
from msrest.serialization import Model
class DailyRetentionFormat(Model):
"""Daily retention format.
:param days_of_the_month: List of days of the month.
:type days_of_the_month:
list[~azure.mgmt.recoveryservicesbackup.models.Day]
"""
_attribute_map = {
'days_of_the_month': {'key': 'daysOfTheMonth', 'type': '[Day]'},
}
def __init__(self, **kwargs):
super(DailyRetentionFormat, self).__init__(**kwargs)
self.days_of_the_month = kwargs.get('days_of_the_month', None)
```
#### File: recoveryservicesbackup/models/day.py
```python
from msrest.serialization import Model
class Day(Model):
"""Day of the week.
:param date_property: Date of the month
:type date_property: int
:param is_last: Whether Date is last date of month
:type is_last: bool
"""
_attribute_map = {
'date_property': {'key': 'date', 'type': 'int'},
'is_last': {'key': 'isLast', 'type': 'bool'},
}
def __init__(self, **kwargs):
super(Day, self).__init__(**kwargs)
self.date_property = kwargs.get('date_property', None)
self.is_last = kwargs.get('is_last', None)
```
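A small sketch combining `Day` with the `DailyRetentionFormat` model defined just above it; the import path is the generated models namespace referenced in the docstrings.
```python
from azure.mgmt.recoveryservicesbackup.models import DailyRetentionFormat, Day

# Keep backups taken on the 1st and on the last calendar day of each month.
retention_format = DailyRetentionFormat(
    days_of_the_month=[
        Day(date_property=1, is_last=False),
        Day(is_last=True),  # "last day of month" marker, no explicit date needed
    ]
)
```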
#### File: recoveryservicesbackup/models/generic_protection_policy.py
```python
from .protection_policy import ProtectionPolicy
class GenericProtectionPolicy(ProtectionPolicy):
"""Azure VM (Mercury) workload-specific backup policy.
All required parameters must be populated in order to send to Azure.
:param protected_items_count: Number of items associated with this policy.
:type protected_items_count: int
:param backup_management_type: Required. Constant filled by server.
:type backup_management_type: str
:param sub_protection_policy: List of sub-protection policies which
includes schedule and retention
:type sub_protection_policy:
list[~azure.mgmt.recoveryservicesbackup.models.SubProtectionPolicy]
:param time_zone: TimeZone optional input as string. For example: TimeZone
= "Pacific Standard Time".
:type time_zone: str
:param fabric_name: Name of this policy's fabric.
:type fabric_name: str
"""
_validation = {
'backup_management_type': {'required': True},
}
_attribute_map = {
'protected_items_count': {'key': 'protectedItemsCount', 'type': 'int'},
'backup_management_type': {'key': 'backupManagementType', 'type': 'str'},
'sub_protection_policy': {'key': 'subProtectionPolicy', 'type': '[SubProtectionPolicy]'},
'time_zone': {'key': 'timeZone', 'type': 'str'},
'fabric_name': {'key': 'fabricName', 'type': 'str'},
}
def __init__(self, **kwargs):
super(GenericProtectionPolicy, self).__init__(**kwargs)
self.sub_protection_policy = kwargs.get('sub_protection_policy', None)
self.time_zone = kwargs.get('time_zone', None)
self.fabric_name = kwargs.get('fabric_name', None)
self.backup_management_type = 'GenericProtectionPolicy'
```
#### File: recoveryservicesbackup/models/instant_item_recovery_target.py
```python
from msrest.serialization import Model
class InstantItemRecoveryTarget(Model):
"""Target details for file / folder restore.
:param client_scripts: List of client scripts.
:type client_scripts:
list[~azure.mgmt.recoveryservicesbackup.models.ClientScriptForConnect]
"""
_attribute_map = {
'client_scripts': {'key': 'clientScripts', 'type': '[ClientScriptForConnect]'},
}
def __init__(self, **kwargs):
super(InstantItemRecoveryTarget, self).__init__(**kwargs)
self.client_scripts = kwargs.get('client_scripts', None)
```
#### File: recoveryservicesbackup/models/long_term_retention_policy_py3.py
```python
from .retention_policy_py3 import RetentionPolicy
class LongTermRetentionPolicy(RetentionPolicy):
"""Long term retention policy.
All required parameters must be populated in order to send to Azure.
:param retention_policy_type: Required. Constant filled by server.
:type retention_policy_type: str
:param daily_schedule: Daily retention schedule of the protection policy.
:type daily_schedule:
~azure.mgmt.recoveryservicesbackup.models.DailyRetentionSchedule
:param weekly_schedule: Weekly retention schedule of the protection
policy.
:type weekly_schedule:
~azure.mgmt.recoveryservicesbackup.models.WeeklyRetentionSchedule
:param monthly_schedule: Monthly retention schedule of the protection
policy.
:type monthly_schedule:
~azure.mgmt.recoveryservicesbackup.models.MonthlyRetentionSchedule
:param yearly_schedule: Yearly retention schedule of the protection
policy.
:type yearly_schedule:
~azure.mgmt.recoveryservicesbackup.models.YearlyRetentionSchedule
"""
_validation = {
'retention_policy_type': {'required': True},
}
_attribute_map = {
'retention_policy_type': {'key': 'retentionPolicyType', 'type': 'str'},
'daily_schedule': {'key': 'dailySchedule', 'type': 'DailyRetentionSchedule'},
'weekly_schedule': {'key': 'weeklySchedule', 'type': 'WeeklyRetentionSchedule'},
'monthly_schedule': {'key': 'monthlySchedule', 'type': 'MonthlyRetentionSchedule'},
'yearly_schedule': {'key': 'yearlySchedule', 'type': 'YearlyRetentionSchedule'},
}
def __init__(self, *, daily_schedule=None, weekly_schedule=None, monthly_schedule=None, yearly_schedule=None, **kwargs) -> None:
super(LongTermRetentionPolicy, self).__init__(**kwargs)
self.daily_schedule = daily_schedule
self.weekly_schedule = weekly_schedule
self.monthly_schedule = monthly_schedule
self.yearly_schedule = yearly_schedule
self.retention_policy_type = 'LongTermRetentionPolicy'
```
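The constructor above illustrates the polymorphic-discriminator pattern used throughout these models: the `*_type` constant is filled in by `__init__`, so callers only supply the optional schedules. A minimal sketch:
```python
from azure.mgmt.recoveryservicesbackup.models import LongTermRetentionPolicy

policy = LongTermRetentionPolicy()  # all schedules are optional
assert policy.retention_policy_type == 'LongTermRetentionPolicy'  # set by the constructor
```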
#### File: recoveryservicesbackup/models/operation_status_py3.py
```python
from msrest.serialization import Model
class OperationStatus(Model):
"""Operation status.
:param id: ID of the operation.
:type id: str
:param name: Name of the operation.
:type name: str
:param status: Operation status. Possible values include: 'Invalid',
'InProgress', 'Succeeded', 'Failed', 'Canceled'
:type status: str or
~azure.mgmt.recoveryservicesbackup.models.OperationStatusValues
:param start_time: Operation start time. Format: ISO-8601.
:type start_time: datetime
:param end_time: Operation end time. Format: ISO-8601.
:type end_time: datetime
:param error: Error information related to this operation.
:type error:
~azure.mgmt.recoveryservicesbackup.models.OperationStatusError
:param properties: Additional information associated with this operation.
:type properties:
~azure.mgmt.recoveryservicesbackup.models.OperationStatusExtendedInfo
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
'error': {'key': 'error', 'type': 'OperationStatusError'},
'properties': {'key': 'properties', 'type': 'OperationStatusExtendedInfo'},
}
def __init__(self, *, id: str=None, name: str=None, status=None, start_time=None, end_time=None, error=None, properties=None, **kwargs) -> None:
super(OperationStatus, self).__init__(**kwargs)
self.id = id
self.name = name
self.status = status
self.start_time = start_time
self.end_time = end_time
self.error = error
self.properties = properties
```
#### File: recoveryservicesbackup/models/pre_backup_validation.py
```python
from msrest.serialization import Model
class PreBackupValidation(Model):
"""Pre-backup validation for Azure VM Workload provider.
:param status: Status of protectable item, i.e.
     InProgress, Succeeded, Failed. Possible values include: 'Invalid',
'Success', 'Failed'
:type status: str or
~azure.mgmt.recoveryservicesbackup.models.InquiryStatus
:param code: Error code of protectable item
:type code: str
:param message: Message corresponding to the error code for the
protectable item
:type message: str
"""
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(self, **kwargs):
super(PreBackupValidation, self).__init__(**kwargs)
self.status = kwargs.get('status', None)
self.code = kwargs.get('code', None)
self.message = kwargs.get('message', None)
```
#### File: recoveryservicesbackup/models/sql_data_directory_mapping_py3.py
```python
from msrest.serialization import Model
class SQLDataDirectoryMapping(Model):
"""Encapsulates information regarding data directory.
:param mapping_type: Type of data directory mapping. Possible values
include: 'Invalid', 'Data', 'Log'
:type mapping_type: str or
~azure.mgmt.recoveryservicesbackup.models.SQLDataDirectoryType
:param source_logical_name: Restore source logical name path
:type source_logical_name: str
:param source_path: Restore source path
:type source_path: str
:param target_path: Target path
:type target_path: str
"""
_attribute_map = {
'mapping_type': {'key': 'mappingType', 'type': 'str'},
'source_logical_name': {'key': 'sourceLogicalName', 'type': 'str'},
'source_path': {'key': 'sourcePath', 'type': 'str'},
'target_path': {'key': 'targetPath', 'type': 'str'},
}
def __init__(self, *, mapping_type=None, source_logical_name: str=None, source_path: str=None, target_path: str=None, **kwargs) -> None:
super(SQLDataDirectoryMapping, self).__init__(**kwargs)
self.mapping_type = mapping_type
self.source_logical_name = source_logical_name
self.source_path = source_path
self.target_path = target_path
```
#### File: recoveryservicesbackup/models/sql_data_directory.py
```python
from msrest.serialization import Model
class SQLDataDirectory(Model):
"""SQLDataDirectory info.
:param type: Type of data directory mapping. Possible values include:
'Invalid', 'Data', 'Log'
:type type: str or
~azure.mgmt.recoveryservicesbackup.models.SQLDataDirectoryType
:param path: File path
:type path: str
:param logical_name: Logical name of the file
:type logical_name: str
"""
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'path': {'key': 'path', 'type': 'str'},
'logical_name': {'key': 'logicalName', 'type': 'str'},
}
def __init__(self, **kwargs):
super(SQLDataDirectory, self).__init__(**kwargs)
self.type = kwargs.get('type', None)
self.path = kwargs.get('path', None)
self.logical_name = kwargs.get('logical_name', None)
```
#### File: redis/models/export_rdb_parameters_py3.py
```python
from msrest.serialization import Model
class ExportRDBParameters(Model):
"""Parameters for Redis export operation.
All required parameters must be populated in order to send to Azure.
:param format: File format.
:type format: str
:param prefix: Required. Prefix to use for exported files.
:type prefix: str
:param container: Required. Container name to export to.
:type container: str
"""
_validation = {
'prefix': {'required': True},
'container': {'required': True},
}
_attribute_map = {
'format': {'key': 'format', 'type': 'str'},
'prefix': {'key': 'prefix', 'type': 'str'},
'container': {'key': 'container', 'type': 'str'},
}
def __init__(self, *, prefix: str, container: str, format: str=None, **kwargs) -> None:
super(ExportRDBParameters, self).__init__(**kwargs)
self.format = format
self.prefix = prefix
self.container = container
```
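A construction sketch for the export parameters; `prefix` and `container` are the required keyword-only arguments, and the container value below is a placeholder blob-container SAS URL rather than anything defined in this file.
```python
from azure.mgmt.redis.models import ExportRDBParameters

params = ExportRDBParameters(
    prefix="nightly-",                                                       # required
    container="https://<account>.blob.core.windows.net/<container>?<sas>",   # required, placeholder
    format="RDB",                                                            # optional file format
)
```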
#### File: redis/models/redis_reboot_parameters_py3.py
```python
from msrest.serialization import Model
class RedisRebootParameters(Model):
"""Specifies which Redis node(s) to reboot.
All required parameters must be populated in order to send to Azure.
:param reboot_type: Required. Which Redis node(s) to reboot. Depending on
this value data loss is possible. Possible values include: 'PrimaryNode',
'SecondaryNode', 'AllNodes'
:type reboot_type: str or ~azure.mgmt.redis.models.RebootType
:param shard_id: If clustering is enabled, the ID of the shard to be
rebooted.
:type shard_id: int
"""
_validation = {
'reboot_type': {'required': True},
}
_attribute_map = {
'reboot_type': {'key': 'rebootType', 'type': 'str'},
'shard_id': {'key': 'shardId', 'type': 'int'},
}
def __init__(self, *, reboot_type, shard_id: int=None, **kwargs) -> None:
super(RedisRebootParameters, self).__init__(**kwargs)
self.reboot_type = reboot_type
self.shard_id = shard_id
```
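A construction sketch for the reboot parameters; `reboot_type` is required and `shard_id` only matters for clustered caches. The plain string stands in for the corresponding `RebootType` enum value.
```python
from azure.mgmt.redis.models import RedisRebootParameters

reboot = RedisRebootParameters(
    reboot_type="PrimaryNode",  # string form of the RebootType enum value
    shard_id=0,                 # only meaningful when clustering is enabled
)
```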
#### File: relay/models/wcf_relay.py
```python
from .resource import Resource
class WcfRelay(Resource):
"""Description of the WCF relay resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource ID.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:ivar is_dynamic: Returns true if the relay is dynamic; otherwise, false.
:vartype is_dynamic: bool
:ivar created_at: The time the WCF relay was created.
:vartype created_at: datetime
:ivar updated_at: The time the namespace was updated.
:vartype updated_at: datetime
:ivar listener_count: The number of listeners for this relay. Note that
     min: 1 and max: 25 are supported.
:vartype listener_count: int
:param relay_type: WCF relay type. Possible values include: 'NetTcp',
'Http'
:type relay_type: str or ~azure.mgmt.relay.models.Relaytype
:param requires_client_authorization: Returns true if client authorization
is needed for this relay; otherwise, false.
:type requires_client_authorization: bool
:param requires_transport_security: Returns true if transport security is
needed for this relay; otherwise, false.
:type requires_transport_security: bool
    :param user_metadata: The user metadata is a placeholder to store
     user-defined string data for the WCF Relay endpoint. For example, it can
     be used to store descriptive data, such as a list of teams and their contact
information. Also, user-defined configuration settings can be stored.
:type user_metadata: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'is_dynamic': {'readonly': True},
'created_at': {'readonly': True},
'updated_at': {'readonly': True},
'listener_count': {'readonly': True, 'maximum': 25, 'minimum': 0},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'is_dynamic': {'key': 'properties.isDynamic', 'type': 'bool'},
'created_at': {'key': 'properties.createdAt', 'type': 'iso-8601'},
'updated_at': {'key': 'properties.updatedAt', 'type': 'iso-8601'},
'listener_count': {'key': 'properties.listenerCount', 'type': 'int'},
'relay_type': {'key': 'properties.relayType', 'type': 'Relaytype'},
'requires_client_authorization': {'key': 'properties.requiresClientAuthorization', 'type': 'bool'},
'requires_transport_security': {'key': 'properties.requiresTransportSecurity', 'type': 'bool'},
'user_metadata': {'key': 'properties.userMetadata', 'type': 'str'},
}
def __init__(self, relay_type=None, requires_client_authorization=None, requires_transport_security=None, user_metadata=None):
super(WcfRelay, self).__init__()
self.is_dynamic = None
self.created_at = None
self.updated_at = None
self.listener_count = None
self.relay_type = relay_type
self.requires_client_authorization = requires_client_authorization
self.requires_transport_security = requires_transport_security
self.user_metadata = user_metadata
```
#### File: reservations/models/reservation_split_properties_py3.py
```python
from msrest.serialization import Model
class ReservationSplitProperties(Model):
"""ReservationSplitProperties.
:param split_destinations: List of destination Resource Id that are
created due to split. Format of the resource Id is
/providers/Microsoft.Capacity/reservationOrders/{reservationOrderId}/reservations/{reservationId}
:type split_destinations: list[str]
:param split_source: Resource Id of the Reservation from which this is
split. Format of the resource Id is
/providers/Microsoft.Capacity/reservationOrders/{reservationOrderId}/reservations/{reservationId}
:type split_source: str
"""
_attribute_map = {
'split_destinations': {'key': 'splitDestinations', 'type': '[str]'},
'split_source': {'key': 'splitSource', 'type': 'str'},
}
def __init__(self, *, split_destinations=None, split_source: str=None, **kwargs) -> None:
super(ReservationSplitProperties, self).__init__(**kwargs)
self.split_destinations = split_destinations
self.split_source = split_source
```
#### File: managedapplications/models/application_provider_authorization_py3.py
```python
from msrest.serialization import Model
class ApplicationProviderAuthorization(Model):
"""The managed application provider authorization.
All required parameters must be populated in order to send to Azure.
:param principal_id: Required. The provider's principal identifier. This
is the identity that the provider will use to call ARM to manage the
managed application resources.
:type principal_id: str
:param role_definition_id: Required. The provider's role definition
identifier. This role will define all the permissions that the provider
must have on the managed application's container resource group. This role
definition cannot have permission to delete the resource group.
:type role_definition_id: str
"""
_validation = {
'principal_id': {'required': True},
'role_definition_id': {'required': True},
}
_attribute_map = {
'principal_id': {'key': 'principalId', 'type': 'str'},
'role_definition_id': {'key': 'roleDefinitionId', 'type': 'str'},
}
def __init__(self, *, principal_id: str, role_definition_id: str, **kwargs) -> None:
super(ApplicationProviderAuthorization, self).__init__(**kwargs)
self.principal_id = principal_id
self.role_definition_id = role_definition_id
```
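A construction sketch; both fields are required and the GUIDs are placeholders. The import path assumes the managed applications models are exposed under `azure.mgmt.resource.managedapplications.models`.
```python
from azure.mgmt.resource.managedapplications.models import ApplicationProviderAuthorization

authorization = ApplicationProviderAuthorization(
    principal_id="00000000-0000-0000-0000-000000000000",        # provider identity (placeholder)
    role_definition_id="11111111-1111-1111-1111-111111111111",  # role definition GUID (placeholder)
)
```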
#### File: v2018_05_01/models/policy_assignment_py3.py
```python
from msrest.serialization import Model
class PolicyAssignment(Model):
"""The policy assignment.
Variables are only populated by the server, and will be ignored when
sending a request.
:param display_name: The display name of the policy assignment.
:type display_name: str
:param policy_definition_id: The ID of the policy definition or policy set
definition being assigned.
:type policy_definition_id: str
:param scope: The scope for the policy assignment.
:type scope: str
:param not_scopes: The policy's excluded scopes.
:type not_scopes: list[str]
:param parameters: Required if a parameter is used in policy rule.
:type parameters: object
:param description: This message will be part of response in case of
policy violation.
:type description: str
:param metadata: The policy assignment metadata.
:type metadata: object
:ivar id: The ID of the policy assignment.
:vartype id: str
:ivar type: The type of the policy assignment.
:vartype type: str
:ivar name: The name of the policy assignment.
:vartype name: str
:param sku: The policy sku. This property is optional, obsolete, and will
be ignored.
:type sku: ~azure.mgmt.resource.policy.v2018_05_01.models.PolicySku
:param location: The location of the policy assignment. Only required when
utilizing managed identity.
:type location: str
:param identity: The managed identity associated with the policy
assignment.
:type identity: ~azure.mgmt.resource.policy.v2018_05_01.models.Identity
"""
_validation = {
'id': {'readonly': True},
'type': {'readonly': True},
'name': {'readonly': True},
}
_attribute_map = {
'display_name': {'key': 'properties.displayName', 'type': 'str'},
'policy_definition_id': {'key': 'properties.policyDefinitionId', 'type': 'str'},
'scope': {'key': 'properties.scope', 'type': 'str'},
'not_scopes': {'key': 'properties.notScopes', 'type': '[str]'},
'parameters': {'key': 'properties.parameters', 'type': 'object'},
'description': {'key': 'properties.description', 'type': 'str'},
'metadata': {'key': 'properties.metadata', 'type': 'object'},
'id': {'key': 'id', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'sku': {'key': 'sku', 'type': 'PolicySku'},
'location': {'key': 'location', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'Identity'},
}
def __init__(self, *, display_name: str=None, policy_definition_id: str=None, scope: str=None, not_scopes=None, parameters=None, description: str=None, metadata=None, sku=None, location: str=None, identity=None, **kwargs) -> None:
super(PolicyAssignment, self).__init__(**kwargs)
self.display_name = display_name
self.policy_definition_id = policy_definition_id
self.scope = scope
self.not_scopes = not_scopes
self.parameters = parameters
self.description = description
self.metadata = metadata
self.id = None
self.type = None
self.name = None
self.sku = sku
self.location = location
self.identity = identity
```
#### File: v2016_09_01/models/deployment_properties.py
```python
from msrest.serialization import Model
class DeploymentProperties(Model):
"""Deployment properties.
All required parameters must be populated in order to send to Azure.
:param template: The template content. You use this element when you want
to pass the template syntax directly in the request rather than link to an
existing template. It can be a JObject or well-formed JSON string. Use
either the templateLink property or the template property, but not both.
:type template: object
:param template_link: The URI of the template. Use either the templateLink
property or the template property, but not both.
:type template_link:
~azure.mgmt.resource.resources.v2016_09_01.models.TemplateLink
:param parameters: Name and value pairs that define the deployment
parameters for the template. You use this element when you want to provide
the parameter values directly in the request rather than link to an
existing parameter file. Use either the parametersLink property or the
parameters property, but not both. It can be a JObject or a well formed
JSON string.
:type parameters: object
:param parameters_link: The URI of parameters file. You use this element
to link to an existing parameters file. Use either the parametersLink
property or the parameters property, but not both.
:type parameters_link:
~azure.mgmt.resource.resources.v2016_09_01.models.ParametersLink
:param mode: Required. The mode that is used to deploy resources. This
value can be either Incremental or Complete. In Incremental mode,
resources are deployed without deleting existing resources that are not
included in the template. In Complete mode, resources are deployed and
existing resources in the resource group that are not included in the
template are deleted. Be careful when using Complete mode as you may
unintentionally delete resources. Possible values include: 'Incremental',
'Complete'
:type mode: str or
~azure.mgmt.resource.resources.v2016_09_01.models.DeploymentMode
:param debug_setting: The debug setting of the deployment.
:type debug_setting:
~azure.mgmt.resource.resources.v2016_09_01.models.DebugSetting
"""
_validation = {
'mode': {'required': True},
}
_attribute_map = {
'template': {'key': 'template', 'type': 'object'},
'template_link': {'key': 'templateLink', 'type': 'TemplateLink'},
'parameters': {'key': 'parameters', 'type': 'object'},
'parameters_link': {'key': 'parametersLink', 'type': 'ParametersLink'},
'mode': {'key': 'mode', 'type': 'DeploymentMode'},
'debug_setting': {'key': 'debugSetting', 'type': 'DebugSetting'},
}
def __init__(self, **kwargs):
super(DeploymentProperties, self).__init__(**kwargs)
self.template = kwargs.get('template', None)
self.template_link = kwargs.get('template_link', None)
self.parameters = kwargs.get('parameters', None)
self.parameters_link = kwargs.get('parameters_link', None)
self.mode = kwargs.get('mode', None)
self.debug_setting = kwargs.get('debug_setting', None)
```
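A construction sketch for an inline deployment; `mode` is the only required field, and the template shown is an empty placeholder rather than one taken from this file.
```python
from azure.mgmt.resource.resources.v2016_09_01.models import DeploymentProperties

props = DeploymentProperties(
    mode="Incremental",  # required: Incremental or Complete
    template={           # inline template (use template_link to reference a URI instead)
        "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
        "contentVersion": "1.0.0.0",
        "resources": [],
    },
    parameters={},
)
```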
#### File: security/models/aad_solution_properties_py3.py
```python
from msrest.serialization import Model
class AadSolutionProperties(Model):
"""The external security solution properties for AAD solutions.
:param device_vendor:
:type device_vendor: str
:param device_type:
:type device_type: str
:param workspace:
:type workspace: ~azure.mgmt.security.models.ConnectedWorkspace
:param connectivity_state: The connectivity state of the external AAD
     solution. Possible values include: 'Discovered', 'NotLicensed',
'Connected'
:type connectivity_state: str or
~azure.mgmt.security.models.AadConnectivityState
"""
_attribute_map = {
'device_vendor': {'key': 'deviceVendor', 'type': 'str'},
'device_type': {'key': 'deviceType', 'type': 'str'},
'workspace': {'key': 'workspace', 'type': 'ConnectedWorkspace'},
'connectivity_state': {'key': 'connectivityState', 'type': 'str'},
}
def __init__(self, *, device_vendor: str=None, device_type: str=None, workspace=None, connectivity_state=None, **kwargs) -> None:
super(AadSolutionProperties, self).__init__(**kwargs)
self.device_vendor = device_vendor
self.device_type = device_type
self.workspace = workspace
self.connectivity_state = connectivity_state
```
#### File: security/models/alert.py
```python
from .resource import Resource
class Alert(Resource):
"""Security alert.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource Id
:vartype id: str
:ivar name: Resource name
:vartype name: str
:ivar type: Resource type
:vartype type: str
:ivar state: State of the alert (Active, Dismissed etc.)
:vartype state: str
:ivar reported_time_utc: The time the incident was reported to
Microsoft.Security in UTC
:vartype reported_time_utc: datetime
:ivar vendor_name: Name of the vendor that discovered the incident
:vartype vendor_name: str
:ivar alert_name: Name of the alert type
:vartype alert_name: str
:ivar alert_display_name: Display name of the alert type
:vartype alert_display_name: str
:ivar detected_time_utc: The time the incident was detected by the vendor
:vartype detected_time_utc: datetime
:ivar description: Description of the incident and what it means
:vartype description: str
    :ivar remediation_steps: Recommended steps to remediate the incident
:vartype remediation_steps: str
:ivar action_taken: The action that was taken as a response to the alert
(Active, Blocked etc.)
:vartype action_taken: str
:ivar reported_severity: Estimated severity of this alert
:vartype reported_severity: str
:ivar compromised_entity: The entity that the incident happened on
:vartype compromised_entity: str
:ivar associated_resource: Azure resource ID of the associated resource
:vartype associated_resource: str
:param extended_properties:
:type extended_properties: dict[str, object]
:ivar system_source: The type of the alerted resource (Azure, Non-Azure)
:vartype system_source: str
:ivar can_be_investigated: Whether this alert can be investigated with
Azure Security Center
:vartype can_be_investigated: bool
    :param entities: Objects that are related to this alert
:type entities: list[~azure.mgmt.security.models.AlertEntity]
    :ivar confidence_score: Level of confidence in the alert
    :vartype confidence_score: float
    :param confidence_reasons: Reasons the alert got the confidenceScore value
:type confidence_reasons:
list[~azure.mgmt.security.models.AlertConfidenceReason]
:ivar subscription_id: Azure subscription ID of the resource that had the
security alert or the subscription ID of the workspace that this resource
reports to
:vartype subscription_id: str
:ivar instance_id: Instance ID of the alert.
:vartype instance_id: str
:ivar workspace_arm_id: Azure resource ID of the workspace that the alert
was reported to.
:vartype workspace_arm_id: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'state': {'readonly': True},
'reported_time_utc': {'readonly': True},
'vendor_name': {'readonly': True},
'alert_name': {'readonly': True},
'alert_display_name': {'readonly': True},
'detected_time_utc': {'readonly': True},
'description': {'readonly': True},
'remediation_steps': {'readonly': True},
'action_taken': {'readonly': True},
'reported_severity': {'readonly': True},
'compromised_entity': {'readonly': True},
'associated_resource': {'readonly': True},
'system_source': {'readonly': True},
'can_be_investigated': {'readonly': True},
'confidence_score': {'readonly': True, 'maximum': 1, 'minimum': 0},
'subscription_id': {'readonly': True},
'instance_id': {'readonly': True},
'workspace_arm_id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'state': {'key': 'properties.state', 'type': 'str'},
'reported_time_utc': {'key': 'properties.reportedTimeUtc', 'type': 'iso-8601'},
'vendor_name': {'key': 'properties.vendorName', 'type': 'str'},
'alert_name': {'key': 'properties.alertName', 'type': 'str'},
'alert_display_name': {'key': 'properties.alertDisplayName', 'type': 'str'},
'detected_time_utc': {'key': 'properties.detectedTimeUtc', 'type': 'iso-8601'},
'description': {'key': 'properties.description', 'type': 'str'},
'remediation_steps': {'key': 'properties.remediationSteps', 'type': 'str'},
'action_taken': {'key': 'properties.actionTaken', 'type': 'str'},
'reported_severity': {'key': 'properties.reportedSeverity', 'type': 'str'},
'compromised_entity': {'key': 'properties.compromisedEntity', 'type': 'str'},
'associated_resource': {'key': 'properties.associatedResource', 'type': 'str'},
'extended_properties': {'key': 'properties.extendedProperties', 'type': '{object}'},
'system_source': {'key': 'properties.systemSource', 'type': 'str'},
'can_be_investigated': {'key': 'properties.canBeInvestigated', 'type': 'bool'},
'entities': {'key': 'properties.entities', 'type': '[AlertEntity]'},
'confidence_score': {'key': 'properties.confidenceScore', 'type': 'float'},
'confidence_reasons': {'key': 'properties.confidenceReasons', 'type': '[AlertConfidenceReason]'},
'subscription_id': {'key': 'properties.subscriptionId', 'type': 'str'},
'instance_id': {'key': 'properties.instanceId', 'type': 'str'},
'workspace_arm_id': {'key': 'properties.workspaceArmId', 'type': 'str'},
}
def __init__(self, **kwargs):
super(Alert, self).__init__(**kwargs)
self.state = None
self.reported_time_utc = None
self.vendor_name = None
self.alert_name = None
self.alert_display_name = None
self.detected_time_utc = None
self.description = None
self.remediation_steps = None
self.action_taken = None
self.reported_severity = None
self.compromised_entity = None
self.associated_resource = None
self.extended_properties = kwargs.get('extended_properties', None)
self.system_source = None
self.can_be_investigated = None
self.entities = kwargs.get('entities', None)
self.confidence_score = None
self.confidence_reasons = kwargs.get('confidence_reasons', None)
self.subscription_id = None
self.instance_id = None
self.workspace_arm_id = None
```
#### File: security/models/compliance_segment.py
```python
from msrest.serialization import Model
class ComplianceSegment(Model):
"""A segment of a compliance assessment.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar segment_type: The segment type, e.g. compliant, non-compliance,
insufficient coverage, N/A, etc.
:vartype segment_type: str
:ivar percentage: The size (%) of the segment.
:vartype percentage: float
"""
_validation = {
'segment_type': {'readonly': True},
'percentage': {'readonly': True},
}
_attribute_map = {
'segment_type': {'key': 'segmentType', 'type': 'str'},
'percentage': {'key': 'percentage', 'type': 'float'},
}
def __init__(self, **kwargs):
super(ComplianceSegment, self).__init__(**kwargs)
self.segment_type = None
self.percentage = None
```
#### File: security/models/external_security_solution_kind1.py
```python
from msrest.serialization import Model
class ExternalSecuritySolutionKind1(Model):
"""Describes an Azure resource with kind.
:param kind: The kind of the external solution. Possible values include:
'CEF', 'ATA', 'AAD'
:type kind: str or
~azure.mgmt.security.models.ExternalSecuritySolutionKind
"""
_attribute_map = {
'kind': {'key': 'kind', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ExternalSecuritySolutionKind1, self).__init__(**kwargs)
self.kind = kwargs.get('kind', None)
```
#### File: security/models/information_protection_policy_py3.py
```python
from .resource_py3 import Resource
class InformationProtectionPolicy(Resource):
"""Information protection policy.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource Id
:vartype id: str
:ivar name: Resource name
:vartype name: str
:ivar type: Resource type
:vartype type: str
:ivar last_modified_utc: Describes the last UTC time the policy was
modified.
:vartype last_modified_utc: datetime
:param labels: Dictionary of sensitivity labels.
:type labels: dict[str, ~azure.mgmt.security.models.SensitivityLabel]
:param information_types: The sensitivity information types.
:type information_types: dict[str,
~azure.mgmt.security.models.InformationType]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'last_modified_utc': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'last_modified_utc': {'key': 'properties.lastModifiedUtc', 'type': 'iso-8601'},
'labels': {'key': 'properties.labels', 'type': '{SensitivityLabel}'},
'information_types': {'key': 'properties.informationTypes', 'type': '{InformationType}'},
}
def __init__(self, *, labels=None, information_types=None, **kwargs) -> None:
super(InformationProtectionPolicy, self).__init__(**kwargs)
self.last_modified_utc = None
self.labels = labels
self.information_types = information_types
```
#### File: security/models/jit_network_access_policy_initiate_port_py3.py
```python
from msrest.serialization import Model
class JitNetworkAccessPolicyInitiatePort(Model):
"""JitNetworkAccessPolicyInitiatePort.
All required parameters must be populated in order to send to Azure.
:param number: Required.
:type number: int
:param allowed_source_address_prefix: Source of the allowed traffic. If
omitted, the request will be for the source IP address of the initiate
request.
:type allowed_source_address_prefix: str
:param end_time_utc: Required. The time to close the request in UTC
:type end_time_utc: datetime
"""
_validation = {
'number': {'required': True},
'end_time_utc': {'required': True},
}
_attribute_map = {
'number': {'key': 'number', 'type': 'int'},
'allowed_source_address_prefix': {'key': 'allowedSourceAddressPrefix', 'type': 'str'},
'end_time_utc': {'key': 'endTimeUtc', 'type': 'iso-8601'},
}
def __init__(self, *, number: int, end_time_utc, allowed_source_address_prefix: str=None, **kwargs) -> None:
super(JitNetworkAccessPolicyInitiatePort, self).__init__(**kwargs)
self.number = number
self.allowed_source_address_prefix = allowed_source_address_prefix
self.end_time_utc = end_time_utc
```
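A construction sketch for a JIT port request; `number` and `end_time_utc` are required, and leaving out `allowed_source_address_prefix` defaults the request to the caller's source IP.
```python
from datetime import datetime, timedelta
from azure.mgmt.security.models import JitNetworkAccessPolicyInitiatePort

port_request = JitNetworkAccessPolicyInitiatePort(
    number=22,                                           # SSH, as an example
    end_time_utc=datetime.utcnow() + timedelta(hours=3)  # close the port after three hours
)
```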
#### File: security/models/jit_network_access_request_py3.py
```python
from msrest.serialization import Model
class JitNetworkAccessRequest(Model):
"""JitNetworkAccessRequest.
All required parameters must be populated in order to send to Azure.
:param virtual_machines: Required.
:type virtual_machines:
list[~azure.mgmt.security.models.JitNetworkAccessRequestVirtualMachine]
:param start_time_utc: Required. The start time of the request in UTC
:type start_time_utc: datetime
:param requestor: Required. The identity of the person who made the
request
:type requestor: str
"""
_validation = {
'virtual_machines': {'required': True},
'start_time_utc': {'required': True},
'requestor': {'required': True},
}
_attribute_map = {
'virtual_machines': {'key': 'virtualMachines', 'type': '[JitNetworkAccessRequestVirtualMachine]'},
'start_time_utc': {'key': 'startTimeUtc', 'type': 'iso-8601'},
'requestor': {'key': 'requestor', 'type': 'str'},
}
def __init__(self, *, virtual_machines, start_time_utc, requestor: str, **kwargs) -> None:
super(JitNetworkAccessRequest, self).__init__(**kwargs)
self.virtual_machines = virtual_machines
self.start_time_utc = start_time_utc
self.requestor = requestor
```
#### File: servicefabric/models/node_type_description.py
```python
from msrest.serialization import Model
class NodeTypeDescription(Model):
"""Describes a node type in the cluster, each node type represents sub set of
nodes in the cluster.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name of the node type.
:type name: str
:param placement_properties: The placement tags applied to nodes in the
node type, which can be used to indicate where certain services (workload)
should run.
:type placement_properties: dict[str, str]
:param capacities: The capacity tags applied to the nodes in the node
type, the cluster resource manager uses these tags to understand how much
resource a node has.
:type capacities: dict[str, str]
:param client_connection_endpoint_port: Required. The TCP cluster
management endpoint port.
:type client_connection_endpoint_port: int
:param http_gateway_endpoint_port: Required. The HTTP cluster management
endpoint port.
:type http_gateway_endpoint_port: int
:param durability_level: Possible values include: 'Bronze', 'Silver',
'Gold'
:type durability_level: str or ~azure.mgmt.servicefabric.models.enum
:param application_ports: The range of ports from which the cluster assigns
ports to Service Fabric applications.
:type application_ports:
~azure.mgmt.servicefabric.models.EndpointRangeDescription
:param ephemeral_ports: The range of ephemeral ports that nodes in this
node type should be configured with.
:type ephemeral_ports:
~azure.mgmt.servicefabric.models.EndpointRangeDescription
:param is_primary: Required. The node type on which system services will
run. Only one node type should be marked as primary. Primary node type
cannot be deleted or changed for existing clusters.
:type is_primary: bool
:param vm_instance_count: Required. The number of nodes in the node type.
This count should match the capacity property in the corresponding
VirtualMachineScaleSet resource.
:type vm_instance_count: int
:param reverse_proxy_endpoint_port: The endpoint used by reverse proxy.
:type reverse_proxy_endpoint_port: int
"""
_validation = {
'name': {'required': True},
'client_connection_endpoint_port': {'required': True},
'http_gateway_endpoint_port': {'required': True},
'is_primary': {'required': True},
'vm_instance_count': {'required': True, 'maximum': 2147483647, 'minimum': 1},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'placement_properties': {'key': 'placementProperties', 'type': '{str}'},
'capacities': {'key': 'capacities', 'type': '{str}'},
'client_connection_endpoint_port': {'key': 'clientConnectionEndpointPort', 'type': 'int'},
'http_gateway_endpoint_port': {'key': 'httpGatewayEndpointPort', 'type': 'int'},
'durability_level': {'key': 'durabilityLevel', 'type': 'str'},
'application_ports': {'key': 'applicationPorts', 'type': 'EndpointRangeDescription'},
'ephemeral_ports': {'key': 'ephemeralPorts', 'type': 'EndpointRangeDescription'},
'is_primary': {'key': 'isPrimary', 'type': 'bool'},
'vm_instance_count': {'key': 'vmInstanceCount', 'type': 'int'},
'reverse_proxy_endpoint_port': {'key': 'reverseProxyEndpointPort', 'type': 'int'},
}
def __init__(self, **kwargs):
super(NodeTypeDescription, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.placement_properties = kwargs.get('placement_properties', None)
self.capacities = kwargs.get('capacities', None)
self.client_connection_endpoint_port = kwargs.get('client_connection_endpoint_port', None)
self.http_gateway_endpoint_port = kwargs.get('http_gateway_endpoint_port', None)
self.durability_level = kwargs.get('durability_level', None)
self.application_ports = kwargs.get('application_ports', None)
self.ephemeral_ports = kwargs.get('ephemeral_ports', None)
self.is_primary = kwargs.get('is_primary', None)
self.vm_instance_count = kwargs.get('vm_instance_count', None)
self.reverse_proxy_endpoint_port = kwargs.get('reverse_proxy_endpoint_port', None)
```
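A brief construction sketch (the import path is assumed from the `~azure.mgmt.servicefabric.models` references above); only the fields marked required need to be supplied, and `vm_instance_count` must stay within the 1..2147483647 bound enforced by `_validation`.
```python
# Assumed import path, based on the ~azure.mgmt.servicefabric.models references above.
from azure.mgmt.servicefabric.models import NodeTypeDescription

# Primary node type with the usual Service Fabric management ports; vm_instance_count
# should match the capacity of the backing virtual machine scale set.
primary_node_type = NodeTypeDescription(
    name="nt1vm",
    client_connection_endpoint_port=19000,
    http_gateway_endpoint_port=19080,
    is_primary=True,
    vm_instance_count=5,
    durability_level="Silver",  # optional: 'Bronze', 'Silver' or 'Gold'
)
```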
#### File: sqlvirtualmachine/models/sql_storage_update_settings.py
```python
from msrest.serialization import Model
class SqlStorageUpdateSettings(Model):
"""Set disk storage settings for SQL Server.
:param disk_count: Virtual machine disk count.
:type disk_count: int
:param disk_configuration_type: Disk configuration to apply to SQL Server.
Possible values include: 'NEW', 'EXTEND', 'ADD'
:type disk_configuration_type: str or
~azure.mgmt.sqlvirtualmachine.models.DiskConfigurationType
:param starting_device_id: Device id of the first disk to be updated.
:type starting_device_id: int
"""
_attribute_map = {
'disk_count': {'key': 'diskCount', 'type': 'int'},
'disk_configuration_type': {'key': 'diskConfigurationType', 'type': 'str'},
'starting_device_id': {'key': 'startingDeviceId', 'type': 'int'},
}
def __init__(self, **kwargs):
super(SqlStorageUpdateSettings, self).__init__(**kwargs)
self.disk_count = kwargs.get('disk_count', None)
self.disk_configuration_type = kwargs.get('disk_configuration_type', None)
self.starting_device_id = kwargs.get('starting_device_id', None)
```
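For illustration, a hedged sketch of building an update that extends existing SQL storage; the import path is assumed from the `~azure.mgmt.sqlvirtualmachine.models` references above.
```python
# Assumed import path, based on the ~azure.mgmt.sqlvirtualmachine.models references above.
from azure.mgmt.sqlvirtualmachine.models import SqlStorageUpdateSettings

# Extend the existing SQL storage pool by two data disks, starting at device id 2.
storage_update = SqlStorageUpdateSettings(
    disk_count=2,
    disk_configuration_type="EXTEND",  # 'NEW', 'EXTEND' or 'ADD'
    starting_device_id=2,
)
```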
#### File: v2016_12_01/models/service_sas_parameters.py
```python
from msrest.serialization import Model
class ServiceSasParameters(Model):
"""The parameters to list service SAS credentials of a specific resource.
All required parameters must be populated in order to send to Azure.
:param canonicalized_resource: Required. The canonical path to the signed
resource.
:type canonicalized_resource: str
:param resource: Required. The signed services accessible with the service
SAS. Possible values include: Blob (b), Container (c), File (f), Share
(s). Possible values include: 'b', 'c', 'f', 's'
:type resource: str or ~azure.mgmt.storage.v2016_12_01.models.enum
:param permissions: The signed permissions for the service SAS. Possible
values include: Read (r), Write (w), Delete (d), List (l), Add (a), Create
(c), Update (u) and Process (p). Possible values include: 'r', 'd', 'w',
'l', 'a', 'c', 'u', 'p'
:type permissions: str or ~azure.mgmt.storage.v2016_12_01.models.enum
:param ip_address_or_range: An IP address or a range of IP addresses from
which to accept requests.
:type ip_address_or_range: str
:param protocols: The protocol permitted for a request made with the
account SAS. Possible values include: 'https,http', 'https'
:type protocols: str or
~azure.mgmt.storage.v2016_12_01.models.HttpProtocol
:param shared_access_start_time: The time at which the SAS becomes valid.
:type shared_access_start_time: datetime
:param shared_access_expiry_time: The time at which the shared access
signature becomes invalid.
:type shared_access_expiry_time: datetime
:param identifier: A unique value up to 64 characters in length that
correlates to an access policy specified for the container, queue, or
table.
:type identifier: str
:param partition_key_start: The start of partition key.
:type partition_key_start: str
:param partition_key_end: The end of partition key.
:type partition_key_end: str
:param row_key_start: The start of row key.
:type row_key_start: str
:param row_key_end: The end of row key.
:type row_key_end: str
:param key_to_sign: The key to sign the account SAS token with.
:type key_to_sign: str
:param cache_control: The response header override for cache control.
:type cache_control: str
:param content_disposition: The response header override for content
disposition.
:type content_disposition: str
:param content_encoding: The response header override for content
encoding.
:type content_encoding: str
:param content_language: The response header override for content
language.
:type content_language: str
:param content_type: The response header override for content type.
:type content_type: str
"""
_validation = {
'canonicalized_resource': {'required': True},
'resource': {'required': True},
'identifier': {'max_length': 64},
}
_attribute_map = {
'canonicalized_resource': {'key': 'canonicalizedResource', 'type': 'str'},
'resource': {'key': 'signedResource', 'type': 'str'},
'permissions': {'key': 'signedPermission', 'type': 'str'},
'ip_address_or_range': {'key': 'signedIp', 'type': 'str'},
'protocols': {'key': 'signedProtocol', 'type': 'HttpProtocol'},
'shared_access_start_time': {'key': 'signedStart', 'type': 'iso-8601'},
'shared_access_expiry_time': {'key': 'signedExpiry', 'type': 'iso-8601'},
'identifier': {'key': 'signedIdentifier', 'type': 'str'},
'partition_key_start': {'key': 'startPk', 'type': 'str'},
'partition_key_end': {'key': 'endPk', 'type': 'str'},
'row_key_start': {'key': 'startRk', 'type': 'str'},
'row_key_end': {'key': 'endRk', 'type': 'str'},
'key_to_sign': {'key': 'keyToSign', 'type': 'str'},
'cache_control': {'key': 'rscc', 'type': 'str'},
'content_disposition': {'key': 'rscd', 'type': 'str'},
'content_encoding': {'key': 'rsce', 'type': 'str'},
'content_language': {'key': 'rscl', 'type': 'str'},
'content_type': {'key': 'rsct', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ServiceSasParameters, self).__init__(**kwargs)
self.canonicalized_resource = kwargs.get('canonicalized_resource', None)
self.resource = kwargs.get('resource', None)
self.permissions = kwargs.get('permissions', None)
self.ip_address_or_range = kwargs.get('ip_address_or_range', None)
self.protocols = kwargs.get('protocols', None)
self.shared_access_start_time = kwargs.get('shared_access_start_time', None)
self.shared_access_expiry_time = kwargs.get('shared_access_expiry_time', None)
self.identifier = kwargs.get('identifier', None)
self.partition_key_start = kwargs.get('partition_key_start', None)
self.partition_key_end = kwargs.get('partition_key_end', None)
self.row_key_start = kwargs.get('row_key_start', None)
self.row_key_end = kwargs.get('row_key_end', None)
self.key_to_sign = kwargs.get('key_to_sign', None)
self.cache_control = kwargs.get('cache_control', None)
self.content_disposition = kwargs.get('content_disposition', None)
self.content_encoding = kwargs.get('content_encoding', None)
self.content_language = kwargs.get('content_language', None)
self.content_type = kwargs.get('content_type', None)
```
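A hedged example of filling in the most common fields for a container-level, read-only service SAS; the import path follows the `azure.mgmt.storage.v2016_12_01.models` namespace used in the docstring, and the `canonicalized_resource` value is illustrative.
```python
from datetime import datetime, timedelta, timezone

# Assumed import path for the 2016-12-01 API version shown above.
from azure.mgmt.storage.v2016_12_01.models import ServiceSasParameters

# Read-only SAS for a single blob container, valid for one day.
sas_parameters = ServiceSasParameters(
    canonicalized_resource="/blob/mystorageaccount/mycontainer",  # illustrative path
    resource="c",        # container-level SAS
    permissions="r",     # read only
    protocols="https",
    shared_access_start_time=datetime.now(timezone.utc),
    shared_access_expiry_time=datetime.now(timezone.utc) + timedelta(days=1),
)
```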
#### File: v2018_02_01/models/legal_hold_properties_py3.py
```python
from msrest.serialization import Model
class LegalHoldProperties(Model):
"""The LegalHold property of a blob container.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar has_legal_hold: The hasLegalHold public property is set to true by
SRP if there are at least one existing tag. The hasLegalHold public
property is set to false by SRP if all existing legal hold tags are
cleared out. There can be a maximum of 1000 blob containers with
hasLegalHold=true for a given account.
:vartype has_legal_hold: bool
:param tags: The list of LegalHold tags of a blob container.
:type tags: list[~azure.mgmt.storage.v2018_02_01.models.TagProperty]
"""
_validation = {
'has_legal_hold': {'readonly': True},
}
_attribute_map = {
'has_legal_hold': {'key': 'hasLegalHold', 'type': 'bool'},
'tags': {'key': 'tags', 'type': '[TagProperty]'},
}
def __init__(self, *, tags=None, **kwargs) -> None:
super(LegalHoldProperties, self).__init__(**kwargs)
self.has_legal_hold = None
self.tags = tags
```
#### File: azure-mgmt-subscription/tests/test_mgmt_subscription.py
```python
import unittest
import azure.mgmt.billing
import azure.mgmt.subscription
from devtools_testutils import AzureMgmtTestCase
class MgmtSubscriptionTest(AzureMgmtTestCase):
def setUp(self):
super(MgmtSubscriptionTest, self).setUp()
self.subscription_client = self.create_basic_client(azure.mgmt.subscription.SubscriptionClient)
self.billing_client = self.create_mgmt_client(azure.mgmt.billing.BillingManagementClient)
def test_create_subscription(self):
enrollment_accounts = list(self.billing_client.enrollment_accounts.list())
self.assertTrue(len(enrollment_accounts) > 0)
creation_parameters = azure.mgmt.subscription.models.SubscriptionCreationParameters(
offer_type='MS-AZR-0148P')
creation_result = self.subscription_client.subscription_factory \
.create_subscription_in_enrollment_account(
enrollment_accounts[0].name,
creation_parameters)
self.assertTrue(len(creation_result.result().subscription_link) > 0)
#------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
```
#### File: web/models/app_service_certificate_order_patch_resource.py
```python
from .proxy_only_resource import ProxyOnlyResource
class AppServiceCertificateOrderPatchResource(ProxyOnlyResource):
"""ARM resource for a certificate order that is purchased through Azure.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param certificates: State of the Key Vault secret.
:type certificates: dict[str,
~azure.mgmt.web.models.AppServiceCertificate]
:param distinguished_name: Certificate distinguished name.
:type distinguished_name: str
:ivar domain_verification_token: Domain verification token.
:vartype domain_verification_token: str
:param validity_in_years: Duration in years (must be between 1 and 3).
Default value: 1.
:type validity_in_years: int
:param key_size: Certificate key size. Default value: 2048.
:type key_size: int
:param product_type: Required. Certificate product type. Possible values
include: 'StandardDomainValidatedSsl',
'StandardDomainValidatedWildCardSsl'
:type product_type: str or ~azure.mgmt.web.models.CertificateProductType
:param auto_renew: <code>true</code> if the certificate should be
automatically renewed when it expires; otherwise, <code>false</code>.
Default value: True.
:type auto_renew: bool
:ivar provisioning_state: Status of certificate order. Possible values
include: 'Succeeded', 'Failed', 'Canceled', 'InProgress', 'Deleting'
:vartype provisioning_state: str or
~azure.mgmt.web.models.ProvisioningState
:ivar status: Current order status. Possible values include:
'Pendingissuance', 'Issued', 'Revoked', 'Canceled', 'Denied',
'Pendingrevocation', 'PendingRekey', 'Unused', 'Expired', 'NotSubmitted'
:vartype status: str or ~azure.mgmt.web.models.CertificateOrderStatus
:ivar signed_certificate: Signed certificate.
:vartype signed_certificate: ~azure.mgmt.web.models.CertificateDetails
:param csr: Last CSR that was created for this order.
:type csr: str
:ivar intermediate: Intermediate certificate.
:vartype intermediate: ~azure.mgmt.web.models.CertificateDetails
:ivar root: Root certificate.
:vartype root: ~azure.mgmt.web.models.CertificateDetails
:ivar serial_number: Current serial number of the certificate.
:vartype serial_number: str
:ivar last_certificate_issuance_time: Certificate last issuance time.
:vartype last_certificate_issuance_time: datetime
:ivar expiration_time: Certificate expiration time.
:vartype expiration_time: datetime
:ivar is_private_key_external: <code>true</code> if private key is
external; otherwise, <code>false</code>.
:vartype is_private_key_external: bool
:ivar app_service_certificate_not_renewable_reasons: Reasons why App
Service Certificate is not renewable at the current moment.
:vartype app_service_certificate_not_renewable_reasons: list[str]
:ivar next_auto_renewal_time_stamp: Time stamp when the certificate would
be auto renewed next
:vartype next_auto_renewal_time_stamp: datetime
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'domain_verification_token': {'readonly': True},
'validity_in_years': {'maximum': 3, 'minimum': 1},
'product_type': {'required': True},
'provisioning_state': {'readonly': True},
'status': {'readonly': True},
'signed_certificate': {'readonly': True},
'intermediate': {'readonly': True},
'root': {'readonly': True},
'serial_number': {'readonly': True},
'last_certificate_issuance_time': {'readonly': True},
'expiration_time': {'readonly': True},
'is_private_key_external': {'readonly': True},
'app_service_certificate_not_renewable_reasons': {'readonly': True},
'next_auto_renewal_time_stamp': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'certificates': {'key': 'properties.certificates', 'type': '{AppServiceCertificate}'},
'distinguished_name': {'key': 'properties.distinguishedName', 'type': 'str'},
'domain_verification_token': {'key': 'properties.domainVerificationToken', 'type': 'str'},
'validity_in_years': {'key': 'properties.validityInYears', 'type': 'int'},
'key_size': {'key': 'properties.keySize', 'type': 'int'},
'product_type': {'key': 'properties.productType', 'type': 'CertificateProductType'},
'auto_renew': {'key': 'properties.autoRenew', 'type': 'bool'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'ProvisioningState'},
'status': {'key': 'properties.status', 'type': 'CertificateOrderStatus'},
'signed_certificate': {'key': 'properties.signedCertificate', 'type': 'CertificateDetails'},
'csr': {'key': 'properties.csr', 'type': 'str'},
'intermediate': {'key': 'properties.intermediate', 'type': 'CertificateDetails'},
'root': {'key': 'properties.root', 'type': 'CertificateDetails'},
'serial_number': {'key': 'properties.serialNumber', 'type': 'str'},
'last_certificate_issuance_time': {'key': 'properties.lastCertificateIssuanceTime', 'type': 'iso-8601'},
'expiration_time': {'key': 'properties.expirationTime', 'type': 'iso-8601'},
'is_private_key_external': {'key': 'properties.isPrivateKeyExternal', 'type': 'bool'},
'app_service_certificate_not_renewable_reasons': {'key': 'properties.appServiceCertificateNotRenewableReasons', 'type': '[str]'},
'next_auto_renewal_time_stamp': {'key': 'properties.nextAutoRenewalTimeStamp', 'type': 'iso-8601'},
}
def __init__(self, **kwargs):
super(AppServiceCertificateOrderPatchResource, self).__init__(**kwargs)
self.certificates = kwargs.get('certificates', None)
self.distinguished_name = kwargs.get('distinguished_name', None)
self.domain_verification_token = None
self.validity_in_years = kwargs.get('validity_in_years', 1)
self.key_size = kwargs.get('key_size', 2048)
self.product_type = kwargs.get('product_type', None)
self.auto_renew = kwargs.get('auto_renew', True)
self.provisioning_state = None
self.status = None
self.signed_certificate = None
self.csr = kwargs.get('csr', None)
self.intermediate = None
self.root = None
self.serial_number = None
self.last_certificate_issuance_time = None
self.expiration_time = None
self.is_private_key_external = None
self.app_service_certificate_not_renewable_reasons = None
self.next_auto_renewal_time_stamp = None
```
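A short sketch, assuming the class is exposed via `azure.mgmt.web.models`: only `product_type` is required, and the other purchase options fall back to the defaults noted in the docstring.
```python
# Assumed import path, based on the ~azure.mgmt.web.models references above.
from azure.mgmt.web.models import AppServiceCertificateOrderPatchResource

# Only product_type is required; validity_in_years, key_size and auto_renew keep
# their defaults (1 year, 2048-bit key, auto-renew enabled).
patch = AppServiceCertificateOrderPatchResource(
    product_type="StandardDomainValidatedSsl",
    distinguished_name="CN=example.com",  # illustrative value
)
```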
#### File: web/models/app_service_certificate_resource.py
```python
from .resource import Resource
class AppServiceCertificateResource(Resource):
"""Key Vault container ARM resource for a certificate that is purchased
through Azure.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:param location: Required. Resource Location.
:type location: str
:ivar type: Resource type.
:vartype type: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param key_vault_id: Key Vault resource Id.
:type key_vault_id: str
:param key_vault_secret_name: Key Vault secret name.
:type key_vault_secret_name: str
:ivar provisioning_state: Status of the Key Vault secret. Possible values
include: 'Initialized', 'WaitingOnCertificateOrder', 'Succeeded',
'CertificateOrderFailed', 'OperationNotPermittedOnKeyVault',
'AzureServiceUnauthorizedToAccessKeyVault', 'KeyVaultDoesNotExist',
'KeyVaultSecretDoesNotExist', 'UnknownError', 'ExternalPrivateKey',
'Unknown'
:vartype provisioning_state: str or
~azure.mgmt.web.models.KeyVaultSecretStatus
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'location': {'required': True},
'type': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'key_vault_id': {'key': 'properties.keyVaultId', 'type': 'str'},
'key_vault_secret_name': {'key': 'properties.keyVaultSecretName', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'KeyVaultSecretStatus'},
}
def __init__(self, **kwargs):
super(AppServiceCertificateResource, self).__init__(**kwargs)
self.key_vault_id = kwargs.get('key_vault_id', None)
self.key_vault_secret_name = kwargs.get('key_vault_secret_name', None)
self.provisioning_state = None
```
#### File: web/models/auto_heal_rules_py3.py
```python
from msrest.serialization import Model
class AutoHealRules(Model):
"""Rules that can be defined for auto-heal.
:param triggers: Conditions that describe when to execute the auto-heal
actions.
:type triggers: ~azure.mgmt.web.models.AutoHealTriggers
:param actions: Actions to be executed when a rule is triggered.
:type actions: ~azure.mgmt.web.models.AutoHealActions
"""
_attribute_map = {
'triggers': {'key': 'triggers', 'type': 'AutoHealTriggers'},
'actions': {'key': 'actions', 'type': 'AutoHealActions'},
}
def __init__(self, *, triggers=None, actions=None, **kwargs) -> None:
super(AutoHealRules, self).__init__(**kwargs)
self.triggers = triggers
self.actions = actions
```
#### File: web/models/data_table_response_object_py3.py
```python
from msrest.serialization import Model
class DataTableResponseObject(Model):
"""Data Table which defines columns and raw row values.
:param table_name: Name of the table
:type table_name: str
:param columns: List of columns with data types
:type columns: list[~azure.mgmt.web.models.DataTableResponseColumn]
:param rows: Raw row values
:type rows: list[list[str]]
"""
_attribute_map = {
'table_name': {'key': 'tableName', 'type': 'str'},
'columns': {'key': 'columns', 'type': '[DataTableResponseColumn]'},
'rows': {'key': 'rows', 'type': '[[str]]'},
}
def __init__(self, *, table_name: str=None, columns=None, rows=None, **kwargs) -> None:
super(DataTableResponseObject, self).__init__(**kwargs)
self.table_name = table_name
self.columns = columns
self.rows = rows
```
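A small, hedged example of shaping a response table by hand; column metadata (`DataTableResponseColumn`) is left out here since its constructor is not shown in this file.
```python
# Assumed import path, based on the ~azure.mgmt.web.models references above.
from azure.mgmt.web.models import DataTableResponseObject

# Rows are plain lists of strings; columns may be supplied separately as
# DataTableResponseColumn objects when their schema is needed.
table = DataTableResponseObject(
    table_name="requests",
    rows=[
        ["2018-02-01T00:00:00Z", "200", "123"],
        ["2018-02-01T00:05:00Z", "500", "4"],
    ],
)
```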
#### File: web/models/diagnostic_metric_sample.py
```python
from msrest.serialization import Model
class DiagnosticMetricSample(Model):
"""Class representing Diagnostic Metric.
:param timestamp: Time at which metric is measured
:type timestamp: datetime
:param role_instance: Role instance. Null if this counter is not per
instance. The returned value is whichever instance name should be
reported, e.g. CPU and Memory return RDWORKERNAME (LargeDed..._IN_0),
where RDWORKERNAME is the machine name and the role instance name is in
parentheses.
:type role_instance: str
:param total: Total value of the metric. If multiple measurements are made
this will have sum of all.
:type total: float
:param maximum: Maximum of the metric sampled during the time period
:type maximum: float
:param minimum: Minimum of the metric sampled during the time period
:type minimum: float
:param is_aggregated: Whether the values are aggregates across all workers
or not
:type is_aggregated: bool
"""
_attribute_map = {
'timestamp': {'key': 'timestamp', 'type': 'iso-8601'},
'role_instance': {'key': 'roleInstance', 'type': 'str'},
'total': {'key': 'total', 'type': 'float'},
'maximum': {'key': 'maximum', 'type': 'float'},
'minimum': {'key': 'minimum', 'type': 'float'},
'is_aggregated': {'key': 'isAggregated', 'type': 'bool'},
}
def __init__(self, **kwargs):
super(DiagnosticMetricSample, self).__init__(**kwargs)
self.timestamp = kwargs.get('timestamp', None)
self.role_instance = kwargs.get('role_instance', None)
self.total = kwargs.get('total', None)
self.maximum = kwargs.get('maximum', None)
self.minimum = kwargs.get('minimum', None)
self.is_aggregated = kwargs.get('is_aggregated', None)
```
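A hedged construction sketch using the kwargs-based constructor; the import path is assumed from the `~azure.mgmt.web.models` references above.
```python
from datetime import datetime, timezone

# Assumed import path, based on the ~azure.mgmt.web.models references above.
from azure.mgmt.web.models import DiagnosticMetricSample

# A single aggregated sample; the kwargs-based constructor accepts the attribute
# names listed in _attribute_map.
sample = DiagnosticMetricSample(
    timestamp=datetime(2018, 2, 1, tzinfo=timezone.utc),
    total=42.0,
    maximum=80.0,
    minimum=5.0,
    is_aggregated=True,
)
```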
#### File: web/models/host_name_binding_py3.py
```python
from .proxy_only_resource_py3 import ProxyOnlyResource
class HostNameBinding(ProxyOnlyResource):
"""A hostname binding object.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param site_name: App Service app name.
:type site_name: str
:param domain_id: Fully qualified ARM domain resource URI.
:type domain_id: str
:param azure_resource_name: Azure resource name.
:type azure_resource_name: str
:param azure_resource_type: Azure resource type. Possible values include:
'Website', 'TrafficManager'
:type azure_resource_type: str or ~azure.mgmt.web.models.AzureResourceType
:param custom_host_name_dns_record_type: Custom DNS record type. Possible
values include: 'CName', 'A'
:type custom_host_name_dns_record_type: str or
~azure.mgmt.web.models.CustomHostNameDnsRecordType
:param host_name_type: Hostname type. Possible values include: 'Verified',
'Managed'
:type host_name_type: str or ~azure.mgmt.web.models.HostNameType
:param ssl_state: SSL type. Possible values include: 'Disabled',
'SniEnabled', 'IpBasedEnabled'
:type ssl_state: str or ~azure.mgmt.web.models.SslState
:param thumbprint: SSL certificate thumbprint
:type thumbprint: str
:ivar virtual_ip: Virtual IP address assigned to the hostname if IP based
SSL is enabled.
:vartype virtual_ip: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'virtual_ip': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'site_name': {'key': 'properties.siteName', 'type': 'str'},
'domain_id': {'key': 'properties.domainId', 'type': 'str'},
'azure_resource_name': {'key': 'properties.azureResourceName', 'type': 'str'},
'azure_resource_type': {'key': 'properties.azureResourceType', 'type': 'AzureResourceType'},
'custom_host_name_dns_record_type': {'key': 'properties.customHostNameDnsRecordType', 'type': 'CustomHostNameDnsRecordType'},
'host_name_type': {'key': 'properties.hostNameType', 'type': 'HostNameType'},
'ssl_state': {'key': 'properties.sslState', 'type': 'SslState'},
'thumbprint': {'key': 'properties.thumbprint', 'type': 'str'},
'virtual_ip': {'key': 'properties.virtualIP', 'type': 'str'},
}
def __init__(self, *, kind: str=None, site_name: str=None, domain_id: str=None, azure_resource_name: str=None, azure_resource_type=None, custom_host_name_dns_record_type=None, host_name_type=None, ssl_state=None, thumbprint: str=None, **kwargs) -> None:
super(HostNameBinding, self).__init__(kind=kind, **kwargs)
self.site_name = site_name
self.domain_id = domain_id
self.azure_resource_name = azure_resource_name
self.azure_resource_type = azure_resource_type
self.custom_host_name_dns_record_type = custom_host_name_dns_record_type
self.host_name_type = host_name_type
self.ssl_state = ssl_state
self.thumbprint = thumbprint
self.virtual_ip = None
```
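A minimal sketch of a hostname binding payload, assuming the usual `azure.mgmt.web.models` import path; enum-typed fields accept the string values listed in the docstring, and `virtual_ip` is server-populated and therefore not set here.
```python
# Assumed import path, based on the ~azure.mgmt.web.models references above.
from azure.mgmt.web.models import HostNameBinding

# Bind a custom hostname to an app with SNI-based SSL.
binding = HostNameBinding(
    site_name="my-webapp",
    azure_resource_type="Website",
    custom_host_name_dns_record_type="CName",
    host_name_type="Verified",
    ssl_state="SniEnabled",
    thumbprint="0123456789ABCDEF0123456789ABCDEF01234567",  # illustrative thumbprint
)
```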
#### File: web/models/metric_availability_py3.py
```python
from msrest.serialization import Model
class MetricAvailability(Model):
"""Retention policy of a resource metric.
:param time_grain:
:type time_grain: str
:param blob_duration:
:type blob_duration: str
"""
_attribute_map = {
'time_grain': {'key': 'timeGrain', 'type': 'str'},
'blob_duration': {'key': 'blobDuration', 'type': 'str'},
}
def __init__(self, *, time_grain: str=None, blob_duration: str=None, **kwargs) -> None:
super(MetricAvailability, self).__init__(**kwargs)
self.time_grain = time_grain
self.blob_duration = blob_duration
```
#### File: web/models/network_trace_py3.py
```python
from msrest.serialization import Model
class NetworkTrace(Model):
"""Network trace.
:param path: Local file path for the captured network trace file.
:type path: str
:param status: Current status of the network trace operation, same as
Operation.Status (InProgress/Succeeded/Failed).
:type status: str
:param message: Detailed message of a network trace operation, e.g. error
message in case of failure.
:type message: str
"""
_attribute_map = {
'path': {'key': 'path', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(self, *, path: str=None, status: str=None, message: str=None, **kwargs) -> None:
super(NetworkTrace, self).__init__(**kwargs)
self.path = path
self.status = status
self.message = message
```
#### File: web/models/operation.py
```python
from msrest.serialization import Model
class Operation(Model):
"""An operation on a resource.
:param id: Operation ID.
:type id: str
:param name: Operation name.
:type name: str
:param status: The current status of the operation. Possible values
include: 'InProgress', 'Failed', 'Succeeded', 'TimedOut', 'Created'
:type status: str or ~azure.mgmt.web.models.OperationStatus
:param errors: Any errors associated with the operation.
:type errors: list[~azure.mgmt.web.models.ErrorEntity]
:param created_time: Time when operation has started.
:type created_time: datetime
:param modified_time: Time when operation has been updated.
:type modified_time: datetime
:param expiration_time: Time when operation will expire.
:type expiration_time: datetime
:param geo_master_operation_id: Applicable only for stamp operation ids.
:type geo_master_operation_id: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'status': {'key': 'status', 'type': 'OperationStatus'},
'errors': {'key': 'errors', 'type': '[ErrorEntity]'},
'created_time': {'key': 'createdTime', 'type': 'iso-8601'},
'modified_time': {'key': 'modifiedTime', 'type': 'iso-8601'},
'expiration_time': {'key': 'expirationTime', 'type': 'iso-8601'},
'geo_master_operation_id': {'key': 'geoMasterOperationId', 'type': 'str'},
}
def __init__(self, **kwargs):
super(Operation, self).__init__(**kwargs)
self.id = kwargs.get('id', None)
self.name = kwargs.get('name', None)
self.status = kwargs.get('status', None)
self.errors = kwargs.get('errors', None)
self.created_time = kwargs.get('created_time', None)
self.modified_time = kwargs.get('modified_time', None)
self.expiration_time = kwargs.get('expiration_time', None)
self.geo_master_operation_id = kwargs.get('geo_master_operation_id', None)
```
#### File: web/models/site_extension_info_py3.py
```python
from .proxy_only_resource_py3 import ProxyOnlyResource
class SiteExtensionInfo(ProxyOnlyResource):
"""Site Extension Information.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param extension_id: Site extension ID.
:type extension_id: str
:param title:
:type title: str
:param extension_type: Site extension type. Possible values include:
'Gallery', 'WebRoot'
:type extension_type: str or ~azure.mgmt.web.models.SiteExtensionType
:param summary: Summary description.
:type summary: str
:param description: Detailed description.
:type description: str
:param version: Version information.
:type version: str
:param extension_url: Extension URL.
:type extension_url: str
:param project_url: Project URL.
:type project_url: str
:param icon_url: Icon URL.
:type icon_url: str
:param license_url: License URL.
:type license_url: str
:param feed_url: Feed URL.
:type feed_url: str
:param authors: List of authors.
:type authors: list[str]
:param installer_command_line_params: Installer command line parameters.
:type installer_command_line_params: str
:param published_date_time: Published timestamp.
:type published_date_time: datetime
:param download_count: Count of downloads.
:type download_count: int
:param local_is_latest_version: <code>true</code> if the local version is
the latest version; <code>false</code> otherwise.
:type local_is_latest_version: bool
:param local_path: Local path.
:type local_path: str
:param installed_date_time: Installed timestamp.
:type installed_date_time: datetime
:param provisioning_state: Provisioning state.
:type provisioning_state: str
:param comment: Site Extension comment.
:type comment: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'extension_id': {'key': 'properties.extension_id', 'type': 'str'},
'title': {'key': 'properties.title', 'type': 'str'},
'extension_type': {'key': 'properties.extension_type', 'type': 'SiteExtensionType'},
'summary': {'key': 'properties.summary', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'version': {'key': 'properties.version', 'type': 'str'},
'extension_url': {'key': 'properties.extension_url', 'type': 'str'},
'project_url': {'key': 'properties.project_url', 'type': 'str'},
'icon_url': {'key': 'properties.icon_url', 'type': 'str'},
'license_url': {'key': 'properties.license_url', 'type': 'str'},
'feed_url': {'key': 'properties.feed_url', 'type': 'str'},
'authors': {'key': 'properties.authors', 'type': '[str]'},
'installer_command_line_params': {'key': 'properties.installer_command_line_params', 'type': 'str'},
'published_date_time': {'key': 'properties.published_date_time', 'type': 'iso-8601'},
'download_count': {'key': 'properties.download_count', 'type': 'int'},
'local_is_latest_version': {'key': 'properties.local_is_latest_version', 'type': 'bool'},
'local_path': {'key': 'properties.local_path', 'type': 'str'},
'installed_date_time': {'key': 'properties.installed_date_time', 'type': 'iso-8601'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'comment': {'key': 'properties.comment', 'type': 'str'},
}
def __init__(self, *, kind: str=None, extension_id: str=None, title: str=None, extension_type=None, summary: str=None, description: str=None, version: str=None, extension_url: str=None, project_url: str=None, icon_url: str=None, license_url: str=None, feed_url: str=None, authors=None, installer_command_line_params: str=None, published_date_time=None, download_count: int=None, local_is_latest_version: bool=None, local_path: str=None, installed_date_time=None, provisioning_state: str=None, comment: str=None, **kwargs) -> None:
super(SiteExtensionInfo, self).__init__(kind=kind, **kwargs)
self.extension_id = extension_id
self.title = title
self.extension_type = extension_type
self.summary = summary
self.description = description
self.version = version
self.extension_url = extension_url
self.project_url = project_url
self.icon_url = icon_url
self.license_url = license_url
self.feed_url = feed_url
self.authors = authors
self.installer_command_line_params = installer_command_line_params
self.published_date_time = published_date_time
self.download_count = download_count
self.local_is_latest_version = local_is_latest_version
self.local_path = local_path
self.installed_date_time = installed_date_time
self.provisioning_state = provisioning_state
self.comment = comment
```
#### File: web/models/top_level_domain_py3.py
```python
from .proxy_only_resource_py3 import ProxyOnlyResource
class TopLevelDomain(ProxyOnlyResource):
"""A top level domain object.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param privacy: If <code>true</code>, then the top level domain supports
domain privacy; otherwise, <code>false</code>.
:type privacy: bool
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'privacy': {'key': 'properties.privacy', 'type': 'bool'},
}
def __init__(self, *, kind: str=None, privacy: bool=None, **kwargs) -> None:
super(TopLevelDomain, self).__init__(kind=kind, **kwargs)
self.privacy = privacy
```
#### File: web/models/triggered_web_job.py
```python
from .proxy_only_resource import ProxyOnlyResource
class TriggeredWebJob(ProxyOnlyResource):
"""Triggered Web Job Information.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param latest_run: Latest job run information.
:type latest_run: ~azure.mgmt.web.models.TriggeredJobRun
:param history_url: History URL.
:type history_url: str
:param scheduler_logs_url: Scheduler Logs URL.
:type scheduler_logs_url: str
:param run_command: Run command.
:type run_command: str
:param url: Job URL.
:type url: str
:param extra_info_url: Extra Info URL.
:type extra_info_url: str
:param web_job_type: Job type. Possible values include: 'Continuous',
'Triggered'
:type web_job_type: str or ~azure.mgmt.web.models.WebJobType
:param error: Error information.
:type error: str
:param using_sdk: Using SDK?
:type using_sdk: bool
:param settings: Job settings.
:type settings: dict[str, object]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'latest_run': {'key': 'properties.latest_run', 'type': 'TriggeredJobRun'},
'history_url': {'key': 'properties.history_url', 'type': 'str'},
'scheduler_logs_url': {'key': 'properties.scheduler_logs_url', 'type': 'str'},
'run_command': {'key': 'properties.run_command', 'type': 'str'},
'url': {'key': 'properties.url', 'type': 'str'},
'extra_info_url': {'key': 'properties.extra_info_url', 'type': 'str'},
'web_job_type': {'key': 'properties.web_job_type', 'type': 'WebJobType'},
'error': {'key': 'properties.error', 'type': 'str'},
'using_sdk': {'key': 'properties.using_sdk', 'type': 'bool'},
'settings': {'key': 'properties.settings', 'type': '{object}'},
}
def __init__(self, **kwargs):
super(TriggeredWebJob, self).__init__(**kwargs)
self.latest_run = kwargs.get('latest_run', None)
self.history_url = kwargs.get('history_url', None)
self.scheduler_logs_url = kwargs.get('scheduler_logs_url', None)
self.run_command = kwargs.get('run_command', None)
self.url = kwargs.get('url', None)
self.extra_info_url = kwargs.get('extra_info_url', None)
self.web_job_type = kwargs.get('web_job_type', None)
self.error = kwargs.get('error', None)
self.using_sdk = kwargs.get('using_sdk', None)
self.settings = kwargs.get('settings', None)
```
#### File: web/models/validate_container_settings_request_py3.py
```python
from msrest.serialization import Model
class ValidateContainerSettingsRequest(Model):
"""Container settings validation request context.
:param base_url: Base URL of the container registry
:type base_url: str
:param username: Username used to access the container registry
:type username: str
:param password: Password used to access the container registry
:type password: str
:param repository: Repository name (image name)
:type repository: str
:param tag: Image tag
:type tag: str
:param platform: Platform (windows or linux)
:type platform: str
"""
_attribute_map = {
'base_url': {'key': 'baseUrl', 'type': 'str'},
'username': {'key': 'username', 'type': 'str'},
'password': {'key': 'password', 'type': 'str'},
'repository': {'key': 'repository', 'type': 'str'},
'tag': {'key': 'tag', 'type': 'str'},
'platform': {'key': 'platform', 'type': 'str'},
}
def __init__(self, *, base_url: str=None, username: str=None, password: str=None, repository: str=None, tag: str=None, platform: str=None, **kwargs) -> None:
super(ValidateContainerSettingsRequest, self).__init__(**kwargs)
self.base_url = base_url
self.username = username
self.password = password
self.repository = repository
self.tag = tag
self.platform = platform
```
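A hedged example of a validation payload for an image in a private registry; all of the values below are placeholders.
```python
# Assumed import path, based on the ~azure.mgmt.web.models references above.
from azure.mgmt.web.models import ValidateContainerSettingsRequest

# Check that a tagged image in a private registry can be pulled for a Linux app.
container_settings = ValidateContainerSettingsRequest(
    base_url="https://myregistry.azurecr.io",
    username="myregistry",
    password="<registry-password>",
    repository="myapp",
    tag="latest",
    platform="linux",
)
```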
#### File: web/models/validate_request_py3.py
```python
from msrest.serialization import Model
class ValidateRequest(Model):
"""Resource validation request content.
All required parameters must be populated in order to send to Azure.
:param name: Required. Resource name to verify.
:type name: str
:param type: Required. Resource type used for verification. Possible
values include: 'ServerFarm', 'Site'
:type type: str or ~azure.mgmt.web.models.ValidateResourceTypes
:param location: Required. Expected location of the resource.
:type location: str
:param server_farm_id: ARM resource ID of an App Service plan that would
host the app.
:type server_farm_id: str
:param sku_name: Name of the target SKU for the App Service plan.
:type sku_name: str
:param need_linux_workers: <code>true</code> if App Service plan is for
Linux workers; otherwise, <code>false</code>.
:type need_linux_workers: bool
:param is_spot: <code>true</code> if App Service plan is for Spot
instances; otherwise, <code>false</code>.
:type is_spot: bool
:param capacity: Target capacity of the App Service plan (number of VMs).
:type capacity: int
:param hosting_environment: Name of App Service Environment where app or
App Service plan should be created.
:type hosting_environment: str
:param is_xenon: <code>true</code> if the App Service plan is running as a
Windows container.
:type is_xenon: bool
"""
_validation = {
'name': {'required': True},
'type': {'required': True},
'location': {'required': True},
'capacity': {'minimum': 1},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'server_farm_id': {'key': 'properties.serverFarmId', 'type': 'str'},
'sku_name': {'key': 'properties.skuName', 'type': 'str'},
'need_linux_workers': {'key': 'properties.needLinuxWorkers', 'type': 'bool'},
'is_spot': {'key': 'properties.isSpot', 'type': 'bool'},
'capacity': {'key': 'properties.capacity', 'type': 'int'},
'hosting_environment': {'key': 'properties.hostingEnvironment', 'type': 'str'},
'is_xenon': {'key': 'properties.isXenon', 'type': 'bool'},
}
def __init__(self, *, name: str, type, location: str, server_farm_id: str=None, sku_name: str=None, need_linux_workers: bool=None, is_spot: bool=None, capacity: int=None, hosting_environment: str=None, is_xenon: bool=None, **kwargs) -> None:
super(ValidateRequest, self).__init__(**kwargs)
self.name = name
self.type = type
self.location = location
self.server_farm_id = server_farm_id
self.sku_name = sku_name
self.need_linux_workers = need_linux_workers
self.is_spot = is_spot
self.capacity = capacity
self.hosting_environment = hosting_environment
self.is_xenon = is_xenon
```
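A short sketch, assuming the `azure.mgmt.web.models` import path; the ARM resource ID below is a placeholder.
```python
# Assumed import path, based on the ~azure.mgmt.web.models references above.
from azure.mgmt.web.models import ValidateRequest

# Validate a Linux site against an existing App Service plan before creating it.
validate_request = ValidateRequest(
    name="my-webapp",
    type="Site",  # 'ServerFarm' or 'Site'
    location="westeurope",
    server_farm_id=(
        "/subscriptions/<subscription-id>/resourceGroups/<rg>/"
        "providers/Microsoft.Web/serverfarms/<plan-name>"
    ),
    need_linux_workers=True,
)
```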
#### File: web/models/vnet_gateway_py3.py
```python
from .proxy_only_resource_py3 import ProxyOnlyResource
class VnetGateway(ProxyOnlyResource):
"""The Virtual Network gateway contract. This is used to give the Virtual
Network gateway access to the VPN package.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param vnet_name: The Virtual Network name.
:type vnet_name: str
:param vpn_package_uri: Required. The URI where the VPN package can be
downloaded.
:type vpn_package_uri: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'vpn_package_uri': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'vnet_name': {'key': 'properties.vnetName', 'type': 'str'},
'vpn_package_uri': {'key': 'properties.vpnPackageUri', 'type': 'str'},
}
def __init__(self, *, vpn_package_uri: str, kind: str=None, vnet_name: str=None, **kwargs) -> None:
super(VnetGateway, self).__init__(kind=kind, **kwargs)
self.vnet_name = vnet_name
self.vpn_package_uri = vpn_package_uri
```
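A minimal sketch, assuming the `azure.mgmt.web.models` import path: `vpn_package_uri` is the only required field.
```python
# Assumed import path, based on the ~azure.mgmt.web.models references above.
from azure.mgmt.web.models import VnetGateway

# vnet_name identifies the Virtual Network; the package URI points at the VPN package.
gateway = VnetGateway(
    vpn_package_uri="https://example.blob.core.windows.net/vpn/package.zip",  # placeholder
    vnet_name="my-vnet",
)
```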
#### File: web/operations/diagnostics_operations.py
```python
import uuid
from msrest.pipeline import ClientRawResponse
from .. import models
class DiagnosticsOperations(object):
"""DiagnosticsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: API Version. Constant value: "2018-02-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2018-02-01"
self.config = config
def list_hosting_environment_detector_responses(
self, resource_group_name, name, custom_headers=None, raw=False, **operation_config):
"""List Hosting Environment Detector Responses.
List Hosting Environment Detector Responses.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param name: App Service Environment name
:type name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of DetectorResponse
:rtype:
~azure.mgmt.web.models.DetectorResponsePaged[~azure.mgmt.web.models.DetectorResponse]
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_hosting_environment_detector_responses.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
return response
# Deserialize response
deserialized = models.DetectorResponsePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.DetectorResponsePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_hosting_environment_detector_responses.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/detectors'}
def get_hosting_environment_detector_response(
self, resource_group_name, name, detector_name, start_time=None, end_time=None, time_grain=None, custom_headers=None, raw=False, **operation_config):
"""Get Hosting Environment Detector Response.
Get Hosting Environment Detector Response.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param name: App Service Environment Name
:type name: str
:param detector_name: Detector Resource Name
:type detector_name: str
:param start_time: Start Time
:type start_time: datetime
:param end_time: End Time
:type end_time: datetime
:param time_grain: Time Grain
:type time_grain: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DetectorResponse or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.web.models.DetectorResponse or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
# Construct URL
url = self.get_hosting_environment_detector_response.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'detectorName': self._serialize.url("detector_name", detector_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if start_time is not None:
query_parameters['startTime'] = self._serialize.query("start_time", start_time, 'iso-8601')
if end_time is not None:
query_parameters['endTime'] = self._serialize.query("end_time", end_time, 'iso-8601')
if time_grain is not None:
query_parameters['timeGrain'] = self._serialize.query("time_grain", time_grain, 'str', pattern=r'PT[1-9][0-9]+[SMH]')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DetectorResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_hosting_environment_detector_response.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/detectors/{detectorName}'}
def list_site_detector_responses(
self, resource_group_name, site_name, custom_headers=None, raw=False, **operation_config):
"""List Site Detector Responses.
List Site Detector Responses.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param site_name: Site Name
:type site_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of DetectorResponse
:rtype:
~azure.mgmt.web.models.DetectorResponsePaged[~azure.mgmt.web.models.DetectorResponse]
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_site_detector_responses.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
return response
# Deserialize response
deserialized = models.DetectorResponsePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.DetectorResponsePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_site_detector_responses.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/detectors'}
def get_site_detector_response(
self, resource_group_name, site_name, detector_name, start_time=None, end_time=None, time_grain=None, custom_headers=None, raw=False, **operation_config):
"""Get site detector response.
Get site detector response.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param site_name: Site Name
:type site_name: str
:param detector_name: Detector Resource Name
:type detector_name: str
:param start_time: Start Time
:type start_time: datetime
:param end_time: End Time
:type end_time: datetime
:param time_grain: Time Grain
:type time_grain: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DetectorResponse or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.web.models.DetectorResponse or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
# Construct URL
url = self.get_site_detector_response.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'detectorName': self._serialize.url("detector_name", detector_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if start_time is not None:
query_parameters['startTime'] = self._serialize.query("start_time", start_time, 'iso-8601')
if end_time is not None:
query_parameters['endTime'] = self._serialize.query("end_time", end_time, 'iso-8601')
if time_grain is not None:
query_parameters['timeGrain'] = self._serialize.query("time_grain", time_grain, 'str', pattern=r'PT[1-9][0-9]+[SMH]')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DetectorResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_site_detector_response.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/detectors/{detectorName}'}
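    # Illustrative usage sketch (kept as a comment so nothing executes at
    # import time). Assuming this operations class is exposed as
    # `client.diagnostics` on WebSiteManagementClient, a single detector
    # response could be fetched with:
    #
    #   resp = client.diagnostics.get_site_detector_response(
    #       "my-resource-group", "my-site", "runtimeavailability")
    #   print(resp.name)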
def list_site_diagnostic_categories(
self, resource_group_name, site_name, custom_headers=None, raw=False, **operation_config):
"""Get Diagnostics Categories.
Get Diagnostics Categories.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param site_name: Site Name
:type site_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of DiagnosticCategory
:rtype:
~azure.mgmt.web.models.DiagnosticCategoryPaged[~azure.mgmt.web.models.DiagnosticCategory]
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_site_diagnostic_categories.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
return response
# Deserialize response
deserialized = models.DiagnosticCategoryPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.DiagnosticCategoryPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_site_diagnostic_categories.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/diagnostics'}
def get_site_diagnostic_category(
self, resource_group_name, site_name, diagnostic_category, custom_headers=None, raw=False, **operation_config):
"""Get Diagnostics Category.
Get Diagnostics Category.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param site_name: Site Name
:type site_name: str
:param diagnostic_category: Diagnostic Category
:type diagnostic_category: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DiagnosticCategory or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.web.models.DiagnosticCategory or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
# Construct URL
url = self.get_site_diagnostic_category.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'diagnosticCategory': self._serialize.url("diagnostic_category", diagnostic_category, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DiagnosticCategory', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_site_diagnostic_category.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/diagnostics/{diagnosticCategory}'}
def list_site_analyses(
self, resource_group_name, site_name, diagnostic_category, custom_headers=None, raw=False, **operation_config):
"""Get Site Analyses.
Get Site Analyses.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param site_name: Site Name
:type site_name: str
:param diagnostic_category: Diagnostic Category
:type diagnostic_category: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of AnalysisDefinition
:rtype:
~azure.mgmt.web.models.AnalysisDefinitionPaged[~azure.mgmt.web.models.AnalysisDefinition]
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_site_analyses.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'diagnosticCategory': self._serialize.url("diagnostic_category", diagnostic_category, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
return response
# Deserialize response
deserialized = models.AnalysisDefinitionPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.AnalysisDefinitionPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_site_analyses.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/diagnostics/{diagnosticCategory}/analyses'}
def get_site_analysis(
self, resource_group_name, site_name, diagnostic_category, analysis_name, custom_headers=None, raw=False, **operation_config):
"""Get Site Analysis.
Get Site Analysis.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param site_name: Site Name
:type site_name: str
:param diagnostic_category: Diagnostic Category
:type diagnostic_category: str
:param analysis_name: Analysis Name
:type analysis_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DiagnosticAnalysis or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.web.models.DiagnosticAnalysis or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
# Construct URL
url = self.get_site_analysis.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'diagnosticCategory': self._serialize.url("diagnostic_category", diagnostic_category, 'str'),
'analysisName': self._serialize.url("analysis_name", analysis_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DiagnosticAnalysis', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_site_analysis.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/diagnostics/{diagnosticCategory}/analyses/{analysisName}'}
def execute_site_analysis(
self, resource_group_name, site_name, diagnostic_category, analysis_name, start_time=None, end_time=None, time_grain=None, custom_headers=None, raw=False, **operation_config):
"""Execute Analysis.
Execute Analysis.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param site_name: Site Name
:type site_name: str
:param diagnostic_category: Category Name
:type diagnostic_category: str
:param analysis_name: Analysis Resource Name
:type analysis_name: str
:param start_time: Start Time
:type start_time: datetime
:param end_time: End Time
:type end_time: datetime
:param time_grain: Time Grain
:type time_grain: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DiagnosticAnalysis or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.web.models.DiagnosticAnalysis or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
# Construct URL
url = self.execute_site_analysis.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'diagnosticCategory': self._serialize.url("diagnostic_category", diagnostic_category, 'str'),
'analysisName': self._serialize.url("analysis_name", analysis_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if start_time is not None:
query_parameters['startTime'] = self._serialize.query("start_time", start_time, 'iso-8601')
if end_time is not None:
query_parameters['endTime'] = self._serialize.query("end_time", end_time, 'iso-8601')
if time_grain is not None:
query_parameters['timeGrain'] = self._serialize.query("time_grain", time_grain, 'str', pattern=r'PT[1-9][0-9]+[SMH]')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DiagnosticAnalysis', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
execute_site_analysis.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/diagnostics/{diagnosticCategory}/analyses/{analysisName}/execute'}
def list_site_detectors(
self, resource_group_name, site_name, diagnostic_category, custom_headers=None, raw=False, **operation_config):
"""Get Detectors.
Get Detectors.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param site_name: Site Name
:type site_name: str
:param diagnostic_category: Diagnostic Category
:type diagnostic_category: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of DetectorDefinition
:rtype:
~azure.mgmt.web.models.DetectorDefinitionPaged[~azure.mgmt.web.models.DetectorDefinition]
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_site_detectors.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'diagnosticCategory': self._serialize.url("diagnostic_category", diagnostic_category, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
return response
# Deserialize response
deserialized = models.DetectorDefinitionPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.DetectorDefinitionPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_site_detectors.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/diagnostics/{diagnosticCategory}/detectors'}
def get_site_detector(
self, resource_group_name, site_name, diagnostic_category, detector_name, custom_headers=None, raw=False, **operation_config):
"""Get Detector.
Get Detector.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param site_name: Site Name
:type site_name: str
:param diagnostic_category: Diagnostic Category
:type diagnostic_category: str
:param detector_name: Detector Name
:type detector_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of DetectorDefinition
:rtype:
~azure.mgmt.web.models.DetectorDefinitionPaged[~azure.mgmt.web.models.DetectorDefinition]
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.get_site_detector.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'diagnosticCategory': self._serialize.url("diagnostic_category", diagnostic_category, 'str'),
'detectorName': self._serialize.url("detector_name", detector_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
return response
# Deserialize response
deserialized = models.DetectorDefinitionPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.DetectorDefinitionPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
get_site_detector.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/diagnostics/{diagnosticCategory}/detectors/{detectorName}'}
def execute_site_detector(
self, resource_group_name, site_name, detector_name, diagnostic_category, start_time=None, end_time=None, time_grain=None, custom_headers=None, raw=False, **operation_config):
"""Execute Detector.
Execute Detector.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param site_name: Site Name
:type site_name: str
:param detector_name: Detector Resource Name
:type detector_name: str
:param diagnostic_category: Category Name
:type diagnostic_category: str
:param start_time: Start Time
:type start_time: datetime
:param end_time: End Time
:type end_time: datetime
:param time_grain: Time Grain
:type time_grain: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DiagnosticDetectorResponse or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.web.models.DiagnosticDetectorResponse or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
# Construct URL
url = self.execute_site_detector.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'detectorName': self._serialize.url("detector_name", detector_name, 'str'),
'diagnosticCategory': self._serialize.url("diagnostic_category", diagnostic_category, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if start_time is not None:
query_parameters['startTime'] = self._serialize.query("start_time", start_time, 'iso-8601')
if end_time is not None:
query_parameters['endTime'] = self._serialize.query("end_time", end_time, 'iso-8601')
if time_grain is not None:
query_parameters['timeGrain'] = self._serialize.query("time_grain", time_grain, 'str', pattern=r'PT[1-9][0-9]+[SMH]')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DiagnosticDetectorResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
execute_site_detector.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/diagnostics/{diagnosticCategory}/detectors/{detectorName}/execute'}
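    # Illustrative usage sketch (comment only; the detector and category names
    # are placeholders, and datetime/timedelta imports are assumed). Note that
    # the timeGrain pattern used above requires at least two digits
    # (e.g. 'PT30M'), so a value such as 'PT1H' would fail client-side
    # validation.
    #
    #   result = client.diagnostics.execute_site_detector(
    #       "my-resource-group", "my-site", "sitecpuanalysis", "availability",
    #       start_time=datetime.utcnow() - timedelta(hours=6),
    #       end_time=datetime.utcnow(), time_grain="PT30M")
    #   print(result.issue_detected)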
def list_site_detector_responses_slot(
self, resource_group_name, site_name, slot, custom_headers=None, raw=False, **operation_config):
"""List Site Detector Responses.
List Site Detector Responses.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param site_name: Site Name
:type site_name: str
:param slot: Slot Name
:type slot: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of DetectorResponse
:rtype:
~azure.mgmt.web.models.DetectorResponsePaged[~azure.mgmt.web.models.DetectorResponse]
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_site_detector_responses_slot.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'slot': self._serialize.url("slot", slot, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
return response
# Deserialize response
deserialized = models.DetectorResponsePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.DetectorResponsePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_site_detector_responses_slot.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/slots/{slot}/detectors'}
def get_site_detector_response_slot(
self, resource_group_name, site_name, detector_name, slot, start_time=None, end_time=None, time_grain=None, custom_headers=None, raw=False, **operation_config):
"""Get site detector response.
Get site detector response.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param site_name: Site Name
:type site_name: str
:param detector_name: Detector Resource Name
:type detector_name: str
:param slot: Slot Name
:type slot: str
:param start_time: Start Time
:type start_time: datetime
:param end_time: End Time
:type end_time: datetime
:param time_grain: Time Grain
:type time_grain: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DetectorResponse or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.web.models.DetectorResponse or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
# Construct URL
url = self.get_site_detector_response_slot.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'detectorName': self._serialize.url("detector_name", detector_name, 'str'),
'slot': self._serialize.url("slot", slot, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if start_time is not None:
query_parameters['startTime'] = self._serialize.query("start_time", start_time, 'iso-8601')
if end_time is not None:
query_parameters['endTime'] = self._serialize.query("end_time", end_time, 'iso-8601')
if time_grain is not None:
query_parameters['timeGrain'] = self._serialize.query("time_grain", time_grain, 'str', pattern=r'PT[1-9][0-9]+[SMH]')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DetectorResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_site_detector_response_slot.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/slots/{slot}/detectors/{detectorName}'}
def list_site_diagnostic_categories_slot(
self, resource_group_name, site_name, slot, custom_headers=None, raw=False, **operation_config):
"""Get Diagnostics Categories.
Get Diagnostics Categories.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param site_name: Site Name
:type site_name: str
:param slot: Slot Name
:type slot: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of DiagnosticCategory
:rtype:
~azure.mgmt.web.models.DiagnosticCategoryPaged[~azure.mgmt.web.models.DiagnosticCategory]
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_site_diagnostic_categories_slot.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'slot': self._serialize.url("slot", slot, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
return response
# Deserialize response
deserialized = models.DiagnosticCategoryPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.DiagnosticCategoryPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_site_diagnostic_categories_slot.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/slots/{slot}/diagnostics'}
def get_site_diagnostic_category_slot(
self, resource_group_name, site_name, diagnostic_category, slot, custom_headers=None, raw=False, **operation_config):
"""Get Diagnostics Category.
Get Diagnostics Category.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param site_name: Site Name
:type site_name: str
:param diagnostic_category: Diagnostic Category
:type diagnostic_category: str
:param slot: Slot Name
:type slot: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DiagnosticCategory or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.web.models.DiagnosticCategory or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
# Construct URL
url = self.get_site_diagnostic_category_slot.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'diagnosticCategory': self._serialize.url("diagnostic_category", diagnostic_category, 'str'),
'slot': self._serialize.url("slot", slot, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DiagnosticCategory', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_site_diagnostic_category_slot.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/slots/{slot}/diagnostics/{diagnosticCategory}'}
def list_site_analyses_slot(
self, resource_group_name, site_name, diagnostic_category, slot, custom_headers=None, raw=False, **operation_config):
"""Get Site Analyses.
Get Site Analyses.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param site_name: Site Name
:type site_name: str
:param diagnostic_category: Diagnostic Category
:type diagnostic_category: str
:param slot: Slot Name
:type slot: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of AnalysisDefinition
:rtype:
~azure.mgmt.web.models.AnalysisDefinitionPaged[~azure.mgmt.web.models.AnalysisDefinition]
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_site_analyses_slot.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'diagnosticCategory': self._serialize.url("diagnostic_category", diagnostic_category, 'str'),
'slot': self._serialize.url("slot", slot, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
return response
# Deserialize response
deserialized = models.AnalysisDefinitionPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.AnalysisDefinitionPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_site_analyses_slot.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/slots/{slot}/diagnostics/{diagnosticCategory}/analyses'}
def get_site_analysis_slot(
self, resource_group_name, site_name, diagnostic_category, analysis_name, slot, custom_headers=None, raw=False, **operation_config):
"""Get Site Analysis.
Get Site Analysis.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param site_name: Site Name
:type site_name: str
:param diagnostic_category: Diagnostic Category
:type diagnostic_category: str
:param analysis_name: Analysis Name
:type analysis_name: str
        :param slot: Slot Name
:type slot: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DiagnosticAnalysis or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.web.models.DiagnosticAnalysis or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
# Construct URL
url = self.get_site_analysis_slot.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'diagnosticCategory': self._serialize.url("diagnostic_category", diagnostic_category, 'str'),
'analysisName': self._serialize.url("analysis_name", analysis_name, 'str'),
'slot': self._serialize.url("slot", slot, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DiagnosticAnalysis', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_site_analysis_slot.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/slots/{slot}/diagnostics/{diagnosticCategory}/analyses/{analysisName}'}
def execute_site_analysis_slot(
self, resource_group_name, site_name, diagnostic_category, analysis_name, slot, start_time=None, end_time=None, time_grain=None, custom_headers=None, raw=False, **operation_config):
"""Execute Analysis.
Execute Analysis.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param site_name: Site Name
:type site_name: str
:param diagnostic_category: Category Name
:type diagnostic_category: str
:param analysis_name: Analysis Resource Name
:type analysis_name: str
:param slot: Slot Name
:type slot: str
:param start_time: Start Time
:type start_time: datetime
:param end_time: End Time
:type end_time: datetime
:param time_grain: Time Grain
:type time_grain: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DiagnosticAnalysis or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.web.models.DiagnosticAnalysis or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
# Construct URL
url = self.execute_site_analysis_slot.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'diagnosticCategory': self._serialize.url("diagnostic_category", diagnostic_category, 'str'),
'analysisName': self._serialize.url("analysis_name", analysis_name, 'str'),
'slot': self._serialize.url("slot", slot, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if start_time is not None:
query_parameters['startTime'] = self._serialize.query("start_time", start_time, 'iso-8601')
if end_time is not None:
query_parameters['endTime'] = self._serialize.query("end_time", end_time, 'iso-8601')
if time_grain is not None:
query_parameters['timeGrain'] = self._serialize.query("time_grain", time_grain, 'str', pattern=r'PT[1-9][0-9]+[SMH]')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DiagnosticAnalysis', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
execute_site_analysis_slot.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/slots/{slot}/diagnostics/{diagnosticCategory}/analyses/{analysisName}/execute'}
def list_site_detectors_slot(
self, resource_group_name, site_name, diagnostic_category, slot, custom_headers=None, raw=False, **operation_config):
"""Get Detectors.
Get Detectors.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param site_name: Site Name
:type site_name: str
:param diagnostic_category: Diagnostic Category
:type diagnostic_category: str
:param slot: Slot Name
:type slot: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of DetectorDefinition
:rtype:
~azure.mgmt.web.models.DetectorDefinitionPaged[~azure.mgmt.web.models.DetectorDefinition]
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_site_detectors_slot.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'diagnosticCategory': self._serialize.url("diagnostic_category", diagnostic_category, 'str'),
'slot': self._serialize.url("slot", slot, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
return response
# Deserialize response
deserialized = models.DetectorDefinitionPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.DetectorDefinitionPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_site_detectors_slot.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/slots/{slot}/diagnostics/{diagnosticCategory}/detectors'}
def get_site_detector_slot(
self, resource_group_name, site_name, diagnostic_category, detector_name, slot, custom_headers=None, raw=False, **operation_config):
"""Get Detector.
Get Detector.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param site_name: Site Name
:type site_name: str
:param diagnostic_category: Diagnostic Category
:type diagnostic_category: str
:param detector_name: Detector Name
:type detector_name: str
:param slot: Slot Name
:type slot: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of DetectorDefinition
:rtype:
~azure.mgmt.web.models.DetectorDefinitionPaged[~azure.mgmt.web.models.DetectorDefinition]
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.get_site_detector_slot.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'diagnosticCategory': self._serialize.url("diagnostic_category", diagnostic_category, 'str'),
'detectorName': self._serialize.url("detector_name", detector_name, 'str'),
'slot': self._serialize.url("slot", slot, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
return response
# Deserialize response
deserialized = models.DetectorDefinitionPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.DetectorDefinitionPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
get_site_detector_slot.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/slots/{slot}/diagnostics/{diagnosticCategory}/detectors/{detectorName}'}
def execute_site_detector_slot(
self, resource_group_name, site_name, detector_name, diagnostic_category, slot, start_time=None, end_time=None, time_grain=None, custom_headers=None, raw=False, **operation_config):
"""Execute Detector.
Execute Detector.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param site_name: Site Name
:type site_name: str
:param detector_name: Detector Resource Name
:type detector_name: str
:param diagnostic_category: Category Name
:type diagnostic_category: str
:param slot: Slot Name
:type slot: str
:param start_time: Start Time
:type start_time: datetime
:param end_time: End Time
:type end_time: datetime
:param time_grain: Time Grain
:type time_grain: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DiagnosticDetectorResponse or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.web.models.DiagnosticDetectorResponse or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
# Construct URL
url = self.execute_site_detector_slot.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'detectorName': self._serialize.url("detector_name", detector_name, 'str'),
'diagnosticCategory': self._serialize.url("diagnostic_category", diagnostic_category, 'str'),
'slot': self._serialize.url("slot", slot, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if start_time is not None:
query_parameters['startTime'] = self._serialize.query("start_time", start_time, 'iso-8601')
if end_time is not None:
query_parameters['endTime'] = self._serialize.query("end_time", end_time, 'iso-8601')
if time_grain is not None:
query_parameters['timeGrain'] = self._serialize.query("time_grain", time_grain, 'str', pattern=r'PT[1-9][0-9]+[SMH]')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DiagnosticDetectorResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
execute_site_detector_slot.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/slots/{slot}/diagnostics/{diagnosticCategory}/detectors/{detectorName}/execute'}
```
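The long docstring above spells out the parameters accepted by `execute_site_detector_slot`; the sketch below is a hypothetical invocation through the generated management client, assuming the operation is exposed on the client's `diagnostics` operations group (the credential values, resource names, detector name, and time window are all placeholders, not values from the source).
```python
# Hypothetical call to execute_site_detector_slot; every concrete value below is
# a placeholder assumption, and the diagnostics operations group is assumed to
# expose this generated method.
from datetime import datetime, timedelta

from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.web import WebSiteManagementClient

credentials = ServicePrincipalCredentials(
    client_id='<client-id>', secret='<secret>', tenant='<tenant-id>')
client = WebSiteManagementClient(credentials, '<subscription-id>')

end_time = datetime.utcnow()
detector_response = client.diagnostics.execute_site_detector_slot(
    resource_group_name='my-resource-group',
    site_name='my-web-app',
    detector_name='runtimeavailability',
    diagnostic_category='availability',
    slot='staging',
    start_time=end_time - timedelta(hours=6),
    end_time=end_time,
    time_grain='PT30M',  # must match the PT[1-9][0-9]+[SMH] pattern enforced above
)
print(detector_response)  # DiagnosticDetectorResponse on HTTP 200
```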
#### File: examples/async_examples/example_queue_send_receive_batch_async.py
```python
import asyncio
import conftest
from azure.servicebus.aio import ServiceBusClient, Message
from azure.servicebus.common.constants import ReceiveSettleMode
async def sample_queue_send_receive_batch_async(sb_config, queue):
client = ServiceBusClient(
service_namespace=sb_config['hostname'],
shared_access_key_name=sb_config['key_name'],
shared_access_key_value=sb_config['access_key'],
debug=True)
queue_client = client.get_queue(queue)
async with queue_client.get_sender() as sender:
for i in range(100):
message = Message("Sample message no. {}".format(i))
await sender.send(message)
await sender.send(Message("shutdown"))
async with queue_client.get_receiver(idle_timeout=1, mode=ReceiveSettleMode.PeekLock, prefetch=10) as receiver:
# Receive list of messages as a batch
batch = await receiver.fetch_next(max_batch_size=10)
await asyncio.gather(*[m.complete() for m in batch])
# Receive messages as a continuous generator
async for message in receiver:
print("Message: {}".format(message))
print("Sequence number: {}".format(message.sequence_number))
await message.complete()
if __name__ == '__main__':
live_config = conftest.get_live_servicebus_config()
queue_name = conftest.create_standard_queue(live_config)
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(sample_queue_send_receive_batch_async(live_config, queue_name))
finally:
conftest.cleanup_queue(live_config, queue_name)
```
#### File: servicefabric/models/application_upgrade_progress_info.py
```python
from msrest.serialization import Model
class ApplicationUpgradeProgressInfo(Model):
"""Describes the parameters for an application upgrade.
:param name: The name of the target application, including the 'fabric:'
URI scheme.
:type name: str
:param type_name: The application type name as defined in the application
manifest.
:type type_name: str
:param target_application_type_version: The target application type
version (found in the application manifest) for the application upgrade.
:type target_application_type_version: str
:param upgrade_domains: List of upgrade domains and their statuses.
:type upgrade_domains: list[~azure.servicefabric.models.UpgradeDomainInfo]
:param upgrade_state: The state of the upgrade domain. Possible values
include: 'Invalid', 'RollingBackInProgress', 'RollingBackCompleted',
'RollingForwardPending', 'RollingForwardInProgress',
'RollingForwardCompleted', 'Failed'
:type upgrade_state: str or ~azure.servicefabric.models.UpgradeState
:param next_upgrade_domain: The name of the next upgrade domain to be
processed.
:type next_upgrade_domain: str
:param rolling_upgrade_mode: The mode used to monitor health during a
rolling upgrade. The values are UnmonitoredAuto, UnmonitoredManual, and
Monitored. Possible values include: 'Invalid', 'UnmonitoredAuto',
'UnmonitoredManual', 'Monitored'. Default value: "UnmonitoredAuto" .
:type rolling_upgrade_mode: str or ~azure.servicefabric.models.UpgradeMode
:param upgrade_description: Describes the parameters for an application
upgrade. Note that upgrade description replaces the existing application
description. This means that if the parameters are not specified, the
existing parameters on the applications will be overwritten with the empty
parameters list. This would result in the application using the default
value of the parameters from the application manifest. If you do not want
to change any existing parameter values, please get the application
parameters first using the GetApplicationInfo query and then supply those
values as Parameters in this ApplicationUpgradeDescription.
:type upgrade_description:
~azure.servicefabric.models.ApplicationUpgradeDescription
:param upgrade_duration_in_milliseconds: The estimated total amount of
time spent processing the overall upgrade.
:type upgrade_duration_in_milliseconds: str
:param upgrade_domain_duration_in_milliseconds: The estimated total amount
of time spent processing the current upgrade domain.
:type upgrade_domain_duration_in_milliseconds: str
:param unhealthy_evaluations: List of health evaluations that resulted in
the current aggregated health state.
:type unhealthy_evaluations:
list[~azure.servicefabric.models.HealthEvaluationWrapper]
:param current_upgrade_domain_progress: Information about the current
in-progress upgrade domain.
:type current_upgrade_domain_progress:
~azure.servicefabric.models.CurrentUpgradeDomainProgressInfo
:param start_timestamp_utc: The estimated UTC datetime when the upgrade
started.
:type start_timestamp_utc: str
:param failure_timestamp_utc: The estimated UTC datetime when the upgrade
failed and FailureAction was executed.
:type failure_timestamp_utc: str
:param failure_reason: The cause of an upgrade failure that resulted in
FailureAction being executed. Possible values include: 'None',
'Interrupted', 'HealthCheck', 'UpgradeDomainTimeout',
'OverallUpgradeTimeout'
:type failure_reason: str or ~azure.servicefabric.models.FailureReason
:param upgrade_domain_progress_at_failure: Information about the upgrade
domain progress at the time of upgrade failure.
:type upgrade_domain_progress_at_failure:
~azure.servicefabric.models.FailureUpgradeDomainProgressInfo
:param upgrade_status_details: Additional detailed information about the
status of the pending upgrade.
:type upgrade_status_details: str
"""
_attribute_map = {
'name': {'key': 'Name', 'type': 'str'},
'type_name': {'key': 'TypeName', 'type': 'str'},
'target_application_type_version': {'key': 'TargetApplicationTypeVersion', 'type': 'str'},
'upgrade_domains': {'key': 'UpgradeDomains', 'type': '[UpgradeDomainInfo]'},
'upgrade_state': {'key': 'UpgradeState', 'type': 'str'},
'next_upgrade_domain': {'key': 'NextUpgradeDomain', 'type': 'str'},
'rolling_upgrade_mode': {'key': 'RollingUpgradeMode', 'type': 'str'},
'upgrade_description': {'key': 'UpgradeDescription', 'type': 'ApplicationUpgradeDescription'},
'upgrade_duration_in_milliseconds': {'key': 'UpgradeDurationInMilliseconds', 'type': 'str'},
'upgrade_domain_duration_in_milliseconds': {'key': 'UpgradeDomainDurationInMilliseconds', 'type': 'str'},
'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'},
'current_upgrade_domain_progress': {'key': 'CurrentUpgradeDomainProgress', 'type': 'CurrentUpgradeDomainProgressInfo'},
'start_timestamp_utc': {'key': 'StartTimestampUtc', 'type': 'str'},
'failure_timestamp_utc': {'key': 'FailureTimestampUtc', 'type': 'str'},
'failure_reason': {'key': 'FailureReason', 'type': 'str'},
'upgrade_domain_progress_at_failure': {'key': 'UpgradeDomainProgressAtFailure', 'type': 'FailureUpgradeDomainProgressInfo'},
'upgrade_status_details': {'key': 'UpgradeStatusDetails', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ApplicationUpgradeProgressInfo, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.type_name = kwargs.get('type_name', None)
self.target_application_type_version = kwargs.get('target_application_type_version', None)
self.upgrade_domains = kwargs.get('upgrade_domains', None)
self.upgrade_state = kwargs.get('upgrade_state', None)
self.next_upgrade_domain = kwargs.get('next_upgrade_domain', None)
self.rolling_upgrade_mode = kwargs.get('rolling_upgrade_mode', "UnmonitoredAuto")
self.upgrade_description = kwargs.get('upgrade_description', None)
self.upgrade_duration_in_milliseconds = kwargs.get('upgrade_duration_in_milliseconds', None)
self.upgrade_domain_duration_in_milliseconds = kwargs.get('upgrade_domain_duration_in_milliseconds', None)
self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None)
self.current_upgrade_domain_progress = kwargs.get('current_upgrade_domain_progress', None)
self.start_timestamp_utc = kwargs.get('start_timestamp_utc', None)
self.failure_timestamp_utc = kwargs.get('failure_timestamp_utc', None)
self.failure_reason = kwargs.get('failure_reason', None)
self.upgrade_domain_progress_at_failure = kwargs.get('upgrade_domain_progress_at_failure', None)
self.upgrade_status_details = kwargs.get('upgrade_status_details', None)
```
#### File: servicefabric/models/backup_partition_description_py3.py
```python
from msrest.serialization import Model
class BackupPartitionDescription(Model):
"""Describes the parameters for triggering partition's backup.
:param backup_storage: Specifies the details of the backup storage where
to save the backup.
:type backup_storage: ~azure.servicefabric.models.BackupStorageDescription
"""
_attribute_map = {
'backup_storage': {'key': 'BackupStorage', 'type': 'BackupStorageDescription'},
}
def __init__(self, *, backup_storage=None, **kwargs) -> None:
super(BackupPartitionDescription, self).__init__(**kwargs)
self.backup_storage = backup_storage
```
#### File: servicefabric/models/cluster_health_chunk_py3.py
```python
from msrest.serialization import Model
class ClusterHealthChunk(Model):
"""Represents the health chunk of the cluster.
Contains the cluster aggregated health state, and the cluster entities that
respect the input filter.
:param health_state: The HealthState representing the aggregated health
state of the cluster computed by Health Manager.
The health evaluation of the entity reflects all events reported on the
entity and its children (if any).
The aggregation is done by applying the desired cluster health policy and
the application health policies. Possible values include: 'Invalid', 'Ok',
'Warning', 'Error', 'Unknown'
:type health_state: str or ~azure.servicefabric.models.HealthState
:param node_health_state_chunks: The list of node health state chunks in
the cluster that respect the filters in the cluster health chunk query
description.
:type node_health_state_chunks:
~azure.servicefabric.models.NodeHealthStateChunkList
:param application_health_state_chunks: The list of application health
state chunks in the cluster that respect the filters in the cluster health
chunk query description.
:type application_health_state_chunks:
~azure.servicefabric.models.ApplicationHealthStateChunkList
"""
_attribute_map = {
'health_state': {'key': 'HealthState', 'type': 'str'},
'node_health_state_chunks': {'key': 'NodeHealthStateChunks', 'type': 'NodeHealthStateChunkList'},
'application_health_state_chunks': {'key': 'ApplicationHealthStateChunks', 'type': 'ApplicationHealthStateChunkList'},
}
def __init__(self, *, health_state=None, node_health_state_chunks=None, application_health_state_chunks=None, **kwargs) -> None:
super(ClusterHealthChunk, self).__init__(**kwargs)
self.health_state = health_state
self.node_health_state_chunks = node_health_state_chunks
self.application_health_state_chunks = application_health_state_chunks
```
#### File: servicefabric/models/cluster_health_policy.py
```python
from msrest.serialization import Model
class ClusterHealthPolicy(Model):
"""Defines a health policy used to evaluate the health of the cluster or of a
cluster node.
:param consider_warning_as_error: Indicates whether warnings are treated
with the same severity as errors. Default value: False .
:type consider_warning_as_error: bool
:param max_percent_unhealthy_nodes: The maximum allowed percentage of
unhealthy nodes before reporting an error. For example, to allow 10% of
nodes to be unhealthy, this value would be 10.
The percentage represents the maximum tolerated percentage of nodes that
can be unhealthy before the cluster is considered in error.
If the percentage is respected but there is at least one unhealthy node,
the health is evaluated as Warning.
The percentage is calculated by dividing the number of unhealthy nodes
over the total number of nodes in the cluster.
The computation rounds up to tolerate one failure on small numbers of
nodes. Default percentage is zero.
In large clusters, some nodes will always be down or out for repairs, so
this percentage should be configured to tolerate that. Default value: 0 .
:type max_percent_unhealthy_nodes: int
:param max_percent_unhealthy_applications: The maximum allowed percentage
of unhealthy applications before reporting an error. For example, to allow
10% of applications to be unhealthy, this value would be 10.
The percentage represents the maximum tolerated percentage of applications
that can be unhealthy before the cluster is considered in error.
If the percentage is respected but there is at least one unhealthy
application, the health is evaluated as Warning.
This is calculated by dividing the number of unhealthy applications over
the total number of application instances in the cluster, excluding
applications of application types that are included in the
ApplicationTypeHealthPolicyMap.
The computation rounds up to tolerate one failure on small numbers of
applications. Default percentage is zero. Default value: 0 .
:type max_percent_unhealthy_applications: int
:param application_type_health_policy_map: Defines a map with max
percentage unhealthy applications for specific application types.
Each entry specifies as key the application type name and as value an
integer that represents the MaxPercentUnhealthyApplications percentage
used to evaluate the applications of the specified application type.
The application type health policy map can be used during cluster health
evaluation to describe special application types.
The application types included in the map are evaluated against the
percentage specified in the map, and not with the global
MaxPercentUnhealthyApplications defined in the cluster health policy.
The applications of application types specified in the map are not counted
against the global pool of applications.
For example, if some applications of a type are critical, the cluster
administrator can add an entry to the map for that application type
and assign it a value of 0% (that is, do not tolerate any failures).
All other applications can be evaluated with
MaxPercentUnhealthyApplications set to 20% to tolerate some failures out
of the thousands of application instances.
The application type health policy map is used only if the cluster
manifest enables application type health evaluation using the
configuration entry for
HealthManager/EnableApplicationTypeHealthEvaluation.
:type application_type_health_policy_map:
list[~azure.servicefabric.models.ApplicationTypeHealthPolicyMapItem]
"""
_attribute_map = {
'consider_warning_as_error': {'key': 'ConsiderWarningAsError', 'type': 'bool'},
'max_percent_unhealthy_nodes': {'key': 'MaxPercentUnhealthyNodes', 'type': 'int'},
'max_percent_unhealthy_applications': {'key': 'MaxPercentUnhealthyApplications', 'type': 'int'},
'application_type_health_policy_map': {'key': 'ApplicationTypeHealthPolicyMap', 'type': '[ApplicationTypeHealthPolicyMapItem]'},
}
def __init__(self, **kwargs):
super(ClusterHealthPolicy, self).__init__(**kwargs)
self.consider_warning_as_error = kwargs.get('consider_warning_as_error', False)
self.max_percent_unhealthy_nodes = kwargs.get('max_percent_unhealthy_nodes', 0)
self.max_percent_unhealthy_applications = kwargs.get('max_percent_unhealthy_applications', 0)
self.application_type_health_policy_map = kwargs.get('application_type_health_policy_map', None)
```
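The `max_percent_unhealthy_nodes` docstring above describes a round-up evaluation rule; the sketch below is only an illustration of that arithmetic, with made-up node counts and a 10% threshold rather than anything taken from the SDK.
```python
# Illustration of the MaxPercentUnhealthyNodes rule documented above; the node
# counts and the 10% threshold are made-up example values.
import math

def evaluate_nodes(unhealthy, total, max_percent_unhealthy_nodes=10):
    # Round up so one failure is tolerated on small clusters, as the docstring states.
    max_allowed = math.ceil(total * max_percent_unhealthy_nodes / 100)
    if unhealthy > max_allowed:
        return 'Error'
    return 'Warning' if unhealthy > 0 else 'Ok'

print(evaluate_nodes(0, 5))  # Ok
print(evaluate_nodes(1, 5))  # Warning: 1 <= ceil(5 * 0.10) = 1
print(evaluate_nodes(2, 5))  # Error:   2 >  1
```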
#### File: servicefabric/models/file_info.py
```python
from msrest.serialization import Model
class FileInfo(Model):
"""Information about a image store file.
:param file_size: The size of file in bytes.
:type file_size: str
:param file_version: Information about the version of image store file.
:type file_version: ~azure.servicefabric.models.FileVersion
:param modified_date: The date and time when the image store file was last
modified.
:type modified_date: datetime
:param store_relative_path: The file path relative to the image store root
path.
:type store_relative_path: str
"""
_attribute_map = {
'file_size': {'key': 'FileSize', 'type': 'str'},
'file_version': {'key': 'FileVersion', 'type': 'FileVersion'},
'modified_date': {'key': 'ModifiedDate', 'type': 'iso-8601'},
'store_relative_path': {'key': 'StoreRelativePath', 'type': 'str'},
}
def __init__(self, **kwargs):
super(FileInfo, self).__init__(**kwargs)
self.file_size = kwargs.get('file_size', None)
self.file_version = kwargs.get('file_version', None)
self.modified_date = kwargs.get('modified_date', None)
self.store_relative_path = kwargs.get('store_relative_path', None)
```
#### File: servicefabric/models/http_config_py3.py
```python
from msrest.serialization import Model
class HttpConfig(Model):
"""Describes the http configuration for external connectivity for this
network.
All required parameters must be populated in order to send to Azure.
:param name: Required. http gateway config name.
:type name: str
:param port: Required. Specifies the port at which the service endpoint
below needs to be exposed.
:type port: int
:param hosts: Required. description for routing.
:type hosts: list[~azure.servicefabric.models.HttpHostConfig]
"""
_validation = {
'name': {'required': True},
'port': {'required': True},
'hosts': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'port': {'key': 'port', 'type': 'int'},
'hosts': {'key': 'hosts', 'type': '[HttpHostConfig]'},
}
def __init__(self, *, name: str, port: int, hosts, **kwargs) -> None:
super(HttpConfig, self).__init__(**kwargs)
self.name = name
self.port = port
self.hosts = hosts
```
#### File: servicefabric/models/named_partition_scheme_description.py
```python
from .partition_scheme_description import PartitionSchemeDescription
class NamedPartitionSchemeDescription(PartitionSchemeDescription):
"""Describes the named partition scheme of the service.
All required parameters must be populated in order to send to Azure.
:param partition_scheme: Required. Constant filled by server.
:type partition_scheme: str
:param count: Required. The number of partitions.
:type count: int
:param names: Required. Array of size specified by the ‘Count’ parameter,
for the names of the partitions.
:type names: list[str]
"""
_validation = {
'partition_scheme': {'required': True},
'count': {'required': True},
'names': {'required': True},
}
_attribute_map = {
'partition_scheme': {'key': 'PartitionScheme', 'type': 'str'},
'count': {'key': 'Count', 'type': 'int'},
'names': {'key': 'Names', 'type': '[str]'},
}
def __init__(self, **kwargs):
super(NamedPartitionSchemeDescription, self).__init__(**kwargs)
self.count = kwargs.get('count', None)
self.names = kwargs.get('names', None)
self.partition_scheme = 'Named'
```
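Since `names` must be an array whose size matches `count`, a minimal instantiation sketch follows; it assumes the class above is in scope, and the partition names are placeholders.
```python
# Hypothetical instantiation of the model above; partition names are placeholders.
scheme = NamedPartitionSchemeDescription(count=3, names=['Range-A', 'Range-B', 'Range-C'])
print(scheme.partition_scheme)            # 'Named', the constant filled in by the class
print(len(scheme.names) == scheme.count)  # True: Names has exactly Count entries
```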
#### File: servicefabric/models/node_health_state_filter_py3.py
```python
from msrest.serialization import Model
class NodeHealthStateFilter(Model):
"""Defines matching criteria to determine whether a node should be included in
the returned cluster health chunk.
One filter can match zero, one or multiple nodes, depending on its
properties.
Can be specified in the cluster health chunk query description.
:param node_name_filter: Name of the node that matches the filter. The
filter is applied only to the specified node, if it exists.
If the node doesn't exist, no node is returned in the cluster health chunk
based on this filter.
If the node exists, it is included in the cluster health chunk if the
health state matches the other filter properties.
If not specified, all nodes that match the parent filters (if any) are
taken into consideration and matched against the other filter members,
like health state filter.
:type node_name_filter: str
:param health_state_filter: The filter for the health state of the nodes.
It allows selecting nodes if they match the desired health states.
The possible values are integer value of one of the following health
states. Only nodes that match the filter are returned. All nodes are used
to evaluate the cluster aggregated health state.
If not specified, default value is None, unless the node name is
specified. If the filter has default value and node name is specified, the
matching node is returned.
The state values are flag-based enumeration, so the value could be a
combination of these values obtained using bitwise 'OR' operator.
For example, if the provided value is 6, it matches nodes with HealthState
value of OK (2) and Warning (4).
- Default - Default value. Matches any HealthState. The value is zero.
- None - Filter that doesn't match any HealthState value. Used in order to
return no results on a given collection of states. The value is 1.
- Ok - Filter that matches input with HealthState value Ok. The value is
2.
- Warning - Filter that matches input with HealthState value Warning. The
value is 4.
- Error - Filter that matches input with HealthState value Error. The
value is 8.
- All - Filter that matches input with any HealthState value. The value is
65535. Default value: 0 .
:type health_state_filter: int
"""
_attribute_map = {
'node_name_filter': {'key': 'NodeNameFilter', 'type': 'str'},
'health_state_filter': {'key': 'HealthStateFilter', 'type': 'int'},
}
def __init__(self, *, node_name_filter: str=None, health_state_filter: int=0, **kwargs) -> None:
super(NodeHealthStateFilter, self).__init__(**kwargs)
self.node_name_filter = node_name_filter
self.health_state_filter = health_state_filter
```
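The `health_state_filter` field above is a flag-based enumeration; this small sketch (illustrative constants only) shows how the documented values combine with bitwise OR to produce the value 6 used in the docstring's example.
```python
# Flag values as documented above: Default=0, None=1, Ok=2, Warning=4, Error=8, All=65535.
HEALTH_OK, HEALTH_WARNING, HEALTH_ERROR = 2, 4, 8

# Match nodes whose aggregated health is Ok or Warning.
health_state_filter = HEALTH_OK | HEALTH_WARNING
print(health_state_filter)                       # 6, as in the docstring's example
print(bool(health_state_filter & HEALTH_ERROR))  # False -> Error nodes are excluded
```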
#### File: servicefabric/models/node_load_metric_information.py
```python
from msrest.serialization import Model
class NodeLoadMetricInformation(Model):
"""Represents data structure that contains load information for a certain
metric on a node.
:param name: Name of the metric for which this load information is
provided.
:type name: str
:param node_capacity: Total capacity on the node for this metric.
:type node_capacity: str
:param node_load: Current load on the node for this metric.
:type node_load: str
:param node_remaining_capacity: The remaining capacity on the node for
this metric.
:type node_remaining_capacity: str
:param is_capacity_violation: Indicates if there is a capacity violation
for this metric on the node.
:type is_capacity_violation: bool
:param node_buffered_capacity: The value that indicates the reserved
capacity for this metric on the node.
:type node_buffered_capacity: str
:param node_remaining_buffered_capacity: The remaining reserved capacity
for this metric on the node.
:type node_remaining_buffered_capacity: str
"""
_attribute_map = {
'name': {'key': 'Name', 'type': 'str'},
'node_capacity': {'key': 'NodeCapacity', 'type': 'str'},
'node_load': {'key': 'NodeLoad', 'type': 'str'},
'node_remaining_capacity': {'key': 'NodeRemainingCapacity', 'type': 'str'},
'is_capacity_violation': {'key': 'IsCapacityViolation', 'type': 'bool'},
'node_buffered_capacity': {'key': 'NodeBufferedCapacity', 'type': 'str'},
'node_remaining_buffered_capacity': {'key': 'NodeRemainingBufferedCapacity', 'type': 'str'},
}
def __init__(self, **kwargs):
super(NodeLoadMetricInformation, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.node_capacity = kwargs.get('node_capacity', None)
self.node_load = kwargs.get('node_load', None)
self.node_remaining_capacity = kwargs.get('node_remaining_capacity', None)
self.is_capacity_violation = kwargs.get('is_capacity_violation', None)
self.node_buffered_capacity = kwargs.get('node_buffered_capacity', None)
self.node_remaining_buffered_capacity = kwargs.get('node_remaining_buffered_capacity', None)
```
#### File: servicefabric/models/primary_replicator_status_py3.py
```python
from .replicator_status_py3 import ReplicatorStatus
class PrimaryReplicatorStatus(ReplicatorStatus):
"""Provides statistics about the Service Fabric Replicator, when it is
functioning in a Primary role.
All required parameters must be populated in order to send to Azure.
:param kind: Required. Constant filled by server.
:type kind: str
:param replication_queue_status: Details about the replication queue on
the primary replicator.
:type replication_queue_status:
~azure.servicefabric.models.ReplicatorQueueStatus
:param remote_replicators: The status of all the active and idle secondary
replicators that the primary is aware of.
:type remote_replicators:
list[~azure.servicefabric.models.RemoteReplicatorStatus]
"""
_validation = {
'kind': {'required': True},
}
_attribute_map = {
'kind': {'key': 'Kind', 'type': 'str'},
'replication_queue_status': {'key': 'ReplicationQueueStatus', 'type': 'ReplicatorQueueStatus'},
'remote_replicators': {'key': 'RemoteReplicators', 'type': '[RemoteReplicatorStatus]'},
}
def __init__(self, *, replication_queue_status=None, remote_replicators=None, **kwargs) -> None:
super(PrimaryReplicatorStatus, self).__init__(**kwargs)
self.replication_queue_status = replication_queue_status
self.remote_replicators = remote_replicators
self.kind = 'Primary'
```
#### File: servicefabric/models/restore_partition_description.py
```python
from msrest.serialization import Model
class RestorePartitionDescription(Model):
"""Specifies the parameters needed to trigger a restore of a specific
partition.
All required parameters must be populated in order to send to Azure.
:param backup_id: Required. Unique backup ID.
:type backup_id: str
:param backup_location: Required. Location of the backup relative to the
backup storage specified/ configured.
:type backup_location: str
:param backup_storage: Location of the backup from where the partition
will be restored.
:type backup_storage: ~azure.servicefabric.models.BackupStorageDescription
"""
_validation = {
'backup_id': {'required': True},
'backup_location': {'required': True},
}
_attribute_map = {
'backup_id': {'key': 'BackupId', 'type': 'str'},
'backup_location': {'key': 'BackupLocation', 'type': 'str'},
'backup_storage': {'key': 'BackupStorage', 'type': 'BackupStorageDescription'},
}
def __init__(self, **kwargs):
super(RestorePartitionDescription, self).__init__(**kwargs)
self.backup_id = kwargs.get('backup_id', None)
self.backup_location = kwargs.get('backup_location', None)
self.backup_storage = kwargs.get('backup_storage', None)
```
#### File: servicefabric/models/service_type_health_policy.py
```python
from msrest.serialization import Model
class ServiceTypeHealthPolicy(Model):
"""Represents the health policy used to evaluate the health of services
belonging to a service type.
:param max_percent_unhealthy_partitions_per_service: The maximum allowed
percentage of unhealthy partitions per service. Allowed values are Byte
values from zero to 100
The percentage represents the maximum tolerated percentage of partitions
that can be unhealthy before the service is considered in error.
If the percentage is respected but there is at least one unhealthy
partition, the health is evaluated as Warning.
The percentage is calculated by dividing the number of unhealthy
partitions over the total number of partitions in the service.
The computation rounds up to tolerate one failure on small numbers of
partitions. Default percentage is zero. Default value: 0 .
:type max_percent_unhealthy_partitions_per_service: int
:param max_percent_unhealthy_replicas_per_partition: The maximum allowed
percentage of unhealthy replicas per partition. Allowed values are Byte
values from zero to 100.
The percentage represents the maximum tolerated percentage of replicas
that can be unhealthy before the partition is considered in error.
If the percentage is respected but there is at least one unhealthy
replica, the health is evaluated as Warning.
The percentage is calculated by dividing the number of unhealthy replicas
over the total number of replicas in the partition.
The computation rounds up to tolerate one failure on small numbers of
replicas. Default percentage is zero. Default value: 0 .
:type max_percent_unhealthy_replicas_per_partition: int
:param max_percent_unhealthy_services: The maximum allowed percentage of
unhealthy services. Allowed values are Byte values from zero to 100.
The percentage represents the maximum tolerated percentage of services
that can be unhealthy before the application is considered in error.
If the percentage is respected but there is at least one unhealthy
service, the health is evaluated as Warning.
This is calculated by dividing the number of unhealthy services of the
specific service type over the total number of services of the specific
service type.
The computation rounds up to tolerate one failure on small numbers of
services. Default percentage is zero. Default value: 0 .
:type max_percent_unhealthy_services: int
"""
_attribute_map = {
'max_percent_unhealthy_partitions_per_service': {'key': 'MaxPercentUnhealthyPartitionsPerService', 'type': 'int'},
'max_percent_unhealthy_replicas_per_partition': {'key': 'MaxPercentUnhealthyReplicasPerPartition', 'type': 'int'},
'max_percent_unhealthy_services': {'key': 'MaxPercentUnhealthyServices', 'type': 'int'},
}
def __init__(self, **kwargs):
super(ServiceTypeHealthPolicy, self).__init__(**kwargs)
self.max_percent_unhealthy_partitions_per_service = kwargs.get('max_percent_unhealthy_partitions_per_service', 0)
self.max_percent_unhealthy_replicas_per_partition = kwargs.get('max_percent_unhealthy_replicas_per_partition', 0)
self.max_percent_unhealthy_services = kwargs.get('max_percent_unhealthy_services', 0)
``` |
{
"source": "Jonathan-GC/API-OCPP-PORT",
"score": 3
} |
#### File: app/central_ocpp/central.py
```python
import asyncio
import logging
from datetime import datetime
import json
import websockets
import sys
# Key imports
from ocpp.routing import on, after
from ocpp.v16 import ChargePoint as cp
from ocpp.v16.enums import Action, RegistrationStatus, AuthorizationStatus, RemoteStartStopStatus
from ocpp.v16 import call_result, call
# Logging level
logging.basicConfig(level=logging.INFO)
logging.basicConfig()
STATE = {"value": 0}
USERS = set()
hayControlRemoto = 0
EV = None
# Transaction variables
idConector = 0
idTag = ""
def state_event():
recibe = json.dumps({"type": "state", **STATE})
return recibe
def users_event():
recibe = json.dumps({"type": "users", "count": len(USERS)})
return recibe
async def notify_state():
if USERS: # asyncio.wait doesn't accept an empty list
message = state_event()
print(message)
await asyncio.wait([user.send(message) for user in USERS])
async def notify_users():
if USERS: # asyncio.wait doesn't accept an empty list
message = users_event()
print(message)
await asyncio.wait([user.send(message) for user in USERS])
async def register(websocket):
print(websocket)
USERS.add(websocket)
await notify_users()
async def unregister(websocket):
USERS.remove(websocket)
await notify_users()
async def counter(websocket, path, objeto_ocpp = None):
# register(websocket) sends user_event() to websocket
await register(websocket)
try:
await websocket.send(state_event())
async for message in websocket:
data = json.loads(message)
print(data)
if data["action"] == "Stop":
STATE["value"] = 0
await notify_state()
#objeto_ocpp.on_remote_start_transaction( {"status" : "Accepted"})
if(objeto_ocpp != None ):
await objeto_ocpp.enviarOrden(STATE["value"])
#await cp2.enviar(message)
elif data["action"] == "Start":
STATE["value"] = 1
await notify_state()
#objeto_ocpp.on_remote_start_transaction({"status" : "Accepted"})
if(objeto_ocpp != None ):
await objeto_ocpp.enviarOrden(STATE["value"])
else:
                logging.error("unsupported event: %s", data)
finally:
await unregister(websocket)
class ChargePoint(cp):
    # Main decorator handling client requests
@on(Action.BootNotification)
    def on_boot_notification(self, charge_point_vendor: str, charge_point_model: str, **kwargs):
return call_result.BootNotificationPayload(
current_time=datetime.utcnow().isoformat(),
interval=10,
status=RegistrationStatus.accepted
)
    # Decorator that runs after the client has been accepted
@after(Action.BootNotification)
def after_boot_notification(self, charge_point_vendor: str, charge_point_model: str, **kwargs):
print("Conexion Mongo o SQL y verificaciones del sistema")
    # The try/except that originally wrapped this handler could never trigger
    # (defining a method does not raise), so the handler is defined directly.
    @on(Action.Authorize)
    def on_authorize_response(self, id_tag: str, **kwargs):
        global idTag
        idTag = id_tag
        print("He recibido: ", id_tag)
        return call_result.AuthorizePayload(
            id_tag_info={
                "status" : AuthorizationStatus.accepted
            }
        )
@on(Action.StartTransaction)
def on_start_transaction(self, connector_id: int, id_tag: str, meter_start: int, timestamp: str, **kwargs):
return call_result.StartTransactionPayload(
transaction_id=connector_id,
id_tag_info={
"status" : AuthorizationStatus.accepted
}
)
@after(Action.StartTransaction)
def imprimirJoder(self, connector_id: int, id_tag: str, meter_start: int, timestamp: str, **kwargs):
print("dispensado de energia ", meter_start, "units")
print("Otras medidas: ", **kwargs)
@on(Action.StopTransaction)
def on_stop_transaction(self, transaction_id: int, timestamp: str, meter_stop: int, **kwargs):
return call_result.StopTransactionPayload(
id_tag_info={
"status" : AuthorizationStatus.accepted
}
)
@after(Action.StopTransaction)
def imprimir(self, transaction_id: int, timestamp: str, meter_stop: int, **kwargs):
print("Deteniendo Transaccion en", meter_stop, "units recargadas", "id de transaccion: ", transaction_id)
@on(Action.Heartbeat)
def on_heartbeat(self, **kwargs):
return call_result.HeartbeatPayload(
current_time=datetime.utcnow().isoformat()
)
@after(Action.Heartbeat)
def imprimirMenssage(self):
print("tomando Pulso del cargador")
@on(Action.StatusNotification)
def on_status_notification(self, connector_id: int, error_code: str, status: str, timestamp: str, info: str, vendor_id: str, vendor_error_code: str, **kwargs):
global idConector
idConector = connector_id
return call_result.StatusNotificationPayload(
)
@after(Action.StatusNotification)
    # Renamed from imprimirMenssage so it does not overwrite the Heartbeat after-handler of the same name.
    def imprimirMenssageStatus(self, connector_id: int, error_code: str, status: str, timestamp: str, info: str, vendor_id: str, vendor_error_code: str, **kwargs):
        print("tomando Pulso del cargador")
@on(Action.MeterValues)
    def on_meter_values(self, connector_id: int, **kwargs):
return call_result.MeterValuesPayload(
)
async def notify_stateCP(self):
if USERS: # asyncio.wait doesn't accept an empty list
message = state_event()
print("entro en CP: ", message)
await asyncio.wait([user.send(message) for user in USERS])
async def enviarOrden(self, run = None):
global idTag
global idConector
if run:
print("enviando orden de carga remota")
msn = call.RemoteStartTransactionPayload(
id_tag = str(idTag),
connector_id = idConector
)
response = await self.call(msn)
else:
print("Detener orden de carga remota")
msn= call.RemoteStopTransactionPayload(
transaction_id = 1
)
response = await self.call(msn)
async def on_connect(websocket, path):
""" For every new charge point that connects, create a ChargePoint instance
and start listening for messages.
"""
try:
global EV
global hayControlRemoto
charge_point_id = path.strip('/')
if (charge_point_id != "RemotoControl" ):
print("Es un cargador")
cp = ChargePoint(charge_point_id, websocket)
EV = cp
print ("EV es cp: ", EV is cp)
print (EV)
await cp.start()
else:
print("Es un Control Remoto")
print (EV)
await counter(websocket, path, EV)
except websockets.exceptions.ConnectionClosedOK:
print ("Cliente Cerrado")
async def main():
server2 = await websockets.serve(
on_connect,
'localhost',
#'0.0.0.0',
9000,
#'192.168.127.12',
#8080,
subprotocols=['ocpp1.6']
)
await server2.wait_closed()
if __name__ == '__main__':
try:
# asyncio.run() is used when running this example with Python 3.7 and
# higher.
asyncio.run(main())
except AttributeError:
# For Python 3.6 a bit more code is required to run the main() task on
# an event loop.
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
loop.close()
``` |
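central.py above accepts charge point connections over the 'ocpp1.6' subprotocol and treats the path /RemotoControl as the remote-control client; the sketch below is a minimal, hypothetical charge point client that would exercise the BootNotification and Heartbeat handlers (the charge point id, vendor, and model strings are placeholder assumptions).
```python
# Minimal charge point client sketch for exercising central.py above; the id,
# vendor and model strings are placeholders, not part of the original project.
import asyncio
import websockets

from ocpp.v16 import call, ChargePoint as cp
from ocpp.v16.enums import RegistrationStatus


class ChargePointClient(cp):
    async def send_boot_notification(self):
        request = call.BootNotificationPayload(
            charge_point_model="DemoModel",
            charge_point_vendor="DemoVendor",
        )
        response = await self.call(request)
        if response.status == RegistrationStatus.accepted:
            # central.py answers with interval=10, so keep the connection alive.
            while True:
                await self.call(call.HeartbeatPayload())
                await asyncio.sleep(response.interval)


async def main():
    async with websockets.connect(
        "ws://localhost:9000/CP_1", subprotocols=["ocpp1.6"]
    ) as ws:
        charge_point = ChargePointClient("CP_1", ws)
        await asyncio.gather(charge_point.start(),
                             charge_point.send_boot_notification())


if __name__ == "__main__":
    asyncio.run(main())
```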
{
"source": "jonathangiguere/Airplane_Sequence_Prediction",
"score": 3
} |
#### File: jonathan-giguere-individual-project/Code/mywork.py
```python
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from pickle import load
from sklearn.metrics import classification_report, cohen_kappa_score, confusion_matrix
from keras.models import Model
from keras.layers import Dense, Input
from keras.layers import LSTM
# set run name
run_name = 'DL_50'
# set model name
model_name = 'seq2seq'
print(f'Run name is {run_name} and model name is {model_name}.')
# load tokenizer, get vocab_size, and load x, y
tokenizer = load(open(f'/home/ubuntu/Final-Project-Group1/Models/{run_name}_tokenizer.pkl', 'rb'))
vocab_size = len(tokenizer.word_index) + 1
x_train = np.load(f'/home/ubuntu/Final-Project-Group1/Data 2/{run_name}_x_train.npy')
y_train = np.load(f'/home/ubuntu/Final-Project-Group1/Data 2/{run_name}_y_train.npy')
x_test = np.load(f'/home/ubuntu/Final-Project-Group1/Data 2/{run_name}_x_test.npy')
y_test = np.load(f'/home/ubuntu/Final-Project-Group1/Data 2/{run_name}_y_test.npy')
# Undo one hot encoding on target variables
y_train = np.argmax(y_train, axis=1)
y_train = np.reshape(y_train, (len(y_train), -1))
y_test = np.argmax(y_test, axis=1)
y_test = np.reshape(y_test, (len(y_test), -1))
# Combine x and y to be split however works best
x_train = np.concatenate((x_train, y_train), axis=1)
# Do the same for test data
x_test = np.concatenate((x_test, y_test), axis=1)
del y_train, y_test # No longer needed
# Check shapes before splitting
print(x_train.shape)
print(x_test.shape)
print()
# Define length of beginning sequence
split = 49
# Train a model for each split
# Results and models are saved under /home/ubuntu/Final-Project-Group1/Models/ using the run and model name prefixes
seq_beg, other, seq_end = np.split(x_train, [split, split], axis=1) # Split data and analyze shapes
print(other.shape)
del other # remove this useless output
print(seq_beg.shape)
print(seq_end.shape)
print()
# Add special beginning and end tags to training ending sequences
seq_end = np.insert(seq_end, 0, 1111, axis=1)
seq_end = np.insert(seq_end, seq_end.shape[1], 9999, axis=1)
print(seq_end.shape)
print()
# Also split test data and analyze shapes
seq_beg_test, other, seq_end_test = np.split(x_test, [split, split], axis=1)
print(other.shape)
del other
print(seq_beg_test.shape)
print(seq_end_test.shape)
print()
# Add special beginning and end tags to testing ending sequences
seq_end_test = np.insert(seq_end_test, 0, 1111, axis=1)
seq_end_test = np.insert(seq_end_test, seq_end_test.shape[1], 9999, axis=1)
print(seq_end_test.shape)
print()
# Store all unique airport IDs in a list
airports = x_train.flatten().tolist()
airports.append(1111) # Add the special characters so they will be in dictionaries
airports.append(9999)
airports = set(airports)
airports = sorted(list(airports))
# dictionary to index each airport - key is index and value is airport
index_to_airport_dict = {}
# dictionary to get airport given its index - key is airport and value is index
airport_to_index_dict = {}
for k, v in enumerate(airports):
index_to_airport_dict[k] = v
airport_to_index_dict[v] = k
# Get empty numpy arrays to tokenize the training sequences
tokenized_seq_beg = np.zeros(shape=(seq_beg.shape[0], seq_beg.shape[1], len(airports)), dtype='float32')
tokenized_seq_end = np.zeros(shape=(seq_end.shape[0], seq_end.shape[1], len(airports)), dtype='float32')
target_data = np.zeros(shape=(seq_end.shape[0], seq_end.shape[1], len(airports)), dtype='float32')
# Vectorize the beginning and ending sequences for training data
for i in range(seq_beg.shape[0]):
for k, ch in enumerate(seq_beg[i]):
tokenized_seq_beg[i, k, airport_to_index_dict[ch]] = 1
for k, ch in enumerate(seq_end[i]):
tokenized_seq_end[i, k, airport_to_index_dict[ch]] = 1
# decoder_target_data will be ahead by one timestep and will not include the start airport.
if k > 0:
target_data[i, k - 1, airport_to_index_dict[ch]] = 1
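# To make the teacher-forcing offset above concrete (airport ids are illustrative):
# if seq_end[i] is [1111, 392, 417, 9999], tokenized_seq_end one-hot encodes the full
# tagged sequence, while target_data encodes [392, 417, 9999] shifted one step earlier,
# so at every timestep the decoder learns to predict the next airport from the previous one.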
# Get empty numpy array to tokenize the beginning test sequences to be fed at evaluation time
tokenized_seq_beg_test = np.zeros(shape=(seq_beg_test.shape[0], seq_beg_test.shape[1], len(airports)), dtype='float32')
# Vectorize the beginning sequences for test data to be fed to encoder
for i in range(seq_beg_test.shape[0]):
for k, ch in enumerate(seq_beg_test[i]):
tokenized_seq_beg_test[i, k, airport_to_index_dict[ch]] = 1
# hyperparameters
N_NEURONS = 256
N_EPOCHS = 6
BATCH_SIZE = 64
# Encoder Model
encoder_input = Input(shape=(None, len(airports)))
encoder_LSTM = LSTM(N_NEURONS, return_state=True)
encoder_outputs, encoder_h, encoder_c = encoder_LSTM(encoder_input)
encoder_states = [encoder_h, encoder_c] # These states are passed to decoder LSTM layer
# Decoder model
decoder_input = Input(shape=(None, len(airports)))
decoder_LSTM = LSTM(N_NEURONS, return_sequences=True, return_state=True)
decoder_out, _, _ = decoder_LSTM(decoder_input, initial_state=encoder_states)
decoder_dense = Dense(len(airports), activation='softmax')
decoder_out = decoder_dense(decoder_out)
model = Model(inputs=[encoder_input, decoder_input], outputs=[decoder_out])
# Run training
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
history = model.fit(x=[tokenized_seq_beg, tokenized_seq_end], y=target_data,
batch_size=BATCH_SIZE, epochs=N_EPOCHS, validation_split=.2)
# Visualize training process
plt.plot(history.history['loss'], label='Categorical crossentropy loss (training data)')
plt.plot(history.history['val_loss'], label='Categorical crossentropy loss (validation data)')
plt.title(f'Categorical crossentropy loss for {run_name}_{model_name}')
plt.ylabel('Categorical crossentropy loss value')
plt.yscale('log')
plt.xlabel('No. epoch')
plt.legend(loc="upper left")
plt.show()
# save the model to file
model.save(f'/home/ubuntu/Final-Project-Group1/Models/{run_name}_{model_name}_model.h5')
# save history
df_history = pd.DataFrame.from_dict(history.history)
df_history.to_csv(f'/home/ubuntu/Final-Project-Group1/Models/{run_name}_{model_name}_history.csv')
######################################################################################################################
# Evaluate model with test data
# Encoder inference model
encoder_model_inf = Model(encoder_input, encoder_states)
# Decoder inference model
decoder_state_input_h = Input(shape=(N_NEURONS,))
decoder_state_input_c = Input(shape=(N_NEURONS,))
decoder_input_states = [decoder_state_input_h, decoder_state_input_c]
decoder_out, decoder_h, decoder_c = decoder_LSTM(decoder_input, initial_state=decoder_input_states)
decoder_states = [decoder_h, decoder_c]
decoder_out = decoder_dense(decoder_out)
decoder_model_inf = Model(inputs=[decoder_input] + decoder_input_states,
outputs=[decoder_out] + decoder_states)
def decode_seq(inp_seq):
# Get initial states by feeding beginning of a test sequence to encoder
states_val = encoder_model_inf.predict(inp_seq)
# Set first target sequence to be 1111 (one hot encoded)
decoder_input = np.zeros((1, 1, len(airports)))
decoder_input[0, 0, airport_to_index_dict[1111]] = 1
# Start predicted airplane route with special character
airplane_route = [1111]
stop_condition = False
# Predict the next airports
while not stop_condition:
decoder_out, decoder_h, decoder_c = decoder_model_inf.predict(x=[decoder_input] + states_val)
# Get index of predicted airport
max_val_index = np.argmax(decoder_out[0, -1, :])
sampled_airport = index_to_airport_dict[max_val_index]
# Append predicted airport to list representing predicted sequence
airplane_route.append(sampled_airport)
# If predictions surpass the ending sequence length or model predicts 9999 indicating end of sequence
if (sampled_airport == 9999) or (len(airplane_route) > (seq_end.shape[1] - 1)):
stop_condition = True
# Update predicted airport to be fed to decoder model
decoder_input = np.zeros((1, 1, len(airports)))
decoder_input[0, 0, max_val_index] = 1
# Get states for predicting next character
states_val = [decoder_h, decoder_c]
return airplane_route # Return predicted sequence
cumulative_predictions = [] # To accumulate all predictions
cumulative_actuals = [] # To accumulate all actual labels
cumulative_accuracy = 0
test_sequences = 5000
drops = [1111, 9999] # specify beg and end tags to drop for evaluation
# Loop through test data and feed input sequences to encoder model
loop_count = 0
print('Beginning inference...')
for seq_index in range(test_sequences):
inp_seq = tokenized_seq_beg_test[seq_index]
inp_seq = np.expand_dims(inp_seq, axis=0) # Resize to go into encoder model
pred_airplane_route = decode_seq(inp_seq)
# Drop beginning and end tags before calculating evaluation metrics
pred_airplane_route = [_ for _ in pred_airplane_route if _ not in drops]
actual_airplane_route = seq_end_test[seq_index]
actual_airplane_route = [_ for _ in actual_airplane_route if _ not in drops]
# print('-')
# print('Input sequence:', seq_beg_test[seq_index])
# print('Predicted output sequence:', pred_airplane_route)
# print('Actual output sequence:', actual_airplane_route)
# print('Actual whole sequence', x_test[seq_index])
correct, incorrect = 0, 0 # To keep track of right and wrong predictions
for _ in range(len(actual_airplane_route)):
if pred_airplane_route[_] == actual_airplane_route[_]:
correct += 1
else:
incorrect += 1
# Append predictions and labels to huge lists for classification report
cumulative_predictions.append(pred_airplane_route[_])
cumulative_actuals.append(actual_airplane_route[_])
accuracy = correct / (correct + incorrect)
#print('Test Accuracy', accuracy) # This gives the accuracy on each test sequence
cumulative_accuracy += accuracy # Accumulate accuracy from all test sequences to be averaged later
#loop_count += 1
#print('Processing test sequence ' + str(loop_count) + ' out of ' + str(test_sequences))
######################################################################################################################
# Evaluate model performance on test data
cumulative_accuracy = cumulative_accuracy / test_sequences # Gets accuracy over all test sequences used
print()
# Get classification report
class_report = classification_report(cumulative_actuals, cumulative_predictions, output_dict=True)
print(class_report)
print()
# Get confusion matrix
conf_mat = confusion_matrix(y_true=cumulative_actuals, y_pred=cumulative_predictions)
print(conf_mat)
print()
# Get accuracy
print('Accuracy:', cumulative_accuracy)
# Get Cohens Kappa
ck_score = cohen_kappa_score(cumulative_actuals, cumulative_predictions)
print('Cohens Kappa:', ck_score)
# Get precision
print('Precision:', class_report['weighted avg']['precision'])
# Get recall
print('Recall:', class_report['weighted avg']['recall'])
# Get F1
print('F1:', class_report['weighted avg']['f1-score'])
# Get support
print('Support:', class_report['weighted avg']['support'])
# Create dataframe from classification report
df_class_report = pd.DataFrame(class_report).T.iloc[:-3,:]
df_class_report.sort_values('f1-score', inplace=True)
print(df_class_report)
# Plot the classes (airports) as a scatterplot colored by F1 and sized by total numbed of flights from each airport.
# g = sns.scatterplot(x='precision', y='recall', size='support',
# hue='f1-score', data=df_class_report)
# plt.title("Scatterplot of Model's Precision and Recall, \nColored by F1 Score, Sized by Number of Flights")
# plt.show()
plt.scatter(df_class_report['precision'], df_class_report['recall'], s=df_class_report['support'],
c=df_class_report['f1-score'])
plt.title(f"Scatterplot of {model_name}_{run_name} Precision and Recall, \nColored by F1 Score, Sized by Number of Flights")
plt.show()
# proc log
log_name = f'/home/ubuntu/Final-Project-Group1/Logs/{model_name}'
log = open(log_name, 'a+')
log.write(f'{model_name} for {run_name} scored {cumulative_accuracy} accuracy and {ck_score} cohen_kappa score. \n')
log.close()
``` |
{
"source": "jonathangiguere/Emotion_Image_Classifier",
"score": 2
} |
#### File: jonathangiguere/Emotion_Image_Classifier/model_4.py
```python
import os
import random
import numpy as np
import tensorflow as tf
from custom_generator import JG_Generator
import keras
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dense, Dropout, Activation, Flatten
from keras.layers.normalization import BatchNormalization
import pandas as pd
import matplotlib.pyplot as plt
from keras import backend as K
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
# Set matplotlib sizes
plt.rc('font', size=20)
plt.rc('axes', titlesize=20)
plt.rc('axes', labelsize=20)
plt.rc('xtick', labelsize=20)
plt.rc('ytick', labelsize=20)
plt.rc('legend', fontsize=20)
plt.rc('figure', titlesize=20)
# Set seed for reproducible results
SEED = 42
os.environ['PYTHONHASHSEED'] = str(SEED)
random.seed(SEED)
np.random.seed(SEED)
tf.random.set_seed(SEED)
# Data Prep
# Load file names and labels
X_train_filenames = np.load('/home/ubuntu/capstone/train_test_valid/X_train_filenames.npy')
y_train = np.load('/home/ubuntu/capstone/train_test_valid/y_train.npy')
X_valid_filenames = np.load('/home/ubuntu/capstone/train_test_valid/X_valid_filenames.npy')
y_valid = np.load('/home/ubuntu/capstone/train_test_valid/y_valid.npy')
################################################################################################################
model = Sequential()
model.add(Conv2D(64, (3, 3), input_shape=(256, 256, 3)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, (3, 3)))
model.add(Dropout(0.3))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(256, (3, 3)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(256, (3, 3)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.25))
model.add(Dense(64))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dense(6))
model.add(Activation('softmax'))
################################################################################################################
# Checkpoint callback
checkpoint_cb = keras.callbacks.ModelCheckpoint('/home/ubuntu/capstone/models/model_4.h5',
save_best_only=True)
# Early stopping callback
early_stopping_cb = keras.callbacks.EarlyStopping(patience=3,
restore_best_weights=True)
# Create custom loss function
def weighted_categorical_crossentropy(weights):
weights = K.variable(weights)
def loss(y_true, y_pred):
# scale predictions so that the class probas of each sample sum to 1
y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
# clip to prevent NaN's and Inf's
y_pred = K.clip(y_pred, K.epsilon(), 1 - K.epsilon())
# calc
loss = y_true * K.log(y_pred) * weights
loss = -K.sum(loss, -1)
return loss
return loss
# Set Flag for weighted or regular loss
loss_flag = 'weights'
if loss_flag == 'no_weights':
weights = np.array([1, 1, 1, 1, 1, 1])
elif loss_flag == 'weights':
weights = np.array([0.802518525169482, 0.802092227896231, 0.866721731456969,
1.0450040554419, 1.6127733638543, 0.870890096181114])
# Compile model in correct mode
model.compile(optimizer=keras.optimizers.Adam(learning_rate=0.001), loss=weighted_categorical_crossentropy(weights),
metrics=['accuracy', tf.keras.metrics.Recall(), tf.keras.metrics.Precision()])
# Instantiate generators for feeding in data
batch_size = 64
my_training_batch_generator = JG_Generator(X_train_filenames, y_train, batch_size)
my_validation_batch_generator = JG_Generator(X_valid_filenames, y_valid, batch_size)
# Call the custom generators and train the model
history = model.fit_generator(generator=my_training_batch_generator,
steps_per_epoch=int(len(X_train_filenames) // batch_size),
epochs=5,
verbose=1,
validation_data=my_validation_batch_generator,
validation_steps=int(3000 // batch_size),
callbacks=[checkpoint_cb, early_stopping_cb])
print('Training Complete')
# Create a figure
pd.DataFrame(history.history).plot(figsize=(8, 5))
# Set grid
plt.grid(True)
# Show the figure
plt.tight_layout()
plt.show()
```
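The weighted_categorical_crossentropy closure above scales each class's log-loss term by a fixed weight; the NumPy sketch below (made-up label and prediction values) restates that arithmetic and shows that unit weights recover the ordinary categorical crossentropy.
```python
# NumPy restatement of the weighted categorical crossentropy defined above; the
# label and prediction values are made up for illustration.
import numpy as np

def weighted_cce(y_true, y_pred, weights):
    y_pred = y_pred / y_pred.sum(axis=-1, keepdims=True)  # normalize class probabilities
    y_pred = np.clip(y_pred, 1e-7, 1 - 1e-7)              # avoid log(0)
    return -(y_true * np.log(y_pred) * weights).sum(axis=-1)

y_true = np.array([[0, 0, 1, 0, 0, 0]], dtype=float)       # sample belongs to class 2
y_pred = np.array([[0.05, 0.05, 0.6, 0.1, 0.1, 0.1]])
unit_weights = np.ones(6)
class_weights = np.array([0.8025, 0.8021, 0.8667, 1.0450, 1.6128, 0.8709])

print(weighted_cce(y_true, y_pred, unit_weights))   # == -log(0.6), plain crossentropy
print(weighted_cce(y_true, y_pred, class_weights))  # the same term scaled by the class-2 weight
```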
#### File: jonathangiguere/Emotion_Image_Classifier/model_6.py
```python
import os
import random
import numpy as np
import tensorflow as tf
from custom_generator import JG_Generator_Xception
import keras
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
from keras import backend as K
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
# Set matplotlib sizes
plt.rc('font', size=20)
plt.rc('axes', titlesize=20)
plt.rc('axes', labelsize=20)
plt.rc('xtick', labelsize=20)
plt.rc('ytick', labelsize=20)
plt.rc('legend', fontsize=20)
plt.rc('figure', titlesize=20)
# Set seed for reproducible results
SEED = 42
os.environ['PYTHONHASHSEED'] = str(SEED)
random.seed(SEED)
np.random.seed(SEED)
tf.random.set_seed(SEED)
# Data Prep
# Load file names and labels
X_train_filenames = np.load('/home/ubuntu/capstone/train_test_valid/X_train_filenames.npy')
y_train = np.load('/home/ubuntu/capstone/train_test_valid/y_train.npy')
X_train_filenames, y_train = shuffle(X_train_filenames, y_train, random_state=42)
# Use 5k to test Resnet50
# X_train_filenames = X_train_filenames[:5000]
# y_train = y_train[:5000]
print('Training Data Categories')
print(X_train_filenames.shape)
print(y_train.shape)
print(np.sum(y_train, axis=0))
print()
X_valid_filenames = np.load('/home/ubuntu/capstone/train_test_valid/X_valid_filenames.npy')
y_valid = np.load('/home/ubuntu/capstone/train_test_valid/y_valid.npy')
################################################################################################################
# Load ResNet50
pretrained_model = keras.applications.ResNet50(include_top=False, weights='imagenet')
# Add GlobalAveragePooling2D layer
average_pooling = keras.layers.GlobalAveragePooling2D()(pretrained_model.output)
# Add fully connected layer to get a more complex model
#fully_connected = keras.layers.Dense(128, activation='relu')(average_pooling)
# Add the output layer
output = keras.layers.Dense(5, activation='softmax')(average_pooling)
# Get the model
model = keras.Model(inputs=pretrained_model.input, outputs=output)
print(len(model.layers))
unfreeze = len(model.layers) // 5 # get 20% of the layers
freeze = unfreeze * 4 # Freeze 80% of the layers
# Freeze layers
for _, layer in enumerate(pretrained_model.layers):
if _ <= freeze:
layer.trainable = False
else:
break
print(model.summary())
################################################################################################################
# Checkpoint callback
checkpoint_cb = keras.callbacks.ModelCheckpoint('/home/ubuntu/capstone/models/model_6.h5',
save_best_only=True)
# Early stopping callback
early_stopping_cb = keras.callbacks.EarlyStopping(patience=3,
restore_best_weights=True)
# Create custom loss function
def weighted_categorical_crossentropy(weights):
weights = K.variable(weights)
def loss(y_true, y_pred):
# scale predictions so that the class probas of each sample sum to 1
y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
# clip to prevent NaN's and Inf's
y_pred = K.clip(y_pred, K.epsilon(), 1 - K.epsilon())
# calc
loss = y_true * K.log(y_pred) * weights
loss = -K.sum(loss, -1)
return loss
return loss
# Set Flag for weighted or regular loss
loss_flag = 'weights'
if loss_flag == 'no_weights':
    weights = np.array([1, 1, 1, 1, 1])  # five classes to match the Dense(5) output layer
elif loss_flag == 'weights':
weights = np.array([0.999812577822696, 0.999812577822696, 0.999815898512017,
1.00074194755438, 0.999816998288216])
# Compile model in correct mode
model.compile(optimizer=keras.optimizers.Adam(learning_rate=0.001), loss=weighted_categorical_crossentropy(weights), metrics=['accuracy', tf.keras.metrics.Recall(), tf.keras.metrics.Precision()])
# Instantiate generators for feeding in data
batch_size = 64
my_training_batch_generator = JG_Generator_Xception(X_train_filenames, y_train, batch_size)
my_validation_batch_generator = JG_Generator_Xception(X_valid_filenames, y_valid, batch_size)
# Call the custom generators and train the model
history = model.fit_generator(generator=my_training_batch_generator,
steps_per_epoch=int(len(X_train_filenames) // batch_size),
epochs=5,
verbose=1,
validation_data=my_validation_batch_generator,
validation_steps=int(2500 // batch_size),
callbacks=[checkpoint_cb, early_stopping_cb])
print('Training Complete')
# Create a figure
pd.DataFrame(history.history).plot(figsize=(8, 5))
# Set grid
plt.grid(True)
# Show the figure
plt.tight_layout()
plt.show()
```
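The training script above builds its loss with the inline `weighted_categorical_crossentropy` factory. The sketch below is not part of the repository; it simply restates that factory in standalone form and checks, on a toy batch, that all-ones weights reduce it to ordinary categorical crossentropy (assuming TensorFlow 2.x is installed).
```python
import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K

def weighted_categorical_crossentropy(weights):
    # Same logic as in the training script above
    weights = K.variable(weights)
    def loss(y_true, y_pred):
        y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
        y_pred = K.clip(y_pred, K.epsilon(), 1 - K.epsilon())
        return -K.sum(y_true * K.log(y_pred) * weights, -1)
    return loss

y_true = tf.constant([[0., 0., 1., 0., 0.]])
y_pred = tf.constant([[0.1, 0.1, 0.6, 0.1, 0.1]])
loss_fn = weighted_categorical_crossentropy(np.ones(5, dtype="float32"))
print(float(tf.reduce_mean(loss_fn(y_true, y_pred))))  # ~0.51, i.e. -log(0.6)
```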
#### File: jonathangiguere/Emotion_Image_Classifier/train_valid_test_clean.py
```python
import numpy as np
from sklearn.model_selection import train_test_split
# Load file names and labels
x, y = np.load("/home/ubuntu/capstone/filenames.npy"), np.load("/home/ubuntu/capstone/labels.npy")
print(x.shape)
print(y.shape)
# Loop through labels and keep track of indices where the non-faces are
# Also drop None and Uncertain categories
# Also drop Contempt and Disgust categories
drop_indices = []
for _ in range(len(y)):
if y[_][10] == 1 or y[_][9] == 1 or y[_][8] == 1 or y[_][7] == 1 or y[_][5] == 1: # or y[_][4] ... add back to drop Fear
drop_indices.append(_)
# Drop label rows where indices match
y = np.delete(y, drop_indices, axis=0)
y = np.delete(y, 10, axis=1) # Drop last column because all vals are 0 after removing non-face rows
y = np.delete(y, 9, axis=1) # Do the same for None and Uncertain categories
y = np.delete(y, 8, axis=1)
y = np.delete(y, 7, axis=1) # Do the same for Contempt and Disgust categories
y = np.delete(y, 5, axis=1)
#y = np.delete(y, 4, axis=1) # Do the same for Fear category
# Drop image names where indices match
x = np.delete(x, drop_indices)
print(len(drop_indices))
# Get validation set 500 per category
def get_indices_valid(label):
valid_indices = []
for _ in range(len(y)): # Loop through all labels
if len(valid_indices) < 500: # Get 500 indices for the label
if y[_][label] == 1:
valid_indices.append(_)
return valid_indices
# Get 500 indices for all categories
valid_indices = []
for _ in range(6):
valid_indices = valid_indices + get_indices_valid(_)
# Take indices identified as validation data
y_valid = np.take(y, valid_indices, axis=0)
# Take indices from the input data as well
X_valid_filenames = np.take(x, valid_indices)
# Drop the validation data from original data
y = np.delete(y, valid_indices, axis=0)
x = np.delete(x, valid_indices)
# Now get the test data with train_test_split; test_size is chosen so the test set is roughly the same size as the validation set
X_train_filenames, X_test_filenames, y_train, y_test = train_test_split(x, y, random_state=42, test_size=0.01073, stratify=y)
print('Train Final:')
print(X_train_filenames.shape)
print(y_train.shape)
print(np.sum(y_train, axis=0))
print()
print('Valid Final:')
print(X_valid_filenames.shape)
print(y_valid.shape)
print(np.sum(y_valid, axis=0))
print()
print('Test Final:')
print(X_test_filenames.shape)
print(y_test.shape)
print(np.sum(y_test, axis=0))
print()
# Save all data to numpy files in new directory
# Train
np.save('/home/ubuntu/capstone/train_test_valid/X_train_filenames.npy', X_train_filenames)
np.save('/home/ubuntu/capstone/train_test_valid/y_train.npy', y_train)
# Valid
np.save('/home/ubuntu/capstone/train_test_valid/X_valid_filenames.npy', X_valid_filenames)
np.save('/home/ubuntu/capstone/train_test_valid/y_valid.npy', y_valid)
# Test
np.save('/home/ubuntu/capstone/train_test_valid/X_test_filenames.npy', X_test_filenames)
np.save('/home/ubuntu/capstone/train_test_valid/y_test.npy', y_test)
``` |
{
"source": "jonathangil97/4a-docs",
"score": 2
} |
#### File: authApp/views/verifyTokenView.py
```python
from django.conf import settings
from rest_framework import status
from rest_framework.response import Response
from rest_framework_simplejwt.views import TokenVerifyView
from rest_framework_simplejwt.backends import TokenBackend
from rest_framework_simplejwt.exceptions import InvalidToken, TokenError
from rest_framework_simplejwt.serializers import TokenVerifySerializer
class VerifyTokenView(TokenVerifyView):
def post(self, request, *args, **kwargs):
serializer = TokenVerifySerializer(data=request.data)
tokenBackend = TokenBackend(algorithm=settings.SIMPLE_JWT['ALGORITHM'])
try:
serializer.is_valid(raise_exception=True)
token_data = tokenBackend.decode(request.data['token'],verify=False)
serializer.validated_data['UserId'] = token_data['user_id']
except TokenError as e:
raise InvalidToken(e.args[0])
return Response(serializer.validated_data, status=status.HTTP_200_OK)
``` |
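A minimal client-side call against this view could look like the sketch below; the host and the `/api/token/verify/` route are illustrative assumptions, since the project's `urls.py` is not shown here.
```python
import requests

# Hypothetical endpoint; adjust to wherever VerifyTokenView is routed.
response = requests.post(
    "http://localhost:8000/api/token/verify/",
    json={"token": "<access-token-here>"},
)
print(response.status_code)  # 200 for a valid token
print(response.json())       # validated data plus the "UserId" decoded from the token
```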
{
"source": "jonathangingras/expkit",
"score": 3
} |
#### File: expkit/examples/data.py
```python
from sklearn.datasets import load_iris
def iris_train():
i = load_iris()
return {"X": i.data[:75], "y": i.target[:75], "feature_names": None}
def iris_test():
i = load_iris()
return {"X": i.data[75:], "y": i.target[75:], "feature_names": None}
```
#### File: expkit/expkit/config.py
```python
from .utils.expando import Expando
class Config(Expando):
USER_ADDED_PREFIX = "user_added__"
def __init__(self,
python="python",
latex="pdflatex",
result_dir="__results__",
experiments_cfg="experiments.py",
results_cfg="results.py",
open_cmd="open",
target="document.pdf",
**kwargs):
for key, val in filter(lambda key_val: id(key_val[1]) != id(self) and id(key_val[1]) != id(kwargs), locals().items()):
self[key] = val
for key, val in kwargs.items():
self[Config.USER_ADDED_PREFIX + key] = val
def __getattribute__(self, key):
val = Expando.__getattribute__(self, str(key))
if val is None:
return Expando.__getattribute__(self, Config.USER_ADDED_PREFIX + str(key))
return val
@staticmethod
def from_dict(dict_object):
return Config(**dict_object)
@staticmethod
def from_list(list_object):
return Config.from_dict({key.lower().strip(): val.strip() for key, val in map(lambda el: el.split("="), list_object)})
@staticmethod
def from_file(file_object):
return Config.from_list(list(filter(lambda line: line != '', map(lambda line: line.strip(), file_object.readlines()))))
@staticmethod
def from_configfile(filepath="config.expkit"):
return Config.from_file(open(filepath, "r"))
```
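A short usage sketch for this `Config` class, assuming the `expkit` package is importable. Keys matching a constructor parameter are stored directly; unknown keys are stored behind the `user_added__` prefix but remain reachable as plain attributes through the `__getattribute__` fallback.
```python
from expkit.config import Config

cfg = Config.from_list(["TARGET = report.pdf", "GPU = 0"])
print(cfg.target)  # 'report.pdf' (known constructor parameter)
print(cfg.gpu)     # '0' (stored as 'user_added__gpu', found via the fallback)
```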
#### File: experiment/report/graphics.py
```python
import os
import pickle as pkl
import matplotlib.pyplot as plt
from ...graphics.confusion_matrix import plot_binary_confusion_matrix
from ...graphics.pdf import save_to_pdf
import numpy as np
def save_inpdf(pkl_filepath, file_prefix, graphic_code):
res = pkl.load(open(pkl_filepath, "rb"))
save_to_pdf(os.path.join(os.path.dirname(pkl_filepath),
os.path.basename(pkl_filepath)[:-4] +
file_prefix +
".in" + ".pdf"),
graphic_code,
res)
def print_feature_importances(pkl_filepath):
def graphic(res):
fi = res["learner"].feature_importances_
plt.plot(range(len(fi)), fi)
plt.title(res["experiment_label"])
save_inpdf(pkl_filepath, ".f_imp", graphic)
def print_confusion_matrix(pkl_filepath):
def graphic(res):
cnf_matrix = res["confusion_matrix"]
title = res["experiment_label"]
plot_binary_confusion_matrix(cnf_matrix, ["False", "True"], title=title)
save_inpdf(pkl_filepath, ".cnf_mat", graphic)
def print_cv_heatmaps(pkl_filepath, param_name1, param_name2, **fix_params):
def graphic(res):
param_grid = res["configs"]["params"]["param_grid"]
if len(param_grid) < 2:
raise RuntimeError("not enough hyper parameter ranges")
params1 = param_grid[param_name1][::-1]
param1_mask = res["cv_results"]["param_" + param_name1]
params2 = param_grid[param_name2]
param2_mask = res["cv_results"]["param_" + param_name2]
mean_test_scores = res["cv_results"]["mean_test_score"]
fix_params_mask = np.ones(len(mean_test_scores)) == 1
for key, val in fix_params.items():
fix_params_mask = fix_params_mask & \
(res["cv_results"]["param_" + key] == val)
scores = np.zeros((len(params1), len(params2)))
for i, ival in enumerate(params1):
for j, jval in enumerate(params2):
mask = (param1_mask == ival) & (param2_mask == jval) & fix_params_mask
scores[i, j] = mean_test_scores[mask]
plt.imshow(scores, interpolation='nearest')
plt.ylabel(param_name1)
plt.xlabel(param_name2)
plt.colorbar()
plt.title("{}: {} vs {}, scorer={}".format(res["experiment_label"], param_name1, param_name2, res["configs"]["params"]["scoring"]))
plt.yticks(np.arange(scores.shape[0]), params1)
plt.xticks(np.arange(scores.shape[1]), params2, rotation=45)
save_inpdf(pkl_filepath, ".cv_hmap", graphic)
```
#### File: expkit/utils/conversion.py
```python
import numpy as np
def collect_classes(y):
if isinstance(y, np.ndarray) and len(y.shape) > 1:
cy = np.ascontiguousarray(y).view(np.dtype((np.void, y.dtype.itemsize * y.shape[1])))
_, idx = np.unique(cy, return_index=True)
return y[idx]
return np.array(list(set(y)))
def per_sample_shape(X):
return np.array(X, copy=False).shape[1:]
def one_hots_to_indices(one_hots):
return np.array(list(map(np.argmax, one_hots)))
def label_to_one_hot(label, classes, dtype=np.float64):
"""
y: class label, for e.g. "positive"
classes: list of classes, for e.g. ["negative", "positive"]
"""
one_hot = np.zeros((len(classes),), dtype=dtype)
class_index = np.argmax(np.array(tuple(map(lambda lbl: lbl == label, classes))))
one_hot[class_index] = 1
return one_hot
def labels_to_one_hots(y, classes, dtype=np.float64):
one_hots = np.zeros((len(y), len(classes)), dtype=dtype)
def apply_ones(label):
class_index = np.argmax(np.array(tuple(map(lambda lbl: lbl == label, classes))))
one_hots[y == label, class_index] = 1
tuple(map(apply_ones, classes))
return one_hots
```
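A usage sketch for these conversion helpers, assuming `expkit` is on the path. Note that `collect_classes` builds its class list from a `set`, so the column order of the one-hot matrix is not guaranteed.
```python
import numpy as np
from expkit.utils.conversion import collect_classes, labels_to_one_hots, one_hots_to_indices

y = np.array(["neg", "pos", "pos", "neg"])
classes = collect_classes(y)               # unique labels, e.g. ['neg', 'pos'] in some order
one_hots = labels_to_one_hots(y, classes)  # (4, 2) one-hot matrix
print(one_hots_to_indices(one_hots))       # class indices recovered from the one-hots
```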
#### File: expkit/utils/notifications.py
```python
from .iterators import iterable
import smtplib
from email.mime.text import MIMEText
from slacker import Slacker
import os
class Message(object):
def __init__(self, recipient, body="", sender=None, subject=None, attachments=None):
self.recipient = recipient
self.sender = sender if sender is not None else recipient
self.subject = subject
self.body = body
self.attachments = attachments
class NotificationService(object):
def send(self, message=None, **kwargs):
        if message is None:
return self.__send__(Message(**kwargs))
if not isinstance(message, Message):
raise RuntimeError("not an " + str(Message) + " instance")
return self.__send__(message)
class EmailSendingService(NotificationService):
def __init__(self, server_class=smtplib.SMTP,
additionnal_steps=None,
**server_args):
self.server_class = server_class
self.additionnal_steps = additionnal_steps
self.server_args = server_args
def __convert_to_MIMEText(self, message):
m = MIMEText(message.body)
m['From'] = message.sender
m['To'] = message.recipient
m['Subject'] = message.subject if message.subject is not None else "(empty)"
return m
def __send__(self, message):
server = self.server_class(**self.server_args)
server.ehlo()
if callable(self.additionnal_steps):
self.additionnal_steps(self)
server.send_message(self.__convert_to_MIMEText(message))
server.quit()
class SlackNotificationService(NotificationService):
def __init__(self, slack_token):
self.slack = Slacker(slack_token)
def __slack_users(self):
return self.slack.users.list().body['members']
def __find_user_by_name(self, name):
users = tuple(filter(lambda user: user["name"] == name, self.__slack_users()))
if len(users) == 0:
raise RuntimeError("no such user")
if len(users) > 1:
raise RuntimeError("inconsistent slack API, two users with same name")
return users[0]["id"]
def __format_message(self, message):
if message.subject is None:
return message.body
else:
return "*{}*\n{}".format(message.subject, message.body)
def __send__(self, message):
recipient = self.__find_user_by_name(message.recipient)
channel = self.slack.im.open(recipient).body["channel"]["id"]
self.slack.chat.post_message(channel, self.__format_message(message))
if message.attachments is not None:
if not iterable(message.attachments) or isinstance(message.attachments, str):
attachments = (message.attachments,)
else:
attachments = message.attachments
for attachment in attachments:
self.slack.files.upload(channels=[channel],
file_=attachment,
filename=os.path.basename(attachment))
```
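A usage sketch for the e-mail service, assuming `expkit` and its `slacker` dependency are installed (the module imports `Slacker` at load time) and that an SMTP server is reachable at the assumed host and port below.
```python
from expkit.utils.notifications import EmailSendingService, Message

service = EmailSendingService(host="localhost", port=25)  # kwargs are passed to smtplib.SMTP
service.send(Message(
    recipient="dev@example.com",
    subject="Experiments finished",
    body="All runs completed without errors.",
))
```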
#### File: expkit/utils/writer.py
```python
import sys
from functools import reduce
class StdOutOutput(object):
def write(self, arg):
return sys.stdout.write(arg)
class TeeWriter(object):
def __init__(self, output, tee_output):
if output is None or tee_output is None:
raise ValueError("no output may be None")
self.output = output
self.tee_output = tee_output
def write(self, *args):
self.output.write(*args)
return 2*self.tee_output.write(*args)
class Writer(object):
def __init__(self, output=None, separator=' '):
self.separator = separator
if output is None:
self.output = StdOutOutput()
else:
self.output = output
def __eq__(self, other):
if isinstance(other, Writer):
return self.output == other.output
return False
def write(self, *args):
return self.output.write(self.separator.join(map(str, args)))
class SkipWriter(Writer):
def __init__(self, output=None, separator=' ', skip_factor=10):
super().__init__(output=output, separator=separator)
self.count = 0
self.skip_factor = skip_factor
def write(self, *args):
length = 0
if self.count % self.skip_factor == 0:
length = super().write(*args)
self.count += 1
return length
class DotWriter(Writer):
def __init__(self, output=None, separator=' ', string='.'):
super().__init__(output=output, separator=separator)
self.string = string
def write(self, *args):
return super().write(self.string)
class BufferArray(object):
def __init__(self):
self.data = []
def write(self, arg):
self.data.append(arg)
return len(self.data[-1])
def clear(self):
self.data.clear()
def dump(self, output):
tuple(map(output.write, self.data))
def __len__(self):
if len(self.data) == 0:
return 0
return reduce(lambda x, y: x + y, map(len, self.data))
def __repr__(self):
return "<{}.{} object with data: {}>".format(self.__class__.__module__,
self.__class__.__name__,
self.data)
def __str__(self):
return "".join(self.data)
def __format__(self, f):
return str(self)
class InMemoryWriter(Writer):
def __init__(self, separator=' '):
super().__init__(output=BufferArray(), separator=separator)
def dump(self, output):
self.output.dump(output)
class FileWriter(Writer):
def __init__(self, filename, separator=' ', buffer_size=1024, mode="w"):
super().__init__(output=BufferArray(), separator=separator)
self.filename = filename
self.buffer_limit = buffer_size
self._init_file(mode)
def _init_file(self, mode):
if mode == "a":
return
elif mode == "w":
with open(self.filename, "w") as f:
f.flush()
else:
raise ValueError("mode must be either 'a' (append) or 'w' (write)")
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.flush()
def __del__(self):
self.flush()
def flush(self):
if len(self.output) == 0:
return
with open(self.filename, "a") as f:
self.output.dump(f)
self.output.clear()
def write(self, *args):
if len(self.output) + 1 > self.buffer_limit:
self.flush()
return super().write(*args)
```
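A usage sketch for the writer classes, assuming `expkit` is importable. `InMemoryWriter` buffers everything in a `BufferArray`, while `FileWriter` flushes to disk when its buffer fills or the `with` block exits.
```python
from expkit.utils.writer import InMemoryWriter, FileWriter

mem = InMemoryWriter()
mem.write("loss:", 0.25)   # arguments are joined with the separator
print(str(mem.output))     # 'loss: 0.25'

with FileWriter("log.txt", buffer_size=16) as log:
    log.write("epoch 1 done\n")  # written to log.txt when the buffer fills or on exit
```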
#### File: expkit/wrapper/torch.py
```python
import multiprocessing as mp
import math
import json
import numpy as np
import torch
import torch.utils
import torch.utils.data
import torch.autograd
from ..experiment import Dataset
from .operators import unwrapped
from .learner import LearnerWrapper, Event
from .onehot import OneHotClassifierWrapper
from ..utils.conversion import collect_classes, per_sample_shape, labels_to_one_hots
from ..utils.arguments import null_function, merge_dicts
from ..utils.writer import Writer
class NeuralNetwork(object):
def __init__(self,
model,
loss_function_class,
optimizer_class,
lr_scheduler_class=None,
loss_function_params={},
optimizer_params={'lr': 0.1},
lr_scheduler_params={},
n_epochs=200,
batch_size=64,
n_output='auto',
log=None,
callbacks={},
n_jobs=-1,
use_gpu=True):
self.model = model
self.loss = loss_function_class(**loss_function_params)
self.optimizer = optimizer_class(self.model.parameters(), **optimizer_params)
if lr_scheduler_class is not None:
self.lr_scheduler = lr_scheduler_class(optimizer=self.optimizer, **lr_scheduler_params)
else:
self.lr_scheduler = None
self.n_epochs = n_epochs
self.batch_size = batch_size
self.n_output = n_output
self.log = log if log is not None else Writer()
self.workers = n_jobs if n_jobs > 0 else mp.cpu_count()
self.use_gpu = use_gpu
self._n_samples = 0
self._data_loader = None
if self.use_gpu:
self.model = self.model.cuda()
self.callbacks = {
"before_epoch": null_function,
"after_epoch": null_function,
"before_batch": null_function,
"after_batch": null_function,
"before_fit": null_function,
"after_fit": null_function,
"log_extras": null_function,
}
self.callbacks.update(callbacks)
self.validation_dataset = None
self.test_dataset = None
def __to_variable(self, tensor):
variable = torch.autograd.Variable(tensor)
if self.use_gpu:
return variable.cuda()
return variable
def __to_numpy(self, variable):
data = variable.data
if 'cuda' in data.__class__.__module__:
return data.cpu().numpy()
return data.numpy()
@property
def n_batches(self):
return int(self._n_samples/self.batch_size)
def __log(self, epoch_idx, batch_idx, loss, validation_loss=None, test_loss=None):
self.log.write(json.dumps(merge_dicts({
"epoch": epoch_idx,
"n_epochs": self.n_epochs,
"batch": batch_idx,
"n_batches": self.n_batches,
"training loss": loss,
"validation loss": validation_loss,
"test loss": test_loss,
}, self.callbacks["log_extras"](self))), '\n')
def __batch(self, epoch_idx, batch_idx, batch_X, batch_y):
batch_X, batch_y = self.__to_variable(batch_X), self.__to_variable(batch_y)
self.callbacks["before_batch"](self, epoch_idx, batch_idx, batch_X, batch_y)
y_pred = self.model(batch_X)
loss = self.loss(y_pred, batch_y)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
self.callbacks["after_batch"](self, epoch_idx, batch_idx, loss)
return loss.data[0]
def __epoch(self, epoch_idx):
self.callbacks["before_epoch"](self, epoch_idx)
if self.lr_scheduler is not None:
self.lr_scheduler.step()
for batch_idx, (batch_X, batch_y) in enumerate(self._data_loader):
loss = self.__batch(epoch_idx, batch_idx, batch_X, batch_y)
validation_loss = None
test_loss = None
if (epoch_idx % (math.ceil(self.n_epochs * 0.05)) == 0) and batch_idx == 0:
validation_loss = self.__validation_loss()
test_loss = self.__test_loss()
self.__log(epoch_idx, batch_idx, loss, validation_loss, test_loss)
self.callbacks["after_epoch"](self, epoch_idx)
def __validation_loss(self):
if not self.validation_dataset:
return None
valid_pred = self.model(self.validation_dataset["X"])
loss = self.loss(valid_pred, self.validation_dataset["y"])
return loss.data[0]
def __test_loss(self):
if not self.test_dataset:
return None
test_pred = self.model(self.test_dataset["X"])
loss = self.loss(test_pred, self.test_dataset["y"])
return loss.data[0]
def register_validation_dataset(self, validation_dataset):
self.validation_dataset = {
"X": self.__to_variable(torch.from_numpy(validation_dataset.X)),
"y": self.__to_variable(torch.from_numpy(validation_dataset.y))
}
def register_test_dataset(self, validation_dataset):
self.test_dataset = {
"X": self.__to_variable(torch.from_numpy(validation_dataset.X)),
"y": self.__to_variable(torch.from_numpy(validation_dataset.y))
}
def __getstate__(self):
state = {key: value for key, value in self.__dict__.items() if key not in ["validation_dataset", "test_dataset"]}
return state
def __setstate__(self, state):
self.__dict__.update(state)
def fit(self, X, y):
X = torch.from_numpy(X)
y = torch.from_numpy(y)
self.callbacks["before_fit"](self, X, y)
dataset = torch.utils.data.TensorDataset(X, y)
self._n_samples = len(dataset)
self._data_loader = torch.utils.data.DataLoader(
dataset, batch_size=self.batch_size, shuffle=True,
num_workers=self.workers, pin_memory=True, sampler=None)
for epoch_idx in range(self.n_epochs):
self.__epoch(epoch_idx)
self.callbacks["after_fit"](self)
def predict(self, X):
X = torch.from_numpy(np.array(X))
pred = self.model(self.__to_variable(X))
return self.__to_numpy(pred)
class NeuralNetworkMixin(object):
def __init__(self, seed=None, **kwargs):
self.seed = seed
self.learner = LearnerWrapper(NeuralNetwork, **kwargs)
def init_seed(self):
if self.seed is not None:
print("setting torch seed")
torch.manual_seed(self.seed)
def create_model(self, input_dim, output_dim):
raise RuntimeError("no create_model method overridden")
def register_evaluation_datasets(self, validation_dataset, test_dataset=None):
def register(_, validation_dataset, test_dataset=None):
unwrapped(self.learner).register_validation_dataset(
Dataset(validation_dataset.X, validation_dataset.y)
)
if test_dataset is not None:
unwrapped(self.learner).register_test_dataset(
Dataset(test_dataset.X, test_dataset.y)
)
self.learner.register_event("fit", Event(register, validation_dataset, test_dataset))
def predict(self, X):
return self.learner.predict(X)
class SingleOutputNeuralNetworkMixin(NeuralNetworkMixin):
def fit(self, X, y):
self.init_seed()
self.learner.instantiate_estimator(model=self.create_model(per_sample_shape(X), 1))
return self.learner.fit(X, y)
class MultiOutputNeuralNetworkMixin(NeuralNetworkMixin):
def fit(self, X, y):
if self.n_output == 'auto':
n_output = len(collect_classes(y))
else:
if isinstance(self.n_output, int):
n_output = self.n_output
else:
n_output = self.n_output(X, y)
self.init_seed()
self.learner.instantiate_estimator(model=self.create_model(per_sample_shape(X), n_output))
return self.learner.fit(X, y)
class AbstractOneHotNeuralNetwork(MultiOutputNeuralNetworkMixin):
def __init__(self, *args, y_dtype=None, seed=None, **kwargs):
self.seed = seed
self.learner = OneHotClassifierWrapper(NeuralNetwork, *args, y_dtype=y_dtype, **kwargs)
self.validation_dataset = None
self.y_dtype = y_dtype
def register_evaluation_datasets(self, validation_dataset, test_dataset=None):
def convert_to_onehot(_, validation_dataset, test_dataset=None):
unwrapped(self.learner).learner.register_validation_dataset(Dataset(
validation_dataset.X,
labels_to_one_hots(validation_dataset.y, self.learner.get_classes(), dtype=self.y_dtype)
))
if test_dataset is not None:
unwrapped(self.learner).learner.register_test_dataset(Dataset(
test_dataset.X,
labels_to_one_hots(test_dataset.y, self.learner.get_classes(), dtype=self.y_dtype)
))
self.learner.register_event("classes_collected", Event(convert_to_onehot, validation_dataset, test_dataset))
```
#### File: expkit/wrapper/validation_split.py
```python
import numpy as np
from sklearn.model_selection import train_test_split, PredefinedSplit
from .cv import CVWrapper
from ..experiment.dataset import Dataset
class TrainValidationCVWrapper(CVWrapper):
def __init__(self, estimator_class, *args, **kwargs):
self.estimator_class = estimator_class
self.args = args
self.kwargs = kwargs
def create_predefined_split(self, train_dataset, validation_dataset):
train_split = -1 * np.ones(train_dataset.X.shape[0])
test_split = np.zeros(validation_dataset.X.shape[0])
split = np.append(train_split, test_split)
return PredefinedSplit(split)
def fit(self, X, y):
train = Dataset(X["train"], y["train"], X["feature_names"])
valid = Dataset(X["valid"], y["valid"], X["feature_names"])
alldata = train.merge(valid)
super().__init__(self.estimator_class, *self.args, cv=self.create_predefined_split(train, valid), **self.kwargs)
super().fit(alldata.X, alldata.y)
def wrap_train_valid_datasets(train, valid, treatment_train=None, treatment_valid=None):
if treatment_train:
train = treatment_train(train)
if treatment_valid:
valid = treatment_valid(valid)
return {
"X": {"train": train.X, "valid": valid.X, "feature_names": train.feature_names},
"y": {"train": train.y, "valid": valid.y},
"feature_names": train.feature_names
}
```
#### File: tests/experiment/test_attribute_chaining.py
```python
from unittest import TestCase
from expkit.experiment.attribute_chaining import get_chained_attribute, ChainedAttributeError, ChainedAttributeCallForwarder
CHAIN_END = 42
METHOD_RETURN = "method return"
NO_SUCH_KEY = "no such key"
class SomeOneLevelClass(object):
def __init__(self):
self.lvl1_attribute = CHAIN_END
self.lvl1_dict = {"key": CHAIN_END}
def some_method(self):
return METHOD_RETURN
class SomeTwoLevelClass(object):
def __init__(self):
self.lvl2_attribute = SomeOneLevelClass()
class GetChainedAttributeTest(TestCase):
def test_can_chain_1_level_attribute(self):
obj = SomeOneLevelClass()
chain_end = get_chained_attribute(obj, ["lvl1_attribute"])
self.assertEqual(CHAIN_END, chain_end)
def test_can_chain_2_level_attribute(self):
obj = SomeTwoLevelClass()
chain_end = get_chained_attribute(obj, ["lvl2_attribute", "lvl1_attribute"])
self.assertEqual(CHAIN_END, chain_end)
def test_a_missing_attribute_in_chain_fallsback_to_subscript_operator(self):
obj = SomeOneLevelClass()
chain_end = get_chained_attribute(obj, ["lvl1_dict", "key"])
self.assertEqual(CHAIN_END, chain_end)
def test_a_missing_attribute_in_chain_fallsback_to_subscript_operator_at_lvl2(self):
obj = SomeTwoLevelClass()
chain_end = get_chained_attribute(obj, ["lvl2_attribute", "lvl1_dict", "key"])
self.assertEqual(CHAIN_END, chain_end)
def test_can_return_a_bound_method(self):
obj = SomeTwoLevelClass()
chain_end = get_chained_attribute(obj, ["lvl2_attribute", "some_method"])
self.assertEqual(obj.lvl2_attribute.some_method, chain_end)
self.assertEqual(METHOD_RETURN, chain_end())
def test_a_missing_attribute_or_key_raises_chained_attribute_error(self):
obj = SomeTwoLevelClass()
with self.assertRaises(ChainedAttributeError):
get_chained_attribute(obj, ["lvl2_attribute", NO_SUCH_KEY])
ARBITRARY_VALUE_1 = 42
ARBITRARY_VALUE_2 = 43
ARBITRARY_VALUE_3 = 44
class InnerForwardable(object):
def inner_method(self, *args):
return args
def inner_callable(self):
return ARBITRARY_VALUE_3
class Forwardable(object):
def __init__(self):
self.dict_attribute = {
"key1": ARBITRARY_VALUE_1,
"key2": ARBITRARY_VALUE_2
}
self.attribute = InnerForwardable()
class ChainedAttributeCallForwarderTest(TestCase):
def test_can_forward_without_argument(self):
forwarder = ChainedAttributeCallForwarder(["attribute", "inner_method"])
result = forwarder(Forwardable())
self.assertEqual(tuple(), result)
def test_can_forward_with_single_arbitrary_argument(self):
forwarder = ChainedAttributeCallForwarder(["attribute", "inner_method"],
ARBITRARY_VALUE_1)
result = forwarder(Forwardable())
self.assertEqual(ARBITRARY_VALUE_1, result[0])
def test_can_forward_with_single_chained_argument(self):
forwarder = ChainedAttributeCallForwarder(["attribute", "inner_method"],
["dict_attribute", "key1"])
result = forwarder(Forwardable())
self.assertEqual(ARBITRARY_VALUE_1, result[0])
def test_can_forward_with_2_chained_arguments(self):
forwarder = ChainedAttributeCallForwarder(["attribute", "inner_method"],
["dict_attribute", "key1"],
["dict_attribute", "key2"])
result1, result2 = forwarder(Forwardable())
self.assertEqual(ARBITRARY_VALUE_1, result1)
self.assertEqual(ARBITRARY_VALUE_2, result2)
def test_a_non_chained_argument_is_called_if_callable(self):
forwarder = ChainedAttributeCallForwarder(["attribute", "inner_method"],
lambda: ARBITRARY_VALUE_3)
result = forwarder(Forwardable())
self.assertEqual(ARBITRARY_VALUE_3, result[0])
def test_a_chained_argument_is_called_if_callable(self):
forwarder = ChainedAttributeCallForwarder(["attribute", "inner_method"],
["attribute", "inner_callable"])
result = forwarder(Forwardable())
self.assertEqual(ARBITRARY_VALUE_3, result[0])
```
#### File: tests/utils/test_expando.py
```python
from unittest import TestCase
from expkit.utils.expando import Expando
obj = Expando(_89='opm', kiki=9)
obj2 = Expando.from_dict({89:'opm2', 'kiki':90})
class ExpandoTest(TestCase):
def test_can_instanciate_from_kwargs(self):
self.assertEqual({"_89": "opm", "kiki": 9}, vars(obj))
def test_getting_non_existing_attribute_returns_None(self):
self.assertEqual(None, obj.non_existing)
def test_setting_non_existing_object_sets_effectively(self):
obj._1 = 1
obj.someatt = "someval"
self.assertEqual(1, obj._1)
self.assertEqual("someval", obj.someatt)
def test_accesssing_non_existing_object_returns_None_with_getitem(self):
self.assertEqual(None, obj[0])
def test_setting_non_existing_object_sets_effectively_with_setitem(self):
obj[1] = 1
obj["someatt"] = "someval"
self.assertEqual(1, obj[1])
self.assertEqual("someval", obj["someatt"])
``` |
{
"source": "jonathangjertsen/flynt",
"score": 2
} |
#### File: flynt/lexer/Chunk.py
```python
import ast
import sys
import token
from collections import deque
from typing import Deque
from flynt.lexer.PyToken import PyToken
REUSE = "Token was not used"
is_36 = sys.version_info.major == 3 and sys.version_info.minor == 6
if is_36:
multiline_skip = (token.NEWLINE, 58)
multiline_break = (57,)
single_break = (token.NEWLINE, 57, 58)
else:
multiline_skip = (token.NEWLINE, token.NL)
multiline_break = (token.COMMENT,)
single_break = (token.COMMENT, token.NEWLINE, token.NL)
single_skip = ()
class Chunk:
skip_tokens = ()
break_tokens = ()
multiline = None
@staticmethod
def set_multiline():
Chunk.skip_tokens = multiline_skip
Chunk.break_tokens = multiline_break
Chunk.multiline = True
@staticmethod
def set_single_line():
Chunk.skip_tokens = single_skip
Chunk.break_tokens = single_break
Chunk.multiline = False
def __init__(self, tokens=()):
self.tokens: Deque[PyToken] = deque(tokens)
self.complete = False
self.is_percent_chunk = False
self.percent_ongoing = False
self.is_call_chunk = False
self.successful = False
self.string_in_string = False
def empty_append(self, t: PyToken):
if not t.is_string() or t.is_raw_string():
self.complete = True
self.tokens.append(t)
def second_append(self, t: PyToken):
if t.is_string():
self.tokens[0].tokval += t.tokval
self.tokens[0].end = t.end
elif t.is_percent_op():
self.tokens.append(t)
self.is_percent_chunk = True
elif t.is_dot_op():
self.tokens.append(t)
self.is_call_chunk = True
else:
self.tokens.append(t)
self.complete = True
def percent_append(self, t: PyToken):
# todo handle all cases?
if not self[0].is_string():
self.complete = True
return
if len(self) == 2:
self.tokens.append(t)
if self.is_parseable:
self.successful = True
else:
self.percent_ongoing = True
else:
if self.percent_ongoing:
self.tokens.append(t)
if t.is_string() and "{" not in str(self):
self.string_in_string = True
if self.is_parseable:
self.percent_ongoing = False
self.successful = True
elif t.is_expr_continuation_op():
self.tokens.append(t)
self.percent_ongoing = True
else:
self.complete = True
self.successful = self.is_parseable
return REUSE
def call_append(self, t: PyToken):
if t.is_string():
self.string_in_string = True
self.tokens.append(t)
if len(self) > 3 and self.is_parseable:
self.complete = True
self.successful = True
def append(self, t: PyToken):
# stop on a comment or too long chunk
if t.toknum in self.break_tokens:
self.complete = True
self.successful = self.is_parseable and (
self.is_percent_chunk or self.is_call_chunk
)
return
if len(self) > 50:
self.complete = True
self.successful = False
return
if t.toknum in self.skip_tokens:
return
if len(self) == 0:
self.empty_append(t)
elif not (self.is_call_chunk or self.is_percent_chunk):
self.second_append(t)
elif self.is_call_chunk:
self.call_append(t)
else:
return self.percent_append(t)
@property
def is_parseable(self):
if len(self.tokens) < 1:
return False
try:
ast.parse(str(self))
return True
except SyntaxError:
return False
@property
def start_line(self):
return self.tokens[0].start[0] - 1
@property
def start_idx(self):
return self.tokens[0].start[1]
@property
def end_idx(self):
return self.tokens[-1].end[1]
@property
def end_line(self):
return self.tokens[-1].end[0] - 1
@property
def n_lines(self):
return 1 + self.end_line - self.start_line
@property
def is_multiline(self):
return self.n_lines > 1
@property
def contains_raw_strings(self):
return any(tok.is_raw_string() for tok in self.tokens)
@property
def contains_multiple_string_tokens(self):
return sum(t.toknum == token.STRING for t in self.tokens) > 1
@property
def quote_type(self):
return self.tokens[0].get_quote_type()
def __getitem__(self, item):
return self.tokens[item]
def __iter__(self):
return iter(self.tokens)
def __len__(self):
return len(self.tokens)
def __str__(self):
return " ".join(t.tokval for t in self)
def __repr__(self):
if self.tokens:
return "Chunk: " + str(self)
else:
return "Empty Chunk"
``` |
{
"source": "jonathangjertsen/ifttt_event_loop",
"score": 3
} |
#### File: jonathangjertsen/ifttt_event_loop/event_loop.py
```python
from datetime import datetime
from time import sleep
import sys
from croniter import croniter
from ifttt import trigger
from event_cfg import events
from secret import debug
def run_loop(sleep_time):
"""
Check triggers, act on triggers
:param sleep_time: Time to sleep between each iteration
    :return: None (the loop runs until interrupted)
"""
def default_counters():
return { event: 0 for event in events if "periodic" in events[event]["conditions"] }
def default_timeofdays():
return { event: False for event in events if "time of day" in events[event]["conditions"] }
def default_crons():
base_time = datetime.now()
crons = {}
for event in events:
if "cron" in events[event]["conditions"]:
cron_string = events[event]["conditions"]["cron"]
if croniter.is_valid(cron_string):
crons[event] = croniter(cron_string, base_time)
# Advance the iterator once so the first call to
# get_current() doesn't return the current time
crons[event].get_next()
else:
raise ValueError("Invalid cron string specified for event {event}".format(event=event))
return crons
counters = default_counters()
timeofdays = default_timeofdays()
crons = default_crons()
datetime_after_sleep = datetime.now()
while True:
# In case we did something time consuming, remove the time it took from the sleep time
true_sleep_time = sleep_time - (datetime.now() - datetime_after_sleep).seconds
# Goodnight
sleep(max(true_sleep_time, 0))
datetime_after_prev_sleep = datetime_after_sleep
datetime_after_sleep = datetime.now()
# Reset time-of-day flags
if datetime_after_sleep.date() != datetime_after_prev_sleep.date():
timeofdays = default_timeofdays()
# Check all the triggers
for event in events:
do_check = False
conditions = events[event]["conditions"]
# Check periodic triggers
if "periodic" in conditions:
counters[event] += sleep_time
if counters[event] >= conditions["periodic"]:
counters[event] = 0
do_check = True
# Check time-of-day triggers
if "time of day" in conditions:
if not timeofdays[event]:
trigger_time = datetime.strptime(conditions["time of day"], "%H:%M").time()
                    if datetime_after_sleep.time() >= trigger_time:  # fire once the configured time of day has been reached
do_check = True
timeofdays[event] = True
# Check cron-like triggers
if "cron" in conditions:
if crons[event].get_current(datetime) <= datetime_after_sleep:
do_check = True
crons[event].get_next(datetime)
# Run triggered events
if do_check:
func = events[event]["func"]
args = events[event]["args"]
data = func(**args)
if debug:
print("Event data: ", data)
if data is not None:
response = trigger(event, *data)
if debug:
print("IFTT response: {response}".format(response=response.text))
if __name__ == "__main__":
if len(sys.argv) >= 2 and sys.argv[1].isdigit():
sleep_time = int(sys.argv[1])
else:
print("No valid sleep time set, will default to 5 seconds")
sleep_time = 5
run_loop(sleep_time)
```
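The loop above expects an `events` dict from `event_cfg.py`, which is not included in this excerpt. The sketch below shows one plausible shape for it, based on the keys the loop reads (`conditions`, `func`, `args`); the concrete values and the import path are assumptions for illustration.
```python
from triggers.example import trigger_example

events = {
    "test": {
        "func": trigger_example,
        "args": {"do_it": True},
        "conditions": {
            "periodic": 60,          # fire roughly every 60 seconds
            "time of day": "07:30",  # and once per day at 07:30
        },
    },
}
```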
#### File: jonathangjertsen/ifttt_event_loop/ifttt.py
```python
import requests
from secret import key
def trigger(event, *values):
return requests.post(
url="https://maker.ifttt.com/trigger/{event}/with/key/{key}".format(event=event, key=key),
json={ "value{idx}".format(idx=idx+1): value for idx, value in enumerate(values) }
)
if __name__ == "__main__":
response = trigger("test", "Title", "Text", "Extra")
print(response.text)
```
#### File: ifttt_event_loop/triggers/example.py
```python
def trigger_example(do_it):
if do_it:
return ("First value", "Second value", "Third value", )
return None
``` |
{
"source": "jonathangjertsen/jchord",
"score": 3
} |
#### File: jchord/jchord/group_notes_to_chords.py
```python
from collections import defaultdict
from math import exp, ceil
from typing import List
from jchord.midi import MidiNote
# Notes separated by less than this much belong to one chord
MIN_SEP_INTERVAL = 0.1
# Bucket size for the KDE algorithm
KDE_BUCKETS_PER_SECOND = 1 / MIN_SEP_INTERVAL
def kernel_default(distance):
"""
Default kernel
"""
return exp(-((distance / MIN_SEP_INTERVAL) ** 2))
def group_notes_to_chords(notes: List[MidiNote], kernel=None) -> List[List[MidiNote]]:
"""
Groups the list of `MidiNote`s by time.
The return value maps time to a list of `MidiNote`s for that time.
"""
if kernel is None:
kernel = kernel_default
# Degenerate case: no notes -> no chords
if not notes:
return []
# Ensure notes are sorted
notes = sorted(notes, key=lambda note: note.time)
# Get the total duration of all notes
min_time = notes[0].time
max_time = notes[-1].time
# Degenerate case: all in one chord
if (max_time - min_time) <= MIN_SEP_INTERVAL:
return [notes]
max_time += notes[-1].duration
duration = max_time - min_time
# Do kernel density estimate
bucket_duration = 1.0 / KDE_BUCKETS_PER_SECOND
kde = [
sum(kernel(abs(note.time - i * bucket_duration)) for note in notes)
for i in range(ceil(KDE_BUCKETS_PER_SECOND * duration))
]
# Find kde_threshold such that the times between the first and last note in a chord
# always has kde[t] > kde_threshold
buckets = defaultdict(list)
kde_threshold = float("inf")
for note in notes:
bucket = min(int(note.time / bucket_duration), len(kde) - 1)
buckets[bucket].append(note)
kde_threshold = min(kde_threshold, kde[bucket])
# It needs to be a little bit lower than that to ensure all notes get included in a chord.
    # Arbitrarily reduce by 5%
kde_threshold *= 0.95
# Do grouping
chords = []
cur_chord = []
for i, kde_val in enumerate(kde):
if kde_val > kde_threshold:
if i in buckets:
cur_chord.extend(buckets[i])
else:
if cur_chord:
chords.append(cur_chord)
cur_chord = []
if cur_chord:
chords.append(cur_chord)
return chords
```
#### File: jchord/jchord/progressions.py
```python
from collections import namedtuple
from typing import Hashable, List, Set, Union
from jchord.knowledge import REPETITION_SYMBOL
from jchord.core import CompositeObject
from jchord.chords import Chord
from jchord.midi import read_midi_file, notes_to_messages, MidiNote
from jchord.group_notes_to_chords import group_notes_to_chords
class InvalidProgression(Exception):
"""Raised when encountering what seems like an invalid chord progression."""
def _string_to_progression(string: str) -> List[Chord]:
string = string.strip()
if string == "":
return []
progression = []
for name in string.split():
name = name.strip()
if name == REPETITION_SYMBOL:
if not progression:
raise InvalidProgression(
"Can't repeat before at least one chord has been added"
)
progression.append(progression[-1])
else:
progression.append(Chord.from_name(name))
return progression
class MidiConversionSettings(object):
def __init__(
self,
filename: str,
instrument: int = 1,
tempo: int = 120,
beats_per_chord: Union[int, list] = 2,
velocity: int = 100,
repeat: str = "replay",
effect=None,
):
self.filename = filename
self.instrument = instrument
self.tempo = tempo
self.beats_per_chord = beats_per_chord
self.velocity = velocity
self.repeat = repeat
self.effect = effect
self.progression = None
def set(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
class ChordProgression(CompositeObject):
"""
Represents a chord progression.
There are many ways to create a ``ChordProgression`` object.
**From a string**
Use the ``from_string`` method to generate a chord progression from a string.
>>> ChordProgression.from_string("Dm7 -- Gm7 Am7")
ChordProgression([Chord(name='Dm7', root=Note('D', 4), intervals=Intervals(name='m7', semitones=[0, 3, 7, 10])), Chord(name='Dm7', root=Note('D', 4), intervals=Intervals(name='m7', semitones=[0, 3, 7, 10])), Chord(name='Gm7', root=Note('G', 4), intervals=Intervals(name='m7', semitones=[0, 3, 7, 10])), Chord(name='Am7', root=Note('A', 4), intervals=Intervals(name='m7', semitones=[0, 3, 7, 10]))])
**From a text file**
Use the ``from_txt`` method to generate a chord progression from a text file.
If example.txt contains the text "Am7 D7", then ``ChordProgression.from_txt("example.txt")``
will produce the same result as ``ChordProgression.from_string("Am7 D7")``.
**From an Excel file**
Use the ``from_xlsx`` method to generate a chord progression from an Excel spreadsheet.
If example.xlsx contains the following cells:
+-----+-----+
| C | D |
+-----+-----+
| Em7 | G7 |
+-----+-----+
Then the result is equivalent to calling ``ChordProgression.from_string("C D Em7 G7")``.
.. note::
This feature requires ``openpyxl``, which you can get with ``pip install openpyxl``.
**From a MIDI file**
Use the ``from_midi`` method to generate a chord progression from a MIDI file.
If ``example.mid`` contains some chords that you would like to convert to a ``ChordProgression``,
use ``ChordProgression.from_midi("example.mid")``.
For best results, the MIDI file should contain a single instrument with chords played as straight as possible.
.. note::
This feature requires ``mido``, which you can get with ``pip install mido``.
"""
class _DummyChord(object):
"""Mocks a ChordWithProgression object"""
def midi(self):
return []
DUMMY_CHORD = _DummyChord()
def __init__(self, progression: List[Chord]):
self.progression = progression
def _keys(self) -> Hashable:
return (self.progression,)
@classmethod
def from_string(cls, string: str) -> "ChordProgression":
return cls(_string_to_progression(string))
@classmethod
def from_txt(cls, filename: str) -> "ChordProgression":
with open(filename) as file:
return cls(_string_to_progression(file.read()))
@classmethod
def from_xlsx(cls, filename: str) -> "ChordProgression":
from openpyxl import load_workbook
workbook = load_workbook(filename)
sheet = workbook.active
names = []
for row in sheet.iter_rows():
for cell in row:
name = cell.value
if not name:
name = REPETITION_SYMBOL
names.append(name)
return cls.from_string(" ".join(names))
@classmethod
def from_midi_file(cls, filename: str) -> "ChordProgression":
notes = read_midi_file(filename)
progression = []
for chord in group_notes_to_chords(notes):
progression.append(Chord.from_midi([note.note for note in chord]))
return cls(progression)
def chords(self) -> Set[Chord]:
"""
Returns the set of chords in the progression.
>>> ChordProgression.from_string("Am7 D7").chords() # doctest: +SKIP
{Chord(name='D7', root=Note('D', 4), intervals=Intervals(name='7', semitones=[0, 4, 7, 10])), Chord(name='Am7', root=Note('A', 4), intervals=Intervals(name='m7', semitones=[0, 3, 7, 10]))}
"""
return set(self.progression)
def midi(self) -> List[List[int]]:
"""
Returns the MIDI values for each chord in the progression.
>>> ChordProgression.from_string("Am7 D7").midi()
[[69, 72, 76, 79], [62, 66, 69, 72]]
"""
return [chord.midi() for chord in self.progression]
def transpose(self, shift: int):
"""
Transposes all chords in the progression by the given shift.
>>> ChordProgression.from_string("Am7 D7").transpose(2).to_string().strip()
'Bm7 E7'
"""
return ChordProgression([chord.transpose(shift) for chord in self.progression])
def to_string(
self, chords_per_row: int = 4, column_spacing: int = 2, newline: str = "\n"
) -> str:
"""
Returns the string representation of the chord progression.
"""
max_len = max(len(chord.name) for chord in self.progression)
column_width = max_len + column_spacing
column = 0
output = []
prev_chord = None
for chord in self.progression:
if prev_chord == chord:
chord_name = REPETITION_SYMBOL
else:
chord_name = chord.name
output.append(chord_name)
output.append(" " * (column_width - len(chord_name)))
column += 1
if column % chords_per_row == 0:
column = 0
output.append(newline)
prev_chord = chord
return "".join(output) + newline
def to_txt(
self,
filename: str,
chords_per_row: int = 4,
column_spacing: int = 2,
newline: str = "\n",
):
"""
Saves the string representation of the chord progression to a text file.
"""
output_str = self.to_string(
chords_per_row=chords_per_row,
column_spacing=column_spacing,
newline=newline,
)
with open(filename, "w") as file:
file.write(output_str)
def to_xlsx(self, filename: str, chords_per_row: int = 4):
"""
Saves the chord progression to an Excel file.
.. note::
This feature requires ``openpyxl``, which you can get with ``pip install openpyxl``.
"""
from openpyxl import Workbook
workbook = Workbook()
worksheet = workbook.active
row = 1
column = 1
prev_chord = None
for chord in self.progression:
if prev_chord == chord:
chord_name = REPETITION_SYMBOL
else:
chord_name = chord.name
worksheet.cell(row=row, column=column).value = chord_name
column += 1
if (column - 1) % chords_per_row == 0:
column = 1
row += 1
prev_chord = chord
workbook.save(filename)
def to_midi(self, settings: MidiConversionSettings, **kwargs):
"""
Saves the chord progression to a MIDI file.
.. note::
This feature requires ``mido``, which you can get with ``pip install mido``.
"""
if not isinstance(settings, MidiConversionSettings) or kwargs:
raise ValueError(
"to_midi now takes a MidiConversionSettings object, not individual arguments; see README.md"
)
repeat_options = {"replay", "hold"}
assert (
settings.repeat in repeat_options
), f"repeat argument must be one of: {repeat_options}"
import mido
mid = mido.MidiFile()
track = mido.MidiTrack()
mid.tracks.append(track)
# Ensure beats_per_chord is a list
if isinstance(settings.beats_per_chord, int):
settings.beats_per_chord = [
settings.beats_per_chord for _ in range(len(self.progression))
]
assert len(settings.beats_per_chord) == len(
self.progression
), "len(settings.beats_per_chord) is {}, which is not equal to the number of chords in the progression ({})".format(
len(settings.beats_per_chord), len(self.progression)
)
seconds_per_chord = [
(60 / settings.tempo) * bpc for bpc in settings.beats_per_chord
]
ticks_per_chord = [
int(
mido.second2tick(
spc, mid.ticks_per_beat, mido.bpm2tempo(settings.tempo)
)
)
for spc in seconds_per_chord
]
track.append(
mido.MetaMessage("set_tempo", tempo=mido.bpm2tempo(settings.tempo))
)
track.append(mido.Message("program_change", program=settings.instrument))
played_chords = []
prev_chord = None
time = 0
for chord, tpc in zip(self.midi(), ticks_per_chord):
if chord == prev_chord and settings.repeat == "hold":
played_chords[-1] = [
pnote._replace(duration=pnote.duration + tpc)
for pnote in played_chords[-1]
]
else:
played_chords.append(
[
MidiNote(
note=note,
velocity=settings.velocity,
time=time,
duration=tpc,
)
for note in chord
]
)
prev_chord = chord
time += tpc
settings.set(progression=self)
settings.set(played_chords=played_chords)
settings.set(midi_track=track)
if settings.effect:
settings.effect.set_settings(settings)
played_chords = [settings.effect.apply(chord) for chord in played_chords]
played_notes = [note for chord in played_chords for note in chord]
for message in notes_to_messages(played_notes, velocity=settings.velocity):
track.append(message)
mid.save(settings.filename)
SongSection = namedtuple("SongSection", "name, progression")
SongSection.__doc__ = """Represents a section in a Song."""
class Song(CompositeObject):
"""Represents a song (a series of sections)."""
def __init__(self, sections: List[SongSection]):
self.sections = sections
def _keys(self):
return (self.sections,)
def to_string(
self, chords_per_row: int = 4, column_spacing: int = 2, newline: str = "\n"
):
"""Returns the string representation of the song."""
out = []
multiplier = 1
for i, section in enumerate(self.sections):
if multiplier > 1:
multiplier -= 1
continue
for j in range(i + 1, len(self.sections)):
if self.sections[j] is self.sections[i]:
multiplier += 1
else:
break
if multiplier > 1:
section_name = f"{section.name} (x{multiplier})"
else:
section_name = section.name
out.append(f"{section_name}{newline}{'=' * len(section_name)}{newline}")
out.append(
section.progression.to_string(
chords_per_row=chords_per_row,
column_spacing=column_spacing,
newline=newline,
)
)
out.append(newline)
combined = "".join(out)
combined = combined.replace(3 * newline, 2 * newline)
combined = combined.strip() + newline
combined = newline.join(line.strip() for line in combined.split(newline))
return combined
```
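A usage sketch tying the pieces together, assuming `jchord` is installed; MIDI export additionally needs `mido`, as noted in the docstrings above.
```python
from jchord.progressions import ChordProgression, MidiConversionSettings

prog = ChordProgression.from_string("C Am7 F G7")
print(prog.to_string(chords_per_row=4).strip())

# Transpose up a whole step and write a MIDI file (requires mido).
prog.transpose(2).to_midi(MidiConversionSettings(
    filename="progression.mid",
    tempo=100,
    beats_per_chord=4,
))
```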
#### File: jonathangjertsen/jchord/repo_check.py
```python
from pathlib import Path
from subprocess import run
import sys
TASKS = {}
def task(func):
TASKS[func.__name__] = func
return func
@task
def build_docs():
print("========== Generating docs...")
proc = run(["sphinx-build", ".", "docs", "-c", "docs"])
if proc.returncode != 0:
print("sphinx-build failed.")
sys.exit(1)
@task
def fix_crlf():
print("========== Removing CRLF...")
def helper(path):
path_str = str(path)
if any(
token in path_str
for token in (".git", ".pytest_cache", "build", "dist", ".png", ".mid")
):
return
if path.is_dir():
for subpath in path.iterdir():
helper(subpath)
else:
try:
path.write_text(path.read_text().replace("\r\n", "\n"))
except UnicodeDecodeError:
pass
helper(Path("."))
@task
def check_clean():
print("========== Checking if anything changed...")
porcelain_after = (
run(["git", "status", "--porcelain"], capture_output=True)
.stdout.decode("ascii", errors="ignore")
.strip()
)
if porcelain_after:
print(porcelain_after)
run(["git", "status", "-vvv"])
print(
"Repo is dirty.\n"
"If this is run locally, please commit the files that were updated.\n"
"If this is in CI, please run python repo_check.py locally and commit the changes."
)
sys.exit(1)
@task
def format():
print("========== Formatting python code...")
proc = run(["black", "."])
if proc.returncode != 0:
print("black failed.")
sys.exit(1)
@task
def pytest():
print("========== Checking if tests pass...")
proc = run(["pytest"])
if proc.returncode != 0:
print("pytest failed.")
sys.exit(1)
@task
def pyflakes():
print("========== Running pyflakes...")
proc = run(["pyflakes", "jchord", "test", "examples"])
if proc.returncode != 0:
print("pyflakes failed.")
sys.exit(1)
@task
def doctest():
print("========== Checking if doctests pass...")
proc = run(
[
"python",
"-m",
"doctest",
"-v",
"jchord/core.py",
"jchord/chords.py",
"jchord/progressions.py",
]
)
if proc.returncode != 0:
print("pytest failed.")
sys.exit(1)
if __name__ == "__main__":
git = "git" in sys.argv
fix_crlf()
if git:
check_clean()
pyflakes()
format()
pytest()
doctest()
build_docs()
fix_crlf()
if git:
check_clean()
print("OK, everything is up to date")
```
#### File: jchord/test/test_0_core.py
```python
from jchord.knowledge import CHROMATIC, ENHARMONIC
from jchord.core import (
CompositeObject,
degree_to_semitone,
InvalidDegree,
Note,
note_diff,
semitone_to_degree_options,
split_to_base_and_shift,
)
import pytest
@pytest.mark.parametrize(
"degree, semitone",
[
("1", 0),
("b1", -1),
("bb1", -2),
("bbb1", -3),
("#1", 1),
("##1", 2),
("2", 2),
("b2", 1),
("#2", 3),
("3", 4),
("4", 5),
("b4", 4),
("#4", 6),
("5", 7),
("b5", 6),
("#5", 8),
("6", 9),
("b6", 8),
("#6", 10),
("7", 11),
("b7", 10),
("#7", 12),
("9", 14),
("b9", 13),
("#9", 15),
("11", 17),
("b11", 16),
("#11", 18),
("13", 21),
("b13", 20),
("#13", 22),
],
)
def test_degree_to_semitone(degree, semitone):
assert degree_to_semitone(degree) == semitone
@pytest.mark.parametrize("degree", ["#b1", "b1#", "asdf", "b99"])
def test_degree_to_semitone_invalid_degree(degree):
with pytest.raises(InvalidDegree):
assert degree_to_semitone(degree)
@pytest.mark.parametrize(
"semitone, n_accidentals, options",
[
(-1, 0, []),
(24, 0, []),
(0, 0, ["1"]),
(0, 1, ["1"]),
(0, 2, ["1", "bb2"]),
(0, 3, ["1", "bb2"]),
(0, 4, ["1", "bb2", "bbbb3"]),
(3, 0, []),
(3, 1, ["b3", "#2"]),
(4, 0, ["3"]),
(4, 1, ["3", "b4"]),
(8, 1, ["b6", "#5"]),
(17, 1, ["11", "#10"]),
],
)
def test_semitone_to_degree_options(semitone, n_accidentals, options):
assert semitone_to_degree_options(semitone, n_accidentals) == options
@pytest.mark.parametrize(
"item, base, shift",
[
("A", "A", 0),
("A#", "A", 1),
("A##", "A", 2),
("A###", "A", 3),
("Ab", "A", -1),
("Abb", "A", -2),
("Abbb", "A", -3),
],
)
def test_split_to_base_and_shift_after(item, base, shift):
assert split_to_base_and_shift(item, name_before_accidental=True) == (base, shift)
@pytest.mark.parametrize(
"item, base, shift",
[
("9", "9", 0),
("#9", "9", 1),
("##9", "9", 2),
("###9", "9", 3),
("b9", "9", -1),
("bb9", "9", -2),
("bbb9", "9", -3),
],
)
def test_split_to_base_and_shift_before(item, base, shift):
assert split_to_base_and_shift(item, name_before_accidental=False) == (base, shift)
@pytest.mark.parametrize(
"name, octave, the_repr",
[
("A", 0, "Note('A', 0)"),
("A", 1, "Note('A', 1)"),
("G#", 1, "Note('G#', 1)"),
("Db", 133, "Note('Db', 133)"),
],
)
def test_note_repr(name, octave, the_repr):
assert repr(Note(name, octave)) == the_repr
assert Note(name, octave) == eval(the_repr)
@pytest.mark.parametrize(
"sharp, flat, octave",
[(sharp, flat, octave) for sharp, flat in ENHARMONIC for octave in range(-2, 2)],
)
def test_note_eq(sharp, flat, octave):
assert Note(sharp, octave) == Note(flat, octave)
assert Note(flat, octave) == Note(sharp, octave)
@pytest.mark.parametrize(
"note, octave", [(note, octave) for note in CHROMATIC for octave in range(-2, 2)]
)
def test_note_eq_tuple(note, octave):
assert Note(note, octave) == (note, octave)
@pytest.mark.parametrize(
"note, octave, other",
[
("A", 0, None),
("A", 0, ("A",)),
("A", 0, ("A", 1)),
("A", 0, ("Ab", 0)),
("A", 0, ("A#", 0)),
("A", 0, ("E", 0)),
("A", 0, ("A", 0, 0)),
],
)
def test_note_neq(note, octave, other):
assert Note(note, octave) != other
@pytest.mark.parametrize(
"note_in, octave_in, shift, note_out, octave_out",
# [
# (note, octave, n * 12, note, octave + n)
# for n in range(-2, 2)
# for octave in range(-2, 2)
# for note in CHROMATIC
# ]
[
("C", 0, 1, "C#", 0),
("C#", 0, 1, "D", 0),
("D", 0, 1, "D#", 0),
("B", 0, 1, "C", 1),
("B", 1, 1, "C", 2),
("Bb", 4, 1, "B", 4),
("Bb", 4, -1, "A", 4),
("B", 110, 1, "C", 111),
("C#", 0, -1, "C", 0),
("D", 0, -1, "C#", 0),
("D#", 0, -1, "D", 0),
("C", 0, -1, "B", -1),
("C", 1, -1, "B", 0),
("C", 110, -1, "B", 109),
("C", 4, 7, "G", 4),
("C", 4, 12 + 7, "G", 5),
("A", 3, 3, "C", 4),
],
)
def test_transpose(note_in, octave_in, shift, note_out, octave_out):
assert Note(note_in, octave_in).transpose(shift) == (note_out, octave_out)
@pytest.mark.parametrize(
"note_in, octave_in, shift, down, note_out, octave_out",
[
("C", 0, "b2", False, "C#", 0),
("C", 0, "b2", True, "B", -1),
("C#", 0, "#1", False, "D", 0),
("D", 0, "b2", False, "D#", 0),
("B", 0, "b2", False, "C", 1),
("B", 1, "#1", False, "C", 2),
("B", 110, "#1", False, "C", 111),
("C", 4, "5", False, "G", 4),
("C", 4, "5", True, "F", 3),
("C", 4, "12", False, "G", 5),
("A", 3, "b3", False, "C", 4),
],
)
def test_transpose_degree(note_in, octave_in, shift, down, note_out, octave_out):
assert Note(note_in, octave_in).transpose_degree(shift, down) == (
note_out,
octave_out,
)
@pytest.mark.parametrize(
"note_low, note_high, diff",
[
("A", "A", 0),
("A", "A#", 1),
("A", "B", 2),
("A", "G", 10),
("A#", "A", 11),
("B", "A", 10),
("G", "A", 2),
],
)
def test_note_diff(note_low, note_high, diff):
assert note_diff(note_low, note_high) == diff
@pytest.mark.parametrize(
"name, octave, pitch",
[
("A", 4, 440),
("A", 0, 27.5),
("A#", 0, 29.135235),
("C", 8, 4186.009),
("C", 4, 261.62556),
],
)
def test_note_pitch(name, octave, pitch):
assert Note(name, octave).pitch() == pytest.approx(pitch)
def test_composite_object_equality():
with pytest.raises(NotImplementedError):
CompositeObject() == CompositeObject()
def test_composite_object_hash():
with pytest.raises(NotImplementedError):
hash(CompositeObject())
with pytest.raises(NotImplementedError):
{CompositeObject(): 1}
@pytest.mark.parametrize(
"args_a, args_b, equal",
[
((1, 2, 3), (1, 2, 3), True),
((1, 2, 3), (1, 2, 2), True),
((1, 2, 3), (1, 3, 3), False),
],
)
def test_composite_object_subclass(args_a, args_b, equal):
class ConcreteCompositeObject(CompositeObject):
def __init__(self, a, b, c):
self.a, self.b, self.c = a, b, c
def _keys(self):
return (self.a, self.b)
assert (
ConcreteCompositeObject(*args_a) == ConcreteCompositeObject(*args_b)
) == equal
assert (
{ConcreteCompositeObject(*args_a): 1} == {ConcreteCompositeObject(*args_b): 1}
) == equal
``` |
{
"source": "jonathangjertsen/plappy",
"score": 3
} |
#### File: plappy/plappy/devices.py
```python
import collections.abc
from plappy.core import Connectable
from plappy.io import IO
from plappy.plappyconfig import config
from plappy.util import unique_key
DevicePatch = dict
class Device(Connectable):
"""A generic Device.
label (str): Used to identify the Device
ios (dict[str->IO]): IO ports for the Device
subdevices (dict[str->Device]): Other Devices contained within this one,
will tick whenever self.tick() is ran
"""
def __init__(self, label: str):
"""Initialize Device with inp label, an optional dict of IOs and an optional dict of Devices"""
super().__init__(label)
self.ios = {}
self.subdevices = {}
def __repr__(self) -> str:
"""Representation of the Device"""
ios = "{" + ", ".join(repr(io) for io in self.ios) + "}"
subdevices = "{" + ", ".join(repr(sub) for sub in self.subdevices) + "}"
return f"{type(self).__name__}('{self.label}', ios={ios}, subdevices={subdevices})"
def __le__(self, devices: 'Device' or tuple) -> 'Device':
"""Allows you to add subdevices: parent <= (child1, child2, ..., childn)"""
        if isinstance(devices, collections.abc.Iterable):
for device in devices:
self <= device
return self
else:
self.add_subdevice(devices)
return devices
def __or__(self, other: 'Device') -> 'DeviceCollectionMixin':
"""Makes (dev1|dev2) a DeviceConnection which can run the two devices in parallel"""
from plappy.mixins import DeviceCollectionMixin
return DeviceCollectionMixin('parallel') <= (self, other)
def __ror__(self, other: 'Device'):
"""Equivalent to __or__"""
return self | other
def patch_helper(self, seen: set):
seen.add(id(self))
if not self.subdevices:
return []
return [
{
'class': type(sub).__name__,
                'label': sub.label,
'ios': tuple(io.patch_helper() for io in sub.ios.values()),
'subdevices': sub.patch_helper(seen)
}
for sub in self.subdevices.values()
if id(sub) not in seen
]
def make_patch(self, name: str, format: str = 'dict'):
seen = set()
patch = DevicePatch(
version=config.version,
schema='device-patch',
name=name,
tree={
'class': type(self).__name__,
'label': self.label,
'ios': tuple(io.patch_helper() for io in self.ios.values()),
'subdevices': self.patch_helper(seen)
}
)
if format == 'json':
import json
return json.dumps(patch, indent=4)
else:
return patch
def add_io(self, io: IO, label: str = None) -> 'Device':
"""Add a new IO port"""
# Ensure label exists
if label is None:
label = io.label
# Add the IO
self.ios[unique_key(label, self.ios)] = io
return self
def io(self, label: str) -> IO:
"""Return the IO instance referred to by the label"""
return self.ios[label]
def add_subdevice(self, subdevice: 'Device', label: str = None) -> 'Device':
"""Add a new subdevice"""
if label is None:
label = subdevice.label
self.subdevices[unique_key(label, self.subdevices)] = subdevice
return self
def add_subdevices(self, subdevices: dict) -> 'Device':
for label in subdevices:
self.add_subdevice(subdevices[label], label)
return self
def subdevice(self, label: str) -> 'Device':
"""Return the Device instance referred to by the label"""
return self.subdevices[label]
def connect_io(self, other: 'IO', label: str) -> 'Device':
"""Connect an own IO to another IO"""
self.ios[label] -- other
return self
def tick(self) -> 'Device':
# Load from inputs
for io in self.ios.values():
if io.bufstate == config.bufstate.filled:
io.tick()
# Do own processing
self.process()
# Push from outputs
for io in self.ios.values():
if io.bufstate == config.bufstate.ready_to_push:
io.tick()
return self
def process(self) -> 'Device':
# Run subdevices
for subdevice in self.subdevices:
self.subdevices[subdevice].tick()
return self
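if __name__ == "__main__":
    # Hedged demo sketch (added for illustration, not part of the original
    # module): it builds a bare Device tree using only methods defined above
    # and prints its JSON patch. No IO objects are attached, so tick() only
    # walks the subdevices' process() methods.
    rig = Device("rig")
    rig <= (Device("synth"), Device("mixer"))
    rig.tick()
    print(rig.make_patch("demo", format="json"))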
``` |
{
"source": "jonathangjertsen/pyboard-mpu9225",
"score": 3
} |
#### File: jonathangjertsen/pyboard-mpu9225/mpu9225stream.py
```python
from pyb import I2C
import pyb
import utime
import sys
import array
from micropython import const
# Set the full-scale range of the accelerometer in g's here. Must be 2, 4, 8, or 16
FULL_SCALE_CHOICE = 2
# I2C address
AX_ADDR = const(104)
# Translates the full-scale value in g's to the right register value
FULL_SCALE = {
2: 0 << 3,
4: 1 << 3,
8: 2 << 3,
16: 3 << 3
}
# Maximum 16-bit value
TOP_16BIT = const(65536)
# MPU9225 register addresses
INT_STATUS = const(0x3A)
ACCEL_XOUT_H = const(0x3B)
ACCEL_XOUT_L = const(0x3C)
ACCEL_YOUT_H = const(0x3D)
ACCEL_YOUT_L = const(0x3E)
ACCEL_ZOUT_H = const(0x3F)
ACCEL_ZOUT_L = const(0x40)
SMPLRT_DIV = const(0x19)
WHO_AM_I = const(0x75)
PWR_MGMT_1 = const(0x6b)
PWR_MGMT_1_CLKSEL_MASK = const(0x7)
PWR_MGMT_1_SLEEP_MASK = const(0x40)
ACCEL_CONFIG = const(0x1c)
ACCEL_CONFIG2 = const(0x1d)
ACCEL_FS_SEL_MASK = const(0x18)
_i2c_object = None
_i2c_default_bus = 1
def i2c(bus_no: int = None, baudrate: int = 400000) -> I2C:
    """Return an I2C object which is initialized the first time the function is called.
    If bus_no is None, the bus selected with set_i2c_bus() is used."""
    global _i2c_object
    if bus_no is None:
        bus_no = _i2c_default_bus
    if _i2c_object is None:
        _i2c_object = I2C(bus_no, I2C.MASTER, baudrate=baudrate)
    return _i2c_object
def set_i2c_bus(bus_no: int) -> None:
"""Sets the I2C bus used by the accelerometer."""
global _i2c_default_bus
_i2c_default_bus = bus_no
def twos_complement(val: int, num_bits: int) -> int:
"""Returns the num_bits-bit two's complement of the input value."""
mask = 2 ** (num_bits - 1)
twos_comp = -(val & mask) + (val & ~mask)
return twos_comp
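# Worked example (added for clarity): for 16-bit values,
# twos_complement(0xFFFE, 16) == -2 and twos_complement(0x0002, 16) == 2.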
def ax_send(data: int, max_attempts: int=10) -> None:
"""Send data to the accelerometer, trying up to max_attempts times with exponential backoff. Raises OSError if it fails."""
attempts = 0
while attempts < max_attempts:
try:
i2c().send(data, addr=AX_ADDR)
return
except OSError:
pyb.delay(0.5 * 2 ** attempts)
attempts += 1
raise OSError("Failed to send")
def ax_write(reg: int, value: int) -> None:
"""Write a value to a register."""
ax_send(bytearray([reg, value]))
def ax_write_masked(reg: int, value: int, bitmask: int, read_after: bool=False) -> int or None:
"""Update some bits (specified by the bitmask) of the register with the bits in the value. If read_after is True, returns the actual value of the register write."""
masked_val = value & bitmask
old_val = ax_read(reg, convert=True)
reg_val = (old_val & ~bitmask) | masked_val
ax_write(reg, reg_val)
return ax_read(reg, convert=True) if read_after else None
def ax_read(reg: int, convert: bool=False) -> int:
"""Read an 8-bit register and return the result as an integer."""
ax_send(reg)
if convert:
return int.from_bytes(i2c().recv(1, addr=AX_ADDR), 'big')
else:
return i2c().recv(1, addr=AX_ADDR)
def ax_read_double(addr_h: int, addr_l: int, as_list: bool=False) -> list or int:
"""Read two 8-bit registers. If as_list is True, the result is returned as a list. Otherwise, the result is interpreted as a single 16-bit value."""
res_h = ax_read(addr_h, convert=True)
res_l = ax_read(addr_l, convert=True)
if as_list:
return [res_h, res_l]
else:
return res_h * 256 + res_l
def ax_x() -> int:
"""Read the acceleration value along the x axis."""
return twos_complement(ax_read_double(ACCEL_XOUT_H, ACCEL_XOUT_L), 16) * FULL_SCALE_CHOICE // 4
def ax_y() -> int:
"""Read the acceleration value along the y axis."""
return twos_complement(ax_read_double(ACCEL_YOUT_H, ACCEL_YOUT_L), 16) * FULL_SCALE_CHOICE // 4
def ax_z() -> int:
"""Read the acceleration value along the z axis."""
return twos_complement(ax_read_double(ACCEL_ZOUT_H, ACCEL_ZOUT_L), 16) * FULL_SCALE_CHOICE // 4
def init_accelerometer(do_scan=True) -> None:
"""Initialize the accelerometer."""
# Wait for an I2C device with the correct I2C address to appear.
while True:
check_ready = True
if do_scan:
slaves = i2c().scan()
print("I2C device addresses: " + ", ".join([str(slave) for slave in slaves]))
if not AX_ADDR in slaves:
check_ready = False
if check_ready:
if (i2c().is_ready(AX_ADDR)):
print("Ready!")
break
else:
print("AX is not ready.")
pyb.delay(1000)
# Set accelerometer clock
ax_write_masked(reg=PWR_MGMT_1, value=1, bitmask=PWR_MGMT_1_CLKSEL_MASK)
# Set full scale accelerometer range
ax_write_masked(reg=ACCEL_CONFIG, value=FULL_SCALE[FULL_SCALE_CHOICE], bitmask=ACCEL_FS_SEL_MASK)
# Disable sleep
ax_write_masked(reg=PWR_MGMT_1, value=0, bitmask=PWR_MGMT_1_SLEEP_MASK)
def get_data_string() -> str:
"""Get a string with the current time in microseconds and the acceleration along x, y and z."""
return "{0} {1} {2} {3}".format(utime.ticks_us(), ax_x(), ax_y(), ax_z())
def to_g(ax) -> float:
"""Convert raw value to acceleration in g's."""
return 2 * FULL_SCALE_CHOICE * ax / TOP_16BIT
def read_buf(number_of_samples, sample_period, prev_t=0) -> tuple:
"""Read number_of_samples samples spaced (at least) sample_period apart, blocking in the meantime.
Can be called in a loop by using the second return value as the third argument in the next call."""
buf = array.array('i')
for i in range(number_of_samples):
# Spin in a tight loop until the time is right
t = utime.ticks_us()
if utime.ticks_diff(t, prev_t) < sample_period:
continue
prev_t = t
# Add 4 entries to the buffer: time, x, y and z
buf.append(t)
buf.append(ax_x())
buf.append(ax_y())
buf.append(ax_z())
return buf, t
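def _demo_stream(number_of_samples=25, sample_period=10000):
    """Hedged demo (added for illustration, not part of the original driver):
    initialize the sensor, read one buffer of raw samples and print them in g's.
    The sample period (in microseconds) and sample count are arbitrary example values."""
    init_accelerometer()
    buf, _ = read_buf(number_of_samples, sample_period)
    for i in range(0, len(buf), 4):
        # each sample occupies four slots in the buffer: timestamp_us, x, y, z
        print(buf[i], to_g(buf[i + 1]), to_g(buf[i + 2]), to_g(buf[i + 3]))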
``` |
{
"source": "jonathangjertsen/pyruter",
"score": 3
} |
#### File: pyruter/ruter/__init__.py
```python
import json
import pytz
from datetime import datetime as dt
from dateutil.parser import parse as dparse
from pytz import reference
from requests import get
_URIBASE = 'https://reisapi.ruter.no'
_SYS_TZ = reference.LocalTimezone()
class Ruter(object):
def __init__(self, uribase=_URIBASE):
self.uribase = uribase
self.location = None
def set_location(self, location=None):
"""
Location should be a tuple with x as the first value and y as the second
"""
self.location = location
def get_simple(self, api_method, search_string="", params=None):
response = get(
urljoin(self.uribase, api_method, search_string), params)
verify_response(response)
return json.loads(response.text)
def get_validities(self):
"""
Returns the date and time for the first and last valid search time.
http://reisapi.ruter.no/Help/Api/GET-Meta-GetValidities
"""
return self.get_simple('Meta/GetValidities')
def get_heartbeat(self):
"""
http://reisapi.ruter.no/Help/Api/GET-Heartbeat-Index
"""
return self.get_simple('Heartbeat/Index')
def get_street(self, street_id):
"""
http://reisapi.ruter.no/Help/Api/GET-Street-GetStreet-id
"""
return self.get_simple('Street/GetStreet', street_id)
def get_trip(self, trip_id, trip_time=None):
params = {}
if trip_time:
params['time'] = trip_time
return self.get_simple('Trip/GetTrip', trip_id, params)
def get_places(self, search_string, location=None, counties=None):
"""
http://reisapi.ruter.no/Help/Api/GET-Place-GetPlaces-id_location
"""
params = {}
if location:
params['location'] = get_location_string(location)
elif self.location:
params['location'] = get_location_string(self.location)
if counties:
params['counties'] = counties
return self.get_simple('Place/GetPlaces', search_string, params)
def get_place(self, search_string, location=None):
"""
http://reisapi.ruter.no/Help/Api/GET-Place-GetPlaces-id_location
"""
return self.get_places(search_string, location)[0]
def get_stop(self, stop_id):
"""
http://reisapi.ruter.no/Help/Api/GET-Place-GetStop-id
"""
return self.get_simple('Place/GetStop', stop_id)
def get_stops_ruter(self):
"""
http://reisapi.ruter.no/Help/Api/GET-Place-GetStopsRuter
"""
return self.get_simple('Place/GetStopsRuter')
def get_travels(self, **travel_args):
"""
http://reisapi.ruter.no/Help/Api/GET-Travel-GetTravels_fromPlace_toPlace_isafter_time_changemargin_changepunish_walkingfactor_proposals_transporttypes_maxwalkingminutes_linenames_waitReluctance_walkreluctance_waitAtBeginningFactor_showIntermediateStops_ignoreRealtimeUpdates_intermediateStops
"""
return self.get_simple('Travel/GetTravels', '', travel_args)
def get_travels_extension(self, **travel_args):
"""
http://reisapi.ruter.no/Help/Api/GET-Travel-GetTravelsExtension_fromplace_toplace_isafter_time_changemargin_changepunish_walkingfactor_proposals_transporttypes_maxwalkingminutes_linenames_showIntermediatePlaces_ignoreRealtimeUpdates
"""
return self.get_simple('Travel/GetTravelsExtension', '', travel_args)
def get_lines(self, ruter_operated_only=False, extended=False):
"""
http://reisapi.ruter.no/Help/Api/GET-Line-GetLines
http://reisapi.ruter.no/Help/Api/GET-Line-GetLinesRuter_ruterOperatedOnly
http://reisapi.ruter.no/Help/Api/GET-Line-GetLinesRuterExtended_ruterOperatedOnly
"""
if ruter_operated_only:
if extended:
url = 'Line/GetLinesRuter/Extended'
else:
url = 'Line/GetLinesRuter'
return self.get_simple(url, '', {'ruterOperatedOnly': True})
else:
return self.get_simple('Line/GetLines')
def get_lines_by_stop_id(self, stop_id):
"""
http://reisapi.ruter.no/Help/Api/GET-Line-GetLinesByStopID-id
"""
return self.get_simple('Line/GetLinesByStopID', stop_id)
def get_data_by_line_id(self, line_id):
"""
http://reisapi.ruter.no/Help/Api/GET-Line-GetDataByLineID-id
"""
return self.get_simple('Line/GetDataByLineID', line_id)
def get_stops_by_line_id(self, line_id):
"""
http://reisapi.ruter.no/Help/Api/GET-Line-GetStopsByLineID-id
"""
return self.get_simple('Line/GetStopsByLineID', line_id)
def get_departures(self,
stop_id,
datetime=None,
transporttypes=None,
linenames=None):
"""
http://reisapi.ruter.no/Help/Api/GET-StopVisit-GetDepartures-id_datetime_transporttypes_linenames
"""
params = {}
if datetime:
params['datetime'] = datetime
if transporttypes:
params['transporttypes'] = transporttypes
if linenames:
params['linenames'] = linenames
return self.get_simple('StopVisit/GetDepartures', stop_id, params)
def get_next_departure(self, stop_id, linename, direction):
"""
direction: 1 is towards city center, 2 is west
"""
all_departures = self.get_departures(stop_id, linenames=linename)
all_departures = [
d for d in all_departures
if d['MonitoredVehicleJourney']['DirectionName'] == str(direction)
]
next_departure = min(
all_departures,
key=
lambda elm: elm['MonitoredVehicleJourney']['MonitoredCall']['ExpectedArrivalTime']
)
return next_departure['MonitoredVehicleJourney']
def get_time_until_next_departure(self, stop_id, linename, direction):
departure = self.get_next_departure(
stop_id, linename=linename, direction=direction)
departure_dt =\
localize(dparse(departure['MonitoredCall']['ExpectedArrivalTime']))
return localize(departure_dt) - tz_now()
def verify_response(response):
if not str(response.status_code).startswith('2'):
        raise Exception('%s: %s' % (response.status_code, response.text))
try:
json.loads(response.text)
except Exception as e:
raise Exception('Unable to parse json\n %s' % str(e))
def urljoin(*args):
uri = ''
for arg in args:
if not uri.endswith('/') and uri != '':
uri += '/'
uri += str(arg)
return uri
def tz_now():
return pytz.utc.localize(dt.utcnow()).astimezone(_SYS_TZ)
def localize(timestamp):
return timestamp.astimezone(_SYS_TZ)
def get_location_string(location):
"""
Get coordinates as a string compatible with the Ruter API
"""
if location:
return f"(x={location[0]},y={location[1]})"
else:
return None
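def _example_usage(stop_id=3010011, linename="11"):
    """Hedged example (added for illustration, not part of the original client):
    stop_id and linename are placeholder values, not verified Ruter identifiers."""
    ruter = Ruter()
    departures = ruter.get_departures(stop_id, linenames=linename)
    wait = ruter.get_time_until_next_departure(stop_id, linename=linename, direction=1)
    return departures, wait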
``` |
{
"source": "jonathan-golorry/django_model_helpers",
"score": 2
} |
#### File: jonathan-golorry/django_model_helpers/model_helpers.py
```python
from django.core.exceptions import ValidationError
from os import path as fs_path
from time import strftime
from django.utils.text import slugify
from django.utils import six
from django.utils.translation import ugettext as _
from django.core.cache import cache
from django.conf import settings
from django.db import models
from collections import OrderedDict
try:
from django.utils.deconstruct import deconstructible
except ImportError:
# for older versions of django, define a no-op decorator instead.
def deconstructible(old_class):
return old_class
UPLOAD_TO_OPTIONS = {
"black_listed_extensions": ["php", "html", "htm", "js", "vbs", "py", "pyc", "asp", "aspx", "pl"],
"max_filename_length": 40,
"file_name_template": "{model_name}/%Y/{filename}.{extension}"
}
@deconstructible
class UploadTo(object):
"""
An instance of this class is passed as "upload_to" parameter for any FileField or ImageField
It ensures file name is less than "max_filename_length" char also slugify the filename and finally provide simple
protection against uploading some harmful files like (php or python files)
File is saved in a folder called <model_name>/<current_year>/file_name.ext
example: User/2015/profile_pic.jpg
"""
def __init__(self, **kwargs):
"""
:param kwargs: You can override any of the default options by passing it as keyword argument to this function
:return:
"""
self.options = UPLOAD_TO_OPTIONS.copy()
if hasattr(settings, "UPLOAD_TO_OPTIONS"):
self.options.update(settings.UPLOAD_TO_OPTIONS)
self.options.update(kwargs)
@staticmethod
def get_file_info(full_filename):
filename = fs_path.basename(full_filename).lower()
filename, file_ext = filename.rsplit(".", 1)
return {
"filename": filename,
"extension": file_ext,
"full_filename": full_filename
}
def validate_file_info(self, file_info):
file_ext = file_info["extension"]
if file_ext in self.options["black_listed_extensions"]:
raise ValueError("File extension '%s' is not allowed" % file_ext)
def generate_file_name(self, instance, file_info):
model_name = instance.__class__.__name__
filename = file_info["filename"]
max_len = self.options["max_filename_length"]
file_info["filename"] = slugify(filename)[:max_len]
return strftime(self.options["file_name_template"]).format(
model_name=model_name,
instance=instance,
**file_info
)
def __call__(self, instance, full_filename):
"""
:param instance: model instance which the file is uploaded for
:param full_filename: filename including its path
:return: string
"""
full_filename = six.text_type(full_filename)
file_info = self.get_file_info(full_filename)
self.validate_file_info(file_info)
return self.generate_file_name(instance, file_info)
# Shortcut for UploadTo class
def upload_to(instance, full_filename):
upload_to_obj = UploadTo()
return upload_to_obj(instance, full_filename)
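# Hedged usage sketch (added for illustration): attach UploadTo to a file field.
# The Profile model and "picture" field below are hypothetical; the sketch is kept
# as a comment so importing this helpers module does not register a model.
#
#   class Profile(models.Model):
#       picture = models.ImageField(upload_to=UploadTo(max_filename_length=60))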
def cached_model_property(model_method=None, readonly=True, cache_timeout=None):
"""
cached_model_property is a decorator for model functions that takes no arguments
The function is converted into a property that support caching out of the box
:param readonly: set readonly parameter False to make the property writeable
:type readonly: bool
:param cache_timeout: number of seconds before cache expires
:type cache_timeout: int
Sample usage:
class Team(models.Model):
@cached_model_property
def points(self):
# Do complex DB queries
return result
@cached_model_property(readonly=False)
def editable_points(self):
# get result
return result
Now try
team = Team.objects.first()
team.points <-- complex DB queries will happen, result will be returned
        team.points <-- this time result is returned from cache (points function is not called at all!)
del team.points <-- points value has been removed from cache
team.points <-- complex DB queries will happen, result will be returned
set readonly parameter False to make the property writeable
team.editable_points = 88
in this case the assigned value will replace the value stored in the cache
team.editable_points
returns 88
"""
def func(f):
def _get_cache_key(obj):
"""
:type obj: django.db.models.Model
:rtype: six.string_types
"""
# getattr(obj, "_meta") is same as obj._meta but avoid the warning about accessing protected property
model_name = getattr(obj, "_meta").db_table
method_name = f.__name__
return "%s.%s.%s" % (model_name, obj.pk, method_name)
def get_x(obj):
# Try to get the cache key for that method
cache_key = _get_cache_key(obj)
result = cache.get(cache_key)
# If not cached, call the actual method and cache the result
if result is None:
result = f(obj)
set_x(obj, result)
return result
def del_x(obj):
"""
Remove that property from the cache
:param obj:
:return: None
"""
cache_key = _get_cache_key(obj)
# Remove that key from the cache
cache.delete(cache_key)
def set_x(obj, value):
"""
Set the cache value of that property
:param obj:
:return: None
"""
cache_key = _get_cache_key(obj)
# Save that key in the cache
if cache_timeout is None:
cache.set(cache_key, value)
else:
cache.set(cache_key, value, cache_timeout)
if readonly:
return property(fget=get_x, fdel=del_x)
else:
return property(fget=get_x, fset=set_x, fdel=del_x)
# model_method is passed when using @cached_model_property
if model_method:
return func(model_method)
# model_method is not passed when using @cached_model_property(readonly=True) or even @cached_model_property()
return func
# noinspection PyOldStyleClasses
class Choices(OrderedDict):
"""
Offer a cleaner way for django choices field
Usage:
** define a constant **
        ANIMAL_TYPES = Choices({
            "insect": 1,
            "mammal": {"id": 2},  # same as {"mammal": 2}
            "none": {"id": None, "display": "Not Animal"},
        })
** Inside your model class **
animal_type = models.IntegerField(choices=ANIMAL_TYPES(), null=True)
output of ANIMAL_TYPES() is django choice list ordered by display name:
[(1, 'Insect'), (2, 'Mammal'), (None, 'Not Animal')]
** Using the new model **
animal = Animals.objects.first()
if animal.animal_type == ANIMAL_TYPES.insect:
# do the insect related code
"""
    # always True except during the execution of __init__() and update() methods
_read_only = True
# cache for mapping between choice id and choice dictionary (populated on demand)
_choices_id = None
def __init__(self, choices, order_by="display"):
"""
        :param choices: dictionary of dictionaries, e.g. {'choice1': {'id': 1, 'display': 'Code One'}, ...}
            The display key is optional; if not provided it is assumed to be dict_key.replace("_", " ").capitalize()
:type choices: Choices | OrderedDict | dict | tuple | list
:param order_by: Whether generated Django choice list should be ordered (valid options "id", "display", None)
:type order_by: str | None
"""
self._read_only = False
# Initialize parent dict with the choices provided by the user
super(Choices, self).__init__(choices)
self._choices = _choices = []
self._order_by = order_by
if not choices:
return
# choice_ids are used to validate an id is not used more than once
choice_ids = set()
for choice_code, choice_options in self.items():
if not issubclass(choice_options.__class__, dict):
# in case passing {"insect": 1} assume 1 is the id
choice_options = {"id": choice_options}
self[choice_code] = choice_options
            choice_id = choice_options["id"]
            if choice_id in choice_ids:
                raise ValueError("The choice id %s is used more than once" % repr(choice_id))
            choice_ids.add(choice_id)
            # End of validation
if "display" not in choice_options:
choice_options["display"] = choice_code.replace("_", " ").capitalize()
display = choice_options["display"]
_choices.append((choice_id, _(display)))
# Sort by display name
if order_by == "display":
_choices.sort(key=lambda x: x[1])
elif order_by == "id":
_choices.sort(key=lambda x: x[0])
self._read_only = True
def get_display_name(self, choice_id):
"""
        Return the translated display name of a certain choice,
        same as the model's get_<field_name>_display().
:param choice_id: choice id
:rtype: str
"""
return self.get_value(choice_id, "display")
def get_value(self, choice_id, choice_key, raise_exception=True):
"""
Finds a choice with id <choice_id> and return value of key <key>
:param choice_id: the db value of the choice in question
        :param choice_key: the key inside the choice dictionary whose value you want; if None, the choice's code name is returned
:param raise_exception: if True, KeyError exception will be raised if the key wasn't found
:return: whatever stored in that choice key is returned,
if key not found and raise_exception=False then None is returned
"""
if self._choices_id is None:
self._choices_id = {item["id"]: (key, item) for key, item in six.iteritems(self)}
choice_name, choice = self._choices_id[choice_id]
if choice_key is None:
return choice_name
elif raise_exception:
return choice[choice_key]
else:
return choice.get(choice_key)
def get_code_name(self, choice_id):
"""
Return code name of certain choice
:param choice_id: choice id
:rtype: str
"""
return self.get_value(choice_id, choice_key=None)
def __getattr__(self, attr_name):
if attr_name in self:
return self[attr_name]["id"]
raise AttributeError("Attribute %s is not part of %s class" % (attr_name, self.__class__.__name__))
def __call__(self):
"""
:return: list of choices
:rtype: list
"""
return self._choices
def __setattr__(self, attr, *args):
if self._read_only and attr in self:
raise TypeError("Choices are constants and can't be modified")
super(Choices, self).__setattr__(attr, *args)
def __setitem__(self, *args):
if self._read_only:
raise TypeError("Choices are constants and can't be modified")
super(Choices, self).__setitem__(*args)
def __dir__(self):
return list(self.keys()) + dir(self.__class__)
def copy(self):
new_self = Choices({}, order_by=self._order_by)
new_self.update(self)
return new_self
def update(self, new_data=None, **kwargs):
"""
:type new_data: Choices | OrderedDict | dict | tuple | list
"""
if self._read_only:
raise TypeError("Choices are constants and can't be modified")
if not new_data:
new_data = kwargs
if not isinstance(new_data, Choices):
new_data = Choices(new_data)
assert isinstance(new_data, Choices)
common_keys = set(new_data.keys()) & set(self.keys())
if common_keys:
raise ValueError("The following keys exist in both instances %s" % ", ".join(common_keys))
self._choices += (new_data())
self._choices_id = None
super(Choices, self).update(new_data)
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
self._read_only = True
def __add__(self, other):
self._read_only = False
with self.copy() as result:
result.update(other)
self._read_only = True
return result
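# Hedged usage sketch (added for illustration): two Choices constants can be merged
# with "+" as long as their keys do not overlap. The names below are hypothetical.
#
#   BASIC_STATUS = Choices({"active": 1, "inactive": 2})
#   EXTRA_STATUS = Choices({"banned": {"id": 3, "display": "Banned user"}})
#   ALL_STATUS = BASIC_STATUS + EXTRA_STATUS
#   ALL_STATUS.banned    # -> 3
#   ALL_STATUS()         # -> [(1, 'Active'), (2, 'Inactive'), (3, 'Banned user')]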
class KeyValueContainer(dict):
def __init__(self, seq=None, separator="=", **kwargs):
super(KeyValueContainer, self).__init__()
self.sep = separator
if isinstance(seq, six.string_types):
seq = self._parse_string(seq)
if seq is not None:
seq = dict(seq)
kwargs.update(seq)
for key, value in six.iteritems(kwargs):
self.__setitem__(key, value)
def __str__(self):
result = []
for key, val in six.iteritems(self):
result.append(u"%s %s %s" % (key, self.sep, val))
return u"\n".join(result) + "\n"
def __setitem__(self, key, item):
if item is None:
item = ""
else:
item = six.text_type(item)
super(KeyValueContainer, self).__setitem__(key, item)
def __unicode__(self):
return self.__str__()
def _parse_string(self, value):
result = {}
if not value:
return result
for line in value.split("\n"):
line = line.strip()
if not line:
continue
if self.sep not in line:
raise ValueError(_("Invalid syntax in line %s\nExpected: key %s value") % (repr(line), self.sep))
key, value = [val.strip() for val in line.split(self.sep, 1)]
result[key] = value
return result
class KeyValueField(models.TextField):
"""
Basically a way to store configuration in DB and have it returned as dictionary.
Simple key/value store
data stored as
key = value
default separator is "=" but it can be customized
sample usage
class MyModel(models.Model):
options = KeyValueField(separator=":")
>> my_model.options = "key1 : val1 \n key2 : val2"
>> my_model.clean_fields()
>> my_model.options
{"key1": "val1", "key2": "val2"}
"""
description = _("Key/Value dictionary field")
empty_values = (None,)
def __init__(self, separator="=", *args, **kwargs):
self.separator = separator
super(KeyValueField, self).__init__(*args, **kwargs)
def contribute_to_class(self, cls, name, **kwargs):
super(KeyValueField, self).contribute_to_class(cls, name, **kwargs)
setattr(cls, name, property(fget=self.get_value, fset=self.set_value))
def set_value(self, obj, value):
if isinstance(value, six.string_types):
value = self.from_db_value(value)
elif not isinstance(value, KeyValueContainer):
value = KeyValueContainer(value)
obj.__dict__[self.name] = value
def get_value(self, obj):
return obj.__dict__[self.name]
def from_db_value(self, value, *args, **kwargs):
try:
return KeyValueContainer(value, separator=self.separator)
except ValueError as e:
raise ValidationError(e)
def get_prep_value(self, value):
if value is None:
return ""
return six.text_type(value)
def deconstruct(self):
name, path, args, kwargs = super(KeyValueField, self).deconstruct()
if self.separator != "=":
kwargs["separator"] = self.separator
return name, path, args, kwargs
``` |
{
"source": "jonathangomesselman/CS330-CAML",
"score": 3
} |
#### File: fullgrad_saliency_master/saliency/simple_fullgrad.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from math import isclose
from saliency.tensor_extractor import FullGradExtractor
class SimpleFullGrad():
"""
Compute simple FullGrad saliency map
"""
def __init__(self, model, im_size = (3,224,224) ):
self.model = model
self.im_size = (1,) + im_size
self.model_ext = FullGradExtractor(model, im_size)
def _getGradients(self, image, target_class=None):
"""
Compute intermediate gradients for an image
"""
self.model.eval()
image = image.requires_grad_()
out = self.model(image)
if target_class is None:
target_class = out.data.max(1, keepdim=True)[1]
# Select the output unit corresponding to the target class
# -1 compensates for negation in nll_loss function
output_scalar = -1. * F.nll_loss(out, target_class.flatten(), reduction='sum')
return self.model_ext.getFeatureGrads(image, output_scalar)
def _postProcess(self, input, eps=1e-6):
# Absolute value
input = abs(input)
# Rescale operations to ensure gradients lie between 0 and 1
flatin = input.view((input.size(0),-1))
temp, _ = flatin.min(1, keepdim=True)
input = input - temp.unsqueeze(1).unsqueeze(1)
flatin = input.view((input.size(0),-1))
temp, _ = flatin.max(1, keepdim=True)
input = input / (temp.unsqueeze(1).unsqueeze(1) + eps)
return input
def saliency(self, image, target_class=None):
#Simple FullGrad saliency
self.model.eval()
input_grad, intermed_grad = self._getGradients(image, target_class=target_class)
im_size = image.size()
# Input-gradient * image
grd = input_grad * image
gradient = self._postProcess(grd).sum(1, keepdim=True)
cam = gradient
# Aggregate Intermediate-gradients
for i in range(len(intermed_grad)):
# Select only Conv layers
if len(intermed_grad[i].size()) == len(im_size):
temp = self._postProcess(intermed_grad[i])
gradient = F.interpolate(temp, size=(im_size[2], im_size[3]), mode = 'bilinear', align_corners=True)
cam += gradient.sum(1, keepdim=True)
return cam
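# Hedged usage sketch (added for illustration): `model` stands for any CNN classifier
# matching the assumed (3, 224, 224) input size, and the batch below is random data
# rather than a real image.
#
#   fullgrad = SimpleFullGrad(model)
#   images = torch.randn(1, 3, 224, 224)
#   saliency_map = fullgrad.saliency(images)   # one single-channel map per input image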
```
#### File: jonathangomesselman/CS330-CAML/saliency.py
```python
import importlib
import datetime
import argparse
import random
import uuid
import time
import os
import numpy as np
import torch
from torch.autograd import Variable
from metrics.metrics import confusion_matrix
import matplotlib.pyplot as plt
from main import load_datasets
# Import saliency methods
#from fullgrad_saliency_master.saliency.fullgrad import FullGrad
#from fullgrad_saliency_master.saliency.simple_fullgrad import SimpleFullGrad
#from fullgrad_saliency_master.saliency.smooth_fullgrad import SmoothFullGrad
from fullgrad_saliency_master.saliency.gradcam import GradCAM
from fullgrad_saliency_master.saliency.grad import InputGradient
from fullgrad_saliency_master.saliency.smoothgrad import SmoothGrad
"""
Stolen from CS231N
"""
def compute_saliency_maps(x, y, model):
"""
Compute a class saliency map using the model for images X and labels y.
Input:
    - x: Input image: Tensor of shape (H*W)
- y: Labels for x; float label
- model: A pretrained CNN that will be used to compute the saliency map.
Returns:
- saliency: A Tensor of shape (H*W) giving the saliency maps for the input
images.
"""
# Make sure the model is in "test" mode
model.eval()
# Make input tensor require gradient
x.requires_grad_()
##############################################################################
# TODO: Implement this function. Perform a forward and backward pass through #
# the model to compute the gradient of the correct class score with respect #
# to each input image. You first want to compute the loss over the correct #
# scores (we'll combine losses across a batch by summing), and then compute #
# the gradients with a backward pass. #
##############################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
scores = model(x, 0) # Not sure about the 0
# Gather just the correct scores
# Not sure why we did this instead of the loss!
scores = scores.gather(0, y,).squeeze()
loss = torch.sum(scores)
loss.backward()
# Now actually get step
x_grad = x.grad
saliency = torch.abs(x_grad)
return saliency
def main():
parser = argparse.ArgumentParser(description='Continuum learning')
# Woody: extra args for caml
parser.add_argument('--caml_priority', type=str, default='loss',
help='how to prioritize sampling in caml')
parser.add_argument('--softmax_temperature', type=float, default=1.0,
help='temperature for softmax in replay buffer sampling')
# model details
parser.add_argument('--model', type=str, default='caml1',
help='model to train')
parser.add_argument('--n_hiddens', type=int, default=100,
help='number of hidden neurons at each layer')
parser.add_argument('--n_layers', type=int, default=2,
help='number of hidden layers')
parser.add_argument('--finetune', default='yes', type=str,help='whether to initialize nets in indep. nets')
# optimizer parameters influencing all models
parser.add_argument('--n_epochs', type=int, default=1,
help='Number of epochs per task')
parser.add_argument('--batch_size', type=int, default=1,
help='the amount of items received by the algorithm at one time (set to 1 across all experiments). Variable name is from GEM project.')
parser.add_argument('--lr', type=float, default=1e-3,
help='learning rate')
# memory parameters for GEM baselines
parser.add_argument('--n_memories', type=int, default=0,
help='number of memories per task')
parser.add_argument('--memory_strength', default=0, type=float,
help='memory strength (meaning depends on memory)')
# parameters specific to models in https://openreview.net/pdf?id=B1gTShAct7
parser.add_argument('--memories', type=int, default=5120, help='number of total memories stored in a reservoir sampling based buffer')
parser.add_argument('--gamma', type=float, default=1.0,
help='gamma learning rate parameter') #gating net lr in roe
parser.add_argument('--batches_per_example', type=float, default=1,
help='the number of batch per incoming example')
parser.add_argument('--s', type=float, default=1,
help='current example learning rate multiplier (s)')
parser.add_argument('--replay_batch_size', type=float, default=20,
help='The batch size for experience replay. Denoted as k-1 in the paper.')
parser.add_argument('--beta', type=float, default=1.0,
help='beta learning rate parameter') # exploration factor in roe
# experiment parameters
parser.add_argument('--cuda', type=str, default='no',
help='Use GPU?')
parser.add_argument('--seed', type=int, default=0,
help='random seed of model')
parser.add_argument('--log_every', type=int, default=100,
help='frequency of logs, in minibatches')
parser.add_argument('--save_path', type=str, default='results/',
help='save models at the end of training')
# data parameters
parser.add_argument('--data_path', default='data/',
help='path where data is located')
parser.add_argument('--data_file', default='mnist_rotations.pt',
help='data file')
parser.add_argument('--samples_per_task', type=int, default=-1,
help='training samples per task (all if negative)')
parser.add_argument('--shuffle_tasks', type=str, default='no',
help='present tasks in order')
# Saliency method
parser.add_argument('--saliency', type=str, default='smoothgrad',
help="Defines the saliency method used")
args = parser.parse_args()
args.cuda = True if args.cuda == 'yes' else False
args.finetune = True if args.finetune == 'yes' else False
# taskinput model has one extra layer
if args.model == 'taskinput':
args.n_layers -= 1
# unique identifier
uid = uuid.uuid4().hex
# initialize seeds
torch.backends.cudnn.enabled = False
torch.manual_seed(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
if args.cuda:
print("Found GPU:", torch.cuda.get_device_name(0))
torch.cuda.manual_seed_all(args.seed)
# load data
x_tr, x_te, n_inputs, n_outputs, n_tasks = load_datasets(args)
n_outputs = n_outputs.item() # outputs should not be a tensor, otherwise "TypeError: expected Float (got Long)"
# load model
Model = importlib.import_module('model.' + args.model)
model = Model.Net(n_inputs, n_outputs, n_tasks, args)
#result_t, result_a, model_state_dict, stats, one_liner, _ = torch.load('woody_results/online_mnist_rotations.pt_2020_11_01_11_19_37_f37e2305e6e04d61ab498c9bf252fe97.pt')
result_t, result_a, model_state_dict, stats, one_liner, _ = torch.load('woody_results/caml1_mnist_rotations.pt_2020_11_01_13_58_46_0c7287daad494c818e6d5ce206b16b0b.pt')
model.load_state_dict(model_state_dict)
model.eval()
if args.cuda:
try:
model.cuda()
except:
pass
# Initialize saliency methods
saliency_methods = {
# FullGrad-based methods
#'fullgrad': FullGrad(model),
#'simple_fullgrad': SimpleFullGrad(model),
#'smooth_fullgrad': SmoothFullGrad(model),
# Other saliency methods from literature
'gradcam': GradCAM(model),
'inputgrad': InputGradient(model),
'smoothgrad': SmoothGrad(model)
}
# Test this saliency shit on two data points
# From the final task train set
task_num = 0
saliency_idxes = [7, 1, 105]
#x = x_tr[task_num][1][saliency_idxes]
#y = x_tr[task_num][2][saliency_idxes]
x = x_tr[task_num][1][1]
y = x_tr[task_num][2][1]
#saliency = compute_saliency_maps(x, y, model)
saliency = saliency_methods[args.saliency].saliency(x, y)
# Convert the saliency map from Torch Tensor to numpy array and show images
# and saliency maps together.
saliency = saliency.detach().numpy()
# Try the technique of multiplying the image and saliency!
#saliency = saliency * x.detach().numpy()
saliency = saliency.reshape(-1, 28, 28)
x = x.reshape(-1, 28, 28).detach().numpy()
N = x.shape[0]
for i in range(N):
plt.subplot(2, N, i+1)
plt.imshow(x[i])
plt.axis('off')
plt.subplot(2, N, N + i + 1)
plt.imshow(saliency[i], cmap=plt.cm.Greens)
plt.axis('off')
plt.gcf().set_size_inches(12, 5)
plt.show()
if __name__ == '__main__':
main()
``` |
{
"source": "jon-a-thang/Python-Collection",
"score": 4
} |
#### File: Python-Collection/Excel_Zipcode/excel_zipcode_universal.py
```python
import pandas as pd
def excel_zipcode():
"""
    excel_zipcode opens an Excel file, reads a particular column, extracts the zip code
    from each entry, then inserts the zip codes as a new column and writes the resulting
    dataframe to a new Excel file.
:return:
None
"""
full_file_path = "PATH_TO_EXCEL_FILE"
df = pd.read_excel(full_file_path, usecols=['COL_NAME_THAT_NEEDS_TO_BE_ALTERED'])
# print(df.values.tolist())
new_df_list = []
# Checking the output of the dataframe from the excel sheet
for each in df.values.tolist():
print(each[0])
print(each[0].split(",")[-2])
# Appending the data that we are altering or need into its own new list
new_df_list.append(each[0].split(",")[-1])
dff = pd.read_excel(full_file_path)
# inserting the new column
dff.insert(0, "Zipcode", new_df_list, True)
print(dff)
print(dff.to_excel("PATH_OF_NEW_EXCEL_FILE"))
def main():
"""
Main function to run excel_zipcode
:return:
None
"""
excel_zipcode()
if __name__ == '__main__':
main()
``` |
{
"source": "jonathanGR3450/estocasticos",
"score": 2
} |
#### File: estocasticos/Comandos/training.py
```python
from sys import byteorder
from array import array
from struct import pack
from features import mfcc
from features import logfbank
import scipy.io.wavfile as wav
import numpy as np
import dataset
import pyaudio
import wave
import lumos, close, nox, yes1, no
THRESHOLD = 500
CHUNK_SIZE = 2048
FORMAT = pyaudio.paInt16
RATE = 48000
exit_flag = 0
def is_silent(snd_data):
"Returns 'True' if below THRESHOLD"
return max(snd_data) < THRESHOLD
def normalize(snd_data):
"Average volume out"
MAXIMUM = 16384
times = float(MAXIMUM)/max(abs(i) for i in snd_data)
r = array('h')
for i in snd_data:
r.append(int(i*times))
return r
def trim(snd_data):
"Trim the blank spots at the start and end"
def _trim(snd_data):
snd_started = False
r = array('h')
for i in snd_data:
if not snd_started and abs(i)>THRESHOLD:
snd_started = True
r.append(i)
elif snd_started:
r.append(i)
return r
# Trim to the left
snd_data = _trim(snd_data)
# Trim to the right
snd_data.reverse()
snd_data = _trim(snd_data)
snd_data.reverse()
return snd_data
def record():
"""
Record a word or words from the microphone and
return the data as an array of signed shorts.
"""
p = pyaudio.PyAudio()
stream = p.open(format=FORMAT, channels=1, rate=RATE,
input=True, output=True,
frames_per_buffer=CHUNK_SIZE)
num_silent = 0
snd_started = False
r = array('h')
while 1:
# little endian, signed short
snd_data = array('h', stream.read(CHUNK_SIZE))
if byteorder == 'big':
snd_data.byteswap()
r.extend(snd_data)
silent = is_silent(snd_data)
if silent and snd_started:
num_silent += 1
elif not silent and not snd_started:
snd_started = True
if snd_started and num_silent > 30:
break
sample_width = p.get_sample_size(FORMAT)
stream.stop_stream()
stream.close()
p.terminate()
r = normalize(r)
r = trim(r)
return sample_width, r
def record_to_file(path):
"Records from the microphone and outputs the resulting data to 'path'"
sample_width, data = record()
data = pack('<' + ('h'*len(data)), *data)
wf = wave.open(path, 'wb')
wf.setnchannels(1)
wf.setsampwidth(sample_width)
wf.setframerate(RATE)
wf.writeframes(data)
wf.close()
def check_for_match(input):
"Takes input and searches dataset for a hit"
flag = 0
global exit_flag
for i in np.array(close.close):
no_match = i
if (np.allclose(input,no_match,0.00000000,3.00000000)==True) and (flag == 0):
print "CLOSE"
flag = 1
exit_flag = 1
return "close"
for i in np.array((lumos.lumos)):
no_match = i
if (np.allclose(input,no_match,0.00000000,2.30000000)==True) and (flag == 0): #2.2
print "LUMOS"
flag = 1
return "lumos"
for i in np.array(nox.nox):
no_match = i
if (np.allclose(input,no_match,0.00000000,2.50000000)==True) and (flag == 0):
print "NOX"
flag = 1
return "nox"
for i in np.array((no.no)):
no_match = i
if (np.allclose(input,no_match,0.00000000,3.00000000)==True) and (flag == 0):
print "NO"
flag = 1
return "no"
for i in np.array(yes1.yes1):
yes_match = i
if (np.allclose(input,yes_match,0.00000000,3.00000000)==True) and (flag == 0):
print "YES1"
flag = 1
return "yes1"
if flag == 0:
print "desconocida"
return "desconocida"
def parse_array(recording,word):
"Write calculated coefficients into database"
    if word == 'lumos':
testing = recording.tolist()
testfile = open('lumos.py','a')
testfile.write(",")
testfile.write(str(testing))
print "adding"
    if word == 'close':
testing = recording.tolist()
testfile = open('close.py','a')
testfile.write(",")
testfile.write(str(testing))
print "adding"
    if word == 'nox':
testing = recording.tolist()
testfile = open('nox.py','a')
testfile.write(",")
testfile.write(str(testing))
print "adding"
    if word == 'yes1':
testing = recording.tolist()
testfile = open('yes1.py','a')
testfile.write(",")
testfile.write(str(testing))
print "adding"
    if word == 'no':
testing = recording.tolist()
testfile = open('no.py','a')
testfile.write(",")
testfile.write(str(testing))
print "adding"
def training():
'''
Takes input signal and searches current dataset for hit.
If hit, then add to correct dataset.
    If miss, asks user for correct input and adds to dataset.
'''
print("please speak a word into the microphone")
record_to_file('training.wav')
print("done - result written to training.wav")
(rate,sig) = wav.read("training.wav")
mfcc_feat = mfcc(sig,rate)
fbank_feat = logfbank(sig,rate)
recording = fbank_feat[1:3,:]
testing = check_for_match(recording)
verify = raw_input("did you say " + testing + " ")
    if verify == 'y':
        parse_array(recording,testing)
    if verify == 'n':
        correct_word = raw_input("what word did you mean? ")
print correct_word
parse_array(recording,correct_word)
while True:
"Continously run program until user finishes training"
if __name__ == '__main__':
user_input = raw_input("what would you like to do? ")
        if user_input == 'train':
training()
if user_input in 'exit':
break
print "program has exited"
``` |
{
"source": "jonathangranskog/shading-scene-representations",
"score": 3
} |
#### File: shading-scene-representations/code/create-dataset-json-files.py
```python
import numpy as np
import os
import math
import random
from argparse import ArgumentParser
from util.config import configure
from tqdm import tqdm
import renderer.randomize.scene_randomizer as sr
'''
This script creates a folder structure containing 'size' + 'testing_size' batches,
where each batch consists of a certain number of scenes based on the batch size.
Each scene also consists of N rendered views.
This script does not generate any imagery by itself, only the scene descriptions required to
render it.
'''
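# The resulting layout (illustrative) looks like:
#   <out_folder>/
#     train/batch000000000/factor.txt                     (only when latent separation is on)
#     train/batch000000000/scene0000/view000.json ... view{views_per_scene - 1}.json
#     test/batch000000000/scene0000/view000.json ...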
parser = ArgumentParser()
parser.add_argument('--config_dir', type=str, default='', help='Where config file is located')
parser.add_argument('--config', type=str, default='', help='Config file to read')
parser.add_argument('--size', type=int, default=9000, help='How many batches to include in dataset')
parser.add_argument('--testing_size', type=int, default=1000, help='How many testing batches to include in dataset')
parser.add_argument('--device', type=str, default='', help='Which device to run on')
parser.add_argument('--find_checkpoints', action='store_true', help='Attempt to find checkpoints automatically')
parser.add_argument('--out_folder', type=str, default='tmp/', help='Folder to save JSON files to')
args = parser.parse_args()
settings = configure(args, ignore_data=True)
randomizer = sr.select_randomizer(settings.dataset, settings.seed)
# Create main directories
parent_path = os.path.abspath(args.out_folder)
train_path = os.path.join(parent_path, 'train')
test_path = os.path.join(parent_path, 'test')
os.makedirs(parent_path, exist_ok=True)
os.makedirs(train_path, exist_ok=True)
os.makedirs(test_path, exist_ok=True)
def random_scene(factor):
global randomizer
if factor == -1:
randomizer.random_scene()
elif factor == 0:
randomizer.randomize_lighting()
elif factor == 1:
randomizer.randomize_geometry()
else:
randomizer.randomize_materials()
def scene_json(folder, factor):
random_scene(factor)
os.makedirs(folder, exist_ok=True)
for i in range(settings.views_per_scene):
randomizer.random_view()
json_file = folder + "/view%03d.json" % i
params = randomizer.generate_params()
randomizer.save_json(json_file, params)
def generate_batch(folder, batch_size, latent_separation):
randomizer.random_scene()
if latent_separation:
os.makedirs(folder, exist_ok=True)
factor = random.randint(0, 2)
factor_file = folder + "/factor.txt"
with open(factor_file, 'w') as fac:
fac.write(str(factor))
else:
factor = -1
for i in range(batch_size):
scene_path = os.path.join(folder, "scene%04d" % i)
scene_json(scene_path, factor)
def generate_set(folder, size, batch_size, latent_separation):
for i in tqdm(range(size)):
batch_path = os.path.join(folder, "batch%09d" % i)
generate_batch(batch_path, batch_size, latent_separation)
print("Generating training data...")
generate_set(train_path, args.size, settings.batch_size, settings.latent_separation)
print("Generating testing data...")
generate_set(test_path, args.testing_size, settings.test_batch_size, False)
```
#### File: renderer/optix_renderer/render.py
```python
import numpy as np
import random
import os
import renderer.optix_renderer.build.lib.rnd as rnd
RND_CONTEXT = 0
# This class accesses the C++ functions for the Optix renderer
class OptixRenderer():
def __init__(self, size, device):
global RND_CONTEXT
self.size = size
self.device = device
if RND_CONTEXT == 0:
RND_CONTEXT = rnd.Context(self.size, device)
def load_scene_file(self, filename):
fname = os.path.abspath(filename)
RND_CONTEXT.load_scene_file(fname)
def load_scene_json(self, desc):
RND_CONTEXT.load_scene_json(desc)
def get_pose(self):
view_matrix = RND_CONTEXT.get_view_matrix()
view_matrix = np.asanyarray(view_matrix)
pose = view_matrix.reshape(16)
return pose
def set_camera(self, pos, lookat):
px = pos[0]
py = pos[1]
pz = pos[2]
lx = lookat[0]
ly = lookat[1]
lz = lookat[2]
RND_CONTEXT.set_camera(px, py, pz, lx, ly, lz)
def set_nee(self, nee):
RND_CONTEXT.set_nee(nee)
def draw_scene_to_tensor(self, samples):
RND_CONTEXT.render(samples)
image = np.asanyarray(RND_CONTEXT.get_image())
return image
def draw_scene(self, samples):
tensor = np.flipud(self.draw_scene_to_tensor(samples)).copy()
passes = {}
passes["beauty"] = np.nan_to_num(tensor[:, :, 0:3])
passes["normal"] = tensor[:, :, 3:6]
passes["depth"] = tensor[:, :, 6:7]
passes["position"] = tensor[:, :, 7:10]
passes["albedo"] = tensor[:, :, 10:13]
passes["roughness"] = tensor[:, :, 13:14]
passes["direct"] = np.nan_to_num(tensor[:, :, 14:17])
passes["indirect"] = passes["beauty"] - passes["direct"]
passes["diffuse"] = tensor[:, :, 17:20]
passes["specular"] = passes["beauty"] - passes["diffuse"]
passes["mirror"] = tensor[:, :, 20:23]
passes["mirror_hit"] = tensor[:, :, 23:26]
passes["mirror_normal"] = tensor[:, :, 26:29]
passes["shadows"] = tensor[:, :, 29:30]
passes["ao"] = tensor[:, :, 30:31]
passes["id"] = tensor[:, :, 31:34]
return passes
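# Hedged usage sketch (added for illustration): renders a scene description with
# 64 samples per pixel; the size, sample count, device index and JSON path are
# placeholder values.
#
#   renderer = OptixRenderer(size=128, device=0)
#   renderer.load_scene_file("scenes/example.json")
#   passes = renderer.draw_scene(samples=64)
#   beauty = passes["beauty"]   # (size, size, 3) numpy array of radiance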
```
#### File: renderer/randomize/material.py
```python
import numpy as np
import pyrr
import os
class Material():
def __init__(self, color=np.ones(3, dtype=np.float32), emission=np.zeros(3, dtype=np.float32), roughness=1.0, ior=15.0, id=0, texture=None, texture_frequency=np.array([1.0, 1.0])):
self.color = color
self.emission = emission
self.roughness = roughness
self.ior = ior
if not texture is None:
self.texture = os.path.abspath(texture)
else:
self.texture = ""
self.texture_frequency = texture_frequency
self.id = id
def as_dict(self):
d = {}
d["color"] = self.color.tolist()
d["emission"] = self.emission.tolist()
d["roughness"] = self.roughness
d["ior"] = self.ior
d["texture"] = self.texture
d["texture_frequency"] = self.texture_frequency.tolist()
d["id"] = self.id
return d
```
#### File: code/util/attribution.py
```python
import torch
import torch.nn as nn
import copy
import time
import tqdm
def fetch_gradients(data, settings):
query_grads = {}
observation_grads = {}
for key in data["query_images"]:
if data["query_images"][key].grad is not None:
query_grads[key] = data["query_images"][key].grad * data["query_images"][key]
for key in data["observation_images"]:
if data["observation_images"][key].grad is not None:
observation_grads[key] = data["observation_images"][key].grad * data["observation_images"][key]
return query_grads, observation_grads
# Function that computes the gradients for a single channel in a pixel
def pixel_channel_gradient(data, model, settings, image, pixel, channel):
model.zero_grad()
value = image[0, channel, pixel[1], pixel[0]]
model.representation.retain_gradients()
value.backward(retain_graph=True)
# Assume we only have one representation!
if settings.multi_gpu:
repr_net = model.representation.representations[0].module
else:
repr_net = model.representation.representations[0]
rgrad = repr_net.representation.grad * repr_net.representation
qgrad, ograd = fetch_gradients(data, settings)
return qgrad, ograd, rgrad
def pixel_gradient(data, model, settings, image, pixel):
query_grads = {}
observation_grads = {}
assert(image.shape[0] == 1)
# Compute gradient matrices with shape [num_channels_output, num_channels_input, height, width]
for channel in range(image.shape[1]):
qgrad, ograd, rgrad = pixel_channel_gradient(data, model, settings, image, pixel, channel)
if channel == 0:
query_grads = qgrad
observation_grads = ograd
representation_grads = rgrad
else:
for key in qgrad:
query_grads[key] = torch.cat([query_grads[key], qgrad[key]], dim=0)
for key in ograd:
observation_grads[key] = torch.cat([observation_grads[key], ograd[key]], dim=0)
representation_grads = torch.cat([representation_grads, rgrad], dim=0)
return query_grads, observation_grads, representation_grads
def mean_patch_gradient(data, model, settings, image, patch_bounds, signed=False):
query_grad = {}
observation_grad = {}
representation_grad = []
patch_size = 0
for x in tqdm.tqdm(range(patch_bounds[0][0], patch_bounds[0][1])):
for y in range(patch_bounds[1][0], patch_bounds[1][1]):
qgrad, ograd, rgrad = pixel_gradient(data, model, settings, image, [x, y])
if not signed:
for key in qgrad:
qgrad[key] = torch.abs(qgrad[key])
for key in ograd:
ograd[key] = torch.abs(ograd[key])
rgrad = torch.abs(rgrad)
if patch_size == 0:
query_grad = qgrad
observation_grad = ograd
representation_grad = rgrad
else:
representation_grad += rgrad
for key in query_grad:
query_grad[key] += qgrad[key]
for key in observation_grad:
observation_grad[key] += ograd[key]
patch_size += 1
representation_grad = representation_grad / patch_size
for key in query_grad:
query_grad[key] = torch.mean(query_grad[key], dim=0, keepdim=True) / patch_size
for key in observation_grad:
observation_grad[key] = torch.mean(observation_grad[key], dim=0, keepdim=True) / patch_size
return query_grad, observation_grad, representation_grad
def representation_gradient(data, model, settings, part=[]):
batch_data = copy.deepcopy(data)
for key in data["query_images"]:
batch_data["query_images"][key] = torch.autograd.Variable(batch_data["query_images"][key], requires_grad=True)
for key in data["observation_images"]:
batch_data["observation_images"][key] = torch.autograd.Variable(batch_data["observation_images"][key], requires_grad=True)
sample_image = model.sample(batch_data)
if settings.multi_gpu:
repr_net = model.representation.representations[0].module
else:
repr_net = model.representation.representations[0]
representation = repr_net.representation
assert(representation.shape[0] == 1)
for dim in range(part[0], part[1]):
model.zero_grad()
value = representation[0, dim]
model.representation.retain_gradients()
value.backward(retain_graph=True)
_, ograd = fetch_gradients(batch_data, settings)
# Compute gradient matrices with shape [r_dim, num_channels_input, height, width]
if dim == part[0]:
observation_grads = ograd
else:
for key in ograd:
observation_grads[key] = torch.cat([observation_grads[key], ograd[key]], dim=0)
for key in observation_grads:
observation_grads[key] = torch.sum(observation_grads[key], dim=0, keepdim=True)
query_grads = {}
for key in settings.model.representations[0].observation_passes:
if batch_data["observation_images"][key].grad is not None:
observation_grads[key] = batch_data["observation_images"][key].grad * batch_data["observation_images"][key]
if repr_net.representation.grad is not None:
representation_grads = repr_net.representation.grad
else:
representation_grads = torch.zeros_like(repr_net.representation)
return sample_image[-1], query_grads, observation_grads, representation_grads
def simple_gradients(data, model, settings, patch=[]):
t0 = time.time()
print("Computing gradients...")
batch_data = copy.deepcopy(data)
for key in data["query_images"]:
batch_data["query_images"][key] = torch.autograd.Variable(batch_data["query_images"][key], requires_grad=True)
for key in data["observation_images"]:
batch_data["observation_images"][key] = torch.autograd.Variable(batch_data["observation_images"][key], requires_grad=True)
sample_image = model.sample(batch_data)
imsize = settings.model.generators[-1].render_size
if len(patch) == 0:
patch = [[0, imsize], [0, imsize]]
qgrad, ograd, rgrad = mean_patch_gradient(batch_data, model, settings, sample_image[-1]["beauty"], patch, signed=False)
t1 = time.time()
print(str(t1 - t0))
return sample_image[-1], qgrad, ograd, rgrad
# Function that takes some data as input and computes the gradient wrt the input
# The gradient can be computed for the representation or the output image
def predictions_and_gradients(data, model, settings, mode='mean', pixel=[0.5, 0.5], patch_size=1, partition=0, r_index=0, g_index=-1):
# Prepare data by telling PyTorch to store gradients for input data
batch_data = copy.deepcopy(data)
for key in data["query_images"]:
batch_data["query_images"][key] = torch.autograd.Variable(batch_data["query_images"][key], requires_grad=True)
for key in data["observation_images"]:
batch_data["observation_images"][key] = torch.autograd.Variable(batch_data["observation_images"][key], requires_grad=True)
# Get predictions
model.zero_grad()
sample_image = model.sample(batch_data)
if settings.multi_gpu:
repr_net = model.representation.representations[r_index].module
else:
repr_net = model.representation.representations[r_index]
# Select what to compute gradient for
if mode == 'mean' or mode == 'pixel':
outputs = list(sample_image[g_index].values())
output_image = outputs[0]
for i in range(1, len(outputs)):
output_image = torch.cat([output_image, outputs[i]], dim=1)
if mode == 'mean':
prop = output_image.mean()
else:
pixely0 = int((output_image.shape[2] - 1) * pixel[1])
pixelx0 = int((output_image.shape[3] - 1) * pixel[0])
pixely1 = min(pixely0 + int(patch_size), output_image.shape[2])
pixelx1 = min(pixelx0 + int(patch_size), output_image.shape[3])
prop = output_image[:, :, pixely0:pixely1, pixelx0:pixelx1].mean()
else:
r = model.compute_representations(batch_data)[0][r_index]
# Partition specific gradients
if settings.latent_separation and mode == 'partition':
softmax = torch.nn.Softmax(dim=0)
deltas = repr_net.deltas
deltas = r.shape[1] * torch.cumsum(softmax(deltas), dim=0)
deltas = deltas.int()
if partition == 0:
prop = r[:, :deltas[0]].mean()
elif partition == 1:
prop = r[:, deltas[0]:deltas[1]].mean()
else:
prop = r[:, deltas[1]:].mean()
else:
# Gradient wrt single element in representation
prop = r[:, int(pixel[0] * (r.shape[1] - 1))].mean()
# Run backward pass
model.representation.retain_gradients()
prop.backward()
if repr_net.representation.grad is not None:
representation_grads = repr_net.representation.grad * repr_net.representation
else:
representation_grads = torch.zeros_like(repr_net.representation)
query_grads = {}
observation_grads = {}
if mode == 'mean' or mode == 'pixel':
for key in settings.model.generators[g_index].query_passes:
if batch_data["query_images"][key].grad is not None:
query_grads[key] = batch_data["query_images"][key].grad * batch_data["query_images"][key]
for key in settings.model.representations[r_index].observation_passes:
if batch_data["observation_images"][key].grad is not None:
observation_grads[key] = batch_data["observation_images"][key].grad * batch_data["observation_images"][key]
return sample_image[g_index], observation_grads, query_grads, representation_grads
# Convert a whole hsv[0,1] tensor to rgb
def hsv_to_rgb(tensor):
C = tensor[:, 2:] * tensor[:, 1:2]
X = C * (1 - torch.abs(torch.fmod(tensor[:, 0:1] * 6, 2) - 1))
m = tensor[:, 2:] - C
H = tensor[:, 0:1] * 360
zeros = torch.zeros_like(C)
rgb = torch.cat([C, zeros, X], dim=1)
rgb = torch.where(H < 300, torch.cat([X, zeros, C], dim=1), rgb)
rgb = torch.where(H < 240, torch.cat([zeros, X, C], dim=1), rgb)
rgb = torch.where(H < 180, torch.cat([zeros, C, X], dim=1), rgb)
rgb = torch.where(H < 120, torch.cat([X, C, zeros], dim=1), rgb)
rgb = torch.where(H < 60, torch.cat([C, X, zeros], dim=1), rgb)
return rgb + m
def visualize_buffer_gradients(gradients, heatmap=False, maximum=None, minimum=None):
# Compute a heatmap by mapping the hue from blue to red according to the value of gradients
if heatmap:
# Preprocess and compute magnitude
b, c, h, w = gradients.shape
gradients = torch.abs(gradients).clamp(0, 1e25)
gradients = torch.sqrt(torch.sum(gradients * gradients, dim=1, keepdim=True))
# Fit within a range
if maximum is None:
maximum = torch.max(gradients) / 10
if minimum is None:
minimum = 0.0
gradients = ((gradients - minimum) / (maximum - minimum)).clamp(0, 1)
hmap = torch.ones_like(gradients.repeat(1, 3, 1, 1))
hue = (1 - gradients) * 0.7 # 0 is red, 0.7 is blue
hmap[:, 0] = hue[:, 0]
hmap = hsv_to_rgb(hmap)
return hmap
else:
# Fit within a range
if maximum is None:
maximum = torch.max(gradients) / 10
if minimum is None:
minimum = 0.0
gradients = ((gradients - minimum) / (maximum - minimum)).clamp(0, 1)
if gradients.shape[1] == 1:
gradients = gradients.repeat(1, 3, 1, 1)
return gradients
```
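A minimal usage sketch of the helpers above (hedged: it assumes PyTorch is installed and that `visualize_buffer_gradients` is importable from this file under the hypothetical module name `grad_viz`):
```python
# Sketch only: `grad_viz` is a hypothetical name for the module above.
import torch

from grad_viz import visualize_buffer_gradients

grads = 0.01 * torch.randn(1, 3, 64, 64)              # fake (b, c, h, w) gradient buffer
heat = visualize_buffer_gradients(grads, heatmap=True)
assert heat.shape == (1, 3, 64, 64)                   # hue runs blue (small) -> red (large)
```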
#### File: shading-scene-representations/code/visualize-gqn.py
```python
import torch
import torchvision.transforms as transforms
import numpy as np
import os
import math
import random
import time
import pyrr
import sys
import copy
from GQN.model import GenerativeQueryNetwork
from argparse import ArgumentParser
from util.datasets import RTRenderedDataset
from util.config import configure, read_checkpoint
from util.settings import *
from PIL import Image
from renderer.interface import RenderInterface
'''
This script lets you walk around scenes and visualize network predictions
'''
parser = ArgumentParser()
parser.add_argument('--checkpoint', type=str, default='', help='Checkpoint to load')
parser.add_argument('--config_dir', type=str, default='', help='Where config file is located')
parser.add_argument('--config', type=str, default='', help='Which config to read')
parser.add_argument('--device', type=str, default='', help='Device to run on')
parser.add_argument('--find_checkpoints', action='store_true', help='Attempt to find matching checkpoints automatically')
parser.add_argument('--scene_file', type=str, default='')
args = parser.parse_args()
cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if cuda else "cpu")
settings = configure(args, ignore_data=True)
checkpoint, iteration = read_checkpoint(args, settings)
# This is a trick to use the DataLoader instead of creating everything from scratch
settings.batch_size = 1
samples = settings.samples_per_pixel
settings.samples_per_pixel = 1
dataset = RTRenderedDataset(settings, device)
iterator = iter(dataset)
t = 0.0
data = next(iterator)
dataset.samples = samples
observation_samples = 256
def init_data():
global data, dataset
dataset.samples = samples
queries = dataset.get_current_view()
data["query_images"] = queries[0]
data["query_poses"] = queries[1]
random_observations()
def random_scene():
global data, iterator
dataset.renderer.random_scene()
init_data()
def set_spp(num_samples):
global dataset
dataset.samples = num_samples
def random_observations():
global data, dataset
# Create observations manually
dataset.samples = observation_samples
dataset.renderer.random_view()
view1 = dataset.get_current_view()
dataset.renderer.random_view()
view2 = dataset.get_current_view()
dataset.renderer.random_view()
view3 = dataset.get_current_view()
for key in data["observation_images"].keys():
data["observation_images"][key][0][0] = view1[0][key][0]
data["observation_images"][key][0][1] = view2[0][key][0]
data["observation_images"][key][0][2] = view3[0][key][0]
data["observation_poses"][0][0] = view1[1][0]
data["observation_poses"][0][1] = view2[1][0]
data["observation_poses"][0][2] = view3[1][0]
dataset.samples = samples
if args.scene_file != '':
if not os.path.isfile(args.scene_file):
print("Provided scene file does not exist!")
quit()
dataset.renderer.load_scene_file(args.scene_file)
init_data()
else:
random_scene()
iteration = checkpoint['iteration']
# Create network
net = GenerativeQueryNetwork(settings, iteration)
if 'representation_state' in checkpoint and 'generator_state' in checkpoint:
net.representation.load_state_dict(checkpoint['representation_state'])
net.generator.load_state_dict(checkpoint['generator_state'])
else:
net.load_state_dict(checkpoint['model_state'])
for i in range(len(net.representation.representations)):
net.representation.representations[i].iteration = iteration
net = net.to(device)
net.eval()
print(settings)
def format_buffer(buf):
tmp = buf.clone()
if tmp.shape[0] == 1:
tmp = tmp.repeat(3, 1, 1)
return tmp.detach().cpu().permute(1, 2, 0) ** (1 / 2.2)
import moderngl
import moderngl_window as mglw
from moderngl_window import geometry
from moderngl_window.integrations.imgui import ModernglWindowRenderer
from moderngl_window.scene.camera import KeyboardCamera
import imgui
class WindowEvents(mglw.WindowConfig):
gl_version = (3, 3)
window_size = (1200, 600)
aspect_ratio = window_size[0] / window_size[1]
title = "Neural Renderer"
def __init__(self, **kwargs):
super().__init__(**kwargs)
imgui.create_context()
self.wnd.ctx.error
self.imgui = ModernglWindowRenderer(self.wnd)
self.space_down = False
self.prog = self.ctx.program(vertex_shader="""
#version 330
uniform mat4 transform;
uniform vec3 clr;
uniform float aspect;
in vec3 in_vert;
out vec3 color;
out vec2 uv;
void main() {
vec3 pos = vec3(in_vert.x, in_vert.y * aspect, 0.0);
gl_Position = transform * vec4(pos, 1.0);
uv = vec2(in_vert.x * 0.5 + 0.5, in_vert.y * 0.5 + 0.5);
uv.y = 1.0 - uv.y;
color = vec3(1, 0, 0);
}
""",
fragment_shader="""
#version 330
uniform sampler2D tex_sampler;
out vec4 fragColor;
in vec3 color;
in vec2 uv;
void main() {
fragColor = vec4(texture(tex_sampler, uv).rgb, 1.0);
}
""",
)
self.reference_texture = self.ctx.texture((dataset.render_size, dataset.render_size), components=3)
self.prediction_texture = self.ctx.texture((dataset.render_size, dataset.render_size), components=3)
self.reference_texture.repeat_x = False
self.reference_texture.repeat_y = False
self.prediction_texture.repeat_x = False
self.prediction_texture.repeat_y = False
self.reference_texture.use(5)
self.prediction_texture.use(6)
self.prog['aspect'].value = 12 / 6
T = pyrr.matrix44.create_from_translation(np.array([-0.5, 0.15, 0]))
T2 = pyrr.matrix44.create_from_translation(np.array([0.5, 0.15, 0]))
S = pyrr.matrix44.create_from_scale(np.array([0.4, 0.4, 1]))
self.M = pyrr.matrix44.multiply(S, T)
self.M2 = pyrr.matrix44.multiply(S, T2)
self.transform = self.prog['transform']
self.transform.value = tuple(self.M.flatten())
self.observation_textures = []
self.observation_transforms = []
for i in range(1, settings.views_per_scene):
self.observation_textures.append(self.ctx.texture((dataset.render_size, dataset.render_size), components=3))
self.observation_textures[-1].repeat_x = False
self.observation_textures[-1].repeat_y = False
self.observation_textures[-1].use(6 + i)
T = pyrr.matrix44.create_from_translation(np.array([-0.825 + (i - 1) * 0.165, -0.825, 0]))
S = pyrr.matrix44.create_from_scale(np.array([0.075, 0.075, 1]))
M = pyrr.matrix44.multiply(S, T)
self.observation_transforms.append(M)
self.buffer_textures = []
self.buffer_transforms = []
for i in range(len(settings.model.generators[-1].query_passes)):
self.buffer_textures.append(self.ctx.texture((dataset.render_size, dataset.render_size), components=3))
self.buffer_textures[-1].repeat_x = False
self.buffer_textures[-1].repeat_y = False
self.buffer_textures[-1].use(6 + settings.views_per_scene + i)
T = pyrr.matrix44.create_from_translation(np.array([0.175 + i * 0.165, -0.825, 0]))
S = pyrr.matrix44.create_from_scale(np.array([0.075, 0.075, 1]))
M = pyrr.matrix44.multiply(S, T)
self.buffer_transforms.append(M)
self.camera = KeyboardCamera(self.wnd.keys, 45.0, 1.0, 0.1, 100.0)
self.camera.position[0] = 1.5
self.camera.position[1] = 1.5
self.camera.position[2] = -1.5
self.camera._velocity = -2.5
self.camera._mouse_sensitivity = -0.1
self.quad = np.array([-1.0, -1.0, 0.0,
1.0, -1.0, 0.0,
1.0, 1.0, 0.0,
-1.0, -1.0, 0.0,
1.0, 1.0, 0.0,
-1.0, 1.0, 0.0], dtype='f4')
self.quad_vao = self.ctx.simple_vertex_array(self.prog, self.ctx.buffer(self.quad), 'in_vert')
def render(self, time: float, frametime: float):
# Load transform
view = self.camera.matrix
view_inverse = pyrr.matrix44.inverse(view)
position = pyrr.matrix44.apply_to_vector(view_inverse, np.array([0.0, 0.0, 0.0, 1.0]))
lookat = pyrr.matrix44.apply_to_vector(view_inverse, np.array([0.0, 0.0, 1.0, 1.0]))
dataset.renderer.renderer.set_camera(position[:3], lookat[:3])
# Get reference and draw
dataset.samples = samples
queries = dataset.get_current_view()
data["query_images"] = queries[0]
data["query_poses"] = queries[1]
reference = format_buffer(data["query_images"][settings.model.output_pass][0])
reference = reference.clamp(0, 1).numpy()
reference = (reference * 255).astype(np.uint8)
self.reference_texture.write(reference.tobytes())
self.prog['tex_sampler'].value = 5
self.transform.value = tuple(self.M.flatten())
self.quad_vao.render(mode=moderngl.TRIANGLES)
# Draw observations
for i in range(len(self.observation_textures)):
observation = format_buffer(data["observation_images"][settings.model.output_pass][0][i])
observation = observation.clamp(0, 1).numpy()
observation = (observation * 255).astype(np.uint8)
self.observation_textures[i].write(observation.tobytes())
self.prog['tex_sampler'].value = 6 + 1 + i
self.transform.value = tuple(self.observation_transforms[i].flatten())
self.quad_vao.render(mode=moderngl.TRIANGLES)
# Draw G-buffer (TODO)
for i in range(len(self.buffer_textures)):
buffer = format_buffer(data["query_images"][settings.model.generators[-1].query_passes[i]][0])
buffer = buffer.clamp(0, 1).numpy()
buffer = (buffer * 255).astype(np.uint8)
self.buffer_textures[i].write(buffer.tobytes())
self.prog['tex_sampler'].value = 6 + settings.views_per_scene + i
self.transform.value = tuple(self.buffer_transforms[i].flatten())
self.quad_vao.render(mode=moderngl.TRIANGLES)
# Network sample and draw
prediction = net.sample(data)
pred = format_buffer(prediction[-1][settings.model.output_pass][0])
pred = pred.clamp(0, 1).numpy()
pred = (pred * 255).astype(np.uint8)
self.prediction_texture.write(pred.tobytes())
self.prog['tex_sampler'].value = 6
self.transform.value = tuple(self.M2.flatten())
self.quad_vao.render(mode=moderngl.TRIANGLES)
self.render_ui()
def render_ui(self):
global samples, observation_samples
imgui.new_frame()
imgui.begin("Options", True)
if imgui.button("Random Scene"):
random_scene()
if imgui.button("Randomize Observations"):
random_observations()
_, samples = imgui.drag_int("Query SPP", samples, min_value=1, max_value=1024)
_, observation_samples = imgui.drag_int("Observation SPP", observation_samples, min_value=1, max_value=1024)
imgui.end()
imgui.render()
self.imgui.render(imgui.get_draw_data())
def resize(self, width: int, height: int):
self.imgui.resize(width, height)
def key_event(self, key, action, modifiers):
global samples, observation_samples
self.imgui.key_event(key, action, modifiers)
if action == self.wnd.keys.ACTION_PRESS:
if key == self.wnd.keys.SPACE:
self.space_down = True
if key == self.wnd.keys.R:
random_scene()
if key == self.wnd.keys.O:
random_observations()
if key == self.wnd.keys.J:
samples += 10
if key == self.wnd.keys.H:
samples = max(1, samples - 10)
if key == self.wnd.keys.M:
observation_samples += 10
if key == self.wnd.keys.N:
observation_samples = max(1, observation_samples - 10)
if action == self.wnd.keys.ACTION_RELEASE:
if key == self.wnd.keys.SPACE:
self.space_down = False
imgui.set_window_focus()
if self.space_down:
self.camera.key_input(key, action, modifiers)
def mouse_position_event(self, x, y, dx, dy):
self.imgui.mouse_position_event(x, y, dx, dy)
def mouse_drag_event(self, x, y, dx, dy):
self.imgui.mouse_drag_event(x, y, dx, dy)
if self.space_down:
self.camera.rot_state(dx, dy)
def mouse_scroll_event(self, x_offset, y_offset):
self.imgui.mouse_scroll_event(x_offset, y_offset)
def mouse_press_event(self, x, y, button):
self.imgui.mouse_press_event(x, y, button)
def mouse_release_event(self, x: int, y: int, button: int):
self.imgui.mouse_release_event(x, y, button)
def unicode_char_entered(self, char):
self.imgui.unicode_char_entered(char)
sys.argv[1:] = []
mglw.run_window_config(WindowEvents)
``` |
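The tensor-to-texture path used repeatedly in `render()` above (gamma correction as in `format_buffer`, clamp, 8-bit packing, `texture.write`) can be factored into one helper; a sketch assuming only PyTorch and NumPy:
```python
import numpy as np
import torch

def tensor_to_texture_bytes(chw: torch.Tensor) -> bytes:
    """Gamma-correct a CHW float tensor and pack it as RGB8 bytes, mirroring render()."""
    if chw.shape[0] == 1:                 # grayscale -> 3 channels, as in format_buffer
        chw = chw.repeat(3, 1, 1)
    hwc = chw.detach().cpu().permute(1, 2, 0) ** (1 / 2.2)
    return (hwc.clamp(0, 1).numpy() * 255).astype(np.uint8).tobytes()

print(len(tensor_to_texture_bytes(torch.rand(3, 64, 64))))  # 12288 = 64 * 64 * 3
```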
{
"source": "JonathanGrant/marbles",
"score": 3
} |
#### File: docs/examples/custom_assertions.py
```python
import marbles.core
import marbles.mixins
def my_sort(i, reverse=False):
'''Sort the elements in ``i``.'''
# Purposefully sort in the wrong order so our unit test will fail
return sorted(i, reverse=~reverse)
class SortTestCase(marbles.core.TestCase, marbles.mixins.MonotonicMixins):
def test_sort(self):
i = [1, 3, 4, 5, 2, 0, 8]
self.assertMonotonicIncreasing(my_sort(i))
self.assertMonotonicDecreasing(my_sort(i, reverse=True))
if __name__ == '__main__':
marbles.core.main()
```
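A quick check of the deliberate bug above: `~` is bitwise NOT, so `~False == -1` and `~True == -2`, both truthy, which means `my_sort` always sorts in descending order; `not reverse` would give the behaviour the docstring promises.
```python
print(sorted([3, 1, 2], reverse=~False))     # [3, 2, 1] -- the intentional bug
print(sorted([3, 1, 2], reverse=not False))  # [1, 2, 3] -- the intended behaviour
```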
#### File: docs/examples/required_note.py
```python
import marbles.core
class ComplexTestCase(marbles.core.AnnotatedTestCase):
def test_for_edge_case(self):
self.assertTrue(False)
if __name__ == '__main__':
marbles.core.main()
```
#### File: tests/examples/example_marbles.py
```python
import marbles.core
class Response:
def __init__(self, json):
self._json = json
def json(self):
return self._json
class requests:
@classmethod
def put(cls, endpoint, data=None):
return Response({
'code': 409,
'status': 'Conflict',
'details': 'Resource with id {} already exists'.format(data['id'])
})
class ResponseTestCase(marbles.core.TestCase):
'''Test application responses.'''
def test_create_resource(self):
endpoint = 'http://example.com/api/v1/resource'
data = {'id': '1', 'name': 'Little Bobby Tables'}
self.assertEqual(
requests.put(endpoint, data=data).json()['code'],
201)
if __name__ == '__main__':
marbles.core.main()
```
#### File: core/tests/test_log.py
```python
import collections
import io
import json
import os
import sys
import tempfile
import unittest
import unittest.util
from marbles.core import ContextualAssertionError
from marbles.core import log
from marbles.core import __version__
import tests.test_marbles as marbles_tests
class LoggingConfigureTestCase(unittest.TestCase):
'''Configures assertion logging during a test.
Assumes the subclass has a dict attr named 'log_config'.
During a test, one can get the results of logging so far with
``self.assertion_logs()``.
'''
env_var_mapping = {
'logfile': 'MARBLES_LOGFILE',
'verbose': 'MARBLES_LOG_VERBOSE',
'attrs': 'MARBLES_TEST_CASE_ATTRS',
'verbose_attrs': 'MARBLES_TEST_CASE_ATTRS_VERBOSE',
}
def __init__(self, methodName='runTest', *, use_env=False, use_file=False,
use_annotated_test_case=False, **kwargs):
super().__init__(methodName=methodName, **kwargs)
self._use_env = use_env
self._use_file = use_file
self._use_annotated_test_case = use_annotated_test_case
def __str__(self):
params = ', '.join(
'{}={!r}'.format(name, getattr(self, '_{}'.format(name)))
for name in ('use_env', 'use_file', 'use_annotated_test_case'))
return '{} ({}) ({})'.format(
self._testMethodName,
unittest.util.strclass(self.__class__),
params)
def setUpEnv(self):
if not isinstance(self.__config['logfile'], str):
raise unittest.SkipTest('can only use env vars to pass '
'filenames, not file objects')
self.new_env = {self.env_var_mapping[k]: v
for k, v in self.__config.items()}
self.old_env = {k: os.environ.get(k)
for k in self.new_env.keys()}
for k, v in self.new_env.items():
if isinstance(v, str):
os.environ[k] = v
elif isinstance(v, collections.Sequence):
os.environ[k] = ','.join(str(x) for x in v)
else:
os.environ[k] = str(v)
def tearDownEnv(self):
# If we skipped the test in setUpEnv, we wouldn't have this,
# but we also don't need to do anything.
if hasattr(self, 'old_env'):
for k, v in self.old_env.items():
if v is None:
del os.environ[k]
else:
os.environ[k] = v
def setUpConfigure(self):
log.logger.configure(**self.__config)
def tearDownConfigure(self):
pass
def setUp(self):
super().setUp()
if self._use_annotated_test_case:
self.case = marbles_tests.ExampleAnnotatedTestCase()
else:
self.case = marbles_tests.ExampleTestCase()
self.__old_logger = log.logger
log.logger = log.AssertionLogger()
log.logger.configure()
self.__config = self.log_config.copy()
if self._use_file:
_, self._tmpfilename = tempfile.mkstemp()
self.__config['logfile'] = self._tmpfilename
else:
self.__file_handle = io.StringIO()
self.__config['logfile'] = self.__file_handle
if self._use_env:
self.setUpEnv()
else:
self.setUpConfigure()
def tearDown(self):
super().tearDown()
delattr(self, 'case')
if self._use_env:
self.tearDownEnv()
else:
self.tearDownConfigure()
if self._use_file:
log.logger.logfile.close()
os.remove(self._tmpfilename)
log.logger = self.__old_logger
def assertion_logs(self):
if self._use_file:
log.logger.logfile.flush()
with open(self._tmpfilename, 'r') as f:
lines = list(f.readlines())
else:
lines = self.__file_handle.getvalue().split('\n')
return [json.loads(line) for line in lines if line]
class TestAssertionLogging(LoggingConfigureTestCase):
log_config = {}
def test_success(self):
'''On a successful assertion, do we log information?'''
self.case.test_success()
logs = self.assertion_logs()
self.assertEqual(len(logs), 1)
expected = {
'file': marbles_tests.__file__,
'assertion': 'assertTrue',
'marbles_version': __version__,
'args': ['True'],
'kwargs': [],
}
self.assertEqual({k: v for k, v in logs[0].items() if k in expected},
expected)
for unexpected_key in ('locals', 'msg', 'note'):
self.assertNotIn(unexpected_key, logs[0])
class TestAssertionLoggingVerboseTrue(LoggingConfigureTestCase):
log_config = {'verbose': True}
def test_success(self):
'''On a successful assertion, do we respect verbose=True?'''
self.case.test_success()
logs = self.assertion_logs()
self.assertEqual(len(logs), 1)
expected = {
'file': marbles_tests.__file__,
'assertion': 'assertTrue',
'marbles_version': __version__,
'args': ['True'],
'kwargs': [],
'locals': [],
'msg': None,
'note': 'some note',
}
self.assertEqual({k: v for k, v in logs[0].items() if k in expected},
expected)
def test_failure(self):
'''On a failed assertion, do we log information?'''
with self.assertRaises(ContextualAssertionError):
self.case.test_reverse_equality_positional_msg()
logs = self.assertion_logs()
self.assertEqual(len(logs), 1)
expected = {
'file': marbles_tests.__file__,
'assertion': 'assertReverseEqual',
'assertion_class': 'tests.test_marbles.ReversingTestCaseMixin',
'marbles_version': __version__,
'args': ['leif', 'leif'],
'kwargs': [],
'locals': [{'key': 's', 'value': 'leif'}],
'msg': 'some message',
'note': 'some note',
}
self.assertEqual({k: v for k, v in logs[0].items() if k in expected},
expected)
class TestAssertionLoggingVerboseFalse(LoggingConfigureTestCase):
log_config = {'verbose': False}
def test_success(self):
'''On a successful assertion, do we respect verbose=False?'''
self.case.test_success()
logs = self.assertion_logs()
self.assertEqual(len(logs), 1)
expected = {
'file': marbles_tests.__file__,
'assertion': 'assertTrue',
'marbles_version': __version__,
'args': ['True'],
'kwargs': [],
}
self.assertEqual({k: v for k, v in logs[0].items() if k in expected},
expected)
for unexpected_key in ('locals', 'msg', 'note'):
self.assertNotIn(unexpected_key, logs[0])
def test_failure(self):
'''On a failed assertion, do we log information?'''
with self.assertRaises(ContextualAssertionError):
self.case.test_reverse_equality_positional_msg()
logs = self.assertion_logs()
self.assertEqual(len(logs), 1)
expected = {
'file': marbles_tests.__file__,
'assertion': 'assertReverseEqual',
'assertion_class': 'tests.test_marbles.ReversingTestCaseMixin',
'marbles_version': __version__,
'args': ['leif', 'leif'],
'kwargs': [],
'locals': [{'key': 's', 'value': 'leif'}],
'msg': 'some message',
'note': 'some note',
}
self.assertEqual({k: v for k, v in logs[0].items() if k in expected},
expected)
class TestAssertionLoggingVerboseList(LoggingConfigureTestCase):
log_config = {'verbose': ['msg', 'note']}
def test_success(self):
'''On a successful assertion, do we respect a verbose list?'''
self.case.test_success()
logs = self.assertion_logs()
self.assertEqual(len(logs), 1)
expected = {
'file': marbles_tests.__file__,
'assertion': 'assertTrue',
'marbles_version': __version__,
'args': ['True'],
'kwargs': [],
'msg': None,
'note': 'some note',
}
self.assertEqual({k: v for k, v in logs[0].items() if k in expected},
expected)
for unexpected_key in ('locals',):
self.assertNotIn(unexpected_key, logs[0])
def test_failure(self):
'''On a failed assertion, do we log information?'''
with self.assertRaises(ContextualAssertionError):
self.case.test_reverse_equality_positional_msg()
logs = self.assertion_logs()
self.assertEqual(len(logs), 1)
expected = {
'file': marbles_tests.__file__,
'assertion': 'assertReverseEqual',
'assertion_class': 'tests.test_marbles.ReversingTestCaseMixin',
'marbles_version': __version__,
'args': ['leif', 'leif'],
'kwargs': [],
'locals': [{'key': 's', 'value': 'leif'}],
'msg': 'some message',
'note': 'some note',
}
self.assertEqual({k: v for k, v in logs[0].items() if k in expected},
expected)
class TestAssertionLoggingAttributeCapture(LoggingConfigureTestCase):
log_config = {'attrs': ['longMessage']}
def test_capture_test_case_attributes(self):
'''Can we capture other attributes of a TestCase?'''
self.case.test_success()
logs = self.assertion_logs()
self.assertEqual(len(logs), 1)
expected = {
'assertion': 'assertTrue'
}
self.assertEqual({k: v for k, v in logs[0].items() if k in expected},
expected)
self.assertNotIn('longMessage', logs[0])
def test_capture_test_case_attributes_on_failure(self):
'''Can we capture other attributes of a TestCase on failure?'''
with self.assertRaises(ContextualAssertionError):
self.case.test_failure()
logs = self.assertion_logs()
self.assertEqual(len(logs), 1)
expected = {
'assertion': 'assertTrue',
'longMessage': 'This is a long message'
}
self.assertEqual({k: v for k, v in logs[0].items() if k in expected},
expected)
class TestAssertionLoggingVerboseAttributeCapture(LoggingConfigureTestCase):
log_config = {'verbose_attrs': ['longMessage']}
def test_capture_test_case_attributes(self):
'''Can we capture other attributes of a TestCase?'''
self.case.test_success()
logs = self.assertion_logs()
self.assertEqual(len(logs), 1)
expected = {
'assertion': 'assertTrue',
'longMessage': 'This is a long message'
}
self.assertEqual({k: v for k, v in logs[0].items() if k in expected},
expected)
def test_capture_test_case_attributes_on_failure(self):
'''Can we capture other attributes of a TestCase on failure?'''
with self.assertRaises(ContextualAssertionError):
self.case.test_failure()
logs = self.assertion_logs()
self.assertEqual(len(logs), 1)
expected = {
'assertion': 'assertTrue',
'longMessage': 'This is a long message'
}
self.assertEqual({k: v for k, v in logs[0].items() if k in expected},
expected)
class TestAssertionLoggingRespectsEnvOverrides(LoggingConfigureTestCase):
'''Tests that we can override log.logger.configure() with env vars.'''
log_config = {'logfile': '/path/does/not/exist',
'verbose': False,
'attrs': ['longMessage']}
def setUp(self):
if self._use_env:
raise unittest.SkipTest(
'Only testing when the base class sets up with configure()')
self._use_file = True
super().setUp()
self.old_logfile = os.environ.get('MARBLES_LOGFILE')
_, self._tmpfilename = tempfile.mkstemp()
os.environ['MARBLES_LOGFILE'] = self._tmpfilename
def tearDown(self):
super().tearDown()
if self.old_logfile is None:
del os.environ['MARBLES_LOGFILE']
else:
os.environ['MARBLES_LOGFILE'] = self.old_logfile
delattr(self, 'old_logfile')
if hasattr(self, 'old_verbose'):
if self.old_verbose is None:
del os.environ['MARBLES_LOG_VERBOSE']
else:
os.environ['MARBLES_LOG_VERBOSE'] = self.old_verbose
def test_success(self):
'''On a successful assertion, do we log information?'''
self.case.test_success()
logs = self.assertion_logs()
self.assertEqual(len(logs), 1)
expected = {
'file': marbles_tests.__file__,
'assertion': 'assertTrue',
'marbles_version': __version__,
'args': ['True'],
'kwargs': [],
}
self.assertEqual({k: v for k, v in logs[0].items() if k in expected},
expected)
for unexpected_key in ('locals', 'msg', 'note'):
self.assertNotIn(unexpected_key, logs[0])
def test_verbose_override(self):
'''On a successful assertion, do we log information?'''
self.old_verbose = os.environ.get('MARBLES_LOG_VERBOSE')
os.environ['MARBLES_LOG_VERBOSE'] = 'true'
self.case.test_success()
logs = self.assertion_logs()
self.assertEqual(len(logs), 1)
expected = {
'file': marbles_tests.__file__,
'assertion': 'assertTrue',
'marbles_version': __version__,
'args': ['True'],
'kwargs': [],
'locals': [],
'msg': None,
'note': 'some note',
}
self.assertEqual({k: v for k, v in logs[0].items() if k in expected},
expected)
def load_tests(loader, tests, pattern):
suite = unittest.TestSuite()
module = sys.modules[__name__]
objs = [getattr(module, name) for name in dir(module)]
test_classes = [obj for obj in objs
if (isinstance(obj, type)
and issubclass(obj, unittest.TestCase))]
for use_annotated_test_case in (True, False):
for cls in test_classes:
for name in loader.getTestCaseNames(cls):
suite.addTest(
cls(
methodName=name,
use_env=False,
use_file=False,
use_annotated_test_case=use_annotated_test_case
)
)
for cls in test_classes:
for name in loader.getTestCaseNames(cls):
suite.addTest(
cls(
methodName=name,
use_env=False,
use_file=True,
use_annotated_test_case=use_annotated_test_case
)
)
for cls in test_classes:
for name in loader.getTestCaseNames(cls):
suite.addTest(
cls(
methodName=name,
use_env=True,
use_file=True,
use_annotated_test_case=use_annotated_test_case
)
)
return suite
```
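For reference, `env_var_mapping` above mirrors the environment variables the assertion logger reads; a hedged sketch with illustrative values only (sequence-valued settings are comma-joined, matching `setUpEnv`):
```python
import os

os.environ['MARBLES_LOGFILE'] = '/tmp/marbles-assertions.log'          # logfile=...
os.environ['MARBLES_LOG_VERBOSE'] = 'true'                             # verbose=True
os.environ['MARBLES_TEST_CASE_ATTRS'] = 'longMessage'                  # attrs=['longMessage']
os.environ['MARBLES_TEST_CASE_ATTRS_VERBOSE'] = 'longMessage,maxDiff'  # verbose_attrs=[...]
```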
#### File: marbles/mixins/mixins.py
```python
import abc
import collections
import operator
import os
from datetime import date, datetime, timedelta, timezone
import pandas as pd
# TODO (jsa): override abc TypeError to inform user that they have to
# inherit from unittest.TestCase (I don't know if this is possible)
class BetweenMixins(abc.ABC):
'''Built-in assertions about betweenness.'''
@abc.abstractmethod
def fail(self, msg):
pass # pragma: no cover
@abc.abstractmethod
def _formatMessage(self, msg, standardMsg):
pass # pragma: no cover
def assertBetween(self, obj, lower, upper, strict=True, msg=None):
'''Fail if ``obj`` is not between ``lower`` and ``upper``.
If ``strict=True`` (default), fail unless
``lower < obj < upper``. If ``strict=False``, fail unless
``lower <= obj <= upper``.
This is equivalent to ``self.assertTrue(lower < obj < upper)``
or ``self.assertTrue(lower <= obj <= upper)``, but with a nicer
default message.
Parameters
----------
obj
lower
upper
strict : bool
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
'''
if strict:
standardMsg = '%s is not strictly between %s and %s' % (
obj, lower, upper)
op = operator.lt
else:
standardMsg = '%s is not between %s and %s' % (obj, lower, upper)
op = operator.le
if not (op(lower, obj) and op(obj, upper)):
self.fail(self._formatMessage(msg, standardMsg))
def assertNotBetween(self, obj, lower, upper, strict=True, msg=None):
'''Fail if ``obj`` is between ``lower`` and ``upper``.
If ``strict=True`` (default), fail if ``lower <= obj <=
upper``. If ``strict=False``, fail if ``lower < obj < upper``.
This is equivalent to ``self.assertFalse(lower < obj < upper)``
or ``self.assertFalse(lower <= obj <= upper)``, but with a
nicer default message.
Raises
------
ValueError
            If ``lower`` equals ``upper`` and ``strict=False`` is
            specified.
Parameters
----------
obj
lower
upper
strict : bool
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
'''
if strict:
standardMsg = '%s is between %s and %s' % (obj, lower, upper)
op = operator.le
else:
standardMsg = '%s is strictly between %s and %s' % (
obj, lower, upper)
op = operator.lt
# Providing strict=False and a degenerate interval should raise
# ValueError so the test will error instead of fail
if (not strict) and (lower == upper):
raise ValueError('cannot specify strict=False if lower == upper')
if (op(lower, obj) and op(obj, upper)):
self.fail(self._formatMessage(msg, standardMsg))
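# Usage sketch (hypothetical test case; assumes unittest.TestCase supplies the
# abstract methods required above):
#
#     class BoundsTestCase(unittest.TestCase, BetweenMixins):
#         def test_probability_in_unit_interval(self):
#             self.assertBetween(0.7, lower=0, upper=1)
#             self.assertNotBetween(1.2, lower=0, upper=1)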
class MonotonicMixins(abc.ABC):
'''Built-in assertions about monotonicity.'''
@abc.abstractmethod
def fail(self):
pass # pragma: no cover
@abc.abstractmethod
def assertIsInstance(self, obj, cls, msg):
pass # pragma: no cover
@abc.abstractmethod
def _formatMessage(self, msg, standardMsg):
pass # pragma: no cover
@staticmethod
def _monotonic(op, sequence):
return all(op(i, j) for i, j in zip(sequence, sequence[1:]))
def assertMonotonicIncreasing(self, sequence, strict=True, msg=None):
'''Fail if ``sequence`` is not monotonically increasing.
If ``strict=True`` (default), fail unless each element in
``sequence`` is less than the following element as determined
by the ``<`` operator. If ``strict=False``, fail unless each
element in ``sequence`` is less than or equal to the following
element as determined by the ``<=`` operator.
.. code-block:: python
assert all((i < j) for i, j in zip(sequence, sequence[1:]))
assert all((i <= j) for i, j in zip(sequence, sequence[1:]))
Parameters
----------
sequence : iterable
strict : bool
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``sequence`` is not iterable.
'''
if not isinstance(sequence, collections.Iterable):
raise TypeError('First argument is not iterable')
if strict:
standardMsg = ('Elements in %s are not strictly monotonically '
'increasing') % (sequence,)
op = operator.lt
else:
standardMsg = ('Elements in %s are not monotonically '
'increasing') % (sequence,)
op = operator.le
if not self._monotonic(op, sequence):
self.fail(self._formatMessage(msg, standardMsg))
def assertNotMonotonicIncreasing(self, sequence, strict=True, msg=None):
'''Fail if ``sequence`` is monotonically increasing.
If ``strict=True`` (default), fail if each element in
``sequence`` is less than the following element as determined
by the ``<`` operator. If ``strict=False``, fail if each
element in ``sequence`` is less than or equal to the following
element as determined by the ``<=`` operator.
.. code-block:: python
assert not all((i < j) for i, j in zip(sequence, sequence[1:]))
assert not all((i <= j) for i, j in zip(sequence, sequence[1:]))
Parameters
----------
sequence : iterable
strict : bool
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``sequence`` is not iterable.
'''
if not isinstance(sequence, collections.Iterable):
raise TypeError('First argument is not iterable')
if strict:
standardMsg = ('Elements in %s are strictly monotonically '
'increasing') % (sequence,)
op = operator.lt
else:
standardMsg = ('Elements in %s are monotonically '
'increasing') % (sequence,)
op = operator.le
if self._monotonic(op, sequence):
self.fail(self._formatMessage(msg, standardMsg))
def assertMonotonicDecreasing(self, sequence, strict=True, msg=None):
'''Fail if ``sequence`` is not monotonically decreasing.
If ``strict=True`` (default), fail unless each element in
``sequence`` is greater than the following element as
determined by the ``>`` operator. If ``strict=False``, fail
unless each element in ``sequence`` is greater than or equal
to the following element as determined by the ``>=`` operator.
.. code-block:: python
assert all((i > j) for i, j in zip(sequence, sequence[1:]))
assert all((i >= j) for i, j in zip(sequence, sequence[1:]))
Parameters
----------
sequence : iterable
strict : bool
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``sequence`` is not iterable.
'''
if not isinstance(sequence, collections.Iterable):
raise TypeError('First argument is not iterable')
if strict:
standardMsg = ('Elements in %s are not strictly monotonically '
'decreasing') % (sequence,)
op = operator.gt
else:
standardMsg = ('Elements in %s are not monotonically '
'decreasing') % (sequence,)
op = operator.ge
if not self._monotonic(op, sequence):
self.fail(self._formatMessage(msg, standardMsg))
def assertNotMonotonicDecreasing(self, sequence, strict=True, msg=None):
'''Fail if ``sequence`` is monotonically decreasing.
If ``strict=True`` (default), fail if each element in
``sequence`` is greater than the following element as
determined by the ``>`` operator. If ``strict=False``, fail if
each element in ``sequence`` is greater than or equal to the
following element as determined by the ``>=`` operator.
.. code-block:: python
assert not all((i > j) for i, j in zip(sequence, sequence[1:]))
assert not all((i >= j) for i, j in zip(sequence, sequence[1:]))
Parameters
----------
sequence : iterable
strict : bool
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``sequence`` is not iterable.
'''
if not isinstance(sequence, collections.Iterable):
raise TypeError('First argument is not iterable')
if strict:
standardMsg = ('Elements in %s are strictly monotonically '
'decreasing') % (sequence,)
op = operator.gt
else:
standardMsg = ('Elements in %s are monotonically '
'decreasing') % (sequence,)
op = operator.ge
if self._monotonic(op, sequence):
self.fail(self._formatMessage(msg, standardMsg))
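# Usage sketch (hypothetical): timestamps from an append-only log may repeat,
# so strict=False checks for a non-decreasing / non-increasing sequence:
#
#     self.assertMonotonicIncreasing([1, 1, 2, 5], strict=False)
#     self.assertMonotonicDecreasing([5, 4, 4, 1], strict=False)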
class UniqueMixins(abc.ABC):
'''Built-in assertions about uniqueness.
These assertions can handle containers that contain unhashable
elements.
'''
@abc.abstractmethod
def fail(self, msg):
pass # pragma: no cover
@abc.abstractmethod
def assertIsInstance(self, obj, cls, msg):
pass # pragma: no cover
@abc.abstractmethod
def _formatMessage(self, msg, standardMsg):
pass # pragma: no cover
def assertUnique(self, container, msg=None):
'''Fail if elements in ``container`` are not unique.
Parameters
----------
container : iterable
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``container`` is not iterable.
'''
if not isinstance(container, collections.Iterable):
raise TypeError('First argument is not iterable')
standardMsg = 'Elements in %s are not unique' % (container,)
# We iterate over each element in the container instead of
# comparing len(container) == len(set(container)) to allow
# for containers that contain unhashable types
for idx, elem in enumerate(container):
# If elem appears at an earlier or later index position
# the elements are not unique
if elem in container[:idx] or elem in container[idx+1:]:
self.fail(self._formatMessage(msg, standardMsg))
def assertNotUnique(self, container, msg=None):
'''Fail if elements in ``container`` are unique.
Parameters
----------
container : iterable
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``container`` is not iterable.
'''
if not isinstance(container, collections.Iterable):
raise TypeError('First argument is not iterable')
standardMsg = 'Elements in %s are unique' % (container,)
# We iterate over each element in the container instead of
# comparing len(container) == len(set(container)) to allow
# for containers that contain unhashable types
for idx, elem in enumerate(container):
# If elem appears at an earlier or later index position
# the elements are not unique
if elem in container[:idx] or elem in container[idx+1:]:
return # succeed fast
self.fail(self._formatMessage(msg, standardMsg))
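# Usage sketch (hypothetical): unlike a set()-based length check, these
# assertions accept unhashable elements such as dicts:
#
#     self.assertUnique([{'id': 1}, {'id': 2}])
#     self.assertNotUnique([{'id': 1}, {'id': 1}])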
class FileMixins(abc.ABC):
'''Built-in assertions for files.
With the exception of :meth:`assertFileExists` and
:meth:`assertFileNotExists`, all custom file assertions take a
``filename`` argument which can accept a file name as a
:class:`str` or :py:class:`bytes` object, or a `file-like object`_.
Accepting a file-like object is useful for testing files that are
not present locally, e.g., files in HDFS.
.. _file-like object: http://docs.python.org/3.5/glossary.html#term-file-like-object
.. code-block:: python
import unittest
import hdfs3
from marbles.mixins import mixins
class MyFileTest(unittest.TestCase, mixins.FileMixins):
def test_file_encoding(self):
fname = 'myfile.csv'
# You can pass fname directly to the assertion (if the
# file exists locally)
self.assertFileEncodingEqual(fname, 'utf-8')
# Or open the file and pass a file descriptor to the
# assertion
with open(fname) as f:
self.assertFileEncodingEqual(f, 'utf-8')
def test_hdfs_file_encoding(self):
hdfspath = '/path/to/myfile.csv'
client = hdfs3.HDFileSystem(host='host', port='port')
with client.open(hdfspath) as f:
self.assertFileEncodingEqual(f, 'utf-8')
Note that not all file-like objects implement the expected
interface. These custom file assertions expect the following
methods and attributes:
+ :meth:`read`
+ :meth:`write`
+ :meth:`seek`
+ :meth:`tell`
+ :attr:`name`
+ :attr:`encoding`
'''
@abc.abstractmethod
def fail(self, msg):
pass # pragma: no cover
@abc.abstractmethod
def assertEqual(self, first, second, msg):
pass # pragma: no cover
@abc.abstractmethod
def assertNotEqual(self, first, second, msg):
pass # pragma: no cover
@abc.abstractmethod
def assertAlmostEqual(self, first, second, places, msg, delta):
pass # pragma: no cover
@abc.abstractmethod
def assertNotAlmostEqual(self, first, second, places, msg, delta):
pass # pragma: no cover
@abc.abstractmethod
def assertGreater(self, a, b, msg):
pass # pragma: no cover
@abc.abstractmethod
def assertGreaterEqual(self, a, b, msg):
pass # pragma: no cover
@abc.abstractmethod
def assertLess(self, a, b, msg):
pass # pragma: no cover
@abc.abstractmethod
def assertLessEqual(self, a, b, msg):
pass # pragma: no cover
@abc.abstractmethod
def assertRegex(self, text, expected_regex, msg):
pass # pragma: no cover
@abc.abstractmethod
def assertNotRegex(self, text, expected_regex, msg):
pass # pragma: no cover
@abc.abstractmethod
def _formatMessage(self, msg, standardMsg):
pass # pragma: no cover
@staticmethod
def _get_or_open_file(filename):
'''If ``filename`` is a string or bytes object, open the
``filename`` and return the file object. If ``filename`` is
        file-like (i.e., it has 'read' and 'write' attributes), return
``filename``.
Parameters
----------
filename : str, bytes, file
Raises
------
TypeError
If ``filename`` is not a string, bytes, or file-like
object.
File-likeness is determined by checking for 'read' and
'write' attributes.
'''
if isinstance(filename, (str, bytes)):
f = open(filename)
elif hasattr(filename, 'read') and hasattr(filename, 'write'):
f = filename
else:
raise TypeError('filename must be str or bytes, or a file')
return f
def _get_file_name(self, filename):
f = self._get_or_open_file(filename)
try:
fname = f.name
except AttributeError as e:
            # If f doesn't have a name attribute,
# raise a TypeError
if e.args == ('name',):
raise TypeError('Expected file-like object')
raise e # pragma: no cover
finally:
f.close()
return fname
def _get_file_type(self, filename):
f = self._get_or_open_file(filename)
try:
fname = f.name
except AttributeError as e:
            # If f doesn't have a name attribute,
# raise a TypeError
if e.args == ('name',):
raise TypeError('Expected file-like object')
raise e # pragma: no cover
else:
filetype = os.path.splitext(fname)[-1]
finally:
f.close()
return filetype
def _get_file_encoding(self, filename):
f = self._get_or_open_file(filename)
try:
encoding = f.encoding
except AttributeError as e:
# If f doesn't have an encoding attribute,
# raise a TypeError
if e.args == ('encoding',):
raise TypeError('Expected file-like object')
raise e # pragma: no cover
finally:
f.close()
return encoding
def _get_file_size(self, filename):
f = self._get_or_open_file(filename)
try:
f.seek(0, os.SEEK_END)
except AttributeError as e:
# If f doesn't have a seek method,
# raise a TypeError
if e.args == ('seek',):
raise TypeError('Expected file-like object')
raise e # pragma: no cover
else:
length = f.tell()
finally:
f.close()
return length
def assertFileExists(self, filename, msg=None):
'''Fail if ``filename`` does not exist as determined by
``os.path.isfile(filename)``.
Parameters
----------
filename : str, bytes
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
'''
standardMsg = '%s does not exist' % filename
if not os.path.isfile(filename):
self.fail(self._formatMessage(msg, standardMsg))
def assertFileNotExists(self, filename, msg=None):
'''Fail if ``filename`` exists as determined by
        ``os.path.isfile(filename)``.
Parameters
----------
filename : str, bytes
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
'''
standardMsg = '%s exists' % filename
if os.path.isfile(filename):
self.fail(self._formatMessage(msg, standardMsg))
def assertFileNameEqual(self, filename, name, msg=None):
'''Fail if ``filename`` does not have the given ``name`` as
determined by the ``==`` operator.
Parameters
----------
filename : str, bytes, file-like
        name : str, bytes
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``filename`` is not a str or bytes object and is not
file-like.
'''
fname = self._get_file_name(filename)
self.assertEqual(fname, name, msg=msg)
def assertFileNameNotEqual(self, filename, name, msg=None):
'''Fail if ``filename`` has the given ``name`` as determined
by the ``!=`` operator.
Parameters
----------
filename : str, bytes, file-like
        name : str, bytes
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``filename`` is not a str or bytes object and is not
file-like.
'''
fname = self._get_file_name(filename)
self.assertNotEqual(fname, name, msg=msg)
def assertFileNameRegex(self, filename, expected_regex, msg=None):
'''Fail unless ``filename`` matches ``expected_regex``.
Parameters
----------
filename : str, bytes, file-like
expected_regex : str, bytes
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``filename`` is not a str or bytes object and is not
file-like.
'''
fname = self._get_file_name(filename)
self.assertRegex(fname, expected_regex, msg=msg)
def assertFileNameNotRegex(self, filename, expected_regex, msg=None):
'''Fail if ``filename`` matches ``expected_regex``.
Parameters
----------
filename : str, bytes, file-like
expected_regex : str, bytes
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``filename`` is not a str or bytes object and is not
file-like.
'''
fname = self._get_file_name(filename)
self.assertNotRegex(fname, expected_regex, msg=msg)
def assertFileTypeEqual(self, filename, extension, msg=None):
'''Fail if ``filename`` does not have the given ``extension``
as determined by the ``==`` operator.
Parameters
----------
filename : str, bytes, file-like
extension : str, bytes
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``filename`` is not a str or bytes object and is not
file-like.
'''
ftype = self._get_file_type(filename)
self.assertEqual(ftype, extension, msg=msg)
def assertFileTypeNotEqual(self, filename, extension, msg=None):
'''Fail if ``filename`` has the given ``extension`` as
determined by the ``!=`` operator.
Parameters
----------
filename : str, bytes, file-like
extension : str, bytes
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``filename`` is not a str or bytes object and is not
file-like.
'''
ftype = self._get_file_type(filename)
self.assertNotEqual(ftype, extension, msg=msg)
def assertFileEncodingEqual(self, filename, encoding, msg=None):
'''Fail if ``filename`` is not encoded with the given
``encoding`` as determined by the '==' operator.
Parameters
----------
filename : str, bytes, file-like
encoding : str, bytes
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``filename`` is not a str or bytes object and is not
file-like.
'''
fencoding = self._get_file_encoding(filename)
fname = self._get_file_name(filename)
standardMsg = '%s is not %s encoded' % (fname, encoding)
self.assertEqual(fencoding.lower(),
encoding.lower(),
self._formatMessage(msg, standardMsg))
def assertFileEncodingNotEqual(self, filename, encoding, msg=None):
'''Fail if ``filename`` is encoded with the given ``encoding``
as determined by the '!=' operator.
Parameters
----------
filename : str, bytes, file-like
encoding : str, bytes
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``filename`` is not a str or bytes object and is not
file-like.
'''
fencoding = self._get_file_encoding(filename)
fname = self._get_file_name(filename)
standardMsg = '%s is %s encoded' % (fname, encoding)
self.assertNotEqual(fencoding.lower(),
encoding.lower(),
self._formatMessage(msg, standardMsg))
def assertFileSizeEqual(self, filename, size, msg=None):
'''Fail if ``filename`` does not have the given ``size`` as
determined by the '==' operator.
Parameters
----------
filename : str, bytes, file-like
size : int, float
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``filename`` is not a str or bytes object and is not
file-like.
'''
fsize = self._get_file_size(filename)
self.assertEqual(fsize, size, msg=msg)
def assertFileSizeNotEqual(self, filename, size, msg=None):
'''Fail if ``filename`` has the given ``size`` as determined
by the '!=' operator.
Parameters
----------
filename : str, bytes, file-like
size : int, float
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``filename`` is not a str or bytes object and is not
file-like.
'''
fsize = self._get_file_size(filename)
self.assertNotEqual(fsize, size, msg=msg)
def assertFileSizeAlmostEqual(
self, filename, size, places=None, msg=None, delta=None):
'''Fail if ``filename`` does not have the given ``size`` as
determined by their difference rounded to the given number of
decimal ``places`` (default 7) and comparing to zero, or if
their difference is greater than a given ``delta``.
Parameters
----------
filename : str, bytes, file-like
size : int, float
places : int
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
delta : int, float
Raises
------
TypeError
If ``filename`` is not a str or bytes object and is not
file-like.
'''
fsize = self._get_file_size(filename)
self.assertAlmostEqual(
fsize, size, places=places, msg=msg, delta=delta)
def assertFileSizeNotAlmostEqual(
self, filename, size, places=None, msg=None, delta=None):
        '''Fail if ``filename`` has the given ``size`` as determined
        by their difference rounded to the given number of decimal
        ``places`` (default 7) and comparing to zero, or if their
        difference is less than the given ``delta``.
Parameters
----------
filename : str, bytes, file-like
size : int, float
places : int
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
delta : int, float
Raises
------
TypeError
If ``filename`` is not a str or bytes object and is not
file-like.
'''
fsize = self._get_file_size(filename)
self.assertNotAlmostEqual(
fsize, size, places=places, msg=msg, delta=delta)
def assertFileSizeGreater(self, filename, size, msg=None):
'''Fail if ``filename``'s size is not greater than ``size`` as
determined by the '>' operator.
Parameters
----------
filename : str, bytes, file-like
size : int, float
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``filename`` is not a str or bytes object and is not
file-like.
'''
fsize = self._get_file_size(filename)
self.assertGreater(fsize, size, msg=msg)
def assertFileSizeGreaterEqual(self, filename, size, msg=None):
'''Fail if ``filename``'s size is not greater than or equal to
``size`` as determined by the '>=' operator.
Parameters
----------
filename : str, bytes, file-like
size : int, float
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``filename`` is not a str or bytes object and is not
file-like.
'''
fsize = self._get_file_size(filename)
self.assertGreaterEqual(fsize, size, msg=msg)
def assertFileSizeLess(self, filename, size, msg=None):
'''Fail if ``filename``'s size is not less than ``size`` as
determined by the '<' operator.
Parameters
----------
filename : str, bytes, file-like
size : int, float
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``filename`` is not a str or bytes object and is not
file-like.
'''
fsize = self._get_file_size(filename)
self.assertLess(fsize, size, msg=msg)
def assertFileSizeLessEqual(self, filename, size, msg=None):
'''Fail if ``filename``'s size is not less than or equal to
``size`` as determined by the '<=' operator.
Parameters
----------
filename : str, bytes, file-like
size : int, float
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``filename`` is not a str or bytes object and is not
file-like.
'''
fsize = self._get_file_size(filename)
self.assertLessEqual(fsize, size, msg=msg)
class CategoricalMixins(abc.ABC):
'''Built-in assertions for categorical data.
This mixin includes some common categorical variables (e.g.,
weekdays, months, U.S. states, etc.) that test authors can use
test resources against. For instance, if a dataset is supposed
to contain data for all states in the U.S., test authors can
test the state column in their dataset against the `US_STATES`
attribute.
.. code-block:: python
import unittest
from marbles.mixins import mixins
class MyTestCase(unittest.TestCase, mixins.CategoricalMixins):
def test_that_all_states_are_present(self):
df = ...
self.assertCategoricalLevelsEqual(df['STATE'], self.US_STATES)
    These categorical variables are provided as a convenience; test
    authors can and should manipulate these variables, or create
    their own, as needed. The idea is that, for expectations that
    apply across multiple datasets, the same expectation is tested
    in the same way against each dataset.
Attributes
----------
WEEKDAYS : list
WEEKDAYS_ABBR : list
Weekdays abbreviated to three characters
MONTHS : list
MONTHS_ABBR : list
Months abbreviated to three characters
US_STATES : list
US_STATES_ABBR : list
U.S. state names abbreviated to two uppercase characters
US_TERRITORIES : list
US_TERRITORIES_ABBR : list
U.S. territory names abbreviated to two uppercase characters
CONTINENTS : list
7-continent model names
'''
# TODO (jsa): providing these as pandas Series objects or numpy
# arrays might make applying transformations (uppercase, lowercase)
# easier
WEEKDAYS = ['Monday', 'Tuesday', 'Wednesday', 'Thursday',
'Friday', 'Saturday', 'Sunday']
WEEKDAYS_ABBR = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
MONTHS = ['January', 'February', 'March', 'April', 'May', 'June', 'July',
'August', 'September', 'October', 'November', 'December']
MONTHS_ABBR = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
US_STATES = ['Alabama', 'Alaska', 'Arizona', 'Arkansas', 'California',
'Colorado', 'Connecticut', 'Delaware', 'Florida', 'Georgia',
'Hawaii', 'Idaho', 'Illinois', 'Indiana', 'Iowa', 'Kansas',
'Kentucky', 'Louisiana', 'Maine', 'Maryland', 'Massachusetts',
'Michigan', 'Minnesota', 'Mississippi', 'Missouri', 'Montana',
'Nebraska', 'Nevada', 'New Hampshire', 'New Jersey',
'New Mexico', 'New York', 'North Carolina', 'North Dakota',
'Ohio', 'Oklahoma', 'Oregon', 'Pennsylvania', 'Rhode Island',
'South Carolina', 'South Dakota', 'Tennessee', 'Texas',
'Utah', 'Vermont', 'Virginia', 'Washington', 'West Virginia',
'Wisconsin', 'Wyoming']
US_STATES_ABBR = ['AL', 'AK', 'AZ', 'AR', 'CA', 'CO', 'CT', 'DE', 'FL',
'GA', 'HI', 'ID', 'IL', 'IN', 'IA', 'KS', 'KY', 'LA',
'ME', 'MD', 'MA', 'MI', 'MN', 'MS', 'MO', 'MT', 'NE',
'NV', 'NH', 'NJ', 'NM', 'NY', 'NC', 'ND', 'OH', 'OK',
'OR', 'PA', 'RI', 'SC', 'SD', 'TN', 'TX', 'UT', 'VT',
'VA', 'WA', 'WV', 'WI', 'WY']
US_TERRITORIES = ['American Samoa', 'District of Columbia',
'Federated States of Micronesia', 'Guam',
'Marshall Islands', 'Northern Mariana Islands',
'Palau', 'Puerto Rico', 'Virgin Islands']
US_TERRITORIES_ABBR = ['AS', 'DC', 'FM', 'GU', 'MH', 'MP', 'PW', 'PR', 'VI']
# TODO (jsa): support 4 and/or 6 continent models?
CONTINENTS = ['Africa', 'Antarctica', 'Asia', 'Australia',
'Europe', 'North America', 'South America']
@abc.abstractmethod
def fail(self, msg):
pass # pragma: no cover
@abc.abstractmethod
def assertIsInstance(self, obj, cls, msg):
pass # pragma: no cover
@abc.abstractmethod
def assertIn(self, member, container, msg):
pass # pragma: no cover
@abc.abstractmethod
def assertNotIn(self, member, container, msg):
pass # pragma: no cover
@abc.abstractmethod
def _formatMessage(self, msg, standardMsg):
pass # pragma: no cover
def assertCategoricalLevelsEqual(self, levels1, levels2, msg=None):
'''Fail if ``levels1`` and ``levels2`` do not have the same
domain.
Parameters
----------
levels1 : iterable
levels2 : iterable
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If either ``levels1`` or ``levels2`` is not iterable.
'''
if not isinstance(levels1, collections.Iterable):
raise TypeError('First argument is not iterable')
if not isinstance(levels2, collections.Iterable):
raise TypeError('Second argument is not iterable')
standardMsg = '%s levels != %s levels' % (levels1, levels2)
if not all(level in levels2 for level in levels1):
self.fail(self._formatMessage(msg, standardMsg))
if not all(level in levels1 for level in levels2):
self.fail(self._formatMessage(msg, standardMsg))
def assertCategoricalLevelsNotEqual(self, levels1, levels2, msg=None):
'''Fail if ``levels1`` and ``levels2`` have the same domain.
Parameters
----------
levels1 : iterable
levels2 : iterable
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If either ``levels1`` or ``levels2`` is not iterable.
'''
if not isinstance(levels1, collections.Iterable):
raise TypeError('First argument is not iterable')
if not isinstance(levels2, collections.Iterable):
raise TypeError('Second argument is not iterable')
standardMsg = '%s levels == %s levels' % (levels1, levels2)
unshared_levels = False
if not all(level in levels2 for level in levels1):
unshared_levels = True
if not all(level in levels1 for level in levels2):
unshared_levels = True
if not unshared_levels:
self.fail(self._formatMessage(msg, standardMsg))
def assertCategoricalLevelIn(self, level, levels, msg=None):
'''Fail if ``level`` is not in ``levels``.
This is equivalent to ``self.assertIn(level, levels)``.
Parameters
----------
level
levels : iterable
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``levels`` is not iterable.
'''
if not isinstance(levels, collections.Iterable):
raise TypeError('Second argument is not iterable')
self.assertIn(level, levels, msg=msg)
def assertCategoricalLevelNotIn(self, level, levels, msg=None):
'''Fail if ``level`` is in ``levels``.
This is equivalent to ``self.assertNotIn(level, levels)``.
Parameters
----------
level
levels : iterable
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``levels`` is not iterable.
'''
if not isinstance(levels, collections.Iterable):
raise TypeError('Second argument is not iterable')
self.assertNotIn(level, levels, msg=msg)
class DateTimeMixins(abc.ABC):
'''Built-in assertions for :class:`date` s, :class:`datetime` s,
and :class:`time` s.
'''
@abc.abstractmethod
def fail(self, msg):
pass # pragma: no cover
@abc.abstractmethod
def assertIsInstance(self, obj, cls, msg):
pass # pragma: no cover
@abc.abstractmethod
def assertIsNone(self, obj, msg):
pass # pragma: no cover
@abc.abstractmethod
def assertIsNotNone(self, obj, msg):
pass # pragma: no cover
def assertDateTimesBefore(self, sequence, target, strict=True, msg=None):
'''Fail if any elements in ``sequence`` are not before
``target``.
If ``target`` is iterable, it must have the same length as
        ``sequence``.
If ``strict=True``, fail unless all elements in ``sequence``
are strictly less than ``target``. If ``strict=False``, fail
unless all elements in ``sequence`` are less than or equal to
``target``.
Parameters
----------
sequence : iterable
target : datetime, date, iterable
strict : bool
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``sequence`` is not iterable.
ValueError
If ``target`` is iterable but does not have the same length
as ``sequence``.
TypeError
If ``target`` is not a datetime or date object and is not
iterable.
'''
if not isinstance(sequence, collections.Iterable):
raise TypeError('First argument is not iterable')
if strict:
standardMsg = '%s is not strictly less than %s' % (sequence,
target)
op = operator.lt
else:
standardMsg = '%s is not less than %s' % (sequence, target)
op = operator.le
# Null date(time)s will always compare False, but
# we want to know about null date(time)s
if isinstance(target, collections.Iterable):
if len(target) != len(sequence):
raise ValueError(('Length mismatch: '
'first argument contains %s elements, '
'second argument contains %s elements' % (
len(sequence), len(target))))
if not all(op(i, j) for i, j in zip(sequence, target)):
self.fail(self._formatMessage(msg, standardMsg))
elif isinstance(target, (date, datetime)):
if not all(op(element, target) for element in sequence):
self.fail(self._formatMessage(msg, standardMsg))
else:
raise TypeError(
'Second argument is not a datetime or date object or iterable')
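    # Illustrative sketch (hypothetical data), assuming this mixin is used
    # from a unittest.TestCase subclass: ``target`` may be a single
    # date(time) or an iterable of the same length as ``sequence``.
    #
    #   from datetime import date
    #   observed = [date(2020, 1, 1), date(2020, 2, 1)]
    #   self.assertDateTimesBefore(observed, date(2021, 1, 1))  # scalar cutoff
    #   self.assertDateTimesBefore(
    #       observed, [date(2020, 1, 2), date(2020, 2, 2)])     # element-wise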
def assertDateTimesAfter(self, sequence, target, strict=True, msg=None):
'''Fail if any elements in ``sequence`` are not after
``target``.
If ``target`` is iterable, it must have the same length as
        ``sequence``.
If ``strict=True``, fail unless all elements in ``sequence``
are strictly greater than ``target``. If ``strict=False``,
fail unless all elements in ``sequence`` are greater than or
equal to ``target``.
Parameters
----------
sequence : iterable
target : datetime, date, iterable
strict : bool
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``sequence`` is not iterable.
ValueError
If ``target`` is iterable but does not have the same length
as ``sequence``.
TypeError
If ``target`` is not a datetime or date object and is not
iterable.
'''
if not isinstance(sequence, collections.Iterable):
raise TypeError('First argument is not iterable')
if strict:
standardMsg = '%s is not strictly greater than %s' % (sequence,
target)
op = operator.gt
else:
standardMsg = '%s is not greater than %s' % (sequence,
target)
op = operator.ge
# Null date(time)s will always compare False, but
# we want to know about null date(time)s
if isinstance(target, collections.Iterable):
if len(target) != len(sequence):
raise ValueError(('Length mismatch: '
'first argument contains %s elements, '
'second argument contains %s elements' % (
len(sequence), len(target))))
if not all(op(i, j) for i, j in zip(sequence, target)):
self.fail(self._formatMessage(msg, standardMsg))
elif isinstance(target, (date, datetime)):
if not all(op(element, target) for element in sequence):
self.fail(self._formatMessage(msg, standardMsg))
else:
raise TypeError(
'Second argument is not a datetime or date object or iterable')
def assertDateTimesPast(self, sequence, strict=True, msg=None):
'''Fail if any elements in ``sequence`` are not in the past.
If the max element is a datetime, "past" is defined as anything
prior to ``datetime.now()``; if the max element is a date,
"past" is defined as anything prior to ``date.today()``.
If ``strict=True``, fail unless all elements in ``sequence``
are strictly less than ``date.today()`` (or ``datetime.now()``).
If ``strict=False``, fail unless all elements in ``sequence``
are less than or equal to ``date.today()`` (or
``datetime.now()``).
Parameters
----------
sequence : iterable
strict : bool
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``sequence`` is not iterable.
TypeError
If max element in ``sequence`` is not a datetime or date
object.
'''
if not isinstance(sequence, collections.Iterable):
raise TypeError('First argument is not iterable')
# Cannot compare datetime to date, so if dates are provided use
# date.today(), if datetimes are provided use datetime.today()
if isinstance(max(sequence), datetime):
target = datetime.today()
elif isinstance(max(sequence), date):
target = date.today()
else:
raise TypeError('Expected iterable of datetime or date objects')
self.assertDateTimesBefore(sequence, target, strict=strict, msg=msg)
def assertDateTimesFuture(self, sequence, strict=True, msg=None):
'''Fail if any elements in ``sequence`` are not in the future.
If the min element is a datetime, "future" is defined as
anything after ``datetime.now()``; if the min element is a date,
"future" is defined as anything after ``date.today()``.
If ``strict=True``, fail unless all elements in ``sequence``
are strictly greater than ``date.today()``
(or ``datetime.now()``). If ``strict=False``, fail all
elements in ``sequence`` are greater than or equal to
``date.today()`` (or ``datetime.now()``).
Parameters
----------
sequence : iterable
strict : bool
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``sequence`` is not iterable.
TypeError
If min element in ``sequence`` is not a datetime or date
object.
'''
if not isinstance(sequence, collections.Iterable):
raise TypeError('First argument is not iterable')
# Cannot compare datetime to date, so if dates are provided use
# date.today(), if datetimes are provided use datetime.today()
if isinstance(min(sequence), datetime):
target = datetime.today()
elif isinstance(min(sequence), date):
target = date.today()
else:
raise TypeError('Expected iterable of datetime or date objects')
self.assertDateTimesAfter(sequence, target, strict=strict, msg=msg)
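    # Illustrative sketch (hypothetical dates): past/future are judged
    # against date.today() or datetime.now(), depending on the type of
    # the extreme element.
    #
    #   from datetime import date
    #   self.assertDateTimesPast([date(2000, 1, 1), date(2001, 6, 30)])
    #   self.assertDateTimesFuture([date(2999, 1, 1)])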
def assertDateTimesFrequencyEqual(self, sequence, frequency, msg=None):
        '''Fail if any elements in ``sequence`` are not separated by
        the expected ``frequency``.
Parameters
----------
sequence : iterable
frequency : timedelta
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``sequence`` is not iterable.
TypeError
If ``frequency`` is not a timedelta object.
'''
# TODO (jsa): check that elements in sequence are dates or
# datetimes, keeping in mind that sequence may contain null
# values
if not isinstance(sequence, collections.Iterable):
raise TypeError('First argument is not iterable')
if not isinstance(frequency, timedelta):
raise TypeError('Second argument is not a timedelta object')
standardMsg = 'unexpected frequencies found in %s' % sequence
s1 = pd.Series(sequence)
s2 = s1.shift(-1)
freq = s2 - s1
if not all(f == frequency for f in freq[:-1]):
self.fail(self._formatMessage(msg, standardMsg))
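    # Illustrative sketch (hypothetical data): a daily series should be
    # separated by exactly one day between consecutive elements.
    #
    #   from datetime import date, timedelta
    #   daily = [date(2020, 1, 1), date(2020, 1, 2), date(2020, 1, 3)]
    #   self.assertDateTimesFrequencyEqual(daily, timedelta(days=1))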
def assertDateTimesLagEqual(self, sequence, lag, msg=None):
        '''Fail unless the max element in ``sequence`` is separated from
the present by ``lag`` as determined by the '==' operator.
If the max element is a datetime, "present" is defined as
``datetime.now()``; if the max element is a date, "present"
is defined as ``date.today()``.
This is equivalent to
``self.assertEqual(present - max(sequence), lag)``.
Parameters
----------
sequence : iterable
lag : timedelta
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``sequence`` is not iterable.
TypeError
If ``lag`` is not a timedelta object.
TypeError
If max element in ``sequence`` is not a datetime or date
object.
'''
if not isinstance(sequence, collections.Iterable):
raise TypeError('First argument is not iterable')
if not isinstance(lag, timedelta):
raise TypeError('Second argument is not a timedelta object')
# Cannot compare datetime to date, so if dates are provided use
# date.today(), if datetimes are provided use datetime.today()
if isinstance(max(sequence), datetime):
target = datetime.today()
elif isinstance(max(sequence), date):
target = date.today()
else:
raise TypeError('Expected iterable of datetime or date objects')
self.assertEqual(target - max(sequence), lag, msg=msg)
def assertDateTimesLagLess(self, sequence, lag, msg=None):
        '''Fail if the max element in ``sequence`` is separated from
the present by ``lag`` or more as determined by the '<'
operator.
If the max element is a datetime, "present" is defined as
``datetime.now()``; if the max element is a date, "present"
is defined as ``date.today()``.
This is equivalent to
``self.assertLess(present - max(sequence), lag)``.
Parameters
----------
sequence : iterable
lag : timedelta
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``sequence`` is not iterable.
TypeError
If ``lag`` is not a timedelta object.
TypeError
If max element in ``sequence`` is not a datetime or date
object.
'''
if not isinstance(sequence, collections.Iterable):
raise TypeError('First argument is not iterable')
if not isinstance(lag, timedelta):
raise TypeError('Second argument is not a timedelta object')
# Cannot compare datetime to date, so if dates are provided use
# date.today(), if datetimes are provided use datetime.today()
if isinstance(max(sequence), datetime):
target = datetime.today()
elif isinstance(max(sequence), date):
target = date.today()
else:
raise TypeError('Expected iterable of datetime or date objects')
self.assertLess(target - max(sequence), lag, msg=msg)
def assertDateTimesLagLessEqual(self, sequence, lag, msg=None):
        '''Fail if the max element in ``sequence`` is separated from
the present by more than ``lag`` as determined by the '<='
operator.
If the max element is a datetime, "present" is defined as
``datetime.now()``; if the max element is a date, "present"
is defined as ``date.today()``.
This is equivalent to
``self.assertLessEqual(present - max(sequence), lag)``.
Parameters
----------
sequence : iterable
lag : timedelta
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``sequence`` is not iterable.
TypeError
If ``lag`` is not a timedelta object.
TypeError
If max element in ``sequence`` is not a datetime or date
object.
'''
if not isinstance(sequence, collections.Iterable):
raise TypeError('First argument is not iterable')
if not isinstance(lag, timedelta):
raise TypeError('Second argument is not a timedelta object')
# Cannot compare datetime to date, so if dates are provided use
# date.today(), if datetimes are provided use datetime.today()
if isinstance(max(sequence), datetime):
target = datetime.today()
elif isinstance(max(sequence), date):
target = date.today()
else:
raise TypeError('Expected iterable of datetime or date objects')
self.assertLessEqual(target - max(sequence), lag, msg=msg)
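    # Illustrative sketch (hypothetical data): a freshness check asserting
    # that the most recent observation is less than two days old. The
    # assertLess/assertLessEqual/assertEqual methods used above are
    # assumed to come from the TestCase this class is mixed into.
    #
    #   from datetime import date, timedelta
    #   observations = [date.today() - timedelta(days=3),
    #                   date.today() - timedelta(days=1)]
    #   self.assertDateTimesLagLess(observations, timedelta(days=2))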
def assertTimeZoneIsNone(self, dt, msg=None):
'''Fail if ``dt`` has a non-null ``tzinfo`` attribute.
Parameters
----------
dt : datetime
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``dt`` is not a datetime object.
'''
if not isinstance(dt, datetime):
raise TypeError('First argument is not a datetime object')
self.assertIsNone(dt.tzinfo, msg=msg)
def assertTimeZoneIsNotNone(self, dt, msg=None):
'''Fail unless ``dt`` has a non-null ``tzinfo`` attribute.
Parameters
----------
dt : datetime
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``dt`` is not a datetime object.
'''
if not isinstance(dt, datetime):
raise TypeError('First argument is not a datetime object')
self.assertIsNotNone(dt.tzinfo, msg=msg)
def assertTimeZoneEqual(self, dt, tz, msg=None):
'''Fail unless ``dt``'s ``tzinfo`` attribute equals ``tz`` as
determined by the '==' operator.
Parameters
----------
dt : datetime
tz : timezone
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``dt`` is not a datetime object.
TypeError
If ``tz`` is not a timezone object.
'''
if not isinstance(dt, datetime):
raise TypeError('First argument is not a datetime object')
if not isinstance(tz, timezone):
raise TypeError('Second argument is not a timezone object')
self.assertEqual(dt.tzinfo, tz, msg=msg)
def assertTimeZoneNotEqual(self, dt, tz, msg=None):
'''Fail if ``dt``'s ``tzinfo`` attribute equals ``tz`` as
determined by the '!=' operator.
Parameters
----------
dt : datetime
tz : timezone
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``dt`` is not a datetime object.
TypeError
If ``tz`` is not a timezone object.
'''
if not isinstance(dt, datetime):
raise TypeError('First argument is not a datetime object')
if not isinstance(tz, timezone):
raise TypeError('Second argument is not a timezone object')
self.assertNotEqual(dt.tzinfo, tz, msg=msg)
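    # Illustrative sketch (hypothetical values): tzinfo checks distinguish
    # naive from timezone-aware datetimes.
    #
    #   from datetime import datetime, timezone
    #   self.assertTimeZoneIsNone(datetime(2020, 1, 1))
    #   self.assertTimeZoneEqual(
    #       datetime(2020, 1, 1, tzinfo=timezone.utc), timezone.utc)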
``` |
{
"source": "jonathangreen/circulation",
"score": 2
} |
#### File: tests/core/test_lane.py
```python
import datetime
import json
import logging
import random
from unittest.mock import MagicMock, call
import pytest
from elasticsearch.exceptions import ElasticsearchException
from sqlalchemy import and_, text
from core.classifier import Classifier
from core.config import Configuration
from core.entrypoint import (
AudiobooksEntryPoint,
EbooksEntryPoint,
EntryPoint,
EverythingEntryPoint,
)
from core.external_search import (
Filter,
MockExternalSearchIndex,
WorkSearchResult,
mock_search_index,
)
from core.lane import (
DatabaseBackedFacets,
DatabaseBackedWorkList,
DefaultSortOrderFacets,
FacetConstants,
Facets,
FacetsWithEntryPoint,
FeaturedFacets,
Lane,
Pagination,
SearchFacets,
TopLevelWorkList,
WorkList,
)
from core.model import (
CachedFeed,
DataSource,
Edition,
Genre,
Library,
LicensePool,
Work,
WorkGenre,
get_one_or_create,
tuple_to_numericrange,
)
from core.problem_details import INVALID_INPUT
from core.testing import DatabaseTest, EndToEndSearchTest, LogCaptureHandler
from core.util.datetime_helpers import utc_now
from core.util.opds_writer import OPDSFeed
class TestFacetsWithEntryPoint(DatabaseTest):
class MockFacetConfig(object):
"""Pass this in when you call FacetsWithEntryPoint.from_request
but you don't care which EntryPoints are configured.
"""
entrypoints = []
def test_items(self):
ep = AudiobooksEntryPoint
f = FacetsWithEntryPoint(ep)
expect_items = (f.ENTRY_POINT_FACET_GROUP_NAME, ep.INTERNAL_NAME)
assert [expect_items] == list(f.items())
assert "%s=%s" % expect_items == f.query_string
f.max_cache_age = 41
expect_items = [
(f.ENTRY_POINT_FACET_GROUP_NAME, ep.INTERNAL_NAME),
(f.MAX_CACHE_AGE_NAME, "41"),
]
assert expect_items == list(f.items())
def test_modify_database_query(self):
class MockEntryPoint(object):
def modify_database_query(self, _db, qu):
self.called_with = (_db, qu)
ep = MockEntryPoint()
f = FacetsWithEntryPoint(ep)
_db = object()
qu = object()
f.modify_database_query(_db, qu)
assert (_db, qu) == ep.called_with
def test_navigate(self):
# navigate creates a new FacetsWithEntryPoint.
old_entrypoint = object()
kwargs = dict(extra_key="extra_value")
facets = FacetsWithEntryPoint(
old_entrypoint, entrypoint_is_default=True, max_cache_age=123, **kwargs
)
new_entrypoint = object()
new_facets = facets.navigate(new_entrypoint)
# A new FacetsWithEntryPoint was created.
assert isinstance(new_facets, FacetsWithEntryPoint)
# It has the new entry point.
assert new_entrypoint == new_facets.entrypoint
# Since navigating from one Facets object to another is a choice,
# the new Facets object is not using a default EntryPoint.
assert False == new_facets.entrypoint_is_default
# The max_cache_age was preserved.
assert 123 == new_facets.max_cache_age
# The keyword arguments used to create the original faceting
# object were propagated to its constructor.
assert kwargs == new_facets.constructor_kwargs
def test_from_request(self):
# from_request just calls the _from_request class method
expect = object()
class Mock(FacetsWithEntryPoint):
@classmethod
def _from_request(cls, *args, **kwargs):
cls.called_with = (args, kwargs)
return expect
result = Mock.from_request(
"library",
"facet config",
"get_argument",
"get_header",
"worklist",
"default entrypoint",
extra="extra argument",
)
# The arguments given to from_request were propagated to _from_request.
args, kwargs = Mock.called_with
assert (
"facet config",
"get_argument",
"get_header",
"worklist",
"default entrypoint",
) == args
assert dict(extra="extra argument") == kwargs
# The return value of _from_request was propagated through
# from_request.
assert expect == result
def test__from_request(self):
# _from_request calls load_entrypoint() and
# load_max_cache_age() and instantiates the class with the
# result.
class MockFacetsWithEntryPoint(FacetsWithEntryPoint):
# Mock load_entrypoint() and load_max_cache_age() to
# return whatever values we have set up ahead of time.
@classmethod
def selectable_entrypoints(cls, facet_config):
cls.selectable_entrypoints_called_with = facet_config
return ["Selectable entrypoints"]
@classmethod
def load_entrypoint(cls, entrypoint_name, entrypoints, default=None):
cls.load_entrypoint_called_with = (
entrypoint_name,
entrypoints,
default,
)
return cls.expect_load_entrypoint
@classmethod
def load_max_cache_age(cls, max_cache_age):
cls.load_max_cache_age_called_with = max_cache_age
return cls.expect_max_cache_age
# Mock the functions that pull information out of an HTTP
# request.
        # FacetsWithEntryPoint._from_request pulls the facet group name and
# the maximum cache age out of the 'request' and passes those
# values into load_entrypoint() and load_max_cache_age.
def get_argument(key, default):
if key == Facets.ENTRY_POINT_FACET_GROUP_NAME:
return "entrypoint name from request"
elif key == Facets.MAX_CACHE_AGE_NAME:
return "max cache age from request"
# FacetsWithEntryPoint.load_entrypoint does not use
# get_header().
def get_header(name):
raise Exception("I'll never be called")
config = self.MockFacetConfig
mock_worklist = object()
default_entrypoint = object()
def m():
return MockFacetsWithEntryPoint._from_request(
config,
get_argument,
get_header,
mock_worklist,
default_entrypoint=default_entrypoint,
extra="extra kwarg",
)
# First, test failure. If load_entrypoint() returns a
# ProblemDetail, that object is returned instead of the
# faceting class.
MockFacetsWithEntryPoint.expect_load_entrypoint = INVALID_INPUT
assert INVALID_INPUT == m()
# Similarly if load_entrypoint() works but load_max_cache_age
# returns a ProblemDetail.
expect_entrypoint = object()
expect_is_default = object()
MockFacetsWithEntryPoint.expect_load_entrypoint = (
expect_entrypoint,
expect_is_default,
)
MockFacetsWithEntryPoint.expect_max_cache_age = INVALID_INPUT
assert INVALID_INPUT == m()
        # Next, test success. The return value of load_entrypoint()
        # is passed as 'entrypoint' into the FacetsWithEntryPoint
# constructor. The object returned by load_max_cache_age is
# passed as 'max_cache_age'.
#
# The object returned by load_entrypoint() does not need to be a
# currently enabled entrypoint for the library.
MockFacetsWithEntryPoint.expect_max_cache_age = 345
facets = m()
assert isinstance(facets, FacetsWithEntryPoint)
assert expect_entrypoint == facets.entrypoint
assert expect_is_default == facets.entrypoint_is_default
assert (
"entrypoint name from request",
["Selectable entrypoints"],
default_entrypoint,
) == MockFacetsWithEntryPoint.load_entrypoint_called_with
assert 345 == facets.max_cache_age
assert dict(extra="extra kwarg") == facets.constructor_kwargs
assert MockFacetsWithEntryPoint.selectable_entrypoints_called_with == config
assert (
MockFacetsWithEntryPoint.load_max_cache_age_called_with
== "max cache age from request"
)
def test_load_entrypoint(self):
audio = AudiobooksEntryPoint
ebooks = EbooksEntryPoint
# These are the allowable entrypoints for this site -- we'll
# be passing this in to load_entrypoint every time.
entrypoints = [audio, ebooks]
worklist = object()
m = FacetsWithEntryPoint.load_entrypoint
# This request does not ask for any particular entrypoint, and
# it doesn't specify a default, so it gets the first available
# entrypoint.
audio_default, is_default = m(None, entrypoints)
assert audio == audio_default
assert True == is_default
# This request does not ask for any particular entrypoint, so
# it gets the specified default.
default = object()
assert (default, True) == m(None, entrypoints, default)
# This request asks for an entrypoint and gets it.
assert (ebooks, False) == m(ebooks.INTERNAL_NAME, entrypoints)
# This request asks for an entrypoint that is not available,
# and gets the default.
assert (audio, True) == m("no such entrypoint", entrypoints)
# If no EntryPoints are available, load_entrypoint returns
# nothing.
assert (None, True) == m(audio.INTERNAL_NAME, [])
def test_load_max_cache_age(self):
m = FacetsWithEntryPoint.load_max_cache_age
# The two valid options for max_cache_age as loaded in from a request are
# IGNORE_CACHE (do not pull from cache) and None (no opinion).
assert None == m("")
assert None == m(None)
assert CachedFeed.IGNORE_CACHE == m(0)
assert CachedFeed.IGNORE_CACHE == m("0")
# All other values are treated as 'no opinion'.
assert None == m("1")
assert None == m(2)
assert None == m("not a number")
def test_cache_age(self):
# No matter what type of feed we ask about, the max_cache_age of a
# FacetsWithEntryPoint is whatever is stored in its .max_cache_age.
#
# This is true even for 'feed types' that make no sense.
max_cache_age = object()
facets = FacetsWithEntryPoint(max_cache_age=max_cache_age)
assert max_cache_age == facets.max_cache_age
def test_selectable_entrypoints(self):
"""The default implementation of selectable_entrypoints just returns
the worklist's entrypoints.
"""
class MockWorkList(object):
def __init__(self, entrypoints):
self.entrypoints = entrypoints
mock_entrypoints = object()
worklist = MockWorkList(mock_entrypoints)
m = FacetsWithEntryPoint.selectable_entrypoints
assert mock_entrypoints == m(worklist)
assert [] == m(None)
def test_modify_search_filter(self):
# When an entry point is selected, search filters are modified so
# that they only find works that fit that entry point.
filter = Filter()
facets = FacetsWithEntryPoint(AudiobooksEntryPoint)
facets.modify_search_filter(filter)
assert [Edition.AUDIO_MEDIUM] == filter.media
# If no entry point is selected, the filter is not modified.
filter = Filter()
facets = FacetsWithEntryPoint()
facets.modify_search_filter(filter)
assert None == filter.media
class TestFacets(DatabaseTest):
def _configure_facets(self, library, enabled, default):
"""Set facet configuration for the given Library."""
for key, values in list(enabled.items()):
library.enabled_facets_setting(key).value = json.dumps(values)
for key, value in list(default.items()):
library.default_facet_setting(key).value = value
def test_max_cache_age(self):
# A default Facets object has no opinion on what max_cache_age
# should be.
facets = Facets(
self._default_library,
Facets.COLLECTION_FULL,
Facets.AVAILABLE_ALL,
Facets.ORDER_TITLE,
)
assert None == facets.max_cache_age
def test_facet_groups(self):
facets = Facets(
self._default_library,
Facets.COLLECTION_FULL,
Facets.AVAILABLE_ALL,
Facets.ORDER_TITLE,
)
all_groups = list(facets.facet_groups)
# By default, there are 8 facet transitions: two groups of three
# and one group of two.
assert 8 == len(all_groups)
# available=all, collection=full, and order=title are the selected
# facets.
selected = sorted([x[:2] for x in all_groups if x[-1] == True])
assert [
("available", "all"),
("collection", "full"),
("order", "title"),
] == selected
test_enabled_facets = {
Facets.ORDER_FACET_GROUP_NAME: [Facets.ORDER_WORK_ID, Facets.ORDER_TITLE],
Facets.COLLECTION_FACET_GROUP_NAME: [Facets.COLLECTION_FEATURED],
Facets.AVAILABILITY_FACET_GROUP_NAME: [Facets.AVAILABLE_ALL],
}
test_default_facets = {
Facets.ORDER_FACET_GROUP_NAME: Facets.ORDER_TITLE,
Facets.COLLECTION_FACET_GROUP_NAME: Facets.COLLECTION_FEATURED,
Facets.AVAILABILITY_FACET_GROUP_NAME: Facets.AVAILABLE_ALL,
}
library = self._default_library
self._configure_facets(library, test_enabled_facets, test_default_facets)
facets = Facets(self._default_library, None, None, Facets.ORDER_TITLE)
all_groups = list(facets.facet_groups)
# We have disabled almost all the facets, so the list of
# facet transitions includes only two items.
#
# 'Sort by title' was selected, and it shows up as the selected
# item in this facet group.
expect = [["order", "title", True], ["order", "work_id", False]]
assert expect == sorted([list(x[:2]) + [x[-1]] for x in all_groups])
def test_default(self):
# Calling Facets.default() is like calling the constructor with
# no arguments except the library.
class Mock(Facets):
def __init__(self, library, **kwargs):
self.library = library
self.kwargs = kwargs
facets = Mock.default(self._default_library)
assert self._default_library == facets.library
assert (
dict(collection=None, availability=None, order=None, entrypoint=None)
== facets.kwargs
)
def test_default_facet_is_always_available(self):
# By definition, the default facet must be enabled. So if the
# default facet for a given facet group is not enabled by the
# current configuration, it's added to the beginning anyway.
class MockConfiguration(object):
def enabled_facets(self, facet_group_name):
self.called_with = facet_group_name
return ["facet1", "facet2"]
class MockFacets(Facets):
@classmethod
def default_facet(cls, config, facet_group_name):
cls.called_with = (config, facet_group_name)
return "facet3"
config = MockConfiguration()
available = MockFacets.available_facets(config, "some facet group")
# MockConfiguration.enabled_facets() was called to get the
# enabled facets for the facet group.
assert "some facet group" == config.called_with
# Then Mock.default_facet() was called to get the default
# facet for that group.
assert (config, "some facet group") == MockFacets.called_with
# Since the default facet was not found in the 'enabled'
# group, it was added to the beginning of the list.
assert ["facet3", "facet1", "facet2"] == available
# If the default facet _is_ found in the 'enabled' group, it's
# not added again.
class MockFacets(Facets):
@classmethod
def default_facet(cls, config, facet_group_name):
cls.called_with = (config, facet_group_name)
return "facet2"
available = MockFacets.available_facets(config, "some facet group")
assert ["facet1", "facet2"] == available
def test_default_availability(self):
# Normally, the availability will be the library's default availability
# facet.
test_enabled_facets = {
Facets.ORDER_FACET_GROUP_NAME: [Facets.ORDER_WORK_ID],
Facets.COLLECTION_FACET_GROUP_NAME: [Facets.COLLECTION_FULL],
Facets.AVAILABILITY_FACET_GROUP_NAME: [
Facets.AVAILABLE_ALL,
Facets.AVAILABLE_NOW,
],
}
test_default_facets = {
Facets.ORDER_FACET_GROUP_NAME: Facets.ORDER_TITLE,
Facets.COLLECTION_FACET_GROUP_NAME: Facets.COLLECTION_FULL,
Facets.AVAILABILITY_FACET_GROUP_NAME: Facets.AVAILABLE_ALL,
}
library = self._default_library
self._configure_facets(library, test_enabled_facets, test_default_facets)
facets = Facets(library, None, None, None)
assert Facets.AVAILABLE_ALL == facets.availability
# However, if the library does not allow holds, we only show
# books that are currently available.
library.setting(Library.ALLOW_HOLDS).value = False
facets = Facets(library, None, None, None)
assert Facets.AVAILABLE_NOW == facets.availability
# Unless 'now' is not one of the enabled facets - then we keep
# using the library's default.
test_enabled_facets[Facets.AVAILABILITY_FACET_GROUP_NAME] = [
Facets.AVAILABLE_ALL
]
self._configure_facets(library, test_enabled_facets, test_default_facets)
facets = Facets(library, None, None, None)
assert Facets.AVAILABLE_ALL == facets.availability
def test_facets_can_be_enabled_at_initialization(self):
enabled_facets = {
Facets.ORDER_FACET_GROUP_NAME: [
Facets.ORDER_TITLE,
Facets.ORDER_AUTHOR,
],
Facets.COLLECTION_FACET_GROUP_NAME: [Facets.COLLECTION_FULL],
Facets.AVAILABILITY_FACET_GROUP_NAME: [Facets.AVAILABLE_OPEN_ACCESS],
}
library = self._default_library
self._configure_facets(library, enabled_facets, {})
# Create a new Facets object with these facets enabled,
# no matter the Configuration.
facets = Facets(
self._default_library,
Facets.COLLECTION_FULL,
Facets.AVAILABLE_OPEN_ACCESS,
Facets.ORDER_TITLE,
enabled_facets=enabled_facets,
)
all_groups = list(facets.facet_groups)
expect = [["order", "author", False], ["order", "title", True]]
assert expect == sorted([list(x[:2]) + [x[-1]] for x in all_groups])
def test_facets_dont_need_a_library(self):
enabled_facets = {
Facets.ORDER_FACET_GROUP_NAME: [
Facets.ORDER_TITLE,
Facets.ORDER_AUTHOR,
],
Facets.COLLECTION_FACET_GROUP_NAME: [Facets.COLLECTION_FULL],
Facets.AVAILABILITY_FACET_GROUP_NAME: [Facets.AVAILABLE_OPEN_ACCESS],
}
facets = Facets(
None,
Facets.COLLECTION_FULL,
Facets.AVAILABLE_OPEN_ACCESS,
Facets.ORDER_TITLE,
enabled_facets=enabled_facets,
)
all_groups = list(facets.facet_groups)
expect = [["order", "author", False], ["order", "title", True]]
assert expect == sorted([list(x[:2]) + [x[-1]] for x in all_groups])
def test_items(self):
"""Verify that Facets.items() returns all information necessary
to recreate the Facets object.
"""
facets = Facets(
self._default_library,
Facets.COLLECTION_FULL,
Facets.AVAILABLE_ALL,
Facets.ORDER_TITLE,
entrypoint=AudiobooksEntryPoint,
)
assert [
("available", Facets.AVAILABLE_ALL),
("collection", Facets.COLLECTION_FULL),
("entrypoint", AudiobooksEntryPoint.INTERNAL_NAME),
("order", Facets.ORDER_TITLE),
] == sorted(facets.items())
def test_default_order_ascending(self):
# Name-based facets are ordered ascending by default (A-Z).
for order in (Facets.ORDER_TITLE, Facets.ORDER_AUTHOR):
f = Facets(
self._default_library,
collection=Facets.COLLECTION_FULL,
availability=Facets.AVAILABLE_ALL,
order=order,
)
assert True == f.order_ascending
# But the time-based facets are ordered descending by default
# (newest->oldest)
assert set([Facets.ORDER_ADDED_TO_COLLECTION, Facets.ORDER_LAST_UPDATE]) == set(
Facets.ORDER_DESCENDING_BY_DEFAULT
)
for order in Facets.ORDER_DESCENDING_BY_DEFAULT:
f = Facets(
self._default_library,
collection=Facets.COLLECTION_FULL,
availability=Facets.AVAILABLE_ALL,
order=order,
)
assert False == f.order_ascending
def test_navigate(self):
"""Test the ability of navigate() to move between slight
variations of a FeaturedFacets object.
"""
F = Facets
ebooks = EbooksEntryPoint
f = Facets(
self._default_library,
F.COLLECTION_FULL,
F.AVAILABLE_ALL,
F.ORDER_TITLE,
entrypoint=ebooks,
)
different_collection = f.navigate(collection=F.COLLECTION_FEATURED)
assert F.COLLECTION_FEATURED == different_collection.collection
assert F.AVAILABLE_ALL == different_collection.availability
assert F.ORDER_TITLE == different_collection.order
assert ebooks == different_collection.entrypoint
different_availability = f.navigate(availability=F.AVAILABLE_NOW)
assert F.COLLECTION_FULL == different_availability.collection
assert F.AVAILABLE_NOW == different_availability.availability
assert F.ORDER_TITLE == different_availability.order
assert ebooks == different_availability.entrypoint
different_order = f.navigate(order=F.ORDER_AUTHOR)
assert F.COLLECTION_FULL == different_order.collection
assert F.AVAILABLE_ALL == different_order.availability
assert F.ORDER_AUTHOR == different_order.order
assert ebooks == different_order.entrypoint
audiobooks = AudiobooksEntryPoint
different_entrypoint = f.navigate(entrypoint=audiobooks)
assert F.COLLECTION_FULL == different_entrypoint.collection
assert F.AVAILABLE_ALL == different_entrypoint.availability
assert F.ORDER_TITLE == different_entrypoint.order
assert audiobooks == different_entrypoint.entrypoint
def test_from_request(self):
library = self._default_library
library.setting(EntryPoint.ENABLED_SETTING).value = json.dumps(
[AudiobooksEntryPoint.INTERNAL_NAME, EbooksEntryPoint.INTERNAL_NAME]
)
config = library
worklist = WorkList()
worklist.initialize(library)
m = Facets.from_request
# Valid object using the default settings.
default_order = config.default_facet(Facets.ORDER_FACET_GROUP_NAME)
default_collection = config.default_facet(Facets.COLLECTION_FACET_GROUP_NAME)
default_availability = config.default_facet(
Facets.AVAILABILITY_FACET_GROUP_NAME
)
args = {}
headers = {}
facets = m(library, library, args.get, headers.get, worklist)
assert default_order == facets.order
assert default_collection == facets.collection
assert default_availability == facets.availability
assert library == facets.library
# The AudiobooksEntryPoint was selected as a default.
assert AudiobooksEntryPoint == facets.entrypoint
assert True == facets.entrypoint_is_default
# Valid object using non-default settings.
args = dict(
order=Facets.ORDER_TITLE,
collection=Facets.COLLECTION_FULL,
available=Facets.AVAILABLE_OPEN_ACCESS,
entrypoint=EbooksEntryPoint.INTERNAL_NAME,
)
facets = m(library, library, args.get, headers.get, worklist)
assert Facets.ORDER_TITLE == facets.order
assert Facets.COLLECTION_FULL == facets.collection
assert Facets.AVAILABLE_OPEN_ACCESS == facets.availability
assert library == facets.library
assert EbooksEntryPoint == facets.entrypoint
# Invalid order
args = dict(order="no such order")
invalid_order = m(library, library, args.get, headers.get, None)
assert INVALID_INPUT.uri == invalid_order.uri
assert (
"I don't know how to order a feed by 'no such order'"
== invalid_order.detail
)
# Invalid availability
args = dict(available="no such availability")
invalid_availability = m(library, library, args.get, headers.get, None)
assert INVALID_INPUT.uri == invalid_availability.uri
assert (
"I don't understand the availability term 'no such availability'"
== invalid_availability.detail
)
# Invalid collection
args = dict(collection="no such collection")
invalid_collection = m(library, library, args.get, headers.get, None)
assert INVALID_INPUT.uri == invalid_collection.uri
assert (
"I don't understand what 'no such collection' refers to."
== invalid_collection.detail
)
def test_from_request_gets_available_facets_through_hook_methods(self):
# Available and default facets are determined by calling the
# available_facets() and default_facets() methods. This gives
# subclasses a chance to add extra facets or change defaults.
class Mock(Facets):
available_facets_calls = []
default_facet_calls = []
# For whatever reason, this faceting object allows only a
# single setting for each facet group.
mock_enabled = dict(
order=[Facets.ORDER_TITLE],
available=[Facets.AVAILABLE_OPEN_ACCESS],
collection=[Facets.COLLECTION_FULL],
)
@classmethod
def available_facets(cls, config, facet_group_name):
cls.available_facets_calls.append((config, facet_group_name))
return cls.mock_enabled[facet_group_name]
@classmethod
def default_facet(cls, config, facet_group_name):
cls.default_facet_calls.append((config, facet_group_name))
return cls.mock_enabled[facet_group_name][0]
library = self._default_library
result = Mock.from_request(library, library, {}.get, {}.get, None)
order, available, collection = Mock.available_facets_calls
# available_facets was called three times, to ask the Mock class what it thinks
# the options for order, availability, and collection should be.
assert (library, "order") == order
assert (library, "available") == available
assert (library, "collection") == collection
# default_facet was called three times, to ask the Mock class what it thinks
# the default order, availability, and collection should be.
order_d, available_d, collection_d = Mock.default_facet_calls
assert (library, "order") == order_d
assert (library, "available") == available_d
assert (library, "collection") == collection_d
# Finally, verify that the return values from the mocked methods were actually used.
# The facets enabled during initialization are the limited
# subset established by available_facets().
assert Mock.mock_enabled == result.facets_enabled_at_init
# The current values came from the defaults provided by default_facet().
assert Facets.ORDER_TITLE == result.order
assert Facets.AVAILABLE_OPEN_ACCESS == result.availability
assert Facets.COLLECTION_FULL == result.collection
def test_modify_search_filter(self):
# Test superclass behavior -- filter is modified by entrypoint.
facets = Facets(
self._default_library, None, None, None, entrypoint=AudiobooksEntryPoint
)
filter = Filter()
facets.modify_search_filter(filter)
assert [Edition.AUDIO_MEDIUM] == filter.media
# Now test the subclass behavior.
facets = Facets(
self._default_library,
"some collection",
"some availability",
order=Facets.ORDER_ADDED_TO_COLLECTION,
order_ascending="yep",
)
facets.modify_search_filter(filter)
# The library's minimum featured quality is passed in.
assert (
self._default_library.minimum_featured_quality
== filter.minimum_featured_quality
)
# Availability and collection are propagated with no
# validation.
assert "some availability" == filter.availability
assert "some collection" == filter.subcollection
# The sort order constant is converted to the name of an
# Elasticsearch field.
expect = Facets.SORT_ORDER_TO_ELASTICSEARCH_FIELD_NAME[
Facets.ORDER_ADDED_TO_COLLECTION
]
assert expect == filter.order
assert "yep" == filter.order_ascending
# Specifying an invalid sort order doesn't cause a crash, but you
# don't get a sort order.
facets = Facets(self._default_library, None, None, "invalid order")
filter = Filter()
facets.modify_search_filter(filter)
assert None == filter.order
def test_modify_database_query(self):
# Make sure that modify_database_query handles the various
# reasons why a book might or might not be 'available'.
open_access = self._work(with_open_access_download=True, title="open access")
open_access.quality = 1
self_hosted = self._work(
with_license_pool=True, self_hosted=True, title="self hosted"
)
unlimited_access = self._work(
with_license_pool=True, unlimited_access=True, title="unlimited access"
)
available = self._work(with_license_pool=True, title="available")
[pool] = available.license_pools
pool.licenses_owned = 1
pool.licenses_available = 1
not_available = self._work(with_license_pool=True, title="not available")
[pool] = not_available.license_pools
pool.licenses_owned = 1
pool.licenses_available = 0
not_licensed = self._work(with_license_pool=True, title="not licensed")
[pool] = not_licensed.license_pools
pool.licenses_owned = 0
pool.licenses_available = 0
qu = (
self._db.query(Work)
.join(Work.license_pools)
.join(LicensePool.presentation_edition)
)
for availability, expect in [
(
Facets.AVAILABLE_NOW,
[open_access, available, self_hosted, unlimited_access],
),
(
Facets.AVAILABLE_ALL,
[open_access, available, not_available, self_hosted, unlimited_access],
),
(Facets.AVAILABLE_NOT_NOW, [not_available]),
]:
facets = Facets(self._default_library, None, availability, None)
modified = facets.modify_database_query(self._db, qu)
assert (availability, sorted([x.title for x in modified])) == (
availability,
sorted([x.title for x in expect]),
)
# Setting the 'featured' collection includes only known
# high-quality works.
for collection, expect in [
(
Facets.COLLECTION_FULL,
[open_access, available, self_hosted, unlimited_access],
),
(Facets.COLLECTION_FEATURED, [open_access]),
]:
facets = Facets(
self._default_library, collection, Facets.AVAILABLE_NOW, None
)
modified = facets.modify_database_query(self._db, qu)
assert (collection, sorted([x.title for x in modified])) == (
collection,
sorted([x.title for x in expect]),
)
class TestDefaultSortOrderFacets(DatabaseTest):
def setup_method(self):
super(TestDefaultSortOrderFacets, self).setup_method()
self.config = self._default_library
def _check_other_groups_not_changed(self, cls):
# Verify that nothing has changed for the collection or
# availability facet groups.
for group_name in (
Facets.COLLECTION_FACET_GROUP_NAME,
Facets.AVAILABILITY_FACET_GROUP_NAME,
):
assert Facets.available_facets(
self.config, group_name
) == cls.available_facets(self.config, group_name)
assert Facets.default_facet(self.config, group_name) == cls.default_facet(
self.config, group_name
)
def test_sort_order_rearrangement(self):
# Test the case where a DefaultSortOrderFacets does nothing but
# rearrange the default sort orders.
class TitleFirst(DefaultSortOrderFacets):
DEFAULT_SORT_ORDER = Facets.ORDER_TITLE
# In general, TitleFirst has the same options and
# defaults as a normal Facets object.
self._check_other_groups_not_changed(TitleFirst)
# But the default sort order for TitleFirst is ORDER_TITLE.
order = Facets.ORDER_FACET_GROUP_NAME
assert TitleFirst.DEFAULT_SORT_ORDER == TitleFirst.default_facet(
self.config, order
)
assert Facets.default_facet(self.config, order) != TitleFirst.DEFAULT_SORT_ORDER
# TitleFirst has the same sort orders as Facets, but ORDER_TITLE
# comes first in the list.
default_orders = Facets.available_facets(self.config, order)
title_first_orders = TitleFirst.available_facets(self.config, order)
assert set(default_orders) == set(title_first_orders)
assert Facets.ORDER_TITLE == title_first_orders[0]
assert default_orders[0] != Facets.ORDER_TITLE
def test_new_sort_order(self):
# Test the case where DefaultSortOrderFacets adds a sort order
# not ordinarily supported.
class SeriesFirst(DefaultSortOrderFacets):
DEFAULT_SORT_ORDER = Facets.ORDER_SERIES_POSITION
# In general, SeriesFirst has the same options and
# defaults as a normal Facets object.
self._check_other_groups_not_changed(SeriesFirst)
# But its default sort order is ORDER_SERIES.
order = Facets.ORDER_FACET_GROUP_NAME
assert SeriesFirst.DEFAULT_SORT_ORDER == SeriesFirst.default_facet(
self.config, order
)
assert (
Facets.default_facet(self.config, order) != SeriesFirst.DEFAULT_SORT_ORDER
)
# Its list of sort orders is the same as Facets, except Series
# has been added to the front of the list.
default = Facets.available_facets(self.config, order)
series = SeriesFirst.available_facets(self.config, order)
assert [SeriesFirst.DEFAULT_SORT_ORDER] + default == series
class TestDatabaseBackedFacets(DatabaseTest):
def test_available_facets(self):
# The only available sort orders are the ones that map
# directly onto a database field.
f1 = Facets
f2 = DatabaseBackedFacets
# The sort orders available to a DatabaseBackedFacets are a
# subset of the ones available to a Facets under the same
# configuration.
f1_orders = f1.available_facets(
self._default_library, FacetConstants.ORDER_FACET_GROUP_NAME
)
f2_orders = f2.available_facets(
self._default_library, FacetConstants.ORDER_FACET_GROUP_NAME
)
assert len(f2_orders) < len(f1_orders)
for order in f2_orders:
assert order in f1_orders and order in f2.ORDER_FACET_TO_DATABASE_FIELD
# The rules for collection and availability are the same.
for group in (
FacetConstants.COLLECTION_FACET_GROUP_NAME,
FacetConstants.AVAILABILITY_FACET_GROUP_NAME,
):
assert f1.available_facets(
self._default_library, group
) == f2.available_facets(self._default_library, group)
def test_default_facets(self):
# If the configured default sort order is not available,
# DatabaseBackedFacets chooses the first enabled sort order.
f1 = Facets
f2 = DatabaseBackedFacets
# The rules for collection and availability are the same.
for group in (
FacetConstants.COLLECTION_FACET_GROUP_NAME,
FacetConstants.AVAILABILITY_FACET_GROUP_NAME,
):
assert f1.default_facet(self._default_library, group) == f2.default_facet(
self._default_library, group
)
# In this bizarre library, the default sort order is 'time
# added to collection' -- an order not supported by
# DatabaseBackedFacets.
class Mock(object):
enabled = [
FacetConstants.ORDER_ADDED_TO_COLLECTION,
FacetConstants.ORDER_TITLE,
FacetConstants.ORDER_AUTHOR,
]
def enabled_facets(self, group_name):
return self.enabled
def default_facet(self, group_name):
return FacetConstants.ORDER_ADDED_TO_COLLECTION
# A Facets object uses the 'time added to collection' order by
# default.
config = Mock()
assert f1.ORDER_ADDED_TO_COLLECTION == f1.default_facet(
config, f1.ORDER_FACET_GROUP_NAME
)
# A DatabaseBacked Facets can't do that. It finds the first
# enabled sort order that it can support, and uses it instead.
assert f2.ORDER_TITLE == f2.default_facet(config, f2.ORDER_FACET_GROUP_NAME)
# If no enabled sort orders are supported, it just sorts
# by Work ID, so that there is always _some_ sort order.
config.enabled = [FacetConstants.ORDER_ADDED_TO_COLLECTION]
assert f2.ORDER_WORK_ID == f2.default_facet(config, f2.ORDER_FACET_GROUP_NAME)
def test_order_by(self):
E = Edition
W = Work
def order(facet, ascending=None):
f = DatabaseBackedFacets(
self._default_library,
collection=Facets.COLLECTION_FULL,
availability=Facets.AVAILABLE_ALL,
order=facet,
order_ascending=ascending,
)
return f.order_by()[0]
def compare(a, b):
assert len(a) == len(b)
for i in range(0, len(a)):
assert a[i].compare(b[i])
expect = [E.sort_author.asc(), E.sort_title.asc(), W.id.asc()]
actual = order(Facets.ORDER_AUTHOR, True)
compare(expect, actual)
expect = [E.sort_author.desc(), E.sort_title.asc(), W.id.asc()]
actual = order(Facets.ORDER_AUTHOR, False)
compare(expect, actual)
expect = [E.sort_title.asc(), E.sort_author.asc(), W.id.asc()]
actual = order(Facets.ORDER_TITLE, True)
compare(expect, actual)
expect = [
W.last_update_time.asc(),
E.sort_author.asc(),
E.sort_title.asc(),
W.id.asc(),
]
actual = order(Facets.ORDER_LAST_UPDATE, True)
compare(expect, actual)
# Unsupported sort order -> default (author, title, work ID)
expect = [E.sort_author.asc(), E.sort_title.asc(), W.id.asc()]
actual = order(Facets.ORDER_ADDED_TO_COLLECTION, True)
compare(expect, actual)
def test_modify_database_query(self):
# Set up works that are matched by different types of collections.
# A high-quality open-access work.
open_access_high = self._work(with_open_access_download=True)
open_access_high.quality = 0.8
# A low-quality open-access work.
open_access_low = self._work(with_open_access_download=True)
open_access_low.quality = 0.2
# A high-quality licensed work which is not currently available.
(licensed_e1, licensed_p1) = self._edition(
data_source_name=DataSource.OVERDRIVE, with_license_pool=True
)
licensed_high = self._work(presentation_edition=licensed_e1)
licensed_high.license_pools.append(licensed_p1)
licensed_high.quality = 0.8
licensed_p1.open_access = False
licensed_p1.licenses_owned = 1
licensed_p1.licenses_available = 0
# A low-quality licensed work which is currently available.
(licensed_e2, licensed_p2) = self._edition(
data_source_name=DataSource.OVERDRIVE, with_license_pool=True
)
licensed_p2.open_access = False
licensed_low = self._work(presentation_edition=licensed_e2)
licensed_low.license_pools.append(licensed_p2)
licensed_low.quality = 0.2
licensed_p2.licenses_owned = 1
licensed_p2.licenses_available = 1
# A high-quality work with unlimited access.
unlimited_access_high = self._work(
with_license_pool=True, unlimited_access=True
)
unlimited_access_high.quality = 0.8
qu = DatabaseBackedWorkList.base_query(self._db)
def facetify(
collection=Facets.COLLECTION_FULL,
available=Facets.AVAILABLE_ALL,
order=Facets.ORDER_TITLE,
):
f = DatabaseBackedFacets(
self._default_library, collection, available, order
)
return f.modify_database_query(self._db, qu)
# When holds are allowed, we can find all works by asking
# for everything.
library = self._default_library
library.setting(Library.ALLOW_HOLDS).value = "True"
everything = facetify()
assert 5 == everything.count()
# If we disallow holds, we lose one book even when we ask for
# everything.
library.setting(Library.ALLOW_HOLDS).value = "False"
everything = facetify()
assert 4 == everything.count()
assert licensed_high not in everything
library.setting(Library.ALLOW_HOLDS).value = "True"
# Even when holds are allowed, if we restrict to books
# currently available we lose the unavailable book.
available_now = facetify(available=Facets.AVAILABLE_NOW)
assert 4 == available_now.count()
assert licensed_high not in available_now
# If we restrict to open-access books we lose the two licensed
# books.
open_access = facetify(available=Facets.AVAILABLE_OPEN_ACCESS)
assert 2 == open_access.count()
assert licensed_high not in open_access
assert licensed_low not in open_access
assert unlimited_access_high not in open_access
# If we restrict to the featured collection we lose the two
# low-quality books.
featured_collection = facetify(collection=Facets.COLLECTION_FEATURED)
assert 3 == featured_collection.count()
assert open_access_low not in featured_collection
assert licensed_low not in featured_collection
# Try some different orderings to verify that order_by()
# is called and used properly.
title_order = facetify(order=Facets.ORDER_TITLE)
assert [
open_access_high.id,
open_access_low.id,
licensed_high.id,
licensed_low.id,
unlimited_access_high.id,
] == [x.id for x in title_order]
assert ["sort_title", "sort_author", "id"] == [
x.name for x in title_order._distinct
]
# This sort order is not supported, so the default is used.
unsupported_order = facetify(order=Facets.ORDER_ADDED_TO_COLLECTION)
assert [
unlimited_access_high.id,
licensed_low.id,
licensed_high.id,
open_access_low.id,
open_access_high.id,
] == [x.id for x in unsupported_order]
assert ["sort_author", "sort_title", "id"] == [
x.name for x in unsupported_order._distinct
]
class TestFeaturedFacets(DatabaseTest):
def test_constructor(self):
# Verify that constructor arguments are stored.
entrypoint = object()
facets = FeaturedFacets(1, entrypoint, entrypoint_is_default=True)
assert 1 == facets.minimum_featured_quality
assert entrypoint == facets.entrypoint
assert True == facets.entrypoint_is_default
def test_feed_type(self):
# If a grouped feed is built via CachedFeed.fetch, it will be
# filed as a grouped feed.
assert CachedFeed.GROUPS_TYPE == FeaturedFacets.CACHED_FEED_TYPE
def test_default(self):
# Check how FeaturedFacets gets its minimum_featured_quality value.
library1 = self._default_library
library1.setting(Configuration.MINIMUM_FEATURED_QUALITY).value = 0.22
library2 = self._library()
library2.setting(Configuration.MINIMUM_FEATURED_QUALITY).value = 0.99
lane = self._lane(library=library2)
# FeaturedFacets can be instantiated for a library...
facets = FeaturedFacets.default(library1)
assert library1.minimum_featured_quality == facets.minimum_featured_quality
# Or for a lane -- in which case it will take on the value for
# the library associated with that lane.
facets = FeaturedFacets.default(lane)
assert library2.minimum_featured_quality == facets.minimum_featured_quality
# Or with nothing -- in which case the default value is used.
facets = FeaturedFacets.default(None)
assert (
Configuration.DEFAULT_MINIMUM_FEATURED_QUALITY
== facets.minimum_featured_quality
)
def test_navigate(self):
# Test the ability of navigate() to move between slight
# variations of a FeaturedFacets object.
entrypoint = EbooksEntryPoint
f = FeaturedFacets(1, entrypoint)
different_entrypoint = f.navigate(entrypoint=AudiobooksEntryPoint)
assert 1 == different_entrypoint.minimum_featured_quality
assert AudiobooksEntryPoint == different_entrypoint.entrypoint
different_quality = f.navigate(minimum_featured_quality=2)
assert 2 == different_quality.minimum_featured_quality
assert entrypoint == different_quality.entrypoint
class TestSearchFacets(DatabaseTest):
def test_constructor(self):
# The SearchFacets constructor allows you to specify
# a medium and language (or a list of them) as well
# as an entrypoint.
m = SearchFacets
# If you don't pass any information in, you get a SearchFacets
# that does nothing.
defaults = m()
assert None == defaults.entrypoint
assert None == defaults.languages
assert None == defaults.media
assert m.ORDER_BY_RELEVANCE == defaults.order
assert None == defaults.min_score
mock_entrypoint = object()
        # If you pass in a single value for medium or language,
        # it is turned into a list.
with_single_value = m(
entrypoint=mock_entrypoint, media=Edition.BOOK_MEDIUM, languages="eng"
)
assert mock_entrypoint == with_single_value.entrypoint
assert [Edition.BOOK_MEDIUM] == with_single_value.media
assert ["eng"] == with_single_value.languages
# If you pass in a list of values, it's left alone.
media = [Edition.BOOK_MEDIUM, Edition.AUDIO_MEDIUM]
languages = ["eng", "spa"]
with_multiple_values = m(media=media, languages=languages)
assert media == with_multiple_values.media
assert languages == with_multiple_values.languages
# The only exception is if you pass in Edition.ALL_MEDIUM
# as 'medium' -- that's passed through as is.
every_medium = m(media=Edition.ALL_MEDIUM)
assert Edition.ALL_MEDIUM == every_medium.media
# Pass in a value for min_score, and it's stored for later.
mock_min_score = object()
with_min_score = m(min_score=mock_min_score)
assert mock_min_score == with_min_score.min_score
# Pass in a value for order, and you automatically get a
# reasonably tight value for min_score.
order = object()
with_order = m(order=order)
assert order == with_order.order
assert SearchFacets.DEFAULT_MIN_SCORE == with_order.min_score
def test_from_request(self):
# An HTTP client can customize which SearchFacets object
# is created by sending different HTTP requests.
# These variables mock the query string arguments and
# HTTP headers of an HTTP request.
arguments = dict(
entrypoint=EbooksEntryPoint.INTERNAL_NAME,
media=Edition.AUDIO_MEDIUM,
min_score="123",
)
headers = {"Accept-Language": "da, en-gb;q=0.8"}
get_argument = arguments.get
get_header = headers.get
unused = object()
library = self._default_library
library.setting(EntryPoint.ENABLED_SETTING).value = json.dumps(
[AudiobooksEntryPoint.INTERNAL_NAME, EbooksEntryPoint.INTERNAL_NAME]
)
def from_request(**extra):
return SearchFacets.from_request(
self._default_library,
self._default_library,
get_argument,
get_header,
unused,
**extra
)
facets = from_request(extra="value")
assert dict(extra="value") == facets.constructor_kwargs
# The superclass's from_request implementation pulled the
# requested EntryPoint out of the request.
assert EbooksEntryPoint == facets.entrypoint
# The SearchFacets implementation pulled the 'media' query
# string argument.
#
# The medium from the 'media' argument contradicts the medium
# implied by the entry point, but that's not our problem.
assert [Edition.AUDIO_MEDIUM] == facets.media
# The SearchFacets implementation turned the 'min_score'
# argument into a numeric minimum score.
assert 123 == facets.min_score
# The SearchFacets implementation turned the 'Accept-Language'
# header into a set of language codes.
assert ["dan", "eng"] == facets.languages
# Try again with bogus media, languages, and minimum score.
arguments["media"] = "Unknown Media"
arguments["min_score"] = "not a number"
headers["Accept-Language"] = "xx, ql"
# None of the bogus information was used.
facets = from_request()
assert None == facets.media
assert None == facets.languages
assert None == facets.min_score
        # When the 'language' argument is 'all', the Accept-Language header
        # is read but its value is not passed through to the facets.
arguments["language"] = "all"
headers["Accept-Language"] = "da, en-gb;q=0.8"
facets = from_request()
assert None == facets.languages
# Try again with no information.
del arguments["media"]
del headers["Accept-Language"]
facets = from_request()
assert None == facets.media
assert None == facets.languages
def test_from_request_from_admin_search(self):
# If the SearchFacets object is being created by a search run from the admin interface,
# there might be order and language arguments which should be used to filter search results.
arguments = dict(
order="author",
language="fre",
entrypoint=EbooksEntryPoint.INTERNAL_NAME,
media=Edition.AUDIO_MEDIUM,
min_score="123",
)
headers = {"Accept-Language": "da, en-gb;q=0.8"}
get_argument = arguments.get
get_header = headers.get
unused = object()
library = self._default_library
library.setting(EntryPoint.ENABLED_SETTING).value = json.dumps(
[AudiobooksEntryPoint.INTERNAL_NAME, EbooksEntryPoint.INTERNAL_NAME]
)
def from_request(**extra):
return SearchFacets.from_request(
self._default_library,
self._default_library,
get_argument,
get_header,
unused,
**extra
)
facets = from_request(extra="value")
# The SearchFacets implementation uses the order and language values submitted by the admin.
assert "author" == facets.order
assert ["fre"] == facets.languages
def test_selectable_entrypoints(self):
"""If the WorkList has more than one facet, an 'everything' facet
is added for search purposes.
"""
class MockWorkList(object):
def __init__(self):
self.entrypoints = None
ep1 = object()
ep2 = object()
worklist = MockWorkList()
# No WorkList, no EntryPoints.
m = SearchFacets.selectable_entrypoints
assert [] == m(None)
# If there is one EntryPoint, it is returned as-is.
worklist.entrypoints = [ep1]
assert [ep1] == m(worklist)
# If there are multiple EntryPoints, EverythingEntryPoint
# shows up at the beginning.
worklist.entrypoints = [ep1, ep2]
assert [EverythingEntryPoint, ep1, ep2] == m(worklist)
# If EverythingEntryPoint is already in the list, it's not
# added twice.
worklist.entrypoints = [ep1, EverythingEntryPoint, ep2]
assert worklist.entrypoints == m(worklist)
def test_items(self):
facets = SearchFacets(
entrypoint=EverythingEntryPoint,
media=Edition.BOOK_MEDIUM,
languages=["eng"],
min_score=123,
)
# When we call items(), e.g. to create a query string that
# propagates the facet settings, both entrypoint and
# media are propagated if present.
#
# language is not propagated, because it's set through
# the Accept-Language header rather than through a query
# string.
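# Note that min_score is serialized as a string, since these
# key-value pairs are destined for a query string.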
assert [
("entrypoint", EverythingEntryPoint.INTERNAL_NAME),
(Facets.ORDER_FACET_GROUP_NAME, SearchFacets.ORDER_BY_RELEVANCE),
(Facets.AVAILABILITY_FACET_GROUP_NAME, Facets.AVAILABLE_ALL),
(Facets.COLLECTION_FACET_GROUP_NAME, Facets.COLLECTION_FULL),
("media", Edition.BOOK_MEDIUM),
("min_score", "123"),
] == list(facets.items())
def test_navigation(self):
"""Navigating from one SearchFacets to another gives a new
SearchFacets object. A number of fields can be changed,
including min_score, which is SearchFacets-specific.
"""
facets = SearchFacets(entrypoint=object(), order="field1", min_score=100)
new_ep = object()
new_facets = facets.navigate(entrypoint=new_ep, order="field2", min_score=120)
assert isinstance(new_facets, SearchFacets)
assert new_ep == new_facets.entrypoint
assert "field2" == new_facets.order
assert 120 == new_facets.min_score
def test_modify_search_filter(self):
# Test superclass behavior -- filter is modified by entrypoint.
facets = SearchFacets(entrypoint=AudiobooksEntryPoint)
filter = Filter()
facets.modify_search_filter(filter)
assert [Edition.AUDIO_MEDIUM] == filter.media
# The medium specified in the constructor overrides anything
# already present in the filter.
facets = SearchFacets(entrypoint=None, media=Edition.BOOK_MEDIUM)
filter = Filter(media=Edition.AUDIO_MEDIUM)
facets.modify_search_filter(filter)
assert [Edition.BOOK_MEDIUM] == filter.media
# It also overrides anything specified by the EntryPoint.
facets = SearchFacets(
entrypoint=AudiobooksEntryPoint, media=Edition.BOOK_MEDIUM
)
filter = Filter()
facets.modify_search_filter(filter)
assert [Edition.BOOK_MEDIUM] == filter.media
# The language specified in the constructor _adds_ to any
# languages already present in the filter.
facets = SearchFacets(languages=["eng", "spa"])
filter = Filter(languages="spa")
facets.modify_search_filter(filter)
assert ["eng", "spa"] == filter.languages
# It doesn't override those values.
facets = SearchFacets(languages="eng")
filter = Filter(languages="spa")
facets.modify_search_filter(filter)
assert ["eng", "spa"] == filter.languages
# This may result in modify_search_filter being a no-op.
facets = SearchFacets(languages="eng")
filter = Filter(languages="eng")
facets.modify_search_filter(filter)
assert ["eng"] == filter.languages
# If no languages are specified in the SearchFacets, the value
# set by the filter is used by itself.
facets = SearchFacets(languages=None)
filter = Filter(languages="spa")
facets.modify_search_filter(filter)
assert ["spa"] == filter.languages
# If neither facets nor filter includes any languages, there
# is no language filter.
facets = SearchFacets(languages=None)
filter = Filter(languages=None)
facets.modify_search_filter(filter)
assert None == filter.languages
def test_modify_search_filter_accepts_relevance_order(self):
# By default, Elasticsearch orders by relevance, so if order
# is specified as "relevance", filter should not have an
# `order` property.
with LogCaptureHandler(logging.root) as logs:
facets = SearchFacets()
filter = Filter()
facets.modify_search_filter(filter)
assert None == filter.order
assert 0 == len(logs.error)
with LogCaptureHandler(logging.root) as logs:
facets = SearchFacets(order="relevance")
filter = Filter()
facets.modify_search_filter(filter)
assert None == filter.order
assert 0 == len(logs.error)
with LogCaptureHandler(logging.root) as logs:
supported_order = "author"
facets = SearchFacets(order=supported_order)
filter = Filter()
facets.modify_search_filter(filter)
assert filter.order is not None
assert len(filter.order) > 0
assert 0 == len(logs.error)
with LogCaptureHandler(logging.root) as logs:
unsupported_order = "some_order_we_do_not_support"
facets = SearchFacets(order=unsupported_order)
filter = Filter()
facets.modify_search_filter(filter)
assert None == filter.order
assert "Unrecognized sort order: %s" % unsupported_order in logs.error
class TestPagination(DatabaseTest):
def test_from_request(self):
# No arguments -> Class defaults.
pagination = Pagination.from_request({}.get, None)
assert isinstance(pagination, Pagination)
assert Pagination.DEFAULT_SIZE == pagination.size
assert 0 == pagination.offset
# Override the default page size.
pagination = Pagination.from_request({}.get, 100)
assert isinstance(pagination, Pagination)
assert 100 == pagination.size
assert 0 == pagination.offset
# The most common usages.
pagination = Pagination.from_request(dict(size="4").get)
assert isinstance(pagination, Pagination)
assert 4 == pagination.size
assert 0 == pagination.offset
pagination = Pagination.from_request(dict(after="6").get)
assert isinstance(pagination, Pagination)
assert Pagination.DEFAULT_SIZE == pagination.size
assert 6 == pagination.offset
pagination = Pagination.from_request(dict(size=4, after=6).get)
assert isinstance(pagination, Pagination)
assert 4 == pagination.size
assert 6 == pagination.offset
# Invalid size or offset -> problem detail
error = Pagination.from_request(dict(size="string").get)
assert INVALID_INPUT.uri == error.uri
assert "Invalid page size: string" == str(error.detail)
error = Pagination.from_request(dict(after="string").get)
assert INVALID_INPUT.uri == error.uri
assert "Invalid offset: string" == str(error.detail)
# Size too large -> cut down to MAX_SIZE
pagination = Pagination.from_request(dict(size="10000").get)
assert isinstance(pagination, Pagination)
assert Pagination.MAX_SIZE == pagination.size
assert 0 == pagination.offset
def test_has_next_page_total_size(self):
"""Test the ability of Pagination.total_size to control whether there is a next page."""
query = self._db.query(Work)
pagination = Pagination(size=2)
# When total_size is not set, Pagination assumes there is a
# next page.
pagination.modify_database_query(self._db, query)
assert True == pagination.has_next_page
# Here, there is one more item on the next page.
pagination.total_size = 3
assert 0 == pagination.offset
assert True == pagination.has_next_page
# Here, the last item on this page is the last item in the dataset.
pagination.offset = 1
assert False == pagination.has_next_page
assert None == pagination.next_page
# If we somehow go over the end of the dataset, there is no next page.
pagination.offset = 400
assert False == pagination.has_next_page
assert None == pagination.next_page
# If both total_size and this_page_size are set, total_size
# takes precedence.
pagination.offset = 0
pagination.total_size = 100
pagination.this_page_size = 0
assert True == pagination.has_next_page
pagination.total_size = 0
pagination.this_page_size = 10
assert False == pagination.has_next_page
assert None == pagination.next_page
def test_has_next_page_this_page_size(self):
"""Test the ability of Pagination.this_page_size to control whether there is a next page."""
query = self._db.query(Work)
pagination = Pagination(size=2)
# When this_page_size is not set, Pagination assumes there is a
# next page.
pagination.modify_database_query(self._db, query)
assert True == pagination.has_next_page
# Here, there is nothing on the current page. There is no next page.
pagination.this_page_size = 0
assert False == pagination.has_next_page
# If the page is full, we can be almost certain there is a next page.
pagination.this_page_size = 400
assert True == pagination.has_next_page
# Here, there is one item on the current page. Even though the
# current page is not full (page size is 2), we assume for
# safety's sake that there is a next page. The cost of getting
# this wrong is low, compared to the cost of saying there is no
# next page when there actually is.
pagination.this_page_size = 1
assert True == pagination.has_next_page
def test_page_loaded(self):
# Test page_loaded(), which lets the Pagination object see the
# size of the current page.
pagination = Pagination()
assert None == pagination.this_page_size
assert False == pagination.page_has_loaded
pagination.page_loaded([1, 2, 3])
assert 3 == pagination.this_page_size
assert True == pagination.page_has_loaded
def test_modify_search_query(self):
# The default implementation of modify_search_query is to slice
# a set of search results like a list.
pagination = Pagination(offset=2, size=3)
o = [1, 2, 3, 4, 5, 6]
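# With offset=2 and size=3 this is o[2:5], i.e. [3, 4, 5].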
assert o[2 : 2 + 3] == pagination.modify_search_query(o)
class MockWork(object):
"""Acts enough like a Work to trick code that doesn't need to make
database requests.
"""
def __init__(self, id):
self.id = id
class MockWorks(WorkList):
"""A WorkList that mocks works_from_database()."""
def __init__(self):
self.reset()
def reset(self):
self._works = []
self.works_from_database_calls = []
self.random_sample_calls = []
def queue_works(self, works):
"""Set the next return value for works_from_database()."""
self._works.append(works)
def works_from_database(self, _db, facets=None, pagination=None, featured=False):
self.works_from_database_calls.append((facets, pagination, featured))
try:
return self._works.pop(0)
except IndexError:
return []
def random_sample(self, query, target_size):
# The 'query' is actually a list, and we're in a test
# environment where randomness is not welcome. Just take
# a sample from the front of the list.
self.random_sample_calls.append((query, target_size))
return query[:target_size]
class TestWorkList(DatabaseTest):
def test_initialize(self):
wl = WorkList()
child = WorkList()
child.initialize(self._default_library)
sf, ignore = Genre.lookup(self._db, "Science Fiction")
romance, ignore = Genre.lookup(self._db, "Romance")
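# (Genre.lookup appears to return a (genre, is_new) 2-tuple; the
# second value is ignored throughout these tests.)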
# Create a WorkList that's associated with a Library, two genres,
# and a child WorkList.
wl.initialize(
self._default_library,
children=[child],
genres=[sf, romance],
entrypoints=[1, 2, 3],
)
# Access the Library.
assert self._default_library == wl.get_library(self._db)
# The Collections associated with the WorkList are those associated
# with the Library.
assert set(wl.collection_ids) == set(
[x.id for x in self._default_library.collections]
)
# The Genres associated with the WorkList are the ones passed
# in on the constructor.
assert set(wl.genre_ids) == set([x.id for x in [sf, romance]])
# The WorkList's child is the WorkList passed in to the constructor.
assert [child] == wl.visible_children
# The Worklist's .entrypoints is whatever was passed in
# to the constructor.
assert [1, 2, 3] == wl.entrypoints
def test_initialize_without_library(self):
# It's possible to initialize a WorkList with no Library.
worklist = WorkList()
worklist.initialize(None)
# No restriction is placed on the collection IDs of the
# Works in this list.
assert None == worklist.collection_ids
def test_initialize_with_customlists(self):
gutenberg = DataSource.lookup(self._db, DataSource.GUTENBERG)
customlist1, ignore = self._customlist(
data_source_name=gutenberg.name, num_entries=0
)
customlist2, ignore = self._customlist(
data_source_name=gutenberg.name, num_entries=0
)
customlist3, ignore = self._customlist(
data_source_name=DataSource.OVERDRIVE, num_entries=0
)
# Make a WorkList based on specific CustomLists.
worklist = WorkList()
worklist.initialize(
self._default_library, customlists=[customlist1, customlist3]
)
assert [customlist1.id, customlist3.id] == worklist.customlist_ids
assert None == worklist.list_datasource_id
# Make a WorkList based on a DataSource, as a shorthand for
# 'all the CustomLists from that DataSource'.
worklist = WorkList()
worklist.initialize(self._default_library, list_datasource=gutenberg)
assert [customlist1.id, customlist2.id] == worklist.customlist_ids
assert gutenberg.id == worklist.list_datasource_id
def test_initialize_with_genres_without_library(self):
wl = WorkList()
sf, ignore = Genre.lookup(self._db, "Science Fiction")
romance, ignore = Genre.lookup(self._db, "Romance")
# Create a WorkList that's associated with two genres.
wl.initialize(None, genres=[sf, romance])
wl.collection_ids = [self._default_collection.id]
# There is no Library.
assert None == wl.get_library(self._db)
# The Genres associated with the WorkList are the ones passed
# in on the constructor.
assert set(wl.genre_ids) == set([x.id for x in [sf, romance]])
def test_initialize_uses_append_child_hook_method(self):
# When a WorkList is initialized with children, the children
# are passed individually through the append_child() hook
# method, not simply set to WorkList.children.
class Mock(WorkList):
append_child_calls = []
def append_child(self, child):
self.append_child_calls.append(child)
return super(Mock, self).append_child(child)
child = WorkList()
parent = Mock()
parent.initialize(self._default_library, children=[child])
assert [child] == parent.append_child_calls
# They do end up in WorkList.children, since that's what the
# default append_child() implementation does.
assert [child] == parent.children
def test_top_level_for_library(self):
"""Test the ability to generate a top-level WorkList."""
# These two top-level lanes should be children of the WorkList.
lane1 = self._lane(display_name="Top-level Lane 1")
lane1.priority = 0
lane2 = self._lane(display_name="Top-level Lane 2")
lane2.priority = 1
# This lane is invisible and will be filtered out.
invisible_lane = self._lane(display_name="Invisible Lane")
invisible_lane.visible = False
# This lane has a parent and will be filtered out.
sublane = self._lane(display_name="Sublane")
lane1.sublanes.append(sublane)
# This lane belongs to a different library.
other_library = self._library(name="Other Library", short_name="Other")
other_library_lane = self._lane(
display_name="Other Library Lane", library=other_library
)
# The default library gets a TopLevelWorkList with the two top-level lanes as children.
wl = WorkList.top_level_for_library(self._db, self._default_library)
assert isinstance(wl, TopLevelWorkList)
assert [lane1, lane2] == wl.children
assert Edition.FULFILLABLE_MEDIA == wl.media
# The other library only has one top-level lane, so we use that lane.
l = WorkList.top_level_for_library(self._db, other_library)
assert other_library_lane == l
# This library has no lanes configured at all.
no_config_library = self._library(
name="No configuration Library", short_name="No config"
)
wl = WorkList.top_level_for_library(self._db, no_config_library)
assert isinstance(wl, TopLevelWorkList)
assert [] == wl.children
assert Edition.FULFILLABLE_MEDIA == wl.media
def test_audience_key(self):
wl = WorkList()
wl.initialize(library=self._default_library)
# No audience.
assert "" == wl.audience_key
# All audiences.
wl.audiences = Classifier.AUDIENCES
assert "" == wl.audience_key
# Specific audiences.
wl.audiences = [Classifier.AUDIENCE_CHILDREN, Classifier.AUDIENCE_YOUNG_ADULT]
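# The key is a comma-separated list of audience names with spaces
# URL-quoted, so 'Young Adult' becomes 'Young+Adult'.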
assert "Children,Young+Adult" == wl.audience_key
def test_parent(self):
# A WorkList has no parent.
assert None == WorkList().parent
def test_parentage(self):
# A WorkList has no parentage, since it has no parent.
assert [] == WorkList().parentage
def test_inherit_parent_restrictions(self):
# A WorkList never inherits parent restrictions, because it
# can't have a parent.
assert False == WorkList().inherit_parent_restrictions
def test_hierarchy(self):
# A WorkList's hierarchy includes only itself, because it
# can't have a parent.
wl = WorkList()
assert [wl] == wl.hierarchy
def test_visible_children(self):
"""Invisible children don't show up in WorkList.visible_children."""
wl = WorkList()
visible = self._lane()
invisible = self._lane()
invisible.visible = False
child_wl = WorkList()
child_wl.initialize(self._default_library)
wl.initialize(self._default_library, children=[visible, invisible, child_wl])
assert set([child_wl, visible]) == set(wl.visible_children)
def test_visible_children_sorted(self):
"""Visible children are sorted by priority and then by display name."""
wl = WorkList()
lane_child = self._lane()
lane_child.display_name = "ZZ"
lane_child.priority = 0
wl_child = WorkList()
wl_child.priority = 1
wl_child.display_name = "AA"
wl.initialize(self._default_library, children=[lane_child, wl_child])
# lane_child has a higher priority so it shows up first even
# though its display name starts with a Z.
assert [lane_child, wl_child] == wl.visible_children
# If the priorities are the same, wl_child shows up first,
# because its display name starts with an A.
wl_child.priority = 0
assert [wl_child, lane_child] == wl.visible_children
def test_is_self_or_descendant(self):
# Test the code that checks whether one WorkList is 'beneath'
# another.
class WorkListWithParent(WorkList):
# A normal WorkList never has a parent; this subclass
# makes it possible to explicitly set a WorkList's parent
# and get its parentage.
#
# This way we can test WorkList code without bringing in Lane.
def __init__(self):
self._parent = None
@property
def parent(self):
return self._parent
@property
def parentage(self):
if not self._parent:
return []
return [self._parent] + list(self._parent.parentage)
# A WorkList matches itself.
child = WorkListWithParent()
child.initialize(self._default_library)
assert True == child.is_self_or_descendant(child)
# But not any other WorkList.
parent = WorkListWithParent()
parent.initialize(self._default_library)
assert False == child.is_self_or_descendant(parent)
grandparent = WorkList()
grandparent.initialize(self._default_library)
assert False == child.is_self_or_descendant(grandparent)
# Unless it's a descendant of that WorkList.
child._parent = parent
parent._parent = grandparent
assert True == child.is_self_or_descendant(parent)
assert True == child.is_self_or_descendant(grandparent)
assert True == parent.is_self_or_descendant(grandparent)
assert False == parent.is_self_or_descendant(child)
assert False == grandparent.is_self_or_descendant(parent)
def test_accessible_to(self):
# Test the circumstances under which a Patron may or may not access a
# WorkList.
wl = WorkList()
wl.initialize(self._default_library)
# A WorkList is always accessible to unauthenticated users.
m = wl.accessible_to
assert True == m(None)
# A WorkList is never accessible to patrons of a different library.
other_library = self._library()
other_library_patron = self._patron(library=other_library)
assert False == m(other_library_patron)
# A WorkList is always accessible to patrons with no root lane
# set.
patron = self._patron()
assert True == m(patron)
# Give the patron a root lane.
lane = self._lane()
lane.root_for_patron_type = ["1"]
patron.external_type = "1"
# Now that the patron has a root lane, WorkLists will become
# inaccessible if they might contain content not
# age-appropriate for that patron (as gauged by their root
# lane).
# As initialized, our worklist has no audience restrictions.
assert True == m(patron)
# Give it some audience restrictions.
wl.audiences = [Classifier.AUDIENCE_ADULT, Classifier.AUDIENCE_CHILDREN]
wl.target_age = tuple_to_numericrange((4, 5))
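# (tuple_to_numericrange presumably converts the (lower, upper) tuple
# into the NumericRange value the model stores for target_age.)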
# Now it depends on the return value of Patron.work_is_age_appropriate.
# Mock that method.
patron.work_is_age_appropriate = MagicMock(return_value=False)
# Since our mock returns false, so does accessible_to
assert False == m(patron)
# work_is_age_appropriate was called once, with the
# WorkList's target age and its first audience restriction.
# When work_is_age_appropriate returned False, it short-circuited
# the process and no second call was made.
patron.work_is_age_appropriate.assert_called_once_with(
wl.audiences[0], wl.target_age
)
# If we tell work_is_age_appropriate to always return true...
patron.work_is_age_appropriate = MagicMock(return_value=True)
# ...accessible_to starts returning True.
assert True == m(patron)
# The mock method was called once for each audience
# restriction in our WorkList. Only if _every_ call returns
# True is the WorkList considered age-appropriate for the
# patron.
patron.work_is_age_appropriate.assert_has_calls(
[
call(wl.audiences[0], wl.target_age),
call(wl.audiences[1], wl.target_age),
]
)
def test_uses_customlists(self):
"""A WorkList is said to use CustomLists if either ._customlist_ids
or .list_datasource_id is set.
"""
wl = WorkList()
wl.initialize(self._default_library)
assert False == wl.uses_customlists
wl._customlist_ids = object()
assert True == wl.uses_customlists
wl._customlist_ids = None
wl.list_datasource_id = object()
assert True == wl.uses_customlists
def test_max_cache_age(self):
# By default, the maximum cache age of an OPDS feed based on a
# WorkList is the default cache age for any type of OPDS feed,
# no matter what type of feed is being generated.
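# (The argument, normally the type of feed being generated, is
# ignored here, so any object will do.)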
wl = WorkList()
assert OPDSFeed.DEFAULT_MAX_AGE == wl.max_cache_age(object())
def test_filter(self):
# Verify that filter() calls modify_search_filter_hook()
# and can handle either a new Filter being returned or a Filter
# modified in place.
class ModifyInPlace(WorkList):
# A WorkList that modifies its search filter in place.
def modify_search_filter_hook(self, filter):
filter.hook_called = True
wl = ModifyInPlace()
wl.initialize(self._default_library)
facets = SearchFacets()
filter = wl.filter(self._db, facets)
assert isinstance(filter, Filter)
assert True == filter.hook_called
class NewFilter(WorkList):
# A WorkList that returns a brand new Filter
def modify_search_filter_hook(self, filter):
return "A brand new Filter"
wl = NewFilter()
wl.initialize(self._default_library)
facets = SearchFacets()
filter = wl.filter(self._db, facets)
assert "A brand new Filter" == filter
def test_groups(self):
w1 = MockWork(1)
w2 = MockWork(2)
w3 = MockWork(3)
class MockWorkList(object):
def __init__(self, works):
self._works = works
self.visible = True
def groups(self, *args, **kwargs):
for i in self._works:
yield i, self
# This WorkList has one featured work.
child1 = MockWorkList([w1])
# This WorkList has two featured works.
child2 = MockWorkList([w2, w1])
# This WorkList has two children -- the two WorkLists created
# above.
wl = WorkList()
wl.initialize(self._default_library, children=[child1, child2])
# Calling groups() on the parent WorkList returns three
# 2-tuples; one for each work featured by one of its children
# WorkLists. Note that the same work appears twice, through two
# different children.
[wwl1, wwl2, wwl3] = wl.groups(self._db)
assert (w1, child1) == wwl1
assert (w2, child2) == wwl2
assert (w1, child2) == wwl3
def test_groups_propagates_facets(self):
# Verify that the Facets object passed into groups() is
# propagated to the methods called by groups().
class MockWorkList(WorkList):
overview_facets_called_with = None
def works(self, _db, pagination, facets):
self.works_called_with = (pagination, facets)
return []
def overview_facets(self, _db, facets):
self.overview_facets_called_with = facets
return "A new faceting object"
def _groups_for_lanes(
self,
_db,
relevant_children,
relevant_lanes,
pagination,
facets,
**kwargs
):
self._groups_for_lanes_called_with = (pagination, facets)
return []
mock = MockWorkList()
mock.initialize(library=self._default_library)
facets = object()
# First, try the situation where we're trying to make a grouped feed
# out of the (imaginary) sublanes of this lane.
[x for x in mock.groups(self._db, facets=facets)]
# overview_facets() was not called.
assert None == mock.overview_facets_called_with
# The _groups_for_lanes() method was called with the
# (imaginary) list of sublanes and the original faceting
# object. No pagination was provided. The _groups_for_lanes()
# implementation is responsible for giving each sublane a
# chance to adapt that faceting object to its own needs.
assert (None, facets) == mock._groups_for_lanes_called_with
mock._groups_for_lanes_called_with = None
# Now try the case where we want to use a pagination object to
# restrict the number of results per lane.
pagination = object()
[x for x in mock.groups(self._db, pagination=pagination, facets=facets)]
# The pagination object is propagated to _groups_for_lanes.
assert (pagination, facets) == mock._groups_for_lanes_called_with
mock._groups_for_lanes_called_with = None
# Now try the situation where we're just trying to get _part_ of
# a grouped feed -- the part for which this lane is responsible.
[x for x in mock.groups(self._db, facets=facets, include_sublanes=False)]
# Now, the original faceting object was passed into
# overview_facets().
assert facets == mock.overview_facets_called_with
# And the return value of overview_facets() was passed into
# works()
assert (None, "A new faceting object") == mock.works_called_with
# _groups_for_lanes was not called.
assert None == mock._groups_for_lanes_called_with
def test_works(self):
# Test the method that uses the search index to fetch a list of
# results appropriate for a given WorkList.
class MockSearchClient(object):
"""Respond to search requests with some fake work IDs."""
fake_work_ids = [1, 10, 100, 1000]
def query_works(self, **kwargs):
self.called_with = kwargs
return self.fake_work_ids
class MockWorkList(WorkList):
"""Mock the process of turning work IDs into WorkSearchResult
objects."""
fake_work_list = "a list of works"
def works_for_hits(self, _db, work_ids, facets=None):
self.called_with = (_db, work_ids)
return self.fake_work_list
# Here's a WorkList.
wl = MockWorkList()
wl.initialize(self._default_library, languages=["eng"])
facets = Facets(self._default_library, None, None, order=Facets.ORDER_TITLE)
mock_pagination = object()
mock_debug = object()
search_client = MockSearchClient()
# Ask the WorkList for a page of works, using the search index
# to drive the query instead of the database.
result = wl.works(self._db, facets, mock_pagination, search_client, mock_debug)
# MockSearchClient.query_works was used to grab a list of work
# IDs.
query_works_kwargs = search_client.called_with
# Our facets and the requirements of the WorkList were used to
# make a Filter object, which was passed as the 'filter'
# keyword argument.
filter = query_works_kwargs.pop("filter")
assert Filter.from_worklist(self._db, wl, facets).build() == filter.build()
# The other arguments to query_works are either constants or
# our mock objects.
assert (
dict(query_string=None, pagination=mock_pagination, debug=mock_debug)
== query_works_kwargs
)
# The fake work IDs returned from query_works() were passed into
# works_for_hits().
assert (self._db, search_client.fake_work_ids) == wl.called_with
# And the fake return value of works_for_hits() was used as
# the return value of works(), the method we're testing.
assert wl.fake_work_list == result
def test_works_for_hits(self):
# Verify that WorkList.works_for_hits() just calls
# works_for_resultsets().
class Mock(WorkList):
def works_for_resultsets(self, _db, resultsets, facets=None):
self.called_with = (_db, resultsets)
return [["some", "results"]]
wl = Mock()
results = wl.works_for_hits(self._db, ["hit1", "hit2"])
# The list of hits was itself wrapped in a list, and passed
# into works_for_resultsets().
assert (self._db, [["hit1", "hit2"]]) == wl.called_with
# The return value -- a list of lists of results, which
# contained a single item -- was unrolled and used as the
# return value of works_for_hits().
assert ["some", "results"] == results
def test_works_for_resultsets(self):
# Verify that WorkList.works_for_resultsets turns lists of
# (mocked) Hit objects into lists of Work or WorkSearchResult
# objects.
# Create the WorkList we'll be testing with.
wl = WorkList()
wl.initialize(self._default_library)
m = wl.works_for_resultsets
# Create two works.
w1 = self._work(with_license_pool=True)
w2 = self._work(with_license_pool=True)
class MockHit(object):
def __init__(self, work_id, has_last_update=False):
if isinstance(work_id, Work):
self.work_id = work_id.id
else:
self.work_id = work_id
self.has_last_update = has_last_update
def __contains__(self, k):
# Pretend to have the 'last_update' script field,
# if necessary.
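# works_for_resultsets apparently checks for this script field
# to decide whether to wrap each Work in a WorkSearchResult.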
return k == "last_update" and self.has_last_update
hit1 = MockHit(w1)
hit2 = MockHit(w2)
# For each list of hits passed in, a corresponding list of
# Works is returned.
assert [[w2]] == m(self._db, [[hit2]])
assert [[w2], [w1]] == m(self._db, [[hit2], [hit1]])
assert [[w1, w1], [w2, w2], []] == m(self._db, [[hit1, hit1], [hit2, hit2], []])
# Works are returned in the order we ask for.
for ordering in ([hit1, hit2], [hit2, hit1]):
[works] = m(self._db, [ordering])
assert [x.work_id for x in ordering] == [x.id for x in works]
# If we ask for a work ID that's not in the database,
# we don't get it.
assert [[]] == m(self._db, [[MockHit(-100)]])
# If we pass in Hit objects that have extra information in them,
# we get WorkSearchResult objects
hit1_extra = MockHit(w1, True)
hit2_extra = MockHit(w2, True)
[results] = m(self._db, [[hit2_extra, hit1_extra]])
assert all(isinstance(x, WorkSearchResult) for x in results)
r1, r2 = results
# These WorkSearchResult objects wrap Work objects together
# with the corresponding Hit objects.
assert w2 == r1._work
assert hit2_extra == r1._hit
assert w1 == r2._work
assert hit1_extra == r2._hit
# Finally, test that undeliverable works are filtered out.
for lpdm in w2.license_pools[0].delivery_mechanisms:
self._db.delete(lpdm)
assert [[]] == m(self._db, [[hit2]])
def test_search_target(self):
# A WorkList can be searched - it is its own search target.
wl = WorkList()
assert wl == wl.search_target
def test_search(self):
# Test the successful execution of WorkList.search()
class MockWorkList(WorkList):
def works_for_hits(self, _db, work_ids):
self.works_for_hits_called_with = (_db, work_ids)
return "A bunch of Works"
wl = MockWorkList()
wl.initialize(self._default_library, audiences=[Classifier.AUDIENCE_CHILDREN])
query = "a query"
class MockSearchClient(object):
def query_works(self, query, filter, pagination, debug):
self.query_works_called_with = (query, filter, pagination, debug)
return "A bunch of work IDs"
# Search with the default arguments.
client = MockSearchClient()
results = wl.search(self._db, query, client)
# The results of query_works were passed into
# MockWorkList.works_for_hits.
assert (self._db, "A bunch of work IDs") == wl.works_for_hits_called_with
# The return value of MockWorkList.works_for_hits is
# used as the return value of search().
assert "A bunch of Works" == results
# From this point on we are only interested in the arguments
# passed in to query_works, since MockSearchClient always
# returns the same result.
# First, let's see what the default arguments look like.
qu, filter, pagination, debug = client.query_works_called_with
# The query was passed through.
assert query == qu
assert False == debug
# A Filter object was created to match only works that belong
# in the MockWorkList.
assert [
Classifier.AUDIENCE_CHILDREN,
Classifier.AUDIENCE_ALL_AGES,
] == filter.audiences
# A default Pagination object was created.
assert 0 == pagination.offset
assert Pagination.DEFAULT_SEARCH_SIZE == pagination.size
# Now let's try a search with specific Pagination and Facets
# objects.
facets = SearchFacets(languages=["chi"])
pagination = object()
results = wl.search(self._db, query, client, pagination, facets, debug=True)
qu, filter, pag, debug = client.query_works_called_with
assert query == qu
assert pagination == pag
assert True == debug
# The Filter incorporates restrictions imposed by both the
# MockWorkList and the Facets.
assert [
Classifier.AUDIENCE_CHILDREN,
Classifier.AUDIENCE_ALL_AGES,
] == filter.audiences
assert ["chi"] == filter.languages
def test_search_failures(self):
# Test reasons why WorkList.search() might not work.
wl = WorkList()
wl.initialize(self._default_library)
query = "a query"
# If there is no SearchClient, there are no results.
assert [] == wl.search(self._db, query, None)
# If the SearchClient returns nothing, there are no results.
class NoResults(object):
def query_works(self, *args, **kwargs):
return None
assert [] == wl.search(self._db, query, NoResults())
# If there's an ElasticSearch exception during the query,
# there are no results.
class RaisesException(object):
def query_works(self, *args, **kwargs):
raise ElasticsearchException("oh no")
assert [] == wl.search(self._db, query, RaisesException())
class TestDatabaseBackedWorkList(DatabaseTest):
def test_works_from_database(self):
# Verify that the works_from_database() method calls the
# methods we expect, in the right order.
class MockQuery(object):
# Simulates the behavior of a database Query object
# without the need to pass around actual database clauses.
#
# This is a lot of instrumentation but it means we can
# test what happened inside works_from_database() mainly by looking at a
# string of method names in the result object.
def __init__(self, clauses, distinct=False):
self.clauses = clauses
self._distinct = distinct
def filter(self, clause):
# Create a new MockQuery object with a new clause
return MockQuery(self.clauses + [clause], self._distinct)
def distinct(self, fields):
return MockQuery(self.clauses, fields)
def __repr__(self):
return "<MockQuery %d clauses, most recent %s>" % (
len(self.clauses),
self.clauses[-1],
)
class MockWorkList(DatabaseBackedWorkList):
def __init__(self, _db):
super(MockWorkList, self).__init__()
self._db = _db # We'll be using this in assertions.
self.stages = []
def _stage(self, method_name, _db, qu, qu_is_previous_stage=True):
# _db must always be self._db; check it here and then
# ignore it.
assert _db == self._db
if qu_is_previous_stage:
# qu must be the MockQuery returned from the
# previous call.
assert qu == self.stages[-1]
else:
# qu must be a new object, and _not_ the MockQuery
# returned from the previous call.
assert qu != self.stages[-1]
# Create a new MockQuery with an additional filter,
# named after the method that was called.
new_filter = qu.filter(method_name)
self.stages.append(new_filter)
return new_filter
def base_query(self, _db):
# This kicks off the process -- most future calls will
# use _stage().
assert _db == self._db
query = MockQuery(["base_query"])
self.stages.append(query)
return query
def only_show_ready_deliverable_works(self, _db, qu):
return self._stage("only_show_ready_deliverable_works", _db, qu)
def bibliographic_filter_clauses(self, _db, qu):
# This method is a little different, so we can't use
# _stage().
#
# This implementation doesn't change anything; it will be
# replaced with an implementation that does.
assert _db == self._db
self.bibliographic_filter_clauses_called_with = qu
return qu, []
def modify_database_query_hook(self, _db, qu):
return self._stage("modify_database_query_hook", _db, qu)
def active_bibliographic_filter_clauses(self, _db, qu):
# This alternate implementation of
# bibliographic_filter_clauses returns a brand new
# MockQuery object and a list of filters.
self.pre_bibliographic_filter = qu
new_query = MockQuery(
["new query made inside active_bibliographic_filter_clauses"]
)
self.stages.append(new_query)
return (new_query, [text("clause 1"), text("clause 2")])
# The simplest case: no facets or pagination,
# and bibliographic_filter_clauses does nothing.
wl = MockWorkList(self._db)
result = wl.works_from_database(self._db, extra_kwarg="ignored")
# We got a MockQuery.
assert isinstance(result, MockQuery)
# During the course of the works_from_database() call, we verified that the
# MockQuery is constructed by chaining method calls. Now we
# just need to verify that all the methods were called and in
# the order we expect.
assert [
"base_query",
"only_show_ready_deliverable_works",
"modify_database_query_hook",
] == result.clauses
# bibliographic_filter_clauses used a different mechanism, but
# since it stored the MockQuery it was called with, we can see
# when it was called -- just after
# only_show_ready_deliverable_works.
assert [
"base_query",
"only_show_ready_deliverable_works",
] == wl.bibliographic_filter_clauses_called_with.clauses
wl.bibliographic_filter_clauses_called_with = None
# Since nobody made the query distinct, it was set distinct on
# Work.id.
assert Work.id == result._distinct
# Now we're going to do a more complicated test, with
# faceting, pagination, and a bibliographic_filter_clauses that
# actually does something.
wl.bibliographic_filter_clauses = wl.active_bibliographic_filter_clauses
class MockFacets(DatabaseBackedFacets):
def __init__(self, wl):
self.wl = wl
def modify_database_query(self, _db, qu):
# This is the only place we pass in False for
# qu_is_previous_stage. This is called right after
# bibliographic_filter_clauses, which caused a brand
# new MockQuery object to be created.
#
# Normally, _stage() will assert that `qu` is the
# return value from the previous call, but this time
# we want to assert the opposite.
result = self.wl._stage("facets", _db, qu, qu_is_previous_stage=False)
distinct = result.distinct("some other field")
self.wl.stages.append(distinct)
return distinct
class MockPagination(object):
def __init__(self, wl):
self.wl = wl
def modify_database_query(self, _db, qu):
return self.wl._stage("pagination", _db, qu)
result = wl.works_from_database(
self._db, facets=MockFacets(wl), pagination=MockPagination(wl)
)
# Here are the methods called before bibliographic_filter_clauses.
assert [
"base_query",
"only_show_ready_deliverable_works",
] == wl.pre_bibliographic_filter.clauses
# bibliographic_filter_clauses created a brand new object,
# which ended up as our result after some more methods were
# called on it.
assert (
"new query made inside active_bibliographic_filter_clauses"
== result.clauses.pop(0)
)
# bibliographic_filter_clauses() returned two clauses which were
# combined with and_().
bibliographic_filter_clauses = result.clauses.pop(0)
assert str(and_(text("clause 1"), text("clause 2"))) == str(
bibliographic_filter_clauses
)
# The rest of the calls are easy to trace.
assert [
"facets",
"modify_database_query_hook",
"pagination",
] == result.clauses
# The query was made distinct on some other field, so the
# default behavior (making it distinct on Work.id) wasn't
# triggered.
assert "some other field" == result._distinct
def test_works_from_database_end_to_end(self):
# Verify that works_from_database() correctly locates works
# that match the criteria specified by the
# DatabaseBackedWorkList, the faceting object, and the
# pagination object.
#
# This is a simple end-to-end test of functionality that's
# tested in more detail elsewhere.
# Create two books.
oliver_twist = self._work(
title="<NAME>", with_license_pool=True, language="eng"
)
barnaby_rudge = self._work(
title="Barnaby Rudge", with_license_pool=True, language="spa"
)
# A standard DatabaseBackedWorkList will find both books.
wl = DatabaseBackedWorkList()
wl.initialize(self._default_library)
assert 2 == wl.works_from_database(self._db).count()
# A work list with a language restriction will only find books
# in that language.
wl.initialize(self._default_library, languages=["eng"])
assert [oliver_twist] == [x for x in wl.works_from_database(self._db)]
# A DatabaseBackedWorkList will only find books licensed
# through one of its collections.
collection = self._collection()
self._default_library.collections = [collection]
wl.initialize(self._default_library)
assert 0 == wl.works_from_database(self._db).count()
# If a DatabaseBackedWorkList has no collections, it has no
# books.
self._default_library.collections = []
wl.initialize(self._default_library)
assert 0 == wl.works_from_database(self._db).count()
# A DatabaseBackedWorkList can be set up with a collection
# rather than a library. TODO: The syntax here could be improved.
wl = DatabaseBackedWorkList()
wl.initialize(None)
wl.collection_ids = [self._default_collection.id]
assert None == wl.get_library(self._db)
assert 2 == wl.works_from_database(self._db).count()
# Facets and pagination can affect which entries and how many
# are returned.
facets = DatabaseBackedFacets(
self._default_library,
collection=Facets.COLLECTION_FULL,
availability=Facets.AVAILABLE_ALL,
order=Facets.ORDER_TITLE,
)
pagination = Pagination(offset=1, size=1)
assert [oliver_twist] == wl.works_from_database(
self._db, facets, pagination
).all()
facets.order_ascending = False
assert [barnaby_rudge] == wl.works_from_database(
self._db, facets, pagination
).all()
# Ensure that availability facets are handled properly
# We still have two works:
# - barnaby_rudge is closed access and available
# - oliver_twist's access and availability is varied below
ot_lp = oliver_twist.license_pools[0]
# open access (thus available)
ot_lp.open_access = True
facets.availability = Facets.AVAILABLE_ALL
assert 2 == wl.works_from_database(self._db, facets).count()
facets.availability = Facets.AVAILABLE_NOW
assert 2 == wl.works_from_database(self._db, facets).count()
facets.availability = Facets.AVAILABLE_OPEN_ACCESS
assert 1 == wl.works_from_database(self._db, facets).count()
assert [oliver_twist] == wl.works_from_database(self._db, facets).all()
# closed access & unavailable
ot_lp.open_access = False
ot_lp.licenses_owned = 1
ot_lp.licenses_available = 0
facets.availability = Facets.AVAILABLE_ALL
assert 2 == wl.works_from_database(self._db, facets).count()
facets.availability = Facets.AVAILABLE_NOW
assert 1 == wl.works_from_database(self._db, facets).count()
assert [barnaby_rudge] == wl.works_from_database(self._db, facets).all()
facets.availability = Facets.AVAILABLE_OPEN_ACCESS
assert 0 == wl.works_from_database(self._db, facets).count()
def test_base_query(self):
# Verify that base_query makes the query we expect and then
# calls some optimization methods (not tested).
class Mock(DatabaseBackedWorkList):
@classmethod
def _modify_loading(cls, qu):
return [qu, "_modify_loading"]
@classmethod
def _defer_unused_fields(cls, qu):
return qu + ["_defer_unused_fields"]
result = Mock.base_query(self._db)
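# Because the mock _modify_loading wraps the query in a list and
# _defer_unused_fields appends to that list, the result unpacks into
# the real base query plus the two marker strings.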
[base_query, m, d] = result
expect = (
self._db.query(Work)
.join(Work.license_pools)
.join(Work.presentation_edition)
.filter(LicensePool.superceded == False)
)
assert str(expect) == str(base_query)
assert "_modify_loading" == m
assert "_defer_unused_fields" == d
def test_bibliographic_filter_clauses(self):
called = dict()
class MockWorkList(DatabaseBackedWorkList):
"""Verifies that bibliographic_filter_clauses() calls various hook
methods.
The hook methods themselves are tested separately.
"""
def __init__(self, parent):
super(MockWorkList, self).__init__()
self._parent = parent
self._inherit_parent_restrictions = False
def audience_filter_clauses(self, _db, qu):
called["audience_filter_clauses"] = (_db, qu)
return []
def customlist_filter_clauses(self, qu):
called["customlist_filter_clauses"] = qu
return qu, []
def age_range_filter_clauses(self):
called["age_range_filter_clauses"] = True
return []
def genre_filter_clause(self, qu):
called["genre_filter_clause"] = qu
return qu, None
@property
def parent(self):
return self._parent
@property
def inherit_parent_restrictions(self):
return self._inherit_parent_restrictions
class MockParent(object):
bibliographic_filter_clauses_called_with = None
def bibliographic_filter_clauses(self, _db, qu):
self.bibliographic_filter_clauses_called_with = (_db, qu)
return qu, []
parent = MockParent()
# Create a MockWorkList with a parent.
wl = MockWorkList(parent)
wl.initialize(self._default_library)
original_qu = DatabaseBackedWorkList.base_query(self._db)
# If no languages or genre IDs are specified, and the hook
# methods do nothing, then bibliographic_filter_clauses() has
# no effect.
final_qu, clauses = wl.bibliographic_filter_clauses(self._db, original_qu)
assert original_qu == final_qu
assert [] == clauses
# But at least audience_filter_clauses was called with the correct
# arguments.
_db, qu = called["audience_filter_clauses"]
assert self._db == _db
assert original_qu == qu
# age_range_filter_clauses was also called.
assert True == called["age_range_filter_clauses"]
# customlist_filter_clauses and genre_filter_clause were not
# called because the WorkList doesn't do anything relating to
# custom lists.
assert "customlist_filter_clauses" not in called
assert "genre_filter_clause" not in called
# The parent's bibliographic_filter_clauses() implementation
# was not called, because wl.inherit_parent_restrictions is
# set to False.
assert None == parent.bibliographic_filter_clauses_called_with
# Set things up so that those other methods will be called.
empty_list, ignore = self._customlist(num_entries=0)
sf, ignore = Genre.lookup(self._db, "Science Fiction")
wl.initialize(self._default_library, customlists=[empty_list], genres=[sf])
wl._inherit_parent_restrictions = True
final_qu, clauses = wl.bibliographic_filter_clauses(self._db, original_qu)
assert (
self._db,
original_qu,
) == parent.bibliographic_filter_clauses_called_with
assert original_qu == called["genre_filter_clause"]
assert original_qu == called["customlist_filter_clauses"]
# But none of those methods changed anything, because their
# implementations didn't return anything.
assert [] == clauses
# Now test the clauses that are created directly by
# bibliographic_filter_clauses.
overdrive = DataSource.lookup(self._db, DataSource.OVERDRIVE)
wl.initialize(
self._default_library,
languages=["eng"],
media=[Edition.BOOK_MEDIUM],
fiction=True,
license_datasource=overdrive,
)
final_qu, clauses = wl.bibliographic_filter_clauses(self._db, original_qu)
assert original_qu == final_qu
language, medium, fiction, datasource = clauses
# NOTE: str() doesn't prove that the values are the same, only
# that the constraints are similar.
assert str(language) == str(Edition.language.in_(wl.languages))
assert str(medium) == str(Edition.medium.in_(wl.media))
assert str(fiction) == str(Work.fiction == True)
assert str(datasource) == str(LicensePool.data_source_id == overdrive.id)
def test_bibliographic_filter_clauses_end_to_end(self):
# Verify that bibliographic_filter_clauses generates
# SQLAlchemy clauses that give the expected results when
# applied to a real `works` table.
original_qu = DatabaseBackedWorkList.base_query(self._db)
# Create a work that may or may not show up in various
# DatabaseBackedWorkLists.
sf, ignore = Genre.lookup(self._db, "Science Fiction")
english_sf = self._work(
title="English SF",
language="eng",
with_license_pool=True,
audience=Classifier.AUDIENCE_YOUNG_ADULT,
)
italian_sf = self._work(
title="Italian SF",
language="ita",
with_license_pool=True,
audience=Classifier.AUDIENCE_YOUNG_ADULT,
)
english_sf.target_age = tuple_to_numericrange((12, 14))
gutenberg = english_sf.license_pools[0].data_source
english_sf.presentation_edition.medium = Edition.BOOK_MEDIUM
english_sf.genres.append(sf)
italian_sf.genres.append(sf)
def worklist_has_books(expect_books, worklist=None, **initialize_kwargs):
"""Apply bibliographic filters to a query and verify
that it finds only the given books.
"""
if worklist is None:
worklist = DatabaseBackedWorkList()
worklist.initialize(self._default_library, **initialize_kwargs)
qu, clauses = worklist.bibliographic_filter_clauses(self._db, original_qu)
qu = qu.filter(and_(*clauses))
expect_titles = sorted([x.sort_title for x in expect_books])
actual_titles = sorted([x.sort_title for x in qu])
assert expect_titles == actual_titles
# A WorkList will find a book only if all restrictions
# are met.
worklist_has_books(
[english_sf],
languages=["eng"],
genres=[sf],
media=[Edition.BOOK_MEDIUM],
fiction=True,
license_datasource=gutenberg,
audiences=[Classifier.AUDIENCE_YOUNG_ADULT],
target_age=tuple_to_numericrange((13, 13)),
)
# This might be because there _are_ no restrictions.
worklist_has_books([english_sf, italian_sf], fiction=None)
# DatabaseBackedWorkLists with a contradictory setting for one
# of the fields associated with the English SF book will not
# find it.
worklist_has_books([italian_sf], languages=["ita"], genres=[sf])
romance, ignore = Genre.lookup(self._db, "Romance")
worklist_has_books([], languages=["eng"], genres=[romance])
worklist_has_books(
[], languages=["eng"], genres=[sf], media=[Edition.AUDIO_MEDIUM]
)
worklist_has_books([], fiction=False)
worklist_has_books(
[], license_datasource=DataSource.lookup(self._db, DataSource.OVERDRIVE)
)
# If the WorkList has custom list IDs, then works will only show up if
# they're on one of the matching CustomLists.
sf_list, ignore = self._customlist(num_entries=0)
sf_list.add_entry(english_sf)
sf_list.add_entry(italian_sf)
worklist_has_books([english_sf, italian_sf], customlists=[sf_list])
empty_list, ignore = self._customlist(num_entries=0)
worklist_has_books([], customlists=[empty_list])
# Test parent restrictions.
#
# Ordinary DatabaseBackedWorkLists can't inherit restrictions
# from their parent (TODO: no reason not to implement this)
# but Lanes can, so let's use Lanes for the rest of this test.
# This lane has books from a list of English books.
english_list, ignore = self._customlist(num_entries=0)
english_list.add_entry(english_sf)
english_lane = self._lane()
english_lane.customlists.append(english_list)
# This child of that lane has books from the list of SF books.
sf_lane = self._lane(parent=english_lane, inherit_parent_restrictions=False)
sf_lane.customlists.append(sf_list)
# When the child lane does not inherit its parent restrictions,
# both SF books show up.
worklist_has_books([english_sf, italian_sf], sf_lane)
# When the child inherits its parent's restrictions, only the
# works that are on _both_ lists show up in the lane.
sf_lane.inherit_parent_restrictions = True
worklist_has_books([english_sf], sf_lane)
# Other restrictions are inherited as well. Here, a title must
# show up on both lists _and_ be a nonfiction book. There are
# no titles that meet all three criteria.
sf_lane.fiction = False
worklist_has_books([], sf_lane)
sf_lane.fiction = True
worklist_has_books([english_sf], sf_lane)
# Parent restrictions based on genre can also be inherited.
#
# Here's a lane that finds only short stories.
short_stories, ignore = Genre.lookup(self._db, "Short Stories")
short_stories_lane = self._lane(genres=["Short Stories"])
# Here's a child of that lane, which contains science fiction.
sf_shorts = self._lane(
genres=[sf], parent=short_stories_lane, inherit_parent_restrictions=False
)
self._db.flush()
# Without the parent restriction in place, all science fiction
# shows up in sf_shorts.
worklist_has_books([english_sf, italian_sf], sf_shorts)
# With the parent restriction in place, a book must be classified
# under both science fiction and short stories to show up.
sf_shorts.inherit_parent_restrictions = True
worklist_has_books([], sf_shorts)
english_sf.genres.append(short_stories)
worklist_has_books([english_sf], sf_shorts)
def test_age_range_filter_clauses_end_to_end(self):
# Standalone test of age_range_filter_clauses().
def worklist_has_books(expect, **wl_args):
"""Make a DatabaseBackedWorkList and find all the works
that match its age_range_filter_clauses.
"""
wl = DatabaseBackedWorkList()
wl.initialize(self._default_library, **wl_args)
qu = self._db.query(Work)
clauses = wl.age_range_filter_clauses()
qu = qu.filter(and_(*clauses))
assert set(expect) == set(qu.all())
adult = self._work(
title="For adults",
audience=Classifier.AUDIENCE_ADULT,
with_license_pool=True,
)
assert None == adult.target_age
fourteen_or_fifteen = self._work(
title="For teens",
audience=Classifier.AUDIENCE_YOUNG_ADULT,
with_license_pool=True,
)
fourteen_or_fifteen.target_age = tuple_to_numericrange((14, 15))
# This DatabaseBackedWorkList contains the YA book because its
# age range overlaps the age range of the book.
worklist_has_books([fourteen_or_fifteen], target_age=(12, 14))
worklist_has_books(
[adult, fourteen_or_fifteen],
audiences=[Classifier.AUDIENCE_ADULT],
target_age=(12, 14),
)
# This lane contains no books because it skews too old for the YA
# book, but books for adults are not allowed.
older_ya = self._lane()
older_ya.target_age = (16, 17)
worklist_has_books([], target_age=(16, 17))
# Expand it to include books for adults, and the adult book
# shows up despite having no target age at all.
worklist_has_books([adult], target_age=(16, 18))
def test_audience_filter_clauses(self):
# Verify that audience_filter_clauses restricts a query to
# reflect a DatabaseBackedWorkList's audience filter.
# Create a children's book and a book for adults.
adult = self._work(
title="Diseases of the Horse",
with_license_pool=True,
with_open_access_download=True,
audience=Classifier.AUDIENCE_ADULT,
)
children = self._work(
title="Wholesome Nursery Rhymes For All Children",
with_license_pool=True,
with_open_access_download=True,
audience=Classifier.AUDIENCE_CHILDREN,
)
def for_audiences(*audiences):
"""Invoke audience_filter_clauses using the given
`audiences`, and return all the matching Work objects.
"""
wl = DatabaseBackedWorkList()
wl.audiences = audiences
qu = wl.base_query(self._db)
clauses = wl.audience_filter_clauses(self._db, qu)
if clauses:
qu = qu.filter(and_(*clauses))
return qu.all()
assert [adult] == for_audiences(Classifier.AUDIENCE_ADULT)
assert [children] == for_audiences(Classifier.AUDIENCE_CHILDREN)
# If no particular audiences are specified, no books are filtered.
assert set([adult, children]) == set(for_audiences())
def test_customlist_filter_clauses(self):
# Standalone test of customlist_filter_clauses
# If a lane has nothing to do with CustomLists,
# apply_customlist_filter does nothing.
no_lists = DatabaseBackedWorkList()
no_lists.initialize(self._default_library)
qu = no_lists.base_query(self._db)
new_qu, clauses = no_lists.customlist_filter_clauses(qu)
assert qu == new_qu
assert [] == clauses
# Now set up a Work and a CustomList that contains the work.
work = self._work(with_license_pool=True)
gutenberg = DataSource.lookup(self._db, DataSource.GUTENBERG)
assert gutenberg == work.license_pools[0].data_source
gutenberg_list, ignore = self._customlist(num_entries=0)
gutenberg_list.data_source = gutenberg
gutenberg_list_entry, ignore = gutenberg_list.add_entry(work)
# This DatabaseBackedWorkList gets every work on a specific list.
works_on_list = DatabaseBackedWorkList()
works_on_list.initialize(self._default_library, customlists=[gutenberg_list])
# This lane gets every work on every list associated with Project
# Gutenberg.
works_on_gutenberg_lists = DatabaseBackedWorkList()
works_on_gutenberg_lists.initialize(
self._default_library, list_datasource=gutenberg
)
def _run(qu, clauses):
# Run a query with certain clauses
return qu.filter(and_(*clauses)).all()
def results(wl=works_on_gutenberg_lists, must_be_featured=False):
qu = wl.base_query(self._db)
new_qu, clauses = wl.customlist_filter_clauses(qu)
# The query comes out different than it goes in -- there's a
# new join against CustomListEntry.
assert new_qu != qu
return _run(new_qu, clauses)
# Both lanes contain the work.
assert [work] == results(works_on_list)
assert [work] == results(works_on_gutenberg_lists)
# If there's another list with the same work on it, the
# work only shows up once.
gutenberg_list_2, ignore = self._customlist(num_entries=0)
gutenberg_list_2_entry, ignore = gutenberg_list_2.add_entry(work)
works_on_list._customlist_ids.append(gutenberg_list.id)
assert [work] == results(works_on_list)
# This WorkList gets every work on a list associated with Overdrive.
# There are no such lists, so the lane is empty.
overdrive = DataSource.lookup(self._db, DataSource.OVERDRIVE)
works_on_overdrive_lists = DatabaseBackedWorkList()
works_on_overdrive_lists.initialize(
self._default_library, list_datasource=overdrive
)
assert [] == results(works_on_overdrive_lists)
# It's possible to restrict a WorkList to works that were seen on
# a certain list recently.
now = utc_now()
two_days_ago = now - datetime.timedelta(days=2)
gutenberg_list_entry.most_recent_appearance = two_days_ago
# The lane will only show works that were seen within the last
# day. There are no such works.
works_on_gutenberg_lists.list_seen_in_previous_days = 1
assert [] == results()
# Now it's been loosened to three days, and the work shows up.
works_on_gutenberg_lists.list_seen_in_previous_days = 3
assert [work] == results()
# Now let's test what happens when we chain calls to this
# method.
gutenberg_list_2_wl = DatabaseBackedWorkList()
gutenberg_list_2_wl.initialize(
self._default_library, customlists=[gutenberg_list_2]
)
# These two lines won't work, because these are
# DatabaseBackedWorkLists, not Lanes, but they show the
# scenario in which this would actually happen. When
# determining which works belong in the child lane,
# Lane.customlist_filter_clauses() will be called on the
# parent lane and then on the child. In this case, we only want
# books that are on _both_ works_on_list and gutenberg_list_2.
#
# TODO: There's no reason WorkLists shouldn't be able to have
# parents and inherit parent restrictions.
#
# gutenberg_list_2_wl.parent = works_on_list
# gutenberg_list_2_wl.inherit_parent_restrictions = True
qu = works_on_list.base_query(self._db)
list_1_qu, list_1_clauses = works_on_list.customlist_filter_clauses(qu)
# The query has been modified -- we've added a join against
# CustomListEntry.
assert list_1_qu != qu
assert [work] == list_1_qu.all()
# Now call customlist_filter_clauses again so that the query
# must only match books on _both_ lists. This simulates
# what happens when the second lane is a child of the first,
# and inherits its restrictions.
both_lists_qu, list_2_clauses = gutenberg_list_2_wl.customlist_filter_clauses(
list_1_qu,
)
# The query has been modified again -- we've added a second join
# against CustomListEntry.
assert both_lists_qu != list_1_qu
both_lists_clauses = list_1_clauses + list_2_clauses
# The combined query matches the work that shows up on
# both lists.
assert [work] == _run(both_lists_qu, both_lists_clauses)
# If we remove `work` from either list, the combined query
# matches nothing.
for l in [gutenberg_list, gutenberg_list_2]:
l.remove_entry(work)
assert [] == _run(both_lists_qu, both_lists_clauses)
l.add_entry(work)
def test_works_from_database_with_superceded_pool(self):
work = self._work(with_license_pool=True)
work.license_pools[0].superceded = True
ignore, pool = self._edition(with_license_pool=True)
pool.work = work
lane = self._lane()
[w] = lane.works_from_database(self._db).all()
# Only one pool is loaded, and it's the non-superceded one.
assert [pool] == w.license_pools
class TestHierarchyWorkList(DatabaseTest):
"""Test HierarchyWorkList in terms of its two subclasses, Lane and TopLevelWorkList."""
def test_accessible_to(self):
# In addition to the general tests imposed by WorkList, a Lane
# is only accessible to a patron if it is a descendant of
# their root lane.
lane = self._lane()
patron = self._patron()
lane.root_for_patron_type = ["1"]
patron.external_type = "1"
# Descendant -> it's accessible
m = lane.accessible_to
lane.is_self_or_descendant = MagicMock(return_value=True)
assert True == m(patron)
# Not a descendant -> it's not accessible
lane.is_self_or_descendant = MagicMock(return_value=False)
assert False == m(patron)
# If the patron has no root lane, is_self_or_descendant
# isn't consulted -- everything is accessible.
patron.external_type = "2"
assert True == m(patron)
# Similarly if there is no authenticated patron.
assert True == m(None)
# TopLevelWorkList works the same way -- it's visible unless the
# patron has a top-level lane set.
wl = TopLevelWorkList()
wl.initialize(self._default_library)
assert True == wl.accessible_to(None)
assert True == wl.accessible_to(patron)
patron.external_type = "1"
assert False == wl.accessible_to(patron)
# However, a TopLevelWorkList associated with library A is not
# visible to a patron from library B.
library2 = self._library()
wl.initialize(library2)
patron.external_type = None
assert False == wl.accessible_to(patron)
class TestLane(DatabaseTest):
def test_get_library(self):
lane = self._lane()
assert self._default_library == lane.get_library(self._db)
def test_list_datasource(self):
"""Test setting and retrieving the DataSource object and
the underlying ID.
"""
lane = self._lane()
# This lane is based on a specific CustomList.
customlist1, ignore = self._customlist(num_entries=0)
customlist2, ignore = self._customlist(num_entries=0)
lane.customlists.append(customlist1)
assert None == lane.list_datasource
assert None == lane.list_datasource_id
assert [customlist1.id] == lane.customlist_ids
# Now change it so it's based on all CustomLists from a given
# DataSource.
source = customlist1.data_source
lane.list_datasource = source
assert source == lane.list_datasource
assert source.id == lane.list_datasource_id
# The lane is now based on two CustomLists instead of one.
assert set([customlist1.id, customlist2.id]) == set(lane.customlist_ids)
def test_set_audiences(self):
"""Setting Lane.audiences to a single value will
auto-convert it into a list containing one value.
"""
lane = self._lane()
lane.audiences = Classifier.AUDIENCE_ADULT
assert [Classifier.AUDIENCE_ADULT] == lane.audiences
def test_update_size(self):
class Mock(object):
# Mock the ExternalSearchIndex.count_works() method to
# return specific values without consulting an actual
# search index.
def count_works(self, filter):
values_by_medium = {
None: 102,
Edition.AUDIO_MEDIUM: 3,
Edition.BOOK_MEDIUM: 99,
}
if filter.media:
[medium] = filter.media
else:
medium = None
return values_by_medium[medium]
search_engine = Mock()
# Enable the 'ebooks' and 'audiobooks' entry points.
self._default_library.setting(EntryPoint.ENABLED_SETTING).value = json.dumps(
[AudiobooksEntryPoint.INTERNAL_NAME, EbooksEntryPoint.INTERNAL_NAME]
)
# Make a lane with some incorrect values that will be fixed by
# update_size().
fiction = self._lane(display_name="Fiction", fiction=True)
fiction.size = 44
fiction.size_by_entrypoint = {"Nonexistent entrypoint": 33}
with mock_search_index(search_engine):
fiction.update_size(self._db)
# The lane size is also calculated individually for every
# enabled entry point. EverythingEntryPoint is used for the
# total size of the lane.
assert {
AudiobooksEntryPoint.URI: 3,
EbooksEntryPoint.URI: 99,
EverythingEntryPoint.URI: 102,
} == fiction.size_by_entrypoint
assert 102 == fiction.size
def test_visibility(self):
parent = self._lane()
visible_child = self._lane(parent=parent)
invisible_child = self._lane(parent=parent)
invisible_child.visible = False
assert [visible_child] == list(parent.visible_children)
grandchild = self._lane(parent=invisible_child)
assert True == parent.visible
assert True == visible_child.visible
assert False == invisible_child.visible
# The grandchild lane is set to visible in the database, but
# it is not visible because its parent is not visible.
assert True == grandchild._visible
assert False == grandchild.visible
def test_parentage(self):
worklist = WorkList()
worklist.display_name = "A WorkList"
lane = self._lane()
child_lane = self._lane(parent=lane)
grandchild_lane = self._lane(parent=child_lane)
unrelated = self._lane()
worklist.sublanes = [child_lane]
# A WorkList has no parentage.
assert [] == list(worklist.parentage)
assert "A WorkList" == worklist.full_identifier
# The WorkList has the Lane as a child, but the Lane doesn't know
# this.
assert [] == list(lane.parentage)
assert [lane] == list(child_lane.parentage)
assert (
"%s / %s" % (lane.library.short_name, lane.display_name)
== lane.full_identifier
)
assert (
"%s / %s / %s / %s"
% (
lane.library.short_name,
lane.display_name,
child_lane.display_name,
grandchild_lane.display_name,
)
== grandchild_lane.full_identifier
)
assert [lane, child_lane, grandchild_lane] == grandchild_lane.hierarchy
# TODO: The error should be raised when we try to set the parent
# to an illegal value, not afterwards.
lane.parent = child_lane
with pytest.raises(ValueError) as excinfo:
list(lane.parentage)
assert "Lane parentage loop detected" in str(excinfo.value)
def test_is_self_or_descendant(self):
# Test the code that checks whether one Lane is 'beneath'
# a WorkList.
top_level = TopLevelWorkList()
top_level.initialize(self._default_library)
parent = self._lane()
child = self._lane(parent=parent)
# Generally this works the same as WorkList.is_self_or_descendant.
assert True == parent.is_self_or_descendant(parent)
assert True == child.is_self_or_descendant(child)
assert True == child.is_self_or_descendant(parent)
assert False == parent.is_self_or_descendant(child)
# The big exception: a TopLevelWorkList is a descendant of any
# Lane so long as they belong to the same library.
assert True == child.is_self_or_descendant(top_level)
assert True == parent.is_self_or_descendant(top_level)
library2 = self._library()
top_level.initialize(library2)
assert False == child.is_self_or_descendant(top_level)
assert False == parent.is_self_or_descendant(top_level)
def test_depth(self):
child = self._lane("sublane")
parent = self._lane("parent")
parent.sublanes.append(child)
assert 0 == parent.depth
assert 1 == child.depth
def test_url_name(self):
lane = self._lane("Fantasy / Science Fiction")
assert lane.id == lane.url_name
def test_display_name_for_all(self):
lane = self._lane("Fantasy / Science Fiction")
assert "All Fantasy / Science Fiction" == lane.display_name_for_all
def test_entrypoints(self):
"""Currently a Lane can never have entrypoints."""
assert [] == self._lane().entrypoints
def test_affected_by_customlist(self):
# Two lists.
l1, ignore = self._customlist(
data_source_name=DataSource.GUTENBERG, num_entries=0
)
l2, ignore = self._customlist(
data_source_name=DataSource.OVERDRIVE, num_entries=0
)
# A lane populated by specific lists.
lane = self._lane()
# Not affected by any lists.
for l in [l1, l2]:
assert 0 == Lane.affected_by_customlist(l).count()
# Add a lane to the list, and it becomes affected.
lane.customlists.append(l1)
assert [lane] == lane.affected_by_customlist(l1).all()
assert 0 == lane.affected_by_customlist(l2).count()
lane.customlists = []
# A lane based on all lists with the GUTENBERG data source.
lane2 = self._lane()
lane2.list_datasource = l1.data_source
# It's affected by the GUTENBERG list but not the OVERDRIVE
# list.
assert [lane2] == Lane.affected_by_customlist(l1).all()
assert 0 == Lane.affected_by_customlist(l2).count()
def test_inherited_value(self):
# Test WorkList.inherited_value.
#
# It's easier to test this in Lane because WorkLists can't have
# parents.
# This lane contains fiction.
fiction_lane = self._lane(fiction=True)
# This sublane contains nonfiction.
nonfiction_sublane = self._lane(parent=fiction_lane, fiction=False)
nonfiction_sublane.inherit_parent_restrictions = False
# This sublane doesn't specify a value for .fiction.
default_sublane = self._lane(parent=fiction_lane)
default_sublane.inherit_parent_restrictions = False
# When inherit_parent_restrictions is False,
# inherited_value("fiction") returns whatever value is set for
# .fiction.
assert None == default_sublane.inherited_value("fiction")
assert False == nonfiction_sublane.inherited_value("fiction")
# When inherit_parent_restrictions is True,
# inherited_value("fiction") returns False for the sublane
# that sets no value for .fiction.
default_sublane.inherit_parent_restrictions = True
assert True == default_sublane.inherited_value("fiction")
# The sublane that sets its own value for .fiction is unaffected.
nonfiction_sublane.inherit_parent_restrictions = True
assert False == nonfiction_sublane.inherited_value("fiction")
def test_inherited_values(self):
# Test WorkList.inherited_values.
#
# It's easier to test this in Lane because WorkLists can't have
# parents.
# This lane contains best-sellers.
best_sellers_lane = self._lane()
best_sellers, ignore = self._customlist(num_entries=0)
best_sellers_lane.customlists.append(best_sellers)
# This sublane contains staff picks.
staff_picks_lane = self._lane(parent=best_sellers_lane)
staff_picks, ignore = self._customlist(num_entries=0)
staff_picks_lane.customlists.append(staff_picks)
# What does it mean that the 'staff picks' lane is *inside*
# the 'best sellers' lane?
# If inherit_parent_restrictions is False, it doesn't mean
# anything in particular. This lane contains books that
# are on the staff picks list.
staff_picks_lane.inherit_parent_restrictions = False
assert [[staff_picks]] == staff_picks_lane.inherited_values("customlists")
# If inherit_parent_restrictions is True, then the lane
# has *two* sets of restrictions: a book must be on both
# the staff picks list *and* the best sellers list.
staff_picks_lane.inherit_parent_restrictions = True
x = staff_picks_lane.inherited_values("customlists")
assert sorted([[staff_picks], [best_sellers]]) == sorted(
staff_picks_lane.inherited_values("customlists")
)
def test_setting_target_age_locks_audiences(self):
lane = self._lane()
lane.target_age = (16, 18)
assert sorted(
[Classifier.AUDIENCE_YOUNG_ADULT, Classifier.AUDIENCE_ADULT]
) == sorted(lane.audiences)
lane.target_age = (0, 2)
assert [Classifier.AUDIENCE_CHILDREN] == lane.audiences
lane.target_age = 14
assert [Classifier.AUDIENCE_YOUNG_ADULT] == lane.audiences
# It's not possible to modify .audiences to a value that's
# incompatible with .target_age.
lane.audiences = lane.audiences
def doomed():
lane.audiences = [Classifier.AUDIENCE_CHILDREN]
with pytest.raises(ValueError) as excinfo:
doomed()
assert "Cannot modify Lane.audiences when Lane.target_age is set" in str(
excinfo.value
)
# Setting target_age to None leaves preexisting .audiences in place.
lane.target_age = None
assert [Classifier.AUDIENCE_YOUNG_ADULT] == lane.audiences
# But now you can modify .audiences.
lane.audiences = [Classifier.AUDIENCE_CHILDREN]
def test_target_age_treats_all_adults_equally(self):
"""We don't distinguish between different age groups for adults."""
lane = self._lane()
lane.target_age = (35, 40)
assert tuple_to_numericrange((18, 18)) == lane.target_age
def test_uses_customlists(self):
lane = self._lane()
assert False == lane.uses_customlists
customlist, ignore = self._customlist(num_entries=0)
lane.customlists = [customlist]
assert True == lane.uses_customlists
gutenberg = DataSource.lookup(self._db, DataSource.GUTENBERG)
lane.list_datasource = gutenberg
self._db.commit()
assert True == lane.uses_customlists
# Note that the specific custom list was removed from this
# Lane when it switched to using all lists from a certain data
# source.
assert [] == lane.customlists
# A Lane may use custom lists by virtue of inheriting
# restrictions from its parent.
child = self._lane(parent=lane)
child.inherit_parent_restrictions = True
assert True == child.uses_customlists
def test_genre_ids(self):
# By default, when you add a genre to a lane, you are saying
# that Works classified under it and all its subgenres should
# show up in the lane.
fantasy = self._lane()
fantasy.add_genre("Fantasy")
# At this point the lane picks up Fantasy and all of its
# subgenres.
expect = [
Genre.lookup(self._db, genre)[0].id
for genre in [
"Fantasy",
"Epic Fantasy",
"Historical Fantasy",
"Urban Fantasy",
]
]
assert set(expect) == fantasy.genre_ids
# Let's exclude one of the subgenres.
fantasy.add_genre("Urban Fantasy", inclusive=False)
urban_fantasy, ignore = Genre.lookup(self._db, "Urban Fantasy")
# That genre's ID has disappeared from .genre_ids.
assert urban_fantasy.id not in fantasy.genre_ids
# Let's add Science Fiction, but not its subgenres.
fantasy.add_genre("Science Fiction", recursive=False)
science_fiction, ignore = Genre.lookup(self._db, "Science Fiction")
space_opera, ignore = Genre.lookup(self._db, "Space Opera")
assert science_fiction.id in fantasy.genre_ids
assert space_opera.id not in fantasy.genre_ids
# Let's add Space Opera, but exclude Science Fiction and its
# subgenres.
fantasy.lane_genres = []
fantasy.add_genre("Space Opera")
fantasy.add_genre("Science Fiction", inclusive=False, recursive=True)
# That eliminates everything.
assert set([]) == fantasy.genre_ids
# NOTE: We don't have any doubly nested subgenres, so we can't
# test the case where a genre is included recursively but one
# of its subgenres is excluded recursively (in which case the
# sub-subgenre would be excluded), but it should work.
# We can exclude a genre even when no genres are explicitly included.
# The lane will include all genres that aren't excluded.
no_inclusive_genres = self._lane()
no_inclusive_genres.add_genre("Science Fiction", inclusive=False)
assert len(no_inclusive_genres.genre_ids) > 10
assert science_fiction.id not in no_inclusive_genres.genre_ids
def test_customlist_ids(self):
# WorkLists always return None for customlist_ids.
wl = WorkList()
wl.initialize(self._default_library)
assert None == wl.customlist_ids
# When you add a CustomList to a Lane, you are saying that works
# from that CustomList can appear in the Lane.
nyt1, ignore = self._customlist(num_entries=0, data_source_name=DataSource.NYT)
nyt2, ignore = self._customlist(num_entries=0, data_source_name=DataSource.NYT)
no_lists = self._lane()
assert None == no_lists.customlist_ids
has_list = self._lane()
has_list.customlists.append(nyt1)
assert [nyt1.id] == has_list.customlist_ids
# When you set a Lane's list_datasource, you're saying that
# works appear in the Lane if they are on _any_ CustomList from
# that data source.
has_list_source = self._lane()
has_list_source.list_datasource = DataSource.lookup(self._db, DataSource.NYT)
assert set([nyt1.id, nyt2.id]) == set(has_list_source.customlist_ids)
# If there are no CustomLists from that data source, an empty
# list is returned.
has_no_lists = self._lane()
has_no_lists.list_datasource = DataSource.lookup(self._db, DataSource.OVERDRIVE)
assert [] == has_no_lists.customlist_ids
def test_search_target(self):
# A Lane that is the root for a patron type can be
# searched.
root_lane = self._lane()
root_lane.root_for_patron_type = ["A"]
assert root_lane == root_lane.search_target
# A Lane that's the descendant of a root Lane for a
# patron type will search that root Lane.
child = self._lane(parent=root_lane)
assert root_lane == child.search_target
grandchild = self._lane(parent=child)
assert root_lane == grandchild.search_target
# Any Lane that does not descend from a root Lane will
# get a WorkList as its search target, with some
# restrictions from the Lane.
lane = self._lane()
lane.languages = ["eng", "ger"]
target = lane.search_target
assert "English/Deutsch" == target.display_name
assert ["eng", "ger"] == target.languages
assert None == target.audiences
assert None == target.media
# If there are too many languages, they're left out of the
# display name (so the search description will be "Search").
lane.languages = ["eng", "ger", "spa", "fre"]
target = lane.search_target
assert "" == target.display_name
assert ["eng", "ger", "spa", "fre"] == target.languages
assert None == target.audiences
assert None == target.media
lane.languages = ["eng"]
target = lane.search_target
assert "English" == target.display_name
assert ["eng"] == target.languages
assert None == target.audiences
assert None == target.media
target = lane.search_target
assert "English" == target.display_name
assert ["eng"] == target.languages
assert None == target.audiences
assert None == target.media
# Media aren't included in the description, but they
# are used in search.
lane.media = [Edition.BOOK_MEDIUM]
target = lane.search_target
assert "English" == target.display_name
assert ["eng"] == target.languages
assert None == target.audiences
assert [Edition.BOOK_MEDIUM] == target.media
# Audiences are only used in search if one of the
# audiences is young adult or children.
lane.audiences = [Classifier.AUDIENCE_ADULTS_ONLY]
target = lane.search_target
assert "English" == target.display_name
assert ["eng"] == target.languages
assert None == target.audiences
assert [Edition.BOOK_MEDIUM] == target.media
lane.audiences = [Classifier.AUDIENCE_ADULT, Classifier.AUDIENCE_YOUNG_ADULT]
target = lane.search_target
assert "English Adult and Young Adult" == target.display_name
assert ["eng"] == target.languages
assert [
Classifier.AUDIENCE_ADULT,
Classifier.AUDIENCE_YOUNG_ADULT,
] == target.audiences
assert [Edition.BOOK_MEDIUM] == target.media
# If there are too many audiences, they're left
# out of the display name.
lane.audiences = [
Classifier.AUDIENCE_ADULT,
Classifier.AUDIENCE_YOUNG_ADULT,
Classifier.AUDIENCE_CHILDREN,
]
target = lane.search_target
assert "English" == target.display_name
assert ["eng"] == target.languages
assert [
Classifier.AUDIENCE_ADULT,
Classifier.AUDIENCE_YOUNG_ADULT,
Classifier.AUDIENCE_CHILDREN,
] == target.audiences
assert [Edition.BOOK_MEDIUM] == target.media
def test_search(self):
# Searching a Lane calls search() on its search_target.
#
# TODO: This test could be trimmed down quite a bit with
# mocks.
work = self._work(with_license_pool=True)
lane = self._lane()
search_client = MockExternalSearchIndex()
search_client.bulk_update([work])
pagination = Pagination(offset=0, size=1)
results = lane.search(
self._db, work.title, search_client, pagination=pagination
)
target_results = lane.search_target.search(
self._db, work.title, search_client, pagination=pagination
)
assert results == target_results
# The single search result was returned as a Work.
[result] = results
assert work == result
# This still works if the lane is its own search_target.
lane.root_for_patron_type = ["A"]
results = lane.search(
self._db, work.title, search_client, pagination=pagination
)
target_results = lane.search_target.search(
self._db, work.title, search_client, pagination=pagination
)
assert results == target_results
def test_search_propagates_facets(self):
"""Lane.search propagates facets when calling search() on
its search target.
"""
class Mock(object):
def search(self, *args, **kwargs):
self.called_with = kwargs["facets"]
mock = Mock()
lane = self._lane()
old_lane_search_target = Lane.search_target
old_wl_search = WorkList.search
Lane.search_target = mock
facets = SearchFacets()
lane.search(self._db, "query", None, facets=facets)
assert facets == mock.called_with
# Now try the case where a lane is its own search target. The
# Facets object is propagated to the WorkList.search().
mock.called_with = None
Lane.search_target = lane
WorkList.search = mock.search
lane.search(self._db, "query", None, facets=facets)
assert facets == mock.called_with
# Restore methods that were mocked.
Lane.search_target = old_lane_search_target
WorkList.search = old_wl_search
def test_explain(self):
parent = self._lane(display_name="Parent")
parent.priority = 1
child = self._lane(parent=parent, display_name="Child")
child.priority = 2
data = parent.explain()
assert [
"ID: %s" % parent.id,
"Library: %s" % self._default_library.short_name,
"Priority: 1",
"Display name: Parent",
] == data
data = child.explain()
assert [
"ID: %s" % child.id,
"Library: %s" % self._default_library.short_name,
"Parent ID: %s (Parent)" % parent.id,
"Priority: 2",
"Display name: Child",
] == data
def test_groups_propagates_facets(self):
# Lane.groups propagates a received Facets object into
# _groups_for_lanes.
def mock(self, _db, relevant_lanes, queryable_lanes, facets, *args, **kwargs):
self.called_with = facets
return []
old_value = Lane._groups_for_lanes
Lane._groups_for_lanes = mock
lane = self._lane()
facets = FeaturedFacets(0)
lane.groups(self._db, facets=facets)
assert facets == lane.called_with
Lane._groups_for_lanes = old_value
class TestWorkListGroupsEndToEnd(EndToEndSearchTest):
# A comprehensive end-to-end test of WorkList.groups()
# using a real Elasticsearch index.
#
# Helper methods are tested in a different class, TestWorkListGroups
def populate_works(self):
def _w(**kwargs):
"""Helper method to create a work with license pool."""
return self._work(with_license_pool=True, **kwargs)
# In this library, the groups feed includes at most two books
# for each lane.
library = self._default_library
library.setting(library.FEATURED_LANE_SIZE).value = "2"
# Create eight works.
self.hq_litfic = _w(title="HQ LitFic", fiction=True, genre="Literary Fiction")
self.hq_litfic.quality = 0.8
self.lq_litfic = _w(title="LQ LitFic", fiction=True, genre="Literary Fiction")
self.lq_litfic.quality = 0
self.hq_sf = _w(title="HQ SF", genre="Science Fiction", fiction=True)
# Add a lot of irrelevant genres to one of the works. This
# won't affect the results.
for genre in ["Westerns", "Horror", "Erotica"]:
genre_obj, is_new = Genre.lookup(self._db, genre)
get_one_or_create(self._db, WorkGenre, work=self.hq_sf, genre=genre_obj)
self.hq_sf.quality = 0.8
self.mq_sf = _w(title="MQ SF", genre="Science Fiction", fiction=True)
self.mq_sf.quality = 0.6
self.lq_sf = _w(title="LQ SF", genre="Science Fiction", fiction=True)
self.lq_sf.quality = 0.1
self.hq_ro = _w(title="HQ Romance", genre="Romance", fiction=True)
self.hq_ro.quality = 0.8
self.mq_ro = _w(title="MQ Romance", genre="Romance", fiction=True)
self.mq_ro.quality = 0.6
# This work is in a different language -- necessary to run the
# LQRomanceEntryPoint test below.
self.lq_ro = _w(
title="LQ Romance", genre="Romance", fiction=True, language="lan"
)
self.lq_ro.quality = 0.1
self.nonfiction = _w(title="Nonfiction", fiction=False)
# One of these works (mq_sf) is a best-seller and also a staff
# pick.
self.best_seller_list, ignore = self._customlist(num_entries=0)
self.best_seller_list.add_entry(self.mq_sf)
self.staff_picks_list, ignore = self._customlist(num_entries=0)
self.staff_picks_list.add_entry(self.mq_sf)
def test_groups(self):
if not self.search:
return
# Create a 'Fiction' lane with five sublanes.
fiction = self._lane("Fiction")
fiction.fiction = True
# "Best Sellers", which will contain one book.
best_sellers = self._lane("Best Sellers", parent=fiction)
best_sellers.customlists.append(self.best_seller_list)
# "Staff Picks", which will contain the same book.
staff_picks = self._lane("Staff Picks", parent=fiction)
staff_picks.customlists.append(self.staff_picks_list)
# "Science Fiction", which will contain two books (but
# will not contain the best-seller).
sf_lane = self._lane(
"Science Fiction", parent=fiction, genres=["Science Fiction"]
)
# "Romance", which will contain two books.
romance_lane = self._lane("Romance", parent=fiction, genres=["Romance"])
# "Discredited Nonfiction", which contains a book that would
# not normally appear in 'Fiction'.
discredited_nonfiction = self._lane(
"Discredited Nonfiction", fiction=False, parent=fiction
)
discredited_nonfiction.inherit_parent_restrictions = False
# Since we have a bunch of lanes and works, plus an
# Elasticsearch index, let's take this opportunity to verify that
# WorkList.works and DatabaseBackedWorkList.works_from_database
# give the same results.
facets = DatabaseBackedFacets(
self._default_library,
collection=Facets.COLLECTION_FULL,
availability=Facets.AVAILABLE_ALL,
order=Facets.ORDER_TITLE,
)
for lane in [
fiction,
best_sellers,
staff_picks,
sf_lane,
romance_lane,
discredited_nonfiction,
]:
t1 = [x.id for x in lane.works(self._db, facets)]
t2 = [x.id for x in lane.works_from_database(self._db, facets)]
assert t1 == t2
def assert_contents(g, expect):
"""Assert that a generator yields the expected
(Work, lane) 2-tuples.
"""
results = list(g)
expect = [(x[0].sort_title, x[1].display_name) for x in expect]
actual = [(x[0].sort_title, x[1].display_name) for x in results]
for i, expect_item in enumerate(expect):
if i >= len(actual):
actual_item = None
else:
actual_item = actual[i]
assert expect_item == actual_item, (
"Mismatch in position %d: Expected %r, got %r.\nOverall, expected:\n%r\nGot:\n%r:"
% (i, expect_item, actual_item, expect, actual)
)
assert len(expect) == len(actual), (
"Expect matches actual, but actual has extra members.\nOverall, expected:\n%r\nGot:\n%r:"
% (expect, actual)
)
def make_groups(lane, facets=None, **kwargs):
# Run the `WorkList.groups` method in a way that's
# instrumented for this unit test.
# Most of the time, we want a simple deterministic query.
facets = facets or FeaturedFacets(1, random_seed=Filter.DETERMINISTIC)
return lane.groups(
self._db, facets=facets, search_engine=self.search, debug=True, **kwargs
)
assert_contents(
make_groups(fiction),
[
# The lanes based on lists feature every title on the
# list. This isn't enough to pad out the lane to
# FEATURED_LANE_SIZE, but nothing else belongs in the
# lane.
(self.mq_sf, best_sellers),
# In fact, both lanes feature the same title -- this
# generally won't happen but it can happen when
# multiple lanes are based on lists that feature the
# same title.
(self.mq_sf, staff_picks),
# The genre-based lanes contain FEATURED_LANE_SIZE
# (two) titles each. The 'Science Fiction' lane
# features a low-quality work because the
# medium-quality work was already used above.
(self.hq_sf, sf_lane),
(self.lq_sf, sf_lane),
(self.hq_ro, romance_lane),
(self.mq_ro, romance_lane),
# The 'Discredited Nonfiction' lane contains a single
# book. There just weren't enough matching books to fill
# out the lane to FEATURED_LANE_SIZE.
(self.nonfiction, discredited_nonfiction),
# The 'Fiction' lane contains a title that fits in the
# fiction lane but was not classified under any other
# lane. It also contains a title that was previously
# featured earlier. The search index knows about a
# title (lq_litfic) that was not previously featured,
# but we didn't see it because the Elasticsearch query
# didn't happen to fetch it.
#
# Each lane gets a separate query, and there were too
# many high-quality works in 'fiction' for the
# low-quality one to show up.
(self.hq_litfic, fiction),
(self.hq_sf, fiction),
],
)
# If we ask only about 'Fiction', not including its sublanes,
# we get only the subset of the books previously returned for
# 'fiction'.
assert_contents(
make_groups(fiction, include_sublanes=False),
[(self.hq_litfic, fiction), (self.hq_sf, fiction)],
)
# If we exclude 'Fiction' from its own grouped feed, we get
# all the other books/lane combinations *except for* the books
# associated directly with 'Fiction'.
fiction.include_self_in_grouped_feed = False
assert_contents(
make_groups(fiction),
[
(self.mq_sf, best_sellers),
(self.mq_sf, staff_picks),
(self.hq_sf, sf_lane),
(self.lq_sf, sf_lane),
(self.hq_ro, romance_lane),
(self.mq_ro, romance_lane),
(self.nonfiction, discredited_nonfiction),
],
)
fiction.include_self_in_grouped_feed = True
# When a lane has no sublanes, its behavior is the same whether
# it is called with include_sublanes true or false.
for include_sublanes in (True, False):
assert_contents(
discredited_nonfiction.groups(
self._db, include_sublanes=include_sublanes
),
[(self.nonfiction, discredited_nonfiction)],
)
# If we make the lanes thirstier for content, we see slightly
# different behavior.
library = self._default_library
library.setting(library.FEATURED_LANE_SIZE).value = "3"
assert_contents(
make_groups(fiction),
[
# The list-based lanes are the same as before.
(self.mq_sf, best_sellers),
(self.mq_sf, staff_picks),
# After using every single science fiction work that
# wasn't previously used, we reuse self.mq_sf to pad the
# "Science Fiction" lane up to three items. It's
# better to have self.lq_sf show up before self.mq_sf, even
# though it's lower quality, because self.lq_sf hasn't been
# used before.
(self.hq_sf, sf_lane),
(self.lq_sf, sf_lane),
(self.mq_sf, sf_lane),
# The 'Romance' lane now contains all three Romance
# titles, with the higher-quality titles first.
(self.hq_ro, romance_lane),
(self.mq_ro, romance_lane),
(self.lq_ro, romance_lane),
# The 'Discredited Nonfiction' lane is the same as
# before.
(self.nonfiction, discredited_nonfiction),
# After using every single fiction work that wasn't
# previously used, we reuse high-quality works to pad
# the "Fiction" lane to three items. The
# lowest-quality Romance title doesn't show up here
# anymore, because the 'Romance' lane claimed it. If
# we have to reuse titles, we'll reuse the
# high-quality ones.
(self.hq_litfic, fiction),
(self.hq_sf, fiction),
(self.hq_ro, fiction),
],
)
# Let's see how entry points affect the feeds.
#
# There are no audiobooks in the system, so passing in a
# FeaturedFacets scoped to the AudiobooksEntryPoint excludes everything.
facets = FeaturedFacets(0, entrypoint=AudiobooksEntryPoint)
_db = self._db
assert [] == list(fiction.groups(self._db, facets=facets))
# Here's an entry point that applies a language filter
# that only finds one book.
class LQRomanceEntryPoint(EntryPoint):
URI = ""
@classmethod
def modify_search_filter(cls, filter):
filter.languages = ["lan"]
facets = FeaturedFacets(
1, entrypoint=LQRomanceEntryPoint, random_seed=Filter.DETERMINISTIC
)
assert_contents(
make_groups(fiction, facets=facets),
[
# The single recognized book shows up in both lanes
# that can show it.
(self.lq_ro, romance_lane),
(self.lq_ro, fiction),
],
)
# Now, instead of relying on the 'Fiction' lane, make a
# WorkList containing two different lanes, and call groups() on
# the WorkList.
class MockWorkList(object):
display_name = "Mock"
visible = True
priority = 2
def groups(slf, _db, include_sublanes, pagination=None, facets=None):
yield self.lq_litfic, slf
mock = MockWorkList()
wl = WorkList()
wl.initialize(self._default_library, children=[best_sellers, staff_picks, mock])
# We get results from the two lanes and from the MockWorkList.
# Since the MockWorkList wasn't a lane, its results were obtained
# by calling groups() recursively.
assert_contents(
wl.groups(self._db),
[
(self.mq_sf, best_sellers),
(self.mq_sf, staff_picks),
(self.lq_litfic, mock),
],
)
class TestWorkListGroups(DatabaseTest):
def setup_method(self):
super(TestWorkListGroups, self).setup_method()
# Make sure random selections and range generations go the
# same way every time.
random.seed(42)
def test_groups_for_lanes_adapts_facets(self):
# Verify that _groups_for_lanes gives each of a WorkList's
# non-queryable children the opportunity to adapt the incoming
# FeaturedFacets objects to its own needs.
class MockParent(WorkList):
def _featured_works_with_lanes(
self, _db, lanes, pagination, facets, *args, **kwargs
):
self._featured_works_with_lanes_called_with = (
lanes,
pagination,
facets,
)
return super(MockParent, self)._featured_works_with_lanes(
_db, lanes, pagination, facets, *args, **kwargs
)
class MockChild(WorkList):
def __init__(self, work):
self.work = work
self.id = work.title
super(MockChild, self).__init__()
def overview_facets(self, _db, facets):
self.overview_facets_called_with = (_db, facets)
return "Custom facets for %s." % self.id
def works(self, _db, pagination, facets, *args, **kwargs):
self.works_called_with = (pagination, facets)
return [self.work]
parent = MockParent()
child1 = MockChild(self._work(title="Lane 1"))
child2 = MockChild(self._work(title="Lane 2"))
children = [child1, child2]
for wl in children:
wl.initialize(library=self._default_library)
parent.initialize(library=self._default_library, children=[child1, child2])
# We're going to make a grouped feed in which both children
# are relevant, but neither one is queryable.
relevant = parent.children
queryable = []
pagination = Pagination(size=2)
facets = FeaturedFacets(0)
groups = list(
parent._groups_for_lanes(self._db, relevant, queryable, pagination, facets)
)
# Each sublane was asked in turn to provide works for the feed.
assert [(child1.work, child1), (child2.work, child2)] == groups
# But we're more interested in what happened to the faceting objects.
# The original faceting object was passed into
# _featured_works_with_lanes, but none of the lanes were
# queryable, so it ended up doing nothing.
assert ([], pagination, facets) == parent._featured_works_with_lanes_called_with
# Each non-queryable sublane was given a chance to adapt that
# faceting object to its own needs.
for wl in children:
assert wl.overview_facets_called_with == (self._db, facets)
# Each lane's adapted faceting object was then passed into
# works().
assert (pagination, "Custom facets for Lane 1.") == child1.works_called_with
assert (pagination, "Custom facets for Lane 2.") == child2.works_called_with
# If no pagination object is passed in (the most common case),
# a new Pagination object is created based on the featured lane
# size for the library.
groups = list(
parent._groups_for_lanes(self._db, relevant, queryable, None, facets)
)
(ignore1, pagination, ignore2) = parent._featured_works_with_lanes_called_with
assert isinstance(pagination, Pagination)
# For each sublane, we ask for 10% more items than we need to
# reduce the chance that we'll need to put the same item in
# multiple lanes.
assert int(self._default_library.featured_lane_size * 1.10) == pagination.size
def test_featured_works_with_lanes(self):
# _featured_works_with_lanes builds a list of queries and
# passes the list into search_engine.works_query_multi(). It
# passes the search results into works_for_resultsets() to
# create a sequence of (Work, Lane) 2-tuples.
class MockWorkList(WorkList):
"""Mock the behavior of WorkList that's not being tested here --
overview_facets() for the child lanes that are being
searched, and works_for_resultsets() for the parent that's
doing the searching.
"""
def __init__(self, *args, **kwargs):
# Track all the times overview_facets is called (it
# should be called twice), plus works_for_resultsets
# (which should only be called once).
super(MockWorkList, self).__init__(*args, **kwargs)
self.works_for_resultsets_calls = []
self.overview_facets_calls = []
def overview_facets(self, _db, facets):
# Track that overview_facets was called with a
# FeaturedFacets object. Then call the superclass
# implementation -- we need to return a real Facets
# object so it can be turned into a Filter.
assert isinstance(facets, FeaturedFacets)
self.overview_facets_calls.append((_db, facets))
return super(MockWorkList, self).overview_facets(_db, facets)
def works_for_resultsets(self, _db, resultsets, facets=None):
# Take some lists of (mocked) of search results and turn
# them into lists of (mocked) Works.
self.works_for_resultsets_calls.append((_db, resultsets))
one_lane_worth = [["Here is", "one lane", "of works"]]
return one_lane_worth * len(resultsets)
class MockSearchEngine(object):
"""Mock a multi-query call to an Elasticsearch server."""
def __init__(self):
self.called_with = None
def query_works_multi(self, queries):
# Pretend to run a multi-query and return three lists of
# mocked results.
self.called_with = queries
return [["some"], ["search"], ["results"]]
# Now the actual test starts. We've got a parent lane with two
# children.
parent = MockWorkList()
child1 = MockWorkList()
child2 = MockWorkList()
parent.initialize(
library=self._default_library,
children=[child1, child2],
display_name="Parent lane -- call my _featured_works_with_lanes()!",
)
child1.initialize(library=self._default_library, display_name="Child 1")
child2.initialize(library=self._default_library, display_name="Child 2")
# We've got a search engine that's ready to find works in any
# of these lanes.
search = MockSearchEngine()
# Set up facets and pagination, and call the method that's
# being tested.
facets = FeaturedFacets(0.1)
pagination = object()
results = parent._featured_works_with_lanes(
self._db, [child1, child2], pagination, facets, search_engine=search
)
results = list(results)
# MockSearchEngine.query_works_multi was called on a list of
# queries it prepared from child1 and child2.
q1, q2 = search.called_with
# These queries are almost the same.
for query in search.called_with:
# Neither has a query string.
assert None == query[0]
# Both have the same pagination object.
assert pagination == query[2]
# But each query has a different Filter.
f1 = q1[1]
f2 = q2[1]
assert f1 != f2
# How did these Filters come about? Well, for each lane, we
# called overview_facets() and passed in the same
# FeaturedFacets object.
assert (self._db, facets) == child1.overview_facets_calls.pop()
assert [] == child1.overview_facets_calls
child1_facets = child1.overview_facets(self._db, facets)
assert (self._db, facets) == child2.overview_facets_calls.pop()
assert [] == child2.overview_facets_calls
child2_facets = child2.overview_facets(self._db, facets)
# We then passed each result into Filter.from_worklist, along
# with the corresponding lane.
compare_f1 = Filter.from_worklist(self._db, child1, child1_facets)
compare_f2 = Filter.from_worklist(self._db, child2, child2_facets)
# Reproducing that code inside this test, which we just did,
# gives us Filter objects -- compare_f1 and compare_f2 --
# identical to the ones passed into query_works_multi -- f1
# and f2. We know they're the same because they build() to
# identical dictionaries.
assert compare_f1.build() == f1.build()
assert compare_f2.build() == f2.build()
# So we ended up with q1 and q2, two queries to find the works
# from child1 and child2. That's what was passed into
# query_works_multi().
# We know that query_works_multi() returned: a list
# of lists of fake "results" that looked like this:
# [["some"], ["search"], ["results"]]
#
# This was passed into parent.works_for_resultsets():
call = parent.works_for_resultsets_calls.pop()
assert call == (self._db, [["some"], ["search"], ["results"]])
assert [] == parent.works_for_resultsets_calls
# The return value of works_for_resultsets -- another list of
# lists -- was then turned into a sequence of ('work', Lane)
# 2-tuples.
assert [
("Here is", child1),
("one lane", child1),
("of works", child1),
("Here is", child2),
("one lane", child2),
("of works", child2),
] == results
# And that's how we got a sequence of 2-tuples mapping out a
# grouped OPDS feed.
def test__size_for_facets(self):
lane = self._lane()
m = lane._size_for_facets
ebooks, audio, everything, nothing = [
FeaturedFacets(minimum_featured_quality=0.5, entrypoint=x)
for x in (
EbooksEntryPoint,
AudiobooksEntryPoint,
EverythingEntryPoint,
None,
)
]
# When Lane.size_by_entrypoint is not set, Lane.size is used.
# This should only happen immediately after a site is upgraded.
lane.size = 100
for facets in (ebooks, audio):
assert 100 == lane._size_for_facets(facets)
# Once Lane.size_by_entrypoint is set, it's used when possible.
lane.size_by_entrypoint = {
EverythingEntryPoint.URI: 99,
EbooksEntryPoint.URI: 1,
AudiobooksEntryPoint.URI: 2,
}
assert 99 == m(None)
assert 99 == m(nothing)
assert 99 == m(everything)
assert 1 == m(ebooks)
assert 2 == m(audio)
# If size_by_entrypoint contains no estimate for a given
# EntryPoint URI, the overall lane size is used. This can
# happen between the time an EntryPoint is enabled and the
# lane size refresh script is run.
del lane.size_by_entrypoint[AudiobooksEntryPoint.URI]
assert 100 == m(audio)
``` |
{
"source": "jonathangreen/library-registry",
"score": 3
} |
#### File: jonathangreen/library-registry/authentication_document.py
```python
import json
from collections import defaultdict
from flask_babel import lazy_gettext as _
from sqlalchemy.orm.exc import MultipleResultsFound, NoResultFound
from sqlalchemy.orm.session import Session
from model import Audience, CollectionSummary, Place, ServiceArea, get_one_or_create
from problem_details import INVALID_INTEGRATION_DOCUMENT
class AuthenticationDocument(object):
"""Parse an Authentication For OPDS document, including the
Library Simplified-specific extensions, extracting all the information
that's of interest to the library registry.
"""
ANONYMOUS_ACCESS_REL = "https://librarysimplified.org/rel/auth/anonymous"
AUTHENTICATION_DOCUMENT_REL = "http://opds-spec.org/auth/document"
MEDIA_TYPE = "application/vnd.opds.authentication.v1.0+json"
COVERAGE_EVERYWHERE = "everywhere"
# The list of color schemes supported by SimplyE.
SIMPLYE_COLOR_SCHEMES = [
"red",
"blue",
"gray",
"gold",
"green",
"teal",
"purple",
]
PUBLIC_AUDIENCE = "public"
AUDIENCES = [
PUBLIC_AUDIENCE,
"educational-primary",
"educational-secondary",
"research",
"print-disability",
"other",
]
def __init__(
self,
_db,
id,
title,
authentication,
service_description,
color_scheme,
collection_size,
public_key,
audiences,
service_area,
focus_area,
links,
place_class=Place,
):
self.id = id
self.title = title
self.authentication = authentication
self.service_description = service_description
self.color_scheme = color_scheme
self.collection_size = collection_size
self.public_key = public_key
self.audiences = audiences or [self.PUBLIC_AUDIENCE]
self.service_area, self.focus_area = self.parse_service_and_focus_area(
_db, service_area, focus_area, place_class
)
self.links = links
self.website = self.extract_link(rel="alternate", require_type="text/html")
self.online_registration = self.has_link(rel="register")
self.root = self.extract_link(
rel="start", prefer_type="application/atom+xml;profile=opds-catalog"
)
logo = self.extract_link(rel="logo")
self.logo = None
self.logo_link = None
if logo:
data = logo.get("href", "")
if data and data.startswith("data:"):
self.logo = data
else:
self.logo_link = logo
self.anonymous_access = False
for flow in self.authentication_flows:
if flow.get("type") == self.ANONYMOUS_ACCESS_REL:
self.anonymous_access = True
break
@property
def authentication_flows(self):
"""Return all valid authentication flows in this document."""
for i in self.authentication:
if not isinstance(i, dict):
# Not a valid authentication flow.
continue
yield i
def extract_link(self, rel, require_type=None, prefer_type=None):
"""Find a link with the given link relation in the main authentication
document.
Does not consider links found in the authentication flows.
:param rel: The link must use this as the link relation.
:param require_type: The link must have this as its type.
:param prefer_type: A link with this type is better than a link of
some other type.
"""
return self._extract_link(self.links, rel, require_type, prefer_type)
def has_link(self, rel):
"""Is there a link with this link relation anywhere in the document?
This checks both the main document and the authentication flows.
:param rel: The link must have this link relation.
:return: True if there is a link with the link relation in the document,
False otherwise.
"""
if self._extract_link(self.links, rel):
return True
# We couldn't find a matching link in the main set of
# links, but maybe there's a matching link associated with
# a particular authentication flow.
for flow in self.authentication_flows:
if self._extract_link(flow.get("links", []), rel):
return True
return False
@classmethod
def parse_service_and_focus_area(
cls, _db, service_area, focus_area, place_class=Place
):
if service_area:
service_area = cls.parse_coverage(
_db, service_area, place_class=place_class
)
else:
service_area = [], {}, {}
if focus_area:
focus_area = cls.parse_coverage(_db, focus_area, place_class=place_class)
else:
focus_area = service_area
return service_area, focus_area
@classmethod
def parse_coverage(cls, _db, coverage, place_class=Place):
"""Derive Place objects from an Authentication For OPDS coverage
object (i.e. a value for `service_area` or `focus_area`)
:param coverage: An Authentication For OPDS coverage object.
:param place_class: In unit tests, pass in a mock replacement
for the Place class here.
:return: A 3-tuple (places, unknown, ambiguous).
`places` is a list of Place model objects.
`unknown` is a coverage object representing the subset of
`coverage` that had no corresponding Place objects. This
object will not be used for any purpose except error display.
`ambiguous` is a coverage object representing the subset of
`coverage` that had more than one corresponding Place
object. This object will not be used for any purpose except
error display.
"""
place_objs = []
unknown = defaultdict(list)
ambiguous = defaultdict(list)
if coverage == cls.COVERAGE_EVERYWHERE:
# This library covers the entire universe! No need to
# parse anything.
place_objs.append(place_class.everywhere(_db))
coverage = dict() # Do no more processing
elif not isinstance(coverage, dict):
# The coverage is not in { nation: place } format.
# Convert it into that format using the default nation.
default_nation = place_class.default_nation(_db)
if default_nation:
coverage = {default_nation.abbreviated_name: coverage}
else:
# Oops, that's not going to work. We don't know which
# nation this place is in. Return a coverage object
# that makes it semi-clear what the problem is.
unknown["??"] = coverage
coverage = dict() # Do no more processing
for nation, places in list(coverage.items()):
try:
nation_obj = place_class.lookup_one_by_name(
_db,
nation,
place_type=Place.NATION,
)
if places == cls.COVERAGE_EVERYWHERE:
# This library covers an entire nation.
place_objs.append(nation_obj)
else:
# This library covers a list of places within a
# nation.
if isinstance(places, str):
# This is invalid -- you're supposed to always
# pass in a list -- but we can support it.
places = [places]
for place in places:
try:
place_obj = nation_obj.lookup_inside(place)
if place_obj:
# We found it.
place_objs.append(place_obj)
else:
# We couldn't find any place with this name.
unknown[nation].append(place)
except MultipleResultsFound:
# The place was ambiguously named.
ambiguous[nation].append(place)
except MultipleResultsFound:
# A nation was ambiguously named -- not very likely.
ambiguous[nation] = places
except NoResultFound:
# Either this isn't a recognized nation
# or we don't have a geography for it.
unknown[nation] = places
return place_objs, unknown, ambiguous
@classmethod
def _extract_link(cls, links, rel, require_type=None, prefer_type=None):
if require_type and prefer_type:
raise ValueError(
"At most one of require_type and prefer_type may be specified."
)
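# Precedence example (hypothetical link data, for illustration only):
# with links = [{"rel": "logo", "type": "image/png", "href": "..."}],
# a prefer_type of "image/svg+xml" still returns the PNG link as "good
# enough", while a require_type of "image/svg+xml" returns None.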
if not links:
# There are no links, period.
return None
good_enough = None
if not isinstance(links, list):
# Invalid links object; ignore it.
return
for link in links:
if rel != link.get("rel"):
continue
if not require_type and not prefer_type:
# Any link with this relation will work. Return the
# first one we see.
return link
# Beyond this point, either require_type or prefer_type is
# set, so the type of the link becomes relevant.
type = link.get("type", "")
if type:
if (
require_type
and type.startswith(require_type)
or prefer_type
and type.startswith(prefer_type)
):
# If we have a require_type, this means we have
# met the requirement. If we have a prefer_type,
# we will not find a better link than this
# one. Return it immediately.
return link
if not require_type and not good_enough:
# We would prefer a link of a certain type, but if it
# turns out there is no such link, we will fall back to the
# first link with the right relation, whatever its type.
good_enough = link
return good_enough
@classmethod
def from_string(cls, _db, s, place_class=Place):
data = json.loads(s)
return cls.from_dict(_db, data, place_class)
@classmethod
def from_dict(cls, _db, data, place_class=Place):
return AuthenticationDocument(
_db,
id=data.get("id", None),
title=data.get("title", data.get("name", None)),
authentication=data.get("authentication", []),
service_description=data.get("service_description", None),
color_scheme=data.get("color_scheme"),
collection_size=data.get("collection_size"),
public_key=data.get("public_key"),
audiences=data.get("audience"),
service_area=data.get("service_area"),
focus_area=data.get("focus_area"),
links=data.get("links", []),
place_class=place_class,
)
def update_library(self, library):
"""Modify a library to reflect the current state of this
AuthenticationDocument.
:param library: A Library.
:return: A ProblemDetail if there's a problem, otherwise None.
"""
library.name = self.title
library.description = self.service_description
library.online_registration = self.online_registration
library.anonymous_access = self.anonymous_access
problem = self.update_audiences(library)
if not problem:
problem = self.update_service_areas(library)
if not problem:
problem = self.update_collection_size(library)
return problem
def update_audiences(self, library):
return self._update_audiences(library, self.audiences)
@classmethod
def _update_audiences(cls, library, audiences):
if not audiences:
# Most of the libraries in this system are open to at
# least some subset of the general public.
audiences = [Audience.PUBLIC]
if isinstance(audiences, str):
# This is invalid but we can easily support it.
audiences = [audiences]
if not isinstance(audiences, list):
return INVALID_INTEGRATION_DOCUMENT.detailed(
_("'audience' must be a list: %(audiences)r", audiences=audiences)
)
# Unrecognized audiences become Audience.OTHER.
filtered_audiences = set()
for audience in audiences:
if audience in Audience.KNOWN_AUDIENCES:
filtered_audiences.add(audience)
else:
filtered_audiences.add(Audience.OTHER)
audiences = filtered_audiences
audience_objs = []
_db = Session.object_session(library)
for audience in audiences:
audience_obj = Audience.lookup(_db, audience)
audience_objs.append(audience_obj)
library.audiences = audience_objs
def update_service_areas(self, library):
"""Update a library's ServiceAreas based on the contents of this
document.
"""
return self.set_service_areas(library, self.service_area, self.focus_area)
@classmethod
def set_service_areas(cls, library, service_area, focus_area):
"""Replace a library's ServiceAreas with specific new values."""
service_areas = []
# What service_area or focus_area looks like when
# no input was specified.
empty = ([], {}, {})
if focus_area == empty and service_area == empty:
# A library can't lose its entire coverage area -- it's
# more likely that the coverage area was grandfathered in
# and it just isn't set on the remote side.
#
# Do nothing.
return
if focus_area == empty and service_area != empty or service_area == focus_area:
# Service area and focus area are the same, either because
# they were defined that way explicitly or because focus
# area was not specified.
#
# Register the service area as the focus area and call it
# a day.
problem = cls._update_service_areas(
library, service_area, ServiceArea.FOCUS, service_areas
)
if problem:
return problem
else:
# Service area and focus area are different.
problem = cls._update_service_areas(
library, service_area, ServiceArea.ELIGIBILITY, service_areas
)
if problem:
return problem
problem = cls._update_service_areas(
library, focus_area, ServiceArea.FOCUS, service_areas
)
if problem:
return problem
# Delete any ServiceAreas associated with the given library
# which are not mentioned in the list we just gathered.
library.service_areas = service_areas
@classmethod
def _update_service_areas(cls, library, areas, type, service_areas):
"""Update a Library's ServiceAreas with a new set based on
`areas`.
:param library: A Library.
:param areas: A list [place_objs, unknown, ambiguous]
of the sort returned by `parse_coverage()`.
:param type: A value to use for `ServiceAreas.type`.
:param service_areas: All ServiceAreas that became associated
with the Library will be inserted into this list.
:return: A ProblemDetailDocument if any of the service areas could
not be transformed into Place objects. Otherwise, None.
"""
_db = Session.object_session(library)
places, unknown, ambiguous = areas
if unknown or ambiguous:
msgs = []
if unknown:
msgs.append(
str(
_(
"The following service area was unknown: %(service_area)s.",
service_area=json.dumps(unknown),
)
)
)
if ambiguous:
msgs.append(
str(
_(
"The following service area was ambiguous: %(service_area)s.",
service_area=json.dumps(ambiguous),
)
)
)
return INVALID_INTEGRATION_DOCUMENT.detailed(" ".join(msgs))
for place in places:
service_area, is_new = get_one_or_create(
_db, ServiceArea, library_id=library.id, place_id=place.id, type=type
)
service_areas.append(service_area)
def update_collection_size(self, library):
return self._update_collection_size(library, self.collection_size)
@classmethod
def _update_collection_size(cls, library, sizes):
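# Accepted shapes for `sizes` (hypothetical values, for illustration):
#   500                       - one collection, language unknown
#   {"eng": 400, "fre": 100}  - sizes keyed by language code
#   None                      - no collection information at all
# Anything else is rejected with INVALID_INTEGRATION_DOCUMENT below.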
if isinstance(sizes, str) or isinstance(sizes, int):
# A single collection with no known language.
sizes = {None: sizes}
if sizes is None:
# No collections are specified.
sizes = {}
if not isinstance(sizes, dict):
return INVALID_INTEGRATION_DOCUMENT.detailed(
_(
"'collection_size' must be a number or an object mapping language codes to numbers"
)
)
new_collections = set()
unknown_size = 0
try:
for language, size in list(sizes.items()):
summary = CollectionSummary.set(library, language, size)
if summary.language is None:
unknown_size += summary.size
new_collections.add(summary)
if unknown_size:
# We found one or more collections in languages we
# didn't recognize. Set the total size of this collection
# as the size of a collection with unknown language.
new_collections.add(CollectionSummary.set(library, None, unknown_size))
except ValueError as e:
return INVALID_INTEGRATION_DOCUMENT.detailed(str(e))
# Destroy any CollectionSummaries representing collections
# no longer associated with this library.
library.collections = list(new_collections)
```
#### File: jonathangreen/library-registry/geometry_loader.py
```python
import json
from model import Place, PlaceAlias, get_one_or_create
from util import GeometryUtility
class GeometryLoader(object):
"""Load Place objects from a NDJSON document like that generated by
geojson-places-us.
"""
def __init__(self, _db):
self._db = _db
self.places_by_external_id = dict()
def load_ndjson(self, fh):
while True:
metadata = fh.readline().strip()
if not metadata:
# End of file.
break
geometry = fh.readline().strip()
yield self.load(metadata, geometry)
def load(self, metadata, geometry):
metadata = json.loads(metadata)
external_id = metadata["id"]
type = metadata["type"]
parent_external_id = metadata["parent_id"]
name = metadata["name"]
aliases = metadata.get("aliases", [])
abbreviated_name = metadata.get("abbreviated_name", None)
if parent_external_id:
parent = self.places_by_external_id[parent_external_id]
else:
parent = None
# This gives us a Geometry object. Set its SRID so the database
# knows it's using real-world latitude and longitude.
geometry = GeometryUtility.from_geojson(geometry)
place, is_new = get_one_or_create(
self._db,
Place,
external_id=external_id,
type=type,
parent=parent,
create_method_kwargs=dict(geometry=geometry),
)
# Set these values, even the ones that were set in
# create_method_kwargs, so that we can update any that have
# changed.
place.external_name = name
place.abbreviated_name = abbreviated_name
place.geometry = geometry
# We only ever add aliases. If the database contains an alias
# for this place that doesn't show up in the metadata, it
# may have been created manually.
for alias in aliases:
name = alias["name"]
language = alias["language"]
alias, is_new = get_one_or_create(
self._db, PlaceAlias, place=place, name=name, language=language
)
self.places_by_external_id[external_id] = place
return place, is_new
``` |
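A minimal usage sketch for `GeometryLoader.load_ndjson`. The `session` variable and the `places.ndjson` path are assumptions; the two-lines-per-record format follows the class docstring above.

```python
from geometry_loader import GeometryLoader

# `session` is an assumed SQLAlchemy session bound to the registry database.
loader = GeometryLoader(session)
with open("places.ndjson") as fh:
    # Each record is a JSON metadata line followed by a GeoJSON geometry line.
    for place, is_new in loader.load_ndjson(fh):
        print(place.external_name, "created" if is_new else "updated")
```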
{
"source": "jonathangreen/simplified-server-core",
"score": 3
} |
#### File: simplified-server-core/classifier/gutenberg.py
```python
from . import *
class GutenbergBookshelfClassifier(Classifier):
# Any classification that includes the string "Fiction" will be
# counted as fiction. This is just the leftovers.
FICTION = set([
"Bestsellers, American, 1895-1923",
"Adventure",
"Fantasy",
"Horror",
"Mystery",
"Western",
"Suspense",
"Thriller",
"Children's Anthologies",
])
GENRES = {
Adventure: [
"Adventure",
"Pirates, Buccaneers, Corsairs, etc.",
],
# African_American : ["African American Writers"],
Ancient_History: ["Classical Antiquity"],
Architecture : [
"Architecture",
"The American Architect and Building News",
],
Art : ["Art"],
Biography_Memoir : [
"Biographies",
"Children's Biography",
],
Christianity : ["Christianity"],
Civil_War_History: "US Civil War",
Classics : [
"Best Books Ever Listings",
"Harvard Classics",
],
Cooking : [
"Armour's Monthly Cook Book",
"Cookery",
],
Drama : [
"One Act Plays",
"Opera",
"Plays",
],
Erotica : "Erotic Fiction",
Fantasy : "Fantasy",
Foreign_Language_Study : [
"Language Education",
],
Gardening : [
"Garden and Forest",
"Horticulture",
],
Historical_Fiction : "Historical Fiction",
History : [
"Children's History",
],
Horror : ["Gothic Fiction", "Horror"],
Humorous_Fiction : ["Humor"],
Islam : "Islam",
Judaism : "Judaism",
Law : [
"British Law",
"Noteworthy Trials",
"United States Law",
],
Literary_Criticism : ["Bibliomania"],
Mathematics : "Mathematics",
Medical : [
"Medicine",
"The North American Medical and Surgical Journal",
"Physiology",
],
Military_History : [
"American Revolutionary War",
"World War I",
"World War II",
"Spanish American War",
"Boer War",
"Napoleonic",
],
Modern_History: "Current History",
Music : [
"Music",
"Child's Own Book of Great Musicians",
],
Mystery : [
"Crime Fiction",
"Detective Fiction",
"Mystery Fiction",
],
Nature : [
"Animal",
"Animals-Wild",
"Bird-Lore"
"Birds, Illustrated by Color Photography",
],
Periodicals : [
"Ainslee's",
"<NAME>",
"Blackwood's Edinburgh Magazine",
u"Barnavännen",
"Buchanan's Journal of Man",
"Bulletin de Lille",
"Celtic Magazine",
"Chambers's Edinburgh Journal",
"Contemporary Reviews",
"Continental Monthly",
"De Aarde en haar Volken",
"Dew Drops",
"Donahoe's Magazine",
"Golden Days for Boys and Girls",
"Harper's New Monthly Magazine",
"Harper's Young People",
"Graham's Magazine",
"Lippincott's Magazine",
"L'Illustration",
"McClure's Magazine",
"Mrs Whittelsey's Magazine for Mothers and Daughters",
"Northern Nut Growers Association",
"Notes and Queries",
"Our Young Folks",
"The American Missionary",
"The American Quarterly Review",
"The Arena",
"The Argosy",
"The Atlantic Monthly",
"The Baptist Magazine",
"The Bay State Monthly",
"The Botanical Magazine",
"The Catholic World",
"The Christian Foundation",
"The Church of England Magazine",
"The Contemporary Review",
"The Economist",
"The Esperantist",
"The Girls Own Paper",
"The Great Round World And What Is Going On In It",
"The Idler",
"The Illustrated War News",
"The International Magazine of Literature, Art, and Science",
"The Irish Ecclesiastical Record",
"The Irish Penny Journal",
"The Journal of Negro History",
"The Knickerbocker",
"The Mayflower",
"The Menorah Journal",
"The Mentor",
"The Mirror of Literature, Amusement, and Instruction",
"The Mirror of Taste, and Dramatic Censor",
"The National Preacher",
"The Aldine",
"The Nursery",
"St. Nicholas Magazine for Boys and Girls",
"Punch",
"Punchinello",
"Scribner's Magazine",
"The Scrap Book",
"The Speaker",
"The Stars and Stripes",
"The Strand Magazine",
"The Unpopular Review",
"The Writer",
"The Yellow Book",
"Women's Travel Journals",
],
Pets : ["Animals-Domestic"],
Philosophy : ["Philosophy"],
Photography : "Photography",
Poetry : [
"Poetry",
"Poetry, A Magazine of Verse",
"Children's Verse",
],
Political_Science : [
"Anarchism",
"Politics",
],
Psychology : ["Psychology"],
Reference_Study_Aids : [
"Reference",
"CIA World Factbooks",
],
Religion_Spirituality : [
"Atheism",
"Bahá'í Faith",
"Hinduism",
"Paganism",
"Children's Religion",
],
Science : [
"Astronomy",
"Biology",
"Botany",
"Chemistry",
"Ecology",
"Geology",
"Journal of Entomology and Zoology",
"Microbiology",
"Microscopy",
"Natural History",
"Mycology",
"Popular Science Monthly",
"Physics",
"Scientific American",
],
Science_Fiction : [
"Astounding Stories",
"Precursors of Science Fiction",
"The Galaxy",
"Science Fiction",
],
Social_Sciences : [
"Anthropology",
"Archaeology",
"The American Journal of Archaeology",
"Sociology",
],
Suspense_Thriller : [
"Suspense",
"Thriller",
],
Technology : [
"Engineering",
"Technology",
"Transportation",
],
Travel : "Travel",
True_Crime : "Crime Nonfiction",
Westerns : "Western",
}
@classmethod
def scrub_identifier(cls, identifier):
return identifier
@classmethod
def is_fiction(cls, identifier, name):
if (identifier in cls.FICTION
or "Fiction" in identifier or "Stories" in identifier):
return True
return None
@classmethod
def audience(cls, identifier, name):
if ("Children's" in identifier):
return cls.AUDIENCE_CHILDREN
return cls.AUDIENCE_ADULT
@classmethod
def genre(cls, identifier, name, fiction=None, audience=None):
for l, v in cls.GENRES.items():
if identifier == v or (isinstance(v, list) and identifier in v):
return l
return None
Classifier.classifiers[Classifier.GUTENBERG_BOOKSHELF] = GutenbergBookshelfClassifier
```
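A few illustrative lookups against the classifier above, showing how `GENRES` values that are plain strings and values that are lists are both handled (the return values are the genre constants pulled in via `from . import *`):

```python
c = GutenbergBookshelfClassifier

c.genre("Detective Fiction", None)      # Mystery -- matched inside a list value
c.genre("Travel", None)                 # Travel -- matched against a string value
c.genre("Knitting", None)               # None -- not present in GENRES at all
c.is_fiction("Ghost Stories", None)     # True -- "Stories" appears in the identifier
c.audience("Children's History", None)  # AUDIENCE_CHILDREN
```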
#### File: simplified-server-core/model/integrationclient.py
```python
from . import (
Base,
get_one,
get_one_or_create,
)
import datetime
import os
import re
from sqlalchemy import (
Boolean,
Column,
DateTime,
Integer,
Unicode,
)
from sqlalchemy.orm import (
relationship,
)
from ..util.string_helpers import (
native_string,
random_string,
)
class IntegrationClient(Base):
"""A client that has authenticated access to this application.
Currently used to represent circulation managers that have access
to the metadata wrangler.
"""
__tablename__ = 'integrationclients'
id = Column(Integer, primary_key=True)
# URL (or human readable name) to represent the server.
url = Column(Unicode, unique=True)
# Shared secret
shared_secret = Column(Unicode, unique=True, index=True)
# It may be necessary to disable an integration client until it
# upgrades to fix a known bug.
enabled = Column(Boolean, default=True)
created = Column(DateTime)
last_accessed = Column(DateTime)
loans = relationship('Loan', backref='integration_client')
holds = relationship('Hold', backref='integration_client')
def __repr__(self):
return native_string(
u"<IntegrationClient: URL=%s ID=%s>" % (self.url, self.id)
)
@classmethod
def for_url(cls, _db, url):
"""Finds the IntegrationClient for the given server URL.
:return: an IntegrationClient. If it didn't already exist,
it will be created. If it didn't already have a secret, no
secret will be set.
"""
url = cls.normalize_url(url)
now = datetime.datetime.utcnow()
client, is_new = get_one_or_create(
_db, cls, url=url, create_method_kwargs=dict(created=now)
)
client.last_accessed = now
return client, is_new
@classmethod
def register(cls, _db, url, submitted_secret=None):
"""Creates a new server with client details."""
client, is_new = cls.for_url(_db, url)
if not is_new and (not submitted_secret or submitted_secret != client.shared_secret):
raise ValueError('Cannot update existing IntegrationClient without valid shared_secret')
generate_secret = (client.shared_secret is None) or submitted_secret
if generate_secret:
client.randomize_secret()
return client, is_new
@classmethod
def normalize_url(cls, url):
url = re.sub(r'^(http://|https://)', '', url)
url = re.sub(r'^www\.', '', url)
if url.endswith('/'):
url = url[:-1]
return unicode(url.lower())
@classmethod
def authenticate(cls, _db, shared_secret):
client = get_one(_db, cls, shared_secret=unicode(shared_secret))
if client:
client.last_accessed = datetime.datetime.utcnow()
# Committing immediately reduces the risk of contention.
_db.commit()
return client
return None
def randomize_secret(self):
self.shared_secret = random_string(24)
```
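A registration and authentication sketch for `IntegrationClient`; `_db` is an assumed SQLAlchemy session:

```python
client, is_new = IntegrationClient.register(_db, "https://www.example.com/")
# normalize_url strips the scheme, a leading "www." and any trailing slash.
assert client.url == "example.com"
assert client.shared_secret is not None  # a secret is generated on first registration

# A later request authenticates by presenting the shared secret.
assert IntegrationClient.authenticate(_db, client.shared_secret) == client

# Re-registering the same URL requires the existing secret -- and rotates it.
client, _ = IntegrationClient.register(
    _db, "https://www.example.com/", submitted_secret=client.shared_secret
)
```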
#### File: simplified-server-core/model/patron.py
```python
from . import (
Base,
get_one_or_create,
numericrange_to_tuple
)
from .credential import Credential
import datetime
import logging
from sqlalchemy import (
Boolean,
Column,
Date,
DateTime,
ForeignKey,
Index,
Integer,
String,
Unicode,
UniqueConstraint,
)
from psycopg2.extras import NumericRange
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import relationship
from sqlalchemy.orm.session import Session
from ..classifier import Classifier
from ..user_profile import ProfileStorage
import uuid
class LoanAndHoldMixin(object):
@property
def work(self):
"""Try to find the corresponding work for this Loan/Hold."""
license_pool = self.license_pool
if not license_pool:
return None
if license_pool.work:
return license_pool.work
if license_pool.presentation_edition and license_pool.presentation_edition.work:
return license_pool.presentation_edition.work
return None
@property
def library(self):
"""Try to find the corresponding library for this Loan/Hold."""
if self.patron:
return self.patron.library
# If this Loan/Hold belongs to an external patron, there may be no library.
return None
class Patron(Base):
__tablename__ = 'patrons'
id = Column(Integer, primary_key=True)
# Each patron is the patron _of_ one particular library. An
# individual human being may patronize multiple libraries, but
# they will have a different patron account at each one.
library_id = Column(
Integer, ForeignKey('libraries.id'), index=True,
nullable=False
)
# The patron's permanent unique identifier in an external library
# system, probably never seen by the patron.
#
# This is not stored as a ForeignIdentifier because it corresponds
# to the patron's identifier in the library responsible for the
# Simplified instance, not a third party.
external_identifier = Column(Unicode)
# The patron's account type, as reckoned by an external library
# system. Different account types may be subject to different
# library policies.
#
# Depending on library policy it may be possible to automatically
# derive the patron's account type from their authorization
# identifier.
external_type = Column(Unicode, index=True)
# An identifier used by the patron that gives them the authority
# to borrow books. This identifier may change over time.
authorization_identifier = Column(Unicode)
# An identifier used by the patron that authenticates them,
# but does not give them the authority to borrow books, i.e. their
# website username.
username = Column(Unicode)
# The last time this record was synced up with an external library
# system such as an ILS.
last_external_sync = Column(DateTime)
# The last time this record was synced with the corresponding
# records managed by the vendors who provide the library with
# ebooks.
_last_loan_activity_sync = Column(
DateTime, default=None, name="last_loan_activity_sync"
)
# The time, if any, at which the user's authorization to borrow
# books expires.
authorization_expires = Column(Date, index=True)
# Outstanding fines the user has, if any.
fines = Column(Unicode)
# If the patron's borrowing privileges have been blocked, this
# field contains the library's reason for the block. If this field
# is None, the patron's borrowing privileges have not been
# blocked.
#
# Although we currently don't do anything with specific values for
# this field, the expectation is that values will be taken from a
# small controlled vocabulary (e.g. "banned", "incorrect personal
# information", "unknown"), rather than freeform strings entered
# by librarians.
#
# Common reasons for blocks are kept in circulation's PatronData
# class.
block_reason = Column(String(255), default=None)
# Whether or not the patron wants their annotations synchronized
# across devices (which requires storing those annotations on a
# library server).
_synchronize_annotations = Column(Boolean, default=None,
name="synchronize_annotations")
# If the circulation manager is set up to associate a patron's
# neighborhood with circulation events, and it would be
# prohibitively expensive to fetch a patron's neighborhood from
# the ILS on every relevant request, the ILS may choose to cache
# the information here.
#
# Periodically, patrons with an old last_external_sync (i.e. who
# haven't used the circulation manager in a while) will have their
# cached_neighborhood scrubbed from the database. This is the
# responsibility of PatronNeighborhoodScrubber.
#
# This field is called cached_neighborhood for two reasons. First,
# the name makes it clear that this is a performance cache, not a
# permanent data store like authorization_identifier. Second, the
# neighborhood of the authenticated patron (however obtained) is
# stored in flask.request.patron.neighborhood. Giving the database
# field a different name guarantees that a patron's neighborhood
# is never _unintentionally_ written to the database. It has to
# be an explicit decision of the ILS integration code.
cached_neighborhood = Column(Unicode, default=None, index=True)
loans = relationship('Loan', backref='patron', cascade='delete')
holds = relationship('Hold', backref='patron', cascade='delete')
annotations = relationship('Annotation', backref='patron', order_by="desc(Annotation.timestamp)", cascade='delete')
# One Patron can have many associated Credentials.
credentials = relationship("Credential", backref="patron", cascade="delete")
__table_args__ = (
UniqueConstraint('library_id', 'username'),
UniqueConstraint('library_id', 'authorization_identifier'),
UniqueConstraint('library_id', 'external_identifier'),
)
# A patron with borrowing privileges should have their local
# metadata synced with their ILS record at intervals no greater
# than this time.
MAX_SYNC_TIME = datetime.timedelta(hours=12)
def __repr__(self):
def date(d):
"""Format an object that might be a datetime as a date.
This keeps a patron representation short.
"""
if d is None:
return None
if isinstance(d, datetime.datetime):
return d.date()
return d
return '<Patron authorization_identifier=%s expires=%s sync=%s>' % (
self.authorization_identifier, date(self.authorization_expires),
date(self.last_external_sync)
)
def identifier_to_remote_service(self, remote_data_source, generator=None):
"""Find or randomly create an identifier to use when identifying
this patron to a remote service.
:param remote_data_source: A DataSource object (or name of a
DataSource) corresponding to the remote service.
"""
_db = Session.object_session(self)
def refresh(credential):
if generator and callable(generator):
identifier = generator()
else:
identifier = str(uuid.uuid1())
credential.credential = identifier
credential = Credential.lookup(
_db, remote_data_source, Credential.IDENTIFIER_TO_REMOTE_SERVICE,
self, refresh, allow_persistent_token=True
)
return credential.credential
def works_on_loan(self):
db = Session.object_session(self)
loans = db.query(Loan).filter(Loan.patron==self)
return [loan.work for loan in self.loans if loan.work]
def works_on_loan_or_on_hold(self):
db = Session.object_session(self)
results = set()
holds = [hold.work for hold in self.holds if hold.work]
loans = self.works_on_loan()
return set(holds + loans)
@property
def loan_activity_max_age(self):
"""In the absence of any other information, how long should loan
activity be considered 'fresh' for this patron?
We reset Patron.last_loan_activity_sync immediately if we hear
about a change to a patron's loans or holds. This handles
cases where patron activity happens where we can't see it,
e.g. on a vendor website or mobile app.
TODO: This is currently a constant, but in the future it could become
a per-library setting.
"""
return 15 * 60
@hybrid_property
def last_loan_activity_sync(self):
"""When was the last time we asked the vendors about
this patron's loan activity?
:return: A datetime, or None if we know our loan data is
stale.
"""
value = self._last_loan_activity_sync
if not value:
return value
# We have an answer, but it may be so old that we should clear
# it out.
now = datetime.datetime.utcnow()
expires = value + datetime.timedelta(
seconds=self.loan_activity_max_age
)
if now > expires:
# The value has expired. Clear it out.
value = None
self._last_loan_activity_sync = value
return value
@last_loan_activity_sync.setter
def last_loan_activity_sync(self, value):
self._last_loan_activity_sync = value
@hybrid_property
def synchronize_annotations(self):
return self._synchronize_annotations
@synchronize_annotations.setter
def synchronize_annotations(self, value):
"""When a patron says they don't want their annotations to be stored
on a library server, delete all their annotations.
"""
if value is None:
# A patron cannot decide to go back to the state where
# they hadn't made a decision.
raise ValueError(
"synchronize_annotations cannot be unset once set."
)
if value is False:
_db = Session.object_session(self)
qu = _db.query(Annotation).filter(Annotation.patron==self)
for annotation in qu:
_db.delete(annotation)
self._synchronize_annotations = value
@property
def root_lane(self):
"""Find the Lane, if any, to be used as the Patron's root lane.
A patron with a root Lane can only access that Lane and the
Lanes beneath it. In addition, a patron with a root lane
cannot conduct a transaction on a book intended for an older
audience than the one defined by their root lane.
"""
# Two ways of improving performance by short-circuiting this
# logic.
if not self.external_type:
return None
if not self.library.has_root_lanes:
return None
_db = Session.object_session(self)
from ..lane import Lane
qu = _db.query(Lane).filter(
Lane.library==self.library
).filter(
Lane.root_for_patron_type.any(self.external_type)
).order_by(Lane.id)
lanes = qu.all()
if len(lanes) < 1:
# The most common situation -- this patron has no special
# root lane.
return None
if len(lanes) > 1:
# Multiple root lanes for a patron indicates a
# configuration problem, but we shouldn't make the patron
# pay the price -- just pick the first one.
logging.error(
"Multiple root lanes found for patron type %s.",
self.external_type
)
return lanes[0]
def work_is_age_appropriate(self, work_audience, work_target_age):
"""Is the given audience and target age an age-appropriate match for this Patron?
NOTE: What "age-appropriate" means depends on some policy questions
that have not been answered and may be library-specific. For
now, it is determined by comparing audience and target age to that of the
Patron's root lane.
This is designed for use when reasoning about works in
general. If you have a specific Work in mind, use
`Work.age_appropriate_for_patron`.
:param work_audience: One of the audience constants from
Classifier, representing the general reading audience to
which a putative work belongs.
:param work_target_age: A number or 2-tuple representing the target age
or age range of a putative work.
:return: A boolean
"""
root = self.root_lane
if not root:
# The patron has no root lane. They can interact with any
# title.
return True
# The patron can interact with a title if any of the audiences
# in their root lane (in conjunction with the root lane's target_age)
# are a match for the title's audience and target age.
return any(
self.age_appropriate_match(
work_audience, work_target_age,
audience, root.target_age
)
for audience in root.audiences
)
@classmethod
def age_appropriate_match(
cls, work_audience, work_target_age,
reader_audience, reader_age
):
"""Match the audience and target age of a work with that of a reader,
and see whether they are an age-appropriate match.
NOTE: What "age-appropriate" means depends on some policy
questions that have not been answered and may be
library-specific. For now, non-children's books are
age-inappropriate for young children, and children's books are
age-inappropriate for children too young to be in the book's
target age range.
:param reader_audience: One of the audience constants from
Classifier, representing the general reading audience to
which the reader belongs.
:param reader_age: A number or 2-tuple representing the age or
age range of the reader.
"""
if reader_audience is None:
# A patron with no particular audience restrictions
# can see everything.
#
# This is by far the most common case, so we don't set up
# logging until after running it.
return True
log = logging.getLogger("Age-appropriate match calculator")
log.debug(
"Matching work %s/%s to reader %s/%s" % (
work_audience, work_target_age,
reader_audience, reader_age
)
)
if reader_audience not in Classifier.AUDIENCES_JUVENILE:
log.debug("A non-juvenile patron can see everything.")
return True
if work_audience == Classifier.AUDIENCE_ALL_AGES:
log.debug("An all-ages book is always age appropriate.")
return True
# At this point we know that the patron is a juvenile.
def ensure_tuple(x):
# Convert a potential NumericRange into a tuple.
if isinstance(x, NumericRange):
x = numericrange_to_tuple(x)
return x
reader_age = ensure_tuple(reader_age)
if isinstance(reader_age, tuple):
# A range was passed in rather than a specific age. Assume
# the reader is at the top edge of the range.
ignore, reader_age = reader_age
work_target_age = ensure_tuple(work_target_age)
if isinstance(work_target_age, tuple):
# Pick the _bottom_ edge of a work's target age range --
# the work is appropriate for anyone _at least_ that old.
work_target_age, ignore = work_target_age
# A YA reader is treated as an adult (with no reading
# restrictions) if they have no associated age range, or their
# age range includes ADULT_AGE_CUTOFF.
if (reader_audience == Classifier.AUDIENCE_YOUNG_ADULT
and (reader_age is None
or reader_age >= Classifier.ADULT_AGE_CUTOFF)):
log.debug("YA reader to be treated as an adult.")
return True
# There are no other situations where a juvenile reader can access
# non-juvenile titles.
if work_audience not in Classifier.AUDIENCES_JUVENILE:
log.debug("Juvenile reader cannot access non-juvenile title.")
return False
# At this point we know we have a juvenile reader and a
# juvenile book.
if (reader_audience == Classifier.AUDIENCE_YOUNG_ADULT
and work_audience in (Classifier.AUDIENCES_YOUNG_CHILDREN)):
log.debug("YA reader can access any children's title.")
return True
if (reader_audience in (Classifier.AUDIENCES_YOUNG_CHILDREN)
and work_audience == Classifier.AUDIENCE_YOUNG_ADULT):
log.debug("Child reader cannot access any YA title.")
return False
# At this point we either have a YA patron with a YA book, or
# a child patron with a children's book. It comes down to a
# question of the reader's age vs. the work's target age.
if work_target_age is None:
# This is a generic children's or YA book with no
# particular target age. Assume it's age appropriate.
log.debug(
"Juvenile book with no target age is presumed age-appropriate."
)
return True
if reader_age is None:
# We have no idea how old the patron is, so any work with
# the appropriate audience is considered age-appropriate.
log.debug(
"Audience matches, and no specific patron age information available: presuming age-appropriate."
)
return True
if reader_age < work_target_age:
# The audience for this book matches the patron's
# audience, but the book has a target age that is too high
# for the reader.
log.debug(
"Audience matches, but work's target age is too high for reader."
)
return False
log.debug("Both audience and target age match; it's age-appropriate.")
return True
Index("ix_patron_library_id_external_identifier", Patron.library_id, Patron.external_identifier)
Index("ix_patron_library_id_authorization_identifier", Patron.library_id, Patron.authorization_identifier)
Index("ix_patron_library_id_username", Patron.library_id, Patron.username)
class Loan(Base, LoanAndHoldMixin):
__tablename__ = 'loans'
id = Column(Integer, primary_key=True)
patron_id = Column(Integer, ForeignKey('patrons.id'), index=True)
integration_client_id = Column(Integer, ForeignKey('integrationclients.id'), index=True)
# A Loan is always associated with a LicensePool.
license_pool_id = Column(Integer, ForeignKey('licensepools.id'), index=True)
# It may also be associated with an individual License if the source
# provides information about individual licenses.
license_id = Column(Integer, ForeignKey('licenses.id'), index=True, nullable=True)
fulfillment_id = Column(Integer, ForeignKey('licensepooldeliveries.id'))
start = Column(DateTime, index=True)
end = Column(DateTime, index=True)
# Some distributors (e.g. Feedbooks) may have an identifier that can
# be used to check the status of a specific Loan.
external_identifier = Column(Unicode, unique=True, nullable=True)
__table_args__ = (
UniqueConstraint('patron_id', 'license_pool_id'),
)
def until(self, default_loan_period):
"""Give or estimate the time at which the loan will end."""
if self.end:
return self.end
if default_loan_period is None:
# This loan will last forever.
return None
start = self.start or datetime.datetime.utcnow()
return start + default_loan_period
class Hold(Base, LoanAndHoldMixin):
"""A patron is in line to check out a book.
"""
__tablename__ = 'holds'
id = Column(Integer, primary_key=True)
patron_id = Column(Integer, ForeignKey('patrons.id'), index=True)
integration_client_id = Column(Integer, ForeignKey('integrationclients.id'), index=True)
license_pool_id = Column(Integer, ForeignKey('licensepools.id'), index=True)
start = Column(DateTime, index=True)
end = Column(DateTime, index=True)
position = Column(Integer, index=True)
external_identifier = Column(Unicode, unique=True, nullable=True)
@classmethod
def _calculate_until(
self, start, queue_position, total_licenses, default_loan_period,
default_reservation_period):
"""Helper method for `Hold.until` that can be tested independently.
We have to wait for the available licenses to cycle a
certain number of times before we get a turn.
Example: 4 licenses, queue position 21
After 1 cycle: queue position 17
2 : queue position 13
3 : queue position 9
4 : queue position 5
5 : queue position 1
6 : available
The worst-case cycle time is the loan period plus the reservation
period.
"""
if queue_position == 0:
# The book is currently reserved to this patron--they need
# to hurry up and check it out.
return start + default_reservation_period
if total_licenses == 0:
# The book will never be available
return None
# If you are at the very front of the queue, the worst case
# time to get the book is the time it takes for the person
# in front of you to get a reservation notification, borrow
# the book at the last minute, and keep the book for the
# maximum allowable time.
cycle_period = (default_reservation_period + default_loan_period)
# This will happen at least once.
cycles = 1
if queue_position <= total_licenses:
# But then the book will be available to you.
pass
else:
# This will happen more than once. After the first cycle,
# other people will be notified that it's their turn,
# they'll wait a while, get a reservation, and then keep
# the book for a while, and so on.
cycles += queue_position // total_licenses
if (total_licenses > 1 and queue_position % total_licenses == 0):
cycles -= 1
return start + (cycle_period * cycles)
def until(self, default_loan_period, default_reservation_period):
"""Give or estimate the time at which the book will be available
to this patron.
This is a *very* rough estimate that should be treated more or
less as a worst case. (Though it could be even worse than
this--the library's license might expire and then you'll
_never_ get the book.)
"""
if self.end and self.end > datetime.datetime.utcnow():
# The license source provided their own estimate, and it's
# not obviously wrong, so use it.
return self.end
if default_loan_period is None or default_reservation_period is None:
# This hold has no definite end date, because there's no known
# upper bound on how long someone in front of you can keep the
# book.
return None
start = datetime.datetime.utcnow()
licenses_available = self.license_pool.licenses_owned
position = self.position
if position is None:
# We don't know where in line we are. Assume we're at the
# end.
position = self.license_pool.patrons_in_hold_queue
return self._calculate_until(
start, position, licenses_available,
default_loan_period, default_reservation_period)
def update(self, start, end, position):
"""When the book becomes available, position will be 0 and end will be
set to the time at which the patron will lose their place in
line.
Otherwise, end is irrelevant and is set to None.
"""
if start is not None:
self.start = start
if end is not None:
self.end = end
if position is not None:
self.position = position
__table_args__ = (
UniqueConstraint('patron_id', 'license_pool_id'),
)
class Annotation(Base):
# The Web Annotation Data Model defines a basic set of motivations.
# https://www.w3.org/TR/annotation-model/#motivation-and-purpose
OA_NAMESPACE = u"http://www.w3.org/ns/oa#"
# We need to define some terms of our own.
LS_NAMESPACE = u"http://librarysimplified.org/terms/annotation/"
IDLING = LS_NAMESPACE + u'idling'
BOOKMARKING = OA_NAMESPACE + u'bookmarking'
MOTIVATIONS = [
IDLING,
BOOKMARKING,
]
__tablename__ = 'annotations'
id = Column(Integer, primary_key=True)
patron_id = Column(Integer, ForeignKey('patrons.id'), index=True)
identifier_id = Column(Integer, ForeignKey('identifiers.id'), index=True)
motivation = Column(Unicode, index=True)
timestamp = Column(DateTime, index=True)
active = Column(Boolean, default=True)
content = Column(Unicode)
target = Column(Unicode)
@classmethod
def get_one_or_create(self, _db, patron, *args, **kwargs):
"""Find or create an Annotation, but only if the patron has
annotation sync turned on.
"""
if not patron.synchronize_annotations:
raise ValueError(
"Patron has opted out of synchronizing annotations."
)
return get_one_or_create(
_db, Annotation, patron=patron, *args, **kwargs
)
def set_inactive(self):
self.active = False
self.content = None
self.timestamp = datetime.datetime.utcnow()
class PatronProfileStorage(ProfileStorage):
"""Interface between a Patron object and the User Profile Management
Protocol.
"""
def __init__(self, patron, url_for=None):
"""Set up a storage interface for a specific Patron.
:param patron: We are accessing the profile for this patron.
"""
self.patron = patron
self.url_for = url_for
@property
def writable_setting_names(self):
"""Return the subset of settings that are considered writable."""
return set([self.SYNCHRONIZE_ANNOTATIONS])
@property
def profile_document(self):
"""Create a Profile document representing the patron's current
status.
"""
doc = dict()
patron = self.patron
doc[self.AUTHORIZATION_IDENTIFIER] = patron.authorization_identifier
if patron.authorization_expires:
doc[self.AUTHORIZATION_EXPIRES] = (
patron.authorization_expires.strftime("%Y-%m-%dT%H:%M:%SZ")
)
settings = {
self.SYNCHRONIZE_ANNOTATIONS :
patron.synchronize_annotations
}
doc[self.SETTINGS_KEY] = settings
return doc
def update(self, settable, full):
"""Bring the Patron's status up-to-date with the given document.
Right now this means making sure Patron.synchronize_annotations
is up to date.
"""
key = self.SYNCHRONIZE_ANNOTATIONS
if key in settable:
self.patron.synchronize_annotations = settable[key]
```
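The worst-case hold estimate in `Hold._calculate_until` is plain arithmetic. A worked sketch of the docstring's example (4 licenses, queue position 21) with hypothetical loan and reservation periods; the import path is an assumption:

```python
import datetime

from model.patron import Hold  # import path is an assumption

start = datetime.datetime(2019, 1, 1)
loan_period = datetime.timedelta(days=21)
reservation_period = datetime.timedelta(days=3)

# cycles = 1 + 21 // 4 = 6; since 21 % 4 != 0, no correction is applied.
# Estimate: start + 6 * (loan period + reservation period) = 144 days out.
until = Hold._calculate_until(start, 21, 4, loan_period, reservation_period)
assert until == start + 6 * (loan_period + reservation_period)
```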
#### File: tests/models/test_cachedfeed.py
```python
import pytest
import datetime
from ...testing import DatabaseTest
from ...classifier import Classifier
from ...lane import (
Facets,
Pagination,
Lane,
WorkList,
)
from ...model.cachedfeed import CachedFeed
from ...model.configuration import ConfigurationSetting
from ...opds import AcquisitionFeed
from ...util.flask_util import OPDSFeedResponse
from ...util.opds_writer import OPDSFeed
class MockFeedGenerator(object):
def __init__(self):
self.calls = []
def __call__(self):
self.calls.append(object())
return b"This is feed #%d" % len(self.calls)
class TestCachedFeed(DatabaseTest):
def test_fetch(self):
# Verify that CachedFeed.fetch looks in the database for a
# matching CachedFeed
#
# If a new feed needs to be generated, this is done by calling
# a hook function, and the result is stored in the database.
work = self._work()
lane = self._lane()
class Mock(CachedFeed):
# Mock all of the helper methods, which are tested
# separately below.
@classmethod
def _prepare_keys(cls, *args):
cls._prepare_keys_called_with = args
return cls._keys
# _prepare_keys always returns this named tuple. Manipulate its
# members to test different bits of fetch().
_keys = CachedFeed.CachedFeedKeys(
feed_type="mock type",
library=self._default_library,
work=work,
lane_id=lane.id,
unique_key="unique key",
facets_key=u'facets',
pagination_key=u'pagination',
)
@classmethod
def max_cache_age(cls, *args):
cls.max_cache_age_called_with = args
return cls.MAX_CACHE_AGE
# max_cache_age always returns whatever value is stored here.
MAX_CACHE_AGE = 42
@classmethod
def _should_refresh(cls, *args):
cls._should_refresh_called_with = args
return cls.SHOULD_REFRESH
# _should_refresh always returns whatever value is stored here.
SHOULD_REFRESH = True
m = Mock.fetch
def clear_helpers():
"Clear out the records of calls made to helper methods."
Mock._prepare_keys_called_with = None
Mock.max_cache_age_called_with = None
Mock._should_refresh_called_with = None
clear_helpers()
# Define the hook function that is called whenever
# we need to generate a feed.
refresher = MockFeedGenerator()
# The first time we call fetch(), we end up with a CachedFeed.
worklist = object()
facets = object()
pagination = object()
max_age = object()
result1 = m(
self._db, worklist, facets, pagination, refresher, max_age,
raw=True
)
now = datetime.datetime.utcnow()
assert isinstance(result1, CachedFeed)
# The content of the CachedFeed comes from refresher(). It was
# converted to Unicode. (Verifying the unicode() call may seem
# like a small thing, but it means a refresher method can
# return an OPDSFeed object.)
assert u"This is feed #1" == result1.content
# The timestamp is recent.
timestamp1 = result1.timestamp
assert (now - timestamp1).total_seconds() < 2
# Relevant information from the named tuple returned by
# _prepare_keys made it into the CachedFeed.
k = Mock._keys
assert k.feed_type == result1.type
assert k.lane_id == result1.lane_id
assert k.unique_key == result1.unique_key
assert unicode(k.facets_key) == result1.facets
assert unicode(k.pagination_key) == result1.pagination
# Now let's verify that the helper methods were called with the
# right arguments.
# We called _prepare_keys with all the necessary information
# to create a named tuple.
assert (
(self._db, worklist, facets, pagination) ==
Mock._prepare_keys_called_with)
# We then called max_cache_age on the WorkList, the page
# type, and the max_age object passed in to fetch().
assert (
(worklist, "mock type", facets, max_age) ==
Mock.max_cache_age_called_with)
# Then we called _should_refresh with the feed retrieved from
# the database (which was None), and the return value of
# max_cache_age.
assert (
(None, 42) ==
Mock._should_refresh_called_with)
# Since _should_refresh is hard-coded to return True, we then
# called refresher() to generate a feed and created a new
# CachedFeed in the database.
# Now let's try the same thing again. This time, there's a
# CachedFeed already in the database, but our mocked
# _should_refresh() is hard-coded to always return True, so
# refresher() will be called again.
clear_helpers()
result2 = m(
self._db, worklist, facets, pagination, refresher, max_age,
raw=True
)
# The CachedFeed from before was reused.
assert result2 == result1
# But its .content has been updated.
assert "This is feed #2" == result2.content
timestamp2 = result2.timestamp
assert timestamp2 > timestamp1
# Since there was a matching CachedFeed in the database
# already, that CachedFeed was passed into _should_refresh --
# previously this value was None.
assert (
(result1, 42) ==
Mock._should_refresh_called_with)
# Now try the scenario where the feed does not need to be refreshed.
clear_helpers()
Mock.SHOULD_REFRESH = False
result3 = m(
self._db, worklist, facets, pagination, refresher, max_age,
raw=True
)
# Not only do we have the same CachedFeed as before, but its
# timestamp and content are unchanged.
assert result3 == result2
assert "This is feed #2" == result3.content
assert timestamp2 == result3.timestamp
# If max_age ends up zero, we don't check for the existence of a
# cached feed before forging ahead.
Mock.MAX_CACHE_AGE = 0
clear_helpers()
m(
self._db, worklist, facets, pagination, refresher, max_age,
raw=True
)
# A matching CachedFeed exists in the database, but we didn't
# even look for it, because we knew we'd be looking it up
# again after feed generation.
assert (
(None, 0) ==
Mock._should_refresh_called_with)
def test_no_race_conditions(self):
# Why do we look up a CachedFeed again after feed generation?
# Well, let's see what happens if someone else messes around
# with the CachedFeed object _while the refresher is running_.
#
# This is a race condition that happens in real life. Rather
# than setting up a multi-threaded test, we can have the
# refresher itself simulate a background modification by
# messing around with the CachedFeed object we know will
# eventually be returned.
#
# The most up-to-date feed always wins, so background
# modifications will take effect only if they made the
# CachedFeed look _newer_ than the foreground process does.
facets = Facets.default(self._default_library)
pagination = Pagination.default()
wl = WorkList()
wl.initialize(self._default_library)
m = CachedFeed.fetch
# In this case, two simulated threads try to create the same
# CachedFeed at the same time. We end up with a single
# CachedFeed containing the result of the last code that ran.
def simultaneous_refresher():
# This refresher method simulates another thread creating
# a CachedFeed for this feed while this thread's
# refresher is running.
def other_thread_refresher():
return "Another thread made a feed."
m(
self._db, wl, facets, pagination, other_thread_refresher, 0,
raw=True
)
return "Then this thread made a feed."
# This will call simultaneous_refresher(), which will call
# CachedFeed.fetch() _again_, which will call
# other_thread_refresher().
result = m(
self._db, wl, facets, pagination, simultaneous_refresher, 0,
raw=True
)
# We ended up with a single CachedFeed containing the
# latest information.
assert [result] == self._db.query(CachedFeed).all()
assert "Then this thread made a feed." == result.content
# If two threads contend for an existing CachedFeed, the one that
# sets CachedFeed.timestamp to the later value wins.
#
# Here, the other thread wins by setting .timestamp on the
# existing CachedFeed to a date in the future.
now = datetime.datetime.utcnow()
tomorrow = now + datetime.timedelta(days=1)
yesterday = now - datetime.timedelta(days=1)
def tomorrow_vs_now():
result.content = "Someone in the background set tomorrow's content."
result.timestamp = tomorrow
return "Today's content can't compete."
tomorrow_result = m(
self._db, wl, facets, pagination, tomorrow_vs_now, 0, raw=True
)
assert tomorrow_result == result
assert ("Someone in the background set tomorrow's content." ==
tomorrow_result.content)
assert tomorrow_result.timestamp == tomorrow
# Here, the other thread sets .timestamp to a date in the past, and
# it loses out to the (apparently) newer feed.
def yesterday_vs_now():
result.content = "Someone in the background set yesterday's content."
result.timestamp = yesterday
return "Today's content is fresher."
now_result = m(
self._db, wl, facets, pagination, yesterday_vs_now, 0, raw=True
)
# We got the same CachedFeed we've been getting this whole
# time, but the outdated data set by the 'background thread'
# has been fixed.
assert result == now_result
assert "Today's content is fresher." == result.content
assert result.timestamp > yesterday
# This shouldn't happen, but if the CachedFeed's timestamp or
# content are *cleared out* in the background, between the
# time the CachedFeed is fetched and the time the refresher
# finishes, then we don't know what's going on and we don't
# take chances. We create a whole new CachedFeed object for
# the updated version of the feed.
# First, try the situation where .timestamp is cleared out in
# the background.
def timestamp_cleared_in_background():
result.content = "Someone else sets content and clears timestamp."
result.timestamp = None
return "Non-weird content."
result2 = m(
self._db, wl, facets, pagination, timestamp_cleared_in_background,
0, raw=True
)
now = datetime.datetime.utcnow()
# result2 is a brand new CachedFeed.
assert result2 != result
assert "Non-weird content." == result2.content
assert (now - result2.timestamp).total_seconds() < 2
# We let the background process do whatever it wants to do
# with the old one.
assert "Someone else sets content and clears timestamp." == result.content
assert None == result.timestamp
# Next, test the situation where .content is cleared out.
def content_cleared_in_background():
result2.content = None
result2.timestamp = tomorrow
return "Non-weird content."
result3 = m(
self._db, wl, facets, pagination, content_cleared_in_background, 0,
raw=True
)
now = datetime.datetime.utcnow()
# Again, a brand new CachedFeed.
assert result3 != result2
assert result3 != result
assert "Non-weird content." == result3.content
assert (now - result3.timestamp).total_seconds() < 2
# Again, we let the background process have the old one for
# whatever weird thing it wants to do.
assert None == result2.content
assert tomorrow == result2.timestamp
def test_response_format(self):
# Verify that fetch() can be told to return an appropriate
# OPDSFeedResponse object. This is the default behavior, since
# it preserves some useful information that would otherwise be
# lost.
facets = Facets.default(self._default_library)
pagination = Pagination.default()
wl = WorkList()
wl.initialize(self._default_library)
def refresh():
return "Here's a feed."
private=object()
r = CachedFeed.fetch(
self._db, wl, facets, pagination, refresh, max_age=102,
private=private
)
assert isinstance(r, OPDSFeedResponse)
assert 200 == r.status_code
assert OPDSFeed.ACQUISITION_FEED_TYPE == r.content_type
assert 102 == r.max_age
assert "Here's a feed." == r.data
# The extra argument `private`, not used by CachedFeed.fetch, was
# passed on to the OPDSFeedResponse constructor.
assert private == r.private
# The CachedFeed was created; just not returned.
cf = self._db.query(CachedFeed).one()
assert "Here's a feed." == cf.content
# Try it again as a cache hit.
r = CachedFeed.fetch(
self._db, wl, facets, pagination, refresh, max_age=102,
private=private
)
assert isinstance(r, OPDSFeedResponse)
assert 200 == r.status_code
assert OPDSFeed.ACQUISITION_FEED_TYPE == r.content_type
assert 102 == r.max_age
assert "Here's a feed." == r.data
# If we tell CachedFeed to cache its feed 'forever', that only
# applies to the _database_ cache. The client is told to cache
# the feed for the default period.
r = CachedFeed.fetch(
self._db, wl, facets, pagination, refresh,
max_age=CachedFeed.CACHE_FOREVER, private=private
)
assert isinstance(r, OPDSFeedResponse)
assert OPDSFeed.DEFAULT_MAX_AGE == r.max_age
# Tests of helper methods.
def test_feed_type(self):
# Verify that a WorkList or a Facets object can determine the
# value to be stored in CachedFeed.type, with Facets taking
# priority.
class DontCare(object):
CACHED_FEED_TYPE = None
class WorkList(object):
CACHED_FEED_TYPE = "from worklist"
class Facets(object):
CACHED_FEED_TYPE = "from facets"
m = CachedFeed.feed_type
# The default type is PAGE_TYPE.
assert CachedFeed.PAGE_TYPE == m(None, None)
assert CachedFeed.PAGE_TYPE == m(DontCare, DontCare)
# If `worklist` has an opinion and `facets` doesn't, we use that.
assert "from worklist" == m(WorkList, None)
assert "from worklist" == m(WorkList, DontCare)
# If `facets` has an opinion, it is always used.
assert "from facets" == m(DontCare, Facets)
assert "from facets" == m(None, Facets)
assert "from facets" == m(WorkList, Facets)
def test_max_cache_age(self):
m = CachedFeed.max_cache_age
# If override is provided, that value is always used.
assert 60 == m(None, None, None, 60)
assert 60 == m(None, None, None, datetime.timedelta(minutes=1))
# Otherwise, the faceting object gets a chance to weigh in.
class MockFacets(object):
max_cache_age = 22
facets = MockFacets()
assert 22 == m(None, "feed type", facets=facets)
# If there is no override and the faceting object doesn't
# care, CachedFeed.max_cache_age depends on
# WorkList.max_cache_age. This method can return a few
# different data types.
class MockWorklist(object):
def max_cache_age(self, type):
return dict(
number=1,
timedelta=datetime.timedelta(seconds=2),
expensive=CachedFeed.CACHE_FOREVER,
dont_cache=None,
)[type]
# The result is always either a number of seconds or
# CACHE_FOREVER.
wl = MockWorklist()
assert 1 == m(wl, "number", None)
assert 2 == m(wl, "timedelta", None)
assert 0 == m(wl, "dont_cache", None)
assert CachedFeed.CACHE_FOREVER == m(wl, "expensive", None)
# The faceting object still takes precedence, assuming it has
# an opinion.
facets.max_cache_age = None
assert CachedFeed.CACHE_FOREVER == m(wl, "expensive", facets)
facets.max_cache_age = 22
assert 22 == m(wl, "expensive", facets)
# And an override takes precedence over that.
assert 60 == m(wl, "expensive", facets, 60)
def test__prepare_keys(self):
# Verify the method that turns WorkList, Facets, and Pagination
# into a unique set of values for CachedFeed fields.
# First, prepare some mock classes.
class MockCachedFeed(CachedFeed):
feed_type_called_with = None
@classmethod
def feed_type(cls, worklist, facets):
cls.feed_type_called_with = (worklist, facets)
return "mock type"
class MockFacets(object):
query_string = b"facets query string"
class MockPagination(object):
query_string = b"pagination query string"
m = MockCachedFeed._prepare_keys
# A WorkList of some kind is required.
with pytest.raises(ValueError) as excinfo:
m(self._db, None, MockFacets, MockPagination)
assert "Cannot prepare a CachedFeed without a WorkList." in str(excinfo.value)
# Basic Lane case, no facets or pagination.
lane = self._lane()
# The response object is a named tuple. feed_type, library and
# lane_id are the only members set.
keys = m(self._db, lane, None, None)
assert "mock type" == keys.feed_type
assert lane.library == keys.library
assert None == keys.work
assert lane.id == keys.lane_id
assert None == keys.unique_key
assert u'' == keys.facets_key
assert u'' == keys.pagination_key
# When pagination and/or facets are available, facets_key and
# pagination_key are set appropriately.
keys = m(self._db, lane, MockFacets, MockPagination)
assert u"facets query string" == keys.facets_key
assert u"pagination query string" == keys.pagination_key
# Now we can check that feed_type was obtained by passing
# `worklist` and `facets` into MockCachedFeed.feed_type.
assert "mock type" == keys.feed_type
assert (lane, MockFacets) == MockCachedFeed.feed_type_called_with
# When a WorkList is used instead of a Lane, keys.lane_id is None
# but keys.unique_key is set to worklist.unique_key.
worklist = WorkList()
worklist.initialize(
library=self._default_library, display_name="wl",
languages=["eng", "spa"], audiences=[Classifier.AUDIENCE_CHILDREN]
)
keys = m(self._db, worklist, None, None)
assert "mock type" == keys.feed_type
assert worklist.get_library(self._db) == keys.library
assert None == keys.work
assert None == keys.lane_id
assert "wl-eng,spa-Children" == keys.unique_key
assert keys.unique_key == worklist.unique_key
assert u'' == keys.facets_key
assert u'' == keys.pagination_key
# When a WorkList is associated with a specific .work,
# that information is included as keys.work.
work = object()
worklist.work = work
keys = m(self._db, worklist, None, None)
assert work == keys.work
def test__should_refresh(self):
# Test the algorithm that tells whether a CachedFeed is stale.
m = CachedFeed._should_refresh
# If there's no CachedFeed, we must always refresh.
assert True == m(None, object())
class MockCachedFeed(object):
def __init__(self, timestamp):
self.timestamp = timestamp
now = datetime.datetime.utcnow()
# This feed was generated five minutes ago.
five_minutes_old = MockCachedFeed(
now - datetime.timedelta(minutes=5)
)
# This feed was generated a thousand years ago.
ancient = MockCachedFeed(
now - datetime.timedelta(days=1000*365)
)
# If we intend to cache forever, then even a thousand-year-old
# feed shouldn't be refreshed.
assert False == m(ancient, CachedFeed.CACHE_FOREVER)
# Otherwise, it comes down to a date comparison.
# If we're caching a feed for ten minutes, then the
# five-minute-old feed should not be refreshed.
assert False == m(five_minutes_old, 600)
# If we're caching a feed for only a few seconds (or not at all),
# then the five-minute-old feed should be refreshed.
assert True == m(five_minutes_old, 0)
assert True == m(five_minutes_old, 1)
# Realistic end-to-end tests.
def test_lifecycle_with_lane(self):
facets = Facets.default(self._default_library)
pagination = Pagination.default()
lane = self._lane(u"My Lane", languages=['eng','chi'])
# Fetch a cached feed from the database. It comes out updated.
refresher = MockFeedGenerator()
args = (self._db, lane, facets, pagination, refresher)
feed = CachedFeed.fetch(*args, max_age=0, raw=True)
assert "This is feed #1" == feed.content
assert pagination.query_string == feed.pagination
assert facets.query_string == feed.facets
assert lane.id == feed.lane_id
# Fetch it again, with a high max_age, and it's cached!
feed = CachedFeed.fetch(*args, max_age=1000, raw=True)
assert "This is feed #1" == feed.content
# Fetch it with a low max_age, and it gets updated again.
feed = CachedFeed.fetch(*args, max_age=0, raw=True)
assert "This is feed #2" == feed.content
# The special constant CACHE_FOREVER means it's always cached.
feed = CachedFeed.fetch(*args, max_age=CachedFeed.CACHE_FOREVER, raw=True)
assert "This is feed #2" == feed.content
def test_lifecycle_with_worklist(self):
facets = Facets.default(self._default_library)
pagination = Pagination.default()
lane = WorkList()
lane.initialize(self._default_library)
# Fetch a cached feed from the database. It comes out updated.
refresher = MockFeedGenerator()
args = (self._db, lane, facets, pagination, refresher)
feed = CachedFeed.fetch(*args, max_age=0, raw=True)
assert "This is feed #1" == feed.content
assert pagination.query_string == feed.pagination
assert facets.query_string == feed.facets
assert None == feed.lane_id
assert lane.unique_key == feed.unique_key
# Fetch it again, with a high max_age, and it's cached!
feed = CachedFeed.fetch(*args, max_age=1000, raw=True)
assert "This is feed #1" == feed.content
# Fetch it with a low max_age, and it gets updated again.
feed = CachedFeed.fetch(*args, max_age=0, raw=True)
assert "This is feed #2" == feed.content
# The special constant CACHE_FOREVER means it's always cached.
feed = CachedFeed.fetch(
*args, max_age=CachedFeed.CACHE_FOREVER, raw=True
)
assert "This is feed #2" == feed.content
```
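Condensed from the tests above, the `CachedFeed.fetch` calling pattern: a refresher callable produces the feed when needed, `max_age` controls staleness, and `raw=True` returns the database row instead of an `OPDSFeedResponse`. The `_db`, `worklist`, `facets` and `pagination` objects are assumed to come from the same fixtures the tests use:

```python
def refresher():
    # Called only when there is no cached copy or the cached copy is stale.
    return "Freshly generated feed."

# raw=True hands back the CachedFeed row itself...
feed = CachedFeed.fetch(
    _db, worklist, facets, pagination, refresher, max_age=0, raw=True
)
assert feed.content == "Freshly generated feed."

# ...while the default return type is an OPDSFeedResponse whose max_age the
# client sees (CACHE_FOREVER only ever applies to the database copy).
response = CachedFeed.fetch(_db, worklist, facets, pagination, refresher, max_age=600)
assert response.max_age == 600
```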
#### File: tests/models/test_circulationevent.py
```python
import pytest
import datetime
from sqlalchemy.exc import IntegrityError
from ...testing import DatabaseTest
from ...model import (
create,
get_one_or_create
)
from ...model.circulationevent import CirculationEvent
from ...model.datasource import DataSource
from ...model.identifier import Identifier
from ...model.licensing import LicensePool
class TestCirculationEvent(DatabaseTest):
def _event_data(self, **kwargs):
for k, default in (
("source", DataSource.OVERDRIVE),
("id_type", Identifier.OVERDRIVE_ID),
("start", datetime.datetime.utcnow()),
("type", CirculationEvent.DISTRIBUTOR_LICENSE_ADD),
):
kwargs.setdefault(k, default)
if 'old_value' in kwargs and 'new_value' in kwargs:
kwargs['delta'] = kwargs['new_value'] - kwargs['old_value']
return kwargs
def _get_datetime(self, data, key):
date = data.get(key, None)
if not date:
return None
elif isinstance(date, datetime.date):
return date
else:
return datetime.datetime.strptime(date, CirculationEvent.TIME_FORMAT)
def _get_int(self, data, key):
value = data.get(key, None)
if not value:
return value
else:
return int(value)
def from_dict(self, data):
_db = self._db
# Identify the source of the event.
source_name = data['source']
source = DataSource.lookup(_db, source_name)
# Identify which LicensePool the event is talking about.
foreign_id = data['id']
identifier_type = source.primary_identifier_type
collection = data['collection']
license_pool, was_new = LicensePool.for_foreign_id(
_db, source, identifier_type, foreign_id, collection=collection
)
# Finally, gather some information about the event itself.
type = data.get("type")
start = self._get_datetime(data, 'start')
end = self._get_datetime(data, 'end')
old_value = self._get_int(data, 'old_value')
new_value = self._get_int(data, 'new_value')
delta = self._get_int(data, 'delta')
event, was_new = get_one_or_create(
_db, CirculationEvent, license_pool=license_pool,
type=type, start=start,
create_method_kwargs=dict(
old_value=old_value,
new_value=new_value,
delta=delta,
end=end)
)
return event, was_new
def test_new_title(self):
# Here's a new title.
collection = self._collection()
data = self._event_data(
source=DataSource.OVERDRIVE,
id="{1-2-3}",
type=CirculationEvent.DISTRIBUTOR_LICENSE_ADD,
collection=collection,
old_value=0,
delta=2,
new_value=2,
)
# Turn it into an event and see what happens.
event, ignore = self.from_dict(data)
# The event is associated with the correct data source.
assert DataSource.OVERDRIVE == event.license_pool.data_source.name
# The event identifies a work by its ID plus the data source's
# primary identifier and its collection.
assert Identifier.OVERDRIVE_ID == event.license_pool.identifier.type
assert "{1-2-3}" == event.license_pool.identifier.identifier
assert collection == event.license_pool.collection
# The number of licenses has not been set to the new value.
# The creator of a circulation event is responsible for also
# updating the dataset.
assert 0 == event.license_pool.licenses_owned
def test_log(self):
# Basic test of CirculationEvent.log.
pool = self._licensepool(edition=None)
library = self._default_library
event_name = CirculationEvent.DISTRIBUTOR_CHECKOUT
old_value = 10
new_value = 8
start = datetime.datetime(2019, 1, 1)
end = datetime.datetime(2019, 1, 2)
location = "Westgate Branch"
m = CirculationEvent.log
event, is_new = m(
self._db, license_pool=pool, event_name=event_name,
library=library, old_value=old_value, new_value=new_value,
start=start, end=end, location=location
)
assert True == is_new
assert pool == event.license_pool
assert library == event.library
assert -2 == event.delta # calculated from old_value and new_value
assert start == event.start
assert end == event.end
assert location == event.location
# If log finds another event with the same license pool,
# library, event name, and start date, that event is returned
# unchanged.
event, is_new = m(
self._db, license_pool=pool, event_name=event_name,
library=library, start=start,
# These values will be ignored.
old_value=500, new_value=200,
end=datetime.datetime.utcnow(),
location="another location"
)
assert False == is_new
assert pool == event.license_pool
assert library == event.library
assert -2 == event.delta
assert start == event.start
assert end == event.end
assert location == event.location
# If no timestamp is provided, the current time is used. This
# is the most common case, so basically a new event will be
# created each time you call log().
event, is_new = m(
self._db, license_pool=pool, event_name=event_name,
library=library, old_value=old_value, new_value=new_value,
end=end, location=location
)
assert (datetime.datetime.utcnow() - event.start).total_seconds() < 2
assert True == is_new
assert pool == event.license_pool
assert library == event.library
assert -2 == event.delta
assert end == event.end
assert location == event.location
def test_uniqueness_constraints_no_library(self):
# If library is null, then license_pool + type + start must be
# unique.
pool = self._licensepool(edition=None)
now = datetime.datetime.utcnow()
kwargs = dict(
license_pool=pool, type=CirculationEvent.DISTRIBUTOR_TITLE_ADD,
)
event = create(self._db, CirculationEvent, start=now, **kwargs)
# Different timestamp -- no problem.
now2 = datetime.datetime.utcnow()
event2 = create(self._db, CirculationEvent, start=now2, **kwargs)
assert event != event2
# Reuse the timestamp and you get an IntegrityError which ruins the
# entire transaction.
pytest.raises(
IntegrityError, create, self._db, CirculationEvent, start=now,
**kwargs
)
self._db.rollback()
def test_uniqueness_constraints_with_library(self):
# If library is provided, then license_pool + library + type +
# start must be unique.
pool = self._licensepool(edition=None)
now = datetime.datetime.utcnow()
kwargs = dict(
license_pool=pool,
library=self._default_library,
type=CirculationEvent.DISTRIBUTOR_TITLE_ADD,
)
event = create(self._db, CirculationEvent, start=now, **kwargs)
# Different timestamp -- no problem.
now2 = datetime.datetime.utcnow()
event2 = create(self._db, CirculationEvent, start=now2, **kwargs)
assert event != event2
# Reuse the timestamp and you get an IntegrityError which ruins the
# entire transaction.
pytest.raises(
IntegrityError, create, self._db, CirculationEvent, start=now,
**kwargs
)
self._db.rollback()
```
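The tests above pin down the contract of CirculationEvent.log: delta is derived from old_value and new_value, start defaults to the current time, and an event is treated as unique on license pool, library, event type, and start. The following is a minimal, self-contained sketch of that contract using plain dicts; it illustrates the behaviour being tested, not the project's implementation, and every name in it is made up for the example.

```python
# Minimal sketch of the event-logging contract exercised above. Everything
# here (the dict store, the function name) is illustrative; the real code
# works through SQLAlchemy models and get_one_or_create.
import datetime

_events = {}  # (pool_id, library_id, event_type, start) -> event dict


def log_event(pool_id, library_id, event_type, old_value=None,
              new_value=None, start=None, end=None, location=None):
    # If no timestamp is provided, the current time is used.
    start = start or datetime.datetime.utcnow()
    key = (pool_id, library_id, event_type, start)
    if key in _events:
        # Same pool + library + type + start: return the existing event
        # unchanged, mirroring the uniqueness constraint in the tests.
        return _events[key], False
    delta = None
    if old_value is not None and new_value is not None:
        delta = new_value - old_value  # e.g. 10 -> 8 gives a delta of -2
    event = dict(
        pool_id=pool_id, library_id=library_id, type=event_type,
        old_value=old_value, new_value=new_value, delta=delta,
        start=start, end=end, location=location,
    )
    _events[key] = event
    return event, True
```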
#### File: tests/models/test_edition.py
```python
import datetime
from ...testing import DatabaseTest
from ...model import (
get_one_or_create,
PresentationCalculationPolicy,
)
from ...model.constants import MediaTypes
from ...model.coverage import CoverageRecord
from ...model.contributor import Contributor
from ...model.datasource import DataSource
from ...model.edition import Edition
from ...model.identifier import Identifier
from ...model.licensing import DeliveryMechanism
from ...model.resource import (
Hyperlink,
Representation,
)
class TestEdition(DatabaseTest):
def test_medium_from_media_type(self):
# Verify that we can guess a value for Edition.medium from a
# media type.
m = Edition.medium_from_media_type
for audio_type in MediaTypes.AUDIOBOOK_MEDIA_TYPES:
assert Edition.AUDIO_MEDIUM == m(audio_type)
assert Edition.AUDIO_MEDIUM == m(audio_type + ";param=value")
for book_type in MediaTypes.BOOK_MEDIA_TYPES:
assert Edition.BOOK_MEDIUM == m(book_type)
assert Edition.BOOK_MEDIUM == m(book_type + ";param=value")
assert Edition.BOOK_MEDIUM == m(DeliveryMechanism.ADOBE_DRM)
def test_license_pools(self):
# Here are two collections that provide access to the same book.
c1 = self._collection()
c2 = self._collection()
edition, lp1 = self._edition(with_license_pool=True)
lp2 = self._licensepool(edition=edition, collection=c2)
# Two LicensePools for the same work.
assert lp1.identifier == lp2.identifier
# Edition.license_pools contains both.
assert set([lp1, lp2]) == set(edition.license_pools)
def test_author_contributors(self):
data_source = DataSource.lookup(self._db, DataSource.GUTENBERG)
id = self._str
type = Identifier.GUTENBERG_ID
edition, was_new = Edition.for_foreign_id(
self._db, data_source, type, id
)
# We've listed the same person as primary author and author.
[alice], ignore = Contributor.lookup(self._db, "<NAME>")
edition.add_contributor(
alice, [Contributor.AUTHOR_ROLE, Contributor.PRIMARY_AUTHOR_ROLE]
)
# We've listed a different person as illustrator.
[bob], ignore = Contributor.lookup(self._db, "Bitshifter, Bob")
edition.add_contributor(bob, [Contributor.ILLUSTRATOR_ROLE])
# Both contributors show up in .contributors.
assert set([alice, bob]) == edition.contributors
# Only the author shows up in .author_contributors, and she
# only shows up once.
assert [alice] == edition.author_contributors
def test_for_foreign_id(self):
"""Verify we can get a data source's view of a foreign id."""
data_source = DataSource.lookup(self._db, DataSource.GUTENBERG)
id = "549"
type = Identifier.GUTENBERG_ID
record, was_new = Edition.for_foreign_id(
self._db, data_source, type, id)
assert data_source == record.data_source
identifier = record.primary_identifier
assert id == identifier.identifier
assert type == identifier.type
assert True == was_new
assert [identifier] == record.equivalent_identifiers()
# We can get the same work record by providing only the name
# of the data source.
record, was_new = Edition.for_foreign_id(
self._db, DataSource.GUTENBERG, type, id)
assert data_source == record.data_source
assert identifier == record.primary_identifier
assert False == was_new
def test_missing_coverage_from(self):
gutenberg = DataSource.lookup(self._db, DataSource.GUTENBERG)
oclc = DataSource.lookup(self._db, DataSource.OCLC)
web = DataSource.lookup(self._db, DataSource.WEB)
# Here are two Gutenberg records.
g1, ignore = Edition.for_foreign_id(
self._db, gutenberg, Identifier.GUTENBERG_ID, "1")
g2, ignore = Edition.for_foreign_id(
self._db, gutenberg, Identifier.GUTENBERG_ID, "2")
# One of them has coverage from OCLC Classify
c1 = self._coverage_record(g1, oclc)
# The other has coverage from a specific operation on OCLC Classify
c2 = self._coverage_record(g2, oclc, "some operation")
# Here's a web record, just sitting there.
w, ignore = Edition.for_foreign_id(
self._db, web, Identifier.URI, "http://www.foo.com/")
# missing_coverage_from picks up the Gutenberg record with no
# coverage from OCLC. It doesn't pick up the other
# Gutenberg record, and it doesn't pick up the web record.
[in_gutenberg_but_not_in_oclc] = Edition.missing_coverage_from(
self._db, gutenberg, oclc).all()
assert g2 == in_gutenberg_but_not_in_oclc
# If we ask about a specific operation, we get the Gutenberg
# record that has coverage for that operation, but not the one
# that has generic OCLC coverage.
[has_generic_coverage_only] = Edition.missing_coverage_from(
self._db, gutenberg, oclc, "some operation").all()
assert g1 == has_generic_coverage_only
# We don't put web sites into OCLC, so this will pick up the
# web record (but not the Gutenberg record).
[in_web_but_not_in_oclc] = Edition.missing_coverage_from(
self._db, web, oclc).all()
assert w == in_web_but_not_in_oclc
# We don't use the web as a source of coverage, so this will
# return both Gutenberg records (but not the web record).
assert [g1.id, g2.id] == sorted([x.id for x in Edition.missing_coverage_from(
self._db, gutenberg, web)])
def test_sort_by_priority(self):
# Make editions created by the license source, the metadata
# wrangler, and library staff.
admin = self._edition(data_source_name=DataSource.LIBRARY_STAFF, with_license_pool=False)
od = self._edition(data_source_name=DataSource.OVERDRIVE, with_license_pool=False)
mw = self._edition(data_source_name=DataSource.METADATA_WRANGLER, with_license_pool=False)
# Create an invalid edition with no data source. (This shouldn't
# happen.)
no_data_source = self._edition(with_license_pool=False)
no_data_source.data_source = None
def ids(l):
return [x for x in l]
# The invalid edition is the lowest priority. The admin
# interface and metadata wrangler take precedence over any
# other data sources.
expect = [no_data_source, od, mw, admin]
actual = Edition.sort_by_priority(expect)
assert ids(expect) == ids(actual)
# If you specify which data source is associated with the
# license for the book, you will boost its priority above that
# of the metadata wrangler.
expect = [no_data_source, mw, od, admin]
actual = Edition.sort_by_priority(expect, od.data_source)
assert ids(expect) == ids(actual)
def test_equivalent_identifiers(self):
edition = self._edition()
identifier = self._identifier()
data_source = DataSource.lookup(self._db, DataSource.OCLC)
identifier.equivalent_to(data_source, edition.primary_identifier, 0.6)
policy = PresentationCalculationPolicy(
equivalent_identifier_threshold=0.5
)
assert (set([identifier, edition.primary_identifier]) ==
set(edition.equivalent_identifiers(policy=policy)))
policy.equivalent_identifier_threshold = 0.7
assert (set([edition.primary_identifier]) ==
set(edition.equivalent_identifiers(policy=policy)))
def test_recursive_edition_equivalence(self):
# Here's a Edition for a Project Gutenberg text.
gutenberg, gutenberg_pool = self._edition(
data_source_name=DataSource.GUTENBERG,
identifier_type=Identifier.GUTENBERG_ID,
identifier_id="1",
with_open_access_download=True,
title="Original Gutenberg text")
# Here's a Edition for an Open Library text.
open_library, open_library_pool = self._edition(
data_source_name=DataSource.OPEN_LIBRARY,
identifier_type=Identifier.OPEN_LIBRARY_ID,
identifier_id="W1111",
with_open_access_download=True,
title="Open Library record")
# We've learned from OCLC Classify that the Gutenberg text is
# equivalent to a certain OCLC Number. We've learned from OCLC
# Linked Data that the Open Library text is equivalent to the
# same OCLC Number.
oclc_classify = DataSource.lookup(self._db, DataSource.OCLC)
oclc_linked_data = DataSource.lookup(self._db, DataSource.OCLC_LINKED_DATA)
oclc_number, ignore = Identifier.for_foreign_id(
self._db, Identifier.OCLC_NUMBER, "22")
gutenberg.primary_identifier.equivalent_to(
oclc_classify, oclc_number, 1)
open_library.primary_identifier.equivalent_to(
oclc_linked_data, oclc_number, 1)
# Here's a Edition for a Recovering the Classics cover.
web_source = DataSource.lookup(self._db, DataSource.WEB)
recovering, ignore = Edition.for_foreign_id(
self._db, web_source, Identifier.URI,
"http://recoveringtheclassics.com/pride-and-prejudice.jpg")
recovering.title = "Recovering the Classics cover"
# We've manually associated that Edition's URI directly
# with the Project Gutenberg text.
manual = DataSource.lookup(self._db, DataSource.MANUAL)
gutenberg.primary_identifier.equivalent_to(
manual, recovering.primary_identifier, 1)
# Finally, here's a completely unrelated Edition, which
# will not be showing up.
gutenberg2, gutenberg2_pool = self._edition(
data_source_name=DataSource.GUTENBERG,
identifier_type=Identifier.GUTENBERG_ID,
identifier_id="2",
with_open_access_download=True,
title="Unrelated Gutenberg record.")
# When we call equivalent_editions on the Project Gutenberg
# Edition, we get three Editions: the Gutenberg record
# itself, the Open Library record, and the Recovering the
# Classics record.
#
# We get the Open Library record because it's associated with
# the same OCLC Number as the Gutenberg record. We get the
# Recovering the Classics record because it's associated
# directly with the Gutenberg record.
results = list(gutenberg.equivalent_editions())
assert 3 == len(results)
assert gutenberg in results
assert open_library in results
assert recovering in results
# Here's a Work that incorporates one of the Gutenberg records.
work = self._work()
work.license_pools.extend([gutenberg2_pool])
# Its set-of-all-editions contains only one record.
assert 1 == work.all_editions().count()
# If we add the other Gutenberg record to it, then its
# set-of-all-editions is extended by that record, *plus*
# all the Editions equivalent to that record.
work.license_pools.extend([gutenberg_pool])
assert 4 == work.all_editions().count()
def test_calculate_presentation_title(self):
wr = self._edition(title="The Foo")
wr.calculate_presentation()
assert "Foo, The" == wr.sort_title
wr = self._edition(title="A Foo")
wr.calculate_presentation()
assert "Foo, A" == wr.sort_title
def test_calculate_presentation_missing_author(self):
wr = self._edition()
self._db.delete(wr.contributions[0])
self._db.commit()
wr.calculate_presentation()
assert u"[Unknown]" == wr.sort_author
assert u"[Unknown]" == wr.author
def test_calculate_presentation_author(self):
bob, ignore = self._contributor(sort_name="Bitshifter, Bob")
wr = self._edition(authors=bob.sort_name)
wr.calculate_presentation()
assert "<NAME>" == wr.author
assert "Bitshifter, Bob" == wr.sort_author
bob.display_name="<NAME>"
wr.calculate_presentation()
assert "<NAME>. Bitshifter" == wr.author
assert "Bitshifter, Bob" == wr.sort_author
kelly, ignore = self._contributor(sort_name="Accumulator, Kelly")
wr.add_contributor(kelly, Contributor.AUTHOR_ROLE)
wr.calculate_presentation()
assert "Kelly Accumulator, <NAME>" == wr.author
assert "Accumulator, Kelly ; Bitshifter, Bob" == wr.sort_author
def test_set_summary(self):
e, pool = self._edition(with_license_pool=True)
work = self._work(presentation_edition=e)
overdrive = DataSource.lookup(self._db, DataSource.OVERDRIVE)
# Set the work's summary.
l1, new = pool.add_link(Hyperlink.DESCRIPTION, None, overdrive, "text/plain",
"F")
work.set_summary(l1.resource)
assert l1.resource == work.summary
assert "F" == work.summary_text
# Remove the summary.
work.set_summary(None)
assert None == work.summary
assert "" == work.summary_text
def test_calculate_evaluate_summary_quality_with_privileged_data_sources(self):
e, pool = self._edition(with_license_pool=True)
oclc = DataSource.lookup(self._db, DataSource.OCLC_LINKED_DATA)
overdrive = DataSource.lookup(self._db, DataSource.OVERDRIVE)
# There's a perfunctory description from Overdrive.
l1, new = pool.add_link(Hyperlink.SHORT_DESCRIPTION, None, overdrive, "text/plain",
"F")
overdrive_resource = l1.resource
# There's a much better description from OCLC Linked Data.
l2, new = pool.add_link(Hyperlink.DESCRIPTION, None, oclc, "text/plain",
"""Nothing about working with his former high school crush, <NAME>, is ideal. Still, if <NAME> intends to save his grandmother's bakery, he must. Good thing he has a lot of ideas he can't wait to implement. He never imagines Stephanie would have her own ideas for the business. Or that they would clash with his!""")
oclc_resource = l2.resource
# In a head-to-head evaluation, the OCLC Linked Data description wins.
ids = [e.primary_identifier.id]
champ1, resources = Identifier.evaluate_summary_quality(self._db, ids)
assert set([overdrive_resource, oclc_resource]) == set(resources)
assert oclc_resource == champ1
# But if we say that Overdrive is the privileged data source, it wins
# automatically. The other resource isn't even considered.
champ2, resources2 = Identifier.evaluate_summary_quality(
self._db, ids, [overdrive])
assert overdrive_resource == champ2
assert [overdrive_resource] == resources2
# If we say that some other data source is privileged, and
# there are no descriptions from that data source, a
# head-to-head evaluation is performed, and OCLC Linked Data
# wins.
threem = DataSource.lookup(self._db, DataSource.THREEM)
champ3, resources3 = Identifier.evaluate_summary_quality(
self._db, ids, [threem])
assert set([overdrive_resource, oclc_resource]) == set(resources3)
assert oclc_resource == champ3
# If there are two privileged data sources and there's no
# description from the first, the second is used.
champ4, resources4 = Identifier.evaluate_summary_quality(
self._db, ids, [threem, overdrive])
assert [overdrive_resource] == resources4
assert overdrive_resource == champ4
# Even an empty string wins if it's from the most privileged data source.
# This is not a silly example. The librarian may choose to set the description
# to an empty string in the admin interface, to override a bad overdrive/etc. description.
staff = DataSource.lookup(self._db, DataSource.LIBRARY_STAFF)
l3, new = pool.add_link(Hyperlink.SHORT_DESCRIPTION, None, staff, "text/plain", "")
staff_resource = l3.resource
champ5, resources5 = Identifier.evaluate_summary_quality(
self._db, ids, [staff, overdrive])
assert [staff_resource] == resources5
assert staff_resource == champ5
def test_calculate_presentation_cover(self):
# Here's a cover image with a thumbnail.
representation, ignore = get_one_or_create(self._db, Representation, url="http://cover")
representation.media_type = Representation.JPEG_MEDIA_TYPE
representation.mirrored_at = datetime.datetime.now()
representation.mirror_url = "http://mirror/cover"
thumb, ignore = get_one_or_create(self._db, Representation, url="http://thumb")
thumb.media_type = Representation.JPEG_MEDIA_TYPE
thumb.mirrored_at = datetime.datetime.now()
thumb.mirror_url = "http://mirror/thumb"
thumb.thumbnail_of_id = representation.id
# Verify that a cover for the edition's primary identifier is used.
e, pool = self._edition(with_license_pool=True)
link, ignore = e.primary_identifier.add_link(Hyperlink.IMAGE, "http://cover", e.data_source)
link.resource.representation = representation
e.calculate_presentation()
assert "http://mirror/cover" == e.cover_full_url
assert "http://mirror/thumb" == e.cover_thumbnail_url
# Verify that a cover will be used even if it's some
# distance away along the identifier-equivalence line.
e, pool = self._edition(with_license_pool=True)
oclc_classify = DataSource.lookup(self._db, DataSource.OCLC)
oclc_number, ignore = Identifier.for_foreign_id(
self._db, Identifier.OCLC_NUMBER, "22")
e.primary_identifier.equivalent_to(
oclc_classify, oclc_number, 1)
link, ignore = oclc_number.add_link(Hyperlink.IMAGE, "http://cover", oclc_classify)
link.resource.representation = representation
e.calculate_presentation()
assert "http://mirror/cover" == e.cover_full_url
assert "http://mirror/thumb" == e.cover_thumbnail_url
# Verify that a nearby cover takes precedence over a
# faraway cover.
link, ignore = e.primary_identifier.add_link(Hyperlink.IMAGE, "http://nearby-cover", e.data_source)
nearby, ignore = get_one_or_create(self._db, Representation, url=link.resource.url)
nearby.media_type = Representation.JPEG_MEDIA_TYPE
nearby.mirrored_at = datetime.datetime.now()
nearby.mirror_url = "http://mirror/nearby-cover"
link.resource.representation = nearby
nearby_thumb, ignore = get_one_or_create(self._db, Representation, url="http://nearby-thumb")
nearby_thumb.media_type = Representation.JPEG_MEDIA_TYPE
nearby_thumb.mirrored_at = datetime.datetime.now()
nearby_thumb.mirror_url = "http://mirror/nearby-thumb"
nearby_thumb.thumbnail_of_id = nearby.id
e.calculate_presentation()
assert "http://mirror/nearby-cover" == e.cover_full_url
assert "http://mirror/nearby-thumb" == e.cover_thumbnail_url
# Verify that a thumbnail is used even if there's
# no full-sized cover.
e, pool = self._edition(with_license_pool=True)
link, ignore = e.primary_identifier.add_link(Hyperlink.THUMBNAIL_IMAGE, "http://thumb", e.data_source)
link.resource.representation = thumb
e.calculate_presentation()
assert None == e.cover_full_url
assert "http://mirror/thumb" == e.cover_thumbnail_url
def test_calculate_presentation_registers_coverage_records(self):
edition = self._edition()
identifier = edition.primary_identifier
# This Identifier has no CoverageRecords.
assert [] == identifier.coverage_records
# But once we calculate the Edition's presentation...
edition.calculate_presentation()
# Two CoverageRecords have been associated with this Identifier.
records = identifier.coverage_records
# One for setting the Edition metadata and one for choosing
# the Edition's cover.
expect = set([
CoverageRecord.SET_EDITION_METADATA_OPERATION,
CoverageRecord.CHOOSE_COVER_OPERATION]
)
assert expect == set([x.operation for x in records])
# We know the records are associated with this specific
# Edition, not just the Identifier, because each
# CoverageRecord's DataSource is set to this Edition's
# DataSource.
assert (
[edition.data_source, edition.data_source] ==
[x.data_source for x in records])
def test_no_permanent_work_id_for_edition_without_title_or_medium(self):
# An edition with no title or medium is not assigned a permanent work
# ID.
edition = self._edition()
assert None == edition.permanent_work_id
edition.title = ''
edition.calculate_permanent_work_id()
assert None == edition.permanent_work_id
edition.title = u'something'
edition.calculate_permanent_work_id()
assert None != edition.permanent_work_id
edition.medium = None
edition.calculate_permanent_work_id()
assert None == edition.permanent_work_id
def test_choose_cover_can_choose_full_image_and_thumbnail_separately(self):
edition = self._edition()
# This edition has a full-sized image and a thumbnail image,
# but there is no evidence that they are the _same_ image.
main_image, ignore = edition.primary_identifier.add_link(
Hyperlink.IMAGE, "http://main/",
edition.data_source, Representation.PNG_MEDIA_TYPE
)
thumbnail_image, ignore = edition.primary_identifier.add_link(
Hyperlink.THUMBNAIL_IMAGE, "http://thumbnail/",
edition.data_source, Representation.PNG_MEDIA_TYPE
)
# Nonetheless, Edition.choose_cover() will assign the
# potentially unrelated images to the Edition, because there
# is no better option.
edition.choose_cover()
assert main_image.resource.url == edition.cover_full_url
assert thumbnail_image.resource.url == edition.cover_thumbnail_url
# If there is a clear indication that one of the thumbnails
# associated with the identifier is a thumbnail _of_ the
# full-sized image...
thumbnail_2, ignore = edition.primary_identifier.add_link(
Hyperlink.THUMBNAIL_IMAGE, "http://thumbnail2/",
edition.data_source, Representation.PNG_MEDIA_TYPE
)
thumbnail_2.resource.representation.thumbnail_of = main_image.resource.representation
edition.choose_cover()
# ...That thumbnail will be chosen in preference to the
# possibly unrelated thumbnail.
assert main_image.resource.url == edition.cover_full_url
assert thumbnail_2.resource.url == edition.cover_thumbnail_url
```
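test_medium_from_media_type above documents the guessing behaviour of Edition.medium_from_media_type: any ";param=value" suffix is ignored, audiobook media types map to the audio medium, and book media types (plus the Adobe DRM type) map to the book medium. A rough, self-contained approximation of that lookup is sketched below; the media-type sets and the "Audio"/"Book" strings are placeholders standing in for MediaTypes.* and the Edition medium constants.

```python
# Rough stand-in for the media-type -> medium guess tested above. The sets
# below hold example values only; the real lists live in MediaTypes, and the
# real implementation also maps DeliveryMechanism.ADOBE_DRM to the book medium.
AUDIOBOOK_MEDIA_TYPES = {"audio/mpeg"}            # placeholder example values
BOOK_MEDIA_TYPES = {"application/epub+zip"}       # placeholder example values


def medium_from_media_type(media_type):
    if not media_type:
        return None
    base = media_type.split(";")[0].strip()  # drop any ";param=value" suffix
    if base in AUDIOBOOK_MEDIA_TYPES:
        return "Audio"   # stands in for Edition.AUDIO_MEDIUM
    if base in BOOK_MEDIA_TYPES:
        return "Book"    # stands in for Edition.BOOK_MEDIUM
    return None
```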
#### File: simplified-server-core/tests/test_external_list.py
```python
import datetime
from ..testing import (
DatabaseTest,
DummyMetadataClient,
)
from ..model import (
DataSource,
Edition,
Identifier,
Subject,
)
from ..external_list import (
CustomListFromCSV,
MembershipManager,
ClassificationBasedMembershipManager,
)
class TestCustomListFromCSV(DatabaseTest):
def setup_method(self):
super(TestCustomListFromCSV, self).setup_method()
self.data_source = DataSource.lookup(self._db, DataSource.LIBRARY_STAFF)
self.metadata = DummyMetadataClient()
self.metadata.lookups['Octavia Butler'] = 'Butler, Octavia'
self.l = CustomListFromCSV(self.data_source.name, "Test list",
metadata_client = self.metadata,
display_author_field='author',
identifier_fields={Identifier.ISBN: "isbn"})
self.custom_list, ignore = self._customlist(
data_source_name=self.data_source.name, num_entries=0)
self.now = datetime.datetime.utcnow()
DATE_FORMAT = "%Y/%m/%d %H:%M:%S"
def create_row(self, display_author=None, sort_author=None):
"""Create a dummy row for this tests's custom list."""
l = self.l
row = dict()
for scalarkey in (l.title_field, l.annotation_field,
l.annotation_author_name_field,
l.annotation_author_affiliation_field):
row[scalarkey] = self._str
display_author = display_author or self._str
fn = l.sort_author_field
if isinstance(fn, list):
fn = fn[0]
row[fn] = sort_author
row['isbn'] = self._isbn
for key in l.subject_fields.keys():
row[key] = ", ".join([self._str, self._str])
for timekey in (l.first_appearance_field,
l.published_field):
if isinstance(timekey, list):
timekey = timekey[0]
row[timekey] = self._time.strftime(self.DATE_FORMAT)
row[self.l.display_author_field] = display_author
return row
def test_annotation_citation(self):
m = self.l.annotation_citation
row = dict()
assert None == m(row)
row[self.l.annotation_author_name_field] = "Alice"
assert u" —Alice" == m(row)
row[self.l.annotation_author_affiliation_field] = "2nd Street Branch"
assert u" —Alice, 2nd Street Branch" == m(row)
del row[self.l.annotation_author_name_field]
assert None == m(row)
def test_row_to_metadata_complete_success(self):
row = self.create_row()
metadata = self.l.row_to_metadata(row)
assert row[self.l.title_field] == metadata.title
assert row['author'] == metadata.contributors[0].display_name
assert row['isbn'] == metadata.identifiers[0].identifier
expect_pub = datetime.datetime.strptime(
row['published'], self.DATE_FORMAT)
assert expect_pub == metadata.published
assert self.l.default_language == metadata.language
def test_metadata_to_list_entry_complete_success(self):
row = self.create_row(display_author="<NAME>")
metadata = self.l.row_to_metadata(row)
list_entry = self.l.metadata_to_list_entry(
self.custom_list, self.data_source, self.now, metadata)
e = list_entry.edition
assert row[self.l.title_field] == e.title
assert "<NAME>" == e.author
assert "<NAME>" == e.sort_author
i = e.primary_identifier
assert Identifier.ISBN == i.type
assert row['isbn'] == i.identifier
# There should be one description.
expect = row[self.l.annotation_field] + self.l.annotation_citation(row)
assert expect == list_entry.annotation
classifications = i.classifications
# There should be six classifications, two of type 'tag', two
# of type 'schema:audience', and two of type
# 'schema:typicalAgeRange'
assert 6 == len(classifications)
tags = [x for x in classifications if x.subject.type==Subject.TAG]
assert 2 == len(tags)
audiences = [x for x in classifications
if x.subject.type==Subject.FREEFORM_AUDIENCE]
assert 2 == len(audiences)
age_ranges = [x for x in classifications
if x.subject.type==Subject.AGE_RANGE]
assert 2 == len(age_ranges)
expect_first = datetime.datetime.strptime(
row[self.l.first_appearance_field], self.DATE_FORMAT)
assert expect_first == list_entry.first_appearance
assert self.now == list_entry.most_recent_appearance
def test_row_to_item_matching_work_found(self):
row = self.create_row(display_author="<NAME>")
work = self._work(title=row[self.l.title_field],
authors=['<NAME>'])
self._db.commit()
metadata = self.l.row_to_metadata(row)
list_entry = self.l.metadata_to_list_entry(
self.custom_list, self.data_source, self.now, metadata)
e = list_entry.edition
assert row[self.l.title_field] == e.title
assert "<NAME>" == e.author
assert "<NAME>" == e.sort_author
def test_non_default_language(self):
row = self.create_row()
row[self.l.language_field] = 'Spanish'
metadata = self.l.row_to_metadata(row)
list_entry = self.l.metadata_to_list_entry(
self.custom_list, self.data_source, self.now, metadata)
assert 'spa' == list_entry.edition.language
def test_overwrite_old_data(self):
self.l.overwrite_old_data = True
row1 = self.create_row()
row2 = self.create_row()
row3 = self.create_row()
for f in self.l.title_field, self.l.sort_author_field, self.l.display_author_field, 'isbn':
row2[f] = row1[f]
row3[f] = row1[f]
metadata = self.l.row_to_metadata(row1)
list_entry_1 = self.l.metadata_to_list_entry(
self.custom_list, self.data_source, self.now, metadata)
# Import from the second row, and (e.g.) the new annotation
# will overwrite the old annotation.
metadata2 = self.l.row_to_metadata(row2)
list_entry_2 = self.l.metadata_to_list_entry(
self.custom_list, self.data_source, self.now, metadata2)
assert list_entry_1 == list_entry_2
assert list_entry_1.annotation == list_entry_2.annotation
# There are still six classifications.
i = list_entry_1.edition.primary_identifier
assert 6 == len(i.classifications)
# Now import from the third row, but with
# overwrite_old_data set to False.
self.l.overwrite_old_data = False
metadata3 = self.l.row_to_metadata(row3)
list_entry_3 = self.l.metadata_to_list_entry(
self.custom_list, self.data_source, self.now, metadata3)
assert list_entry_3 == list_entry_1
# Now there are 12 classifications.
assert 12 == len(i.classifications)
class BooksInSeries(MembershipManager):
"""A sample implementation of MembershipManager that makes a CustomList
out of all books that are in some series.
"""
@property
def new_membership(self):
"""Only books that are part of a series should be in this list."""
return self._db.query(Edition).filter(Edition.series != None)
class TestMembershipManager(DatabaseTest):
def test_update(self):
# Create two books that are part of series, and one book that
# is not.
series1 = self._edition()
series1.series = "Series 1"
series2 = self._edition()
series2.series = "Series Two"
no_series = self._edition()
assert None == no_series.series
update_time = datetime.datetime(2015, 1, 1)
# To create the necessary mocked objects, _customlist calls _work,
# which calls _edition; that builds an edition and a license pool
# (through _licensepool) and then creates the work via get_one_or_create.
custom_list, ignore = self._customlist()
manager = BooksInSeries(custom_list)
manager.update(update_time)
[entry1] = [x for x in custom_list.entries if x.edition.series == "Series 1"]
[entry2] = [x for x in custom_list.entries if x.edition.series == "Series Two"]
assert update_time == entry1.first_appearance
assert update_time == entry1.most_recent_appearance
# In a shocking twist, one of the entries turns out not to
# have a series, while the entry previously thought not to
# have a series actually does.
series2.series = None
no_series.series = "Actually I do have a series."
self._db.commit()
new_update_time = datetime.datetime(2016, 1,1)
manager.update(new_update_time)
# Entry #2 has been removed from the list, and a new entry added.
[old_entry] = [x for x in custom_list.entries if x.edition.series == "Series 1"]
[new_entry] = [x for x in custom_list.entries if x.edition.series == "Actually I do have a series."]
assert update_time == old_entry.first_appearance
assert new_update_time == old_entry.most_recent_appearance
assert new_update_time == new_entry.first_appearance
assert new_update_time == new_entry.most_recent_appearance
def test_classification_based_membership_manager(self):
e1 = self._edition()
e2 = self._edition()
e3 = self._edition()
source = e1.data_source
e1.primary_identifier.classify(source, Subject.TAG, "GOOD FOOD")
e2.primary_identifier.classify(source, Subject.TAG, "barflies")
e3.primary_identifier.classify(source, Subject.TAG, "irrelevant")
custom_list, ignore = self._customlist()
fragments = ["foo", "bar"]
manager = ClassificationBasedMembershipManager(custom_list, fragments)
members = list(manager.new_membership)
assert 2 == len(members)
# e1 is a member of the list because its primary identifier is
# classified under a subject that matches %foo%.
#
# e2 is a member of the list because its primary identifier is
# classified under a subject that matches %bar%.
#
# e3 is not a member of the list.
assert e1 in members
assert e2 in members
```
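BooksInSeries above shows the full surface a MembershipManager subclass needs: a new_membership property that returns a query of Editions belonging on the list. The sketch below follows the same pattern for a single publisher. It assumes the base constructor takes just the custom list (inferred from how BooksInSeries is instantiated in the tests) and is meant to live inside the package, hence the relative imports copied from the test file.

```python
# Another MembershipManager in the BooksInSeries mould. This is a sketch:
# it assumes the base-class constructor signature used by the tests
# (manager = BooksInSeries(custom_list)) and relies on Edition.publisher,
# which the tests elsewhere set directly on presentation editions.
from ..external_list import MembershipManager
from ..model import Edition


class BooksFromPublisher(MembershipManager):
    """Keep a CustomList in sync with every Edition from one publisher."""

    def __init__(self, custom_list, publisher):
        super(BooksFromPublisher, self).__init__(custom_list)
        self.publisher = publisher

    @property
    def new_membership(self):
        """Only books from the configured publisher belong on this list."""
        return self._db.query(Edition).filter(
            Edition.publisher == self.publisher
        )
```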
#### File: simplified-server-core/tests/test_external_search.py
```python
import pytest
from collections import defaultdict
import datetime
import json
import logging
import re
import time
from psycopg2.extras import NumericRange
from ..testing import (
DatabaseTest,
)
from elasticsearch_dsl import Q
from elasticsearch_dsl.function import (
ScriptScore,
RandomScore,
)
from elasticsearch_dsl.query import (
Bool,
DisMax,
Query as elasticsearch_dsl_query,
MatchAll,
Match,
MatchNone,
MatchPhrase,
MultiMatch,
Nested,
Range,
Term,
Terms,
)
from elasticsearch.exceptions import ElasticsearchException
from ..config import (
Configuration,
CannotLoadConfiguration,
)
from ..lane import (
Facets,
FeaturedFacets,
Lane,
Pagination,
SearchFacets,
WorkList,
)
from ..metadata_layer import (
ContributorData,
IdentifierData,
)
from ..model import (
ConfigurationSetting,
Contribution,
Contributor,
DataSource,
Edition,
ExternalIntegration,
Genre,
Work,
WorkCoverageRecord,
get_one_or_create,
)
from ..external_search import (
CurrentMapping,
ExternalSearchIndex,
Filter,
Mapping,
MockExternalSearchIndex,
MockSearchResult,
Query,
QueryParser,
SearchBase,
SearchIndexCoverageProvider,
SortKeyPagination,
WorkSearchResult,
mock_search_index,
)
from ..classifier import Classifier
from ..problem_details import INVALID_INPUT
from ..testing import (
ExternalSearchTest,
EndToEndSearchTest,
)
RESEARCH = Term(audience=Classifier.AUDIENCE_RESEARCH.lower())
class TestExternalSearch(ExternalSearchTest):
def test_load(self):
# Normally, load() returns a brand new ExternalSearchIndex
# object.
loaded = ExternalSearchIndex.load(self._db, in_testing=True)
assert isinstance(loaded, ExternalSearchIndex)
# However, inside the mock_search_index context manager,
# load() returns whatever object was mocked.
mock = object()
with mock_search_index(mock):
assert mock == ExternalSearchIndex.load(self._db, in_testing=True)
def test_constructor(self):
# The configuration of the search ExternalIntegration becomes the
# configuration of the ExternalSearchIndex.
#
# This basically just verifies that the test search term is taken
# from the ExternalIntegration.
class MockIndex(ExternalSearchIndex):
def set_works_index_and_alias(self, _db):
self.set_works_index_and_alias_called_with = _db
index = MockIndex(self._db)
assert self._db == index.set_works_index_and_alias_called_with
assert "test_search_term" == index.test_search_term
# TODO: would be good to check the put_script calls, but the
# current constructor makes put_script difficult to mock.
def test_elasticsearch_error_in_constructor_becomes_cannotloadconfiguration(self):
"""If we're unable to establish a connection to the Elasticsearch
server, CannotLoadConfiguration (which the circulation manager can
understand) is raised instead of an Elasticsearch-specific exception.
"""
# Unlike other tests in this module, this one runs even if no
# ElasticSearch server is running, since it's testing what
# happens if there's a problem communicating with that server.
class Mock(ExternalSearchIndex):
def set_works_index_and_alias(self, _db):
raise ElasticsearchException("very bad")
with pytest.raises(CannotLoadConfiguration) as excinfo:
Mock(self._db)
assert "Exception communicating with Elasticsearch server: " in str(excinfo.value)
assert "very bad" in str(excinfo.value)
def test_works_index_name(self):
"""The name of the search index is the prefix (defined in
ExternalSearchTest.setup) plus a version number associated
with this version of the core code.
"""
assert "test_index-v4" == self.search.works_index_name(self._db)
def test_setup_index_creates_new_index(self):
current_index = self.search.works_index
# This calls self.search.setup_index (which is what we're testing)
# and also registers the index to be torn down at the end of the test.
self.setup_index('the_other_index')
# Both indices exist.
assert True == self.search.indices.exists(current_index)
assert True == self.search.indices.exists('the_other_index')
# The index for the app's search is still the original index.
assert current_index == self.search.works_index
# The alias hasn't been passed over to the new index.
alias = 'test_index-' + self.search.CURRENT_ALIAS_SUFFIX
assert alias == self.search.works_alias
assert True == self.search.indices.exists_alias(current_index, alias)
assert False == self.search.indices.exists_alias('the_other_index', alias)
def test_set_works_index_and_alias(self):
# If the index or alias don't exist, set_works_index_and_alias
# will create them.
self.integration.set_setting(ExternalSearchIndex.WORKS_INDEX_PREFIX_KEY, u'banana')
self.search.set_works_index_and_alias(self._db)
expected_index = 'banana-' + CurrentMapping.version_name()
expected_alias = 'banana-' + self.search.CURRENT_ALIAS_SUFFIX
assert expected_index == self.search.works_index
assert expected_alias == self.search.works_alias
# If the index and alias already exist, set_works_index_and_alias
# does nothing.
self.search.set_works_index_and_alias(self._db)
assert expected_index == self.search.works_index
assert expected_alias == self.search.works_alias
def test_setup_current_alias(self):
# The index was generated from the string in configuration.
version = CurrentMapping.version_name()
index_name = 'test_index-' + version
assert index_name == self.search.works_index
assert True == self.search.indices.exists(index_name)
# The alias is also created from the configuration.
alias = 'test_index-' + self.search.CURRENT_ALIAS_SUFFIX
assert alias == self.search.works_alias
assert True == self.search.indices.exists_alias(index_name, alias)
# If the -current alias is already set on a different index, it
# won't be reassigned. Instead, search will occur against the
# index itself.
ExternalSearchIndex.reset()
self.integration.set_setting(ExternalSearchIndex.WORKS_INDEX_PREFIX_KEY, u'my-app')
self.search = ExternalSearchIndex(self._db)
assert 'my-app-%s' % version == self.search.works_index
assert 'my-app-' + self.search.CURRENT_ALIAS_SUFFIX == self.search.works_alias
def test_transfer_current_alias(self):
# An error is raised if you try to set the alias to point to
# an index that doesn't already exist.
pytest.raises(
ValueError, self.search.transfer_current_alias, self._db,
'no-such-index'
)
original_index = self.search.works_index
# If the -current alias doesn't exist, it's created
# and everything is updated accordingly.
self.search.indices.delete_alias(
index=original_index, name='test_index-current', ignore=[404]
)
self.setup_index(new_index='test_index-v9999')
self.search.transfer_current_alias(self._db, 'test_index-v9999')
assert 'test_index-v9999' == self.search.works_index
assert 'test_index-current' == self.search.works_alias
# If the -current alias already exists on the index,
# it's used without a problem.
self.search.transfer_current_alias(self._db, 'test_index-v9999')
assert 'test_index-v9999' == self.search.works_index
assert 'test_index-current' == self.search.works_alias
# If the -current alias is being used on a different version of the
# index, it's deleted from that index and placed on the new one.
self.setup_index(original_index)
self.search.transfer_current_alias(self._db, original_index)
assert original_index == self.search.works_index
assert 'test_index-current' == self.search.works_alias
# It has been removed from other index.
assert False == self.search.indices.exists_alias(
index='test_index-v9999', name='test_index-current')
# And only exists on the new index.
alias_indices = self.search.indices.get_alias(name='test_index-current').keys()
assert [original_index] == alias_indices
# If the index doesn't have the same base name, an error is raised.
pytest.raises(
ValueError, self.search.transfer_current_alias, self._db,
'banana-v10'
)
def test_query_works(self):
# Verify that query_works operates by calling query_works_multi.
# The actual functionality of query_works and query_works_multi
# have many end-to-end tests in TestExternalSearchWithWorks.
class Mock(ExternalSearchIndex):
def __init__(self):
self.query_works_multi_calls = []
self.queued_results = []
def query_works_multi(self, queries, debug=False):
self.query_works_multi_calls.append((queries, debug))
return self.queued_results.pop()
search = Mock()
# If the filter is designed to match nothing,
# query_works_multi isn't even called -- we just return an
# empty list.
query = object()
pagination = object()
filter = Filter(match_nothing=True)
assert [] == search.query_works(query, filter, pagination)
assert [] == search.query_works_multi_calls
# Otherwise, query_works_multi is called with a list
# containing a single query, and the list of resultsets is
# turned into a single list of results.
search.queued_results.append([["r1", "r2"]])
filter = object()
results = search.query_works(query, filter, pagination)
assert ["r1", "r2"] == results
call = search.query_works_multi_calls.pop()
assert ([(query, filter, pagination)], False) == call
assert [] == search.query_works_multi_calls
# If no Pagination object is provided, a default is used.
search.queued_results.append([["r3", "r4"]])
results = search.query_works(query, filter, None, True)
assert ["r3", "r4"] == results
([query_tuple], debug) = search.query_works_multi_calls.pop()
assert True == debug
assert query == query_tuple[0]
assert filter == query_tuple[1]
pagination = query_tuple[2]
default = Pagination.default()
assert isinstance(pagination, Pagination)
assert pagination.offset == default.offset
assert pagination.size == default.size
def test__run_self_tests(self):
index = MockExternalSearchIndex()
# First, see what happens when the search returns no results.
test_results = [x for x in index._run_self_tests(self._db, in_testing=True)]
assert "Search results for 'a search term':" == test_results[0].name
assert True == test_results[0].success
assert [] == test_results[0].result
assert "Search document for 'a search term':" == test_results[1].name
assert True == test_results[1].success
assert "[]" == test_results[1].result
assert "Raw search results for 'a search term':" == test_results[2].name
assert True == test_results[2].success
assert [] == test_results[2].result
assert "Total number of search results for 'a search term':" == test_results[3].name
assert True == test_results[3].success
assert "0" == test_results[3].result
assert "Total number of documents in this search index:" == test_results[4].name
assert True == test_results[4].success
assert "0" == test_results[4].result
assert "Total number of documents per collection:" == test_results[5].name
assert True == test_results[5].success
assert "{}" == test_results[5].result
# Set up the search index so it will return a result.
collection = self._collection()
search_result = MockSearchResult(
"Sample Book Title", "author", {}, "id"
)
index.index("index", "doc type", "id", search_result)
test_results = [x for x in index._run_self_tests(self._db, in_testing=True)]
assert "Search results for 'a search term':" == test_results[0].name
assert True == test_results[0].success
assert ["Sample Book Title (author)"] == test_results[0].result
assert "Search document for 'a search term':" == test_results[1].name
assert True == test_results[1].success
result = json.loads(test_results[1].result)
sample_book = {"author": "author", "meta": {"id": "id", "_sort": [u'Sample Book Title', u'author', u'id']}, "id": "id", "title": "Sample Book Title"}
assert sample_book == result
assert "Raw search results for 'a search term':" == test_results[2].name
assert True == test_results[2].success
result = json.loads(test_results[2].result[0])
assert sample_book == result
assert "Total number of search results for 'a search term':" == test_results[3].name
assert True == test_results[3].success
assert "1" == test_results[3].result
assert "Total number of documents in this search index:" == test_results[4].name
assert True == test_results[4].success
assert "1" == test_results[4].result
assert "Total number of documents per collection:" == test_results[5].name
assert True == test_results[5].success
result = json.loads(test_results[5].result)
assert {collection.name: 1} == result
class TestCurrentMapping(object):
def test_character_filters(self):
# Verify the functionality of the regular expressions we tell
# Elasticsearch to use when normalizing fields that will be used
# for searching.
filters = []
for filter_name in CurrentMapping.AUTHOR_CHAR_FILTER_NAMES:
configuration = CurrentMapping.CHAR_FILTERS[filter_name]
find = re.compile(configuration['pattern'])
replace = configuration['replacement']
# Hack to (imperfectly) convert Java regex format to Python format.
# $1 -> \1
replace = replace.replace("$", "\\")
filters.append((find, replace))
def filters_to(start, finish):
"""When all the filters are applied to `start`,
the result is `finish`.
"""
for find, replace in filters:
start = find.sub(replace, start)
assert start == finish
# Only the primary author is considered for sorting purposes.
filters_to("<NAME> ; <NAME>", "<NAME>")
# The special system author '[Unknown]' is replaced with
# REPLACEMENT CHARACTER so it will be last in sorted lists.
filters_to("[Unknown]", u"\N{REPLACEMENT CHARACTER}")
# Periods are removed.
filters_to("<NAME>.", "<NAME>")
filters_to("<NAME>", "<NAME>")
# The initials of authors who go by initials are normalized
# so that their books all sort together.
filters_to("Wells, HG", "Wells, HG")
filters_to("Wells, H G", "Wells, HG")
filters_to("Wells, H.G.", "Wells, HG")
filters_to("Wells, H. G.", "Wells, HG")
# It works with up to three initials.
filters_to("<NAME>.", "Tolkien, JRR")
# Parentheticals are removed.
filters_to("<NAME>. (<NAME>)", "Wells, HG")
class TestExternalSearchWithWorks(EndToEndSearchTest):
"""These tests run against a real search index with works in it.
The setup is very slow, so all the tests are in the same method.
Don't add new methods to this class - add more tests into test_query_works,
or add a new test class.
"""
def populate_works(self):
_work = self.default_work
self.moby_dick = _work(
title="Mob<NAME>", authors="<NAME>", fiction=True,
)
self.moby_dick.presentation_edition.subtitle = "Or, the Whale"
self.moby_dick.presentation_edition.series = "Classics"
self.moby_dick.summary_text = "Ishmael"
self.moby_dick.presentation_edition.publisher = "Project Gutenberg"
self.moby_dick.last_update_time = datetime.datetime(2019, 1, 1)
self.moby_duck = _work(title="Moby Duck", authors="<NAME>", fiction=False)
self.moby_duck.presentation_edition.subtitle = "The True Story of 28,800 Bath Toys Lost at Sea"
self.moby_duck.summary_text = "A compulsively readable narrative"
self.moby_duck.presentation_edition.publisher = "Penguin"
self.moby_duck.last_update_time = datetime.datetime(2019, 1, 2)
# This book is not currently loanable. It will still show up
# in search results unless the library's settings disable it.
self.moby_duck.license_pools[0].licenses_available = 0
self.title_match = _work(title="Match")
self.subtitle_match = _work(title="SubtitleM")
self.subtitle_match.presentation_edition.subtitle = "Match"
self.summary_match = _work(title="SummaryM")
self.summary_match.summary_text = "It's a Match! The story of a work whose summary contained an important keyword."
self.publisher_match = _work(title="PublisherM")
self.publisher_match.presentation_edition.publisher = "Match"
self.tess = _work(title="Tess of the d'Urbervilles")
self.tiffany = _work(title="Breakfast at Tiffany's")
self.les_mis = _work()
self.les_mis.presentation_edition.title = u"Les Mis\u00E9rables"
self.modern_romance = _work(title="Modern Romance")
self.lincoln = _work(genre="Biography & Memoir", title="<NAME>")
self.washington = _work(genre="Biography", title="<NAME>")
self.lincoln_vampire = _work(title="<NAME>: <NAME>", genre="Fantasy")
self.children_work = _work(title="<NAME> Wonderland", audience=Classifier.AUDIENCE_CHILDREN)
self.all_ages_work = _work(title="The Annotated Alice", audience=Classifier.AUDIENCE_ALL_AGES)
self.ya_work = _work(title="Go Ask Alice", audience=Classifier.AUDIENCE_YOUNG_ADULT)
self.adult_work = _work(title="Still Alice", audience=Classifier.AUDIENCE_ADULT)
self.research_work = _work(
title="Curiouser and Curiouser: Surrealism and Repression in 'Alice in Wonderland'",
audience=Classifier.AUDIENCE_RESEARCH
)
self.ya_romance = _work(
title="Gumby In Love",
audience=Classifier.AUDIENCE_YOUNG_ADULT, genre="Romance"
)
self.ya_romance.presentation_edition.subtitle = (
"Modern Fairytale Series, Volume 7"
)
self.ya_romance.presentation_edition.series = "Modern Fairytales"
self.no_age = _work()
self.no_age.summary_text = "President Barack Obama's election in 2008 energized the United States"
# Set the series to the empty string rather than None -- this isn't counted
# as the book belonging to a series.
self.no_age.presentation_edition.series = ""
self.age_4_5 = _work()
self.age_4_5.target_age = NumericRange(4, 5, '[]')
self.age_4_5.summary_text = "President Barack Obama's election in 2008 energized the United States"
self.age_5_6 = _work(fiction=False)
self.age_5_6.target_age = NumericRange(5, 6, '[]')
self.obama = _work(
title="<NAME>", genre="Biography & Memoir"
)
self.obama.target_age = NumericRange(8, 8, '[]')
self.obama.summary_text = "President <NAME>'s election in 2008 energized the United States"
self.dodger = _work()
self.dodger.target_age = NumericRange(8, 8, '[]')
self.dodger.summary_text = "Willie finds himself running for student council president"
self.age_9_10 = _work()
self.age_9_10.target_age = NumericRange(9, 10, '[]')
self.age_9_10.summary_text = "President <NAME>'s election in 2008 energized the United States"
self.age_2_10 = _work()
self.age_2_10.target_age = NumericRange(2, 10, '[]')
self.pride = _work(title="Pride and Prejudice (E)")
self.pride.presentation_edition.medium = Edition.BOOK_MEDIUM
self.pride_audio = _work(title="Pride and Prejudice (A)")
self.pride_audio.presentation_edition.medium = Edition.AUDIO_MEDIUM
self.sherlock = _work(
title="The Adventures of Sherlock Holmes",
with_open_access_download=True
)
self.sherlock.presentation_edition.language = "eng"
self.sherlock_spanish = _work(title="Las Aventuras de Sherlock Holmes")
self.sherlock_spanish.presentation_edition.language = "spa"
# Create a custom list that contains a few books.
self.presidential, ignore = self._customlist(
name="Nonfiction about US Presidents", num_entries=0
)
for work in [self.washington, self.lincoln, self.obama]:
self.presidential.add_entry(work)
# Create a second collection that only contains a few books.
self.tiny_collection = self._collection("A Tiny Collection")
self.tiny_book = self._work(
title="A Tiny Book", with_license_pool=True,
collection=self.tiny_collection
)
self.tiny_book.license_pools[0].self_hosted = True
# Both collections contain 'The Adventures of Sherlock
# Holmes", but each collection licenses the book through a
# different mechanism.
self.sherlock_pool_2 = self._licensepool(
edition=self.sherlock.presentation_edition,
collection=self.tiny_collection
)
sherlock_2, is_new = self.sherlock_pool_2.calculate_work()
assert self.sherlock == sherlock_2
assert 2 == len(self.sherlock.license_pools)
# These books look good for some search results, but they
# will be filtered out by the universal filters, and will
# never show up in results.
# We own no copies of this book.
self.no_copies = _work(title="Moby Dick 2")
self.no_copies.license_pools[0].licenses_owned = 0
# This book's only license pool has been suppressed.
self.suppressed = _work(title="Moby Dick 2")
self.suppressed.license_pools[0].suppressed = True
# This book is not presentation_ready.
self.not_presentation_ready = _work(title="Moby Dick 2")
self.not_presentation_ready.presentation_ready = False
def test_query_works(self):
# An end-to-end test of the search functionality.
#
# Works created during setup are added to a real search index.
# We then run actual Elasticsearch queries against the
# search index and verify that the work IDs returned
# are the ones we expect.
# First, run some basic checks to make sure the search
# document query doesn't contain over-zealous joins. This test
# class is the main place where we make a large number of
# works and generate search documents for them.
assert 1 == len(self.moby_dick.to_search_document()['licensepools'])
assert ("Audio" ==
self.pride_audio.to_search_document()['licensepools'][0]['medium'])
# Set up convenient aliases for methods we'll be calling a
# lot.
query = self.search.query_works
expect = self._expect_results
# First, test pagination.
first_item = Pagination(size=1, offset=0)
expect(self.moby_dick, "moby dick", None, first_item)
second_item = first_item.next_page
expect(self.moby_duck, "moby dick", None, second_item)
two_per_page = Pagination(size=2, offset=0)
expect(
[self.moby_dick, self.moby_duck],
"moby dick", None, two_per_page
)
# Now try some different search queries.
# Search in title.
assert 2 == len(query("moby"))
# Search in author name
expect(self.moby_dick, "melville")
# Search in subtitle
expect(self.moby_dick, "whale")
# Search in series.
expect(self.moby_dick, "classics")
# Search in summary.
expect(self.moby_dick, "ishmael")
# Search in publisher name.
expect(self.moby_dick, "gutenberg")
# Title > subtitle > word found in summary > publisher
order = [
self.title_match,
self.subtitle_match,
self.summary_match,
self.publisher_match,
]
expect(order, "match")
# A search for a partial title match + a partial author match
# considers only books that match both fields.
expect(
[self.moby_dick],
"moby melville"
)
# Match a quoted phrase
# 'Moby-Dick' is the first result because it's an exact title
# match. '<NAME>' is the second result because it's a fuzzy
# match.
expect([self.moby_dick, self.moby_duck], '"moby dick"')
# Match a stemmed word: 'running' is stemmed to 'run', and
# so is 'runs'.
expect(self.dodger, "runs")
# Match a misspelled phrase: 'movy' -> 'moby'.
expect([self.moby_dick, self.moby_duck], "movy", ordered=False)
# Match a misspelled author: 'mleville' -> 'melville'
expect(self.moby_dick, "mleville")
# TODO: This is clearly trying to match "<NAME>", but it
# matches nothing. This is because at least two of the strings
# in a query must match. Neither "di" nor "ck" matches a fuzzy
# search on its own, which means "moby" is the only thing that
# matches, and that's not enough.
expect([], "moby di ck")
# Here, "dic" is close enough to "dick" that the fuzzy match
# kicks in. With both "moby" and "dic" matching, it's okay
# that "k" was a dud.
expect([self.moby_dick], "moby dic k")
# A query without an apostrophe matches a word that contains
# one. (this is a feature of the stemmer.)
expect(self.tess, "durbervilles")
expect(self.tiffany, "tiffanys")
# A query with an 'e' matches a word that contains an
# e-with-acute. (this is managed by the 'asciifolding' filter in
# the analyzers)
expect(self.les_mis, "les miserables")
# Find results based on fiction status.
#
# Here, Moby-Dick (fiction) is privileged over Moby Duck
# (nonfiction)
expect([self.moby_dick], "fiction moby")
# Here, Moby Duck is privileged over Moby-Dick.
expect([self.moby_duck], "nonfiction moby")
# Find results based on series.
classics = Filter(series="Classics")
expect(self.moby_dick, "moby", classics)
# This finds books that belong to _some_ series.
some_series = Filter(series=True)
expect([self.moby_dick, self.ya_romance], "", some_series,
ordered=False)
# Find results based on genre.
# If the entire search query is converted into a filter, every
# book matching that filter is boosted above books that match
# the search string as a query.
expect([self.ya_romance, self.modern_romance], "romance")
# Find results based on audience.
expect(self.children_work, "children's")
expect(
[self.ya_work, self.ya_romance], "young adult", ordered=False
)
# Find results based on grade level or target age.
for q in ('grade 4', 'grade 4-6', 'age 9'):
# ages 9-10 is a better result because a book targeted
# toward a narrow range is a better match than a book
# targeted toward a wide range.
expect([self.age_9_10, self.age_2_10], q)
# TODO: The target age query only scores how big the overlap
# is, it doesn't look at how large the non-overlapping part of
# the range is. So the 2-10 book can show up before the 9-10
# book. This could be improved.
expect([self.age_9_10, self.age_2_10], "age 10-12", ordered=False)
# Books whose target age are closer to the requested range
# are ranked higher.
expect([self.age_4_5, self.age_5_6, self.age_2_10], "age 3-5")
# Search by a combination of genre and audience.
# The book with 'Romance' in the title does not show up because
# it's not a YA book.
expect([self.ya_romance], "young adult romance")
# Search by a combination of target age and fiction
#
# Two books match the age range, but the one with a
# tighter age range comes first.
expect([self.age_4_5, self.age_2_10], "age 5 fiction")
# Search by a combination of genre and title
# Two books match 'lincoln', but only the biography is returned
expect([self.lincoln], "lincoln biography")
# Search by age + genre + summary
results = query("age 8 president biography")
# There are a number of results, but the top one is a presidential
# biography for 8-year-olds.
assert 5 == len(results)
assert self.obama.id == results[0].work_id
# Now we'll test filters.
# Both self.pride and self.pride_audio match the search query,
# but the filters eliminate one or the other from
# consideration.
book_filter = Filter(media=Edition.BOOK_MEDIUM)
audio_filter = Filter(media=Edition.AUDIO_MEDIUM)
expect(self.pride, "pride and prejudice", book_filter)
expect(self.pride_audio, "pride and prejudice", audio_filter)
# Filters on languages
english = Filter(languages="eng")
spanish = Filter(languages="spa")
both = Filter(languages=["eng", "spa"])
expect(self.sherlock, "sherlock", english)
expect(self.sherlock_spanish, "sherlock", spanish)
expect(
[self.sherlock, self.sherlock_spanish], "sherlock", both,
ordered=False
)
# Filters on fiction status
fiction = Filter(fiction=True)
nonfiction = Filter(fiction=False)
both = Filter()
expect(self.moby_dick, "moby dick", fiction)
expect(self.moby_duck, "moby dick", nonfiction)
expect([self.moby_dick, self.moby_duck], "moby dick", both)
# Filters on series
classics = Filter(series="classics")
expect(self.moby_dick, "moby", classics)
# Filters on audience
adult = Filter(audiences=Classifier.AUDIENCE_ADULT)
ya = Filter(audiences=Classifier.AUDIENCE_YOUNG_ADULT)
children = Filter(audiences=Classifier.AUDIENCE_CHILDREN)
ya_and_children = Filter(
audiences=[Classifier.AUDIENCE_CHILDREN,
Classifier.AUDIENCE_YOUNG_ADULT]
)
research = Filter(audiences=[Classifier.AUDIENCE_RESEARCH])
def expect_alice(expect_works, filter):
return expect(expect_works, "alice", filter, ordered=False)
expect_alice([self.adult_work, self.all_ages_work], adult)
expect_alice([self.ya_work, self.all_ages_work], ya)
expect_alice([self.children_work, self.all_ages_work], children)
expect_alice([self.children_work, self.ya_work, self.all_ages_work],
ya_and_children)
# The 'all ages' work appears except when the audience would make
# that inappropriate...
expect_alice([self.research_work], research)
expect_alice([], Filter(audiences=Classifier.AUDIENCE_ADULTS_ONLY))
# ...or when the target age does not include children expected
# to have the necessary reading fluency.
expect_alice(
[self.children_work],
Filter(audiences=Classifier.AUDIENCE_CHILDREN, target_age=(2,3))
)
# If there is no filter, the research work is excluded by
# default, but everything else is included.
default_filter = Filter()
expect_alice(
[self.children_work, self.ya_work, self.adult_work,
self.all_ages_work],
default_filter
)
# Filters on age range
age_8 = Filter(target_age=8)
age_5_8 = Filter(target_age=(5,8))
age_5_10 = Filter(target_age=(5,10))
age_8_10 = Filter(target_age=(8,10))
# As the age filter changes, different books appear and
# disappear. no_age is always present since it has no age
# restrictions.
expect(
[self.no_age, self.obama, self.dodger],
"president", age_8, ordered=False
)
expect(
[self.no_age, self.age_4_5, self.obama, self.dodger],
"president", age_5_8, ordered=False
)
expect(
[self.no_age, self.age_4_5, self.obama, self.dodger,
self.age_9_10],
"president", age_5_10, ordered=False
)
expect(
[self.no_age, self.obama, self.dodger, self.age_9_10],
"president", age_8_10, ordered=False
)
# Filters on license source.
gutenberg = DataSource.lookup(self._db, DataSource.GUTENBERG)
gutenberg_only = Filter(license_datasource=gutenberg)
expect([self.moby_dick, self.moby_duck], "moby", gutenberg_only,
ordered=False)
overdrive = DataSource.lookup(self._db, DataSource.OVERDRIVE)
overdrive_only = Filter(license_datasource=overdrive)
expect([], "moby", overdrive_only, ordered=False)
# Filters on last modified time.
# Obviously this query string matches "Moby-Dick", but it's
# filtered out because its last update time is before the
# `updated_after`. "Moby Duck" shows up because its last update
# time is right on the edge.
after_moby_duck = Filter(updated_after=self.moby_duck.last_update_time)
expect([self.moby_duck], "moby dick", after_moby_duck)
# Filters on genre
biography, ignore = Genre.lookup(self._db, "Biography & Memoir")
fantasy, ignore = Genre.lookup(self._db, "Fantasy")
biography_filter = Filter(genre_restriction_sets=[[biography]])
fantasy_filter = Filter(genre_restriction_sets=[[fantasy]])
both = Filter(genre_restriction_sets=[[fantasy, biography]])
expect(self.lincoln, "lincoln", biography_filter)
expect(self.lincoln_vampire, "lincoln", fantasy_filter)
expect([self.lincoln, self.lincoln_vampire], "lincoln", both,
ordered=False)
# Filters on list membership.
# This ignores 'Abraham Lincoln, Vampire Hunter' because that
# book isn't on the self.presidential list.
on_presidential_list = Filter(
customlist_restriction_sets=[[self.presidential]]
)
expect(self.lincoln, "lincoln", on_presidential_list)
# This filters everything, since the query is restricted to
# an empty set of lists.
expect([], "lincoln", Filter(customlist_restriction_sets=[[]]))
# Filter based on collection ID.
# "A Tiny Book" isn't in the default collection.
default_collection_only = Filter(collections=self._default_collection)
expect([], "a tiny book", default_collection_only)
# It is in the tiny_collection.
other_collection_only = Filter(collections=self.tiny_collection)
expect(self.tiny_book, "a tiny book", other_collection_only)
# If a book is present in two different collections which are
# being searched, it only shows up in search results once.
f = Filter(
collections=[self._default_collection, self.tiny_collection],
languages="eng"
)
expect(self.sherlock, "sherlock holmes", f)
# Filter on identifier -- one or many.
for results in [
[self.lincoln],
[self.sherlock, self.pride_audio]
]:
identifiers = [w.license_pools[0].identifier for w in results]
f = Filter(identifiers=identifiers)
expect(results, None, f, ordered=False)
# Setting .match_nothing on a Filter makes it always return nothing,
# even if it would otherwise return works.
nothing = Filter(fiction=True, match_nothing=True)
expect([], None, nothing)
# Filters that come from site or library settings.
# The source for the 'Pride and Prejudice' audiobook has been
# excluded, so it won't show up in search results.
f = Filter(
excluded_audiobook_data_sources=[
self.pride_audio.license_pools[0].data_source
]
)
expect([self.pride], "pride and prejudice", f)
# Here, a different data source is excluded, and it shows up.
f = Filter(
excluded_audiobook_data_sources=[
DataSource.lookup(self._db, DataSource.BIBLIOTHECA)
]
)
expect(
[self.pride, self.pride_audio], "pride and prejudice", f,
ordered=False
)
# "Moby Duck" is not currently available, so it won't show up in
# search results if allow_holds is False.
f = Filter(allow_holds=False)
expect([self.moby_dick], "moby duck", f)
# Finally, let's do some end-to-end tests of
# WorkList.works()
#
# That's a simple method that puts together a few pieces
# which are tested separately, so we don't need to go all-out.
def pages(worklist):
"""Iterate over a WorkList until it ends, and return all of the
pages.
"""
pagination = SortKeyPagination(size=2)
facets = Facets(
self._default_library, None, None, order=Facets.ORDER_TITLE
)
pages = []
while pagination:
pages.append(worklist.works(
self._db, facets, pagination, self.search
))
pagination = pagination.next_page
# The last page should always be empty -- that's how we
# knew we'd reached the end.
assert [] == pages[-1]
# Return all the other pages for verification.
return pages[:-1]
# Test a WorkList based on a custom list.
presidential = WorkList()
presidential.initialize(
self._default_library, customlists=[self.presidential]
)
p1, p2 = pages(presidential)
assert [self.lincoln, self.obama] == p1
assert [self.washington] == p2
# Test a WorkList based on a language.
spanish = WorkList()
spanish.initialize(self._default_library, languages=['spa'])
assert [[self.sherlock_spanish]] == pages(spanish)
# Test a WorkList based on a genre.
biography_wl = WorkList()
biography_wl.initialize(self._default_library, genres=[biography])
assert [[self.lincoln, self.obama]] == pages(biography_wl)
# Search results may be sorted by some field other than search
# quality.
f = SearchFacets
by_author = f(
library=self._default_library, collection=f.COLLECTION_FULL,
availability=f.AVAILABLE_ALL, order=f.ORDER_AUTHOR
)
by_author = Filter(facets=by_author)
by_title = f(
library=self._default_library, collection=f.COLLECTION_FULL,
availability=f.AVAILABLE_ALL, order=f.ORDER_TITLE
)
by_title = Filter(facets=by_title)
# By default, search results sorted by a bibliographic field
# are also filtered to eliminate low-quality results. In a
# real collection the default filter level works well, but it
# makes it difficult to test the feature in this limited test
# collection.
expect([self.moby_dick], "moby dick", by_author)
expect([self.ya_romance], "romance", by_author)
expect([], "moby", by_author)
expect([], "president", by_author)
# Let's lower the score so we can test the ordering properly.
by_title.min_score = 50
by_author.min_score = 50
expect([self.moby_dick, self.moby_duck], "moby", by_title)
expect([self.moby_duck, self.moby_dick], "moby", by_author)
expect([self.ya_romance, self.modern_romance], "romance", by_title)
expect([self.modern_romance, self.ya_romance], "romance", by_author)
# Lower it even more and we can start picking up search results
# that only match because of words in the description.
by_title.min_score=10
by_author.min_score=10
results = [self.no_age, self.age_4_5, self.dodger,
self.age_9_10, self.obama]
expect(results, "president", by_title)
# Reverse the sort order to demonstrate that these works are being
# sorted by title rather than randomly.
by_title.order_ascending = False
expect(list(reversed(results)), "president", by_title)
# Finally, verify that we can run multiple queries
# simultaneously.
# Different query strings.
self._expect_results_multi(
[[self.moby_dick], [self.moby_duck]],
[("moby dick", None, first_item),
("moby duck", None, first_item)]
)
# Same query string, different pagination settings.
self._expect_results_multi(
[[self.moby_dick], [self.moby_duck]],
[("moby dick", None, first_item),
("moby dick", None, second_item)]
)
# Same query string, same pagination settings, different
# filters. This is different from calling _expect_results() on
# a Filter with match_nothing=True. There, the query isn't
# even run. Here the query must be run, even though one
# branch will return no results.
match_nothing = Filter(match_nothing=True)
self._expect_results_multi(
[[self.moby_duck], []],
[("moby dick", Filter(fiction=False), first_item),
(None, match_nothing, first_item)]
)
class TestFacetFilters(EndToEndSearchTest):
def populate_works(self):
_work = self.default_work
# A low-quality open-access work.
self.horse = _work(
title="Diseases of the Horse", with_open_access_download=True
)
self.horse.quality = 0.2
# A high-quality open-access work.
self.moby = _work(
title="Moby Dick", with_open_access_download=True
)
self.moby.quality = 0.8
# A currently available commercially-licensed work.
self.duck = _work(title='Moby Duck')
self.duck.license_pools[0].licenses_available = 1
self.duck.quality = 0.5
# A currently unavailable commercially-licensed work.
self.becoming = _work(title='Becoming')
self.becoming.license_pools[0].licenses_available = 0
self.becoming.quality = 0.9
def test_facet_filtering(self):
# Add all the works created in the setup to the search index.
SearchIndexCoverageProvider(
self._db, search_index_client=self.search
).run_once_and_update_timestamp()
# Sleep to give the index time to catch up.
time.sleep(1)
def expect(collection, availability, works):
facets = Facets(
self._default_library, collection, availability,
order=Facets.ORDER_TITLE
)
self._expect_results(
works, None, Filter(facets=facets), ordered=False
)
# Get all the books in alphabetical order by title.
expect(Facets.COLLECTION_FULL, Facets.AVAILABLE_ALL,
[self.becoming, self.horse, self.moby, self.duck])
# Show only works that can be borrowed right now.
expect(Facets.COLLECTION_FULL, Facets.AVAILABLE_NOW,
[self.horse, self.moby, self.duck])
# Show only works that can *not* be borrowed right now.
expect(Facets.COLLECTION_FULL, Facets.AVAILABLE_NOT_NOW, [self.becoming])
# Show only open-access works.
expect(Facets.COLLECTION_FULL, Facets.AVAILABLE_OPEN_ACCESS,
[self.horse, self.moby])
# Show only featured-quality works.
expect(Facets.COLLECTION_FEATURED, Facets.AVAILABLE_ALL,
[self.becoming, self.moby])
class TestSearchOrder(EndToEndSearchTest):
def populate_works(self):
_work = self.default_work
# We're going to create three works:
# a: "Moby Dick"
# b: "Moby Duck"
# c: "[untitled]"
#
# The metadata of these books will be set up to generate
# intuitive orders under most of the ordering scenarios.
#
# The most complex ordering scenario is ORDER_LAST_UPDATE,
# which orders books differently depending on the modification
# date of the Work, the date a LicensePool for the work was
# first seen in a collection associated with the filter, and
# the date the work was first seen on a custom list associated
# with the filter.
#
# The modification dates of the works will be set in the order
# of their creation.
#
# We're going to put all three works in two different
# collections with different dates. All three works will be
# added to two different custom lists, and works a and c will
# be added to a third custom list.
#
# The dates associated with the "collection add" and "list add"
# events will be set up to create the following orderings:
#
# a, b, c - when no collections or custom lists are associated with
# the Filter.
# a, c, b - when collection 1 is associated with the Filter.
# b, a, c - when collections 1 and 2 are associated with the Filter.
# b, c, a - when custom list 1 is associated with the Filter.
# c, a, b - when collection 1 and custom list 2 are associated with
# the Filter.
# c, a - when two sets of custom list restrictions [1], [3]
# are associated with the filter.
self.moby_dick = _work(title="Moby Dick", authors="Herman Melville", fiction=True)
self.moby_dick.presentation_edition.subtitle = "Or, the Whale"
self.moby_dick.presentation_edition.series = "Classics"
self.moby_dick.presentation_edition.series_position = 10
self.moby_dick.summary_text = "Ishmael"
self.moby_dick.presentation_edition.publisher = "Project Gutenberg"
self.moby_dick.random = 0.1
self.moby_duck = _work(title="Moby Duck", authors="Donovan Hohn", fiction=False)
self.moby_duck.presentation_edition.subtitle = "The True Story of 28,800 Bath Toys Lost at Sea"
self.moby_duck.summary_text = "A compulsively readable narrative"
self.moby_duck.presentation_edition.series_position = 1
self.moby_duck.presentation_edition.publisher = "Penguin"
self.moby_duck.random = 0.9
self.untitled = _work(title="[Untitled]", authors="[Unknown]")
self.untitled.random = 0.99
self.untitled.presentation_edition.series_position = 5
# It's easier to refer to the books as a, b, and c when not
# testing sorts that rely on the metadata.
self.a = self.moby_dick
self.b = self.moby_duck
self.c = self.untitled
self.a.last_update_time = datetime.datetime(2000, 1, 1)
self.b.last_update_time = datetime.datetime(2001, 1, 1)
self.c.last_update_time = datetime.datetime(2002, 1, 1)
# Each work has one LicensePool associated with the default
# collection.
self.collection1 = self._default_collection
self.collection1.name = "Collection 1 - ACB"
[self.a1] = self.a.license_pools
[self.b1] = self.b.license_pools
[self.c1] = self.c.license_pools
self.a1.availability_time = datetime.datetime(2010, 1, 1)
self.c1.availability_time = datetime.datetime(2011, 1, 1)
self.b1.availability_time = datetime.datetime(2012, 1, 1)
# Here's a second collection with the same books in a different
# order.
self.collection2 = self._collection(name="Collection 2 - BAC")
self.a2 = self._licensepool(
edition=self.a.presentation_edition, collection=self.collection2,
with_open_access_download=True
)
self.a.license_pools.append(self.a2)
self.b2 = self._licensepool(
edition=self.b.presentation_edition, collection=self.collection2,
with_open_access_download=True
)
self.b.license_pools.append(self.b2)
self.c2 = self._licensepool(
edition=self.c.presentation_edition, collection=self.collection2,
with_open_access_download=True
)
self.c.license_pools.append(self.c2)
self.b2.availability_time = datetime.datetime(2020, 1, 1)
self.a2.availability_time = datetime.datetime(2021, 1, 1)
self.c2.availability_time = datetime.datetime(2022, 1, 1)
# Here are three custom lists which contain the same books but
# with different first appearances.
self.list1, ignore = self._customlist(
name="Custom list 1 - BCA", num_entries=0
)
self.list1.add_entry(
self.b, first_appearance=datetime.datetime(2030, 1, 1)
)
self.list1.add_entry(
self.c, first_appearance=datetime.datetime(2031, 1, 1)
)
self.list1.add_entry(
self.a, first_appearance=datetime.datetime(2032, 1, 1)
)
self.list2, ignore = self._customlist(
name="Custom list 2 - CAB", num_entries=0
)
self.list2.add_entry(
self.c, first_appearance=datetime.datetime(2001, 1, 1)
)
self.list2.add_entry(
self.a, first_appearance=datetime.datetime(2014, 1, 1)
)
self.list2.add_entry(
self.b, first_appearance=datetime.datetime(2015, 1, 1)
)
self.list3, ignore = self._customlist(
name="Custom list 3 -- CA", num_entries=0
)
self.list3.add_entry(
self.a, first_appearance=datetime.datetime(2032, 1, 1)
)
self.list3.add_entry(
self.c, first_appearance=datetime.datetime(1999, 1, 1)
)
# Create two custom lists which contain some of the same books,
# but with different first appearances.
self.by_publication_date, ignore = self._customlist(
name="First appearance on list is publication date",
num_entries=0
)
self.by_publication_date.add_entry(
self.moby_duck, first_appearance=datetime.datetime(2011, 3, 1)
)
self.by_publication_date.add_entry(
self.untitled, first_appearance=datetime.datetime(2018, 1, 1)
)
self.staff_picks, ignore = self._customlist(
name="First appearance is date book was made a staff pick",
num_entries=0
)
self.staff_picks.add_entry(
self.moby_dick, first_appearance=datetime.datetime(2015, 5, 2)
)
self.staff_picks.add_entry(
self.moby_duck, first_appearance=datetime.datetime(2012, 8, 30)
)
# Create two extra works, d and e, which are only used to
# demonstrate one case.
#
# The custom list and the collection both put d earlier than e, but the
# last_update_time wins out, and it puts e before d.
self.collection3 = self._collection()
self.d = self._work(collection=self.collection3, with_license_pool=True)
self.e = self._work(collection=self.collection3, with_license_pool=True)
self.d.license_pools[0].availability_time = datetime.datetime(2010, 1, 1)
self.e.license_pools[0].availability_time = datetime.datetime(2011, 1, 1)
self.extra_list, ignore = self._customlist(num_entries=0)
self.extra_list.add_entry(
self.d, first_appearance=datetime.datetime(2020, 1, 1)
)
self.extra_list.add_entry(
self.e, first_appearance=datetime.datetime(2021, 1, 1)
)
self.e.last_update_time = datetime.datetime(2090, 1, 1)
self.d.last_update_time = datetime.datetime(2091, 1, 1)
def test_ordering(self):
def assert_order(sort_field, order, **filter_kwargs):
"""Verify that when the books created during test setup are ordered by
the given `sort_field`, they show up in the given `order`.
Also verify that when the search is ordered descending,
the same books show up in the opposite order. This proves
that `sort_field` isn't being ignored, creating a test that
only succeeds by chance.
:param sort_field: Sort by this field.
:param order: A list of books in the expected order.
:param filter_kwargs: Extra keyword arguments to be passed
into the `Filter` constructor.
"""
expect = self._expect_results
facets = Facets(
self._default_library, Facets.COLLECTION_FULL,
Facets.AVAILABLE_ALL, order=sort_field, order_ascending=True
)
expect(order, None, Filter(facets=facets, **filter_kwargs))
facets.order_ascending = False
expect(list(reversed(order)), None, Filter(facets=facets, **filter_kwargs))
# Get each item in the list as a separate page. This
# proves that pagination works for this sort order for
# both Pagination and SortKeyPagination.
facets.order_ascending = True
for pagination_class in (
Pagination, SortKeyPagination
):
pagination = pagination_class(size=1)
to_process = list(order) + [[]]
while to_process:
filter = Filter(facets=facets, **filter_kwargs)
expect_result = to_process.pop(0)
expect(expect_result, None, filter, pagination=pagination)
pagination = pagination.next_page
# We are now off the edge of the list -- we got an
# empty page of results and there is no next page.
assert None == pagination
# Now try the same tests but in reverse order.
facets.order_ascending = False
for pagination_class in (
Pagination, SortKeyPagination
):
pagination = pagination_class(size=1)
to_process = list(reversed(order)) + [[]]
results = []
while to_process:
filter = Filter(facets=facets, **filter_kwargs)
expect_result = to_process.pop(0)
expect(expect_result, None, filter, pagination=pagination)
pagination = pagination.next_page
# We are now off the edge of the list -- we got an
# empty page of results and there is no next page.
assert None == pagination
# We can sort by title.
assert_order(
Facets.ORDER_TITLE, [self.untitled, self.moby_dick, self.moby_duck],
collections=[self._default_collection]
)
# We can sort by author; 'Hohn' sorts before 'Melville' sorts
# before "[Unknown]"
assert_order(
Facets.ORDER_AUTHOR, [self.moby_duck, self.moby_dick, self.untitled],
collections=[self._default_collection]
)
# We can sort by series position. Here, the books aren't in
# the same series; in a real scenario we would also filter on
# the value of 'series'.
assert_order(
Facets.ORDER_SERIES_POSITION,
[self.moby_duck, self.untitled, self.moby_dick],
collections=[self._default_collection]
)
# We can sort by internal work ID, which isn't very useful.
assert_order(
Facets.ORDER_WORK_ID,
[self.moby_dick, self.moby_duck, self.untitled],
collections=[self._default_collection]
)
# We can sort by the time the Work's LicensePools were first
# seen -- this would be used when showing patrons 'new' stuff.
#
# The LicensePools showed up in different orders in different
# collections, so filtering by collection will give different
# results.
assert_order(
Facets.ORDER_ADDED_TO_COLLECTION,
[self.a, self.c, self.b], collections=[self.collection1]
)
assert_order(
Facets.ORDER_ADDED_TO_COLLECTION,
[self.b, self.a, self.c], collections=[self.collection2]
)
# If a work shows up with multiple availability times through
# multiple collections, the earliest availability time for
# that work is used. All the dates in collection 1 predate the
# dates in collection 2, so collection 1's ordering holds
# here.
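# Concretely: a's earliest availability is 2010 (vs. 2021 in
# collection 2), c's is 2011 (vs. 2022), and b's is 2012 (vs.
# 2020), so the combined order is still a, c, b.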
assert_order(
Facets.ORDER_ADDED_TO_COLLECTION,
[self.a, self.c, self.b],
collections=[self.collection1, self.collection2]
)
# Finally, here are the tests of ORDER_LAST_UPDATE, as described
# above in setup().
assert_order(Facets.ORDER_LAST_UPDATE, [self.a, self.b, self.c, self.e, self.d])
assert_order(
Facets.ORDER_LAST_UPDATE, [self.a, self.c, self.b],
collections=[self.collection1]
)
assert_order(
Facets.ORDER_LAST_UPDATE, [self.b, self.a, self.c],
collections=[self.collection1, self.collection2]
)
assert_order(
Facets.ORDER_LAST_UPDATE, [self.b, self.c, self.a],
customlist_restriction_sets=[[self.list1]]
)
assert_order(
Facets.ORDER_LAST_UPDATE, [self.c, self.a, self.b],
collections=[self.collection1],
customlist_restriction_sets=[[self.list2]]
)
assert_order(
Facets.ORDER_LAST_UPDATE, [self.c, self.a],
customlist_restriction_sets=[[self.list1], [self.list3]]
)
assert_order(
Facets.ORDER_LAST_UPDATE, [self.e, self.d],
collections=[self.collection3],
customlist_restriction_sets=[[self.extra_list]]
)
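# (In that last case, d predates e on both the collection and the
# custom list, but e's last_update_time of 2090 is earlier than
# d's 2091 and is the latest of e's dates, so e sorts before d,
# as promised in populate_works().)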
class TestAuthorFilter(EndToEndSearchTest):
# Test the various techniques used to find books where a certain
# person had an authorship role.
def populate_works(self):
_work = self.default_work
# Create a number of Contributor objects--some fragmentary--
# representing the same person.
self.full = Contributor(
display_name='Ann Leckie', sort_name='Leckie, Ann', viaf="73520345",
lc="n2013008575"
)
self.display_name = Contributor(
sort_name=Edition.UNKNOWN_AUTHOR, display_name='Ann Leckie'
)
self.sort_name = Contributor(sort_name='Leckie, Ann')
self.viaf = Contributor(
sort_name=Edition.UNKNOWN_AUTHOR, viaf="73520345"
)
self.lc = Contributor(
sort_name=Edition.UNKNOWN_AUTHOR, lc="n2013008575"
)
# Create a different Work for every Contributor object.
# Alternate among the various 'author match' roles.
self.works = []
roles = list(Filter.AUTHOR_MATCH_ROLES)
for i, (contributor, title, attribute) in enumerate(
[(self.full, "Ancillary Justice", 'justice'),
(self.display_name, "Ancillary Sword", 'sword'),
(self.sort_name, "Ancillary Mercy", 'mercy'),
(self.viaf, "Provenance", 'provenance'),
(self.lc, "Raven Tower", 'raven'),
]):
self._db.add(contributor)
edition, ignore = self._edition(
title=title, authors=[], with_license_pool=True
)
contribution, was_new = get_one_or_create(
self._db, Contribution, edition=edition,
contributor=contributor,
role=roles[i % len(roles)]
)
work = self.default_work(
presentation_edition=edition,
)
self.works.append(work)
setattr(self, attribute, work)
# This work is a decoy. The author we're looking for
# contributed to the work in an ineligible role, so it will
# always be filtered out.
edition, ignore = self._edition(
title="Science Fiction: The Best of the Year (2007 Edition)",
authors=[], with_license_pool=True
)
contribution, is_new = get_one_or_create(
self._db, Contribution, edition=edition, contributor=self.full,
role=Contributor.CONTRIBUTOR_ROLE
)
self.literary_wonderlands = self.default_work(
presentation_edition=edition
)
# Another decoy. This work is by a different person and will
# always be filtered out.
self.ubik = self.default_work(
title="Ubik", authors=["Philip K. Dick"]
)
def test_author_match(self):
# By providing a Contributor object with all the identifiers,
# we get every work with an author-type contribution from
# someone who can be identified with that Contributor.
self._expect_results(
self.works, None, Filter(author=self.full), ordered=False
)
# If we provide a Contributor object with partial information,
# we can only get works that are identifiable with that
# Contributor through the information provided.
#
# In all cases below we will find 'Ancillary Justice', since
# the Contributor associated with that work has all the
# identifiers. In each case we will also find one additional
# work -- the one associated with the Contributor whose
# data overlaps what we're passing in.
for filter, extra in [
(Filter(author=self.display_name), self.sword),
(Filter(author=self.sort_name), self.mercy),
(Filter(author=self.viaf), self.provenance),
(Filter(author=self.lc), self.raven),
]:
self._expect_results(
[self.justice, extra], None, filter, ordered=False
)
# ContributorData also works here.
# By specifying two types of author identification we'll find
# three books -- the one that knows its author's sort_name,
# the one that knows its author's VIAF number, and the one
# that knows both.
author = ContributorData(sort_name="Leckie, Ann", viaf="73520345")
self._expect_results(
[self.justice, self.mercy, self.provenance], None,
Filter(author=author), ordered=False
)
# The filter can also accommodate very minor variants in names
# such as those caused by capitalization differences and
# accented characters.
for variant in ("<NAME>", u"<NAME>"):
author = ContributorData(display_name=variant)
self._expect_results(
[self.justice, self.sword], None,
Filter(author=author), ordered=False
)
# It cannot accommodate misspellings, no matter how minor.
author = ContributorData(display_name="<NAME>")
self._expect_results([], None, Filter(author=author))
# If the information in the ContributorData is inconsistent,
# the results may also be inconsistent.
author = ContributorData(
sort_name="Dick, Philip K.", lc="n2013008575"
)
self._expect_results(
[self.justice, self.raven, self.ubik],
None, Filter(author=author), ordered=False
)
class TestExactMatches(EndToEndSearchTest):
"""Verify that exact or near-exact title and author matches are
privileged over matches that span fields.
"""
def populate_works(self):
_work = self.default_work
# Here the title is 'Modern Romance'
self.modern_romance = _work(
title="Modern Romance",
authors=["Aziz Ansari", "Eric Klinenberg"],
)
# Here 'Modern' is in the subtitle and 'Romance' is the genre.
self.ya_romance = _work(
title="Gumby In Love",
authors="Pokey",
audience=Classifier.AUDIENCE_YOUNG_ADULT, genre="Romance"
)
self.ya_romance.presentation_edition.subtitle = (
"Modern Fairytale Series, Book 3"
)
self.parent_book = _work(
title="Our Son Aziz",
authors=["<NAME>", "<NAME>"],
genre="Biography & Memoir",
)
self.behind_the_scenes = _work(
title="The Making of Biography With Peter Graves",
genre="Entertainment",
)
self.biography_of_peter_graves = _work(
"He Is Peter Graves",
authors="<NAME>",
genre="Biography & Memoir",
)
self.book_by_peter_graves = _work(
title="My Experience At The University of Minnesota",
authors="Peter Graves",
genre="Entertainment",
)
self.book_by_someone_else = _work(
title="The Deadly Graves",
authors="<NAME>",
genre="Mystery"
)
def test_exact_matches(self):
expect = self._expect_results
# A full title match takes precedence over a match that's
# split across genre and subtitle.
expect(
[
self.modern_romance, # "modern romance" in title
self.ya_romance # "modern" in subtitle, genre "romance"
],
"modern romance"
)
# A full author match takes precedence over a partial author
# match. A partial author match ("<NAME>") doesn't show up
# at all because it can't match two words.
expect(
[
self.modern_romance, # "Aziz Ansari" in author
self.parent_book, # "Aziz" in title, "Ansari" in author
],
"aziz ansari"
)
# 'Peter Graves' is a string that has exact matches in both
# title and author.
# Books with author '<NAME>' are the top match, since
# "<NAME>" matches the entire string. Books with "Peter
# Graves" in the title are the next results, ordered by how
# much other stuff is in the title. A partial match split
# across fields ("peter" in author, "graves" in title) is the
# last result.
order = [
self.book_by_peter_graves,
self.biography_of_peter_graves,
self.behind_the_scenes,
self.book_by_someone_else,
]
expect(order, "peter graves")
# Now we throw in "biography", a term that is both a genre and
# a search term in its own right.
#
# 1. A book whose title mentions all three terms
# 2. A book in genre "biography" whose title
# matches the other two terms
# 3. A book with an author match containing two of the terms.
# 'biography' just doesn't match. That's okay --
# if there are more than two search terms, only two must match.
order = [
self.behind_the_scenes, # all words match in title
self.biography_of_peter_graves, # title + genre 'biography'
self.book_by_peter_graves, # author (no 'biography')
]
expect(order, "peter graves biography")
class TestFeaturedFacets(EndToEndSearchTest):
"""Test how a FeaturedFacets object affects search ordering.
"""
def populate_works(self):
_work = self.default_work
self.hq_not_available = _work(title="HQ but not available")
self.hq_not_available.quality = 1
self.hq_not_available.license_pools[0].licenses_available = 0
self.hq_available = _work(title="HQ and available")
self.hq_available.quality = 1
self.hq_available_2 = _work(title="Also HQ and available")
self.hq_available_2.quality = 1
self.not_featured_on_list = _work(title="On a list but not featured")
self.not_featured_on_list.quality = 0.19
# This work has nothing going for it other than the fact
# that it's been featured on a custom list.
self.featured_on_list = _work(title="Featured on a list")
self.featured_on_list.quality = 0.18
self.featured_on_list.license_pools[0].licenses_available = 0
self.best_seller_list, ignore = self._customlist(num_entries=0)
self.best_seller_list.add_entry(self.featured_on_list, featured=True)
self.best_seller_list.add_entry(self.not_featured_on_list)
def test_scoring_functions(self):
# Verify that FeaturedFacets sets appropriate scoring functions
# for ElasticSearch queries.
f = FeaturedFacets(minimum_featured_quality=0.55, random_seed=42)
filter = Filter()
f.modify_search_filter(filter)
# In most cases, there are three things that can boost a work's score.
[featurable, available_now, random] = f.scoring_functions(filter)
# It can be high-quality enough to be featured.
assert isinstance(featurable, ScriptScore)
source = filter.FEATURABLE_SCRIPT % dict(
cutoff=f.minimum_featured_quality ** 2, exponent=2
)
assert source == featurable.script['source']
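# (With minimum_featured_quality=0.55, the cutoff baked into the
# script is 0.55 ** 2 = 0.3025.)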
# It can be currently available.
availability_filter = available_now['filter']
assert (
dict(nested=dict(
path='licensepools',
query=dict(term={'licensepools.available': True})
)) ==
availability_filter.to_dict())
assert 5 == available_now['weight']
# It can get lucky.
assert isinstance(random, RandomScore)
assert 42 == random.seed
assert 1.1 == random.weight
# If the FeaturedFacets is set to be deterministic (which only happens
# in tests), the RandomScore is removed.
f.random_seed = filter.DETERMINISTIC
[featurable_2, available_now_2] = f.scoring_functions(filter)
assert featurable_2 == featurable
assert available_now_2 == available_now
# If custom lists are in play, it can also be featured on one
# of its custom lists.
filter.customlist_restriction_sets = [[1,2], [3]]
[featurable_2, available_now_2,
featured_on_list] = f.scoring_functions(filter)
assert featurable_2 == featurable
assert available_now_2 == available_now
# Any list will do -- the customlist restriction sets aren't
# relevant here.
featured_filter = featured_on_list['filter']
assert (dict(
nested=dict(
path='customlists',
query=dict(bool=dict(
must=[{'term': {'customlists.featured': True}},
{'terms': {'customlists.list_id': [1, 2, 3]}}])))) ==
featured_filter.to_dict())
assert 11 == featured_on_list['weight']
def test_run(self):
def works(worklist, facets):
return worklist.works(
self._db, facets, None, self.search, debug=True
)
def assert_featured(description, worklist, facets, expect):
# Generate a list of featured works for the given `worklist`
# and compare that list against `expect`.
actual = works(worklist, facets)
self._assert_works(description, expect, actual)
worklist = WorkList()
worklist.initialize(self._default_library)
facets = FeaturedFacets(1, random_seed=Filter.DETERMINISTIC)
# Even though hq_not_available is higher-quality than
# not_featured_on_list, not_featured_on_list shows up first because
# it's available right now.
w = works(worklist, facets)
assert w.index(self.not_featured_on_list) < w.index(
self.hq_not_available
)
# not_featured_on_list shows up before featured_on_list because
# it's higher-quality and list membership isn't relevant.
assert w.index(self.not_featured_on_list) < w.index(
self.featured_on_list
)
# Create a WorkList that's restricted to best-sellers.
best_sellers = WorkList()
best_sellers.initialize(
self._default_library, customlists=[self.best_seller_list]
)
# The featured work appears above the non-featured work,
# even though it's lower quality and is not available.
assert_featured(
"Works from WorkList based on CustomList", best_sellers, facets,
[self.featured_on_list, self.not_featured_on_list],
)
# By changing the minimum_featured_quality you can control
# at what point a work is considered 'featured' -- at which
# point its quality stops being taken into account.
#
# An extreme case of this is to set the minimum_featured_quality
# to 0, which makes all works 'featured' and stops quality
# from being considered altogether. Basically all that matters
# is availability.
all_featured_facets = FeaturedFacets(
0, random_seed=Filter.DETERMINISTIC
)
# We don't know exactly what order the books will be in,
# because even without the random element Elasticsearch is
# slightly nondeterministic, but we do expect that all of the
# available books will show up before all of the unavailable
# books.
only_availability_matters = worklist.works(
self._db, all_featured_facets, None, self.search, debug=True
)
assert 5 == len(only_availability_matters)
last_two = only_availability_matters[-2:]
assert self.hq_not_available in last_two
assert self.featured_on_list in last_two
# Up to this point we've been avoiding the random element,
# but we can introduce that now by passing in a numeric seed.
# In normal usage, the current time is used as the seed.
#
# The random element is relatively small, so it mainly acts
# to rearrange works whose scores were similar before.
random_facets = FeaturedFacets(1, random_seed=43)
assert_featured(
"Works permuted by a random seed",
worklist, random_facets,
[self.hq_available_2, self.hq_available,
self.not_featured_on_list, self.hq_not_available,
self.featured_on_list],
)
class TestSearchBase(object):
def test__boost(self):
# Verify that _boost() converts a regular query (or list of queries)
# into a boosted query.
m = SearchBase._boost
q1 = Q("simple_query_string", query="query 1")
q2 = Q("simple_query_string", query="query 2")
boosted_one = m(10, q1)
assert "bool" == boosted_one.name
assert 10.0 == boosted_one.boost
assert [q1] == boosted_one.must
# By default, if you pass in multiple queries, only one of them
# must match for the boost to apply.
boosted_multiple = m(4.5, [q1, q2])
assert "bool" == boosted_multiple.name
assert 4.5 == boosted_multiple.boost
assert 1 == boosted_multiple.minimum_should_match
assert [q1, q2] == boosted_multiple.should
# Here, every query must match for the boost to apply.
boosted_multiple = m(4.5, [q1, q2], all_must_match=True)
assert "bool" == boosted_multiple.name
assert 4.5 == boosted_multiple.boost
assert [q1, q2] == boosted_multiple.must
def test__nest(self):
# Test the _nest method, which turns a normal query into a
# nested query.
query = Term(**{"nested_field" : "value"})
nested = SearchBase._nest("subdocument", query)
assert (Nested(path='subdocument', query=query) ==
nested)
def test_nestable(self):
# Test the _nestable helper method, which turns a normal
# query into an appropriate nested query, if necessary.
m = SearchBase._nestable
# A query on a field that's not in a subdocument is
# unaffected.
field = "name.minimal"
normal_query = Term(**{field : "name"})
assert normal_query == m(field, normal_query)
# A query on a subdocument field becomes a nested query on
# that subdocument.
field = "contributors.sort_name.minimal"
subdocument_query = Term(**{field : "name"})
nested = m(field, subdocument_query)
assert (
Nested(path='contributors', query=subdocument_query) ==
nested)
def test__match_term(self):
# _match_term creates a Match Elasticsearch object which does a
# match against a specific field.
m = SearchBase._match_term
qu = m("author", "<NAME>")
assert (
Term(author="<NAME>") ==
qu)
# If the field name references a subdocument, the query is
# embedded in a Nested object that describes how to match it
# against that subdocument.
field = "genres.name"
qu = m(field, "Biography")
assert (
Nested(path='genres', query=Term(**{field: "Biography"})) ==
qu)
def test__match_range(self):
# Test the _match_range helper method.
# This is used to create an Elasticsearch query term
# that only matches if a value is in a given range.
# This only matches if field.name has a value >= 5.
r = SearchBase._match_range("field.name", "gte", 5)
assert r == {'range': {'field.name': {'gte': 5}}}
def test__combine_hypotheses(self):
# Verify that _combine_hypotheses creates a DisMax query object
# that chooses the best one out of whichever queries it was passed.
m = SearchBase._combine_hypotheses
h1 = Term(field="value 1")
h2 = Term(field="value 2")
hypotheses = [h1, h2]
combined = m(hypotheses)
assert DisMax(queries=hypotheses) == combined
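# (A dis_max query scores each document by its single
# best-matching subquery rather than the sum of all of them,
# which is what lets the strongest hypothesis win.)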
# If there are no hypotheses to test, _combine_hypotheses creates
# a MatchAll instead.
assert MatchAll() == m([])
def test_make_target_age_query(self):
# Search for material suitable for children between the
# ages of 5 and 10.
#
# This gives us two similar queries: one to use as a filter
# and one to use as a boost query.
as_filter, as_query = Query.make_target_age_query((5,10))
# Here's the filter part: a book's age range must overlap the
# 5-10 range, or it gets filtered out.
filter_clauses = [
Range(**{"target_age.upper":dict(gte=5)}),
Range(**{"target_age.lower":dict(lte=10)}),
]
assert Bool(must=filter_clauses) == as_filter
# Here's the query part: a book gets boosted if its
# age range fits _entirely_ within the target age range.
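# Concretely: a book targeted at ages 6-9 fits entirely inside
# 5-10, so it passes the filter *and* gets the boost; a book
# targeted at ages 4-12 passes the filter but gets no boost.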
query_clauses = [
Range(**{"target_age.upper":dict(lte=10)}),
Range(**{"target_age.lower":dict(gte=5)}),
]
assert (Bool(boost=1.1, must=filter_clauses, should=query_clauses) ==
as_query)
class TestQuery(DatabaseTest):
def test_constructor(self):
# Verify that the Query constructor sets members with
# no processing.
filter = Filter()
query = Query("query string", filter)
assert "query string" == query.query_string
assert filter == query.filter
# The query string does not contain English stopwords.
assert False == query.contains_stopwords
# Every word in the query string passes spellcheck,
# so a fuzzy query will be given less weight.
assert 0.5 == query.fuzzy_coefficient
# Try again with a query containing a stopword and
# a word that fails spellcheck.
query = Query("just a xlomph")
assert True == query.contains_stopwords
assert 1 == query.fuzzy_coefficient
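# (In other words, the more likely the query is to contain a
# typo, the more weight the fuzzy hypotheses are given.)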
# Try again with a query that contains no query string.
# The fuzzy hypotheses will not be run at all.
query = Query(None)
assert None == query.contains_stopwords
assert 0 == query.fuzzy_coefficient
def test_build(self):
# Verify that the build() method combines the 'query' part of
# a Query and the 'filter' part to create a single
# Elasticsearch Search object, complete with (if necessary)
# subqueries, sort ordering, and script fields.
class MockSearch(object):
"""A mock of the Elasticsearch-DSL `Search` object.
Calls to Search methods tend to create a new Search object
based on the old one. This mock simulates that behavior.
If necessary, you can look at all MockSearch objects
created by to get to a certain point by following the
.parent relation.
"""
def __init__(
self, parent=None, query=None, nested_filter_calls=None,
order=None, script_fields=None
):
self.parent = parent
self._query = query
self.nested_filter_calls = nested_filter_calls or []
self.order = order
self._script_fields = script_fields
def filter(self, **kwargs):
"""Simulate the application of a nested filter.
:return: A new MockSearch object.
"""
new_filters = self.nested_filter_calls + [kwargs]
return MockSearch(
self, self._query, new_filters, self.order,
self._script_fields
)
def query(self, query):
"""Simulate the creation of an Elasticsearch-DSL `Search`
object from an Elasticsearch-DSL `Query` object.
:return: A New MockSearch object.
"""
return MockSearch(
self, query, self.nested_filter_calls, self.order,
self._script_fields
)
def sort(self, *order_fields):
"""Simulate the application of a sort order."""
return MockSearch(
self, self._query, self.nested_filter_calls, order_fields,
self._script_fields
)
def script_fields(self, **kwargs):
"""Simulate the addition of script fields."""
return MockSearch(
self, self._query, self.nested_filter_calls, self.order,
kwargs
)
class MockQuery(Query):
# A Mock of the Query object from external_search
# (not the one from Elasticsearch-DSL).
@property
def elasticsearch_query(self):
return Q("simple_query_string", query=self.query_string)
class MockPagination(object):
def modify_search_query(self, search):
return search.filter(name_or_query="pagination modified")
# That's a lot of mocks, but here's one more. Mock the Filter
# class's universal_base_filter() and
# universal_nested_filters() methods. These methods queue up
# all kinds of modifications to queries, so it's better to
# replace them with simpler versions.
class MockFilter(object):
universal_base_term = Q('term', universal_base_called=True)
universal_nested_term = Q('term', universal_nested_called=True)
universal_nested_filter = dict(nested_called=[universal_nested_term])
@classmethod
def universal_base_filter(cls):
cls.universal_called=True
return cls.universal_base_term
@classmethod
def universal_nested_filters(cls):
cls.nested_called = True
return cls.universal_nested_filter
@classmethod
def validate_universal_calls(cls):
"""Verify that both universal methods were called
and that the return values were incorporated into
the query being built by `search`.
This method modifies the `search` object in place so
that the rest of a test can ignore all the universal
stuff.
"""
assert True == cls.universal_called
assert True == cls.nested_called
# Reset for next time.
cls.universal_called = None
cls.nested_called = None
original_base = Filter.universal_base_filter
original_nested = Filter.universal_nested_filters
Filter.universal_base_filter = MockFilter.universal_base_filter
Filter.universal_nested_filters = MockFilter.universal_nested_filters
# Test the simple case where the Query has no filter.
qu = MockQuery("query string", filter=None)
search = MockSearch()
pagination = MockPagination()
built = qu.build(search, pagination)
# The return value is a new MockSearch object based on the one
# that was passed in.
assert isinstance(built, MockSearch)
assert search == built.parent.parent.parent
# The (mocked) universal base query and universal nested
# queries were called.
MockFilter.validate_universal_calls()
# The mocked universal base filter was the first
# base filter to be applied.
universal_base_term = built._query.filter.pop(0)
assert MockFilter.universal_base_term == universal_base_term
# The pagination filter was the last one to be applied.
pagination = built.nested_filter_calls.pop()
assert dict(name_or_query='pagination modified') == pagination
# The mocked universal nested filter was applied
# just before that.
universal_nested = built.nested_filter_calls.pop()
assert (
dict(
name_or_query='nested',
path='nested_called',
query=Bool(filter=[MockFilter.universal_nested_term])
) ==
universal_nested)
# The result of Query.elasticsearch_query is used as the basis
# for the Search object.
assert Bool(must=qu.elasticsearch_query) == built._query
# Now test some cases where the query has a filter.
# If there's a filter, a boolean Query object is created to
# combine the original Query with the filter.
filter = Filter(fiction=True)
qu = MockQuery("query string", filter=filter)
built = qu.build(search)
MockFilter.validate_universal_calls()
# The 'must' part of this new Query came from calling
# Query.query() on the original Query object.
#
# The 'filter' part came from calling Filter.build() on the
# main filter.
underlying_query = built._query
# The query we passed in is used as the 'must' part of that Bool query.
assert underlying_query.must == [qu.elasticsearch_query]
main_filter, nested_filters = filter.build()
# The filter we passed in was combined with the universal
# base filter into a boolean query, with its own 'must'.
main_filter.must = main_filter.must + [MockFilter.universal_base_term]
assert (
underlying_query.filter ==
[main_filter])
# There are no nested filters, apart from the universal one.
assert {} == nested_filters
universal_nested = built.nested_filter_calls.pop()
assert (
dict(
name_or_query='nested',
path='nested_called',
query=Bool(filter=[MockFilter.universal_nested_term])
) ==
universal_nested)
assert [] == built.nested_filter_calls
# At this point the universal filters are more trouble than they're
# worth. Disable them for the rest of the test.
MockFilter.universal_base_term = None
MockFilter.universal_nested_filter = None
# Now let's try a combination of regular filters and nested filters.
filter = Filter(
fiction=True,
collections=[self._default_collection]
)
qu = MockQuery("query string", filter=filter)
built = qu.build(search)
underlying_query = built._query
# We get a main filter (for the fiction restriction) and one
# nested filter.
main_filter, nested_filters = filter.build()
[nested_licensepool_filter] = nested_filters.pop('licensepools')
assert {} == nested_filters
# As before, the main filter has been applied to the underlying
# query.
assert underlying_query.filter == [main_filter]
# The nested filter was converted into a Bool query and passed
# into Search.filter(). This applied an additional filter on the
# 'licensepools' subdocument.
[filter_call] = built.nested_filter_calls
assert 'nested' == filter_call['name_or_query']
assert 'licensepools' == filter_call['path']
filter_as_query = filter_call['query']
assert Bool(filter=nested_licensepool_filter) == filter_as_query
# Now we're going to test how queries are built to accommodate
# various restrictions imposed by a Facets object.
def from_facets(*args, **kwargs):
"""Build a Query object from a set of facets, then call
build() on it.
"""
facets = Facets(self._default_library, *args, **kwargs)
filter = Filter(facets=facets)
qu = MockQuery("query string", filter=filter)
built = qu.build(search)
# Return the rest to be verified in a test-specific way.
return built
# When using the 'featured' collection...
built = from_facets(Facets.COLLECTION_FEATURED, None, None)
# There is no nested filter.
assert [] == built.nested_filter_calls
# A non-nested filter is applied on the 'quality' field.
[quality_filter] = built._query.filter
quality_range = Filter._match_range(
'quality', 'gte', self._default_library.minimum_featured_quality
)
assert Q('bool', must=[quality_range], must_not=[RESEARCH]) == quality_filter
# When using the AVAILABLE_OPEN_ACCESS availability restriction...
built = from_facets(Facets.COLLECTION_FULL,
Facets.AVAILABLE_OPEN_ACCESS, None)
# An additional nested filter is applied.
[available_now] = built.nested_filter_calls
assert 'nested' == available_now['name_or_query']
assert 'licensepools' == available_now['path']
# It finds only license pools that are open access.
nested_filter = available_now['query']
open_access = dict(term={'licensepools.open_access': True})
assert (
nested_filter.to_dict() ==
{'bool': {'filter': [open_access]}})
# When using the AVAILABLE_NOW restriction...
built = from_facets(Facets.COLLECTION_FULL, Facets.AVAILABLE_NOW, None)
# An additional nested filter is applied.
[available_now] = built.nested_filter_calls
assert 'nested' == available_now['name_or_query']
assert 'licensepools' == available_now['path']
# It finds only license pools that are open access *or* that have
# active licenses.
nested_filter = available_now['query']
available = {'term': {'licensepools.available': True}}
assert (
nested_filter.to_dict() ==
{'bool': {'filter': [{'bool': {'should': [open_access, available],
'minimum_should_match': 1}}]}})
# When using the AVAILABLE_NOT_NOW restriction...
built = from_facets(Facets.COLLECTION_FULL, Facets.AVAILABLE_NOT_NOW, None)
# An additional nested filter is applied.
[not_available_now] = built.nested_filter_calls
assert 'nested' == not_available_now['name_or_query']
assert 'licensepools' == not_available_now['path']
# It finds only license pools that are licensed, but not
# currently available or open access.
nested_filter = not_available_now['query']
not_available = {'term': {'licensepools.available': False}}
licensed = {'term': {'licensepools.licensed': True}}
not_open_access = {'term': {'licensepools.open_access': False}}
assert (
nested_filter.to_dict() ==
{'bool': {'filter': [{'bool': {'must': [not_open_access, licensed, not_available]}}]}})
# If the Filter specifies script fields, those fields are
# added to the Query through a call to script_fields()
script_fields = dict(field1="Definition1",
field2="Definition2")
filter = Filter(script_fields=script_fields)
qu = MockQuery("query string", filter=filter)
built = qu.build(search)
assert script_fields == built._script_fields
# If the Filter specifies a sort order, Filter.sort_order is
# used to convert it to appropriate Elasticsearch syntax, and
# the MockSearch object is modified appropriately.
built = from_facets(
None, None, order=Facets.ORDER_RANDOM, order_ascending=False
)
# We asked for a random sort order, and that's the primary
# sort field.
order = list(built.order)
assert dict(random="desc") == order.pop(0)
# But a number of other sort fields are also employed to act
# as tiebreakers.
for tiebreaker_field in ('sort_author', 'sort_title', 'work_id'):
assert {tiebreaker_field: "asc"} == order.pop(0)
assert [] == order
# Finally, undo the mock of the Filter class methods
Filter.universal_base_filter = original_base
Filter.universal_nested_filters = original_nested
def test_build_match_nothing(self):
# No matter what the Filter looks like, if its .match_nothing
# is set, it gets built into a simple filter that matches
# nothing, with no nested subfilters.
filter = Filter(
fiction=True,
collections=[self._default_collection],
match_nothing = True
)
main, nested = filter.build()
assert MatchNone() == main
assert {} == nested
def test_elasticsearch_query(self):
# The elasticsearch_query property calls a number of other methods
# to generate hypotheses, then creates a dis_max query
# to find the most likely hypothesis for any given book.
class Mock(Query):
_match_phrase_called_with = []
_boosts = {}
_filters = {}
_kwargs = {}
def match_one_field_hypotheses(self, field):
yield "match %s" % field, 1
@property
def match_author_hypotheses(self):
yield "author query 1", 2
yield "author query 2", 3
@property
def match_topic_hypotheses(self):
yield "topic query", 4
def title_multi_match_for(self, other_field):
yield "multi match title+%s" % other_field, 5
# Define this as a constant so it's easy to check later
# in the test.
SUBSTRING_HYPOTHESES = (
"hypothesis based on substring",
"another such hypothesis",
)
@property
def parsed_query_matches(self):
return self.SUBSTRING_HYPOTHESES, "only valid with this filter"
def _hypothesize(
self, hypotheses, new_hypothesis, boost="default",
filters=None, **kwargs
):
self._boosts[new_hypothesis] = boost
if kwargs:
self._kwargs[new_hypothesis] = kwargs
if filters:
self._filters[new_hypothesis] = filters
hypotheses.append(new_hypothesis)
return hypotheses
def _combine_hypotheses(self, hypotheses):
self._combine_hypotheses_called_with = hypotheses
return hypotheses
# Before we get started, try an easy case. If there is no query
# string we get a match_all query that returns everything.
query = Mock(None)
result = query.elasticsearch_query
assert dict(match_all=dict()) == result.to_dict()
# Now try a real query string.
q = "query string"
query = Mock(q)
result = query.elasticsearch_query
# The final result is the result of calling _combine_hypotheses
# on a number of hypotheses. Our mock class just returns
# the hypotheses as-is, for easier testing.
assert result == query._combine_hypotheses_called_with
# We ended up with a number of hypotheses:
assert (result ==
[
# Several hypotheses checking whether the search query is an attempt to
# match a single field -- the results of calling match_one_field()
# many times.
'match title',
'match subtitle',
'match series',
'match publisher',
'match imprint',
# The results of calling match_author_queries() once.
'author query 1',
'author query 2',
# The results of calling match_topic_queries() once.
'topic query',
# The results of calling multi_match() for three fields.
'multi match title+subtitle',
'multi match title+series',
'multi match title+author',
# The 'query' part of the return value of
# parsed_query_matches()
Mock.SUBSTRING_HYPOTHESES
])
# That's not the whole story, though. parsed_query_matches()
# said it was okay to test certain hypotheses, but only
# in the context of a filter.
#
# That filter was passed in to _hypothesize. Our mock version
# of _hypothesize added it to the 'filters' dict to indicate
# we know that those filters go with the substring
# hypotheses. That's the only time 'filters' was touched.
assert (
{Mock.SUBSTRING_HYPOTHESES: 'only valid with this filter'} ==
query._filters)
# Each call to _hypothesize included a boost factor indicating
# how heavily to weight that hypothesis. Rather than do
# anything with this information -- which is mostly mocked
# anyway -- we just stored it in _boosts.
boosts = sorted(list(query._boosts.items()), key=lambda x: str(x[0]))
boosts = sorted(boosts, key=lambda x: x[1])
assert (boosts ==
[
('match imprint', 1),
('match publisher', 1),
('match series', 1),
('match subtitle', 1),
('match title', 1),
# The only non-mocked value here is this one. The
# substring hypotheses have their own weights, which
# we don't see in this test. This is saying that if a
# book matches those sub-hypotheses and _also_ matches
# the filter, then whatever weight it got from the
# sub-hypotheses should be boosted slightly. This gives
# works that match the filter an edge over works that
# don't.
(Mock.SUBSTRING_HYPOTHESES, 1.1),
('author query 1', 2),
('author query 2', 3),
('topic query', 4),
('multi match title+author', 5),
('multi match title+series', 5),
('multi match title+subtitle', 5),
])
def test_match_one_field_hypotheses(self):
# Test our ability to generate hypotheses that a search string
# is trying to match a single field of data.
class Mock(Query):
WEIGHT_FOR_FIELD = dict(
regular_field=2,
stopword_field=3,
stemmable_field=4,
)
STOPWORD_FIELDS = ['stopword_field']
STEMMABLE_FIELDS = ['stemmable_field']
def __init__(self, *args, **kwargs):
super(Mock, self).__init__(*args, **kwargs)
self.fuzzy_calls = {}
def _fuzzy_matches(self, field_name, **kwargs):
self.fuzzy_calls[field_name] = kwargs
# 0.66 is an arbitrarily chosen value -- look
# for it in the validate_fuzzy() helper method.
yield "fuzzy match for %s" % field_name, 0.66
# Let's start with the simplest case: no stopword variant, no
# stemmed variant, no fuzzy variants.
query = Mock("book")
query.fuzzy_coefficient = 0
m = query.match_one_field_hypotheses
# We'll get a Term query and a MatchPhrase query.
term, phrase = list(m('regular_field'))
# The Term hypothesis tries to find an exact match for 'book'
# in this field. It is boosted 1000x relative to the baseline
# weight for this field.
def validate_keyword(field, hypothesis, expect_weight):
hypothesis, weight = hypothesis
assert Term(**{"%s.keyword" % field: "book"}) == hypothesis
assert expect_weight == weight
validate_keyword("regular_field", term, 2000)
# The MatchPhrase hypothesis tries to find a partial phrase
# match for 'book' in this field. It is boosted 1x relative to
# the baseline weight for this field.
def validate_minimal(field, hypothesis, expect_weight):
hypothesis, weight = hypothesis
assert MatchPhrase(**{"%s.minimal" % field: "book"}) == hypothesis
assert expect_weight == weight
validate_minimal("regular_field", phrase, 2)
# Now let's try the same query, but with fuzzy searching
# turned on.
query.fuzzy_coefficient = 0.5
term, phrase, fuzzy = list(m("regular_field"))
# The first two hypotheses are the same.
validate_keyword("regular_field", term, 2000)
validate_minimal("regular_field", phrase, 2)
# But we've got another hypothesis yielded by a call to
# _fuzzy_matches. It goes against the 'minimal' field and its
# weight is the weight of that field's non-fuzzy hypothesis,
# multiplied by a value determined by _fuzzy_matches()
def validate_fuzzy(field, hypothesis, phrase_weight):
minimal_field = field + ".minimal"
hypothesis, weight = hypothesis
assert 'fuzzy match for %s' % minimal_field == hypothesis
assert phrase_weight*0.66 == weight
# Validate standard arguments passed into _fuzzy_matches.
# Since a fuzzy match is kind of loose, we don't allow a
# match on a single word of a multi-word query. At least
# two of the words have to be involved.
assert (dict(minimum_should_match=2, query='book') ==
query.fuzzy_calls[minimal_field])
validate_fuzzy("regular_field", fuzzy, 2)
# Now try a field where stopwords might be relevant.
term, phrase, fuzzy = list(m("stopword_field"))
# There was no new hypothesis, because our query doesn't
# contain any stopwords. Let's make it look like it does.
query.contains_stopwords = True
term, phrase, fuzzy, stopword = list(m("stopword_field"))
# We have the term query, the phrase match query, and the
# fuzzy query. Note that they're boosted relative to the base
# weight for the stopword_field query, which is 3.
validate_keyword("stopword_field", term, 3000)
validate_minimal("stopword_field", phrase, 3)
validate_fuzzy("stopword_field", fuzzy, 3)
# We also have a new hypothesis which matches the version of
# stopword_field that leaves the stopwords in place. This
# hypothesis is boosted just above the baseline hypothesis.
hypothesis, weight = stopword
assert (hypothesis ==
MatchPhrase(**{"stopword_field.with_stopwords": "book"}))
assert weight == 3 * Mock.SLIGHTLY_ABOVE_BASELINE
# Finally, let's try a stemmable field.
term, phrase, fuzzy, stemmable = list(m("stemmable_field"))
validate_keyword("stemmable_field", term, 4000)
validate_minimal("stemmable_field", phrase, 4)
validate_fuzzy("stemmable_field", fuzzy, 4)
# The stemmable field becomes a Match hypothesis at 75% of the
# baseline weight for this field. We set
# minimum_should_match=2 here for the same reason we do it for
# the fuzzy search -- a normal Match query is kind of loose.
hypothesis, weight = stemmable
assert (hypothesis ==
Match(
stemmable_field=dict(
minimum_should_match=2,
query="book"
)
))
assert weight == 4 * 0.75
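# That is, 3.0 -- a stemmed match counts for a bit less than the
# exact phrase match against the same field (which weighs 4).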
def test_match_author_hypotheses(self):
# Test our ability to generate hypotheses that a query string
# is an attempt to identify the author of a book. We do this
# by calling _author_field_must_match several times -- that's
# where most of the work happens.
class Mock(Query):
def _author_field_must_match(self, base_field, query_string=None):
yield "%s must match %s" % (base_field, query_string)
query = Mock("<NAME>")
hypotheses = list(query.match_author_hypotheses)
# We test three hypotheses: the query string is the author's
# display name, it's the author's sort name, or it matches the
# author's sort name when automatically converted to a sort
# name.
assert (
[
'display_name must match <NAME>',
'sort_name must match le guin, ursula'
] ==
hypotheses)
# If the string passed in already looks like a sort name, we
# don't try to convert it -- but someone's name may contain a
# comma, so we do check both fields.
query = Mock("le guin, ursula")
hypotheses = list(query.match_author_hypotheses)
assert (
[
'display_name must match <NAME>, ursula',
'sort_name must match le guin, ursula',
] ==
hypotheses)
def test__author_field_must_match(self):
class Mock(Query):
def match_one_field_hypotheses(self, field_name, query_string):
hypothesis = "maybe %s matches %s" % (field_name, query_string)
yield hypothesis, 6
def _role_must_also_match(self, hypothesis):
return [hypothesis, "(but the role must be appropriate)"]
query = Mock("<NAME>")
m = query._author_field_must_match
# We call match_one_field_hypotheses with the field name, and
# run the result through _role_must_also_match() to ensure we
# only get works where this author made a major contribution.
[(hypothesis, weight)] = list(m("display_name"))
assert (
['maybe contributors.display_name matches <NAME>',
'(but the role must be appropriate)'] ==
hypothesis)
assert 6 == weight
# We can pass in a different query string to override
# .query_string. This is how we test a match against our guess
# at an author's sort name.
[(hypothesis, weight)] = list(m("sort_name", "le guin, ursula"))
assert (
['maybe contributors.sort_name matches le guin, ursula',
'(but the role must be appropriate)'] ==
hypothesis)
assert 6 == weight
def test__role_must_also_match(self):
class Mock(Query):
@classmethod
def _nest(cls, subdocument, base):
return ("nested", subdocument, base)
# Verify that _role_must_also_match() puts an appropriate
# restriction on a match against a field in the 'contributors'
# sub-document.
original_query = Term(**{'contributors.sort_name': '<NAME>'})
modified = Mock._role_must_also_match(original_query)
# The resulting query was run through Mock._nest. In a real
# scenario this would turn it into a nested query against the
# 'contributors' subdocument.
nested, subdocument, modified_base = modified
assert "nested" == nested
assert "contributors" == subdocument
# The original query was combined with an extra clause, which
# only matches people if their contribution to a book was of
# the type that library patrons are likely to search for.
extra = Terms(**{"contributors.role": ['Primary Author', 'Author', 'Narrator']})
assert Bool(must=[original_query, extra]) == modified_base
def test_match_topic_hypotheses(self):
query = Query("whales")
[(hypothesis, weight)] = list(query.match_topic_hypotheses)
# There's a single hypothesis -- a MultiMatch covering both
# summary text and classifications. The score for a book is
# whichever of the two types of fields is a better match for
# 'whales'.
assert (
MultiMatch(
query="whales",
fields=["summary", "classifications.term"],
type="best_fields",
) ==
hypothesis)
# The weight of the hypothesis is the base weight associated
# with the 'summary' field.
assert Query.WEIGHT_FOR_FIELD['summary'] == weight
def test_title_multi_match_for(self):
# Test our ability to hypothesize that a query string might
# contain some text from the title plus some text from
# some other field.
# If there's only one word in the query, then we don't bother
# making this hypothesis at all.
assert (
[] ==
list(Query("grasslands").title_multi_match_for("other field")))
query = Query("grass lands")
[(hypothesis, weight)] = list(query.title_multi_match_for("author"))
expect = MultiMatch(
query="grass lands",
fields = ['title.minimal', 'author.minimal'],
type="cross_fields",
operator="and",
minimum_should_match="100%",
)
assert expect == hypothesis
# The weight of this hypothesis is between the weight of a
# pure title match and the weight of a pure author match.
title_weight = Query.WEIGHT_FOR_FIELD['title']
author_weight = Query.WEIGHT_FOR_FIELD['author']
assert weight == author_weight * (author_weight/title_weight)
def test_parsed_query_matches(self):
# Test our ability to take a query like "asteroids
# nonfiction", and turn it into a single hypothesis
# encapsulating the idea: "what if they meant to do a search
# on 'asteroids' but with a nonfiction filter?"
query = Query("nonfiction asteroids")
# The work of this method is simply delegated to QueryParser.
parser = QueryParser(query.query_string)
expect = (parser.match_queries, parser.filters)
assert expect == query.parsed_query_matches
def test_hypothesize(self):
# Verify that _hypothesize() adds a query to a list,
# boosting it if necessary.
class Mock(Query):
boost_extras = []
@classmethod
def _boost(cls, boost, queries, filters=None, **kwargs):
if filters or kwargs:
cls.boost_extras.append((filters, kwargs))
return "%s boosted by %d" % (queries, boost)
hypotheses = []
# _hypothesize() does nothing if it's not passed a real
# query.
Mock._hypothesize(hypotheses, None, 100)
assert [] == hypotheses
assert [] == Mock.boost_extras
# If it is passed a real query, _boost() is called on the
# query object.
Mock._hypothesize(hypotheses, "query object", 10)
assert ["query object boosted by 10"] == hypotheses
assert [] == Mock.boost_extras
Mock._hypothesize(hypotheses, "another query object", 1)
assert (["query object boosted by 10", "another query object boosted by 1"] ==
hypotheses)
assert [] == Mock.boost_extras
# If a filter or any other arguments are passed in, those arguments
# are propagated to _boost().
hypotheses = []
Mock._hypothesize(hypotheses, "query with filter", 2, filters="some filters",
extra="extra kwarg")
assert ["query with filter boosted by 2"] == hypotheses
assert [("some filters", dict(extra="extra kwarg"))] == Mock.boost_extras
class TestQueryParser(DatabaseTest):
"""Test the class that tries to derive structure from freeform
text search requests.
"""
def test_constructor(self):
# The constructor parses the query string, creates any
# necessary query objects, and turns the remaining part of
# the query into a 'simple query string'-type query.
class MockQuery(Query):
"""Create 'query' objects that are easier to test than
the ones the Query class makes.
"""
@classmethod
def _match_term(cls, field, query):
return (field, query)
@classmethod
def make_target_age_query(cls, query, boost="default boost"):
return ("target age (filter)", query), ("target age (query)", query, boost)
@property
def elasticsearch_query(self):
# Mock the creation of an extremely complicated DisMax
# query -- we just want to verify that such a query
# was created.
return "A huge DisMax for %r" % self.query_string
parser = QueryParser("science fiction about dogs", MockQuery)
# The original query string is always stored as .original_query_string.
assert "science fiction about dogs" == parser.original_query_string
# The part of the query that couldn't be parsed is always stored
# as final_query_string.
assert "about dogs" == parser.final_query_string
# Leading and trailing whitespace is never regarded as
# significant and it is stripped from the query string
# immediately.
whitespace = QueryParser(" abc ", MockQuery)
assert "abc" == whitespace.original_query_string
# parser.filters contains the filters that we think we were
# able to derive from the query string.
assert [('genres.name', 'Science Fiction')] == parser.filters
# parser.match_queries contains the result of putting the rest
# of the query string into a Query object (or, here, our
# MockQuery) and looking at its .elasticsearch_query. In a
# real scenario, this will result in a huge DisMax query
# that tries to consider all the things someone might be
# searching for, _in addition to_ applying a filter.
assert ["A huge DisMax for 'about dogs'"] == parser.match_queries
# Now that you see how it works, let's define a helper
# function which makes it easy to verify that a certain query
# string becomes a certain set of filters, plus a certain set
# of queries, plus a DisMax for some remainder string.
def assert_parses_as(query_string, filters, remainder, extra_queries=None):
if not isinstance(filters, list):
filters = [filters]
queries = extra_queries or []
if not isinstance(queries, list):
queries = [queries]
parser = QueryParser(query_string, MockQuery)
assert filters == parser.filters
if remainder:
queries.append(MockQuery(remainder).elasticsearch_query)
assert queries == parser.match_queries
# Here's the same test from before, using the new
# helper function.
assert_parses_as(
"science fiction about dogs",
("genres.name", "Science Fiction"),
"about dogs"
)
# Test audiences.
assert_parses_as(
"children's picture books",
("audience", "children"),
"picture books"
)
# (It's possible for the entire query string to be eaten up,
# such that there is no remainder match at all.)
assert_parses_as(
"young adult romance",
[("genres.name", "Romance"),
("audience", "youngadult")],
''
)
# Test fiction/nonfiction status.
assert_parses_as(
"fiction dinosaurs",
("fiction", "fiction"),
"dinosaurs"
)
# (Genres are parsed before fiction/nonfiction; otherwise
# "science fiction" would be chomped by a search for "fiction"
# and "nonfiction" would not be picked up.)
assert_parses_as(
"science fiction or nonfiction dinosaurs",
[("genres.name", "Science Fiction"),
("fiction", "nonfiction")],
"or dinosaurs"
)
# Test target age.
#
# These are a little different because the target age
# restriction shows up twice: once as a filter (to eliminate
# all books that don't fit the target age restriction) and
# once as a query (to boost books that cluster tightly around
# the target age, at the expense of books that span a wider
# age range).
assert_parses_as(
"grade 5 science",
[("genres.name", "Science"),
("target age (filter)", (10, 10))],
'',
("target age (query)", (10, 10), 'default boost')
)
assert_parses_as(
'divorce ages 10 and up',
("target age (filter)", (10, 14)),
'divorce and up', # TODO: not ideal
("target age (query)", (10, 14), 'default boost'),
)
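# Note that "ages 10 and up" came back as the bounded range
# (10, 14) -- the age parser evidently imputes an upper bound
# rather than leaving the range open-ended.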
# Nothing can be parsed out from this query--it's an author's name
# and will be handled by another query.
parser = QueryParser("octavia butler")
assert [] == parser.match_queries
assert "octavia butler" == parser.final_query_string
# Finally, try parsing a query without using MockQuery.
query = QueryParser("nonfiction asteroids")
[nonfiction] = query.filters
[asteroids] = query.match_queries
# It creates real Elasticsearch-DSL query objects.
# The filter is a very simple Term query.
assert Term(fiction="nonfiction") == nonfiction
# The query part is an extremely complicated DisMax query, so
# I won't test the whole thing, but it's what you would get if
# you just tried a search for "asteroids".
assert isinstance(asteroids, DisMax)
assert asteroids == Query("asteroids").elasticsearch_query
def test_add_match_term_filter(self):
# TODO: this method could use a standalone test, but it's
# already covered by the test_constructor.
pass
def test_add_target_age_filter(self):
parser = QueryParser("")
parser.filters = []
parser.match_queries = []
remainder = parser.add_target_age_filter(
(10, 11), "penguins grade 5-6", "grade 5-6"
)
assert "penguins " == remainder
# Here's the filter part: a book's age range must overlap the
# 10-11 range, or it gets filtered out.
filter_clauses = [
Range(**{"target_age.upper":dict(gte=10)}),
Range(**{"target_age.lower":dict(lte=11)}),
]
assert [Bool(must=filter_clauses)] == parser.filters
# Here's the query part: a book gets boosted if its
# age range fits _entirely_ within the target age range.
query_clauses = [
Range(**{"target_age.upper":dict(lte=11)}),
Range(**{"target_age.lower":dict(gte=10)}),
]
assert ([Bool(boost=1.1, must=filter_clauses, should=query_clauses)] ==
parser.match_queries)
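# A worked example of the two parts above: a book with target_age
# 9-12 satisfies the filter clauses (12 >= 10 and 9 <= 11) but
# neither 'should' clause, while a book with target_age 10-11
# satisfies both 'should' clauses as well, so it scores higher.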
def test__without_match(self):
# Test our ability to remove matched text from a string.
m = QueryParser._without_match
assert " fiction" == m("young adult fiction", "young adult")
assert " of dinosaurs" == m("science of dinosaurs", "science")
# If the match cuts off in the middle of a word, we remove
# everything up to the end of the word.
assert " books" == m("children's books", "children")
assert "" == m("adulting", "adult")
class TestFilter(DatabaseTest):
def setup_method(self):
super(TestFilter, self).setup_method()
# Look up three Genre objects which can be used to make filters.
self.literary_fiction, ignore = Genre.lookup(
self._db, "Literary Fiction"
)
self.fantasy, ignore = Genre.lookup(self._db, "Fantasy")
self.horror, ignore = Genre.lookup(self._db, "Horror")
# Create two empty CustomLists which can be used to make filters.
self.best_sellers, ignore = self._customlist(num_entries=0)
self.staff_picks, ignore = self._customlist(num_entries=0)
def test_constructor(self):
# Verify that the Filter constructor sets members with
# minimal processing.
collection = self._default_collection
media = object()
languages = object()
fiction = object()
audiences = ["somestring"]
author = object()
match_nothing = object()
min_score = object()
# Test the easy stuff -- these arguments just get stored on
# the Filter object. If necessary, they'll be cleaned up
# later, during build().
filter = Filter(
media=media, languages=languages,
fiction=fiction, audiences=audiences, author=author,
match_nothing=match_nothing, min_score=min_score
)
assert media == filter.media
assert languages == filter.languages
assert fiction == filter.fiction
assert audiences == filter.audiences
assert author == filter.author
assert match_nothing == filter.match_nothing
assert min_score == filter.min_score
# Test the `collections` argument.
# If you pass in a library, you get all of its collections.
library_filter = Filter(collections=self._default_library)
assert [self._default_collection.id] == library_filter.collection_ids
# If the library has no collections, the collection filter
# will filter everything out.
self._default_library.collections = []
library_filter = Filter(collections=self._default_library)
assert [] == library_filter.collection_ids
# If you pass in Collection objects, you get their IDs.
collection_filter = Filter(collections=self._default_collection)
assert [self._default_collection.id] == collection_filter.collection_ids
collection_filter = Filter(collections=[self._default_collection])
assert [self._default_collection.id] == collection_filter.collection_ids
# If you pass in IDs, they're left alone.
ids = [10, 11, 22]
collection_filter = Filter(collections=ids)
assert ids == collection_filter.collection_ids
# If you pass in nothing, there is no collection filter. This
# is different from the case above, where the library had no
# collections and everything was filtered out.
empty_filter = Filter()
assert None == empty_filter.collection_ids
# Test the `target_age` argument.
assert None == empty_filter.target_age
one_year = Filter(target_age=8)
assert (8,8) == one_year.target_age
year_range = Filter(target_age=(8,10))
assert (8,10) == year_range.target_age
year_range = Filter(target_age=NumericRange(3, 6, '()'))
assert (4, 5) == year_range.target_age
# Test genre_restriction_sets
# In these three cases, there are no restrictions on genre.
assert [] == empty_filter.genre_restriction_sets
assert [] == Filter(genre_restriction_sets=[]).genre_restriction_sets
assert [] == Filter(genre_restriction_sets=None).genre_restriction_sets
# Restrict to books that are literary fiction AND (horror OR
# fantasy).
restricted = Filter(
genre_restriction_sets = [
[self.horror, self.fantasy],
[self.literary_fiction],
]
)
assert (
[[self.horror.id, self.fantasy.id],
[self.literary_fiction.id]] ==
restricted.genre_restriction_sets)
# This is a restriction: 'only books that have no genre'
assert [[]] == Filter(genre_restriction_sets=[[]]).genre_restriction_sets
# Test customlist_restriction_sets
# In these three cases, there are no restrictions.
assert [] == empty_filter.customlist_restriction_sets
assert [] == Filter(customlist_restriction_sets=None).customlist_restriction_sets
assert [] == Filter(customlist_restriction_sets=[]).customlist_restriction_sets
# Restrict to books that are on *both* the best sellers list and the
# staff picks list.
restricted = Filter(
customlist_restriction_sets = [
[self.best_sellers],
[self.staff_picks],
]
)
assert (
[[self.best_sellers.id],
[self.staff_picks.id]] ==
restricted.customlist_restriction_sets)
# This is a restriction -- 'only books that are not on any lists'.
assert (
[[]] ==
Filter(customlist_restriction_sets=[[]]).customlist_restriction_sets)
# Test the license_datasource argument
overdrive = DataSource.lookup(self._db, DataSource.OVERDRIVE)
overdrive_only = Filter(license_datasource=overdrive)
assert [overdrive.id] == overdrive_only.license_datasources
overdrive_only = Filter(license_datasource=overdrive.id)
assert [overdrive.id] == overdrive_only.license_datasources
# If you pass in a Facets object, its modify_search_filter()
# and scoring_functions() methods are called.
class Mock(object):
def modify_search_filter(self, filter):
self.modify_search_filter_called_with = filter
def scoring_functions(self, filter):
self.scoring_functions_called_with = filter
return ["some scoring functions"]
facets = Mock()
filter = Filter(facets=facets)
assert filter == facets.modify_search_filter_called_with
assert filter == facets.scoring_functions_called_with
assert ["some scoring functions"] == filter.scoring_functions
# Some arguments to the constructor only exist as keyword
# arguments, but you can't pass in whatever keywords you want.
with pytest.raises(ValueError) as excinfo:
Filter(no_such_keyword="nope")
assert "Unknown keyword arguments" in str(excinfo.value)
def test_from_worklist(self):
# Any WorkList can be converted into a Filter.
#
# WorkList.inherited_value() and WorkList.inherited_values()
# are used to determine what should go into the constructor.
# Disable any excluded audiobook data sources -- they will
# introduce unwanted extra clauses into our filters.
excluded_audio_sources = ConfigurationSetting.sitewide(
self._db, Configuration.EXCLUDED_AUDIO_DATA_SOURCES
)
excluded_audio_sources.value = json.dumps([])
library = self._default_library
assert True == library.allow_holds
parent = self._lane(
display_name="Parent Lane", library=library
)
parent.media = Edition.AUDIO_MEDIUM
parent.languages = ["eng", "fra"]
parent.fiction = True
parent.audiences = set([Classifier.AUDIENCE_CHILDREN])
parent.target_age = NumericRange(10, 11, '[]')
parent.genres = [self.horror, self.fantasy]
parent.customlists = [self.best_sellers]
parent.license_datasource = DataSource.lookup(
self._db, DataSource.GUTENBERG
)
# This lane inherits most of its configuration from its parent.
inherits = self._lane(
display_name="Child who inherits", parent=parent
)
inherits.genres = [self.literary_fiction]
inherits.customlists = [self.staff_picks]
class Mock(object):
def modify_search_filter(self, filter):
self.called_with = filter
def scoring_functions(self, filter):
return []
facets = Mock()
filter = Filter.from_worklist(self._db, inherits, facets)
assert [self._default_collection.id] == filter.collection_ids
assert parent.media == filter.media
assert parent.languages == filter.languages
assert parent.fiction == filter.fiction
assert parent.audiences + [Classifier.AUDIENCE_ALL_AGES] == filter.audiences
assert [parent.license_datasource_id] == filter.license_datasources
assert ((parent.target_age.lower, parent.target_age.upper) ==
filter.target_age)
assert True == filter.allow_holds
# Filter.from_worklist passed the mock Facets object in to
# the Filter constructor, which called its modify_search_filter()
# method.
assert facets.called_with is not None
# For genre and custom list restrictions, the child values are
# appended to the parent's rather than replacing it.
assert ([parent.genre_ids, inherits.genre_ids] ==
[set(x) for x in filter.genre_restriction_sets])
assert ([parent.customlist_ids, inherits.customlist_ids] ==
filter.customlist_restriction_sets)
# If any other value is set on the child lane, the parent value
# is overridden.
inherits.media = Edition.BOOK_MEDIUM
filter = Filter.from_worklist(self._db, inherits, facets)
assert inherits.media == filter.media
# This lane doesn't inherit anything from its parent.
does_not_inherit = self._lane(
display_name="Child who does not inherit", parent=parent
)
does_not_inherit.inherit_parent_restrictions = False
# Because of that, the final filter we end up with is
# nearly empty. The only restriction here is the collection
# restriction imposed by the fact that `does_not_inherit`
# is, itself, associated with a specific library.
filter = Filter.from_worklist(self._db, does_not_inherit, facets)
built_filters, subfilters = self.assert_filter_builds_to([], filter)
# The collection restriction is not reflected in the main
# filter; rather it's in a subfilter that will be applied to the
# 'licensepools' subdocument, where the collection ID lives.
[subfilter] = subfilters.pop('licensepools')
assert ({'terms': {'licensepools.collection_id': [self._default_collection.id]}} ==
subfilter.to_dict())
# No other subfilters were specified.
assert {} == subfilters
# If the library does not allow holds, this information is
# propagated to its Filter.
library.setting(library.ALLOW_HOLDS).value = False
filter = Filter.from_worklist(self._db, parent, facets)
assert False == library.allow_holds
# Any excluded audio sources in the sitewide settings
# will be propagated to all Filters.
overdrive = DataSource.lookup(self._db, DataSource.OVERDRIVE)
excluded_audio_sources.value = json.dumps([overdrive.name])
filter = Filter.from_worklist(self._db, parent, facets)
assert [overdrive.id] == filter.excluded_audiobook_data_sources
# A bit of setup to test how WorkList.collection_ids affects
# the resulting Filter.
# Here's a collection associated with the default library.
for_default_library = WorkList()
for_default_library.initialize(self._default_library)
# Its filter uses all the collections associated with that library.
filter = Filter.from_worklist(self._db, for_default_library, None)
assert [self._default_collection.id] == filter.collection_ids
# Here's a child of that WorkList associated with a different
# library.
library2 = self._library()
collection2 = self._collection()
library2.collections.append(collection2)
for_other_library = WorkList()
for_other_library.initialize(library2)
for_default_library.append_child(for_other_library)
# Its filter uses the collection from the second library.
filter = Filter.from_worklist(self._db, for_other_library, None)
assert [collection2.id] == filter.collection_ids
# If for whatever reason, collection_ids on the child is not set,
# all collections associated with the WorkList's library will be used.
for_other_library.collection_ids = None
filter = Filter.from_worklist(self._db, for_other_library, None)
assert [collection2.id] == filter.collection_ids
# If no library is associated with a WorkList, we assume that
# holds are allowed. (Usually this is controlled by a library
# setting.)
for_other_library.library_id = None
filter = Filter.from_worklist(self._db, for_other_library, None)
assert True == filter.allow_holds
def assert_filter_builds_to(self, expect, filter, _chain_filters=None):
"""Helper method for the most common case, where a
Filter.build() returns a main filter and no nested filters.
"""
final_query = {'bool': {'must_not': [RESEARCH.to_dict()]}}
if expect:
final_query['bool']['must'] = expect
main, nested = filter.build(_chain_filters)
assert final_query == main.to_dict()
return main, nested
def test_audiences(self):
# Verify that the .audiences property correctly represents the
# combination of what's in the ._audiences list and application
# policies.
filter = Filter()
assert filter.audiences == None
# The output is a list whether audiences is a string...
filter = Filter(audiences=Classifier.AUDIENCE_ALL_AGES)
assert filter.audiences == [Classifier.AUDIENCE_ALL_AGES]
# ...or a list.
filter = Filter(audiences=[Classifier.AUDIENCE_ALL_AGES])
assert filter.audiences == [Classifier.AUDIENCE_ALL_AGES]
# "all ages" should always be an audience if the audience is
# young adult or adult.
filter = Filter(audiences=Classifier.AUDIENCE_YOUNG_ADULT)
assert filter.audiences == [Classifier.AUDIENCE_YOUNG_ADULT, Classifier.AUDIENCE_ALL_AGES]
filter = Filter(audiences=Classifier.AUDIENCE_ADULT)
assert filter.audiences == [Classifier.AUDIENCE_ADULT, Classifier.AUDIENCE_ALL_AGES]
filter = Filter(audiences=[Classifier.AUDIENCE_ADULT, Classifier.AUDIENCE_YOUNG_ADULT])
assert (
filter.audiences ==
[Classifier.AUDIENCE_ADULT,
Classifier.AUDIENCE_YOUNG_ADULT,
Classifier.AUDIENCE_ALL_AGES])
# If the audience is meant for adults, then "all ages" should not
# be included
for audience in (
Classifier.AUDIENCE_ADULTS_ONLY, Classifier.AUDIENCE_RESEARCH
):
filter = Filter(audiences=audience)
assert(Classifier.AUDIENCE_ALL_AGES not in filter.audiences)
# If the audience and target age are meant for children, then the
# audience should only be for children.
filter = Filter(
audiences=Classifier.AUDIENCE_CHILDREN,
target_age=5
)
assert filter.audiences == [Classifier.AUDIENCE_CHILDREN]
# If the children's target age includes children older than
# ALL_AGES_AGE_CUTOFF, or there is no target age, the
# audiences includes "all ages".
all_children = Filter(audiences=Classifier.AUDIENCE_CHILDREN)
nine_years = Filter(audiences=Classifier.AUDIENCE_CHILDREN, target_age=9)
for filter in (all_children, nine_years):
assert (
filter.audiences ==
[Classifier.AUDIENCE_CHILDREN, Classifier.AUDIENCE_ALL_AGES])
def test_build(self):
# Test the ability to turn a Filter into an ElasticSearch
# filter object.
# build() takes the information in the Filter object, scrubs
# it, and uses _chain_filters to chain together a number of
# alternate hypotheses. It returns a 2-tuple with a main Filter
# and a dictionary describing additional filters to be applied
# to subdocuments.
#
# Let's try it with some simple cases before mocking
# _chain_filters for a more detailed test.
# Start with an empty filter. No filter is built and there are no
# nested filters.
filter = Filter()
built_filters, subfilters = self.assert_filter_builds_to([], filter)
assert {} == subfilters
# Add a medium clause to the filter.
filter.media = "a medium"
medium_built = {'terms': {'medium': ['amedium']}}
built_filters, subfilters = self.assert_filter_builds_to([medium_built], filter)
assert {} == subfilters
# Add a language clause to the filter.
filter.languages = ["lang1", "LANG2"]
language_built = {'terms': {'language': ['lang1', 'lang2']}}
# Now both the medium clause and the language clause must match.
built_filters, subfilters = self.assert_filter_builds_to(
[medium_built, language_built],
filter
)
assert {} == subfilters
chain = self._mock_chain
filter.collection_ids = [self._default_collection]
filter.fiction = True
filter._audiences = 'CHILDREN'
filter.target_age = (2,3)
overdrive = DataSource.lookup(self._db, DataSource.OVERDRIVE)
filter.excluded_audiobook_data_sources = [overdrive.id]
filter.allow_holds = False
last_update_time = datetime.datetime(2019, 1, 1)
i1 = self._identifier()
i2 = self._identifier()
filter.identifiers = [i1, i2]
filter.updated_after = last_update_time
# We want books from a specific license source.
filter.license_datasources = overdrive
# We want books by a specific author.
filter.author = ContributorData(sort_name="<NAME>")
# We want books that are literary fiction, *and* either
# fantasy or horror.
filter.genre_restriction_sets = [
[self.literary_fiction], [self.fantasy, self.horror]
]
# We want books that are on _both_ of the custom lists.
filter.customlist_restriction_sets = [
[self.best_sellers], [self.staff_picks]
]
# At this point every item on this Filter that can be set, has been
# set. When we run build, we'll end up with the output of our mocked
# chain() method -- a list of small filters.
built, nested = filter.build(_chain_filters=chain)
# This time we do see a nested filter. The information
# necessary to enforce the 'current collection', 'excluded
# audiobook sources', 'no holds', and 'license source'
# restrictions is kept in the nested 'licensepools' document,
# so those restrictions must be described in terms of nested
# filters on that document.
[licensepool_filter, datasource_filter, excluded_audiobooks_filter,
no_holds_filter] = nested.pop('licensepools')
# The 'current collection' filter.
assert (
{'terms': {'licensepools.collection_id': [self._default_collection.id]}} ==
licensepool_filter.to_dict())
# The 'only certain data sources' filter.
assert ({'terms': {'licensepools.data_source_id': [overdrive.id]}} ==
datasource_filter.to_dict())
# The 'excluded audiobooks' filter.
audio = Q('term', **{'licensepools.medium': Edition.AUDIO_MEDIUM})
excluded_audio_source = Q(
'terms', **{'licensepools.data_source_id' : [overdrive.id]}
)
excluded_audio = Bool(must=[audio, excluded_audio_source])
not_excluded_audio = Bool(must_not=excluded_audio)
assert not_excluded_audio == excluded_audiobooks_filter
# The 'no holds' filter.
open_access = Q('term', **{'licensepools.open_access' : True})
licenses_available = Q('term', **{'licensepools.available' : True})
currently_available = Bool(should=[licenses_available, open_access])
assert currently_available == no_holds_filter
# The best-seller list and staff picks restrictions are also
# expressed as nested filters.
[best_sellers_filter, staff_picks_filter] = nested.pop('customlists')
assert ({'terms': {'customlists.list_id': [self.best_sellers.id]}} ==
best_sellers_filter.to_dict())
assert ({'terms': {'customlists.list_id': [self.staff_picks.id]}} ==
staff_picks_filter.to_dict())
# The author restriction is also expressed as a nested filter.
[contributor_filter] = nested.pop('contributors')
# Its value is the value of .author_filter, which is tested
# separately in test_author_filter.
assert isinstance(filter.author_filter, Bool)
assert filter.author_filter == contributor_filter
# The genre restrictions are also expressed as nested filters.
literary_fiction_filter, fantasy_or_horror_filter = nested.pop(
'genres'
)
# There are two different restrictions on genre, because
# genre_restriction_sets was set to two lists of genres.
assert ({'terms': {'genres.term': [self.literary_fiction.id]}} ==
literary_fiction_filter.to_dict())
assert ({'terms': {'genres.term': [self.fantasy.id, self.horror.id]}} ==
fantasy_or_horror_filter.to_dict())
# There's a restriction on the identifier.
[identifier_restriction] = nested.pop('identifiers')
# The restriction includes subclauses, each of which matches
# the identifier and type of one of the Identifier objects.
subclauses = [
Bool(must=[Term(identifiers__identifier=x.identifier),
Term(identifiers__type=x.type)])
for x in [i1, i2]
]
# Any identifier will work, but at least one must match.
assert (Bool(minimum_should_match=1, should=subclauses) ==
identifier_restriction)
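# In other words, a work survives this restriction if it carries
# either of the two identifiers -- it does not need to carry both.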
# There are no other nested filters.
assert {} == nested
# Every other restriction imposed on the Filter object becomes an
# Elasticsearch filter object in this list.
(medium, language, fiction, audience, target_age,
updated_after) = built
# Test them one at a time.
#
# Throughout this test, notice that the data model objects --
# Collections (above), Genres, and CustomLists -- have been
# replaced with their database IDs. This is done by
# filter_ids.
#
# Also, audience, medium, and language have been run through
# scrub_list, which turns scalar values into lists, removes
# spaces, and converts to lowercase.
# These we tested earlier -- we're just making sure the same
# documents are put into the full filter.
assert medium_built == medium.to_dict()
assert language_built == language.to_dict()
assert {'term': {'fiction': 'fiction'}} == fiction.to_dict()
assert {'terms': {'audience': ['children']}} == audience.to_dict()
# The contents of target_age_filter are tested below -- this
# just tests that the target_age_filter is included.
assert filter.target_age_filter == target_age
# There's a restriction on the last updated time for bibliographic
# metadata. The datetime is converted to a number of seconds since
# the epoch, since that's how we index times.
expect = (
last_update_time - datetime.datetime.utcfromtimestamp(0)
).total_seconds()
assert (
{'bool': {'must': [
{'range': {'last_update_time': {'gte': expect}}}
]}} ==
updated_after.to_dict())
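# As a concrete value: datetime(2019, 1, 1) is 1546300800.0 seconds
# after the epoch, so that is the 'gte' bound sent to the index.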
# We tried fiction; now try nonfiction.
filter = Filter()
filter.fiction = False
built_filters, subfilters = self.assert_filter_builds_to([{'term': {'fiction': 'nonfiction'}}], filter)
assert {} == subfilters
def test_build_series(self):
# Test what happens when a series restriction is placed on a Filter.
f = Filter(series="Talking Hedgehog Mysteries")
built, nested = f.build()
assert {} == nested
# A match against a keyword field only matches on an exact
# string match.
assert (
built.to_dict()['bool']['must'] ==
[{'term': {'series.keyword': 'Talking Hedgehog Mysteries'}}])
# Find books that are in _some_ series--which one doesn't
# matter.
f = Filter(series=True)
built, nested = f.build()
assert {} == nested
# The book must have an indexed series.
assert (
built.to_dict()['bool']['must'] ==
[{'exists': {'field': 'series'}}])
# But the 'series' that got indexed must not be the empty string.
assert {'term': {'series.keyword': ''}} in built.to_dict()['bool']['must_not']
def test_sort_order(self):
# Test the Filter.sort_order property.
# No sort order.
f = Filter()
assert [] == f.sort_order
assert False == f.order_ascending
def validate_sort_order(filter, main_field):
"""Validate the 'easy' part of the sort order -- the tiebreaker
fields. Return the 'difficult' part.
:return: The first part of the sort order -- the field that
is potentially difficult.
"""
# The tiebreaker fields are always in the same order, but
# if the main sort field is one of the tiebreaker fields,
# it's removed from the list -- there's no need to sort on
# that field a second time.
default_sort_fields = [
{x: "asc"} for x in ['sort_author', 'sort_title', 'work_id']
if x != main_field
]
assert default_sort_fields == filter.sort_order[1:]
return filter.sort_order[0]
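# For example, if the main sort field were 'sort_title', the
# tiebreakers checked above would be just sort_author and work_id.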
# A simple field, either ascending or descending.
f.order='field'
assert False == f.order_ascending
first_field = validate_sort_order(f, 'field')
assert dict(field='desc') == first_field
f.order_ascending = True
first_field = validate_sort_order(f, 'field')
assert dict(field='asc') == first_field
# When multiple fields are given, they are put at the
# beginning and any remaining tiebreaker fields are added.
f.order=['series_position', 'work_id', 'some_other_field']
assert (
[
dict(series_position='asc'),
dict(work_id='asc'),
dict(some_other_field='asc'),
dict(sort_author='asc'),
dict(sort_title='asc'),
] ==
f.sort_order)
# You can't sort by some random subdocument field, because there's
# not enough information to know how to aggregate multiple values.
#
# You _can_ sort by license pool availability time and first
# appearance on custom list -- those are tested below -- but it's
# complicated.
f.order = 'subdocument.field'
with pytest.raises(ValueError) as excinfo:
f.sort_order()
assert "I don't know how to sort by subdocument.field" in str(excinfo.value)
# It's possible to sort by every field in
# Facets.SORT_ORDER_TO_ELASTICSEARCH_FIELD_NAME.
used_orders = Facets.SORT_ORDER_TO_ELASTICSEARCH_FIELD_NAME
added_to_collection = used_orders[Facets.ORDER_ADDED_TO_COLLECTION]
series_position = used_orders[Facets.ORDER_SERIES_POSITION]
last_update = used_orders[Facets.ORDER_LAST_UPDATE]
for sort_field in used_orders.values():
if sort_field in (added_to_collection, series_position,
last_update):
# These are complicated cases, tested below.
continue
f.order = sort_field
first_field = validate_sort_order(f, sort_field)
assert {sort_field: 'asc'} == first_field
# A slightly more complicated case is when a feed is ordered by
# series position -- there the second field is title rather than
# author.
f.order = series_position
assert (
[
{x:'asc'} for x in [
'series_position', 'sort_title', 'sort_author', 'work_id'
]
] ==
f.sort_order)
# A more complicated case is when a feed is ordered by date
# added to the collection. This requires an aggregate function
# and potentially a nested filter.
f.order = added_to_collection
first_field = validate_sort_order(f, added_to_collection)
# Here there's no nested filter but there is an aggregate
# function. If a book is available through multiple
# collections, we sort by the _earliest_ availability time.
simple_nested_configuration = {
'licensepools.availability_time': {'mode': 'min', 'order': 'asc'}
}
assert simple_nested_configuration == first_field
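# E.g. a book available through two collections, one since 2019 and
# one since 2020, sorts as though it became available in 2019 --
# 'min' picks the earliest availability_time across license pools.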
# Setting a collection ID restriction will add a nested filter.
f.collection_ids = [self._default_collection]
first_field = validate_sort_order(f, 'licensepools.availability_time')
# The nested filter ensures that when sorting the results, we
# only consider availability times from license pools that
# match our collection filter.
#
# Filter.build() will apply the collection filter separately
# to the 'filter' part of the query -- that's what actually
# stops books from showing up if they're in the wrong collection.
#
# This just makes sure that the books show up in the right _order_
# for any given set of collections.
nested_filter = first_field['licensepools.availability_time'].pop('nested')
assert (
{'path': 'licensepools',
'filter': {
'terms': {
'licensepools.collection_id': [self._default_collection.id]
}
}
} ==
nested_filter)
# Apart from the nested filter, this is the same ordering
# configuration as before.
assert simple_nested_configuration == first_field
# An ordering by "last update" may be simple, if there are no
# collections or lists associated with the filter.
f.order = last_update
f.collection_ids = []
first_field = validate_sort_order(f, last_update)
assert dict(last_update_time='asc') == first_field
# Or it can be *incredibly complicated*, if there _are_
# collections or lists associated with the filter. Which,
# unfortunately, is almost all the time.
f.collection_ids = [self._default_collection.id]
f.customlist_restriction_sets = [[1], [1,2]]
first_field = validate_sort_order(f, last_update)
# Here, the ordering is done by a script that runs on the
# ElasticSearch server.
sort = first_field.pop('_script')
assert {} == first_field
# The script returns a numeric value and we want to sort those
# values in ascending order.
assert 'asc' == sort.pop('order')
assert 'number' == sort.pop('type')
script = sort.pop('script')
assert {} == sort
# The script is the 'simplified.work_last_update' stored script.
assert (CurrentMapping.script_name("work_last_update") ==
script.pop('stored'))
# Two parameters are passed into the script -- the IDs of the
# collections and the lists relevant to the query. This is so
# the query knows which updates should actually be considered
# for purposes of this query.
params = script.pop('params')
assert {} == script
assert [self._default_collection.id] == params.pop('collection_ids')
assert [1,2] == params.pop('list_ids')
assert {} == params
def test_author_filter(self):
# Test an especially complex subfilter for authorship.
# If no author filter is set up, there is no author filter.
no_filter = Filter(author=None)
assert None == no_filter.author_filter
def check_filter(contributor, *shoulds):
# Create a Filter with an author restriction and verify
# that its .author_filter looks the way we expect.
actual = Filter(author=contributor).author_filter
# We only count contributions that were in one of the
# matching roles.
role_match = Terms(
**{"contributors.role": Filter.AUTHOR_MATCH_ROLES}
)
# Among the other restrictions on fields in the
# 'contributors' subdocument (sort name, VIAF, etc.), at
# least one must also be met.
author_match = [Term(**should) for should in shoulds]
expect = Bool(
must=[
role_match,
Bool(minimum_should_match=1, should=author_match)
]
)
assert expect == actual
# You can apply the filter on any one of these four fields,
# using a Contributor or a ContributorData
for contributor_field in ('sort_name', 'display_name', 'viaf', 'lc'):
for cls in Contributor, ContributorData:
contributor = cls(**{contributor_field:"value"})
index_field = contributor_field
if contributor_field in ('sort_name', 'display_name'):
# Sort name and display name are indexed both as
# searchable text fields and filterable keywords.
# We're filtering, so we want to use the keyword
# version.
index_field += '.keyword'
check_filter(
contributor,
{"contributors.%s" % index_field: "value"}
)
# You can also apply the filter using a combination of these
# fields. At least one of the provided fields must match.
for cls in Contributor, ContributorData:
contributor = cls(
display_name='<NAME>', sort_name='<NAME>',
viaf="73520345", lc="n2013008575"
)
check_filter(
contributor,
{"contributors.sort_name.keyword": contributor.sort_name},
{"contributors.display_name.keyword": contributor.display_name},
{"contributors.viaf": contributor.viaf},
{"contributors.lc": contributor.lc},
)
# If an author's name is Edition.UNKNOWN_AUTHOR, matches
# against that field are not counted; otherwise all works with
# unknown authors would show up.
unknown_viaf = ContributorData(
sort_name=Edition.UNKNOWN_AUTHOR,
display_name=Edition.UNKNOWN_AUTHOR,
viaf="123"
)
check_filter(unknown_viaf, {"contributors.viaf": "123"})
# This can result in a filter that will match nothing because
# it has a Bool with a 'minimum_should_match' but no 'should'
# clauses.
totally_unknown = ContributorData(
sort_name=Edition.UNKNOWN_AUTHOR,
display_name=Edition.UNKNOWN_AUTHOR,
)
check_filter(totally_unknown)
# This is fine -- if the search engine is asked for books by
# an author about whom absolutely nothing is known, it's okay
# to return no books.
def test_target_age_filter(self):
# Test an especially complex subfilter for target age.
# We're going to test the construction of this subfilter using
# a number of inputs.
# First, let's create a filter that matches "ages 2 to 5".
two_to_five = Filter(target_age=(2,5))
filter = two_to_five.target_age_filter
# The result is the combination of two filters -- both must
# match.
#
# One filter matches against the lower age range; the other
# matches against the upper age range.
assert "bool" == filter.name
lower_match, upper_match = filter.must
# We must establish that two-year-olds are not too old
# for the book.
def dichotomy(filter):
"""Verify that `filter` is a boolean filter that
matches one of a number of possibilities. Return those
possibilities.
"""
assert "bool" == filter.name
assert 1 == filter.minimum_should_match
return filter.should
more_than_two, no_upper_limit = dichotomy(upper_match)
# Either the upper age limit must be greater than two...
assert (
{'range': {'target_age.upper': {'gte': 2}}} ==
more_than_two.to_dict())
# ...or the upper age limit must be missing entirely.
def assert_matches_nonexistent_field(f, field):
"""Verify that a filter only matches when there is
no value for the given field.
"""
assert (
f.to_dict() ==
{'bool': {'must_not': [{'exists': {'field': field}}]}})
assert_matches_nonexistent_field(no_upper_limit, 'target_age.upper')
# We must also establish that five-year-olds are not too young
# for the book. Again, there are two ways of doing this.
less_than_five, no_lower_limit = dichotomy(lower_match)
# Either the lower age limit must be less than five...
assert (
{'range': {'target_age.lower': {'lte': 5}}} ==
less_than_five.to_dict())
# ...or the lower age limit must be missing entirely.
assert_matches_nonexistent_field(no_lower_limit, 'target_age.lower')
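# Putting both halves together for the (2, 5) case: a book aimed at
# ages 4-8 matches (8 >= 2 and 4 <= 5), while a book aimed at ages
# 7-9 is excluded, because its lower bound of 7 is above 5.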
# Now let's try a filter that matches "ten and under"
ten_and_under = Filter(target_age=(None, 10))
filter = ten_and_under.target_age_filter
# There are two clauses, and one of the two must match.
less_than_ten, no_lower_limit = dichotomy(filter)
# Either the lower part of the age range must be <= ten, or
# there must be no lower age limit. If neither of these are
# true, then ten-year-olds are too young for the book.
assert ({'range': {'target_age.lower': {'lte': 10}}} ==
less_than_ten.to_dict())
assert_matches_nonexistent_field(no_lower_limit, 'target_age.lower')
# Next, let's try a filter that matches "twelve and up".
twelve_and_up = Filter(target_age=(12, None))
filter = twelve_and_up.target_age_filter
# There are two clauses, and one of the two must match.
more_than_twelve, no_upper_limit = dichotomy(filter)
# Either the upper part of the age range must be >= twelve, or
# there must be no upper age limit. If neither of these are true,
# then twelve-year-olds are too old for the book.
assert ({'range': {'target_age.upper': {'gte': 12}}} ==
more_than_twelve.to_dict())
assert_matches_nonexistent_field(no_upper_limit, 'target_age.upper')
# Finally, test filters that put no restriction on target age.
no_target_age = Filter()
assert None == no_target_age.target_age_filter
no_target_age = Filter(target_age=(None, None))
assert None == no_target_age.target_age_filter
def test__scrub(self):
# Test the _scrub helper method, which transforms incoming strings
# to the type of strings Elasticsearch uses.
m = Filter._scrub
assert None == m(None)
assert "foo" == m("foo")
assert "youngadult" == m("Young Adult")
def test__scrub_list(self):
# Test the _scrub_list helper method, which scrubs incoming
# strings and makes sure they are in a list.
m = Filter._scrub_list
assert [] == m(None)
assert [] == m([])
assert ["foo"] == m("foo")
assert ["youngadult", "adult"] == m(["Young Adult", "Adult"])
def test__filter_ids(self):
# Test the _filter_ids helper method, which converts database
# objects to their IDs.
m = Filter._filter_ids
assert None == m(None)
assert [] == m([])
assert [1,2,3] == m([1,2,3])
library = self._default_library
assert [library.id] == m([library])
def test__scrub_identifiers(self):
# Test the _scrub_identifiers helper method, which converts
# Identifier objects to IdentifierData.
i1 = self._identifier()
i2 = self._identifier()
si1, si2 = Filter._scrub_identifiers([i1, i2])
for before, after in ((i1, si1), (i2, si2)):
assert isinstance(after, IdentifierData)
assert before.identifier == after.identifier
assert before.type == after.type
# If you pass in an IdentifierData you get it back.
assert [si1] == list(Filter._scrub_identifiers([si1]))
def test__chain_filters(self):
# Test the _chain_filters method, which combines
# two Elasticsearch filter objects.
f1 = Q('term', key="value")
f2 = Q('term', key2="value2")
m = Filter._chain_filters
# If this filter is the start of the chain, it's returned unaltered.
assert f1 == m(None, f1)
# Otherwise, a new filter is created.
chained = m(f1, f2)
# The chained filter is the conjunction of the two input
# filters.
assert chained == f1 & f2
def test_universal_base_filter(self):
# Test the base filters that are always applied.
# We only want to show works that are presentation ready.
base = Filter.universal_base_filter(self._mock_chain)
assert [Term(presentation_ready=True)] == base
def test_universal_nested_filters(self):
# Test the nested filters that are always applied.
nested = Filter.universal_nested_filters()
# Currently all nested filters operate on the 'licensepools'
# subdocument.
[not_suppressed, currently_owned] = nested.pop('licensepools')
assert {} == nested
# Let's look at those filters.
# The first one is simple -- the license pool must not be
# suppressed.
assert (Term(**{"licensepools.suppressed": False}) ==
not_suppressed)
# The second one is a little more complex
owned = Term(**{"licensepools.licensed": True})
open_access = Term(**{"licensepools.open_access": True})
# We only count license pools that are open-access _or_ that have
# currently owned licenses.
assert Bool(should=[owned, open_access]) == currently_owned
def _mock_chain(self, filters, new_filter):
"""A mock of _chain_filters so we don't have to check
test results against super-complicated Elasticsearch
filter objects.
Instead, we'll get a list of smaller filter objects.
"""
if filters is None:
# There are no active filters.
filters = []
if isinstance(filters, elasticsearch_dsl_query):
# An initial filter was passed in. Convert it to a list.
filters = [filters]
filters.append(new_filter)
return filters
class TestSortKeyPagination(DatabaseTest):
"""Test the Elasticsearch-implementation of Pagination that does
pagination by tracking the last item on the previous page,
rather than by tracking the number of items seen so far.
"""
def test_from_request(self):
# No arguments -> Class defaults.
pagination = SortKeyPagination.from_request({}.get, None)
assert isinstance(pagination, SortKeyPagination)
assert SortKeyPagination.DEFAULT_SIZE == pagination.size
assert None == pagination.pagination_key
# Override the default page size.
pagination = SortKeyPagination.from_request({}.get, 100)
assert isinstance(pagination, SortKeyPagination)
assert 100 == pagination.size
assert None == pagination.pagination_key
# The most common usages.
pagination = SortKeyPagination.from_request(dict(size="4").get)
assert isinstance(pagination, SortKeyPagination)
assert 4 == pagination.size
assert None == pagination.pagination_key
pagination_key = json.dumps(["field 1", 2])
pagination = SortKeyPagination.from_request(
dict(key=pagination_key).get
)
assert isinstance(pagination, SortKeyPagination)
assert SortKeyPagination.DEFAULT_SIZE == pagination.size
assert pagination_key == pagination.pagination_key
# Invalid size -> problem detail
error = SortKeyPagination.from_request(dict(size="string").get)
assert INVALID_INPUT.uri == error.uri
assert "Invalid page size: string" == str(error.detail)
# Invalid pagination key -> problem detail
error = SortKeyPagination.from_request(dict(key="not json").get)
assert INVALID_INPUT.uri == error.uri
assert "Invalid page key: not json" == str(error.detail)
# Size too large -> cut down to MAX_SIZE
pagination = SortKeyPagination.from_request(dict(size="10000").get)
assert isinstance(pagination, SortKeyPagination)
assert SortKeyPagination.MAX_SIZE == pagination.size
assert None == pagination.pagination_key
def test_items(self):
# Test the values added to URLs to propagate pagination
# settings across requests.
pagination = SortKeyPagination(size=20)
assert [("size", 20)] == list(pagination.items())
key = ["the last", "item"]
pagination.last_item_on_previous_page = key
assert (
[("key", json.dumps(key)), ("size", 20)] ==
list(pagination.items()))
def test_pagination_key(self):
# SortKeyPagination has no pagination key until it knows
# about the last item on the previous page.
pagination = SortKeyPagination()
assert None == pagination.pagination_key
key = ["the last", "item"]
pagination.last_item_on_previous_page = key
assert pagination.pagination_key == json.dumps(key)
def test_unimplemented_features(self):
# Check certain features of a normal Pagination object that
# are not implemented in SortKeyPagination.
# Set up a realistic SortKeyPagination -- certain things
# will remain undefined.
pagination = SortKeyPagination(last_item_on_previous_page=object())
pagination.this_page_size = 100
pagination.last_item_on_this_page = object()
# The offset is always zero.
assert 0 == pagination.offset
# The total size is always undefined, even though we could
# theoretically track it.
assert None == pagination.total_size
# The previous page is always undefined, though theoretically
# we could navigate backwards.
assert None == pagination.previous_page
with pytest.raises(NotImplementedError) as excinfo:
pagination.modify_database_query(object())
assert "SortKeyPagination does not work with database queries." in str(excinfo.value)
def test_modify_search_query(self):
class MockSearch(object):
update_from_dict_called_with = "not called"
getitem_called_with = "not called"
def update_from_dict(self, dict):
self.update_from_dict_called_with = dict
return self
def __getitem__(self, slice):
self.getitem_called_with = slice
return "modified search object"
search = MockSearch()
# We start off in a state where we don't know the last item on the
# previous page.
pagination = SortKeyPagination()
# In this case, modify_search_query slices out the first
# 'page' of results and returns a modified search object.
assert "modified search object" == pagination.modify_search_query(search)
assert slice(0, 50) == search.getitem_called_with
# update_from_dict was not called. We don't know where to
# start our search, so we start at the beginning.
assert "not called" == search.update_from_dict_called_with
# Now let's say we find out the last item on the previous page
# -- in real life, this would be because we call page_loaded()
# and then next_page().
last_item = object()
pagination.last_item_on_previous_page = last_item
# Reset the object so we can verify __getitem__ gets called
# again.
search.getitem_called_with = "not called"
# With .last_item_on_previous_page set, modify_search_query()
# calls update_from_dict() on our mock ElasticSearch `Search`
# object, passing in the last item on the previous page.
# The return value of modify_search_query() becomes the active
# Search object.
assert "modified search object" == pagination.modify_search_query(search)
# Now we can see that the Elasticsearch object was modified to
# use the 'search_after' feature.
assert dict(search_after=last_item) == search.update_from_dict_called_with
# And the resulting object was modified _again_ to get the
# first 'page' of results following last_item.
assert slice(0, 50) == search.getitem_called_with
def test_page_loaded(self):
# Test what happens to a SortKeyPagination object when a page of
# results is loaded.
this_page = SortKeyPagination()
# Mock an Elasticsearch 'hit' object -- we'll be accessing
# hit.meta.sort.
class MockMeta(object):
def __init__(self, sort_key):
self.sort = sort_key
class MockItem(object):
def __init__(self, sort_key):
self.meta = MockMeta(sort_key)
# Make a page of results, each with a unique sort key.
hits = [
MockItem(['sort', 'key', num]) for num in range(5)
]
last_hit = hits[-1]
# Tell the page about the results.
assert False == this_page.page_has_loaded
this_page.page_loaded(hits)
assert True == this_page.page_has_loaded
# We know the size.
assert 5 == this_page.this_page_size
# We know the sort key of the last item in the page.
assert last_hit.meta.sort == this_page.last_item_on_this_page
# This code has coverage elsewhere, but just so you see how it
# works -- we can now get the next page...
next_page = this_page.next_page
# And it's defined in terms of the last item on its
# predecessor. When we pass the new pagination object into
# create_search_doc, it'll call this object's
# modify_search_query method. The resulting search query will
# pick up right where the previous page left off.
assert last_hit.meta.sort == next_page.last_item_on_previous_page
def test_next_page(self):
# To start off, we can't say anything about the next page,
# because we don't know anything about _this_ page.
first_page = SortKeyPagination()
assert None == first_page.next_page
# Let's learn about this page.
first_page.this_page_size = 10
last_item = object()
first_page.last_item_on_this_page = last_item
        # When we call next_page, the last item on this page becomes the
        # next page's last_item_on_previous_page.
next_page = first_page.next_page
assert last_item == next_page.last_item_on_previous_page
# Again, we know nothing about this page, since we haven't
# loaded it yet.
assert None == next_page.this_page_size
assert None == next_page.last_item_on_this_page
# In the unlikely event that we know the last item on the
# page, but the page size is zero, there is no next page.
first_page.this_page_size = 0
assert None == first_page.next_page
class TestBulkUpdate(DatabaseTest):
def test_works_not_presentation_ready_kept_in_index(self):
w1 = self._work()
w1.set_presentation_ready()
w2 = self._work()
w2.set_presentation_ready()
w3 = self._work()
index = MockExternalSearchIndex()
successes, failures = index.bulk_update([w1, w2, w3])
# All three works are regarded as successes, because their
# state was successfully mirrored to the index.
assert set([w1, w2, w3]) == set(successes)
assert [] == failures
# All three works were inserted into the index, even the one
# that's not presentation-ready.
ids = set(x[-1] for x in index.docs.keys())
assert set([w1.id, w2.id, w3.id]) == ids
# If a work stops being presentation-ready, it is kept in the
# index.
w2.presentation_ready = False
successes, failures = index.bulk_update([w1, w2, w3])
assert set([w1.id, w2.id, w3.id]) == set([x[-1] for x in index.docs.keys()])
assert set([w1, w2, w3]) == set(successes)
assert [] == failures
class TestSearchErrors(ExternalSearchTest):
def test_search_connection_timeout(self):
attempts = []
def bulk_with_timeout(docs, raise_on_error=False, raise_on_exception=False):
attempts.append(docs)
def error(doc):
return dict(index=dict(status='TIMEOUT',
exception='ConnectionTimeout',
error='Connection Timeout!',
_id=doc['_id'],
data=doc))
errors = map(error, docs)
return 0, errors
self.search.bulk = bulk_with_timeout
work = self._work()
work.set_presentation_ready()
successes, failures = self.search.bulk_update([work])
assert [] == successes
assert 1 == len(failures)
assert work == failures[0][0]
assert "Connection Timeout!" == failures[0][1]
# When all the documents fail, it tries again once with the same arguments.
assert ([work.id, work.id] ==
[docs[0]['_id'] for docs in attempts])
def test_search_single_document_error(self):
successful_work = self._work()
successful_work.set_presentation_ready()
failing_work = self._work()
failing_work.set_presentation_ready()
def bulk_with_error(docs, raise_on_error=False, raise_on_exception=False):
failures = [dict(data=dict(_id=failing_work.id),
error="There was an error!",
exception="Exception")]
success_count = 1
return success_count, failures
self.search.bulk = bulk_with_error
successes, failures = self.search.bulk_update([successful_work, failing_work])
assert [successful_work] == successes
assert 1 == len(failures)
assert failing_work == failures[0][0]
assert "There was an error!" == failures[0][1]
class TestWorkSearchResult(DatabaseTest):
# Test the WorkSearchResult class, which wraps together a data
# model Work and an ElasticSearch Hit into something that looks
# like a Work.
def test_constructor(self):
work = self._work()
hit = object()
result = WorkSearchResult(work, hit)
# The original Work object is available as ._work
assert work == result._work
# The Elasticsearch Hit object is available as ._hit
assert hit == result._hit
# Any other attributes are delegated to the Work.
assert work.sort_title == result.sort_title
class TestSearchIndexCoverageProvider(DatabaseTest):
def test_operation(self):
index = MockExternalSearchIndex()
provider = SearchIndexCoverageProvider(
self._db, search_index_client=index
)
assert (WorkCoverageRecord.UPDATE_SEARCH_INDEX_OPERATION ==
provider.operation)
def test_success(self):
work = self._work()
work.set_presentation_ready()
index = MockExternalSearchIndex()
provider = SearchIndexCoverageProvider(
self._db, search_index_client=index
)
results = provider.process_batch([work])
# We got one success and no failures.
assert [work] == results
# The work was added to the search index.
assert 1 == len(index.docs)
def test_failure(self):
class DoomedExternalSearchIndex(MockExternalSearchIndex):
"""All documents sent to this index will fail."""
def bulk(self, docs, **kwargs):
return 0, [
dict(data=dict(_id=failing_work['_id']),
error="There was an error!",
exception="Exception")
for failing_work in docs
]
work = self._work()
work.set_presentation_ready()
index = DoomedExternalSearchIndex()
provider = SearchIndexCoverageProvider(
self._db, search_index_client=index
)
results = provider.process_batch([work])
# We have one transient failure.
[record] = results
assert work == record.obj
assert True == record.transient
assert 'There was an error!' == record.exception
```
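The tests above poke SortKeyPagination one method at a time. The sketch below shows how those methods could fit together into a paging loop. It is an illustration only: the import path is a guess, `execute()` follows the elasticsearch-dsl convention rather than anything shown in this file, and the assumption that `next_page` becomes None after an empty page is inferred from `test_next_page`.
```python
from core.external_search import SortKeyPagination  # import path is a guess

def fetch_all_hits(search):
    """Walk an elasticsearch-dsl Search object one page at a time."""
    pagination = SortKeyPagination()
    hits = []
    while pagination is not None:
        # modify_search_query() slices out one page and, after the first
        # page, adds the 'search_after' clause shown in
        # test_modify_search_query above.
        page = list(pagination.modify_search_query(search).execute())
        pagination.page_loaded(page)
        hits.extend(page)
        # Assumption: next_page is None once an empty page tells us
        # there is nothing left to fetch.
        pagination = pagination.next_page
    return hits
```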
#### File: simplified-server-core/tests/test_monitor.py
```python
import datetime
import pytest
from ..testing import DatabaseTest
from ..config import Configuration
from ..metadata_layer import TimestampData
from ..model import (
CachedFeed,
CirculationEvent,
Collection,
CollectionMissing,
ConfigurationSetting,
Credential,
DataSource,
Edition,
ExternalIntegration,
Genre,
Identifier,
Measurement,
Patron,
Subject,
Timestamp,
Work,
WorkCoverageRecord,
create,
get_one,
get_one_or_create,
)
from ..monitor import (
CachedFeedReaper,
CirculationEventLocationScrubber,
CollectionMonitor,
CollectionReaper,
CoverageProvidersFailed,
CredentialReaper,
CustomListEntrySweepMonitor,
CustomListEntryWorkUpdateMonitor,
EditionSweepMonitor,
IdentifierSweepMonitor,
MakePresentationReadyMonitor,
MeasurementReaper,
Monitor,
NotPresentationReadyWorkSweepMonitor,
OPDSEntryCacheMonitor,
PatronNeighborhoodScrubber,
PatronRecordReaper,
PermanentWorkIDRefreshMonitor,
PresentationReadyWorkSweepMonitor,
ReaperMonitor,
SubjectSweepMonitor,
SweepMonitor,
TimelineMonitor,
WorkReaper,
WorkSweepMonitor,
)
from ..testing import (
AlwaysSuccessfulCoverageProvider,
NeverSuccessfulCoverageProvider,
)
class MockMonitor(Monitor):
SERVICE_NAME = "Dummy monitor for test"
def __init__(self, _db, collection=None):
super(MockMonitor, self).__init__(_db, collection)
self.run_records = []
self.cleanup_records = []
def run_once(self, progress):
# Record the TimestampData object passed in.
self.run_records.append(progress)
def cleanup(self):
self.cleanup_records.append(True)
class TestMonitor(DatabaseTest):
def test_must_define_service_name(self):
class NoServiceName(MockMonitor):
SERVICE_NAME = None
with pytest.raises(ValueError) as excinfo:
NoServiceName(self._db)
assert "NoServiceName must define SERVICE_NAME." in str(excinfo.value)
def test_collection(self):
monitor = MockMonitor(self._db, self._default_collection)
assert self._default_collection == monitor.collection
monitor.collection_id = None
assert None == monitor.collection
def test_initial_start_time(self):
monitor = MockMonitor(self._db, self._default_collection)
# Setting the default start time to NEVER explicitly says to use
# None as the initial time.
monitor.default_start_time = monitor.NEVER
assert None == monitor.initial_start_time
# Setting the value to None means "use the current time".
monitor.default_start_time = None
self.time_eq(datetime.datetime.utcnow(), monitor.initial_start_time)
# Any other value is returned as-is.
default = object()
monitor.default_start_time = default
assert default == monitor.initial_start_time
def test_monitor_lifecycle(self):
monitor = MockMonitor(self._db, self._default_collection)
monitor.default_start_time = datetime.datetime(2010, 1, 1)
# There is no timestamp for this monitor.
def get_timestamp():
return get_one(self._db, Timestamp, service=monitor.service_name)
assert None == get_timestamp()
# Run the monitor.
monitor.run()
# The monitor ran once and then stopped.
[progress] = monitor.run_records
# The TimestampData passed in to run_once() had the
# Monitor's default start time as its .start, and an empty
# time for .finish.
assert monitor.default_start_time == progress.start
assert None == progress.finish
# But the Monitor's underlying timestamp has been updated with
# the time that the monitor actually took to run.
timestamp = get_timestamp()
assert timestamp.start > monitor.default_start_time
assert timestamp.finish > timestamp.start
self.time_eq(datetime.datetime.utcnow(), timestamp.start)
# cleanup() was called once.
assert [True] == monitor.cleanup_records
def test_initial_timestamp(self):
class NeverRunMonitor(MockMonitor):
SERVICE_NAME = "Never run"
DEFAULT_START_TIME = MockMonitor.NEVER
# The Timestamp object is created, but its .start is None,
# indicating that it has never run to completion.
m = NeverRunMonitor(self._db, self._default_collection)
assert None == m.timestamp().start
class RunLongAgoMonitor(MockMonitor):
SERVICE_NAME = "Run long ago"
DEFAULT_START_TIME = MockMonitor.ONE_YEAR_AGO
        # The Timestamp object is created, and its .start is long ago.
m = RunLongAgoMonitor(self._db, self._default_collection)
timestamp = m.timestamp()
now = datetime.datetime.utcnow()
assert timestamp.start < now
# Timestamp.finish is set to None, on the assumption that the
# first run is still in progress.
assert timestamp.finish == None
def test_run_once_returning_timestampdata(self):
# If a Monitor's run_once implementation returns a TimestampData,
# that's the data used to set the Monitor's Timestamp, even if
# the data doesn't make sense by the standards used by the main
# Monitor class.
start = datetime.datetime(2011, 1, 1)
finish = datetime.datetime(2012, 1, 1)
class Mock(MockMonitor):
def run_once(self, progress):
return TimestampData(start=start, finish=finish, counter=-100)
monitor = Mock(self._db, self._default_collection)
monitor.run()
timestamp = monitor.timestamp()
assert start == timestamp.start
assert finish == timestamp.finish
assert -100 == timestamp.counter
def test_run_once_with_exception(self):
# If an exception happens during a Monitor's run_once
# implementation, a traceback for that exception is recorded
# in the appropriate Timestamp, but the timestamp itself is
# not updated.
# This test function shows the behavior we expect from a
# Monitor.
def assert_run_sets_exception(monitor, check_for):
timestamp = monitor.timestamp()
old_start = timestamp.start
old_finish = timestamp.finish
assert None == timestamp.exception
monitor.run()
            # The timestamp's exception has been set, but the times have not.
assert check_for in timestamp.exception
assert old_start == timestamp.start
assert old_finish == timestamp.finish
# Try a monitor that raises an unhandled exception.
class DoomedMonitor(MockMonitor):
SERVICE_NAME = "Doomed"
def run_once(self, *args, **kwargs):
raise Exception("I'm doomed")
m = DoomedMonitor(self._db, self._default_collection)
assert_run_sets_exception(m, "Exception: I'm doomed")
# Try a monitor that sets .exception on the TimestampData it
# returns.
class AlsoDoomed(MockMonitor):
SERVICE_NAME = "Doomed, but in a different way."
def run_once(self, progress):
return TimestampData(exception="I'm also doomed")
m = AlsoDoomed(self._db, self._default_collection)
assert_run_sets_exception(m, "I'm also doomed")
def test_same_monitor_different_collections(self):
"""A single Monitor has different Timestamps when run against
different Collections.
"""
c1 = self._collection()
c2 = self._collection()
m1 = MockMonitor(self._db, c1)
m2 = MockMonitor(self._db, c2)
# The two Monitors have the same service name but are operating
# on different Collections.
assert m1.service_name == m2.service_name
assert c1 == m1.collection
assert c2 == m2.collection
assert [] == c1.timestamps
assert [] == c2.timestamps
# Run the first Monitor.
m1.run()
[t1] = c1.timestamps
assert m1.service_name == t1.service
assert m1.collection == t1.collection
old_m1_timestamp = m1.timestamp
# Running the first Monitor did not create a timestamp for the
# second Monitor.
assert [] == c2.timestamps
# Run the second monitor.
m2.run()
# The timestamp for the first monitor was not updated when
# we ran the second monitor.
assert old_m1_timestamp == m1.timestamp
# But the second Monitor now has its own timestamp.
[t2] = c2.timestamps
assert t2.start > t1.start
class TestCollectionMonitor(DatabaseTest):
"""Test the special features of CollectionMonitor."""
def test_protocol_enforcement(self):
"""A CollectionMonitor can require that it be instantiated
with a Collection that implements a certain protocol.
"""
class NoProtocolMonitor(CollectionMonitor):
SERVICE_NAME = "Test Monitor 1"
PROTOCOL = None
class OverdriveMonitor(CollectionMonitor):
SERVICE_NAME = "Test Monitor 2"
PROTOCOL = ExternalIntegration.OVERDRIVE
# Two collections.
c1 = self._collection(protocol=ExternalIntegration.OVERDRIVE)
c2 = self._collection(protocol=ExternalIntegration.BIBLIOTHECA)
# The NoProtocolMonitor can be instantiated with either one,
# or with no Collection at all.
NoProtocolMonitor(self._db, c1)
NoProtocolMonitor(self._db, c2)
NoProtocolMonitor(self._db, None)
# The OverdriveMonitor can only be instantiated with the first one.
OverdriveMonitor(self._db, c1)
with pytest.raises(ValueError) as excinfo:
OverdriveMonitor(self._db, c2)
assert "Collection protocol (Bibliotheca) does not match Monitor protocol (Overdrive)" in str(excinfo.value)
with pytest.raises(CollectionMissing):
OverdriveMonitor(self._db, None)
def test_all(self):
"""Test that we can create a list of Monitors using all()."""
class OPDSCollectionMonitor(CollectionMonitor):
SERVICE_NAME = "Test Monitor"
PROTOCOL = ExternalIntegration.OPDS_IMPORT
# Here we have three OPDS import Collections...
o1 = self._collection("o1")
o2 = self._collection("o2")
o3 = self._collection("o3")
# ...and a Bibliotheca collection.
b1 = self._collection(protocol=ExternalIntegration.BIBLIOTHECA)
# o1 just had its Monitor run.
Timestamp.stamp(
self._db, OPDSCollectionMonitor.SERVICE_NAME,
Timestamp.MONITOR_TYPE, o1
)
# o2 and b1 have never had their Monitor run, but o2 has had some other Monitor run.
Timestamp.stamp(
self._db, "A Different Service", Timestamp.MONITOR_TYPE,
o2
)
# o3 had its Monitor run an hour ago.
now = datetime.datetime.utcnow()
an_hour_ago = now - datetime.timedelta(seconds=3600)
Timestamp.stamp(
self._db, OPDSCollectionMonitor.SERVICE_NAME,
Timestamp.MONITOR_TYPE, o3, start=an_hour_ago,
finish=an_hour_ago
)
monitors = list(OPDSCollectionMonitor.all(self._db))
# Three OPDSCollectionMonitors were returned, one for each
# appropriate collection. The monitor that needs to be run the
# worst was returned first in the list. The monitor that was
# run most recently is returned last. There is no
# OPDSCollectionMonitor for the Bibliotheca collection.
assert [o2, o3, o1] == [x.collection for x in monitors]
# If `collections` are specified, monitors should be yielded in the same order.
opds_collections = [o3, o1, o2]
monitors = list(OPDSCollectionMonitor.all(self._db, collections=opds_collections))
monitor_collections = [m.collection for m in monitors]
# We should get a monitor for each collection.
assert set(opds_collections) == set(monitor_collections)
# We should get them back in order.
assert opds_collections == monitor_collections
# If `collections` are specified, monitors should be yielded in the same order.
opds_collections = [o3, o1]
monitors = list(OPDSCollectionMonitor.all(self._db, collections=opds_collections))
monitor_collections = [m.collection for m in monitors]
# We should get a monitor for each collection.
assert set(opds_collections) == set(monitor_collections)
# We should get them back in order.
assert opds_collections == monitor_collections
# If collections are specified, they must match the monitor's protocol.
with pytest.raises(ValueError) as excinfo:
monitors = list(OPDSCollectionMonitor.all(self._db, collections=[b1]))
assert 'Collection protocol (Bibliotheca) does not match Monitor protocol (OPDS Import)' in str(excinfo.value)
assert 'Only the following collections are available: ' in str(excinfo.value)
class TestTimelineMonitor(DatabaseTest):
def test_run_once(self):
class Mock(TimelineMonitor):
SERVICE_NAME = "Just a timeline"
catchups = []
def catch_up_from(self, start, cutoff, progress):
self.catchups.append((start, cutoff, progress))
m = Mock(self._db)
progress = m.timestamp().to_data()
m.run_once(progress)
now = datetime.datetime.utcnow()
# catch_up_from() was called once.
(start, cutoff, progress) = m.catchups.pop()
assert m.initial_start_time == start
self.time_eq(cutoff, now)
# progress contains a record of the timespan now covered
# by this Monitor.
assert start == progress.start
assert cutoff == progress.finish
def test_subclass_cannot_modify_dates(self):
"""The subclass can modify some fields of the TimestampData
passed in to it, but it can't modify the start or end dates.
If you want that, you shouldn't subclass TimelineMonitor.
"""
class Mock(TimelineMonitor):
DEFAULT_START_TIME = Monitor.NEVER
SERVICE_NAME = "I aim to misbehave"
def catch_up_from(self, start, cutoff, progress):
progress.start = 1
progress.finish = 2
progress.counter = 3
progress.achievements = 4
m = Mock(self._db)
progress = m.timestamp().to_data()
m.run_once(progress)
now = datetime.datetime.utcnow()
# The timestamp values have been set to appropriate values for
# the portion of the timeline covered, overriding our values.
assert None == progress.start
self.time_eq(now, progress.finish)
# The non-timestamp values have been left alone.
assert 3 == progress.counter
assert 4 == progress.achievements
def test_timestamp_not_updated_on_exception(self):
"""If the subclass sets .exception on the TimestampData
passed into it, the dates aren't modified.
"""
class Mock(TimelineMonitor):
DEFAULT_START_TIME = datetime.datetime(2011, 1, 1)
SERVICE_NAME = "doomed"
def catch_up_from(self, start, cutoff, progress):
self.started_at = start
progress.exception = "oops"
m = Mock(self._db)
progress = m.timestamp().to_data()
m.run_once(progress)
# The timestamp value is set to a value indicating that the
# initial run never completed.
assert m.DEFAULT_START_TIME == progress.start
assert None == progress.finish
def test_slice_timespan(self):
# Test the slice_timespan utility method.
# Slicing up the time between 121 minutes ago and now in increments
# of one hour will yield three slices:
#
# 121 minutes ago -> 61 minutes ago
# 61 minutes ago -> 1 minute ago
# 1 minute ago -> now
now = datetime.datetime.utcnow()
one_hour = datetime.timedelta(minutes=60)
ago_1 = now - datetime.timedelta(minutes=1)
ago_61 = ago_1 - one_hour
ago_121 = ago_61 - one_hour
slice1, slice2, slice3 = list(
TimelineMonitor.slice_timespan(ago_121, now, one_hour)
)
assert slice1 == (ago_121, ago_61, True)
assert slice2 == (ago_61, ago_1, True)
assert slice3 == (ago_1, now, False)
# The True/True/False indicates that the first two slices are
# complete -- they cover a span of an entire hour. The final
# slice is incomplete -- it covers only one minute.
class MockSweepMonitor(SweepMonitor):
"""A SweepMonitor that does nothing."""
MODEL_CLASS = Identifier
SERVICE_NAME = "Sweep Monitor"
DEFAULT_BATCH_SIZE = 2
def __init__(self, _db, **kwargs):
super(MockSweepMonitor, self).__init__(_db, **kwargs)
self.cleanup_called = []
self.batches = []
self.processed = []
def scope_to_collection(self, qu, collection):
return qu
def process_batch(self, batch):
self.batches.append(batch)
return super(MockSweepMonitor, self).process_batch(batch)
def process_item(self, item):
self.processed.append(item)
def cleanup(self):
self.cleanup_called.append(True)
class TestSweepMonitor(DatabaseTest):
def setup_method(self):
super(TestSweepMonitor, self).setup_method()
self.monitor = MockSweepMonitor(self._db)
def test_model_class_is_required(self):
class NoModelClass(SweepMonitor):
MODEL_CLASS = None
with pytest.raises(ValueError) as excinfo:
NoModelClass(self._db)
assert "NoModelClass must define MODEL_CLASS" in str(excinfo.value)
def test_batch_size(self):
assert MockSweepMonitor.DEFAULT_BATCH_SIZE == self.monitor.batch_size
monitor = MockSweepMonitor(self._db, batch_size=29)
assert 29 == monitor.batch_size
# If you pass in an invalid value you get the default.
monitor = MockSweepMonitor(self._db, batch_size=-1)
assert MockSweepMonitor.DEFAULT_BATCH_SIZE == monitor.batch_size
def test_run_against_empty_table(self):
# If there's nothing in the table to be swept, a SweepMonitor runs
# to completion and accomplishes nothing.
self.monitor.run()
timestamp = self.monitor.timestamp()
assert "Records processed: 0." == timestamp.achievements
assert None == timestamp.exception
def test_run_sweeps_entire_table(self):
# Three Identifiers -- the batch size is 2.
i1, i2, i3 = [self._identifier() for i in range(3)]
assert 2 == self.monitor.batch_size
# Run the monitor.
self.monitor.run()
# All three Identifiers, and no other items, were processed.
assert [i1, i2, i3] == self.monitor.processed
# We ran process_batch() three times: once starting at zero,
# once starting at the ID that ended the first batch, and
# again starting at the ID that ended the second batch.
assert [0, i2.id, i3.id] == self.monitor.batches
# The cleanup method was called once.
assert [True] == self.monitor.cleanup_called
# The number of records processed reflects what happened over
# the entire run, not just the final batch.
assert "Records processed: 3." == self.monitor.timestamp().achievements
def test_run_starts_at_previous_counter(self):
# Two Identifiers.
i1, i2 = [self._identifier() for i in range(2)]
# The monitor was just run, but it was not able to proceed past
# i1.
timestamp = Timestamp.stamp(
self._db, self.monitor.service_name,
Timestamp.MONITOR_TYPE,
self.monitor.collection
)
timestamp.counter = i1.id
# Run the monitor.
self.monitor.run()
# The last item in the table was processed. i1 was not
# processed, because it was processed in a previous run.
assert [i2] == self.monitor.processed
# The monitor's counter has been reset.
assert 0 == timestamp.counter
def test_exception_interrupts_run(self):
# Four Identifiers.
i1, i2, i3, i4 = [self._identifier() for i in range(4)]
# This monitor will never be able to process the fourth one.
class IHateI4(MockSweepMonitor):
def process_item(self, item):
if item is i4:
raise Exception("HOW DARE YOU")
super(IHateI4, self).process_item(item)
monitor = IHateI4(self._db)
timestamp = monitor.timestamp()
original_start = timestamp.start
monitor.run()
# The monitor's counter was updated to the ID of the final
# item in the last batch it was able to process. In this case,
# this is I2.
assert i2.id == timestamp.counter
# The exception that stopped the run was recorded.
assert "Exception: HOW DARE YOU" in timestamp.exception
# Even though the run didn't complete, the dates and
# achievements of the timestamp were updated to reflect the
# work that _was_ done.
now = datetime.datetime.utcnow()
assert timestamp.start > original_start
self.time_eq(now, timestamp.start)
self.time_eq(now, timestamp.finish)
assert timestamp.start < timestamp.finish
assert "Records processed: 2." == timestamp.achievements
# I3 was processed, but the batch did not complete, so any
# changes wouldn't have been written to the database.
assert [i1, i2, i3] == monitor.processed
# Running the monitor again will process I3 again, but the same error
# will happen on i4 and the counter will not be updated.
monitor.run()
assert [i1, i2, i3, i3] == monitor.processed
assert i2.id == timestamp.counter
# cleanup() is only called when the sweep completes successfully.
assert [] == monitor.cleanup_called
class TestIdentifierSweepMonitor(DatabaseTest):
def test_scope_to_collection(self):
# Two Collections, each with a LicensePool.
c1 = self._collection()
c2 = self._collection()
e1, p1 = self._edition(with_license_pool=True, collection=c1)
e2, p2 = self._edition(with_license_pool=True, collection=c2)
        # A random Identifier not associated with any Collection.
i3 = self._identifier()
class Mock(IdentifierSweepMonitor):
SERVICE_NAME = "Mock"
# With a Collection, we only process items that are licensed through
# that collection.
monitor = Mock(self._db, c1)
assert [p1.identifier] == monitor.item_query().all()
# With no Collection, we process all items.
monitor = Mock(self._db, None)
assert [p1.identifier, p2.identifier, i3] == monitor.item_query().all()
class TestSubjectSweepMonitor(DatabaseTest):
def test_item_query(self):
class Mock(SubjectSweepMonitor):
SERVICE_NAME = "Mock"
s1, ignore = Subject.lookup(self._db, Subject.DDC, "100", None)
s2, ignore = Subject.lookup(
self._db, Subject.TAG, None, "100 Years of Solitude"
)
# By default, SubjectSweepMonitor handles every Subject
# in the database.
everything = Mock(self._db)
assert [s1, s2] == everything.item_query().all()
# But you can tell SubjectSweepMonitor to handle only Subjects
# of a certain type.
dewey_monitor = Mock(self._db, subject_type=Subject.DDC)
assert [s1] == dewey_monitor.item_query().all()
        # You can also tell SubjectSweepMonitor to handle only Subjects
# whose names or identifiers match a certain string.
one_hundred_monitor = Mock(self._db, filter_string="100")
assert [s1, s2] == one_hundred_monitor.item_query().all()
specific_tag_monitor = Mock(
self._db, subject_type=Subject.TAG, filter_string="Years"
)
assert [s2] == specific_tag_monitor.item_query().all()
class TestCustomListEntrySweepMonitor(DatabaseTest):
def test_item_query(self):
class Mock(CustomListEntrySweepMonitor):
SERVICE_NAME = "Mock"
# Three CustomLists, each containing one book.
list1, [edition1] = self._customlist(num_entries=1)
list2, [edition2] = self._customlist(num_entries=1)
list3, [edition3] = self._customlist(num_entries=1)
[entry1] = list1.entries
[entry2] = list2.entries
[entry3] = list3.entries
# Two Collections, each with one book from one of the lists.
c1 = self._collection()
c1.licensepools.extend(edition1.license_pools)
c2 = self._collection()
c2.licensepools.extend(edition2.license_pools)
# If we don't pass in a Collection to
# CustomListEntrySweepMonitor, we get all three
# CustomListEntries, in their order of creation.
monitor = Mock(self._db)
assert [entry1, entry2, entry3] == monitor.item_query().all()
# If we pass in a Collection to CustomListEntrySweepMonitor,
# we get only the CustomListEntry whose work is licensed
# to that collection.
monitor = Mock(self._db, collection=c2)
assert [entry2] == monitor.item_query().all()
class TestEditionSweepMonitor(DatabaseTest):
def test_item_query(self):
class Mock(EditionSweepMonitor):
SERVICE_NAME = "Mock"
# Three Editions, two of which have LicensePools.
e1, p1 = self._edition(with_license_pool=True)
e2, p2 = self._edition(with_license_pool=True)
e3 = self._edition(with_license_pool=False)
# Two Collections, each with one book.
c1 = self._collection()
c1.licensepools.extend(e1.license_pools)
c2 = self._collection()
c2.licensepools.extend(e2.license_pools)
# If we don't pass in a Collection to EditionSweepMonitor, we
# get all three Editions, in their order of creation.
monitor = Mock(self._db)
assert [e1, e2, e3] == monitor.item_query().all()
# If we pass in a Collection to EditionSweepMonitor, we get
# only the Edition whose work is licensed to that collection.
monitor = Mock(self._db, collection=c2)
assert [e2] == monitor.item_query().all()
class TestWorkSweepMonitors(DatabaseTest):
"""To reduce setup costs, this class tests WorkSweepMonitor,
PresentationReadyWorkSweepMonitor, and
NotPresentationReadyWorkSweepMonitor at once.
"""
def test_item_query(self):
class Mock(WorkSweepMonitor):
SERVICE_NAME = "Mock"
# Three Works with LicensePools. Only one is presentation
# ready.
w1, w2, w3 = [self._work(with_license_pool=True) for i in range(3)]
# Another Work that's presentation ready but has no
# LicensePool.
w4 = self._work()
w4.presentation_ready = True
w2.presentation_ready = False
w3.presentation_ready = None
# Two Collections, each with one book.
c1 = self._collection()
c1.licensepools.append(w1.license_pools[0])
c2 = self._collection()
c2.licensepools.append(w2.license_pools[0])
# If we don't pass in a Collection to WorkSweepMonitor, we
# get all four Works, in their order of creation.
monitor = Mock(self._db)
assert [w1, w2, w3, w4] == monitor.item_query().all()
# If we pass in a Collection to EditionSweepMonitor, we get
# only the Work licensed to that collection.
monitor = Mock(self._db, collection=c2)
assert [w2] == monitor.item_query().all()
# PresentationReadyWorkSweepMonitor is the same, but it excludes
# works that are not presentation ready.
class Mock(PresentationReadyWorkSweepMonitor):
SERVICE_NAME = "Mock"
assert [w1, w4] == Mock(self._db).item_query().all()
assert [w1] == Mock(self._db, collection=c1).item_query().all()
assert [] == Mock(self._db, collection=c2).item_query().all()
# NotPresentationReadyWorkSweepMonitor is the same, but it _only_
# includes works that are not presentation ready.
class Mock(NotPresentationReadyWorkSweepMonitor):
SERVICE_NAME = "Mock"
assert [w2, w3] == Mock(self._db).item_query().all()
assert [] == Mock(self._db, collection=c1).item_query().all()
assert [w2] == Mock(self._db, collection=c2).item_query().all()
class TestOPDSEntryCacheMonitor(DatabaseTest):
def test_process_item(self):
"""This Monitor calculates OPDS entries for works."""
class Mock(OPDSEntryCacheMonitor):
SERVICE_NAME = "Mock"
monitor = Mock(self._db)
work = self._work()
assert None == work.simple_opds_entry
assert None == work.verbose_opds_entry
monitor.process_item(work)
assert work.simple_opds_entry != None
assert work.verbose_opds_entry != None
class TestPermanentWorkIDRefresh(DatabaseTest):
def test_process_item(self):
"""This Monitor calculates an Editions' permanent work ID."""
class Mock(PermanentWorkIDRefreshMonitor):
SERVICE_NAME = "Mock"
edition = self._edition()
assert None == edition.permanent_work_id
Mock(self._db).process_item(edition)
assert edition.permanent_work_id != None
class TestMakePresentationReadyMonitor(DatabaseTest):
def setup_method(self):
super(TestMakePresentationReadyMonitor, self).setup_method()
# This CoverageProvider will always succeed.
class MockProvider1(AlwaysSuccessfulCoverageProvider):
SERVICE_NAME = "Provider 1"
INPUT_IDENTIFIER_TYPES = Identifier.GUTENBERG_ID
DATA_SOURCE_NAME = DataSource.OCLC
# This CoverageProvider will always fail.
class MockProvider2(NeverSuccessfulCoverageProvider):
SERVICE_NAME = "Provider 2"
INPUT_IDENTIFIER_TYPES = Identifier.GUTENBERG_ID
DATA_SOURCE_NAME = DataSource.OVERDRIVE
self.success = MockProvider1(self._db)
self.failure = MockProvider2(self._db)
self.work = self._work(
DataSource.GUTENBERG, with_license_pool=True)
# Don't fake that the work is presentation ready, as we usually do,
# because presentation readiness is what we're trying to test.
self.work.presentation_ready = False
def test_process_item_sets_presentation_ready_on_success(self):
# Create a monitor that doesn't need to do anything.
monitor = MakePresentationReadyMonitor(self._db, [])
monitor.process_item(self.work)
# When it's done doing nothing, it sets the work as
# presentation-ready.
assert None == self.work.presentation_ready_exception
assert True == self.work.presentation_ready
def test_process_item_sets_exception_on_failure(self):
monitor = MakePresentationReadyMonitor(
self._db, [self.success, self.failure]
)
monitor.process_item(self.work)
assert (
"Provider(s) failed: %s" % self.failure.SERVICE_NAME ==
self.work.presentation_ready_exception)
assert False == self.work.presentation_ready
def test_prepare_raises_exception_with_failing_providers(self):
monitor = MakePresentationReadyMonitor(
self._db, [self.success, self.failure]
)
with pytest.raises(CoverageProvidersFailed) as excinfo:
monitor.prepare(self.work)
assert self.failure.service_name in str(excinfo.value)
def test_prepare_does_not_call_irrelevant_provider(self):
monitor = MakePresentationReadyMonitor(self._db, [self.success])
result = monitor.prepare(self.work)
# There were no failures.
assert [] == result
        # The 'success' provider ran.
assert ([self.work.presentation_edition.primary_identifier] ==
self.success.attempts)
        # The 'failure' provider did not. (If it had, it would have
# failed.)
assert [] == self.failure.attempts
# The work has not been set to presentation ready--that's
# handled in process_item().
assert False == self.work.presentation_ready
class TestCustomListEntryWorkUpdateMonitor(DatabaseTest):
def test_set_item(self):
# Create a CustomListEntry.
list1, [edition1] = self._customlist(num_entries=1)
[entry] = list1.entries
# Pretend that its CustomListEntry's work was never set.
old_work = entry.work
entry.work = None
# Running process_item resets it to the same value.
monitor = CustomListEntryWorkUpdateMonitor(self._db)
monitor.process_item(entry)
assert old_work == entry.work
class MockReaperMonitor(ReaperMonitor):
MODEL_CLASS = Timestamp
TIMESTAMP_FIELD = 'timestamp'
class TestReaperMonitor(DatabaseTest):
def test_cutoff(self):
"""Test that cutoff behaves correctly when given different values for
ReaperMonitor.MAX_AGE.
"""
m = MockReaperMonitor(self._db)
# A number here means a number of days.
for value in [1, 1.5, -1]:
m.MAX_AGE = value
expect = datetime.datetime.utcnow() - datetime.timedelta(
days=value
)
self.time_eq(m.cutoff, expect)
# But you can pass in a timedelta instead.
m.MAX_AGE = datetime.timedelta(seconds=99)
self.time_eq(m.cutoff, datetime.datetime.utcnow() - m.MAX_AGE)
def test_specific_reapers(self):
assert CachedFeed.timestamp == CachedFeedReaper(self._db).timestamp_field
assert 30 == CachedFeedReaper.MAX_AGE
assert Credential.expires == CredentialReaper(self._db).timestamp_field
assert 1 == CredentialReaper.MAX_AGE
assert Patron.authorization_expires == PatronRecordReaper(self._db).timestamp_field
assert 60 == PatronRecordReaper.MAX_AGE
def test_where_clause(self):
m = CachedFeedReaper(self._db)
assert "cachedfeeds.timestamp < :timestamp_1" == str(m.where_clause)
def test_run_once(self):
# Create four Credentials: two expired, two valid.
expired1 = self._credential()
expired2 = self._credential()
now = datetime.datetime.utcnow()
expiration_date = now - datetime.timedelta(
days=CredentialReaper.MAX_AGE + 1
)
for e in [expired1, expired2]:
e.expires = expiration_date
active = self._credential()
active.expires = now - datetime.timedelta(
days=CredentialReaper.MAX_AGE - 1
)
eternal = self._credential()
m = CredentialReaper(self._db)
# Set the batch size to 1 to make sure this works even
# when there are multiple batches.
m.BATCH_SIZE = 1
assert "Reaper for Credential.expires" == m.SERVICE_NAME
result = m.run_once()
assert "Items deleted: 2" == result.achievements
# The expired credentials have been reaped; the others
# are still in the database.
remaining = set(self._db.query(Credential).all())
assert set([active, eternal]) == remaining
def test_reap_patrons(self):
m = PatronRecordReaper(self._db)
expired = self._patron()
credential = self._credential(patron=expired)
now = datetime.datetime.utcnow()
expired.authorization_expires = now - datetime.timedelta(
days=PatronRecordReaper.MAX_AGE + 1
)
active = self._patron()
        active.authorization_expires = now - datetime.timedelta(
            days=PatronRecordReaper.MAX_AGE - 1
        )
result = m.run_once()
assert "Items deleted: 1" == result.achievements
remaining = self._db.query(Patron).all()
assert [active] == remaining
assert [] == self._db.query(Credential).all()
class TestWorkReaper(DatabaseTest):
def test_end_to_end(self):
# Search mock
class MockSearchIndex():
removed = []
def remove_work(self, work):
self.removed.append(work)
# First, create three works.
# This work has a license pool.
has_license_pool = self._work(with_license_pool=True)
# This work had a license pool and then lost it.
had_license_pool = self._work(with_license_pool=True)
self._db.delete(had_license_pool.license_pools[0])
# This work never had a license pool.
never_had_license_pool = self._work(with_license_pool=False)
# Each work has a presentation edition -- keep track of these
# for later.
works = self._db.query(Work)
presentation_editions = [x.presentation_edition for x in works]
# If and when Work gets database-level cascading deletes, this
# is where they will all be triggered, with no chance that an
# ORM-level delete is doing the work. So let's verify that all
# of the cascades work.
# First, set up some related items for each Work.
# Each work is assigned to a genre.
genre, ignore = Genre.lookup(self._db, "Science Fiction")
for work in works:
work.genres = [genre]
# Each work is on the same CustomList.
l, ignore = self._customlist("a list", num_entries=0)
for work in works:
l.add_entry(work)
# Each work has a WorkCoverageRecord.
for work in works:
WorkCoverageRecord.add_for(work, operation="some operation")
# Each work has a CachedFeed.
for work in works:
feed = CachedFeed(
work=work, type='page', content="content",
pagination="", facets=""
)
self._db.add(feed)
# Also create a CachedFeed that has no associated Work.
workless_feed = CachedFeed(
work=None, type='page', content="content",
pagination="", facets=""
)
self._db.add(workless_feed)
self._db.commit()
# Run the reaper.
s = MockSearchIndex()
m = WorkReaper(self._db, search_index_client=s)
print(m.search_index_client)
m.run_once()
# Search index was updated
assert 2 == len(s.removed)
assert has_license_pool not in s.removed
assert had_license_pool in s.removed
assert never_had_license_pool in s.removed
# Only the work with a license pool remains.
assert [has_license_pool] == [x for x in works]
# The presentation editions are still around, since they might
# theoretically be used by other parts of the system.
all_editions = self._db.query(Edition).all()
for e in presentation_editions:
assert e in all_editions
# The surviving work is still assigned to the Genre, and still
# has WorkCoverageRecords.
assert [has_license_pool] == genre.works
surviving_records = self._db.query(WorkCoverageRecord)
assert surviving_records.count() > 0
assert all(x.work==has_license_pool for x in surviving_records)
# The CustomListEntries still exist, but two of them have lost
# their work.
assert 2 == len([x for x in l.entries if not x.work])
assert [has_license_pool] == [x.work for x in l.entries if x.work]
# The CachedFeeds associated with the reaped Works have been
# deleted. The surviving Work still has one, and the
# CachedFeed that didn't have a work in the first place is
# unaffected.
feeds = self._db.query(CachedFeed).all()
assert [workless_feed] == [x for x in feeds if not x.work]
assert [has_license_pool] == [x.work for x in feeds if x.work]
class TestCollectionReaper(DatabaseTest):
def test_query(self):
# This reaper is looking for collections that are marked for
# deletion.
collection = self._default_collection
reaper = CollectionReaper(self._db)
assert [] == reaper.query().all()
collection.marked_for_deletion = True
assert [collection] == reaper.query().all()
def test_reaper_delete_calls_collection_delete(self):
# Unlike most ReaperMonitors, CollectionReaper.delete()
# is overridden to call delete() on the object it was passed,
# rather than just doing a database delete.
class MockCollection(object):
def delete(self):
self.was_called = True
collection = MockCollection()
reaper = CollectionReaper(self._db)
reaper.delete(collection)
assert True == collection.was_called
def test_run_once(self):
# End-to-end test
c1 = self._default_collection
c2 = self._collection()
c2.marked_for_deletion = True
reaper = CollectionReaper(self._db)
result = reaper.run_once()
# The Collection marked for deletion has been deleted; the other
# one is unaffected.
assert [c1] == self._db.query(Collection).all()
assert "Items deleted: 1" == result.achievements
class TestMeasurementReaper(DatabaseTest):
def test_query(self):
# This reaper is looking for measurements that are not current.
measurement, created = get_one_or_create(
self._db, Measurement,
is_most_recent=True)
reaper = MeasurementReaper(self._db)
assert [] == reaper.query().all()
measurement.is_most_recent = False
assert [measurement] == reaper.query().all()
def test_run_once(self):
# End-to-end test
measurement1, created = get_one_or_create(
self._db, Measurement,
quantity_measured=u"answer",
value=12,
is_most_recent=True)
measurement2, created = get_one_or_create(
self._db, Measurement,
quantity_measured=u"answer",
value=42,
is_most_recent=False)
reaper = MeasurementReaper(self._db)
result = reaper.run_once()
assert [measurement1] == self._db.query(Measurement).all()
assert "Items deleted: 1" == result.achievements
def test_disable(self):
# This reaper can be disabled with a configuration setting
enabled = ConfigurationSetting.sitewide(self._db, Configuration.MEASUREMENT_REAPER)
enabled.value = False
measurement1, created = get_one_or_create(
self._db, Measurement,
quantity_measured=u"answer",
value=12,
is_most_recent=True)
measurement2, created = get_one_or_create(
self._db, Measurement,
quantity_measured=u"answer",
value=42,
is_most_recent=False)
reaper = MeasurementReaper(self._db)
reaper.run()
assert [measurement1, measurement2] == self._db.query(Measurement).all()
enabled.value = True
reaper.run()
assert [measurement1] == self._db.query(Measurement).all()
class TestScrubberMonitor(DatabaseTest):
def test_run_once(self):
# ScrubberMonitor is basically an abstract class, with
        # subclasses doing nothing but defining missing constants. This
# is an end-to-end test using a specific subclass,
# CirculationEventLocationScrubber.
m = CirculationEventLocationScrubber(self._db)
assert "Scrubber for CirculationEvent.location" == m.SERVICE_NAME
# CirculationEvents are only scrubbed if they have a location
# *and* are older than MAX_AGE.
now = datetime.datetime.utcnow()
not_long_ago = (
m.cutoff + datetime.timedelta(days=1)
)
long_ago = (
m.cutoff - datetime.timedelta(days=1)
)
new, ignore = create(
self._db, CirculationEvent, start=now, location="loc"
)
recent, ignore = create(
self._db, CirculationEvent, start=not_long_ago, location="loc"
)
old, ignore = create(
self._db, CirculationEvent, start=long_ago, location="loc"
)
already_scrubbed, ignore = create(
self._db, CirculationEvent, start=long_ago, location=None
)
# Only the old unscrubbed CirculationEvent is eligible
# to be scrubbed.
assert [old] == m.query().all()
# Other reapers say items were 'deleted'; we say they were
# 'scrubbed'.
timestamp = m.run_once()
assert "Items scrubbed: 1" == timestamp.achievements
# Only the old unscrubbed CirculationEvent has been scrubbed.
assert None == old.location
for untouched in (new, recent):
assert "loc" == untouched.location
def test_specific_scrubbers(self):
# Check that all specific ScrubberMonitors are set up
# correctly.
circ = CirculationEventLocationScrubber(self._db)
assert CirculationEvent.start == circ.timestamp_field
assert CirculationEvent.location == circ.scrub_field
assert 365 == circ.MAX_AGE
patron = PatronNeighborhoodScrubber(self._db)
assert Patron.last_external_sync == patron.timestamp_field
assert Patron.cached_neighborhood == patron.scrub_field
assert Patron.MAX_SYNC_TIME == patron.MAX_AGE
```
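The Monitor tests above describe the lifecycle: run_once() receives a TimestampData and may return one to control exactly what is written to the Timestamp row. As a minimal sketch, under the assumptions that the package is importable as `core` (inside the package the imports are relative, as in this test file) and that TimestampData accepts an `achievements` keyword (the attribute is used elsewhere in these tests), a small Monitor subclass could look like this:
```python
from core.monitor import Monitor                # import paths are guesses
from core.metadata_layer import TimestampData

class CountingMonitor(Monitor):
    SERVICE_NAME = "Counting monitor"

    def run_once(self, progress):
        # `progress` is a TimestampData whose .start is this Monitor's
        # initial start time. Returning a TimestampData is optional; when
        # one is returned, it becomes the recorded Timestamp, as
        # test_run_once_returning_timestampdata shows.
        processed = 0
        # ... the real work would happen here ...
        return TimestampData(achievements="Records processed: %d." % processed)

# monitor = CountingMonitor(_db, collection)
# monitor.run()   # creates or updates the Timestamp row as shown above
```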
#### File: simplified-server-core/util/string_helpers.py
```python
import base64 as stdlib_base64
import binascii
import os
import sys
import six
class UnicodeAwareBase64(object):
"""Simulate the interface of the base64 module, but make it look as
    though base64-encoding and -decoding work on Unicode strings.
Behind the scenes, Unicode strings are encoded to a particular
encoding, then base64-encoded or -decoded, then decoded from that
encoding.
    Since we get Unicode strings out of the database, this lets us
    base64-encode and -decode those strings without worrying about
    encoding to bytes and then decoding.
"""
def __init__(self, encoding):
self.encoding = encoding
def _ensure_bytes(self, s):
if isinstance(s, bytes):
return s
return s.encode(self.encoding)
def _ensure_unicode(self, s):
if isinstance(s, bytes):
return s.decode(self.encoding)
return s
def wrap(func):
def wrapped(self, s, *args, **kwargs):
s = self._ensure_bytes(s)
value = func(s, *args, **kwargs)
return self._ensure_unicode(value)
return wrapped
# Wrap most of the base64 module API so that Unicode is handled
# transparently.
b64encode = wrap(stdlib_base64.b64encode)
b64decode = wrap(stdlib_base64.b64decode)
standard_b64encode = wrap(stdlib_base64.standard_b64encode)
standard_b64decode = wrap(stdlib_base64.standard_b64decode)
urlsafe_b64encode = wrap(stdlib_base64.urlsafe_b64encode)
urlsafe_b64decode = wrap(stdlib_base64.urlsafe_b64decode)
# These are deprecated in base64 and we should stop using them.
encodestring = wrap(stdlib_base64.encodestring)
decodestring = wrap(stdlib_base64.decodestring)
# If you're okay with Unicode strings being converted to/from UTF-8
# when you try to encode/decode them, you can use this object instead of
# the standard 'base64' module.
base64 = UnicodeAwareBase64("utf8")
def random_string(size):
"""Generate a random string of binary, encoded as hex digits.
:param: Size of binary string in bytes.
:return: A Unicode string.
"""
return binascii.hexlify(os.urandom(size)).decode("utf8")
def native_string(x):
"""Convert a bytestring or a Unicode string to the 'native string'
class for this version of Python.
In Python 2, the native string class is a bytestring. In Python 3,
the native string class is a Unicode string.
This function exists to smooth the conversion process and can be
removed once we convert to Python 3.
"""
if sys.version_info.major == 2:
if isinstance(x, unicode):
x = x.encode("utf8")
else:
if isinstance(x, bytes):
x = x.decode("utf8")
return x
def is_string(value):
"""Return a boolean value indicating whether the value is a string or not.
This method is compatible with both Python 2.7 and Python 3.x.
NOTE:
1. We can't use isinstance(string_value, str) because strings in Python 2.7 can have "unicode" type.
2. We can't use isinstance(string_value, basestring) because "basestring" type is not available in Python 3.x.
:param value: Value
:type value: Any
:return: Boolean value indicating whether the value is a string or not
:rtype: bool
"""
return isinstance(value, six.string_types)
``` |
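A short usage sketch for the helpers above; the import path is an assumption (inside the package the module is addressed with relative imports), and the encoded value in the comment was worked out by hand rather than captured from a run.
```python
from core.util.string_helpers import base64, random_string, is_string

token = base64.b64encode("pâté")        # str in, str out: "cMOidMOp"
assert base64.b64decode(token) == "pâté"

nonce = random_string(8)                 # 8 random bytes -> 16 hex digits
assert is_string(nonce) and len(nonce) == 16
```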
{
"source": "jonathan-greig/dftimewolf",
"score": 3
} |
#### File: dftimewolf/metawolf/output.py
```python
import hashlib
import subprocess
import time
from datetime import datetime
from datetime import timezone
from typing import Any, List, Optional, Dict
import psutil
from dftimewolf.metawolf import utils
PURPLE = '\033[95m'
GREEN = '\033[92m'
BLUE = '\033[94m'
YELLOW = '\033[93m'
RED = '\033[91m'
ENDC = '\033[0m'
DFTIMEWOLF = 'dftimewolf'
CRITICAL_ERROR = 'Critical error found. Aborting.'
class MetawolfOutput:
"""MetawolfOutput handles formatting of strings to display in Metawolf."""
def Welcome(self) -> str:
"""Print Metawolf welcome message.
Returns:
str: The welcome message.
"""
# pylint: disable=anomalous-backslash-in-string
return self.Color('''
_____ __ __ __ .__ _____
/ \ ____ _/ |_ _____ / \ / \ ____ | | _/ ____\\
/ \ / \ _/ __ \\\\ __\\\\__ \ \ \/\/ // _ \ | | \ __\
/ Y \\\\ ___/ | | / __ \_\ /( <_> )| |__| |
\____|__ / \___ >|__| (____ / \__/\ / \____/ |____/|__|
\/ \/ \/ \/
''', PURPLE)
# pylint: enable=anomalous-backslash-in-string
@staticmethod
def Color(value: Any, color: str) -> str:
"""Return a colored output for stdout.
Args:
value (str): The value to format.
color (str): The color to format the string with.
Returns:
str: The formatted string.
"""
return '{0:s}{1!s}{2:s}'.format(color, value, ENDC)
class MetawolfProcess:
"""MetawolfProcess captures all information about metawolf processes.
Attributes:
process (Any): A subprocess.Popen or psutil.Process object, representing
metawolf's process.
session_id (str): The session ID this process belongs to.
recipe (str): The DFTimewolf recipe this process is executing.
cmd (List[str]): The command to execute, as a list.
cmd_readable (str): The command to execute, as a string.
output_id (int): The output ID used to check the process' output.
cmd_id (str): The id corresponding to the command being executed.
outfile_path (str): The path to the file that contains this process' stdout
and stderr.
timestamp_readable (str): The timestamp at which this process was run.
interrupted (bool): True if this process was killed manually, False
otherwise.
"""
def __init__(
self,
session_id: Optional[str] = None,
cmd: Optional[List[str]] = None,
output_id: Optional[int] = None,
from_dict: Optional[Dict[str, str]] = None
) -> None:
"""Initialize MetawolfProcess.
Args:
session_id (str): Optional. The session ID this process belongs to.
cmd (List[str]): Optional. The command this process is running. This
should be of the form [dftimewolf, recipe_name, recipe_arguments...].
output_id (int): Optional. The output ID that this process corresponds to.
from_dict (Dict[str, str]): Optional. A json-like dictionary that
contains the attributes of this object.
Raises:
ValueError: If the cmd does not match a valid dftimewolf invocation.
"""
process = None
recipe = ''
if cmd and cmd[1] in utils.MetawolfUtils().GetRecipes():
recipe = cmd[1]
if not from_dict:
from_dict = {}
if cmd and len(cmd) < 2:
raise ValueError('Command mis-configured. Format: [dftimewolf, '
'recipe_name, recipe_arguments...]')
if cmd and not cmd[0] == DFTIMEWOLF or not recipe:
raise ValueError('Command mis-configured. Format: [dftimewolf, '
'recipe_name, recipe_arguments...]')
else:
# Look for background processes if some are still running
for proc in psutil.process_iter():
try:
proc_cmd = proc.cmdline()[1:] # We discard the parent process
proc_cmd[0] = proc_cmd[0].split('/')[-1] # And the full path
if proc_cmd == from_dict['cmd_readable'].split(' '):
process = proc
break
except (psutil.AccessDenied, psutil.ZombieProcess, IndexError):
pass
self.process = process
self.session_id = from_dict.get('session_id', session_id)
self.recipe = from_dict.get('recipe', recipe)
cmd_readable = from_dict.get('cmd_readable')
if cmd_readable:
cmd = cmd_readable.split(' ')
self.cmd = cmd
if self.cmd: # Always true here, but needed by Mypy.
self.cmd_readable = cmd_readable or ' '.join(self.cmd)
self.output_id = utils.CastToType(
from_dict.get('output_id', str(output_id)), int)
self.cmd_id = from_dict.get('cmd_id')
self.outfile_path = from_dict.get('outfile_path')
self.timestamp_readable = from_dict.get('timestamp')
self.interrupted = from_dict.get('interrupted', False)
self.stdout = None # type: Any
def Run(self) -> None:
"""Run the process."""
self.timestamp_readable = str(
datetime.fromtimestamp(time.time(), timezone.utc))
# Metawolf writes each dftimewolf run into a file located in /tmp that
# is identified by the process's session id, recipe and timestamp.
file_id = '{0:s}-{1:s}-{2!s}'.format(
self.session_id, self.recipe, self.timestamp_readable).encode('utf-8')
self.cmd_id = str(hashlib.sha256(file_id).hexdigest()[:6])
self.outfile_path = '/tmp/metawolf-{0:s}.log'.format(self.cmd_id)
self.stdout = open(self.outfile_path, mode='w+')
if self.cmd: # Always true here, but needed by Mypy.
self.process = subprocess.Popen(self.cmd,
shell=False,
stdout=self.stdout,
stderr=self.stdout,
text=True)
def Poll(self) -> Optional[int]:
"""Poll the process.
If self.process is a subprocess.Popen object, we call poll(). If
self.process is a psutil.Process object, we call status().
If None is returned, the process is still running.
If -1 is returned, the process was interrupted.
    If 0 is returned, the process exited (this does not mean that the recipe
executed successfully, but that the dftimewolf command completed).
Returns:
int: The process status.
"""
if not self.process:
return 0
# https://docs.python.org/3/library/subprocess.html#subprocess.Popen.returncode
if hasattr(self.process, 'poll'):
# self.process is a subprocess.Popen object
err = self.process.poll()
if err is None:
return None
if err >= 0:
return 0
return -1
# self.process is a psutil.Process object
try:
status = self.process.status()
except psutil.NoSuchProcess:
# Process no longer exists
return 0
if status == psutil.STATUS_RUNNING:
return None
return 0
def Status(self) -> str:
"""Return the process status.
Returns:
str: The status of the running recipe.
"""
return_code = self.Poll()
if return_code is None:
return MetawolfOutput.Color('Running', YELLOW)
# Process can be in 3 states: interrupted, failed, or completed.
if return_code == -1 or self.interrupted:
return MetawolfOutput.Color('Interrupted', RED)
# Else, dftimewolf completed and we need to look into the output file to
# check whether or not the recipe executed successfully.
if CRITICAL_ERROR in self.Read():
return MetawolfOutput.Color('Failed', RED)
return MetawolfOutput.Color('Completed', GREEN)
def Read(self) -> str:
"""Read the output of the process.
Returns:
str: The stdout of the process written to file.
"""
if self.outfile_path:
try:
with open(self.outfile_path, 'r') as f:
return f.read()
except FileNotFoundError:
print(MetawolfOutput.Color(
'Output file {0:s} does not exist anymore. To clear old output '
'files, type `clean`'.format(self.outfile_path), RED))
return ''
def Terminate(self) -> str:
"""Terminate a process and close its IO file.
Returns:
str: An output (e.g. informational message), if any.
"""
out = ''
if self.Poll() is None and self.process:
self.process.terminate()
out = MetawolfOutput.Color(
'Killed: {0:s}'.format(self.cmd_id), YELLOW)
self.interrupted = True
else:
out = '{0:s} has already terminated'.format(self.cmd_id)
if self.stdout:
# This is always set if the process was not recovered from a previous
# session
try:
self.stdout.close()
except IOError:
pass
return out
def Marshal(self) -> Dict[str, Any]:
"""Marshal part of the object into a JSON dictionary."""
return {
'session_id': self.session_id,
'recipe': self.recipe,
'cmd_readable': self.cmd_readable,
'output_id': self.output_id,
'cmd_id': self.cmd_id,
'outfile_path': self.outfile_path,
'timestamp': self.timestamp_readable,
'interrupted': self.interrupted
}
``` |
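A rough sketch of the MetawolfProcess lifecycle described by the docstrings above. The recipe name and arguments are placeholders: the constructor raises ValueError unless the recipe is one that MetawolfUtils().GetRecipes() knows about.
```python
import time

from dftimewolf.metawolf.output import MetawolfProcess

proc = MetawolfProcess(
    session_id='session-1',
    cmd=['dftimewolf', 'some_recipe', '--arg', 'value'],  # placeholder recipe
    output_id=0)
proc.Run()                    # stdout/stderr go to /tmp/metawolf-<cmd_id>.log
while proc.Poll() is None:    # None means dftimewolf is still running
    time.sleep(1)
print(proc.Status())          # 'Completed', 'Failed' or 'Interrupted'
print(proc.Read())            # the captured dftimewolf output
state = proc.Marshal()        # JSON-friendly dict, e.g. for session persistence
print(proc.Terminate())       # reports that the process already terminated
```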
{
"source": "jonathan-greig/docker-explorer",
"score": 2
} |
#### File: jonathan-greig/docker-explorer/tests.py
```python
from __future__ import unicode_literals
import collections
import os
import shutil
import sys
import tarfile
import tempfile
import unittest
import unittest.mock
from io import StringIO
from docker_explorer import __version__ as de_version
from docker_explorer import container
from docker_explorer import downloader
from docker_explorer import errors
from docker_explorer import explorer
from docker_explorer import storage
from docker_explorer import utils
from tools import de
# pylint: disable=protected-access
# pylint: disable=line-too-long
class UtilsTests(unittest.TestCase):
"""Tests Utils methods."""
def testFormatDatetime(self):
"""Tests the utils.FormatDatetime function."""
test_date = '2017-12-25T15:59:59.102938 msqedigrb msg'
expected_time_str = '2017-12-25T15:59:59.102938'
self.assertEqual(expected_time_str, utils.FormatDatetime(test_date))
def testPrettyPrintJSON(self):
"""Tests the utils.PrettyPrintJSON function."""
test_dict = {'test': [{'dict1': {'key1': 'val1'}, 'dict2': None}]}
expected_string = ('{\n "test": [\n {\n "dict1": {\n'
' "key1": "val1"\n }, \n'
' "dict2": null\n }\n ]\n}\n')
self.assertEqual(expected_string, utils.PrettyPrintJSON(test_dict))
class TestDEMain(unittest.TestCase):
"""Tests DockerExplorerTool object methods."""
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.docker_directory_path)
@classmethod
def setUpClass(cls):
    # We set up one overlay2-backed Docker root folder for all the following
# tests.
cls.driver = 'overlay2'
cls.docker_directory_path = os.path.join('test_data', 'docker')
if not os.path.isdir(cls.docker_directory_path):
docker_tar = os.path.join('test_data', 'overlay2.v2.tgz')
tar = tarfile.open(docker_tar, 'r:gz')
tar.extractall('test_data')
tar.close()
cls.explorer_object = explorer.Explorer()
cls.explorer_object.SetDockerDirectory(cls.docker_directory_path)
cls.explorer_object.DetectDockerStorageVersion()
def testParseArguments(self):
"""Tests the DockerExplorerTool.ParseArguments function."""
de_object = de.DockerExplorerTool()
prog = sys.argv[0]
expected_docker_root = os.path.join('test_data', 'docker')
args = [prog, '-r', expected_docker_root, 'list', 'repositories']
sys.argv = args
options = de_object.ParseArguments()
usage_string = de_object._argument_parser.format_usage()
expected_usage = '[-h] [-d] [-r DOCKER_DIRECTORY] [-V]'
expected_usage_commands = '{download,mount,list,history}'
self.assertTrue(expected_usage in usage_string)
self.assertTrue(expected_usage_commands in usage_string)
self.assertEqual(expected_docker_root, options.docker_directory)
def testShowHistory(self):
"""Tests that ShowHistory shows history."""
self.maxDiff = None
de_object = de.DockerExplorerTool()
de_object._explorer = self.explorer_object
# We pick one of the container IDs.
container_id = container.GetAllContainersIDs(self.docker_directory_path)[0]
with unittest.mock.patch('sys.stdout', new=StringIO()) as fake_output:
de_object.docker_directory = self.docker_directory_path
de_object.ShowHistory(container_id)
expected_string = """{
"sha256:8ac48589692a53a9b8c2d1ceaa6b402665aa7fe667ba51ccc03002300856d8c7": {
"created_at": "2018-04-05T10:41:28.876407",
"container_cmd": "/bin/sh -c #(nop) CMD [\\"sh\\"]",
"size": 0
}
}
"""
self.assertEqual(expected_string, fake_output.getvalue())
def testDetectStorageFail(self):
"""Tests that the DockerExplorerTool.DetectStorage function fails on
Docker directory."""
explorer_object = explorer.Explorer()
explorer_object.docker_directory = 'this_dir_shouldnt_exist'
expected_error_message = (
'this_dir_shouldnt_exist is not a Docker directory')
with self.assertRaises(errors.BadStorageException) as err:
explorer_object.SetDockerDirectory('this_dir_shouldnt_exist')
self.assertEqual(expected_error_message, err.exception.message)
class DockerTestCase(unittest.TestCase):
"""Base class for tests of different Storage implementations."""
@classmethod
def tearDownClass(cls):
shutil.rmtree(os.path.join('test_data', 'docker'))
@classmethod
def _setup(cls, driver, driver_class, storage_version=2):
"""Internal method to set up the TestCase on a specific storage."""
cls.driver = driver
docker_directory_path = os.path.join('test_data', 'docker')
if not os.path.isdir(docker_directory_path):
docker_tar = os.path.join(
'test_data', '{0:s}.v{1:d}.tgz'.format(driver, storage_version))
tar = tarfile.open(docker_tar, 'r:gz')
tar.extractall('test_data')
tar.close()
cls.explorer_object = explorer.Explorer()
cls.explorer_object.SetDockerDirectory(docker_directory_path)
cls.explorer_object.DetectDockerStorageVersion()
cls.driver_class = driver_class
cls.storage_version = storage_version
def testDetectStorage(self):
"""Tests the Explorer.DetectStorage function."""
for container_obj in self.explorer_object.GetAllContainers():
self.assertIsNotNone(container_obj.storage_object)
self.assertEqual(container_obj.storage_name, self.driver)
self.assertIsInstance(container_obj.storage_object, self.driver_class)
self.assertEqual(self.storage_version, container_obj.docker_version)
if self.storage_version == 1:
self.assertEqual('config.json', container_obj.container_config_filename)
elif self.storage_version == 2:
self.assertEqual(
'config.v2.json', container_obj.container_config_filename)
class TestAufsStorage(DockerTestCase):
"""Tests methods in the BaseStorage object."""
@classmethod
def setUpClass(cls):
cls._setup('aufs', storage.AufsStorage)
def testGetAllContainers(self):
"""Tests the GetAllContainers function on a AuFS storage."""
containers_list = self.explorer_object.GetAllContainers()
containers_list = sorted(containers_list, key=lambda ci: ci.name)
self.assertEqual(7, len(containers_list))
container_obj = containers_list[1]
self.assertEqual('/dreamy_snyder', container_obj.name)
self.assertEqual(
'2017-02-13T16:45:05.629904159Z', container_obj.creation_timestamp)
self.assertEqual('busybox', container_obj.config_image_name)
self.assertTrue(container_obj.running)
self.assertEqual(
'7b02fb3e8a665a63e32b909af5babb7d6ba0b64e10003b2d9534c7d5f2af8966',
container_obj.container_id)
def testGetOrderedLayers(self):
"""Tests the BaseStorage.GetOrderedLayers function on a AuFS storage."""
container_obj = self.explorer_object.GetContainer(
'7b02fb3e8a665a63e32b909af5babb7d6ba0b64e10003b2d9534c7d5f2af8966')
layers = container_obj.GetOrderedLayers()
self.assertEqual(1, len(layers))
self.assertEqual(
'sha256:'
'7968321274dc6b6171697c33df7815310468e694ac5be0ec03ff053bb135e768',
layers[0])
def testGetRunningContainersList(self):
"""Tests the BaseStorage.GetContainersList function on a AuFS storage."""
running_containers = self.explorer_object.GetContainersList(
only_running=True)
running_containers = sorted(
running_containers, key=lambda ci: ci.container_id)
self.assertEqual(1, len(running_containers))
container_obj = running_containers[0]
self.assertEqual('/dreamy_snyder', container_obj.name)
self.assertEqual(
'2017-02-13T16:45:05.629904159Z', container_obj.creation_timestamp)
self.assertEqual('busybox', container_obj.config_image_name)
self.assertTrue(container_obj.running)
def testGetContainersJson(self):
"""Tests the GetContainersJson function on a AuFS storage."""
self.maxDiff = None
result = self.explorer_object.GetContainersJson(only_running=True)
mount_point = collections.OrderedDict()
mount_point['source'] = (
'test_data/docker/volumes/'
'28297de547b5473a9aff90aaab45ed108ebf019981b40c3c35c226f54c13ac0d/_data'
)
mount_point['destination'] = '/var/jenkins_home'
expected = collections.OrderedDict()
expected['image_name'] = 'busybox'
expected['container_id'] = '7b02fb3e8a665a63e32b909af5babb7d6ba0b64e10003b2d9534c7d5f2af8966'
expected['image_id'] = '7968321274dc6b6171697c33df7815310468e694ac5be0ec03ff053bb135e768'
expected['start_date'] = '2017-02-13T16:45:05.785658'
expected['mount_id'] = 'b16a494082bba0091e572b58ff80af1b7b5d28737a3eedbe01e73cd7f4e01d23'
expected['mount_points'] = [mount_point]
expected['log_path'] = '/tmp/docker/containers/7b02fb3e8a665a63e32b909af5babb7d6ba0b64e10003b2d9534c7d5f2af8966/7b02fb3e8a665a63e32b909af5babb7d6ba0b64e10003b2d9534c7d5f2af8966-json.log'
self.assertEqual([expected], result)
def testGetLayerInfo(self):
"""Tests the BaseStorage.GetLayerInfo function on a AuFS storage."""
container_obj = self.explorer_object.GetContainer(
'7b02fb3e8a665a63e32b909af5babb7d6ba0b64e10003b2d9534c7d5f2af8966')
layer_info = container_obj.GetLayerInfo(
'sha256:'
'7968321274dc6b6171697c33df7815310468e694ac5be0ec03ff053bb135e768')
self.assertEqual('2017-01-13T22:13:54.401355854Z', layer_info['created'])
self.assertEqual(['/bin/sh', '-c', '#(nop) ', 'CMD ["sh"]'],
layer_info['container_config']['Cmd'])
def testGetRepositoriesString(self):
"""Tests GetRepositoriesString() on a AuFS storage."""
self.maxDiff = None
result_string = self.explorer_object.GetRepositoriesString()
expected_string = (
'[\n'
' {\n'
' "Repositories": {\n'
' "busybox": {\n'
' "busybox:latest": "sha256:'
'7968321274dc6b6171697c33df7815310468e694ac5be0ec03ff053bb135e768"\n'
' }\n'
' }, \n'
' "path": "test_data/docker/image/aufs/repositories.json"\n'
' }\n'
']\n')
self.assertEqual(expected_string, result_string)
def testMakeMountCommands(self):
"""Tests the BaseStorage.MakeMountCommands function on a AuFS storage."""
self.maxDiff = None
container_obj = self.explorer_object.GetContainer(
'7b02fb3e8a665a63e32b909af5babb7d6ba0b64e10003b2d9534c7d5f2af8966')
commands = container_obj.storage_object.MakeMountCommands(
container_obj, '/mnt')
commands = [' '.join(x) for x in commands]
expected_commands = [
(
'/bin/mount -t aufs -o ro,br=test_data/docker/aufs/diff/test_data/'
'docker/aufs/diff/'
'b16a494082bba0091e572b58ff80af1b7b5d28737a3eedbe01e73cd7f4e01d23'
'=ro+wh none /mnt'),
(
'/bin/mount -t aufs -o ro,remount,append:test_data/docker/aufs/diff/'
'b16a494082bba0091e572b58ff80af1b7b5d28737a3eedbe01e73cd7f4e01d23'
'-init=ro+wh none /mnt'),
(
'/bin/mount -t aufs -o ro,remount,append:test_data/docker/aufs/diff/'
'd1c54c46d331de21587a16397e8bd95bdbb1015e1a04797c76de128107da83ae'
'=ro+wh none /mnt'),
(
'/bin/mount --bind -o ro {0:s}/volumes/'
'28297de547b5473a9aff90aaab45ed108ebf019981b40c3c35c226f54c13ac0d/'
'_data /mnt/var/jenkins_home').format(os.path.abspath('test_data'))
]
self.assertEqual(expected_commands, commands)
def testGetHistory(self):
"""Tests the BaseStorage.GetHistory function on a AuFS storage."""
self.maxDiff = None
container_obj = self.explorer_object.GetContainer(
'7b02fb3e8a665a63e32b909af5babb7d6ba0b64e10003b2d9534c7d5f2af8966')
expected = {
'sha256:'
'7968321274dc6b6171697c33df7815310468e694ac5be0ec03ff053bb135e768': {
'created_at': '2017-01-13T22:13:54.401355',
'container_cmd': '/bin/sh -c #(nop) CMD ["sh"]',
'size': 0
}
}
self.assertEqual(expected, container_obj.GetHistory())
def testGetFullContainerID(self):
"""Tests the DockerExplorerTool._GetFullContainerID function on AuFS."""
self.assertEqual(
'2cc4b0d9c1dfdf71099c5e9a109e6a0fe286152a5396bd1850689478e8f70625',
self.explorer_object._GetFullContainerID('2cc4b0d'))
with self.assertRaises(Exception) as err:
self.explorer_object._GetFullContainerID('')
self.assertEqual(
'Too many container IDs starting with "": '
'1171e9631158156ba2b984d335b2bf31838403700df3882c51aed70beebb604f, '
'2cc4b0d9c1dfdf71099c5e9a109e6a0fe286152a5396bd1850689478e8f70625, '
'7b02fb3e8a665a63e32b909af5babb7d6ba0b64e10003b2d9534c7d5f2af8966, '
'986c6e682f30550512bc2f7243f5a57c91b025e543ef703c426d732585209945, '
'b6f881bfc566ed604da1dc9bc8782a3540380c094154d703a77113b1ecfca660, '
'c8a38b6c29b0c901c37c2bb17bfcd73942c44bb71cc528505385c62f3c6fff35, '
'dd39804186d4f649f1e9cec89df1583e7a12a48193223a16cc40958f7e76b858',
err.exception.message)
with self.assertRaises(Exception) as err:
self.explorer_object._GetFullContainerID('xx')
self.assertEqual(
'Could not find any container ID starting with "xx"',
err.exception.message)
class TestAufsV1Storage(DockerTestCase):
"""Tests methods in the BaseStorage object."""
@classmethod
def setUpClass(cls):
cls._setup('aufs', storage.AufsStorage, storage_version=1)
def testGetAllContainers(self):
"""Tests the GetAllContainers function on a AuFS storage."""
containers_list = self.explorer_object.GetAllContainers()
containers_list = sorted(containers_list, key=lambda ci: ci.name)
self.assertEqual(3, len(containers_list))
container_obj = containers_list[0]
self.assertEqual('/angry_rosalind', container_obj.name)
self.assertEqual(
'2018-12-27T10:53:17.096746609Z', container_obj.creation_timestamp)
self.assertEqual('busybox', container_obj.config_image_name)
self.assertTrue(container_obj.running)
self.assertEqual(
'de44dd97cfd1c8d1c1aad7f75a435603991a7a39fa4f6b20a69bf4458809209c',
container_obj.container_id)
def testGetOrderedLayers(self):
"""Tests the BaseStorage.GetOrderedLayers function on a AuFS storage."""
container_obj = self.explorer_object.GetContainer(
'de44dd97cfd1c8d1c1aad7f75a435603991a7a39fa4f6b20a69bf4458809209c')
layers = container_obj.GetOrderedLayers()
self.assertEqual(2, len(layers))
self.assertEqual(
'1cee97b18f87b5fa91633db35f587e2c65c093facfa2cbbe83d5ebe06e1d9125',
layers[0])
def testGetRunningContainersList(self):
"""Tests the BaseStorage.GetContainersList function on a AuFS storage."""
running_containers = self.explorer_object.GetContainersList(
only_running=True)
running_containers = sorted(
running_containers, key=lambda ci: ci.container_id)
self.assertEqual(1, len(running_containers))
container_obj = running_containers[0]
self.assertEqual('/angry_rosalind', container_obj.name)
self.assertEqual(
'2018-12-27T10:53:17.096746609Z', container_obj.creation_timestamp)
self.assertEqual('busybox', container_obj.config_image_name)
self.assertTrue(container_obj.running)
def testGetContainersJson(self):
"""Tests the GetContainersJson function on a AuFS storage."""
result = self.explorer_object.GetContainersJson(only_running=True)
expected = collections.OrderedDict()
expected['image_name'] = 'busybox'
expected['container_id'] = 'de44dd97cfd1c8d1c1aad7f75a435603991a7a39fa4f6b20a69bf4458809209c'
expected['image_id'] = '1cee97b18f87b5fa91633db35f587e2c65c093facfa2cbbe83d5ebe06e1d9125'
expected['start_date'] = '2018-12-27T10:53:17.409426'
expected['log_path'] = '/var/lib/docker/containers/de44dd97cfd1c8d1c1aad7f75a435603991a7a39fa4f6b20a69bf4458809209c/de44dd97cfd1c8d1c1aad7f75a435603991a7a39fa4f6b20a69bf4458809209c-json.log'
self.assertEqual([expected], result)
def testGetLayerInfo(self):
"""Tests the BaseStorage.GetLayerInfo function on a AuFS storage."""
container_obj = self.explorer_object.GetContainer(
'de44dd97cfd1c8d1c1aad7f75a435603991a7a39fa4f6b20a69bf4458809209c')
layer_info = container_obj.GetLayerInfo(
'1cee97b18f87b5fa91633db35f587e2c65c093facfa2cbbe83d5ebe06e1d9125')
self.assertEqual('2018-12-26T08:20:42.831353376Z', layer_info['created'])
self.assertEqual(['/bin/sh', '-c', '#(nop) ', 'CMD ["sh"]'],
layer_info['container_config']['Cmd'])
def testGetRepositoriesString(self):
"""Tests GetRepositoriesString() on a AuFS storage."""
self.maxDiff = None
result_string = self.explorer_object.GetRepositoriesString()
expected_string = (
'[\n'
' {\n'
' "Repositories": {\n'
' "busybox": {\n'
' "latest": "'
'1cee97b18f87b5fa91633db35f587e2c65c093facfa2cbbe83d5ebe06e1d9125"\n'
' }\n'
' }, \n'
' "path": "test_data/docker/repositories-aufs"\n'
' }\n'
']\n')
self.assertEqual(expected_string, result_string)
def testMakeMountCommands(self):
"""Tests the BaseStorage.MakeMountCommands function on a AuFS storage."""
container_obj = self.explorer_object.GetContainer(
'de44dd97cfd1c8d1c1aad7f75a435603991a7a39fa4f6b20a69bf4458809209c')
commands = container_obj.storage_object.MakeMountCommands(
container_obj, '/mnt')
commands = [' '.join(x) for x in commands]
expected_commands = [
(
'/bin/mount -t aufs -o ro,br=test_data/'
'docker/aufs/diff/'
'de44dd97cfd1c8d1c1aad7f75a435603991a7a39fa4f6b20a69bf4458809209c'
'=ro+wh none /mnt'),
(
'/bin/mount -t aufs -o ro,remount,append:test_data/docker/aufs/diff/'
'de44dd97cfd1c8d1c1aad7f75a435603991a7a39fa4f6b20a69bf4458809209c'
'-init=ro+wh none /mnt'),
(
'/bin/mount -t aufs -o ro,remount,append:test_data/docker/aufs/diff/'
'1cee97b18f87b5fa91633db35f587e2c65c093facfa2cbbe83d5ebe06e1d9125'
'=ro+wh none /mnt'),
(
'/bin/mount -t aufs -o ro,remount,append:test_data/docker/aufs/diff/'
'df557f39d413a1408f5c28d8aab2892f927237ec22e903ef04b331305130ab38'
'=ro+wh none /mnt')
]
self.assertEqual(expected_commands, commands)
def testGetHistory(self):
"""Tests the BaseStorage.GetHistory function on a AuFS storage."""
self.maxDiff = None
container_obj = self.explorer_object.GetContainer(
'de44dd97cfd1c8d1c1aad7f75a435603991a7a39fa4f6b20a69bf4458809209c')
expected = {
'1cee97b18f87b5fa91633db35f587e2c65c093facfa2cbbe83d5ebe06e1d9125': {
'size': 0
},
'df557f39d413a1408f5c28d8aab2892f927237ec22e903ef04b331305130ab38': {
'created_at':
'2018-12-26T08:20:42.687925',
'container_cmd': (
'/bin/sh -c #(nop) ADD file:ce026b62356eec3ad1214f92be2c'
'9dc063fe205bd5e600be3492c4dfb17148bd in / '),
'size':
1154361
}
}
self.assertEqual(expected, container_obj.GetHistory())
def testGetFullContainerID(self):
"""Tests the DockerExplorerTool._GetFullContainerID function on AuFS."""
self.assertEqual(
'de44dd97cfd1c8d1c1aad7f75a435603991a7a39fa4f6b20a69bf4458809209c',
self.explorer_object._GetFullContainerID('de44dd'))
self.maxDiff = None
with self.assertRaises(Exception) as err:
self.explorer_object._GetFullContainerID('')
self.assertEqual((
'Too many container IDs starting with "": '
'3b03d0958390ccfb92e9f1ee67de628ab315c532120d4512cb72a1805465fb35, '
'de44dd97cfd1c8d1c1aad7f75a435603991a7a39fa4f6b20a69bf4458809209c, '
'fbb6711cefc70193cb6cb0b113fc9ed6b9eaddcdd33667adb5cb690a4dca413a'),
err.exception.message)
with self.assertRaises(Exception) as err:
self.explorer_object._GetFullContainerID('xx')
self.assertEqual(
'Could not find any container ID starting with "xx"',
err.exception.message)
class TestOverlayStorage(DockerTestCase):
"""Tests methods in the OverlayStorage object."""
@classmethod
def setUpClass(cls):
cls._setup('overlay', storage.OverlayStorage)
def testGetAllContainers(self):
"""Tests the GetAllContainers function on a Overlay storage."""
containers_list = self.explorer_object.GetAllContainers()
containers_list = sorted(containers_list, key=lambda ci: ci.name)
self.assertEqual(6, len(containers_list))
container_obj = containers_list[0]
self.assertEqual('/elastic_booth', container_obj.name)
self.assertEqual(
'2018-01-26T14:55:56.280943771Z', container_obj.creation_timestamp)
self.assertEqual('busybox:latest', container_obj.config_image_name)
self.assertTrue(container_obj.running)
self.assertEqual(
'5dc287aa80b460652a5584e80a5c8c1233b0c0691972d75424cf5250b917600a',
container_obj.container_id)
def testGetOrderedLayers(self):
"""Tests the BaseStorage.GetOrderedLayers function on a Overlay storage."""
container_obj = self.explorer_object.GetContainer(
'5dc287aa80b460652a5584e80a5c8c1233b0c0691972d75424cf5250b917600a')
layers = container_obj.GetOrderedLayers()
self.assertEqual(1, len(layers))
self.assertEqual(
'sha256:'
'5b0d59026729b68570d99bc4f3f7c31a2e4f2a5736435641565d93e7c25bd2c3',
layers[0])
def testGetRunningContainersList(self):
"""Tests the BaseStorage.GetContainersList function on a Overlay storage."""
running_containers = self.explorer_object.GetContainersList(
only_running=True)
running_containers = sorted(
running_containers, key=lambda ci: ci.container_id)
self.assertEqual(1, len(running_containers))
container_obj = running_containers[0]
self.assertEqual('/elastic_booth', container_obj.name)
self.assertEqual(
'2018-01-26T14:55:56.280943771Z', container_obj.creation_timestamp)
self.assertEqual('busybox:latest', container_obj.config_image_name)
self.assertTrue(container_obj.running)
def testGetContainersJson(self):
"""Tests the GetContainersJson function on a Overlay storage."""
result = self.explorer_object.GetContainersJson(only_running=True)
expected = collections.OrderedDict()
expected['image_name'] = 'busybox:latest'
expected['container_id'] = '5dc287aa80b460652a5584e80a5c8c1233b0c0691972d75424cf5250b917600a'
expected['image_id'] = '5b0d59026729b68570d99bc4f3f7c31a2e4f2a5736435641565d93e7c25bd2c3'
expected['start_date'] = '2018-01-26T14:55:56.574924'
expected['mount_id'] = '974e2b994f9db74e1ddd6fc546843bc65920e786612a388f25685acf84b3fed1'
expected['upper_dir'] = 'test_data/docker/overlay/974e2b994f9db74e1ddd6fc546843bc65920e786612a388f25685acf84b3fed1/upper'
expected['log_path'] = '/var/lib/docker/containers/5dc287aa80b460652a5584e80a5c8c1233b0c0691972d75424cf5250b917600a/5dc287aa80b460652a5584e80a5c8c1233b0c0691972d75424cf5250b917600a-json.log'
self.assertEqual([expected], result)
def testGetLayerInfo(self):
"""Tests the BaseStorage.GetLayerInfo function on a Overlay storage."""
container_obj = self.explorer_object.GetContainer(
'5dc287aa80b460652a5584e80a5c8c1233b0c0691972d75424cf5250b917600a')
layer_info = container_obj.GetLayerInfo(
'sha256:'
'5b0d59026729b68570d99bc4f3f7c31a2e4f2a5736435641565d93e7c25bd2c3')
self.assertEqual('2018-01-24T04:29:35.590938514Z', layer_info['created'])
self.assertEqual(['/bin/sh', '-c', '#(nop) ', 'CMD ["sh"]'],
layer_info['container_config']['Cmd'])
def testGetRepositoriesString(self):
"""Tests GetRepositoriesString() on a Overlay storage."""
result_string = self.explorer_object.GetRepositoriesString()
self.maxDiff = None
expected_string = (
'[\n'
' {\n'
' "Repositories": {\n'
' "busybox": {\n'
' "busybox:latest": "sha256:'
'5b0d59026729b68570d99bc4f3f7c31a2e4f2a5736435641565d93e7c25bd2c3", \n'
' "busybox@sha256:'
'1669a6aa7350e1cdd28f972ddad5aceba2912f589f19a090ac75b7083da748db": '
'"sha256:'
'5b0d59026729b68570d99bc4f3f7c31a2e4f2a5736435641565d93e7c25bd2c3"\n'
' }\n'
' }, \n'
' "path": "test_data/docker/image/overlay/repositories.json"\n'
' }\n'
']\n')
self.assertEqual(expected_string, result_string)
def testMakeMountCommands(self):
"""Tests the BaseStorage.MakeMountCommands function on a Overlay storage."""
container_obj = self.explorer_object.GetContainer(
'5dc287aa80b460652a5584e80a5c8c1233b0c0691972d75424cf5250b917600a')
commands = container_obj.storage_object.MakeMountCommands(
container_obj, '/mnt')
commands = [' '.join(cmd) for cmd in commands]
expected_commands = [(
'/bin/mount -t overlay overlay -o ro,lowerdir='
'test_data/docker/overlay/974e2b994f9db74e1ddd6fc546843bc65920e786612'
'a388f25685acf84b3fed1/upper:'
'test_data/docker/overlay/a94d714512251b0d8a9bfaacb832e0c6cb70f71cb71'
'976cca7a528a429336aae/root '
'/mnt')]
self.assertEqual(expected_commands, commands)
def testGetHistory(self):
"""Tests the BaseStorage.GetHistory function on a Overlay storage."""
self.maxDiff = None
container_obj = self.explorer_object.GetContainer(
'5dc287aa80b460652a5584e80a5c8c1233b0c0691972d75424cf5250b917600a')
expected = {
'sha256:'
'5b0d59026729b68570d99bc4f3f7c31a2e4f2a5736435641565d93e7c25bd2c3': {
'created_at': '2018-01-24T04:29:35.590938',
'container_cmd': '/bin/sh -c #(nop) CMD ["sh"]',
'size': 0
}
}
self.assertEqual(expected, container_obj.GetHistory())
def testGetFullContainerID(self):
"""Tests the DockerExplorerTool._GetFullContainerID function on Overlay."""
self.assertEqual(
'5dc287aa80b460652a5584e80a5c8c1233b0c0691972d75424cf5250b917600a',
self.explorer_object._GetFullContainerID('5dc287aa80'))
with self.assertRaises(Exception) as err:
self.explorer_object._GetFullContainerID('4')
self.assertEqual(
'Too many container IDs starting with "4": '
'42e8679f78d6ea623391cdbcb928740ed804f928bd94f94e1d98687f34c48311, '
'4ad09bee61dcc675bf41085dbf38c31426a7ed6666fdd47521bfb8f5e67a7e6d',
err.exception.message)
with self.assertRaises(Exception) as err:
self.explorer_object._GetFullContainerID('xx')
self.assertEqual(
'Could not find any container ID starting with "xx"',
err.exception.message)
class TestOverlay2Storage(DockerTestCase):
"""Tests methods in the Overlay2Storage object."""
@classmethod
def setUpClass(cls):
cls._setup('overlay2', storage.Overlay2Storage)
def testGetAllContainers(self):
"""Tests the GetAllContainers function on a Overlay2 storage."""
containers_list = self.explorer_object.GetAllContainers()
containers_list = sorted(containers_list, key=lambda ci: ci.name)
self.assertEqual(5, len(containers_list))
container_obj = containers_list[0]
self.assertEqual('/festive_perlman', container_obj.name)
self.assertEqual(
'2018-05-16T10:51:39.271019533Z', container_obj.creation_timestamp)
self.assertEqual('busybox', container_obj.config_image_name)
self.assertTrue(container_obj.running)
self.assertEqual(
'8e8b7f23eb7cbd4dfe7e91646ddd0e0f524218e25d50113559f078dfb2690206',
container_obj.container_id)
def testGetOrderedLayers(self):
"""Tests the BaseStorage.GetOrderedLayers function on a Overlay2 storage."""
container_obj = self.explorer_object.GetContainer(
'8e8b7f23eb7cbd4dfe7e91646ddd0e0f524218e25d50113559f078dfb2690206')
layers = container_obj.GetOrderedLayers()
self.assertEqual(1, len(layers))
self.assertEqual(
'sha256:'
'8ac48589692a53a9b8c2d1ceaa6b402665aa7fe667ba51ccc03002300856d8c7',
layers[0])
def testGetRunningContainersList(self):
"""Tests the BaseStorage.GetContainersList function on Overlay2 storage."""
running_containers = self.explorer_object.GetContainersList(
only_running=True)
running_containers = sorted(
running_containers, key=lambda ci: ci.container_id)
self.assertEqual(1, len(running_containers))
container_obj = running_containers[0]
self.assertEqual('/festive_perlman', container_obj.name)
self.assertEqual(
'2018-05-16T10:51:39.271019533Z', container_obj.creation_timestamp)
self.assertEqual('busybox', container_obj.config_image_name)
self.assertTrue(container_obj.running)
def testGetContainersJson(self):
"""Tests the GetContainersJson function on a Overlay2 storage."""
result = self.explorer_object.GetContainersJson(only_running=True)
expected = collections.OrderedDict()
expected['image_name'] = 'busybox'
expected['container_id'] = '8e8b7f23eb7cbd4dfe7e91646ddd0e0f524218e25d50113559f078dfb2690206'
expected['image_id'] = '8ac48589692a53a9b8c2d1ceaa6b402665aa7fe667ba51ccc03002300856d8c7'
expected['start_date'] = '2018-05-16T10:51:39.625989'
expected['mount_id'] = '92fd3b3e7d6101bb701743c9518c45b0d036b898c8a3d7cae84e1a06e6829b53'
expected['upper_dir'] = 'test_data/docker/overlay2/92fd3b3e7d6101bb701743c9518c45b0d036b898c8a3d7cae84e1a06e6829b53/diff'
expected['log_path'] = '/var/lib/docker/containers/8e8b7f23eb7cbd4dfe7e91646ddd0e0f524218e25d50113559f078dfb2690206/8e8b7f23eb7cbd4dfe7e91646ddd0e0f524218e25d50113559f078dfb2690206-json.log'
self.assertEqual([expected], result)
def testGetLayerInfo(self):
"""Tests the BaseStorage.GetLayerInfo function on a Overlay2 storage."""
container_obj = self.explorer_object.GetContainer(
'8e8b7f23eb7cbd4dfe7e91646ddd0e0f524218e25d50113559f078dfb2690206')
layer_info = container_obj.GetLayerInfo(
'sha256:'
'8ac48589692a53a9b8c2d1ceaa6b402665aa7fe667ba51ccc03002300856d8c7')
self.assertEqual('2018-04-05T10:41:28.876407948Z', layer_info['created'])
self.assertEqual(['/bin/sh', '-c', '#(nop) ', 'CMD ["sh"]'],
layer_info['container_config']['Cmd'])
def testGetRepositoriesString(self):
"""Tests GetRepositoriesString() on a Overlay2 storage."""
result_string = self.explorer_object.GetRepositoriesString()
self.maxDiff = None
expected_string = (
'[\n'
' {\n'
' "Repositories": {}, \n'
' "path": "test_data/docker/image/overlay/repositories.json"\n'
' }, \n'
' {\n'
' "Repositories": {\n'
' "busybox": {\n'
' "busybox:latest": "sha256:'
'8ac48589692a53a9b8c2d1ceaa6b402665aa7fe667ba51ccc03002300856d8c7", \n'
' "busybox@sha256:'
'58ac43b2cc92c687a32c8be6278e50a063579655fe3090125dcb2af0ff9e1a64": '
'"sha256:'
'8ac48589692a53a9b8c2d1ceaa6b402665aa7fe667ba51ccc03002300856d8c7"\n'
' }\n'
' }, \n'
' "path": "test_data/docker/image/overlay2/repositories.json"\n'
' }\n'
']\n')
self.assertEqual(expected_string, result_string)
def testMakeMountCommands(self):
"""Tests the BaseStorage.MakeMountCommands function on Overlay2 storage."""
self.maxDiff = None
container_obj = self.explorer_object.GetContainer(
'8e8b7f23eb7cbd4dfe7e91646ddd0e0f524218e25d50113559f078dfb2690206')
commands = container_obj.storage_object.MakeMountCommands(
container_obj, '/mnt')
commands = [' '.join(cmd) for cmd in commands]
expected_commands = [(
'/bin/mount -t overlay overlay -o ro,lowerdir='
'test_data/docker/overlay2/'
'92fd3b3e7d6101bb701743c9518c45b0d036b898c8a3d7cae84e1a06e6829b53/diff:'
'test_data/docker/overlay2/l/OTFSLJCXWCECIG6FVNGRTWUZ7D:'
'test_data/docker/overlay2/l/CH5A7XWSBP2DUPV7V47B7DOOGY /mnt')]
self.assertEqual(expected_commands, commands)
def testGetHistory(self):
"""Tests the BaseStorage.GetHistory function on a Overlay2 storage."""
self.maxDiff = None
container_obj = self.explorer_object.GetContainer(
'8e8b7f23eb7cbd4dfe7e91646ddd0e0f524218e25d50113559f078dfb2690206')
expected = {
'sha256:'
'8ac48589692a53a9b8c2d1ceaa6b402665aa7fe667ba51ccc03002300856d8c7': {
'created_at': '2018-04-05T10:41:28.876407',
'container_cmd': '/bin/sh -c #(nop) CMD ["sh"]',
'size': 0
}
}
self.assertEqual(expected, container_obj.GetHistory(container_obj))
def testGetFullContainerID(self):
"""Tests the DockerExplorerTool._GetFullContainerID function on Overlay2."""
self.assertEqual(
'61ba4e6c012c782186c649466157e05adfd7caa5b551432de51043893cae5353',
self.explorer_object._GetFullContainerID('61ba4e6c012c782'))
with self.assertRaises(Exception) as err:
self.explorer_object._GetFullContainerID('')
self.assertEqual(
'Too many container IDs starting with "": '
'10acac0b3466813c9e1f85e2aa7d06298e51fbfe86bbcb6b7a19dd33d3798f6a, '
'61ba4e6c012c782186c649466157e05adfd7caa5b551432de51043893cae5353, '
'8e8b7f23eb7cbd4dfe7e91646ddd0e0f524218e25d50113559f078dfb2690206, '
'9949fa153b778e39d6cab0a4e0ba60fa34a13fedb1f256d613a2f88c0c98408a, '
'f83f963c67cbd36055f690fc988c1e42be06c1253e80113d1d516778c06b2841',
err.exception.message)
with self.assertRaises(Exception) as err:
self.explorer_object._GetFullContainerID('xx')
self.assertEqual(
'Could not find any container ID starting with "xx"',
err.exception.message)
class TestDownloader(unittest.TestCase):
"""Tests methods in the DockerImageDownloader object."""
TEST_REPO = 'hello-world'
@classmethod
def setUpClass(cls):
cls.dl_object = downloader.DockerImageDownloader(cls.TEST_REPO)
def testSetupRepository(self):
"""Tests the DockerImageDownloader._SetupRepository() method."""
dl = downloader.DockerImageDownloader('')
with tempfile.TemporaryDirectory() as tmp_dir:
dl._output_directory = tmp_dir
dl._SetupRepository('foo')
self.assertEqual('library/foo', dl.repository)
self.assertEqual('latest', dl.tag)
dl._SetupRepository('foo/bar')
self.assertEqual('foo/bar', dl.repository)
self.assertEqual('latest', dl.tag)
dl._SetupRepository('foo:bar')
self.assertEqual('library/foo', dl.repository)
self.assertEqual('bar', dl.tag)
dl._SetupRepository('foo/bar:baz')
self.assertEqual('foo/bar', dl.repository)
self.assertEqual('baz', dl.tag)
def testGetToken(self):
"""Tests that we properly get an access token."""
    # The access token is base64-encoded JSON, so it always starts with 'eyJ'
    # (the base64 encoding of '{"').
self.assertTrue(self.dl_object._access_token.startswith('eyJ'))
self.assertTrue(len(self.dl_object._access_token) > 100)
def testGetBadManifest(self):
"""Tests that GetManifest failes on an unknown image."""
dl = downloader.DockerImageDownloader('non/existing:image')
with tempfile.TemporaryDirectory() as tmp_dir:
dl._output_directory = tmp_dir
with self.assertRaises(errors.DownloaderException):
dl._GetManifest()
def testGetManifest(self):
"""Tests the GetManifest method"""
manifest = self.dl_object._GetManifest()
self.assertTrue(
manifest.get('mediaType') ==
'application/vnd.docker.distribution.manifest.v2+json')
self.assertTrue('layers' in manifest)
def testDownloadDockerFile(self):
"""Tests a Dockerfile is properly downloaded"""
expected_dockerfile = (
'# Pseudo Dockerfile\n'
'# Generated by de.py ({0:s})\n\n'
'COPY file:7bf12aab75c3867a023fe3b8bd6d113d43a4fcc415f3cc27cbcf0fff37b6'
'5a02 in / \n'
'CMD ["/hello"]'.format(de_version))
with tempfile.TemporaryDirectory() as tmp_dir:
self.dl_object._output_directory = tmp_dir
self.dl_object.DownloadPseudoDockerfile()
with open(os.path.join(tmp_dir, 'Dockerfile')) as f:
self.assertEqual(expected_dockerfile, f.read())
class TestDEVolumes(unittest.TestCase):
"""Tests various volumes/bind mounts."""
@classmethod
def setUpClass(cls):
"""Internal method to set up the TestCase on a specific storage."""
cls.driver = 'overlay2'
cls.docker_directory_path = os.path.join('test_data', 'docker')
if not os.path.isdir(cls.docker_directory_path):
docker_tar = os.path.join('test_data', 'vols.v2.tgz')
tar = tarfile.open(docker_tar, 'r:gz')
tar.extractall('test_data')
tar.close()
cls.explorer_object = explorer.Explorer()
cls.explorer_object.SetDockerDirectory(cls.docker_directory_path)
cls.driver_class = storage.Overlay2Storage
cls.storage_version = 2
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.docker_directory_path)
def testGenerateBindMountPoints(self):
"""Tests generating command to mount 'bind' MountPoints."""
self.maxDiff = None
de_object = de.DockerExplorerTool()
de_object._explorer = self.explorer_object
container_obj = de_object._explorer.GetContainer(
'8b6e90cc742bd63f6acb7ecd40ddadb4e5dee27d8db2b739963f7cd2c7bcff4a')
commands = container_obj.storage_object._MakeVolumeMountCommands(
container_obj, '/mnt')
commands = [' '.join(x) for x in commands]
expected_commands = [
('/bin/mount --bind -o ro {0:s}/opt/vols/bind'
' /mnt/opt').format(os.path.abspath('test_data'))]
self.assertEqual(expected_commands, commands)
def testGenerateVolumesMountpoints(self):
"""Tests generating command to mount 'volumes' MountPoints."""
self.maxDiff = None
de_object = de.DockerExplorerTool()
de_object._explorer = self.explorer_object
container_obj = de_object._explorer.GetContainer(
'712909b5ab80d8785841f12e361c218a2faf5365f9ed525f2a0d6b6590ba89cb')
commands = container_obj.storage_object._MakeVolumeMountCommands(
container_obj, '/mnt')
commands = [' '.join(x) for x in commands]
expected_commands = [
('/bin/mount --bind -o ro {0:s}/volumes/'
'f5479c534bbc6e2b9861973c2fbb4863ff5b7b5843c098d7fb1a027fe730a4dc/'
'_data /mnt/opt/vols/volume').format(os.path.abspath('test_data'))]
self.assertEqual(expected_commands, commands)
del DockerTestCase
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jonathan-greig/plaso",
"score": 3
} |
#### File: cli/helpers/language.py
```python
from plaso.cli import tools
from plaso.cli.helpers import interface
from plaso.cli.helpers import manager
from plaso.helpers import language_tags
from plaso.lib import errors
class LanguageArgumentsHelper(interface.ArgumentsHelper):
"""Language CLI arguments helper."""
NAME = 'language'
DESCRIPTION = 'Language command line arguments.'
@classmethod
def AddArguments(cls, argument_group):
"""Adds command line arguments to an argument group.
This function takes an argument parser or an argument group object and adds
to it all the command line arguments this helper supports.
Args:
argument_group (argparse._ArgumentGroup|argparse.ArgumentParser):
argparse group.
"""
argument_group.add_argument(
'--language', metavar='LANGUAGE_TAG', dest='preferred_language',
default=None, type=str, help=(
'The preferred language, which is used for extracting and '
'formatting Windows EventLog message strings. Use "--language '
'list" to see a list of supported language tags. The en-US (LCID '
'0x0409) language is used as fallback if preprocessing could not '
'determine the system language or no language information is '
'available in the winevt-rc.db database.'))
@classmethod
def ParseOptions(cls, options, configuration_object):
"""Parses and validates options.
Args:
options (argparse.Namespace): parser options.
configuration_object (CLITool): object to be configured by the argument
helper.
Raises:
BadConfigObject: when the configuration object is of the wrong type.
BadConfigOption: when the language tag is not supported.
"""
if not isinstance(configuration_object, tools.CLITool):
raise errors.BadConfigObject(
'Configuration object is not an instance of CLITool')
language_tag = cls._ParseStringOption(options, 'preferred_language')
if language_tag and not language_tags.LanguageTagHelper.IsLanguageTag(
language_tag):
raise errors.BadConfigOption(
'Unsupported preferred language tag: {0!s}'.format(language_tag))
setattr(configuration_object, '_preferred_language', language_tag)
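# --- Illustrative sketch, not part of the original plaso module ---
# Shows how AddArguments() could be wired to a stand-alone argument parser;
# the parser and the command line below are only for illustration.
def _example_add_language_argument():
  """Returns parsed options for a hypothetical command line."""
  import argparse  # Local import to keep the sketch self-contained.
  argument_parser = argparse.ArgumentParser()
  LanguageArgumentsHelper.AddArguments(argument_parser)
  # The resulting namespace has preferred_language set to 'de-DE'.
  return argument_parser.parse_args(['--language', 'de-DE'])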
manager.ArgumentHelperManager.RegisterHelper(LanguageArgumentsHelper)
```
#### File: plaso/containers/counts.py
```python
from plaso.containers import interface
from plaso.containers import manager
class EventLabelCount(interface.AttributeContainer):
"""Event label count attribute container.
Attributes:
label (str): event label.
number_of_events (int): number of events with label.
"""
CONTAINER_TYPE = 'event_label_count'
SCHEMA = {
'label': 'str',
'number_of_events': 'int'}
def __init__(self, label=None, number_of_events=None):
"""Initializes an event label count attribute container.
Args:
label (Optional[str]): event label.
      number_of_events (Optional[int]): number of events with the label.
"""
super(EventLabelCount, self).__init__()
self.label = label
self.number_of_events = number_of_events
class ParserCount(interface.AttributeContainer):
"""Parser count attribute container.
Attributes:
name (str): name of the parser or parser plugin.
number_of_events (int): number of events generated by the parser or
parser plugin.
"""
CONTAINER_TYPE = 'parser_count'
SCHEMA = {
'name': 'str',
'number_of_events': 'int'}
def __init__(self, name=None, number_of_events=None):
"""Initializes a parser count attribute container.
Args:
name (Optional[str]): name of the parser or parser plugin.
number_of_events (Optional[int]): number of events generated by
the parser or parser plugin.
"""
super(ParserCount, self).__init__()
self.name = name
self.number_of_events = number_of_events
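# --- Illustrative sketch, not part of the original plaso module ---
# Shows how the count containers could be populated; the label, parser name
# and numbers below are hypothetical.
def _example_counts():
  """Returns hypothetical count containers."""
  return [
      EventLabelCount(label='malware_sample', number_of_events=3),
      ParserCount(name='filestat', number_of_events=1204)]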
manager.AttributeContainersManager.RegisterAttributeContainers([
EventLabelCount, ParserCount])
```
#### File: plaso/containers/reports.py
```python
from plaso.containers import interface
from plaso.containers import manager
class AnalysisReport(interface.AttributeContainer):
"""Analysis report attribute container.
Attributes:
analysis_counter (collections.Counter): counter of analysis results, for
example number of events analyzed and tagged.
event_filter (str): event filter expression that was used when the analysis
plugin was run.
plugin_name (str): name of the analysis plugin that generated the report.
report_dict (dict[str]): ???
text (str): report text.
time_compiled (int): timestamp of the date and time the report was compiled.
"""
CONTAINER_TYPE = 'analysis_report'
def __init__(self, plugin_name=None, text=None):
"""Initializes the analysis report.
Args:
plugin_name (Optional[str]): name of the analysis plugin that generated
the report.
text (Optional[str]): report text.
"""
super(AnalysisReport, self).__init__()
self.analysis_counter = None
self.event_filter = None
self.plugin_name = plugin_name
self.report_dict = None
# TODO: rename text to body?
self.text = text
self.time_compiled = None
def CopyToDict(self):
"""Copies the attribute container to a dictionary.
Returns:
dict[str, object]: attribute values per name.
"""
dictionary = {}
for attribute_name, attribute_value in self.GetAttributes():
if attribute_value is None:
continue
dictionary[attribute_name] = attribute_value
return dictionary
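# --- Illustrative sketch, not part of the original plaso module ---
# Shows that CopyToDict() only copies attributes that were actually set;
# the plugin name and report text below are hypothetical.
def _example_copy_report_to_dict():
  """Returns the dictionary form of a hypothetical analysis report."""
  report = AnalysisReport(plugin_name='tagging', text='10 events tagged')
  # Attributes that are still None (analysis_counter, event_filter,
  # report_dict, time_compiled) are skipped, so only plugin_name and text
  # appear in the returned dictionary.
  return report.CopyToDict()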
manager.AttributeContainersManager.RegisterAttributeContainer(AnalysisReport)
```
#### File: plaso/formatters/interface.py
```python
import abc
import re
from plaso.formatters import logger
class EventFormatterHelper(object):
"""Base class of helper for formatting event data."""
@abc.abstractmethod
def FormatEventValues(self, event_values):
"""Formats event values using the helper.
Args:
event_values (dict[str, object]): event values.
"""
class BooleanEventFormatterHelper(EventFormatterHelper):
"""Helper for formatting boolean event data.
Attributes:
input_attribute (str): name of the attribute that contains the boolean
input value.
output_attribute (str): name of the attribute where the boolean output
value should be stored.
value_if_false (str): output value if the boolean input value is False.
value_if_true (str): output value if the boolean input value is True.
"""
def __init__(
self, input_attribute=None, output_attribute=None, value_if_false=None,
value_if_true=None):
"""Initialized a helper for formatting boolean event data.
Args:
input_attribute (Optional[str]): name of the attribute that contains
the boolean input value.
output_attribute (Optional[str]): name of the attribute where the
boolean output value should be stored.
value_if_false (str): output value if the boolean input value is False.
value_if_true (str): output value if the boolean input value is True.
"""
super(BooleanEventFormatterHelper, self).__init__()
self.input_attribute = input_attribute
self.output_attribute = output_attribute
self.value_if_false = value_if_false
self.value_if_true = value_if_true
def FormatEventValues(self, event_values):
"""Formats event values using the helper.
Args:
event_values (dict[str, object]): event values.
"""
input_value = event_values.get(self.input_attribute, None)
if input_value:
output_value = self.value_if_true
else:
output_value = self.value_if_false
event_values[self.output_attribute] = output_value
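# --- Illustrative sketch, not part of the original plaso module ---
# A minimal example of how a BooleanEventFormatterHelper rewrites event
# values; the attribute names and output strings below are hypothetical.
def _example_boolean_helper():
  """Returns event values rewritten by a hypothetical boolean helper."""
  helper = BooleanEventFormatterHelper(
      input_attribute='is_allocated', output_attribute='allocation_status',
      value_if_false='deleted', value_if_true='allocated')
  event_values = {'is_allocated': True}
  helper.FormatEventValues(event_values)
  # event_values now also contains 'allocation_status': 'allocated'.
  return event_values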
class CustomEventFormatterHelper(EventFormatterHelper):
"""Base class for a helper for custom formatting of event data."""
DATA_TYPE = ''
IDENTIFIER = ''
@abc.abstractmethod
def FormatEventValues(self, event_values):
"""Formats event values using the helper.
Args:
event_values (dict[str, object]): event values.
"""
class EnumerationEventFormatterHelper(EventFormatterHelper):
"""Helper for formatting enumeration event data.
Attributes:
default (str): default value.
input_attribute (str): name of the attribute that contains the enumeration
input value.
output_attribute (str): name of the attribute where the enumeration output
value should be stored.
values (dict[str, str]): mapping of enumeration input and output values.
"""
def __init__(
self, default=None, input_attribute=None, output_attribute=None,
values=None):
"""Initialized a helper for formatting enumeration event data.
Args:
default (Optional[str]): default value.
input_attribute (Optional[str]): name of the attribute that contains
the enumeration input value.
output_attribute (Optional[str]): name of the attribute where the
enumeration output value should be stored.
values (Optional[dict[str, str]]): mapping of enumeration input and
output values.
"""
super(EnumerationEventFormatterHelper, self).__init__()
self.default = default
self.input_attribute = input_attribute
self.output_attribute = output_attribute
self.values = values or {}
def FormatEventValues(self, event_values):
"""Formats event values using the helper.
If default value is None and there is no corresponding enumeration value
then the original value is used.
Args:
event_values (dict[str, object]): event values.
"""
input_value = event_values.get(self.input_attribute, None)
if input_value is not None:
default_value = self.default
if default_value is None:
default_value = input_value
event_values[self.output_attribute] = self.values.get(
input_value, default_value)
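# --- Illustrative sketch, not part of the original plaso module ---
# Shows how an EnumerationEventFormatterHelper maps a raw value to a readable
# string and falls back to the default; the attribute names and values below
# are hypothetical.
def _example_enumeration_helper():
  """Returns event values rewritten by a hypothetical enumeration helper."""
  helper = EnumerationEventFormatterHelper(
      default='UNKNOWN', input_attribute='severity',
      output_attribute='severity_string', values={1: 'ERROR', 2: 'WARNING'})
  event_values = {'severity': 2}
  helper.FormatEventValues(event_values)
  # event_values['severity_string'] is now 'WARNING'; an unmapped severity
  # such as 5 would yield 'UNKNOWN'.
  return event_values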
class FlagsEventFormatterHelper(EventFormatterHelper):
"""Helper for formatting flags event data.
Attributes:
input_attribute (str): name of the attribute that contains the flags
input value.
output_attribute (str): name of the attribute where the flags output
value should be stored.
values (dict[str, str]): mapping of flags input and output values.
"""
def __init__(
self, input_attribute=None, output_attribute=None, values=None):
"""Initialized a helper for formatting flags event data.
Args:
input_attribute (Optional[str]): name of the attribute that contains
the flags input value.
output_attribute (Optional[str]): name of the attribute where the
flags output value should be stored.
values (Optional[dict[str, str]]): mapping of flags input and output
values.
"""
super(FlagsEventFormatterHelper, self).__init__()
self.input_attribute = input_attribute
self.output_attribute = output_attribute
self.values = values or {}
def FormatEventValues(self, event_values):
"""Formats event values using the helper.
Args:
event_values (dict[str, object]): event values.
"""
input_value = event_values.get(self.input_attribute, None)
if input_value is None:
return
output_values = []
for flag, mapped_value in self.values.items():
if flag & input_value:
output_values.append(mapped_value)
event_values[self.output_attribute] = ', '.join(output_values)
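# --- Illustrative sketch, not part of the original plaso module ---
# Shows how a FlagsEventFormatterHelper decomposes a bitmask into a
# comma-separated string; the flag values below are hypothetical.
def _example_flags_helper():
  """Returns event values rewritten by a hypothetical flags helper."""
  helper = FlagsEventFormatterHelper(
      input_attribute='access_flags', output_attribute='access_string',
      values={0x01: 'READ', 0x02: 'WRITE', 0x04: 'EXECUTE'})
  event_values = {'access_flags': 0x03}
  helper.FormatEventValues(event_values)
  # Both READ (0x01) and WRITE (0x02) are set in 0x03, so 'access_string'
  # becomes 'READ, WRITE'.
  return event_values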
class EventFormatter(object):
"""Base class to format event values.
Attributes:
custom_helpers (list[str]): identifiers of custom event formatter helpers.
helpers (list[EventFormatterHelper]): event formatter helpers.
"""
# The format string can be defined as:
# {name}, {name:format}, {name!conversion}, {name!conversion:format}
_FORMAT_STRING_ATTRIBUTE_NAME_RE = re.compile(
'{([a-z][a-zA-Z0-9_]*)[!]?[^:}]*[:]?[^}]*}')
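  # For example, applied to the format string '{path} with status {status:d}'
  # the expression captures the attribute names 'path' and 'status'; optional
  # conversion and format specifications such as '!s' or ':d' are matched but
  # not captured.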
def __init__(self, data_type='internal'):
"""Initializes an event formatter.
Args:
data_type (Optional[str]): unique identifier for the event data supported
by the formatter.
"""
super(EventFormatter, self).__init__()
self._data_type = data_type
self._format_string_attribute_names = None
self.custom_helpers = []
self.helpers = []
@property
def data_type(self):
"""str: unique identifier for the event data supported by the formatter."""
return self._data_type.lower()
def _FormatMessage(self, format_string, event_values):
"""Determines the formatted message.
Args:
format_string (str): message format string.
event_values (dict[str, object]): event values.
Returns:
str: formatted message.
"""
try:
message_string = format_string.format(**event_values)
except KeyError as exception:
data_type = event_values.get('data_type', 'N/A')
display_name = event_values.get('display_name', 'N/A')
event_identifier = event_values.get('uuid', 'N/A')
parser_chain = event_values.get('parser', 'N/A')
error_message = (
'unable to format string: "{0:s}" missing required event '
'value: {1!s}').format(format_string, exception)
error_message = (
'Event: {0:s} data type: {1:s} display name: {2:s} '
'parser chain: {3:s} with error: {4:s}').format(
event_identifier, data_type, display_name, parser_chain,
error_message)
logger.error(error_message)
attribute_values = []
for attribute, value in event_values.items():
attribute_values.append('{0:s}: {1!s}'.format(attribute, value))
message_string = ' '.join(attribute_values)
except UnicodeDecodeError as exception:
data_type = event_values.get('data_type', 'N/A')
display_name = event_values.get('display_name', 'N/A')
event_identifier = event_values.get('uuid', 'N/A')
parser_chain = event_values.get('parser', 'N/A')
error_message = 'Unicode decode error: {0!s}'.format(exception)
error_message = (
'Event: {0:s} data type: {1:s} display name: {2:s} '
'parser chain: {3:s} with error: {4:s}').format(
event_identifier, data_type, display_name, parser_chain,
error_message)
logger.error(error_message)
message_string = ''
    # Strip carriage return and linefeed from the message strings.
# Using replace function here because it is faster than re.sub() or
# string.strip().
return message_string.replace('\r', '').replace('\n', '')
def FormatEventValues(self, event_values):
"""Formats event values using the helpers.
Args:
event_values (dict[str, object]): event values.
"""
for helper in self.helpers:
helper.FormatEventValues(event_values)
@abc.abstractmethod
def GetFormatStringAttributeNames(self):
"""Retrieves the attribute names in the format string.
Returns:
set(str): attribute names.
"""
# pylint: disable=unused-argument
def AddCustomHelper(
self, identifier, input_attribute=None, output_attribute=None):
"""Adds a custom event formatter helper.
Args:
identifier (str): identifier.
input_attribute (Optional[str]): name of the attribute that contains
the input value.
output_attribute (Optional[str]): name of the attribute where the
output value should be stored.
"""
self.custom_helpers.append(identifier)
def AddHelper(self, helper):
"""Adds an event formatter helper.
Args:
helper (EventFormatterHelper): event formatter helper to add.
"""
self.helpers.append(helper)
@abc.abstractmethod
def GetMessage(self, event_values):
"""Determines the message.
Args:
event_values (dict[str, object]): event values.
Returns:
str: message.
"""
@abc.abstractmethod
def GetMessageShort(self, event_values):
"""Determines the short message.
Args:
event_values (dict[str, object]): event values.
Returns:
str: short message.
"""
class BasicEventFormatter(EventFormatter):
"""Format event values using a message format string.
Attributes:
custom_helpers (list[str]): identifiers of custom event formatter helpers.
helpers (list[EventFormatterHelper]): event formatter helpers.
"""
def __init__(
self, data_type='basic', format_string=None, format_string_short=None):
"""Initializes a basic event formatter.
The syntax of the format strings is similar to that of format() where
    the placeholder for a certain event object attribute is defined as
{attribute_name}.
Args:
data_type (Optional[str]): unique identifier for the event data supported
by the formatter.
format_string (Optional[str]): (long) message format string.
format_string_short (Optional[str]): short message format string.
"""
super(BasicEventFormatter, self).__init__(data_type=data_type)
self._format_string_attribute_names = None
self._format_string = format_string
self._format_string_short = format_string_short
def GetFormatStringAttributeNames(self):
"""Retrieves the attribute names in the format string.
Returns:
set(str): attribute names.
"""
if self._format_string_attribute_names is None:
self._format_string_attribute_names = (
self._FORMAT_STRING_ATTRIBUTE_NAME_RE.findall(
self._format_string))
return set(self._format_string_attribute_names)
def GetMessage(self, event_values):
"""Determines the message.
Args:
event_values (dict[str, object]): event values.
Returns:
str: message.
"""
return self._FormatMessage(self._format_string, event_values)
def GetMessageShort(self, event_values):
"""Determines the short message.
Args:
event_values (dict[str, object]): event values.
Returns:
str: short message.
"""
if self._format_string_short:
format_string = self._format_string_short
else:
format_string = self._format_string
short_message_string = self._FormatMessage(format_string, event_values)
# Truncate the short message string if necessary.
if len(short_message_string) > 80:
short_message_string = '{0:s}...'.format(short_message_string[:77])
return short_message_string
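# --- Illustrative sketch, not part of the original plaso module ---
# A minimal example of a BasicEventFormatter combined with a formatter helper;
# the data type, format strings and attribute names are hypothetical.
def _example_basic_formatter():
  """Returns the message produced by a hypothetical basic formatter."""
  formatter = BasicEventFormatter(
      data_type='example:log:entry',
      format_string='User {username} performed {action}',
      format_string_short='{action}')
  formatter.AddHelper(BooleanEventFormatterHelper(
      input_attribute='success', output_attribute='action',
      value_if_false='a failed login', value_if_true='a successful login'))
  event_values = {'username': 'alice', 'success': True}
  formatter.FormatEventValues(event_values)
  # GetFormatStringAttributeNames() returns {'username', 'action'} and the
  # call below returns 'User alice performed a successful login'.
  return formatter.GetMessage(event_values)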
class ConditionalEventFormatter(EventFormatter):
"""Conditionally format event values using format string pieces."""
_DEFAULT_FORMAT_STRING_SEPARATOR = ' '
def __init__(
self, data_type='conditional', format_string_pieces=None,
format_string_separator=None, format_string_short_pieces=None):
"""Initializes a conditional event formatter.
    The syntax of the format string pieces is similar to that of the basic event
    formatter (BasicEventFormatter). Every format string piece should contain
    at most one unique attribute name. Format string pieces without an
attribute name are supported.
Args:
data_type (Optional[str]): unique identifier for the event data supported
by the formatter.
format_string_pieces (Optional[list[str]]): (long) message format string
pieces.
format_string_separator (Optional[str]): string by which separate format
string pieces should be joined.
format_string_short_pieces (Optional[list[str]]): short message format
string pieces.
"""
if format_string_separator is None:
format_string_separator = self._DEFAULT_FORMAT_STRING_SEPARATOR
super(ConditionalEventFormatter, self).__init__(data_type=data_type)
self._format_string_pieces = format_string_pieces or []
self._format_string_pieces_map = []
self._format_string_separator = format_string_separator
self._format_string_short_pieces = format_string_short_pieces or []
self._format_string_short_pieces_map = []
def _CreateFormatStringMap(
self, format_string_pieces, format_string_pieces_map):
"""Creates a format string map.
The format string pieces map is a list containing the attribute name
per format string piece. E.g. ["Description: {description}"] would be
mapped to: [0] = "description". If the string piece does not contain
an attribute name it is treated as text that does not needs formatting.
Args:
format_string_pieces (list[str]): format string pieces.
format_string_pieces_map (list[str]): format string pieces map.
Raises:
RuntimeError: when an invalid format string piece is encountered.
"""
for format_string_piece in format_string_pieces:
attribute_names = self._FORMAT_STRING_ATTRIBUTE_NAME_RE.findall(
format_string_piece)
if len(set(attribute_names)) > 1:
raise RuntimeError((
'Invalid format string piece: [{0:s}] contains more than 1 '
'attribute name.').format(format_string_piece))
if not attribute_names:
# The text format string piece is stored as an empty map entry to keep
# the index in the map equal to the format string pieces.
attribute_name = ''
else:
attribute_name = attribute_names[0]
format_string_pieces_map.append(attribute_name)
def _CreateFormatStringMaps(self):
"""Creates the format string maps.
Maps are built of the string pieces and their corresponding attribute
name to optimize conditional string formatting.
Raises:
RuntimeError: when an invalid format string piece is encountered.
"""
self._format_string_pieces_map = []
self._CreateFormatStringMap(
self._format_string_pieces, self._format_string_pieces_map)
self._format_string_short_pieces_map = []
self._CreateFormatStringMap(
self._format_string_short_pieces, self._format_string_short_pieces_map)
def _ConditionalFormatMessage(
self, format_string_pieces, format_string_pieces_map, event_values):
"""Determines the conditional formatted message.
Args:
      format_string_pieces (list[str]): format string pieces.
      format_string_pieces_map (list[str]): format string pieces map.
event_values (dict[str, object]): event values.
Returns:
str: conditional formatted message.
Raises:
RuntimeError: when an invalid format string piece is encountered.
"""
string_pieces = []
for map_index, attribute_name in enumerate(format_string_pieces_map):
if not attribute_name or event_values.get(
attribute_name, None) is not None:
string_pieces.append(format_string_pieces[map_index])
format_string = self._format_string_separator.join(string_pieces)
return self._FormatMessage(format_string, event_values)
def GetFormatStringAttributeNames(self):
"""Retrieves the attribute names in the format string.
Returns:
set(str): attribute names.
"""
if self._format_string_attribute_names is None:
self._format_string_attribute_names = []
for format_string_piece in self._format_string_pieces:
attribute_names = self._FORMAT_STRING_ATTRIBUTE_NAME_RE.findall(
format_string_piece)
if attribute_names:
self._format_string_attribute_names.extend(attribute_names)
return set(self._format_string_attribute_names)
def GetMessage(self, event_values):
"""Determines the message.
Args:
event_values (dict[str, object]): event values.
Returns:
str: message.
"""
if not self._format_string_pieces_map:
self._CreateFormatStringMaps()
return self._ConditionalFormatMessage(
self._format_string_pieces, self._format_string_pieces_map,
event_values)
def GetMessageShort(self, event_values):
"""Determines the short message.
Args:
event_values (dict[str, object]): event values.
Returns:
str: short message.
"""
if not self._format_string_pieces_map:
self._CreateFormatStringMaps()
if (self._format_string_short_pieces and
self._format_string_short_pieces != ['']):
format_string_pieces = self._format_string_short_pieces
format_string_pieces_map = self._format_string_short_pieces_map
else:
format_string_pieces = self._format_string_pieces
format_string_pieces_map = self._format_string_pieces_map
short_message_string = self._ConditionalFormatMessage(
format_string_pieces, format_string_pieces_map, event_values)
# Truncate the short message string if necessary.
if len(short_message_string) > 80:
short_message_string = '{0:s}...'.format(short_message_string[:77])
return short_message_string
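# --- Illustrative sketch, not part of the original plaso module ---
# Shows how a ConditionalEventFormatter drops format string pieces whose
# attribute is not present in the event values; the pieces and attribute
# names are hypothetical.
def _example_conditional_formatter():
  """Returns the message produced by a hypothetical conditional formatter."""
  formatter = ConditionalEventFormatter(
      data_type='example:log:entry',
      format_string_pieces=[
          'User: {username}', 'PID: {process_identifier}',
          'Action: {action}'])
  event_values = {'username': 'alice', 'action': 'login'}
  # 'PID: {process_identifier}' is omitted because the attribute is not set,
  # so this returns 'User: alice Action: login'.
  return formatter.GetMessage(event_values)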
```
#### File: plaso/multi_process/merge_helpers.py
```python
from plaso.containers import analysis_results
from plaso.containers import artifacts
from plaso.containers import event_sources
from plaso.containers import events
from plaso.containers import reports
from plaso.containers import warnings
class BaseTaskMergeHelper(object):
"""Interface of heler for merging task related attribute containers.
Attributes:
task_identifier (str): identifier of the task that is merged.
"""
_CONTAINER_TYPES = ()
def __init__(self, task_storage_reader, task_identifier):
"""Initialize a helper for merging task related attribute containers.
Args:
task_storage_reader (StorageReader): task storage reader.
task_identifier (str): identifier of the task that is merged.
"""
super(BaseTaskMergeHelper, self).__init__()
self._container_identifier_mappings = {}
self._generator = self._GetAttributeContainers(task_storage_reader)
self._task_storage_reader = task_storage_reader
self.fully_merged = False
self.task_identifier = task_identifier
def _GetAttributeContainers(self, task_storage_reader):
"""Retrieves attribute containers to merge.
Args:
task_storage_reader (StorageReader): task storage reader.
Yields:
AttributeContainer: attribute container.
"""
for container_type in self._CONTAINER_TYPES:
for container in task_storage_reader.GetAttributeContainers(
container_type):
yield container
self.fully_merged = True
def Close(self):
"""Closes the task storage reader."""
self._task_storage_reader.Close()
self._task_storage_reader = None
def GetAttributeContainer(self):
"""Retrieves an attribute container to merge.
Returns:
AttributeContainer: attribute container or None if not available.
"""
try:
container = next(self._generator)
except StopIteration:
container = None
return container
def GetAttributeContainerIdentifier(self, lookup_key):
"""Retrieves an attribute container.
Args:
lookup_key (str): lookup key that identifies the attribute container.
Returns:
AttributeContainerIdentifier: attribute container identifier that maps
to the lookup key or None if not available.
"""
return self._container_identifier_mappings.get(lookup_key, None)
def SetAttributeContainerIdentifier(self, lookup_key, identifier):
"""Sets an attribute container.
Args:
lookup_key (str): lookup key that identifies the attribute container.
identifier (AttributeContainerIdentifier): attribute container identifier.
"""
self._container_identifier_mappings[lookup_key] = identifier
class AnalysisTaskMergeHelper(BaseTaskMergeHelper):
"""Assists in merging attribute containers of an analysis task."""
# Container types produced by the analysis worker processes that need to be
# merged. Note that some container types reference other container types and
# therefore container types that are referenced, must be defined before
# container types that reference them.
_CONTAINER_TYPES = (
events.EventTag.CONTAINER_TYPE,
reports.AnalysisReport.CONTAINER_TYPE,
warnings.AnalysisWarning.CONTAINER_TYPE,
analysis_results.BrowserSearchAnalysisResult.CONTAINER_TYPE,
analysis_results.ChromeExtensionAnalysisResult.CONTAINER_TYPE)
class ExtractionTaskMergeHelper(BaseTaskMergeHelper):
"""Assists in merging attribute containers of an extraction task.
Attributes:
event_data_parser_mappings (dict[str, str]): maps event data lookup keys
to parser names.
"""
# Container types produced by the extraction worker processes that need to be
# merged. Note that some container types reference other container types and
# therefore container types that are referenced, must be defined before
# container types that reference them.
_CONTAINER_TYPES = (
event_sources.EventSource.CONTAINER_TYPE,
events.EventDataStream.CONTAINER_TYPE,
events.EventData.CONTAINER_TYPE,
events.EventObject.CONTAINER_TYPE,
warnings.ExtractionWarning.CONTAINER_TYPE,
warnings.RecoveryWarning.CONTAINER_TYPE,
artifacts.WindowsEventLogMessageFileArtifact.CONTAINER_TYPE,
artifacts.WindowsEventLogMessageStringArtifact.CONTAINER_TYPE)
def __init__(self, task_storage_reader, task_identifier):
"""Initialize a helper for merging extraction task attribute containers.
Args:
task_storage_reader (StorageReader): task storage reader.
task_identifier (str): identifier of the task that is merged.
"""
super(ExtractionTaskMergeHelper, self).__init__(
task_storage_reader, task_identifier)
self.event_data_parser_mappings = {}
```
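The helpers above expose a pull-style API: `GetAttributeContainer` returns containers one at a time and `fully_merged` flips to True once the underlying generator is exhausted. The sketch below shows one way a caller might drain a helper; `task_storage_reader` and `merge_container` are hypothetical stand-ins for the storage reader and merge callback that the multi-process engine normally supplies, not part of this module.
```python
# Hypothetical usage sketch: `task_storage_reader` and `merge_container`
# stand in for objects provided elsewhere by the multi-process engine.
def drain_task(task_storage_reader, task_identifier, merge_container):
  """Merges all attribute containers of a single extraction task."""
  merge_helper = ExtractionTaskMergeHelper(
      task_storage_reader, task_identifier)

  container = merge_helper.GetAttributeContainer()
  while container:
    merge_container(container)
    container = merge_helper.GetAttributeContainer()

  # fully_merged is only set after the last container has been yielded.
  assert merge_helper.fully_merged
  merge_helper.Close()
```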
#### File: parsers/plist_plugins/spotlight.py
```python
from dfdatetime import time_elements as dfdatetime_time_elements
from plaso.containers import plist_event
from plaso.containers import time_events
from plaso.lib import definitions
from plaso.parsers import plist
from plaso.parsers.plist_plugins import interface
class SpotlightPlugin(interface.PlistPlugin):
"""Plist parser plugin for Spotlight searched terms plist files.
Further information about extracted fields:
name of the item:
search term.
PATH:
path of the program associated to the term.
LAST_USED:
last time when it was executed.
DISPLAY_NAME:
the display name of the program associated.
"""
NAME = 'spotlight'
DATA_FORMAT = 'Spotlight plist file'
PLIST_PATH_FILTERS = frozenset([
interface.PlistPathFilter('com.apple.spotlight.plist')])
PLIST_KEYS = frozenset(['UserShortcuts'])
# pylint: disable=arguments-differ
def _ParsePlist(self, parser_mediator, match=None, **unused_kwargs):
"""Extracts relevant Spotlight entries.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.
"""
shortcuts = match.get('UserShortcuts', {})
for search_text, data in shortcuts.items():
datetime_value = data.get('LAST_USED', None)
if not datetime_value:
continue
display_name = data.get('DISPLAY_NAME', '<DISPLAY_NAME>')
path = data.get('PATH', '<PATH>')
event_data = plist_event.PlistTimeEventData()
event_data.desc = (
'Spotlight term searched "{0:s}" associate to {1:s} ({2:s})').format(
search_text, display_name, path)
event_data.key = search_text
event_data.root = '/UserShortcuts'
date_time = dfdatetime_time_elements.TimeElementsInMicroseconds()
date_time.CopyFromDatetime(datetime_value)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
plist.PlistParser.RegisterPlugin(SpotlightPlugin)
```
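For context, `_ParsePlist` only looks at the `UserShortcuts` key and expects a dictionary of search terms whose values carry `LAST_USED`, `DISPLAY_NAME` and `PATH`. The structure below is a hand-built assumption based solely on the keys read in the method above, not data taken from a real com.apple.spotlight.plist.
```python
import datetime

# Hypothetical plist content shaped after the keys read by _ParsePlist().
match = {
    'UserShortcuts': {
        'calculator': {
            'DISPLAY_NAME': 'Calculator',
            'PATH': '/Applications/Calculator.app',
            'LAST_USED': datetime.datetime(2014, 4, 7, 15, 18, 49),
        },
        # Entries without LAST_USED are skipped by the plugin.
        'notes': {'DISPLAY_NAME': 'Notes', 'PATH': '/Applications/Notes.app'},
    }
}
# SpotlightPlugin()._ParsePlist(parser_mediator, match=match) would produce
# one event for the 'calculator' entry only.
```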
#### File: parsers/sqlite_plugins/twitter_android.py
```python
from dfdatetime import java_time as dfdatetime_java_time
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import definitions
from plaso.parsers import sqlite
from plaso.parsers.sqlite_plugins import interface
class TwitterAndroidContactEventData(events.EventData):
"""Twitter on Android contact event data.
Attributes:
description (str): twitter account profile description.
followers (int): number of followers.
friends (int): number of following.
identifier (int): contact row id.
image_url (str): profile picture url.
location (str): twitter account profile location content.
name (str): twitter account name.
query (str): SQL query that was used to obtain the event data.
statuses (int): twitter account number of tweets.
user_identifier (int): twitter account id.
username (str): twitter account handler.
web_url (str): twitter account profile url content.
"""
DATA_TYPE = 'twitter:android:contact'
def __init__(self):
"""Initializes event data."""
super(TwitterAndroidContactEventData,
self).__init__(data_type=self.DATA_TYPE)
self.description = None
self.followers = None
self.friends = None
self.identifier = None
self.image_url = None
self.location = None
self.name = None
self.query = None
self.statuses = None
self.user_identifier = None
self.username = None
self.web_url = None
class TwitterAndroidStatusEventData(events.EventData):
"""Twitter on Android status event data.
Attributes:
author_identifier (int): twitter account identifier.
content (str): status content.
favorited (int): favorited flag as 0/1 value.
identifier (int): status row identifier.
query (str): SQL query that was used to obtain the event data.
retweeted (int): retweeted flag as 0/1 value.
username (str): twitter account handler.
"""
DATA_TYPE = 'twitter:android:status'
def __init__(self):
"""Initializes event data."""
super(TwitterAndroidStatusEventData,
self).__init__(data_type=self.DATA_TYPE)
self.identifier = None
self.author_identifier = None
self.username = None
self.content = None
self.favorited = None
self.query = None
self.retweeted = None
class TwitterAndroidSearchEventData(events.EventData):
"""Twitter on Android search event data.
Attributes:
name (str): twitter name handler.
query (str): SQL query that was used to obtain the event data.
search_query (str): search query.
"""
DATA_TYPE = 'twitter:android:search'
def __init__(self):
"""Initializes event data."""
super(TwitterAndroidSearchEventData,
self).__init__(data_type=self.DATA_TYPE)
self.name = None
self.query = None
self.search_query = None
class TwitterAndroidPlugin(interface.SQLitePlugin):
"""SQLite parser plugin for Twitter on Android database files."""
NAME = 'twitter_android'
DATA_FORMAT = 'Twitter on Android SQLite database file'
REQUIRED_STRUCTURE = {
'search_queries': frozenset([
'name', 'query', 'time']),
'statuses': frozenset([
'_id', 'author_id', 'content', 'created', 'favorited', 'retweeted']),
'users': frozenset([
'username', 'user_id', '_id', 'name', 'profile_created',
'description', 'web_url', 'location', 'followers', 'friends',
'statuses', 'image_url', 'updated', 'friendship_time'])}
QUERIES = [
('SELECT name, query, time FROM search_queries', 'ParseSearchRow'),
(('SELECT statuses._id AS _id, statuses.author_id AS author_id, '
'users.username AS username, statuses.content AS content, '
'statuses.created AS time, statuses.favorited AS favorited, '
'statuses.retweeted AS retweeted FROM statuses LEFT JOIN users ON '
'statuses.author_id = users.user_id'), 'ParseStatusRow'),
(('SELECT _id, user_id, username, name, profile_created, description, '
'web_url, location, followers, friends, statuses, image_url, updated, '
'friendship_time FROM users'), 'ParseContactRow')]
SCHEMAS = [{
'activities': (
'CREATE TABLE activities (_id INTEGER PRIMARY KEY,type INT,event '
'INT,created_at INT,hash INT,max_position INT,min_position '
'INT,sources_size INT,source_type INT,sources BLOB,targets_size '
'INT,target_type INT,targets BLOB,target_objects_size '
'INT,target_object_type INT,target_objects BLOB,is_last INT,tag '
'INT,magic_rec_id INT,UNIQUE (type, max_position) ON CONFLICT '
'REPLACE)'),
'ads_account_permissions': (
'CREATE TABLE ads_account_permissions (_id INTEGER PRIMARY '
'KEY,promotable_users BLOB,last_synced INT NOT NULL)'),
'android_metadata': (
'CREATE TABLE android_metadata (locale TEXT)'),
'business_profiles': (
'CREATE TABLE business_profiles (_id INTEGER PRIMARY KEY,user_id '
'INT UNIQUE NOT NULL,business_profile BLOB,last_synced INT NOT '
'NULL)'),
'card_state': (
'CREATE TABLE card_state (_id INTEGER PRIMARY KEY '
'AUTOINCREMENT,card_status_id INT,card_id INT, card_state BLOB)'),
'category_timestamp': (
'CREATE TABLE category_timestamp (_id INTEGER PRIMARY '
'KEY,cat_status_id INT NOT NULL,cat_tag INT NOT NULL,cat_timestamp '
'INT NOT NULL)'),
'clusters': (
'CREATE TABLE clusters (_id INTEGER PRIMARY KEY,cl_cluster_id TEXT '
'UNIQUE NOT NULL,cl_type INT,cl_title TEXT,cl_subtitle TEXT,cl_size '
'INT,cl_timestamp INT,cl_content BLOB)'),
'conversation_entries': (
'CREATE TABLE conversation_entries (_id INTEGER PRIMARY '
'KEY,entry_id INT UNIQUE NOT NULL,sort_entry_id INT UNIQUE NOT '
'NULL,conversation_id TEXT,user_id INT,created INT,entry_type '
'INT,data BLOB,request_id TEXT)'),
'conversation_participants': (
'CREATE TABLE conversation_participants (_id INTEGER PRIMARY '
'KEY,conversation_id TEXT NOT NULL,user_id TEXT NOT NULL,join_time '
'INT NOT NULL,participant_type INT NOT NULL)'),
'conversations': (
'CREATE TABLE conversations (_id INTEGER PRIMARY '
'KEY,conversation_id TEXT UNIQUE NOT NULL,title TEXT,avatar_url '
'TEXT,type INT,sort_event_id BIGINT,last_readable_event_id '
'BIGINT,last_read_event_id BIGINT,sort_timestamp BIGINT,is_muted '
'INT,min_event_id BIGINT,is_hidden INT,has_more INT,read_only INT)'),
'cursors': (
'CREATE TABLE cursors (_id INTEGER PRIMARY KEY,kind INT,type '
'INT,owner_id INT,ref_id TEXT,next TEXT)'),
'dismiss_info': (
'CREATE TABLE dismiss_info(timeline_id INTEGER REFERENCES '
'timeline(_id),feedback_action_id INTEGER REFERENCES '
'feedback_action(_id),UNIQUE(timeline_id,feedback_action_id))'),
'feedback_action': (
'CREATE TABLE feedback_action(_id INTEGER PRIMARY KEY '
'AUTOINCREMENT,feedback_type TEXT,prompt TEXT,confirmation '
'TEXT,UNIQUE(feedback_type,prompt,confirmation))'),
'list_mapping': (
'CREATE TABLE list_mapping (_id INTEGER PRIMARY '
'KEY,list_mapping_list_id TEXT,list_mapping_type '
'INT,list_mapping_user_id INT,list_is_last INT)'),
'locations': (
'CREATE TABLE locations (_id INTEGER PRIMARY KEY,name TEXT,woeid '
'INT,country TEXT,country_code TEXT)'),
'moments': (
'CREATE TABLE moments (_id INTEGER PRIMARY KEY,title TEXT NOT '
'NULL,can_subscribe INT,is_live INT,is_sensitive '
'INT,subcategory_string TEXT,subcategory_favicon_url '
'TEXT,time_string TEXT,duration_string TEXT,is_subscribed '
'INT,description TEXT NOT NULL,moment_url TEXT,num_subscribers '
'INT,author_info BLOB,promoted_content BLOB)'),
'moments_guide': (
'CREATE TABLE moments_guide (_id INTEGER PRIMARY KEY,moment_id INT '
'NOT NULL,section_id INT NOT NULL,tweet_id INT NOT NULL, crop_data '
'BLOB,media_id INT,media_url TEXT,media_size BLOB,FOREIGN '
'KEY(section_id) REFERENCES moments_sections(_id) ON DELETE '
'CASCADE)'),
'moments_guide_categories': (
'CREATE TABLE moments_guide_categories (_id INTEGER PRIMARY '
'KEY,category_id TEXT NOT NULL,is_default_category INT NOT '
'NULL,category_name TEXT NOT NULL,fetch_timestamp INT NOT NULL)'),
'moments_guide_user_states': (
'CREATE TABLE moments_guide_user_states (_id INTEGER PRIMARY '
'KEY,moment_id INT NOT NULL,is_read INT,is_updated INT,FOREIGN '
'KEY(moment_id) REFERENCES moments(_id) ON DELETE CASCADE)'),
'moments_pages': (
'CREATE TABLE moments_pages (_id INTEGER PRIMARY KEY,moment_id INT '
'NOT NULL,page_id TEXT,type BLOB,tweet_id INT,display_mode '
'BLOB,page_number INT,crop_data BLOB,theme_data BLOB,media_id '
'INT,media_size BLOB,media_url TEXT,last_read_timestamp INT,FOREIGN '
'KEY(moment_id) REFERENCES moments(_id))'),
'moments_sections': (
'CREATE TABLE moments_sections (_id INTEGER PRIMARY '
'KEY,section_title TEXT,section_type BLOB NOT NULL,section_group_id '
'TEXT,section_group_type INT NOT NULL)'),
'moments_visit_badge': (
'CREATE TABLE moments_visit_badge (_id INTEGER PRIMARY '
'KEY,moment_id INT UNIQUE NOT NULL,is_new_since_visit '
'INT,is_updated_since_visit INT)'),
'news': (
'CREATE TABLE news (_id INTEGER PRIMARY KEY AUTOINCREMENT,country '
'TEXT,language TEXT,topic_id INT,news_id TEXT,title TEXT,image_url '
'TEXT,author_name TEXT,article_description TEXT,article_url '
'TEXT,tweet_count INT,start_time INT,news_id_hash INT)'),
'notifications': (
'CREATE TABLE notifications (_id INTEGER PRIMARY KEY,type '
'INT,notif_id INT,source_user_name TEXT,s_name TEXT,s_id '
'INT,notif_txt TEXT,aggregation_data TEXT,notif_extra_data BLOB)'),
'one_click': (
'CREATE TABLE one_click (_id INTEGER PRIMARY KEY,topic '
'TEXT,filter_name TEXT,filter_location TEXT,filter_follow INT)'),
'order_history': (
'CREATE TABLE order_history (_id INTEGER PRIMARY KEY,ordered_at INT '
',order_id INT ,data BLOB)'),
'promoted_retry': (
'CREATE TABLE promoted_retry(impression_id TEXT,event INT NOT '
'NULL,is_earned INT NOT NULL,trend_id INT,num_retries INT NOT '
'NULL,url TEXT,video_playlist_url TEXT,video_content_uuid '
'TEXT,video_content_type TEXT,video_cta_url TEXT,video_cta_app_id '
'TEXT,video_cta_app_name TEXT,card_event TEXT,PRIMARY '
'KEY(impression_id,event,is_earned,trend_id))'),
'prompts': (
'CREATE TABLE prompts (_id INTEGER PRIMARY KEY,p_id INT,p_format '
'TEXT,p_template TEXT,p_header TEXT,p_text TEXT,p_action_text '
'TEXT,p_action_url TEXT,p_icon TEXT,p_background_image_url '
'TEXT,p_persistence TEXT,p_entities BLOB,p_header_entities '
'BLOB,p_status_id LONG,p_insertion_index INT,p_trigger TEXT)'),
'rankings': (
'CREATE TABLE rankings (_id INTEGER PRIMARY KEY '
'AUTOINCREMENT,country TEXT,language TEXT,granularity TEXT,category '
'TEXT,date INT)'),
'search_queries': (
'CREATE TABLE search_queries (_id INTEGER PRIMARY KEY,type INT,name '
'TEXT NOT NULL,query TEXT NOT NULL,query_id INT,time INT,latitude '
'REAL,longitude REAL,radius REAL,location TEXT,pc '
'BLOB,cluster_titles BLOB)'),
'search_results': (
'CREATE TABLE search_results (_id INTEGER PRIMARY KEY,search_id '
'INT,s_type INT,data_type INT,type_id INT,polled INT,data_id '
'INT,related_data BLOB,cluster_id INT)'),
'search_suggestion_metadata': (
'CREATE TABLE search_suggestion_metadata (_id INTEGER PRIMARY '
'KEY,type INT,last_update LONG)'),
'status_groups': (
'CREATE TABLE status_groups (_id INTEGER PRIMARY KEY,tweet_type INT '
'DEFAULT 0,type INT,sender_id INT,owner_id INT,ref_id INT,tag '
'INT,g_status_id INT,is_read INT,page INT,is_last INT,updated_at '
'INT,timeline INT,pc BLOB,g_flags INT,preview_draft_id '
'INT,preview_media BLOB,tweet_pivots BLOB)'),
'status_metadata': (
'CREATE TABLE status_metadata (_id INTEGER PRIMARY KEY,owner_id INT '
'NOT NULL,status_id INT NOT NULL,status_group INT NOT '
'NULL,status_group_tag INT NOT NULL,soc_type INT,soc_name '
'TEXT,soc_second_name TEXT,soc_others_count INT,soc_fav_count '
'INT,soc_rt_count INT,reason_icon_type TEXT,reason_text '
'TEXT,scribe_component TEXT,scribe_data BLOB,highlights TEXT)'),
'statuses': (
'CREATE TABLE statuses (_id INTEGER PRIMARY KEY,status_id INT '
'UNIQUE NOT NULL,author_id INT,content TEXT,source TEXT,created '
'INT,in_r_user_id INT,in_r_status_id INT,favorited INT,latitude '
'TEXT,longitude TEXT,place_data BLOB,entities TEXT,retweet_count '
'INT,r_content TEXT,cards BLOB,flags INT,favorite_count INT,lang '
'TEXT,supplemental_language TEXT,view_count INT,quoted_tweet_data '
'BLOB,quoted_tweet_id INT,retweeted INT)'),
'stories': (
'CREATE TABLE stories ( _id INTEGER PRIMARY KEY,story_id '
'TEXT,story_order INT,story_type INT,story_proof_type '
'INT,story_proof_addl_count INT,data_type INT,data_id '
'INT,story_is_read INT,story_meta_title TEXT,story_meta_subtitle '
'TEXT,story_meta_query TEXT,story_meta_header_img_url '
'TEXT,story_source TEXT,story_impression_info TEXT,story_tag INT)'),
'timeline': (
'CREATE TABLE timeline (_id INTEGER PRIMARY KEY '
'AUTOINCREMENT,owner_id INT,type INT,sort_index INT,entity_id '
'INT,entity_type INT,data_type INT,data_type_group '
'INT,data_type_tag INT,timeline_tag TEXT,timeline_group_id '
'INT,timeline_scribe_group_id INT,data_id INT,data BLOB,flags '
'INT,updated_at INT,data_origin_id TEXT,is_last INT,is_read '
'INT,scribe_content BLOB,timeline_moment_info BLOB,dismissed INT '
'NOT NULL DEFAULT 0,dismiss_actions INT NOT NULL DEFAULT 0)'),
'tokens': (
'CREATE TABLE tokens (_id INTEGER PRIMARY KEY,text TEXT,weight '
'INT,type INT,ref_id INT)'),
'topics': (
'CREATE TABLE topics (_id INTEGER PRIMARY KEY,ev_id TEXT UNIQUE NOT '
'NULL,ev_type INT,ev_query TEXT NOT NULL,ev_seed_hashtag '
'TEXT,ev_title STRING,ev_subtitle STRING,ev_view_url '
'STRING,ev_status STRING,ev_image_url TEXT,ev_explanation '
'TEXT,ev_tweet_count INT,ev_start_time INT,ev_owner_id INT,ev_pc '
'BLOB,ev_content BLOB,ev_hash INT)'),
'user_groups': (
'CREATE TABLE user_groups (_id INTEGER PRIMARY KEY,type INT,tag '
'INT,rank INT,owner_id INT,user_id INT,is_last INT,pc BLOB,g_flags '
'INT)'),
'user_metadata': (
'CREATE TABLE user_metadata (_id INTEGER PRIMARY KEY,owner_id INT '
'NOT NULL,user_id INT NOT NULL,user_group_type INT NOT '
'NULL,user_group_tag INT NOT NULL,soc_type INT,soc_name '
'TEXT,soc_follow_count INT,user_title TEXT,token TEXT)'),
'users': (
'CREATE TABLE users (_id INTEGER PRIMARY KEY,user_id INT UNIQUE NOT '
'NULL,username TEXT,name TEXT,description TEXT,web_url '
'TEXT,bg_color INT,location TEXT,structured_location '
'BLOB,user_flags INT,followers INT,fast_followers INT DEFAULT '
'0,friends INT,statuses INT,profile_created INT,image_url TEXT,hash '
'INT,updated INT,friendship INT,friendship_time INT,favorites INT '
'DEFAULT 0,header_url TEXT,description_entities BLOB,url_entities '
'BLOB,media_count INT,extended_profile_fields BLOB,pinned_tweet_id '
'INT,link_color INT,advertiser_type TEXT,business_profile_state '
'TEXT)')}]
def ParseSearchRow(self, parser_mediator, query, row, **unused_kwargs):
"""Parses a search row from the database.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row resulting from query.
"""
query_hash = hash(query)
event_data = TwitterAndroidSearchEventData()
event_data.query = query
event_data.name = self._GetRowValue(query_hash, row, 'name')
event_data.search_query = self._GetRowValue(query_hash, row, 'query')
timestamp = self._GetRowValue(query_hash, row, 'time')
if timestamp:
date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_CREATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
def ParseStatusRow(self, parser_mediator, query, row, **unused_kwargs):
"""Parses a status row from the database.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row resulting from query.
"""
query_hash = hash(query)
event_data = TwitterAndroidStatusEventData()
event_data.query = query
event_data.identifier = self._GetRowValue(query_hash, row, '_id')
event_data.author_identifier = self._GetRowValue(
query_hash, row, 'author_id')
event_data.username = self._GetRowValue(query_hash, row, 'username')
event_data.content = self._GetRowValue(query_hash, row, 'content')
event_data.favorited = self._GetRowValue(query_hash, row, 'favorited')
event_data.retweeted = self._GetRowValue(query_hash, row, 'retweeted')
timestamp = self._GetRowValue(query_hash, row, 'time')
if timestamp:
date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_CREATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
def ParseContactRow(self, parser_mediator, query, row, **unused_kwargs):
"""Parses a status row from the database.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row resulting from query.
"""
query_hash = hash(query)
event_data = TwitterAndroidContactEventData()
event_data.query = query
event_data.identifier = self._GetRowValue(query_hash, row, '_id')
event_data.user_identifier = self._GetRowValue(query_hash, row, 'user_id')
event_data.username = self._GetRowValue(query_hash, row, 'username')
event_data.name = self._GetRowValue(query_hash, row, 'name')
event_data.description = self._GetRowValue(query_hash, row, 'description')
event_data.web_url = self._GetRowValue(query_hash, row, 'web_url')
event_data.location = self._GetRowValue(query_hash, row, 'location')
event_data.followers = self._GetRowValue(query_hash, row, 'followers')
event_data.friends = self._GetRowValue(query_hash, row, 'friends')
event_data.statuses = self._GetRowValue(query_hash, row, 'statuses')
event_data.image_url = self._GetRowValue(query_hash, row, 'image_url')
timestamp = self._GetRowValue(query_hash, row, 'profile_created')
if timestamp:
date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_CREATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
timestamp = self._GetRowValue(query_hash, row, 'updated')
if timestamp:
date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_UPDATE)
parser_mediator.ProduceEventWithEventData(event, event_data)
timestamp = self._GetRowValue(query_hash, row, 'friendship_time')
if timestamp:
date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_MODIFICATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
sqlite.SQLiteParser.RegisterPlugin(TwitterAndroidPlugin)
```
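The `time`, `profile_created`, `updated` and `friendship_time` columns are interpreted as Java timestamps, i.e. milliseconds since the POSIX epoch, which is why `dfdatetime_java_time.JavaTime` is used. The standard-library sketch below only illustrates that unit assumption; the sample value is hypothetical.
```python
import datetime

# Java timestamps count milliseconds since 1970-01-01 00:00:00 UTC, so
# dividing by 1000 yields a POSIX timestamp in seconds.
java_timestamp = 1340821021000  # hypothetical value from a 'created' column
converted = datetime.datetime.fromtimestamp(
    java_timestamp / 1000, tz=datetime.timezone.utc)
print(converted.isoformat())  # 2012-06-27T18:17:01+00:00
```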
#### File: parsers/winreg_plugins/amcache.py
```python
import re
from dfdatetime import filetime as dfdatetime_filetime
from dfdatetime import posix_time as dfdatetime_posix_time
from dfdatetime import time_elements as dfdatetime_time_elements
from dfwinreg import errors as dfwinreg_errors
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import definitions
from plaso.parsers import winreg_parser
from plaso.parsers.winreg_plugins import interface
class AMCacheFileEventData(events.EventData):
"""AMCache file event data.
Attributes:
company_name (str): company name that created product file belongs to.
file_description (str): description of file.
file_reference (str): file system file reference, for example 9-1 (MFT
entry - sequence number).
file_size (int): size of file in bytes.
file_version (str): version of file.
full_path (str): full path of file.
language_code (int): language code of file.
product_name (str): product name file belongs to.
program_identifier (str): GUID of entry under Root/Program key file belongs
to.
sha1 (str): SHA-1 of file.
"""
DATA_TYPE = 'windows:registry:amcache'
def __init__(self):
"""Initializes event data."""
super(AMCacheFileEventData, self).__init__(data_type=self.DATA_TYPE)
self.company_name = None
self.file_description = None
self.file_reference = None
self.file_size = None
self.file_version = None
self.full_path = None
self.language_code = None
self.product_name = None
self.program_identifier = None
self.sha1 = None
class AMCacheProgramEventData(events.EventData):
"""AMCache programs event data.
Attributes:
entry_type (str): type of entry (usually AddRemoveProgram).
file_paths (str): file paths of installed program.
files (str): list of files belonging to program.
language_code (int): language_code of program.
msi_package_code (str): MSI package code of program.
msi_product_code (str): MSI product code of program.
name (str): name of installed program.
package_code (str): package code of program.
product_code (str): product code of program.
publisher (str): publisher of program.
uninstall_key (str): unicode string of uninstall registry key for program.
version (str): version of program.
"""
DATA_TYPE = 'windows:registry:amcache:programs'
def __init__(self):
"""Initializes event data."""
super(AMCacheProgramEventData, self).__init__(data_type=self.DATA_TYPE)
self.entry_type = None
self.file_paths = None
self.files = None
self.language_code = None
self.msi_package_code = None
self.msi_product_code = None
self.name = None
self.package_code = None
self.product_code = None
self.publisher = None
self.uninstall_key = None
self.version = None
class AMCachePlugin(interface.WindowsRegistryPlugin):
"""AMCache.hve Windows Registry plugin."""
NAME = 'amcache'
DATA_FORMAT = 'AMCache (AMCache.hve)'
FILTERS = frozenset([
interface.WindowsRegistryKeyPathFilter('\\Root')])
# Contains: {value name: attribute name}
_APPLICATION_SUB_KEY_VALUES = {
'LowerCaseLongPath': 'full_path',
'ProductName': 'product_name',
'ProductVersion': 'file_version',
'ProgramId': 'program_identifier',
'Publisher': 'company_name',
'Size': 'file_size'}
_FILE_REFERENCE_KEY_VALUES = {
'0': 'product_name',
'1': 'company_name',
'3': 'language_code',
'5': 'file_version',
'6': 'file_size',
'c': 'file_description',
'15': 'full_path',
'100': 'program_identifier',
'101': 'sha1'}
_AMCACHE_LINK_TIME = 'f'
_AMCACHE_FILE_MODIFICATION_TIME = '11'
_AMCACHE_FILE_CREATION_TIME = '12'
_AMCACHE_ENTRY_WRITE_TIME = '17'
_AMCACHE_P_INSTALLATION_TIME = 'a'
_AMCACHE_P_FILES = 'Files'
# Date and time string formatted as: "MM/DD/YYYY hh:mm:ss"
# for example "04/07/2014 15:18:49"
# TODO: determine if this is true for other locales.
_LINK_DATE_TIME_RE = re.compile(
r'([0-9][0-9])/([0-9][0-9])/([0-9][0-9][0-9][0-9]) '
r'([0-9][0-9]):([0-9][0-9]):([0-9][0-9])')
_PRODUCT_KEY_VALUES = {
'0': 'name',
'1': 'version',
'2': 'publisher',
'3': 'language_code',
'6': 'entry_type',
'7': 'uninstall_key',
'd': 'file_paths',
'f': 'product_code',
'10': 'package_code',
'11': 'msi_product_code',
'12': 'msi_package_code'}
def _GetValueDataAsObject(
self, parser_mediator, key_path, value_name, registry_value):
"""Retrieves the value data as an object from a Windows Registry value.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
key_path (str): key path.
value_name (str): name of the value.
registry_value (dfwinreg.WinRegistryValue): Windows Registry value.
Returns:
object: value data or None when the value data cannot be determined.
"""
if registry_value.data is None:
return '(empty)'
try:
value_object = registry_value.GetDataAsObject()
if registry_value.DataIsMultiString():
value_object = list(value_object)
elif (not registry_value.DataIsInteger() and
not registry_value.DataIsString()):
# Represent remaining types like REG_BINARY and
# REG_RESOURCE_REQUIREMENT_LIST.
value_object = registry_value.data
except dfwinreg_errors.WinRegistryValueError as exception:
parser_mediator.ProduceRecoveryWarning((
'Unable to retrieve value data of type: {0:s} as object from '
'value: {1:s} in key: {2:s} with error: {3!s}').format(
registry_value.data_type_string, value_name, key_path, exception))
value_object = None
return value_object
def _ParseApplicationSubKey(self, parser_mediator, application_sub_key):
"""Parses a Root\\InventoryApplicationFile\\%NAME%|%IDENTIFIER% key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfVFS.
application_sub_key (dfwinreg.WinRegistryKey): application sub key of the
InventoryApplicationFile Windows Registry key.
"""
event_data = AMCacheFileEventData()
for value_name, attribute_name in self._APPLICATION_SUB_KEY_VALUES.items():
value = application_sub_key.GetValueByName(value_name)
if value:
value_data = self._GetValueDataAsObject(
parser_mediator, application_sub_key.path, value_name, value)
setattr(event_data, attribute_name, value_data)
install_date_value = application_sub_key.GetValueByName('InstallDate')
if install_date_value:
date_time = self._ParseDateStringValue(
parser_mediator, application_sub_key.path, install_date_value)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_INSTALLATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
install_date_msi_value = application_sub_key.GetValueByName(
'InstallDateMsi')
if install_date_msi_value:
date_time = self._ParseDateStringValue(
parser_mediator, application_sub_key.path, install_date_msi_value)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_INSTALLATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
link_date_value = application_sub_key.GetValueByName('LinkDate')
if link_date_value:
date_time = self._ParseDateStringValue(
parser_mediator, application_sub_key.path, link_date_value)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_LINK_TIME)
parser_mediator.ProduceEventWithEventData(event, event_data)
def _ParseDateStringValue(self, parser_mediator, key_path, registry_value):
"""Parses a date and time string value.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
key_path (str): key path.
registry_value (dfwinreg.WinRegistryValue): Windows Registry value.
Returns:
dfdatetime_time_elements.TimeElements: date and time value or None
if not available.
"""
if not registry_value.DataIsString():
parser_mediator.ProduceExtractionWarning((
'unsupported {0:s} with value data type: {1:s} in key: '
'{2:s}').format(
registry_value.name, registry_value.data_type_string, key_path))
return None
date_time_string = registry_value.GetDataAsObject()
if not date_time_string:
parser_mediator.ProduceExtractionWarning(
'missing {0:s} value data in key: {1:s}'.format(
registry_value.name, key_path))
return None
re_match = self._LINK_DATE_TIME_RE.match(date_time_string)
if not re_match:
parser_mediator.ProduceExtractionWarning(
'unsupported {0:s} value data: {1!s} in key: {2:s}'.format(
registry_value.name, date_time_string, key_path))
return None
month, day_of_month, year, hours, minutes, seconds = re_match.groups()
try:
year = int(year, 10)
month = int(month, 10)
day_of_month = int(day_of_month, 10)
hours = int(hours, 10)
minutes = int(minutes, 10)
seconds = int(seconds, 10)
except (TypeError, ValueError):
parser_mediator.ProduceExtractionWarning(
'invalid {0:s} date time value: {1!s} in key: {2:s}'.format(
registry_value.name, date_time_string, key_path))
return None
time_elements_tuple = (year, month, day_of_month, hours, minutes, seconds)
try:
date_time = dfdatetime_time_elements.TimeElements(
time_elements_tuple=time_elements_tuple)
except ValueError:
parser_mediator.ProduceExtractionWarning(
'invalid {0:s} date time value: {1!s} in key: {2:s}'.format(
registry_value.name, time_elements_tuple, key_path))
return None
return date_time
def _ParseFileKey(self, parser_mediator, file_key):
"""Parses a Root\\File key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfVFS.
file_key (dfwinreg.WinRegistryKey): the File Windows Registry key.
"""
for volume_key in file_key.GetSubkeys():
for file_reference_key in volume_key.GetSubkeys():
self._ParseFileReferenceKey(parser_mediator, file_reference_key)
def _ParseFileReferenceKey(self, parser_mediator, file_reference_key):
"""Parses a file reference key (sub key of Root\\File\\%VOLUME%) for events.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfVFS.
file_reference_key (dfwinreg.WinRegistryKey): file reference Windows
Registry key.
"""
event_data = AMCacheFileEventData()
try:
if '0000' in file_reference_key.name:
# A NTFS file is a combination of MFT entry and sequence number.
sequence_number, mft_entry = file_reference_key.name.split('0000')
mft_entry = int(mft_entry, 16)
sequence_number = int(sequence_number, 16)
event_data.file_reference = '{0:d}-{1:d}'.format(
mft_entry, sequence_number)
else:
# A FAT file is a single number.
file_reference = int(file_reference_key.name, 16)
event_data.file_reference = '{0:d}'.format(file_reference)
except (ValueError, TypeError):
pass
for value_name, attribute_name in self._FILE_REFERENCE_KEY_VALUES.items():
value = file_reference_key.GetValueByName(value_name)
if not value:
continue
value_data = self._GetValueDataAsObject(
parser_mediator, file_reference_key.path, value_name, value)
if attribute_name == 'sha1' and value_data.startswith('0000'):
# Strip off the 4 leading zeros from the SHA-1 hash.
value_data = value_data[4:]
setattr(event_data, attribute_name, value_data)
write_time_value = file_reference_key.GetValueByName(
self._AMCACHE_ENTRY_WRITE_TIME)
if write_time_value:
timestamp = write_time_value.GetDataAsObject()
date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_MODIFICATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
creation_time_value = file_reference_key.GetValueByName(
self._AMCACHE_FILE_CREATION_TIME)
if creation_time_value:
timestamp = creation_time_value.GetDataAsObject()
date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_CREATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
modification_time_value = file_reference_key.GetValueByName(
self._AMCACHE_FILE_MODIFICATION_TIME)
if modification_time_value:
timestamp = modification_time_value.GetDataAsObject()
date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_MODIFICATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
link_time_value = file_reference_key.GetValueByName(self._AMCACHE_LINK_TIME)
if link_time_value:
timestamp = link_time_value.GetDataAsObject()
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_LINK_TIME)
parser_mediator.ProduceEventWithEventData(event, event_data)
def _ParseInventoryApplicationFileKey(
self, parser_mediator, inventory_application_file_key):
"""Parses a Root\\InventoryApplicationFile key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfVFS.
inventory_application_file_key (dfwinreg.WinRegistryKey): the
InventoryApplicationFile Windows Registry key.
"""
for application_sub_key in inventory_application_file_key.GetSubkeys():
self._ParseApplicationSubKey(parser_mediator, application_sub_key)
def _ParseProgramKey(self, parser_mediator, program_key):
"""Parses a program key (a sub key of Root\\Programs) for events.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfVFS.
program_key (dfwinreg.WinRegistryKey): program Windows Registry key.
"""
event_data = AMCacheProgramEventData()
for value_name, attribute_name in self._PRODUCT_KEY_VALUES.items():
value = program_key.GetValueByName(value_name)
if value:
value_data = self._GetValueDataAsObject(
parser_mediator, program_key.path, value_name, value)
setattr(event_data, attribute_name, value_data)
installation_time_value = program_key.GetValueByName(
self._AMCACHE_P_INSTALLATION_TIME)
if installation_time_value:
timestamp = installation_time_value.GetDataAsObject()
installation_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
installation_time, definitions.TIME_DESCRIPTION_INSTALLATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
def _ParseProgramsKey(self, parser_mediator, programs_key):
"""Parses a Root\\Programs key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfVFS.
programs_key (dfwinreg.WinRegistryKey): the Programs Windows Registry key.
"""
for program_key in programs_key.GetSubkeys():
self._ParseProgramKey(parser_mediator, program_key)
def _ParseRootKey(self, parser_mediator, root_key):
"""Parses a Root key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfVFS.
root_key (dfwinreg.WinRegistryKey): the Root Windows Registry key.
"""
self._ProduceDefaultWindowsRegistryEvent(parser_mediator, root_key)
for sub_key in root_key.GetSubkeys():
self._ParseSubKey(parser_mediator, sub_key)
if sub_key.name == 'File':
self._ParseFileKey(parser_mediator, sub_key)
elif sub_key.name == 'InventoryApplicationFile':
self._ParseInventoryApplicationFileKey(parser_mediator, sub_key)
elif sub_key.name == 'Programs':
self._ParseProgramsKey(parser_mediator, sub_key)
def _ParseSubKey(self, parser_mediator, registry_key):
"""Parses a sub key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfVFS.
registry_key (dfwinreg.WinRegistryKey): the Windows Registry key.
"""
self._ProduceDefaultWindowsRegistryEvent(parser_mediator, registry_key)
for sub_key in registry_key.GetSubkeys():
self._ParseSubKey(parser_mediator, sub_key)
def ExtractEvents(self, parser_mediator, registry_key, **kwargs):
"""Extracts events from a Windows Registry key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfVFS.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
"""
self._ParseRootKey(parser_mediator, registry_key)
winreg_parser.WinRegistryParser.RegisterPlugin(AMCachePlugin)
```
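`_ParseDateStringValue` converts `LinkDate`, `InstallDate` and `InstallDateMsi` strings of the form `MM/DD/YYYY hh:mm:ss` into time element tuples. The snippet below repeats that matching step outside the plugin with a hypothetical sample string, using the same regular expression.
```python
import re

# Same pattern as AMCachePlugin._LINK_DATE_TIME_RE: "MM/DD/YYYY hh:mm:ss".
LINK_DATE_TIME_RE = re.compile(
    r'([0-9][0-9])/([0-9][0-9])/([0-9][0-9][0-9][0-9]) '
    r'([0-9][0-9]):([0-9][0-9]):([0-9][0-9])')

re_match = LINK_DATE_TIME_RE.match('04/07/2014 15:18:49')
month, day_of_month, year, hours, minutes, seconds = re_match.groups()
time_elements_tuple = tuple(int(value, 10) for value in (
    year, month, day_of_month, hours, minutes, seconds))
print(time_elements_tuple)  # (2014, 4, 7, 15, 18, 49)
```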
#### File: storage/fake/writer.py
```python
from plaso.lib import definitions
from plaso.storage import writer
from plaso.storage.fake import fake_store
class FakeStorageWriter(writer.StorageWriter):
"""Fake (in-memory only) storage writer object.
Attributes:
task_completion (TaskCompletion): task completion attribute container.
task_start (TaskStart): task start attribute container.
"""
def __init__(self, storage_type=definitions.STORAGE_TYPE_SESSION):
"""Initializes a storage writer object.
Args:
storage_type (Optional[str]): storage type.
"""
super(FakeStorageWriter, self).__init__(storage_type=storage_type)
self.task_completion = None
self.task_start = None
def GetFirstWrittenEventSource(self):
"""Retrieves the first event source that was written after open.
Using GetFirstWrittenEventSource and GetNextWrittenEventSource newly
added event sources can be retrieved in order of addition.
Returns:
EventSource: event source or None if there are no newly written ones.
Raises:
IOError: when the storage writer is closed.
OSError: when the storage writer is closed.
"""
if not self._store:
raise IOError('Unable to read from closed storage writer.')
event_source = self._store.GetAttributeContainerByIndex(
self._CONTAINER_TYPE_EVENT_SOURCE,
self._first_written_event_source_index)
self._written_event_source_index = (
self._first_written_event_source_index + 1)
return event_source
def GetNextWrittenEventSource(self):
"""Retrieves the next event source that was written after open.
Returns:
EventSource: event source or None if there are no newly written ones.
Raises:
IOError: when the storage writer is closed.
OSError: when the storage writer is closed.
"""
if not self._store:
raise IOError('Unable to read from closed storage writer.')
event_source = self._store.GetAttributeContainerByIndex(
self._CONTAINER_TYPE_EVENT_SOURCE, self._written_event_source_index)
self._written_event_source_index += 1
return event_source
def Open(self, **unused_kwargs):
"""Opens the storage writer.
Raises:
IOError: if the storage writer is already opened.
OSError: if the storage writer is already opened.
"""
if self._store:
raise IOError('Storage writer already opened.')
self._store = fake_store.FakeStore()
self._store.Open()
self._first_written_event_source_index = 0
self._written_event_source_index = 0
```
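Since the in-memory store is only created in `Open`, the reader methods refuse to run on a writer that has not been opened (or has been closed), and opening twice is rejected as well. A minimal sketch of that contract, assuming nothing beyond the methods shown above:
```python
writer = FakeStorageWriter()

try:
  writer.GetFirstWrittenEventSource()
except IOError:
  pass  # Expected: the fake store only exists after Open().

writer.Open()
try:
  writer.Open()
except IOError:
  pass  # Expected: opening an already opened writer raises.
```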
#### File: plaso/storage/identifiers.py
```python
from plaso.containers import interface as containers_interface
class FakeIdentifier(containers_interface.AttributeContainerIdentifier):
"""Fake attribute container identifier intended for testing.
Attributes:
sequence_number (int): sequence number of the attribute container.
"""
def __init__(self, sequence_number):
"""Initializes a fake attribute container identifier.
Args:
sequence_number (int): sequence number of the attribute container.
"""
super(FakeIdentifier, self).__init__()
self.sequence_number = sequence_number
def CopyToString(self):
"""Copies the identifier to a string representation.
Returns:
str: unique identifier or None.
"""
if self.sequence_number is None:
return None
return '{0:d}'.format(self.sequence_number)
class RedisKeyIdentifier(containers_interface.AttributeContainerIdentifier):
"""Redis key attribute container identifier.
Attributes:
name (str): name of the attribute container.
sequence_number (int): sequence number of the attribute container.
"""
def __init__(self, name, sequence_number):
""""Initializes a Redis key identifier.
Args:
name (str): name of the attribute container.
sequence_number (int): sequence number of the attribute container.
"""
super(RedisKeyIdentifier, self).__init__()
self.name = name
self.sequence_number = sequence_number
def CopyToString(self):
"""Copies the identifier to a string representation.
Returns:
str: unique identifier or None.
"""
if self.name is not None and self.sequence_number is not None:
return '{0:s}.{1:d}'.format(self.name, self.sequence_number)
return None
class SQLTableIdentifier(containers_interface.AttributeContainerIdentifier):
"""SQL table attribute container identifier.
The identifier is used to uniquely identify attribute containers, for
example when an attribute container is stored as JSON serialized data in
a SQLite database file.
Attributes:
name (str): name of the table (attribute container).
sequence_number (int): sequence number of the attribute container.
"""
def __init__(self, name, sequence_number):
"""Initializes a SQL table attribute container identifier.
Args:
name (str): name of the table (attribute container).
sequence_number (int): sequence number of the attribute container.
"""
super(SQLTableIdentifier, self).__init__()
self.name = name
self.sequence_number = sequence_number
@property
def row_identifier(self):
"""int: unique identifier of the row in the table."""
return self.sequence_number
def CopyToString(self):
"""Copies the identifier to a string representation.
Returns:
str: unique identifier or None.
"""
if self.name is not None and self.sequence_number is not None:
return '{0:s}.{1:d}'.format(self.name, self.sequence_number)
return None
```
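All three identifier classes share the same `CopyToString` contract: a string when the underlying fields are set, otherwise None. A short illustration using the classes defined above:
```python
print(FakeIdentifier(12).CopyToString())                   # '12'
print(FakeIdentifier(None).CopyToString())                 # None
print(RedisKeyIdentifier('event', 5).CopyToString())       # 'event.5'
print(SQLTableIdentifier('event_data', 7).CopyToString())  # 'event_data.7'
```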
#### File: cli/helpers/viper_analysis.py
```python
import argparse
import unittest
from plaso.analysis import viper
from plaso.lib import errors
from plaso.cli.helpers import viper_analysis
from tests.cli import test_lib as cli_test_lib
from tests.cli.helpers import test_lib
class ViperAnalysisArgumentsHelperTest(
test_lib.AnalysisPluginArgumentsHelperTest):
"""Tests the Viper analysis plugin CLI arguments helper."""
# pylint: disable=no-member,protected-access
_EXPECTED_OUTPUT = """\
usage: cli_helper.py [--viper-hash HASH] [--viper-host HOST]
[--viper-port PORT] [--viper-protocol PROTOCOL]
Test argument parser.
{0:s}:
--viper-hash HASH, --viper_hash HASH
Type of hash to use to query the Viper server, the
default is: sha256. Supported options: md5, sha256
--viper-host HOST, --viper_host HOST
Hostname of the Viper server to query, the default is:
localhost
--viper-port PORT, --viper_port PORT
Port of the Viper server to query, the default is:
8080.
--viper-protocol PROTOCOL, --viper_protocol PROTOCOL
Protocol to use to query Viper, the default is: http.
Supported options: http, https
""".format(cli_test_lib.ARGPARSE_OPTIONS)
def testAddArguments(self):
"""Tests the AddArguments function."""
argument_parser = argparse.ArgumentParser(
prog='cli_helper.py',
description='Test argument parser.', add_help=False,
formatter_class=cli_test_lib.SortedArgumentsHelpFormatter)
viper_analysis.ViperAnalysisArgumentsHelper.AddArguments(
argument_parser)
output = self._RunArgparseFormatHelp(argument_parser)
self.assertEqual(output, self._EXPECTED_OUTPUT)
def testParseOptions(self):
"""Tests the ParseOptions function."""
options = cli_test_lib.TestOptions()
analysis_plugin = viper.ViperAnalysisPlugin()
with self.assertRaises(errors.BadConfigOption):
viper_analysis.ViperAnalysisArgumentsHelper.ParseOptions(
options, analysis_plugin)
with self.assertRaises(errors.BadConfigObject):
viper_analysis.ViperAnalysisArgumentsHelper.ParseOptions(
options, None)
if __name__ == '__main__':
unittest.main()
```
#### File: tests/containers/counts.py
```python
import unittest
from plaso.containers import counts
from tests import test_lib as shared_test_lib
class EventLabelCountTest(shared_test_lib.BaseTestCase):
"""Tests for the event label count attribute container."""
def testGetAttributeNames(self):
"""Tests the GetAttributeNames function."""
attribute_container = counts.EventLabelCount()
expected_attribute_names = [
'label',
'number_of_events']
attribute_names = sorted(attribute_container.GetAttributeNames())
self.assertEqual(attribute_names, expected_attribute_names)
class ParserCountTest(shared_test_lib.BaseTestCase):
"""Tests for the parser count attribute container."""
def testGetAttributeNames(self):
"""Tests the GetAttributeNames function."""
attribute_container = counts.ParserCount()
expected_attribute_names = [
'name',
'number_of_events']
attribute_names = sorted(attribute_container.GetAttributeNames())
self.assertEqual(attribute_names, expected_attribute_names)
if __name__ == '__main__':
unittest.main()
```
#### File: tests/helpers/language_tags.py
```python
import unittest
from plaso.helpers import language_tags
from tests import test_lib as shared_test_lib
class LanguageTagHelperTest(shared_test_lib.BaseTestCase):
"""Tests for the language tags helper."""
def testGetLanguages(self):
"""Tests the GetLanguages function."""
languages = dict(language_tags.LanguageTagHelper.GetLanguages())
self.assertIn('is-IS', languages)
self.assertEqual(languages['is-IS'], 'Icelandic, Iceland')
def testIsLanguageTag(self):
"""Tests the IsLanguageTag function."""
result = language_tags.LanguageTagHelper.IsLanguageTag('is-IS')
self.assertTrue(result)
result = language_tags.LanguageTagHelper.IsLanguageTag('bogus')
self.assertFalse(result)
if __name__ == '__main__':
unittest.main()
```
#### File: tests/multi_process/zeromq_queue.py
```python
import unittest
from plaso.lib import errors
from plaso.multi_process import zeromq_queue
from tests import test_lib as shared_test_lib
class ZeroMQPullBindQueue(zeromq_queue.ZeroMQPullQueue):
"""A Plaso queue backed by a ZeroMQ PULL socket that binds to a port.
This queue may only be used to pop items, not to push.
"""
SOCKET_CONNECTION_TYPE = zeromq_queue.ZeroMQQueue.SOCKET_CONNECTION_BIND
class ZeroMQPushConnectQueue(zeromq_queue.ZeroMQPushQueue):
"""A Plaso queue backed by a ZeroMQ PUSH socket that connects to a port.
This queue may only be used to push items, not to pop.
"""
SOCKET_CONNECTION_TYPE = zeromq_queue.ZeroMQQueue.SOCKET_CONNECTION_CONNECT
class ZeroMQRequestBindQueue(zeromq_queue.ZeroMQRequestQueue):
"""A Plaso queue backed by a ZeroMQ REQ socket that binds to a port.
This queue may only be used to pop items, not to push.
"""
SOCKET_CONNECTION_TYPE = zeromq_queue.ZeroMQQueue.SOCKET_CONNECTION_BIND
class ZeroMQBufferedReplyConnectQueue(zeromq_queue.ZeroMQBufferedReplyQueue):
"""A Plaso queue backed by a ZeroMQ REP socket that connects to a port.
This queue may only be used to pop items, not to push.
"""
SOCKET_CONNECTION_TYPE = zeromq_queue.ZeroMQQueue.SOCKET_CONNECTION_CONNECT
class ZeroMQQueuesTest(shared_test_lib.BaseTestCase):
"""Tests for ZeroMQ queues."""
# pylint: disable=protected-access
_QUEUE_CLASSES = frozenset([
zeromq_queue.ZeroMQPushBindQueue, ZeroMQPullBindQueue,
ZeroMQRequestBindQueue])
def _testItemTransferred(self, push_queue, pop_queue):
"""Tests than item can be transferred between two queues."""
item = 'This is an item going from {0:s} to {1:s}.'.format(
push_queue.name, pop_queue.name)
push_queue.PushItem(item)
popped_item = pop_queue.PopItem()
self.assertEqual(item, popped_item)
def testBufferedReplyQueue(self):
"""Tests for the buffered reply queue."""
test_queue = zeromq_queue.ZeroMQBufferedReplyBindQueue(
name='bufferedreply_bind', delay_open=False, linger_seconds=1)
test_queue.PushItem('This is a test item.')
test_queue.Close(abort=True)
with self.assertRaises(errors.QueueAlreadyClosed):
test_queue.PushItem('This shouldn\'t work')
def testPushPullQueues(self):
"""Tests than an item can be transferred between push and pull queues."""
push_queue = zeromq_queue.ZeroMQPushBindQueue(
name='pushpull_pushbind', delay_open=False, linger_seconds=1)
pull_queue = zeromq_queue.ZeroMQPullConnectQueue(
name='pushpull_pullconnect', delay_open=False, port=push_queue.port,
linger_seconds=1)
self._testItemTransferred(push_queue, pull_queue)
push_queue.Close()
pull_queue.Close()
pull_queue = ZeroMQPullBindQueue(
name='pushpull_pullbind', delay_open=False, linger_seconds=1)
push_queue = ZeroMQPushConnectQueue(
name='pushpull_pushconnect', delay_open=False, port=pull_queue.port,
linger_seconds=1)
self._testItemTransferred(push_queue, pull_queue)
push_queue.Close()
pull_queue.Close()
def testQueueStart(self):
"""Tests that delayed creation of ZeroMQ sockets occurs correctly."""
for queue_class in self._QUEUE_CLASSES:
queue_name = 'queuestart_{0:s}'.format(queue_class.__name__)
test_queue = queue_class(
name=queue_name, delay_open=True, linger_seconds=1)
message = '{0:s} socket already exists.'.format(queue_name)
self.assertIsNone(test_queue._zmq_socket, message)
test_queue.Open()
self.assertIsNotNone(test_queue._zmq_socket)
test_queue.Close()
def testRequestAndBufferedReplyQueues(self):
"""Tests REQ and buffered REP queue pairs."""
reply_queue = zeromq_queue.ZeroMQBufferedReplyBindQueue(
name='requestbufferedreply_replybind', delay_open=False,
linger_seconds=1)
request_queue = zeromq_queue.ZeroMQRequestConnectQueue(
name='requestbufferedreply_requestconnect', delay_open=False,
port=reply_queue.port, linger_seconds=1)
self._testItemTransferred(reply_queue, request_queue)
reply_queue.Close()
request_queue.Close()
request_queue = ZeroMQRequestBindQueue(
name='requestbufferedreply_requestbind', delay_open=False,
linger_seconds=1)
reply_queue = ZeroMQBufferedReplyConnectQueue(
name='requestbufferedreply_replyconnect', delay_open=False,
port=request_queue.port, linger_seconds=0)
self._testItemTransferred(reply_queue, request_queue)
reply_queue.Close()
request_queue.Close()
def testEmptyBufferedQueues(self):
"""Tests the Empty method for buffered queues."""
queue = zeromq_queue.ZeroMQBufferedReplyBindQueue(
name='requestbufferedreply_replybind', delay_open=False,
linger_seconds=1, buffer_max_size=3, timeout_seconds=2,
buffer_timeout_seconds=1)
try:
while True:
queue.PushItem('item', block=False)
except errors.QueueFull:
# Queue is now full
pass
with self.assertRaises(errors.QueueFull):
queue.PushItem('item', block=False)
queue.Empty()
# We should now be able to push another item without an exception.
queue.PushItem('item')
queue.Empty()
queue.Close()
def testSocketCreation(self):
"""Tests that ZeroMQ sockets are created when a new queue is created."""
for queue_class in self._QUEUE_CLASSES:
queue_name = 'socket_creation_{0:s}'.format(queue_class.__name__)
test_queue = queue_class(
name=queue_name, delay_open=False, linger_seconds=1)
self.assertIsNotNone(test_queue._zmq_socket)
test_queue.Close()
if __name__ == '__main__':
unittest.main()
```
#### File: tests/output/shared_json.py
```python
import os
import sys
import unittest
from dfvfs.lib import definitions as dfvfs_definitions
from dfvfs.path import factory as path_spec_factory
from plaso.lib import definitions
from plaso.output import shared_json
from tests import test_lib as shared_test_lib
from tests.containers import test_lib as containers_test_lib
from tests.output import test_lib
class JSONEventFormattingHelperTest(test_lib.OutputModuleTestCase):
"""Tests the JSON output module event formatting helper."""
# pylint: disable=protected-access
_OS_PATH_SPEC = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_OS, location='{0:s}{1:s}'.format(
os.path.sep, os.path.join('cases', 'image.dd')))
_TEST_EVENTS = [
{'data_type': 'test:event',
'hostname': 'ubuntu',
'path_spec': path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_TSK, inode=15,
location='/var/log/syslog.1', parent=_OS_PATH_SPEC),
'text': (
'Reporter <CRON> PID: |8442| (pam_unix(cron:session): session\n '
'closed for user root)'),
'timestamp': '2012-06-27 18:17:01',
'timestamp_desc': definitions.TIME_DESCRIPTION_UNKNOWN,
'username': 'root'}]
def testWriteSerializedDict(self):
"""Tests the _WriteSerializedDict function."""
output_mediator = self._CreateOutputMediator()
formatters_directory_path = self._GetTestFilePath(['formatters'])
output_mediator.ReadMessageFormattersFromDirectory(
formatters_directory_path)
formatting_helper = shared_json.JSONEventFormattingHelper(output_mediator)
event, event_data, event_data_stream = (
containers_test_lib.CreateEventFromValues(self._TEST_EVENTS[0]))
expected_timestamp = shared_test_lib.CopyTimestampFromString(
'2012-06-27 18:17:01')
if sys.platform.startswith('win'):
# The dict comparison is very picky on Windows hence we
# have to make sure the drive letter is in the same case.
expected_os_location = os.path.abspath('\\{0:s}'.format(
os.path.join('cases', 'image.dd')))
else:
expected_os_location = '{0:s}{1:s}'.format(
os.path.sep, os.path.join('cases', 'image.dd'))
expected_json_dict = {
'__container_type__': 'event',
'__type__': 'AttributeContainer',
'date_time': {
'__class_name__': 'PosixTimeInMicroseconds',
'__type__': 'DateTimeValues',
'timestamp': 1340821021000000,
},
'data_type': 'test:event',
'display_name': 'TSK:/var/log/syslog.1',
'filename': '/var/log/syslog.1',
'hostname': 'ubuntu',
'inode': '15',
'message': (
'Reporter <CRON> PID: |8442| (pam_unix(cron:session): session '
'closed for user root)'),
'pathspec': {
'__type__': 'PathSpec',
'type_indicator': 'TSK',
'location': '/var/log/syslog.1',
'inode': 15,
'parent': {
'__type__': 'PathSpec',
'type_indicator': 'OS',
'location': expected_os_location,
}
},
'text': (
'Reporter <CRON> PID: |8442| (pam_unix(cron:session): '
'session\n closed for user root)'),
'timestamp': expected_timestamp,
'timestamp_desc': definitions.TIME_DESCRIPTION_UNKNOWN,
'username': 'root',
}
json_dict = formatting_helper._WriteSerializedDict(
event, event_data, event_data_stream, None)
self.assertEqual(json_dict, expected_json_dict)
def testGetFormattedEvent(self):
"""Tests the GetFormattedEvent function."""
output_mediator = self._CreateOutputMediator()
formatters_directory_path = self._GetTestFilePath(['formatters'])
output_mediator.ReadMessageFormattersFromDirectory(
formatters_directory_path)
formatting_helper = shared_json.JSONEventFormattingHelper(output_mediator)
event, event_data, event_data_stream = (
containers_test_lib.CreateEventFromValues(self._TEST_EVENTS[0]))
if sys.platform.startswith('win'):
# The dict comparison is very picky on Windows hence we
# have to make sure the drive letter is in the same case.
expected_os_location = os.path.abspath('\\{0:s}'.format(
os.path.join('cases', 'image.dd')))
expected_os_location = expected_os_location.replace('\\', '\\\\')
else:
expected_os_location = '{0:s}{1:s}'.format(
os.path.sep, os.path.join('cases', 'image.dd'))
expected_json_string = (
'{{"__container_type__": "event", "__type__": "AttributeContainer", '
'"data_type": "test:event", "date_time": {{"__class_name__": '
'"PosixTimeInMicroseconds", "__type__": "DateTimeValues", "timestamp": '
'1340821021000000}}, "display_name": "TSK:/var/log/syslog.1", '
'"filename": "/var/log/syslog.1", "hostname": "ubuntu", "inode": '
'"15", "message": "Reporter <CRON> PID: |8442| '
'(pam_unix(cron:session): session closed for user root)", "pathspec": '
'{{"__type__": "PathSpec", "inode": 15, "location": '
'"/var/log/syslog.1", "parent": {{"__type__": "PathSpec", "location": '
'"{0:s}", "type_indicator": "OS"}}, "type_indicator": "TSK"}}, "text": '
'"Reporter <CRON> PID: |8442| (pam_unix(cron:session): session\\n '
'closed for user root)", "timestamp": 1340821021000000, '
'"timestamp_desc": "Unknown Time", "username": "root"}}').format(
expected_os_location)
json_string = formatting_helper.GetFormattedEvent(
event, event_data, event_data_stream, None)
self.assertEqual(json_string, expected_json_string)
if __name__ == '__main__':
unittest.main()
```
#### File: tests/parsers/bencode_parser.py
```python
import unittest
from plaso.parsers import bencode_parser
from plaso.parsers import bencode_plugins # pylint: disable=unused-import
from tests.parsers import test_lib
class BencodeTest(test_lib.ParserTestCase):
"""Tests for the Bencode file parser."""
# pylint: disable=protected-access
def testEnablePlugins(self):
"""Tests the EnablePlugins function."""
parser = bencode_parser.BencodeParser()
number_of_plugins = len(parser._plugin_classes)
parser.EnablePlugins([])
self.assertEqual(len(parser._plugins), 0)
parser.EnablePlugins(parser.ALL_PLUGINS)
self.assertEqual(len(parser._plugins), number_of_plugins)
parser.EnablePlugins(['bencode_transmission'])
self.assertEqual(len(parser._plugins), 1)
if __name__ == '__main__':
unittest.main()
```
#### File: parsers/cookie_plugins/ganalytics.py
```python
import unittest
from plaso.lib import definitions
from plaso.parsers.cookie_plugins import ganalytics # pylint: disable=unused-import
from plaso.parsers.sqlite_plugins import chrome_cookies
from plaso.parsers.sqlite_plugins import firefox_cookies
from tests.parsers.sqlite_plugins import test_lib as sqlite_plugins_test_lib
class GoogleAnalyticsPluginTest(sqlite_plugins_test_lib.SQLitePluginTestCase):
"""Tests for the Google Analytics plugin."""
def testParsingFirefox29CookieDatabase(self):
"""Tests the Process function on a Firefox 29 cookie database file."""
plugin = firefox_cookies.FirefoxCookiePlugin()
storage_writer = self._ParseDatabaseFileWithPlugin(
['firefox_cookies.sqlite'], plugin)
number_of_events = storage_writer.GetNumberOfAttributeContainers('event')
self.assertEqual(number_of_events, 295)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'extraction_warning')
self.assertEqual(number_of_warnings, 0)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'recovery_warning')
self.assertEqual(number_of_warnings, 0)
cookie_events = []
for event in storage_writer.GetEvents():
event_data = self._GetEventDataOfEvent(storage_writer, event)
if event_data.data_type.startswith('cookie:google:analytics'):
cookie_events.append(event)
self.assertEqual(len(cookie_events), 25)
expected_event_values = {
'cookie_name': '__utmz',
'data_type': 'cookie:google:analytics:utmz',
'date_time': '2013-10-30 21:56:06',
'domain_hash': '137167072',
'sessions': 1,
'sources': 1,
'url': 'http://ads.aha.is/',
'utmccn': '(referral)',
'utmcct': (
'/frettir/erlent/2013/10/30/maelt_med_kerfisbundnum_hydingum/'),
'utmcmd': 'referral',
'utmcsr': 'mbl.is'}
self.CheckEventValues(
storage_writer, cookie_events[14], expected_event_values)
def testParsingChromeCookieDatabase(self):
"""Test the process function on a Chrome cookie database."""
plugin = chrome_cookies.Chrome17CookiePlugin()
storage_writer = self._ParseDatabaseFileWithPlugin(['cookies.db'], plugin)
number_of_events = storage_writer.GetNumberOfAttributeContainers('event')
self.assertEqual(number_of_events, 1755)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'extraction_warning')
self.assertEqual(number_of_warnings, 0)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'recovery_warning')
self.assertEqual(number_of_warnings, 0)
cookie_events = []
for event in storage_writer.GetEvents():
event_data = self._GetEventDataOfEvent(storage_writer, event)
if event_data.data_type.startswith('cookie:google:analytics'):
cookie_events.append(event)
# There are 75 events created by the Google Analytics plugin.
self.assertEqual(len(cookie_events), 75)
# Check few "random" events to verify.
# Check an UTMZ Google Analytics event.
expected_event_values = {
'cookie_name': '__utmz',
'data_type': 'cookie:google:analytics:utmz',
'domain_hash': '68898382',
'sessions': 1,
'sources': 1,
'url': 'http://imdb.com/',
'utmccn': '(organic)',
'utmctr': 'enders game',
'utmcmd': 'organic',
'utmcsr': 'google'}
self.CheckEventValues(
storage_writer, cookie_events[39], expected_event_values)
# Check the UTMA Google Analytics event.
expected_event_values = {
'cookie_name': '__utma',
'data_type': 'cookie:google:analytics:utma',
'date_time': '2012-03-22 01:55:29',
'domain_hash': '151488169',
'sessions': 2,
'timestamp_desc': 'Analytics Previous Time',
'url': 'http://assets.tumblr.com/',
'visitor_id': '1827102436'}
self.CheckEventValues(
storage_writer, cookie_events[41], expected_event_values)
# Check the UTMB Google Analytics event.
expected_event_values = {
'cookie_name': '__utmb',
'data_type': 'cookie:google:analytics:utmb',
'date_time': '2012-03-22 01:48:30',
'domain_hash': '154523900',
'pages_viewed': 1,
'timestamp_desc': definitions.TIME_DESCRIPTION_LAST_VISITED,
'url': 'http://upressonline.com/'}
self.CheckEventValues(
storage_writer, cookie_events[34], expected_event_values)
if __name__ == '__main__':
unittest.main()
```
#### File: parsers/esedb_plugins/srum.py
```python
import collections
import unittest
from plaso.lib import definitions
from plaso.parsers.esedb_plugins import srum
from tests.parsers.esedb_plugins import test_lib
class SystemResourceUsageMonitorESEDBPluginTest(test_lib.ESEDBPluginTestCase):
"""Tests for the System Resource Usage Monitor (SRUM) ESE database plugin."""
def testProcess(self):
"""Tests the Process function."""
plugin = srum.SystemResourceUsageMonitorESEDBPlugin()
storage_writer = self._ParseESEDBFileWithPlugin(['SRUDB.dat'], plugin)
# TODO: confirm this is working as intended. Also see: #2134
number_of_events = storage_writer.GetNumberOfAttributeContainers('event')
self.assertEqual(number_of_events, 18543)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'extraction_warning')
self.assertEqual(number_of_warnings, 2)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'recovery_warning')
self.assertEqual(number_of_warnings, 0)
events = list(storage_writer.GetSortedEvents())
data_types = collections.Counter()
for event in events:
event_data = self._GetEventDataOfEvent(storage_writer, event)
data_types[event_data.data_type] += 1
self.assertEqual(len(data_types.keys()), 3)
self.assertEqual(data_types['windows:srum:application_usage'], 16183)
self.assertEqual(data_types['windows:srum:network_connectivity'], 520)
self.assertEqual(data_types['windows:srum:network_usage'], 1840)
# Test event with data type windows:srum:application_usage
expected_event_values = {
'application': 'Memory Compression',
'data_type': 'windows:srum:application_usage',
'date_time': '2017-11-05 11:32:00.000000',
'identifier': 22167,
'timestamp_desc': definitions.TIME_DESCRIPTION_SAMPLE}
self.CheckEventValues(storage_writer, events[92], expected_event_values)
# Test event with data type windows:srum:network_connectivity
expected_event_values = {
'application': 1,
'data_type': 'windows:srum:network_connectivity',
'date_time': '2017-11-05 10:30:48.1679714',
'identifier': 501,
'timestamp_desc': definitions.TIME_DESCRIPTION_FIRST_CONNECTED}
self.CheckEventValues(storage_writer, events[2], expected_event_values)
# Test event with data type windows:srum:network_usage
expected_event_values = {
'application': 'DiagTrack',
'bytes_sent': 2076,
'data_type': 'windows:srum:network_usage',
'date_time': '2017-11-05 11:32:00.000000',
'identifier': 3495,
'interface_luid': 1689399632855040,
'timestamp_desc': definitions.TIME_DESCRIPTION_SAMPLE,
'user_identifier': 'S-1-5-18'}
self.CheckEventValues(storage_writer, events[8], expected_event_values)
if __name__ == '__main__':
unittest.main()
```
#### File: tests/parsers/msiecf.py
```python
import unittest
from plaso.lib import definitions
from plaso.parsers import msiecf
from tests.parsers import test_lib
class MSIECFParserTest(test_lib.ParserTestCase):
"""Tests for the MSIE Cache Files (MSIECF) parser."""
def testParse(self):
"""Tests the Parse function."""
parser = msiecf.MSIECFParser()
storage_writer = self._ParseFile(['index.dat'], parser)
# MSIE Cache File information:
# Version : 5.2
# File size : 32768 bytes
# Number of items : 7
# Number of recovered items : 11
# 7 + 11 records, each with 4 records.
number_of_events = storage_writer.GetNumberOfAttributeContainers('event')
self.assertEqual(number_of_events, (7 + 11) * 4)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'extraction_warning')
self.assertEqual(number_of_warnings, 0)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'recovery_warning')
self.assertEqual(number_of_warnings, 0)
events = list(storage_writer.GetEvents())
# Record type : URL
# Offset range : 21376 - 21632 (256)
# Location : Visited: testing@http://www.trafficfusionx.com
# /download/tfscrn2/funnycats.exe
# Primary time : Jun 23, 2011 18:02:10.066000000
# Secondary time : Jun 23, 2011 18:02:10.066000000
# Expiration time : Jun 29, 2011 17:55:02
# Last checked time : Jun 23, 2011 18:02:12
# Cache directory index : -2 (0xfe)
expected_event_values = {
'cache_directory_index': -2,
'cached_file_size': 0,
'data_type': 'msiecf:url',
'date_time': '2011-06-23 18:02:10.0660000',
'number_of_hits': 6,
'offset': 21376,
'timestamp_desc': definitions.TIME_DESCRIPTION_LAST_VISITED,
'url': (
'Visited: testing@http://www.trafficfusionx.com/download/tfscrn2'
'/funnycats.exe')}
self.CheckEventValues(storage_writer, events[8], expected_event_values)
expected_event_values = {
'data_type': 'msiecf:url',
'date_time': '2011-06-23 18:02:10.0660000',
'timestamp_desc': definitions.TIME_DESCRIPTION_LAST_VISITED}
self.CheckEventValues(storage_writer, events[9], expected_event_values)
expected_event_values = {
'data_type': 'msiecf:url',
'date_time': '2011-06-29 17:55:02',
'timestamp_desc': definitions.TIME_DESCRIPTION_EXPIRATION}
self.CheckEventValues(storage_writer, events[10], expected_event_values)
expected_event_values = {
'data_type': 'msiecf:url',
'date_time': '2011-06-23 18:02:12',
'timestamp_desc': definitions.TIME_DESCRIPTION_LAST_CHECKED}
self.CheckEventValues(storage_writer, events[11], expected_event_values)
def testParseLeakAndRedirect(self):
"""Tests the Parse function with leak and redirected records."""
parser = msiecf.MSIECFParser()
storage_writer = self._ParseFile(['nfury_index.dat'], parser)
# MSIE Cache File information:
# Version : 5.2
# File size : 491520 bytes
# Number of items : 1027
# Number of recovered items : 8
number_of_events = storage_writer.GetNumberOfAttributeContainers('event')
self.assertEqual(number_of_events, 2898)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'extraction_warning')
self.assertEqual(number_of_warnings, 0)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'recovery_warning')
self.assertEqual(number_of_warnings, 0)
events = list(storage_writer.GetEvents())
expected_event_values = {
'cache_directory_index': 0,
'cache_directory_name': 'R6QWCVX4',
'cached_file_size': 4286,
'cached_filename': 'favicon[1].ico',
'data_type': 'msiecf:url',
'date_time': '2010-11-10 07:54:32',
'http_headers': (
'HTTP/1.1 200 OK\r\n'
'Content-Type: image/x-icon\r\n'
'ETag: "0922651f38cb1:0",\r\n'
'X-Powered-By: ASP.NET\r\n'
'P3P: CP="BUS CUR CONo FIN IVDo ONL OUR PHY SAMo TELo"\r\n'
'Content-Length: 4286\r\n'
'\r\n'
'~U:nfury\r\n'),
'number_of_hits': 1,
'timestamp_desc': definitions.TIME_DESCRIPTION_LAST_CHECKED,
'url': 'http://col.stc.s-msn.com/br/gbl/lg/csl/favicon.ico'}
self.CheckEventValues(storage_writer, events[3], expected_event_values)
expected_event_values = {
'cache_directory_index': 1,
'cache_directory_name': 'VUQHQA73',
'cached_file_size': 1966,
'cached_filename': 'ADSAdClient31[1].htm',
'data_type': 'msiecf:leak',
'date_time': 'Not set',
'recovered': False,
'timestamp_desc': definitions.TIME_DESCRIPTION_NOT_A_TIME}
self.CheckEventValues(storage_writer, events[16], expected_event_values)
expected_event_values = {
'data_type': 'msiecf:redirected',
'date_time': 'Not set',
'recovered': False,
'timestamp_desc': definitions.TIME_DESCRIPTION_NOT_A_TIME,
'url': (
'http://ad.doubleclick.net/ad/N2724.Meebo/B5343067.13;'
'sz=1x1;pc=[TPAS_ID];ord=2642102')}
self.CheckEventValues(storage_writer, events[21], expected_event_values)
def testParseWithTimeZone(self):
"""Tests the Parse function with a time zone."""
parser = msiecf.MSIECFParser()
storage_writer = self._ParseFile(
['MSHist012013031020130311-index.dat'], parser,
timezone='Europe/Amsterdam')
number_of_events = storage_writer.GetNumberOfAttributeContainers('event')
self.assertEqual(number_of_events, 83)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'extraction_warning')
self.assertEqual(number_of_warnings, 0)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'recovery_warning')
self.assertEqual(number_of_warnings, 0)
events = list(storage_writer.GetEvents())
# Test primary last visited time, in UTC, event.
expected_event_values = {
'date_time': '2013-03-10 10:18:17.2810000',
'timestamp': '2013-03-10 10:18:17.281000',
'timestamp_desc': definitions.TIME_DESCRIPTION_LAST_VISITED,
'url': ':2013031020130311: -@:Host: libmsiecf.googlecode.com'}
self.CheckEventValues(storage_writer, events[80], expected_event_values)
# Test secondary last visited time, in local time, event.
expected_event_values = {
'date_time': '2013-03-10 11:18:17.2810000',
'timestamp': '2013-03-10 10:18:17.281000',
'timestamp_desc': definitions.TIME_DESCRIPTION_LAST_VISITED,
'url': ':2013031020130311: -@:Host: libmsiecf.googlecode.com'}
self.CheckEventValues(storage_writer, events[81], expected_event_values)
# Test last checked time event.
expected_event_values = {
'date_time': '2013-03-10 10:18:18',
'timestamp': '2013-03-10 10:18:18.000000',
'timestamp_desc': definitions.TIME_DESCRIPTION_LAST_CHECKED,
'url': ':2013031020130311: -@:Host: libmsiecf.googlecode.com'}
self.CheckEventValues(storage_writer, events[82], expected_event_values)
if __name__ == '__main__':
unittest.main()
```
#### File: tests/parsers/pe.py
```python
import unittest
from plaso.lib import definitions
from plaso.parsers import pe
from tests.parsers import test_lib
class PECOFFTest(test_lib.ParserTestCase):
"""Tests for the PE file parser."""
def testParseFileObjectOnExecutable(self):
"""Tests the ParseFileObject on a PE executable (EXE) file."""
parser = pe.PEParser()
storage_writer = self._ParseFile(['test_pe.exe'], parser)
number_of_events = storage_writer.GetNumberOfAttributeContainers('event')
self.assertEqual(number_of_events, 3)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'extraction_warning')
self.assertEqual(number_of_warnings, 0)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'recovery_warning')
self.assertEqual(number_of_warnings, 0)
events = list(storage_writer.GetSortedEvents())
expected_event_values = {
'data_type': 'pe',
'date_time': '2015-04-21 14:53:56',
'pe_attribute': None,
'pe_type': 'Executable (EXE)',
'timestamp_desc': definitions.TIME_DESCRIPTION_CREATION}
self.CheckEventValues(storage_writer, events[2], expected_event_values)
expected_event_values = {
'data_type': 'pe',
'date_time': '2015-04-21 14:53:55',
'pe_attribute': 'DIRECTORY_ENTRY_IMPORT',
'pe_type': 'Executable (EXE)',
'timestamp_desc': definitions.TIME_DESCRIPTION_MODIFICATION}
self.CheckEventValues(storage_writer, events[1], expected_event_values)
expected_event_values = {
'data_type': 'pe',
'date_time': '2015-04-21 14:53:54',
'dll_name': 'USER32.dll',
'imphash': '8d0739063fc8f9955cc6696b462544ab',
'pe_attribute': 'DIRECTORY_ENTRY_DELAY_IMPORT',
'pe_type': 'Executable (EXE)',
'timestamp_desc': definitions.TIME_DESCRIPTION_MODIFICATION}
self.CheckEventValues(storage_writer, events[0], expected_event_values)
def testParseFileObjectOnDriver(self):
"""Tests the ParseFileObject on a PE driver (SYS) file."""
parser = pe.PEParser()
storage_writer = self._ParseFile(['test_driver.sys'], parser)
number_of_events = storage_writer.GetNumberOfAttributeContainers('event')
self.assertEqual(number_of_events, 1)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'extraction_warning')
self.assertEqual(number_of_warnings, 0)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'recovery_warning')
self.assertEqual(number_of_warnings, 0)
events = list(storage_writer.GetSortedEvents())
expected_event_values = {
'data_type': 'pe',
'date_time': '2015-04-21 14:53:54',
'pe_attribute': None,
'pe_type': 'Driver (SYS)',
'timestamp_desc': definitions.TIME_DESCRIPTION_CREATION}
self.CheckEventValues(storage_writer, events[0], expected_event_values)
if __name__ == '__main__':
unittest.main()
```
#### File: parsers/plist_plugins/test_lib.py
```python
import plistlib
from plaso.storage.fake import writer as fake_writer
from tests.parsers import test_lib
class PlistPluginTestCase(test_lib.ParserTestCase):
"""The unit test case for a plist plugin."""
def _ParsePlistFileWithPlugin(
self, plugin, path_segments, plist_name,
knowledge_base_values=None):
"""Parses a file using the parser and plugin object.
Args:
plugin (PlistPlugin): a plist plugin.
path_segments (list[str]): the path segments inside the test data
directory to the test file.
plist_name (str): name of the plist to parse.
knowledge_base_values (Optional[dict[str, object]]): knowledge base
values.
Returns:
FakeStorageWriter: a storage writer.
Raises:
SkipTest: if the path inside the test data directory does not exist and
the test should be skipped.
"""
file_entry = self._GetTestFileEntry(path_segments)
file_object = file_entry.GetFileObject()
top_level_object = plistlib.load(file_object)
self.assertIsNotNone(top_level_object)
return self._ParsePlistWithPlugin(
plugin, plist_name, top_level_object,
knowledge_base_values=knowledge_base_values)
def _ParsePlistWithPlugin(
self, plugin, plist_name, top_level_object,
knowledge_base_values=None):
"""Parses a plist using the plugin object.
Args:
plugin (PlistPlugin): a plist plugin.
plist_name (str): name of the plist to parse.
top_level_object (dict[str, object]): plist top-level key.
knowledge_base_values (Optional[dict[str, object]]): knowledge base
values.
Returns:
FakeStorageWriter: a storage writer.
"""
storage_writer = fake_writer.FakeStorageWriter()
storage_writer.Open()
parser_mediator = self._CreateParserMediator(
storage_writer, knowledge_base_values=knowledge_base_values)
plugin.Process(
parser_mediator, plist_name=plist_name, top_level=top_level_object)
return storage_writer
```
#### File: tests/parsers/selinux.py
```python
import unittest
from plaso.parsers import selinux
from tests.parsers import test_lib
class SELinuxUnitTest(test_lib.ParserTestCase):
"""Tests for the selinux log file parser."""
def testParse(self):
"""Tests the Parse function."""
parser = selinux.SELinuxParser()
knowledge_base_values = {'year': 2013}
storage_writer = self._ParseFile(
['selinux.log'], parser,
knowledge_base_values=knowledge_base_values)
number_of_events = storage_writer.GetNumberOfAttributeContainers('event')
self.assertEqual(number_of_events, 7)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'extraction_warning')
self.assertEqual(number_of_warnings, 4)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'recovery_warning')
self.assertEqual(number_of_warnings, 0)
events = list(storage_writer.GetEvents())
# Test case: normal entry.
expected_event_values = {
'audit_type': 'LOGIN',
'body': (
'pid=25443 uid=0 old auid=4294967295 new auid=0 old ses=4294967295 '
'new ses=1165'),
'date_time': '2012-05-24 07:40:01.174000',
'data_type': 'selinux:line',
'pid': '25443'}
self.CheckEventValues(storage_writer, events[0], expected_event_values)
# Test case: short date.
expected_event_values = {
'audit_type': 'SHORTDATE',
'body': 'check rounding',
'date_time': '2012-05-24 07:40:01.000000',
'data_type': 'selinux:line'}
self.CheckEventValues(storage_writer, events[1], expected_event_values)
# Test case: no message.
expected_event_values = {
'audit_type': 'NOMSG',
'date_time': '2012-05-24 07:40:22.174000',
'data_type': 'selinux:line'}
self.CheckEventValues(storage_writer, events[2], expected_event_values)
# Test case: under score.
expected_event_values = {
'audit_type': 'UNDER_SCORE',
'body': (
'pid=25444 uid=0 old auid=4294967295 new auid=54321 old '
'ses=4294967295 new ses=1166'),
'date_time': '2012-05-24 07:47:46.174000',
'data_type': 'selinux:line',
'pid': '25444'}
self.CheckEventValues(storage_writer, events[3], expected_event_values)
if __name__ == '__main__':
unittest.main()
```
#### File: tests/parsers/skydrivelog.py
```python
import unittest
from plaso.parsers import skydrivelog
from tests.parsers import test_lib
class SkyDriveLogUnitTest(test_lib.ParserTestCase):
"""Tests for the SkyDrive log parser."""
def testParseErrorLog(self):
"""Tests the Parse function or error log."""
parser = skydrivelog.SkyDriveLogParser()
storage_writer = self._ParseFile(['skydriveerr.log'], parser)
number_of_events = storage_writer.GetNumberOfAttributeContainers('event')
self.assertEqual(number_of_events, 19)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'extraction_warning')
self.assertEqual(number_of_warnings, 0)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'recovery_warning')
self.assertEqual(number_of_warnings, 0)
events = list(storage_writer.GetSortedEvents())
expected_event_values = {
'date_time': '2013-07-25 16:03:23.291',
'data_type': 'skydrive:log:line',
'detail': (
'Logging started. Version= 17.0.2011.0627 StartLocalTime: '
'2013-07-25-180323.291 PID=0x8f4 TID=0x718 ContinuedFrom=')}
self.CheckEventValues(storage_writer, events[0], expected_event_values)
expected_event_values = {
'date_time': '2013-07-25 16:03:24.649',
'data_type': 'skydrive:log:line',
'detail': 'Sign in failed : DRX_E_AUTH_NO_VALID_CREDENTIALS,',
'log_level': 'ERR',
'module': 'AUTH',
'source_code': 'authapi.cpp(280)'}
self.CheckEventValues(storage_writer, events[1], expected_event_values)
expected_event_values = {
'date_time': '2013-08-01 21:27:44.124',
'data_type': 'skydrive:log:line',
'detail': (
'Received data from server,dwID=0x0;dwSize=0x3e;pbData=PNG 9 '
'CON 48 <ping-response><wait>44</wait></ping-response>'),
'log_level': 'VRB',
'module': 'WNS',
'source_code': 'absconn.cpp(177)'}
self.CheckEventValues(storage_writer, events[18], expected_event_values)
def testParseErrorLogUnicode(self):
"""Tests the Parse function on Unicode error log."""
parser = skydrivelog.SkyDriveLogParser()
storage_writer = self._ParseFile(['skydriveerr-unicode.log'], parser)
number_of_events = storage_writer.GetNumberOfAttributeContainers('event')
self.assertEqual(number_of_events, 19)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'extraction_warning')
self.assertEqual(number_of_warnings, 0)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'recovery_warning')
self.assertEqual(number_of_warnings, 0)
events = list(storage_writer.GetSortedEvents())
expected_event_values = {
'date_time': '2013-07-25 16:04:02.669',
'data_type': 'skydrive:log:line',
'detail': (
'No node found named Passport-Jméno-člena, no user name '
'available,')}
self.CheckEventValues(storage_writer, events[3], expected_event_values)
def testParseLog(self):
"""Tests the Parse function on normal log."""
parser = skydrivelog.SkyDriveLogParser()
storage_writer = self._ParseFile(['skydrive.log'], parser)
number_of_events = storage_writer.GetNumberOfAttributeContainers('event')
self.assertEqual(number_of_events, 17)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'extraction_warning')
self.assertEqual(number_of_warnings, 0)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'recovery_warning')
self.assertEqual(number_of_warnings, 0)
events = list(storage_writer.GetSortedEvents())
expected_event_values = {
'date_time': '2013-08-12 01:08:52.985',
'data_type': 'skydrive:log:line'}
self.CheckEventValues(storage_writer, events[0], expected_event_values)
expected_event_values = {
'date_time': '2013-08-12 01:10:08.835',
'data_type': 'skydrive:log:line'}
self.CheckEventValues(storage_writer, events[1], expected_event_values)
expected_event_values = {
'date_time': '2013-08-12 02:52:32.976',
'data_type': 'skydrive:log:line',
'detail': (
'Received data from server,dwID=0x0;dwSize=0x15a;pbData=GET 5 '
'WNS 331 Context: 2891 <channel-response><id>1;'
'13714367258539257282</id><exp>2013-09-11T02:52:37Z</exp><url>'
'https://bn1.notify.windows.com/?token=<KEY>'
'5TIv0Kz317BKYIAfBNO6szULCOEE2393owBINnPC5xoika5SJlNtXZ%2bwzaR'
'VsPRcP1p64XFn90vGwr07DGZxfna%2bxBpBBplzZhLV9y%2fNV%2bBPxNmTI5'
'sRgaZ%2foGvYCIj6MdeU1</url></channel-response>'),
'log_level': 'VRB',
'module': 'WNS',
'source_code': 'absconn.cpp(177)'}
self.CheckEventValues(storage_writer, events[11], expected_event_values)
expected_event_values = {
'date_time': '2013-08-12 03:18:57.232',
'data_type': 'skydrive:log:line',
'detail': (
'Logging started. Version= 17.0.2011.0627 StartLocalTime: '
'2013-08-11-231857.232 PID=0x1ef0 TID=0x1ef4 ContinuedFrom=')}
self.CheckEventValues(storage_writer, events[13], expected_event_values)
expected_event_values = {
'date_time': '2013-08-31 03:45:37.940',
'data_type': 'skydrive:log:line',
'detail': (
',output=GET <- /MyData/LiveFolders?Filter=changes&InlineBlobs='
'false&MaxItemCount=50&SyncToken=<PASSWORD>'
'95149027848ED!103%3bLR%3d63513517536493%3bEP%3d2%3bTD%3dTrue&'
'View=SkyDriveSync;m_httpStatus=0x130;hr=8004db30;m_pSink=null;'
'cb=0x0;msec=0x4e'),
'log_level': 'VRB',
'module': 'PAL',
'source_code': 'cwinhttp.cpp(1581)'}
self.CheckEventValues(storage_writer, events[15], expected_event_values)
class SkyDriveOldLogUnitTest(test_lib.ParserTestCase):
"""Tests for the SkyDrive old log parser."""
def testParse(self):
"""Tests the Parse function."""
parser = skydrivelog.SkyDriveOldLogParser()
storage_writer = self._ParseFile(['skydrive_old.log'], parser)
number_of_events = storage_writer.GetNumberOfAttributeContainers('event')
self.assertEqual(number_of_events, 18)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'extraction_warning')
self.assertEqual(number_of_warnings, 1)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'recovery_warning')
self.assertEqual(number_of_warnings, 0)
events = list(storage_writer.GetSortedEvents())
expected_event_values = {
'date_time': '2013-08-01 21:22:28.999',
'data_type': 'skydrive:log:old:line',
'log_level': 'DETAIL',
'source_code': 'global.cpp:626!logVersionInfo',
'text': '17.0.2011.0627 (Ship)'}
self.CheckEventValues(storage_writer, events[0], expected_event_values)
expected_event_values = {
'date_time': '2013-08-01 21:22:29.702',
'data_type': 'skydrive:log:old:line'}
self.CheckEventValues(storage_writer, events[1], expected_event_values)
expected_event_values = {
'date_time': '2013-08-01 21:22:29.702',
'data_type': 'skydrive:log:old:line'}
self.CheckEventValues(storage_writer, events[2], expected_event_values)
expected_event_values = {
'date_time': '2013-08-01 21:22:29.702',
'data_type': 'skydrive:log:old:line',
'text': (
'SyncToken = LM%3d12345<PASSWORD>0%3bID%3d<PASSWORD>!'
'103%3bLR%3d12345678905623%3aEP%3d2')}
self.CheckEventValues(storage_writer, events[3], expected_event_values)
expected_event_values = {
'date_time': '2013-08-01 21:22:58.344',
'data_type': 'skydrive:log:old:line'}
self.CheckEventValues(storage_writer, events[4], expected_event_values)
expected_event_values = {
'date_time': '2013-08-01 21:22:58.344',
'data_type': 'skydrive:log:old:line'}
self.CheckEventValues(storage_writer, events[5], expected_event_values)
expected_event_values = {
'date_time': '2013-08-01 21:28:46.742',
'data_type': 'skydrive:log:old:line',
'text': 'SyncToken = Not a sync token (\xe0\xe8\xec\xf2\xf9)!'}
self.CheckEventValues(storage_writer, events[17], expected_event_values)
if __name__ == '__main__':
unittest.main()
```
#### File: parsers/sqlite_plugins/gdrive.py
```python
import unittest
from plaso.lib import definitions
from plaso.parsers.sqlite_plugins import gdrive
from tests.parsers.sqlite_plugins import test_lib
class GoogleDrivePluginTest(test_lib.SQLitePluginTestCase):
"""Tests for the Google Drive database plugin."""
def testProcess(self):
"""Tests the Process function on a Google Drive database file."""
plugin = gdrive.GoogleDrivePlugin()
storage_writer = self._ParseDatabaseFileWithPlugin(['snapshot.db'], plugin)
number_of_events = storage_writer.GetNumberOfAttributeContainers('event')
self.assertEqual(number_of_events, 30)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'extraction_warning')
self.assertEqual(number_of_warnings, 0)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'recovery_warning')
self.assertEqual(number_of_warnings, 0)
# Let's verify that we've got the correct balance of cloud and local
# entry events.
# 10 files mounting to:
# 20 Cloud Entries (two timestamps per entry).
# 10 Local Entries (one timestamp per entry).
local_entries = []
cloud_entries = []
for event in storage_writer.GetEvents():
event_data = self._GetEventDataOfEvent(storage_writer, event)
if event_data.data_type == 'gdrive:snapshot:local_entry':
local_entries.append(event)
else:
cloud_entries.append(event)
self.assertEqual(len(local_entries), 10)
self.assertEqual(len(cloud_entries), 20)
# Test one local and one cloud entry.
expected_event_values = {
'data_type': 'gdrive:snapshot:local_entry',
'date_time': '2014-01-28 00:11:25',
'path': (
'%local_sync_root%/Top Secret/Enn meiri '
'leyndarmál/Sýnileiki - Örverpi.gdoc'),
'size': 184}
self.CheckEventValues(
storage_writer, local_entries[5], expected_event_values)
expected_event_values = {
'data_type': 'gdrive:snapshot:cloud_entry',
'date_time': '2014-01-28 00:12:27',
'document_type': 6,
'path': '/Almenningur/Saklausa hliðin',
'size': 0,
'timestamp_desc': definitions.TIME_DESCRIPTION_MODIFICATION,
'url': (
'https://docs.google.com/document/d/1ypXwXhQWliiMSQN9S5M0K6Wh39XF4U'
'z4GmY-njMf-Z0/edit?usp=docslist_api')}
self.CheckEventValues(
storage_writer, cloud_entries[16], expected_event_values)
if __name__ == '__main__':
unittest.main()
```
#### File: tests/parsers/syslog.py
```python
import unittest
from plaso.parsers import syslog
from tests.parsers import test_lib
class SyslogParserTest(test_lib.ParserTestCase):
"""Tests for the syslog parser."""
# pylint: disable=protected-access
def testEnablePlugins(self):
"""Tests the EnablePlugins function."""
parser = syslog.SyslogParser()
number_of_plugins = len(parser._plugin_classes)
parser.EnablePlugins([])
self.assertEqual(len(parser._plugins), 0)
parser.EnablePlugins(parser.ALL_PLUGINS)
self.assertEqual(len(parser._plugins), number_of_plugins)
parser.EnablePlugins(['cron'])
self.assertEqual(len(parser._plugins), 1)
def testParseRsyslog(self):
"""Tests the Parse function on a rsyslog file."""
parser = syslog.SyslogParser()
knowledge_base_values = {'year': 2020}
storage_writer = self._ParseFile(
['syslog_rsyslog'], parser, knowledge_base_values=knowledge_base_values)
number_of_events = storage_writer.GetNumberOfAttributeContainers('event')
self.assertEqual(number_of_events, 5)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'extraction_warning')
self.assertEqual(number_of_warnings, 0)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'recovery_warning')
self.assertEqual(number_of_warnings, 0)
def testParseRsyslogTraditional(self):
"""Tests the Parse function on a traditional rsyslog file."""
parser = syslog.SyslogParser()
knowledge_base_values = {'year': 2016}
storage_writer = self._ParseFile(
['syslog_rsyslog_traditional'], parser,
knowledge_base_values=knowledge_base_values)
number_of_events = storage_writer.GetNumberOfAttributeContainers('event')
self.assertEqual(number_of_events, 8)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'extraction_warning')
self.assertEqual(number_of_warnings, 0)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'recovery_warning')
self.assertEqual(number_of_warnings, 0)
events = list(storage_writer.GetSortedEvents())
expected_event_values = {
'date_time': '2016-01-22 07:54:32',
'data_type': 'syslog:line',
'hostname': 'myhostname.myhost.com',
'reporter': 'Job',
'severity': None}
self.CheckEventValues(storage_writer, events[0], expected_event_values)
def testParseRsyslogProtocol23(self):
"""Tests the Parse function on a protocol 23 rsyslog file."""
parser = syslog.SyslogParser()
knowledge_base_values = {'year': 2021}
storage_writer = self._ParseFile(
['syslog_rsyslog_SyslogProtocol23Format'], parser,
knowledge_base_values=knowledge_base_values)
number_of_events = storage_writer.GetNumberOfAttributeContainers('event')
self.assertEqual(number_of_events, 9)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'extraction_warning')
self.assertEqual(number_of_warnings, 0)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'recovery_warning')
self.assertEqual(number_of_warnings, 0)
events = list(storage_writer.GetSortedEvents())
expected_event_values = {
'date_time': '2021-03-06 04:07:38.251122',
'data_type': 'syslog:line',
'hostname': 'hostname',
'reporter': 'log_tag',
'severity': 'DEBUG'}
self.CheckEventValues(storage_writer, events[0], expected_event_values)
def testParseRsyslogSysklogd(self):
"""Tests the Parse function on a syslogkd format rsyslog file."""
parser = syslog.SyslogParser()
knowledge_base_values = {'year': 2021}
storage_writer = self._ParseFile(
['syslog_rsyslog_SysklogdFileFormat'], parser,
knowledge_base_values=knowledge_base_values)
number_of_events = storage_writer.GetNumberOfAttributeContainers('event')
self.assertEqual(number_of_events, 9)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'extraction_warning')
self.assertEqual(number_of_warnings, 0)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'recovery_warning')
self.assertEqual(number_of_warnings, 0)
events = list(storage_writer.GetSortedEvents())
expected_event_values = {
'date_time': '2021-03-06 04:07:28',
'data_type': 'syslog:line',
'hostname': 'hostname',
'reporter': 'log_tag',
'severity': None}
self.CheckEventValues(storage_writer, events[0], expected_event_values)
def testParseDarwin(self):
"""Tests the Parse function on an Darwin-style syslog file."""
parser = syslog.SyslogParser()
knowledge_base_values = {'year': 2016}
storage_writer = self._ParseFile(
['syslog_osx'], parser,
knowledge_base_values=knowledge_base_values)
number_of_events = storage_writer.GetNumberOfAttributeContainers('event')
self.assertEqual(number_of_events, 2)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'extraction_warning')
self.assertEqual(number_of_warnings, 0)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'recovery_warning')
self.assertEqual(number_of_warnings, 0)
def testParseChromeOS(self):
"""Tests the Parse function."""
parser = syslog.SyslogParser()
knowledge_base_values = {'year': 2016}
storage_writer = self._ParseFile(
['syslog_chromeos'], parser,
knowledge_base_values=knowledge_base_values)
number_of_events = storage_writer.GetNumberOfAttributeContainers('event')
self.assertEqual(number_of_events, 8)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'extraction_warning')
self.assertEqual(number_of_warnings, 0)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'recovery_warning')
self.assertEqual(number_of_warnings, 0)
events = list(storage_writer.GetSortedEvents())
# Note that syslog_chromeos contains -07:00 as time zone offset.
expected_event_values = {
'body': 'cleanup_logs: job completed',
'date_time': '2016-10-25 12:37:23.297265',
'data_type': 'syslog:line',
'reporter': 'periodic_scheduler',
'pid': 13707,
'severity': 'INFO',
'timestamp': '2016-10-25 19:37:23.297265'}
self.CheckEventValues(storage_writer, events[0], expected_event_values)
expected_event_values = {
'date_time': '2016-10-25 12:37:24.987014',
'data_type': 'syslog:line',
'reporter': 'kernel',
'severity': 'DEBUG'}
self.CheckEventValues(storage_writer, events[2], expected_event_values)
# Testing year increment.
expected_event_values = {
'date_time': '2016-10-25 12:37:24.993079',
'data_type': 'syslog:line',
'reporter': 'kernel',
'severity': 'DEBUG'}
self.CheckEventValues(storage_writer, events[4], expected_event_values)
expected_event_values = {
'date_time': '2016-10-25 12:37:25.007963',
'data_type': 'syslog:line',
'reporter': 'kernel',
'severity': 'ERR'}
self.CheckEventValues(storage_writer, events[6], expected_event_values)
expected_event_values = {
'body': (
'[ 316.587330] cfg80211: This is a multi-line\n\tmessage that '
'screws up many syslog parsers.'),
'date_time': '2016-10-25 12:37:25.014015',
'data_type': 'syslog:line',
'reporter': 'aprocess',
'severity': 'INFO'}
self.CheckEventValues(storage_writer, events[7], expected_event_values)
def testParse(self):
"""Tests the Parse function."""
parser = syslog.SyslogParser()
knowledge_base_values = {'year': 2012}
storage_writer = self._ParseFile(
['syslog'], parser, knowledge_base_values=knowledge_base_values)
number_of_events = storage_writer.GetNumberOfAttributeContainers('event')
self.assertEqual(number_of_events, 16)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'extraction_warning')
self.assertEqual(number_of_warnings, 1)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'recovery_warning')
self.assertEqual(number_of_warnings, 0)
events = list(storage_writer.GetSortedEvents())
expected_event_values = {
'body': 'INFO No new content in ímynd.dd.',
'date_time': '2012-01-22 07:52:33',
'data_type': 'syslog:line',
'hostname': 'myhostname.myhost.com',
'pid': 30840,
'reporter': 'client',
'severity': None}
self.CheckEventValues(storage_writer, events[0], expected_event_values)
expected_event_values = {
'date_time': '2012-02-29 01:15:43',
'data_type': 'syslog:line',
'reporter': '---',
'severity': None}
self.CheckEventValues(storage_writer, events[6], expected_event_values)
# Testing year increment.
expected_event_values = {
'body': 'This syslog message has a fractional value for seconds.',
'date_time': '2013-03-23 23:01:18',
'data_type': 'syslog:line',
'reporter': 'somrandomexe',
'severity': None}
self.CheckEventValues(storage_writer, events[9], expected_event_values)
expected_event_values = {
'date_time': '2013-12-31 17:54:32',
'data_type': 'syslog:line',
'reporter': '/sbin/anacron',
'severity': None}
self.CheckEventValues(storage_writer, events[11], expected_event_values)
expected_event_values = {
'body': (
'This is a multi-line message that screws up\n\tmany syslog '
'parsers.'),
'date_time': '2013-11-18 01:15:20',
'data_type': 'syslog:line',
'pid': 10100,
'reporter': 'aprocess',
'severity': None}
self.CheckEventValues(storage_writer, events[10], expected_event_values)
expected_event_values = {
'body': '[997.390602] sda2: rw=0, want=65, limit=2',
'date_time': '2014-11-18 08:30:20',
'data_type': 'syslog:line',
'hostname': None,
'reporter': 'kernel',
'severity': None}
self.CheckEventValues(storage_writer, events[14], expected_event_values)
# Testing non-leap year.
parser = syslog.SyslogParser()
knowledge_base_values = {'year': 2013}
storage_writer = self._ParseFile(
['syslog'], parser,
knowledge_base_values=knowledge_base_values)
number_of_events = storage_writer.GetNumberOfAttributeContainers('event')
self.assertEqual(number_of_events, 15)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'extraction_warning')
self.assertEqual(number_of_warnings, 2)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'recovery_warning')
self.assertEqual(number_of_warnings, 0)
def testParseWithTimeZone(self):
"""Tests the Parse function with a time zone."""
parser = syslog.SyslogParser()
knowledge_base_values = {'year': 2016}
storage_writer = self._ParseFile(
['syslog_rsyslog_traditional'], parser,
knowledge_base_values=knowledge_base_values, timezone='CET')
number_of_events = storage_writer.GetNumberOfAttributeContainers('event')
self.assertEqual(number_of_events, 8)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'extraction_warning')
self.assertEqual(number_of_warnings, 0)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'recovery_warning')
self.assertEqual(number_of_warnings, 0)
events = list(storage_writer.GetSortedEvents())
expected_event_values = {
'date_time': '2016-01-22 07:54:32',
'data_type': 'syslog:line',
'hostname': 'myhostname.myhost.com',
'reporter': 'Job',
'severity': None,
'timestamp': '2016-01-22 06:54:32.000000'}
self.CheckEventValues(storage_writer, events[0], expected_event_values)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jonathan-gruber/corona-scraper",
"score": 2
} |
#### File: corona-scraper/lambdaFunctions/country_query_history.py
```python
import boto3
import json
from boto3.dynamodb.conditions import Key, Attr
import datetime
from botocore.exceptions import ClientError
import decimal
# Helper class to convert a DynamoDB item to JSON.
class DecimalEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, decimal.Decimal):
if o % 1 > 0:
return float(o)
else:
return int(o)
return super(DecimalEncoder, self).default(o)
def country_query_history(event, context):
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table('corona-db')
query_param = event['queryStringParameters']
pays = query_param['country']
response = table.query(
KeyConditionExpression=Key('country').eq(pays)
)
country_totalcases = []
for i in response['Items']:
country_totalcases.append(i)
body_response = json.dumps(response, indent=4, cls=DecimalEncoder)
return {
"statusCode": 200,
"body": body_response,
"headers": {
"Access-Control-Allow-Origin": "*"
}
}
``` |
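A quick way to exercise the handler above outside AWS is to hand it a minimal API Gateway-style event. This is only a sketch: it assumes the module is importable as `country_query_history`, that AWS credentials are configured, and that the `corona-db` DynamoDB table exists; "Germany" is an arbitrary example country.
```python
# Local invocation sketch for the Lambda handler defined above.
from country_query_history import country_query_history

event = {"queryStringParameters": {"country": "Germany"}}
response = country_query_history(event, None)  # context argument is unused

print(response["statusCode"])
print(response["body"][:200])  # start of the serialized query result
```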
{
"source": "JonathanGzzBen/Todox",
"score": 4
} |
#### File: todox/data/tododata.py
```python
import sqlite3
import os
database_filename = "todox.db"
def create_database_if_not_exists():
if os.path.exists(database_filename):
return
    # Define up front so the finally clause works even if connect() fails.
    sqlite_connection = None
    try:
sqlite_connection = sqlite3.connect(database_filename)
sqlite_create_table_todo_query = """
CREATE TABLE IF NOT EXISTS Todo (
id INTEGER PRIMARY KEY,
content TEXT NOT NULL
);
"""
cursor = sqlite_connection.cursor()
cursor.execute(sqlite_create_table_todo_query)
sqlite_connection.commit()
cursor.close()
except sqlite3.Error as error:
print("Error while creating sqlite table", error)
finally:
if sqlite_connection:
sqlite_connection.close()
def save_todo(content):
try:
create_database_if_not_exists()
sqlite_connection = sqlite3.connect(database_filename)
cursor = sqlite_connection.cursor()
sqlite_insert_todo_query = """
INSERT INTO Todo (content)
VALUES (?)
"""
cursor.execute(sqlite_insert_todo_query, (content,))
sqlite_connection.commit()
cursor.close()
except sqlite3.Error as error:
print("Failed to insert data into sqlite table", error)
finally:
if sqlite_connection:
sqlite_connection.close()
def get_todos():
    # Define up front so the finally clause works even if connect() fails.
    sqlite_connection = None
    try:
create_database_if_not_exists()
sqlite_connection = sqlite3.connect(database_filename)
cursor = sqlite_connection.cursor()
sqlite_select_all_todos = """
SELECT id, content
FROM Todo
"""
cursor.execute(sqlite_select_all_todos)
todos = cursor.fetchall()
cursor.close()
return todos
except sqlite3.Error as error:
print("Failed to read data from sqlite table", error)
finally:
if sqlite_connection:
sqlite_connection.close()
def delete_todo(id):
    # Define up front so the finally clause works even if connect() fails.
    sqlite_connection = None
    try:
create_database_if_not_exists()
sqlite_connection = sqlite3.connect(database_filename)
cursor = sqlite_connection.cursor()
sqlite_delete_todo_query = """
DELETE FROM Todo
WHERE id=?
"""
cursor.execute(sqlite_delete_todo_query, (id,))
sqlite_connection.commit()
cursor.close()
except sqlite3.Error as error:
print("Failed to delete todo", error)
finally:
if sqlite_connection:
sqlite_connection.close()
``` |
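A short usage sketch for the helpers above. The import path follows the file location shown (`todox/data/tododata.py`) and assumes the package is importable; running it creates `todox.db` in the current working directory.
```python
# Usage sketch: add, list, then delete a todo via the helpers above.
from todox.data import tododata

tododata.save_todo("Write the project README")

# get_todos() returns (id, content) tuples from the Todo table.
for todo_id, content in tododata.get_todos():
    print(todo_id, content)

# Remove the first remaining todo, if any.
todos = tododata.get_todos()
if todos:
    tododata.delete_todo(todos[0][0])
```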
{
"source": "jonathanhacker/mopidy-jamendo",
"score": 2
} |
#### File: mopidy-jamendo/mopidy_jamendo/jamendo.py
```python
import logging
from contextlib import closing
from typing import List, Optional
import cachetools.func
import pykka
import requests
from mopidy import backend, httpclient
from mopidy.audio import Audio
from mopidy.models import Album, Artist, Track
from requests import HTTPError
import mopidy_jamendo
logger = logging.getLogger(__name__)
def get_requests_session(
proxy_config: dict, user_agent: str
) -> requests.Session:
proxy = httpclient.format_proxy(proxy_config)
full_user_agent = httpclient.format_user_agent(user_agent)
session = requests.Session()
session.proxies.update({"http": proxy, "https": proxy})
session.headers.update({"user-agent": full_user_agent})
return session
def parse_track(data: dict) -> Optional[Track]:
if not data:
return None
track_kwargs = {}
artist_kwargs = {}
album_kwargs = {}
if "name" in data:
track_kwargs["name"] = data["name"]
if "artist_name" in data:
artist_kwargs["name"] = data["artist_name"]
album_kwargs["name"] = "Jamendo"
if "releasedate" in data:
track_kwargs["date"] = data["releasedate"]
track_kwargs["uri"] = data["audio"]
track_kwargs["length"] = int(data.get("duration", 0) * 1000)
track_kwargs["comment"] = data.get("shareurl", "")
if artist_kwargs:
track_kwargs["artists"] = [Artist(**artist_kwargs)]
if album_kwargs:
track_kwargs["album"] = Album(**album_kwargs)
return Track(**track_kwargs)
class JamendoClient:
def __init__(self, config: dict):
super().__init__()
self.http_client = get_requests_session(
proxy_config=config["proxy"],
user_agent=(
f"{mopidy_jamendo.Extension.dist_name}/"
f"{mopidy_jamendo.__version__}"
),
)
self.client_id = config["jamendo"]["client_id"]
def _get(self, url: str, params: dict = None) -> dict:
url = f"https://api.jamendo.com/v3.0/{url}"
if not params:
params = {}
params["client_id"] = self.client_id
try:
with closing(self.http_client.get(url, params=params)) as res:
logger.debug(f"Requested {res.url}")
res.raise_for_status()
return res.json()
except Exception as e:
if isinstance(e, HTTPError) and e.response.status_code == 401:
logger.error(
'Invalid "client_id" used for Jamendo authentication!'
)
else:
logger.error(f"Jamendo API request failed: {e}")
return {}
@cachetools.func.ttl_cache(ttl=3600)
def get_track(self, track_id: str) -> Optional[Track]:
logger.debug(f"Getting info for track with ID {track_id}")
try:
result = self._get("tracks/", params={"id": track_id})["results"][0]
except (KeyError, IndexError):
logger.warning(f"No results for track {track_id}")
return None
track = parse_track(result)
return track
class JamendoPlaybackProvider(backend.PlaybackProvider):
def translate_uri(self, uri: str) -> Optional[str]:
if "jamendo:track:" in uri:
uri = uri[len("jamendo:track:") :]
track = self.backend.remote.get_track(uri)
if track is None:
return None
return track.uri
return None
class JamendoLibraryProvider(backend.LibraryProvider):
def lookup(self, uri: str) -> List[Optional[Track]]:
if "jamendo:track:" in uri:
uri = uri[len("jamendo:track:") :]
return [self.backend.remote.get_track(uri)]
return [None]
class JamendoBackend(pykka.ThreadingActor, backend.Backend):
def __init__(self, config: dict, audio: Audio):
super(JamendoBackend, self).__init__()
self.audio = audio
self.config = config
self.remote = JamendoClient(config)
self.library = JamendoLibraryProvider(backend=self)
self.playback = JamendoPlaybackProvider(audio=audio, backend=self)
self.uri_schemes = ["jamendo"]
``` |
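Outside of a running Mopidy instance, `JamendoClient` can be exercised directly with a hand-built config dict. A minimal sketch, assuming a valid Jamendo `client_id` and network access; the client ID and track ID values are placeholders.
```python
# Drive JamendoClient without the rest of the Mopidy backend machinery.
from mopidy_jamendo.jamendo import JamendoClient

config = {
    "proxy": {},  # no proxy configured
    "jamendo": {"client_id": "your-client-id"},  # placeholder
}

client = JamendoClient(config)
track = client.get_track("168")  # arbitrary example track ID
if track is not None:
    print(track.name, track.length, track.uri)
```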
{
"source": "jonathanhaigh/ju-jump-backend",
"score": 2
} |
#### File: ju-jump-backend/jujumpbackend/backend.py
```python
import argparse
import importlib
import json
import pathlib
import sys
DEFAULT_CONFIG_DIR = pathlib.Path.home() / ".ju-jump"
ABBR_SPECS_FILENAME = "abbrs.json"
RESOLVERS_MODULE_NAME = "resolvers"
class AbbrSpec:
def __init__(self, spec_dict, resolvers_module):
self._spec_dict = spec_dict
self._resolvers_module = resolvers_module
@property
def prefix(self):
if "prefix" in self._spec_dict:
return pathlib.Path(self._spec_dict["prefix"])
if "resolver" in self._spec_dict:
resolver = self.get_resolver(self._spec_dict["resolver"])
return pathlib.Path(resolver(self._spec_dict["abbr"]))
return pathlib.Path()
def get_resolver(self, resolver_name):
return getattr(self._resolvers_module, resolver_name)
class AbbrSpecs:
def __init__(self, config_dir):
sys.path.insert(0, str(config_dir))
resolvers_module = importlib.import_module(RESOLVERS_MODULE_NAME)
abbr_specs_path = config_dir / ABBR_SPECS_FILENAME
with open(abbr_specs_path, mode="r") as f:
self._abbr_specs = {
s["abbr"]: AbbrSpec(s, resolvers_module) for s in json.load(f)
}
def __getitem__(self, key):
return self._abbr_specs[key]
def __str__(self):
return str(self._abbr_specs)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"abbr",
type=str,
)
parser.add_argument(
"suffix",
nargs="?",
default=pathlib.Path("."),
type=pathlib.Path,
)
return parser.parse_args()
def main():
args = parse_args()
specs = AbbrSpecs(config_dir=DEFAULT_CONFIG_DIR)
path = specs[args.abbr].prefix.joinpath(args.suffix).resolve()
print(path)
if __name__ == "__main__":
main()
``` |
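The backend reads everything it needs from `~/.ju-jump`: an `abbrs.json` list of abbreviation specs and a `resolvers.py` module providing any resolver functions named in those specs. The sketch below writes an example pair of files inferred from the code above; the abbreviations and paths are illustrative only.
```python
# Create an example ~/.ju-jump configuration for the backend above.
import json
import pathlib

config_dir = pathlib.Path.home() / ".ju-jump"
config_dir.mkdir(exist_ok=True)

# Each spec needs "abbr" plus either a static "prefix" or the name of a
# resolver function defined in resolvers.py.
abbrs = [
    {"abbr": "dl", "prefix": str(pathlib.Path.home() / "Downloads")},
    {"abbr": "proj", "resolver": "current_project"},
]
(config_dir / "abbrs.json").write_text(json.dumps(abbrs, indent=2))

# Resolver functions receive the abbreviation and return a path string.
(config_dir / "resolvers.py").write_text(
    "def current_project(abbr):\n"
    '    return "/home/user/work/current"\n'
)
```
With these files in place, `python -m jujumpbackend.backend dl some/subdir` should print the resolved absolute path, assuming the package is installed.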
{
"source": "jonathanhaigh/sq",
"score": 2
} |
#### File: sq/test/test_sq_data_size.py
```python
import itertools
import math
import pytest
import util
DATA_SIZES = itertools.chain(
(0, 500, 512),
(int(1.5 * (1000 ** x)) for x in (1, 2, 3, 4, 5, 6)),
(int(2.5 * (1024 ** x)) for x in (1, 2, 3, 4, 5, 6)),
)
DECIMAL_UNITS = ("B", "kB", "MB", "GB", "TB", "PB", "EB")
BINARY_UNITS = ("B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB")
@pytest.mark.parametrize(
"exponent,unit,base,data_size",
(
(exponent, unit, base, data_size)
for data_size in DATA_SIZES
for base, units_tuple in ((1000, DECIMAL_UNITS), (1024, BINARY_UNITS))
for exponent, unit in enumerate(units_tuple)
),
)
def test_data_size_units(exponent, unit, base, data_size):
size_in_units = data_size / (base ** exponent)
result = util.sq(f"<data_size({data_size}).<{unit}")
assert math.isclose(result, size_in_units)
```
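For reference, the conversion asserted above is just division by the appropriate power of the base; a tiny standalone example of the same arithmetic:
```python
# Worked example of the unit conversion checked by the test above:
# 2,500,000 bytes in decimal kilobytes (base 1000) and binary kibibytes
# (base 1024).
data_size = 2_500_000

kb = data_size / (1000 ** 1)   # 2500.0 kB
kib = data_size / (1024 ** 1)  # ~2441.4 KiB

print(f"{kb} kB, {kib:.1f} KiB")
```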
#### File: sq/test/test_sq_root.py
```python
import pytest
import util
simple_tests = []
out_of_range_tests = []
invalid_args_tests = []
# SqRoot::int
simple_tests.extend((f"<int({i})", int(i)) for i in util.INT_STRS)
simple_tests.extend((f"<int(value={i})", int(i)) for i in util.INT_STRS)
out_of_range_tests.extend(f"int({i})" for i in util.OUT_OF_RANGE_INT_STRS)
invalid_args_tests.extend(
f"int({i})" for i in ("1.0", "true", "false", '"str"')
)
# SqRoot::float
simple_tests.extend((f"<float({i})", float(i)) for i in util.FLOAT_STRS)
simple_tests.extend((f"<float(value={i})", float(i)) for i in util.FLOAT_STRS)
simple_tests.extend((f"<float({i})", float(i)) for i in ("-1", "1", "0"))
out_of_range_tests.extend(f"float({i})" for i in util.OUT_OF_RANGE_FLOAT_STRS)
invalid_args_tests.extend(f"float({i})" for i in ("true", "false", '"str"'))
# SqRoot::bool
simple_tests.append(("<bool(true)", True))
simple_tests.append(("<bool(false)", False))
simple_tests.append(("<bool(value=true)", True))
simple_tests.append(("<bool(value=false)", False))
invalid_args_tests.extend(
f"bool({i})" for i in ('"true"', '"false"', "1", "1.0")
)
# SqRoot::string
simple_tests.extend((f"<string({util.quote(i)})", i) for i in util.STRINGS)
simple_tests.extend(
(f"<string(value={util.quote(i)})", i) for i in util.STRINGS
)
invalid_args_tests.extend(
f"string({i})" for i in ("-1" "1.0", "true", "false")
)
# SqRoot::ints
simple_tests.extend(
[
("<ints(0, 5)", [0, 1, 2, 3, 4]),
("<ints(-5, 0)", [-5, -4, -3, -2, -1]),
("<ints(-6, -1)", [-6, -5, -4, -3, -2]),
("<ints(5)[:5]", [5, 6, 7, 8, 9]),
("<ints(-5)[:5]", [-5, -4, -3, -2, -1]),
("<ints(start=5)[:5]", [5, 6, 7, 8, 9]),
("<ints(stop=5)", [0, 1, 2, 3, 4]),
("<ints(5, stop=10)", [5, 6, 7, 8, 9]),
("<ints(start=5, stop=10)", [5, 6, 7, 8, 9]),
]
)
# SqRoot::path
simple_tests.extend((f"<path({util.quote(i)})", i) for i in util.PATH_STRS)
simple_tests.extend(
(f"<path(value={util.quote(i)})", i) for i in util.PATH_STRS
)
# SqRoot::data_size
simple_tests.extend(
(f"<data_size({i})", int(i)) for i in util.INT_STRS if int(i) >= 0
)
simple_tests.extend(
(f"<data_size(bytes={i})", int(i)) for i in util.INT_STRS if int(i) >= 0
)
out_of_range_tests.extend(
f"data_size({i})" for i in util.INT_STRS if int(i) < 0
)
out_of_range_tests.extend(
f"data_size({i})" for i in util.OUT_OF_RANGE_INT_STRS
)
# SqRoot::devices
simple_tests.append(("<devices[sys_name=\"null\"]", ["null"]))
@pytest.mark.parametrize("query,result", simple_tests)
def test_simple(query, result):
assert util.sq(query) == result
@pytest.mark.parametrize("query", out_of_range_tests)
def test_out_of_range(query):
util.sq_error(query, "out ?of ?range")
@pytest.mark.parametrize("query", invalid_args_tests)
def test_invalid_argument(query):
util.sq_error(query, "invalid ?argument")
```
#### File: sq/test/test_sq_schema.py
```python
import copy
import pytest
import util
def flatten_doc_list(entity):
if isinstance(entity["doc"], list):
entity["doc"] = "\n".join(entity["doc"])
def test_schema(sq_schema):
# We're going to test that the schema returned by SQ is (mostly) the same
# as the original schema in schema.json.
#
# There are a couple of things that will be different between the two
# schemas though:
# * doc arrays will have been converted to single strings with newlines.
# * optional fields will always exist but might be null.
#
# Modify the schema we got from schema.json to match what we think SQ
# should return, then just do a test using "=="
schema = copy.deepcopy(sq_schema)
for t in schema["types"]:
flatten_doc_list(t)
for f in t["fields"]:
flatten_doc_list(f)
for p in f["params"]:
flatten_doc_list(p)
if "default_value" not in p:
p["default_value"] = None
if "default_value_doc" not in p:
p["default_value_doc"] = None
result = util.sq(
"schema {"
"types {"
"name doc fields { "
"name doc return_type return_list null params {"
"name doc index type required "
"default_value default_value_doc"
"}"
"}"
"}"
" primitive_types { name doc }"
" root_type"
"}"
)
assert result == {"schema": schema}
def test_schema_to_primitive():
assert util.sq('<schema.<types[="SqRoot"]') == ["SqRoot"]
assert util.sq('<schema.<primitive_types[="PrimitiveInt"]') == [
"PrimitiveInt"
]
assert util.sq('<schema.<types[="SqRoot"].<fields[="schema"]') == [
["schema"]
]
assert util.sq(
'<schema.<types[="SqRoot"].<fields[="int"].<params[="value"]'
) == [[["value"]]]
``` |
{
"source": "jonathanharg/covid_dashboard",
"score": 3
} |
#### File: jonathanharg/covid_dashboard/app.py
```python
from datetime import datetime, timedelta
import threading
import logging
from flask import Flask, render_template, Markup, request, redirect
from scheduler import (
scheduler,
scheduled_events,
schedule_event,
remove_event,
keep_alive,
)
from covid_data_handler import get_covid_data
from covid_news_handling import get_news, remove_article
from utils import get_settings, time_until
LOG_FORMAT = (
"%(asctime)s (%(thread)d) [%(levelname)s]: %(message)s (%(funcName)s in %(module)s)"
)
CLOCK_ICON = (
"<svg xmlns='http://www.w3.org/2000/svg' width='16' height='16' fill='currentColor'"
" class='bi bi-clock' viewBox='0 0 16 16'><path d='M8 3.5a.5.5 0 0 0-1 0V9a.5.5 0 0"
" 0 .252.434l3.5 2a.5.5 0 0 0 .496-.868L8 8.71V3.5z'/><path d='M8 16A8 8 0 1 0 8"
" 0a8 8 0 0 0 0 16zm7-8A7 7 0 1 1 1 8a7 7 0 0 1 14 0z'/></svg>"
)
HOSPITAL_ICON = (
"<svg xmlns='http://www.w3.org/2000/svg' width='32' height='32' fill='currentColor'"
" class='bi bi-thermometer-half' viewBox='0 0 16 16'><path d='M9.5 12.5a1.5 1.5 0 1"
" 1-2-1.415V6.5a.5.5 0 0 1 1 0v4.585a1.5 1.5 0 0 1 1 1.415z'/><path d='M5.5 2.5a2.5"
" 2.5 0 0 1 5 0v7.55a3.5 3.5 0 1 1-5 0V2.5zM8 1a1.5 1.5 0 0 0-1.5"
" 1.5v7.987l-.167.15a2.5 2.5 0 1 0 3.333 0l-.166-.15V2.5A1.5 1.5 0 0 0 8"
" 1z'/></svg>"
)
DEATHS_ICON = (
"<svg xmlns='http://www.w3.org/2000/svg' width='16' height='16' fill='currentColor'"
" class='bi bi-activity' viewBox='0 0 16 16'><path fill-rule='evenodd' d='M6 2a.5.5"
" 0 0 1 .47.33L10 12.036l1.53-4.208A.5.5 0 0 1 12 7.5h3.5a.5.5 0 0 1 0"
" 1h-3.15l-1.88 5.17a.5.5 0 0 1-.94 0L6 3.964 4.47 8.171A.5.5 0 0 1 4 8.5H.5a.5.5 0"
" 0 1 0-1h3.15l1.88-5.17A.5.5 0 0 1 6 2Z'/></svg>"
)
NEW_ICON = "<span class='badge badge-primary'>New</span>"
logging.basicConfig(
filename="covid_dashboard.log", format=LOG_FORMAT, level=logging.DEBUG
)
log = logging.getLogger("covid_dashboard")
def create_app(testing: bool = False) -> Flask:
"""Create the covid_dashboard flask app.
Args:
testing (bool): If the server is in testing mode or not. Defaults to False.
Returns:
Flask: the covid_dashboard flask app.
"""
thread = threading.Thread(target=scheduler.run)
flask_app = Flask(__name__)
flask_app.testing = testing
log.info(
"Creating covid_dashboard flask app %s with testing = %s", __name__, testing
)
# pylint: disable=W0632
location, nation = get_settings(
"location", "nation"
)
# update_news()
get_news(force_update=True)
get_covid_data(location, nation, force_update=True)
schedule_event(
timedelta(hours=0, minutes=0),
"Default COVID Update",
True,
True,
False,
new=False,
)
schedule_event(
timedelta(hours=1, minutes=0),
"Default News Update",
True,
False,
True,
new=False,
)
keep_alive()
thread.start()
log.info("Starting scheduler tread with ID = %s", thread.native_id)
@flask_app.route("/")
def main():
"""Handles input requests if any, otherwise renders the COVID dashboard"""
# GET PAGE VARIABLES & CONTENT
log.info("Requested /")
# pylint: disable=unbalanced-tuple-unpacking
(
favicon,
image,
title,
location,
nation,
) = get_settings(
"favicon", "image", "title", "location", "nation"
)
news_articles = get_news()
covid_data = get_covid_data(location, nation)
# Format Strings
log.info("Formatting data")
title = Markup(f"<strong>{title}</strong>")
location = Markup(f"<strong>{location}</strong>")
nation_location = Markup(f"<strong>{nation}</strong>")
        # pylint: disable=E1136
        local_7day_infections = (
            None
            if covid_data["local_7day"] is None
            else f"{covid_data['local_7day']:,}"
        )
        # pylint: disable=E1136
        national_7day_infections = (
            None
            if covid_data["national_7day"] is None
            else f"{covid_data['national_7day']:,}"
        )
        # pylint: disable=E1136
hospital_cases = (
None
if covid_data["hospital"] is None
else Markup(
f"{HOSPITAL_ICON} {covid_data['hospital']:,} hospital cases"
)
)
        # pylint: disable=E1136
deaths_total = (
None
if covid_data["deaths"] is None
else Markup(
f"{DEATHS_ICON} {covid_data['deaths']:,} total deaths"
)
)
for article in news_articles:
time = datetime.strptime(article["publishedAt"], "%Y-%m-%dT%H:%M:%S%z")
article["content"] = Markup(
f"<u>{time.strftime('%I:%M %p %d/%m/%y')}</u><br>{article['description']} <a"
f" href='{article['url']}' target='_blank'>Read More.</a>"
)
for update in scheduled_events:
repeating = (
"Repeating scheduled update" if update["repeat"] else "Scheduled update"
)
types = []
if update["data"] is True:
types.append("<strong>COVID data</strong>")
if update["news"] is True:
types.append("<strong>news</strong>")
updating = " and ".join(types) if types else "<strong>nothing</strong>"
time = update["target_time"]
time_hours = time.seconds // 3600
time_minutes = (time.seconds // 60) % 60
time_to = str(time_until(time))
new = ""
if update["new"] is True:
update["new"] = False
new = "<br>" + NEW_ICON
update["content"] = Markup(
f"{CLOCK_ICON} <u>{time_hours:02d}:{time_minutes:02d}</u><br>{repeating} for"
f" {updating} in {time_to} {new}"
)
# Render data with ./templates/index.html
log.info("Rendering template for /")
return render_template(
"index.html",
favicon=favicon,
updates=scheduled_events,
image=image,
title=title,
location=location,
local_7day_infections=local_7day_infections,
nation_location=nation_location,
national_7day_infections=national_7day_infections,
hospital_cases=hospital_cases,
deaths_total=deaths_total,
news_articles=news_articles[:4],
)
@flask_app.route("/index")
def index():
# Handle Inputs
log.info("Requested /index")
# Handle Remove Scheduled Event Request
if "update_item" in request.args:
title = request.args.get("update_item")
log.info("Requested removal of event %s", title)
for event in scheduled_events:
if event["title"] == title:
remove_event(title)
scheduler.cancel(event["sched_event"])
# Handle Remove News Article Request
if "notif" in request.args:
title = request.args.get("notif")
log.info("Requested removal of news article %s", title)
remove_article(title)
# Handle Request to Schedule New Event
if "update" in request.args:
label = request.args.get("two")
log.info("Request to schedule new event %s", label)
# Change request arguments to booleans
repeat = "repeat" in request.args
data = "covid-data" in request.args
news = "news" in request.args
supplied_time = request.args.get("update")
# Make sure an event with the same title does not exist
if any(event["title"] == label for event in scheduled_events):
log.warning(
"An event with the name %s already exists! Ignoring!", label
)
# Make sure either data or news is being updated
elif data or news:
try:
# Converts time string time into datetime
time_offset = datetime(1900, 1, 1, 0, 0)
time = datetime.strptime(supplied_time, "%H:%M") - time_offset
schedule_event(time, label, repeat, data, news)
except ValueError:
log.error(
"Supplied time %s does not match the format %%H:%%M",
supplied_time,
)
else:
log.warning(
"New event %s either already exists or does not request a"
" news or data update",
label,
)
# Redirect user back to root URL to stop form being submitted again on a page reload
log.info("Redirecting user to /")
return redirect("/", code=302)
return flask_app
if __name__ == "__main__":
log.info("covid_dashboard running as main")
app = create_app()
app.run()
```
#### File: covid_dashboard/tests/test_app.py
```python
from app import create_app
from utils import get_setting
import pytest
@pytest.fixture
def client():
app = create_app(testing=True)
with app.test_client() as client:
yield client
@pytest.mark.parametrize("url", ["/", "/index"])
def test_get_url(client, url):
response = client.get(url)
assert response.status_code in [200, 302]
remove_nonexisting_event = {
"update_item": "TRY TO REMOVE AN ARTICLE THAT DOES NOT EXIST"
}
remove_nonexisting_news = {"notif": "TRY TO REMOVE AN ARTICLE THAT DOES NOT EXIST"}
schedule_update_with_no_label = {
"update": "12:30",
"covid-data": "covid-data",
}
schedule_update_with_no_time = {
"update": "",
"two": "No Time",
"covid-data": "covid-data",
}
schedule_update_with_invalid_time = {
"update": "25:72",
"two": "Invalid Time",
"covid-data": "covid-data",
}
schedule_update_with_same_name = {
"update": "12:30",
"two": "Same Name",
"covid-data": "covid-data",
}
remove_update_with_same_name = {"update_item": "Same Name"}
schedule_update_with_no_covid_or_news = {"update": "12:30", "two": "Label"}
requests = [
remove_nonexisting_event,
remove_nonexisting_news,
schedule_update_with_no_label,
schedule_update_with_no_time,
schedule_update_with_invalid_time,
schedule_update_with_no_covid_or_news,
schedule_update_with_same_name,
schedule_update_with_same_name,
remove_update_with_same_name,
remove_update_with_same_name,
]
@pytest.mark.parametrize("requests", requests)
def test_input_sequence(client, requests):
url = "index"
for i, arg in enumerate(requests):
if i == 0:
url += "?"
else:
url += "&"
url += arg + "=" + requests[arg]
response = client.get(url)
assert response.status_code in [200, 302]
# TEST FAVICON, TEST IMAGE
def test_favicon(client):
favicon = get_setting("favicon")
response = client.get(favicon)
assert response.status_code in [200, 302]
def test_image(client):
image = get_setting("image")
response = client.get("/static/images/" + image)
``` |
{
"source": "JonathanHemnes/no-code-faster",
"score": 3
} |
#### File: example/utils/cart.py
```python
def create_price_dict(cart):
price_dict = {
        'total_price': 0,
        'total_tax': 0,
        'stripe_fees': 0,
        'net': 0,
}
for item in cart.items:
taxes = item.product.price * (cart.tax_rate / 100)
total_price = item.product.price + taxes
# the man always gets his cut
stripe_fees = total_price * (3/100)
# we make only what we keep
net = total_price - taxes - stripe_fees
price_dict['total_tax'] += taxes
price_dict['total_price'] += total_price
price_dict['stripe_fees'] += stripe_fees
price_dict['net'] += net
return price_dict
def set_cart_price(cart):
cart_price_dict = create_price_dict(cart)
cart.price = cart_price_dict['total_price']
cart.save()
``` |
{
"source": "jonathan-hepp/pyle_of_sticks",
"score": 3
} |
#### File: jonathan-hepp/pyle_of_sticks/flask_app.py
```python
from flask import abort, Flask, jsonify, render_template, request, session
from flask.ext.session import Session
from sticks_game import ComputerPlayer, EndOfGame, Game, HumanPlayer, PileOfSticks
app = Flask(__name__)
app.secret_key = "CHANGEIT"
app.config["SESSION_TYPE"] = "filesystem"
Session(app)
# The pool of values shared by all computer players.
# If the app would be accessed by a lot of people at the same time
# some safe checking could be done here, just in case, but since
# that's not the case and we only read and append, it's not really an issue.
shared_pool = [[1,2,3] for x in range(21)]
def new_game_builder(human_player = None, computer_player = None):
if computer_player is None:
computer_player = ComputerPlayer()
computer_player.pool = shared_pool
if human_player is None:
human_player = HumanPlayer()
return Game(PileOfSticks(), computer_player, [human_player])
@app.route('/')
def index():
if "game" not in session:
session["game"] = new_game_builder()
return render_template("index.html", initial_sticks=session["game"].pile.count())
# This method gets the number played by the user and then generates the number
# that the computer will play, returning the value to the view.
# If the game ends, we update the current game with a new one and return appropriate
# data to the view.
@app.route("/play", methods=["POST"])
def play():
comp_number = 0
try:
if "game" in session:
game = session["game"]
number = int(request.form["number"])
game.play(number)
comp_number = game.computer_player.get_sticks_number(game.pile.count())
game.play(comp_number)
return jsonify({"number": comp_number})
abort(401)
except EndOfGame as error:
session["game"].computer_player.end_game(error.loser is session["game"].human_players[0])
session["game"] = new_game_builder(session["game"].human_players[0], session["game"].computer_player)
return jsonify({"endOfGame": True, "loser": error.loser.name, "number": comp_number})
@app.errorhandler(401)
def session_expired(error):
return "The session has expired.", 401
if __name__ == "__main__":
app.run("0.0.0.0", debug=True)
``` |
{
"source": "jonathan-hepp/Sequence-Solver",
"score": 4
} |
#### File: Sequence-Solver/sequence_solver/strategies.py
```python
import string
class BaseStrategy:
def solve(self, sequence, debug):
        raise NotImplementedError()
class DiffTableStrategy(BaseStrategy):
def solve(self, sequence, debug=False):
levels = self.__calculate_difference({0 : sequence}, sequence)
if debug:
print(levels)
return self.__calculate_next_item(levels)
def __calculate_difference(self, levels, sequence):
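        # Build the next row of the difference table from pairwise differences; recurse
        # until a constant row is found, treating a row reduced to one value as unsolvable.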
new_sequence = []
for i in range(1, len(sequence)):
new_sequence.append(sequence[i] - sequence[i-1])
levels[len(levels)] = new_sequence
if len(new_sequence) == 1:
raise Exception("Unsolvable sequence")
return levels if self.__all_elements_equal(new_sequence) else self.__calculate_difference(levels, new_sequence)
def __all_elements_equal(self, sequence):
return len(set(sequence)) <= 1
def __calculate_next_item(self, levels):
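        # The next term of the sequence is the sum of the last entry in every row of the table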
next_item = 0
for sequence in levels.values():
next_item += sequence[-1]
return next_item
class AlphabetSubstitutionStrategy(BaseStrategy):
__alphabet = {l : string.ascii_lowercase.index(l)+1 for l in string.ascii_lowercase}
__diffTableStrategy = DiffTableStrategy()
def solve(self, sequence, debug=False):
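        # Map letters to positions 1-26, solve the numeric sequence, then wrap the
        # predicted value back into the alphabet (modulo 26)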
new_sequence = [string.ascii_lowercase.index(l.lower())+1 for l in sequence]
return string.ascii_lowercase[(self.__diffTableStrategy.solve(new_sequence, debug)-1) % len(string.ascii_lowercase)]
class SpecialCasesStrategy(BaseStrategy):
def solve(self, sequence, debug=False):
if self.__is_fibonacci(sequence):
return sequence[-2] + sequence[-1]
raise Exception("Unsolvable sequence")
def __is_fibonacci(self, sequence):
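        # Heuristic test: for Fibonacci numbers x, x*phi (phi ~= 1.618) is close to an integer;
        # the ordering check then confirms each term is the sum of the previous two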
return all(map(lambda x: x == 0 or abs(round(x*1.618) - x*1.618) < 1.0 / x, sequence)) and self.__is_fibonacci_ordered(sequence)
def __is_fibonacci_ordered(self, sequence):
for i in range(1, len(sequence)-1):
if sequence[i+1] != sequence[i-1] + sequence[i]:
return False
return True
``` |
{
"source": "JonathanHerrmann/Black-Jack",
"score": 4
} |
#### File: Black-Jack/blackjack/__init__.py
```python
import random
class Card:
def __init__(self, suit, value):
if suit not in ['spade', 'club', 'diamond', 'heart']:
raise ValueError('Unexpected suit. This is a standard Deck')
self.suit = suit
if value not in ['A', 'K', 'Q', 'J'] and value not in list(range(2, 11)):
raise ValueError('Not a valid value')
self.value = value
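        # Face cards count as 10 and aces as 11 when comparing or summing cards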
if value in ['J', 'Q', 'K']:
self.numeric_value = 10
elif value == 'A':
self.numeric_value = 11
else:
self.numeric_value = self.value
def __add__(self, other):
return self.numeric_value + other.numeric_value
def __gt__(self, other):
return self.numeric_value > other.numeric_value
def __eq__(self, other):
return self.numeric_value == other.numeric_value
def __repr__(self):
return "<{} of {}s>".format(self.value, self.suit)
def __str__(self):
return repr(self)
class Deck:
"""A Standard deck class with 52 cards, 13 cards in each suit"""
def __init__(self):
self.cards = [] #same as list()
for suit in ['spade', 'club', 'diamond', 'heart']:
for value in list(range(2, 11)) + ['J', 'Q', 'K', 'A']:
self.cards.append(Card(suit, value))
def __len__(self):
return len(self.cards)
def __iter__(self):
return self
def __next__(self):
try:
card = self.cards.pop(0)
return card
except IndexError:
raise StopIteration("No more cards!")
def shuffle(self):
random.shuffle(self.cards)
def deal_hand(self):
return self._deal(), self._deal()
def deal_card(self):
return self._deal()
def _deal(self):
"""
:return: a card from our Deck
"""
return next(self)
``` |
{
"source": "jonathanhhb/sem_ver_demo",
"score": 2
} |
#### File: module_b/module_b/thing.py
```python
def foo():
print( "Module B Third Implementation. Patched another implementation bug." )
return
def foo2():
print( "This is a new function. This means we need a minor version bump." )
``` |
{
"source": "jonathanhild/recalls-dashboard",
"score": 2
} |
#### File: recalls-dashboard/data_scraper/mappings.py
```python
from sqlalchemy import Column, Integer, String, Date, ForeignKey
from sqlalchemy.orm import relationship
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class OpenFDARecall(Base):
__tablename__ = 'recall'
def __init__(self, data):
if data:
for key, val in data.items():
setattr(self, key, val)
id = Column(Integer, primary_key=True)
recall_number = Column(String)
report_date = Column(Date)
recall_initiation_date = Column(Date)
center_classification_date = Column(Date)
termination_date = Column(Date)
classification = Column(String)
voluntary_mandated = Column(String)
initial_firm_notification = Column(String)
status = Column(String)
# openfda = Column(String)
event_id = Column(Integer)
recalling_firm = Column(String)
address_1 = Column(String)
address_2 = Column(String)
city = Column(String)
state = Column(String)
postal_code = Column(String)
country = Column(String)
product_type = Column(String)
product_description = Column(String)
product_quantity = Column(String)
code_info = Column(String)
more_code_info = Column(String)
reason_for_recall = Column(String)
distribution_pattern = Column(String)
def __iter__(self):
return iter([self.id,
self.recall_number,
self.report_date,
self.recall_initiation_date,
self.center_classification_date,
self.termination_date,
self.classification,
self.voluntary_mandated,
self.initial_firm_notification,
self.status,
# self.openfda,
self.event_id,
self.recalling_firm,
self.address_1,
self.address_2,
self.city,
self.state,
self.postal_code,
self.country,
self.product_type,
self.product_description,
self.product_quantity,
self.code_info,
self.more_code_info,
self.reason_for_recall,
self.distribution_pattern])
class AdverseEvent(Base):
__tablename__ = 'adverse_event'
def __init__(self, data):
if data:
for key, val in data.items():
setattr(self, key, val)
id = Column(Integer, primary_key=True)
report_number = Column(String)
outcomes = Column(String)
date_created = Column(Date)
reactions = relationship('AdverseEventReaction')
products = relationship('AdverseEventProduct')
consumer_age = Column(String)
consumer_age_unit = Column(String)
consumer_gender = Column(String)
class AdverseEventReaction(Base):
__tablename__ = 'adverse_event_reaction'
id = Column(Integer, primary_key=True)
adverse_event_id = Column(Integer, ForeignKey('adverse_event.id'))
def __iter__(self):
        return iter([])
class AdverseEventProduct(Base):
__tablename__ = 'adverse_event_product'
id = Column(Integer, primary_key=True)
adverse_event_id = Column(Integer, ForeignKey('adverse_event.id'))
def __iter__(self):
        return iter([])
```
#### File: jonathanhild/recalls-dashboard/scraper.py
```python
import click
import tqdm
from data_scraper import factory
from data_scraper import database as db
from data_scraper import openfda
db_options = click.Choice(['pgsql', 'sqlite'])
source_options = click.Choice(
['recalls', 'adverseevents', 'inspectionclassifications', 'inspectioncitations', 'importrefusals']
)
@click.group()
def cli():
pass
@cli.command(help='Initialize a PostgreSQL or SQLite database.')
@click.option('--database', '-db', type=db_options)
def init(database):
session = None
if database == 'pgsql':
db._pgsql_env_file()
username, password = db._pgsql_get_vars()
pgsql_conn = db._pgsql_conn(username, password)
session = db.create_session(pgsql_conn, create_all=True)
click.echo('PostgreSQL database successfully created.')
if database == 'sqlite':
sqlite_conn = db._sqlite_conn()
session = db.create_session(sqlite_conn)
return session
@cli.command(help='Retrieve data from API sources.')
@click.option('--source', '-s', type=source_options, multiple=True)
@click.option('--database', '-db', type=db_options)
@click.option('--filepath', '-f', type=str)
def scrape(source, database, filepath):
# Create a new session to the chosen database.
session = db.create_session(database=database, path=filepath)
if 'recalls' in source:
recall = factory.create('OPENFDA_RECALL')
recall.get_metadata()
recall.get_events()
recall.to_db(session)
@cli.command(help='Run machine learning models.')
def model():
pass
@cli.command(help='Start the recalls dashboard.')
def start():
pass
if __name__ == '__main__':
cli()
``` |
{
"source": "jonathanhines/cucSpiritVisuals",
"score": 3
} |
#### File: cucSpiritVisuals/src/summary_normalized.py
```python
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from numpy.polynomial.polynomial import polyfit
import scipy.stats
def plot(year):
filePath = "./data/CUC" + year + ".csv"
chart_title = "CUC" + year + " Average Spirit Score vs Normalized Rank"
chart_file_name = "./results/CUC" + year + "_SOTG_vs_rank_normalized.png"
df = pd.read_csv(filePath)
colors = plt.get_cmap("tab20").colors
# Table of results
divisions = df["Division"].unique()
divisions.sort()
global_spirit_mean = df["Spirit"].mean()
x_limits = [-2, 102]
fig = plt.figure(figsize=(12,8), dpi=300)
ax = plt.gca()
plt.plot(
x_limits,
[global_spirit_mean, global_spirit_mean],
linestyle='--',
alpha=0.4,
color="#468c3a"
)
score_max = df["Score"].max()
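    # Rescale rank so 0 = top-ranked team and 100 = bottom-ranked team
    # (the "% teams ranked higher" axis), assuming the Score column holds the rank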
x = 100 * (df["Score"].values - 1) / (score_max - 1)
y = df["Spirit"].values
m, b, r_value, p_value, std_err = scipy.stats.linregress(x, y)
plt.plot(
x_limits,
b + m * np.asarray(x_limits),
linestyle='--',
color="#000000",
)
ax.annotate(
'slope = ' + "{:.4f}".format(m) + "\n" + r'$\sigma_{est}$' + " = " + "{:.4f}".format(std_err) + "\n" + r'$r$' + " = " + "{:.4f}".format(r_value),
xy=(101, 101 * m + b),
xytext=(95, 85 * m + b + 1 ), textcoords='data',
arrowprops=dict(arrowstyle='->', facecolor='black'),
horizontalalignment='right', verticalalignment='top',
)
for i, division in enumerate(divisions):
df_div = df[df["Division"] == division]
score_max = df_div["Score"].max()
x = 100 * (df_div["Score"].values - 1) / (score_max - 1)
y = df_div["Spirit"].values
plt.plot(
x,
y,
linestyle="none",
# linewidth=genderLineWidths[i],
color=colors[i],
label=division,
marker=".",
)
m, b, r_value, p_value, std_err = scipy.stats.linregress(x, y)
plt.plot(
x,
b + m * x,
linestyle='-',
color=colors[i],
)
plt.ylabel("Average CUC" + year + " Spirit Score" + "\n" + r'$\mu$' + " = " + "{:.2f}".format(global_spirit_mean))
plt.xlabel('Normalized Rank\n(% teams Ranked Higher)')
plt.xticks(np.arange(0, 120, 20))
plt.xlim(x_limits)
plt.legend(loc='lower right', ncol=4,)
plt.gca().set_axisbelow(True)
plt.grid(color='#EEEEEE', linestyle='-', linewidth=1)
plt.title(chart_title)
plt.savefig(chart_file_name)
plt.close()
print("Saved plot \"" + chart_title + "\" to file \"" + chart_file_name + "\"")
``` |
{
"source": "JonathanHolvey/tastopo",
"score": 3
} |
#### File: tastopo/dimensions/paper.py
```python
import math
import re
class Paper:
"""A piece of paper"""
def __init__(self, spec):
if not re.match(r'^[aA]\d+$', spec):
raise ValueError(f"'{spec}' is not a valid ISO 216 A-series paper size")
self.spec = spec
self.series = spec[0]
self.size = int(spec[1:])
def dimensions(self):
"""Get the dimensions of an ISO 216 A-series paper size"""
size = 0
area = 1e6 # A0 area in square mm
while size < self.size:
area /= 2
size += 1
width = math.sqrt(area / math.sqrt(2))
height = width * math.sqrt(2)
rounder = round if size == 0 else math.floor
return rounder(width), rounder(height)
```
#### File: tastopo/dimensions/tilegrid.py
```python
import math
class TileGrid:
"""Calculate the dimensions of a grid of map tiles"""
def __init__(self, layer, level, centre, size):
self.layer = layer
self.level = level
self.centre = centre
self.size = size
def tiles(self):
"""Get a list of tile coordinates to cover a real-world map area"""
start, shape = self.grid()
return [(start[0] + col, start[1] + row)
for row in range(shape[1], 0, -1) for col in range(shape[0])]
def grid(self):
"""Get the start tile and shape of a grid of tiles"""
x1, y1 = self.bbox()[:2]
overflow = self.overflow()
start = math.floor(self.tileunits(x1)), math.floor(self.tileunits(y1))
shape = (
round(self.tileunits(self.size[0]) + sum(overflow[0])),
round(self.tileunits(self.size[1]) + sum(overflow[1])),
)
return start, shape
def bbox(self):
"""Get the coordinates of the corners bounding the map area"""
x1 = self.centre[0] - self.layer.origin[0] - self.size[0] / 2
x2 = self.centre[0] - self.layer.origin[0] + self.size[0] / 2
y1 = self.centre[1] - self.layer.origin[1] - self.size[1] / 2
y2 = self.centre[1] - self.layer.origin[1] + self.size[1] / 2
return x1, y1, x2, y2
def tileunits(self, size):
"""Convert a real-world distance in metres to a number of tile widths"""
resolution = self.layer.resolution(self.level)
return size / (resolution * self.layer.tilesize)
def pixelsize(self):
"""Get the grid dimensions in pixels"""
resolution = self.layer.resolution(self.level)
return [round(s / resolution) for s in self.size]
def overflow(self):
"""Get the proportion of a tile that the grid extends beyond the map area by on each side"""
x1, y1, x2, y2 = self.bbox()
left = self.tileunits(x1) % 1
bottom = self.tileunits(y1) % 1
top = 1 - self.tileunits(y2) % 1
right = 1 - self.tileunits(x2) % 1
return (left, right), (top, bottom)
def origin(self):
"""Get the position of the first tile in pixels"""
overflow = self.overflow()
left = -1 * round(overflow[0][0] * self.layer.tilesize)
top = -1 * round(overflow[1][0] * self.layer.tilesize)
return left, top
```
#### File: src/tastopo/layout.py
```python
from importlib import resources
from base64 import b64encode
from lxml import etree
class SVG:
"""An XML wrapper for manipulating SVG documents"""
NAMESPACES = {
'svg': 'http://www.w3.org/2000/svg',
'xlink': 'http://www.w3.org/1999/xlink',
}
def __init__(self, filepath, aliases):
self.document = etree.parse(str(filepath))
self.elements = self._alias(aliases)
def _alias(self, paths):
"""Build a dictionary of element aliases"""
elements = {}
for key, path in paths.items():
try:
elements[key] = self.document.xpath(path, namespaces=self.NAMESPACES)[0]
except IndexError:
pass
return elements
def ns(self, fullname):
"""Convert a SVG namespace prefix into a full namespace URI"""
[ns, name] = fullname.split(':')
namespace = self.NAMESPACES[ns]
return f'{{{namespace}}}{name}'
def get(self, key):
"""Get a previously selected element by key"""
return self.elements[key]
def position(self, key, x, y, width=None, height=None):
"""Set the size and position of a SVG node"""
element = self.elements[key]
if element.tag == self.ns('svg:g'):
self._position_transform(element, x, y)
else:
self._position_absolute(element, x, y, width, height)
def _position_absolute(self, element, x, y, width, height):
"""Set the positional attributes on an element"""
element.attrib.update({
'x': str(x),
'y': str(y),
'width': str(width),
'height': str(height),
})
def _position_transform(self, element, x, y):
"""Set the transform attribute on an element"""
element.attrib['transform'] = f'translate({x} {y})'
def line(self, parent_key, start, end):
"""Add a line element with a start and end point"""
element = etree.SubElement(self.get(parent_key), 'line')
element.attrib.update({
'x1': str(start[0]),
'y1': str(start[1]),
'x2': str(end[0]),
'y2': str(end[1]),
})
class Layout:
    """A map sheet layout"""
    MAX_GRID_SPACING = 50
    INFO_ORDER = ['scale', 'grid', 'datum', 'centre', 'size']
    GRID_SIZES = [200, 100, 50, 25, 10, 5, 4, 3, 2, 1, 0.5, 0.25, 0.1, 0.05, 0.025, 0.01]
def __init__(self, sheet, location, image, title=None):
self.sheet = sheet
self.location = location
self.image = image
self.title = title or location.description.title()
self.grid = False
self.details = {
'scale': f'1:{image.scale}',
'datum': image.datum,
'centre': location.uri,
'size': sheet.spec.upper(),
}
def compose(self):
"""Set the layout's variable elements"""
with resources.path(__package__, 'template.svg') as template_path:
svg = SVG(template_path, {
'image': '//svg:image[@id="map-data"]',
'title': '//svg:text[@id="map-title"]',
'border': '//svg:rect[@id="map-border"]',
'clip': '//svg:clipPath[@id="map-clip"]/svg:rect',
'grid': '//svg:g[@id="map-grid"]',
'logos': '//svg:g[@id="footer-logos"]',
'text': '//svg:g[@id="footer-text"]',
'info': '//svg:text[@id="map-info"]',
})
self._size(svg)
mapdata = 'data:image/png;base64,' + b64encode(self.image.mapdata).decode('utf-8')
svg.get('image').attrib[svg.ns('xlink:href')] = mapdata
svg.get('title').text = self.title
if self.grid:
self._drawgrid(svg)
svg.get('info').text = self.format_info()
return svg.document.getroot()
def _size(self, svg):
"""Prepare the template for the sheet size in use"""
root = svg.document.getroot()
width, height = self.sheet.dimensions()
viewport = self.sheet.viewport()
margin = self.sheet.MARGIN
footer = self.sheet.FOOTER_HEIGHT + margin
root.attrib['width'] = f'{width}mm'
root.attrib['height'] = f'{height}mm'
root.attrib['viewBox'] = f'0 0 {width} {height}'
svg.position('image', *self.sheet.viewport(True))
svg.position('border', *viewport)
svg.position('clip', *viewport)
svg.position('grid', *viewport)
svg.position('logos', width - margin - 68, height - footer + 2.5)
svg.position('text', margin + 0.2, height - footer + 3.5)
def _drawgrid(self, svg):
"""Add a grid to the map template"""
width, height = self.sheet.viewport()[2:]
grid_size, km_size = self._gridsize(max(width, height), self.image.scale)
spacing = grid_size * km_size
for x in range(1, int(width / spacing) + 1):
svg.line('grid', (x * spacing, 0), (x * spacing, height))
for y in range(1, int(height / spacing) + 1):
svg.line('grid', (0, height - y * spacing), (width, height - y * spacing))
self.details['grid'] = (f'{grid_size}\u2009km' if grid_size >= 1
else f'{grid_size * 1000:.0f}\u2009m')
def _gridsize(self, size, scale):
"""Select the best grid size for the map scale"""
km = 1e6 / scale
for grid in self.GRID_SIZES:
if grid <= self.MAX_GRID_SPACING / km:
break
return grid, km
def format_info(self):
"""Format map info details"""
items = [f'{k.upper()} {self.details[k]}' for k in self.INFO_ORDER if k in self.details]
if 'version' in self.details:
items.append(f'TasTopo {self.details["version"]}')
return ' '.join(items)
``` |
{
"source": "jonathan-hosmer/datadogpy",
"score": 2
} |
#### File: datadog/api/roles.py
```python
from datadog.api.resources import ActionAPIResource, CreateableAPIResource, CustomUpdatableAPIResource,\
DeletableAPIResource, GetableAPIResource, ListableAPIResource
from datadog.api.api_client import APIClient
class Roles(ActionAPIResource, CreateableAPIResource, CustomUpdatableAPIResource, GetableAPIResource,
ListableAPIResource, DeletableAPIResource):
"""
    A wrapper around the Role HTTP API.
"""
_resource_name = 'roles'
_api_version = 'v2'
@classmethod
def update(cls, id, **body):
"""
Update a role's attributes
:param id: uuid of the role
:param body: dict with type of the input, role `id`, and modified attributes
:returns: Dictionary representing the API's JSON response
"""
params = {}
return super(Roles, cls).update("PATCH", id, params=params, **body)
@classmethod
def assign_permission(cls, id, **body):
"""
Assign permission to a role
:param id: uuid of the role to assign permission to
:param body: dict with "type": "permissions" and uuid of permission to assign
:returns: Dictionary representing the API's JSON response
"""
params = {}
path = '{resource_name}/{resource_id}/permissions'.format(
resource_name=cls._resource_name,
resource_id=id
)
api_version = getattr(cls, '_api_version', None)
return APIClient.submit("POST", path, api_version, body, **params)
@classmethod
def unassign_permission(cls, id, **body):
"""
Unassign permission from a role
:param id: uuid of the role to unassign permission from
:param body: dict with "type": "permissions" and uuid of permission to unassign
:returns: Dictionary representing the API's JSON response
"""
params = {}
path = '{resource_name}/{resource_id}/permissions'.format(
resource_name=cls._resource_name,
resource_id=id
)
api_version = getattr(cls, '_api_version', None)
return APIClient.submit("DELETE", path, api_version, body, **params)
```
#### File: datadog/dogshell/service_check.py
```python
import json
# 3p
from datadog.util.format import pretty_json
# datadog
from datadog import api
from datadog.dogshell.common import report_errors, report_warnings
class ServiceCheckClient(object):
@classmethod
def setup_parser(cls, subparsers):
parser = subparsers.add_parser('service_check', help="Perform service checks")
verb_parsers = parser.add_subparsers(title='Verbs', dest='verb')
verb_parsers.required = True
check_parser = verb_parsers.add_parser('check', help="text for the message")
check_parser.add_argument('check', help="text for the message")
check_parser.add_argument('host_name', help="name of the host submitting the check")
check_parser.add_argument('status', help="integer for the status of the check."
" i.e: '0': OK, '1': WARNING, '2': CRITICAL, '3': UNKNOWN")
check_parser.add_argument('--timestamp', help="POSIX timestamp of the event", default=None)
check_parser.add_argument('--message', help="description of why this status occurred",
default=None)
check_parser.add_argument('--tags', help="comma separated list of tags", default=None)
check_parser.set_defaults(func=cls._check)
@classmethod
def _check(cls, args):
api._timeout = args.timeout
format = args.format
if args.tags:
tags = sorted(set([t.strip() for t in args.tags.split(',') if t.strip()]))
else:
tags = None
res = api.ServiceCheck.check(
check=args.check, host_name=args.host_name, status=int(args.status),
timestamp=args.timestamp, message=args.message, tags=tags)
report_warnings(res)
report_errors(res)
if format == 'pretty':
print(pretty_json(res))
else:
print(json.dumps(res))
```
#### File: unit/dogstatsd/fixtures.py
```python
import os
def load_fixtures(name):
"""
Load fixtures.
Args:
name (string): name of the fixture
"""
with open(os.path.join(os.path.dirname(__file__), 'fixtures', '{}'.format(name))) as fixture:
return fixture.read()
``` |
{
"source": "jonathanhperry/bitmovin-python",
"score": 2
} |
#### File: services/encodings/encoding_start_tests.py
```python
import unittest
import json
from bitmovin import Bitmovin, StartEncodingRequest, Scheduling, Tweaks, AudioVideoSyncMode, PerTitle, \
H264PerTitleConfiguration, AutoRepresentation
from tests.bitmovin import BitmovinTestCase
from bitmovin.utils import BitmovinJSONEncoder
class EncodingStartTests(BitmovinTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
@classmethod
def tearDownClass(cls):
super().tearDownClass()
def setUp(self):
super().setUp()
self.bitmovin = Bitmovin(self.api_key)
self.assertIsNotNone(self.bitmovin)
self.assertTrue(isinstance(self.bitmovin, Bitmovin))
def tearDown(self):
super().tearDown()
def test_encoding_start_request_serialization_with_prewarmed_instance_ids(self):
prewarmed_instance_pool_ids = ['4a67260e-2fd3-4e9e-9829-651280ea8f06', '4b67260e-2fd3-4e9e-9829-651280ea8f07']
scheduling = Scheduling(prewarmed_instance_pool_ids=prewarmed_instance_pool_ids)
scheduling_serialized = json.dumps(obj=scheduling, cls=BitmovinJSONEncoder)
self.assertEqual(first=scheduling_serialized, second='{"prewarmedInstancePoolIds": ["4a67260e-2fd3-4e9e-9829-651280ea8f06", "4b67260e-2fd3-4e9e-9829-651280ea8f07"]}')
start_encoding_request = StartEncodingRequest(scheduling=scheduling)
start_encoding_request_serialized = json.dumps(obj=start_encoding_request, cls=BitmovinJSONEncoder)
self.assertEqual(first=start_encoding_request_serialized, second='{"scheduling": {"prewarmedInstancePoolIds": ["4a67260e-2fd3-4e9e-9829-651280ea8f06", "4b67260e-2fd3-4e9e-9829-651280ea8f07"]}}')
def test_encoding_start_request_serialization_with_tweaks(self):
tweaks = Tweaks(audio_video_sync_mode=AudioVideoSyncMode.RESYNC_AT_START)
tweaks_serialized = json.dumps(obj=tweaks, cls=BitmovinJSONEncoder)
self.assertEqual(first=tweaks_serialized, second='{"audioVideoSyncMode": "RESYNC_AT_START"}')
start_encoding_request = StartEncodingRequest(tweaks=tweaks)
start_encoding_request_serialized = json.dumps(obj=start_encoding_request, cls=BitmovinJSONEncoder)
self.assertEqual(first=start_encoding_request_serialized, second='{"tweaks": {"audioVideoSyncMode": "RESYNC_AT_START"}}')
def test_encoding_start_request_with_per_title_configuration(self):
auto_representation = AutoRepresentation(adopt_configuration_threshold=1.5)
h264_configuration = H264PerTitleConfiguration(auto_representations=auto_representation,
min_bitrate_step_size=15000, max_bitrate_step_size=20000,
min_bitrate=500000, max_bitrate=8000000, target_quality_crf=0.5)
per_title = PerTitle(h264_configuration=h264_configuration)
per_title_serialized = json.dumps(per_title, cls=BitmovinJSONEncoder)
self.assertEqual(first=per_title_serialized, second='{"h264Configuration": {"minBitrate": 500000, '
'"maxBitrate": 8000000, "minBitrateStepSize": 15000, '
'"maxBitrateStepSize": 20000, "targetQualityCrf": 0.5, '
'"autoRepresentations": {"adoptConfigurationThreshold": '
'1.5}}}')
start_encoding_request = StartEncodingRequest(per_title=per_title)
start_encoding_request_serialized = json.dumps(obj=start_encoding_request, cls=BitmovinJSONEncoder)
self.assertEqual(first=start_encoding_request_serialized, second='{"perTitle": {"h264Configuration": {'
'"minBitrate": 500000, "maxBitrate": '
'8000000, "minBitrateStepSize": 15000, '
'"maxBitrateStepSize": 20000, '
'"targetQualityCrf": 0.5, '
'"autoRepresentations": {'
'"adoptConfigurationThreshold": 1.5}}}}')
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jonathan-hsu123/AdaSpeech2",
"score": 2
} |
#### File: jonathan-hsu123/AdaSpeech2/wav2mel.py
```python
from utils.stft import TacotronSTFT
from utils.util import read_wav_np
from dataset.audio_processing import pitch
import os
import glob
import tqdm
import torch
import argparse
from utils.hparams import HParam
import torch.nn.functional as F
from utils.util import str_to_int_list
import numpy as np
def preprocess(data_path, hp, file):
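    # NOTE: data_path and file are currently unused; a fixed reference wav is processed below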
stft = TacotronSTFT(
filter_length=hp.audio.n_fft,
hop_length=hp.audio.hop_length,
win_length=hp.audio.win_length,
n_mel_channels=hp.audio.n_mels,
sampling_rate=hp.audio.sample_rate,
mel_fmin=hp.audio.fmin,
mel_fmax=hp.audio.fmax,
)
sr, wav = read_wav_np('./ref_wav/Eleanor.wav', hp.audio.sample_rate)
p = pitch(wav, hp) # [T, ] T = Number of frames
wav = torch.from_numpy(wav).unsqueeze(0)
mel, mag = stft.mel_spectrogram(wav) # mel [1, 80, T] mag [1, num_mag, T]
mel = mel.squeeze(0) # [num_mel, T]
mag = mag.squeeze(0) # [num_mag, T]
e = torch.norm(mag, dim=0) # [T, ]
p = p[: mel.shape[1]]
np.save("./ref_wav/Eleanor.npy", mel.numpy(), allow_pickle=True)
def main(args, hp):
preprocess(args.config, hp, hp.data.train_filelist)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-c", "--config", type=str, required=True, help="yaml file for configuration"
)
args = parser.parse_args()
hp = HParam(args.config)
main(args, hp)
``` |
{
"source": "JonathanHunz/BLL_MNIST",
"score": 3
} |
#### File: data/utils/BinaryReader.py
```python
import gzip
from .DataReader import DataReader
class BinaryReader(DataReader):
def __init__(self, path, element_count, element_size, offset=0):
super().__init__()
self.element_count = element_count
self.element_size = element_size
self.index = 0
# Open file as binary stream
stream = gzip.open(path)
# Skip offset bytes
stream.read(offset)
self.stream = stream
def next(self):
# Read and return next element
if self.index < self.element_count:
self.index += 1
return self.stream.read(self.element_size)
else:
raise StopIteration
```
#### File: data/utils/Extractor.py
```python
import os
import logging
import tensorflow as tf
import numpy as np
from .BinaryReader import BinaryReader
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
class Extractor:
"""Extracts data from data-sources and writes them to a .tfrecords file
Attributes:
dataset (dictionary): Dataset specification in form of a dictionary
input_dir (string): Path of the directory in which the input files can be found
output_dir (string): Path of the directory in which the .tfrecords file will be generated in
"""
def __init__(self, dataset, input_dir, output_dir):
self.input_dir = input_dir
self.output_dir = output_dir
self.dataset = dataset
def extract(self):
"""Generates features from a given dataset specification and input files and writen the to a .tfrecords file"""
# Create output directory if not existent
if not os.path.isdir(self.output_dir):
os.makedirs(self.output_dir)
logging.debug("Created data directory (%s)", self.output_dir)
output_filename = self.dataset["name"] + ".tfrecords"
output_path = os.path.join(self.output_dir, output_filename)
logging.info("Extracting data")
# Create file readers for all features
feature_readers = {}
for feature in self.dataset["features"]:
example_size = np.prod(feature["shape"])
source_path = os.path.join(self.input_dir, feature["source"])
reader = BinaryReader(source_path, self.dataset["element_count"], example_size, feature["offset"])
feature_readers[feature["name"]] = reader
# Initialize TFRecord writer
writer = tf.python_io.TFRecordWriter(output_path)
for _ in range(self.dataset["element_count"]):
feature_list = {}
for feature in self.dataset["features"]:
reader = feature_readers[feature["name"]]
# Read next element
raw_data = np.frombuffer(reader.next(), dtype=np.uint8)
if feature["type"] == "byte":
raw_data = raw_data.astype(np.float32)
raw_data.resize(feature["shape"])
feature_list[feature["name"]] = _bytes_feature(raw_data.tostring())
elif feature["type"] == "int":
raw_data = raw_data.astype(np.int64)
raw_data.resize(feature["shape"])
feature_list[feature["name"]] = _int64_feature(int(raw_data))
# Create example
example = tf.train.Example(features=tf.train.Features(feature=feature_list))
# Write example to TFRecord
writer.write(example.SerializeToString())
writer.close()
``` |
{
"source": "JonathanHuot/cellarpy",
"score": 3
} |
#### File: cellarpy/cellar/menu.py
```python
from bottle import app
from bottle import request
def __convert_menu(route):
entry = {
"name": route["name"],
"url": route["rule"],
}
entry.update(route["config"])
return entry
def read_menu_entry(name):
try:
for route in app().routes:
entry = route.__dict__
if name == entry["name"]:
return __convert_menu(entry)
return None
except:
return None
def read_menu(menutitle):
entries = []
for route in app().routes:
entry = route.__dict__
if "name" not in entry or not entry["name"]:
continue
if "menu" not in entry["config"] or entry["config"]["menu"] != menutitle:
continue
entries.append(__convert_menu(entry))
return entries
def read_breadcrumb():
current = read_current()
if current["name"] == "homepage":
return []
return [read_menu_entry(name="homepage")]
def read_current():
return __convert_menu(request.route.__dict__)
```
#### File: cellarpy/tests/test_settings.py
```python
import unittest
import tempfile
import cellar.settings
from os import path
import os
import json
try:
from tempfile import TemporaryDirectory
except ImportError:
from backports.tempfile import TemporaryDirectory
class test_settings(unittest.TestCase):
def test_load(self):
with tempfile.NamedTemporaryFile(mode='w+') as temp:
foo = {
"x": 12,
"y": [
"12",
"23"
],
"z": "zzz"
}
json.dump(foo, temp)
temp.flush()
bar = cellar.settings.load_settings(
path.basename(temp.name),
dirs=[path.dirname(temp.name)]
)
self.assertEqual(foo, bar, "loaded settings must be equal to dict")
def test_fromenv(self):
with tempfile.NamedTemporaryFile(mode='w+') as temp:
foo = {"x": 12}
json.dump(foo, temp)
temp.flush()
try:
os.environ["CELLAR_SETTINGS"] = temp.name
bar = cellar.settings.load_settings()
self.assertEqual(foo, bar, "loaded settings must be equal to dict")
finally:
del os.environ["CELLAR_SETTINGS"]
def test_readperm(self):
with TemporaryDirectory() as dir1:
with TemporaryDirectory() as dir2:
with open(path.join(dir1, "settings.json"), "w+") as fd:
json.dump(["foo"], fd)
with open(path.join(dir2, "settings.json"), "w+") as fd:
json.dump(["bar"], fd)
os.chmod(path.join(dir1, "settings.json"), 0)
bar = cellar.settings.load_settings(
"settings.json",
dirs=[dir1, dir2]
)
self.assertEqual(["bar"], bar, "loaded settings must be the 1st w/ read access")
def test_storage(self):
pass
``` |
{
"source": "jonathanHuwP/CrystalGrowthTracker",
"score": 2
} |
#### File: cgt/gui/markupwidget.py
```python
import PyQt5.QtWidgets as qw
import PyQt5.QtCore as qc
import PyQt5.QtGui as qg
from cgt.gui.markupview import MarkUpStates
from cgt.gui.resultsstoreproxy import ResultsStoreProxy
from cgt.gui.videobasewidget import PlayStates
from cgt.util.markers import (get_region,
get_frame,
hash_marker)
# import UI
from cgt.gui.Ui_markupwidget import Ui_MarkUpWidget
class MarkUpWidget(qw.QWidget, Ui_MarkUpWidget):
"""
The tool for marking up items identified in the video
"""
def __init__(self, parent, data_source):
"""
the object initalization function
Args:
parent (QObject): the parent QObject for this widget
data_store (CrystalGrowthTrackeMain) the main GUI
"""
super().__init__(parent)
self.setupUi(self)
# pointer to the main gui
self._data_source = data_source
## a proxy for the data store
self._results_proxy = None
## the current frame of the video
self._current_frame = 0
## the current raw pixmap
self._current_pixmap = None
## playing state of the video
self._playing = PlayStates.MANUAL
## are the markers going forward or backward in time
self._entry_forward = True
## has a key-frame been set for the region
self._base_key_frame = None
## pointer for the help dialog
self._help = None
## pointer for the video source
self._video_source = None
self._entryView.set_parent_and_pens(self, self._data_source.get_pens())
self._cloneView.set_parent_and_pens(self, self._data_source.get_pens())
self._cloneView.assign_state(MarkUpStates.CLONE_ITEM)
self.make_connections()
font = qg.QFont( "Monospace", 8, qg.QFont.DemiBold)
self._frameLabel.setFont(font)
self._frameLabel_2.setFont(font)
self._videoNameLabel.setFont(font)
def set_results(self, results_store):
"""
set a new results object
Args:
results_store (VideoAnalysisResultsStore) the store
"""
self._results_proxy = ResultsStoreProxy(results_store,
self._entryView,
self._cloneView)
self.setup_regions_combobox()
def setup_video_widget(self):
"""
connect up the control widgets
"""
self.make_connections()
def make_connections(self):
"""
set up the video control connections
"""
self._entryControls.zoom_value.connect(self.entry_zoom_changed)
self._entryControls.forwards.connect(self.play_video)
self._entryControls.backwards.connect(self.play_reverse_video)
self._entryControls.pause.connect(self.pause_video)
self._entryControls.one_frame_forward.connect(self.step_video)
self._entryControls.one_frame_backward.connect(self.step_reverse_video)
self._entryControls.start_end.connect(self.start_or_end)
self._entryControls.frame_changed.connect(self.display_frame)
self._cloneControls.zoom_value.connect(self.clone_zoom_changed)
self._cloneControls.forwards.connect(self.play_video)
self._cloneControls.backwards.connect(self.play_reverse_video)
self._cloneControls.pause.connect(self.pause_video)
self._cloneControls.one_frame_forward.connect(self.step_video)
self._cloneControls.one_frame_backward.connect(self.step_reverse_video)
self._cloneControls.start_end.connect(self.start_or_end)
self._cloneControls.frame_changed.connect(self.display_frame)
self._cloneControls.disable_all_but_zoom()
def setup_regions_combobox(self):
"""
add list of regions to combobox
"""
if self._results_proxy is None:
return
self._regionsBox.blockSignals(True)
all_regions = self._results_proxy.get_regions()
index = self._regionsBox.currentIndex()
self._regionsBox.clear()
for i in range(len(all_regions)):
self._regionsBox.addItem(f"Region {i}")
if index > -1:
self._regionsBox.setCurrentIndex(index)
self._regionsBox.blockSignals(False)
if self._video_source is not None:
self.region_changed()
def get_results_proxy(self):
"""
getter for results proxy object
Returns:
pointer to the results proxy object
"""
return self._results_proxy
@qc.pyqtSlot()
def region_changed(self):
"""
callback for change of regions combobox
"""
key_frames = self._results_proxy.get_key_frames(self._regionsBox.currentIndex())
if key_frames is not None:
if self._entry_forward:
self._base_key_frame = min(key_frames)
else:
self._base_key_frame = max(key_frames)
self.region_changed_with_key_frames()
else:
self._base_key_frame = None
self.region_changed_no_key_frames()
self.fill_key_frame_combo()
def region_changed_with_key_frames(self):
"""
handel region change if the region posesses key frames
"""
self.time_changed(self._base_key_frame)
self._cloneControls.enable_all()
self._entryControls.freeze()
if self._entry_forward:
self._cloneControls.set_range(self._video_source.get_video_data().get_frame_count(),
self._base_key_frame)
else:
self._cloneControls.set_range(self._base_key_frame, 0)
if self._current_pixmap is not None:
self._results_proxy.clear()
self.display_pixmap()
self._results_proxy.redraw_markers(self._regionsBox.currentIndex())
def region_changed_no_key_frames(self):
"""
handel region change if the region posesses no key frames
"""
if self._current_pixmap is not None:
self._results_proxy.clear()
self.display_pixmap()
self._entryControls.enable_all()
self._entryControls.set_range(self._video_source.get_video_data().get_frame_count(), 0)
self._cloneControls.disable_all_but_zoom()
self._cloneControls.set_range(self._video_source.get_video_data().get_frame_count(), 0)
def display_frame(self, frame):
"""
display a given frame
Args:
frame (int): the time of the frame to display (user FPS)
"""
pixmap = self._video_source.get_pixmap(frame)
self.display_image(pixmap, frame)
def display_image(self, pixmap, frame_number):
"""
callback function to display an image from a source
Args:
pixmap (QPixmap) the pixmap to be displayed
frame_number (int) the frame number of the video
"""
if self._regionsBox.count() < 1:
return
self._current_pixmap = pixmap
self._current_frame = frame_number
self.display_pixmap()
self.set_key_frame_combo()
# display the current time
data = self._video_source.get_video_data()
length = data.get_time_duration_user()
now = data.frame_to_user_time(self._current_frame)
message = f"Time {now:0>5.1f} of {length:0>5.1f} (Frames: {data.get_frame_count()})"
self._frameLabel_2.setText(message)
if self._base_key_frame is None:
self._frameLabel.setText(message)
self._entryControls.set_frame_currently_displayed(self._current_frame)
self._cloneControls.set_frame_currently_displayed(self._current_frame)
delay = self._video_source.get_video_data().get_user_time_step()
qc.QTimer.singleShot(delay, self.next_pixmap)
@qc.pyqtSlot()
def next_pixmap(self):
"""
call back for timer in display, will incrament/decrement frame as needed
"""
if self._playing == PlayStates.PLAY_FORWARD:
self.incrament_frame()
elif self._playing == PlayStates.PLAY_BACKWARD:
self.decrament_frame()
def display_pixmap(self):
"""
display the current pixmap
"""
pixmap = self._current_pixmap
regions = self._results_proxy.get_regions()
if len(regions) > 0:
index = self._regionsBox.currentIndex()
region = regions[index].rect()
pixmap = self._current_pixmap.copy(region.toRect())
if self._base_key_frame is None:
self._entryView.set_region_pixmap(pixmap, self._current_frame, index)
elif self._base_key_frame == self._current_frame:
self._entryView.set_region_pixmap(pixmap, self._base_key_frame, index)
self._cloneView.set_region_pixmap(pixmap, self._current_frame, index)
def redisplay(self):
"""
emit the current frame
"""
self.display_frame(self._current_frame)
def set_key_frame_combo(self):
"""
check if the current frame is a key_frame & if so change combo
"""
if self._base_key_frame is None:
return
key_frames = self._results_proxy.get_key_frames(self._regionsBox.currentIndex())
if key_frames is None:
return
if self._current_frame in key_frames:
index = key_frames.index(self._current_frame)
self._keyFrameBox.blockSignals(True)
self._keyFrameBox.setCurrentIndex(index+1)
self._keyFrameBox.blockSignals(False)
return
if not self._keyFrameBox.currentIndex() == 0:
self._keyFrameBox.blockSignals(True)
self._keyFrameBox.setCurrentIndex(0)
self._keyFrameBox.blockSignals(False)
def setEnabled(self, enabled):
"""
enable/disable widget: on enable the source
is connected; on disable play is paused
"""
if enabled and self._video_source is not None:
super().setEnabled(True)
self.redisplay()
self.region_changed()
elif not enabled:
super().setEnabled(False)
self.play_pause()
def set_video_source(self, video_source):
"""
set the video_source object, set length for controls
Args:
video_source (VideoSource): the source object
"""
self._video_source = video_source
self._cloneControls.set_range(self._video_source.get_video_data().get_frame_count())
self._entryControls.set_range(self._video_source.get_video_data().get_frame_count())
@qc.pyqtSlot()
def play_video(self):
"""
callback for starting the video
"""
self._playing = PlayStates.PLAY_FORWARD
self.block_user_entry()
self.incrament_frame()
@qc.pyqtSlot()
def step_video(self):
"""
callback for stepping the video one frame
"""
if self._playing == PlayStates.MANUAL:
self.incrament_frame()
@qc.pyqtSlot()
def pause_video(self):
"""
callback for calling the video
"""
self._playing = PlayStates.MANUAL
self.unblock_user_entry()
@qc.pyqtSlot()
def step_reverse_video(self):
"""
callback for calling the video
"""
if self._playing == PlayStates.MANUAL:
self.decrament_frame()
@qc.pyqtSlot()
def play_reverse_video(self):
"""
callback for calling the video
"""
self._playing = PlayStates.PLAY_BACKWARD
self.block_user_entry()
self.decrament_frame()
@qc.pyqtSlot(int)
def time_changed(self, frame):
"""
callback for the jump to a new frame
Args:
frame (int) the frame number for the jump
"""
self.display_frame(frame)
@qc.pyqtSlot(bool)
def start_or_end(self, start):
"""
callback for moving the video to the start or end frame
Args:
start (bool) if true first frame else last
"""
if start:
self.display_frame(self._video_source.get_video_data().get_frame_count()-1)
else:
self.display_frame(0)
@qc.pyqtSlot(int)
def jump_to_key_frame(self, index):
"""
jump the clone view to a key-frame
Args:
index (int) the array index of the key frame
"""
frame = self._keyFrameBox.itemData(index)
if frame is not None:
self.time_changed(frame)
@qc.pyqtSlot(float)
def entry_zoom_changed(self, zoom_value):
"""
callback for change of entry controls zoom
"""
self._entryView.set_zoom(zoom_value)
@qc.pyqtSlot(float)
def clone_zoom_changed(self, zoom_value):
"""
callback for change of clone controls zoom
"""
self._cloneView.set_zoom(zoom_value)
@qc.pyqtSlot(qw.QAbstractButton)
def clone_view_set_state(self, button):
"""
callback for set state of right view
Args:
button (QPushButton) the button
"""
if button == self._cloneButton:
self._cloneView.assign_state(MarkUpStates.CLONE_ITEM)
elif button == self._deleteCloneButton:
self._cloneView.assign_state(MarkUpStates.DELETE_ITEM)
@qc.pyqtSlot(int)
def entry_view_set_marker_type(self, index):
"""
callback for set artifact input type on right view
Args:
index (int) index of selected item
"""
if index == 0:
self._entryView.assign_state(MarkUpStates.DRAW_LINES)
elif index == 1:
self._entryView.assign_state(MarkUpStates.DRAW_CROSS)
def incrament_frame(self):
"""
emit a signal for the next frame looping at max
"""
upper_limit = self._video_source.get_video_data().get_frame_count()
lower_limit = 0
if self._base_key_frame is not None:
current_range = self._cloneControls.get_range()
if self._entry_forward:
lower_limit = current_range[0]
else:
upper_limit = current_range[1]
if self._current_frame < upper_limit-1:
self.display_frame(self._current_frame+1)
else:
self.display_frame(lower_limit)
def decrament_frame(self):
"""
emit a signal for the previous frame looping at min
"""
upper_limit = self._video_source.get_video_data().get_frame_count()
lower_limit = 0
if self._base_key_frame is not None:
current_range = self._cloneControls.get_range()
if self._entry_forward:
lower_limit = current_range[0]
else:
upper_limit = current_range[1]
if self._current_frame > lower_limit:
self.display_frame(self._current_frame-1)
else:
self.display_frame(upper_limit-1)
def add_point(self, point):
"""
add point to results asking user if a new key frame is generated
Args:
point (QGraphicsPathItem)
"""
key_frame = get_frame(point)
region = get_region(point)
key_frames = self._results_proxy.get_key_frames(region)
if key_frames is None:
if self.request_start_of_key_frame():
self.start_region_key_frame(key_frame)
self._results_proxy.add_point(point)
else:
self._results_proxy.remove_item_from_views(hash_marker(point))
return
if key_frame in key_frames:
self._results_proxy.add_point(point)
return
if self.request_start_of_key_frame():
self._results_proxy.add_point(point)
self.fill_key_frame_combo(key_frame)
else:
self._results_proxy.remove_item_from_views(hash_marker(point))
def add_line(self, line):
"""
add line to results asking user if a new key frame is generated
Args:
line (QGraphicsLineItem)
"""
key_frame = get_frame(line)
region = get_region(line)
key_frames = self._results_proxy.get_key_frames(region)
if key_frames is None:
if self.request_start_of_key_frame():
self.start_region_key_frame(key_frame)
self._results_proxy.add_line(line)
else:
self._results_proxy.remove_item_from_views(hash_marker(line))
return
if key_frame in key_frames:
self._results_proxy.add_line(line)
return
if self.request_start_of_key_frame():
self._results_proxy.add_line(line)
self.fill_key_frame_combo(key_frame)
else:
self._results_proxy.remove_item_from_views(hash_marker(line))
def request_start_of_key_frame(self):
"""
check if user wants to start a new key frame
Returns:
True if yes else False
"""
reply = qw.QMessageBox.question(self,
self.tr("New Key Frame?"),
self.tr("Do you wish to start a new <b>key-frame</b>?"),
qw.QMessageBox.Yes|qw.QMessageBox.No,
qw.QMessageBox.No)
return reply == qw.QMessageBox.Yes
def start_region_key_frame(self, key_frame):
"""
first key frame added to region
Args:
key_frame (int) the key frame
"""
self._cloneControls.enable_all()
self._entryControls.freeze()
self._base_key_frame = self._current_frame
self._keyFrameBox.blockSignals(True)
self._keyFrameBox.clear()
self._keyFrameBox.addItem(self.tr("None"))
self._keyFrameBox.addItem(f"{key_frame}", key_frame)
self._keyFrameBox.blockSignals(False)
if self._entry_forward:
self._cloneControls.set_range(self._video_source.get_video_data().get_frame_count(), key_frame)
else:
self._cloneControls.set_range(key_frame, 0)
def fill_key_frame_combo(self, current_key_frame=-1):
"""
start a new key frame
Args:
current_key_frame (int) if viewer is in a newly defined key-frame, set this frame
"""
self._keyFrameBox.blockSignals(True)
self._keyFrameBox.clear()
key_frames = self._results_proxy.get_key_frames(self._regionsBox.currentIndex())
self._keyFrameBox.addItem(self.tr("None"))
if key_frames is None:
return
set_index = None
for i, key_frame in enumerate(key_frames):
self._keyFrameBox.addItem(f"{key_frame}", key_frame)
if key_frame == current_key_frame:
set_index = i
if set_index is not None:
self._keyFrameBox.setCurrentIndex(set_index+1)
self._keyFrameBox.blockSignals(False)
def add_marker(self, marker):
"""
add marker to results asking user if a new key frame is generated
Args:
line (QGraphicsItem)
"""
if self._results_proxy.check_if_marker_already_has_key_frame(marker):
qw.QMessageBox.warning(self,
self.tr("Warning no new frame"),
self.tr("The selected marker is already defined in this frame!"))
self._results_proxy.remove_item_from_views(hash_marker(marker))
return
key_frame = get_frame(marker)
region = get_region(marker)
key_frames = self._results_proxy.get_key_frames(region)
if key_frames is None:
self._results_proxy.add_marker(marker)
return
if key_frame in key_frames:
self._results_proxy.add_marker(marker)
return
if self.request_start_of_key_frame():
self._results_proxy.add_marker(marker)
self.fill_key_frame_combo(key_frame)
else:
self._results_proxy.remove_item_from_views(hash_marker(marker))
def block_user_entry(self):
"""
stop user drawing or cloning
"""
self._entryView.assign_state(MarkUpStates.VIEW_ONLY)
self._cloneView.assign_state(MarkUpStates.VIEW_ONLY)
self._regionsBox.setEnabled(False)
def unblock_user_entry(self):
"""
allow user to draw or clone
"""
if self._entryComboBox.currentIndex() == 0:
self._entryView.assign_state(MarkUpStates.DRAW_LINES)
elif self._entryComboBox.currentIndex() == 1:
self._entryView.assign_state(MarkUpStates.DRAW_CROSS)
if self._cloneButtonGroup.checkedButton() == self._cloneButton:
self._cloneView.assign_state(MarkUpStates.CLONE_ITEM)
elif self._cloneButtonGroup.checkedButton() == self._deleteCloneButton:
self._cloneView.assign_state(MarkUpStates.DELETE_ITEM)
self._regionsBox.setEnabled(True)
@qc.pyqtSlot()
def play_pause(self):
"""
pause the playing
"""
self._playing = PlayStates.MANUAL
@qc.pyqtSlot()
def display_help(self):
"""
pop-up help
"""
text = """<ol>
<li>Use the left controls to find a first frame.</li>
<li>Draw lines and select points on the left image.</li>
<li>Use right controls to find next frame.</li>
        <li>Select and drag lines and points on the right image.</li>
</ol>
"""
self._help = qw.QTextBrowser()
self._help.setWindowFlags(qc.Qt.Window)
self._help.setDocumentTitle(self.tr("Feature Tracking Help"))
self._help.setText(text)
self._help.show()
def clear(self):
"""
        empty the scene graphs and the results proxy
"""
self._cloneView.scene().clear()
self._entryView.scene().clear()
self._results_proxy = None
def grab_clone_image(self):
"""
save what part of the clone view is visible to the user (WYSIWYG).
Returns:
QPixmap holding the visible part of the view
"""
return self._cloneView.viewport().grab()
def grab_entry_image(self):
"""
save what part of the entry view is visible to the user (WYSIWYG).
Returns:
QPixmap holding the visible part of the view
"""
return self._entryView.viewport().grab()
def get_clone_image(self):
"""
Save the whole clone view contents.
Returns:
QImage holding image of everything within the scene-graph's bounding rectangle
"""
return self.get_scene_image(self._cloneView)
def get_entry_image(self):
"""
Save the whole entry view contents.
Returns:
QImage holding image of everything within the scene-graph's bounding rectangle
"""
return self.get_scene_image(self._entryView)
def update_data_display(self):
"""
        data changed, reload the regions
"""
self.setup_regions_combobox()
def save_scene(self, file_path):
"""
        save the current scene regardless of the current view
Args:
file_path (string): the file
"""
self._cloneView.save_scene(file_path)
@staticmethod
def get_scene_image(view):
"""
get an image of a whole scene
Args:
view (QGraphicsView) the view holding the scene
Returns:
QImage holding image of everything within the scene-graph's bounding rectangle
"""
bound_rect = view.scene().itemsBoundingRect()
image = qg.QImage(bound_rect.size().toSize(),
qg.QImage.Format_ARGB32)
top_left = view.mapFromScene(bound_rect.toAlignedRect().topLeft())
bottom_right = view.mapFromScene(bound_rect.toAlignedRect().bottomRight())
image.fill(qc.Qt.white)
painter = qg.QPainter(image)
view.render(painter, source=qc.QRect(top_left, bottom_right))
# avoid error if pixmap is garbage collected before painter
del painter
return image
def display_video_file_name(self):
"""
        display the name of the video
"""
name = self._data_source.get_project()["enhanced_video_no_path"]
if name is not None:
self._videoNameLabel.setText(name)
def change_entry_pens(self):
"""
force redraw of view
"""
self._entryView.change_display_pen(self._data_source.get_pens())
```
#### File: cgt/gui/projectstartdialog.py
```python
import sys
import os
from pathlib import Path
import PyQt5.QtWidgets as qw
import PyQt5.QtCore as qc
from cgt.util.utils import timestamp
from cgt.gui.Ui_projectstartdialog import Ui_ProjectStartDialog
class ProjectStartDialog(qw.QDialog, Ui_ProjectStartDialog):
"""
a qDialog the allows the user to start a new project
"""
def __init__(self, parent=None):
"""
set up the dialog
Args:
parent (QObject) the parent object
Returns:
None
"""
super().__init__(parent)
## the parent object, if any
self._parent = parent
## the name in translation, if any
self._translated_name = self.tr("ProjectStartDialog")
self.setupUi(self)
self._projDir.setText(os.path.expanduser('~'))
@qc.pyqtSlot()
def find_project_dir(self):
"""
callback for running a file dialog to find the directory
in which the project directory will reside
Returns:
None
"""
dir_name = qw.QFileDialog.getExistingDirectory(
self,
self.tr("Select directory"),
os.path.expanduser('~'),
options=qw.QFileDialog.ShowDirsOnly)
if dir_name is not None:
self._projDir.setText(dir_name)
@qc.pyqtSlot()
def find_enhanced_video_file(self):
"""
callback for running a file dialog to find the enhanced_video file
Returns:
None
"""
file_name, _ = qw.QFileDialog.getOpenFileName(
self,
self.tr("Project Source File"),
os.path.expanduser('~'),
self.tr("AVI (*.avi)"))
if file_name is not None:
self._enhancedVideo.setText(file_name)
file = os.path.basename(self._enhancedVideo.text())
file = file.rsplit('.', 1)[0]
file += "_" + timestamp()
self._projName.setText(file)
@qc.pyqtSlot()
def find_raw_video_file(self):
"""
callback for running a file dialog to find the raw_video file
Returns:
None
"""
file_name, _ = qw.QFileDialog.getOpenFileName(
self,
self.tr("Processed Copy of Source"),
os.path.expanduser('~'),
self.tr("AVI (*.avi)"))
if file_name is not None:
self._rawVideo.setText(file_name)
@qc.pyqtSlot()
def make_project(self):
"""
callback for finished, validates data and calls the start_project
method of the parent
Returns:
None
"""
text = self._enhancedVideo.text().strip()
if not text:
message = self.tr("You must provide a enhanced_video file")
qw.QMessageBox.warning(self, "Error", message)
return
enhanced_video = Path(text)
if not enhanced_video.exists():
message = self.tr("Source file \"{}\" does not exist!")
message = message.format(enhanced_video)
qw.QMessageBox.critical(self, "Error", message)
return
text = self._rawVideo.text().strip()
if text:
raw_video = Path(text)
if not raw_video.exists():
message = self.tr(f"Source file \"{raw_video}\" does not exist!")
qw.QMessageBox.critical(self, "Error", message)
return
if raw_video == enhanced_video:
message = self.tr(f"Enhanced video file \"{enhanced_video}\" ")
message += self.tr("and raw video file \"{raw_video}\" are the same!")
qw.QMessageBox.critical(self, "Error", message)
return
else:
raw_video = None
proj_name = self._projName.text().strip()
if not proj_name:
message = self.tr("You must provide a project name!")
qw.QMessageBox.warning(self, "Error", message)
return
text = self._projDir.text().strip()
if not text:
message = self.tr("You must provide a project directory path!")
qw.QMessageBox.warning(self, "Error", message)
return
proj_dir = Path(text)
if not proj_dir.exists():
message = self.tr("Project directory location {} does not exist!")
message = message.format(proj_dir)
qw.QMessageBox.warning(self, "Error", message)
return
notes = self._notesEdit.toPlainText().strip()
if self.parent() is not None:
self.parent().start_project(
enhanced_video,
raw_video,
proj_dir,
proj_name,
notes,
self._copyCheckBox.isChecked(),
self._statisticsFromEnhancedVideo.isChecked())
self.close()
else:
message = "Enhanced: {}\nRaw: {}\nPath: {}\nName: {}\nCopy video: {}"
message = message.format(
enhanced_video,
raw_video,
proj_dir,
proj_name,
self._copyCheckBox.isChecked())
print(message)
print(notes)
#######################################
def run():
"""
use a local function to make an isolated the QApplication object
Returns:
None
"""
app = qw.QApplication(sys.argv)
window = ProjectStartDialog()
window.show()
app.exec_()
if __name__ == "__main__":
run()
```
#### File: cgt/gui/regionselectionview.py
```python
from enum import IntEnum
from collections import namedtuple
import PyQt5.QtWidgets as qw
import PyQt5.QtGui as qg
import PyQt5.QtCore as qc
from cgt.gui.videobaseview import VideoBaseView
from cgt.util.scenegraphitems import (make_positive_rect, length_squared)
## storage for a rectangle being drawn, start point + current rectangle
UnfinishedRect = namedtuple("UnfinishedRect", ["start_point", "graphics_rect"])
class SelectStates(IntEnum):
"""
possible states of the widget
"""
VIEW = 0
MAKE_REGION = 2
EDIT_REGION = 4
DELETE_REGION = 8
class RegionSelectionView(VideoBaseView):
"""
    provides a viewer for pixmaps
"""
## a rectangle needs to be shown
show_rect = qc.pyqtSignal(qc.QRectF)
## stop showing the rectangle
stop_showing_rect = qc.pyqtSignal()
def __init__(self, parent):
"""
set up the scene graph
"""
super().__init__(parent)
##
self._parent = parent
## the state
self._state = SelectStates.VIEW
## the mode the rectangle being drawn
self._draw_rect = None
## the store for results
self._data_source = None
def set_data_source(self, data_source):
"""
setter for the results holder
Args:
data_source (CrystalGrowthTrackerMain) the data source
"""
self._data_source = data_source
if self._data_source.get_results() is not None:
self.redisplay_regions()
def redisplay_regions(self):
"""
remove old regions and redisplay current
"""
for item in self.scene().items():
if isinstance(item, qw.QGraphicsRectItem):
self.scene().removeItem(item)
for region in self._data_source.get_results().get_regions():
pen = self._data_source.get_pens().get_display_pen()
region.setPen(pen)
self.scene().addItem(region)
def set_state(self, state):
"""
setter for the operating state
Args:
state (SelectStates) the new state
"""
self._state = state
self._draw_rect = None
self.scene().clearSelection()
self.stop_showing_rect.emit()
if self._state == SelectStates.VIEW:
self.make_regions_selectable()
return
if self._state == SelectStates.EDIT_REGION:
self.make_regions_selectable()
return
if self._state == SelectStates.DELETE_REGION:
self.make_regions_selectable()
return
self.make_regions_selectable(False)
def delete_selected(self):
"""
        delete the currently selected region, after checking for markers and asking for confirmation
"""
if self._state != SelectStates.DELETE_REGION:
return
items = self.scene().selectedItems()
if len(items)==0:
return
item = items.pop(0)
self.show_rect.emit(item.rect())
if self._data_source.region_has_markers(item):
message = "The region has associated markers, that must be deleted before the region."
qw.QMessageBox.critical(self, "Error: Region has markers", message)
return
mb_reply = qw.QMessageBox.question(self,
self.tr('CrystalGrowthTracker'),
self.tr('Do you wish to delete the selected region?'),
qw.QMessageBox.Yes | qw.QMessageBox.No,
qw.QMessageBox.No)
if mb_reply == qw.QMessageBox.Yes:
item.setSelected(False)
self.scene().removeItem(item)
self._data_source.remove_region(item)
self.stop_showing_rect.emit()
def make_regions_selectable(self, flag=True):
"""
change the selectable status of the regions
Args:
            flag (bool) if True items will be selectable, else not selectable
"""
for item in self.scene().items():
if isinstance(item, qw.QGraphicsRectItem):
item.setFlag(qw.QGraphicsItem.ItemIsSelectable, flag)
@qc.pyqtSlot(qg.QMouseEvent)
def mousePressEvent(self, event):
"""
callback for a mouse press
Args:
event (QMouseEvent) the event
"""
self.test_and_select(event)
if self._state == SelectStates.VIEW:
self.display_selected()
return
if self._state == SelectStates.DELETE_REGION:
self.delete_selected()
if self._state == SelectStates.MAKE_REGION:
self.mouse_down_create(event)
if self._state == SelectStates.EDIT_REGION:
self.mouse_down_edit(event)
def mouse_down_edit(self, event):
"""
respond to a mouse button press in EDIT_REGION mode
Args:
event (QMouseEvent) the event
"""
items = self.scene().selectedItems()
if len(items)>0:
self._draw_rect = UnfinishedRect(None, items[0])
self.mouse_event_edit(event)
def mouse_event_edit(self, event):
"""
handle mouse button press in EDIT_REGION mode
Args:
event (QMouseEvent) the event
"""
if self._draw_rect is None:
return
sensitivity = 20
limit = sensitivity*sensitivity
# get the event location in scene and rectangle coordinates
scene_point = self.mapToScene(event.pos())
item_point = self._draw_rect.graphics_rect.mapFromScene(scene_point)
        # test the corners of the old rectangle
rect = self._draw_rect.graphics_rect.rect()
diff = rect.topLeft() - item_point
if length_squared(diff) < limit:
self.new_draw_rect(rect.bottomRight(), item_point)
return
diff = rect.topRight() - item_point
if length_squared(diff) < limit:
self.new_draw_rect(rect.bottomLeft(), item_point)
return
diff = rect.bottomLeft() - item_point
if length_squared(diff) < limit:
self.new_draw_rect(rect.topRight(), item_point)
return
diff = rect.bottomRight() - item_point
if length_squared(diff) < limit:
self.new_draw_rect(rect.topLeft(), item_point)
return
self._draw_rect = None
def new_draw_rect(self, start_point, moving_point):
"""
make a new drawing rectangle
Args:
start_point (QPointF) the fixed point for the drawing, scene coords
moving_point (QPointF) the point the user is moving, scene coords
"""
rect = make_positive_rect(start_point, moving_point)
self._draw_rect.graphics_rect.setRect(rect)
self._draw_rect = UnfinishedRect(start_point, self._draw_rect.graphics_rect)
self.show_rect.emit(self._draw_rect.graphics_rect.rect())
def mouse_down_create(self, event):
"""
down event in MAKE_REGION mode
Args:
event (QMouseEvent) the event
"""
point = self.mapToScene(event.pos())
rect = make_positive_rect(point, point)
pen = self._data_source.get_pens().get_drawing_pen()
rect = self.scene().addRect(rect, pen)
self._draw_rect = UnfinishedRect(point, rect)
self.show_rect.emit(rect.rect())
def mouseMoveEvent(self, event):
"""
callback for a mouse movement
Args:
event (QMouseEvent) the event
"""
self.test_and_select(event)
if self._state == SelectStates.VIEW:
return
if self._state == SelectStates.DELETE_REGION:
return
if self._state == SelectStates.MAKE_REGION:
self.mouse_move_create(event)
if self._state == SelectStates.EDIT_REGION:
self.mouse_move_edit(event)
def mouse_move_edit(self, event):
"""
respond to a mouse movement event in EDIT_REGION mode
Args:
event (QMouseEvent) the event
"""
if self._draw_rect is None:
return
moving_point = self.mapToScene(event.pos())
self.new_draw_rect(self._draw_rect.start_point, moving_point)
def mouse_move_create(self, event):
"""
respond to a mouse movement event in MAKE_REGION mode
Args:
event (QMouseEvent) the event
"""
if self._draw_rect is None:
return
if self._draw_rect.graphics_rect is None:
return
rect = make_positive_rect(self._draw_rect.start_point,
self.mapToScene(event.pos()))
self._draw_rect.graphics_rect.setRect(rect)
self.show_rect.emit(rect)
def mouseReleaseEvent(self, event):
"""
callback for a mouse button release
Args:
event (QMouseEvent) the event
"""
self.test_and_select(event)
if self._state == SelectStates.VIEW:
return
if self._state == SelectStates.DELETE_REGION:
return
if self._state == SelectStates.MAKE_REGION:
self.mouse_up_create(event)
if self._state == SelectStates.EDIT_REGION:
self.mouse_up_edit(event)
self.scene().clearSelection()
def mouse_up_create(self, event):
"""
respond to a user releasing the mouse button in MAKE_REGION mode
Args:
event (QMouseEvent) the event
"""
if self._draw_rect is None:
return
rect = make_positive_rect(self._draw_rect.start_point,
self.mapToScene(event.pos()))
self._draw_rect.graphics_rect.setRect(rect)
pen = self._data_source.get_pens()
self._draw_rect.graphics_rect.setPen(pen.get_display_pen())
self._data_source.append_region(self._draw_rect.graphics_rect)
self._draw_rect = None
def mouse_up_edit(self, event):
"""
respond to a user releasing the mouse button in EDIT_REGION mode
Args:
event (QMouseEvent) the event
"""
if self._draw_rect is None:
return
self.mouse_event_edit(event)
self._draw_rect = None
def display_selected(self):
"""
        if a rectangle is selected, emit the rectangle using the show_rect signal
"""
items = self.scene().selectedItems()
if len(items) <= 0:
self.stop_showing_rect.emit()
return
rect = items[0].rect()
self.show_rect.emit(rect)
def test_and_select(self, event):
"""
test if a mouse event was in a region and if so select the region
        Args:
event (QMouseEvent) the event
"""
point = self.mapToScene(event.pos())
self.scene().clearSelection()
for item in self.scene().items():
if isinstance(item, qw.QGraphicsRectItem):
if item.contains(point):
item.setSelected(True)
return
self.stop_showing_rect.emit()
```
#### File: cgt/gui/videoparametersdialog.py
```python
import PyQt5.QtWidgets as qw
import PyQt5.QtCore as qc
from cgt.gui.Ui_videoparametersdialog import Ui_VideoParametersDialog
class VideoParametersDialog(qw.QDialog, Ui_VideoParametersDialog):
"""
a qDialog the allows the user to start a new project
"""
## the available units for the resolution
RESOLUTION_UNITS = ["nanometers", "microns", "mm"]
@staticmethod
def get_values_from_user(parent, fps, resolution, units):
"""
run VideoParametersDialog to get user input
"""
window = VideoParametersDialog(parent, fps, resolution, units)
window.exec_()
return window.get_values()
def __init__(self, parent=None, fps=None, resolution=None, units=None):
"""
set up the dialog
Args:
parent (QObject) the parent object
            fps (int) frames per second
            resolution (float) the real world size of a pixel
            units (string) the units, must be in RESOLUTION_UNITS
Returns:
None
"""
super().__init__(parent)
## the parent object, if any
self._parent = parent
## the name in translation, if any
self._translated_name = self.tr("VideoParametersDialog")
self.setupUi(self)
## storage for the frames per second
self._fps = fps
## storage for the resolution
self._resolution = resolution
        ## storage for the resolution units
        self._resolution_units = units
        self._unitsComboBox.addItems(VideoParametersDialog.RESOLUTION_UNITS)
        if fps is not None:
            self._fpsBox.setValue(fps)
        if resolution is not None:
            self._resolutionBox.setValue(resolution)
        if units is not None:
            # only look up the index when units is supplied, so a None value cannot raise
            self._unitsComboBox.setCurrentIndex(
                VideoParametersDialog.RESOLUTION_UNITS.index(units))
@qc.pyqtSlot()
def set_parameters(self):
"""
callback for setting the parameters
Returns:
None
"""
self._fps = self._fpsBox.value()
self._resolution = self._resolutionBox.value()
self._resolution_units = self._unitsComboBox.currentText()
self.close()
def get_values(self):
"""
getter for the values
Returns:
            (<frames per second>, <resolution>, <resolution units>)
"""
return self._fps, self._resolution, self._resolution_units
```
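The static helper `get_values_from_user` above wraps construction, modal execution and value retrieval in one call. The following is a minimal usage sketch, not part of the original file; the QApplication scaffolding and the starting values (25 fps, 0.5 microns per pixel) are illustrative assumptions.
```python
import sys
import PyQt5.QtWidgets as qw
from cgt.gui.videoparametersdialog import VideoParametersDialog

def demo():
    """show the dialog with placeholder starting values and print the user's choices"""
    # keep a reference to the application for the lifetime of the dialog
    app = qw.QApplication(sys.argv)
    # blocks until the dialog is closed, then returns the three values
    fps, resolution, units = VideoParametersDialog.get_values_from_user(None, 25, 0.5, "microns")
    print(f"fps: {fps}, resolution: {resolution} {units}")

if __name__ == "__main__":
    demo()
```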
#### File: cgt/io/videoanalyser.py
```python
import subprocess
import os
import PyQt5.QtCore as qc
import numpy as np
import ffmpeg
from cgt.io.ffmpegbase import FfmpegBase
from cgt.util.framestats import FrameStats, VideoIntensityStats
class VideoAnalyser(FfmpegBase):
"""
an object to analyse statistic of a video
"""
## the pixel format and number of bytes
PIX_FMT = ('gray', 1)
## the progress signal
frames_analysed = qc.pyqtSignal(int)
def __init__(self, video_file, parent=None):
"""
        initialize by probing the video file with ffmpeg
Args:
video_file (string) the path to the video file
parent (QObject): parent object
"""
super().__init__(video_file, parent)
self.probe_video(1, VideoAnalyser.PIX_FMT[1])
def stats_whole_film(self):
"""
get the statistics for every frame of the video
Returns:
the statistics (VideoIntensityStats)
"""
if self._video_data is None:
return None
length = self._video_data.get_frame_count()
args = (ffmpeg
.input(self.get_name())
.output('pipe:', format='rawvideo', pix_fmt=VideoAnalyser.PIX_FMT[0], vframes=length)
.compile())
with open(os.devnull, 'w') as f_err:
video_proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=f_err)
return self.read_and_analyse(video_proc)
def read_and_analyse(self, video_proc):
"""
        read the frames and analyse them
        Args:
            video_proc (subprocess): ffmpeg process producing frames
        Returns:
(VideoIntensityStats)
"""
bins = np.linspace(0, 256, 32)
vid_statistics = VideoIntensityStats(bins)
count = 0
flag = True
while flag:
in_bytes = video_proc.stdout.read(self._video_data.get_frame_size())
if not len(in_bytes) == 0:
vid_statistics.append_frame(self.make_stats(in_bytes, bins))
count += 1
if count%10 == 0:
self.frames_analysed.emit(count)
else:
flag = False
self.frames_analysed.emit(count)
return vid_statistics
@staticmethod
def make_stats(image_bytes, bins):
"""
make the statistics for a single frame
Args:
image_bytes (bytes): the image in raw bytes
bins ([int]) the bins for counting
"""
image = np.frombuffer(image_bytes, dtype=np.uint8)
mean = np.mean(image)
standard_deviation = np.std(image)
count, _ = np.histogram(image, bins)
return FrameStats(mean, standard_deviation, count)
def get_number_frames(self):
"""
get number of frames in video
"""
return self._video_data.get_frame_count()
```
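A sketch of driving the analyser outside the GUI; the video path is a placeholder, and the `VideoIntensityStats.get_frames()` / `FrameStats` fields are assumed to match their use elsewhere in this repository.
```python
from cgt.io.videoanalyser import VideoAnalyser

def print_video_stats(video_path):
    """
    run the whole-film statistics pass and print a per-frame summary
    Args:
        video_path (string): path to a video file (placeholder)
    """
    analyser = VideoAnalyser(video_path)
    # progress is emitted every ten frames
    analyser.frames_analysed.connect(lambda count: print(f"{count} frames analysed"))
    stats = analyser.stats_whole_film()
    if stats is None:
        print("video could not be probed")
        return
    for i, frame in enumerate(stats.get_frames()):
        print(f"frame {i}: mean {frame.mean:.2f}, std {frame.std_deviation:.2f}")
```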
#### File: cgt/model/velocitiescalculator.py
```python
from collections import namedtuple
from math import sqrt
from cgt.util.markers import (MarkerTypes,
get_point_of_point,
get_frame)
from cgt.util.scenegraphitems import perpendicular_dist_to_position
class ScreenDisplacement():
"""data type for a single marker displacement"""
def __init__(self, start_frame, end_frame, fps, length):
"""
        initialize the object
Args:
start_frame (int) frame number of the first frame
end_frame (int) frame number of the end frame
fps (float) the number of frames per second
length (int/float) the length in pixels
"""
## first frame of the interval
self._start_frame = None
## end frame the last frame of the interval
self._end_frame = None
## the length of the displacement
self._length = length
## the number of frames per second
self._fps = fps
if start_frame < end_frame:
self._start_frame = start_frame
self._end_frame = end_frame
else:
self._start_frame = end_frame
self._end_frame = start_frame
def get_start(self):
"""
getter for start of time interval
"""
return self._start_frame
def get_end(self):
"""
getter for end of time interval
"""
return self._end_frame
def get_length(self):
"""
getter for the length
"""
return self._length
def get_speed(self):
"""
find the speed of the motion (length/(end-start))
        Returns:
(float) the speed
"""
time_interval = (float(self._end_frame)-float(self._start_frame))/self._fps
raw_speed = (float(self._length))/time_interval
return abs(raw_speed)
## data type for the speed of a marker
MarkerSpeed = namedtuple("MarkerSpeed", ["ID", "m_type", "speed"])
class VelocitiesCalculator():
"""
calculate the velocities of the marker objects
"""
def __init__(self, lines, points, fps, scale):
"""
initialize object
Args:
lines ([]): array of line markers
points ([]): array of point markers
fps (float): the number of frames per second
scale (float): the size of a pixel
"""
## the store of markers
self._lines = lines
self._points = points
self._frames_per_second = fps
self._scale = scale
## the velocities of the lines
self._line_displacements = None
## the velocities of the points
self._point_displacements = None
def get_line_displacements(self):
"""
        getter for the array of line displacements
"""
return self._line_displacements
def get_point_displacements(self):
"""
        getter for the array of point displacements
"""
return self._point_displacements
def number_markers(self):
"""
get the number of line and point markers
Returns:
tuple (number lines, number points)
"""
return (len(self._lines),
len(self._points))
def process_latest_data(self):
"""
get the latest data and calculate the screen displacements
"""
self.make_lines()
self.make_points()
def make_lines(self):
"""
get and convert the marker lines to displacements
"""
self._line_displacements = []
for marker in self._lines:
previous = marker[0]
marker_displacements = []
for i in range(1, len(marker)):
current = marker[i]
if not (current.line().dx() == 0.0 and current.line().dy() == 0.0):
previous_dist = perpendicular_dist_to_position(previous, self._scale)
current_dist = perpendicular_dist_to_position(current, self._scale)
distance = current_dist - previous_dist
start = get_frame(previous)
end = get_frame(current)
# start_frame, end_frame, fps, length
displacement = ScreenDisplacement(start, end, self._frames_per_second, distance)
marker_displacements.append(displacement)
previous = current
if len(marker_displacements) > 0:
self._line_displacements.append(marker_displacements)
def make_points(self):
"""
get and convert the marker points to displacements
"""
self._point_displacements = []
for marker in self._points:
previous = marker[0]
marker_displacements = []
for i in range(1, len(marker)):
current = marker[i]
start = get_point_of_point(previous) + previous.pos()
end = get_point_of_point(current) + current.pos()
seperation = start - end
del_x = seperation.x()*self._scale
del_y = seperation.y()*self._scale
length = sqrt(del_x*del_x + del_y*del_y)
start = get_frame(previous)
end = get_frame(current)
# start_frame, end_frame, fps, length
displacement = ScreenDisplacement(start, end, self._frames_per_second, length)
marker_displacements.append(displacement)
previous = current
if len(marker_displacements) > 0:
self._point_displacements.append(marker_displacements)
def get_average_speeds(self):
"""
make a list of average speeds of all markers
Returns:
[MarkerSpeed] the averages
"""
averages = []
for i, marker in enumerate(self._line_displacements):
speed = 0.0
for item in marker:
speed += item.get_speed()
speed /= float(len(marker))
averages.append(MarkerSpeed(i, MarkerTypes.LINE, speed))
for i, marker in enumerate(self._point_displacements):
speed = 0.0
for item in marker:
speed += item.get_speed()
speed /= float(len(marker))
averages.append(MarkerSpeed(i, MarkerTypes.POINT, speed))
return averages
```
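A short sketch of the calculator in use; it borrows the marker fixtures from `tests/makeresults.py` (reproduced later in this collection), so the printed numbers are whatever those fixtures encode.
```python
from cgt.model.velocitiescalculator import VelocitiesCalculator
from cgt.util.markers import MarkerTypes
import tests.makeresults as mkres

def print_average_speeds():
    """build a calculator from the test fixtures and print the average marker speeds"""
    lines = mkres.make_test_lines()
    points = mkres.make_test_points()
    values = mkres.get_test_values()
    # the calculator takes lists of markers, each marker being a list of key-frame items
    calculator = VelocitiesCalculator([lines], [points], values.fps, values.scale)
    calculator.process_latest_data()
    for item in calculator.get_average_speeds():
        kind = "line" if item.m_type is MarkerTypes.LINE else "point"
        print(f"marker {item.ID} ({kind}): average speed {item.speed:.4f}")
```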
#### File: cgt/model/videoanalysisresultsstore.py
```python
import enum
import bisect
import PyQt5.QtCore as qc
from cgt.util.markers import (ItemDataTypes,
MarkerTypes,
get_parent_hash,
hash_graphics_point,
hash_graphics_line,
get_frame,
get_region,
get_marker_type)
class DataTypes(enum.IntEnum):
"""
define the types of data stored
"""
## a line
LINE = 0
## a point
POINT = 1
## a region
REGION = 2
## a key frame
KEY_FRAME = 3
class VideoAnalysisResultsStore(qc.QObject):
"""
a storage class that records the results of a video analysis
"""
## signal to indicate that the contents has changed
## 0 any change
## 1 Region added
data_changed = qc.pyqtSignal(int)
def __init__(self, parent):
"""
        initialize an object
"""
super().__init__(parent)
## store of lines
self._lines = []
## store of points
self._points = []
## store of regions
self._regions = []
## store of keyframes mapping of regions to [int]
self._key_frames = {}
## storage for the intensity statistics of the video
self._video_statistics = None
## flag to indicate store has been changed
self._changed = False
def has_been_changed(self):
"""
getter for the changed status
Returns:
the changed flag
"""
return self._changed
def reset_changed(self):
"""
make the changed status false
Returns:
None
"""
self._changed = False
def set_changed(self, value=0):
"""
set the changed status to true
Returns:
None
"""
self.data_changed.emit(value)
self._changed = True
def get_video_statistics(self):
"""
getter for the video statistics
Returns:
[FrameStats]
"""
return self._video_statistics
def set_video_statistics(self, video_stats):
"""
setter for the video statistics
Args:
video_stats ([FrameStats]) the statistics
"""
self._video_statistics = video_stats
self.set_changed()
def replace_region(self, region, index):
"""
replace an existing region
Args:
            region (QGraphicsRectItem) the new region
index (int) the list index
Throws:
IndexError: pop index out of range
"""
self._regions[index] = region
self.set_changed(1)
def remove_region(self, index):
"""
remove an item
Args:
index (int) the index of the item to be removed
Throws:
IndexError: pop index out of range
"""
        markers = []
        lines = self.get_lines_for_region(index)
        if lines is not None:
            markers.extend(lines)
        points = self.get_points_for_region(index)
        if points is not None:
            markers.extend(points)
        self._regions.pop(index)
        for marker in markers:
            # each marker is a list of key-frame items; any one of them identifies the marker
            self.delete_marker(marker[0])
self.set_changed(1)
def get_regions(self):
"""
getter for the regions
Returns:
[QGraphicsRectItem]
"""
return self._regions
def get_lines(self):
"""
getter for the lines array
Returns:
the lines array [[QGraphicsLineItem]]
"""
return self._lines
def get_points(self):
"""
getter for the points array
Returns:
            the points array [[QGraphicsPathItem]]
"""
return self._points
def get_key_frames(self, region_index):
"""
get the list of key frames for a region_index
Args:
region_index (int) the array index of the region
Returns:
the list of key frames [int] or None
"""
if region_index not in self._key_frames.keys():
return None
return self._key_frames[region_index]
def get_key_frames_for_points(self, index):
"""
get a list of the key-frames for the point markers at index
Args:
index (int) the array index of the point marker
Returns:
array of key-frames [int]
"""
key_frames = []
for point in self._points[index]:
key_frames.append(get_frame(point))
return key_frames
def get_key_frames_for_lines(self, index):
"""
get a list of the key-frames for the line markers at index
Args:
index (int) the array index of the line marker
Returns:
array of key-frames [int]
"""
key_frames = []
for line in self._lines[index]:
key_frames.append(get_frame(line))
return key_frames
def add_key_frame(self, region_index, frame_number):
"""
add a new key frame
Args:
region_index (int) the array index of the region
frame_number (int) the key_frame number
"""
if region_index not in self._key_frames.keys():
self._key_frames[region_index] = [frame_number]
self.set_changed()
return
if frame_number not in self._key_frames[region_index]:
bisect.insort(self._key_frames[region_index], frame_number)
self.set_changed()
def add_region(self, region):
"""
add a region
Args:
region (QRect) the region
"""
self._regions.append(region)
self.set_changed(1)
def add_point(self, point):
"""
add a new point
Args:
point (QGraphicsPathItem) the path item
"""
if get_parent_hash(point) == "p":
self._points.append([point])
self.add_key_frame(get_region(point), get_frame(point))
self.set_changed()
return None
index = self.find_list_for_new_point(point)
if index is None:
raise LookupError("Graphics path with parent hash not matching any in store")
self._points[index].append(point)
self._points[index].sort(key=get_frame)
self.add_key_frame(get_region(point), get_frame(point))
self.set_changed()
tmp = self._points[index].index(point)
if tmp > 0:
return self._points[index][tmp-1]
return None
def add_line(self, line):
"""
add a new line
Args:
            line (QGraphicsLineItem) the line item
"""
if get_parent_hash(line) == "p":
self._lines.append([line])
self.add_key_frame(get_region(line), get_frame(line))
self.set_changed()
return None
index = self.find_list_for_new_line(line)
if index is None:
raise LookupError("Graphics item with parent hash not matching any in store")
self._lines[index].append(line)
self._lines[index].sort(key=get_frame)
self.add_key_frame(get_region(line), get_frame(line))
self.set_changed()
tmp = self._lines[index].index(line)
if tmp > 0:
return self._lines[index][tmp-1]
return None
def insert_line_marker(self, marker):
"""
        add a new marker to the lines without emitting a results-changed signal
"""
self._lines.append(marker)
def insert_point_marker(self, marker):
"""
        add a new marker to the points without emitting a results-changed signal
"""
self._points.append(marker)
def line_frame_number_unique(self, line):
"""
check if a line is unique, or has a unique frame number
Args:
line (QGraphicsLineItem) the line
Returns:
True if line is unique, or has unique frame number; else False
"""
hash_code = get_parent_hash(line)
if hash_code == "p":
return True
        # if no stored marker holds this line's parent, treat the line as unique
if self.find_list_for_new_line(line) is None:
return True
return False
def find_list_for_new_line(self, line):
"""
get the index of the list holding the parent of a line
Args
line (QGraphicsLineItem) the line, must not have data(0) == "p"
Returns:
            index of the list holding the line's parent, or None if there is no match
"""
hash_code = get_parent_hash(line)
for i, _ in enumerate(self._lines):
for line_move in self._lines[i]:
if hash_graphics_line(line_move) == hash_code:
return i
return None
def find_list_for_old_line(self, line):
"""
get the index of the list holding a line
Args
line (QGraphicsLineItem) the line
Returns:
            index of the list holding the line, or None if there is no match
"""
target = hash_graphics_line(line)
for i, markers in enumerate(self._lines):
hashes = [hash_graphics_line(x) for x in markers]
if target in hashes:
return i
return None
def find_list_for_new_point(self, point):
"""
get the index of the list holding the parent of a point
Args
point (QGraphicsPathItem) the point, must not have data(0) == "p"
Returns:
            index of the list holding the point's parent, or None if there is no match
"""
hash_code = get_parent_hash(point)
for i, _ in enumerate(self._points):
for point_move in self._points[i]:
if hash_graphics_point(point_move) == hash_code:
return i
return None
def find_list_for_old_point(self, point):
"""
get the index of the list holding a point
Args
point (QGraphicsPathItem) the point
Returns:
            index of the list holding the point, or None if there is no match
"""
target = hash_graphics_point(point)
for i, markers in enumerate(self._points):
hashes = [hash_graphics_point(x) for x in markers]
if target in hashes:
return i
return None
def delete_marker(self, marker):
"""
delete marker and all clones
Args:
marker (QGraphicsItem) the marker to be removed
"""
m_type = get_marker_type(marker)
if m_type == MarkerTypes.LINE:
index = self.find_list_for_old_line(marker)
del self._lines[index]
self.set_changed()
if m_type == MarkerTypes.POINT:
index = self.find_list_for_old_point(marker)
del self._points[index]
self.set_changed()
def remove_point(self, hash_code):
"""
        remove the point with the given hash code
        Args:
            hash_code (int) the hash code of the point to be removed
        Returns:
            None if the point had only one key frame, else the remaining points
"""
point_index = None
marker_index = None
for i, points in enumerate(self._points):
for j, point in enumerate(points):
if hash_graphics_point(point) == hash_code:
marker_index = j
point_index = i
if point_index is None or marker_index is None:
return None
del self._points[point_index][marker_index]
self.set_changed()
if len(self._points[point_index]) == 0:
del self._points[point_index]
return None
return self._points[point_index]
def remove_line(self, hash_code):
"""
remove the line with the given hash code
Args:
hash_code (int) the hash code of the line to be removed
Returns:
            None if the line had only one key frame, else the remaining lines
"""
line_index = None
marker_index = None
for i, lines in enumerate(self._lines):
for j, line in enumerate(lines):
if hash_graphics_line(line) == hash_code:
marker_index = j
line_index = i
if line_index is None or marker_index is None:
return None
del self._lines[line_index][marker_index]
self.set_changed()
if len(self._lines[line_index]) == 0:
del self._lines[line_index]
return None
return self._lines[line_index]
def delete_line(self, line, index):
"""
remove a line and fix the linked list
Args:
line (QGraphicsLineItem) the line
index (int) the array index of the list holding the line
"""
root_hash = None
if get_parent_hash(line) == 'p':
if len(self._lines[index]) == 1:
del self._lines[index]
return
root_hash = 'p'
else:
root_hash = get_parent_hash(line)
p_hash = hash_graphics_line(line)
children = [x for x in self._lines[index] if get_parent_hash(x) == p_hash]
if len(children) > 0:
new_p = children.pop(0)
new_p.setData(ItemDataTypes.PARENT_HASH, root_hash)
p_hash = hash_graphics_line(new_p)
for child in children:
child.setData(ItemDataTypes.PARENT_HASH, p_hash)
self._lines[index].remove(line)
self.set_changed()
def delete_point(self, point, index):
"""
remove a point and fix the linked list
Args:
point (QGraphicsPathItem) the point
index (int) the array index of the list holding the point
"""
root_hash = None
if get_parent_hash(point) == 'p':
if len(self._points[index]) == 1:
del self._points[index]
return
root_hash = 'p'
else:
root_hash = get_parent_hash(point)
p_hash = hash_graphics_point(point)
children = [x for x in self._points[index] if get_parent_hash(x) == p_hash]
if len(children) > 0:
new_p = children.pop(0)
new_p.setData(ItemDataTypes.PARENT_HASH, root_hash)
p_hash = hash_graphics_point(new_p)
for child in children:
child.setData(ItemDataTypes.PARENT_HASH, p_hash)
self._points[index].remove(point)
self.set_changed()
def get_lines_for_region(self, index):
"""
get a list of lines associated with a region
Args:
index (int) array index of region
Returns:
list of lines [line], or None if none found
"""
tmp = []
for line in self._lines:
if get_region(line[0]) == index:
tmp.append(line)
if len(tmp) > 0:
return tmp
return None
def get_points_for_region(self, index):
"""
get a list of points associated with a region
Args:
index (int) array index of region
Returns:
list of points [points], or None if none found
"""
tmp = []
for point in self._points:
if get_region(point[0]) == index:
tmp.append(point)
if len(tmp) > 0:
return tmp
return None
def region_has_markers(self, index):
"""
find if a region has associated markers defined.
Args:
index (int): the index of the region
Returns:
True if markers defined else False
"""
        points = self.get_points_for_region(index)
        if points is not None and len(points) > 0:
            return True
        lines = self.get_lines_for_region(index)
        if lines is not None and len(lines) > 0:
            return True
        return False
def change_marker_props(self, pens):
"""
        change the pen of existing items
Args:
pens (PenStore): the holder of the pens
"""
for marker in self._lines:
for line in marker:
line.setPen(pens.get_display_pen())
for marker in self._points:
for point in marker:
point.setPen(pens.get_display_pen())
```
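A sketch of querying an assembled store; the store comes from the `make_results_object` fixture in `tests/makeresults.py` (shown later), so the counts simply reflect that fixture.
```python
from tests.makeresults import make_results_object

def inspect_store():
    """report the regions, key frames and markers held by the fixture store"""
    store = make_results_object()
    # any subsequent edit announces itself through this signal
    store.data_changed.connect(lambda flag: print(f"store changed, flag {flag}"))
    print("regions:", len(store.get_regions()))
    for index, _ in enumerate(store.get_regions()):
        print(f"  region {index} key frames: {store.get_key_frames(index)}")
    print("line markers:", len(store.get_lines()))
    print("point markers:", len(store.get_points()))
    print("region 0 has markers:", store.region_has_markers(0))
```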
#### File: cgt/util/markers.py
```python
import enum
class MarkerTypes(enum.IntEnum):
"""
define the types of marker used in images
"""
## mark a line
LINE = 1
## mark a point
POINT = 2
## a region
REGION=4
## not in any other.
DECORATOR = 8
class ItemDataTypes(enum.IntEnum):
"""
define the indices for storing data in QGraphicsItem
"""
## store for the type of data item
ITEM_TYPE = 0
## store for parent hash code
PARENT_HASH = 1
## store for the number of the frame in which the artifact was defined
FRAME_NUMBER = 2
## the index number of the region in which the mark is defined
REGION_INDEX = 3
## for a cross the centre point
CROSS_CENTRE = 4
def hash_marker(marker):
"""
find hash code for marker
Args:
marker (QGraphicsItem) the item to hash
Returns:
hash code or None if not appropriate type
"""
m_type = get_marker_type(marker)
if m_type == MarkerTypes.LINE:
return hash_graphics_line(marker)
if m_type == MarkerTypes.POINT:
return hash_graphics_point(marker)
return None
def hash_graphics_line(line):
"""
a hash function for QGraphicsLineItem,
Args:
line (QGraphicsLineItem) the line
Returns:
hash of tuple (line hash, position hash, frame)
"""
hashes = (hash_qlinef(line.line()),
hash_qpointf(line.pos()),
hash(line.data(ItemDataTypes.FRAME_NUMBER)))
return hash(hashes)
def hash_graphics_point(point):
"""
a hash function for QGraphicsPathItem,
Args:
        point (QGraphicsPathItem) the point
Returns:
hash of tuple (centre hash, position hash, frame number)
"""
hashes = (hash_qpointf(point.data(ItemDataTypes.CROSS_CENTRE)),
hash_qpointf(point.pos()),
hash(point.data(ItemDataTypes.FRAME_NUMBER)))
return hash(hashes)
def hash_qlinef(line):
"""
a hash function for QLineF,
Args:
line (QLineF) the line
Returns:
hash of tuple formed from end point coordinates (x1, x2, y1, y2)
"""
coords = (line.x1(), line.x2(), line.y1(), line.y2())
return hash(coords)
def hash_qpointf(point):
"""
    a hash function for QPointF
    Args:
        point (QPointF) the point
Returns:
hash of tuple formed from end point coordinates (x, y)
"""
coords = (point.x(), point.y())
return hash(coords)
def hash_framestats(stats):
"""
    get a hash code for the statistics of one frame of video
    Args:
        stats (FrameStats): the statistics of a single frame
    Returns:
        (int) hash code
"""
items = []
items.append(stats.mean)
items.append(stats.std_deviation)
for count in stats.bin_counts:
items.append(count)
return hash(tuple(items))
def hash_videointensitystats(stats):
"""
    get a hash code for a complete set of video statistics
    Args:
        stats (VideoIntensityStats): the statistics of the whole video
    Returns:
        (int) hash code
"""
items = []
for stat in stats.get_frames():
items.append(hash_framestats(stat))
for s_bin in stats.get_bins():
items.append(hash(s_bin))
return hash(tuple(items))
def hash_graphics_region(region):
"""
get hash code for a QGraphicsRectItem
Args:
region (QGraphicsRectItem): the region
Returns:
(int) hash code
"""
rect = region.rect()
tmp = (hash_qpointf(rect.topLeft()), hash_qpointf(rect.bottomRight()))
return hash(tmp)
def hash_results(results):
"""
    find the hash of a results store
    Args:
        results (VideoAnalysisResultsStore): the store to hash
    Returns:
        (int) hash code
"""
items = []
stats = results.get_video_statistics()
if stats is not None:
items.append(hash_videointensitystats(stats))
for marker in results.get_lines():
for line in marker:
items.append(hash_graphics_line(line))
for marker in results.get_points():
for point in marker:
items.append(hash_graphics_point(point))
for region in results.get_regions():
items.append(hash_graphics_region(region))
return hash(tuple(items))
def get_marker_type(item):
"""
get the type enum of the item
Args:
item (QGraphicsItem)
Returns:
the type enum or None
"""
return item.data(ItemDataTypes.ITEM_TYPE)
def get_parent_hash(item):
"""
get the parent hash code of the item
Args:
item (QGraphicsItem)
Returns:
the parent hash code (int): 'p' if progenitor, or None
"""
return item.data(ItemDataTypes.PARENT_HASH)
def get_frame(item):
"""
get the frame number of the item
Args:
item (QGraphicsItem)
Returns:
the frame number (int), or None
"""
return item.data(ItemDataTypes.FRAME_NUMBER)
def get_region(item):
"""
get the index of the region in which the item is defined
Args:
item (QGraphicsItem)
Returns:
the region index (int), or None
"""
return item.data(ItemDataTypes.REGION_INDEX)
def get_point_of_point(item):
"""
get the centre point of a cross
Args:
item (QGraphicsItem)
Returns:
the centre point (QPontF), or None
"""
return item.data(ItemDataTypes.CROSS_CENTRE)
```
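A small sketch of how the data slots are written and then read back with the getters above; the geometry, frame number and region index are arbitrary example values.
```python
import PyQt5.QtWidgets as qw
from cgt.util.markers import (ItemDataTypes,
                              MarkerTypes,
                              get_frame,
                              get_region,
                              get_marker_type,
                              hash_marker)

def make_example_line_marker():
    """tag a graphics line with the metadata the getters and hash functions expect"""
    item = qw.QGraphicsLineItem(0.0, 0.0, 50.0, 25.0)
    item.setData(ItemDataTypes.ITEM_TYPE, MarkerTypes.LINE)
    item.setData(ItemDataTypes.PARENT_HASH, "p")      # "p" marks a progenitor
    item.setData(ItemDataTypes.FRAME_NUMBER, 100)
    item.setData(ItemDataTypes.REGION_INDEX, 0)
    return item

def describe(marker):
    """print the metadata back out using the module's getters"""
    print("type:", get_marker_type(marker))
    print("frame:", get_frame(marker))
    print("region:", get_region(marker))
    print("hash:", hash_marker(marker))
```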
#### File: cgt/util/scenegraphitems.py
```python
from math import (sqrt, isfinite)
import PyQt5.QtGui as qg
import PyQt5.QtCore as qc
import PyQt5.QtWidgets as qw
from cgt.util.markers import (MarkerTypes, ItemDataTypes, get_point_of_point)
def difference_to_distance(difference, scale):
"""
    convert a difference object to a real-world distance
Args:
difference (ImageLineDifference) the difference
scale (float) the pixel size
Returns:
        the average separation as a distance
"""
return difference.average * scale
def difference_list_to_velocities(diff_list, scale, fps):
"""
converts a list of (frame interval, difference) tuples to a list of velocities
Args:
diff_list (tuple(int, ImageLineDifference)) the list of inputs
scale (float) the size of a pixel
fps (int) the number of frames per second
Returns:
a list of velocities
"""
velocities = []
for frames, diff in diff_list:
distance = difference_to_distance(diff, scale)
time = frames/fps
velocity = distance/time
if velocity < 0.0:
velocities.append(-velocity)
else:
velocities.append(velocity)
return velocities
def rectangle_properties(rectangle):
"""
    find the corner points and centre of a rectangle
Args:
rectangle (QRect) the rectangle
Returns:
top left, top right, bottom left, bottom right, centre (QPoint)
"""
top_left = rectangle.topLeft()
top_right = rectangle.topRight()
bottom_left = rectangle.bottomLeft()
bottom_right = rectangle.bottomRight()
ctr = top_left + bottom_right
ctr /= 2
return top_left, top_right, bottom_left, bottom_right, ctr
def qpoint_sepertation_squared(point_a, point_b):
"""
find the square of the distance apart of two points
Args:
point_a (QPoint) first point
point_b (QPoint) second point
Returns:
the square of the distance from a to b
"""
difference = point_a - point_b
return difference.x()*difference.x() + difference.y()*difference.y()
def make_positive_rect(corner, opposite_corner):
"""
draw a rectangle with positive size (x, y) from two points
Args:
corner (QPointF) scene coordinates of a corner
opposite_corner (QPointF) scene coordinates of the opposing corner
"""
# get the width and height (strictly positive)
width = abs(opposite_corner.x()-corner.x())
height = abs(opposite_corner.y()-corner.y())
# find the top left of the new adjusted rectangle
top_left_x = min(opposite_corner.x(), corner.x())
top_left_y = min(opposite_corner.y(), corner.y())
return qc.QRectF(top_left_x, top_left_y, width, height)
def length_squared(point):
"""
square of length from origin of a point
Args:
point (QPointF) the point
Returns
square of length
"""
return point.x()*point.x() + point.y()*point.y()
def make_cross_path(point):
"""
make the path object corresponding to a cross centred at a scene point
Args:
point (QPointF) location in scene coordinates
Returns:
the path (QPainterPath) for the cross
"""
path = qg.QPainterPath()
up_right = qc.QPointF(10.0, 10.0)
up_left = qc.QPointF(-10.0, 10.0)
path.moveTo(point)
path.lineTo(point+up_right)
path.moveTo(point)
path.lineTo(point+up_left)
path.moveTo(point)
path.lineTo(point-up_right)
path.moveTo(point)
path.lineTo(point-up_left)
return path
def cgt_intersection(centred_normal, clone):
"""
find intersection of centred_normal and clone
Args:
centred_normal (QLineF) the normal vector
clone (QLineF) the clone
Returns:
intersection (QPointF) the intersection point
        extension (QLineF) the extension to clone if needed, else None
"""
## based on Graphics Gems III's "Faster Line Segment Intersection"
a = centred_normal.p2() - centred_normal.p1()
b = clone.p1() - clone.p2()
c = centred_normal.p1() - clone.p1()
# test if parallel
denominator = a.y() * b.x() - a.x() * b.y()
if denominator == 0 or not isfinite(denominator):
raise ArithmeticError("Clone line is parallel to parent")
# find the intersection
reciprocal = 1.0 / denominator
na = (b.y() * c.x() - b.x() * c.y()) * reciprocal
intersection = centred_normal.p1() + (a * na)
    # test if outside the clone segment and assign extension as required
nb = (a.x() * c.y() - a.y() * c.x()) * reciprocal
extension = None
if nb < 0.0:
extension = qc.QLineF(clone.p1(), intersection)
elif nb > 1.0:
extension = qc.QLineF(clone.p2(), intersection)
return intersection, extension
def make_arrow_head(line, length_cutoff=10):
"""
if line.length() > length_cutoff add a small triangle to the end
Args:
line (QLineF) the line
length_cutoff (float) the minimum length for a head to be added
Returns:
QPolygon the triangle
"""
if line.length() < length_cutoff:
return None
# make normal based at p2
delta_t = (line.length()-10.0)/line.length()
normal = line.normalVector()
offset = line.pointAt(delta_t)-line.p1()
offset_normal = qc.QLineF(normal.p1()+offset, normal.p2()+offset)
opposit_normal = qc.QLineF(offset_normal.p1(), offset_normal.pointAt(-1.0))
offset_normal.setLength(5.0)
opposit_normal.setLength(5.0)
return qg.QPolygonF([line.p2(), offset_normal.p2(), opposit_normal.p2()])
def make_arrow(line, clone):
"""
make the arrow line between a line and a parallel clone
Args:
line (QLineF) the parent line
clone (QLineF) the parallel clone line
Returns:
arrow_line (QLineF) the arrow line (p1, p2) as parent to clone
extension (QLineF) the extension to clone, None if not needed
"""
# make normal based at centre of parent line
normal = line.normalVector()
centre = line.center()
offset = centre-line.p1()
centred_normal = qc.QLineF(normal.p1()+offset, normal.p2()+offset)
intersection, extension = cgt_intersection(centred_normal, clone)
arrow = qc.QLineF(centre, intersection)
return arrow, extension
def perpendicular_dist_to_position(gline, scale):
"""
find the distance to the position of a QGraphicsLine
Args:
gline (QGraphicsLine): the line
scale (float): the pixel scale
"""
unit_normal = gline.line().normalVector().unitVector()
del_x = gline.pos().x()*unit_normal.dx()*scale
del_y = gline.pos().y()*unit_normal.dy()*scale
return sqrt(del_x*del_x + del_y*del_y)
def rect_to_tuple(rect):
"""
    convert a QRect to a flat list
    Args:
        rect (QRect)
    Returns:
        [left, top, width, height]
"""
array = []
array.append(rect.left())
array.append(rect.top())
array.append(rect.width())
array.append(rect.height())
return array
def g_point_to_tuple(point):
"""
    convert the data in a QGraphicsPathItem representing a point to a flat list
    Args:
        point (QGraphicsPathItem) the point for conversion
    Returns:
        list [centre_x, centre_y, pos_x, pos_y, frame, region]
"""
array = []
centre = point.data(ItemDataTypes.CROSS_CENTRE)
position = point.pos()
array.append(centre.x())
array.append(centre.y())
array.append(position.x())
array.append(position.y())
array.append(point.data(ItemDataTypes.FRAME_NUMBER))
array.append(point.data(ItemDataTypes.REGION_INDEX))
return array
def g_line_to_tuple(line):
"""
    convert the data in a QGraphicsLineItem to a flat list
    Args:
        line (QGraphicsLineItem) the line
    Returns:
        list [x1, y1, x2, y2, pos_x, pos_y, frame, region]
"""
array = []
array.append(line.line().x1())
array.append(line.line().y1())
array.append(line.line().x2())
array.append(line.line().y2())
array.append(line.pos().x())
array.append(line.pos().y())
array.append(line.data(ItemDataTypes.FRAME_NUMBER))
array.append(line.data(ItemDataTypes.REGION_INDEX))
return array
def list_to_g_point(point, pen):
"""
convert the data in a list to a graphics point
Args:
point (list [ID, x, y, pos_x, pos_y, frame, region]) the point as list
pen (QPen) the drawing pen
Returns:
QGraphicsPathItem
"""
centre_x = float(point[1])
centre_y = float(point[2])
position_x = float(point[3])
position_y = float(point[4])
frame = int(point[5])
region = int(point[6])
centre = qc.QPointF(centre_x, centre_y)
position = qc.QPointF(position_x, position_y)
path = make_cross_path(centre)
item = qw.QGraphicsPathItem(path)
item.setPos(position)
item.setData(ItemDataTypes.ITEM_TYPE, MarkerTypes.POINT)
item.setData(ItemDataTypes.FRAME_NUMBER, frame)
item.setData(ItemDataTypes.REGION_INDEX, region)
item.setData(ItemDataTypes.CROSS_CENTRE, centre)
item.setPen(pen)
item.setZValue(1.0)
return item
def list_to_g_line(line, pen):
"""
convert the data in a list to a graphics line
Args:
line (list [ID, x1, y1, x2, y2, pos_x, pos_y, frame, region]) the line as list
pen (QPen) the drawing pen
Returns:
QGraphicsLineItem
"""
x1 = float(line[1])
y1 = float(line[2])
x2 = float(line[3])
y2 = float(line[4])
position_x = float(line[5])
position_y = float(line[6])
frame = int(line[7])
region = int(line[8])
position = qc.QPointF(position_x, position_y)
item = qw.QGraphicsLineItem(x1, y1, x2, y2)
item.setPos(position)
item.setData(ItemDataTypes.ITEM_TYPE, MarkerTypes.LINE)
item.setData(ItemDataTypes.FRAME_NUMBER, frame)
item.setData(ItemDataTypes.REGION_INDEX, region)
item.setPen(pen)
item.setZValue(1.0)
return item
def get_rect_even_dimensions(rect_item, even_dimensions=True):
"""
    get the graphics rectangle of the item, moved to its position, with sides of even length
Args:
rect_item (QGraphicsRectItem)
even_dimensions (bool): if False even dimensions are not enforced
Returns
(QRect): with even length sides
"""
rect = rect_item.rect().toAlignedRect()
pos = rect_item.pos().toPoint()
rect.translate(pos)
width = rect.width()
height = rect.height()
if not even_dimensions:
return rect
if width%2 == 1:
rect.setWidth(width+1)
if height%2 == 1:
rect.setHeight(height+1)
return rect
def compare_lines(first, second):
"""
    compare the lines within two line items
Args:
first (QGraphicsLineItem):
second (QGraphicsLineItem)
"""
return first.line() == second.line()
def compare_points(first, second):
"""
    compare the centre points within two point items
    Args:
        first (QGraphicsPathItem):
        second (QGraphicsPathItem)
"""
return get_point_of_point(first) == get_point_of_point(second)
```
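A sketch of round-tripping a line marker through the list converters above; the pen comes from the project's `PenStore`, as in the test fixtures, and the numeric values are arbitrary.
```python
from cgt.gui.penstore import PenStore
from cgt.util.scenegraphitems import (g_line_to_tuple,
                                      list_to_g_line,
                                      compare_lines)

def round_trip_line():
    """serialise a line marker to a flat list and rebuild an equivalent item"""
    pen = PenStore()
    # id, x1, y1, x2, y2, pos_x, pos_y, frame, region
    original = list_to_g_line(["0", "20", "20", "20", "220", "0", "0", "50", "1"],
                              pen.get_display_pen())
    # g_line_to_tuple drops the id, so prepend one before rebuilding
    row = g_line_to_tuple(original)
    rebuilt = list_to_g_line(["0"] + [str(x) for x in row],
                             pen.get_display_pen())
    print("round trip preserved the geometry:", compare_lines(original, rebuilt))
```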
#### File: jonathanHuwP/CrystalGrowthTracker/run_tests.py
```python
import unittest
import argparse
def get_arguments():
"""
get command line arguments
"""
parser = argparse.ArgumentParser()
parser.add_argument("-q",
"--quiet",
help="if set verbosity is low",
action="store_true")
return parser.parse_args()
def run_tests(args):
"""
run the tests
Args:
args (argparse.namespace): command line
"""
loader = unittest.TestLoader()
test_dir = './tests'
suite = loader.discover(test_dir)
verbosity = 2
if args.quiet:
verbosity = 1
runner = unittest.TextTestRunner(verbosity=verbosity)
runner.run(suite)
if __name__ == '__main__':
run_tests(get_arguments())
```
#### File: CrystalGrowthTracker/tests/makeresults.py
```python
from collections import namedtuple
import PyQt5.QtCore as qc
import PyQt5.QtWidgets as qw
from cgt.gui.penstore import PenStore
from cgt.util.scenegraphitems import list_to_g_line, list_to_g_point
from cgt.model.videoanalysisresultsstore import VideoAnalysisResultsStore
## store for test values
TestValues = namedtuple("TestValues", ["fps", "scale", "point_speed", "line_speed"])
def get_test_values():
"""
get the values needed to set up and carry out a test of calculation of speeds
Returns:
frames per seconds
scale length/pixel
correct point speed
correct line speed
"""
return TestValues(10.0, 1.5, 8.3853, 7.5)
def make_results_object():
"""
make a complete results object
"""
store = VideoAnalysisResultsStore(None)
for region in make_regions():
store.add_region(region)
for item in make_key_frames().items():
for key_frame in item[1]:
store.add_key_frame(item[0], key_frame)
store.insert_line_marker(make_test_lines())
store.insert_point_marker(make_test_points())
return store
def make_region(x, y, width, height):
"""
make a region
Args:
x (float): x coordinate of top left
        y (float): y coordinate of top left
width (float): width of rectangle
height (float):height of rectangle
Returns:
(QGraphicsRectItem)
"""
rect = qc.QRectF(x, y, width, height)
return qw.QGraphicsRectItem(rect)
def make_regions():
"""
make a list of regions
Returns:
        [QGraphicsRectItem]
"""
regions = []
regions.append(make_region(0, 0, 100, 50))
regions.append(make_region(20, 20, 200, 200))
return regions
def make_key_frames():
"""
make key frames for the regions
"""
key_frames = {}
key_frames[0] = [100, 200, 300]
key_frames[1] = [50, 150]
return key_frames
def make_test_points():
"""
make test point data distance moved 55.902 each time step
Returns:
[QGraphicsItem]: holding one point marker with 3 keyframes
"""
string_lists = []
# id, ctrx, ctry, offsetx, offsety, frame, region
string_lists.append(["0", "0", "0", "0", "0", "100", "0"])
string_lists.append(["0", "0", "0", "50", "25", "200", "0"])
string_lists.append(["0", "0", "0", "100", "50", "300", "0"])
pen = PenStore()
points = []
for string_list in string_lists:
points.append(list_to_g_point(string_list, pen.get_display_pen()))
return points
def make_test_lines():
"""
make test line data distance moved 50 pixels
Returns:
[QGraphicsItem]: holding one line marker with 2 keyframes
"""
line_lists = []
# id, startx, starty, endx, endy, offsetx, offsety, frame, region
line_lists.append(["0", "20", "20", "20", "220", "0", "0", "50", "1"])
line_lists.append(["0", "20", "20", "20", "220", "50", "0", "150", "1"])
pen = PenStore()
lines = []
for line_list in line_lists:
lines.append(list_to_g_line(line_list, pen.get_display_pen()))
return lines
```
#### File: CrystalGrowthTracker/tests/test_io.py
```python
import unittest
import tempfile
import pathlib
import getpass
from cgt.gui.penstore import PenStore
from cgt.io.writecsvreports import save_csv_project
from cgt.io.readcsvreports import read_csv_project
from cgt.model.cgtproject import CGTProject
from cgt.model.videoanalysisresultsstore import VideoAnalysisResultsStore
from cgt.util.scenegraphitems import compare_lines, compare_points
from tests.makeresults import make_results_object
class TestIO(unittest.TestCase):
"""
tests of IO
"""
def setUp(self):
"""
build a full test class
"""
self._project = CGTProject()
self._project.init_new_project()
self._project["results"] = make_results_object()
self._project["resolution"] = 0.8
self._project["frame_rate"] = 10.0
self._tmp_dir = None
def tearDown(self):
"""
clean up
"""
if isinstance(self._tmp_dir, tempfile.TemporaryDirectory):
self._tmp_dir.cleanup()
def test_write_read(self):
"""
test output
"""
self._tmp_dir = tempfile.TemporaryDirectory()
self._project["proj_full_path"] = self._tmp_dir.name
self._project["proj_name"] = "testing"
save_csv_project(self._project)
self.assert_file_names(pathlib.Path(self._tmp_dir.name))
self.run_test_input()
def run_test_input(self):
"""
read and test the output files
"""
project = CGTProject()
project["results"] = VideoAnalysisResultsStore(None)
pens = PenStore()
read_csv_project(self._project["proj_full_path"], project, pens)
self.assertEqual(project["start_user"],
getpass.getuser(),
"user name not read correctly")
self.assert_regions(project)
self.assert_lines_points(project)
def assert_regions(self, project):
"""
        check the regions have been read in correctly
Args:
project (CGTProject): the read-back-in project
"""
in_regions = project["results"].get_regions()
out_regions = self._project["results"].get_regions()
self.assertEqual(len(in_regions),
len(out_regions),
"wrong number of reagions read")
for i, region in enumerate(out_regions):
out_rect = region.rect()
in_rect = in_regions[i].rect()
            self.assertEqual(in_rect, out_rect, "rectangles are wrong")
out_kf = self._project["results"].get_key_frames(i)
in_kf = project["results"].get_key_frames(i)
self.assertEqual(out_kf, in_kf, "error in key frames")
def assert_lines_points(self, project):
"""
check the lines and points have been read in correctly
Args:
project (CGTProject): the read-back-in project
"""
in_lines = project["results"].get_lines_for_region(1)[0]
out_lines = self._project["results"].get_lines_for_region(1)[0]
for i, line in enumerate(out_lines):
self.assertTrue(compare_lines(in_lines[i], line), "wrong line")
in_points = project["results"].get_points_for_region(0)[0]
out_points = self._project["results"].get_points_for_region(0)[0]
for i, point in enumerate(out_points):
self.assertTrue(compare_points(in_points[i], point), "wrong point")
def assert_file_names(self, dir_path):
"""
assert the correct number and names of output files
Args:
dir_path (pathlib.Path): the directory holding the output files
"""
files = ["CGT_testing_lines.csv",
"CGT_testing_points.csv",
"CGT_testing_project_info.csv",
"CGT_testing_regions.csv"]
contents = [x.name for x in dir_path.iterdir()]
self.assertEqual(len(contents), 4, "wrong number of csv files")
for file in files:
self.assertIn(file, contents, "unknown file in csv directory")
if __name__ == "__main__":
unittest.main(verbosity=2)
```
#### File: CrystalGrowthTracker/tests/test_velocities.py
```python
import unittest
from cgt.util.markers import MarkerTypes
from cgt.model.velocitiescalculator import (ScreenDisplacement,
VelocitiesCalculator)
import tests.makeresults as mkres
class TestDisplacements(unittest.TestCase):
"""
test the ScreenDisplacement class
"""
def setUp(self):
"""
initialize objects
"""
## the start frame
self._start = 100
## the end frame
self._end = 200
## the frames per second
self._fps = 10.0
## the displacement length in pixels
self._length = 100
## the test object (created with end & start reversed to check for switch)
self._displacement = ScreenDisplacement(self._end,
self._start,
self._fps,
self._length)
self._speed = 10.0
def tearDown(self):
"""
clean up
"""
del self._displacement
def test_velocity(self):
"""
ensure ScreenDisplacement works
"""
message = "start frame not correct"
self.assertEqual(self._start, self._displacement.get_start(), message)
message = "end frame not correct"
self.assertEqual(self._end, self._displacement.get_end(), message)
message = "length not correct"
self.assertEqual(self._length, self._displacement.get_length(), message)
# test speed to 5 decimal places
speed = self._displacement.get_speed()
message = f"speed wrong: {speed} should be 10.0"
self.assertAlmostEqual(self._speed, speed, 5, message)
class TestVelocities(unittest.TestCase):
"""
test the velocities calculator class
"""
def setUp(self):
"""
initalize objects
"""
self._points = mkres.make_test_points()
self._lines = mkres.make_test_lines()
self._test_values = mkres.get_test_values()
self._calculator = VelocitiesCalculator([self._lines],
[self._points],
self._test_values.fps,
self._test_values.scale)
self._calculator.process_latest_data()
def tearDown(self):
"""
clean up
"""
del self._calculator
def test_calculator(self):
"""
ensure calculator works
"""
speeds = self._calculator.get_average_speeds()
for speed in speeds:
if speed.m_type is MarkerTypes.POINT:
message = "point speed is wrong"
self.assertAlmostEqual(self._test_values.point_speed,
speed.speed,
places=4,
msg=message)
elif speed.m_type is MarkerTypes.LINE:
message = "line speed is wrong"
self.assertAlmostEqual(self._test_values.line_speed,
speed.speed,
places=4,
msg=message)
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "jonathanHuwP/PyQtDemoElection",
"score": 2
} |
#### File: demo/gui/Ui_electiondemo.py
```python
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_ElectionDemo(object):
def setupUi(self, ElectionDemo):
ElectionDemo.setObjectName("ElectionDemo")
ElectionDemo.resize(800, 821)
self.centralwidget = QtWidgets.QWidget(ElectionDemo)
self.centralwidget.setObjectName("centralwidget")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.centralwidget)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setObjectName("label")
self.verticalLayout.addWidget(self.label)
self._constituencyTableView = QtWidgets.QTableView(self.centralwidget)
self._constituencyTableView.setObjectName("_constituencyTableView")
self.verticalLayout.addWidget(self._constituencyTableView)
self.verticalLayout_3.addLayout(self.verticalLayout)
self.verticalLayout_2 = QtWidgets.QVBoxLayout()
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.label_2 = QtWidgets.QLabel(self.centralwidget)
self.label_2.setObjectName("label_2")
self.verticalLayout_2.addWidget(self.label_2)
self._partyTableView = QtWidgets.QTableView(self.centralwidget)
self._partyTableView.setObjectName("_partyTableView")
self.verticalLayout_2.addWidget(self._partyTableView)
self.verticalLayout_3.addLayout(self.verticalLayout_2)
ElectionDemo.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(ElectionDemo)
self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 31))
self.menubar.setObjectName("menubar")
self.menuFile = QtWidgets.QMenu(self.menubar)
self.menuFile.setObjectName("menuFile")
ElectionDemo.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(ElectionDemo)
self.statusbar.setObjectName("statusbar")
ElectionDemo.setStatusBar(self.statusbar)
self.actionOpen = QtWidgets.QAction(ElectionDemo)
self.actionOpen.setObjectName("actionOpen")
self.actionSave = QtWidgets.QAction(ElectionDemo)
self.actionSave.setObjectName("actionSave")
self.actionExit = QtWidgets.QAction(ElectionDemo)
self.actionExit.setObjectName("actionExit")
self.menuFile.addAction(self.actionOpen)
self.menuFile.addAction(self.actionSave)
self.menuFile.addSeparator()
self.menuFile.addAction(self.actionExit)
self.menubar.addAction(self.menuFile.menuAction())
self.retranslateUi(ElectionDemo)
self.actionExit.triggered.connect(ElectionDemo.close)
self.actionOpen.triggered.connect(ElectionDemo.load_data)
self.actionSave.triggered.connect(ElectionDemo.save_data)
QtCore.QMetaObject.connectSlotsByName(ElectionDemo)
def retranslateUi(self, ElectionDemo):
_translate = QtCore.QCoreApplication.translate
ElectionDemo.setWindowTitle(_translate("ElectionDemo", "Election Data"))
self.label.setText(_translate("ElectionDemo", "Constituencies"))
self.label_2.setText(_translate("ElectionDemo", "Party Share"))
self.menuFile.setTitle(_translate("ElectionDemo", "File"))
self.actionOpen.setText(_translate("ElectionDemo", "Open"))
self.actionSave.setText(_translate("ElectionDemo", "Save"))
self.actionExit.setText(_translate("ElectionDemo", "Exit"))
```
#### File: demo/io/csvio.py
```python
import csv
from demo.datastructs.electionresults import ElectionResults
def read_file(file_name):
"""
read the csv file of election results
Args:
file_name (string) the file_name, including relative path
Returns:
dictionary of rows, first entry => rest of row
Throws:
IOError if file cannot be opened
"""
results = ElectionResults()
with open(file_name, 'r') as csv_file:
csv_reader = csv.reader(csv_file)
for row in csv_reader:
results[row[0]] = row[1:]
return results
def write_file(results, file_name):
"""
write the csv file of election results
Args:
results (dict) the dictionary of results
file_name (string) the file_name, including relative path
Throws:
IOError if file cannot be opened
"""
with open(file_name, 'w', newline='') as csv_file:
csv_writer = csv.writer(csv_file)
for item in results.items():
row = [item[0]] + item[1]
csv_writer.writerow(row)
if __name__ == "__main__":
tmp_results = read_file(".\\resources\\sample_data.csv")
for tmp in tmp_results.items():
print("{} => {}".format(tmp[0], tmp[1]))
test = {}
# comment on use of csv to fix test etc
test["North\" Winging"] = [12, 17, 4]
write_file(test, "test_output.csv")
```
#### File: PyQtDemoElection/testing/demoelectiontests.py
```python
import sys
import pathlib
import unittest
sys.path.insert(0, str(pathlib.Path.cwd()))
from demo.datastructs.electionresults import ElectionResults
## class for unit tests of the ImagePoints
class TestElectionResults(unittest.TestCase):
"""test haness for the results class"""
def setUp(self):
"""make test case"""
self._results = ElectionResults()
self._results["Somewhere"] = [10, 5, 0]
self._results["Somewhere Else"] = [0, 10, 5]
self._results["Elsewhare"] = [5, 0, 10]
def tearDown(self):
"""delete test case"""
self._results = None
def test_results(self):
"""
test the percentage of the vote function
"""
percentages = self._results.party_percentages()
self.assertAlmostEqual(percentages[0],
33.3333,
msg="percentage of first party failed",
delta=0.0001)
self.assertAlmostEqual(percentages[1],
33.3333,
msg="percentage of second party failed",
delta=0.0001)
self.assertAlmostEqual(percentages[2],
33.3333,
msg="percentage of third party failed",
delta=0.0001)
if __name__ == "__main__":
print("Unit Tests for DemoElection")
unittest.main()
``` |
{
"source": "jonathan-hxj/FlaskTpp",
"score": 2
} |
#### File: apis/movie_user/movie_user_api.py
```python
import uuid
from flask_restful import Resource, reqparse, abort, fields, marshal_with, marshal
from App.models.movie_user import MovieUser
from App.apis.movie_user.model_utils import get_user
from App.apis.api_constant import HTTP_CREATE_SUCCESS, USER_ACTION_LOGIN, USER_ACTION_REGISTER, HTTP_SUCCESS
from App.ext import cache
base_parse = reqparse.RequestParser()
base_parse.add_argument("password", type=str, required=True, help="请输入密码!")
base_parse.add_argument("action", type=str, required=True, help="请确认请求参数!")
register_parse = reqparse.deepcopy(base_parse)
register_parse.add_argument("phone", type=str, required=True, help="请输入手机号!")
register_parse.add_argument("username", type=str, required=True, help="请输入用户名!")
login_parse = reqparse.deepcopy(base_parse)
login_parse.add_argument("phone", type=str, help="请输入手机号!")
login_parse.add_argument("username", type=str, help="请输入用户名!")
movie_fields = {
"username": fields.String(attribute="user_name"),
"password": fields.String(attribute="_password"),
"phone": fields.String
}
singe_movie_user_fields = {
"status": fields.Integer,
"msg": fields.String,
"data": fields.Nested(movie_fields)
}
class MovieUsersResource(Resource):
def post(self):
args = base_parse.parse_args()
password = args.get("password")
action = args.get("action").lower()
if action == USER_ACTION_REGISTER:
register_args = register_parse.parse_args()
username = register_args.get("username")
phone = register_args.get("phone")
movie_user = MovieUser()
movie_user.user_name = username
movie_user.password = password
movie_user.phone = phone
if not movie_user.save():
abort(400, msg="create fail")
data = {
"status": HTTP_CREATE_SUCCESS,
"msg": "用户创建成功",
"data": movie_user
}
return marshal(data, singe_movie_user_fields)
elif action == USER_ACTION_LOGIN:
login_args = login_parse.parse_args()
username = login_args.get("username")
phone = login_args.get("phone")
user = get_user(username) or get_user(phone)
if not user or user.is_delete:
abort(400, msg="用户不存在!")
if not user.check_password(password):
abort(401, msg="密码错误!")
token = uuid.uuid4().hex
cache.set(token, user.id, timeout=60 * 60 * 24 * 7)
data = {
"msg": "login success",
"status": HTTP_SUCCESS,
"token": token
}
return data
else:
abort(400, msg="请提供正确的参数!")
```
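A brief usage sketch of the resource above: registration and login share one POST endpoint and are selected by the `action` field. The route path and the literal action strings (`"register"`/`"login"`, assumed from the lowercase comparison against the imported constants) are illustrative, not taken from the original project.
```python
import requests

URL = "http://localhost:5000/movie_users"   # hypothetical route for MovieUsersResource

# register: needs username, phone and password
requests.post(URL, data={"action": "register", "username": "alice",
                         "phone": "13800000000", "password": "secret"})

# login: either username or phone plus password; the token is cached server-side for 7 days
resp = requests.post(URL, data={"action": "login", "username": "alice", "password": "secret"})
token = resp.json().get("token")
```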
#### File: FlaskTpp/App/__init__.py
```python
from flask import Flask
from App.settings import envs
from App.ext import init_ext
from App.apis import init_api
def create_app(env):
app = Flask(__name__)
# initialize the project configuration
app.config.from_object(envs.get(env))
# initialize the extension libraries
init_ext(app)
init_api(app)
return app
```
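A minimal sketch of the application factory in use; the `"develop"` key is an assumption about the `envs` dict in `App.settings`, which is not shown here.
```python
from App import create_app

app = create_app("develop")   # assumed key of the envs dict in App.settings

if __name__ == "__main__":
    app.run(debug=True)
```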
#### File: models/movie_user/movie_user_model.py
```python
from App.ext import db
from App.models import BaseModel
from werkzeug.security import check_password_hash, generate_password_hash
from App.models.movie_user.model_constant import PERMISSION_NONE
COMMON_USER = 0
BLACK_USER = 1
VIP_USER = 2
class MovieUser(BaseModel):
user_name = db.Column(db.String(32), unique=True)
_password = db.Column(db.String(256))
phone = db.Column(db.String(32), unique=True)
is_delete = db.Column(db.Boolean, default=False)
permission = db.Column(db.Integer, default=PERMISSION_NONE)
@property
def password(self):
raise Exception("Can't access")
@password.setter
def password(self, val):
self._password = generate_password_hash(val)
def check_password(self, val):
return check_password_hash(self._password, password=val)
def check_permission(self, permission):
if BLACK_USER & self.permission == BLACK_USER:
return False
else:
return permission & self.permission == permission
``` |
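`check_permission` treats the `permission` column as a bit field: a set blacklist bit always denies, otherwise the request is granted only if every requested bit is present. A small illustration using mirrors of the module constants defined above:
```python
COMMON_USER, BLACK_USER, VIP_USER = 0, 1, 2      # mirror of the constants above

vip = VIP_USER                                   # 0b10
print((vip & VIP_USER) == VIP_USER)              # True  -> VIP permission granted
print((vip & BLACK_USER) == BLACK_USER)          # False -> not blacklisted

banned = VIP_USER | BLACK_USER                   # 0b11
print((banned & BLACK_USER) == BLACK_USER)       # True  -> check_permission returns False
```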
{
"source": "JonathanILevi/DwarfHeim-MinePlotter",
"score": 3
} |
#### File: DwarfHeim-MinePlotter/builder/htmlInject.py
```python
import re, random, sys
def readFile(f):
with open(f, "r") as file:
return file.read()
def writeFile(f, content):
with open(f, "w") as file:
file.write(content)
for arg in sys.argv[1:]:
writeFile(arg, readFile(arg).replace("vtag", str(random.randint(0,999))))
## writeFile(arg, re.sub('(<script[^>]*src=)"([^"\\\\]*(?:\\\\.[^"\\\\]*)*)"', '\\1"\\2?'+str(random.randint(0,999))+'"', readFile(arg)))
``` |
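The script is a simple cache-buster: every literal `vtag` token in the listed files is replaced by a random number so browsers re-fetch versioned assets. The effect on a hypothetical script tag:
```python
import random

html = '<script src="app.js?vtag"></script>'      # hypothetical asset reference
print(html.replace("vtag", str(random.randint(0, 999))))
# e.g. <script src="app.js?417"></script>
```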
{
"source": "jonathanimb/ADS1118",
"score": 3
} |
#### File: jonathanimb/ADS1118/ADS1118.py
```python
import time
import RPi.GPIO as GPIO
class ConnectionError(Exception):
pass
def int_to_list(data, bits=8):
"""converts an integer to a big-endian list of booleans
>>> int_to_list(187)
[1, 0, 1, 1, 1, 0, 1, 1]
"""
return [data >> i & 1 for i in range(bits-1, -1, -1)]
def list_to_int(data):
"""converts a big-endian list of booleans to an integer
>>> list_to_int([1, 0, 1, 1, 1, 0, 1, 1])
187
"""
return sum(val << i for i, val in enumerate(reversed(data)))
# the bytearray functions are not needed, but may be useful if anyone has commands
# in the more traditional 2-byte form.
def list_to_bytearray(data):
return bytearray(list_to_int(data[i:i+8]) for i in range(0, len(data), 8))
def bytearray_to_list(data):
data = bytearray(data)
return sum(map(int_to_list, data), [])
def bytearray_to_int(data):
return list_to_int(bytearray_to_list(data))
def int_to_bytearray(data):
return list_to_bytearray(int_to_list(data))
gains = { # calculate the value of the least significant bit (LSB) by dividing the full range by the number of available bits
0: 6.144 * 2 / 2**16, # 187.5 microvolts per bit at +- 6.144 volts full range
1: 4.096 * 2 / 2**16, # 125.0
2: 2.048 * 2 / 2**16, # 62.50 (default)
3: 1.024 * 2 / 2**16, # 31.25
4: 0.512 * 2 / 2**16, # 15.62
5: 0.256 * 2 / 2**16, # 7.812
6: 0.256 * 2 / 2**16, # 7.812
7: 0.256 * 2 / 2**16 # 7.812
}
sleep_times = { # calculate the time in seconds it takes for a single data point at each data rate
0: 1. / 8, # 125.0 ms per measurement = 8 measurements per second
1: 1. / 16, # 62.5
2: 1. / 32, # 31.3
3: 1. / 64, # 15.6
4: 1. / 128, # 7.81 (default)
5: 1. / 250, # 4.00
6: 1. / 475, # 2.11
7: 1. / 860 # 1.12
}
def encode(
single_shot = False, # If ADS is powered down, start a single measurement.
multiplex = 0, # [0/1, 0/3, 1/3, 2/3, 0/gnd, 1/gnd, 2/gnd, 3/gnd]
gain = 2, #[+/-6.144, 4.096, 2.048, 1.024, 0.512, .256] volts full range
single_shot_mode = True, # power down after measurement
data_rate = 4, # [8, 16, 32, 64, 128, 250, 475, 860] samples per second
temp_sensor = False, # read the internal temperature
pullup = True, # enable the DIN pullup resistor
operation = True): # when false, config is not written to config register
data = []
data.append(int(single_shot))
data.extend(int_to_list(multiplex, 3))
data.extend(int_to_list(gain, 3))
data.append(int(single_shot_mode))
data.extend(int_to_list(data_rate, 3))
data.append(int(temp_sensor))
data.append(int(pullup))
data.append(0) # reserved
data.append(int(operation))
data.append(1) # reserved
return data
def decode(data):
'''input a list of 16 bits'''
return dict(
single_shot = bool(data[0]),
multiplex = list_to_int(data[1:4]),
gain = list_to_int(data[4:7]),
single_shot_mode = bool(data[7]),
data_rate = list_to_int(data[8:11]),
temp_sensor = bool(data[11]),
pullup = bool(data[12]),
operation = bool(data[14]))  # operation is written at index 14 by encode(); index 13 is a reserved bit
def convert(data, lsb_size):
'''convert a data block into a number
:data: a list of bits
:lsb_size: the value of the least significant bit'''
if data[0]:  # negative value, use binary two's complement
data = [not x for x in data]
return -(lsb_size * (list_to_int(data) + 1))
else:
return lsb_size * list_to_int(data)
def interpret(config, data):
'''convert the data block to a meaningful value.
:config:
the config that was sent or that was echoed (should be the same)
this is used to determine how the data should be interpreted
:data:
the data block from the ADS1118 as a length 16 list of booleans
'''
if config[11]: # temp measurement
# convert a data block into a temperature, returns degrees C
return convert(data[:14], 0.03125)
else: # voltage measurement
# convert a data block into a voltage
gain = list_to_int(config[4:7])
return convert(data, gains[gain])
def verify(command, config):
'''
compares the command sent to the echoed config returned from the ADS1118.
If they don't match then there was a communications problem.
if the sum of bits is zero than the ADS1118 is likely not connected
if the sum is non-zero then you probably have more than one instance running
'''
if config[1:15] != command[1:15]:
raise ConnectionError('sum of bits: {}'.format(sum(config)))
def pause(command):
'''wait for the amount of time the command takes to execute'''
time.sleep(sleep_times[list_to_int(command[8:11])])
INT_TEMP = encode(single_shot=True, temp_sensor=True, data_rate=5) # read internal temperature
class ADS1118(object):
'''although the 1118 says it can handle a 5V input voltage, it's a lie.
At least when running from a Pi, the 1118 must be powered from the 3.3 V line.
Perhaps you can use more power if you add a voltage divider to the DOUT line'''
def __init__(self, SCLK=None, DIN=None, DOUT=None, CS=None, pause_mode=True):
'''
:SCLK:, :DIN:, :DOUT:, :CS:
the GPIO pin numbers that connect to the pins of the same name on the ADS1118 chip
:pause_mode:
After sending a command, the computer must wait for the command to be processed
and the data to be ready to read. How long this takes is set by the data_rate argument.
When the data is ready, the ADS1118 sets the DOUT pin low.
the pause_mode argument sets how the computer waits for the data to be ready
if pause_mode is True, the computer calculates the time it should take and sleeps that long
this does not take into account the time needed to communicate, so it will always sleep
slightly longer than needed, generally about 0.25 ms longer
if pause_mode is False, the computer will continuously poll the DOUT pin
and collect the data as soon as it's ready. This locks up a CPU and can
slow down other tasks the computer has running.
'''
self.SCLK = SCLK
self.DIN = DIN
self.DOUT = DOUT
self.CS = CS
self.pause_mode = pause_mode
GPIO.setmode(GPIO.BCM)
GPIO.setup(self.SCLK, GPIO.OUT)
GPIO.setup(self.DIN, GPIO.OUT)
GPIO.setup(self.DOUT, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
if self.CS is not None:
GPIO.setup(self.CS, GPIO.OUT)
GPIO.output(self.CS, GPIO.HIGH)
GPIO.output(self.SCLK, GPIO.LOW)
GPIO.output(self.DIN, GPIO.LOW)
time.sleep(.030) # something sets high, and the ADS1118 needs to have the clock held low for 28 ms to reset (datasheet page 22, section 9.5.3)
config, data = self._read(INT_TEMP) # clear the cobwebs
verify(INT_TEMP, config)
pause(INT_TEMP) # using wait here will sometimes hang
def wait(self):
'''wait until DOUT is low, signaling that the next data is ready'''
while GPIO.input(self.DOUT):
pass
# another method to accomplish the same
# this method is slower than above, and slower than the pause function
# in addition it has the tendency to corrupt data
# I left this in here to prove I've tested it
#~ if GPIO.input(self.DOUT):
#~ GPIO.wait_for_edge(self.DOUT, GPIO.FALLING, timeout=40)
def _read(self, command):
'''
read / write a single 32-bit cycle
:command: a list of 16 booleans
:wait: wait for DOUT to be low (ADS signal data is ready)
waiting is the 'proper' way to minimize the time, but it ties up a CPU
pausing does not use the CPU and is generally about 1 ms slower
returns
:config: the current command echoed back from the ADS1118
:data: the result from the _PREVIOUS_ call'''
#~ assert isinstance(command, (tuple, list)), "command must be a list"
#~ assert len(command) >= 16 and len(command) % 16 == 0, "command must have a multiple of 16 elements"
#~ assert all(x in (0,1) for x in command), "command must be a list of booleans"
if self.CS:
GPIO.output(self.CS, GPIO.LOW)
data_out = []
for bit in command*2:
GPIO.output(self.SCLK, GPIO.HIGH)
GPIO.output(self.DIN, int(bit))
data_out.append(GPIO.input(self.DOUT))
GPIO.output(self.SCLK, GPIO.LOW)
if self.CS:
GPIO.output(self.CS, GPIO.HIGH)
# the data should be 32 bits long, split in half to output data, config
# index from the back to allow commands longer than 16 bits.
data = data_out[-32:-16]
config = data_out[-16:]
return config, data
def read(self, *commands):
'''
make some single shot measurements
this method makes the vital assumption that you allow enough time
between calls that the ADS is powered down (7 ms in default mode).
if that might not be, add
pause(INT_TEMP)
to your routine.'''
responses = []
for command in commands:
responses.append(self._read(command))
if self.pause_mode:
pause(command)
else:
self.wait()
responses.append(self._read(INT_TEMP)) # dummy command to get the last data value
configs, datas = zip(*responses)
results = []
for command, config, data in zip(commands, configs, datas[1:]): # skip the first data since it's residual
results.append(interpret(config, data))
verify(command, config)
return results
``` |
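A minimal usage sketch (it must run on a Raspberry Pi with RPi.GPIO available); the BCM pin numbers and the multiplexer/gain choices below are placeholders, not wiring from the original project:
```python
# read AIN0 vs GND (multiplex=4) followed by the internal temperature
adc = ADS1118(SCLK=11, DIN=10, DOUT=9, CS=8)            # hypothetical BCM pin numbers

read_ain0 = encode(single_shot=True, multiplex=4, gain=2, data_rate=4)
voltage, temperature = adc.read(read_ain0, INT_TEMP)
print("AIN0: {:.6f} V, chip temperature: {:.2f} C".format(voltage, temperature))
```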
{
"source": "jonathanimb/pyimzML",
"score": 3
} |
#### File: pyimzML/pyimzml/compression.py
```python
import zlib
class NoCompression(object):
name = "no compression"
def rounding(self, data):
return data
def compress(self, bytes):
return bytes
def decompress(self, bytes):
return bytes
class ZlibCompression(object):
name = "zlib compression"
def __init__(self, round_amt=None):
self.round_amt = round_amt
def rounding(self, data):
if self.round_amt is not None:
return [round(x,self.round_amt) for x in data] #rounding helps the compression, but is lossy
return data
def compress(self, bytes):
return zlib.compress(bytes)
def decompress(self, bytes):
return zlib.decompress(bytes)
``` |
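Both classes share the same three-method interface, so callers can swap them freely. A small round-trip sketch with packed doubles:
```python
import struct

compressor = ZlibCompression(round_amt=3)

values = [100.123456, 200.654321, 300.111111]
rounded = compressor.rounding(values)                      # lossy rounding helps zlib
raw = struct.pack("<%dd" % len(rounded), *rounded)

packed = compressor.compress(raw)
restored = struct.unpack("<%dd" % len(rounded), compressor.decompress(packed))
assert list(restored) == rounded
```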
{
"source": "jonathanimb/tkvariables",
"score": 3
} |
#### File: jonathanimb/tkvariables/tkvariables.py
```python
try:
import Tkinter as tk
except ImportError:
import tkinter as tk
class _Base:
def __init__(self, master=None, min=None, max=None, trace_w=None, trace_o=None, trace_r=None, **kwargs):
self.min = min
self.max = max
self.get_funcs = []
self.set_funcs = []
self.onchange_funcs = []
self.tk_super.__init__(self, master, **kwargs)
self.tk_super.trace(self, 'w', self.run_set)
self.old_value = self.get()
self.tk_super.trace(self, 'r', self.run_get)
self.trace('w', trace_w)
self.trace('o', trace_o)
self.trace('r', trace_r)
def set(self, value):
if self.min is not None and value < self.min:
value = self.min
if self.max is not None and value > self.max:
value = self.max
self.tk_super.set(self, value)
def trace(self, mode, funcs, delete=False):
if funcs is None:
return
if not isinstance(funcs, (list, tuple)):
funcs = [funcs]
for func in funcs:
if mode == 'w':
self.set_funcs.append(func)
elif mode == 'r':
self.get_funcs.append(func)
elif mode == 'o':
self.onchange_funcs.append(func)
else:
raise ValueError('mode {!r} is not known'.format(mode))
def run_get(self, *useless_args):
for func in self.get_funcs:
func(self.get(), self.get(), self)
def run_set(self, *useless_args):
new_value = self.get()
for func in self.set_funcs:
func(new_value, self.old_value, self)
if self.old_value != new_value:
for func in self.onchange_funcs:
func(new_value, self.old_value, self)
self.old_value = new_value
def _get_value(self, other):
if isinstance(other, tk.Variable):
return other.get()
else:
return other
def __iadd__(self, other):
self.set(self.get() + self._get_value(other))
return self
def __isub__(self, other):
self.set(self.get() - self._get_value(other))
return self
def __imul__(self, other):
self.set(self.get() * self._get_value(other))
return self
def __idiv__(self, other):
self.set(self.get() / self._get_value(other))
return self
def __ifloordiv__(self, other):
self.set(self.get() // self._get_value(other))
return self
class StringVar(_Base, tk.StringVar):
def __init__(self, *args, **kwargs):
self.tk_super = tk.StringVar
_Base.__init__(self, *args, **kwargs)
class IntVar(_Base, tk.IntVar):
def __init__(self, *args, **kwargs):
self.tk_super = tk.IntVar
_Base.__init__(self, *args, **kwargs)
class DoubleVar(_Base, tk.DoubleVar):
def __init__(self, *args, **kwargs):
self.tk_super = tk.DoubleVar
_Base.__init__(self, *args, **kwargs)
class BooleanVar(_Base, tk.BooleanVar):
def __init__(self, *args, **kwargs):
self.tk_super = tk.BooleanVar
_Base.__init__(self, *args, **kwargs)
##debug / demo
def set_callback(new, old, var):
print("the {} variable was set. (was {!r} now {!r}).".format(var, old, new))
def get_callback(new, old, var):
print("the {} variable was got. (was {!r} now {!r}).".format(var, old, new))
def onchange_callback(new, old, var):
print("the {} variable was changed. (was {!r} now {!r}).".format(var, old, new))
def increment():
global counter
counter += 1
def decrement():
global counter
counter -= 1
def main():
global counter
r = tk.Tk()
r.geometry('300x300')
options = 'spam and eggs'.split()
var = StringVar(value=options[0])
var.trace('w', set_callback)
var.trace('o', onchange_callback)
var.trace('r', get_callback)
ent = tk.OptionMenu(r, var, *options)
ent.pack()
f = tk.Frame(r)
counter = IntVar(min=-2, max=15)
btn = tk.Button(f, text='-', command=decrement)
btn.pack(side=tk.LEFT)
lbl = tk.Label(f, textvariable=counter)
lbl.pack(side=tk.LEFT)
btn = tk.Button(f, text='+', command=increment)
btn.pack(side=tk.LEFT)
other = IntVar(value=10)
def add10():
global counter
counter += other
btn = tk.Button(f, text='+10', command=add10)
btn.pack(side=tk.LEFT)
f.pack()
r.mainloop()
if __name__ == "__main__":
main()
``` |
{
"source": "jonathan-innis/azure-extension-foundation",
"score": 2
} |
#### File: shim/helper/status.py
```python
from ctypes import *
from helper.types import GoString
class Status:
def __init__(self, lib):
self.lib = lib
self.lib.ReportTransitioning.argtypes = [GoString, GoString]
self.lib.ReportError.argtypes = [GoString, GoString]
self.lib.ReportSuccess.argtypes = [GoString, GoString]
self.lib.ReportTransitioning.restype = c_char_p
self.lib.ReportError.restype = c_char_p
self.lib.ReportSuccess.restype = c_char_p
def transitioning(self, operation, message):
operation_str = GoString(operation.encode('utf-8'), len(operation))
message_str = GoString(message.encode('utf-8'), len(message))
return self.lib.ReportTransitioning(operation_str, message_str)
def error(self, operation, message):
operation_str = GoString(operation.encode('utf-8'), len(operation))
message_str = GoString(message.encode('utf-8'), len(message))
return self.lib.ReportError(operation_str, message_str)
def success(self, operation, message):
operation_str = GoString(operation.encode('utf-8'), len(operation))
message_str = GoString(message.encode('utf-8'), len(message))
return self.lib.ReportSuccess(operation_str, message_str)
```
#### File: azure-extension-foundation/shim/shim.py
```python
from helper import log, status, settings, sequence
from abc import abstractmethod, ABCMeta
from ctypes import *
import os
EXTENSION_NAME = "some extension"
class Shim(metaclass=ABCMeta):
def __init__(self):
lib = cdll.LoadLibrary(os.path.dirname(__file__) + "/main.so")
self.status = status.Status(lib)
self.sequence = sequence.Sequence(lib)
self.settings = settings.Settings(lib)
self.log = log.Log(lib)
"""
Install calls
"""
def pre_install(self):
self.log.info("BEGIN Install Extension: %s"%(EXTENSION_NAME))
self.status.transitioning("BEGIN Install Extension: %s"%(EXTENSION_NAME), "BEGIN Install Extension: %s"%(EXTENSION_NAME))
@abstractmethod
def install(self):
pass
def post_install(self):
self.log.info("END Install Extension %s"%(EXTENSION_NAME))
self.status.transitioning("END Install Extension: %s"%(EXTENSION_NAME), "END Install Extension: %s"%(EXTENSION_NAME))
"""
Enable calls
"""
def pre_enable(self):
shouldProcess = self.sequence.check_sequence_number()
self.log.info("BEGIN Enable Extension: %s"%(EXTENSION_NAME))
self.status.transitioning("BEGIN Enable Extension: %s"%(EXTENSION_NAME), "BEGIN Enable Extension: %s"%(EXTENSION_NAME))
# Get settings to return back to the user to use in application logic
self.settings = self.settings.get_settings()
@abstractmethod
def enable(self):
pass
def post_enable(self):
self.log.info("END Enable Extension: %s"%(EXTENSION_NAME))
self.status.success("END Enable Extension: %s"%(EXTENSION_NAME), "END Enable Extension: %s"%(EXTENSION_NAME))
"""
Disable calls
"""
def pre_disable(self):
self.log.info("BEGIN Disable Extension: %s"%(EXTENSION_NAME))
self.status.transitioning("BEGIN Disable Extension: %s"%(EXTENSION_NAME), "BEGIN Disable Extension: %s"%(EXTENSION_NAME))
@abstractmethod
def disable(self):
pass
def post_disable(self):
self.log.info("END Disable Extension %s"%(EXTENSION_NAME))
self.status.success("END Disable Extension: %s"%(EXTENSION_NAME), "END Disable Extension: %s"%(EXTENSION_NAME))
"""
Uninstall calls
"""
def pre_uninstall(self):
self.log.info("BEGIN Uninstall Extension: %s"%(EXTENSION_NAME))
self.status.transitioning("BEGIN Uninstall Extension: %s"%(EXTENSION_NAME), "BEGIN Uninstall Extensions: %s"%(EXTENSION_NAME))
@abstractmethod
def uninstall(self):
pass
def post_uninstall(self):
self.log.info("END Uninstall Extension: %s"%(EXTENSION_NAME))
self.status.transitioning("END Uninstall Extension: %s"%(EXTENSION_NAME), "END Uninstall Extension: %s"%(EXTENSION_NAME))
def on_timeout(self):
self.log.error("Extension install took to long for Extension: %s"%(EXTENSION_NAME))
self.status.error("Enabling failed for extension: %s"%(EXTENSION_NAME), "failed installing %s"%(EXTENSION_NAME))
``` |
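Concrete handlers subclass `Shim` and implement the four abstract operations; the pre/post hooks take care of logging, status reporting and the sequence-number check. A minimal sketch (the operation bodies are placeholders, and instantiation requires the compiled `main.so` next to the module):
```python
class MyExtensionShim(Shim):
    def install(self):
        self.log.info("installing dependencies")          # placeholder work

    def enable(self):
        # self.settings is populated by pre_enable() before enable() runs
        self.log.info("enabling with settings: %s" % (self.settings,))

    def disable(self):
        self.log.info("disabling")

    def uninstall(self):
        self.log.info("removing installed files")


shim = MyExtensionShim()
shim.pre_enable()
shim.enable()
shim.post_enable()
```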
{
"source": "jonathanishhorowicz/RATE_python_package",
"score": 2
} |
#### File: RATE_python_package/rate/importance.py
```python
import numpy as np
import tensorflow as tf
import multiprocessing as mp
from multiprocessing import Process, Manager
#import ray
import time
#import rfpimp as rfp
from tqdm import tqdm
from scipy.linalg import solve_triangular, sqrtm
from .models import BnnBase, BnnBinaryClassifier
from .projections import CovarianceProjection
from .logutils import TqdmLoggingHandler
import logging
logger = logging.getLogger(__name__)
#logger.addHandler(TqdmLoggingHandler())
def qr_solve(A, b):
Q, R = np.linalg.qr(A)
return np.matmul(solve_triangular(R, Q.T), b)
def rate(X, M_F, V_F, projection=CovarianceProjection(), nullify=None,
exact_KLD=False, method="KLD", solver="qr", jitter=1e-9, return_time=False, return_KLDs=False,
n_jobs=1, parallel_backend="", progressbar=False):
"""Calculate RATE values. This function will replace previous versions in v1.0
Args:
X: array containing input data, shape (n_examples, n_variables)
M_F: array containing logit posterior mean, shape (n_classes, n_examples).
V_F: array containing logit posterior covariance, shape (n_classes, n_examples, n_examples).
projection: a projection defining the effect size analogue. Must inherit from ProjectionBase. These are defined in projections.py
nullify: array-like containing indices of variables for which RATE will not be calculated. Default `None`, in which case RATE values are calculated for every variable.
exact_KLD: whether to include the log determinant, trace and 1-p terms in the KLD calculation. Default is False.
method: Used in development. Use "KLD" (default) for the RATE calculation.
solver: If 'qr', solve the linear system using QR (default). Choose 'lstsq' for a least-squares solution
jitter: added to the diagonal of the effect size analogue posterior to ensure positive semi-definiteness. The code will warn you if any of the resulting KLD values
are negative, in which case you should try a larger jitter. This is due to the covariance matrices of the logit posterior not being positive semi-definite.
return_time: whether or not to return the time taken to compute the RATE values. Default is False.
return_KLDs: whether to return the KLD values as well as the RATE values. For debugging. Default is False.
parallel_backend: the parallel backend (only relevant if n_jobs > 1). One of 'ray' or 'multiprocessing'
progressbar: whether to display the tqdm progress bar (default False).
Returns:
rate_vals: a list of length n_classes, where each item is an array of per-variable RATE values for a given class. A single array is returned for n_classes = 1.
If return_time=True then a 2-tuple containing rate_vals and the computation time is returned.
If return_KLDs=True then the first item of the 2-tuple is itself a 2-tuple of (RATE_values, KLD_values)
"""
logger.debug("Input shapes: X: {}, M_F: {}, V_F: {}".format(X.shape, M_F.shape, V_F.shape))
logger.debug("Using {} method and {} solver".format(method, solver))
#
# Shape checks. 1D M_F and 2D V_F will have extra dimension added at the front (for the output class)
#
if M_F.ndim==1:
M_F = M_F[np.newaxis]
logger.debug("Reshaping M_F to {}".format(M_F.shape))
if V_F.ndim==2:
V_F = V_F[np.newaxis]
logger.debug("Reshaping 2D V_F to {}".format(V_F.shape))
if not (X.shape[0] == M_F.shape[1] == V_F.shape[1] == V_F.shape[2]):
raise ValueError("Inconsistent number of examples across X and logit posterior")
if M_F.shape[0] != V_F.shape[0]:
raise ValueError("Inconsistent number of classes between logit posterior mean and covariance")
logger.info("Calculating RATE values for {} classes, {} examples and {} variables".format(
M_F.shape[0], X.shape[0], X.shape[1]))
if exact_KLD:
logger.info("Using exact KLD calcuation")
# PARALLELISATION NOT FULLY TESTED YET - CALL RATE_ray directly
# if n_jobs > 1:
# if parallel_backend not in ["ray", "multiprocessing"]:
# raise ValueError("{} is not a recognised parallel backend - choose `ray` or `multiprocessing`")
# logger.info("Using {} parallel backend with {} jobs".format(parallel_backend, n_jobs))
M_B, V_B = projection.esa_posterior(X, M_F, V_F)
C = M_F.shape[0]
p = X.shape[1]
J = np.arange(p)
if nullify is not None:
J = np.delete(J, nullify, axis=0)
KLDs = [np.zeros(J.shape[0]) for _ in range(C)]
if solver == "qr":
alpha_solve_fn = qr_solve
elif solver == "lstsq":
alpha_solve_fn = lambda A, b: np.linalg.lstsq(A, b, rcond=None)[0]
else:
logger.warning("Unrecognised solver {}, using qr".format(solver))
alpha_solve_fn = qr_solve
start_time = time.time()
for c in range(C):
logger.info("Calculating RATE values for class {} of {}".format(c+1, C))
Lambda = np.linalg.pinv(V_B[c] + jitter*np.eye(V_B.shape[1]))
for j in tqdm(J, disable=not progressbar):
if method=="KLD":
if nullify is not None:
j = np.array(np.unique(np.concatenate(([j], nullify)), axis=0))
m = M_B[c,j]
Lambda_red = np.delete(Lambda, j, axis=0)[:,j]
alpha = np.matmul(
Lambda_red.T,
alpha_solve_fn(
np.delete(np.delete(Lambda, j, axis=0), j, axis=1),
Lambda_red))
# Approximation to the full KLD (equation S6 in the AoAS supplement)
if nullify is None:
KLDs[c][j] = 0.5 * m**2.0 * alpha
else:
KLDs[c][j] = 0.5 * np.matmul(np.matmul(m.T, alpha), m)
# Additional terms in the full KLD calculation (equation 9 in AoAS paper)
if exact_KLD:
sigma_lambda_product = np.matmul(
np.delete(np.delete(V_B[c], j, axis=0), j, axis=1),
np.delete(np.delete(Lambda, j, axis=0), j, axis=1)
)
KLDs[c][j] += 0.5 * (
- np.log(np.linalg.det(sigma_lambda_product) + 1e-9)
+ np.trace(sigma_lambda_product)
+ 1.0 - p)
elif method=="cond_var_red":
Sigma = V_B[c]
m = M_B[c,j]
Sigma_red = np.delete(Sigma, j, axis=0)[:,j]
KLDs[c][j] = np.matmul(
Sigma_red.T,
np.linalg.lstsq(
np.delete(np.delete(Sigma, j, axis=0), j, axis=1),
Sigma_red,
rcond=None)[0])
elif method=="MI":
Sigma = V_B[c]
m = M_B[c,j]
Sigma_red = np.delete(Sigma, j, axis=0)[:,j]
alpha = np.matmul(
Sigma_red.T,
np.linalg.lstsq(
np.delete(np.delete(Sigma, j, axis=0), j, axis=1),
Sigma_red,
rcond=None)[0])
KLDs[c][j] = -0.5 * np.log(1.0 - alpha/Sigma[j,j])
logger.debug("{} of the KLD values are negative and {} of them are nan".format(np.sum(np.array(KLDs)<0.0), np.isnan(KLDs).sum()))
if (np.array(KLDs) < 0.0).any():
logger.warning("Some KLD values are negative - try a larger jitter value (current value: {})".format(jitter))
out = [klds / np.sum(klds) for klds in KLDs]
rate_time = time.time() - start_time
logger.info("The RATE calculation took {} seconds".format(round(rate_time, 3)))
if C==1:
out = out[0]
KLDs = KLDs[0]
if return_KLDs:
out = [out, KLDs]
if return_time:
out = [out, rate_time]
return out
def jth_partition(mu, Sigma, j):
mu_j = np.array(mu[j]).reshape(1,1)
mu_min_j = np.delete(mu, j, axis=0)[:,np.newaxis]
sigma_j = np.array(Sigma[j,j]).reshape(1,1)
sigma_min_j = np.delete(Sigma, j, axis=0)[:,j][:,np.newaxis]
Sigma_min_j = np.delete(np.delete(Sigma, j, axis=0), j, axis=1)
# print("Sizes:\n\tmu_j: {}, mu_min_j: {}\n\tsigma_j: {}, sigma_min_j:{}, Sigma_min_j:{}".format(
# mu_j.shape, mu_min_j.shape, sigma_j.shape, sigma_min_j.shape, Sigma_min_j.shape
# ))
return mu_j, mu_min_j, sigma_j, sigma_min_j, Sigma_min_j
def condition_gaussian(mu_j, mu_min_j, sigma_j, sigma_min_j, Sigma_min_j):
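# Gaussian conditioning with the j-th effect size fixed at zero (the "nullified" variable):
#   mu_{-j|j=0}    = mu_{-j} - sigma_{-j,j} * mu_j / sigma_{jj}
#   Sigma_{-j|j=0} = Sigma_{-j,-j} - sigma_{-j,j} sigma_{-j,j}^T / sigma_{jj}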
mu_cond = mu_min_j - np.dot(sigma_min_j, mu_j)/sigma_j
#print("\n\tmu_cond: {}".format(mu_cond.shape))
Sigma_cond = Sigma_min_j - np.dot(sigma_min_j, sigma_min_j.T)/sigma_j
#print("\tSigma_cond: {}".format(Sigma_cond.shape))
return mu_cond, Sigma_cond
def Wasserstein_gaussian(mu_0, Sigma_0, mu_1, Sigma_1):
"""https://github.com/VersElectronics/WGPOT/blob/master/wgpot.py"""
sqrtK_0 = sqrtm(Sigma_0)
first_term = np.dot(sqrtK_0, Sigma_1)
K_0_K_1_K_0 = np.dot(first_term, sqrtK_0)
cov_dist = np.trace(Sigma_0) + np.trace(Sigma_1) - 2.0 * np.trace(sqrtm(K_0_K_1_K_0))
l2norm = np.sum(np.square(np.abs(mu_0 - mu_1)))
d = np.real(np.sqrt(l2norm + cov_dist))
return d
def rate_wasserstein(X, M_F, V_F, projection=CovarianceProjection()):
M_B, V_B = projection.esa_posterior(X, M_F, V_F)
C = M_F.shape[0]
wass_unnorm = [np.zeros(X.shape[1]) for _ in range(M_F.shape[0])]
for c in range(C):
logger.info("Calculating Wasserstein RATE values for class {} of {}".format(c+1, C))
for j in range(X.shape[1]):
mu_j, mu_min_j, sigma_j, sigma_min_j, Sigma_min_j = jth_partition(M_B[c], V_B[c], j)
mu_cond, Sigma_cond = condition_gaussian(mu_j, mu_min_j, sigma_j, sigma_min_j, Sigma_min_j)
wass_unnorm[c][j] = Wasserstein_gaussian(mu_cond, Sigma_cond, mu_min_j, Sigma_min_j)
return [wass/wass.sum() for wass in wass_unnorm]
# def perm_importances(model, X, y, features=None, n_examples=None, n_mc_samples=100):
# """
# Calculate permutation importances for a BNN or its mimic. Also returns the time taken
# so result is a 2-tuple (array of importance values, time)
# Args:
# model: a BnnBinaryClassifier, RandomForestClassifier or GradientBoostingClassifier
# X, y: examples and labels. The permutation importances are computed by shuffling columns
# of X and seeing how the prediction accuracy for y is affected
# features: How many features to compute importances for. Default (None) is to compute
# for every feature. Otherwise use a list of integers
# n_examples: How many examples to use in the computation. Default (None) uses all the
# features. Otherwise choose a positive integer that is less than
# the number of rows of X/y.
# n_mc_samples: number of MC samples (BNN only)
# Returns a 1D array of permutation importance values in the same order as the columns of X
# """
# X_df, y_df = pd.DataFrame(X), pd.DataFrame(y)
# X_df.columns = X_df.columns.map(str) # rfpimp doesn't like integer column names
# if n_examples is None:
# n_examples = -1
# start_time = time.time()
# if isinstance(model, BnnBinaryClassifier):
# imp_vals = np.squeeze(rfp.importances(model, X_df, y_df,
# metric=lambda model, X, y, sw: model.score(X, y, n_mc_samples, sample_weight=sw), n_samples=n_examples, sort=False).values)
# elif isinstance(model, RandomForestClassifier) or isinstance(model, GradientBoostingClassifier):
# imp_vals = np.squeeze(rfp.importances(model, X_df, y_df, n_samples=n_examples, sort=False).values)
# time_taken = time.time() - start_time
# return imp_vals, time_taken
def vanilla_gradients(model, X, numpy=True):
"""Computes the vanilla gradients of model output w.r.t inputs.
Args:
model: keras model
X: input array
numpy: True to return numpy array, otherwise returns Tensor
Returns:
Gradients of the predictions w.r.t input
"""
X_tensor = tf.cast(X, tf.float32)
with tf.GradientTape() as tape:
tape.watch(X_tensor)
preds = model(X_tensor)
grads = tape.batch_jacobian(preds, X_tensor)
if numpy:
grads = grads.numpy()
return grads
def gradient_input(model, X, numpy=True):
"""Computes the gradients*inputs, where gradients are of model
output wrt input
Args:
model: keras model
X: input array
numpy: True to return numpy array, otherwise returns Tensor
Returns:
Gradients of the predictions w.r.t input
"""
gradients = vanilla_gradients(model, X, False)
gradients_inputs = tf.math.multiply(gradients, X[:,tf.newaxis,:])
if numpy:
gradients_inputs = gradients_inputs.numpy()
return gradients_inputs
def integrated_gradients(model, X, n_steps=20, numpy=True):
"""Integrated gradients using zero baseline
https://keras.io/examples/vision/integrated_gradients/
Args:
model: keras model
X: input array
n_steps: number of interpolation steps
numpy: True to return numpy array, otherwise returns Tensor
Returns:
Integrated gradients wrt input
"""
baseline = np.zeros(X.shape).astype(np.float32)
# 1. Do interpolation.
X = X.astype(np.float32)
interpolated_X = [
baseline + (step / n_steps) * (X - baseline)
for step in range(n_steps + 1)
]
interpolated_X = np.array(interpolated_X).astype(np.float32)
# 2. Get the gradients
grads = []
for i, x in enumerate(interpolated_X):
grad = vanilla_gradients(model, x)
grads.append(grad)
# 3. Approximate the integral using the trapezoidal rule
grads = np.array(grads)
grads = (grads[:-1] + grads[1:]) / 2.0
avg_grads = grads.mean(axis=0)
# 4. Calculate integrated gradients and return
integrated_grads = (X - baseline)[:,np.newaxis,:] * avg_grads
return integrated_grads
def smoothed_gradients(model, X, noise=1.0, n_samples=10, numpy=True):
"""SmoothGrad
Args:
model: keras model
X: input array
noise: variance of Gaussian noise added to each pixel
n_samples: number of noisy samples
numpy: True to return numpy array, otherwise returns Tensor
Returns:
SmoothGrad wrt input
"""
X = X.astype(np.float32)
# 1. Add noise then get the gradients
noisy_grads = []
for i in range(n_samples):
noisy_grad = vanilla_gradients(model, X + np.random.normal(0.0, noise, X.shape))
noisy_grads.append(noisy_grad)
noisy_grads = tf.convert_to_tensor(noisy_grads, dtype=tf.float32)
# 2. Mean noisy gradient
avg_noisy_grads = tf.reduce_mean(noisy_grads, axis=0)
if numpy:
avg_noisy_grads = avg_noisy_grads.numpy()
return avg_noisy_grads
def guided_backprop(model, X, numpy=True):
preds = model(X)[:,:,tf.newaxis]
grads = vanilla_gradients(model, X, False)
guided_grads = (
tf.cast(preds > 0, "float32")
* tf.cast(preds > 0, "float32")
* grads
)
if numpy:
guided_grads = guided_grads.numpy()
return guided_grads
```
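A minimal end-to-end sketch of the main entry point on synthetic data (shapes follow the `rate` docstring; the logit posterior here is random and purely illustrative, and the package is assumed to be importable as `rate`):
```python
import numpy as np
from rate.importance import rate

n, p = 100, 10
X = np.random.randn(n, p)

# fake a one-class logit posterior: mean (1, n), covariance (1, n, n), positive semi-definite
M_F = np.random.randn(1, n)
A = np.random.randn(n, n)
V_F = (np.matmul(A, A.T) / n)[np.newaxis]

rate_values = rate(X, M_F, V_F)               # defaults to the covariance projection
print(rate_values.shape, rate_values.sum())   # (10,) and 1.0 by construction
```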
#### File: RATE_python_package/rate/mimic.py
```python
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.model_selection._search import BaseSearchCV
from sklearn.base import is_regressor, is_classifier
from sklearn.utils.multiclass import type_of_target
import time
import logging
logger = logging.getLogger(__name__)
def mean_soft_prediction(bnn, X, n_mc_samples):
"""
Mean prediction by a BNN over MC samples. Predicted probability if classification,
raw prediction if regression.
"""
if is_classifier(bnn):
return bnn.predict_proba(X, n_mc_samples)
elif is_regressor(bnn):
return bnn.predict(X, n_mc_samples)
else:
raise ValueError("BNN is neither regressor nor classifier!")
def train_mimic(mimic_model, bnn, x_train, y_train=None, x_test=None, n_mc_samples=100, return_time=False):
"""
Get the random forest trained to mimic the mean
predictions of a Bayesian neural network. The mimic model is a regression model
trained on the soft predictions (the predicted probabilities) of the BNN.
TODO: better to pass prediction lambda function as argument rather than the bnn itself
Model selection is performed using random search cross-validation with 10 iterations and 5 folds - this can be quite
slow but shouldn't take more than 10 minutes when parallelised over all available
cores. Default behaviour is to use one core.
Args:
mimic_model: a Scikit-Learn model that implements the fit and score methods. Depending on the context
this could be a regression model (e.g. RandomForestRegressor) or a cross-validation search
object from sklearn.model_selection (e.g. RandomizedSearchCV).
bnn_object: an instance of the BNN class. If `y_train` is not `None` then this argument is ignored when training the mimic
model. The BNN is still used if `x_test` is provided.
x_train: array of training examples with shape (n_examples, n_features).
The random forest will be trained on these examples and their
BNN predictions. The size of the second dimension must match the number of input dimensions expected by the BNN.
y_train: array of soft labels which are used to train the mimic model. Default is `None`, in which case the labels are generated
from `bnn`. This is useful if you want to train several mimic models without making new predictions.
x_test: array of test examples with shape (n_examples, n_features).
If provided (default is None) then the random forest will be
evaluated by comparing its predictions
to those of the BNN. The size of the second dimension must match the number of input dimensions expected by the BNN
n_mc_samples: the number of MC samples used when making BNN predictions.
Their mean is used as the labels for the random forest.
return_time: whether or not to return the time taken to fit the mimic model. Default is False.
"""
logger.debug("Fitting mimic model of type {} on inputs with shape {} and {} MC samples".format(type(mimic_model), x_train.shape, n_mc_samples))
logger.debug("Supplied arguments: y_train: {}, x_test: {}".format(y_train is not None, x_test is not None))
if isinstance(mimic_model, BaseSearchCV):
if not is_regressor(mimic_model.estimator):
raise ValueError("Mimic model must be a regression model")
else:
if not is_regressor(mimic_model):
raise ValueError("Mimic model must be a regression model")
if (bnn is not None) and (bnn.p != x_train.shape[1]):
raise ValueError("Number of BNN input dimensions must match x_train")
if x_test is not None:
if x_train.shape[1] != x_test.shape[1]:
raise ValueError("x_train and x_test must have the same number of dimensions")
if y_train is not None:
logger.warning("Using provided y_train and ignoring the supplied BNN for mimic training")
if type_of_target(y_train) != "continuous":
raise ValueError("Use soft labels (predicted probabilities) to train a mimic model")
else:
y_train = mean_soft_prediction(bnn, x_train, n_mc_samples)
start_time = time.time()
fit_result = mimic_model.fit(x_train, y_train)
fit_time = time.time() - start_time
if x_test is not None:
mimic_test_r2 = fit_result.score(x_test, mean_soft_prediction(bnn, x_test, n_mc_samples))
logger.info("Mimic R^2 on x_test: {:.3f}".format(mimic_test_r2))
if return_time:
return mimic_model, fit_time
else:
return mimic_model
```
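A sketch of fitting a random-forest mimic to an already-trained `BnnBinaryClassifier` (the hyperparameters are illustrative, and `bnn`, `x_train`, `x_test` are assumed to exist):
```python
from sklearn.ensemble import RandomForestRegressor

mimic = RandomForestRegressor(n_estimators=200, min_samples_leaf=5, n_jobs=-1)
mimic, fit_time = train_mimic(mimic, bnn, x_train, x_test=x_test,
                              n_mc_samples=100, return_time=True)
print("mimic fitted in {:.1f}s".format(fit_time))
```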
#### File: RATE_python_package/rate/utils.py
```python
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import roc_curve, auc
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import rankdata
import time
import logging
logger = logging.getLogger(__name__)
def plot_learning_curves(bnn, plotsize=4):
total_epochs = len(bnn.fit_history.history["loss"])
fig, axes = plt.subplots(
1, len(bnn.metrics_names),
figsize=(len(bnn.metrics_names)*plotsize, plotsize))
axes[0].plot(range(total_epochs), bnn.fit_history.history["loss"], '-o', label="training")
if 'val_loss' in bnn.fit_history.history:
axes[0].plot(range(total_epochs), bnn.fit_history.history["val_loss"], '-o', label="validation")
axes[0].legend()
axes[0].set_ylabel("ELBO")
axes[0].set_xlabel("epoch")
for i in range(len(bnn.metrics_names)-1):
this_metric = bnn.metrics_names[i+1]
axes[i+1].plot(range(total_epochs), bnn.fit_history.history[this_metric], '-o', label="training")
if "val_{}".format(this_metric) in bnn.fit_history.history:
axes[i+1].plot(range(total_epochs), bnn.fit_history.history["val_{}".format(this_metric)], '-o', label="validation")
axes[i+1].legend()
axes[i+1].set_ylabel(this_metric)
axes[i+1].set_xlabel("epoch")
plt.tight_layout()
return fig, axes
def make_1d2d(arr):
assert arr.ndim == 1
return arr.reshape(arr.shape[0], 1)
def onehot_encode_labels(y):
"""
One-hot encode integer labels y. The number of classes is assumed to be
the largest value in y
Args:
y: array with shape (n_examples,)
Returns:
array with shape (n_examples, n_classes)
"""
return OneHotEncoder(categories="auto", sparse=False).fit_transform(y.reshape(y.shape[0],1))
def get_roc_curves(variable_importances):
"""
Calculate ROC curves
# TODO: set row idx as variable
Args:
variable_importances: A dataframe with the following columns:
- method
- n
- p
- repeat_idx
- variable
"""
roc_curve_df = pd.DataFrame()
base_fpr = np.linspace(0, 1, 101) # Interpolate tpr (y-axis) at these fpr (x-axis) values
for method in variable_importances["method"].unique():
for n in variable_importances["n"].unique():
for p in variable_importances["p"].unique():
for repeat_idx in range(np.amax(variable_importances["repeat_idx"].unique()+1)):
df = variable_importances.loc[
(variable_importances["method"]==method) &
(variable_importances["repeat_idx"]==repeat_idx) &
(variable_importances["n"]==n) &
(variable_importances["p"]==p)
]
if len(df)==0:
continue
preds, labels = df["value"].values, df["causal"].values.astype(float)
fpr, tpr, _ = roc_curve(labels, np.abs(preds))
interp_tpr = np.interp(base_fpr, fpr, tpr)
auroc = auc(fpr, tpr)
roc_curve_df = pd.concat([
roc_curve_df,
pd.DataFrame({
"fpr" : base_fpr, "tpr" : interp_tpr, "auc" : auroc,
"method" : method, "n" : n, "p" : p
})
])
return roc_curve_df
def load_mnist(fashion, onehot_encode=True, flatten_x=False, crop_x=0, classes=None):
"""
Load the MNIST dataset
Args:
onehot_encode: Boolean indicating whether to one-hot encode training
and test labels (default True)
flatten_x: Boolean indicating whether to flatten the training and
test inputs to 2D arrays with shape (n_examples, image_size**2).
If False, returned inputs have shape (n_examples, image_size, image_size
(default False)
crop_x: Integer controlling the size of the border to be removed from the input
images (default 0, meaning no cropping).
classes: None to include all classes (default). Otherwise include a list of two
integers that will be encoded as 0, 1 in the order they appear.
Returns:
x_train, y_train, x_test, y_test: train and test inputs and labels.
First dimension is always the number of examples
"""
if not fashion:
(x_train, y_train),(x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
else:
(x_train, y_train),(x_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
def crop(X, crop_size):
assert crop_x < X.shape[1]/2
assert crop_x < X.shape[2]/2
return X[:,crop_size:-crop_size,crop_size:-crop_size]
if crop_x > 0:
x_train = crop(x_train, crop_x)
x_test = crop(x_test, crop_x)
# Flatten to 2d arrays (each example 1d)
def flatten_image(X):
return X.reshape(X.shape[0], X.shape[1]*X.shape[1])
if flatten_x:
x_train = flatten_image(x_train)
x_test = flatten_image(x_test)
if onehot_encode:
y_train = onehot_encode_labels(y_train)
y_test = onehot_encode_labels(y_test)
if classes is not None:
assert len(classes) == 2
c0, c1 = classes
train_idxs_to_keep = np.logical_or(y_train==c0, y_train==c1)
x_train, y_train = x_train[train_idxs_to_keep,:], y_train[train_idxs_to_keep]
test_idxs_to_keep = np.logical_or(y_test==c0, y_test==c1)
x_test, y_test = x_test[test_idxs_to_keep,:], y_test[test_idxs_to_keep]
y_train = (y_train==c1).astype(int)[:,np.newaxis]
y_test = (y_test==c1).astype(int)[:,np.newaxis]
return x_train, y_train, x_test, y_test
def make_square(arr):
"""
Reshape a 1D array (or 2D array with .shape[2]==1) into a square 2D array
"""
assert arr.ndim==1 or arr.ndim==2, "array must be 1 or 2-D"
if arr.ndim==2:
assert arr.shape[1]==1, "If array is 2d then second dimension must be 1"
arr = arr.reshape(arr.shape[0])
assert arr.shape[0]**0.5 == int(arr.shape[0]**0.5), "array shape must be square (it is {})".format(arr.shape[0])
return arr.reshape(int(arr.shape[0]**0.5), int(arr.shape[0]**0.5))
def accuracy_onehot(labels, preds):
"""
Compute the accuracy of predictions using one-hot encoded labels
Args:
labels: array of labels with shape (n_examples, n_classes). Must be one-hot encoded
or result may be nonsense (this is not checked)
preds: array of predictions with shape (n_examples, n_classes)
Returns:
Accuracy as float. Result is in [0,1]
"""
assert labels.shape[0]==preds.shape[0]
return np.sum(np.argmax(preds, axis=1) == np.argmax(labels, axis=1))/float(labels.shape[0])
def accuracy(labels, preds):
"""
Compute the accuracy of predictions using integer labels
Args:
labels: array of labels with shape (n_examples,)
preds: array of predictions with shape (n_examples, n_classes)
Returns:
Accuracy as float. Result is in [0,1]
"""
assert labels.shape[0]==preds.shape[0]
return np.sum(preds==labels)/float(labels.shape[0])
def get_nullify_idxs(original_size, border_size):
"""
Get the indices of a flattened image that lie within border_size of the
edge of an image (use to pass to nullify argument in RATE function)
Args:
original_size: Integer giving the size of the image
border_size: Integer giving the size of the border to be removed.
Returns:
Array of (integer) indices that lie in the border.
"""
assert border_size < original_size/2, "Border too large to be removed from image of this size"
tmp = np.zeros((original_size, original_size), dtype=int)
tmp[:border_size,:] = 1
tmp[-border_size:,:] = 1
tmp[:,-border_size:] = 1
tmp[:,:border_size] = 1
tmp = tmp.reshape(tmp.shape[0]*tmp.shape[1])
return np.where(tmp==1)[0]
def idx2pixel(idx, image_size):
"""
Get the 2D pixel location corresponding to the index of its flattened array
Args:
idx: integer index to be converted to pixel location
image_size: integer giving size of the image
Returns:
i, j: the location of the pixel corresponding to idx
"""
assert idx < image_size**2, "index {} too large for image size {}".format(idx, image_size)
tmp = np.zeros(image_size**2)
tmp[idx] = 1
tmp = tmp.reshape(image_size, image_size)
i, j = np.where(tmp==1)
return i[0], j[0]
def sampled_accuracies(pred_proba_samples, labels):
"""
Get the sampled accuracies over the entire test set from logit samples.
Args:
pred_proba_samples: array of predicted probability samples with shape
(n_mc_samples, n_examples, n_classes)/(n_mc_samples, n_examples)
for multiclass/binary classification. (This is the shape returned by BNN_Classifier.predict).
labels: array of one-hot encoded labels with shape (n_examples, n_classes) for non-binary clasification
or (n_examples,1) for binary classification.
Returns:
Array of test accuracies for each round of MC samples with shape (n_mc_samples,)
"""
binary_labels = labels.shape[1]==1
assert pred_proba_samples.shape[1]==labels.shape[0], "Different number of examples in logit samples and labels"
if not binary_labels:
assert pred_proba_samples.shape[2]==labels.shape[1], "Different number of classes in logit samples and labels"
sampled_test_accuracies = np.sum(
np.argmax(pred_proba_samples, axis=2) == np.argmax(labels, axis=1)[:,np.newaxis], axis=1)/float(labels.shape[0])
else:
sampled_test_accuracies = np.sum((pred_proba_samples[:,:]>0.5) == labels[:,0], axis=1)/float(labels.shape[0])
return sampled_test_accuracies
def accuracy_hist(pred_proba_samples, labels):
"""
Plot a histogram showing test accuracies.
Just calls sampled_accuracies then plots the result.
"""
sampled_acc = sampled_accuracies(pred_proba_samples, labels)
avg_accuracy = round(np.mean(sampled_acc) * 100, 3)
print("average accuracy across " + str(pred_proba_samples.shape[0]) + " samples: " + str(avg_accuracy) + "%\n")
fig, ax = plt.subplots(figsize=(10,5))
sns.distplot(100*sampled_acc, ax=ax, rug=True, kde=False)
ax.set_xlabel("Test set accuracy (%)", fontsize=30)
ax.set_ylabel("Frequency density", fontsize=30);
ax.tick_params("both", labelsize=15)
return sampled_acc
def rank_array(arr):
assert arr.ndim==1
return (arr.shape[0] - rankdata(arr)).astype(int)
def reverse_ranks(rankarr):
return rankarr.shape[0] - rankarr - 1
def compute_power(pvals, SNPs):
"""
Compute the power for identifying causal predictors.
Args:
pvals: array of importance scores, one per predictor
SNPs: list of indices of the causal predictors
Output: matrix with dimension (num. predictors, 2), where columns are FPR, TPR
"""
nsnps = len(pvals)
all_snps = np.arange(0, nsnps)
pos = SNPs
negs = list(set(all_snps) - set(SNPs))
pvals_rank = rank_array(pvals)
rocr = np.zeros((nsnps, 2))
for i in all_snps:
v = pvals_rank[0:i] # test positives
z = list(set(all_snps) - set(v)) # test negatives
TP = len(set(v) & set(pos))
FP = len(set(v) & set(negs))
TN = len(set(z) & set(negs))
FN = len(set(z) & set(pos))
TPR = 1.0*TP/(TP+FN); FPR = 1.0*FP/(FP+TN); #FDR = 1.0*FP/(FP+TP)
rocr[i, :] = [FPR, TPR]
return rocr
```
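The helpers above are easy to exercise in isolation. Below is a minimal usage sketch; the import line is a placeholder for wherever these functions live in the package, and the p-values are purely synthetic.
```python
import numpy as np
# Placeholder import path; adjust to wherever the helpers above are defined.
# from rate.utils import get_nullify_idxs, idx2pixel, compute_power

image_size, border = 28, 2
border_idxs = get_nullify_idxs(image_size, border)  # flattened indices lying in the border
row, col = idx2pixel(border_idxs[0], image_size)     # map one index back to a 2D pixel

# Synthetic p-values for 100 predictors, 5 of which are treated as "causal".
pvals = np.random.rand(100)
causal = [3, 17, 42, 56, 88]
pvals[causal] = 1e-4
rocr = compute_power(pvals, causal)  # rows sweep the ranking threshold; columns are (FPR, TPR)
```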
#### File: RATE_python_package/rate/wrapped_r.py
```python
import numpy as np
import rpy2.robjects as robjects
from rpy2.robjects.numpy2ri import activate
def init_rate_r():
"""Returns a python function that calls Lorin's original R code under the hood.
The resulting function only uses the X, f.draws and projection arguments.
This is only used for validating the Python code against in the unit tests.
Example usage:
n, p = 100, 10
n_draws = 1000
X = np.random.randn(n, p)
projection = "covariance"
f_draws = np.random.multivariate_normal(np.zeros(n), np.eye(n, n), size=(n_draws))
rate_r = init_rate_r() # Returns a function that calls the R code directly
out = rate_r(X, f_draws, projection, False) # Final argument is for matrix factorisation
# out is a list of [KLDs, RATE, Delta, ESS] (i.e. matches the R function)
"""
### Include R string automatically - hard to get to work with pytest
# r_code_path = pkg_resources.resource_filename(__name__, 'reference_code.R')
# with open(r_code_path, "r") as f:
# rcode = "".join(f.readlines())
# robjects.r(rcode)
# This long string should match reference_code.R
robjects.r('''
RATE <- function(X, f.draws, projection, factorize,
nullify=NULL, snp.nms=colnames(X), cores=1){
### Determine the number of Cores for Parallelization ###
if(cores > 1){
if(cores>parallel::detectCores()) {
warning("The number of cores you're setting is larger than detected cores - all available cores will be used")
cores = parallel::detectCores()
}
}
`%dopar%` <- foreach::`%dopar%` # so that we don't need to import foreach
cl <- parallel::makeCluster(cores)
doParallel::registerDoParallel(cl, cores=cores)
### Linear projection operator
if (projection == "linear") {
if(factorize) {
### First Run the Matrix Factorizations, if required ###
if(nrow(X) < ncol(X)){
#In case X has linearly dependent columns, first take SVD of X to get v.
# svd() returns list with d, u, v s.t. X = U D V', where d a vector of entries in D the diagonal matrix
svd_X = svd(X)
r_X = sum(svd_X$d>1e-10) # d is diagonal
u = with(svd_X,(1/d[1:r_X]*t(u[,1:r_X])))
v = svd_X$v[,1:r_X]
# Now, calculate Sigma_star
SigmaFhat = cov(f.draws)
Sigma_star = u %*% SigmaFhat %*% t(u)
# Now, calculate U st Lambda = U %*% t(U)
svd_Sigma_star = svd(Sigma_star)
r = sum(svd_Sigma_star$d > 1e-10)
U = t(MASS::ginv(v)) %*% with(svd_Sigma_star, t(1/sqrt(d[1:r])*t(u[,1:r])))
} else {
beta.draws = t(MASS::ginv(X)%*%t(f.draws))
V = cov(beta.draws); #V = as.matrix(nearPD(V)$mat)
D = MASS::ginv(V)
svd_D = svd(D)
r = sum(svd_D$d>1e-10)
U = with(svd_D,t(sqrt(d[1:r])*t(u[,1:r])))
}
Lambda = Matrix::tcrossprod(U)
} else {
# Direct inversion of the effect size analogue posterior covariance if
# don't want to do the factorisations
X.dagger = MASS::ginv(X)
Lambda = MASS::ginv(X.dagger%*%cov(f.draws)%*%t(X.dagger))
}
### Compute the Kullback-Leibler divergence (KLD) for Each Predictor ###
mu = c(MASS::ginv(X)%*%colMeans(f.draws))
int = 1:length(mu); l = nullify;
if(length(l)>0){int = int[-l]}
if(nrow(X) < ncol(X)){
KLD = foreach::foreach(j = int)%dopar%{
q = unique(c(j,l))
m = mu[q]
U_Lambda_sub = qr.solve(U[-q,],Lambda[-q,q,drop=FALSE])
kld = crossprod(U_Lambda_sub%*%m)/2
names(kld) = snp.nms[j]
kld
}
} else {
KLD = foreach::foreach(j = int)%dopar%{
q = unique(c(j,l))
m = mu[q]
alpha = t(Lambda[-q,q])%*%MASS::ginv(as.matrix(Matrix::nearPD(Lambda[-q,-q])$mat))%*%Lambda[-q,q]
kld = (t(m)%*%alpha%*%m)/2
names(kld) = snp.nms[j]
kld
}
}
### Covariance projection operator
} else if (projection == "covariance") {
if (factorize)
warning("Matrix factorisation is only possible with the linear/pseudoinverse projection")
# dim: (num. of draws in f.draws x p)
beta.draws <- t(cov(X, t(f.draws)))
# empirical mean of beta.draws
mu = colMeans(beta.draws)
int = 1:length(mu); l = nullify;
if(length(l)>0){int = int[-l]}
Lambda <- MASS::ginv(cov(beta.draws))
### Compute the Kullback-Leibler divergence (KLD) for Each Predictor ###
KLD <- foreach::foreach(j = 1:ncol(X))%dopar%{
q = unique(c(j,l))
m = mu[q]
alpha = t(Lambda[-q,q])%*%MASS::ginv(as.matrix(Matrix::nearPD(Lambda[-q,-q])$mat))%*%Lambda[-q,q]
kld = (t(m)%*%alpha%*%m)/2
names(kld) = snp.nms[j]
kld
}
}
KLD = unlist(KLD)
### Compute the corresponding “RelATive cEntrality” (RATE) measure ###
RATE = KLD/sum(KLD)
### Find the entropic deviation from a uniform distribution ###
Delta = sum(RATE*log((length(mu)-length(nullify))*RATE))
### Calibrate Delta via the effective sample size (ESS) measures from importance sampling ###
#(Gruber and West, 2016, 2017)
ESS = 1/(1+Delta)*100
parallel::stopCluster(cl)
### Return a list of the values and results ###
return(list("RATE"=RATE,"KLD"=KLD,"Delta"=Delta,"ESS"=ESS))
}
''')
activate() # Activate R to numpy type conversions
def wrapped_r_rate(X, f_draws, projection, factorize):
r_out = robjects.globalenv['RATE'](X, f_draws, projection, factorize)
return [np.array(r_out[0]), np.array(r_out[1]), np.array(r_out[2])[0], np.array(r_out[3])[0]]
return wrapped_r_rate
```
#### File: tests/rate_validation/run_validation_tests.py
```python
import numpy as np
from rate.projections import PseudoinverseProjection, CovarianceProjection
from rate.importance import rate
from rate.wrapped_r import init_rate_r
import matplotlib.pyplot as plt
from scipy.stats import spearmanr, pearsonr
def make_plot(n_draw_vals, norms, pearsons_rho, spearmans_rho, filename):
"""makes and saves the convergence/correlation plots"""
fig, axes = plt.subplots(1, 2, figsize=(8,4))
axes[0].errorbar(np.log10(n_draw_vals), norms.mean(axis=1), yerr=norms.std(axis=1), fmt='o')
axes[0].set_xlabel("log10(number of posterior draws)")
axes[0].set_ylabel(r"$||$R-python$||_2$")
axes[0].set_ylim([0.0, axes[0].get_ylim()[1]])
axes[1].errorbar(np.log10(n_draw_vals), spearmans_rho.mean(axis=1), yerr=spearmans_rho.std(axis=1), fmt='o', label="spearman")
axes[1].errorbar(np.log10(n_draw_vals), pearsons_rho.mean(axis=1), yerr=pearsons_rho.std(axis=1), fmt='o', label="pearson")
axes[1].legend(loc="lower right")
axes[1].set_xlabel("log10(number of posterior draws)")
axes[1].set_ylabel("correlation(R,python)")
plt.tight_layout()
fig.savefig(filename+".pdf", bbox_inches="tight")
#
# Settings
#
n, p = 100, 10
n_draw_vals = [100, 300, 1000, 3000, 10000, 30000, 100000, 300000, 1000000, 3000000]
n_repeats = 10
rate_r_func = init_rate_r() # initialise R code
#
# Logit posterior - multivariate normal with full rank covariance
#
M_F = np.random.rand(n)[np.newaxis]
V_F = np.random.randn(n, n)
V_F = np.dot(V_F, V_F.T)[np.newaxis]
X = np.random.randn(n, p)
assert np.linalg.matrix_rank(V_F)==n
#####################################################################
### PSEUDOINVERSE PROJECTION - NO MATRIX FACTORISATION - FIGURE 1 ###
#####################################################################
rate_python, klds_python = rate(X, M_F, V_F, projection=PseudoinverseProjection(), return_KLDs=True) # the python result. Doesn't use matrix factorisation
print("Pseudoinverse projection, no matrix factorisation...", end="")
norms = np.zeros((len(n_draw_vals), n_repeats))
spearmans_rho = np.zeros((len(n_draw_vals), n_repeats))
pearsons_rho = np.zeros((len(n_draw_vals), n_repeats))
for i, n_draws in enumerate(n_draw_vals):
for j in range(n_repeats):
f_draws = np.random.multivariate_normal(M_F[0], V_F[0], size=(n_draws)) # Draw samples
rate_r, klds_r, _, _ = rate_r_func(X, f_draws, "linear", False) # Calculate rate using samples (uses R code)
norms[i,j] = np.linalg.norm(rate_r-rate_python, ord=2) # Calculate evaluation metrics (norm, correlation)
pearsons_rho[i,j] = pearsonr(rate_r, rate_python)[0]
spearmans_rho[i,j] = spearmanr(rate_r, rate_python)[0]
make_plot(n_draw_vals, norms, pearsons_rho, spearmans_rho, "linear_projection_validation")
print("done")
#######################################################################
### PSEUDOINVERSE PROJECTION - WITH MATRIX FACTORISATION - FIGURE 2 ###
#######################################################################
# Note - we reuse the previous Python RATE result since the matrix factorisation is only implemented in the R code
print("Pseudoinverse projection, with matrix factorisation...", end="")
norms = np.zeros((len(n_draw_vals), n_repeats))
spearmans_rho = np.zeros((len(n_draw_vals), n_repeats))
pearsons_rho = np.zeros((len(n_draw_vals), n_repeats))
for i, n_draws in enumerate(n_draw_vals):
for j in range(n_repeats):
f_draws = np.random.multivariate_normal(M_F[0], V_F[0], size=(n_draws))
rate_r, klds_r, _, _ = rate_r_func(X, f_draws, "linear", True)
norms[i,j] = np.linalg.norm(rate_r-rate_python, ord=2)
pearsons_rho[i,j] = pearsonr(rate_r, rate_python)[0]
spearmans_rho[i,j] = spearmanr(rate_r, rate_python)[0]
make_plot(n_draw_vals, norms, pearsons_rho, spearmans_rho, "linear_projection_validation_with_mat_fac")
print("done")
########################################
### COVARIANCE PROJECTION - FIGURE 3 ###
########################################
print("Covariance projection...", end="")
rate_python, klds_python = rate(X, M_F, V_F, projection=CovarianceProjection(), return_KLDs=True) # the python result
norms = np.zeros((len(n_draw_vals), n_repeats))
spearmans_rho = np.zeros((len(n_draw_vals), n_repeats))
pearsons_rho = np.zeros((len(n_draw_vals), n_repeats))
for i, n_draws in enumerate(n_draw_vals):
for j in range(n_repeats):
f_draws = np.random.multivariate_normal(M_F[0], V_F[0], size=(n_draws))
rate_r, klds_r, _, _ = rate_r_func(X, f_draws, "covariance", False)
norms[i,j] = np.linalg.norm(rate_r-rate_python, ord=2)
pearsons_rho[i,j] = pearsonr(rate_r, rate_python)[0]
spearmans_rho[i,j] = spearmanr(rate_r, rate_python)[0]
make_plot(n_draw_vals, norms, pearsons_rho, spearmans_rho, "cov_projection_validation")
print("done")
```
#### File: RATE_python_package/tests/test_crossvalidation.py
```python
import pytest
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow.keras.layers import Reshape, Conv2D, Dense, BatchNormalization, Flatten
from tensorflow_probability.python.layers.dense_variational import DenseLocalReparameterization
from tensorflow.keras.optimizers import Adam, SGD
from tensorflow.keras import Input
from rate.cross_validation import cross_validate_bnn
def network_architectures(p):
return [
[
Dense(32, activation='relu', input_shape=(p,)),
DenseLocalReparameterization(1)
],
[
Dense(32, activation='relu', input_shape=(p,)),
BatchNormalization(),
Dense(16, activation='relu'),
DenseLocalReparameterization(1)
]
]
optimizers = [lambda: Adam(1e-2), lambda: SGD(1e-4)]
def test_all():
n, p = 100, 10
X = np.random.randn(n, p)
y = np.random.choice(2, size=n)
k = 3
n_iter = 2
n_epochs = 3
bnn, val_df = cross_validate_bnn(
network_architectures(p),
optimizers,
"grid",
X, y, k,
fit_args={'epochs' : n_epochs})
assert val_df.shape==(4, k+1)
bnn, val_df = cross_validate_bnn(
network_architectures(p),
optimizers,
"random",
X, y, k,
n_iter=n_iter,
fit_args={'epochs' : n_epochs})
assert val_df.shape==(n_iter, k+1)
```
#### File: RATE_python_package/tests/test_models.py
```python
import pytest
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.exceptions import NotFittedError
from rate.models import BnnBinaryClassifier, BnnScalarRegressor
def toy_binary_classification_data(n, p):
X, y = make_classification(
n_samples=n, n_features=p, n_informative=p, n_redundant=0, n_repeated=0,
n_classes=2, n_clusters_per_class=3, flip_y=0.05, shuffle=False,
random_state=123)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, random_state=123)
return (X_train, y_train), (X_test, y_test)
def network_layers(p, C):
layers = []
layers.append(tf.keras.layers.Dense(128, activation='relu', input_shape=(p,)))
layers.append(tf.keras.layers.BatchNormalization())
layers.append(tfp.layers.DenseLocalReparameterization(C))
return layers
def test_unfitted_models():
"""Calling predict before fit should raise exception
"""
bnn = BnnBinaryClassifier()
with pytest.raises(NotFittedError):
bnn.predict(np.random.randn(10,3))
bnn.predict_proba(np.random.randn(10,3))
bnn.predict_samples(np.random.randn(10,3))
bnn.predict_proba_samples(np.random.randn(10,3))
bnn = BnnScalarRegressor()
with pytest.raises(NotFittedError):
bnn.predict(np.random.randn(10,3))
bnn.predict_samples(np.random.randn(10,3))
# Test using predict on unfitted model
def test_input_shapes_classification():
"""Input dimensionality should match model's expectation
(either from layers passed in constructor or first fit call)
"""
p = 100
# First check default layer behaviour (p not set until fit)
bnn = BnnBinaryClassifier()
bnn.fit(np.random.randn(100, p), np.random.randint(2, size=100))
with pytest.raises(ValueError):
bnn.fit(np.random.randn(100, p+1), np.random.randint(2, size=100))
# Now check if layers are provided
bnn = BnnBinaryClassifier(layers=network_layers(p, 1))
with pytest.raises(ValueError):
        bnn.fit(np.random.randn(100, p + 1), np.random.randint(2, size=100))  # wrong number of features
def test_input_shapes_regression():
"""Input dimensionality should match model's expectation
(either from layers passed in constructor or first fit call)
"""
p = 100
# First check default layer behaviour (p not set until fit)
bnn = BnnScalarRegressor()
bnn.fit(np.random.randn(100, p), np.random.rand(100))
with pytest.raises(ValueError):
bnn.fit(np.random.randn(100, p+1), np.random.rand(100))
# Now check if layers are provided
    bnn = BnnScalarRegressor(layers=network_layers(p, 1))
    with pytest.raises(ValueError):
        bnn.fit(np.random.randn(100, p + 1), np.random.rand(100))  # wrong number of features
def test_label_types_classification():
"""Binary classifier only accepts bianry labels
"""
p = 100
# First check default layer behaviour (p not set until fit)
bnn = BnnBinaryClassifier()
with pytest.raises(ValueError):
X_train = np.random.randn(100, p)
bnn.fit(X_train, np.random.rand(100)) # Float labels
bnn.fit(X_train, np.random.randint(3, size=100)) # Multiclass classification labels
def test_label_types_regression():
"""Scalar regressino only accepts floats
"""
p = 100
# First check default layer behaviour (p not set until fit)
bnn = BnnScalarRegressor()
with pytest.raises(ValueError):
X_train = np.random.randn(100, p)
bnn.fit(X_train, np.random.randint(2, size=100)) # Binary labels
bnn.fit(X_train, np.random.randint(3, size=100)) # Multiclass classification labels
    # TODO: multiclass continuous labels
def test_predictions_classification():
"""Binary classifier only predicts 0s and 1s for labels and values in
    [0,1] for probabilities. Also checks shapes
"""
n, p = 100, 10
n_test = 50
n_mc_samples = 15
#(X_train, y_train), (X_test, y_test) = toy_binary_classification_data(n, p)
bnn = BnnBinaryClassifier(verbose=0)
bnn.fit(np.random.randn(n, p), np.random.randint(2, size=n))
yhat_labels = bnn.predict(np.random.randn(n_test, p))
assert yhat_labels.shape[0] == n_test
assert np.sum(yhat_labels==0) + np.sum(yhat_labels==1) == n_test
yhat_proba = bnn.predict_proba(np.random.randn(n_test, p))
    assert np.all([0.0 <= prob <= 1.0 for prob in yhat_proba])
yhat_labels_samples = bnn.predict_samples(np.random.randn(n_test, p), n_mc_samples)
assert yhat_labels_samples.shape == (n_mc_samples, n_test)
assert np.all([val in [0,1] for val in yhat_labels_samples.flat])
yhat_proba_samples = bnn.predict_proba_samples(np.random.randn(n_test, p), n_mc_samples)
assert yhat_proba_samples.shape == (n_mc_samples, n_test)
    assert np.all([0.0 <= prob <= 1.0 for prob in yhat_proba_samples.flat])
def test_H_classification():
"""The shape of H should match the network architecture
"""
n, p = 100, 20
bnn = BnnBinaryClassifier(network_layers(p, 1))
with pytest.raises(ValueError):
H_arr = bnn.H(np.random.randn(n, p))
bnn.fit(np.random.randn(n, p), np.random.randint(2, size=n))
H_arr = bnn.H(np.random.randn(n, p))
assert H_arr.shape == (n, 128)
def test_H_regression():
"""The shape of H should match the network architecture
"""
n, p = 100, 20
bnn = BnnScalarRegressor(network_layers(p, 1))
with pytest.raises(ValueError):
H_arr = bnn.H(np.random.randn(n, p))
bnn.fit(np.random.randn(n, p), np.random.rand(n))
H_arr = bnn.H(np.random.randn(n, p))
assert H_arr.shape == (n, 128)
# def test_score_classification():
# """The result of score should be correct for the bianry classifier (returns accuracy)
# """
# n, p = 100, 10
# (X_train, y_train), (X_test, y_test) = toy_binary_classification_data(n, p)
# bnn = BnnBinaryClassifier(verbose=0)
# bnn.fit(X_train, y_train)
# yhat_labels = bnn.predict(X_test)
# assert bnn.score(X_test, y_test, metric="accuracy") == np.sum(y_test==yhat_labels)/float(y_test.shape[0])
def test_var_params_classification():
n, p = 100, 20
bnn = BnnBinaryClassifier(network_layers(p, 1))
bnn.fit(np.random.randn(n, p), np.random.randint(2, size=n))
W_mean, W_var, b = bnn.var_params()
assert W_mean.shape[0] == 128
assert W_var.shape[0] == 128
assert b.shape[0] == 1
# Same for r2 in regression
# Check output shapes
# Test input shapes error handling
# test default constructors
# Check label exception raising
# Check accuracy metric
``` |
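Distilled from the tests above, a minimal usage sketch of the classifier (random data, so the accuracy is meaningless; the shapes are the point, and TensorFlow/TFP are assumed to be installed):
```python
import numpy as np
from rate.models import BnnBinaryClassifier

n, p = 100, 10
X, y = np.random.randn(n, p), np.random.randint(2, size=n)
bnn = BnnBinaryClassifier(verbose=0)
bnn.fit(X, y)

X_test = np.random.randn(20, p)
labels = bnn.predict(X_test)                            # shape (20,), values in {0, 1}
proba_samples = bnn.predict_proba_samples(X_test, 30)   # shape (30, 20), one row per MC sample
```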
{
"source": "jonathanjameswatson/kivygames",
"score": 3
} |
#### File: games/noughtsandcrosses/__init__.py
```python
import numpy as np
from kivygames.games import Game
import kivygames.games.noughtsandcrosses.c as c
class CellOccupiedError(Exception):
pass
class NoughtsAndCrosses(Game):
minPlayers = 2
maxPlayers = 2
hasAI = True
gridShape = (3, 3)
def __init__(self):
Game.__init__(self)
self.grid = np.zeros(self.gridShape, dtype="u1")
self.player = 1
def isEmpty(self, position):
return self.grid[position] == 0
async def turn(self):
await self.sendOutput("Player", self.player)
while True:
position = await self.getInput("Position", tuple, self.player)
if self.isEmpty(position):
break
await self.sendOutput("Error", "That space is already full.")
await self.sendOutput("Error", "")
self.grid[position] = self.player
await self.sendOutput("Grid", self.grid)
if c.hasPlayerWon(self.grid, self.player):
await self.sendOutput("End", f"Player {self.player} wins.")
return True
if np.count_nonzero(self.grid) == 9:
await self.sendOutput("End", f"It's a draw!")
return True
self.player = 3 - self.player
return False
def getAIInput(self, name):
if name == "Position":
return c.minimax(self.player, self.player, True, self.grid)[1]
async def game(self):
while True:
ended = await self.turn()
if ended:
break
await self.end()
```
#### File: kivygames/kivygames/__main__.py
```python
from os import scandir, environ, name
from os.path import abspath, join
from importlib import import_module
from kivy.config import Config
Config.set("kivy", "window_icon", "kivygames/assets/icon.ico")
import kivy
from kivy.app import App
from kivy.lang.builder import Builder
from kivy.core.window import Window
from kivy.resources import resource_add_path
kivy.require("1.11.1")
if name == "nt":
import ctypes
import tkinter
ctypes.windll.shcore.SetProcessDpiAwareness(1)
root = tkinter.Tk()
root.overrideredirect(1)
root.withdraw()
dpi = ctypes.windll.user32.GetDpiForWindow(root.winfo_id())
scale = str(dpi / 96)
root.destroy()
environ["KIVY_METRICS_DENSITY"] = scale
def importAll():
Builder.load_file(abspath(f"{__file__}/../style.kv"))
importWidgets("widgets")
gameLayouts = importWidgets("gamelayouts", True)
return gameLayouts
def importWidgets(dirName, returnLayouts=False):
gameLayouts = []
widgetsDir = abspath(f"{__file__}/../{dirName}")
widgetDirs = (
f for f in scandir(widgetsDir) if f.is_dir() and f.name != "__pycache__"
)
for widgetDir in widgetDirs:
Builder.load_file(join(widgetDir.path, "widget.kv"))
module = import_module(f"kivygames.{dirName}.{widgetDir.name}")
if returnLayouts:
gameLayouts.append(getattr(module, "layout")())
return gameLayouts if returnLayouts else None
class KivyGamesApp(App):
def __init__(self, gameLayouts, **kwargs):
App.__init__(self, **kwargs)
self.gameLayouts = gameLayouts
def build(self):
Window.clearcolor = (1, 1, 1, 1)
self.title = "Kivy Games"
def run():
resource_add_path(abspath(f"{__file__}/../assets"))
gameLayouts = importAll()
KivyGamesApp(gameLayouts=gameLayouts).run()
if __name__ == "__main__":
run()
``` |
{
"source": "jonathanjameswatson/maths-race",
"score": 2
} |
#### File: blueprints/game/views.py
```python
from flask import render_template
from ..game import game
from mathsrace.blueprints.auth import login_required
@game.route('/')
@login_required
def index():
return render_template('game/game.html.j2')
``` |
{
"source": "jonathanjameswatson/web-app",
"score": 3
} |
#### File: web-app/backend/KeywordMatch.py
```python
import yake
from nltk.stem import PorterStemmer
class KeywordMatch:
def __init__(self):
language = "en"
max_ngram_size = 3
deduplication_threshold = 0.9
numOfKeywords = 5
self.custom_kw_extractor = yake.KeywordExtractor(
lan=language,
n=max_ngram_size,
dedupLim=deduplication_threshold,
top=numOfKeywords,
features=None,
)
def stem_phrases(self, words):
stemmed = set()
stemmer = PorterStemmer()
for word in words:
stemmed.add(" ".join([stemmer.stem(x) for x in word.split(" ")]))
return stemmed
def find_keyword_match(self, text1, text2):
keywords1 = [
x[0]
for x in sorted(
self.custom_kw_extractor.extract_keywords(text1),
key=lambda x: x[1],
reverse=True,
)
]
keywords2 = [
x[0]
for x in sorted(
self.custom_kw_extractor.extract_keywords(text2),
key=lambda x: x[1],
reverse=True,
)
]
keyword_set_1 = self.stem_phrases(keywords1)
keyword_set_2 = self.stem_phrases(keywords2)
if len(keyword_set_1) + len(keyword_set_2) <= 6:
threshold = 1
else:
threshold = 2
score = len(set.intersection(keyword_set_1, keyword_set_2))
return score if score >= threshold else None
```
#### File: web-app/backend/Question.py
```python
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from Tree import Tree
class Question:
def __init__(self, id: int, text: str):
self.id: int = id
self.text: str = text
self.votes: int = 1
def add_parent(self, parent: Tree):
self.parent = parent
``` |
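A minimal usage sketch of the matcher above (it assumes `yake` and NLTK are installed; the example strings are arbitrary):
```python
km = KeywordMatch()
text_a = "How do I reverse a linked list in Python?"
text_b = "Reversing linked lists: a Python implementation walkthrough"
score = km.find_keyword_match(text_a, text_b)
print(score)  # integer count of shared stemmed keywords, or None if below the threshold
```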
{
"source": "jonathanj/cacofonix",
"score": 2
} |
#### File: src/cacofonix/_app.py
```python
import json
import secrets
import time
from datetime import date
from fs.base import FS
from fs.path import join, dirname, basename, splitext
from semver import VersionInfo, parse_version_info
from typing import (
Iterable,
List,
Optional,
TextIO,
Tuple,
Union)
from . import _yaml, _log as log
from ._config import Config
from .errors import InvalidChangeMetadata, FragmentCompilationError
from ._towncrier import (
render_fragment,
render_changelog,
merge_with_existing_changelog)
from ._types import Fragment, FoundFragment, GuessPair
from ._effects import SideEffects
class Application(object):
METADATA_FILENAME: str = 'metadata.yaml'
def __init__(self, config: Config, effects: SideEffects):
self.config = config
self.effects = effects
fragments_path = effects.fragments_fs.getsyspath('.')
log.debug(f'Fragments root: {fragments_path}')
def load_fragment(self, fd: TextIO) -> Fragment:
"""
Parse and validate a fragment from a stream.
"""
fragment = _yaml.load(fd)
fragment['issues'] = {
str(k): v for k, v in fragment.get('issues', {}).items()}
return self.validate_fragment(fragment)
def find_fragments(
self,
version: Union[str, VersionInfo]
) -> Iterable[FoundFragment]:
"""
        Find all fragments for a particular version.
"""
log.debug(f'Finding fragments for version {version}')
fragments_fs = self.effects.fragments_fs
name = str(version)
if fragments_fs.exists(name):
version_fs = fragments_fs.opendir(name)
matches = version_fs.filterdir(
'.',
files=['*.yaml'],
exclude_files=[self.METADATA_FILENAME])
for file_info in matches:
file_path = file_info.name
log.debug(f'Found {file_path}')
yield version_fs, file_path
def find_new_fragments(self) -> Iterable[FoundFragment]:
"""
Find fragment files for the next version.
"""
return self.find_fragments('next')
def archive_fragments(
self,
found_fragments: Iterable[FoundFragment],
version: VersionInfo,
version_date: date,
version_author: str,
) -> Tuple[int, List[str]]:
"""
        Archive new fragments into the path for ``version``.
"""
problems = []
n = 0
with self.effects.archive_fs(str(version)) as archive_fs:
log.info(f'Archiving for {version}')
for n, (version_fs, filename) in enumerate(found_fragments, 1):
try:
path = version_fs.getsyspath(filename)
archive_path = archive_fs.getsyspath(filename)
log.info(f'Archive {path} -> {archive_path}')
self.effects.git_mv(path, archive_path)
self.effects.git_stage(archive_path)
except (OSError, FileNotFoundError):
log.exception(
f'Unable to archive fragment: {version_fs} {filename}')
problems.append(path)
if not problems:
log.info('Writing archival metadata')
metadata = {
'date': version_date,
'version': str(version),
'author': version_author}
log.debug(metadata)
archive_fs.settext(
self.METADATA_FILENAME, _yaml.dump(metadata))
metadata_path = archive_fs.getsyspath(self.METADATA_FILENAME)
self.effects.git_stage(metadata_path)
return n, problems
def create_new_fragment(self, yaml_text: str) -> str:
"""
Generate a unique filename for a fragment, and write the content to it.
"""
filename = '{}-{}.yaml'.format(
int(time.time() * 1000),
secrets.token_urlsafe(6))
with self.effects.archive_fs('next') as next_fs:
if next_fs.exists(filename):
raise RuntimeError(
'Generated fragment name already exists!', filename)
path = next_fs.getsyspath(filename)
log.debug(f'Writing new fragment {path}')
next_fs.settext(filename, yaml_text)
self.effects.git_stage(path)
return filename
def validate_fragment(self, fragment: Optional[Fragment]) -> Fragment:
"""
Validate change fragment data.
Fragments must have some value (not empty) and must have a known
fragment type and section.
"""
if fragment is None:
raise InvalidChangeMetadata('No data to parse')
fragment_type = fragment.get('type')
if not self.config.has_fragment_type(fragment_type):
raise InvalidChangeMetadata(
'Missing or unknown fragment type', fragment_type)
section = fragment.get('section') or None
if section and not self.config.has_section(section):
raise InvalidChangeMetadata(
'Missing or unknown section', section)
description = fragment.get('description')
if description is None or not description.strip():
raise InvalidChangeMetadata(
'Missing a change description')
return fragment
def validate_fragment_text(self, fragment_text: Optional[str]) -> None:
"""
Validate change fragment text.
"""
fragment = _yaml.load(fragment_text)
self.validate_fragment(fragment)
def compile_fragment_files(
self,
write_fs: FS,
found_fragments: Iterable[FoundFragment]) -> List[str]:
"""
        Compile fragment files into ``write_fs``.
"""
outputs = []
for version_fs, filename in found_fragments:
try:
fragment = self.load_fragment(version_fs.readtext(filename))
fragment_type = fragment.get('type')
showcontent = self.config.fragment_types.get(
fragment_type, {}).get('showcontent', True)
section = fragment.get('section') or None
rendered_content = render_fragment(
fragment,
showcontent,
self.config.changelog_output_type)
if rendered_content.strip():
filename_stem = splitext(basename(filename))[0]
output_path = join(*filter(None, [
section,
'{}.{}'.format(filename_stem, fragment_type)]))
log.info(
'Compiling {} -> {}'.format(
version_fs.getsyspath(filename),
write_fs.getsyspath(output_path)))
parent_dir = dirname(output_path)
if parent_dir:
write_fs.makedirs(parent_dir, recreate=True)
write_fs.writetext(output_path, rendered_content)
outputs.append(output_path)
except Exception:
raise FragmentCompilationError(filename)
return outputs
def render_changelog(
self,
fs: FS,
version: VersionInfo,
version_date: date) -> str:
"""
Find compiled fragments in `parent_dir` and render a changelog with
them.
"""
parent_dir = fs.getsyspath('.')
return render_changelog(
parent_dir,
self.config.changelog_output_type,
self.config._towncrier_sections(parent_dir),
self.config._towncrier_fragment_types(),
self.config._towncrier_underlines(),
project_version=str(version),
project_date=version_date.isoformat())
def merge_with_existing_changelog(self, changelog: str) -> None:
"""
Merge a new changelog into an existing one.
"""
with self.effects.changelog_fs() as changelog_fs:
changelog_path = changelog_fs.getsyspath(
self.config.changelog_path)
merge_with_existing_changelog(
changelog_path,
self.config.changelog_marker,
changelog)
self.effects.git_stage(changelog_path)
def guess_version(self, cwd_fs: FS) -> Optional[str]:
"""
Attempt to guess the software version.
"""
return detect_version(cwd_fs)
def known_versions(self) -> List[VersionInfo]:
"""
Sorted list of archived versions.
"""
fragments_fs = self.effects.fragments_fs
return sorted(
(parse_version_info(info.name) for info in
fragments_fs.filterdir(
'.',
exclude_files=['*'],
exclude_dirs=['next'])),
reverse=True)
def package_json(cwd_fs: FS):
"""
    Try to guess a version from ``package.json``.
"""
log.debug('Looking for package.json')
if cwd_fs.exists('package.json'):
log.debug('Guessing version with package.json')
try:
with cwd_fs.open('package.json', 'r') as fd:
return json.load(fd).get('version')
except json.JSONDecodeError:
pass
return None
_default_guesses = [
('package.json', package_json),
]
def detect_version(
cwd_fs: FS,
_guesses: List[GuessPair] = _default_guesses
) -> Optional[str]:
"""
Make several attempts to guess the version of the package.
"""
for kind, guess in _guesses:
result = guess(cwd_fs)
if result is not None:
return kind, result
return None
``` |
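An illustrative sketch of the version-detection hook above, using PyFilesystem's in-memory filesystem so nothing touches disk (the import path assumes the standard `src/` layout shown in the file heading):
```python
from fs.memoryfs import MemoryFS
from cacofonix._app import detect_version

with MemoryFS() as cwd_fs:
    cwd_fs.writetext('package.json', '{"version": "1.2.3"}')
    print(detect_version(cwd_fs))  # ('package.json', '1.2.3')
```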
{
"source": "jonathanj/custref-to-sql-py",
"score": 3
} |
#### File: src/custref_to_sql/main.py
```python
import argparse
import csv
import sys
class InconsistentState(RuntimeError):
"""
Encountered an inconsistent state.
"""
def merge(*ds):
"""
Merge several ``dict``s together.
"""
res = {}
for d in ds:
if d:
res.update(d)
return res
def to_sql_name(name):
"""
Ensure ``name`` is a valid SQL name.
"""
return name.lower().replace(' ', '_')
def deserialize_text(value):
"""
Deserialize a text value,
"""
return value
def serialize_text(value):
"""
Serialize a text value.
"""
return "'{}'".format(quote_sql_string(value))
def deserialize_yesno(value):
"""
Deserialize a boolean (yes or no) value.
"""
return value.lower() == 'yes'
def serialize_yesno(value):
"""
Serialize a boolean (yes or no) value.
"""
return str(1 if value else 0)
def deserialize_integer(value):
"""
Deserialize an integer value.
"""
return int(value)
def serialize_integer(value):
"""
Serialize an integer value.
"""
return str(value)
class InOutType(object):
sql_type = None
_nothing = object()
def __init__(self, serialize=_nothing, deserialize=_nothing):
if serialize is not self._nothing:
self.serialize = serialize
if deserialize is not self._nothing:
self.deserialize = deserialize
@property
def is_sql(self):
return self.sql_type and self.serialize is not None
@property
def is_csv(self):
return self.deserialize is not None
@classmethod
def no_sql(cls):
return cls(serialize=None)
@classmethod
def no_csv(cls):
return cls(deserialize=None)
class text(InOutType):
sql_type = 'TEXT'
serialize = staticmethod(serialize_text)
deserialize = staticmethod(deserialize_text)
class integer(InOutType):
sql_type = 'INTEGER'
serialize = staticmethod(serialize_integer)
deserialize = staticmethod(deserialize_integer)
class yesno(InOutType):
sql_type = 'INTEGER'
serialize = staticmethod(serialize_yesno)
deserialize = staticmethod(deserialize_yesno)
class Column(object):
def __init__(self, name, field_type):
self.name = to_sql_name(name)
self.field_type = field_type
def __repr__(self):
return '<{} name={!r} field_type={!r}>'.format(
type(self).__name__,
self.name,
self.field_type)
@property
def is_sql(self):
return self.field_type.is_sql
@property
def is_csv(self):
return self.field_type.is_csv
class Table(object):
def __init__(self, name, columns, foreign_keys=[]):
self.name = to_sql_name(name)
self.columns = columns
self.foreign_keys = foreign_keys
def __repr__(self):
return '<{} name={!r} columns={!r} foreign_keys={!r}>'.format(
type(self).__name__,
self.name,
self.columns,
self.foreign_keys)
@property
def only_sql_columns(self):
return [c for c in self.columns if c.is_sql]
@property
def only_csv_columns(self):
return [c for c in self.columns if c.is_csv]
def create_sql(self):
cols = self.only_sql_columns + self.foreign_keys
cols_sql = [
'{} {}'.format(col.name, col.field_type.sql_type)
for col in cols]
return 'CREATE TABLE {} ({});'.format(
self.name, ', '.join(cols_sql))
def insert_sql(self, row, foreign_keys=None):
if foreign_keys:
fk_cols = [fk_col for fk_col in self.foreign_keys
if fk_col.name in foreign_keys]
else:
fk_cols = []
columns = self.only_sql_columns + fk_cols
row = merge(row, foreign_keys or {})
col_names = [col.name for col in columns]
data = [col.field_type.serialize(row[col.name]) for col in columns]
return 'INSERT INTO {} ({}) VALUES ({});'.format(
self.name,
', '.join(col_names),
', '.join(data))
def parse_csv(self, data):
return {col.name: col.field_type.deserialize(v)
for (col, v) in zip(self.only_csv_columns, data)}
DATA_TYPE_HEADERS = {
'CUST': Table(
name='customers',
columns=[
Column('record_type', text.no_sql()),
Column('customer_code', text),
Column('name', text),
Column('name_extra', text),
Column('address_number', text),
Column('address_line_1', text),
Column('address_line_2', text),
Column('address_line_3', text),
Column('address_line_4', text),
Column('address_line_5', text),
Column('contact_name', text),
Column('contact_extra', text),
Column('language_code', text),
Column('language', text),
Column('headquarter', yesno),
Column('headquarter_code', text),
Column('telephone', text),
Column('mobile_phone', text),
Column('insert_date', text.no_csv()),
Column('insert_time', text.no_csv()),
]),
'REF': Table(
name='customer_references',
columns=[
Column('record_type', text.no_sql()),
Column('reference_identifier', text),
Column('length', integer),
Column('mandatory', yesno),
Column('numeric_only', yesno),
Column('folf_start_position', integer),
Column('folf_length', integer),
Column('print_on_invoice', yesno),
Column('check_type', text),
Column('send_to_crs', yesno),
Column('validation_mask', text),
Column('internal_name', text),
Column('customer_reference_desc', text),
Column('dbi_connector', text),
Column('dbi_connector_desc', text),
Column('alphabetic_only', yesno),
Column('no_special_characters', yesno),
Column('only_capital_letters', yesno),
Column('minimum_length', integer),
Column('reference_type', text),
Column('insert_date', text.no_csv()),
Column('insert_time', text.no_csv()),
],
foreign_keys=[
Column('customer_code', text),
]),
}
def parse_headers(headers, data):
"""
Given a header structure and some data, parse the data as headers.
"""
return {k: f(v) for (k, (f, _), _), v in zip(headers, data)}
def exit_H_CUST(state):
"""
Handle leaving the ``H_CUST`` state.
Add the customer data being processed to the list of all processed customers.
"""
current_customer = state.pop('current_customer', None)
if current_customer:
state.setdefault('customers', []).append(current_customer)
return state
def enter_final(state, data):
"""
Handle entering the final state.
Wrap up any lingering customer data.
"""
return exit_H_CUST(state)
def enter_CUST(state, data):
"""
Handle entering the ``CUST`` state.
Process a customer record.
"""
if state.get('current_customer'):
raise InconsistentState(
'Found unflushed CUST record when processing a new one')
row = merge(
DATA_TYPE_HEADERS['CUST'].parse_csv(data),
state.get('append_to_cust'))
state['current_customer'] = row
return state
def enter_REF(state, data):
"""
Handle entering the ``REF`` state.
Process a customer reference and associate it with the customer record
currently being processed.
"""
current_customer = state.get('current_customer')
if not current_customer:
raise InconsistentState('Found REF but no current customer')
references = current_customer.setdefault('references', [])
row = merge(
DATA_TYPE_HEADERS['REF'].parse_csv(data),
state.get('append_to_ref'))
references.append(row)
return state
def enter_H(state, data):
extra = [
('insert_date', data[7]),
('insert_time', data[8]),
]
state['append_to_ref'] = extra
state['append_to_cust'] = extra
return state
# Valid state transitions.
STATES = {
'__initial__': {
'valid_states': {'H', '*'},
},
'H': {
'enter': enter_H,
'valid_states': {'H_CUST', 'S'},
},
'S': {
'valid_states': {'H_CUST'},
},
'H_CUST': {
'exit': exit_H_CUST,
'valid_states': {'CUST', 'H_REF'},
},
'CUST': {
'enter': enter_CUST,
'valid_states': {'H_REF', 'H_CUST', 'MEDIUM'},
},
'H_REF': {
'valid_states': {'REF'},
},
'REF': {
'enter': enter_REF,
'valid_states': {'REF', 'H_CUST', 'MEDIUM'},
},
'MEDIUM': {
},
'__final__': {
'final': True,
'enter': enter_final,
}
}
def quote_sql_string(value):
"""
Quote an SQL string.
"""
return value.replace("'", "''")
def as_sql(state, create=True):
"""
Serialize the application state as SQL.
"""
yield 'BEGIN TRANSACTION;'
ref_table = DATA_TYPE_HEADERS['REF']
cust_table = DATA_TYPE_HEADERS['CUST']
if create:
yield ''
yield '-- Create tables'
yield ref_table.create_sql()
yield cust_table.create_sql()
yield ''
customers = state.get('customers')
if customers:
yield '-- Customers'
for customer in customers:
yield cust_table.insert_sql(customer)
for ref in customer.get('references', []):
yield ref_table.insert_sql(ref, foreign_keys={
'customer_code': customer['customer_code'],
})
yield ''
yield 'COMMIT;'
def fsm(action, data):
"""
Finite-state machine to process an action and data, possibly leading to a
new action.
"""
valid_states = action.get('valid_states')
if valid_states is None:
return STATES['__final__']
maybe_next = data[0]
if maybe_next in valid_states:
return STATES[maybe_next]
elif '*' in valid_states:
return action
else:
raise RuntimeError('Expected one of {!r} but got {!r}'.format(
valid_states, maybe_next))
def parse_command_line():
"""
Parse command line arguments.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
'input_file',
nargs='?',
type=argparse.FileType('rb'),
default=sys.stdin)
parser.add_argument(
'output_file',
nargs='?',
type=argparse.FileType('wb'),
default=sys.stdout)
parser.add_argument(
'--create',
action='store_true',
help='Include SQL "CREATE TABLE" commands.')
return parser.parse_args()
def main():
args = parse_command_line()
reader = csv.reader(args.input_file, delimiter=';')
state, action = {}, STATES['__initial__']
while reader:
data = next(reader, None)
        # End of input reached.
if data is None:
break
new_action = fsm(action, data)
if new_action != action:
exitf = action.get('exit')
if exitf:
state = exitf(state)
enterf = new_action.get('enter')
if enterf:
state = enterf(state, data)
if new_action.get('final'):
break
action = new_action
args.output_file.write(
'\n'.join(as_sql(state, create=args.create)))
if __name__ == '__main__':
main()
``` |
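The `Table`/serializer layer above can be poked at on its own, outside the CSV state machine; a quick sketch:
```python
print(serialize_text("O'Brien"))               # 'O''Brien' (single quotes doubled for SQL)
print(serialize_yesno(True))                   # 1
print(DATA_TYPE_HEADERS['CUST'].create_sql())  # CREATE TABLE customers (...)
```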
{
"source": "jonathanjfshaw/timeflux",
"score": 3
} |
#### File: timeflux/core/node.py
```python
import re
import logging
from abc import ABC, abstractmethod
from timeflux.core.io import Port
class Node(ABC):
def __new__(cls, *args, **kwargs):
"""Create instance and initialize the logger."""
instance = super().__new__(cls)
instance.logger = logging.getLogger('timeflux.' + cls.__module__ + '.' + cls.__name__)
instance.ports = {}
return instance
def __init__(self):
"""Instantiate the node."""
pass
def __getattr__(self, name):
"""Create input and output ports on the fly.
Args:
name (string): The name of the port, prefixed with `i_` (input) or `o_` (output).
Default ports are also allowed (`i` or `o`).
Returns:
Port: The newly created port.
"""
if name == 'i' or name.startswith('i_') or name == 'o' or name.startswith('o_'):
self.ports[name] = Port()
setattr(self, name, self.ports[name])
return self.ports[name]
raise AttributeError(f"type object '{type(self).__name__}' has no attribute '{name}'")
def iterate(self, name='*'):
"""Iterate through ports.
If ``name`` ends with the globbing character (`*`), the generator iterates through all existing
ports matching that pattern. Otherwise, only one port is returned. If it does not already exist,
it is automatically created.
Args:
name (string): The matching pattern.
Yields:
            (tuple): A tuple containing:
* name (`string`): The full port name.
* suffix (`string`): The part of the name matching the globbing character.
* port (`Port`): The port object.
"""
if name.endswith('*'):
skip = len(name) - 1
name = name[:-1]
for key, port in self.ports.items():
if key.startswith(name):
yield key, key[skip:], port
else:
yield name, '', getattr(self, name)
def clear(self):
"""Reset all ports.
It is assumed that numbered ports (i.e. those with a name ending with an underscore followed by numbers)
are temporary and must be completely removed. All other ports are simply emptied to avoid the cost of
        reinstantiating a new `Port` object before each update.
"""
if not hasattr(self, '_re_dynamic_port'):
self._re_dynamic_port = re.compile('.*_[0-9]+$')
remove = []
for name, port in self.ports.items():
port.clear()
if self._re_dynamic_port.match(name):
remove.append(name)
for name in remove:
del self.ports[name]
delattr(self, name)
@abstractmethod
def update(self):
"""Update the input and output ports."""
pass
def terminate(self):
"""Perform cleanup upon termination."""
pass
```
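A minimal node built on the base class above, shown only as a sketch of the pattern: ports are created lazily by `__getattr__`, so `self.i` and `self.o` exist as soon as they are touched.
```python
from timeflux.core.node import Node

class Passthrough(Node):
    """Copy the default input port to the default output port."""

    def update(self):
        # Port.ready() is true when the port has received data for this cycle.
        if self.i.ready():
            self.o.data = self.i.data
            self.o.meta = self.i.meta
```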
#### File: timeflux/helpers/background.py
```python
import sys
import time
import logging
import traceback
import zmq
from subprocess import Popen
class Runner():
""" Background base class. Provides common methods.
.. warning::
Do not use directly!
"""
def __init__(self):
self.logger = logging.getLogger(__name__)
def _send(self, data):
try:
self._socket.send_pyobj(data, copy=False)
except zmq.ZMQError as e:
self.logger.error(e)
def _receive(self, blocking=True):
flag = 0 if blocking else zmq.NOBLOCK
try:
return self._socket.recv_pyobj(flag)
except zmq.ZMQError:
pass # No data
class Task(Runner):
""" Background task.
Launch a 0MQ PAIR server, start a client and dispatch the task.
Attributes:
done (bool): Indicates if the task is complete.
Args:
instance (object): A picklable class instance.
method (string): The method name to call from the instance.
*args: Arbitrary variable arguments to be passed to the method.
**kwargs: Arbitrary keyword arguments to be passed to the method.
"""
def __init__(self, instance, method, *args, **kwargs):
super().__init__()
context = zmq.Context()
self._socket = context.socket(zmq.PAIR)
self._port = self._socket.bind_to_random_port('tcp://127.0.0.1')
self.done = False
self.instance = instance
self.method = method
self.args = args
self.kwargs = kwargs
def start(self):
"""Run the task."""
self._process = Popen(['python', '-m', __name__, str(self._port)])
self._send({'instance': self.instance, 'method': self.method, 'args': self.args, 'kwargs': self.kwargs})
return self
def stop(self):
"""Terminate the task."""
self._process.kill()
self.done = True
def status(self):
"""Get the task status.
Returns:
`None` if the task is not complete or a dict containing the following keys.
- ``success``: A boolean indicating if the task ran successfully.
- ``instance``: The (possibly modified) instance.
- ``result``: The result of the method call, if `success` is `True`.
- ``exception``: The exception, if `success` is `False`.
- ``traceback``: The traceback, if `success` is `False`.
- ``time``: The time it took to run the task.
"""
response = self._receive(False)
if response is not None:
self.done = True
return response
class Worker(Runner):
""" Background worker. Connects to the server and executes the task.
.. warning::
Do not use directly!
"""
def __init__(self, port):
super().__init__()
self.logger = logging.getLogger(__name__)
context = zmq.Context()
self._socket = context.socket(zmq.PAIR)
self._socket.connect(f'tcp://127.0.0.1:{port}')
def execute(self):
"""Get the task from the socket and run it."""
response = {}
start = time.perf_counter()
try:
data = self._receive()
result = getattr(data['instance'], data['method'])(*data['args'], **data['kwargs'])
response['instance'] = data['instance']
response['result'] = result
response['success'] = True
except Exception as e:
response['exception'] = e
response['traceback'] = traceback.format_tb(e.__traceback__)
response['success'] = False
response['time'] = time.perf_counter() - start
self._send(response)
if __name__ == '__main__':
if len(sys.argv) == 1: sys.exit()
port = sys.argv[1]
Worker(port).execute()
```
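A hypothetical usage sketch of `Task`: the instance and its arguments are pickled across a 0MQ socket, so the instance's class must be importable by the worker subprocess (a plain string is used here for that reason), and timeflux itself must be installed so that `python -m timeflux.helpers.background` resolves.
```python
import time
from timeflux.helpers.background import Task

task = Task('hello', 'upper').start()  # calls 'hello'.upper() in a separate process
status = None
while not task.done:
    status = task.status()
    time.sleep(0.1)
print(status['success'], status['result'])  # True HELLO
```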
#### File: timeflux/helpers/mne.py
```python
import pandas as pd
import numpy as np
import xarray as xr
import logging
logger = logging.getLogger()
from timeflux.core.exceptions import WorkerInterrupt
try:
import mne
except ModuleNotFoundError:
logger.error('MNE is not installed')
def _context_to_id(context, context_key, event_id):
if context_key is None:
return context
else:
return event_id.get(context.get(context_key))
def xarray_to_mne(data, meta, context_key, event_id, reporting='warn',
ch_types='eeg', **kwargs):
""" Convert DataArray and meta into mne Epochs object
Args:
data (DataArray): Array of dimensions ('epoch', 'time', 'space')
meta (dict): Dictionary with keys 'epochs_context', 'rate', 'epochs_onset'
context_key (str|None): key to select the context label.
If the context is a string, `context_key` should be set to ``None``.
event_id (dict): Associates context label to an event_id that should be an int. (eg. dict(auditory=1, visual=3))
reporting ('warn'|'error'| None): How this function handles epochs with invalid context:
- 'error' will raise a TimefluxException
- 'warn' will print a warning with :py:func:`warnings.warn` and skip the corrupted epochs
- ``None`` will skip the corrupted epochs
        ch_types (list|str): Channel type(s) passed to mne.create_info; a single string is applied to all channels. Default: `eeg`.
Returns:
epochs (mne.Epochs): mne object with the converted data.
"""
if isinstance(ch_types, str): ch_types = [ch_types] * len(data.space)
if isinstance(data, xr.DataArray):
pass
elif isinstance(data, xr.Dataset):
# extract data
data = data.data
else:
        raise ValueError(f'data should be of type DataArray or Dataset, received {type(data)} instead. ')
_dims = data.coords.dims
if 'target' in _dims:
np_data = data.transpose('target', 'space', 'time').values
elif 'epoch' in _dims:
np_data = data.transpose('epoch', 'space', 'time').values
else:
raise ValueError(f'Data should have either `target` or `epoch` in its coordinates. Found {_dims}')
    # create the MNE events array; events are numpy arrays with three columns:
# event_sample | previous_event_id | event_id
events = np.array([[onset.value, 0, _context_to_id(context, context_key, event_id)]
for (context, onset)
                       in zip(meta['epochs_context'], meta['epochs_onset'])])  # one row per epoch
events_mask = np.isnan(events.astype(float))[:, 2]
if events_mask.any():
if reporting == 'error':
raise WorkerInterrupt(f'Found {events_mask.sum()} epochs with corrupted context. ')
else: # reporting is either None or warn
            # silently skip the corrupted events
events = events[~events_mask, :]
np_data = np_data[~events_mask, :, :]
if reporting == 'warn':
logger.warning(f'Found {events_mask.sum()} epochs with corrupted context. '
f'Skipping them. ')
# Fill the second column with previous event ids.
events[0, 1] = events[0, 2]
events[1:, 1] = events[0:-1, 2]
# set the info
rate = meta['rate']
info = mne.create_info(ch_names=list(data.space.values), sfreq=rate,
ch_types=ch_types)
# construct the mne object
epochs = mne.EpochsArray(np_data, info=info, events=events.astype(int),
event_id=event_id,
tmin=data.time.values[0] / np.timedelta64(1, 's'),
verbose=False, **kwargs)
return epochs
def mne_to_xarray(epochs, context_key, event_id, output='dataarray'):
""" Convert mne Epochs object into DataArray along with meta.
Args:
epochs (mne.Epochs): mne object with the converted data.
context_key (str|None): key to select the context label.
If the context is a string, `context_key` should be set to ``None``.
event_id (dict): Associates context label to an event_id that should be an int.
(eg. dict(auditory=1, visual=3))
output (str): type of the expected output (DataArray or Dataset)
Returns:
data (DataArray|Dataset): Array of dimensions ('epoch', 'time', 'space')
meta (dict): Dictionary with keys 'epochs_context', 'rate', 'epochs_onset'
"""
reversed_event_id = {value: key for (key, value) in event_id.items()}
np_data = epochs._data
ch_names = epochs.ch_names
epochs_onset = [pd.Timestamp(event_sample) for event_sample in epochs.events[:, 0]]
epochs_context = [{context_key: reversed_event_id[_id]} for _id in epochs.events[:, 2]]
meta = dict(epochs_onset=epochs_onset,
epochs_context=epochs_context,
rate=epochs.info['sfreq'])
n_epochs = len(epochs)
times = pd.TimedeltaIndex(data=epochs.times, unit='s')
data = xr.DataArray(np_data,
dims=('epoch', 'space', 'time'),
coords=(np.arange(n_epochs), ch_names, times)).transpose('epoch', 'time', 'space')
if output == 'dataarray':
return data, meta
else: # output == 'dataset'
data = xr.Dataset({'data': data, 'target': [reversed_event_id[_id]
for _id in epochs.events[:, 2]]})
return data, meta
```
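A hypothetical round trip through the converters above, using a tiny synthetic epoched DataArray; the onsets are encoded as small integer timestamps since the onset's `Timestamp.value` becomes the MNE event sample.
```python
import numpy as np
import pandas as pd
import xarray as xr
from timeflux.helpers.mne import xarray_to_mne, mne_to_xarray

n_epochs, n_times, rate = 4, 50, 250.0
times = pd.TimedeltaIndex(data=np.arange(n_times) / rate, unit='s')
data = xr.DataArray(np.random.randn(n_epochs, n_times, 2),
                    dims=('epoch', 'time', 'space'),
                    coords=(np.arange(n_epochs), times, ['C3', 'C4']))
meta = {'rate': rate,
        'epochs_onset': [pd.Timestamp(i) for i in range(n_epochs)],
        'epochs_context': [{'label': l} for l in ('left', 'right', 'left', 'right')]}
event_id = dict(left=1, right=2)

epochs = xarray_to_mne(data, meta, 'label', event_id)   # mne.EpochsArray
data2, meta2 = mne_to_xarray(epochs, 'label', event_id)
```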
#### File: timeflux/helpers/testing.py
```python
import numpy as np
import pandas as pd
import xarray as xr
class DummyData():
"""Generate dummy data."""
def __init__(self,
num_rows=1000,
num_cols=5,
cols=None,
rate=10,
jitter=.05,
start_date='2018-01-01',
seed=42,
round=6):
"""
Initialize the dataframe.
Args:
num_rows (int): Number of rows
num_cols (int): Number of columns
cols (list): List of column names
rate (float): Frequency, in Hertz
jitter (float): Amount of jitter, relative to rate
start_date (string): Start date
seed (int): Seed for random number generation
round (int): Number of decimals for random numbers
"""
np.random.seed(seed)
frequency = 1 / rate
indices = pd.date_range(
start=start_date,
periods=num_rows,
freq=pd.DateOffset(seconds=frequency))
jitter = frequency * jitter
deltas = pd.to_timedelta(
np.random.uniform(-jitter, jitter, num_rows), unit='s')
indices = indices + deltas
if cols is not None:
num_cols = len(cols)
rows = np.random.rand(num_rows, num_cols).round(round)
self._data = pd.DataFrame(rows, indices)
if cols is not None:
self._data.columns = cols
self._cursor = 0
def next(self, num_rows=10):
"""
Get the next chunk of data.
Args:
num_rows (int): Number of rows to fetch
"""
start = self._cursor
stop = start + num_rows
self._cursor += num_rows
return self._data[start:stop]
def reset(self):
"""
Reset the cursor.
"""
self._cursor = 0
class DummyXArray():
"""Generate dummy data of type XArray."""
def __init__(self,
num_time=1000,
num_space=5,
rate=10,
jitter=.05,
start_date='2018-01-01',
seed=42,
round=6
):
"""
Initialize the dataframe.
Args:
num_time (int): Number of rows
num_space (int): Number of columns
rate (float): Frequency, in Hertz
jitter (float): Amount of jitter, relative to rate
start_date (string): Start date
seed (int): Seed for random number generation
round (int): Number of decimals for random numbers
"""
np.random.seed(seed)
frequency = 1 / rate
times = pd.date_range(
start=start_date,
periods=num_time,
freq=pd.DateOffset(seconds=frequency))
jitter = frequency * jitter
deltas = pd.to_timedelta(
np.random.uniform(-jitter, jitter, num_time), unit='s')
times = times + deltas
locs = np.arange(num_space)
data = np.random.rand(num_time, num_space).round(round)
self._data = xr.DataArray(data, coords=[times, locs], dims=['time', 'space'])
self._cursor = 0
def next(self, num_rows=10):
"""
Get the next chunk of data.
Args:
num_rows (int): Number of rows to fetch
"""
start = self._cursor
stop = start + num_rows
self._cursor += num_rows
return self._data.isel({'time': np.arange(start, stop)})
def reset(self):
"""
Reset the cursor.
"""
self._cursor = 0
class ReadData():
"""Generate custom data."""
def __init__(self, data):
"""
Initialize the dataframe.
Args:
data (DataFrame): custom data to stream.
"""
self._data = data
self._cursor = 0
def next(self, num_rows=10):
"""
Get the next chunk of data.
Args:
num_rows (int): Number of rows to fetch
"""
start = self._cursor
stop = start + num_rows
self._cursor += num_rows
return self._data.iloc[start:stop]
def reset(self):
"""
Reset the cursor.
"""
self._cursor = 0
class Looper():
""" Mimics the scheduler behavior to allow testing the output of a node offline.
"""
def __init__(self, generator, node, input_port='i', output_port='o'):
""" Initialize the helper
        :param generator (Object): data generator object with `next` and `reset` methods
        :param node (Node): timeflux node to test
        :param input_port (str): name of the node's input port to feed
        :param output_port (str): name of the node's output port to collect
"""
self._generator = generator
self._node = node
self._input_port = input_port
self._output_port = output_port
def run(self, chunk_size=None):
""" Loop across chunks of a generator, update the node and return data and meta.
:param chunk_size (int): number of samples per chunk
:return:
output_data (DataFrame): concatenated output data
output_meta: list of meta
"""
chunk_size = chunk_size or len(self._generator._data)
# mimic the scheduler
end_of_data = False
output_data = []
output_meta = []
while not end_of_data:
self._node.clear()
chunk = self._generator.next(chunk_size)
i = getattr(self._node, self._input_port)
i.data = chunk.copy()
self._node.update()
o = getattr(self._node, self._output_port)
output_data.append(o.data)
output_meta.append(o.meta)
end_of_data = chunk.empty
output_data = pd.concat(output_data)
return output_data, output_meta
```
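A small sketch of `Looper` driving a trivial node over `DummyData` chunks; the `Identity` node is ad hoc, defined only for this example.
```python
from timeflux.core.node import Node
from timeflux.helpers.testing import DummyData, Looper

class Identity(Node):
    def update(self):
        self.o.data = self.i.data

looper = Looper(generator=DummyData(num_rows=50), node=Identity())
output_data, output_meta = looper.run(chunk_size=10)
print(len(output_data))  # 50
```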
#### File: timeflux/nodes/accumulate.py
```python
import pandas as pd
import xarray as xr
from timeflux.core.node import Node
class AppendDataFrame(Node):
"""Accumulates and appends data of type DataFrame after a gate.
This node should be plugged after a Gate. As long as it receives data,
it appends them to an internal buffer. When it receives a meta with key
    `gate_status` set to `closed`, it releases the accumulated data and empties the
buffer.
Attributes:
i (Port): Default data input, expects DataFrame and meta
o (Port): Default output, provides DataFrame
Args:
        meta_keys (list|None): list of meta keys whose values are concatenated across the accumulated chunks; if `None`, the raw list of metas is returned under the key `accumulate`.
        **kwargs: keyword arguments to pass to the pandas.DataFrame.append method.
"""
def __init__(self, meta_keys=None, **kwargs):
super().__init__()
self._meta_keys = meta_keys
self._kwargs = kwargs
self._reset()
def _reset(self):
self._data = pd.DataFrame()
self._meta = []
def _release(self):
self.logger.info(f'AppendDataFrame is releasing {len(self._data)} '
f'accumulated rows.')
self.o.data = self._data
if self._meta_keys is None:
self.o.meta = {'accumulate': self._meta}
else:
self.o.meta = {key: [] for key in self._meta_keys}
for meta_key in self._meta_keys:
for meta in self._meta:
self.o.meta[meta_key] += meta.get(meta_key, [])
def update(self):
gate_status = self.i.meta.get('gate_status')
if self.i.ready():
# update the meta
self._meta.append(self.i.meta)
# append the data
self._data = self._data.append(self.i.data, **self._kwargs)
        # if the gate is closed, release the data and reset the buffer
if gate_status == 'closed' and not self._data.empty:
self._release()
self._reset()
class AppendDataArray(Node):
"""Accumulates and appends data of type XArray after a gate.
This node should be plugged after a Gate. As long as it receives DataArrays,
it appends them to a buffer list. When it receives a meta with key
`gate_status` set to `closed`, it concatenates the list of accumulated DataArray,
    releases it and empties the buffer list.
Attributes:
i (Port): Default data input, expects DataArray and meta
o (Port): Default output, provides DataArray
Args:
dim: Name of the dimension to concatenate along.
        meta_keys (list|None): list of meta keys whose values are concatenated across the accumulated chunks; if `None`, the raw list of metas is returned under the key `accumulate`.
        **kwargs: keyword arguments to pass to the xarray.concat method.
"""
def __init__(self, dim, meta_keys=None, **kwargs):
super().__init__()
self._dim = dim
self._meta_keys = meta_keys
self._kwargs = kwargs
self._reset()
def _reset(self):
self._data_list = []
self._meta = []
def _release(self):
self.logger.info(f'AppendDataArray is releasing {len(self._data_list)} '
f'accumulated data chunks.')
self.o.data = xr.concat(self._data_list, self._dim, **self._kwargs)
if self._meta_keys is None:
self.o.meta = {'accumulate': self._meta}
else:
self.o.meta = {key: [] for key in self._meta_keys}
for meta_key in self._meta_keys:
for meta in self._meta:
self.o.meta[meta_key] += meta.get(meta_key, [])
def update(self):
gate_status = self.i.meta.get('gate_status')
# append the data
if self.i.ready():
self._data_list.append(self.i.data)
# update the meta
self._meta.append(self.i.meta)
        # if the gate is closed, release the data and reset the buffer
if gate_status == 'closed' and self._data_list:
self._release()
self._reset()
```
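A quick sketch of `AppendDataFrame` driven by hand; in a real graph the scheduler clears the ports between updates, so here the meta is set manually to emulate the upstream gate closing. Note the node itself relies on `DataFrame.append`, so an older pandas (pre-2.0) is assumed.
```python
from timeflux.helpers.testing import DummyData
from timeflux.nodes.accumulate import AppendDataFrame

gen = DummyData(num_rows=20)
node = AppendDataFrame()

node.i.data = gen.next(10)
node.update()                              # buffered, nothing released yet

node.i.data = gen.next(10)
node.i.meta = {'gate_status': 'closed'}    # emulate the gate closing
node.update()
print(len(node.o.data))                    # 20
```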
#### File: timeflux/nodes/apply.py
```python
from timeflux.core.node import Node
from importlib import import_module
import pandas as pd
class ApplyMethod(Node):
"""Apply a function along an axis of the DataFrame.
This node applies a function along an axis of the DataFrame.
Objects passed to the function are Series objects whose index is
either the DataFrame's index (``axis`` = `0`) or the DataFrame's columns
(``axis`` = `1`).
Attributes:
i (Port): default data input, expects DataFrame.
o (Port): default output, provides DataFrame.
Args:
func (func): custom function, specified directly, that takes an n_array as input (e.g. lambda x: x + 1). Default: `None`.
method (str): name of the function to import, including its module, e.g. `numpy.mean`.
apply_mode (str): {`universal`, `reduce`, `expand`}. Default: `universal`.
- `universal` if the function is a transformation from n_array to n_array
- `reduce` if the function is a transformation from n_array to scalar
- `expand` if the function is a transformation from n_array to nk_array [not yet implemented]
axis (int): if `0`, the transformation is applied to columns; if `1`, to rows. Default: `0`.
closed (str): {`left`, `right`, `center`}: timestamp to keep in the output, only when ``apply_mode`` is `reduce` and ``axis`` = `0`, in which case the output port's length is 1. Default: `right`.
kwargs: additional keyword arguments to pass to `func`.
Notes:
Note that the passed function will receive ndarray objects for performance purposes.
For universal functions, i.e. transformations from n_array to n_array, input and output ports have the same size.
For reducing functions, i.e. from n_array to scalar, the output port's index is set to the first (if ``closed`` = `left`), last (if ``closed`` = `right`), or middle (if ``closed`` = `center`) timestamp of the input chunk.
.. todo::
Allow expanding functions such as n_array to nk_array (with XArray usage)
Example:
Universal function: in this example, we apply `numpy.sqrt` to each value of the data. Shapes of input and output data are the same.
* ``method`` = `numpy.sqrt`
* ``apply_mode`` = `universal`
If data in input port is ``i`` is: ::
0
2018-10-25 07:33:41.871131 9.0
2018-10-25 07:33:41.873084 16.0
2018-10-25 07:33:41.875037 1.0
2018-10-25 07:33:41.876990 4.0
It returns the squared root of the data on port ``o``: ::
0
2018-10-25 07:33:41.871131 3.0
2018-10-25 07:33:41.873084 4.0
2018-10-25 07:33:41.875037 1.0
2018-10-25 07:33:41.876990 2.0
Example:
Reducing function: in this example, we apply `numpy.sum` to each value of the data. Shapes of input and output data are not the same.
We set:
* ``method`` = `numpy.sum`
* ``apply_mode`` = `reduce`
* ``axis`` = `0`
* ``closed`` = `right`
If data in input port is ``i`` is: ::
0 1
2018-10-25 07:33:41.871131 9.0 10.0
2018-10-25 07:33:41.873084 16.0 2.0
2018-10-25 07:33:41.875037 1.0 5.0
2018-10-25 07:33:41.876990 4.0 2.0
It returns the sum amongst row axis on port ``o``: ::
0 1
2018-10-25 07:33:41.876990 30.0 19.0
References:
See the documentation of `pandas.apply <https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.apply.html>`_ .
"""
def __init__(self, method, apply_mode='universal',
axis=0, closed='right', func=None, **kwargs):
self._axis = axis
self._closed = closed
self._kwargs = kwargs
self._apply_mode = apply_mode
if func is not None:
self._func = func
else:
module_name, function_name = method.rsplit('.', 1)
try:
module = import_module(module_name)
except ImportError:
raise ImportError(f'Could not import module {module_name}')
try:
self._func = getattr(module, function_name)
except AttributeError:
raise ValueError(f'Module {module_name} has no function {function_name}')
if not callable(self._func):
raise ValueError(f'Could not call the method {method}')
self._kwargs.update({'raw': True, 'axis': axis})
if self._apply_mode == 'reduce':
self._kwargs['result_type'] = 'reduce'
def update(self):
if not self.i.ready():
return
self.o.meta = self.i.meta
self.o.data = self.i.data.apply(func=self._func, **self._kwargs)
if self._apply_mode == 'reduce':
if self._axis == 0:
if self._closed == 'right':
index_to_keep = self.i.data.index[-1]
elif self._closed == 'left':
index_to_keep = self.i.data.index[0]
else: # self._closed == 'middle':
index_to_keep = self.i.data.index[len(self.i.data) // 2]
self.o.data = pd.DataFrame(self.o.data, columns=[index_to_keep]).T
else: # self._axis == 1:
self.o.data = self.o.data.to_frame()
```
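As a rough illustration of the `reduce` path above, the pandas-only sketch below reproduces what `ApplyMethod(method='numpy.sum', apply_mode='reduce', axis=0, closed='right')` does to a chunk; the sample timestamps and values are made up.
```python
import numpy as np
import pandas as pd

index = pd.to_datetime(['2018-10-25 07:33:41.871131',
                        '2018-10-25 07:33:41.873084',
                        '2018-10-25 07:33:41.876990'])
chunk = pd.DataFrame({0: [9.0, 16.0, 4.0], 1: [10.0, 2.0, 2.0]}, index=index)

# column-wise reduction, as in ApplyMethod.update()
reduced = chunk.apply(func=np.sum, raw=True, axis=0, result_type='reduce')

# keep only the right-most timestamp, as closed='right' does
out = pd.DataFrame(reduced, columns=[chunk.index[-1]]).T
print(out)
```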
#### File: timeflux/nodes/query.py
```python
import numpy as np
from timeflux.core.exceptions import WorkerInterrupt
from timeflux.core.node import Node
class SelectRange(Node):
"""Select a subset of the given data along vertical (index) or horizontal (columns) axis.
Attributes:
i (Port): default data input, expects DataFrame with eventually MultiIndex.
o (Port): default output, provides DataFrame with eventually MultiIndex.
Args:
ranges (dict): Dict whose keys are level names and whose values are selection ranges.
axis (int): If 0, the level concerns row index, if 1, columns index (`0` or `1`). Default: `0`.
inclusive (bool) : Whether the boundaries are strict or included. Default: `False`.
Example:
In this example, we have an input DataFrame with multi level columns and we want to select data with index from level of name `second` in range `[1,1.5]`.
We set:
* ``ranges`` = `{"second": [1, 1.5]}`
* ``axis`` = `1`
* ``inclusive`` = `True`
If the data received on port ``i`` is: ::
first A ... B
second 1.3 1.6 1.9 1.3 1.6 1.9
2017-12-31 23:59:59.998745401 0.185133 0.541901 0.806561 ... 0.732225 0.806561 0.658783
2018-01-01 00:00:00.104507143 0.692277 0.849196 0.987668 ... 0.489425 0.221209 0.987668
2018-01-01 00:00:00.202319939 0.944059 0.039427 0.567945 ... 0.925248 0.180575 0.567945
The data provided on port ``o`` will be: ::
first A B
second 1.3 1.3
2017-12-31 23:59:59.998745401 0.185133 0.732225
2018-01-01 00:00:00.104507143 0.692277 0.489425
2018-01-01 00:00:00.202319939 0.944059 0.925248
"""
def __init__(self, ranges, axis=0, inclusive=False):
self._ranges = ranges # list of ranges per level
self._inclusive = inclusive # include boundaries.
self._axis = axis
def update(self):
if not self.i.ready():
return
self.o.meta = self.i.meta
if self._axis == 1:
self.i.data = self.i.data.T
mask = self._mask()
self.o.data = self.i.data[np.logical_and.reduce(mask)]
if self._axis == 1:
self.o.data = self.o.data.T
def _mask(self):
if self._inclusive:
mask = [(self.i.data.index.get_level_values(l) >= r[0]) &
(self.i.data.index.get_level_values(l) <= r[1])
for l, r in (self._ranges).items() if r is not None]
else:
mask = [(self.i.data.index.get_level_values(l) > r[0]) &
(self.i.data.index.get_level_values(l) < r[1])
for l, r in (self._ranges).items() if r is not None]
return mask
class XsQuery(Node):
"""Returns a cross-section (row(s) or column(s)) from the data.
Attributes:
i (Port): default input, expects DataFrame with eventually MultiIndex.
o (Port): default output, provides DataFrame with eventually MultiIndex.
Args:
key (str|tuple): Some label contained in the index, or partially in a MultiIndex index.
axis (int): Axis to retrieve cross-section on (`0` or `1`). Default: `0`.
level (str|int|tuple) : In case of a key partially contained in a MultiIndex, indicates which levels are used. Levels can be referred by label or position.
drop_level (bool) : If False, returns DataFrame with same level. Default: `False`.
Example:
In this example, we have an input DataFrame with multi level columns and we want to select cross section between `B` from level of name `first` and `1` from level of name `second`.
We set:
* ``key`` = `("B", 1)`
* ``axis`` = `1`
* ``level`` = `["first", "second"]`
* ``drop_level`` = `False`
If the data received on port ``i`` is: ::
first A ... B
second 1 2 ... 1 2
2017-12-31 23:59:59.998745401 0.185133 0.541901 ... 0.297349 0.806561
2018-01-01 00:00:00.104507143 0.692277 0.849196 ... 0.844549 0.221209
2018-01-01 00:00:00.202319939 0.944059 0.039427 ... 0.120567 0.180575
The data provided on port ``o`` will be: ::
first B
second 1
2018-01-01 00:00:00.300986584 0.297349
2018-01-01 00:00:00.396560186 0.844549
2018-01-01 00:00:00.496559945 0.120567
References:
See the documentation of `pandas.DataFrame.xs <https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.xs.html>`_ .
"""
def __init__(self, key, **kwargs):
"""
Args:
key (str|tuple): Some label contained in the index, or partially in a MultiIndex index.
kwargs: Keyword arguments to call pandas xs method: axis, level, drop_level
"""
self._key = key
self._kwargs = kwargs
self._ready = False
def update(self):
if not self.i.ready():
return
self.o.meta = self.i.meta
if not self._ready:
try:
self._query()
self._ready = True
except KeyError as e:
raise WorkerInterrupt(e)
else:
self._query()
def _query(self):
self.o.data = self.i.data.xs(key=self._key, **self._kwargs)
class LocQuery(Node):
"""Slices DataFrame on group of rows and columns by label(s)
Attributes:
i (Port): default data input, expects DataFrame.
o (Port): default output, provides DataFrame.
Args:
key (str|list|tuple): Label selection specification.
axis (int): Axis to query the label from (`0` or `1`). Default: `1`.
Example:
In this example, we have an input DataFrame with 5 columns `[A, B, C, D, E]` and we want to select columns A and E.
We set:
* ``key`` = `["A", "E"]`
* ``axis`` = `1`
If the data received on port ``i`` is: ::
A B ... E F
2017-12-31 23:59:59.998745401 0.185133 0.541901 ... 0.806561 0.658783
2018-01-01 00:00:00.104507143 0.692277 0.849196 ... 0.221209 0.987668
2018-01-01 00:00:00.202319939 0.944059 0.039427 ... 0.180575 0.567945
The data provided on port ``o`` will be: ::
A E
2017-12-31 23:59:59.998745401 0.185133 0.806561
2018-01-01 00:00:00.104507143 0.692277 0.221209
2018-01-01 00:00:00.202319939 0.944059 0.180575
References:
See the documentation of `pandas.DataFrame.loc <https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.loc.html>`_ .
"""
def __init__(self, key, axis=1):
self._axis = axis
if not isinstance(key, (list, tuple)):
self._key = [key]
else:
self._key = key
self._ready = False
def update(self):
if not self.i.ready():
return
self.o.meta = self.i.meta
if not self._ready:
try:
self._query()
self._ready = True
except KeyError as e:
raise WorkerInterrupt(e)
else:
self._query()
def _query(self):
if self._axis == 0:
self.o.data = self.i.data.loc[self._key, :]
else: # self._axis == 1:
self.o.data = self.i.data.loc[:, self._key]
``` |
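The three nodes above are thin wrappers around pandas selection; the short sketch below, with made-up data, shows the underlying `xs` and `loc` calls that `XsQuery` and `LocQuery` delegate to.
```python
import numpy as np
import pandas as pd

columns = pd.MultiIndex.from_product([['A', 'B'], [1, 2]],
                                     names=['first', 'second'])
index = pd.date_range('2018-01-01', periods=3, freq='100ms')
df = pd.DataFrame(np.random.rand(3, 4), index=index, columns=columns)

# XsQuery(key=('B', 1), axis=1, level=['first', 'second'], drop_level=False)
cross_section = df.xs(key=('B', 1), axis=1, level=['first', 'second'],
                      drop_level=False)

# LocQuery(key=['A'], axis=1) -- here the key is a top-level column label
subset = df.loc[:, ['A']]

print(cross_section)
print(subset)
```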
{
"source": "jonathanj/fugue",
"score": 2
} |
#### File: jonathanj/fugue/setup.py
```python
import os
import codecs
import versioneer
from setuptools import setup, find_packages
HERE = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
with codecs.open(os.path.join(HERE, *parts), 'rb', 'utf-8') as f:
return f.read()
setup(
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
name='fugue',
description='Contrapuntal composition for HTTP',
license='Expat',
url='https://github.com/jonathanj/fugue',
author='<NAME>',
author_email='<EMAIL>',
maintainer='<NAME>',
maintainer_email='<EMAIL>',
include_package_data=True,
long_description=read('README.rst'),
packages=find_packages(where='src'),
package_dir={'': 'src'},
zip_safe=True,
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Software Development :: Libraries :: Python Modules',
],
install_requires=[
'Twisted[tls]>=15.5.0',
'pyrsistent>=0.14.2',
'hyperlink>=18.0.0',
'multipart>=0.1',
'eliot>=1.3.0',
],
extras_require={
'test': [
'testrepository>=0.0.20',
'testtools>=2.3.0',
],
},
)
```
#### File: fugue/adapters/twisted.py
```python
from __future__ import absolute_import
from pyrsistent import pmap, v
from twisted.web.resource import IResource
from twisted.web.server import NOT_DONE_YET
from zope.interface import implementer
from fugue.chain import execute
from fugue.interceptors.twisted import twisted, TWISTED_REQUEST
@implementer(IResource)
class _TwistedAdapterResource(object):
isLeaf = True
def __init__(self, interceptors):
self._interceptors = interceptors
def render(self, request):
context = pmap({TWISTED_REQUEST: request})
execute(context, v(twisted()) + self._interceptors)
return NOT_DONE_YET
def putChild(self, path, child):
raise NotImplementedError()
def getChildWithDefault(self, path, request):
# When this resource is the root resource, for example when using
# `twistd web --resource-script`, this function will be called despite
# being a leaf resource.
return self
def twisted_adapter_resource(interceptors=v()):
"""
Create a Twisted ``IResource`` that executes a context and avoids as much
Twisted machinery as possible.
A ~`fugue.interceptors.twisted.twisted` interceptor will be attached to the
front of the queue to facilitate the interaction with Twisted.
"""
return _TwistedAdapterResource(interceptors)
__all__ = ['twisted_adapter_resource']
```
#### File: fugue/interceptors/basic.py
```python
from pyrsistent import field, PRecord
from fugue._keys import REQUEST, RESPONSE
from fugue.util import callable_name
class Interceptor(PRecord):
"""
An interceptor.
The three stages of execution are `enter`, `leave` and `error`. Each stage
is invoked with the context (and an `Error`, for the error stage) and is
expected to return a context or a `Deferred` that returns a context.
"""
name = field(mandatory=True, type=(bytes, unicode))
enter = field(initial=None)
leave = field(initial=None)
error = field(initial=None)
def _interceptor_func_name(*fs):
"""
Derive an interceptor name from one or more functions.
"""
return u' & '.join(callable_name(f) for f in fs if f)
def error_handler(f, name=None):
"""
An interceptor which calls a function during the error stage.
:param f: Callable to be called with a context and an error, producing a
new context.
:param unicode name: Interceptor name.
:rtype: Interceptor
"""
return Interceptor(
name=name or _interceptor_func_name(f),
error=f)
def before(f, name=None):
"""
An interceptor which calls a function during the enter stage.
:param f: Callable.
:param unicode name: Interceptor name.
:rtype: Interceptor
"""
return Interceptor(
name=name or _interceptor_func_name(f),
enter=lambda ctx: f(ctx))
def after(f, name=None):
"""
An interceptor which calls a function during the leave stage.
:param f: Callable.
:param unicode name: Interceptor name.
:rtype: Interceptor
"""
return Interceptor(
name=name or _interceptor_func_name(f),
leave=lambda ctx: f(ctx))
def around(f1, f2, name=None):
"""
An interceptor which calls a function during the enter stage and another
function during the leave stage.
:param f1: Callable.
:param f2: Callable.
:param unicode name: Interceptor name.
:rtype: Interceptor
"""
return Interceptor(
name=name or _interceptor_func_name(f1, f2),
enter=f1,
leave=f2)
def handler(f, name=None):
"""
An interceptor which calls a function on the context `REQUEST` value and
sets result as the context `RESPONSE` value.
:param f: Callable.
:param unicode name: Interceptor name.
:rtype: Interceptor
"""
return before(
lambda ctx: ctx.set(RESPONSE, f(ctx.get(REQUEST))),
name=name or _interceptor_func_name(f))
def middleware(f1, f2, name=None):
"""
An interceptor which calls a function on the context `REQUEST` value and
another function on the context `RESPONSE` value.
:param f1: Callable.
:param f2: Callable.
:param unicode name: Interceptor name.
:rtype: Interceptor
"""
return around(
(None
if f1 is None else
lambda context: context.transform([REQUEST], f1)),
(None
if f2 is None else
lambda context: context.transform([RESPONSE], f2)),
name=name or _interceptor_func_name(f1, f2))
def on_request(f, name=None):
"""
An interceptor which updates the context value of `REQUEST` during the
enter stage.
:param f: Callable to update the request.
:param unicode name: Interceptor name.
:rtype: Interceptor
"""
return middleware(f, None, name=name)
def on_response(f, name=None):
"""
An interceptor which updates the context value of `RESPONSE` during the
leave stage.
:param f: Callable to update the response.
:param unicode name: Interceptor name.
:rtype: Interceptor
"""
return middleware(None, f, name=name)
__all__ = [
'error_handler', 'before', 'after', 'around', 'handler', 'middleware',
'on_request', 'on_response',
]
```
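A hand-driven usage sketch, assuming `fugue` and its dependencies are importable in your interpreter (the `(bytes, unicode)` field type above suggests a Python 2 era codebase); the greeting handler and the upper-casing step are invented for illustration, and the stages are called directly instead of going through `fugue.chain.execute`.
```python
from pyrsistent import pmap
from fugue._keys import REQUEST, RESPONSE
from fugue.interceptors.basic import handler, on_response

# `handler` runs during the enter stage and fills in RESPONSE from REQUEST.
greet = handler(lambda request: u'Hello, ' + request.get('name', u'world'))
# `on_response` runs during the leave stage and rewrites RESPONSE.
shout = on_response(lambda response: response.upper())

context = pmap({REQUEST: pmap({'name': u'fugue'})})
context = greet.enter(context)   # RESPONSE == u'Hello, fugue'
context = shout.leave(context)   # RESPONSE == u'HELLO, FUGUE'
print(context[RESPONSE])
```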
#### File: test/interceptors/test_twisted.py
```python
import itertools
from hyperlink import URL
from testtools import TestCase
from testtools.matchers import ContainsDict, Equals, Is
from twisted.web.server import Request
from fugue.interceptors.nevow import _nevow_request_to_request_map
from fugue.test.interceptors.test_nevow import fake_nevow_request
def fake_twisted_request(*args, **kwargs):
"""
Create a fake Twisted `Request` instance for the purposes of testing.
"""
kwargs.setdefault(
'Request', lambda channel: Request(channel=channel, queued=False))
request = fake_nevow_request(*args, **kwargs)
request.finish = lambda: next(request.finish.counter)
request.finish.counter = itertools.count()
return request
class TwistedRequestToRequestMapTests(TestCase):
"""
Tests for `_nevow_request_to_request_map` on a Twisted request.
"""
def test_basic(self):
"""
Test basic request map keys.
"""
request = fake_twisted_request(request_headers={
b'x-foo': [b'bar'],
})
self.assertThat(
_nevow_request_to_request_map(request),
ContainsDict({
'content_type': Equals(b'application/octet-stream'),
'content_length': Equals(0),
'character_encoding': Is(None),
'headers': Equals({b'Content-Length': [0],
b'X-Foo': [b'bar'],
b'Host': [b'example.com']}),
'remote_addr': Equals(b'192.168.1.1'),
'request_method': Equals(b'GET'),
'server_name': Equals(b'example.com'),
'server_port': Equals(80),
'scheme': Equals(b'http'),
'uri': Equals(URL.from_text(u'/one'))}))
```
#### File: fugue/test/util.py
```python
from testtools import try_import
def depends_on(*names):
"""
Decorate a test method so that it is skipped unless all of ``names`` are
importable.
"""
def _depends_on(f):
def _depends_on_inner(self, *a, **kw):
for name in names:
if try_import(name) is None:
self.skipTest('"{}" dependency missing'.format(name))
return f(self, *a, **kw)
return _depends_on_inner
return _depends_on
``` |
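A hypothetical usage sketch for the decorator above; the test class and the module names passed to `depends_on` are placeholders.
```python
from testtools import TestCase
from fugue.test.util import depends_on

class OptionalDependencyTests(TestCase):
    @depends_on('json', 'some_optional_module')
    def test_with_optional_deps(self):
        # Skipped with a '"some_optional_module" dependency missing' message
        # unless every listed module can be imported.
        self.assertTrue(True)
```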
{
"source": "jonathan-JIPSlok/DataScience",
"score": 4
} |
#### File: DataScience/Manipulando_Json/main.py
```python
import json
from urllib.request import urlopen
dict = {"Nome":["Jonathan", "Claudete"], "Idade":["19", "42"]}
for key, value in dict.items():
print(key, value)
def Convert_objectJson_write():
with open("Arquivo.json", "w") as file:
file.write(json.dumps(dict))  # json.dumps() converts the dictionary into a JSON string
def read_Json():
with open("Arquivo_copia.json", "r") as file:
text = file.read()
data = json.loads(text)  # parse the JSON text read from the file
print(data)
def json_internet():
response = urlopen("URL.json").read().decode("utf8")  # read the file from the internet, decoded as UTF-8
data = json.loads(response)[0]
def transferindo_conteudo():
Infile = "Arquivo.json"  # file we will copy from
Outfile = "Arquivo_copia.json"  # file we will paste into
with open(Infile, "r") as infile:
text = infile.read()
with open(Outfile, "w") as outfile:
outfile.write(text)
def transferindo_conteudo_2metod():
Infile = "Arquivo.json"  # file we will copy from
Outfile = "Arquivo_copia.json"  # file we will paste into
open(Outfile, "w").write(open(Infile,"r").read())
``` |
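A short, hypothetical driver that could be appended to the module above; it assumes the script runs from a directory where `Arquivo.json` can be created, and it leaves `json_internet()` alone since its URL is a placeholder.
```python
if __name__ == "__main__":
    Convert_objectJson_write()   # write the dictionary to Arquivo.json
    transferindo_conteudo()      # copy it into Arquivo_copia.json
    read_Json()                  # parse the copy and print the data
```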
{
"source": "jonathan-JIPSlok/Materiais_Escolares_QRCode",
"score": 3
} |
#### File: Materiais_Escolares_QRCode/Manipulador/__init__.py
```python
import pandas as pd
import numpy as np
class File_Manipuling:
def __init__(self, File):
self.LOCAL_FILE = File
self.read_file()
def read_file(self) -> None:
"""Cria um dataframe com os dados do arquivo"""
self.__df = pd.read_csv(self.LOCAL_FILE, sep = ";", header = 0, encoding = "utf-8")
def transform_columns_value_br(self, columns) -> None:
"""transforma em valor brasileiro novamente"""
self.__df[columns] = self.__df[columns].applymap(lambda x: f"{x:.2f}")
self.__df[columns] = self.__df[columns].applymap(lambda x : self.__acrescentar_number(self.acrescentar_br_caractere(x)))
def __acrescentar_number(self, value):
"""Acrescenta um 0 aos valores que falta"""
if value[len(value) - 1] == ",":
value += "0"
return value
def acrescentar_br_caractere(self, value):
"""Acrescenta o R$ e a ,"""
if value != "NaN" and value != "nan":
value = f"R${value.replace('.', ',')}"
return value
def replace_columns(self, columns, data, new_data):
"""Subistitui dados de mais de uma coluna"""
self.__df[columns] = self.__df[columns].applymap(lambda x : x.replace(data, new_data))
def line_mean(self, Linha, Columns, Printar = False):
"""retorna a media da linha"""
mean = float(f"{self.__df.loc[Linha, Columns].mean():.2f}")
mean = self.acrescentar_br_caractere(self.__acrescentar_number(f"{mean:.2f}"))
if Printar == True:
print(f"{self.__df.iloc[Linha, 0]} \t Media:\t {mean}")
return mean
def lines_mean(self, Columns, Printar = False):
"""Retorna a media de valor dos produtos"""
Products = {}
cont = 0
for Column in range(18):
mean = self.line_mean(cont, Columns)
Products[self.__df.iloc[cont, 0]] = mean
cont += 1
if Printar == True:
print("Media".center(110))
[print("{:>47} \t\t{}".format(k, v)) for k, v in Products.items()]
return Products
def convert_columns_float32(self, Columns):
"""Converte uma coluna para float32"""
self.__df[Columns] = self.__df[Columns].astype(np.float32)
@property
def df(self):
"""Retorna o DataFrame"""
return self.__df
```
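A hedged usage sketch for the class above; the CSV path and the store column names are hypothetical and must match the real `;`-separated file (note that `lines_mean` iterates over the first 18 product rows).
```python
from Manipulador import File_Manipuling

# hypothetical file and column names -- adjust to the real dataset
manipulator = File_Manipuling("materiais.csv")
price_columns = ["Loja A", "Loja B", "Loja C"]

manipulator.convert_columns_float32(price_columns)      # ensure numeric dtype
medias = manipulator.lines_mean(price_columns, Printar=True)
manipulator.transform_columns_value_br(price_columns)   # back to "R$x,xx" strings
print(manipulator.df.head())
```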
#### File: Materiais_Escolares_QRCode/QRCode/__init__.py
```python
import qrcode
def QR(Data):
"""Cria a imagen com os dados"""
img = qrcode.make(Data)
return img
def Salvar(img, Local):
"""salva a imagem"""
img = img.save(Local)
return type(img)
``` |
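A short usage sketch; the payload text and output filename are placeholders, and the `qrcode` package must be installed.
```python
from QRCode import QR, Salvar

img = QR("https://example.com/lista-de-materiais")  # any text or URL to encode
Salvar(img, "lista.png")                            # write the image to disk
```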
{
"source": "jonathan-JIPSlok/Projeto_Gestao",
"score": 3
} |
#### File: jonathan-JIPSlok/Projeto_Gestao/Function.py
```python
import sqlite3
from pathlib import Path
from random import randint
from os.path import join as path_join
class SQDB(): # Database helper functions
def __init__(self):
super().__init__()
# Create or open the database folder
Pasta = Path.home() / path_join("MyAppPastaJP")
try:
pastinha = Pasta.mkdir()
except:
pass
self.connection = sqlite3.connect(Pasta / 'JestionApp.db')
self.cursor = self.connection.cursor()
self.CreateTables()
self.Tables_Alterations()
def Tables_Alterations(self):
try:
self.connection.execute('ALTER TABLE Objetos_Entregas ADD COLUMN Situacao text ')
except: pass
def CreateTables(self): # Create the tables
self.cursor.execute('CREATE TABLE IF NOT EXISTS Usuarios(Cod interger primary key, Nome text, Login text, Senha text, Cpf text, Rg text, Email text, Tel interger, Status text)')
self.cursor.execute('CREATE TABLE IF NOT EXISTS Permisseds(Cod interger primary key, Permisseds text)')
self.cursor.execute("CREATE TABLE IF NOT EXISTS Objetos_Entregas(Cod interger primary key, Nome text, RG text, CPF text, End text, CEP text, CodCliente text, Entregador text)")
def InsertObject_Entrega(self, Data): # Insert a new object to be delivered
cod = randint(100000000, 999999999)
if Data[6].isnumeric() == True:
Entregador = SQDB().GetUsers(Data[6], Cod_Name='Cod')
Entregador = Entregador[1]
else:
Entregador = SQDB().GetUsers(Data[6], Cod_Name='Nome')
Entregador = Entregador[1]
self.cursor.execute("INSERT INTO Objetos_Entregas(Cod, Nome, RG, CPF, End, CEP, CodCliente, Entregador, Situacao) Values(?,?,?,?,?,?,?,?,?)", (cod, Data[0], Data[1], Data[2], Data[3], Data[4], Data[5], Entregador, 'Em Aberto'))
self.connection.commit()
def InsertUsers(self, Data, Permissions = [1,1,1,1], Type = "User"):
if Type == "User": #Insere User no Banco de dados
cod = randint(100000000, 999999999)
self.cursor.execute('INSERT INTO Usuarios(Cod, Nome, Login, Senha, Cpf, Rg, Email, Tel, Status) VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)', (cod, Data[0], Data[1], Data[2], Data[3], Data[4], Data[5], Data[6], "User"))
self.cursor.execute('INSERT INTO Permisseds(Cod, Permisseds) VALUES(?, ?)', (cod, Permissions))
self.connection.commit()
if Type == "Adm": #Insere Adm no Banco de dados
self.cursor.execute('INSERT INTO Usuarios(Cod, Nome, Login, Senha, Cpf, Rg, Email, Tel, Status) VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)', (randint(100000000, 999999999), Data[0], Data[1], Data[2], Data[3], Data[4], Data[5], Data[6], "Adm"))
self.connection.commit()
def DelObjectEntrega(self, Cod):
self.cursor.execute("DELETE FROM Objetos_Entregas WHERE Cod = ?", (Cod, ))
self.connection.commit()
def GetUsers(self, Data, Type = "Default", Cod_Name = 0):
if Cod_Name == 0 or Cod_Name == 'Login':
Cod_Name = 0
elif Cod_Name == 1 or Cod_Name == "Cod":
Cod_Name = 1
elif Cod_Name == 2 or Cod_Name == "Nome":
Cod_Name = 2
if Type == "Default" and Cod_Name == 0: #Procurar um Adm ou User no Banco de dados
User = self.cursor.execute('SELECT * FROM Usuarios WHERE Login = ?', (Data, ))
elif Type == "Default" and Cod_Name == 1: #Procurar um Adm ou User no Banco de dados
User = self.cursor.execute('SELECT * FROM Usuarios WHERE Cod = ?', (Data, ))
elif Type == "Default" and Cod_Name == 2: #Procurar um Adm ou User no Banco de dados
User = self.cursor.execute('SELECT * FROM Usuarios WHERE Nome = ?', (Data, ))
elif Type == "Adm" and Cod_Name == 0: #Procura um adm no banco de dados
User = self.cursor.execute('SELECT * FROM Usuarios WHERE (Login, Status) = (?, ?)', (Data, "Adm"))
elif Type == "Adm" and Cod_Name == 1: #Procura um adm no banco de dados
User = self.cursor.execute('SELECT * FROM Usuarios WHERE (Cod, Status) = (?, ?)', (Data, "Adm"))
elif Type == "Adm" and Cod_Name == 2: #Procura um adm no banco de dados
User = self.cursor.execute('SELECT * FROM Usuarios WHERE (Nome, Status) = (?, ?)', (Data, "Adm"))
elif Type == "User" and Cod_Name == 0: #Procura um User no banco de dados
User = self.cursor.execute('SELECT * FROM Usuarios WHERE (Login, Status) = (?, ?)', (Data, "User"))
elif Type == "User" and Cod_Name == 1: #Procura um User no banco de dados
User = self.cursor.execute('SELECT * FROM Usuarios WHERE (Cod, Status) = (?, ?)', (Data, "User"))
elif Type == "User" and Cod_Name == 2: #Procura um User no banco de dados
User = self.cursor.execute('SELECT * FROM Usuarios WHERE (Nome, Status) = (?, ?)', (Data, "User"))
User = User.fetchall()
try:
return User[0]
except:
return User
def GetPermissions(self, Cod):
data = self.cursor.execute('SELECT Permisseds FROM Permisseds WHERE Cod = ?', (Cod, ))
data = data.fetchall()
return data[0][0]
def DellUsers(self, Cod):
self.cursor.execute("DELETE FROM Usuarios WHERE Cod = ?", (Cod, ))
self.cursor.execute("DELETE FROM Permisseds WHERE Cod = ?", (Cod, ))
self.connection.commit()
def AlterSituation_Entrega(self, Cod, Situation):
assert 'Entregue' in Situation or 'Em Aberto' in Situation or 'Endereço não encontrado' in Situation, 'Situacao Infomada Incorretamente!'
self.cursor.execute("UPDATE Objetos_Entregas SET Situacao = ? WHERE Cod = ?", (Situation, Cod))
self.connection.commit()
class VerifyDatas(): # Data validator
def __init__(self):
pass
def VerifyCampos(self, data, Type='Login'): # Check that all fields are filled in
Verify = True
for Item in data:
if len(str(Item)) != 0:
if Item != ' ': pass
else: Verify = False
else: Verify = False
if Type != "Login":
db = SQDB().GetUsers(data[0], Cod_Name='Nome')
try:
if db[1] == data[0]:
Verify = False
except: pass
return Verify
```
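A hedged end-to-end sketch of the helpers above; every value below is made up, and note that instantiating `SQDB` creates (or opens) a real `JestionApp.db` file inside a `MyAppPastaJP` folder in the user's home directory.
```python
import Function as Funcao

# hypothetical registration data: Nome, Login, Senha, Cpf, Rg, Email, Tel
data = ['Maria Silva', 'maria', 'segredo123',
        '000.000.000-00', '0.000.000', 'maria@example.com', 11999990000]

if Funcao.VerifyDatas().VerifyCampos(data, Type='Cadaster'):
    Funcao.SQDB().InsertUsers(data, Type='Adm')   # register an Adm

user = Funcao.SQDB().GetUsers('maria')            # look up by Login
print(user)                                       # (Cod, Nome, Login, ...)
```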
#### File: jonathan-JIPSlok/Projeto_Gestao/Main.py
```python
import sys
import sqlite3
from PyQt5.QtWidgets import QWidget, QApplication, QMainWindow, QLineEdit, QPushButton, QLabel, QGridLayout, QRadioButton, QComboBox, QTableWidgetItem, QTableWidget
import Function as Funcao
import shelve
class Janela_Principal(QMainWindow): # Main window
def __init__(self):
super().__init__()
Db = Funcao.SQDB()
Lista = Db.cursor.execute("SELECT * FROM Usuarios")
if len(Lista.fetchall()) == 0:
self.CentralWidget = Window_CadasterADM((1, 'PRIMEIRO LOGIN', 111111, 11111, 11111, '0', '0', '0', 'Adm'))
else:
self.CentralWidget = LoginWindow()
self.setCentralWidget(self.CentralWidget)
Db.connection.close()
setStyle(self)
self.show()
class LoginWindow(QWidget): # Login window
def __init__(self):
super().__init__()
self.Layout = QGridLayout()
#Widgets
self.Name = QLineEdit(self)
self.Senha = QLineEdit(self)
self.Button_Login = QPushButton('Logar', self)
self.Text = QLabel(self)
#Configurando widgets
self.Name.setPlaceholderText('Nome')
self.Name.returnPressed.connect(lambda :self.Senha.setFocus())
self.Senha.setPlaceholderText('Senha')
self.Senha.setEchoMode(QLineEdit.Password)
self.Senha.returnPressed.connect(self.Logar)
self.Button_Login.clicked.connect(self.Logar)
#Inserindo widgets no layout
self.Layout.addWidget(self.Name)
self.Layout.addWidget(self.Senha)
self.Layout.addWidget(self.Button_Login)
self.Layout.addWidget(self.Text)
self.setLayout(self.Layout)
def Logar(self): # Check everything is OK and log the user in
Verify = Funcao.VerifyDatas().VerifyCampos([self.Name.text(), self.Senha.text()]) # check that the fields are filled in
if Verify == True:
db = Funcao.SQDB().GetUsers(self.Name.text()) # fetch the user's data from the database
if len(db) != 0:
if db[2] == self.Name.text() and db[3] == self.Senha.text(): # check that the password is correct
if db[8] == 'Adm':
Janela.CentralWidget = Window_Adm(db)
Janela.setCentralWidget(Janela.CentralWidget)
if db[8] == 'User':
Janela.CentralWidget = Window_User(db)
Janela.setCentralWidget(Janela.CentralWidget)
else: self.Text.setText('Verifique se os campos estão preenchidos corretamente.')
else: self.Text.setText('Verifique se os campos estão preenchidos corretamente.')
else: self.Text.setText('Verifique se os campos estão preenchidos corretamente.')
class Window_Adm(QWidget):
def __init__(self, User):
super().__init__()
self.Layout = QGridLayout()
self.User_Data = User
#Widgets
self.UserLabel = QLabel(f'Cod: {User[0]} \t\t Nome: {User[1]}', self)
self.CadasterAdm_Button = QPushButton('Cadastrar ADM', self)
self.CadasterUser_Button = QPushButton('Cadastrar USUARIO', self)
self.SearchUSer_Button = QPushButton('Procurar Adm ou Usuario', self)
self.DellUser_Button = QPushButton("Deletar um User ou Adm", self)
self.AddObejects_Entrega = QPushButton("Adicionar Objetos para entrega", self)
self.TabelaObjetos = QPushButton("Tabela de Objetos a serem entregues", self)
self.TabelaAdm = QPushButton("Tabela de Adm", self)
self.TabelaUsers = QPushButton("Tabela de Users", self)
self.DellObjeto_Entrega = QPushButton("Deletar Objeto de Entrega", self)
self.UserEntregas = QPushButton("Suas Entregas", self)
self.UserEntregasEmAberto = QPushButton("Suas Entregas em Aberto", self)
self.UserEntregasJustificadas = QPushButton("Suas Entregas Justificadas", self)
Deslogar = QPushButton("Deslogar", self)
#Configurando Widgets
self.CadasterAdm_Button.clicked.connect(self.InitWindow_CadasterAdm)
self.CadasterUser_Button.clicked.connect(self.InitWindow_CadasterUser)
self.SearchUSer_Button.clicked.connect(self.InitWindow_SearchUsers)
self.DellUser_Button.clicked.connect(self.InitWindow_DellUsers)
Deslogar.clicked.connect(self.Deslogar)
self.AddObejects_Entrega.clicked.connect(self.InitWindow_AddObjectsEntrega)
self.TabelaObjetos.clicked.connect(self.InitWindow_TabelaObjetos)
self.TabelaAdm.clicked.connect(self.InitWindow_TabelaAdm)
self.TabelaUsers.clicked.connect(self.InitWindow_TabelaUsers)
self.DellObjeto_Entrega.clicked.connect(self.InitWindow_DellObjetoEntrega)
self.UserEntregas.clicked.connect(self.InitWindow_UserEntregas)
self.UserEntregasEmAberto.clicked.connect(self.InitWindow_UserEntregasEmAberto)
self.UserEntregasJustificadas.clicked.connect(self.InitWindow_UserEntregasJustificadas)
#Inserindo Widgets no layout
self.Layout.addWidget(self.UserLabel)
self.Layout.addWidget(self.CadasterAdm_Button)
self.Layout.addWidget(self.CadasterUser_Button)
self.Layout.addWidget(self.SearchUSer_Button)
self.Layout.addWidget(self.DellUser_Button)
self.Layout.addWidget(self.AddObejects_Entrega)
self.Layout.addWidget(self.TabelaObjetos)
self.Layout.addWidget(self.TabelaAdm)
self.Layout.addWidget(self.TabelaUsers)
self.Layout.addWidget(self.DellObjeto_Entrega)
self.Layout.addWidget(self.UserEntregas)
self.Layout.addWidget(self.UserEntregasEmAberto)
self.Layout.addWidget(self.UserEntregasJustificadas)
self.Layout.addWidget(Deslogar)
self.setLayout(self.Layout)
def Deslogar(self):
Janela.setCentralWidget(LoginWindow())
def InitWindow_CadasterAdm(self):
try: self.Window.close()
except: pass
self.Window = Window_CadasterADM(self.User_Data)
def InitWindow_CadasterUser(self):
try: self.Window.close()
except: pass
self.Window = Window_CadasterUser(self.User_Data)
def InitWindow_SearchUsers(self):
try: self.Window.close()
except: pass
self.Window = Window_SearchUsers(self.User_Data)
def InitWindow_DellUsers(self):
try: self.Window.close()
except: pass
self.Window = Window_DellUsers(self.User_Data)
def InitWindow_AddObjectsEntrega(self):
try: self.Window.close()
except: pass
self.Window = AddObjeto_Entrega(self.User_Data)
def InitWindow_TabelaObjetos(self):
try: self.Window.close()
except: pass
self.Window = Tabela(self.User_Data)
def InitWindow_TabelaAdm(self):
try: self.Window.close()
except: pass
self.Window = Tabela(self.User_Data, 'Adm')
def InitWindow_TabelaUsers(self):
try: self.Window.close()
except: pass
self.Window = Tabela(self.User_Data, 'User')
def InitWindow_DellObjetoEntrega(self):
try: self.Window.close()
except: pass
self.Window = DellObjeto_Entrega(self.User_Data)
def InitWindow_UserEntregas(self):
try: self.Window.close()
except: pass
self.Window = Tabela(self.User_Data, 'UserEntregas')
def InitWindow_UserEntregasEmAberto(self):
try: self.Window.close()
except: pass
self.Window = Tabela(self.User_Data, 'UserEntregas', "Em Aberto")
def InitWindow_UserEntregasJustificadas(self):
try: self.Window.close()
except: pass
self.Window = Tabela(self.User_Data, 'UserEntregas', "Endereço não encontrado")
class Window_User(QMainWindow):
def __init__(self, data):
super().__init__()
self.Permisseds = Funcao.SQDB().GetPermissions(data[0])
self.User = data
self.Wd = QWidget()
self.Layout = QGridLayout()
self.Layout.addWidget(QLabel(f"Cod: {data[0]} \t Nome: {data[1]}", self))
self.setOptions(self.Layout)
Deslogar = QPushButton("Deslogar" ,self)
self.Layout.addWidget(Deslogar)
Deslogar.clicked.connect(self.Deslogar)
self.Wd.setLayout(self.Layout)
setStyle(self)
self.setCentralWidget(self.Wd)
self.show()
def Deslogar(self):
Janela.setCentralWidget(LoginWindow())
def setOptions(self, Layout):
permissoes = []
for Valor in self.Permisseds:
if Valor.isnumeric() == True:
permissoes.append(int(Valor))
try:
if permissoes[0] == 1:
self.Cadaster_Adm = QPushButton('Cadastrar Adm', self)
self.Cadaster_Adm.clicked.connect(self.InitWindow_CadasterAdm)
self.Layout.addWidget(self.Cadaster_Adm)
except: pass
try:
if permissoes[1] == 1:
self.Cadaster_User = QPushButton('Cadastrar Usuario',self)
self.Cadaster_User.clicked.connect(self.InitWindow_CadasterUser)
self.Layout.addWidget(self.Cadaster_User)
except: pass
try:
if permissoes[2] == 1:
self.SearchUsers = QPushButton('Pesquisar User ou Adm', self)
self.SearchUsers.clicked.connect(self.InitWindow_SearchUsers)
self.Layout.addWidget(self.SearchUsers)
except: pass
try:
if permissoes[3] == 1:
self.DellUsers = QPushButton('Deletar User ou Adm', self)
self.Layout.addWidget(self.DellUsers)
self.DellUsers.clicked.connect(self.InitWindow_DellUsers)
except: pass
try:
if permissoes[4] == 1:
self.AddObjects_Entrega = QPushButton('Adicionar Objetos para entrega', self)
self.Layout.addWidget(self.AddObjects_Entrega)
self.AddObjects_Entrega.clicked.connect(self.InitWindow_AddObjectsEntrega)
except: pass
try:
if permissoes[5] == 1:
self.TabelaEntregas = QPushButton('Tabela de Entregas', self)
self.Layout.addWidget(self.TabelaEntregas)
self.TabelaEntregas.clicked.connect(self.InitWindow_TabelaEntregas)
except: pass
try:
if permissoes[6] == 1:
self.TabelaAdms = QPushButton('Tabela de Adm', self)
self.Layout.addWidget(self.TabelaAdms)
self.TabelaAdms.clicked.connect(self.InitWindow_TabelaAdms)
except: pass
try:
if permissoes[7] == 1:
self.TabelaUsers = QPushButton('Tabela de Users', self)
self.Layout.addWidget(self.TabelaUsers)
self.TabelaUsers.clicked.connect(self.InitWindow_TabelaUsers)
except: pass
try:
if permissoes[8] == 1:
self.DellObjetoEntrega = QPushButton('Deletar Objeto de Entrega', self)
self.Layout.addWidget(self.DellObjetoEntrega)
self.DellObjetoEntrega.clicked.connect(self.InitWindow_DellObjetoEntrega)
except: pass
self.UserEntregas = QPushButton('Suas Entregas', self)
self.Layout.addWidget(self.UserEntregas)
self.UserEntregas.clicked.connect(self.InitWindow_UserEntregas)
self.UserEntregasEmAberto = QPushButton('Suas Entregas em aberto', self)
self.Layout.addWidget(self.UserEntregasEmAberto)
self.UserEntregasEmAberto.clicked.connect(self.InitWindow_UserEntregasEmAberto)
self.UserEntregasJustificadas = QPushButton('Suas Entregas Justificadas', self)
self.Layout.addWidget(self.UserEntregasJustificadas)
self.UserEntregasJustificadas.clicked.connect(self.InitWindow_UserEntregasJustificadas)
def InitWindow_CadasterAdm(self):
try: self.Window.close()
except: pass
self.Window = Window_CadasterADM(self.User)
def InitWindow_CadasterUser(self):
try: self.Window.close()
except: pass
self.Window = Window_CadasterUser(self.User)
def InitWindow_SearchUsers(self):
try: self.Window.close()
except: pass
self.Window = Window_SearchUsers(self.User)
def InitWindow_DellUsers(self):
try: self.Window.close()
except: pass
self.Window = Window_DellUsers(self.User)
def InitWindow_AddObjectsEntrega(self):
try: self.Window.close()
except: pass
self.Window = AddObjeto_Entrega(self.User)
def InitWindow_TabelaEntregas(self):
try: self.Window.close()
except: pass
self.Window = Tabela(self.User)
def InitWindow_TabelaAdms(self):
try: self.Window.close()
except: pass
self.Window = Tabela(self.User, "Adm")
def InitWindow_TabelaUsers(self):
try: self.Window.close()
except: pass
self.Window = Tabela(self.User, "User")
def InitWindow_DellObjetoEntrega(self):
try: self.Window.close()
except: pass
self.Window = DellObjeto_Entrega(self.User)
def InitWindow_UserEntregas(self):
try: self.Window.close()
except: pass
self.Window = Tabela(self.User, "UserEntregas")
def InitWindow_UserEntregasEmAberto(self):
try: self.Window.close()
except: pass
self.Window = Tabela(self.User, "UserEntregas", "Em Aberto")
def InitWindow_UserEntregasJustificadas(self):
try: self.Window.close()
except: pass
self.Window = Tabela(self.User, "UserEntregas", 'Endereço não encontrado')
class Window_CadasterADM(QWidget):
def __init__(self, User):
super().__init__()
self.User = User
self.Layout = QGridLayout()
#Widgets
self.Nome = QLineEdit(self)
self.Senha = QLineEdit(self)
self.Login = QLineEdit(self)
self.Cpf = QLineEdit(self)
self.Rg = QLineEdit(self)
self.Email = QLineEdit(self)
self.Tel = QLineEdit(self)
self.ButtonCadaster = QPushButton('Cadastrar', self)
self.Status = QLabel(self)
self.ButtonVoltar = QPushButton("Voltar", self)
#Configurando Widgets
self.Nome.returnPressed.connect(lambda : self.Login.setFocus())
self.Login.returnPressed.connect(lambda : self.Senha.setFocus())
self.Senha.returnPressed.connect(lambda : self.Cpf.setFocus())
self.Cpf.returnPressed.connect(lambda : self.Rg.setFocus())
self.Rg.returnPressed.connect(lambda : self.Email.setFocus())
self.Email.returnPressed.connect(lambda : self.Tel.setFocus())
self.Tel.returnPressed.connect(self.Cadastrar)
self.ButtonCadaster.clicked.connect(self.Cadastrar)
if self.User[0] > 1:
self.ButtonVoltar.clicked.connect(self.Voltar)
else:
self.ButtonVoltar.setText("")
self.Nome.setPlaceholderText("Nome Completo")
self.Senha.setPlaceholderText('Senha')
self.Login.setPlaceholderText("Login")
self.Cpf.setPlaceholderText("CPF Ex: 000.000.000.00")
self.Rg.setPlaceholderText("RG Ex: 0.000.000")
self.Email.setPlaceholderText("Email")
self.Tel.setPlaceholderText("Telefone/Celular")
#inserindo Widgets no layout
self.Layout.addWidget(self.Nome, 0, 0, 1,2)
self.Layout.addWidget(self.Login, 1, 0, 1, 2)
self.Layout.addWidget(self.Senha,2, 0, 1,2)
self.Layout.addWidget(self.Cpf, 3, 0, 1, 2)
self.Layout.addWidget(self.Rg, 4, 0, 1, 2)
self.Layout.addWidget(self.Email, 5, 0, 1, 2)
self.Layout.addWidget(self.Tel, 6, 0, 1, 2)
self.Layout.addWidget(self.ButtonCadaster,7, 0, 1, 2)
self.Layout.addWidget(self.Status, 8, 0, 1, 2)
self.Layout.addWidget(self.ButtonVoltar, 9, 0, 1, 2)
self.setLayout(self.Layout)
try:
Janela.setCentralWidget(self)
except: pass
setStyle(self)
def Voltar(self):
if self.User[8] == 'Adm':
Janela.setCentralWidget(Window_Adm(self.User))
if self.User[8] == "User":
Janela.setCentralWidget(Window_User(self.User))
def Cadastrar(self):
if self.Tel.text().isnumeric() == True:
data = [self.Nome.text(), self.Login.text(), self.Senha.text(), self.Cpf.text(), self.Rg.text(), self.Email.text(), int(self.Tel.text())]
Verify = Funcao.VerifyDatas().VerifyCampos(data, Type="Cadaster") # check that the fields are filled in
if Verify == True:
Funcao.SQDB().InsertUsers(data, Type="Adm") # register in the database
self.Status.setText(f'Usuario {self.Nome.text()} Cadastrado!')
self.Nome.setText('')
self.Senha.setText('')
self.Login.setText('')
self.Cpf.setText('')
self.Rg.setText('')
self.Email.setText('')
self.Tel.setText('')
else:
self.Status.setText('Não foi possivel fazer o Cadastro. \nCampos em branco ou nome já utilizado.')
else: self.Status.setText('Telefone/Celular deve conter valores numericos.')
class Window_CadasterUser(QWidget):
def __init__(self, User):
super().__init__()
self.User = User
self.Layout = QGridLayout()
#Widgets
self.Nome = QLineEdit(self)
self.Senha = QLineEdit(self)
self.CadasterButton = QPushButton('Cadastrar', self)
self.Status = QLabel(self)
self.Login = QLineEdit(self)
self.Cpf = QLineEdit(self)
self.Rg = QLineEdit(self)
self.Email = QLineEdit(self)
self.Tel = QLineEdit(self)
self.Permissoes = QPushButton("Permissoes", self)
self.ButtonVoltar = QPushButton("Voltar", self)
#Configurando widgets
self.Nome.returnPressed.connect(lambda : self.Login.setFocus())
self.Login.returnPressed.connect(lambda : self.Senha.setFocus())
self.Senha.returnPressed.connect(lambda : self.Cpf.setFocus())
self.Cpf.returnPressed.connect(lambda : self.Rg.setFocus())
self.Rg.returnPressed.connect(lambda : self.Email.setFocus())
self.Email.returnPressed.connect(lambda : self.Tel.setFocus())
self.Tel.returnPressed.connect(self.Verify)
self.ButtonVoltar.clicked.connect(self.Voltar)
self.Permissoes.clicked.connect(self.InitWindow_Permissoes)
self.Nome.setPlaceholderText("Nome Completo")
self.Senha.setPlaceholderText('Senha')
self.Login.setPlaceholderText("Login")
self.Cpf.setPlaceholderText("CPF Ex: 000.000.000.00")
self.Rg.setPlaceholderText("RG Ex: 0.000.000")
self.Email.setPlaceholderText("Email")
self.Tel.setPlaceholderText("Telefone/Celular")
self.CadasterButton.clicked.connect(self.Verify)
#Inserindo widgets no Layout
self.Layout.addWidget(self.Nome)
self.Layout.addWidget(self.Login)
self.Layout.addWidget(self.Senha)
self.Layout.addWidget(self.Cpf)
self.Layout.addWidget(self.Rg)
self.Layout.addWidget(self.Email)
self.Layout.addWidget(self.Tel)
self.Layout.addWidget(self.CadasterButton)
self.Layout.addWidget(self.Status)
self.Layout.addWidget(self.Permissoes)
self.Layout.addWidget(self.ButtonVoltar)
self.setLayout(self.Layout)
Janela.setCentralWidget(self)
setStyle(self)
def Voltar(self):
if self.User[8] == 'Adm':
Janela.setCentralWidget(Window_Adm(self.User))
if self.User[8] == "User":
Janela.setCentralWidget(Window_User(self.User))
def Verify(self):
if self.Tel.text().isnumeric() == True:
data = [self.Nome.text(), self.Login.text(), self.Senha.text(), self.Cpf.text(), self.Rg.text(), self.Email.text(), int(self.Tel.text())]
Verify = Funcao.VerifyDatas().VerifyCampos(data, Type='Cadaster')
if Verify == True:
shelFile = shelve.open('GestionAPP')
self.Permissions = shelFile['Permissoes']
Funcao.SQDB().InsertUsers(data, str(self.Permissions), "User")
shelFile.close()
self.Status.setText(f'{self.Nome.text()} Cadastrado com sucesso!')
self.Nome.setText('')
self.Senha.setText('')
self.Login.setText('')
self.Cpf.setText('')
self.Rg.setText('')
self.Email.setText('')
self.Tel.setText('')
else:
self.Status.setText('Não foi possivel fazer o Cadastro. \nCampos em branco ou nome já utilizado.')
else: self.Status.setText('Telefone/Celular deve conter valores numericos.')
def InitWindow_Permissoes(self):
self.Window = Permissoes(self.User)
class Window_SearchUsers(QWidget):
def __init__(self, User):
super().__init__()
self.Layout = QGridLayout(self)
self.Tipo_Pesquisa = "Nome"
self.User = User
#Widgets
self.Campo = QLineEdit(self)
self.SerachButton = QPushButton('Procurar', self)
self.Adm_Box = QRadioButton('Adm', self)
self.User_Box = QRadioButton('User', self)
self.User_Default = QRadioButton('Default', self)
self.Nome_orCod = QComboBox(self)
self.Status = QLabel(self)
self.ButtonVoltar = QPushButton("Voltar", self)
self.Button_AlterarPermissions = QPushButton("Alterar permissões", self)
#Configurando Widgets
self.Campo.returnPressed.connect(self.Search)
self.SerachButton.clicked.connect(self.Search)
self.Campo.setPlaceholderText('Insira o Nome')
self.Adm_Box.setChecked(True)
self.Nome_orCod.insertItem(0, "Nome")
self.Nome_orCod.insertItem(1, "Cod")
self.Nome_orCod.activated[str].connect(self.DefinirTextoComboBox)
self.ButtonVoltar.clicked.connect(self.Voltar)
self.Button_AlterarPermissions.setDisabled(True)
#Inserindo Widgets lo Layout
self.Layout.addWidget(self.Campo, 0, 0, 1, 3)
self.Layout.addWidget(self.SerachButton, 1, 0, 1, 4)
self.Layout.addWidget(self.Adm_Box, 2, 0)
self.Layout.addWidget(self.User_Box, 2, 1)
self.Layout.addWidget(self.User_Default, 2, 2)
self.Layout.addWidget(self.Status, 3, 0, 1, 4)
self.Layout.addWidget(self.Nome_orCod, 0, 3, 1, 1)
self.Layout.addWidget(self.Button_AlterarPermissions, 4, 0, 1, 4)
self.Layout.addWidget(self.ButtonVoltar, 5, 0, 1, 4)
self.setLayout(self.Layout)
Janela.setCentralWidget(self)
setStyle(self)
def Voltar(self):
if self.User[8] == 'Adm':
Janela.setCentralWidget(Window_Adm(self.User))
if self.User[8] == "User":
Janela.setCentralWidget(Window_User(self.User))
def DefinirTextoComboBox(self, text):
self.Tipo_Pesquisa = text
self.Campo.setPlaceholderText(("Insira o Nome" if text == "Nome" else 'Insira o Cod'))
def Search(self):
Verify = Funcao.VerifyDatas().VerifyCampos([self.Campo.text()])
if Verify == True:
# Search for an Adm
if self.Adm_Box.isChecked() == True:
try:
self.db = Funcao.SQDB().GetUsers(self.Campo.text(), "Adm", self.Tipo_Pesquisa)
self.Status.setText(f'Cod: {self.db[0]} \nNome: {self.db[1]} \nStatus: {self.db[8]}')
except IndexError:
self.Status.setText('Adm não encontrado.')
# Search for a User
elif self.User_Box.isChecked() == True:
try:
self.db = Funcao.SQDB().GetUsers(self.Campo.text(), "User", self.Tipo_Pesquisa)
self.Status.setText(f'Cod: {self.db[0]} \nNome: {self.db[1]} \nStatus: {self.db[8]}')
self.GetPermissions(self.db[0])
except IndexError:
self.Status.setText("User não encontrado.")
# Search for a User or Adm
elif self.User_Default.isChecked() == True:
try:
self.db = Funcao.SQDB().GetUsers(self.Campo.text(), Cod_Name=self.Tipo_Pesquisa)
self.Status.setText(f'Cod: {self.db[0]} \nNome: {self.db[1]} \nStatus: {self.db[8]}')
if self.db[8] == 'User':
self.GetPermissions(self.db[0])
except IndexError:
self.Status.setText("Nada encontrado.")
else:
self.Status.setText('Por favor! Insira os dados corretamente.')
try:
if self.User[8] == 'User' and self.db[8] != "Adm":
Permissoes_User = Funcao.SQDB().GetPermissions(self.User[0])
permi = []
for num in Permissoes_User:
if num.isnumeric() == True:
permi.append(int(num))
if permi[9] == 1:
self.Button_AlterarPermissions.setEnabled(True)
self.Button_AlterarPermissions.clicked.connect(self.Window_AlterarPermissoes)
elif self.db[8] != "Adm":
self.Button_AlterarPermissions.setEnabled(True)
self.Button_AlterarPermissions.clicked.connect(self.Window_AlterarPermissoes)
else:
self.Button_AlterarPermissions.setEnabled(False)
except: pass
def Window_AlterarPermissoes(self):
self.Window = Permissoes(self.User)
self.Window.definir.clicked.connect(self.AlterarPermisooes)
def AlterarPermisooes(self):
shelvFile = shelve.open('GestionAPP')
bd = Funcao.SQDB()
bd.cursor.execute('UPDATE Permisseds SET Permisseds = ? WHERE Cod = ?', (str(shelvFile['Permissoes']), self.db[0]))
bd.connection.commit()
bd.connection.close()
shelvFile.close()
def GetPermissions(self, cod):
Status = self.Status.text()
Status += '\n \t\t Permissoes \t\t\n'
Permisseds = Funcao.SQDB().GetPermissions(cod)
permissoes = []
for Valor in Permisseds:
if Valor.isnumeric() == True:
permissoes.append(int(Valor))
if permissoes[0] == 1:
Status += 'Cadastrar Adm\n'
if permissoes[1] == 1:
Status += 'Cadastrar Usuario\n'
if permissoes[2] == 1:
Status += 'Pesquisar User ou Adm\n'
if permissoes[3] == 1:
Status += 'Deletar User ou Adm\n'
if permissoes[4] == 1:
Status += 'Adicionar Objeto de entrega\n'
if permissoes[5] == 1:
Status += 'Tabela de Entregas\n'
if permissoes[6] == 1:
Status += 'Tabela de Adm\n'
if permissoes[7] == 1:
Status += 'Tabela de Users\n'
if permissoes[8] == 1:
Status += 'Deletar Objeto de Entrega\n'
if permissoes[9] == 1:
Status += 'Alterar permissoes\n'
if permissoes[10] == 1:
Status += "Alterar Status das Entregas\n"
self.Status.setText(Status)
class Window_DellUsers(QWidget):
def __init__(self, UserData):
super().__init__()
self.UserData = UserData
self.Layout = QGridLayout()
self.Tipo_Pesquisa = 'Nome'
#Widgets
self.Input = QLineEdit(self)
self.Type_Adm = QRadioButton('Adm', self)
self.Type_Users = QRadioButton('User', self)
self.Type_Default = QRadioButton('Default', self)
self.Type_Search = QComboBox(self)
self.Search_Button = QPushButton("Pesquisar", self)
self.Status = QLabel(self)
self.Button_DellUsers = QPushButton('Deletar' ,self)
self.ButtonVoltar = QPushButton("Voltar", self)
#Configurando Widgets
self.Type_Search.insertItem(0, "Nome")
self.Type_Search.insertItem(1, "Cod")
self.Type_Search.activated[str].connect(self.DefinirTipoPesquisa)
self.Input.setPlaceholderText(self.Tipo_Pesquisa)
self.Type_Adm.setChecked(True)
self.Search_Button.clicked.connect(self.Verify)
self.Input.returnPressed.connect(self.Verify)
self.Button_DellUsers.clicked.connect(self.DellUser)
self.ButtonVoltar.clicked.connect(self.Voltar)
#Inserindo Widgets no Layout
self.Layout.addWidget(self.Input, 0, 0, 1, 2)
self.Layout.addWidget(self.Type_Adm, 1,0)
self.Layout.addWidget(self.Type_Users, 1, 1)
self.Layout.addWidget(self.Type_Default, 1, 2)
self.Layout.addWidget(self.Type_Search, 0, 2)
self.Layout.addWidget(self.Search_Button, 2, 0, 1, 2)
self.Layout.addWidget(self.Button_DellUsers, 2, 2)
self.Layout.addWidget(self.Status, 3, 0, 1, 3)
self.Layout.addWidget(self.ButtonVoltar, 4, 0, 1, 3)
setStyle(self)
self.setLayout(self.Layout)
Janela.setCentralWidget(self)
def Voltar(self):
if self.UserData[8] == 'Adm':
Janela.setCentralWidget(Window_Adm(self.UserData))
if self.UserData[8] == "User":
Janela.setCentralWidget(Window_User(self.UserData))
def DefinirTipoPesquisa(self, text):
self.Tipo_Pesquisa = text
self.Input.setPlaceholderText(self.Tipo_Pesquisa)
def Verify(self):
Verify = Funcao.VerifyDatas().VerifyCampos([self.Input.text()])
if Verify == True:
# Search for an Adm
if self.Type_Adm.isChecked() == True:
try:
db = Funcao.SQDB().GetUsers(self.Input.text(), "Adm", self.Tipo_Pesquisa)
self.Status.setText(f'Cod: {db[0]} \nNome: {db[1]} \nStatus: {db[8]}')
except IndexError:
self.Status.setText('Adm não encontrado.')
# Search for a User
elif self.Type_Users.isChecked() == True:
try:
db = Funcao.SQDB().GetUsers(self.Input.text(), "User", self.Tipo_Pesquisa)
self.Status.setText(f'Cod: {db[0]} \nNome: {db[1]} \nStatus: {db[8]}')
except IndexError:
self.Status.setText("User não encontrado.")
# Search for a User or Adm
elif self.Type_Default.isChecked() == True:
try:
db = Funcao.SQDB().GetUsers(self.Input.text(), Cod_Name=self.Tipo_Pesquisa)
self.Status.setText(f'Cod: {db[0]} \nNome: {db[1]} \nStatus: {db[8]}')
except IndexError:
self.Status.setText("Nada encontrado.")
else:
self.Status.setText('Por favor! Insira os dados corretamente.')
def DellUser(self):
if self.Input.text().isnumeric() == True and self.Tipo_Pesquisa == "Cod":
try:
db = Funcao.SQDB().GetUsers(self.Input.text(), Cod_Name=self.Tipo_Pesquisa)
self.Status.setText(f"Usuario de Cod: {self.Input.text()} Nome: {db[1]} deletado dos registros!")
Funcao.SQDB().DellUsers(int(self.Input.text()))
except IndexError:
self.Status.setText("Usuario não encontrado.")
elif self.Tipo_Pesquisa == "Nome":
try:
db = Funcao.SQDB().GetUsers(self.Input.text(), Cod_Name="Nome")
Funcao.SQDB().DellUsers(db[0])
self.Status.setText(f"Usuario {db[1]} deletado dos registros!")
except IndexError:
self.Status.setText("Usuario não encontrado.")
class AddObjeto_Entrega(QWidget):
def __init__(self, UserData):
super().__init__()
self.UserData = UserData
self.Layout = QGridLayout()
#Widgets
self.Nome = QLineEdit(self)
self.Rg = QLineEdit(self)
self.Cpf = QLineEdit(self)
self.End = QLineEdit(self)
self.Cep = QLineEdit(self)
self.CodClient = QLineEdit(self)
self.Entregador = QLineEdit(self)
self.Registrar = QPushButton("Registrar", self)
self.Status = QLabel(self)
self.Voltar = QPushButton("Voltar", self)
#Configurando Widgets
self.Voltar.clicked.connect(self.VoltarWindow)
self.Nome.setPlaceholderText("Nome do Cliente")
self.Nome.returnPressed.connect(lambda : self.Rg.setFocus())
self.Rg.setPlaceholderText("RG do Cliente")
self.Rg.returnPressed.connect(lambda : self.Cpf.setFocus())
self.Cpf.setPlaceholderText("CPF do Cliente")
self.Cpf.returnPressed.connect(lambda : self.End.setFocus())
self.End.setPlaceholderText("Endereco do Cliente")
self.End.returnPressed.connect(lambda : self.Cep.setFocus())
self.Cep.setPlaceholderText("Cep do Cliente")
self.Cep.returnPressed.connect(lambda : self.CodClient.setFocus())
self.CodClient.setPlaceholderText("Codigo do Cliente")
self.CodClient.returnPressed.connect(lambda : self.Entregador.setFocus())
self.Entregador.setPlaceholderText("Nome ou Cod do Entregador")
self.Entregador.returnPressed.connect(self.Cadastrar)
self.Registrar.clicked.connect(self.Cadastrar)
#Inserindo Widgets no Layout
self.Layout.addWidget(self.Nome)
self.Layout.addWidget(self.Rg)
self.Layout.addWidget(self.Cpf)
self.Layout.addWidget(self.End)
self.Layout.addWidget(self.Cep)
self.Layout.addWidget(self.CodClient)
self.Layout.addWidget(self.Entregador)
self.Layout.addWidget(self.Registrar)
self.Layout.addWidget(self.Status)
self.Layout.addWidget(self.Voltar)
self.setLayout(self.Layout)
Janela.setCentralWidget(self)
setStyle(self)
def VoltarWindow(self):
if self.UserData[8] == 'Adm':
Janela.setCentralWidget(Window_Adm(self.UserData))
if self.UserData[8] == "User":
Janela.setCentralWidget(Window_User(self.UserData))
def Cadastrar(self):
data = [self.Nome.text(), self.Rg.text(), self.Cpf.text(), self.End.text(), self.Cep.text(), self.CodClient.text(), self.Entregador.text()]
Verify = Funcao.VerifyDatas().VerifyCampos(data, 'Login')
if Verify == True:
try:
Verify = Funcao.SQDB().GetUsers(int(self.Entregador.text()), Cod_Name='Cod')
self.Status.setText(f'Entrega em {self.End.text()} devera ser feita por {Verify[1]}')
Funcao.SQDB().InsertObject_Entrega(data)
self.Nome.setText(''), self.Rg.setText(''), self.Cpf.setText(''), self.End.setText(''), self.Cep.setText(''), self.CodClient.setText(''), self.Entregador.setText('')
except:
try:
Verify = Funcao.SQDB().GetUsers(self.Entregador.text(), Cod_Name='Nome')
self.Status.setText(f'Entrega em {self.End.text()} devera ser feita por {Verify[1]}')
Funcao.SQDB().InsertObject_Entrega(data)
self.Nome.setText(''), self.Rg.setText(''), self.Cpf.setText(''), self.End.setText(''), self.Cep.setText(''), self.CodClient.setText(''), self.Entregador.setText('')
except:
self.Status.setText('Entregador não encontrado')
else:
self.Status.setText('Preencha todos os campos')
class Tabela(QWidget):
def __init__(self, User, Type = "Entregas", Situacao = 'Entregue'):
super().__init__()
self.UserData = User
self.Type = Type
self.Linha = 0
self.Situacao = Situacao
self.Layout = QGridLayout()
self.Tabela = QTableWidget()
#Widgets
self.Voltar = QPushButton('Voltar',self)
self.Line = QLineEdit(self)
self.Pesquisar = QPushButton("Pesquisar", self)
#Configurando Widgets
self.Voltar.clicked.connect(self.VoltarWindow)
self.Line.setPlaceholderText("Nome ou Cod")
self.Line.returnPressed.connect(self.Reset)
self.Pesquisar.clicked.connect(self.Reset)
self.TipoTabela()
#Adicionando Widgets no Layout
self.Layout.addWidget(self.Line)
self.Layout.addWidget(self.Pesquisar)
self.Layout.addWidget(self.Tabela)
self.Layout.addWidget(self.Voltar)
self.setLayout(self.Layout)
Janela.setCentralWidget(self)
def Reset(self):
while self.Linha != 0:
self.Tabela.removeRow(self.Linha)
self.Linha -= 1
self.Tabela.removeRow(self.Linha)
self.TipoTabela()
def TipoTabela(self):
if self.Type == "Entregas":
self.Entregas()
elif self.Type == "Adm":
self.Adm()
elif self.Type == "User":
self.Users()
elif self.Type == "UserEntregas":
self.UserEntregas()
def VoltarWindow(self):
if self.UserData[8] == 'Adm':
Janela.setCentralWidget(Window_Adm(self.UserData))
if self.UserData[8] == "User":
Janela.setCentralWidget(Window_User(self.UserData))
def Entregas(self): #Fills the table with delivery data
self.Line.setPlaceholderText("Nome(Entregador) ou Cod (Entrega)")
if self.Line.text() == "" or self.Line.text() == ' ':
Data = Funcao.SQDB().cursor.execute('SELECT * FROM Objetos_Entregas')
elif self.Line.text().isnumeric() == True:
Data = Funcao.SQDB().cursor.execute('SELECT * FROM Objetos_Entregas WHERE Cod = ?', (int(self.Line.text()), ))
else:
Data = Funcao.SQDB().cursor.execute('SELECT * FROM Objetos_Entregas WHERE Entregador = ?', (self.Line.text(), ))
Data = Data.fetchall()
self.Tabela.setRowCount(len(Data))
self.Tabela.setColumnCount(8)
self.Tabela.setStyleSheet('QTableWidget {background-color: rgb(120, 120, 120); font-size: 20px}')
Coluna = 0
self.Items = []
for Item in Data:
Linha_Dict = {}
Coluna = 0
for Row in Item:
self.LinhaDaTabela = QTableWidgetItem(str(Row))
self.Tabela.setItem(self.Linha, Coluna, self.LinhaDaTabela)
self.Tabela.setColumnWidth(Coluna, 200)
Linha_Dict[str(Row)] = self.LinhaDaTabela
Coluna += 1
self.Items.append(Linha_Dict)
self.Linha += 1
self.Tipo = 'Entregas'
self.Tabela.cellDoubleClicked.connect(self.Verificar_Selecionados)
def UserEntregas(self):
self.Line.setPlaceholderText("Cod (Entrega) ou Nome (Cliente)")
if self.Line.text() == "" or self.Line.text() == ' ':
Data = Funcao.SQDB().cursor.execute('SELECT * FROM Objetos_Entregas WHERE (Entregador, Situacao) = (?,?)', (self.UserData[1],self.Situacao))
elif self.Line.text().isnumeric() == True:
Data = Funcao.SQDB().cursor.execute('SELECT * FROM Objetos_Entregas WHERE (Cod, Entregador, Situacao) = (?,?, ?)', (int(self.Line.text()), self.UserData[1], self.Situacao))
else:
Data = Funcao.SQDB().cursor.execute('SELECT * FROM Objetos_Entregas WHERE (Nome, Situacao) = (?, ?)', (self.Line.text(), self.Situacao))
Data = Data.fetchall()
self.Tabela.setRowCount(len(Data))
self.Tabela.setColumnCount(8)
self.Tabela.setStyleSheet('QTableWidget {background-color: rgb(120, 120, 120); font-size: 20px}')
Coluna = 0
self.Items = []
for Item in Data:
Linha_Dict = {}
Coluna = 0
for Row in Item:
self.LinhaDaTabela = QTableWidgetItem(str(Row))
self.Tabela.setItem(self.Linha, Coluna, self.LinhaDaTabela)
self.Tabela.setColumnWidth(Coluna, 200)
Linha_Dict[str(Row)] = self.LinhaDaTabela
Coluna += 1
self.Items.append(Linha_Dict)
self.Linha += 1
self.Tipo = 'UserEntregas'
self.Tabela.cellDoubleClicked.connect(self.Verificar_Selecionados)
def Users(self): #Fills the table with User data
if self.Line.text() == "" or self.Line.text() == ' ':
Data = Funcao.SQDB().cursor.execute('SELECT Cod, Nome, Cpf, Rg, Email, Tel, Status FROM Usuarios WHERE Status = "User"')
elif self.Line.text().isnumeric() == True:
Data = Funcao.SQDB().cursor.execute('SELECT Cod, Nome, Cpf, Rg, Email, Tel, Status FROM Usuarios WHERE (Status, Cod) = (?, ?)', ('User', int(self.Line.text()), ))
else:
Data = Funcao.SQDB().cursor.execute('SELECT Cod, Nome, Cpf, Rg, Email, Tel, Status FROM Usuarios WHERE (Status, Nome) = (?, ?)', ('User', self.Line.text(), ))
Data = Data.fetchall()
self.Tabela.setRowCount(len(Data))
self.Tabela.setColumnCount(6)
self.Tabela.setStyleSheet('QTableWidget {background-color: rgb(120, 120, 120); font-size: 20px}')
Coluna = 0
self.Items = []
for Item in Data:
Linha_Dict = {}
Coluna = 0
for Row in Item:
self.LinhaDaTabela = QTableWidgetItem(str(Row))
self.Tabela.setItem(self.Linha, Coluna, self.LinhaDaTabela)
self.Tabela.setColumnWidth(Coluna, 200)
Linha_Dict[str(Row)] = self.LinhaDaTabela
Coluna += 1
self.Items.append(Linha_Dict)
self.Linha += 1
self.Tipo = "User"
self.Tabela.cellDoubleClicked.connect(self.Verificar_Selecionados)
def Adm(self): #Fills the table with Adm data
if self.Line.text() == "" or self.Line.text() == ' ':
Data = Funcao.SQDB().cursor.execute('SELECT Cod, Nome, Cpf, Rg, Email, Tel, Status FROM Usuarios WHERE Status = "Adm"')
elif self.Line.text().isnumeric() == True:
Data = Funcao.SQDB().cursor.execute('SELECT Cod, Nome, Cpf, Rg, Email, Tel, Status FROM Usuarios WHERE (Status, Cod) = (?, ?)', ('Adm', int(self.Line.text()), ))
else:
Data = Funcao.SQDB().cursor.execute('SELECT Cod, Nome, Cpf, Rg, Email, Tel, Status FROM Usuarios WHERE (Status, Nome) = (?, ?)', ('Adm', self.Line.text(), ))
Data = Data.fetchall()
self.Tabela.setRowCount(len(Data))
self.Tabela.setColumnCount(6)
self.Tabela.setStyleSheet('QTableWidget {background-color: rgb(120, 120, 120); font-size: 20px}')
Coluna = 0
self.Items = []
for Item in Data:
Linha_Dict = {}
Coluna = 0
for Row in Item:
self.LinhaDaTabela = QTableWidgetItem(str(Row))
self.Tabela.setItem(self.Linha, Coluna, self.LinhaDaTabela)
self.Tabela.setColumnWidth(Coluna, 200)
Linha_Dict[str(Row)] = self.LinhaDaTabela
Coluna += 1
self.Items.append(Linha_Dict)
self.Linha += 1
self.Tipo = "Adm"
self.Tabela.cellDoubleClicked.connect(self.Verificar_Selecionados)
def Verificar_Selecionados(self):
for Item in self.Items:
for items in Item.values():
try:
if items.isSelected() == True:
data = Item.keys() #Contains the data of the selected row
lista = []
for c in data:
lista.append(c)
try: self.Window = Label_Items(lista, self.Tipo, self.UserData)
except: self.Window = Label_Items(lista, self.Tipo)
except RuntimeError: pass
class DellObjeto_Entrega(QWidget):
def __init__(self, User):
super().__init__()
self.User_Data = User
self.Layout = QGridLayout(self)
#Widgets
self.Line = QLineEdit(self)
Pesquisar = QPushButton("Pesquisar", self)
Deletar = QPushButton('Deletar' ,self)
self.Status = QLabel(self)
Voltar = QPushButton("Voltar", self)
#Configuring the widgets
self.Line.setPlaceholderText("Cod")
Pesquisar.clicked.connect(self.Pesquisar)
self.Line.returnPressed.connect(self.Pesquisar)
Voltar.clicked.connect(self.VoltarWindow)
Deletar.clicked.connect(self.Deletar)
#Adding the widgets to the layout
self.Layout.addWidget(self.Line, 0, 0, 1, 2)
self.Layout.addWidget(Pesquisar, 1, 0)
self.Layout.addWidget(Deletar, 1, 1)
self.Layout.addWidget(self.Status)
self.Layout.addWidget(Voltar, 3, 0, 1, 2)
Janela.setCentralWidget(self)
def Pesquisar(self):
try:
if self.Line.text().isnumeric() == True:
Data = Funcao.SQDB().cursor.execute('SELECT * FROM Objetos_Entregas WHERE Cod = ?', (int(self.Line.text()), ))
Data = Data.fetchall()
Data = Data[0]
self.Status.setText(f'Cod: {Data[0]} \nNome Cliente: {Data[1]} \nRG: {Data[2]} \nCPF: {Data[3]} \nEnd: {Data[4]} \nCEP: {Data[5]} \nCodCliente: {Data[6]} \nEntregador: {Data[7]}')
else: self.Status.setText("Codigo Invalido.")
except: self.Status.setText("Nada Encontrado")
def Deletar(self):
if self.Line.text().isnumeric() == True:
try:
Funcao.SQDB().DelObjectEntrega(int(self.Line.text()))
self.Status.setText("Objeto de Entrega deletado dos registros.")
except: self.Status.setText("Nada encontrado.")
else: self.Status.setText("Codigo Invalido.")
def VoltarWindow(self):
if self.User_Data[8] == 'Adm':
Janela.setCentralWidget(Window_Adm(self.User_Data))
if self.User_Data[8] == "User":
Janela.setCentralWidget(Window_User(self.User_Data))
class Permissoes(QWidget):
def __init__(self, User):
super().__init__()
self.User_Data = User
self.Permissions = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
self.Layout = QGridLayout(self)
#Widgets
voltar = QPushButton('Voltar', self)
self.definir = QPushButton("Definir permissoes")
self.Cadaster_adm_Permission = QPushButton('Sim', self)
self.Cadaster_User_Permission = QPushButton('Sim', self)
self.Search_Users_Permission = QPushButton('Sim', self)
self.Dell_Users_Permission = QPushButton("Sim", self)
self.AddBojects_Entrega = QPushButton("Sim", self)
self.Tabela_Entrega = QPushButton("Sim", self)
self.Tabela_Adms = QPushButton("Sim", self)
self.Tabela_Users = QPushButton("Sim", self)
self.Dell_ObjetoEntrega = QPushButton("Sim", self)
self.Alterar_Permissoes = QPushButton('Sim', self)
self.AlterarStatus_Entrega = QPushButton('Sim', self)
#Configuring the widgets
voltar.clicked.connect(self.VoltarWindow)
self.definir.clicked.connect(self.DefinirPermissoes)
self.Cadaster_adm_Permission.clicked.connect(self.setPermission_CadasterAdm)
self.Cadaster_User_Permission.clicked.connect(self.setPermission_CadasterUser)
self.Search_Users_Permission.clicked.connect(self.setPermission_SearchUsers)
self.Dell_Users_Permission.clicked.connect(self.setPermission_DellUsers)
self.AddBojects_Entrega.clicked.connect(self.setPermission_AddObjectEntrega)
self.Tabela_Entrega.clicked.connect(self.setPermission_TabelaEntregas)
self.Tabela_Adms.clicked.connect(self.setPermission_TabelaAdms)
self.Tabela_Users.clicked.connect(self.setPermission_TabelaUsers)
self.Dell_ObjetoEntrega.clicked.connect(self.setPermission_DellObjetoEntrega)
self.Alterar_Permissoes.clicked.connect(self.setPermission_AlterarPermissoes)
self.AlterarStatus_Entrega.clicked.connect(self.setPermission_AlterarStatusEntrega)
#Adding the widgets to the layout
self.Layout.addWidget(self.Cadaster_adm_Permission, 7, 1)
self.Layout.addWidget(QLabel('Cadastrar Adm', self), 7, 0)
self.Layout.addWidget(self.Cadaster_User_Permission, 8, 1)
self.Layout.addWidget(QLabel("Cadastarar User", self), 8, 0)
self.Layout.addWidget(self.Search_Users_Permission, 9, 1)
self.Layout.addWidget(QLabel("Pesquisar Usuarios e Adm", self), 9, 0)
self.Layout.addWidget(self.Dell_Users_Permission,10 , 1)
self.Layout.addWidget(QLabel("Deletar Users e adms", self), 10, 0)
self.Layout.addWidget(self.AddBojects_Entrega, 11, 1)
self.Layout.addWidget(QLabel("Adicionar Objetos para Entrega", self), 11, 0)
self.Layout.addWidget(self.Tabela_Entrega, 12, 1)
self.Layout.addWidget(QLabel("Tabela de Entregas", self), 12, 0)
self.Layout.addWidget(self.Tabela_Adms, 13, 1)
self.Layout.addWidget(QLabel("Tabela de Adms", self), 13, 0)
self.Layout.addWidget(self.Tabela_Users, 14, 1)
self.Layout.addWidget(QLabel("Tabela de User", self), 14, 0)
self.Layout.addWidget(self.Dell_ObjetoEntrega, 15, 1)
self.Layout.addWidget(QLabel("Deletar Objeto de Entrega", self), 15, 0)
self.Layout.addWidget(self.Alterar_Permissoes, 16, 1)
self.Layout.addWidget(QLabel("Alterar Permissoes de User", self), 16, 0)
self.Layout.addWidget(self.AlterarStatus_Entrega, 17, 1)
self.Layout.addWidget(QLabel("Alterar Situação de Entregas", self), 17, 0)
self.Layout.addWidget(voltar)
self.Layout.addWidget(self.definir)
setStyle(self)
self.show()
def VoltarWindow(self):
self.close()
def DefinirPermissoes(self):
ShelFile = shelve.open('GestionAPP')
ShelFile['Permissoes'] = self.Permissions
ShelFile.close()
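# Read-back sketch (illustrative, not part of the original code): the list
# stored by DefinirPermissoes above can be loaded again with the standard
# shelve API, using the same 'GestionAPP' file and 'Permissoes' key:
#
#   with shelve.open('GestionAPP') as db:
#       permissions = db.get('Permissoes', [1] * 11)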
def setPermission_CadasterAdm(self):
if self.Permissions[0] == 1:
self.Permissions[0] = 0
self.Cadaster_adm_Permission.setText('Não')
else:
self.Permissions[0] = 1
self.Cadaster_adm_Permission.setText('Sim')
def setPermission_CadasterUser(self):
if self.Permissions[1] == 1:
self.Permissions[1] = 0
self.Cadaster_User_Permission.setText('Não')
else:
self.Permissions[1] = 1
self.Cadaster_User_Permission.setText('Sim')
def setPermission_SearchUsers(self):
if self.Permissions[2] == 1:
self.Permissions[2] = 0
self.Search_Users_Permission.setText('Não')
else:
self.Permissions[2] = 1
self.Search_Users_Permission.setText('Sim')
def setPermission_DellUsers(self):
if self.Permissions[3] == 1:
self.Permissions[3] = 0
self.Dell_Users_Permission.setText('Não')
else:
self.Permissions[3] = 1
self.Dell_Users_Permission.setText('Sim')
def setPermission_AddObjectEntrega(self):
if self.Permissions[4] == 1:
self.Permissions[4] = 0
self.AddBojects_Entrega.setText('Não')
else:
self.Permissions[4] = 1
self.AddBojects_Entrega.setText('Sim')
def setPermission_TabelaEntregas(self):
if self.Permissions[5] == 1:
self.Permissions[5] = 0
self.Tabela_Entrega.setText('Não')
else:
self.Permissions[5] = 1
self.Tabela_Entrega.setText('Sim')
def setPermission_TabelaAdms(self):
if self.Permissions[6] == 1:
self.Permissions[6] = 0
self.Tabela_Adms.setText('Não')
else:
self.Permissions[6] = 1
self.Tabela_Adms.setText('Sim')
def setPermission_TabelaUsers(self):
if self.Permissions[7] == 1:
self.Permissions[7] = 0
self.Tabela_Users.setText('Não')
else:
self.Permissions[7] = 1
self.Tabela_Users.setText('Sim')
def setPermission_DellObjetoEntrega(self):
if self.Permissions[8] == 1:
self.Permissions[8] = 0
self.Dell_ObjetoEntrega.setText('Não')
else:
self.Permissions[8] = 1
self.Dell_ObjetoEntrega.setText('Sim')
def setPermission_AlterarPermissoes(self):
if self.Permissions[9] == 1:
self.Permissions[9] = 0
self.Alterar_Permissoes.setText('Não')
else:
self.Permissions[9] = 1
self.Alterar_Permissoes.setText('Sim')
def setPermission_AlterarStatusEntrega(self):
if self.Permissions[10] == 1:
self.Permissions[10] = 0
self.AlterarStatus_Entrega.setText('Não')
else:
self.Permissions[10] = 1
self.AlterarStatus_Entrega.setText('Sim')
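# Design note (suggestion only, not original code): the eleven setPermission_*
# methods above repeat one toggle pattern; a single helper would remove the
# duplication, e.g.
#
#   def _toggle(self, index, button):
#       self.Permissions[index] = 0 if self.Permissions[index] == 1 else 1
#       button.setText('Sim' if self.Permissions[index] == 1 else 'Não')
#
# wired in __init__ with, for example:
#   self.Tabela_Users.clicked.connect(lambda: self._toggle(7, self.Tabela_Users))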
class Label_Items(QWidget):
def __init__(self, Data, Tipo, User=None):
super().__init__()
self.User = User
self.Tipo = Tipo
self.Layout = QGridLayout(self)
self.Label_Config(Data)
voltar = QPushButton('Voltar', self)
if Tipo == "Entregas":
voltar.clicked.connect(lambda : Janela.setCentralWidget(Tabela(self.User)))
else:
voltar.clicked.connect(lambda : Janela.setCentralWidget(Tabela(self.User, self.Tipo)))
self.Layout.addWidget(voltar, 2,0,1,2)
Janela.setCentralWidget(self)
def Label_Config(self, Data):
if self.Tipo == 'Entregas':
self.Layout.addWidget(QLabel('Codigo: \nCliente: \nRG: \nCPF: \nEndereço: \nCEP: \nCodigo Cliente: \nEntregador: \nSituação',self), 0, 0)
self.Layout.addWidget(QLabel(f'{Data[0]} \n{Data[1]} \n{Data[2]} \n{Data[3]} \n{Data[4]} \n{Data[5]} \n{Data[6]} \n{Data[7]} \n{Data[8]}', self), 0, 1)
self.AlterSituation = QPushButton("Altera Situação", self)
try:
Perm = Funcao.SQDB().GetPermissions(self.User[0])
Perms = []
for item in Perm:
if item.isnumeric() == True:
Perms.append(item)
if int(Perms[10]) == 1:
self.AlterSituation.setEnabled(True)
else:
self.AlterSituation.setEnabled(False)
except:
pass
self.Layout.addWidget(self.AlterSituation, 1, 0, 1, 2)
self.AlterSituation.clicked.connect(lambda : setSituation_Entregas(self.User, Data, self.Tipo, self))
elif self.Tipo == 'UserEntregas':
self.Layout.addWidget(QLabel('Codigo: \nCliente: \nRG: \nCPF: \nEndereço: \nCEP: \nCodigo Cliente: \nEntregador: \nSituação',self), 0, 0)
self.Layout.addWidget(QLabel(f'{Data[0]} \n{Data[1]} \n{Data[2]} \n{Data[3]} \n{Data[4]} \n{Data[5]} \n{Data[6]} \n{Data[7]} \n{Data[8]}', self), 0, 1)
self.AlterSituation = QPushButton("Altera Situação", self)
try:
Perm = Funcao.SQDB().GetPermissions(self.User[0])
Perms = []
for item in Perm:
if item.isnumeric() == True:
Perms.append(item)
if int(Perms[10]) == 1:
self.AlterSituation.setEnabled(True)
else:
self.AlterSituation.setEnabled(False)
except:
pass
self.Layout.addWidget(self.AlterSituation, 1, 0, 1, 2)
self.AlterSituation.clicked.connect(lambda : setSituation_Entregas(self.User, Data, 'UserEntregas', self))
elif self.Tipo == 'User':
self.Layout.addWidget(QLabel('Cod: \nNome: \nCpf: \nRg: \nEmail: \nTel: \nStatus:', self), 0, 0)
self.Layout.addWidget(QLabel(f'{Data[0]} \n{Data[1]} \n{Data[2]}\n {Data[3]}\n {Data[4]}\n {Data[5]}\n {Data[6]}', self),0 ,1)
self.Status = QLabel(self)
Window_SearchUsers.GetPermissions(self, Data[0])
self.Layout.addWidget(self.Status, 1, 0, 1, 2)
elif self.Tipo == 'Adm':
self.Layout.addWidget(QLabel('Cod: \nNome: \nCpf: \nRg: \nEmail: \nTel \nStatus', self), 0, 0)
self.Layout.addWidget(QLabel(f'{Data[0]} \n{Data[1]} \n{Data[2]} \n{Data[3]} \n{Data[4]} \n{Data[5]} \n{Data[6]}', self),0 , 1)
class setSituation_Entregas(QWidget):
def __init__(self, User, Entrega, Tipo, Window=None):
super().__init__()
self.Tipo = Tipo
try:
Window.close()
except: pass
self.Layout = QGridLayout(self)
self.Tipo_Situacao = "Em Aberto"
self.Label = QLabel('Cod: \nNome: \nRG: \nCPF: \nEnd: \nCEP: \nCodigo Cliente: \nEntregador: \nSituação:', self)
self.Label_Entrega = QLabel(f"{Entrega[0]} \n{Entrega[1]} \n{Entrega[2]}\n {Entrega[3]}\n {Entrega[4]} \n{Entrega[5]} \n{Entrega[6]} \n{Entrega[7]} \n{Entrega[8]}", self)
self.Situatios = QComboBox(self)
self.Situatios.insertItem(0, 'Em Aberto')
self.Situatios.insertItem(1, 'Entregue')
self.Situatios.insertItem(2, 'Endereço não encontrado')
Alter = QPushButton('Alterar',self)
voltar = QPushButton("Voltar", self)
Status = QLabel(self)
self.Situatios.activated[str].connect(self.DefinirSituacao)
self.Layout.addWidget(self.Label, 0, 0)
self.Layout.addWidget(self.Label_Entrega, 0, 1)
self.Layout.addWidget(self.Situatios, 1, 0, 1, 2)
self.Layout.addWidget(Status, 2, 0, 1, 2)
self.Layout.addWidget(Alter, 3, 0, 1, 2)
self.Layout.addWidget(voltar, 4, 0, 1, 2)
voltar.clicked.connect(lambda : Tabela(User, self.Tipo))
Alter.clicked.connect(lambda : Funcao.SQDB().AlterSituation_Entrega(Entrega[0], self.Tipo_Situacao))
Alter.clicked.connect(lambda : Status.setText(f"\n\nALTERAÇÃO EFETUADA. SITUAÇÃO: {self.Tipo_Situacao}"))
Janela.setCentralWidget(self)
def DefinirSituacao(self, text):
self.Tipo_Situacao = text
def setStyle(Window):
Window.setStyleSheet('QMainWindow {background-color: rgb(50, 50, 50)}\
QPushButton {border-radius: 10px; padding: 5px; background-color: rgb(100, 100, 100); border: 1px solid rgb(150, 150, 150)}\
QPushButton:hover:!pressed {background-color: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 rgb(40, 40, 40), stop: 1 rgb(100, 100, 100)); border-style: outset} \
QWidget {font-size: 15px; font: bold; color: rgb(200, 200, 200); background-color: rgb(50, 50, 50)}\
QLineEdit {background: rgb(70,70,70); border-radius: 10px; padding: 3px}\
QComboBox {border-radius: 10px; padding: 2px; background: rgb(100, 100, 100); color: rgb(200, 200, 200)}\
QComboBox:hover:!pressed {background-color: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 rgb(40, 40, 80), stop: 1 rgb(100, 100, 180)); border-style: outset}')
Window.setGeometry(400, 100, 500, 400)
Window.setWindowTitle('MyApp JIPSlok')
App = QApplication(sys.argv)
Janela = Janela_Principal()
sys.exit(App.exec_())
``` |
{
"source": "jonathan-JIPSlok/Sistema_de_cadastro_gui",
"score": 3
} |
#### File: Sistema_de_cadastro_gui/gui/cadaster_window.py
```python
import sqlite3
from PyQt5.QtWidgets import QWidget, QLabel, QPushButton, QGridLayout, QLineEdit
from modules import database
from validate_docbr import CPF
from gui import main_window, initial_window
class window(QWidget):
def __init__(self, mainwindow):
super().__init__()
self.mainwindow = mainwindow
self.grid = QGridLayout(self)
self.title_label = QLabel("Cadastrar Usuario")
self.grid.addWidget(self.title_label)
#Configuring the name input line
self.name_edit = QLineEdit()
self.name_edit.setPlaceholderText("Nome")
self.grid.addWidget(self.name_edit)
#Configuring the CPF input line
self.cpf_edit = QLineEdit()
self.cpf_edit.setPlaceholderText("CPF")
self.name_edit.returnPressed.connect(self.cpf_edit.setFocus)
#Applies the CPF input mask to the text
self.cpf_edit.textChanged.connect(lambda : self.cpf_edit.setInputMask("999.999.999-99") if self.cpf_edit.inputMask() == "" else self.cpf_edit)
self.name_edit.returnPressed.connect(lambda : self.cpf_edit.setInputMask("999.999.999-99"))
self.grid.addWidget(self.cpf_edit)
#Configuring the phone input line
self.tel_edit = QLineEdit()
self.tel_edit.setPlaceholderText("Telefone")
self.cpf_edit.returnPressed.connect(self.tel_edit.setFocus)
#Applies the phone input mask to the line
self.tel_edit.textChanged.connect(lambda : self.tel_edit.setInputMask("(99) 99999-9999") if self.tel_edit.inputMask() == "" else self.tel_edit)
self.cpf_edit.returnPressed.connect(lambda : self.tel_edit.setInputMask("(99) 99999-9999"))
self.grid.addWidget(self.tel_edit)
#Configuring the e-mail input line
self.email_edit = QLineEdit()
self.email_edit.setPlaceholderText("E-mail")
self.tel_edit.returnPressed.connect(self.email_edit.setFocus)
self.grid.addWidget(self.email_edit)
#Configuring the region input line
self.regiao_edit = QLineEdit()
self.regiao_edit.setPlaceholderText("Região")
self.regiao_edit.setMaxLength(2)
self.regiao_edit.textChanged.connect(lambda : self.regiao_edit.setText(self.regiao_edit.text().upper()))
self.email_edit.returnPressed.connect(self.regiao_edit.setFocus)
self.regiao_edit.returnPressed.connect(self.register)
self.grid.addWidget(self.regiao_edit)
self.register_button = QPushButton("Registrar")
self.register_button.clicked.connect(self.register)
self.grid.addWidget(self.register_button)
self.voltar = QPushButton("Voltar")
self.voltar.clicked.connect(lambda : self.mainwindow.setCentralWidget(initial_window.window(self.mainwindow)))
self.grid.addWidget(self.voltar)
self.error_label = QLabel()
self.grid.addWidget(self.error_label)
def register(self):
data = [self.name_edit.text(), self.cpf_edit.text(), self.tel_edit.text(), self.email_edit.text(), self.regiao_edit.text()]
if not '' in data and data[4][0].isnumeric() == False and data[4][1].isnumeric() == False:
if CPF().validate(data[1]):
try:
#Registers the user in the database
db = database.SQL()
db.cadaster_user(data)
db.connection_close()
#Resets the input lines after registration
self.name_edit.setText("")
self.cpf_edit.setText("")
self.tel_edit.setText("")
self.email_edit.setText("")
self.regiao_edit.setText("")
self.error_label.setText("Usuario cadastrado com sucesso!")
self.cpf_edit.setInputMask("")
self.tel_edit.setInputMask("")
except sqlite3.IntegrityError:
self.error_label.setText("Usuario já cadastrado!")
else: self.error_label.setText("CPF invalido!")
else: self.error_label.setText("Preencha todos os dados! ou verifique se a região está correta")
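# Note (assumption about the validate_docbr API, not part of the original
# file): CPF().validate accepts the masked text produced by the
# "999.999.999-99" input mask above and returns a bool, so register() only
# reaches cadaster_user when the check digits are consistent:
#
#   CPF().validate(self.cpf_edit.text())   # True -> insert, False -> "CPF invalido!"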
```
#### File: Sistema_de_cadastro_gui/modules/database.py
```python
from os import sep, mkdir
import sqlite3
from random import randint
class SQL:
def __init__(self):
try: mkdir("Dados")
except: pass
self.connection = sqlite3.connect("Dados" + sep + 'data.db')
self.cursor = self.connection.cursor()
self.create_tables()
def create_tables(self):
self.cursor.execute("CREATE TABLE IF NOT EXISTS usuarios(id INTERGER PRIMARY KEY, nome TEXT, cpf TEXT UNIQUE, telefone TEXT, email TEXT, regiao TEXT)")
def connection_close(self):
self.connection.close()
def cadaster_user(self, data):
self.cursor.execute("INSERT INTO usuarios VALUES(?, ?, ?, ?, ?, ?)", (randint(100000, 999999), data[0], data[1], data[2], data[3], data[4]))
self.connection.commit()
def get_user_regiao(self, data):
return self.cursor.execute("SELECT * FROM usuarios WHERE regiao = ?", (data, )).fetchall()
def get_users(self):
return self.cursor.execute("SELECT * FROM usuarios").fetchall()
def get_user_name(self, data):
return self.cursor.execute("SELECT * FROM usuarios WHERE nome = ?", (data, )).fetchall()
def get_user_cpf(self, data):
return self.cursor.execute("SELECT * FROM usuarios WHERE cpf = ?", (data, )).fetchall()
``` |
{
"source": "jonathanjj19028/twitter_autobase",
"score": 3
} |
#### File: jonathanjj19028/twitter_autobase/app.py
```python
from twitter import Twitter
from time import sleep
from threading import Thread
from datetime import datetime, timezone, timedelta
import constants
from os.path import exists
from os import remove
from html import unescape
from random import randrange
if constants.database == True:
from github import Github
github = Github(constants.Github_token)
def start():
print("Starting program...")
dms = list()
tw = Twitter()
api = tw.api
constants.api = api
me = api.me()
tw.bot_id = me.id
open('follower_data.txt', 'w').truncate()
first = open('follower_data.txt').read()
# sent = api.send_direct_message(recipient_id=constants.Admin_id, text="Twitter autobase is starting...!").id
# tw.delete_dm(sent)
while True:
print("Updating followers...")
# Auto accept message requests
# Comment these if you want close your DM
follower = api.followers_ids(user_id=me.id)
if len(follower) != 0:
try:
if len(first) <= 3:
str_follower = [str(i) for i in follower]
data = " ".join(str_follower)
open("follower_data.txt", "w").write(data)
first = "checked"
del str_follower
data = open('follower_data.txt').read()
data = data.split()
data1 = str()
data2 = data.copy()
for i in follower:
if str(i) not in data:
data1 += " " + str(i)
notif = "YEAY! Sekarang kamu bisa mengirim menfess, jangan lupa baca peraturan base yaa!"
# An error sometimes happens here, so tw.follower is updated after this loop
sent = api.send_direct_message(
recipient_id=i, text=notif).id
tw.delete_dm(sent)
tw.follower = follower
for i in data2:
if int(i) not in follower:
data.remove(i)
if data != data2:
data = " ".join(data)
data = data + data1
new = open("follower_data.txt", "w")
new.write(data)
new.close()
elif data == data2 and len(data1) != 0:
new = open("follower_data.txt", "a")
new.write(data1)
new.close()
del data
del data1
del data2
except Exception as ex:
print("error when send DM to follower")
print("error when get follower from API")
pass
else:
print("error when get follower from API")
if len(dms) != 0:
for i in range(len(dms)):
try:
message = dms[i]['message']
sender_id = dms[i]['sender_id']
screen_name = tw.get_user_screen_name(sender_id)
if constants.database == True:
if exists(filename_github):
open(filename_github, 'a').write(
f'''\n"""{unescape(message)}""" {screen_name} {sender_id}\n''')
else:
open(filename_github, 'w').write(
"MESSAGE USERNAME SENDER_ID\n" +
f'''\n"""{unescape(message)}""" {screen_name} {sender_id}\n''')
print("Heroku Database saved")
notif = f"Yeay, Menfess kamu telah terkirim! https://twitter.com/{me.screen_name}/status/"
if constants.First_Keyword.lower() in message.lower():
# Keyword Deleter
message = message.split()
list_keyword = [constants.First_Keyword.lower(), constants.First_Keyword.upper(),
constants.First_Keyword.capitalize()]
[message.remove(i) for i in list_keyword if i in message]
message = " ".join(message)
if dms[i]['media'] == None:
print("DM will be posted")
if dms[i]['url'] == None:
postid = tw.post_tweet(message)
else:
message = message.split()
message.remove(dms[i]['url'][0])
message = " ".join(message)
postid = tw.post_tweet(
message, dms[i]['url'][1])
if postid != None:
text = notif + str(postid)
sent = api.send_direct_message(
recipient_id=sender_id, text=text).id
else:
sent = api.send_direct_message(
recipient_id=sender_id, text="Maaf ada kesalahan pada sistem :(\ntolong screenshot & laporkan kepada admin").id
tw.delete_dm(sent)
else:
print("DM will be posted with media.")
if dms[i]['url'] == None:
postid = tw.post_tweet_with_media(
message, dms[i]['media'], dms[i]['type'])
else:
message = message.split()
message.remove(dms[i]['url'][0])
message = " ".join(message)
postid = tw.post_tweet_with_media(
message, dms[i]['media'], dms[i]['type'], dms[i]['url'][1])
if postid != None:
text = notif + str(postid)
sent = api.send_direct_message(
recipient_id=sender_id, text=text).id
else:
sent = api.send_direct_message(
recipient_id=sender_id, text="Maaf ada kesalahan pada sistem :(\ntolong screenshot & laporkan kepada admin").id
tw.delete_dm(sent)
else:
sent = api.send_direct_message(
sender_id, "ketentuan keyword menfess kamu tidak sesuai!").id
tw.delete_dm(sent)
except Exception as ex:
print(ex)
sleep(30)
pass
dms = list()
else:
print("Direct message is empty...")
dms = tw.read_dm()
if len(dms) == 0:
sleep(25+randrange(0, 5))
def Check_file_github(new=True):
'''
True: the bot has just started; download the file from GitHub and save it locally.
False: the bot is already running; if the local file exists, it is not overwritten from GitHub.
'''
print("checking github file...")
try:
datee = datetime.now(timezone.utc) + \
timedelta(hours=constants.Timezone)
globals()['filename_github'] = "Database {}-{}-{}.txt".format(
datee.day, datee.month, datee.year)
constants.filename_github = filename_github
contents = repo.get_contents("")
if any(filename_github == content.name for content in contents):
print(f"filename_github detected, set: {str(new)}")
if new == False:
return
for content in contents:
if filename_github == content.name:
contents = content.decoded_content.decode()
if contents[-1] != "\n":
contents += "\n"
break
else:
print("filename_github not detected")
repo.create_file(filename_github, "first commit",
"MESSAGE USERNAME SENDER_ID")
contents = "MESSAGE USERNAME SENDER_ID\n"
if exists(filename_github) == False:
open(filename_github, 'w').write(contents)
else:
pass
if exists("Database {}-{}-{}.txt".format(
datee.day - 1, datee.month, datee.year)):
remove("Database {}-{}-{}.txt".format(
datee.day - 1, datee.month, datee.year))
print("Heroku yesterday's database has been deleted")
else:
print("Heroku yesterday's database doesn't exist")
except Exception as ex:
pass
print(ex)
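# Usage note (derived from the calls elsewhere in this file): the bot runs
# Check_file_github(new=True) once at startup to (re)build the local copy from
# GitHub, and database() calls Check_file_github(new=False) after each upload
# so an existing local file for the current day is left untouched.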
def database():
while True:
try:
# updates every midnight; an update can also be triggered directly from a DM with 'db_update'
# (see constants.py)
datee = datetime.now(timezone.utc) + timedelta(hours=constants.Timezone)
if filename_github != f"Database {datee.day}-{datee.month}-{datee.year}.txt":
print("Github threading active...")
contents = repo.get_contents(filename_github)
repo.update_file(contents.path, "updating database", open(
filename_github).read(), contents.sha)
Check_file_github(new=False)
print("Github Database updated")
sleep(60)
else:
sleep(60)
except Exception as ex:
print(ex)
print("Github threading failed..")
sleep(720)
pass
if __name__ == "__main__":
if constants.database == True:
# True = on, False = off
datee = datetime.now(timezone.utc) + timedelta(hours=constants.Timezone)
global filename_github, repo
filename_github = "Database {}-{}-{}.txt".format(
datee.day, datee.month, datee.year)
repo = github.get_repo(constants.Github_repo)
constants.repo = repo
constants.filename_github = filename_github
Check_file_github(new=True)
Thread(target=database).start()
Thread(target=start).start()
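# Structure sketch (restating the code above, no new behaviour): when
# constants.database is True, the database() thread pushes the day's file to
# GitHub and rolls it over at midnight, while the start() thread polls direct
# messages, posts menfess tweets and appends each one to that file.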
``` |
{
"source": "jonathanj/mantissa",
"score": 2
} |
#### File: listings/interstore/webcal.py
```python
from datetime import timedelta
from epsilon.extime import Time
from nevow.page import renderer
from nevow.loaders import stan
from nevow.tags import div
from nevow.athena import LiveElement
from xmantissa.liveform import TEXT_INPUT, LiveForm, Parameter
class CalendarElement(LiveElement):
docFactory = stan(div[
"It's a calendar!",
div(render="appointments"),
div(render="appointmentForm")])
def __init__(self, calendar):
LiveElement.__init__(self)
self.calendar = calendar
@renderer
def appointments(self, request, tag):
appointments = self.calendar.getAppointments()
for appointment in appointments:
appDiv = div[
"Appointment with ",
appointment.withWhomUsername, "@",
appointment.withWhomDomain, " at ",
appointment.when.asHumanly()]
if appointment.failed is not None:
appDiv[" (Rejected: ", appointment.failed, ")"]
elif appointment.remoteID is None:
appDiv[" (Pending confirmation)"]
tag[appDiv]
return tag
def _requestAppointment(self, whom):
local, domain = whom.split(u"@")
target = self.calendar.calendarIDFor(local, domain)
self.calendar.requestAppointmentWith(target, Time() + timedelta(days=2))
@renderer
def appointmentForm(self, request, tag):
form = LiveForm(
self._requestAppointment,
[Parameter(u"whom", TEXT_INPUT, unicode, u"Whom:",
u"The username of the person with whom "
u"to create an appointment (user@domain).",
None)],
"Request An Appointment")
form.setFragmentParent(self)
return form
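# Flow sketch (restating the code above; "alice@example.com" is only an
# illustrative value): the LiveForm takes a single "whom" text parameter such
# as u"alice@example.com"; _requestAppointment splits it on "@", resolves the
# target with calendarIDFor(local, domain) and schedules the appointment two
# days ahead via requestAppointmentWith().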
```
#### File: listings/siteroot/adminpage.py
```python
from zope.interface import implements
from axiom.item import Item
from axiom.attributes import bytes
from nevow.url import URL
from xmantissa.ixmantissa import ISiteRootPlugin
class RedirectPlugin(Item):
redirectFrom = bytes(default='admin.php')
redirectTo = bytes(default='private')
powerupInterfaces = (ISiteRootPlugin,)
implements(*powerupInterfaces)
def produceResource(self, request, segments, viewer):
if segments == tuple([self.redirectFrom]):
return (URL.fromRequest(request).child(self.redirectTo), ())
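# Behaviour sketch (one reading of the code above): a request for the single
# segment "admin.php" is answered with a redirect to /private on the same
# site, while any other path falls through (implicit None) so other
# ISiteRootPlugin powerups can handle it.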
```
#### File: mantissa/xmantissa/__init__.py
```python
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
from twisted.python import versions
def asTwistedVersion(packageName, versionString):
return versions.Version(
packageName, *map(int, versionString.split('+', 1)[0].split(".")))
version = asTwistedVersion("xmantissa", __version__)
__all__ = ['version', '__version__']
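# Example (assuming a versioneer-style version string): asTwistedVersion(
# "xmantissa", "0.8.1+6.gabcdef") yields Version("xmantissa", 0, 8, 1), since
# everything after the first "+" is discarded before the dotted parts are
# parsed as integers.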
```
#### File: mantissa/xmantissa/myaccount.py
```python
from axiom.item import Item
from axiom.upgrade import registerUpgrader
class MyAccount(Item):
typeName = 'mantissa_myaccount'
schemaVersion = 2
def deleteMyAccount(old):
# Just get rid of the old account object. Don't even create a new one.
old.deleteFromStore()
return None
registerUpgrader(deleteMyAccount, 'mantissa_myaccount', 1, 2)
```
#### File: mantissa/xmantissa/search.py
```python
from __future__ import division
from zope.interface import implements
from twisted.internet import defer
from twisted.python import log, components
from nevow import inevow, athena, tags
from axiom import attributes, item
from axiom.upgrade import registerDeletionUpgrader
from xmantissa import ixmantissa
class SearchResult(item.Item):
"""
A temporary, in-database object associated with a particular search (ie,
one time that one guy typed in that one search phrase) and a single item
which was found in that search. These live in the database to make it easy
to display and sort them, but they are deleted when they get kind of
oldish.
These are no longer used. The upgrader to version 2 unconditionally
deletes them.
"""
schemaVersion = 2
indexedItem = attributes.reference()
identifier = attributes.integer()
registerDeletionUpgrader(SearchResult, 1, 2)
class SearchAggregator(item.Item):
implements(ixmantissa.ISearchAggregator, ixmantissa.INavigableElement)
powerupInterfaces = (ixmantissa.ISearchAggregator, ixmantissa.INavigableElement)
schemaVersion = 1
typeName = 'mantissa_search_aggregator'
installedOn = attributes.reference()
searches = attributes.integer(default=0)
# INavigableElement
def getTabs(self):
return []
# ISearchAggregator
def providers(self):
return list(self.store.powerupsFor(ixmantissa.ISearchProvider))
def count(self, term):
def countedHits(results):
total = 0
for (success, result) in results:
if success:
total += result
else:
log.err(result)
return total
return defer.DeferredList([
provider.count(term)
for provider
in self.providers()], consumeErrors=True).addCallback(countedHits)
def search(self, *a, **k):
self.searches += 1
d = defer.DeferredList([
provider.search(*a, **k)
for provider in self.providers()
], consumeErrors=True)
def searchCompleted(results):
allSearchResults = []
for (success, result) in results:
if success:
allSearchResults.append(result)
else:
log.err(result)
return allSearchResults
d.addCallback(searchCompleted)
return d
def parseSearchTerm(term):
"""
Turn a string search query into a two-tuple of a search term and a
dictionary of search keywords.
"""
terms = []
keywords = {}
for word in term.split():
if word.count(':') == 1:
k, v = word.split(u':')
if k and v:
keywords[k] = v
elif k or v:
terms.append(k or v)
else:
terms.append(word)
term = u' '.join(terms)
if keywords:
return term, keywords
return term, None
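# Worked examples (these follow directly from the loop above; the queries are
# illustrative):
#
#   parseSearchTerm(u'hello world')       # -> (u'hello world', None)
#   parseSearchTerm(u'hello from:bob')    # -> (u'hello', {u'from': u'bob'})
#   parseSearchTerm(u'foo :bar baz:')     # -> (u'foo bar baz', None)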
class AggregateSearchResults(athena.LiveFragment):
fragmentName = 'search'
def __init__(self, aggregator):
super(AggregateSearchResults, self).__init__()
self.aggregator = aggregator
def head(self):
return None
def render_search(self, ctx, data):
req = inevow.IRequest(ctx)
term = req.args.get('term', [None])[0]
charset = req.args.get('_charset_')[0]
if term is None:
return ''
try:
term = term.decode(charset)
except LookupError:
log.err('Unable to decode search query encoded as %s.' % charset)
return tags.div[
"Your browser sent your search query in an encoding that we do not understand.",
tags.br,
"Please set your browser's character encoding to 'UTF-8' (under the View menu in Firefox)."]
term, keywords = parseSearchTerm(term)
d = self.aggregator.search(term, keywords)
def gotSearchResultFragments(fragments):
for f in fragments:
f.setFragmentParent(self)
return fragments
d.addCallback(gotSearchResultFragments)
return d
components.registerAdapter(AggregateSearchResults, SearchAggregator, ixmantissa.INavigableFragment)
```
#### File: mantissa/xmantissa/suspension.py
```python
from twisted.python.components import registerAdapter
from axiom.attributes import reference
from axiom.item import Item
from nevow.page import Element
from xmantissa.ixmantissa import INavigableElement, INavigableFragment
from xmantissa.webnav import Tab
from zope.interface import implements, Interface
class ISuspender(Interface):
"""
Marker interface for suspended powerup facades.
"""
class SuspendedNavigableElement(Item):
implements(INavigableElement, ISuspender)
powerupInterfaces = (INavigableElement, ISuspender)
originalNE = reference()
def getTabs(self):
origTabs = self.originalNE.getTabs()
def proxyTabs(tabs):
for tab in tabs:
yield Tab(tab.name, self.storeID, tab.priority,
proxyTabs(tab.children),
authoritative=tab.authoritative,
linkURL=tab.linkURL)
return proxyTabs(origTabs)
class SuspendedFragment(Element):
"""
Temporary account-suspended fragment.
"""
fragmentName = 'suspend'
live = False
implements(INavigableFragment)
def head(self):
pass
registerAdapter(SuspendedFragment, SuspendedNavigableElement, INavigableFragment)
def suspendJustTabProviders(installation):
"""
Replace INavigableElements with facades that indicate their suspension.
"""
if installation.suspended:
raise RuntimeError("Installation already suspended")
powerups = list(installation.allPowerups)
for p in powerups:
if INavigableElement.providedBy(p):
p.store.powerDown(p, INavigableElement)
sne = SuspendedNavigableElement(store=p.store, originalNE=p)
p.store.powerUp(sne, INavigableElement)
p.store.powerUp(sne, ISuspender)
installation.suspended = True
def unsuspendTabProviders(installation):
"""
Remove suspension facades and replace them with their originals.
"""
if not installation.suspended:
raise RuntimeError("Installation not suspended")
powerups = list(installation.allPowerups)
allSNEs = list(powerups[0].store.powerupsFor(ISuspender))
for p in powerups:
for sne in allSNEs:
if sne.originalNE is p:
p.store.powerDown(sne, INavigableElement)
p.store.powerDown(sne, ISuspender)
p.store.powerUp(p, INavigableElement)
sne.deleteFromStore()
installation.suspended = False
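# Round-trip sketch (illustrative; `installation` is any object carrying the
# `allPowerups` and `suspended` attributes these functions expect):
#
#   suspendJustTabProviders(installation)   # INavigableElement powerups now facaded
#   unsuspendTabProviders(installation)     # originals restored, facades deleted
#
# Calling either function twice in a row raises RuntimeError, per the guards above.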
```
#### File: test/acceptance/liveform.py
```python
from xmantissa.liveform import (TEXT_INPUT, InputError, Parameter, LiveForm,
ListChangeParameter, ChoiceParameter, Option)
def coerce(theText):
"""
Reject all values of C{theText} except C{'hello, world'}.
"""
if theText != u'hello, world':
raise InputError(u"Try entering 'hello, world'")
def inputerrors():
"""
Create a L{LiveForm} which rejects most inputs in order to demonstrate how
L{InputError} is handled in the browser.
"""
form = LiveForm(
lambda theText: None,
[Parameter(u'theText', TEXT_INPUT, coerce, 'Some Text')],
u'LiveForm input errors acceptance test',
)
return form
_parameterDefaults = [{u'foo': 1, u'bar': 2, u'baz': ['1']},
{u'foo': 10, u'bar': 20, u'baz': ['2']}]
def _listChangeParameter(**parameterKwargs):
counter = [0]
def theCallable(repeatableFoo):
for create in repeatableFoo.create:
create.setter(u'other thing %d' % (counter[0],))
counter[0] += 1
return u'Created %s, edited %s, deleted %s' % (repeatableFoo.create,
repeatableFoo.edit,
repeatableFoo.delete)
form = LiveForm(
theCallable,
[ListChangeParameter(
u'repeatableFoo',
[Parameter('foo', TEXT_INPUT, int, 'Enter a number'),
Parameter('bar', TEXT_INPUT, int, 'And another'),
ChoiceParameter(
'baz',
[Option('Value 1', '1', True),
Option('Value 2', '2', False)],
'Pick something')],
modelObjectDescription=u'Repeatable Foo',
**parameterKwargs)])
form.jsClass = u'Mantissa.Test.EchoingFormWidget'
return form
def listChangeParameter():
"""
Create a L{LiveForm} with a L{ListChangeParameter}.
"""
return _listChangeParameter(
defaults=_parameterDefaults,
modelObjects=(u'the first thing', u'the second thing'))
def listChangeParameterCompact():
"""
Create a compact L{LiveForm} with a L{ListChangeParameter}.
"""
liveForm = listChangeParameter()
liveForm.compact()
return liveForm
def listChangeParameterNoDefaults():
"""
Create a L{LiveForm} with a L{ListChangeParameter} and no defaults.
"""
return _listChangeParameter(defaults=[], modelObjects=[])
def choiceParameter():
"""
Create a L{LiveForm} with a L{ChoiceParameter}.
"""
return LiveForm(
lambda **k: unicode(k),
[ChoiceParameter(
'choice',
[Option('Thing 1', 'thing-one', False),
Option('Thing 2', 'thing-two', True),
Option('Thing 3', 'thing-three', False)],
'This is a choice between things')])
def choiceParameterCompact():
"""
Compact version of the form returned by L{choiceParameter}.
"""
liveForm = choiceParameter()
liveForm.compact()
return liveForm
```
#### File: test/historic/stub_addPerson1to2.py
```python
from axiom.test.historic.stubloader import saveStub
from xmantissa.people import AddPerson
def createDatabase(s):
AddPerson(store=s)
if __name__ == '__main__':
saveStub(createDatabase, 10664)
```
#### File: test/historic/stub_defaultPreferenceCollection1to2.py
```python
from xmantissa import prefs
def createDatabase(s):
prefs.DefaultPreferenceCollection(store=s)
from axiom.test.historic.stubloader import saveStub
if __name__ == '__main__':
saveStub(createDatabase)
```
#### File: test/historic/stub_mugshot2to3.py
```python
from twisted.python.filepath import FilePath
from axiom.test.historic.stubloader import saveStub
from xmantissa.people import Mugshot, Person
MUGSHOT_TYPE = u'image/png'
MUGSHOT_BODY_PATH_SEGMENTS = ('mugshot',)
def createDatabase(store):
"""
Make L{Person} and L{Mugshot} items. Set the C{body} and C{smallerBody}
attributes of the L{Mugshot} item to point at a copy of
I{xmantissa/test/resources/square.png} beneath the store's directory.
"""
atomicImageFile = store.newFile(*MUGSHOT_BODY_PATH_SEGMENTS)
imageFilePath = FilePath(__file__).parent().parent().child(
'resources').child('square.png')
atomicImageFile.write(imageFilePath.getContent())
atomicImageFile.close()
Mugshot(store=store,
person=Person(store=store),
body=atomicImageFile.finalpath,
smallerBody=atomicImageFile.finalpath,
type=MUGSHOT_TYPE)
if __name__ == '__main__':
saveStub(createDatabase, 13812)
```
#### File: test/historic/stub_port1to2.py
```python
from OpenSSL.crypto import FILETYPE_PEM
from twisted.internet.ssl import PrivateCertificate, KeyPair
from axiom.item import Item
from axiom.attributes import text
from axiom.dependency import installOn
from axiom.test.historic.stubloader import saveStub
from xmantissa.port import TCPPort, SSLPort
from xmantissa.website import WebSite
# Unfortunately, the test module for this store binds ports. So pick some
# improbably port numbers and hope they aren't bound. If they are, the test
# will fail. Hooray! -exarkun
TCP_PORT = 29415
SSL_PORT = 19224
def createDatabase(siteStore):
"""
Populate the given Store with a TCPPort and SSLPort.
"""
factory = WebSite(store=siteStore)
installOn(factory, siteStore)
installOn(
TCPPort(store=siteStore, portNumber=TCP_PORT, factory=factory),
siteStore)
certificatePath = siteStore.newFilePath('certificate')
key = KeyPair.generate()
cert = key.selfSignedCert(1)
certificatePath.setContent(
cert.dump(FILETYPE_PEM) +
key.dump(FILETYPE_PEM))
installOn(
SSLPort(store=siteStore, portNumber=SSL_PORT,
certificatePath=certificatePath,
factory=factory),
siteStore)
if __name__ == '__main__':
saveStub(createDatabase, 12731)
```
#### File: test/historic/stub_privateApplication4to5.py
```python
from axiom.test.historic.stubloader import saveStub
from axiom.dependency import installOn
from axiom.userbase import LoginSystem
from xmantissa.webapp import PrivateApplication
USERNAME = u'testuser'
DOMAIN = u'localhost'
PREFERRED_THEME = u'theme-preference'
HIT_COUNT = 8765
PRIVATE_KEY = 123456
def createDatabase(store):
"""
Instantiate a L{PrivateApplication} in C{store} and install it.
"""
loginSystem = LoginSystem(store=store)
installOn(loginSystem, store)
account = loginSystem.addAccount(USERNAME, DOMAIN, None)
subStore = account.avatars.open()
app = PrivateApplication(
store=subStore,
preferredTheme=PREFERRED_THEME,
privateKey=PRIVATE_KEY)
installOn(app, subStore)
if __name__ == '__main__':
saveStub(createDatabase, 16534)
```
#### File: test/historic/stub_ticket1to2.py
```python
from axiom.test.historic.stubloader import saveStub
from xmantissa.signup import SignupConfiguration, FreeTicketSignup, Multifactor
from xmantissa.provisioning import BenefactorFactory
from xmantissa.webadmin import AdministrativeBenefactor
def createDatabase(s):
mff = BenefactorFactory("", "", AdministrativeBenefactor)
sc = SignupConfiguration(store=s)
sc.installOn(s)
s.parent = s
signup = sc.createSignup(u'bob', FreeTicketSignup, {'prefixURL': u'/signup'},
{mff: {}}, None,
u'Sign Up')
t = signup.booth.createTicket(signup, u'<EMAIL>', signup.benefactor)
if __name__ == '__main__':
saveStub(createDatabase, 10876)
```
#### File: test/historic/stub_userinfo1to2.py
```python
from axiom.test.historic.stubloader import saveStub
from xmantissa.signup import UserInfo
FIRST = u'Alice'
LAST = u'Smith'
def createDatabase(store):
"""
Create a version 1 L{UserInfo} item in the given store.
"""
UserInfo(store=store, firstName=FIRST, lastName=LAST)
if __name__ == '__main__':
saveStub(createDatabase, 13447)
```
#### File: test/historic/test_developerapplication1to2.py
```python
from axiom.test.historic import stubloader
from xmantissa.webadmin import DeveloperApplication
from xmantissa.webapp import PrivateApplication
class DATestCase(stubloader.StubbedTest):
def testUpgrade(self):
"""
Ensure upgraded fields refer to correct items.
"""
self.assertEqual(self.store.findUnique(DeveloperApplication).privateApplication,
self.store.findUnique(PrivateApplication))
```
#### File: test/historic/test_frontpage1to2.py
```python
from axiom.test.historic import stubloader
from xmantissa.publicweb import FrontPage
class FrontPageTest(stubloader.StubbedTest):
"""
Upgrader test for L{xmantissa.publicweb.FrontPage}.
"""
def testUpgrade(self):
"""
All the attributes of L{xmantissa.publicweb.FrontPage} are
present after upgrading.
"""
fp = self.store.findUnique(FrontPage)
self.assertEqual(fp.publicViews, 17)
self.assertEqual(fp.privateViews, 42)
self.assertEqual(fp.prefixURL, u'')
self.assertEqual(fp.defaultApplication, None)
```
#### File: test/historic/test_messagequeue1to2.py
```python
from axiom.test.historic.stubloader import StubbedTest
from xmantissa.ixmantissa import IMessageRouter
from xmantissa.interstore import MessageQueue
from xmantissa.test.historic.stub_messagequeue1to2 import MESSAGE_COUNT
class MessageQueueUpgradeTests(StubbedTest):
def test_attributes(self):
"""
The value of the C{messageCounter} attribute is preserved by the
upgrade.
"""
self.assertEquals(
self.store.findUnique(MessageQueue).messageCounter,
MESSAGE_COUNT)
def test_powerup(self):
"""
The L{MessageQueue} is still a L{IMessageRouter} powerup on its store
after the upgrade.
"""
self.assertEquals(
[self.store.findUnique(MessageQueue)],
list(self.store.powerupsFor(IMessageRouter)))
```
#### File: test/historic/test_mugshot2to3.py
```python
from twisted.trial.unittest import SkipTest
from axiom.test.historic.stubloader import StubbedTest
from xmantissa.people import Mugshot, Person
from xmantissa.test.historic.stub_mugshot2to3 import (
MUGSHOT_TYPE, MUGSHOT_BODY_PATH_SEGMENTS)
class MugshotUpgraderTestCase(StubbedTest):
"""
Tests for L{Mugshot}'s version 2 to version 3 upgrader.
"""
def setUp(self):
"""
Skip the tests if PIL is unavailable.
"""
try:
import PIL
except ImportError:
raise SkipTest('PIL is not available')
return StubbedTest.setUp(self)
def test_attributesCopied(self):
"""
The C{person}, C{smallerBody} and C{type} attributes of L{Mugshot}
should have been copied over from the previous version.
"""
from PIL import Image
mugshot = self.store.findUnique(Mugshot)
self.assertIdentical(mugshot.person, self.store.findUnique(Person))
self.assertEqual(mugshot.type, MUGSHOT_TYPE)
self.assertEqual(
mugshot.body, self.store.newFilePath(*MUGSHOT_BODY_PATH_SEGMENTS))
# mugshot.body should be untouched, it should have the same dimensions
# as test/resources/square.png (240x240)
self.assertEqual(Image.open(mugshot.body.open()).size, (240, 240))
def test_smallerBodyAttribute(self):
"""
L{Mugshot.smallerBody} should point to an image with the same
dimensions as the current value of L{Mugshot.smallerSize}.
"""
from PIL import Image
mugshot = self.store.findUnique(Mugshot)
self.assertEqual(
Image.open(mugshot.smallerBody.open()).size,
(mugshot.smallerSize, mugshot.smallerSize))
```
#### File: test/historic/test_pyLuceneIndexer3to4.py
```python
from axiom.test.historic import stubloader
from xmantissa.fulltext import PyLuceneIndexer
class PyLuceneIndexerTestCase(stubloader.StubbedTest):
def testUpgrade(self):
index = self.store.findUnique(PyLuceneIndexer)
self.assertEqual(index.indexDirectory, 'foo.index')
# we called reset(), and there are no indexed items
self.assertEqual(index.indexCount, 0)
self.assertEqual(index.installedOn, self.store)
```
#### File: test/historic/test_remoteIndexer1to2.py
```python
from axiom.test.historic.stubloader import StubbedTest
from axiom.batch import processor
from xmantissa.fulltext import HypeIndexer, XapianIndexer, PyLuceneIndexer
from xmantissa.test.historic.stub_remoteIndexer1to2 import StubSource
class RemoteIndexerTestCase(StubbedTest):
"""
Test that each kind of remote indexer correctly becomes associated with an
item source when being upgraded to version two.
"""
def testUpgradeHype(self):
indexer = self.store.findUnique(HypeIndexer)
self.assertEquals(
[self.store.findUnique(StubSource)],
list(indexer.getSources()))
def testUpgradeXapian(self):
indexer = self.store.findUnique(XapianIndexer)
self.assertEquals(
[self.store.findUnique(StubSource)],
list(indexer.getSources()))
def testUpgradePyLucene(self):
indexer = self.store.findUnique(PyLuceneIndexer)
self.assertEquals(
[self.store.findUnique(StubSource)],
list(indexer.getSources()))
```
#### File: test/historic/test_statBucket1to2.py
```python
from axiom.test.historic import stubloader
from xmantissa.stats import StatBucket
class FreeTicketSignupTestCase(stubloader.StubbedTest):
def testUpgrade(self):
for bucket in self.store.query(StatBucket):
self.assertEqual(bucket.type, "axiom_commits")
```
#### File: test/historic/test_website4to5.py
```python
from twisted.application.service import IService
from twisted.cred.portal import IRealm
from nevow.inevow import IResource
from axiom.test.historic.stubloader import StubbedTest
from axiom.dependency import installedOn
from axiom.userbase import LoginSystem
from xmantissa.port import TCPPort, SSLPort
from xmantissa.web import SiteConfiguration
from xmantissa.website import WebSite
from xmantissa.publicweb import AnonymousSite
from xmantissa.ixmantissa import IMantissaSite, IWebViewer
from xmantissa.test.historic.stub_website4to5 import cert
class WebSiteUpgradeTests(StubbedTest):
expectedHostname = u"example.net"
def test_preservedAttributes(self):
"""
Test that some data from the simple parts of the schema is preserved.
"""
site = self.store.findUnique(SiteConfiguration)
self.assertEqual(site.httpLog, self.store.filesdir.child('httpd.log'))
self.assertEqual(site.hostname, self.expectedHostname)
def test_portNumber(self):
"""
Test that the WebSite's portNumber attribute is transformed into a
TCPPort instance.
"""
site = self.store.findUnique(SiteConfiguration)
ports = list(self.store.query(TCPPort, TCPPort.factory == site))
self.assertEqual(len(ports), 1)
self.assertEqual(ports[0].portNumber, 8088)
self.assertEqual(installedOn(ports[0]), self.store)
self.assertEqual(list(self.store.interfacesFor(ports[0])), [IService])
def test_securePortNumber(self):
"""
Test that the WebSite's securePortNumber attribute is transformed into
an SSLPort instance.
"""
site = self.store.findUnique(SiteConfiguration)
ports = list(self.store.query(SSLPort, SSLPort.factory == site))
self.assertEqual(len(ports), 1)
self.assertEqual(ports[0].portNumber, 6443)
certPath = self.store.newFilePath('server.pem')
self.assertEqual(ports[0].certificatePath, certPath)
self.assertEqual(certPath.getContent(), cert)
self.assertEqual(installedOn(ports[0]), self.store)
self.assertEqual(list(self.store.interfacesFor(ports[0])), [IService])
def test_deleted(self):
"""
The L{WebSite} should no longer exist in the site store.
"""
self.assertEqual(list(self.store.query(WebSite)), [])
def test_anonymousSite(self):
"""
An L{AnonymousSite} is created and installed on the site store.
"""
resource = self.store.findUnique(AnonymousSite)
self.assertEqual(list(self.store.interfacesFor(resource)),
[IResource, IMantissaSite, IWebViewer])
self.assertIdentical(installedOn(resource), self.store)
self.assertIdentical(resource.loginSystem, IRealm(self.store))
def test_singleLoginSystem(self):
"""
The upgrade should not create extra L{LoginSystem} items.
"""
self.assertEqual(self.store.query(LoginSystem).count(), 1)
def test_userStore(self):
"""
Test that WebSites in user stores upgrade without errors.
"""
ls = self.store.findUnique(LoginSystem)
substore = ls.accountByAddress(u'testuser', u'localhost').avatars.open()
d = substore.whenFullyUpgraded()
def fullyUpgraded(ignored):
web = substore.findUnique(WebSite)
self.assertEqual(web.hitCount, 321)
return d.addCallback(fullyUpgraded)
def tearDown(self):
d = StubbedTest.tearDown(self)
def flushit(ign):
from epsilon.cooperator import SchedulerStopped
self.flushLoggedErrors(SchedulerStopped)
return ign
return d.addCallback(flushit)
```
#### File: xmantissa/test/livetest_forms.py
```python
import textwrap
from nevow import loaders, tags
from nevow.livetrial import testcase
from xmantissa import liveform
class TextInput(testcase.TestCase):
jsClass = u'Mantissa.Test.Text'
def getWidgetDocument(self):
f = liveform.LiveForm(
self.submit,
[liveform.Parameter('argument',
liveform.TEXT_INPUT,
unicode,
'A text input field: ',
default=u'hello world')])
f.setFragmentParent(self)
return f
def submit(self, argument):
self.assertEquals(argument, u'hello world')
class MultiTextInput(testcase.TestCase):
jsClass = u'Mantissa.Test.MultiText'
def submit(self, sequence):
self.assertEquals(sequence, [1, 2, 3, 4])
def getWidgetDocument(self):
f = liveform.LiveForm(
self.submit,
(liveform.ListParameter('sequence',
int,
4,
'A bunch of text inputs: ',
defaults=(1, 2, 3, 4)),))
f.setFragmentParent(self)
return f
class TextArea(testcase.TestCase):
jsClass = u'Mantissa.Test.TextArea'
defaultText = textwrap.dedent(u"""
Come hither, sir.
Though it be honest, it is never good
To bring bad news. Give to a gracious message
An host of tongues; but let ill tidings tell
Themselves when they be felt.
""").strip()
def submit(self, argument):
self.assertEquals(argument, self.defaultText)
def getWidgetDocument(self):
f = liveform.LiveForm(
self.submit,
[liveform.Parameter('argument',
liveform.TEXTAREA_INPUT,
unicode,
'A text area: ',
default=self.defaultText)])
f.setFragmentParent(self)
return f
class Select(testcase.TestCase):
jsClass = u'Mantissa.Test.Select'
def submit(self, argument):
self.assertEquals(argument, u"apples")
def getWidgetDocument(self):
# XXX No support for rendering these yet!
f = liveform.LiveForm(
self.submit,
[liveform.Parameter('argument', None, unicode)])
f.docFactory = loaders.stan(tags.form(render=tags.directive('liveElement'))[
tags.select(name="argument")[
tags.option(value="apples")["apples"],
tags.option(value="oranges")["oranges"]],
tags.input(type='submit', render=tags.directive('submitbutton'))])
f.setFragmentParent(self)
return f
class Choice(testcase.TestCase):
jsClass = u'Mantissa.Test.Choice'
def submit(self, argument):
self.assertEquals(argument, 2)
def getWidgetDocument(self):
f = liveform.LiveForm(
self.submit,
[liveform.ChoiceParameter('argument',
[('One', 1, False),
('Two', 2, True),
('Three', 3, False)])])
f.setFragmentParent(self)
return f
class ChoiceMultiple(testcase.TestCase):
jsClass = u'Mantissa.Test.ChoiceMultiple'
def submit(self, argument):
self.assertIn(1, argument)
self.assertIn(3, argument)
def getWidgetDocument(self):
f = liveform.LiveForm(
self.submit,
[liveform.ChoiceParameter('argument',
[('One', 1, True),
('Two', 2, False),
('Three', 3, True)],
"Choosing multiples from a list.",
multiple=True)])
f.setFragmentParent(self)
return f
SPECIAL = object() # guaranteed to fuck up JSON if it ever gets there by
# accident.
class Traverse(testcase.TestCase):
jsClass = u'Mantissa.Test.Traverse'
def submit(self, argument, group):
self.assertEquals(argument, u'hello world')
self.assertEquals(group, SPECIAL)
def paramfilter(self, param1):
self.assertEquals(param1, u'goodbye world')
return SPECIAL
def getWidgetDocument(self):
f = liveform.LiveForm(
self.submit,
[liveform.Parameter('argument',
liveform.TEXT_INPUT,
unicode,
'A text input field: ',
default=u'hello world'),
liveform.Parameter('group',
liveform.FORM_INPUT,
liveform.LiveForm(self.paramfilter,
[liveform.Parameter
('param1',
liveform.TEXT_INPUT,
unicode,
'Another input field: ',
default=u'goodbye world')]),
'A form input group: ',
)])
f.setFragmentParent(self)
return f
class SetInputValues(testcase.TestCase):
jsClass = u'Mantissa.Test.SetInputValues'
def submit(self, choice, choiceMult, text, passwd, textArea, checkbox):
"""
Assert that all input values have been reversed/inverted
"""
self.assertEqual(choice, 1)
self.assertEqual(choiceMult, (2, 3))
self.assertEqual(text, 'dlrow olleh')
        self.assertEqual(passwd, 'yek terces')
self.assertEqual(textArea, '2 dlrow olleh')
self.failIf(checkbox)
def getWidgetDocument(self):
"""
Make a LiveForm with one of each kind of input, except for radio
buttons, since with the current liveform support for them it's
difficult to use them with a single form, and it's not so important to
do anything else right now
"""
f = liveform.LiveForm(
self.submit,
(liveform.ChoiceParameter(
'choice',
(('0', 0, True), ('1', 1, False))),
liveform.ChoiceParameter(
'choiceMult',
(('0', 0, True), ('1', 1, True),
('2', 2, False), ('3', 3, False)),
multiple=True),
liveform.Parameter(
'text',
liveform.TEXT_INPUT,
unicode,
default=u'hello world'),
liveform.Parameter(
'passwd',
liveform.PASSWORD_INPUT,
unicode,
default=u'secret key'),
liveform.Parameter(
'textArea',
liveform.TEXTAREA_INPUT,
unicode,
default=u'hello world 2'),
liveform.Parameter(
'checkbox',
liveform.CHECKBOX_INPUT,
bool,
default=True)))
f.setFragmentParent(self)
return f
class FormName(testcase.TestCase):
"""
Test that the form name is correctly set client-side
"""
jsClass = u'Mantissa.Test.FormName'
def getWidgetDocument(self):
"""
Make a nested form
"""
f = liveform.LiveForm(
lambda **k: None,
(liveform.Parameter(
'inner-form',
liveform.FORM_INPUT,
liveform.LiveForm(
lambda **k: None,
(liveform.Parameter(
'inner-parameter',
liveform.TEXT_INPUT,
unicode,
''),), ())),))
f.setFragmentParent(self)
return f
```
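The live tests above all exercise the same construction pattern: build a `liveform.LiveForm` around a submit callable and a list of parameters, then parent it to the page. The following is a minimal sketch of that pattern distilled from the file, not additional Mantissa API; the callback name and the commented-out parent fragment are illustrative placeholders.
```python
# Minimal sketch of the LiveForm construction pattern used in the tests above.
# The callback name and the parent fragment are illustrative placeholders.
from xmantissa import liveform

def submitted(argument):
    # The Parameter's coercer (unicode) has already been applied here.
    return argument

form = liveform.LiveForm(
    submitted,
    [liveform.Parameter('argument',
                        liveform.TEXT_INPUT,
                        unicode,
                        'A text input field: ',
                        default=u'hello world')])
# In a real page the form is parented to a containing live fragment:
# form.setFragmentParent(someLiveElement)
```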
#### File: xmantissa/test/test_cachejs.py
```python
from hashlib import sha1
from twisted.trial.unittest import TestCase
from twisted.python.filepath import FilePath
from nevow.inevow import IRequest
from nevow.context import WovenContext
from nevow.testutil import FakeRequest
from xmantissa.cachejs import HashedJSModuleProvider, CachedJSModule
class JSCachingTestCase(TestCase):
"""
Tests for L{xmantissa.cachejs}.
"""
hostname = 'test-mantissa-js-caching.example.com'
def setUp(self):
"""
Create a L{HashedJSModuleProvider} and a dummy module.
"""
self.MODULE_NAME = 'Dummy.Module'
self.MODULE_CONTENT = '/* Hello, world. /*\n'
self.moduleFile = self.mktemp()
fObj = file(self.moduleFile, 'w')
fObj.write(self.MODULE_CONTENT)
fObj.close()
m = HashedJSModuleProvider()
self.moduleProvider = m
self._wasModified = CachedJSModule.wasModified.im_func
self.callsToWasModified = 0
def countCalls(other):
self.callsToWasModified += 1
return self._wasModified(other)
CachedJSModule.wasModified = countCalls
def tearDown(self):
"""
put L{CachedJSModule} back the way we found it
"""
CachedJSModule.wasModified = self._wasModified
def _render(self, resource):
"""
Test helper which tries to render the given resource.
"""
ctx = WovenContext()
req = FakeRequest(headers={'host': self.hostname})
ctx.remember(req, IRequest)
return req, resource.renderHTTP(ctx)
def test_hashExpiry(self):
"""
L{HashedJSModuleProvider.resourceFactory} should return a L{static.Data}
with an C{expires} value far in the future.
"""
self.moduleProvider.moduleCache[self.MODULE_NAME] = CachedJSModule(
self.MODULE_NAME, FilePath(self.moduleFile))
d, segs = self.moduleProvider.locateChild(None,
[sha1(self.MODULE_CONTENT).hexdigest(),
self.MODULE_NAME])
self.assertEqual([], segs)
d.time = lambda: 12345
req, result = self._render(d)
self.assertEquals(
req.headers['expires'],
'Tue, 31 Dec 1974 03:25:45 GMT')
self.assertEquals(
result,
'/* Hello, world. /*\n')
def test_getModule(self):
"""
L{HashedJSModuleProvider.getModule} should only load modules once;
subsequent calls should return the cached module object.
"""
module = self.moduleProvider.getModule("Mantissa.Test.Dummy")
self.failUnlessIdentical(module, self.moduleProvider.getModule(
"Mantissa.Test.Dummy"))
def test_dontStat(self):
"""
L{HashedJSModuleProvider.getModule} shouldn't hit the disk more than
once per module.
"""
module1 = self.moduleProvider.getModule("Mantissa.Test.Dummy")
module2 = self.moduleProvider.getModule("Mantissa.Test.Dummy")
self.assertEqual(self.callsToWasModified, 1)
```
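A brief note on what these tests pin down: module resources are addressed by a URL whose first segment is a content hash, so a changed module gets a new URL and the old one can be served with a far-future expires header. Below is a minimal sketch of forming those child segments, using only names and data from the file above; the surrounding resource plumbing is omitted.
```python
# Minimal sketch: the hashed child segments used by locateChild in the tests
# above. Only the hashing step is shown.
from hashlib import sha1

MODULE_NAME = 'Dummy.Module'
MODULE_CONTENT = '/* Hello, world. /*\n'

# The first URL segment is the SHA-1 hex digest of the module content, so the
# URL changes whenever the module does and stale copies are never served.
segments = [sha1(MODULE_CONTENT).hexdigest(), MODULE_NAME]
```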
#### File: xmantissa/test/test_interstore.py
```python
import gc
from datetime import timedelta
from zope.interface import implements
from twisted.trial.unittest import TestCase
from twisted.internet.defer import Deferred
from twisted.protocols.amp import Box, Command, Integer, String
from epsilon.extime import Time
from axiom.iaxiom import IScheduler
from axiom.store import Store
from axiom.errors import UnsatisfiedRequirement
from axiom.item import Item, POWERUP_BEFORE
from axiom.attributes import text, bytes, integer, boolean, inmemory
from axiom.userbase import LoginSystem, LoginMethod, LoginAccount
from axiom.dependency import installOn
from axiom.scheduler import TimedEvent
from xmantissa.interstore import (
# Public Names
MessageQueue, AMPMessenger, LocalMessageRouter, Value,
AMPReceiver, commandMethod, answerMethod, errorMethod,
SenderArgument, TargetArgument,
# Constants
AMP_MESSAGE_TYPE, AMP_ANSWER_TYPE, DELIVERY_ERROR,
# Error Types
ERROR_REMOTE_EXCEPTION, ERROR_NO_SHARE, ERROR_NO_USER, ERROR_BAD_SENDER,
# Private Names
_RETRANSMIT_DELAY, _QueuedMessage, _AlreadyAnswered, _FailedAnswer,
_AMPExposer, _AMPErrorExposer)
from xmantissa.sharing import getEveryoneRole, Identifier
from xmantissa.error import (
MessageTransportError, BadSender, UnknownMessageType, RevertAndRespond,
MalformedMessage)
from xmantissa.ixmantissa import IMessageReceiver, IMessageRouter
class SampleException(Exception):
"""
Something didn't happen because of a problem.
"""
class StubReceiver(Item):
"""
This is a message receiver that will store a message sent to it for
inspection by tests.
"""
implements(IMessageReceiver)
messageType = text(
doc="""
The message type which C{messageReceived} should put into its return
value.
""")
messageData = bytes(
doc="""
The message data which C{messageReceived} should put into its return
value.
""")
inconsistent = boolean(
doc="""
This value is set to True during the execution of C{messageReceived},
but False afterwards. If everything is properly transactional it
should never be observably false by other code.
""")
buggy = boolean(allowNone=False, default=False,
doc="""
C{messageReceived} should raise a L{SampleException}.
""")
badReturn = boolean(allowNone=False, default=False,
doc="""
C{messageReceived} should return L{None}.
""")
receivedCount = integer(default=0,
doc="""
This is a counter of the number of messages
received by C{messageReceived}.
""")
reciprocate = boolean(allowNone=False, default=False,
doc="""
C{messageReceived} should respond to its C{sender}
parameter with a symmetric message in addition to
answering.
""")
revertType = text(allowNone=True,
doc="""
If set, this specifies the type of the
L{RevertAndRespond} exception that C{messageReceived}
should raise.
""")
revertData = bytes(allowNone=True,
doc="""
If C{revertType} is set, this specifies the data of the
L{RevertAndRespond} exception that C{messageReceived}
should raise.
""")
def messageQueue(self):
"""
This is a temporary workaround; see ticket #2640 for details on the way
this method should be implemented in the future.
"""
return self.store.findUnique(MessageQueue)
def messageReceived(self, value, sender, receiver):
"""
A message was received. Increase the message counter and store its
contents.
"""
self.receivedCount += 1
self.messageType = value.type
self.messageData = value.data
self.inconsistent = True
if self.buggy:
raise SampleException("Sample Message")
if self.revertType is not None:
raise RevertAndRespond(Value(self.revertType,
self.revertData))
self.inconsistent = False
if self.badReturn:
return None
if self.reciprocate:
self.messageQueue().queueMessage(
receiver, sender, Value(value.type + u'.response',
value.data + ' response'))
return Value(u"custom.message.type", "canned response")
class StubSlowRouter(Item):
"""
Like L{LocalMessageRouter}, but don't actually deliver the messages until
the test forces them to be delivered.
By way of several parameters to `flushMessages`, this stub implementation
allows for all of the arbitrary ways in which a potential networked
implementation is allowed to behave - dropping messages, repeating
messages, and even failing in buggy ways.
Note: this must be kept in memory for the duration of any test using it.
@ivar messages: a list of (sender, target, value, messageID) tuples
received by routeMessage.
@ivar acks: a list of (deferred, (sender, target, value, messageID))
tuples, representing an answer received by routeAnswer and the deferred
that was returned to indicate its delivery.
"""
dummy = integer(
doc="""
No state on this item is persistent; this is just to satisfy Axiom's schema
requirement.
""")
messages = inmemory()
acks = inmemory()
def localRouter(self):
"""
Return a L{LocalMessageRouter} for this slow router's store.
"""
return LocalMessageRouter(self.store.findUnique(LoginSystem))
def activate(self):
"""
Initialize temporary list to queue messages.
"""
self.messages = []
self.acks = []
def routeMessage(self, sender, target, value, messageID):
"""
Stub implementation of L{IMessageRouter.routeMessage} that just appends
to a list in memory, and later delegates from that list to the local
router.
"""
self.messages.append((sender, target, value, messageID))
def routeAnswer(self, originalSender, originalTarget, value, messageID):
"""
Stub implementation of L{IMessageRouter.routeAnswer} that just
appends to a list in memory.
"""
D = Deferred()
self.acks.append((D, (originalSender, originalTarget, value,
messageID)))
return D
def flushMessages(self, dropAcks=False,
dropAckErrorType=MessageTransportError,
stallAcks=False,
repeatAcks=False):
"""
Delegate all messages queued in memory with routeMessage to the
specified local router.
@param dropAcks: a boolean, indicating whether to drop the answers
queued by routeAnswer.
@param dropAckErrorType: an exception type, indicating what exception
to errback the Deferreds returned by routeAnswer with.
@param stallAcks: a boolean, indicating whether to keep, but not act,
on the answers queued by routeAnswer.
@param repeatAcks: a boolean, indicating whether to repeat all of the
acks the next time flushMessages is called.
"""
m = self.messages[:]
self.messages = []
for message in m:
self.localRouter().routeMessage(*message)
if dropAcks:
for D, ack in self.acks:
D.errback(dropAckErrorType())
self.acks = []
if not stallAcks:
for D, ack in self.acks:
self.localRouter().routeAnswer(*ack).chainDeferred(D)
if repeatAcks:
# the Deferreds are used up, so we need a fresh batch for the
# next run-through (although these will be ignored)
self.acks = [(Deferred(), ack) for (D, ack) in self.acks]
else:
self.acks = []
def spuriousDeliveries(self):
"""
Simulate a faulty transport, and deliver all the currently pending
messages without paying attention to their results.
"""
for message in self.messages:
self.localRouter().routeMessage(*message)
class StubDeliveryConsequence(Item):
"""
This implements a delivery consequence.
@ivar responses: a tuple of (answer-type, answer-data, message-type,
message-data, sender, target), listing all the answers received by
answerReceived.
@ivar bucket: a list which will have this L{StubDeliveryConsequence}
appended to it when a successful message is processed.
"""
responses = inmemory()
bucket = inmemory()
invocations = integer(
"""
Counter, keeping track of how many times this consequence has been
invoked.
""",
default=0, allowNone=False)
succeeded = boolean(
"""
Did the action succeed? None if it hasn't completed, True if yes,
False if no.
""")
inconsistent = boolean(
"""
This should never be set to True. It's set to None by default, False
when the callback fully succeeds.
""")
buggy = boolean(
"""
Set this to cause 'success' to raise an exception.
""",
default=False,
allowNone=False)
def activate(self):
"""
Initialize the list of received responses.
"""
self.responses = []
self.bucket = []
def success(self):
"""
A response was received to the message. This will be executed in a
transaction. Raise an exception if this consequence is buggy.
"""
self.bucket.append(self)
self.inconsistent = True
self.invocations += 1
self.succeeded = True
if self.buggy:
raise SampleException()
self.inconsistent = False
def failure(self):
"""
        The message could not be delivered for some reason. This will be
        executed in a transaction.
"""
self.invocations += 1
self.succeeded = False
def answerReceived(self, answerValue, originalValue,
originalSender, originalTarget):
"""
An answer was received.
"""
if answerValue.type == DELIVERY_ERROR:
self.failure()
else:
self.success()
# It's important that this happen after the "application" logic so that
# the tests will not see this set if an exception has been raised.
self.responses.append((answerValue.type, answerValue.data,
originalValue.type, originalValue.data,
originalSender, originalTarget))
class TimeFactory(object):
"""
Make a fake time factory.
"""
def __init__(self):
"""
Create a time factory with some default values.
"""
self.currentSeconds = 0.0
def advance(self):
"""
Advance the current time by one second.
"""
self.currentSeconds += 1.0
def next(self):
"""
Produce the next time in the sequence, then advance.
"""
self.advance()
return Time.fromPOSIXTimestamp(self.currentSeconds)
def peek(self):
"""
Return the value that will come from the next call to 'next'.
"""
return Time.fromPOSIXTimestamp(self.currentSeconds + 1)
class SingleSiteMessagingTests(TestCase):
"""
These are tests for messaging within a single configured site store.
"""
def setUp(self):
"""
Create a site store with two users that can send messages to each
other.
"""
self.siteStore = Store()
self.time = TimeFactory()
self.loginSystem = LoginSystem(store=self.siteStore)
installOn(self.loginSystem, self.siteStore)
self.aliceAccount = self.loginSystem.addAccount(
u"alice", u"example.com", u"asdf", internal=True)
self.bobAccount = self.loginSystem.addAccount(
u"bob", u"example.com", u"asdf", internal=True)
self.aliceStore, self.aliceQueue = self.accountify(
self.aliceAccount.avatars.open())
self.bobStore, self.bobQueue = self.accountify(
self.bobAccount.avatars.open())
# I need to make a target object with a message receiver installed on
# it. Then I need to share that object.
self.receiver = StubReceiver(store=self.bobStore)
getEveryoneRole(self.bobStore).shareItem(self.receiver, u"suitcase")
self.retransmitDelta = timedelta(seconds=_RETRANSMIT_DELAY)
def accountify(self, userStore):
"""
Add a MessageQueue to the given user store and stub out its scheduler's
time function.
"""
queue = MessageQueue(store=userStore)
installOn(queue, userStore)
IScheduler(userStore).now = self.time.peek
return userStore, queue
def runQueue(self, queue):
"""
Advance the current time and run the given message queue.
"""
self.time.advance()
return queue.run()
def test_bogusConfiguration(self):
"""
Delivering a message on a site without a L{LoginSystem} should cause an
L{UnsatisfiedRequirement} exception to be logged, and the message not
to be delivered.
"""
self.loginSystem.deleteFromStore()
self.aliceToBobWithConsequence()
self.runQueue(self.aliceQueue)
[err] = self.flushLoggedErrors(UnsatisfiedRequirement)
# The message should still be queued.
self.assertEqual(len(list(self.aliceStore.query(_QueuedMessage))),
1)
def unattachedUserStore(self):
"""
Create a store that is structured as if it was a user-store with
messaging enabled, but was opened un-attached from its parent store.
@return: a 2-tuple of (store, queue).
"""
carolStore, carolQueue = self.accountify(Store())
acct = LoginAccount(store=carolStore, password=u'<PASSWORD>')
lm = LoginMethod(store=carolStore, localpart=u'carol',
domain=u'example.com', internal=True, protocol=u'*',
account=acct, verified=True)
return carolStore, carolQueue
def test_accidentallyOpenAsSite(self):
"""
If a message delivery is somehow attempted with a user store
accidentally opened as a site store, delivery should fail.
Normally this will not happen, since the current implementation (as of
when this test was written) of the scheduler will not allow timed
events to run with a L{SubScheduler} installed rather than a
L{Scheduler}. However, eliminating this distinction is a long-term
goal, so this test is a defense against both future modifications and
other code which may emulate scheduler APIs.
"""
carolStore, carolQueue = self.unattachedUserStore()
sdc = StubDeliveryConsequence(store=carolStore)
carolQueue.queueMessage(
Identifier(u"nothing", u"carol", u"example.com"),
Identifier(u"suitcase", u"bob", u"example.com"),
Value(u'custom.message.type', "Some message contents"), sdc)
self.runQueue(carolQueue)
[err] = self.flushLoggedErrors(UnsatisfiedRequirement)
# The message should still be queued.
self.assertEqual(len(list(carolStore.query(_QueuedMessage))), 1)
def test_accidentallyAnswerAsSite(self):
"""
If an answer delivery is somehow attempted with a user store
accidentally opened as a site store, the delivery should result in a
transient failure.
This is even less likely than the case described in
L{test_accidentallyOpenAsSite}, but in the unlikely event that the
scheduler is manually run, it still shouldn't result in any errors
being logged or state being lost.
"""
carolStore, carolQueue = self.unattachedUserStore()
carolReceiver = StubReceiver(store=carolStore)
getEveryoneRole(carolStore).shareItem(carolReceiver, u'some-share-id')
bogusID = Identifier(u'nothing', u'nobody', u'nowhere')
carolID = Identifier(u'some-share-id', u'carol', u'example.com')
carolQueue.routeMessage(bogusID, carolID, Value(u'no.type', 'contents'), 1)
[err] = self.flushLoggedErrors(UnsatisfiedRequirement)
# The answer should still be queued.
self.assertEqual(len(list(carolStore.query(_AlreadyAnswered))), 1)
def test_queueMessageSimple(self):
"""
Queuing a message should create a _QueuedMessage object and schedule it
for delivery to its intended recipient.
"""
# Maybe I should do this by sharing an object in Alice's store and then
# wrapping a SharingView-type thing around it? it seems like there
# ought to be a purely model-level API for this, though.
self.aliceQueue.queueMessage(
Identifier(u"nothing", u"alice", u"example.com"),
Identifier(u"suitcase", u"bob", u"example.com"),
Value(u"custom.message.type", "This is an important message."))
# OK now let's find the message that was queued.
qm = self.aliceStore.findUnique(_QueuedMessage)
self.assertEqual(qm.senderUsername, u'alice')
self.assertEqual(qm.senderDomain, u'example.com')
# Can you do this? It seems like it might be inconvenient to always
# determine a resolvable "return address" - the test case here is a
# simulation of reasonable behavior; alice hasn't shared anything.
# self.assertEqual(qm.senderShareID, None)
self.assertEqual(qm.targetUsername, u"bob")
self.assertEqual(qm.targetDomain, u"example.com")
self.assertEqual(qm.targetShareID, u"suitcase")
self.assertEqual(qm.value.type, u"custom.message.type")
self.assertEqual(qm.value.data, "This is an important message.")
# It should be scheduled. Is there a timed event?
te = self.aliceStore.findUnique(
TimedEvent,
TimedEvent.runnable == self.aliceQueue)
# It should be scheduled immediately. This uses the real clock, but in
# a predictable way (i.e. if time does not go backwards, then this will
# work). If this test ever fails intermittently there _is_ a real
# problem.
self.assertNotIdentical(te.time, None)
self.failUnless(te.time <= Time())
runresult = self.runQueue(self.aliceQueue)
# It should succeed, it should not reschedule itself; the scheduler
# will delete things that return None from run(). It would be nice to
# integrate with the scheduler here, but that would potentially drag in
# dependencies on other systems not scheduling stuff.
self.assertEqual(runresult, None)
self.assertEqual(self.receiver.messageData,
"This is an important message.")
self.assertEqual(self.receiver.messageType, u"custom.message.type")
self.assertEqual(list(self.aliceStore.query(_QueuedMessage)), [])
def aliceToBobWithConsequence(self, buggy=False):
"""
Queue a message from Alice to Bob with a supplied
L{StubDeliveryConsequence} and return it.
"""
sdc = StubDeliveryConsequence(store=self.aliceStore,
buggy=buggy)
self.aliceQueue.queueMessage(
Identifier(u"nothing", u"alice", u"example.com"),
Identifier(u"suitcase", u"bob", u"example.com"),
Value(u'custom.message.type', "Some message contents"), sdc)
return sdc
def checkOneResponse(self, sdc,
expectedType=u"custom.message.type",
expectedData="canned response",
originalSender=
Identifier(u"nothing", u"alice", u"example.com"),
originalTarget=
Identifier(u"suitcase", u"bob", u"example.com"),
succeeded=None):
"""
This checks that the response received has the expected type and data,
and corresponds to the sender and target specified by
L{SingleSiteMessagingTests.aliceToBobWithConsequence}.
"""
if succeeded is None:
if expectedType == DELIVERY_ERROR:
succeeded = False
else:
succeeded = True
# First, let's make sure that transaction committed.
self.assertEqual(sdc.succeeded, succeeded)
self.assertEqual(sdc.responses,
[(expectedType, expectedData,
u'custom.message.type', # type
"Some message contents", # data
originalSender, originalTarget
)])
def test_queueMessageSuccessNotification(self):
"""
Queueing a message should emit a success notification to the supplied
'consequence' object.
"""
sdc = self.aliceToBobWithConsequence()
self.runQueue(self.aliceQueue)
self.assertEqual(sdc.succeeded, True)
self.checkOneResponse(sdc)
def test_queueMessageSuccessErrorHandling(self):
"""
If the supplied 'consequence' object is buggy, the error should be
logged so that the answer can be processed later, but not propagated to
the network layer.
"""
sdc = self.aliceToBobWithConsequence(True)
self.runQueue(self.aliceQueue)
# It should be run in a transaction so none of the stuff set by
# 'success' should be set
self.assertEqual(sdc.succeeded, None)
self.assertEqual(sdc.inconsistent, None)
self.assertEqual(sdc.invocations, 0)
[err] = self.flushLoggedErrors(SampleException)
# Make sure that no messages are queued.
self.assertEqual(list(self.aliceStore.query(_QueuedMessage)), [])
failures = list(self.aliceStore.query(_FailedAnswer))
self.assertEqual(len(failures), 1)
# Fix the bug. In normal operation this would require a code upgrade.
sdc.buggy = False
failures[0].redeliver()
self.checkOneResponse(sdc)
def test_alreadyAnsweredRemoval(self):
"""
L{_AlreadyAnswered} records should be removed after the deferred from
L{routeAnswer} is fired.
"""
slowRouter = self.stubSlowRouter()
sdc = self.aliceToBobWithConsequence()
self.runQueue(self.aliceQueue)
slowRouter.flushMessages(stallAcks=True)
self.assertEqual(len(slowRouter.acks), 1)
# sanity check
self.assertEqual(self.bobStore.query(_AlreadyAnswered).count(), 1)
slowRouter.flushMessages()
self.assertEqual(self.bobStore.query(_AlreadyAnswered).count(), 0)
def test_repeatedAnswer(self):
"""
If answers are repeated, they should only be processed once.
"""
slowRouter = self.stubSlowRouter()
sdc = self.aliceToBobWithConsequence()
self.runQueue(self.aliceQueue)
slowRouter.flushMessages(repeatAcks=True)
slowRouter.flushMessages()
self.assertEqual(sdc.invocations, 1)
def _reschedulingTest(self, errorType):
"""
Test for rescheduling of L{_AlreadyAnswered} results in the presence of
the given error from the router.
"""
slowRouter = self.stubSlowRouter()
sdc = self.aliceToBobWithConsequence()
self.runQueue(self.aliceQueue)
slowRouter.flushMessages(dropAcks=True, dropAckErrorType=errorType)
# It should be scheduled.
self.assertEqual(
len(list(IScheduler(self.bobQueue.store).scheduledTimes(self.bobQueue))),
1)
# Now let's run it and see if the ack gets redelivered.
self.runQueue(self.bobQueue)
slowRouter.flushMessages()
self.assertEqual(sdc.succeeded, True)
def test_alreadyAnsweredReschedule(self):
"""
L{_AlreadyAnswered} records should be scheduled for retransmission if
the L{Deferred} from L{routeAnswer} is errbacked with a
L{MessageTransportError}. No message should be logged, since this is a
transient and potentially expected error.
"""
self._reschedulingTest(MessageTransportError)
def test_alreadyAnsweredRescheduleAndLog(self):
"""
L{_AlreadyAnswered} records should be scheduled for retransmission if
the L{Deferred} from L{routeAnswer} is errbacked with an unknown
exception type, and the exception should be logged.
"""
self._reschedulingTest(SampleException)
[err] = self.flushLoggedErrors(SampleException)
def test_alreadyAnsweredRescheduleCrash(self):
"""
L{_AlreadyAnswered} records should be scheduled for retransmission if
the L{Deferred} from L{routeAnswer} dies without being callbacked or
errbacked (such as if the store were to crash).
"""
slowRouter = self.stubSlowRouter()
sdc = self.aliceToBobWithConsequence()
self.runQueue(self.aliceQueue)
slowRouter.flushMessages(stallAcks=True)
self.assertEqual(sdc.invocations, 0)
slowRouter.acks = []
gc.collect()
# Make sure the Deferred is well and truly gone.
self.assertIdentical(
self.bobStore.findUnique(_AlreadyAnswered).deliveryDeferred,
None)
self.runQueue(self.bobQueue)
slowRouter.flushMessages()
self.assertEqual(sdc.invocations, 1)
def test_noRemoteUser(self):
"""
What if the target user we're trying to talk to doesn't actually exist
in the system? The message delivery should fail.
"""
sdc = StubDeliveryConsequence(store=self.aliceStore)
self.aliceQueue.queueMessage(
Identifier(u"nothing", u"alice", u"example.com"),
Identifier(u"suitcase", u"bohb", u"example.com"),
Value(u"custom.message.type", "Some message contents"), sdc)
self.runQueue(self.aliceQueue)
self.assertEqual(sdc.succeeded, False)
self.checkOneResponse(
sdc, DELIVERY_ERROR, ERROR_NO_USER,
originalTarget=Identifier(u"suitcase", u"bohb", u"example.com"))
self.assertEqual(sdc.invocations, 1)
def test_noRemoteShare(self):
"""
Similarly, if there's nothing identified by the shareID specified, the
message delivery should fail.
"""
sdc = StubDeliveryConsequence(store=self.aliceStore)
self.aliceQueue.queueMessage(
Identifier(u"nothing", u"alice", u"example.com"),
Identifier(u"nothing", u"bob", u"example.com"),
Value(u"custom.message.type", "Some message contents"), sdc)
self.runQueue(self.aliceQueue)
self.assertEqual(sdc.succeeded, False)
self.checkOneResponse(
sdc, DELIVERY_ERROR, ERROR_NO_SHARE,
originalTarget=Identifier(u"nothing", u"bob", u"example.com"))
self.assertEqual(sdc.invocations, 1)
def buggyReceiverTest(self, exceptionType):
"""
Run a test expecting the receiver to fail.
"""
sdc = self.aliceToBobWithConsequence()
self.runQueue(self.aliceQueue)
self.assertEqual(sdc.succeeded, False)
self.checkOneResponse(sdc, DELIVERY_ERROR, ERROR_REMOTE_EXCEPTION)
[err] = self.flushLoggedErrors(exceptionType)
self.assertEqual(sdc.invocations, 1)
self.assertEqual(self.receiver.inconsistent, None)
def test_messageReceivedBadReturn(self):
"""
        When L{messageReceived} does not properly return a 2-tuple, the
        resulting exception should be reported to the delivery consequence of
the message. The target database should not be left in an inconsistent
state.
"""
self.receiver.badReturn = True
self.buggyReceiverTest(TypeError)
def test_messageReceivedException(self):
"""
When L{messageReceived} raises an exception, that exception should be
reported to the delivery consequence of the message. The target
database should not be left in an inconsistent state.
"""
self.receiver.buggy = True
self.buggyReceiverTest(SampleException)
def test_revertAndRespond(self):
"""
When L{messageReceived} raises the special L{RevertAndRespond}
exception, the values passed to the exception should be used to
generate the response, but the transaction should be reverted.
"""
t = self.receiver.revertType = u'custom.reverted.type'
d = self.receiver.revertData = "this is some data that I reverted"
sdc = self.aliceToBobWithConsequence()
self.runQueue(self.aliceQueue)
self.assertEqual(sdc.succeeded, True)
self.assertEqual(sdc.inconsistent, False)
self.assertEqual(sdc.invocations, 1)
self.assertEqual(self.receiver.inconsistent, None)
self.checkOneResponse(sdc, t, d)
def test_droppedException(self):
"""
When L{messageReceived} raises an exception, that exception should be
reported to the delivery consequence of the message, even if the
initial transmission of the error report is lost.
"""
slowRouter = self.stubSlowRouter()
self.receiver.buggy = True
sdc = self.aliceToBobWithConsequence()
self.runQueue(self.aliceQueue)
slowRouter.flushMessages(dropAcks=True)
[err] = self.flushLoggedErrors(SampleException)
self.runQueue(self.aliceQueue)
slowRouter.flushMessages()
self.assertEqual(sdc.invocations, 1)
self.assertEqual(sdc.succeeded, False)
self.checkOneResponse(sdc, DELIVERY_ERROR, ERROR_REMOTE_EXCEPTION)
def test_senderNotVerified(self):
"""
        When the sender user's name or domain does not match an internal,
        verified login method of the originating store, sending a message via
        queueMessage should report an ERROR_BAD_SENDER.
"""
sdc = StubDeliveryConsequence(store=self.aliceStore)
self.aliceQueue.queueMessage(
Identifier(u"nothing", u"fred", u"example.com"),
Identifier(u"suitcase", u"bob", u"example.<EMAIL>"),
Value(u"custom.message.type", "Some message contents"),
sdc)
self.assertEqual(sdc.invocations, 0)
self.runQueue(self.aliceQueue)
self.assertEqual(self.receiver.receivedCount, 0)
self.assertEqual(sdc.succeeded, False)
self.assertEqual(sdc.invocations, 1)
self.checkOneResponse(sdc, DELIVERY_ERROR, ERROR_BAD_SENDER,
originalSender=Identifier(
u"nothing", u"fred", u"example.com"))
[err] = self.flushLoggedErrors(BadSender)
bs = err.value
self.assertEqual(bs.attemptedSender, u'<EMAIL>')
self.assertEqual(bs.allowedSenders, [u'<EMAIL>'])
self.assertEqual(str(bs),
"<EMAIL> attempted to send message "
"as <EMAIL>")
def stubSlowRouter(self):
"""
Replace this test's stub router with an artificially slowed-down
router.
"""
slowRouter = StubSlowRouter(store=self.siteStore)
self.siteStore.powerUp(slowRouter, IMessageRouter, POWERUP_BEFORE)
return slowRouter
def test_slowDelivery(self):
"""
If the site message-deliverer powerup returns a Deferred that takes a
while to fire, the L{MessageQueue} should respond by rescheduling
itself in the future.
"""
slowRouter = self.stubSlowRouter()
self.aliceQueue.queueMessage(
Identifier(u"nothing", u"alice", u"example.com"),
Identifier(u"suitcase", u"bob", u"example.com"),
Value(u"custom.message.type", "Message2"))
[time1] = IScheduler(self.aliceQueue.store).scheduledTimes(self.aliceQueue)
time2 = self.runQueue(self.aliceQueue)
self.assertEqual(time2 - self.time.peek(), self.retransmitDelta)
self.assertEqual(self.receiver.receivedCount, 0)
self.assertEqual(len(slowRouter.messages), 1)
slowRouter.flushMessages()
self.assertEqual(len(slowRouter.messages), 0)
self.assertEqual(self.receiver.receivedCount, 1)
def test_reallySlowDelivery(self):
"""
If the Deferred takes so long to fire that another retransmission
attempt takes place, the message should only be delivered once. If it
does fail, the next transmission attempt should actually transmit.
"""
slowRouter = self.stubSlowRouter()
c = self.aliceToBobWithConsequence()
self.runQueue(self.aliceQueue)
self.runQueue(self.aliceQueue)
slowRouter.flushMessages()
self.assertEqual(len(slowRouter.messages), 0)
self.assertEqual(self.receiver.receivedCount, 1)
self.assertEqual(c.invocations, 1)
self.assertEqual(c.succeeded, 1)
def test_multipleMessages(self):
"""
Sending multiple messages at the same time should result in the
messages being processed immediately, with no delay, but in order.
"""
slowRouter = self.stubSlowRouter()
bucket = []
c = self.aliceToBobWithConsequence()
c.bucket = bucket
c2 = self.aliceToBobWithConsequence()
c2.bucket = bucket
# Sanity check; make sure the message hasn't been processed yet.
self.assertEqual(bucket, [])
self.runQueue(self.aliceQueue)
slowRouter.flushMessages()
self.assertEqual(c.invocations, 1)
self.assertEqual(c.succeeded, 1)
self.assertEqual(c2.invocations, 1)
self.assertEqual(c2.succeeded, 1)
self.assertEqual(bucket, [c, c2])
self.assertEqual(self.runQueue(self.aliceQueue), None)
def test_multipleAnswers(self):
"""
        Multiple answers arriving at the same time should be processed
        immediately, with no delay, but in order.
"""
slowRouter = self.stubSlowRouter()
bucket = []
c = self.aliceToBobWithConsequence()
c.bucket = bucket
c2 = self.aliceToBobWithConsequence()
c2.bucket = bucket
# Sanity check; make sure the message hasn't been processed yet.
self.assertEqual(bucket, [])
self.runQueue(self.aliceQueue)
slowRouter.flushMessages(dropAcks=True)
[time1] = IScheduler(self.bobQueue.store).scheduledTimes(self.bobQueue)
time2 = self.runQueue(self.bobQueue)
self.assertEqual(time2 - self.time.peek(), self.retransmitDelta)
slowRouter.flushMessages()
self.assertEqual(c.invocations, 1)
self.assertEqual(c.succeeded, 1)
self.assertEqual(c2.invocations, 1)
self.assertEqual(c2.succeeded, 1)
self.assertEqual(bucket, [c, c2])
self.assertEqual(self.runQueue(self.aliceQueue), None)
def test_deliveryIdempotence(self):
"""
Delivering the same message to a substore twice should only result in
it being delivered to application code once.
"""
slowRouter = self.stubSlowRouter()
sdc = self.aliceToBobWithConsequence()
self.runQueue(self.aliceQueue)
slowRouter.spuriousDeliveries()
self.assertEqual(self.receiver.receivedCount, 1)
self.assertEqual(sdc.invocations, 0)
slowRouter.flushMessages()
self.assertEqual(self.receiver.receivedCount, 1)
self.assertEqual(sdc.invocations, 1)
self.checkOneResponse(sdc, )
def test_reciprocate(self):
"""
In addition to responding to the message with a return value, the
receiver should be able to remember the sender and emit reciprocal
messages.
"""
self.receiver.reciprocate = True
receiver2 = StubReceiver(store=self.aliceStore)
getEveryoneRole(self.aliceStore).shareItem(receiver2, u'nothing')
sdc = self.aliceToBobWithConsequence()
self.runQueue(self.aliceQueue)
self.runQueue(self.bobQueue)
self.assertEqual(receiver2.receivedCount, 1)
self.assertEqual(receiver2.messageType, u'custom.message.type.response')
self.assertEqual(receiver2.messageData,
'Some message contents response')
def test_unhandledDeliveryError(self):
"""
When a message cannot be delivered, but no consequence is supplied, the
error should be logged.
"""
self.receiver.buggy = True
self.aliceQueue.queueMessage(
Identifier(u"nothing", u"alice", u"example.com"),
Identifier(u"nothing", u"bob", u"example.com"),
Value(u'custom.message.type',
"Some message contents"))
self.runQueue(self.aliceQueue)
[err] = self.flushLoggedErrors(MessageTransportError)
def test_messageRemoteItem(self):
"""
When a message is sent to a shared item using
L{AMPMessenger.messageRemote}, and received by an L{AMPReceiver}, its
data should be delivered to an appropriately decorated method.
This is an integration test of the functionality covered by this suite
and the functionality covered by L{AMPMessagingTests}.
"""
aliceAMP = RealAMPReceiver(store=self.aliceStore)
getEveryoneRole(self.aliceStore).shareItem(aliceAMP, u'ally')
bobAMP = RealAMPReceiver(store=self.bobStore)
getEveryoneRole(self.bobStore).shareItem(bobAMP, u'bobby')
msgr = AMPMessenger(self.aliceQueue,
Identifier(u'ally', u'alice', u'example.com'),
Identifier(u'bobby', u'bob', u'example.com'))
msgr.messageRemote(SimpleCommand, int1=3, str2="hello")
self.runQueue(self.aliceQueue)
self.assertEqual(bobAMP.args, [(3, 'hello')])
class SimpleError(Exception):
"""
A simple error that should be detectable by L{SimpleCommand}.
"""
class SimpleCommand(Command):
"""
Sample simple command with a few arguments.
"""
arguments = [("int1", Integer()),
("str2", String())]
response = [("int3", Integer())]
errors = {SimpleError: "SIMPLE_ERROR"}
class SenderCommand(Command):
arguments = [("sender", SenderArgument())]
response = [("sender", SenderArgument())]
class TargetCommand(Command):
arguments = [("target", TargetArgument())]
response = [("target", TargetArgument())]
class TrivialCommand(Command):
"""
Trivial no-argument AMP command.
"""
errors = {SimpleError: "SIMPLE_ERROR"}
class RealAMPReceiver(Item, AMPReceiver):
"""
This is an integration testing item for making sure that decorated methods
on items receiving messages will work.
"""
dummy = integer()
args = inmemory()
def activate(self):
"""
Set up test state.
"""
self.args = []
@commandMethod.expose(SimpleCommand)
def doit(self, int1, str2):
"""
Simple responder for L{SimpleCommand}.
"""
self.args.append((int1, str2))
return dict(int3=int1+len(str2))
class MyAMPReceiver(AMPReceiver):
"""
A simple L{AMPReceiver} subclass with a few exposed methods.
"""
def __init__(self):
"""
Create a L{MyAMPReceiver}.
"""
self.commandArguments = []
self.commandAnswers = []
self.commandErrors = []
self.senders = []
self.targets = []
@commandMethod.expose(SimpleCommand)
def simpleCommand(self, int1, str2):
"""
Responder for a simple command.
"""
self.commandArguments.append((int1, str2))
return dict(int3=4)
@answerMethod.expose(SimpleCommand)
def simpleAnswer(self, int3):
"""
Responder for a simple answer.
"""
self.commandAnswers.append(int3)
@errorMethod.expose(SimpleCommand, SimpleError)
def simpleError(self, failure):
self.commandErrors.append(failure)
@commandMethod.expose(SenderCommand)
def commandWithSender(self, sender):
self.senders.append(sender)
return {}
@commandMethod.expose(TargetCommand)
def commandWithTarget(self, target):
self.targets.append(target)
return {}
@answerMethod.expose(SenderCommand)
def answerWithSender(self, sender):
self.senders.append(sender)
@answerMethod.expose(TargetCommand)
def answerWithTarget(self, target):
self.targets.append(target)
class ExpectedBuggyReceiver(AMPReceiver):
"""
This AMP responder will raise an expected exception type.
"""
@commandMethod.expose(TrivialCommand)
def raiseSimpleError(self):
raise SimpleError("simple description")
class UnexpectedBuggyReceiver(AMPReceiver):
"""
This AMP responder will raise an unexpected exception type.
"""
@commandMethod.expose(TrivialCommand)
def raiseRuntimeError(self):
raise RuntimeError()
class AMPMessagingTests(TestCase):
"""
Test cases for high-level AMP message parsing and emitting API.
"""
def setUp(self):
"""
Initialize the list of messages which this can deliver as a
pseudo-queue.
"""
self.messages = []
def queueMessage(self, sender, target, value,
consequence=None):
"""
Emulate L{MessageQueue.queueMessage}.
"""
self.messages.append((sender, target,
value.type, value.data, consequence))
def test_messageRemote(self):
"""
L{AMPMessenger.messageRemote} should queue a message with the provided
queue, sender, and target, serializing its arguments according to the
provided AMP command.
"""
sender = Identifier(u'test-sender', u'bob', u'example.com')
target = Identifier(u'test-target', u'alice', u'example.com')
msgr = AMPMessenger(self, sender, target)
msgr.messageRemote(SimpleCommand, int1=3, str2="hello")
expectedConsequence = None
self.assertEqual(self.messages,
[(sender, target, AMP_MESSAGE_TYPE,
Box(_command="SimpleCommand",
int1="3", str2="hello").serialize(),
expectedConsequence)])
def test_messageReceived(self):
"""
L{AMPReceiver.messageReceived} should dispatch commands to methods that
were appropriately decorated.
"""
amr = MyAMPReceiver()
questionBox = Box(_command=SimpleCommand.commandName,
int1="7", str2="test")
data = questionBox.serialize()
response = amr.messageReceived(
Value(AMP_MESSAGE_TYPE, data), None, None)
self.assertEqual(response.type, AMP_ANSWER_TYPE)
self.assertEqual(amr.commandArguments, [(7, "test")])
self.assertEqual(response.data, Box(int3="4").serialize())
def test_messageReceivedHandledError(self):
"""
L{AMPReceiver.messageReceived} should emit a responseType and
responseData of the appropriate type if the command in question has a
translation of the raised error.
"""
bug = ExpectedBuggyReceiver()
questionBox = Box(_command=TrivialCommand.commandName)
data = questionBox.serialize()
rar = self.assertRaises(RevertAndRespond, bug.messageReceived,
Value(AMP_MESSAGE_TYPE, data),
None, None)
self.assertEqual(rar.value.type, AMP_ANSWER_TYPE)
self.assertEqual(rar.value.data,
Box(_error_code="SIMPLE_ERROR",
_error_description="simple description")
.serialize())
def test_messageReceivedUnhandledError(self):
"""
L{AMPReceiver.messageReceived} should allow error not defined by its
command to be raised so that the normal L{ERROR_REMOTE_EXCEPTION}
behavior takes over.
"""
bug = UnexpectedBuggyReceiver()
questionBox = Box(_command=TrivialCommand.commandName)
data = questionBox.serialize()
self.assertRaises(RuntimeError, bug.messageReceived,
Value(AMP_MESSAGE_TYPE, data),
None, None)
def test_answerReceived(self):
"""
L{AMPReceiver.answerReceived} should dispatch answers to methods that
were appropriately decorated.
"""
originalMessageData = Box(_command=SimpleCommand.commandName).serialize()
amr = MyAMPReceiver()
answerBox = Box(int3="4")
data = answerBox.serialize()
amr.answerReceived(Value(AMP_ANSWER_TYPE, data),
Value(None, originalMessageData),
None, None)
self.assertEqual(amr.commandAnswers, [4])
def test_errorReceived(self):
"""
L{AMPReceiver.answerReceived} should dispatch answers that indicate an
AMP error to methods decorated by the L{errorMethod} decorator, not to
the L{answerMethod} decorator.
"""
originalMessageData = Box(_command=SimpleCommand.commandName).serialize()
amr = MyAMPReceiver()
data = Box(_error="SIMPLE_ERROR").serialize()
amr.answerReceived(Value(AMP_ANSWER_TYPE, data),
Value(None, originalMessageData),
None, None)
self.assertEqual(amr.commandAnswers, [])
amr.commandErrors.pop().trap(SimpleError)
self.assertEqual(amr.commandErrors, [])
def test_messageReceivedWrongType(self):
"""
An L{UnknownMessageType} should be raised when a message of the wrong
type is dispatched to an L{AMPReceiver}.
"""
amr = MyAMPReceiver()
questionBox = Box(_command=SimpleCommand.commandName,
int1="7", str2="test")
data = questionBox.serialize()
for badType in u'some.random.type', AMP_ANSWER_TYPE:
self.assertRaises(UnknownMessageType, amr.messageReceived,
Value(badType, data), None, None)
self.assertEqual(amr.commandArguments, [])
def test_messageReceivedBadData(self):
"""
A L{MalformedMessage} should be raised when a message that cannot be
interpreted as a single AMP box is received.
"""
amr = MyAMPReceiver()
for badData in ["", Box().serialize() + Box().serialize()]:
self.assertRaises(MalformedMessage, amr.messageReceived,
Value(AMP_MESSAGE_TYPE, badData), None, None)
def test_answerReceivedBadData(self):
"""
A L{MalformedMessage} should be raised when a message that cannot be
interpreted as a single AMP box is received.
"""
originalMessageData = Box(_command=SimpleCommand.commandName).serialize()
amr = MyAMPReceiver()
for badData in ["", Box().serialize() + Box().serialize()]:
self.assertRaises(MalformedMessage, amr.answerReceived,
Value(AMP_ANSWER_TYPE, badData),
Value(None, originalMessageData),
None, None)
def test_answerReceivedWrongType(self):
"""
An L{UnknownMessageType} exception should be raised when a answer of
the wrong type is dispatched to an L{AMPReceiver}.
"""
originalMessageData = Box(_command=SimpleCommand.commandName).serialize()
amr = MyAMPReceiver()
answerBox = Box(int3="4")
data = answerBox.serialize()
for badType in u'some.random.type', AMP_MESSAGE_TYPE:
self.assertRaises(UnknownMessageType, amr.answerReceived,
Value(badType, data),
Value(None, originalMessageData),
None, None)
self.assertEqual(amr.commandAnswers, [])
def test_messageReceivedSenderArgument(self):
"""
        The special argument L{SenderArgument} should cause the C{sender}
argument to L{messageReceived} to be passed as an argument to the
responder method.
"""
amr = MyAMPReceiver()
shareident = Identifier(u'abc', u'def', u'ghi')
amr.messageReceived(
Value(AMP_MESSAGE_TYPE,
Box(_command=SenderCommand.commandName).serialize()),
shareident, None)
self.assertEqual([shareident], amr.senders)
def test_messageReceivedTargetArgument(self):
"""
        The special argument L{TargetArgument} should cause the C{receiver}
argument to L{messageReceived} to be passed as an argument to the
responder method.
"""
amr = MyAMPReceiver()
shareident = Identifier(u'abc', u'def', u'ghi')
amr.messageReceived(Value(AMP_MESSAGE_TYPE,
Box(_command=TargetCommand.commandName).serialize()),
None, shareident)
self.assertEqual([shareident], amr.targets)
def test_answerReceivedSenderArgument(self):
"""
The special argument L{SenderArgument} should cause the
L{originalSender} argument to L{answerReceived} to be passed as an
argument to the responder method.
"""
amr = MyAMPReceiver()
shareident = Identifier(u'abc', u'def', u'ghi')
amr.answerReceived(
Value(AMP_ANSWER_TYPE, Box().serialize()),
Value(None,
Box(_command=SenderCommand.commandName).serialize()),
shareident, None)
self.assertEqual([shareident], amr.senders)
def test_answerReceivedTargetArgument(self):
"""
The special argument L{TargetArgument} should cause the
L{originalTarget} argument to L{answerReceived} to be passed as an
argument to the responder method.
"""
amr = MyAMPReceiver()
shareident = Identifier(u'abc', u'def', u'ghi')
amr.answerReceived(
Value(AMP_ANSWER_TYPE, Box().serialize()),
Value(None,
Box(_command=TargetCommand.commandName).serialize()),
None,
shareident)
self.assertEqual([shareident], amr.targets)
class ExpositionTests(TestCase):
"""
Tests for exposing methods with the L{_AMPExposer.expose} decorator, and
retrieving them with the L{_AMPExposer.responderForName} lookup method.
"""
def setUp(self):
"""
Set up a local L{_AMPExposer} instance for testing.
"""
self.ampExposer = _AMPExposer("amp exposer for testing")
self.errorExposer = _AMPErrorExposer("lulz")
def test_exposeCommand(self):
"""
Exposing a method as a command object ought to make it accessible to
L{responderForName}, and add a matching C{command} attribute to that
result.
"""
class TestClass(object):
def __init__(self, x):
self.num = x
@self.ampExposer.expose(TrivialCommand)
def thunk(self):
return 'hi', self.num + 1
tc = TestClass(3)
callable = self.ampExposer.responderForName(tc, TrivialCommand.commandName)
self.assertEqual(callable(), ("hi", 4))
self.assertIdentical(callable.command, TrivialCommand)
def test_exposeError(self):
"""
A method exposed as an error handler for a particular type of error
should be able to be looked up by the combination of the command and
the error.
"""
class TestClass(object):
@self.errorExposer.expose(SimpleCommand, SimpleError)
def thunk(self):
raise SimpleError()
tc = TestClass()
thunk = self.errorExposer.errbackForName(tc, SimpleCommand.commandName, "SIMPLE_ERROR")
self.assertEqual(thunk.exception, SimpleError)
self.assertEqual(thunk.command, SimpleCommand)
self.assertRaises(SimpleError, thunk)
def test_exposeOnTwoTypes(self):
"""
An integration test with L{epsilon.expose}; sanity checking to make
sure that exposing different methods on different classes for the same
command name yields the same results.
"""
class TestClass(object):
@self.ampExposer.expose(TrivialCommand)
def thunk(self):
return 1
class TestClass2:
@self.ampExposer.expose(TrivialCommand)
def funk(self):
return 2
tc2 = TestClass2()
callable = self.ampExposer.responderForName(tc2, TrivialCommand.commandName)
self.assertEqual(callable(), 2)
tc = TestClass()
callable = self.ampExposer.responderForName(tc, TrivialCommand.commandName)
self.assertEqual(callable(), 1)
```
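For orientation, the suite above exercises two layers: raw typed messages queued with MessageQueue.queueMessage, and AMP commands serialized through AMPMessenger and dispatched to decorated AMPReceiver methods. The sketch below condenses that usage; the `queue` argument and the SketchCommand class are illustrative stand-ins for the MessageQueue and SimpleCommand objects set up in the tests, not part of the library itself.
```python
# Condensed sketch of the interstore messaging pattern exercised above. The
# `queue` argument is assumed to be a MessageQueue installed on a user store
# (as in SingleSiteMessagingTests.setUp); SketchCommand mirrors SimpleCommand.
from twisted.protocols.amp import Command, Integer, String
from xmantissa.interstore import AMPMessenger, Value
from xmantissa.sharing import Identifier

class SketchCommand(Command):
    # Same shape as SimpleCommand in the tests above.
    arguments = [("int1", Integer()), ("str2", String())]
    response = [("int3", Integer())]

def sendBoth(queue):
    """
    Queue one raw Value message and one AMP command from alice to bob's
    'suitcase' share, using the identifiers from the tests above.
    """
    sender = Identifier(u'nothing', u'alice', u'example.com')
    target = Identifier(u'suitcase', u'bob', u'example.com')
    # Low level: an arbitrary typed payload; an optional fourth argument (a
    # consequence object such as StubDeliveryConsequence) is notified when
    # the answer comes back.
    queue.queueMessage(
        sender, target,
        Value(u'custom.message.type', 'This is an important message.'))
    # Higher level: serialize an AMP command into the same queue. The target
    # share must be an AMPReceiver exposing a responder decorated with
    # @commandMethod.expose(SketchCommand), as RealAMPReceiver does above.
    msgr = AMPMessenger(queue, sender, target)
    msgr.messageRemote(SketchCommand, int1=3, str2="hello")
```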
#### File: xmantissa/test/test_port.py
```python
import sys
import os
from StringIO import StringIO
from zope.interface import implements
from zope.interface.verify import verifyObject
from twisted.trial.unittest import TestCase
from twisted.python.filepath import FilePath
from twisted.application.service import IService, IServiceCollection
from twisted.internet.protocol import ServerFactory
from twisted.internet.defer import Deferred
from twisted.internet.ssl import CertificateOptions
from axiom.iaxiom import IAxiomaticCommand
from axiom.store import Store
from axiom.item import Item
from axiom.attributes import inmemory, integer
from axiom.dependency import installOn
from axiom.scripts.axiomatic import Options as AxiomaticOptions
from axiom.test.util import CommandStub
from xmantissa.ixmantissa import IProtocolFactoryFactory
from xmantissa.port import TCPPort, SSLPort, StringEndpointPort
from xmantissa.port import PortConfiguration
CERTIFICATE_DATA = """
-----BEGIN CERTIFICATE-----
<KEY>END CERTIFICATE-----
"""
PRIVATEKEY_DATA = """
-----<KEY>
"""
class DummyPort(object):
"""
Stub class used to track what reactor listen calls have been made and what
created ports have been stopped.
"""
stopping = None
def __init__(self, portNumber, factory, contextFactory=None, interface=''):
self.portNumber = portNumber
self.factory = factory
self.contextFactory = contextFactory
self.interface = interface
def stopListening(self):
assert self.stopping is None
self.stopping = Deferred()
return self.stopping
class DummyFactory(Item):
"""
Helper class used as a stand-in for a real protocol factory by the unit
tests.
"""
implements(IProtocolFactoryFactory)
dummyAttribute = integer(doc="""
Meaningless attribute which serves only to make this a valid Item subclass.
""")
realFactory = inmemory(doc="""
A reference to the protocol factory which L{getFactory} will return.
""")
def getFactory(self):
return self.realFactory
class PortTestsMixin:
"""
Test method-defining mixin class for port types with C{portNumber} and
C{factory} attributes.
Included are tests for various persistence-related behaviors as well as the
L{IService} implementation which all ports should have.
@ivar portType: The L{Item} subclass which will be tested.
@ivar lowPortNumber: A port number which requires privileges to bind on
POSIX. Used to test L{privilegedStartService}.
@ivar highPortNumber: A port number which does not require privileges to
bind on POSIX. Used to test the interaction between
L{privilegedStartService} and L{startService}.
@ivar dbdir: The path at which to create the test L{Store}. This must be
bound before L{setUp} is run, since that is the only method which examines
its value.
    @ivar ports: A list of ports which have been bound using L{listen};
    created in L{setUp}.
"""
portType = None
lowPortNumber = 123
highPortNumber = 1234
someInterface = u'127.0.0.1'
def port(self, **kw):
"""
Create and return a new port instance with the given attribute values.
"""
return self.portType(**kw)
def listen(self, *a, **kw):
"""
Pretend to bind a port. Used as a stub implementation of a reactor
listen method. Subclasses should override and implement to append
useful information to C{self.ports}.
"""
raise NotImplementedError()
def checkPort(self, port, alternatePort=None):
"""
Assert that the given port has been properly created.
@type port: L{DummyPort}
@param port: A port which has been created by the code being tested.
@type alternatePort: C{int}
@param alternatePort: If not C{None}, the port number on which C{port}
should be listening.
"""
raise NotImplementedError()
def setUp(self):
self.filesdir = self.mktemp()
self.store = Store(filesdir=self.filesdir)
self.realFactory = ServerFactory()
self.factory = DummyFactory(store=self.store, realFactory=self.realFactory)
self.ports = []
def test_portNumberAttribute(self):
"""
Test that C{self.portType} remembers the port number it is told to
listen on.
"""
port = self.port(store=self.store, portNumber=self.lowPortNumber)
self.assertEqual(port.portNumber, self.lowPortNumber)
def test_interfaceAttribute(self):
"""
Test that C{self.portType} remembers the interface it is told to listen
on.
"""
port = self.port(store=self.store, interface=self.someInterface)
self.assertEqual(port.interface, self.someInterface)
def test_factoryAttribute(self):
"""
Test that C{self.portType} remembers the factory it is given to associate
with its port.
"""
port = self.port(store=self.store, factory=self.factory)
self.assertIdentical(port.factory, self.factory)
def test_service(self):
"""
Test that C{self.portType} becomes a service on the store it is installed on.
"""
port = self.port(store=self.store)
installOn(port, self.store)
self.assertEqual(
list(self.store.powerupsFor(IService)),
[port])
def test_setServiceParent(self):
"""
Test that the C{self.portType.setServiceParent} method adds the C{self.portType} to
the Axiom Store Service as a child.
"""
port = self.port(store=self.store)
port.setServiceParent(self.store)
self.failUnlessIn(port, list(IService(self.store)))
def test_disownServiceParent(self):
"""
Test that the C{self.portType.disownServiceParent} method removes the
C{self.portType} from the Axiom Store Service.
"""
port = self.port(store=self.store)
port.setServiceParent(self.store)
port.disownServiceParent()
self.failIfIn(port, list(IService(self.store)))
def test_serviceParent(self):
"""
Test that C{self.portType} is a child of the store service after it is
installed.
"""
port = self.port(store=self.store)
installOn(port, self.store)
service = IServiceCollection(self.store)
self.failUnlessIn(port, list(service))
def _start(self, portNumber, methodName):
port = self.port(store=self.store, portNumber=portNumber, factory=self.factory)
port._listen = self.listen
getattr(port, methodName)()
return self.ports
def _privilegedStartService(self, portNumber):
return self._start(portNumber, 'privilegedStartService')
def _startService(self, portNumber):
return self._start(portNumber, 'startService')
def test_startPrivilegedService(self):
"""
Test that C{self.portType} binds a low-numbered port with the reactor when it
is started with privilege.
"""
ports = self._privilegedStartService(self.lowPortNumber)
self.assertEqual(len(ports), 1)
self.checkPort(ports[0])
def test_dontStartPrivilegedService(self):
"""
Test that C{self.portType} doesn't bind a high-numbered port with the
reactor when it is started with privilege.
"""
ports = self._privilegedStartService(self.highPortNumber)
self.assertEqual(ports, [])
def test_startServiceLow(self):
"""
Test that C{self.portType} binds a low-numbered port with the reactor
when it is started without privilege.
"""
ports = self._startService(self.lowPortNumber)
self.assertEqual(len(ports), 1)
self.checkPort(ports[0])
def test_startServiceHigh(self):
"""
Test that C{self.portType} binds a high-numbered port with the reactor
when it is started without privilege.
"""
ports = self._startService(self.highPortNumber)
self.assertEqual(len(ports), 1)
self.checkPort(ports[0], self.highPortNumber)
def test_startServiceNoInterface(self):
"""
Test that C{self.portType} binds to all interfaces if no interface is
explicitly specified.
"""
port = self.port(store=self.store, portNumber=self.highPortNumber, factory=self.factory)
port._listen = self.listen
port.startService()
self.assertEqual(self.ports[0].interface, '')
def test_startServiceInterface(self):
"""
Test that C{self.portType} binds to only the specified interface when
instructed to.
"""
port = self.port(store=self.store, portNumber=self.highPortNumber, factory=self.factory, interface=self.someInterface)
port._listen = self.listen
port.startService()
self.assertEqual(self.ports[0].interface, self.someInterface)
def test_startedOnce(self):
"""
Test that C{self.portType} only binds one network port when
C{privilegedStartService} and C{startService} are both called.
"""
port = self.port(store=self.store, portNumber=self.lowPortNumber, factory=self.factory)
port._listen = self.listen
port.privilegedStartService()
self.assertEqual(len(self.ports), 1)
self.checkPort(self.ports[0])
port.startService()
self.assertEqual(len(self.ports), 1)
def test_stopService(self):
"""
Test that C{self.portType} cleans up its listening port when it is stopped.
"""
port = self.port(store=self.store, portNumber=self.lowPortNumber, factory=self.factory)
port._listen = self.listen
port.startService()
stopped = port.stopService()
stopping = self.ports[0].stopping
self.failIfIdentical(stopping, None)
self.assertIdentical(stopped, stopping)
def test_deletedFactory(self):
"""
Test that the deletion of a C{self.portType}'s factory item results in the
C{self.portType} being deleted.
"""
port = self.port(store=self.store, portNumber=self.lowPortNumber, factory=self.factory)
self.factory.deleteFromStore()
self.assertEqual(list(self.store.query(self.portType)), [])
def test_deletionDisownsParent(self):
"""
Test that a deleted C{self.portType} no longer shows up in the children list
of the service which used to be its parent.
"""
port = self.port(store=self.store, portNumber=self.lowPortNumber, factory=self.factory)
port.setServiceParent(self.store)
port.deleteFromStore()
service = IServiceCollection(self.store)
self.failIfIn(port, list(service))
class TCPPortTests(PortTestsMixin, TestCase):
"""
Tests for L{xmantissa.port.TCPPort}.
"""
portType = TCPPort
def checkPort(self, port, alternatePort=None):
if alternatePort is None:
alternatePort = self.lowPortNumber
self.assertEqual(port.portNumber, alternatePort)
self.assertEqual(port.factory, self.realFactory)
def listen(self, port, factory, interface=''):
self.ports.append(DummyPort(port, factory, interface=interface))
return self.ports[-1]
class SSLPortTests(PortTestsMixin, TestCase):
"""
Tests for L{xmantissa.port.SSLPort}.
"""
portType = SSLPort
def checkPort(self, port, alternatePort=None):
if alternatePort is None:
alternatePort = self.lowPortNumber
self.assertEqual(port.portNumber, alternatePort)
self.assertEqual(port.factory, self.realFactory)
self.failUnless(isinstance(port.contextFactory, CertificateOptions))
def port(self, certificatePath=None, **kw):
if certificatePath is None:
certificatePath = self.store.newFilePath('certificate.pem')
assert not certificatePath.exists()
certificatePath.setContent(CERTIFICATE_DATA + PRIVATEKEY_DATA)
return self.portType(certificatePath=certificatePath, **kw)
def listen(self, port, factory, contextFactory, interface=''):
self.ports.append(DummyPort(port, factory, contextFactory, interface=interface))
return self.ports[-1]
def test_certificatePathAttribute(self):
"""
Test that L{SSLPort} remembers the certificate filename it is given.
"""
certificatePath = self.store.newFilePath('foo', 'bar')
port = self.port(store=self.store, certificatePath=certificatePath)
self.assertEqual(port.certificatePath, certificatePath)
class _FakeService(object):
"""
Fake L{twisted.application.service.IService} implementation for testing
L{xmantissa.port.StringEndpointPort}'s wrapping behaviour.
"""
def __init__(self, description, factory):
self.description = description
self.factory = factory
self.privilegedStarted = False
self.started = False
self.stopped = False
def privilegedStartService(self):
self.privilegedStarted = True
def startService(self):
self.started = True
def stopService(self):
self.stopped = True
class StringEndpointPortTests(TestCase):
"""
Tests for L{xmantissa.port.StringEndpointPort}.
"""
def _fakeService(self, description, factory):
"""
A fake for L{twisted.application.strports.service} that just constructs
our fake service object.
"""
self._service = _FakeService(description, factory)
return self._service
def port(self, **kw):
store = Store()
factory = DummyFactory(store=store)
factory.realFactory = ServerFactory()
port = StringEndpointPort(store=store, factory=factory, **kw)
port._endpointService = self._fakeService
return port
def test_startService(self):
"""
The underlying endpoint service is started when
L{xmantissa.port.StringEndpointPort} is started.
"""
port = self.port(description=u'foo')
port.privilegedStartService()
self.assertTrue(self._service.privilegedStarted)
port.startService()
self.assertTrue(self._service.started)
def test_description(self):
"""
The underlying endpoint service is created with the description
specified by the L{xmantissa.port.StringEndpointPort}.
"""
port = self.port(description=u'foo')
port.startService()
self.assertEqual(u'foo', self._service.description)
def test_factory(self):
"""
The underlying endpoint service is created with the factory specified
by invoking C{getFactory()} on the C{factory} attribute.
"""
port = self.port(description=u'foo')
port.startService()
self.assertIdentical(self._service.factory, port.factory.realFactory)
def test_stopService(self):
"""
The underlying endpoint service is stopped when
L{xmantissa.port.StringEndpointPort} is stopped.
"""
port = self.port(description=u'foo')
port.startService()
port.stopService()
self.assertTrue(self._service.stopped)
class PortConfigurationCommandTests(TestCase):
"""
Tests for the I{axiomatic port} command.
"""
def setUp(self):
"""
Override C{sys.stdout} to capture anything written by the port
subcommand.
"""
self.oldColumns = os.environ.get('COLUMNS')
os.environ['COLUMNS'] = '80'
self.stdout = sys.stdout
sys.stdout = StringIO()
def tearDown(self):
"""
Restore the original value of C{sys.stdout}.
"""
sys.stdout = self.stdout
if self.oldColumns is not None:
os.environ['COLUMNS'] = self.oldColumns
def _makeConfig(self, store):
"""
Create a L{PortConfiguration} instance with a properly set C{parent}
attribute.
"""
config = PortConfiguration()
config.parent = CommandStub(store, "port")
return config
def assertSuccessStatus(self, options, arguments):
"""
Parse the given arguments with the given options object and assert that
L{SystemExit} is raised with an exit code of C{0}.
"""
self.assertFailStatus(0, options, arguments)
def assertFailStatus(self, code, options, arguments):
"""
Parse the given arguments with the given options object and assert that
L{SystemExit} is raised with the specified exit code.
"""
exc = self.assertRaises(SystemExit, options.parseOptions, arguments)
self.assertEqual(exc.args, (code,))
def assertSpacelessEqual(self, first, second):
"""
Assert the equality of two strings without respect to their whitespace
or order.
"""
self.assertEqual(set(first.split()), set(second.split()))
def test_providesCommandInterface(self):
"""
L{PortConfiguration} provides L{IAxiomaticCommand}.
"""
verifyObject(IAxiomaticCommand, PortConfiguration)
def test_axiomaticSubcommand(self):
"""
L{PortConfiguration} is available as a subcommand of I{axiomatic}.
"""
subCommands = AxiomaticOptions().subCommands
[options] = [cmd[2] for cmd in subCommands if cmd[0] == 'port']
self.assertIdentical(options, PortConfiguration)
_portHelpText = (
# This is a bit unfortunate. It depends on what Options in Twisted
# decides to spit out. Note particularly the seemingly random amount
# of trailing whitespace included on some lines. The intent of tests
# using this isn't really to ensure byte-identical results, but simply
# to verify that help text is going to be shown to a user. -exarkun
"Usage: axiomatic [options] port [options]\n"
"Options:\n"
" --version Display Twisted version and exit.\n"
" --help Display this help and exit.\n"
"\n"
"This command allows for the inspection and modification of the "
"configuration of\n"
"network services in an Axiom store.\n"
"Commands:\n"
" list Show existing ports and factories.\n"
" delete Delete existing ports.\n"
" create Create new ports.\n"
"\n")
def test_implicitPortHelp(self):
"""
When I{axiomatic port} is invoked with no arguments, usage information
is written to standard out and the process exits successfully.
"""
self.assertSuccessStatus(self._makeConfig(None), [])
self.assertSpacelessEqual(self._portHelpText, sys.stdout.getvalue())
def test_explicitPortHelp(self):
"""
When I{axiomatic port} is invoked with I{--help}, usage information is
written to standard out.
"""
self.assertSuccessStatus(self._makeConfig(None), ["--help"])
self.assertSpacelessEqual(self._portHelpText, sys.stdout.getvalue())
_listHelpText = (
"Usage: axiomatic [options] port [options] list [options]\n"
"Options:\n"
" --version Display Twisted version and exit.\n"
" --help Display this help and exit.\n"
"\n"
"Show the port/factory bindings in an Axiom store.\n"
"\n")
def test_explicitListHelp(self):
"""
When I{axiomatic port list} is invoked with I{--help}, usage
information for the C{list} subcommand is written to standard out.
"""
self.assertSuccessStatus(self._makeConfig(None), ["list", "--help"])
self.assertSpacelessEqual(self._listHelpText, sys.stdout.getvalue())
def test_listEmpty(self):
"""
When I{axiomatic port list} is invoked, the ports which are currently
configured in the system are displayed.
"""
store = Store()
self.assertSuccessStatus(self._makeConfig(store), ["list"])
self.assertIn("There are no ports configured.", sys.stdout.getvalue())
def test_listTCPPort(self):
"""
When I{axiomatic port list} is invoked for a L{Store} which has a
L{TCPPort} in it, the details of that port, including its factory, are
written to stdout.
"""
store = Store()
factory = DummyFactory(store=store)
port = TCPPort(
store=store, factory=factory, portNumber=1234, interface=u"foo")
self.assertSuccessStatus(self._makeConfig(store), ["list"])
self.assertEqual(
"%d) %r listening on:\n" % (factory.storeID, factory) +
" %d) TCP, interface %s, port %d\n" % (
port.storeID, port.interface, port.portNumber),
sys.stdout.getvalue())
def test_listSSLPort(self):
"""
When I{axiomatic port list} is invoked for a L{Store} which has an
L{SSLPort} in it, the details of that port, including its factory, are
written to stdout.
"""
store = Store(filesdir=self.mktemp())
factory = DummyFactory(store=store)
port = SSLPort(
store=store, factory=factory, portNumber=1234, interface=u"foo",
certificatePath=store.filesdir.child("bar"))
self.assertSuccessStatus(self._makeConfig(store), ["list"])
self.assertEqual(
"%d) %r listening on:\n" % (factory.storeID, factory) +
" %d) SSL, interface %s, port %d, certificate %s\n" % (
port.storeID, port.interface, port.portNumber,
port.certificatePath.path),
sys.stdout.getvalue())
def test_listStringEndpointPort(self):
"""
When I{axiomatic port list} is invoked for a L{Store} which has an
L{StringEndpointPort} in it, the endpoint description and factory are
written to stdout.
"""
store = Store()
factory = DummyFactory(store=store)
port = StringEndpointPort(
store=store, factory=factory, description=u'tcp:1234')
self.assertSuccessStatus(self._makeConfig(store), ["list"])
self.assertEqual(
"{:d}) {!r} listening on:\n".format(factory.storeID, factory) +
" {:d}) Endpoint {!r}\n".format(port.storeID, port.description),
sys.stdout.getvalue())
def test_listAnyInterface(self):
"""
I{axiomatic port list} displays a special string for a port bound to
C{INADDR_ANY}.
"""
store = Store()
factory = DummyFactory(store=store)
port = TCPPort(
store=store, factory=factory, portNumber=1234, interface=u"")
self.assertSuccessStatus(self._makeConfig(store), ["list"])
self.assertEqual(
"%d) %r listening on:\n" % (factory.storeID, factory) +
" %d) TCP, any interface, port %d\n" % (port.storeID, port.portNumber),
sys.stdout.getvalue())
def test_listSSLPortWithoutAttributes(self):
"""
If there is an L{SSLPort} with no certificate or no port number (a
rather invalid configuration), I{axiomatic port list} should show this
in its output without producing an error.
"""
store = Store()
factory = DummyFactory(store=store)
port = SSLPort(store=store, factory=factory)
self.assertSuccessStatus(self._makeConfig(store), ["list"])
self.assertEqual(
"%d) %r listening on:\n" % (factory.storeID, factory) +
" %d) SSL, any interface, NO PORT, NO CERTIFICATE\n" % (
port.storeID,),
sys.stdout.getvalue())
def test_listTwoPorts(self):
"""
I{axiomatic port list} displays two different ports bound to the same
factory together beneath that factory.
"""
store = Store()
factory = DummyFactory(store=store)
portOne = TCPPort(
store=store, factory=factory, portNumber=1234, interface=u"foo")
portTwo = TCPPort(
store=store, factory=factory, portNumber=2345, interface=u"bar")
self.assertSuccessStatus(self._makeConfig(store), ["list"])
self.assertEqual(
"%d) %r listening on:\n" % (factory.storeID, factory) +
" %d) TCP, interface %s, port %d\n" % (
portOne.storeID, portOne.interface, portOne.portNumber) +
" %d) TCP, interface %s, port %d\n" % (
portTwo.storeID, portTwo.interface, portTwo.portNumber),
sys.stdout.getvalue())
def test_listTwoFactories(self):
"""
I{axiomatic port list} displays two different factories separately from
each other.
"""
store = Store()
factoryOne = DummyFactory(store=store)
factoryTwo = DummyFactory(store=store)
portOne = TCPPort(
store=store, factory=factoryOne, portNumber=10, interface=u"foo")
portTwo = TCPPort(
store=store, factory=factoryTwo, portNumber=20, interface=u"bar")
self.assertSuccessStatus(self._makeConfig(store), ["list"])
self.assertEqual(
"%d) %r listening on:\n" % (factoryOne.storeID, factoryOne) +
" %d) TCP, interface %s, port %d\n" % (
portOne.storeID, portOne.interface, portOne.portNumber) +
"%d) %r listening on:\n" % (factoryTwo.storeID, factoryTwo) +
" %d) TCP, interface %s, port %d\n" % (
portTwo.storeID, portTwo.interface, portTwo.portNumber),
sys.stdout.getvalue())
def test_listUnlisteningFactory(self):
"""
        I{axiomatic port list} displays factories even if they aren't associated
with any port.
"""
store = Store()
factory = DummyFactory(store=store)
store.powerUp(factory, IProtocolFactoryFactory)
self.assertSuccessStatus(self._makeConfig(store), ["list"])
self.assertEqual(
"%d) %r is not listening.\n" % (factory.storeID, factory),
sys.stdout.getvalue())
_deleteHelpText = (
"Usage: axiomatic [options] port [options] delete [options]\n"
"Options:\n"
" --port-identifier= Identify a port for deletion.\n"
" --version Display Twisted version and exit.\n"
" --help Display this help and exit.\n"
"\n"
"Delete an existing port binding from a factory. If a server is "
"currently running using the database from which the port is deleted, "
"the factory will *not* stop listening on that port until the server "
"is restarted.")
def test_explicitDeleteHelp(self):
"""
If I{axiomatic port delete} is invoked with I{--help}, usage
information for the C{delete} subcommand is written to standard out.
"""
store = Store()
self.assertSuccessStatus(self._makeConfig(store), ["delete", "--help"])
self.assertSpacelessEqual(self._deleteHelpText, sys.stdout.getvalue())
def test_implicitDeleteHelp(self):
"""
If I{axiomatic port delete} is invoked with no arguments, usage
information for the C{delete} subcommand is written to standard out.
"""
store = Store()
self.assertSuccessStatus(self._makeConfig(store), ["delete"])
self.assertSpacelessEqual(self._deleteHelpText, sys.stdout.getvalue())
def test_deletePorts(self):
"""
        I{axiomatic port delete} deletes each port whose C{storeID} is
        specified.
"""
store = Store(filesdir=self.mktemp())
factory = DummyFactory(store=store)
deleteTCP = TCPPort(
store=store, factory=factory, portNumber=10, interface=u"foo")
keepTCP = TCPPort(
store=store, factory=factory, portNumber=10, interface=u"bar")
deleteSSL = SSLPort(
store=store, factory=factory, portNumber=10, interface=u"baz",
certificatePath=store.filesdir.child("baz"))
keepSSL = SSLPort(
store=store, factory=factory, portNumber=10, interface=u"quux",
certificatePath=store.filesdir.child("quux"))
deleteEndpoint = StringEndpointPort(
store=store, factory=factory, description=u'tcp:1234')
keepEndpoint = StringEndpointPort(
store=store, factory=factory, description=u'tcp:1235')
self.assertSuccessStatus(
self._makeConfig(store),
["delete",
"--port-identifier", str(deleteTCP.storeID),
"--port-identifier", str(deleteSSL.storeID),
"--port-identifier", str(deleteEndpoint.storeID)])
self.assertEqual("Deleted.\n", sys.stdout.getvalue())
self.assertEqual(list(store.query(TCPPort)), [keepTCP])
self.assertEqual(list(store.query(SSLPort)), [keepSSL])
self.assertEqual(list(store.query(StringEndpointPort)), [keepEndpoint])
def test_cannotDeleteOtherStuff(self):
"""
I{axiomatic port delete} will not delete something which is neither a
L{TCPPort} nor an L{SSLPort} and does not delete anything if an invalid
port identifier is present in the command.
"""
store = Store()
factory = DummyFactory(store=store)
tcp = TCPPort(
store=store, factory=factory, interface=u"foo", portNumber=1234)
self.assertFailStatus(
1,
self._makeConfig(store),
["delete",
"--port-identifier", str(tcp.storeID),
"--port-identifier", str(factory.storeID)])
self.assertEqual(
"%d does not identify a port.\n" % (factory.storeID,),
sys.stdout.getvalue())
self.assertEqual(list(store.query(DummyFactory)), [factory])
self.assertEqual(list(store.query(TCPPort)), [tcp])
def test_cannotDeleteNonExistent(self):
"""
I{axiomatic port delete} writes a short error to standard output when a
port-identifier is specified for which there is no corresponding store
ID.
"""
store = Store()
self.assertFailStatus(
1, self._makeConfig(store),
["delete", "--port-identifier", "12345"])
self.assertEqual(
"12345 does not identify an item.\n",
sys.stdout.getvalue())
_createHelpText = (
"Usage: axiomatic [options] port [options] create [options]\n"
"Options:\n"
" --strport= A Twisted strports description of a "
"port to add.\n"
" --factory-identifier= Identifier for a protocol factory to "
"associate with\n"
" the new port.\n"
" --version Display Twisted version and exit.\n"
" --help Display this help and exit.\n"
"\n"
"Create a new port binding for an existing factory. If a server is "
"currently\n"
"running using the database in which the port is created, the "
"factory will *not*\n"
"be started on that port until the server is restarted.\n"
"\n")
def test_createImplicitHelp(self):
"""
If I{axiomatic port create} is invoked with no arguments, usage
information for the C{create} subcommand is written to standard out.
"""
self.assertSuccessStatus(self._makeConfig(None), ["create"])
self.assertSpacelessEqual(self._createHelpText, sys.stdout.getvalue())
def test_createExplicitHelp(self):
"""
If I{axiomatic port create} is invoked with C{--help} as an argument,
        usage information for the C{create} subcommand is written to standard out.
"""
self.assertSuccessStatus(self._makeConfig(None), ["create", "--help"])
self.assertSpacelessEqual(self._createHelpText, sys.stdout.getvalue())
def test_createInvalidPortDescription(self):
"""
If an invalid string is given for the C{strport} option of I{axiomatic
port create}, a short error is written to standard output.
"""
store = Store()
factory = DummyFactory(store=store)
self.assertFailStatus(
1, self._makeConfig(store),
["create", "--strport", "xyz",
"--factory-identifier", str(factory.storeID)])
self.assertEqual(
"'xyz' is not a valid port description.\n", sys.stdout.getvalue())
def test_createNonExistentFactoryIdentifier(self):
"""
If a storeID which is not associated with any item is given for the
C{factory-identifier} option of I{axiomatic port create}, a short error
is written to standard output.
"""
store = Store()
self.assertFailStatus(
1, self._makeConfig(store),
["create", "--strport", "tcp:8080",
"--factory-identifier", "123"])
self.assertEqual(
"123 does not identify an item.\n", sys.stdout.getvalue())
def test_createNonFactoryIdentifier(self):
"""
If a storeID which is associated with an item which does not provide
L{IProtocolFactoryFactory} is given for the C{factory-identifier}
option of I{axiomatic port create}, a short error is written to
standard output.
"""
store = Store()
storeID = TCPPort(store=store).storeID
self.assertFailStatus(
1, self._makeConfig(store),
["create", "--strport", "tcp:8080",
"--factory-identifier", str(storeID)])
self.assertEqual(
"%d does not identify a factory.\n" % (storeID,),
sys.stdout.getvalue())
def test_createPort(self):
"""
I{axiomatic port create} creates a new
L{xmantissa.port.StringEndpointPort} with the specified description,
referring to the specified factory. The port is also powered up on the
store for L{IService}.
"""
store = Store()
factory = DummyFactory(store=store)
self.assertSuccessStatus(
self._makeConfig(store),
["create", "--strport", "tcp:8080",
"--factory-identifier", str(factory.storeID)])
self.assertEqual("Created.\n", sys.stdout.getvalue())
[port] = list(store.query(StringEndpointPort))
self.assertEqual(u'tcp:8080', port.description)
self.assertEqual(list(store.interfacesFor(port)), [IService])
```
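The tests above drive L{xmantissa.port} through a dummy factory item. As a rough illustration of the same API from application code, here is a minimal sketch, assuming an open Axiom store and some item powering up IProtocolFactoryFactory; the `factory` argument and the port numbers are illustrative assumptions, while the TCPPort/SSLPort/installOn calls mirror those exercised by these tests and by PortConfiguration.createPort later in this document.
```python
# Minimal sketch, not part of Mantissa: `store` is an open axiom Store and
# `factory` is any Item powering up IProtocolFactoryFactory (a hypothetical
# stand-in). installOn powers the port up for IService so it starts with the
# store's service hierarchy, as the `axiomatic port create` tests expect.
from axiom.dependency import installOn
from xmantissa.port import TCPPort, SSLPort

def bindTCP(store, factory, portNumber=8080, interface=u''):
    port = TCPPort(store=store, factory=factory,
                   portNumber=portNumber, interface=interface)
    installOn(port, store)
    return port

def bindSSL(store, factory, certificatePath, portNumber=8443):
    # certificatePath is a FilePath to a PEM file holding both the
    # certificate and the private key, as in the SSLPort tests above.
    port = SSLPort(store=store, factory=factory, portNumber=portNumber,
                   certificatePath=certificatePath)
    installOn(port, store)
    return port
```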
#### File: xmantissa/test/test_q2q.py
```python
from twisted.trial import unittest
from axiom import store
from xmantissa import ixmantissa, endpoint
class MantissaQ2Q(unittest.TestCase):
def testInstallation(self):
d = self.mktemp()
s = store.Store(unicode(d))
q = endpoint.UniversalEndpointService(store=s)
q.installOn(s)
self.assertIdentical(ixmantissa.IQ2QService(s), q)
```
#### File: xmantissa/test/test_siteroot.py
```python
from twisted.trial import unittest
from nevow.testutil import FakeRequest
from axiom.store import Store
from axiom.item import Item
from axiom.attributes import text
from axiom.dependency import installOn
from xmantissa.website import PrefixURLMixin, WebSite
from xmantissa.ixmantissa import ISiteRootPlugin
from zope.interface import implements
class Dummy:
def __init__(self, pfx):
self.pfx = pfx
class PrefixTester(Item, PrefixURLMixin):
implements(ISiteRootPlugin)
sessioned = True
typeName = 'test_prefix_widget'
schemaVersion = 1
prefixURL = text()
def createResource(self):
return Dummy(self.prefixURL)
def installSite(self):
"""
Not using the dependency system for this class because multiple
instances can be installed.
"""
for iface, priority in self.__getPowerupInterfaces__([]):
self.store.powerUp(self, iface, priority)
class SiteRootTest(unittest.TestCase):
def test_prefixPriorityMath(self):
"""
L{WebSite.locateChild} returns the most specific L{ISiteRootPlugin}
based on I{prefixURL} and the request path segments.
"""
store = Store()
PrefixTester(store=store, prefixURL=u"hello").installSite()
PrefixTester(store=store, prefixURL=u"").installSite()
website = WebSite(store=store)
installOn(website, store)
res, segs = website.locateChild(FakeRequest(), ('hello',))
self.assertEquals(res.pfx, 'hello')
self.assertEquals(segs, ())
res, segs = website.locateChild(FakeRequest(), ('',))
self.assertEquals(res.pfx, '')
self.assertEquals(segs, ('',))
```
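PrefixTester above is the minimal shape of a site-root plugin. As a hedged sketch of the same pattern in its sessionless variant (the shape later used by DeveloperSite in webadmin.py), the item below serves static files beneath a URL prefix; the type name, prefix, and directory are illustrative assumptions.
```python
# Sketch only: a sessionless PrefixURLMixin plugin serving static content.
# The prefix and directory are arbitrary illustrative choices.
from zope.interface import implements
from axiom.item import Item
from axiom.attributes import integer
from nevow import static
from xmantissa.website import PrefixURLMixin
from xmantissa.ixmantissa import ISessionlessSiteRootPlugin

class ExampleStaticSite(Item, PrefixURLMixin):
    implements(ISessionlessSiteRootPlugin)
    typeName = 'example_static_site'
    schemaVersion = 1
    sessionless = True
    prefixURL = 'static/example'
    # Axiom requires at least one attribute on an Item.
    hits = integer(default=0)

    def createResource(self):
        # Any nevow resource will do; here, files from a fixed directory.
        return static.File('/srv/example-static')
```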
#### File: xmantissa/test/test_terminal.py
```python
from zope.interface import implements
from zope.interface.verify import verifyObject
from twisted.internet.protocol import ProcessProtocol
from twisted.trial.unittest import TestCase
from twisted.cred.credentials import UsernamePassword
from twisted.conch.interfaces import IConchUser, ISession
from twisted.conch.ssh.keys import Key
from twisted.conch.ssh.session import SSHSession
from twisted.conch.insults.helper import TerminalBuffer
from twisted.conch.insults.insults import ServerProtocol
from twisted.conch.manhole import ColoredManhole
from axiom.store import Store
from axiom.item import Item
from axiom.attributes import text, inmemory
from axiom.dependency import installOn
from axiom.userbase import LoginSystem, LoginMethod
from xmantissa.ixmantissa import IProtocolFactoryFactory, ITerminalServerFactory
from xmantissa.ixmantissa import IViewer
from xmantissa.sharing import getSelfRole
from xmantissa.terminal import SecureShellConfiguration, TerminalManhole
from xmantissa.terminal import ShellAccount, ShellServer, _ReturnToMenuWrapper
from xmantissa.terminal import _AuthenticatedShellViewer
class SecureShellConfigurationTests(TestCase):
"""
    Tests for L{xmantissa.terminal.SecureShellConfiguration} which defines how to
create an SSH server.
"""
_hostKey = (
"-----BEGIN RSA PRIVATE KEY-----\n"
"<KEY>"
"<KEY>"
"FN+Gl2ZFLFB3dwIBIwJgC+DFa4b4It+lv2Wllaquqf4m1G7iYzSxxCzm+JzLw5lN\n"
"bmsM0rX+Yk7bx3LcM6m34vyvhY6p/kQyjHo7/CkpaSQg4bnpOcqEq3oMf8E0c0lp\n"
"TQ1TdtfnKKrZZPTaVr7rAjEA7O19/tSLK6by1BpE1cb6W07GK1WcafYLxQLT64o+\n"
"GKxbrlsossc8gWJ8GDRjE2S5AjEA4JkYfYkgfucH941r9yDFrhr6FuOdwbLXDESZ\n"
"DyLhW/7DHiVIXlaLFnY+51PcTwWvAjBzFESDFsdBFpMz0j6w+j8WaBccXMhQuVYs\n"
"fbdjxs20NnWsdWuKCQAhljxGRVSxpfMCMBmrGL3jyTMTFtp2j/78bl0KZbl5GVf3\n"
"LoUPJ29xs1r4i1PnAPTWsM9d+I93TGDNcwIxAMRz4KO02tiLXG2igwDw/WWszrkr\n"
"r4ggaFDlt4QqoNz0l4tayqzbDV1XceLgP4cXcQ==\n"
"-----END RSA PRIVATE KEY-----\n")
def setUp(self):
"""
Create an in-memory L{Store} with a L{SecureShellConfiguration} in it.
"""
self.store = Store()
self.shell = SecureShellConfiguration(
store=self.store, hostKey=self._hostKey)
installOn(self.shell, self.store)
def test_interfaces(self):
"""
L{SecureShellConfiguration} implements L{IProtocolFactoryFactory}.
"""
self.assertTrue(verifyObject(IProtocolFactoryFactory, self.shell))
def test_powerup(self):
"""
L{installOn} powers up the target for L{IProtocolFactoryFactory} with
L{SecureShellConfiguration}.
"""
self.assertIn(
self.shell, list(self.store.powerupsFor(IProtocolFactoryFactory)))
def test_repr(self):
"""
The result of C{repr} on a L{SecureShellConfiguration} instance
includes only a fingerprint of the private key, not the entire value.
"""
self.assertEqual(
repr(self.shell),
"SecureShellConfiguration(storeID=%d, " % (self.shell.storeID,) +
"hostKeyFingerprint='<KEY>')")
def assertHostKey(self, shell, factory):
"""
Assert that the public and private keys provided by C{factory}
match those specified by C{shell} and that they are L{Key}
instances.
"""
privateKey = Key.fromString(shell.hostKey)
self.assertEqual(
factory.publicKeys, {'ssh-rsa': privateKey.public()})
self.assertEqual(factory.privateKeys, {'ssh-rsa': privateKey})
def test_getFactory(self):
"""
L{SecureShellConfiguration.getFactory} returns an L{SSHFactory} with
keys from L{SecureShellConfiguration.hostKey}.
"""
factory = self.shell.getFactory()
self.assertHostKey(self.shell, factory)
def test_keyGeneration(self):
"""
L{SecureShellConfiguration} generates its own key pair if one is not
supplied to C{__init__}.
"""
store = Store()
shell = SecureShellConfiguration(store=store)
installOn(shell, store)
factory = shell.getFactory()
self.assertHostKey(shell, factory)
def test_keyRotation(self):
"""
L{SecureShellConfiguration.rotate} generates a new key pair replacing
the old one.
"""
oldKey = self.shell.hostKey
self.shell.rotate()
newKey = self.shell.hostKey
self.assertNotEqual(oldKey, newKey)
factory = self.shell.getFactory()
self.assertHostKey(self.shell, factory)
def test_portal(self):
"""
The factory returned by L{SecureShellConfiguration.getFactory} has a
C{portal} attribute which allows logins authenticated in the usual
L{axiom.userbase} manner.
"""
localpart = u'foo bar'
domain = u'example.com'
password = u'<PASSWORD>'
loginSystem = self.store.findUnique(LoginSystem)
account = loginSystem.addAccount(
localpart, domain, password, internal=True)
subStore = account.avatars.open()
avatar = object()
subStore.inMemoryPowerUp(avatar, IConchUser)
factory = self.shell.getFactory()
login = factory.portal.login(
UsernamePassword(
'%s@%s' % (localpart.encode('ascii'), domain.encode('ascii')),
password),
None, IConchUser)
def cbLoggedIn(result):
self.assertIdentical(IConchUser, result[0])
self.assertIdentical(avatar, result[1])
login.addCallback(cbLoggedIn)
return login
class AuthenticatedShellViewerTests(TestCase):
"""
Tests for L{_AuthenticatedShellViewer}, an L{IViewer} implementation for
use with L{ITerminalServerFactory.buildTerminalProtocol}.
"""
def test_interface(self):
"""
L{_AuthenticatedShellViewer} instances provide L{IViewer}.
"""
self.assertTrue(verifyObject(IViewer, _AuthenticatedShellViewer([])))
def test_roleIn(self):
"""
L{_AuthenticatedShellViewer.roleIn} returns a L{Role} for one of the
account names passed to L{_AuthenticatedShellViewer.__init__}.
"""
store = Store()
viewer = _AuthenticatedShellViewer([(u"alice", u"example.com")])
role = viewer.roleIn(store)
self.assertEquals(role.externalID, u"<EMAIL>")
self.assertIdentical(role.store, store)
class ShellAccountTests(TestCase):
"""
Tests for L{ShellAccount} which provide a basic L{IConchUser} avatar.
"""
def setUp(self):
"""
Create an in-memory L{Store} with a L{ShellAccount} in it.
"""
self.store = Store()
self.account = ShellAccount(store=self.store)
installOn(self.account, self.store)
def test_interfaces(self):
"""
L{ShellAccount} powers up the item on which it is installed for
L{IConchUser} and the L{IConchUser} powerup is adaptable to
L{ISession}.
"""
avatar = IConchUser(self.store)
self.assertTrue(verifyObject(IConchUser, avatar))
session = ISession(avatar)
self.assertTrue(verifyObject(ISession, session))
def test_lookupSessionChannel(self):
"""
L{ShellAccount.lookupChannel} returns an L{SSHSession} instance. (This
is because L{SSHSession} implements handlers for the standard SSH
requests issued to set up a shell.)
"""
avatar = IConchUser(self.store)
channel = avatar.lookupChannel('session', 65536, 16384, '')
self.assertTrue(isinstance(channel, SSHSession))
def test_openShell(self):
"""
The L{ISession} adapter of the L{IConchUser} powerup implements
C{openShell} so as to associate the given L{IProcessProtocol} with a
transport.
"""
proto = ProcessProtocol()
session = ISession(IConchUser(self.store))
# XXX See Twisted ticket #3864
proto.session = session
proto.write = lambda bytes: None
# XXX See #2895.
session.getPty(None, (123, 456, 789, 1000), None)
session.openShell(proto)
self.assertNotIdentical(proto.transport, None)
class FakeTerminal(TerminalBuffer):
"""
A fake implementation of L{ITerminalTransport} used by the
L{_ReturnToMenuWrapper} tests.
"""
disconnected = False
def loseConnection(self):
self.disconnected = True
# Work around https://twistedmatrix.com/trac/ticket/8843
def __str__(self):
if hasattr(self, '__bytes__'):
return self.__bytes__()
else:
return TerminalBuffer.__str__(self)
class ReturnToMenuTests(TestCase):
"""
Tests for L{_ReturnToMenuWrapper} which wraps an L{ITerminalTransport} for
an L{ITerminalProtocol} and switches to another L{ITerminalProtocol} when
C{loseConnection} is called on it instead of disconnecting.
"""
def test_write(self):
"""
L{_ReturnToMenuWrapper.write} passes through to the wrapped terminal.
"""
terminal = FakeTerminal()
terminal.makeConnection(None)
wrapper = _ReturnToMenuWrapper(None, terminal)
wrapper.write('some bytes')
wrapper.write('some more')
self.assertIn('some bytessome more', str(terminal))
def test_loseConnection(self):
"""
L{_ReturnToMenuWrapper.loseConnection} does not disconnect the
terminal; instead it calls the C{reactivate} method of its C{shell}
attribute.
"""
class FakeShell(object):
activated = False
def reactivate(self):
self.activated = True
shell = FakeShell()
terminal = FakeTerminal()
wrapper = _ReturnToMenuWrapper(shell, terminal)
wrapper.loseConnection()
self.assertFalse(terminal.disconnected)
self.assertTrue(shell.activated)
class MockTerminalProtocol(object):
"""
    Implementation of L{ITerminalProtocol} used to test L{ShellServer}'s
interactions with the interface.
@ivar terminal: The L{ITerminalTransport} passed to C{makeConnection}.
@ivar keystrokes: A C{list} of two-tuples giving each keystroke which this
protocol has received.
@ivar disconnected: A C{bool} indicating whether C{connectionLost} has been
called yet.
"""
def __init__(self):
self.keystrokes = []
self.terminal = None
self.disconnected = False
def makeConnection(self, terminal):
self.terminal = terminal
def connectionLost(self, reason):
self.disconnected = True
def keystrokeReceived(self, keyID, modifier):
self.keystrokes.append((keyID, modifier))
class MockTerminalServerFactory(object):
"""
    Implementation of L{ITerminalServerFactory} used to test L{ShellServer}'s
    interactions with the interface.
    @ivar terminalProtocolInstance: The L{MockTerminalProtocol} created and
returned by C{buildTerminalProtocol}, or C{None} if that method has not
been called.
"""
implements(ITerminalServerFactory)
name = "mock"
shellViewer = None
terminalProtocolInstance = None
def buildTerminalProtocol(self, shellViewer):
self.shellViewer = shellViewer
self.terminalProtocolInstance = MockTerminalProtocol()
return self.terminalProtocolInstance
# Sanity check - this isn't a comprehensive (or even close) verification of
# MockTerminalServerFactory, but it at least points out obvious mistakes.
verifyObject(ITerminalServerFactory, MockTerminalServerFactory())
class MockTerminalServerFactoryItem(Item):
"""
An L{Item} implementation of L{ITerminalServerFactory} used by tests.
"""
powerupInterfaces = (ITerminalServerFactory,)
implements(*powerupInterfaces)
name = text()
shellViewer = inmemory(
doc="""
The L{IViewer} passed to L{buildTerminalProtocol}.
""")
terminalProtocolInstance = inmemory(
doc="""
        The L{MockTerminalProtocol} created and returned by
C{buildTerminalProtocol}, or C{None} if that method has not been
called.
""")
def activate(self):
self.shellViewer = None
self.terminalProtocolInstance = None
def buildTerminalProtocol(self, shellViewer):
self.shellViewer = shellViewer
self.terminalProtocolInstance = MockTerminalProtocol()
return self.terminalProtocolInstance
# Sanity check - see above call to verifyObject.
verifyObject(ITerminalServerFactory, MockTerminalServerFactoryItem())
class ShellServerTests(TestCase):
"""
Tests for L{ShellServer} which is the top-level L{ITerminalProtocol},
interacting initially and directly with terminals by presenting a menu of
possible activities and delegating to other L{ITerminalProtocol}s which
appropriate.
"""
def test_switchTo(self):
"""
L{ShellServer.switchTo} takes a L{ITerminalServerFactory} and uses it
to create a new L{ITerminalProtocol} which it connects to a
L{_ReturnToMenuWrapper}. L{buildTerminalProtocol} is passed an
L{IViewer}.
"""
terminal = FakeTerminal()
store = Store()
# Put a login method into the store so it can have a role. See #2665.
LoginMethod(
store=store, internal=True, protocol=u'*', verified=True,
localpart=u'alice', domain=u'example.com',
# Not really an account, but simpler...
account=store)
server = ShellServer(store)
server.makeConnection(terminal)
factory = MockTerminalServerFactory()
server.switchTo(factory)
self.assertIdentical(factory.shellViewer.roleIn(store), getSelfRole(store))
self.assertTrue(isinstance(server._protocol, MockTerminalProtocol))
self.assertTrue(isinstance(server._protocol.terminal, _ReturnToMenuWrapper))
self.assertIdentical(server._protocol.terminal._shell, server)
self.assertIdentical(server._protocol.terminal._terminal, terminal)
def test_appButtons(self):
"""
L{ShellServer._appButtons} returns an iterator the elements of which
are L{Button} instances, one for each L{ITerminalServerFactory}
powerup. When one of these buttons is activated, L{ShellServer} is
switched to the corresponding L{ITerminalServerFactory}'s protocol.
"""
store = Store()
terminal = FakeTerminal()
server = ShellServer(store)
server.makeConnection(terminal)
firstFactory = MockTerminalServerFactoryItem(
store=store, name=u"first - \N{ROMAN NUMERAL ONE}")
installOn(firstFactory, store)
secondFactory = MockTerminalServerFactoryItem(
store=store, name=u"second - \N{ROMAN NUMERAL TWO}")
installOn(secondFactory, store)
buttons = list(server._appButtons())
self.assertEqual(len(buttons), 2)
# For now, we'll say the order isn't significant.
buttons.sort(key=lambda b: b.label)
self.assertEqual(
buttons[0].label, firstFactory.name.encode('utf-8'))
buttons[0].onPress()
server.keystrokeReceived('x', None)
self.assertEqual(
firstFactory.terminalProtocolInstance.keystrokes, [('x', None)])
self.assertEqual(
buttons[1].label, secondFactory.name.encode('utf-8'))
buttons[1].onPress()
server.keystrokeReceived('y', None)
self.assertEqual(
secondFactory.terminalProtocolInstance.keystrokes, [('y', None)])
def test_logoffButton(self):
"""
L{ShellServer._logoffButton} returns a L{Button} which, when activated,
disconnects the terminal.
"""
terminal = FakeTerminal()
server = ShellServer(Store())
server.makeConnection(terminal)
server._logoffButton().onPress()
self.assertTrue(terminal.disconnected)
def test_reactivate(self):
"""
L{ShellServer.reactivate} disconnects the protocol previously switched
to, drops the reference to it, and redraws the main menu.
"""
terminal = FakeTerminal()
server = ShellServer(Store())
server.makeConnection(terminal)
server.switchTo(MockTerminalServerFactory())
server.reactivate()
self.assertIdentical(server._protocol, None)
def test_keystrokeReceivedWindow(self):
"""
L{ShellServer.keystrokeReceived} delivers keystroke data to the main
menu widget when no protocol has been switched to.
"""
class FakeWidget(object):
def __init__(self):
self.keystrokes = []
def keystrokeReceived(self, keyID, modifier):
self.keystrokes.append((keyID, modifier))
terminal = FakeTerminal()
window = FakeWidget()
server = ShellServer(Store())
server._makeWindow = lambda: window
server.makeConnection(terminal)
server.keystrokeReceived(' ', ServerProtocol.ALT)
self.assertEqual(window.keystrokes, [(' ', ServerProtocol.ALT)])
def test_keystrokeReceivedProtocol(self):
"""
L{ShellServer.keystrokeReceived} delivers keystroke data to the
protocol built by the factory which has been switched to.
"""
factory = MockTerminalServerFactory()
terminal = FakeTerminal()
server = ShellServer(Store())
server.makeConnection(terminal)
server.switchTo(factory)
server.keystrokeReceived(' ', ServerProtocol.ALT)
self.assertEqual(
factory.terminalProtocolInstance.keystrokes,
[(' ', ServerProtocol.ALT)])
class ManholeTests(TestCase):
"""
Tests for L{TerminalManhole} which provides an L{ITerminalServerFactory}
for a protocol which gives a user an in-process Python REPL.
"""
def test_interface(self):
"""
L{TerminalManhole} implements L{ITerminalServerFactory}.
"""
self.assertTrue(verifyObject(ITerminalServerFactory, TerminalManhole()))
def test_buildTerminalProtocol(self):
"""
L{TerminalManhole.buildTerminalProtocol} returns a L{ColoredManhole}
with a namespace including the store the L{TerminalManhole} is in.
"""
store = Store()
factory = TerminalManhole(store=store)
viewer = object()
protocol = factory.buildTerminalProtocol(viewer)
self.assertTrue(isinstance(protocol, ColoredManhole))
self.assertEqual(protocol.namespace, {'db': store, 'viewer': viewer})
```
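MockTerminalServerFactoryItem above shows the full shape of an ITerminalServerFactory powerup. As a hedged sketch of a concrete one, the item below hands each shell session a trivial protocol that echoes keystrokes back to the terminal; the echo protocol and the names are illustrative assumptions, while the powerup declaration and the buildTerminalProtocol signature follow TerminalManhole and the mock factories used in these tests.
```python
# Sketch only: an ITerminalServerFactory item whose protocol echoes input.
# The protocol and names are illustrative; the interface usage mirrors the
# mock factories above.
from zope.interface import implements
from axiom.item import Item
from axiom.attributes import integer
from xmantissa.ixmantissa import ITerminalServerFactory

class EchoTerminalProtocol(object):
    # Deliberately tiny ITerminalProtocol: write back whatever is typed.
    def makeConnection(self, terminal):
        self.terminal = terminal
    def connectionLost(self, reason):
        self.terminal = None
    def keystrokeReceived(self, keyID, modifier):
        self.terminal.write(keyID)

class EchoFactory(Item):
    powerupInterfaces = (ITerminalServerFactory,)
    implements(*powerupInterfaces)
    typeName = 'example_echo_terminal_factory'
    schemaVersion = 1
    name = u'echo'
    # Axiom requires at least one attribute on an Item.
    sessions = integer(default=0)

    def buildTerminalProtocol(self, shellViewer):
        # shellViewer provides IViewer; this trivial factory ignores it.
        return EchoTerminalProtocol()
```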
#### File: mantissa/xmantissa/webadmin.py
```python
import random, string
from zope.interface import implements
from twisted.python.components import registerAdapter
from twisted.python.util import sibpath
from twisted.python.filepath import FilePath
from twisted.python import log
from twisted.application.service import IService, Service
from twisted.conch import manhole
from twisted.cred.portal import IRealm
from nevow.page import renderer
from nevow.athena import expose
from epsilon import extime
from axiom.attributes import (integer, boolean, timestamp, bytes, reference,
inmemory, AND)
from axiom.item import Item, declareLegacyItem
from axiom import userbase
from axiom.dependency import installOn, dependsOn
from axiom.upgrade import registerUpgrader
from xmantissa.liveform import LiveForm, Parameter, ChoiceParameter
from xmantissa.liveform import TEXT_INPUT ,CHECKBOX_INPUT
from xmantissa import webtheme, liveform, webnav, offering, signup
from xmantissa.port import TCPPort, SSLPort
from xmantissa.product import ProductConfiguration, Product, Installation
from xmantissa.suspension import suspendJustTabProviders, unsuspendTabProviders
from xmantissa.tdb import AttributeColumn
from xmantissa.scrolltable import ScrollingFragment
from xmantissa.webapp import PrivateApplication
from xmantissa.website import WebSite, PrefixURLMixin
from xmantissa.terminal import TerminalManhole
from xmantissa.ixmantissa import (
INavigableElement, INavigableFragment, ISessionlessSiteRootPlugin,
IProtocolFactoryFactory)
from nevow import rend, athena, static, tags as T
class DeveloperSite(Item, PrefixURLMixin):
"""
Provides static content sessionlessly for the developer application.
"""
implements(ISessionlessSiteRootPlugin)
typeName = 'developer_site'
schemaVersion = 1
sessionless = True
prefixURL = 'static/webadmin'
# Counts of each kind of user. These are not maintained, they should be
# upgraded away at some point. -exarkun
developers = integer(default=0)
administrators = integer(default=0)
def createResource(self):
return static.File(sibpath(__file__, 'static'))
class AdminStatsApplication(Item):
"""
Obsolete. Only present for schema compatibility. Do not use.
"""
powerupInterfaces = (INavigableElement,)
implements(INavigableElement)
schemaVersion = 2
typeName = 'administrator_application'
updateInterval = integer(default=5)
privateApplication = dependsOn(PrivateApplication)
def getTabs(self):
return []
declareLegacyItem(AdminStatsApplication, 1,
dict(updateInterval=integer(default=5)))
def _adminStatsApplication1to2(old):
new = old.upgradeVersion(AdminStatsApplication.typeName, 1, 2,
updateInterval=old.updateInterval,
privateApplication=old.store.findOrCreate(PrivateApplication))
return new
registerUpgrader(_adminStatsApplication1to2, AdminStatsApplication.typeName, 1, 2)
class LocalUserBrowser(Item):
"""
XXX I am an unfortunate necessity.
This class shouldn't exist, and in fact, will be destroyed at the first
possible moment. It's stateless, existing only to serve as a web lookup
hook for the UserInteractionFragment view class.
"""
implements(INavigableElement)
typeName = 'local_user_browser'
schemaVersion = 2
privateApplication = dependsOn(PrivateApplication)
powerupInterfaces = (INavigableElement,)
def getTabs(self):
return [webnav.Tab('Admin', self.storeID, 0.0,
[webnav.Tab('Local Users', self.storeID, 0.1)],
authoritative=False)]
declareLegacyItem(LocalUserBrowser.typeName, 1,
dict(garbage=integer(default=0)))
def _localUserBrowser1to2(old):
new = old.upgradeVersion(LocalUserBrowser.typeName, 1, 2,
privateApplication=old.store.findOrCreate(PrivateApplication))
return new
registerUpgrader(_localUserBrowser1to2, LocalUserBrowser.typeName, 1, 2)
class UserInteractionFragment(webtheme.ThemedElement):
"""
Contains two other user-interface elements which allow existing users to be
browsed and new users to be created, respectively.
"""
fragmentName = 'admin-user-interaction'
def __init__(self, userBrowser):
"""
@param userBrowser: a LocalUserBrowser instance
"""
super(UserInteractionFragment, self).__init__()
self.browser = userBrowser
def userBrowser(self, request, tag):
"""
Render a TDB of local users.
"""
f = LocalUserBrowserFragment(self.browser)
f.docFactory = webtheme.getLoader(f.fragmentName)
f.setFragmentParent(self)
return f
renderer(userBrowser)
def userCreate(self, request, tag):
"""
Render a form for creating new users.
"""
userCreator = liveform.LiveForm(
self.createUser,
[liveform.Parameter(
"localpart",
liveform.TEXT_INPUT,
unicode,
"localpart"),
liveform.Parameter(
"domain",
liveform.TEXT_INPUT,
unicode,
"domain"),
liveform.Parameter(
"password",
liveform.PASSWORD_INPUT,
unicode,
"password")])
userCreator.setFragmentParent(self)
return userCreator
renderer(userCreate)
def createUser(self, localpart, domain, password=None):
"""
Create a new, blank user account with the given name and domain and, if
specified, with the given password.
@type localpart: C{unicode}
@param localpart: The local portion of the username. ie, the
C{'alice'} in C{'<EMAIL>'}.
@type domain: C{unicode}
@param domain: The domain portion of the username. ie, the
C{'example.com'} in C{'<EMAIL>'}.
@type password: C{unicode} or C{None}
@param password: The password to associate with the new account. If
C{None}, generate a new password automatically.
"""
loginSystem = self.browser.store.parent.findUnique(userbase.LoginSystem)
if password is None:
password = u''.join([random.choice(string.ascii_letters + string.digits) for i in xrange(8)])
loginSystem.addAccount(localpart, domain, password)
registerAdapter(UserInteractionFragment, LocalUserBrowser, INavigableFragment)
class LocalUserBrowserFragment(ScrollingFragment):
jsClass = u'Mantissa.Admin.LocalUserBrowser'
def __init__(self, userBrowser):
ScrollingFragment.__init__(self, userBrowser.store.parent,
userbase.LoginMethod,
userbase.LoginMethod.domain != None,
(userbase.LoginMethod.localpart,
userbase.LoginMethod.domain,
userbase.LoginMethod.verified),
defaultSortColumn=userbase.LoginMethod.domain,
defaultSortAscending=True)
def linkToItem(self, item):
# no IWebTranslator. better ideas?
# will (localpart, domain, protocol) always be unique?
return unicode(item.storeID)
def itemFromLink(self, link):
return self.store.getItemByID(int(link))
def doAction(self, loginMethod, actionClass):
"""
Show the form for the requested action.
"""
loginAccount = loginMethod.account
return actionClass(
self,
loginMethod.localpart + u'@' + loginMethod.domain,
loginAccount)
def action_installOn(self, loginMethod):
return self.doAction(loginMethod, EndowFragment)
def action_uninstallFrom(self, loginMethod):
return self.doAction(loginMethod, DepriveFragment)
def action_suspend(self, loginMethod):
return self.doAction(loginMethod, SuspendFragment)
def action_unsuspend(self, loginMethod):
return self.doAction(loginMethod, UnsuspendFragment)
class EndowDepriveFragment(webtheme.ThemedElement):
fragmentName = 'user-detail'
def __init__(self, fragmentParent, username, loginAccount, which):
super(EndowDepriveFragment, self).__init__(fragmentParent)
self.account = loginAccount
self.which = which
self.username = username
def _endow(self, **kw):
subs = self.account.avatars.open()
def endowall():
for product in kw.values():
if product is not None:
getattr(product, self.which)(subs)
subs.transact(endowall)
def productForm(self, request, tag):
"""
Render a L{liveform.LiveForm} -- the main purpose of this fragment --
which will allow the administrator to endow or deprive existing users
using Products.
"""
def makeRemover(i):
def remover(s3lected):
if s3lected:
return self.products[i]
return None
return remover
f = liveform.LiveForm(
self._endow,
[liveform.Parameter(
'products' + str(i),
liveform.FORM_INPUT,
liveform.LiveForm(
makeRemover(i),
[liveform.Parameter(
's3lected',
liveform.RADIO_INPUT,
bool,
repr(p),
)],
'',
),
)
for (i, p)
in enumerate(self.products)],
self.which.capitalize() + u' ' + self.username)
f.setFragmentParent(self)
return f
renderer(productForm)
class EndowFragment(EndowDepriveFragment):
def __init__(self, fragmentParent, username, loginAccount):
EndowDepriveFragment.__init__(self, fragmentParent,
username, loginAccount,
'installProductOn')
allProducts = list(self.account.store.query(Product))
self.products = [p for p in allProducts
if not self.account.avatars.open().findUnique(Installation,
Installation.types
== p.types,
None)]
self.desc = "Install on"
class DepriveFragment(EndowDepriveFragment):
def __init__(self, fragmentParent, username, loginAccount):
EndowDepriveFragment.__init__(self, fragmentParent,
username, loginAccount,
'removeProductFrom')
allProducts = list(self.account.store.query(Product))
self.products = [p for p in allProducts
if self.account.avatars.open().findUnique(Installation,
Installation.types
== p.types,
None)]
self.desc = "Remove from"
class SuspendFragment(EndowDepriveFragment):
def __init__(self, fragmentParent, username, loginAccount):
self.func = suspendJustTabProviders
EndowDepriveFragment.__init__(self, fragmentParent,
username, loginAccount,
'suspend')
allProducts = list(self.account.store.query(Product))
self.products = [p for p in allProducts
if self.account.avatars.open().findUnique(Installation,
AND(Installation.types == p.types,
Installation.suspended == False,
), [])]
self.desc = "Suspend"
def _endow(self, **kw):
subs = self.account.avatars.open()
def suspend():
for product in kw.values():
if product is not None:
i = subs.findUnique(Installation,
Installation.types == product.types,
None)
self.func(i)
subs.transact(suspend)
class UnsuspendFragment(SuspendFragment):
def __init__(self, fragmentParent, username, loginAccount):
self.func = unsuspendTabProviders
EndowDepriveFragment.__init__(self, fragmentParent,
username, loginAccount,
'unsuspend')
allProducts = list(self.account.store.query(Product))
self.products = [p for p in allProducts
if self.account.avatars.open().findUnique(Installation,
AND(Installation.types == p.types,
Installation.suspended == True),
None)]
self.desc = "Unsuspend"
class DeveloperApplication(Item):
"""
"""
implements(INavigableElement)
schemaVersion = 2
typeName = 'developer_application'
privateApplication = dependsOn(PrivateApplication)
statementCount = integer(default=0)
powerupInterfaces = (INavigableElement,)
def deletedFromStore(self, *a, **kw):
return super(DeveloperApplication, self).deletedFromStore(*a, **kw)
# INavigableElement
def getTabs(self):
return [webnav.Tab('Admin', self.storeID, 0.0,
[webnav.Tab('REPL', self.storeID, 0.0)],
authoritative=False)]
declareLegacyItem(DeveloperApplication.typeName, 1,
dict(statementCount=integer(default=0)))
def _developerApplication1to2(old):
new = old.upgradeVersion(DeveloperApplication.typeName, 1, 2,
statementCount=old.statementCount,
privateApplication=old.store.findOrCreate(PrivateApplication))
return new
registerUpgrader(_developerApplication1to2, DeveloperApplication.typeName, 1, 2)
class REPL(athena.LiveFragment):
"""
Provides an interactive Read-Eval-Print loop. On a web page (duh).
"""
implements(INavigableFragment)
jsClass = u'Mantissa.InterpreterWidget'
fragmentName = 'admin-python-repl'
live = 'athena'
def __init__(self, *a, **kw):
rend.Fragment.__init__(self, *a, **kw)
self.namespace = {'s': self.original.store, 'getStore': self.getStore}
self.interpreter = manhole.ManholeInterpreter(
self,
self.namespace)
def getStore(self, name, domain):
"""Convenience method for the REPL. I got tired of typing this string every time I logged in."""
return IRealm(self.original.store.parent).accountByAddress(name, domain).avatars.open()
def head(self):
return ()
def addOutput(self, output, async=False):
self.callRemote('addOutputLine', unicode(output, 'ascii'))
def evaluateInputLine(self, inputLine):
return self.interpreter.push(inputLine)
expose(evaluateInputLine)
registerAdapter(REPL, DeveloperApplication, INavigableFragment)
class Traceback(Item):
typeName = 'mantissa_traceback'
schemaVersion = 1
when = timestamp()
traceback = bytes()
collector = reference()
def __init__(self, store, collector, failure):
when = extime.Time()
traceback = failure.getTraceback()
super(Traceback, self).__init__(
store=store,
traceback=traceback,
when=when,
collector=collector)
class TracebackCollector(Item, Service):
implements(IService)
typeName = 'mantissa_traceback_collector'
schemaVersion = 1
tracebackCount = integer(default=0)
parent = inmemory()
running = inmemory()
name = inmemory()
powerupInterfaces = (IService,)
def installed(self):
self.setServiceParent(self.store)
def startService(self):
log.addObserver(self.emit)
def stopService(self):
log.removeObserver(self.emit)
def emit(self, event):
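        # Invoked as a Twisted log observer (see startService); record only
        # error events carrying a Failure, inside a transaction so that
        # tracebackCount stays consistent with the stored Traceback items.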
if event.get('isError') and event.get('failure') is not None:
f = event['failure']
def txn():
self.tracebackCount += 1
Traceback(store=self.store, collector=self, failure=f)
self.store.transact(txn)
def getTracebacks(self):
"""
Return an iterable of Tracebacks that have been collected.
"""
return self.store.query(Traceback,
Traceback.collector == self)
class TracebackViewer(Item):
implements(INavigableElement)
typeName = 'mantissa_tb_viewer'
schemaVersion = 2
allowDeletion = boolean(default=False)
privateApplication = dependsOn(PrivateApplication)
powerupInterfaces = (INavigableElement,)
def getTabs(self):
return [webnav.Tab('Admin', self.storeID, 0.0,
[webnav.Tab('Errors', self.storeID, 0.3)],
authoritative=False)]
def _getCollector(self):
def ifCreate(coll):
installOn(coll, self.store.parent)
return self.store.parent.findOrCreate(TracebackCollector, ifCreate)
# this needs to be moved somewhere else, topPanelContent is no more
#def topPanelContent(self):
# # XXX There should really be a juice protocol for this.
# return '%d errors logged' % (self._getCollector().tracebackCount,)
declareLegacyItem(TracebackViewer, 1,
dict(allowDeletion=boolean(default=False)))
def _tracebackViewer1to2(old):
return old.upgradeVersion(TracebackViewer.typeName, 1, 2,
allowDeletion=old.allowDeletion,
privateApplication=old.store.findOrCreate(PrivateApplication))
registerUpgrader(_tracebackViewer1to2, TracebackViewer.typeName, 1, 2)
class TracebackViewerFragment(rend.Fragment):
implements(INavigableFragment)
live = False
fragmentName = 'traceback-viewer'
def head(self):
return ()
def render_tracebacks(self, ctx, data):
for tb in self.original._getCollector().getTracebacks():
yield T.div[T.code[T.pre[tb.traceback]]]
registerAdapter(TracebackViewerFragment, TracebackViewer, INavigableFragment)
class PortConfiguration(Item):
"""
Marker powerup which allows those on whom it is installed to modify the
configuration of listening ports in this server.
"""
implements(INavigableElement)
powerupInterfaces = (INavigableElement,)
# Only present because Axiom requires at least one attribute on an Item.
garbage = integer(default=12345678653)
def getTabs(self):
"""
Add this object to the tab navigation so it can display configuration
information and allow configuration to be modified.
"""
return [webnav.Tab('Admin', self.storeID, 0.0,
[webnav.Tab('Ports', self.storeID, 0.4)],
authoritative=False)]
def createPort(self, portNumber, ssl, certPath, factory, interface=u''):
"""
Create a new listening port.
@type portNumber: C{int}
@param portNumber: Port number on which to listen.
@type ssl: C{bool}
@param ssl: Indicates whether this should be an SSL port or not.
@type certPath: C{str}
        @param certPath: If C{ssl} is true, a path to a certificate file somewhere
within the site store's files directory. Ignored otherwise.
@param factory: L{Item} which provides L{IProtocolFactoryFactory} which
will be used to get a protocol factory to associate with this port.
@return: C{None}
"""
store = self.store.parent
if ssl:
port = SSLPort(store=store, portNumber=portNumber,
certificatePath=FilePath(certPath), factory=factory,
interface=interface)
else:
port = TCPPort(store=store, portNumber=portNumber, factory=factory,
interface=interface)
installOn(port, store)
class FactoryColumn(AttributeColumn):
"""
Display the name of the class of items referred to by a reference
attribute.
"""
def extractValue(self, model, item):
"""
Get the class name of the factory referenced by a port.
@param model: Either a TabularDataModel or a ScrollableView, depending
on what this column is part of.
@param item: A port item instance (as defined by L{xmantissa.port}).
@rtype: C{unicode}
@return: The name of the class of the item to which this column's
attribute refers.
"""
factory = super(FactoryColumn, self).extractValue(model, item)
return factory.__class__.__name__.decode('ascii')
class CertificateColumn(AttributeColumn):
"""
Display a path attribute as a unicode string.
"""
def extractValue(self, model, item):
"""
Get the path referenced by this column's attribute.
@param model: Either a TabularDataModel or a ScrollableView, depending
on what this column is part of.
@param item: A port item instance (as defined by L{xmantissa.port}).
@rtype: C{unicode}
"""
certPath = super(CertificateColumn, self).extractValue(model, item)
return certPath.path.decode('utf-8', 'replace')
class PortScrollingFragment(ScrollingFragment):
"""
A scrolling fragment for TCPPorts and SSLPorts which knows how to link to
them and how to delete them.
@ivar userStore: The store of the user viewing these ports.
@ivar siteStore: The site store, where TCPPorts and SSLPorts are loaded
from.
"""
jsClass = u'Mantissa.Admin.PortBrowser'
def __init__(self, userStore, portType, columns):
super(PortScrollingFragment, self).__init__(
userStore.parent,
portType,
None,
columns)
self.userStore = userStore
self.siteStore = userStore.parent
self.webTranslator = self.userStore.findUnique(PrivateApplication)
def itemFromLink(self, link):
"""
@type link: C{unicode}
@param link: A webID to translate into an item.
@rtype: L{Item}
@return: The item to which the given link referred.
"""
return self.siteStore.getItemByID(self.webTranslator.linkFrom(link))
def action_delete(self, port):
"""
Delete the given port.
"""
port.deleteFromStore()
class PortConfigurationFragment(webtheme.ThemedElement):
"""
Provide the view for L{PortConfiguration}.
Specifically, three renderers are offered: the first two, L{tcpPorts} and
L{sslPorts}, add a L{PortScrollingFragment} to their tag as a child; the
last, L{createPortForm} adds a L{LiveForm} for adding new ports to its tag
as a child.
@ivar portConf: The L{PortConfiguration} item.
@ivar store: The user store.
"""
implements(INavigableFragment)
fragmentName = 'port-configuration'
def __init__(self, portConf):
super(PortConfigurationFragment, self).__init__()
self.portConf = portConf
self.store = portConf.store
def head(self):
return ()
def tcpPorts(self, req, tag):
"""
Create and return a L{PortScrollingFragment} for the L{TCPPort} items
in site store.
"""
f = PortScrollingFragment(
self.store,
TCPPort,
(TCPPort.portNumber,
TCPPort.interface,
FactoryColumn(TCPPort.factory)))
f.setFragmentParent(self)
f.docFactory = webtheme.getLoader(f.fragmentName)
return tag[f]
renderer(tcpPorts)
def sslPorts(self, req, tag):
"""
Create and return a L{PortScrollingFragment} for the L{SSLPort} items
in the site store.
"""
f = PortScrollingFragment(
self.store,
SSLPort,
(SSLPort.portNumber,
SSLPort.interface,
CertificateColumn(SSLPort.certificatePath),
FactoryColumn(SSLPort.factory)))
f.setFragmentParent(self)
f.docFactory = webtheme.getLoader(f.fragmentName)
return tag[f]
renderer(sslPorts)
def createPortForm(self, req, tag):
"""
Create and return a L{LiveForm} for adding a new L{TCPPort} or
L{SSLPort} to the site store.
"""
def port(s):
n = int(s)
if n < 0 or n > 65535:
raise ValueError(s)
return n
factories = []
for f in self.store.parent.powerupsFor(IProtocolFactoryFactory):
factories.append((f.__class__.__name__.decode('ascii'),
f,
False))
f = LiveForm(
self.portConf.createPort,
[Parameter('portNumber', TEXT_INPUT, port, 'Port Number',
'Integer 0 <= n <= 65535 giving the TCP port to bind.'),
Parameter('interface', TEXT_INPUT, unicode, 'Interface',
'Hostname to bind to, or blank for all interfaces.'),
Parameter('ssl', CHECKBOX_INPUT, bool, 'SSL',
'Select to indicate port should use SSL.'),
# Text area? File upload? What?
Parameter('certPath', TEXT_INPUT, unicode, 'Certificate Path',
'Path to a certificate file on the server, if SSL is to be used.'),
ChoiceParameter('factory', factories, 'Protocol Factory',
'Which pre-existing protocol factory to associate with this port.')])
f.setFragmentParent(self)
# f.docFactory = webtheme.getLoader(f.fragmentName)
return tag[f]
renderer(createPortForm)
registerAdapter(PortConfigurationFragment, PortConfiguration, INavigableFragment)
class AdministrativeBenefactor(Item):
typeName = 'mantissa_administrative_benefactor'
schemaVersion = 1
endowed = integer(default=0)
powerupNames = ["xmantissa.webadmin.AdminStatsApplication",
"xmantissa.webadmin.DeveloperApplication",
"xmantissa.signup.SignupConfiguration",
"xmantissa.webadmin.TracebackViewer",
"xmantissa.webadmin.BatchManholePowerup",
"xmantissa.webadmin.LocalUserBrowser"]
def endowAdminPowerups(userStore):
powerups = [
# Install a web site for the individual user as well.
# This is necessary because although we have a top-level
# website for everybody, not all users should be allowed
# to log in through the web (like UNIX's "system users",
# "nobody", "database", etc.) Note, however, that there
# is no port number, because the WebSite's job in this
# case is to be a web *resource*, not a web *server*.
WebSite,
# Now we install the 'private application' plugin for
# 'admin', on admin's private store, This provides the URL
# "/private", but only when 'admin' is logged in. It is a
# hook to hang other applications on. (XXX Rename:
# PrivateApplication should probably be called
# PrivateAppShell)
PrivateApplication,
# This is a plugin *for* the PrivateApplication; it publishes
# objects via the tab-based navigation (a Python interactive
# interpreter).
DeveloperApplication,
#ProductConfiguration lets admins collect powerups into
#Products users can sign up for.
ProductConfiguration,
# And another one: SignupConfiguration allows the
# administrator to add signup forms which grant various
# kinds of account.
signup.SignupConfiguration,
# This one lets the administrator view unhandled
# exceptions which occur in the server.
TracebackViewer,
# Allow the administrator to set the ports associated with
# different network services.
PortConfiguration,
# And this one gives the administrator a page listing all
# users which exist in this site's credentials database.
LocalUserBrowser,
# Grant Python REPL access to the server.
TerminalManhole,
]
for powerup in powerups:
installOn(powerup(store=userStore), userStore)
# This is another PrivateApplication plugin. It allows
# the administrator to configure the services offered
# here.
oc = offering.OfferingConfiguration(store=userStore)
installOn(oc, userStore)
```
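The `endowAdminPowerups` helper above is what an installer would call once per admin avatar. Below is a minimal, hypothetical sketch of that call; the store path is made up, and a real Mantissa deployment would obtain the user store through `axiom.userbase` rather than opening it directly.
```python
# Hypothetical sketch only: grant the standard admin powerups to a user store.
from axiom.store import Store
from xmantissa.webadmin import endowAdminPowerups

adminStore = Store('admin-avatar.axiom')   # made-up path, for illustration
endowAdminPowerups(adminStore)             # installs WebSite, PrivateApplication, etc.
```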
#### File: mantissa/xmantissa/webapp.py
```python
import os
from zope.interface import implements
from epsilon.structlike import record
from axiom.iaxiom import IPowerupIndirector
from axiom.item import Item, declareLegacyItem
from axiom.attributes import text, integer, reference
from axiom import upgrade
from axiom.dependency import dependsOn
from axiom.userbase import getAccountNames
from nevow.rend import Page
from nevow import athena
from nevow.inevow import IRequest
from nevow import tags as t
from nevow import url
from xmantissa.publicweb import CustomizedPublicPage, renderShortUsername
from xmantissa.ixmantissa import (
INavigableElement, ISiteRootPlugin, IWebTranslator, IStaticShellContent,
ITemplateNameResolver, ISiteURLGenerator, IWebViewer)
from xmantissa.website import PrefixURLMixin, JUST_SLASH, WebSite, APIKey
from xmantissa.website import MantissaLivePage
from xmantissa.webtheme import getInstalledThemes
from xmantissa.webnav import getTabs, startMenu, settingsLink, applicationNavigation
from xmantissa.sharing import getPrimaryRole
from xmantissa._webidgen import genkey, storeIDToWebID, webIDToStoreID
from xmantissa._webutil import MantissaViewHelper, WebViewerHelper
from xmantissa.offering import getInstalledOfferings
from xmantissa.webgestalt import AuthenticationApplication
from xmantissa.prefs import PreferenceAggregator, DefaultPreferenceCollection
from xmantissa.search import SearchAggregator
def _reorderForPreference(themeList, preferredThemeName):
"""
Re-order the input themeList according to the preferred theme.
Returns None.
"""
for theme in themeList:
if preferredThemeName == theme.themeName:
themeList.remove(theme)
themeList.insert(0, theme)
return
class _WebIDFormatException(TypeError):
"""
An inbound web ID was not formatted as expected.
"""
class _AuthenticatedWebViewer(WebViewerHelper):
"""
Implementation of L{IWebViewer} for authenticated users.
@ivar _privateApplication: the L{PrivateApplication} for the authenticated
user that this view is rendering.
"""
implements(IWebViewer)
def __init__(self, privateApp):
"""
@param privateApp: Probably something abstract but really it's just a
L{PrivateApplication}.
"""
WebViewerHelper.__init__(
self, privateApp.getDocFactory, privateApp._preferredThemes)
self._privateApplication = privateApp
# IWebViewer
def roleIn(self, userStore):
"""
Get the authenticated role for the user represented by this view in the
given user store.
"""
return getPrimaryRole(userStore, self._privateApplication._getUsername())
# Complete WebViewerHelper implementation
def _wrapNavFrag(self, frag, useAthena):
"""
Wrap the given L{INavigableFragment} in an appropriate
L{_FragmentWrapperMixin} subclass.
"""
username = self._privateApplication._getUsername()
cf = getattr(frag, 'customizeFor', None)
if cf is not None:
frag = cf(username)
if useAthena:
pageClass = GenericNavigationAthenaPage
else:
pageClass = GenericNavigationPage
return pageClass(self._privateApplication, frag,
self._privateApplication.getPageComponents(),
username)
class _ShellRenderingMixin(object):
"""
View mixin for Pages which use the I{shell} template.
This class provides somewhat sensible default implementations for a number
of the renderers required by the I{shell} template.
@ivar webapp: The L{PrivateApplication} of the user for whom this is a
view. This must provide the I{rootURL} method as well as
L{IWebTranslator} and L{ITemplateNameResolver}. This must be an item
in a user store associated with the site store (so that the site store
is available).
"""
fragmentName = 'main'
searchPattern = None
def __init__(self, webapp, pageComponents, username):
self.webapp = self.translator = self.resolver = webapp
self.pageComponents = pageComponents
self.username = username
def _siteStore(self):
"""
Get the site store from C{self.webapp}.
"""
return self.webapp.store.parent
def getDocFactory(self, fragmentName, default=None):
"""
Retrieve a Nevow document factory for the given name. This
implementation merely defers to the underlying L{PrivateApplication}.
@param fragmentName: a short string that names a fragment template.
@param default: value to be returned if the named template is not
found.
"""
return self.webapp.getDocFactory(fragmentName, default)
def render_content(self, ctx, data):
raise NotImplementedError("implement render_context in subclasses")
def render_title(self, ctx, data):
return ctx.tag[self.__class__.__name__]
def render_rootURL(self, ctx, data):
"""
Add the WebSite's root URL as a child of the given tag.
The root URL is the location of the resource beneath which all standard
Mantissa resources (such as the private application and static content)
is available. This can be important if a page is to be served at a
location which is different from the root URL in order to make links in
static XHTML templates resolve correctly (for example, by adding this
value as the href of a <base> tag).
"""
site = ISiteURLGenerator(self._siteStore())
return ctx.tag[site.rootURL(IRequest(ctx))]
def render_head(self, ctx, data):
return ctx.tag
def render_header(self, ctx, data):
staticShellContent = self.pageComponents.staticShellContent
if staticShellContent is None:
return ctx.tag
header = staticShellContent.getHeader()
if header is not None:
return ctx.tag[header]
else:
return ctx.tag
def render_startmenu(self, ctx, data):
"""
Add start-menu style navigation to the given tag.
@see {xmantissa.webnav.startMenu}
"""
return startMenu(
self.translator, self.pageComponents.navigation, ctx.tag)
def render_settingsLink(self, ctx, data):
"""
Add the URL of the settings page to the given tag.
@see L{xmantissa.webnav.settingsLink}
"""
return settingsLink(
self.translator, self.pageComponents.settings, ctx.tag)
def render_applicationNavigation(self, ctx, data):
"""
Add primary application navigation to the given tag.
@see L{xmantissa.webnav.applicationNavigation}
"""
return applicationNavigation(
ctx, self.translator, self.pageComponents.navigation)
def render_urchin(self, ctx, data):
"""
Render the code for recording Google Analytics statistics, if so
configured.
"""
key = APIKey.getKeyForAPI(self._siteStore(), APIKey.URCHIN)
if key is None:
return ''
return ctx.tag.fillSlots('urchin-key', key.apiKey)
def render_search(self, ctx, data):
searchAggregator = self.pageComponents.searchAggregator
if searchAggregator is None or not searchAggregator.providers():
return ''
return ctx.tag.fillSlots(
'form-action', self.translator.linkTo(searchAggregator.storeID))
def render_username(self, ctx, data):
return renderShortUsername(ctx, self.username)
def render_logout(self, ctx, data):
return ctx.tag
def render_authenticateLinks(self, ctx, data):
return ''
def _getVersions(self):
versions = []
for (name, offering) in getInstalledOfferings(self._siteStore()).iteritems():
if offering.version is not None:
v = offering.version
versions.append(str(v).replace(v.package, name))
return ' '.join(versions)
def render_footer(self, ctx, data):
footer = [self._getVersions()]
staticShellContent = self.pageComponents.staticShellContent
if staticShellContent is not None:
f = staticShellContent.getFooter()
if f is not None:
footer.append(f)
return ctx.tag[footer]
INSPECTROFY = os.environ.get('MANTISSA_DEV')
class _FragmentWrapperMixin(MantissaViewHelper):
def __init__(self, fragment, pageComponents):
self.fragment = fragment
fragment.page = self
self.pageComponents = pageComponents
def beforeRender(self, ctx):
return getattr(self.fragment, 'beforeRender', lambda x: None)(ctx)
def render_introspectionWidget(self, ctx, data):
"Until we have eliminated everything but GenericAthenaLivePage"
if INSPECTROFY:
return ctx.tag['No debugging on crap-ass bad pages']
else:
return ''
def render_head(self, ctx, data):
req = IRequest(ctx)
userStore = self.webapp.store
siteStore = userStore.parent
site = ISiteURLGenerator(siteStore)
l = self.pageComponents.themes
_reorderForPreference(l, self.webapp.preferredTheme)
extras = []
for theme in l:
extra = theme.head(req, site)
if extra is not None:
extras.append(extra)
headMethod = getattr(self.fragment, 'head', None)
if headMethod is not None:
extra = headMethod()
if extra is not None:
extras.append(extra)
return ctx.tag[extras]
def render_title(self, ctx, data):
"""
Return the current context tag containing C{self.fragment}'s C{title}
attribute, or "Divmod".
"""
return ctx.tag[getattr(self.fragment, 'title', 'Divmod')]
def render_content(self, ctx, data):
return ctx.tag[self.fragment]
class GenericNavigationPage(_FragmentWrapperMixin, Page, _ShellRenderingMixin):
def __init__(self, webapp, fragment, pageComponents, username):
Page.__init__(self, docFactory=webapp.getDocFactory('shell'))
_ShellRenderingMixin.__init__(self, webapp, pageComponents, username)
_FragmentWrapperMixin.__init__(self, fragment, pageComponents)
class GenericNavigationAthenaPage(_FragmentWrapperMixin,
MantissaLivePage,
_ShellRenderingMixin):
"""
This class provides the generic navigation elements for surrounding all
pages navigated under the /private/ namespace.
"""
def __init__(self, webapp, fragment, pageComponents, username):
"""
Top-level container for Mantissa application views.
@param webapp: a C{PrivateApplication}.
@param fragment: The C{Element} or C{Fragment} to display as content.
@param pageComponents a C{_PageComponent}.
This page draws its HTML from the 'shell' template in the preferred
theme for the store. If loaded in a browser that does not support
Athena, the page provided by the 'athena-unsupported' template will be
displayed instead.
@see: L{PrivateApplication.preferredTheme}
"""
userStore = webapp.store
siteStore = userStore.parent
MantissaLivePage.__init__(
self, ISiteURLGenerator(siteStore),
getattr(fragment, 'iface', None),
fragment,
jsModuleRoot=None,
docFactory=webapp.getDocFactory('shell'))
_ShellRenderingMixin.__init__(self, webapp, pageComponents, username)
_FragmentWrapperMixin.__init__(self, fragment, pageComponents)
self.unsupportedBrowserLoader = (webapp
.getDocFactory("athena-unsupported"))
def beforeRender(self, ctx):
"""
Call the C{beforeRender} implementations on L{MantissaLivePage} and
L{_FragmentWrapperMixin}.
"""
MantissaLivePage.beforeRender(self, ctx)
return _FragmentWrapperMixin.beforeRender(self, ctx)
def render_head(self, ctx, data):
ctx.tag[t.invisible(render=t.directive("liveglue"))]
return _FragmentWrapperMixin.render_head(self, ctx, data)
def render_introspectionWidget(self, ctx, data):
if INSPECTROFY:
f = athena.IntrospectionFragment()
f.setFragmentParent(self)
return ctx.tag[f]
else:
return ''
class _PrivateRootPage(Page, _ShellRenderingMixin):
"""
L{_PrivateRootPage} is the resource present for logged-in users at
"/private", providing a direct interface to the objects located in the
user's personal user-store.
It is created by L{PrivateApplication.createResourceWith}.
"""
addSlash = True
def __init__(self, webapp, pageComponents, username, webViewer):
self.username = username
self.webViewer = webViewer
Page.__init__(self, docFactory=webapp.getDocFactory('shell'))
_ShellRenderingMixin.__init__(self, webapp, pageComponents, username)
def child_(self, ctx):
navigation = self.pageComponents.navigation
if not navigation:
return self
# /private/XXXX ->
click = self.webapp.linkTo(navigation[0].storeID)
return url.URL.fromContext(ctx).click(click)
def render_content(self, ctx, data):
return """
You have no default root page set, and no navigation plugins installed. I
don't know what to do.
"""
def render_title(self, ctx, data):
return ctx.tag['Private Root Page (You Should Not See This)']
def childFactory(self, ctx, name):
"""
Return a shell page wrapped around the Item model described by the
webID, or return None if no such item can be found.
"""
try:
o = self.webapp.fromWebID(name)
except _WebIDFormatException:
return None
if o is None:
return None
return self.webViewer.wrapModel(o)
class _PageComponents(record('navigation searchAggregator staticShellContent settings themes')):
"""
I encapsulate various plugin objects that have some say
in determining the available functionality on a given page
"""
pass
class PrivateApplication(Item, PrefixURLMixin):
"""
This is the root of a private, navigable web application. It is designed
to be installed on avatar stores after installing WebSite.
To plug into it, install powerups of the type INavigableElement on the
user's store. Their tabs will be retrieved and items that are part of
those powerups will be linked to; provide adapters for said items to either
INavigableFragment or IResource.
Note: IResource adapters should be used sparingly, for example, for
specialized web resources which are not 'nodes' within the application; for
example, that need to set a custom content/type or that should not display
any navigation elements because they will be displayed only within IFRAME
nodes. Do _NOT_ use IResource adapters to provide a customized
look-and-feel; instead use mantissa themes. (XXX document webtheme.py more
thoroughly)
@ivar preferredTheme: A C{unicode} string naming the preferred theme for
this application. Templates and suchlike will be looked up for this theme
first.
@ivar privateKey: A random integer used to deterministically but
unpredictably perturb link generation to avoid being the target of XSS
attacks.
@ivar privateIndexPage: A reference to the Item whose IResource or
INavigableFragment adapter will be displayed on login and upon viewing the
'root' page, /private/.
"""
implements(ISiteRootPlugin, IWebTranslator, ITemplateNameResolver,
IPowerupIndirector)
powerupInterfaces = (IWebTranslator, ITemplateNameResolver, IWebViewer)
typeName = 'private_web_application'
schemaVersion = 5
preferredTheme = text()
privateKey = integer(defaultFactory=genkey)
website = dependsOn(WebSite)
customizedPublicPage = dependsOn(CustomizedPublicPage)
authenticationApplication = dependsOn(AuthenticationApplication)
preferenceAggregator = dependsOn(PreferenceAggregator)
defaultPreferenceCollection = dependsOn(DefaultPreferenceCollection)
searchAggregator = dependsOn(SearchAggregator)
#XXX Nothing ever uses this
privateIndexPage = reference()
prefixURL = 'private'
sessioned = True
sessionless = False
def getPageComponents(self):
navigation = getTabs(self.store.powerupsFor(INavigableElement))
staticShellContent = IStaticShellContent(self.store, None)
return _PageComponents(navigation,
self.searchAggregator,
staticShellContent,
self.store.findFirst(PreferenceAggregator),
getInstalledThemes(self.store.parent))
def _getUsername(self):
"""
Return a localpart@domain style string naming the owner of our store.
@rtype: C{unicode}
"""
for (l, d) in getAccountNames(self.store):
return l + u'@' + d
def createResourceWith(self, webViewer):
return _PrivateRootPage(self, self.getPageComponents(),
self._getUsername(), webViewer)
# ISiteRootPlugin
def produceResource(self, req, segments, webViewer):
if segments == JUST_SLASH:
return self.createResourceWith(webViewer), JUST_SLASH
else:
return super(PrivateApplication, self).produceResource(
req, segments, webViewer)
# IWebTranslator
def linkTo(self, obj):
# currently obj must be a storeID, but other types might come eventually
return '/%s/%s' % (self.prefixURL, storeIDToWebID(self.privateKey, obj))
def linkToWithActiveTab(self, childItem, parentItem):
"""
Return a URL which will point to the web facet of C{childItem},
with the selected nav tab being the one that represents C{parentItem}
"""
return self.linkTo(parentItem.storeID) + '/' + self.toWebID(childItem)
def linkFrom(self, webid):
return webIDToStoreID(self.privateKey, webid)
def fromWebID(self, webID):
storeID = self.linkFrom(webID)
if storeID is None:
# This is not a very good interface, but I don't want to change the
# calling code right now as I'm neither confident in its test
# coverage nor looking to go on a test-writing rampage through this
# code for a minor fix.
raise _WebIDFormatException("%r didn't look like a webID" % (webID,))
webitem = self.store.getItemByID(storeID, None)
return webitem
def toWebID(self, item):
return storeIDToWebID(self.privateKey, item.storeID)
def _preferredThemes(self):
"""
Return a list of themes in the order of preference that this user has
selected via L{PrivateApplication.preferredTheme}.
"""
themes = getInstalledThemes(self.store.parent)
_reorderForPreference(themes, self.preferredTheme)
return themes
#ITemplateNameResolver
def getDocFactory(self, fragmentName, default=None):
"""
Retrieve a Nevow document factory for the given name.
@param fragmentName: a short string that names a fragment template.
@param default: value to be returned if the named template is not
found.
"""
themes = self._preferredThemes()
for t in themes:
fact = t.getDocFactory(fragmentName, None)
if fact is not None:
return fact
return default
# IPowerupIndirector
def indirect(self, interface):
"""
Indirect the implementation of L{IWebViewer} to
L{_AuthenticatedWebViewer}.
"""
if interface == IWebViewer:
return _AuthenticatedWebViewer(self)
return self
PrivateApplicationV2 = declareLegacyItem(PrivateApplication.typeName, 2, dict(
installedOn = reference(),
preferredTheme = text(),
hitCount = integer(default=0),
privateKey = integer(),
privateIndexPage = reference(),
))
PrivateApplicationV3 = declareLegacyItem(PrivateApplication.typeName, 3, dict(
preferredTheme=text(),
hitCount=integer(default=0),
privateKey=integer(),
privateIndexPage=reference(),
customizedPublicPage=reference("dependsOn(CustomizedPublicPage)"),
authenticationApplication=reference("dependsOn(AuthenticationApplication)"),
preferenceAggregator=reference("dependsOn(PreferenceAggregator)"),
defaultPreferenceCollection=reference("dependsOn(DefaultPreferenceCollection)"),
searchAggregator=reference("dependsOn(SearchAggregator)"),
website=reference(),
))
def upgradePrivateApplication1To2(oldApp):
newApp = oldApp.upgradeVersion(
'private_web_application', 1, 2,
installedOn=oldApp.installedOn,
preferredTheme=oldApp.preferredTheme,
privateKey=oldApp.privateKey,
privateIndexPage=oldApp.privateIndexPage)
newApp.store.powerup(newApp.store.findOrCreate(
CustomizedPublicPage), ISiteRootPlugin, -257)
return newApp
upgrade.registerUpgrader(upgradePrivateApplication1To2, 'private_web_application', 1, 2)
def _upgradePrivateApplication2to3(old):
pa = old.upgradeVersion(PrivateApplication.typeName, 2, 3,
preferredTheme=old.preferredTheme,
privateKey=old.privateKey,
privateIndexPage=old.privateIndexPage)
pa.customizedPublicPage = old.store.findOrCreate(CustomizedPublicPage)
pa.authenticationApplication = old.store.findOrCreate(AuthenticationApplication)
pa.preferenceAggregator = old.store.findOrCreate(PreferenceAggregator)
pa.defaultPreferenceCollection = old.store.findOrCreate(DefaultPreferenceCollection)
pa.searchAggregator = old.store.findOrCreate(SearchAggregator)
pa.website = old.store.findOrCreate(WebSite)
return pa
upgrade.registerUpgrader(_upgradePrivateApplication2to3, PrivateApplication.typeName, 2, 3)
def upgradePrivateApplication3to4(old):
"""
Upgrade L{PrivateApplication} from schema version 3 to schema version 4.
Copy all existing attributes to the new version and use the
L{PrivateApplication} to power up the item it is installed on for
L{ITemplateNameResolver}.
"""
new = old.upgradeVersion(
PrivateApplication.typeName, 3, 4,
preferredTheme=old.preferredTheme,
privateKey=old.privateKey,
website=old.website,
customizedPublicPage=old.customizedPublicPage,
authenticationApplication=old.authenticationApplication,
preferenceAggregator=old.preferenceAggregator,
defaultPreferenceCollection=old.defaultPreferenceCollection,
searchAggregator=old.searchAggregator)
# Almost certainly this would be more correctly expressed as
# installedOn(new).powerUp(...), however the 2 to 3 upgrader failed to
# translate the installedOn attribute to state which installedOn can
# recognize, consequently installedOn(new) will return None for an item
# which was created at schema version 2 or earlier. It's not worth dealing
# with this inconsistency, since PrivateApplication is always only
# installed on its store. -exarkun
new.store.powerUp(new, ITemplateNameResolver)
return new
upgrade.registerUpgrader(upgradePrivateApplication3to4, PrivateApplication.typeName, 3, 4)
PrivateApplicationV4 = declareLegacyItem(
'private_web_application', 4,
dict(authenticationApplication=reference(),
customizedPublicPage=reference(),
defaultPreferenceCollection=reference(),
hitCount=integer(),
preferenceAggregator=reference(),
preferredTheme=text(),
privateIndexPage=reference(),
privateKey=integer(),
searchAggregator=reference(),
website=reference()))
def upgradePrivateApplication4to5(old):
"""
Install the newly required powerup.
"""
new = old.upgradeVersion(
PrivateApplication.typeName, 4, 5,
preferredTheme=old.preferredTheme,
privateKey=old.privateKey,
website=old.website,
customizedPublicPage=old.customizedPublicPage,
authenticationApplication=old.authenticationApplication,
preferenceAggregator=old.preferenceAggregator,
defaultPreferenceCollection=old.defaultPreferenceCollection,
searchAggregator=old.searchAggregator)
new.store.powerUp(new, IWebViewer)
return new
upgrade.registerUpgrader(upgradePrivateApplication4to5, PrivateApplication.typeName, 4, 5)
```
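The `toWebID`/`linkFrom`/`linkTo` trio above forms a deterministic round trip keyed on `privateKey`. The sketch below states that property; it assumes `app` is an installed `PrivateApplication` and `item` is any item in the same user store.
```python
def check_webid_round_trip(app, item):
    """
    Illustrative check (assumes `app` is an installed PrivateApplication and
    `item` is any Axiom item in the same user store).
    """
    webID = app.toWebID(item)                        # storeID scrambled with privateKey
    assert app.linkTo(item.storeID) == '/private/' + webID
    assert app.linkFrom(webID) == item.storeID       # inverse of toWebID
    assert app.fromWebID(webID).storeID == item.storeID
```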
#### File: mantissa/xmantissa/_webutil.py
```python
from zope.interface import implements
from twisted.cred.portal import IRealm
from epsilon.structlike import record
from axiom.userbase import getDomainNames
from nevow import athena
from nevow.rend import NotFound
from nevow.inevow import IResource, IRequest
from xmantissa.ixmantissa import (IWebViewer, INavigableFragment,
ISiteRootPlugin)
from xmantissa.websharing import UserIndexPage
from xmantissa.error import CouldNotLoadFromThemes
class WebViewerHelper(object):
"""
This is a mixin for the common logic in the two providers of
L{IWebViewer} included with Mantissa,
L{xmantissa.publicweb._AnonymousWebViewer} and
L{xmantissa.webapp._AuthenticatedWebViewer}.
@ivar _getDocFactory: a 1-arg callable which returns a nevow loader.
@ivar _preferredThemes: a 0-arg callable which returns a list of nevow
themes.
"""
def __init__(self, _getDocFactory, _preferredThemes):
"""
"""
self._getDocFactory = _getDocFactory
self._preferredThemes = _preferredThemes
def _wrapNavFrag(self, fragment, useAthena):
"""
Subclasses must implement this to wrap a fragment.
@param fragment: an L{INavigableFragment} provider that should be
wrapped in the resulting page.
@param useAthena: Whether the resulting L{IResource} should be a
L{LivePage}.
@type useAthena: L{bool}
@return: a fragment to display to the user.
@rtype: L{IResource}
"""
def wrapModel(self, model):
"""
Converts application-provided model objects to L{IResource} providers.
"""
res = IResource(model, None)
if res is None:
frag = INavigableFragment(model)
fragmentName = getattr(frag, 'fragmentName', None)
if fragmentName is not None:
fragDocFactory = self._getDocFactory(fragmentName)
if fragDocFactory is not None:
frag.docFactory = fragDocFactory
if frag.docFactory is None:
raise CouldNotLoadFromThemes(frag, self._preferredThemes())
useAthena = isinstance(frag, (athena.LiveFragment, athena.LiveElement))
return self._wrapNavFrag(frag, useAthena)
else:
return res
class MantissaViewHelper(object):
"""
This is the superclass of all Mantissa resources which act as a wrapper
around an L{INavigableFragment} provider. This must be mixed in to some
hierarchy with a C{locateChild} method, since it expects to cooperate in
such a hierarchy.
Due to infelicities in the implementation of some (pre-existing)
subclasses, there is no __init__; but subclasses must set the 'fragment'
attribute in theirs.
"""
fragment = None
def locateChild(self, ctx, segments):
"""
Attempt to locate the child via the '.fragment' attribute, then fall
back to normal locateChild behavior.
"""
if self.fragment is not None:
# There are still a bunch of bogus subclasses of this class, which
# are used in a variety of distasteful ways. 'fragment' *should*
# always be set to something that isn't None, but there's no way to
# make sure that it will be for the moment. Every effort should be
# made to reduce public use of subclasses of this class (instead
# preferring to wrap content objects with
# IWebViewer.wrapModel()), so that the above check can be
# removed. -glyph
lc = getattr(self.fragment, 'locateChild', None)
if lc is not None:
x = lc(ctx, segments)
if x is not NotFound:
return x
return super(MantissaViewHelper, self).locateChild(ctx, segments)
class SiteRootMixin(object):
"""
Common functionality for L{AnonymousSite} and L{WebSite}.
"""
def locateChild(self, context, segments):
"""
Return a statically defined child or a child defined by a site root
plugin or an avatar from guard.
"""
request = IRequest(context)
webViewer = IWebViewer(self.store, None)
childAndSegments = self.siteProduceResource(request, segments, webViewer)
if childAndSegments is not None:
return childAndSegments
return NotFound
# IMantissaSite
def siteProduceResource(self, req, segments, webViewer):
"""
Retrieve a child resource and segments from rootChild_ methods on this
object and SiteRootPlugins.
@return: a 2-tuple of (resource, segments), suitable for return from
locateChild.
@param req: an L{IRequest} provider.
@param segments: a tuple of L{str}s, the segments from the request.
@param webViewer: an L{IWebViewer}, to be propagated through the child
lookup process.
"""
# rootChild_* is not the same as child_, because its signature is
# different. Maybe this should be done some other way.
shortcut = getattr(self, 'rootChild_' + segments[0], None)
if shortcut:
res = shortcut(req, webViewer)
if res is not None:
return res, segments[1:]
for plg in self.store.powerupsFor(ISiteRootPlugin):
produceResource = getattr(plg, 'produceResource', None)
if produceResource is not None:
childAndSegments = produceResource(req, segments, webViewer)
else:
childAndSegments = plg.resourceFactory(segments)
if childAndSegments is not None:
return childAndSegments
return None
# IPowerupIndirector
def indirect(self, interface):
"""
Create a L{VirtualHostWrapper} so it can have the first chance to
handle web requests.
"""
if interface is IResource:
siteStore = self.store.parent
if self.store.parent is None:
siteStore = self.store
return VirtualHostWrapper(
siteStore,
IWebViewer(self.store),
self)
return self
class VirtualHostWrapper(record('siteStore webViewer wrapped')):
"""
Resource wrapper which implements per-user virtual subdomains. This should
be wrapped around any resource which sits at the root of the hierarchy. It
will examine requests for their hostname and, when appropriate, redirect
handling of the query to the appropriate sharing resource.
@type siteStore: L{Store}
@ivar siteStore: The site store which will be queried to determine which
hostnames are associated with this server.
@type webViewer: L{IWebViewer}
@ivar webViewer: The web viewer representing the user.
@type wrapped: L{IResource} provider
@ivar wrapped: A resource to which traversal will be delegated if the
request is not for a user subdomain.
"""
implements(IResource)
def subdomain(self, hostname):
"""
Determine of which known domain the given hostname is a subdomain.
@return: A two-tuple giving the subdomain part and the domain part or
C{None} if the domain is not a subdomain of any known domain.
"""
hostname = hostname.split(":")[0]
for domain in getDomainNames(self.siteStore):
if hostname.endswith("." + domain):
username = hostname[:-len(domain) - 1]
if username != "www":
return username, domain
return None
def locateChild(self, context, segments):
"""
Delegate dispatch to a sharing resource if the request is for a user
subdomain, otherwise fall back to the wrapped resource's C{locateChild}
implementation.
"""
request = IRequest(context)
hostname = request.getHeader('host')
info = self.subdomain(hostname)
if info is not None:
username, domain = info
index = UserIndexPage(IRealm(self.siteStore),
self.webViewer)
resource = index.locateChild(None, [username])[0]
return resource, segments
return self.wrapped.locateChild(context, segments)
``` |
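The `subdomain` method above is plain string handling, so its behaviour is easy to pin down in isolation. The standalone sketch below restates the same rules, with a hard-coded domain list standing in for `getDomainNames(siteStore)`.
```python
# Standalone re-statement of the VirtualHostWrapper.subdomain() rules; the
# known_domains default is a made-up stand-in for getDomainNames(siteStore).
def subdomain_of(hostname, known_domains=('example.com',)):
    hostname = hostname.split(':')[0]                 # strip any port
    for domain in known_domains:
        if hostname.endswith('.' + domain):
            username = hostname[:-len(domain) - 1]
            if username != 'www':                     # www is not a user share
                return username, domain
    return None

assert subdomain_of('alice.example.com:8080') == ('alice', 'example.com')
assert subdomain_of('www.example.com') is None
assert subdomain_of('example.com') is None
```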
{
"source": "jonathanjosef91/LeetCode",
"score": 3
} |
#### File: LeetCode/old/LongestSubStringWithoutRepeatingsCharcters.py
```python
class Solution:
def lengthOfLongestSubstring(self, s: str) -> int:
if len(s) == 0:
return 0
NUM_OF_CHARS = 256
hist = [0] * NUM_OF_CHARS
l = 0
r = 0
max = 0
while r < len(s):
hist_i = ord(s[r])
hist[hist_i] += 1
while hist[hist_i] > 1:
hist[ord(s[l])] -= 1
l += 1
if r - l + 1 > max:
max = r - l + 1
r += 1
return max
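# Illustrative driver (not part of the original file), in the same style as the
# other solutions under old/: "abcabcbb" -> longest repeat-free substring "abc".
sol = Solution()
print(sol.lengthOfLongestSubstring("abcabcbb"))  # expected: 3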
```
#### File: LeetCode/old/matrixBlockSum100.py
```python
class Solution(object):
def matrixBlockSum(self, mat, K):
"""
:type mat: List[List[int]]
:type K: int
:rtype: List[List[int]]
"""
r = len(mat)
c = len(mat[0])
temp = [[0] * c for i in range(r)]
answer = [[0] * c for i in range(r)]
# initialise the first column
for i in range(r):
res = 0
for delta_c in range(K + 1):
if delta_c < c:
res += mat[i][delta_c]
temp[i][0] = res
        # slide the window along each row: add the entering column, drop the trailing one
for i in range(r):
res = temp[i][0]
for j in range(1, c):
# remove -k column + K column
remove_c = j - K - 1
if (0 <= remove_c < c):
res -= mat[i][remove_c]
add_c = j + K
if (0 <= add_c < c):
res += mat[i][add_c]
temp[i][j] = res
        # initialise the first row by the same approach, but accumulate over the row sums in temp
for i in range(c):
res = 0
for delta_r in range(K + 1):
res += temp[delta_r][i]
answer[0][i] = res
for i in range(c):
res = answer[0][i]
for j in range(1, r):
# remove -k row + K row
remove_r = j - K - 1
if (0 <= remove_r < r):
res -= temp[remove_r][i]
add_r = j + K
if (0 <= add_r < r):
res += temp[add_r][i]
answer[j][i] = res
return answer
mat = [[1,2,3],[4,5,6],[7,8,9]]
sol = Solution()
print(sol.matrixBlockSum(mat,1))
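# Expected output for K=1: [[12, 21, 16], [27, 45, 33], [24, 39, 28]]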
```
#### File: LeetCode/old/mergeIntarvals.py
```python
class Solution(object):
def merge(self, intervals):
"""
:type intervals: List[List[int]]
:rtype: List[List[int]]
"""
if len(intervals) <= 0:
return []
intervals.sort()
out = []
start = end = intervals[0][0]
for i in intervals:
if i[0]<=end:
end = max(end,i[1])
else:
out.append([start,end])
start = i[0]
end = i[1]
out.append([start,end])
return out
sol = Solution()
intervals = [[1,3],[2,6],[8,10],[15,18]]
print(sol.merge(intervals))
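# Expected output: [[1, 6], [8, 10], [15, 18]]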
```
#### File: LeetCode/old/productExceptSelf.py
```python
class Solution(object):
def productExceptSelf(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
zeros = 0
zeroIndex = 0
multi = 1
for i in range(len(nums)):
if nums[i] == 0:
zeros += 1
zeroIndex = i
else:
multi *= nums[i]
if zeros >= 2:
return [0]*len(nums)
if zeros == 1:
out = [0]*len(nums)
out[zeroIndex] = multi
return out
out = [1]*len(nums)
for i in range(1, len(out)):
out[i] = nums[i-1]*out[i-1]
last = 1
for i in range(len(nums)-2, -1, -1):
last = last * nums[i+1]
out[i] *= last
return out
sol = Solution()
arr = [6, 0, 7, 0]
print(sol.productExceptSelf(arr))
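# Expected output: [0, 0, 0, 0] (two zeros make every product zero)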
```
#### File: Solutions/L/LargestNumber.py
```python
import unittest
from Tags import *
from functools import cmp_to_key
"""
Author: <NAME>
Problem Description/link: https://leetcode.com/problems/largest-number/
"""
class Solution(object):
def getTags(self):
tags = [Difficulty.Medium, Topic.Math]
return tags
def _cmpNum(self, x, y):
if str(x) + str(y) > str(y) + str(x):
return 1
else:
return -1
def largestNumber(self, nums):
nums.sort(key=cmp_to_key(self._cmpNum), reverse=True)
s = ""
for i in nums:
s += str(i)
while len(s) > 1 and s[0] == "0":
s = s[1:]
return s
class test_largestNumber(unittest.TestCase):
def test_1(self):
self.assertEqual("12341123412341", Solution().largestNumber([12341,123411234]))
self.assertEqual("43243432", Solution().largestNumber([432,43243]))
self.assertEqual("0", Solution().largestNumber([0,0]))
self.assertEqual("9534330", Solution().largestNumber([3,30,34,5,9]))
self.assertEqual("210", Solution().largestNumber([10, 2]))
self.assertEqual("210", Solution().largestNumber([2, 10]))
```
#### File: Solutions/L/LongestPalindrome.py
```python
import unittest
from Tags import *
"""
Author: <NAME>
Problem Description/link: https://leetcode.com/problems/longest-palindromic-substring/
"""
class Solution(object):
def getTags(self):
tags = [Difficulty.Hard, Topic.DP, Topic.Strings]
return tags
def longestPalindrome(self, s):
if s == "":
return ""
isPali = [[False] * len(s) for i in range(len(s))]
max_pali = s[0]
max_l = 1
for i in range(len(s)):
isPali[i][i] = True
for j in range(1, len(s)):
for i in range(j - 1, -1, -1):
isPali[i][j] = (isPali[i + 1][j - 1] or j - i < 2) and s[i] == s[j]
if isPali[i][j]:
if j - i + 1 > max_l:
                        max_l = j - i + 1
max_pali = s[i:j + 1]
return max_pali
class test_longestPalindrome(unittest.TestCase):
def test_1(self):
res = Solution().longestPalindrome("babad")
self.assertTrue(res in ["bab", "aba"])
self.assertEqual("bb", Solution().longestPalindrome("cbbd"))
self.assertEqual("bb", Solution().longestPalindrome("bb"))
self.assertEqual("ccc", Solution().longestPalindrome("ccc"))
self.assertEqual("aca", Solution().longestPalindrome("aacabdkacaa"))
``` |
{
"source": "jonathanjqchen/biomed-net-new-assets-script",
"score": 3
} |
#### File: jonathanjqchen/biomed-net-new-assets-script/main.py
```python
import os
import pandas as pd
import xlsxwriter
pd.set_option("display.expand_frame_repr", False)
def read_assets(file_path):
"""
Reads file from given path into dataframe
:return: Dataframe containing assets with columns "Model Number", "Asset Description", "Segment Description",
"Site Code", "Shop"
"""
df = pd.read_excel(file_path, usecols=["Model Number",
"Asset Description",
"Segment Description",
"Site Code",
"Shop"])
return df
def add_count_col(df):
"""
Groups given df by all fields and creates a new "Count" column indicating the number of assets in each group
:param df: Dataframe containing asset details
:return: Dataframe with identical asset/model/site combination grouped in single row and new "Count" column
indicating the number of such assets
"""
return df.\
groupby(["Model Number", "Asset Description", "Site Code", "Shop", "Segment Description"]).\
size().\
to_frame("Count").\
reset_index()
def create_dict(df):
"""
Converts given dataframe into a dictionary; see :return: for dictionary details
:param df: Dataframe with asset details and "Count" column
:return: Dictionary with key: "Model Number"
value: [["Model Number", "Asset Description", ...], ["Model Number", ...], ...]
Note: Value is a 2D list that contains unique entries for a given model number, if they exist
For example, model number "VC150" may have been purchased for two different sites "MSJ" and "SPH"
In this case, there will be two entries in the 2D list stored at key "VC150" in the dictionary
"""
assets_dict = {}
for index, row in df.iterrows():
if row["Model Number"] in assets_dict:
assets_dict.get(row["Model Number"]).append(row.tolist())
else:
assets_dict[row["Model Number"]] = []
assets_dict.get(row["Model Number"]).append(row.tolist())
return assets_dict
def update_count(new_assets_dict, retired_assets_dict):
"""
Iterates through each asset in new_assets_dict and decreases its "Count" if there is a corresponding asset in the
retired_assets_dict. After "Count" is updated, the corresponding asset is removed from retired_assets_dict.
:param: new_assets_dict: Dictionary containing newly accepted assets
:param: retired_assets_dict: Dictionary containing retired assets
:return: None
"""
# Iterate through each entry in the new assets dictionary
for key, val in new_assets_dict.items():
# Check to see if the newly purchased asset was also retired (key is model number)
if key in retired_assets_dict:
# Find the exact asset match by iterating through the dictionary value (2D list of asset details)
for retired_asset in retired_assets_dict.get(key):
for new_asset in new_assets_dict.get(key):
# Index 1 gives asset description, index 2 gives site, and index 3 gives shop code
if new_asset[1] == retired_asset[1] and \
new_asset[2] == retired_asset[2] and \
new_asset[3] == retired_asset[3]:
# Decrease new asset "Count" by retired asset "Count" (add since retired asset "Count" is neg)
new_asset[5] += retired_asset[5]
# Remove retired asset from retired_assets_dict after count in new_assets_dict has been updated
retired_assets_dict.get(key).remove(retired_asset)
# Delete key from retired_assets_dict if there are no more assets in its 2D list
if len(retired_assets_dict.get(key)) == 0:
del retired_assets_dict[key]
def merge_dict(new_assets_dict, retired_assets_dict):
"""
Merges retired_assets_dict into new_assets_dict
:param new_assets_dict: Dictionary containing new assets with updated "Count"
:param retired_assets_dict: Dictionary containing retired assets that did not have a corresponding asset in
new_assets_dict
:return: None
"""
# If retired_assets_dict isn't empty, merge it with new_assets_dict
if len(retired_assets_dict) > 0:
for key, val in retired_assets_dict.items():
if key not in new_assets_dict:
new_assets_dict[key] = val
else:
for asset_list in val:
new_assets_dict.get(key).append(asset_list)
def write_to_excel(new_assets_dict):
"""
Writes given dictionary to Excel in format that can be accepted by biomed-service-delivery-cost-model
:param new_assets_dict: Dict containing information about new and retired assets
:return: None
"""
# Initialization
net_new_file_path = r"{dir_path}\output\net_new_assets.xlsx".format(dir_path=os.getcwd())
workbook = xlsxwriter.Workbook(net_new_file_path)
worksheet = workbook.add_worksheet("Net New Assets")
# Formatting
heading = workbook.add_format({"bold": True, "font_color": "white", "bg_color": "#244062"})
# Write headers
headers = ["model_num", "asset_description", "quantity", "health_auth", "site_code", "shop_code"]
worksheet.write_row(0, 0, headers, heading)
# Write asset details
row = 1
col = 0
for key, val in new_assets_dict.items():
for asset_list in val:
row_data = [asset_list[0], # model_num
asset_list[1], # asset_description
asset_list[5], # quantity
asset_list[4], # health_auth
asset_list[2], # site_code
asset_list[3]] # shop_code
worksheet.write_row(row, col, row_data)
row += 1
# Set column width
worksheet.set_column(0, 0, 15) # A: model_num
worksheet.set_column(1, 1, 70) # B: asset_description
worksheet.set_column(2, 2, 10) # C: quantity
worksheet.set_column(3, 3, 13) # D: health_auth
worksheet.set_column(4, 5, 10) # E, F: site_code, shop_code
workbook.close()
def main():
print("Generating list of net new assets...")
# Get name of TMS exports of new and retired assets
new_assets_path = "new_assets/{file}".format(file=os.listdir("new_assets")[0])
retired_assets_path = "retired_assets/{file}".format(file=os.listdir("retired_assets")[0])
# Read TMS exports for new and retired assets into separate dataframes
new_assets_df = read_assets(new_assets_path)
retired_assets_df = read_assets(retired_assets_path)
# Group df by rows so that we have one row for each unique asset/model/site combination, add count column
new_assets_df = add_count_col(new_assets_df)
retired_assets_df = add_count_col(retired_assets_df)
# Convert all "Count" values in the retired df to a negative value
retired_assets_df["Count"] *= -1
# Convert df to dictionary with key: "Model Number" and value: [["Asset 1 details"], ["Asset 2 details"], ...]
# Note: Assets with same model but different sites are grouped separately, hence the 2D list
new_assets_dict = create_dict(new_assets_df)
retired_assets_dict = create_dict(retired_assets_df)
# If there is an exact asset match between the two dicts, decrease new assets "Count" by retired assets "Count"
# After "Count" has been updated, retired asset is removed from its dictionary
# This will give us the count of net new assets at each site in new_assets_dict
update_count(new_assets_dict, retired_assets_dict)
# Merge retired_assets_dict (only retired assets without corresponding new asset remain) into new_assets_dict
merge_dict(new_assets_dict, retired_assets_dict)
# Write new_assets_dict to Excel
write_to_excel(new_assets_dict)
input("Net new assets list successfully generated. Press 'Enter' to close this window.")
if __name__ == "__main__":
main()
``` |
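The intermediate dictionaries that `update_count()` and `merge_dict()` operate on have the shape `{model_num: [[model, description, site, shop, segment, count], ...]}`. A small hand-built example (made-up model number, site and shop codes) shows the netting-out step:
```python
# Hypothetical data: three VC150 ventilators accepted at SPH and one retired
# there, so the net-new quantity should drop from 3 to 2. Retired counts are
# already negative, as in main().
new_assets = {"VC150": [["VC150", "Ventilator", "SPH", "RESP", "PHC", 3]]}
retired_assets = {"VC150": [["VC150", "Ventilator", "SPH", "RESP", "PHC", -1]]}

update_count(new_assets, retired_assets)
merge_dict(new_assets, retired_assets)

print(new_assets)      # {'VC150': [['VC150', 'Ventilator', 'SPH', 'RESP', 'PHC', 2]]}
print(retired_assets)  # {} -- the matched retirement was consumed
```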
{
"source": "jonathanjqchen/biomed-service-delivery-cost-model",
"score": 4
} |
#### File: jonathanjqchen/biomed-service-delivery-cost-model/asset.py
```python
class Asset:
"""
This class handles all data and behaviour associated with the assets for which the user wants to budget.
"""
def __init__(self, model_num, name, qty, health_auth, site_code, shop_code, sites_cc_dict):
"""
Initializes instance variables, calls strip() on strings to make sure there are no white spaces at the front or
end when importing data from Excel.
:param model_num: Model number
:param name: Asset description
:param qty: Quantity of assets being budgeted for
:param health_auth: Health authority to which the asset belongs
:param site_code: Three-letter site code
:param shop_code: Shop code, for determining the function (clinical, renal imaging) of an asset
:param sites_cc_dict: Dictionary reference that shows corresponding cost centres for a given site
"""
self.model_num = model_num # Model number
self.name = name.strip() # Asset description
self.qty = qty # Quantity
self.health_auth = health_auth.strip() # Health authority
self.site_code = site_code.strip() # Site code
self.shop_code = shop_code.strip() # Shop code
self.function = self.assign_function() # Function (clinical, renal, imaging)
self.cost_centre = self.assign_temp_cost_centre(sites_cc_dict).strip() # Cost centre
self.avg_support_hours = 0 # Number of work order hours per year
def assign_function(self):
"""
Assigns asset function (clinical, renal, imaging) based on the asset's shop code
:return: String denoting the asset's function
"""
if self.shop_code == "IMAG" or self.shop_code == "IMAG0" or self.shop_code == "IMAG1":
return "imaging"
elif self.shop_code == "REN" or self.shop_code == "FHA_R":
return "renal"
else:
return "clinical"
def assign_temp_cost_centre(self, sites_cc_dict):
"""
Assigns a string representing the cost centre name to cost_centre instance variable; actual CostCentre object
replaces this string once it has been instantiated and assign_permanent_cost_centre() is called
:param sites_cc_dict: Dictionary reference that shows corresponding cost centres for a given site
:return: String representing cost centre's name
"""
if self.function == "clinical":
index = 0 # Clinical cost centre is stored at index 0 of sites_cc_dict value list
elif self.function == "renal":
index = 1 # Renal cost centre is stored at index 1 of sites_cc_dict value list
else:
index = 2 # Imaging cost centre is stored at index 2 of sites_cc_dict value list
# Store list of cost centres for corresponding site_code in cost_centres
cost_centres = sites_cc_dict[self.site_code]
# Check that cost centre we need is actually a string (i.e. not a NaN); if valid, return the cost centre
if type(cost_centres[index]) == str:
return cost_centres[index]
# If not valid, try to return clinical cost centre
elif index != 0 and type(cost_centres[0]) == str:
return cost_centres[0]
# If not valid, try to return imaging cost centre
elif index != 2 and type(cost_centres[2]) == str:
return cost_centres[2]
# Else return renal cost centre
else:
return cost_centres[1]
def assign_permanent_cost_centre(self, cost_centre):
"""
Updates cost_centre instance variable to be an actual CostCentre object, not just a string
:param cost_centre: CostCentre object
"""
self.cost_centre = cost_centre
```
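A rough usage sketch with made-up reference data: `sites_cc_dict` maps a site code to its `[clinical, renal, imaging]` cost centres, and a renal shop code whose renal cost centre is missing (NaN in the source spreadsheet) should fall back to the clinical one.
```python
import math

# Hypothetical reference data: SPH has clinical and imaging cost centres, but
# the renal entry is blank (NaN).
sites_cc_dict = {"SPH": ["SPH Clinical CC", math.nan, "SPH Imaging CC"]}

asset = Asset(
    model_num="2055",
    name="Hemodialysis Unit ",
    qty=2,
    health_auth="PHC",
    site_code="SPH",
    shop_code="REN",
    sites_cc_dict=sites_cc_dict,
)

print(asset.function)     # 'renal'  (shop code REN)
print(asset.cost_centre)  # 'SPH Clinical CC'  (falls back because renal is NaN)
```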
#### File: jonathanjqchen/biomed-service-delivery-cost-model/staff.py
```python
import pandas as pd
from abc import ABC, abstractmethod
"""
########################################################################################################################
##################################### MODULE SCOPE FUNCTIONS BELOW #####################################################
########################################################################################################################
"""
def get_benefits_multiplier():
"""
Parses for the benefits_multiplier inputted by the user in "Benefits Multiplier" worksheet in
regional_staff_salaries.xlsx, staff salaries are multiplied by benefits_multiplier to obtain total_compensation.
:return: Benefits multiplier as a float.
"""
# File path to regional_staff_salaries.xlsx
regional_staff_salaries_file_path = "model_inputs/labour_reports/staff_salaries.xlsx"
# Read benefits multiplier into dataframe
benefits_multiplier_df = pd.read_excel(regional_staff_salaries_file_path,
sheet_name="Benefits Multiplier",
header=None,
usecols="A:B",
nrows=1)
return benefits_multiplier_df.at[0, 1]
"""
########################################################################################################################
######################################## STAFF ABSTRACT CLASS BELOW ####################################################
########################################################################################################################
"""
class Staff(ABC):
# Multiplier to multiply salary by to get total compensation
benefits_multiplier = get_benefits_multiplier()
def __init__(self):
"""
Initializes instance variables
"""
self.annual_salary = self.compute_annual_salary()
self.total_compensation = self.compute_total_compensation()
@abstractmethod
def compute_annual_salary(self):
pass
def compute_total_compensation(self):
"""
Computes staff's total compensation.
:return: Float representing total compensation, the product between annual salary and the benefits multiplier.
"""
return self.annual_salary * self.benefits_multiplier
``` |
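Because `Staff` is abstract, a concrete subclass only has to supply `compute_annual_salary()`. The sketch below uses a made-up salary figure; note that the class-level `benefits_multiplier` is read from the Excel input at class-definition time, so `model_inputs/labour_reports/staff_salaries.xlsx` must exist for this to run.
```python
# Hypothetical concrete subclass, purely to show how total_compensation is
# derived from annual_salary and the shared benefits multiplier.
class FixedSalaryStaff(Staff):
    def __init__(self, annual_salary):
        self._salary = annual_salary
        super().__init__()  # triggers compute_annual_salary / compute_total_compensation

    def compute_annual_salary(self):
        return self._salary

tech = FixedSalaryStaff(80000.0)
print(tech.total_compensation)  # 80000.0 * Staff.benefits_multiplier
```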
{
"source": "jonathanj/renamer",
"score": 2
} |
#### File: renamer/plugins/tv.py
```python
import string
import urllib
try:
import pymeta
from pymeta.grammar import OMeta
from pymeta.runtime import ParseError
pymeta # Ssssh, Pyflakes.
except ImportError:
pymeta = None
from twisted.internet import reactor
from twisted.web.client import Agent
from renamer import logging
from renamer.plugin import RenamingCommand
from renamer.errors import PluginError
from renamer.util import deliverBody, BodyReceiver
try:
from renamer._compiled_grammar.tv import Parser as FilenameGrammar
FilenameGrammar # Ssssh, Pyflakes.
except ImportError:
FilenameGrammar = None
filenameGrammar = """
complete_strict ::= <series_strict>:series <separator> <episode_strict>:episode
=> series, episode
complete_lenient ::= <series_lenient>:series <separator> <episode_lenient>:episode
=> series, episode
partial_silly ::= <series_silly>:series <separator> <episode_silly>:episode
=> series, episode
only_episode_silly ::= <episode_silly>:episode
=> None, episode
only_episode ::= <episode_strict>:episode
=> None, episode
only_series ::= (<series_word>:word <separator> => word)+:words
=> ' '.join(words), [None, None]
separator ::= <hard_separator> | <soft_separator>
soft_separator ::= '.' | ' ' | '-' | '_'
hard_separator ::= ('_' '-' '_'
|' ' '-' ' '
|'.' '-' '.')
series_strict ::= (<series_word>:word <separator> ~(<episode_strict> <separator>) => word)*:words <series_word>:word
=> ' '.join(words + [word])
series_lenient ::= (<series_word>:word <separator> ~(<episode_lenient> <separator>) => word)*:words <series_word>:word
=> ' '.join(words + [word])
series_silly ::= (<series_word>:word <soft_separator> ~(<episode_silly> <separator>) => word)*:words <separator>
=> ' '.join(words)
series_word ::= (<letter> | <digit>)+:name => ''.join(name)
episode_strict ::= (<episode_x> | <episode_x2> | <episode_lettered>):ep
=> map(''.join, ep)
episode_lenient ::= (<episode_strict> | <episode_numbers>):ep
=> map(''.join, ep)
episode_silly ::= <digit>+:ep
=> map(''.join, [ep, ep])
episode_lettered ::= ('S' | 's') <digit>+:season ('E' | 'e') <digit>+:episode
=> season, episode
episode_numbers ::= <digit>:a <digit>:b <digit>:c <digit>?:d
=> ([a, b], [c, d]) if d else ([a], [b, c])
episode_x ::= <digit>+:season 'x' <digit>+:episode
=> season, episode
episode_x2 ::= '[' <digit>+:season 'x' <digit>+:episode ']'
=> season, episode
"""
if pymeta is not None and FilenameGrammar is None:
class FilenameGrammar(OMeta.makeGrammar(filenameGrammar, globals())):
pass
class TVRage(RenamingCommand):
name = 'tvrage'
description = 'Rename TV episodes with TVRage metadata.'
longdesc = """
Extract TV episode information from filenames and rename them based on the
correct information from TVRage <http://tvrage.com/>.
Available placeholders for templates are:
series, season, padded_season, episode, padded_episode, title
"""
defaultNameTemplate = string.Template(
u'$series [${season}x${padded_episode}] - $title')
optParameters = [
('series', None, None, 'Override series name.'),
('season', None, None, 'Override season number.', int),
('episode', None, None, 'Override episode number.', int)]
def postOptions(self):
if pymeta is None:
raise PluginError(
'The "pymeta" package is required for this command')
if self['series'] is not None:
self['series'] = self.decodeCommandLine(self['series'])
self.agent = Agent(reactor)
def buildMapping(self, (seriesName, season, episode, episodeName)):
return dict(
series=seriesName,
season=season,
padded_season=u'%02d' % (season,),
episode=episode,
padded_episode=u'%02d' % (episode,),
title=episodeName)
def extractParts(self, filename, overrides=None):
"""
Get TV episode information from a filename.
"""
if overrides is None:
overrides = {}
rules = ['complete_strict', 'complete_lenient']
# We can only try the partial rules if there are some overrides.
if filter(None, overrides.values()):
rules.extend([
'only_episode',
'partial_silly',
'only_series',
'only_episode_silly'])
for rule in rules:
g = FilenameGrammar(filename)
logging.msg('Trying grammar rule "%s"' % (rule,),
verbosity=5)
try:
res, err = g.apply(rule)
except ParseError, e:
try:
logging.msg('Parsing error:', verbosity=5)
for line in (e.formatError(filename).strip()).splitlines():
logging.msg(line, verbosity=5)
except:
pass
continue
else:
series, (season, episode) = res
parts = (
overrides.get('series') or series,
overrides.get('season') or season,
overrides.get('episode') or episode)
if None not in parts:
logging.msg('Found parts in "%s": %r' % (filename, parts),
verbosity=4)
return parts
raise PluginError(
'No patterns could be found in "%s"' % (filename))
def extractMetadata(self, pageData):
"""
Extract TV episode metadata from a TVRage response.
"""
data = {}
for line in pageData.splitlines():
key, value = line.strip().split('@', 1)
data[key] = value.split('^')
series = data['Show Name'][0]
season, episode = map(int, data['Episode Info'][0].split('x'))
title = data['Episode Info'][1]
return series, season, episode, title
def buildURL(self, seriesName, season, episode):
"""
Construct the TVRage URL to the quickinfo page for the seriesName,
season and episode.
"""
ep = '%dx%02d' % (int(season), int(episode))
qs = urllib.urlencode({'show': seriesName, 'ep': ep})
return 'http://services.tvrage.com/tools/quickinfo.php?%s' % (qs,)
def lookupMetadata(self, seriesName, season, episode):
"""
Look up TV episode metadata on TVRage.
"""
url = self.buildURL(seriesName, season, episode)
logging.msg('Looking up TVRage metadata at %s' % (url,),
verbosity=4)
d = self.agent.request('GET', url)
d.addCallback(deliverBody, BodyReceiver)
d.addCallback(self.extractMetadata)
return d
# IRenamerCommand
def processArgument(self, arg):
seriesName, season, episode = self.extractParts(
arg.basename(), overrides=self)
d = self.lookupMetadata(seriesName, season, episode)
d.addCallback(self.buildMapping)
return d
```
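The PyMeta grammar above is the authoritative parser; as a rough standalone approximation of its strict rules (not the actual grammar, and covering only the `S01E02` and `1x02` style markers), the idea can be sketched with a regular expression:
```python
import re

# Approximate stand-in for the 'complete_strict' rule: series words separated by
# '.', ' ', '-' or '_', followed by an S01E02- or 1x02-style episode marker.
STRICT = re.compile(
    r'^(?P<series>.+?)[ ._-]+'
    r'(?:[Ss](?P<s1>\d+)[Ee](?P<e1>\d+)|\[?(?P<s2>\d+)x(?P<e2>\d+)\]?)')

def rough_parts(filename):
    m = STRICT.match(filename)
    if m is None:
        return None
    series = re.sub(r'[._-]+', ' ', m.group('series')).strip()
    season = m.group('s1') or m.group('s2')
    episode = m.group('e1') or m.group('e2')
    return series, season, episode

print(rough_parts('The.Wire.S03E08.avi'))   # ('The Wire', '03', '08')
print(rough_parts('The Wire - 3x08.avi'))   # ('The Wire', '3', '08')
```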
#### File: renamer/test/test_actions.py
```python
from twisted.python.filepath import FilePath
from twisted.trial.unittest import TestCase
from renamer import errors
from renamer.application import Options
from renamer.plugins import actions
class _ActionTestMixin(object):
actionType = None
def setUp(self):
self.path = FilePath(self.mktemp())
self.path.makedirs()
self.options = Options(None)
self.src, self.dst = self.createFiles()
def createFiles(self):
"""
Create paths for source and destination files.
"""
return self.path.child('src'), self.path.child('dst')
def createAction(self, src=None, dst=None):
"""
Create an action from L{actionType}.
"""
if src is None:
src = self.src
if dst is None:
dst = self.dst
return self.actionType(src, dst)
def test_do(self):
"""
Perform the action.
"""
def test_doWithSubdirs(self):
"""
Performing an action involving a subdirectory results in that
subdirectory being created if it didn't already exist.
"""
self.dst = self.path.child('subdir').child('dst')
parent = self.dst.parent()
self.assertFalse(parent.exists())
self.test_do()
self.assertTrue(parent.exists())
self.assertEquals(parent.listdir(), ['dst'])
def test_doClobber(self):
"""
Performing an action raises L{renames.errors.NoClobber} when the
destination file already exists.
"""
self.dst.touch()
action = self.createAction()
self.assertRaises(
errors.NoClobber, action.do, self.options)
def test_undo(self):
"""
Perform the reverse action.
"""
def test_undoWithSubdirs(self):
"""
Performing a reverse action does not remove existing directories.
"""
self.dst = self.path.child('subdir').child('dst')
parent = self.dst.parent()
parent.makedirs()
self.assertTrue(parent.exists())
self.test_undo()
self.assertTrue(parent.exists())
self.assertEquals(parent.listdir(), [])
def test_undoClobber(self):
"""
Performing a reverse action raises L{renames.errors.NoClobber} when the
destination file already exists.
"""
self.src.touch()
action = self.createAction()
self.assertRaises(
errors.NoClobber, action.undo, self.options)
class MoveActionTests(_ActionTestMixin, TestCase):
"""
Tests for L{renamer.plugins.actions.MoveAction}.
"""
actionType = actions.MoveAction
def test_do(self):
self.src.touch()
self.assertTrue(self.src.exists())
self.assertFalse(self.dst.exists())
action = self.createAction()
action.do(self.options)
self.assertFalse(self.src.exists())
self.assertTrue(self.dst.exists())
def test_undo(self):
self.dst.touch()
self.assertFalse(self.src.exists())
self.assertTrue(self.dst.exists())
action = self.createAction()
action.undo(self.options)
self.assertTrue(self.src.exists())
self.assertFalse(self.dst.exists())
class SymlinkActionTests(_ActionTestMixin, TestCase):
"""
Tests for L{renamer.plugins.actions.SymlinkAction}.
"""
actionType = actions.SymlinkAction
def test_do(self):
self.src.touch()
self.assertTrue(self.src.exists())
self.assertFalse(self.dst.exists())
action = self.createAction()
action.do(self.options)
self.assertTrue(self.src.exists())
self.assertTrue(self.dst.exists())
self.assertTrue(self.dst.islink())
def test_undo(self):
self.src.touch()
self.src.linkTo(self.dst)
self.assertTrue(self.src.exists())
self.assertTrue(self.dst.exists())
self.assertTrue(self.dst.islink())
action = self.createAction()
action.undo(self.options)
self.assertTrue(self.src.exists())
self.assertFalse(self.dst.exists())
def test_undoClobber(self):
"""
Undoing a symlink cannot raise L{renamer.errors.NoClobber}.
"""
```
#### File: renamer/test/test_history.py
```python
from axiom.store import Store
from twisted.python.filepath import FilePath
from twisted.trial.unittest import TestCase
from renamer import errors, history, irenamer
from renamer.plugins.actions import SymlinkAction
def FakeOptions():
return {}
class FakeAction(object):
def do(self, options):
pass
def undo(self, options):
pass
class HistoryTests(TestCase):
"""
Tests for L{renamer.history.History}.
"""
def setUp(self):
self.store = Store()
self.history = history.History(store=self.store)
def test_newChangeset(self):
"""
L{renamer.history.History.newChangeset} creates a new changeset
instance and does not track it immediately.
"""
cs = self.history.newChangeset()
self.assertIdentical(type(cs), history.Changeset)
self.assertEquals(list(self.history.getChangesets()), [])
def test_pruneChangesets(self):
"""
L{renamer.history.History.pruneChangesets} removes empty changesets
(changesets without any actions) from the database.
"""
cs = self.history.newChangeset()
self.assertEquals(list(self.history.getChangesets()), [])
action = cs.newAction(
u'fake', FilePath(u'src'), FilePath(u'dst'), verify=False)
# Unused action.
cs.newAction(
u'fake', FilePath(u'src'), FilePath(u'dst'), verify=False)
self.assertEquals(list(cs.getActions()), [])
self.assertEquals(cs.numActions, 0)
def _adapter(action):
return FakeAction()
cs.do(action, FakeOptions(), _adapter=_adapter)
self.assertEquals(
list(cs.getActions()), [action])
self.assertEquals(cs.numActions, 1)
prunedChangesets, prunedActions = self.history.pruneChangesets()
self.assertEquals(prunedChangesets, 0)
self.assertEquals(prunedActions, 1)
self.assertEquals(list(self.history.getChangesets()), [cs])
cs.undo(action, FakeOptions(), _adapter=_adapter)
self.assertEquals(list(cs.getActions()), [])
self.assertEquals(cs.numActions, 0)
prunedChangesets, prunedActions = self.history.pruneChangesets()
self.assertEquals(prunedChangesets, 1)
self.assertEquals(prunedActions, 0)
self.assertEquals(list(self.history.getChangesets()), [])
class ChangesetTests(TestCase):
"""
Tests for L{renamer.history.Changeset}.
"""
def setUp(self):
self.store = Store()
self.history = history.History(store=self.store)
def test_newInvalidAction(self):
"""
L{renamer.history.Changeset.newAction} raises
L{renamer.errors.NoSuchAction} if the action name specified does not
refer to a valid action.
"""
cs = self.history.newChangeset()
self.assertRaises(errors.NoSuchAction,
cs.newAction, 'THIS_IS_NOT_REAL', FilePath(u'a'), FilePath(u'b'))
def test_representations(self):
"""
L{renamer.history.Changeset.asHumanly} returns a human-readable and
accurate representation of a changeset.
L{renamer.history.Changeset.__repr__} returns a useful and accurate
representation of a changeset.
"""
cs = self.history.newChangeset()
self.assertTrue(
cs.asHumanly().startswith(
u'Changeset with 0 action(s) ('))
self.assertEquals(
repr(cs),
'<Changeset 0 action(s) created=%r modified=%r>' % (
cs.created, cs.modified))
action = cs.newAction(
u'fake', FilePath(u'src'), FilePath(u'dst'), verify=False)
def _adapter(action):
return FakeAction()
cs.do(action, FakeOptions(), _adapter=_adapter)
self.assertTrue(
cs.asHumanly().startswith(
u'Changeset with 1 action(s) ('))
self.assertEquals(
repr(cs),
'<Changeset 1 action(s) created=%r modified=%r>' % (
cs.created, cs.modified))
class ActionTests(TestCase):
"""
Tests for L{renamer.history.Action}.
"""
def setUp(self):
self.store = Store()
self.history = history.History(store=self.store)
def test_adaption(self):
"""
Adapting a L{renamer.history.Action} object to
L{renamer.irenamer.IRenamingAction} results in an object implementing
C{IRenamingAction} that can perform forward and reverse actions.
"""
cs = self.history.newChangeset()
action = cs.newAction(u'symlink', FilePath(u'src'), FilePath(u'dst'))
a = irenamer.IRenamingAction(action)
self.assertIdentical(type(a), SymlinkAction)
self.assertTrue(irenamer.IRenamingAction.providedBy(type(a)))
def test_representations(self):
"""
L{renamer.history.Action.asHumanly} returns a human-readable and
accurate representation of an action.
L{renamer.history.Action.__repr__} returns a useful and accurate
representation of an action.
"""
cs = self.history.newChangeset()
src = FilePath(u'src')
dst = FilePath(u'dst')
action = cs.newAction(u'fake', src, dst, verify=False)
self.assertTrue(
action.asHumanly().startswith(
u'Fake: %s => %s (' % (src.path, dst.path)))
self.assertEquals(
repr(action),
'<Action name=%r src=%r dst=%r created=%r>' % (
action.name, action.src, action.dst, action.created))
``` |
{
"source": "jonathankamau/CodilityInPython",
"score": 4
} |
#### File: solutions/arrays/odd_occurrences_in_array.py
```python
def solution(A):
numbers = set([])
for n in A:
if n in numbers:
numbers.discard(n)
else:
numbers.add(n)
return numbers.pop()
print(solution([9, 3, 9, 3, 9, 7, 9]))
```
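The set-based solution above runs in O(N) time but may hold up to N/2 values at once. A common alternative, shown here as a hedged sketch rather than code from the repo, folds the array with XOR so every paired value cancels and only the odd occurrence survives, using O(1) extra space:

```python
from functools import reduce
from operator import xor

def solution_xor(A):
    # Paired values cancel each other out; the unpaired value remains.
    return reduce(xor, A)

print(solution_xor([9, 3, 9, 3, 9, 7, 9]))  # expected: 7
```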
#### File: solutions/caterpillarmethod/min_abs_sum_of_two.py
```python
def solution(A):
min_abs_sum = 2000000000
A = sorted(A)
head = 0
tail = len(A) - 1
while head <= tail:
min_abs_sum = min(min_abs_sum, abs(A[head] + A[tail]))
if A[head] + A[tail] < 0:
head += 1
else:
tail -= 1
return min_abs_sum
print(solution([-7, 3, -1, 5, -11, 4, -9, 14, 17, -2])) # Solution should be 1
print(solution([8, 3, 5, 16, 11])) # Solution should be 6
print(solution([-7, -5, -6, -2, -9])) # Solution should be 4
print(solution([-7, 3, -6, 1, 0, -9])) # Solution should be 0
print(solution([-22, 3, 4, 5])) # Solution should be 6
```
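For small inputs, the two-pointer result above can be cross-checked with a brute-force pass over all pairs (P <= Q, including P == Q). The sketch below is only a validation aid and is not part of the original solutions:

```python
from itertools import combinations_with_replacement

def solution_brute_force(A):
    # min |A[P] + A[Q]| over all pairs with P <= Q, including P == Q.
    return min(abs(x + y) for x, y in combinations_with_replacement(A, 2))

for case in ([-7, 3, -1, 5, -11, 4, -9, 14, 17, -2],
             [8, 3, 5, 16, 11],
             [-7, -5, -6, -2, -9],
             [-7, 3, -6, 1, 0, -9],
             [-22, 3, 4, 5]):
    print(solution_brute_force(case))  # expected: 1, 6, 4, 0, 6
```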
#### File: solutions/countingelements/frog_river_one.py
```python
def solution(X, A):
river_positions = [False] * (X + 1)
for time in range(len(A)):
pos = A[time]
if not river_positions[pos]:
river_positions[pos] = True
X -= 1
if X == 0: return time
return -1
print(solution(5, [1, 3, 1, 4, 2, 3, 5, 4]))
print(solution(1, [1, 1, 1]))
print(solution(3, [1, 2, 1]))
```
#### File: solutions/sorting/number_of_disc_intersections_alt.py
```python
class Disc():
def __init__(self, low_x, high_x):
self.low_x = low_x
self.high_x = high_x
def index_less_than(sortedDiscList, i, start, last):
mid = start + (last - start) // 2
if last <= start and sortedDiscList[mid].low_x > i:
return mid - 1
elif last <= start:
return mid
elif sortedDiscList[mid].low_x > i:
return index_less_than(sortedDiscList, i, start, mid - 1)
else:
return index_less_than(sortedDiscList, i, mid + 1, last)
def solution(A):
discs = []
for i in range(len(A)):
discs.append(Disc(i - A[i], i + A[i]))
discs = sorted(discs, key=lambda d: d.low_x)
total = 0
for i in range(len(discs)):
total += index_less_than(discs, discs[i].high_x + 0.5, 0, len(discs) - 1) - i
if total > 10000000:
total = -1
break
return total
print(solution([1, 5, 2, 1, 4, 0]))
print(solution([0] * 100000))
``` |
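The binary search over sorted discs is one way to keep this at O(N log N). Another widely used formulation, added here as a hedged alternative rather than a claim about the repo's approach, sorts the left and right endpoints separately and counts how many discs are still open when each new disc begins:

```python
def solution_sorted_endpoints(A):
    n = len(A)
    lower = sorted(i - A[i] for i in range(n))
    upper = sorted(i + A[i] for i in range(n))
    intersections = 0
    open_discs = 0
    closed = 0
    for low in lower:
        # Close every disc that ends before this one begins.
        while closed < n and upper[closed] < low:
            open_discs -= 1
            closed += 1
        # The new disc intersects every disc still open, then opens itself.
        intersections += open_discs
        open_discs += 1
        if intersections > 10000000:
            return -1
    return intersections

print(solution_sorted_endpoints([1, 5, 2, 1, 4, 0]))  # expected: 11
print(solution_sorted_endpoints([0] * 100000))        # expected: 0
```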
{
"source": "jonathankamau/CP2-bucket-list-api",
"score": 3
} |
#### File: app/mod_auth/controller.py
```python
from flask import Blueprint, jsonify, abort, request
from app import app
from app.mod_auth.models import User
mod_auth = Blueprint('auth', __name__, url_prefix='/auth')
"""
Provides the routes to register and log in a user. It also displays the token for the API.
"""
@app.errorhandler(401)
def custom401error(exception):
return jsonify(exception.description), 401
@app.errorhandler(400)
def custom400error(exception):
return jsonify(exception.description), 400
@mod_auth.route('/register/', methods=['POST'])
def register():
"""
Creates a new user when provided username and password via POST
    Returns: JSON response with the status of the user registration
"""
username = request.form.get('username')
    password = request.form.get('password')
if not username:
return abort(400, {
'error': {
'message': 'username data field missing/empty from POST request'
}
})
if not password:
abort(400, {
'error': {
'message': 'password data field missing/empty from POST request'
}
})
if User.query.filter_by(username=username).scalar():
abort(400, {
'error': {
'message': 'username already registered'
}
})
user = User(username, password)
user.save()
user.refresh_from_db()
return jsonify({
'username': user.username,
'message': 'new user created successfully'
}), 201
@mod_auth.route('/login/', methods=['POST'])
def login():
"""
validate the username and password supplied via POST to authenticate the user
Returns:
JSON response with username, and token
"""
username = request.form.get('username')
password = request.form.get('password')
if not username:
abort(400, {
'error': {
'message': 'username data field missing/empty from POST request'
}
})
if not password:
abort(400, {
'error': {
'message': 'password data field missing/empty from POST request'
}
})
user = User.query.filter_by(username=username).first()
if user and user.verify_password_hash(password):
return jsonify({
'message': 'login successful. Use token for authentication for the API',
'username': user.username,
'token': user.token.decode()
})
abort(401, {
'error': {
'message': 'invalid username/password combination'
}
})
```
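Assuming the blueprint above is registered on a locally running app with only its own `/auth` prefix (the base URL and credentials below are placeholders), the register and login routes can be exercised with the `requests` library roughly as follows:

```python
import requests

BASE_URL = 'http://localhost:5000/auth'  # assumed local dev server

# Register a new user, then log in to obtain the API token.
resp = requests.post(BASE_URL + '/register/',
                     data={'username': 'demo', 'password': 'example-pass'})
print(resp.status_code, resp.json())

resp = requests.post(BASE_URL + '/login/',
                     data={'username': 'demo', 'password': 'example-pass'})
print(resp.json().get('token'))
```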
#### File: mod_bucketlists/tests/test_bucketlist.py
```python
from app.test_config import BaseTestCase
class BucketListTestCase(BaseTestCase):
def test_creates_new_bucketlist_with_token(self):
data = {
'bucket_name': 'Christmas'
}
response = self.client.post('/bucketlists/', data=data, headers=self.token, follow_redirects=True)
self.assertEqual(201, response.status_code)
response = response.data.decode('utf-8')
self.assertIn(data['bucket_name'], response)
self.assertIn('date_created', response)
def test_gets_bucketlist_names_for_the_user(self):
response = self.client.get('/bucketlists/', headers=self.token, follow_redirects=True)
response = response.data.decode('utf-8')
self.assertIn('Checkpoint', response)
self.assertIn('created_by', response)
self.assertIn('date_created', response)
def test_search_bucketlist_by_name(self):
response = self.client.get('/bucketlists/?q=Check', headers=self.token, follow_redirects=True)
response = response.data.decode('utf-8')
self.assertIn('Checkpoint', response)
self.assertIn('created_by', response)
self.assertIn('date_created', response)
self.assertIn('next', response)
self.assertIn('prev', response)
def test_error_on_bucketlist_creation_with_invalid_token(self):
data = {
'bucket_name': 'Christmas'
}
response = self.client.post('/bucketlists/', data=data, headers=self.invalid_token, follow_redirects=True)
self.assertEqual(403, response.status_code)
response = response.data.decode('utf-8')
self.assertIn('error', response)
self.assertIn('invalid token', response)
def test_error_on_bucketlist_creation_with_expired_token(self):
data = {
'bucket_name': 'Christmas'
}
response = self.client.post('/bucketlists/', data=data, headers=self.expired_token, follow_redirects=True)
self.assertEqual(403, response.status_code)
response = response.data.decode('utf-8')
self.assertIn('error', response)
self.assertIn('expired token', response)
```
#### File: mod_bucketlists/tests/test_single_bucketlist.py
```python
from app.test_config import BaseTestCase
class SingleBucketListTestCase(BaseTestCase):
def test_get_single_bucketlist(self):
response = self.client.get('/bucketlists/1', headers=self.token, follow_redirects=True)
response = response.data.decode('utf-8')
self.assertIn('items', response)
self.assertIn('date_created', response)
self.assertIn('created_by', response)
def test_error_on_getting_non_existent_bucketlist(self):
response = self.client.get('/bucketlists/1000', headers=self.token, follow_redirects=True)
self.assertEqual(404, response.status_code)
response = response.data.decode('utf-8')
self.assertIn('error', response)
self.assertIn('bucket list not found', response)
def test_error_on_getting_bucketlist_with_invalid_token(self):
response = self.client.get('/bucketlists/1', headers=self.invalid_token, follow_redirects=True)
self.assertEqual(403, response.status_code)
response = response.data.decode('utf-8')
self.assertIn('error', response)
self.assertIn('invalid token', response)
def test_error_on_getting_bucketlist_with_expired_token(self):
response = self.client.get('/bucketlists/1', headers=self.expired_token, follow_redirects=True)
self.assertEqual(403, response.status_code)
response = response.data.decode('utf-8')
self.assertIn('error', response)
self.assertIn('expired token', response)
def test_updates_bucketlist_name(self):
data = {'name': 'New Bucket'}
response = self.client.put('/bucketlists/1', data=data, headers=self.token, follow_redirects=True)
response = response.data.decode('utf-8')
self.assertIn(data['name'], response)
def test_error_on_updating_bucketlist_name_to_existing_name(self):
data = {'name': 'Checkpoint'}
response = self.client.put('/bucketlists/1', data=data, headers=self.token, follow_redirects=True)
self.assertEqual(403, response.status_code)
response = response.data.decode('utf-8')
self.assertIn('error', response)
self.assertIn('new BucketList name is equal to new name', response)
def test_deletes_users_bucketlist(self):
response = self.client.delete('/bucketlists/1', headers=self.token, follow_redirects=True)
response = response.data.decode('utf-8')
self.assertIn('successfully deleted bucketlist', response)
``` |
{
"source": "jonathankamau/cp2-bucketlist-application",
"score": 3
} |
#### File: cp2-bucketlist-application/app/models.py
```python
import datetime
import os
from app import db
from werkzeug.security import generate_password_hash, check_password_hash
from itsdangerous import (TimedJSONWebSignatureSerializer
as Serializer, BadSignature, SignatureExpired)
class User(db.Model):
""" User Model for storing user related details """
__tablename__ = "users"
# database fields
user_id = db.Column(db.Integer, primary_key=True, autoincrement=True)
firstname = db.Column(db.String(255), nullable=False)
lastname = db.Column(db.String(255), nullable=False)
username = db.Column(db.String(255), unique=True, nullable=False)
password = db.Column(db.String(255), nullable=False)
registered_on = db.Column(db.DateTime, nullable=False)
bucketlists = db.relationship('Bucketlist', order_by='Bucketlist.bucketlist_id')
def __init__(self, firstname, lastname, username, password):
""" initialize the model fields """
self.firstname = firstname
self.lastname = lastname
self.username = username
self.password = generate_password_hash(password)
self.registered_on = datetime.datetime.now()
@property
def get_password(self):
""" retrieves the password """
return self.password
@property
def get_id(self):
""" retrieves the user id """
return self.user_id
def check_password(self, password):
""" upon login, checks if the password given is same as that in the database """
if check_password_hash(self.password, password):
return True
return False
def save(self):
""" saves the user details to the database """
db.session.add(self)
db.session.commit()
def generate_token(self, expiration=6000):
serial = Serializer(os.getenv('SECRET'), expires_in=expiration)
return "Bearer "+serial.dumps({'id': self.user_id}).decode('utf-8')
@staticmethod
def verify_auth_token(token):
serial = Serializer(os.getenv('SECRET'))
try:
data = serial.loads(token)
except SignatureExpired:
return "Expired Token!" # valid token, but expired
except BadSignature:
return "Invalid Token!" # invalid token
user = User.query.get(data['id'])
return user
class Bucketlist(db.Model):
"""This class represents the bucketlist table, it maps to the table"""
__tablename__ = 'bucketlists'
bucketlist_id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(255))
user_id = db.Column(db.Integer, db.ForeignKey('users.user_id'))
created_by = db.Column(db.String(255))
date_created = db.Column(db.DateTime, default=db.func.current_timestamp())
date_modified = db.Column(
db.DateTime, default=db.func.current_timestamp(),
onupdate=db.func.current_timestamp())
user = db.relationship('User')
bucketlist_items = db.relationship('BucketlistItems', backref='items', lazy='select', order_by="desc(BucketlistItems.date_modified)")
def __init__(self, name, user_id, created_by):
"""initialize with name of bucketlist and author."""
self.name = name
self.created_by = created_by
self.user_id = user_id
def save(self):
db.session.add(self)
db.session.commit()
@staticmethod
def get_all():
return Bucketlist.query.all()
def delete(self):
db.session.delete(self)
db.session.commit()
def __repr__(self):
return "<Bucketlist: {}>".format(self.name)
class BucketlistItems(db.Model):
"""This class represents the bucketlist items table, it maps to the table"""
__tablename__ = 'bucketlist_items'
item_id = db.Column(db.Integer, primary_key=True)
bucketlist_id = db.Column(db.Integer, db.ForeignKey('bucketlists.bucketlist_id'))
name = db.Column(db.String(255))
description = db.Column(db.String(255))
date_created = db.Column(db.DateTime, default=db.func.current_timestamp())
date_modified = db.Column(
db.DateTime, default=db.func.current_timestamp(),
onupdate=db.func.current_timestamp())
done = db.Column(db.Boolean, unique=False, default=False)
user_id = db.Column(db.Integer, db.ForeignKey('users.user_id'))
created_by = db.Column(db.String(255))
def __init__(self, name, bucketlist_id, user_id, description, created_by):
"""initialize with name."""
self.name = name
self.bucketlist_id = bucketlist_id
self.description = description
self.created_by = created_by
self.user_id = user_id
def save(self):
db.session.add(self)
db.session.commit()
def delete(self):
db.session.delete(self)
db.session.commit()
def __repr__(self):
return "<Items: {}>".format(self.name)
```
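The token handling in `generate_token` and `verify_auth_token` is a thin wrapper around the timed serializer from `itsdangerous`. A standalone round-trip of just that mechanism looks roughly like the sketch below; it assumes itsdangerous 1.x (where `TimedJSONWebSignatureSerializer` still exists) and uses a placeholder secret instead of the app's `SECRET` environment variable:

```python
from itsdangerous import (TimedJSONWebSignatureSerializer as Serializer,
                          BadSignature, SignatureExpired)

serializer = Serializer('placeholder-secret', expires_in=6000)
token = serializer.dumps({'id': 1}).decode('utf-8')

try:
    data = serializer.loads(token)
    print('token belongs to user id', data['id'])
except SignatureExpired:
    print('Expired Token!')   # valid token, but expired
except BadSignature:
    print('Invalid Token!')   # tampered or malformed token
```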
#### File: cp2-bucketlist-application/tests/test_models.py
```python
import unittest
import os
import json
from werkzeug.security import generate_password_hash, check_password_hash
from app import create_app, db
from app.models import User, Bucketlist, BucketlistItems
class ModelTests(unittest.TestCase):
"""This class represents test case for bucketlist configurations"""
def setUp(self):
"""Define test variables and initialize app."""
self.app = create_app(config_name="testing")
self.client = self.app.test_client
self.register_data = {'firstname': 'John',
'lastname': 'Kamau',
'username': 'kamjon',
'password': '<PASSWORD>'
}
self.login_data = {'username': 'kamjon',
'password': '<PASSWORD>'}
# binds the app to the current context
with self.app.app_context():
# create all tables
db.create_all()
self.register_response = self.client().post('/bucketlist_api/v1.0/auth/register',
data=self.register_data)
self.login_response = self.client().post('/bucketlist_api/v1.0/auth/login',
data=self.login_data)
result = json.loads(self.login_response.data)
self.token = result['token']
self.current_user = User('John','Kamau', 'kamjon', '<PASSWORD>')
def test_get_password(self):
password = self.register_data['password']
self.assertTrue(check_password_hash(self.current_user.get_password, password))
``` |
{
"source": "jonathankamau/EpisodeTracker",
"score": 2
} |
#### File: src/episodetrackerapp/serializers.py
```python
from episodetrackerapp.models import UserProfile, MySeries, LoggedEpisodes
from rest_framework.validators import UniqueTogetherValidator
from rest_framework import serializers
class UserSerializer(serializers.ModelSerializer):
def create(self, user_details):
user = UserProfile.objects.create(
first_name = user_details['first_name'],
last_name = user_details['last_name'],
username = user_details['username'],
email = user_details['email']
)
user.set_password(user_details['password'])
user.save()
return user
class Meta:
model = UserProfile
validators = [ UniqueTogetherValidator(queryset=UserProfile.objects.all(),
fields = ('first_name', 'last_name', 'username', 'email', 'password')
)]
fields = ('first_name', 'last_name', 'username', 'email', 'password')
class MySeriesSerializer(serializers.ModelSerializer):
class Meta:
model = MySeries
fields = ('series_id', 'series_name','series_image', 'description', 'watch_status')
class LoggedEpisodesSerializer(serializers.ModelSerializer):
class Meta:
model = LoggedEpisodes
fields = ('season_id', 'episode_id', 'series')
``` |
{
"source": "jonathankamau/MovieBuff-API",
"score": 3
} |
#### File: api/endpoints/search.py
```python
import os
import requests
from flask import g, jsonify, request
from flask_restplus import Resource
from api.utils import token_required, set_result_in_cache, UserDetails
class Search(Resource):
@token_required
def get(self):
movie_name = request.args.get('movie_name', type=str)
if not movie_name:
return {'error': 'Movie name has not been given!'}
else:
movie_details = requests.get('https://api.themoviedb.org/3/search/movie?api_key='+os.getenv('MOVIES_API_KEY')+'&query='+movie_name)
set_result_in_cache(movie_details.json())
return movie_details.json()['results']
```
#### File: api/endpoints/user.py
```python
from flask import g, jsonify, request
from flask_restplus import Resource
from werkzeug.security import generate_password_hash, check_password_hash
from api.utils import UserDetails, reg_schema, login_schema
class Users(Resource):
def post(self, operation):
if operation == 'register':
user_details = request.get_json()
result, errors = reg_schema.load(user_details)
if errors:
return jsonify(errors)
else:
user = UserDetails.query.filter_by(
username=result['username']).first()
if user:
return {"message": "User already exists!"}, 400
else:
new_user = UserDetails(firstname=result['first_name'],
lastname=result['last_name'],
username=result['username'],
password=generate_password_hash(
result['password']))
new_user.save()
return {"response": "user created successfully!"}, 201
elif operation == 'login':
user_details = request.get_json()
result, errors = login_schema.load(user_details)
if errors:
return jsonify(errors)
else:
user = UserDetails.query.filter_by(
username=result['username']).first()
                if user and check_password_hash(user.password,
                                                user_details['password']):
token = user.generate_token()
# gives message response
return {'token': token,
'message': "You have logged in successfully"
}, 200
else:
return {'error': 'invalid username or password!'}, 400
def put(self):
user_details = request.get_json()
        user = UserDetails.query.filter_by(
id=g.current_user.id).first_or_404()
user.username = user_details['username']
user.save()
return {"response": "user updated successfully!"}, 201
```
#### File: api/utils/auth.py
```python
from flask import g, jsonify, request
from functools import wraps
from api.utils.models import UserDetails
user_details = UserDetails()
def token_required(f):
@wraps(f)
def decorated(*args, **kwargs):
authorization_token = request.headers.get('Authorization')
if not authorization_token:
message = "Bad request. Header does not contain " \
"authorization token"
return {"response": message}, 400
g.current_user = user_details.verify_auth_token(authorization_token)
if g.current_user in ['Invalid Token!', 'Expired Token!']:
message = ("You are not authorized to access this page",
g.current_user)
return {"response": message}, 400
return f(*args, **kwargs)
return decorated
```
#### File: api/utils/models.py
```python
import os
import uuid
from sqlalchemy.exc import SQLAlchemyError
from flask_sqlalchemy import SQLAlchemy
from itsdangerous import BadSignature, SignatureExpired
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
db = SQLAlchemy()
def uuid_generator():
return str(uuid.uuid1())
class Base(db.Model):
__abstract__ = True
id = db.Column(db.String, primary_key=True, default=uuid_generator)
def save(self):
saved = None
try:
db.session.add(self)
db.session.commit()
saved = True
except Exception:
db.session.rollback()
saved = False
return saved
def delete(self):
deleted = None
try:
db.session.delete(self)
db.session.commit()
deleted = True
except Exception:
db.session.rollback()
deleted = False
return deleted
class UserDetails(Base):
__tablename__ = 'user_details'
firstname = db.Column(db.String)
lastname = db.Column(db.String)
username = db.Column(db.String)
password = db.Column(db.String)
favourite_movie = db.relationship('FavouriteMovies')
def generate_token(self, expiration=6000):
serial = Serializer(os.getenv('SECRET'), expires_in=expiration)
return serial.dumps({'id': self.id}).decode('utf-8')
def verify_auth_token(self, token):
serial = Serializer(os.getenv('SECRET'))
try:
data = serial.loads(token)
except SignatureExpired:
return "Expired Token!" # valid token, but expired
except BadSignature:
return "Invalid Token!" # invalid token
user = UserDetails.query.filter_by(id=data['id']).first()
return user
class MovieDetails(Base):
__tablename__ = 'movie_details'
movie_id = db.Column(db.Integer, unique=True)
movie_title = db.Column(db.String)
vote_average = db.Column(db.String)
release_date = db.Column(db.DateTime)
overview = db.Column(db.String)
category_id = db.Column(
db.String,
db.ForeignKey('movie_category.id')
)
movie_category_details = db.relationship('MovieCategory')
favourite_movie = db.relationship('FavouriteMovies',
backref='movie_detail')
class MovieCategory(Base):
__tablename__ = 'movie_category'
category_name = db.Column(db.String)
movie_details = db.relationship('MovieDetails', backref='movie_category')
class FavouriteMovies(Base):
__tablename__ = 'favourite_movie'
user_id = db.Column(db.String, db.ForeignKey('user_details.id'))
movie_id = db.Column(db.String, db.ForeignKey('movie_details.id'))
ranking_number = db.Column(db.Integer)
users = db.relationship('UserDetails')
movie_details = db.relationship('MovieDetails')
```
#### File: MovieBuff-API/tests/test_view_favourite_movies.py
```python
import json
from tests.base_test import BaseTestCase
class TestViewFavouritesList(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
def test_retrieve_movies_list(self):
"""
Test that the favourite movies list for a user is
retrieved successfully.
"""
response_message = json.loads(
self.get_favourite_movies.get_data(
as_text=True
)
)
self.assertEqual(self.get_favourite_movies.status_code, 200)
self.assertEqual('Your movie list has been retrieved successfully',
response_message['message'],
msg="Movie list has not been retrieved!"
)
``` |
{
"source": "jonathankamau/note-taking-app",
"score": 2
} |
#### File: note-taking-app/app/models.py
```python
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
class BaseModel(models.Model):
date_created = models.DateTimeField(auto_now_add=True)
date_modified = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
class UserProfile(BaseModel, User):
class Meta:
db_table = 'users'
def __str__(self):
return "User: {}".format(self.user.username)
class Note(BaseModel):
title = models.CharField(max_length=50)
org_name = models.CharField(max_length=50)
purpose = models.TextField(blank=False)
content = models.TextField(blank=False)
total_attendance = models.PositiveIntegerField(blank=False)
user = models.ForeignKey(User, on_delete=models.CASCADE)
meeting_category = models.CharField(max_length=50)
class Meta:
db_table = 'notes'
def __str__(self):
return "My Notes: {}".format(self.id)
class MeetingCategory(models.Model):
date_created = models.DateTimeField(default=timezone.now, editable=False)
date_modified = models.DateTimeField(default=timezone.now, editable=False)
name = models.CharField(max_length=50)
description = models.TextField(blank=True)
class Meta:
db_table = 'meeting_categories'
def __str__(self):
return "Meeting Categories: {}".format(self.id)
```
#### File: app/tests/test_base.py
```python
from django.test import Client, TestCase
from django.urls import reverse
class BaseTestClass(TestCase):
def setUp(self):
self.client = Client()
fixtures = ['/app/data/categories.json',]
user_registration_details = {'first_name': 'Jay',
'last_name': 'Kline',
'username': 'kline',
'email': '<EMAIL>',
'password': '<PASSWORD>'
}
registration_wrong_creds = {'first_name': 'Jay',
'last_name': 'Kline',
'username': 'kline',
'email': 'jay.kline',
'password': 'j'
}
user_login_details = {
'username': 'kline',
'password': '<PASSWORD>'
}
self.register = self.client.post(
reverse('register'), user_registration_details)
self.reg_wrong_creds = self.client.post(
reverse('register'), registration_wrong_creds)
self.user_login = self.client.post(
reverse('login'), user_login_details)
```
#### File: note-taking-app/app/views.py
```python
from django.shortcuts import (render,
redirect, get_object_or_404)
from django.utils.decorators import method_decorator
from django.contrib.auth import (decorators, forms,
login, logout, authenticate)
from django.views.generic import View
from django.http import HttpResponse
from django.contrib import messages
from app.models import User, Note, MeetingCategory, models
from app.forms import RegistrationForm, NotesForm
authentication_form = forms.AuthenticationForm
login_required = decorators.login_required
def logout_user(request):
logout(request)
return redirect('home')
def home(request):
if request.user.is_authenticated:
return redirect('dashboard')
return render(request, 'home.html')
def check_for_404(request, exception, template_name="404.html"):
return render(request, '404.html')
``` |
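The views above depend on named routes ('home', 'register', 'login', 'dashboard') and a custom 404 handler. A plausible `urls.py` wiring is sketched below; the `register_user`, `login_user`, and `dashboard` view names are assumptions inferred from `views.py` and the tests, not code taken from the repository:

```python
from django.urls import path

from app import views

urlpatterns = [
    path('', views.home, name='home'),
    path('register/', views.register_user, name='register'),   # assumed view name
    path('login/', views.login_user, name='login'),             # assumed view name
    path('logout/', views.logout_user, name='logout'),
    path('dashboard/', views.dashboard, name='dashboard'),      # assumed view name
]

# Route unknown URLs through the custom handler defined in views.py.
handler404 = 'app.views.check_for_404'
```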