repo_name (string, 5–100 chars) | ref (string, 12–67 chars) | path (string, 4–244 chars) | copies (string, 1–8 chars) | content (string, 0–1.05M chars, nullable ⌀)
---|---|---|---|---|
pkill-nine/qutebrowser
|
refs/heads/qutebrowser-custom
|
qutebrowser/misc/split.py
|
2
|
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2017 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Our own fork of shlex.split with some added and removed features."""
import re
from qutebrowser.utils import log
class ShellLexer:
"""A lexical analyzer class for simple shell-like syntaxes.
Based on Python's shlex, but cleaned up, removed some features, and added
some features useful for qutebrowser.
Attributes:
FIXME
"""
def __init__(self, s):
self.string = s
self.whitespace = ' \t\r'
self.quotes = '\'"'
self.escape = '\\'
self.escapedquotes = '"'
self.keep = False
self.quoted = None
self.escapedstate = None
self.token = None
self.state = None
self.reset()
def reset(self):
"""Reset the state machine state to the defaults."""
self.quoted = False
self.escapedstate = ' '
self.token = ''
self.state = ' '
def __iter__(self): # pragma: no mccabe
"""Read a raw token from the input stream."""
# pylint: disable=too-many-branches,too-many-statements
self.reset()
for nextchar in self.string:
if self.state == ' ':
if self.keep:
self.token += nextchar
if nextchar in self.whitespace:
if self.token or self.quoted:
yield self.token
self.reset()
elif nextchar in self.escape:
self.escapedstate = 'a'
self.state = nextchar
elif nextchar in self.quotes:
self.state = nextchar
else:
self.token = nextchar
self.state = 'a'
elif self.state in self.quotes:
self.quoted = True
if nextchar == self.state:
if self.keep:
self.token += nextchar
self.state = 'a'
elif (nextchar in self.escape and
self.state in self.escapedquotes):
if self.keep:
self.token += nextchar
self.escapedstate = self.state
self.state = nextchar
else:
self.token += nextchar
elif self.state in self.escape:
# In posix shells, only the quote itself or the escape
# character may be escaped within quotes.
if (self.escapedstate in self.quotes and
nextchar != self.state and
nextchar != self.escapedstate and not self.keep):
self.token += self.state
self.token += nextchar
self.state = self.escapedstate
elif self.state == 'a':
if nextchar in self.whitespace:
self.state = ' '
assert self.token or self.quoted
yield self.token
self.reset()
if self.keep:
yield nextchar
elif nextchar in self.quotes:
if self.keep:
self.token += nextchar
self.state = nextchar
elif nextchar in self.escape:
if self.keep:
self.token += nextchar
self.escapedstate = 'a'
self.state = nextchar
else:
self.token += nextchar
else:
raise AssertionError("Invalid state {!r}!".format(self.state))
if self.state in self.escape and not self.keep:
self.token += self.state
if self.token or self.quoted:
yield self.token
def split(s, keep=False):
"""Split a string via ShellLexer.
    Args:
        s: The string to split.
        keep: Whether to keep special chars in the split output.
"""
lexer = ShellLexer(s)
lexer.keep = keep
tokens = list(lexer)
if not tokens:
return []
out = []
spaces = ""
log.shlexer.vdebug("{!r} -> {!r}".format(s, tokens))
for t in tokens:
if t.isspace():
spaces += t
else:
out.append(spaces + t)
spaces = ""
if spaces:
out.append(spaces)
return out
def _combine_ws(parts, whitespace):
"""Combine whitespace in a list with the element following it.
Args:
parts: A list of strings.
whitespace: A string containing what's considered whitespace.
Return:
The modified list.
"""
out = []
ws = ''
for part in parts:
if not part:
continue
elif part in whitespace:
ws += part
else:
out.append(ws + part)
ws = ''
if ws:
out.append(ws)
return out
def simple_split(s, keep=False, maxsplit=None):
"""Split a string on whitespace, optionally keeping the whitespace.
Args:
s: The string to split.
keep: Whether to keep whitespace.
maxsplit: The maximum count of splits.
Return:
A list of split strings.
"""
whitespace = '\n\t '
if maxsplit == 0:
# re.split with maxsplit=0 splits everything, while str.split splits
# nothing (which is the behavior we want).
if keep:
return [s]
else:
return [s.strip(whitespace)]
elif maxsplit is None:
maxsplit = 0
if keep:
pattern = '([' + whitespace + '])'
parts = re.split(pattern, s, maxsplit)
return _combine_ws(parts, whitespace)
else:
pattern = '[' + whitespace + ']'
parts = re.split(pattern, s, maxsplit)
parts[-1] = parts[-1].rstrip()
return [p for p in parts if p]
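# A usage sketch (hypothetical inputs, behavior per the functions above):
#   split('foo "bar baz" qux')               -> ['foo', 'bar baz', 'qux']
#   split('foo  bar', keep=True)             -> ['foo', '  bar']
#       (kept whitespace attaches to the element that follows it)
#   simple_split('foo bar baz', maxsplit=1)  -> ['foo', 'bar baz']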
|
phamelin/ardupilot
|
refs/heads/master
|
Tools/FilterTestTool/FilterTest.py
|
30
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" ArduPilot IMU Filter Test Class
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program. If not, see <http://www.gnu.org/licenses/>.
"""
__author__ = "Guglielmo Cassinelli"
__contact__ = "[email protected]"
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
from matplotlib.animation import FuncAnimation
from scipy import signal
from BiquadFilter import BiquadFilterType, BiquadFilter
sliders = [] # matplotlib sliders must be global
anim = None # matplotlib animations must be global
class FilterTest:
FILTER_DEBOUNCE = 10 # ms
FILT_SHAPE_DT_FACTOR = 1 # increase to reduce filter shape size
FFT_N = 512
filters = {}
def __init__(self, acc_t, acc_x, acc_y, acc_z, gyr_t, gyr_x, gyr_y, gyr_z, acc_freq, gyr_freq,
acc_lpf_cutoff, gyr_lpf_cutoff,
acc_notch_freq, acc_notch_att, acc_notch_band,
gyr_notch_freq, gyr_notch_att, gyr_notch_band,
log_name, accel_notch=False, second_notch=False):
self.filter_color_map = plt.get_cmap('summer')
self.filters["acc"] = [
BiquadFilter(acc_lpf_cutoff, acc_freq)
]
if accel_notch:
self.filters["acc"].append(
BiquadFilter(acc_notch_freq, acc_freq, BiquadFilterType.PEAK, acc_notch_att, acc_notch_band),
)
self.filters["gyr"] = [
BiquadFilter(gyr_lpf_cutoff, gyr_freq),
BiquadFilter(gyr_notch_freq, gyr_freq, BiquadFilterType.PEAK, gyr_notch_att, gyr_notch_band)
]
if second_notch:
self.filters["acc"].append(
BiquadFilter(acc_notch_freq * 2, acc_freq, BiquadFilterType.PEAK, acc_notch_att, acc_notch_band)
)
self.filters["gyr"].append(
BiquadFilter(gyr_notch_freq * 2, gyr_freq, BiquadFilterType.PEAK, gyr_notch_att, gyr_notch_band)
)
self.ACC_t = acc_t
self.ACC_x = acc_x
self.ACC_y = acc_y
self.ACC_z = acc_z
self.GYR_t = gyr_t
self.GYR_x = gyr_x
self.GYR_y = gyr_y
self.GYR_z = gyr_z
self.GYR_freq = gyr_freq
self.ACC_freq = acc_freq
self.gyr_dt = 1. / gyr_freq
self.acc_dt = 1. / acc_freq
self.timer = None
self.updated_artists = []
# INIT
self.init_plot(log_name)
def test_acc_filters(self):
filt_xs = self.test_filters(self.filters["acc"], self.ACC_t, self.ACC_x)
filt_ys = self.test_filters(self.filters["acc"], self.ACC_t, self.ACC_y)
filt_zs = self.test_filters(self.filters["acc"], self.ACC_t, self.ACC_z)
return filt_xs, filt_ys, filt_zs
def test_gyr_filters(self):
filt_xs = self.test_filters(self.filters["gyr"], self.GYR_t, self.GYR_x)
filt_ys = self.test_filters(self.filters["gyr"], self.GYR_t, self.GYR_y)
filt_zs = self.test_filters(self.filters["gyr"], self.GYR_t, self.GYR_z)
return filt_xs, filt_ys, filt_zs
def test_filters(self, filters, Ts, Xs):
for f in filters:
f.reset()
x_filtered = []
for i, t in enumerate(Ts):
x = Xs[i]
x_f = x
for filt in filters:
x_f = filt.apply(x_f)
x_filtered.append(x_f)
return x_filtered
def get_filter_shape(self, filter):
samples = int(filter.get_sample_freq()) # resolution of filter shape based on sample rate
x_space = np.linspace(0.0, samples // 2, samples // int(2 * self.FILT_SHAPE_DT_FACTOR))
return x_space, filter.freq_response(x_space)
def init_signal_plot(self, ax, Ts, Xs, Ys, Zs, Xs_filtered, Ys_filtered, Zs_filtered, label):
ax.plot(Ts, Xs, linewidth=1, label="{}X".format(label), alpha=0.5)
ax.plot(Ts, Ys, linewidth=1, label="{}Y".format(label), alpha=0.5)
ax.plot(Ts, Zs, linewidth=1, label="{}Z".format(label), alpha=0.5)
filtered_x_ax, = ax.plot(Ts, Xs_filtered, linewidth=1, label="{}X filtered".format(label), alpha=1)
filtered_y_ax, = ax.plot(Ts, Ys_filtered, linewidth=1, label="{}Y filtered".format(label), alpha=1)
filtered_z_ax, = ax.plot(Ts, Zs_filtered, linewidth=1, label="{}Z filtered".format(label), alpha=1)
ax.legend(prop={'size': 8})
return filtered_x_ax, filtered_y_ax, filtered_z_ax
def fft_to_xdata(self, fft):
n = len(fft)
norm_factor = 2. / n
return norm_factor * np.abs(fft[:n // 2])
def plot_fft(self, ax, x, fft, label):
fft_ax, = ax.plot(x, self.fft_to_xdata(fft), label=label)
return fft_ax
def init_fft(self, ax, Ts, Xs, Ys, Zs, sample_rate, dt, Xs_filtered, Ys_filtered, Zs_filtered, label):
_freqs_raw_x, _times_raw_x, _stft_raw_x = signal.stft(Xs, sample_rate, window='hann', nperseg=self.FFT_N)
raw_fft_x = np.average(np.abs(_stft_raw_x), axis=1)
_freqs_raw_y, _times_raw_y, _stft_raw_y = signal.stft(Ys, sample_rate, window='hann', nperseg=self.FFT_N)
raw_fft_y = np.average(np.abs(_stft_raw_y), axis=1)
_freqs_raw_z, _times_raw_z, _stft_raw_z = signal.stft(Zs, sample_rate, window='hann', nperseg=self.FFT_N)
raw_fft_z = np.average(np.abs(_stft_raw_z), axis=1)
_freqs_x, _times_x, _stft_x = signal.stft(Xs_filtered, sample_rate, window='hann', nperseg=self.FFT_N)
filtered_fft_x = np.average(np.abs(_stft_x), axis=1)
_freqs_y, _times_y, _stft_y = signal.stft(Ys_filtered, sample_rate, window='hann', nperseg=self.FFT_N)
filtered_fft_y = np.average(np.abs(_stft_y), axis=1)
_freqs_z, _times_z, _stft_z = signal.stft(Zs_filtered, sample_rate, window='hann', nperseg=self.FFT_N)
filtered_fft_z = np.average(np.abs(_stft_z), axis=1)
ax.plot(_freqs_raw_x, raw_fft_x, alpha=0.5, linewidth=1, label="{}x FFT".format(label))
ax.plot(_freqs_raw_y, raw_fft_y, alpha=0.5, linewidth=1, label="{}y FFT".format(label))
ax.plot(_freqs_raw_z, raw_fft_z, alpha=0.5, linewidth=1, label="{}z FFT".format(label))
filtered_fft_ax_x, = ax.plot(_freqs_x, filtered_fft_x, label="filt. {}x FFT".format(label))
filtered_fft_ax_y, = ax.plot(_freqs_y, filtered_fft_y, label="filt. {}y FFT".format(label))
filtered_fft_ax_z, = ax.plot(_freqs_z, filtered_fft_z, label="filt. {}z FFT".format(label))
# FFT
# samples = len(Ts)
# x_space = np.linspace(0.0, 1.0 / (2.0 * dt), samples // 2)
# filtered_data = np.hanning(len(Xs_filtered)) * Xs_filtered
# raw_fft = np.fft.fft(np.hanning(len(Xs)) * Xs)
# filtered_fft = np.fft.fft(filtered_data, n=self.FFT_N)
# self.plot_fft(ax, x_space, raw_fft, "{} FFT".format(label))
# fft_freq = np.fft.fftfreq(self.FFT_N, d=dt)
# x_space
# filtered_fft_ax = self.plot_fft(ax, fft_freq[:self.FFT_N // 2], filtered_fft, "filtered {} FFT".format(label))
ax.set_xlabel("frequency")
# ax.set_xscale("log")
# ax.xaxis.set_major_formatter(ScalarFormatter())
ax.legend(prop={'size': 8})
return filtered_fft_ax_x, filtered_fft_ax_y, filtered_fft_ax_z
def init_filter_shape(self, ax, filter, color):
center = filter.get_center_freq()
x_space, lpf_shape = self.get_filter_shape(filter)
plot_slpf_shape, = ax.plot(x_space, lpf_shape, c=color, label="LPF shape")
xvline_lpf_cutoff = ax.axvline(x=center, linestyle="--", c=color) # LPF cutoff freq
return plot_slpf_shape, xvline_lpf_cutoff
def create_slider(self, name, rect, max, value, color, callback):
global sliders
ax_slider = self.fig.add_axes(rect, facecolor='lightgoldenrodyellow')
slider = Slider(ax_slider, name, 0, max, valinit=np.sqrt(max * value), valstep=1, color=color)
slider.valtext.set_text(value)
# slider.drawon = False
def changed(val, cbk, max, slider):
# non linear slider to better control small values
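            # e.g. with max=500 a raw slider position of 50 maps to
            # int(50 ** 2 / 500) = 5, so small values get finer resolution;
            # valinit=np.sqrt(max * value) above is the inverse mapping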
val = int(val ** 2 / max)
slider.valtext.set_text(val)
cbk(val)
slider.on_changed(lambda val, cbk=callback, max=max, s=slider: changed(val, cbk, max, s))
sliders.append(slider)
def delay_update(self, update_cbk):
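        # Debounce: every call cancels any pending timer and starts a new one,
        # so only the last update requested within FILTER_DEBOUNCE ms runs.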
def _delayed_update(self, cbk):
self.timer.stop()
cbk()
# delay actual filtering
if self.fig:
if self.timer:
self.timer.stop()
self.timer = self.fig.canvas.new_timer(interval=self.FILTER_DEBOUNCE)
self.timer.add_callback(lambda self=self: _delayed_update(self, update_cbk))
self.timer.start()
def update_filter_shape(self, filter, shape, center_line):
x_data, new_shape = self.get_filter_shape(filter)
shape.set_ydata(new_shape)
center_line.set_xdata(filter.get_center_freq())
self.updated_artists.extend([
shape,
center_line,
])
def update_signal_and_fft_plot(self, filters_key, time_list, sample_lists, signal_shapes, fft_shapes, shape,
center_line, sample_rate):
# print("update_signal_and_fft_plot", self.filters[filters_key][0].get_center_freq())
Xs, Ys, Zs = sample_lists
signal_shape_x, signal_shape_y, signal_shape_z = signal_shapes
fft_shape_x, fft_shape_y, fft_shape_z = fft_shapes
Xs_filtered = self.test_filters(self.filters[filters_key], time_list, Xs)
Ys_filtered = self.test_filters(self.filters[filters_key], time_list, Ys)
Zs_filtered = self.test_filters(self.filters[filters_key], time_list, Zs)
signal_shape_x.set_ydata(Xs_filtered)
signal_shape_y.set_ydata(Ys_filtered)
signal_shape_z.set_ydata(Zs_filtered)
self.updated_artists.extend([signal_shape_x, signal_shape_y, signal_shape_z])
_freqs_x, _times_x, _stft_x = signal.stft(Xs_filtered, sample_rate, window='hann', nperseg=self.FFT_N)
filtered_fft_x = np.average(np.abs(_stft_x), axis=1)
_freqs_y, _times_y, _stft_y = signal.stft(Ys_filtered, sample_rate, window='hann', nperseg=self.FFT_N)
filtered_fft_y = np.average(np.abs(_stft_y), axis=1)
_freqs_z, _times_z, _stft_z = signal.stft(Zs_filtered, sample_rate, window='hann', nperseg=self.FFT_N)
filtered_fft_z = np.average(np.abs(_stft_z), axis=1)
fft_shape_x.set_ydata(filtered_fft_x)
fft_shape_y.set_ydata(filtered_fft_y)
fft_shape_z.set_ydata(filtered_fft_z)
self.updated_artists.extend([
fft_shape_x, fft_shape_y, fft_shape_z,
shape, center_line,
])
# self.fig.canvas.draw()
def animation_update(self):
updated_artists = self.updated_artists.copy()
# if updated_artists:
# print("animation update")
# reset updated artists
self.updated_artists = []
return updated_artists
def update_filter(self, val, cbk, filter, shape, center_line, filters_key, time_list, sample_lists, signal_shapes,
fft_shapes):
# this callback sets the parameter controlled by the slider
cbk(val)
# print("filter update",val)
# update filter shape and delay fft update
self.update_filter_shape(filter, shape, center_line)
sample_freq = filter.get_sample_freq()
self.delay_update(
lambda self=self: self.update_signal_and_fft_plot(filters_key, time_list, sample_lists, signal_shapes,
fft_shapes, shape, center_line, sample_freq))
def create_filter_control(self, name, filter, rect, max, default, shape, center_line, cbk, filters_key, time_list,
sample_lists, signal_shapes, fft_shapes, filt_color):
self.create_slider(name, rect, max, default, filt_color, lambda val, cbk=cbk, self=self, filter=filter, shape=shape,
center_line=center_line, filters_key=filters_key,
time_list=time_list, sample_list=sample_lists,
signal_shape=signal_shapes, fft_shape=fft_shapes:
self.update_filter(val, cbk, filter, shape, center_line, filters_key,
time_list, sample_list, signal_shape, fft_shape))
def create_controls(self, filters_key, base_rect, padding, ax_fft, time_list, sample_lists, signal_shapes,
fft_shapes):
ax_filter = ax_fft.twinx()
ax_filter.set_navigate(False)
ax_filter.set_yticks([])
num_filters = len(self.filters[filters_key])
for i, filter in enumerate(self.filters[filters_key]):
filt_type = filter.get_type()
filt_color = self.filter_color_map(i / num_filters)
filt_shape, filt_cutoff = self.init_filter_shape(ax_filter, filter, filt_color)
if filt_type == BiquadFilterType.PEAK:
name = "Notch"
else:
name = "LPF"
# control for center freq is common to all filters
self.create_filter_control("{} freq".format(name), filter, base_rect, 500, filter.get_center_freq(),
filt_shape, filt_cutoff,
lambda val, filter=filter: filter.set_center_freq(val),
filters_key, time_list, sample_lists, signal_shapes, fft_shapes, filt_color)
# move down of control height + padding
base_rect[1] -= (base_rect[3] + padding)
if filt_type == BiquadFilterType.PEAK:
self.create_filter_control("{} att (db)".format(name), filter, base_rect, 100, filter.get_attenuation(),
filt_shape, filt_cutoff,
lambda val, filter=filter: filter.set_attenuation(val),
filters_key, time_list, sample_lists, signal_shapes, fft_shapes, filt_color)
base_rect[1] -= (base_rect[3] + padding)
self.create_filter_control("{} band".format(name), filter, base_rect, 300, filter.get_bandwidth(),
filt_shape, filt_cutoff,
lambda val, filter=filter: filter.set_bandwidth(val),
filters_key, time_list, sample_lists, signal_shapes, fft_shapes, filt_color)
base_rect[1] -= (base_rect[3] + padding)
def create_spectrogram(self, data, name, sample_rate):
freqs, times, Sx = signal.spectrogram(np.array(data), fs=sample_rate, window='hanning',
nperseg=self.FFT_N, noverlap=self.FFT_N - self.FFT_N // 10,
detrend=False, scaling='spectrum')
f, ax = plt.subplots(figsize=(4.8, 2.4))
ax.pcolormesh(times, freqs, 10 * np.log10(Sx), cmap='viridis')
ax.set_title(name)
ax.set_ylabel('Frequency (Hz)')
ax.set_xlabel('Time (s)')
def init_plot(self, log_name):
self.fig = plt.figure(figsize=(14, 9))
self.fig.canvas.set_window_title("ArduPilot Filter Test Tool - {}".format(log_name))
self.fig.canvas.draw()
rows = 2
cols = 3
raw_acc_index = 1
fft_acc_index = raw_acc_index + 1
raw_gyr_index = cols + 1
fft_gyr_index = raw_gyr_index + 1
# signal
self.ax_acc = self.fig.add_subplot(rows, cols, raw_acc_index)
self.ax_gyr = self.fig.add_subplot(rows, cols, raw_gyr_index, sharex=self.ax_acc)
accx_filtered, accy_filtered, accz_filtered = self.test_acc_filters()
self.ax_filtered_accx, self.ax_filtered_accy, self.ax_filtered_accz = self.init_signal_plot(self.ax_acc,
self.ACC_t,
self.ACC_x,
self.ACC_y,
self.ACC_z,
accx_filtered,
accy_filtered,
accz_filtered,
"AccX")
gyrx_filtered, gyry_filtered, gyrz_filtered = self.test_gyr_filters()
self.ax_filtered_gyrx, self.ax_filtered_gyry, self.ax_filtered_gyrz = self.init_signal_plot(self.ax_gyr,
self.GYR_t,
self.GYR_x,
self.GYR_y,
self.GYR_z,
gyrx_filtered,
gyry_filtered,
gyrz_filtered,
"GyrX")
# FFT
self.ax_acc_fft = self.fig.add_subplot(rows, cols, fft_acc_index)
self.ax_gyr_fft = self.fig.add_subplot(rows, cols, fft_gyr_index)
self.acc_filtered_fft_ax_x, self.acc_filtered_fft_ax_y, self.acc_filtered_fft_ax_z = self.init_fft(
self.ax_acc_fft, self.ACC_t, self.ACC_x, self.ACC_y, self.ACC_z, self.ACC_freq, self.acc_dt, accx_filtered,
accy_filtered, accz_filtered, "AccX")
self.gyr_filtered_fft_ax_x, self.gyr_filtered_fft_ax_y, self.gyr_filtered_fft_ax_z = self.init_fft(
self.ax_gyr_fft, self.GYR_t, self.GYR_x, self.GYR_y, self.GYR_z, self.GYR_freq, self.gyr_dt, gyrx_filtered,
gyry_filtered, gyrz_filtered, "GyrX")
self.fig.tight_layout()
# TODO add y z
self.create_controls("acc", [0.75, 0.95, 0.2, 0.02], 0.01, self.ax_acc_fft, self.ACC_t,
(self.ACC_x, self.ACC_y, self.ACC_z),
(self.ax_filtered_accx, self.ax_filtered_accy, self.ax_filtered_accz),
(self.acc_filtered_fft_ax_x, self.acc_filtered_fft_ax_y, self.acc_filtered_fft_ax_z))
self.create_controls("gyr", [0.75, 0.45, 0.2, 0.02], 0.01, self.ax_gyr_fft, self.GYR_t,
(self.GYR_x, self.GYR_y, self.GYR_z),
(self.ax_filtered_gyrx, self.ax_filtered_gyry, self.ax_filtered_gyrz),
(self.gyr_filtered_fft_ax_x, self.gyr_filtered_fft_ax_y, self.gyr_filtered_fft_ax_z))
# setup animation for continuous update
global anim
anim = FuncAnimation(self.fig, lambda frame, self=self: self.animation_update(), interval=1, blit=False)
# Work in progress here...
# self.create_spectrogram(self.GYR_x, "GyrX", self.GYR_freq)
# self.create_spectrogram(gyrx_filtered, "GyrX filtered", self.GYR_freq)
# self.create_spectrogram(self.ACC_x, "AccX", self.ACC_freq)
# self.create_spectrogram(accx_filtered, "AccX filtered", self.ACC_freq)
plt.show()
self.print_filter_param_info()
def print_filter_param_info(self):
if len(self.filters["acc"]) > 2 or len(self.filters["gyr"]) > 2:
print("Testing too many filters unsupported from firmware, cannot calculate parameters to set them")
return
print("To have the last filter settings in the graphs set the following parameters:\n")
for f in self.filters["acc"]:
filt_type = f.get_type()
if filt_type == BiquadFilterType.PEAK: # NOTCH
print("INS_NOTCA_ENABLE,", 1)
print("INS_NOTCA_FREQ,", f.get_center_freq())
print("INS_NOTCA_BW,", f.get_bandwidth())
print("INS_NOTCA_ATT,", f.get_attenuation())
else: # LPF
print("INS_ACCEL_FILTER,", f.get_center_freq())
for f in self.filters["gyr"]:
filt_type = f.get_type()
if filt_type == BiquadFilterType.PEAK: # NOTCH
print("INS_NOTCH_ENABLE,", 1)
print("INS_NOTCH_FREQ,", f.get_center_freq())
print("INS_NOTCH_BW,", f.get_bandwidth())
print("INS_NOTCH_ATT,", f.get_attenuation())
else: # LPF
print("INS_GYRO_FILTER,", f.get_center_freq())
print("\n+---------+")
print("| WARNING |")
print("+---------+")
print("Always check the onboard FFT to setup filters, this tool only simulate effects of filtering.")
|
aktech/sympy
|
refs/heads/master
|
sympy/utilities/tests/test_timeutils.py
|
122
|
"""Tests for simple tools for timing functions' execution. """
import sys
from sympy.utilities.timeutils import timed
def test_timed():
result = timed(lambda: 1 + 1, limit=100000)
assert result[0] == 100000 and result[3] == "ns"
result = timed("1 + 1", limit=100000)
assert result[0] == 100000 and result[3] == "ns"
|
tritemio/FRETBursts
|
refs/heads/master
|
fretbursts/fit/test_exp_fitting.py
|
2
|
"""
Unittest for exp_fitting.py
"""
from __future__ import print_function
import pytest
import numpy as np
import scipy.stats as SS
from fretbursts.fit.exp_fitting import expon_fit, expon_fit_cdf, expon_fit_hist
sample_size = 5000
sample_tau = 3.
sample_min = 2.
max_relative_error = 0.05
@pytest.fixture(scope="module")
def sample():
np.random.seed(1)
return SS.expon.rvs(size=sample_size, scale=sample_tau)
def test_expon_fit(sample):
lambda_fit, resid, x_resid, size = expon_fit(sample, s_min=sample_min)
tau_fit = 1./lambda_fit
relative_error = np.abs(tau_fit-sample_tau)/sample_tau
print('\n [expon_fit] Fit (tau): %.2f - Relative error: %.2f %%' % \
(tau_fit, relative_error*100))
assert relative_error < max_relative_error
def test_expon_fit_cdf(sample):
lambda_fit, resid, x_resid, size = expon_fit_cdf(sample, s_min=sample_min)
tau_fit = 1./lambda_fit
relative_error = np.abs(tau_fit-sample_tau)/sample_tau
print('\n [expon_fit_cdf] Fit (tau): %.2f - Relative error: %.2f %%' % \
(tau_fit, relative_error*100))
assert relative_error < max_relative_error
def test_expon_fit_hist(sample):
binw = sample_tau/20.
bins = np.arange(0, sample_tau*6, binw)
lambda_fit, resid, x_resid, size = expon_fit_hist(sample, s_min=sample_min, bins=bins)
tau_fit = 1./lambda_fit
relative_error = np.abs(tau_fit-sample_tau)/sample_tau
print('\n [expon_fit_hist] Fit (tau): %.2f - Relative error: %.2f %%' % \
(tau_fit, relative_error*100))
assert relative_error < max_relative_error
def test_expon_fit_histw(sample):
binw = sample_tau/20.
bins = np.arange(0, sample_tau*6, binw)
lambda_fit, resid, x_resid, size = expon_fit_hist(sample, s_min=sample_min, bins=bins,
weights='hist_counts')
tau_fit = 1./lambda_fit
relative_error = np.abs(tau_fit-sample_tau)/sample_tau
print('\n [expon_fit_hist] Fit (tau): %.2f - Relative error: %.2f %%' % \
(tau_fit, relative_error*100))
assert relative_error < max_relative_error
if __name__ == '__main__':
pytest.main("-x -v -s fretbursts/fit/test_exp_fitting.py")
|
germn/python-for-android
|
refs/heads/master
|
pythonforandroid/recipes/android/src/android/broadcast.py
|
3
|
# -------------------------------------------------------------------
# Broadcast receiver bridge
from jnius import autoclass, PythonJavaClass, java_method
from android.config import JAVA_NAMESPACE, JNI_NAMESPACE
class BroadcastReceiver(object):
class Callback(PythonJavaClass):
__javainterfaces__ = [JNI_NAMESPACE + '/GenericBroadcastReceiverCallback']
__javacontext__ = 'app'
def __init__(self, callback, *args, **kwargs):
self.callback = callback
PythonJavaClass.__init__(self, *args, **kwargs)
@java_method('(Landroid/content/Context;Landroid/content/Intent;)V')
def onReceive(self, context, intent):
self.callback(context, intent)
def __init__(self, callback, actions=None, categories=None):
super().__init__()
self.callback = callback
if not actions and not categories:
raise Exception('You need to define at least actions or categories')
def _expand_partial_name(partial_name):
if '.' in partial_name:
                return partial_name  # It's actually a full dotted name
else:
name = 'ACTION_{}'.format(partial_name.upper())
if not hasattr(Intent, name):
                    raise Exception("The intent {} doesn't exist".format(name))
return getattr(Intent, name)
# resolve actions/categories first
Intent = autoclass('android.content.Intent')
resolved_actions = [_expand_partial_name(x) for x in actions or []]
resolved_categories = [_expand_partial_name(x) for x in categories or []]
# resolve android API
GenericBroadcastReceiver = autoclass(JAVA_NAMESPACE + '.GenericBroadcastReceiver')
IntentFilter = autoclass('android.content.IntentFilter')
HandlerThread = autoclass('android.os.HandlerThread')
# create a thread for handling events from the receiver
self.handlerthread = HandlerThread('handlerthread')
# create a listener
self.listener = BroadcastReceiver.Callback(self.callback)
self.receiver = GenericBroadcastReceiver(self.listener)
self.receiver_filter = IntentFilter()
for x in resolved_actions:
self.receiver_filter.addAction(x)
for x in resolved_categories:
self.receiver_filter.addCategory(x)
def start(self):
Handler = autoclass('android.os.Handler')
self.handlerthread.start()
self.handler = Handler(self.handlerthread.getLooper())
self.context.registerReceiver(
self.receiver, self.receiver_filter, None, self.handler)
def stop(self):
self.context.unregisterReceiver(self.receiver)
self.handlerthread.quit()
@property
def context(self):
from os import environ
if 'PYTHON_SERVICE_ARGUMENT' in environ:
PythonService = autoclass(JAVA_NAMESPACE + '.PythonService')
return PythonService.mService
PythonActivity = autoclass(JAVA_NAMESPACE + '.PythonActivity')
return PythonActivity.mActivity
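# Usage sketch (hypothetical callback; partial action names are expanded to
# Intent.ACTION_* by _expand_partial_name above):
#   def on_broadcast(context, intent):
#       print(intent.getAction())
#   br = BroadcastReceiver(on_broadcast, actions=['battery_changed'])
#   br.start()
#   ...
#   br.stop()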
|
jmartinezchaine/OpenERP
|
refs/heads/master
|
openerp/pychart/afm/__init__.py
|
15
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
__all__ = ["Courier_Oblique", "AvantGarde_BookOblique", "Times_Italic", "Helvetica_Bold", "NewCenturySchlbk_Roman", "Helvetica", "Helvetica_Narrow", "AvantGarde_Demi", "Times_BoldItalic", "Helvetica_Narrow_Bold", "Helvetica_Light", "Bookman_DemiItalic", "Utopia_Regular", "Times_Roman", "Palatino_Italic", "Courier_Bold", "ZapfChancery_MediumItalic", "NewCenturySchlbk_Italic", "NewCenturySchlbk_BoldItalic", "Helvetica_Narrow_BoldOblique", "Courier", "AvantGarde_DemiOblique", "Courier_BoldOblique", "Bookman_LightItalic", "Symbol", "Utopia_Bold", "Times_Bold", "Helvetica_BoldOblique", "Utopia_BoldItalic", "AvantGarde_Book", "Bookman_Demi", "Palatino_Roman", "Bookman_Light", "Utopia_Italic", "NewCenturySchlbk_Bold", "Helvetica_LightOblique", "ZapfDingbats", "Helvetica_Narrow_Oblique", "Helvetica_Oblique", "Palatino_BoldItalic", "Palatino_Bold", "dir"]
|
billyhunt/osf.io
|
refs/heads/develop
|
tests/framework_tests/test_email.py
|
5
|
# -*- coding: utf-8 -*-
import unittest
import smtplib
import mock
from nose.tools import * # flake8: noqa (PEP8 asserts)
import sendgrid
from framework.email.tasks import send_email, _send_with_sendgrid
from website import settings
from tests.base import fake
# Check if local mail server is running
SERVER_RUNNING = True
try:
s = smtplib.SMTP(settings.MAIL_SERVER)
s.quit()
except Exception as err:
SERVER_RUNNING = False
class TestEmail(unittest.TestCase):
@unittest.skipIf(not SERVER_RUNNING,
"Mailserver isn't running. Run \"invoke mailserver\".")
@unittest.skipIf(not settings.USE_EMAIL,
"settings.USE_EMAIL is False")
def test_sending_email(self):
assert_true(send_email("[email protected]", "[email protected]", subject='no subject',
message="<h1>Greetings!</h1>", ttls=False, login=False))
def test_send_with_sendgrid_success(self):
mock_client = mock.MagicMock()
mock_client.send.return_value = 200, 'success'
from_addr, to_addr = fake.email(), fake.email()
category1, category2 = fake.word(), fake.word()
subject = fake.bs()
message = fake.text()
ret = _send_with_sendgrid(
from_addr=from_addr,
to_addr=to_addr,
subject=subject,
message=message,
mimetype='txt',
client=mock_client,
categories=(category1, category2)
)
assert_true(ret)
mock_client.send.assert_called_once()
# First call's argument should be a Mail object with
# the correct configuration
first_call_arg = mock_client.send.call_args[0][0]
assert_is_instance(first_call_arg, sendgrid.Mail)
assert_equal(first_call_arg.from_email, from_addr)
assert_equal(first_call_arg.to[0], to_addr)
assert_equal(first_call_arg.subject, subject)
assert_equal(first_call_arg.text, message)
# Categories are set
assert_equal(first_call_arg.smtpapi.data['category'], (category1, category2))
def test_send_with_sendgrid_failure_returns_false(self):
mock_client = mock.MagicMock()
mock_client.send.return_value = 400, 'failed'
from_addr, to_addr = fake.email(), fake.email()
subject = fake.bs()
message = fake.text()
ret = _send_with_sendgrid(
from_addr=from_addr,
to_addr=to_addr,
subject=subject,
message=message,
mimetype='txt',
client=mock_client
)
assert_false(ret)
if __name__ == '__main__':
unittest.main()
|
michael-ball/sublime-text
|
refs/heads/master
|
sublime-text-3/Packages/Python PEP8 Autoformat/libs/py33/lib2to3/fixes/fix_intern.py
|
69
|
# Copyright 2006 Georg Brandl.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for intern().
intern(s) -> sys.intern(s)"""
# Local imports
from .. import pytree
from .. import fixer_base
from ..fixer_util import Name, Attr, touch_import
class FixIntern(fixer_base.BaseFix):
BM_compatible = True
order = "pre"
PATTERN = """
power< 'intern'
trailer< lpar='('
( not(arglist | argument<any '=' any>) obj=any
| obj=arglist<(not argument<any '=' any>) any ','> )
rpar=')' >
after=any*
>
"""
def transform(self, node, results):
syms = self.syms
obj = results["obj"].clone()
if obj.type == syms.arglist:
newarglist = obj.clone()
else:
newarglist = pytree.Node(syms.arglist, [obj.clone()])
after = results["after"]
if after:
after = [n.clone() for n in after]
new = pytree.Node(syms.power,
Attr(Name("sys"), Name("intern")) +
[pytree.Node(syms.trailer,
[results["lpar"].clone(),
newarglist,
results["rpar"].clone()])] + after)
new.prefix = node.prefix
touch_import(None, 'sys', node)
return new
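# Example (per the PATTERN above; trailing trailers are captured as `after`,
# and touch_import ensures `import sys` is present):
#   intern(s).upper()  ->  sys.intern(s).upper()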
|
BhupeshGupta/frappe
|
refs/heads/develop
|
frappe/installer.py
|
15
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
# called from wnf.py
# lib/wnf.py --install [rootpassword] [dbname] [source]
from __future__ import unicode_literals
import os, json
import frappe
import frappe.database
import getpass
import importlib
from frappe.model.db_schema import DbManager
from frappe.model.sync import sync_for
from frappe.utils.fixtures import sync_fixtures
from frappe.website import render, statics
def install_db(root_login="root", root_password=None, db_name=None, source_sql=None,
admin_password=None, verbose=True, force=0, site_config=None, reinstall=False):
frappe.flags.in_install_db = True
make_conf(db_name, site_config=site_config)
if reinstall:
frappe.connect(db_name=db_name)
dbman = DbManager(frappe.local.db)
dbman.create_database(db_name)
else:
frappe.local.db = make_connection(root_login, root_password)
frappe.local.session = frappe._dict({'user':'Administrator'})
create_database_and_user(force, verbose)
frappe.conf.admin_password = frappe.conf.admin_password or admin_password
frappe.connect(db_name=db_name)
import_db_from_sql(source_sql, verbose)
remove_missing_apps()
create_auth_table()
frappe.flags.in_install_db = False
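# Illustrative call (hypothetical values, matching the signature above):
#   install_db(root_login="root", root_password="secret",
#              db_name="test_site", admin_password="admin")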
def get_current_host():
return frappe.db.sql("select user()")[0][0].split('@')[1]
def create_database_and_user(force, verbose):
db_name = frappe.local.conf.db_name
dbman = DbManager(frappe.local.db)
if force or (db_name not in dbman.get_database_list()):
dbman.delete_user(db_name, get_current_host())
dbman.drop_database(db_name)
else:
raise Exception("Database %s already exists" % (db_name,))
dbman.create_user(db_name, frappe.conf.db_password, get_current_host())
if verbose: print "Created user %s" % db_name
dbman.create_database(db_name)
if verbose: print "Created database %s" % db_name
dbman.grant_all_privileges(db_name, db_name, get_current_host())
dbman.flush_privileges()
if verbose: print "Granted privileges to user %s and database %s" % (db_name, db_name)
# close root connection
frappe.db.close()
def create_auth_table():
frappe.db.sql_ddl("""create table if not exists __Auth (
`user` VARCHAR(180) NOT NULL PRIMARY KEY,
`password` VARCHAR(180) NOT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8""")
def import_db_from_sql(source_sql, verbose):
if verbose: print "Starting database import..."
db_name = frappe.conf.db_name
if not source_sql:
source_sql = os.path.join(os.path.dirname(frappe.__file__), 'data', 'Framework.sql')
DbManager(frappe.local.db).restore_database(db_name, source_sql, db_name, frappe.conf.db_password)
if verbose: print "Imported from database %s" % source_sql
def make_connection(root_login, root_password):
if root_login:
if not root_password:
root_password = frappe.conf.get("root_password") or None
if not root_password:
root_password = getpass.getpass("MySQL root password: ")
return frappe.database.Database(user=root_login, password=root_password)
def install_app(name, verbose=False, set_as_patched=True):
frappe.flags.in_install_app = name
frappe.clear_cache()
app_hooks = frappe.get_hooks(app_name=name)
installed_apps = frappe.get_installed_apps()
if name not in frappe.get_all_apps(with_frappe=True):
raise Exception("App not in apps.txt")
if name in installed_apps:
print "App Already Installed"
frappe.msgprint("App {0} already installed".format(name))
return
if name != "frappe":
frappe.only_for("System Manager")
for before_install in app_hooks.before_install or []:
frappe.get_attr(before_install)()
if name != "frappe":
add_module_defs(name)
sync_for(name, force=True, sync_everything=True, verbose=verbose)
add_to_installed_apps(name)
if set_as_patched:
set_all_patches_as_completed(name)
for after_install in app_hooks.after_install or []:
frappe.get_attr(after_install)()
print "Installing Fixtures..."
sync_fixtures(name)
frappe.flags.in_install_app = False
def add_to_installed_apps(app_name, rebuild_website=True):
installed_apps = frappe.get_installed_apps()
if not app_name in installed_apps:
installed_apps.append(app_name)
frappe.db.set_global("installed_apps", json.dumps(installed_apps))
frappe.db.commit()
if rebuild_website:
render.clear_cache()
statics.sync().start()
frappe.db.commit()
frappe.clear_cache()
def set_all_patches_as_completed(app):
patch_path = os.path.join(frappe.get_pymodule_path(app), "patches.txt")
if os.path.exists(patch_path):
for patch in frappe.get_file_items(patch_path):
frappe.get_doc({
"doctype": "Patch Log",
"patch": patch
}).insert()
frappe.db.commit()
def make_conf(db_name=None, db_password=None, site_config=None):
site = frappe.local.site
make_site_config(db_name, db_password, site_config)
sites_path = frappe.local.sites_path
frappe.destroy()
frappe.init(site, sites_path=sites_path)
def make_site_config(db_name=None, db_password=None, site_config=None):
frappe.create_folder(os.path.join(frappe.local.site_path))
site_file = os.path.join(frappe.local.site_path, "site_config.json")
if not os.path.exists(site_file):
if not (site_config and isinstance(site_config, dict)):
site_config = get_conf_params(db_name, db_password)
with open(site_file, "w") as f:
f.write(json.dumps(site_config, indent=1, sort_keys=True))
def get_conf_params(db_name=None, db_password=None):
if not db_name:
db_name = raw_input("Database Name: ")
if not db_name:
raise Exception("Database Name Required")
if not db_password:
from frappe.utils import random_string
db_password = random_string(16)
return {"db_name": db_name, "db_password": db_password}
def make_site_dirs():
site_public_path = os.path.join(frappe.local.site_path, 'public')
site_private_path = os.path.join(frappe.local.site_path, 'private')
for dir_path in (
os.path.join(site_private_path, 'backups'),
os.path.join(site_public_path, 'files')):
if not os.path.exists(dir_path):
os.makedirs(dir_path)
locks_dir = frappe.get_site_path('locks')
if not os.path.exists(locks_dir):
os.makedirs(locks_dir)
def add_module_defs(app):
modules = frappe.get_module_list(app)
for module in modules:
d = frappe.new_doc("Module Def")
d.app_name = app
d.module_name = module
d.save()
def remove_missing_apps():
apps = ('frappe_subscription',)
installed_apps = frappe.get_installed_apps()
for app in apps:
if app in installed_apps:
try:
importlib.import_module(app)
except ImportError:
installed_apps.remove(app)
frappe.db.set_global("installed_apps", json.dumps(installed_apps))
|
NeCTAR-RC/horizon
|
refs/heads/nectar/stein
|
horizon/test/test_dashboards/cats/kittens/views.py
|
7
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from horizon import views
class IndexView(views.APIView):
# A very simple class-based view...
template_name = 'cats/kittens/index.html'
def get_data(self, request, context, *args, **kwargs):
# Add data to the context here...
return context
|
AltSchool/django-allauth
|
refs/heads/master
|
allauth/account/management/commands/account_unsetmultipleprimaryemails.py
|
10
|
from django.core.management.base import BaseCommand
from django.db.models import Count
from allauth.account.models import EmailAddress
from allauth.account.utils import user_email
from allauth.utils import get_user_model
class Command(BaseCommand):
def handle(self, *args, **options):
for user in self.get_users_with_multiple_primary_email():
self.unprimary_extra_primary_emails(user)
def get_users_with_multiple_primary_email(self):
user_pks = []
for email_address_dict in EmailAddress.objects.filter(
primary=True).values('user').annotate(
Count('user')).filter(user__count__gt=1):
user_pks.append(email_address_dict['user'])
return get_user_model().objects.filter(pk__in=user_pks)
def unprimary_extra_primary_emails(self, user):
primary_email_addresses = EmailAddress.objects.filter(
user=user,
primary=True)
for primary_email_address in primary_email_addresses:
if primary_email_address.email == user_email(user):
break
else:
            # No primary address matched user.email, so the for loop
            # didn't break.
            print(
                "WARNING: Multiple primary without a user.email match for"
                " user pk %s; (tried: %s, using: %s)" % (
                    user.pk,
                    ", ".join([
                        email_address.email
                        for email_address
                        in primary_email_addresses]),
                    primary_email_address))
primary_email_addresses.exclude(
pk=primary_email_address.pk).update(primary=False)
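# Invoked via the management command named after this module:
#   python manage.py account_unsetmultipleprimaryemails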
|
shanot/imp
|
refs/heads/develop
|
modules/core/test/expensive_test_ev_restraint.py
|
2
|
from __future__ import print_function
import IMP
import IMP.test
import IMP.core
import IMP.container
class ExcludedVolumeRestraintTests(IMP.test.TestCase):
"""Tests for excluded volume restraints"""
def _create_rigid_body(self, m, n, r):
ls = IMP.algebra.Sphere3D(IMP.algebra.Vector3D(0, 0, 0), r)
ap = []
for i in range(0, n):
cs = IMP.algebra.Sphere3D(IMP.algebra.get_random_vector_on(ls), r)
d = IMP.core.XYZR.setup_particle(IMP.Particle(m), cs)
ap.append(d)
rbp = IMP.core.RigidBody.setup_particle(IMP.Particle(m), ap)
# rbp.set_log_level(IMP.SILENT)
rbp.set_coordinates_are_optimized(True)
return rbp
def _create_xyzrs(self, m, n, r):
ap = []
for i in range(0, n):
d = IMP.core.XYZR.setup_particle(IMP.Particle(m),
IMP.algebra.Sphere3D(IMP.algebra.Vector3D(0, 0, 0), r))
d.set_coordinates_are_optimized(True)
ap.append(d)
return ap
def _setup_ev_restraint(self):
# IMP.set_log_level(IMP.VERBOSE)
m = IMP.Model()
m.set_log_level(IMP.SILENT)
all = []
rbs = []
xyzrs = []
for i in range(0, 5):
rb = self._create_rigid_body(m, 10, 1)
rbs.append(rb)
all.extend(rb.get_rigid_members())
xyzrs = self._create_xyzrs(m, 10, 1)
all.extend(xyzrs)
allc = IMP.container.ListSingletonContainer(m, all)
r = IMP.core.ExcludedVolumeRestraint(allc, 1)
r.set_log_level(IMP.SILENT)
bb = IMP.algebra.BoundingBox3D(IMP.algebra.Vector3D(0, 0, 0),
IMP.algebra.Vector3D(100, 100, 100))
bbr = IMP.container.SingletonsRestraint(
IMP.core.BoundingBox3DSingletonScore(
IMP.core.HarmonicUpperBound(0, 1),
bb),
allc)
sf = IMP.core.RestraintsScoringFunction([r, bbr])
return (m, r, sf, xyzrs, rbs)
def _setup_movers(self, m, xyzrs, rbs):
mvs = []
for p in xyzrs:
mc = IMP.core.BallMover(m, [p], 1)
mc.set_log_level(IMP.SILENT)
mvs.append(mc)
for p in rbs:
mc = IMP.core.RigidBodyMover(m, IMP.core.RigidBody(p), 1, .1)
mvs.append(mc)
return mvs
def test_ev(self):
"""Testing excluded volume restraint"""
(m, r, sf, xyzrs, rbs) = self._setup_ev_restraint()
print("mc")
o = IMP.core.MonteCarlo(m)
o.set_scoring_function(sf)
mvs = self._setup_movers(m, xyzrs, rbs)
o.set_movers(mvs)
print("opt")
# rely on internal checks
print(o.optimize(1000))
print("inspect", sf.evaluate(False))
def test_evs(self):
"""Testing excluded volume serial restraint"""
(m, r, sf, xyzrs, rbs) = self._setup_ev_restraint()
print("mc")
o = IMP.core.MonteCarlo(m)
o.set_scoring_function(sf)
mvs = self._setup_movers(m, xyzrs, rbs)
sm = IMP.core.SerialMover(mvs)
o.set_movers([sm])
print("opt")
# rely on internal checks
print(o.optimize(1000))
print("inspect", sf.evaluate(False))
if __name__ == '__main__':
IMP.test.main()
|
yoer/hue
|
refs/heads/master
|
desktop/core/ext-py/Django-1.6.10/tests/tablespaces/models.py
|
150
|
from django.db import models
# Since the test database doesn't have tablespaces, it's impossible for Django
# to create the tables for models where db_tablespace is set. To avoid this
# problem, we mark the models as unmanaged, and temporarily revert them to
# managed during each test. We also set them to use the same tables as the
# "reference" models to avoid errors when other tests run 'syncdb'
# (proxy_models_inheritance does).
class ScientistRef(models.Model):
name = models.CharField(max_length=50)
class ArticleRef(models.Model):
title = models.CharField(max_length=50, unique=True)
code = models.CharField(max_length=50, unique=True)
authors = models.ManyToManyField(ScientistRef, related_name='articles_written_set')
reviewers = models.ManyToManyField(ScientistRef, related_name='articles_reviewed_set')
class Scientist(models.Model):
name = models.CharField(max_length=50)
class Meta:
db_table = 'tablespaces_scientistref'
db_tablespace = 'tbl_tbsp'
managed = False
class Article(models.Model):
title = models.CharField(max_length=50, unique=True)
code = models.CharField(max_length=50, unique=True, db_tablespace='idx_tbsp')
authors = models.ManyToManyField(Scientist, related_name='articles_written_set')
reviewers = models.ManyToManyField(Scientist, related_name='articles_reviewed_set', db_tablespace='idx_tbsp')
class Meta:
db_table = 'tablespaces_articleref'
db_tablespace = 'tbl_tbsp'
managed = False
# Also set the tables for automatically created models
Authors = Article._meta.get_field('authors').rel.through
Authors._meta.db_table = 'tablespaces_articleref_authors'
Reviewers = Article._meta.get_field('reviewers').rel.through
Reviewers._meta.db_table = 'tablespaces_articleref_reviewers'
|
ajvpot/CTFd
|
refs/heads/master
|
CTFd/api/v1/helpers/schemas.py
|
2
|
from typing import Container, Dict, Type
from pydantic import BaseModel, create_model
from sqlalchemy.inspection import inspect
from sqlalchemy.orm.properties import ColumnProperty
def sqlalchemy_to_pydantic(
db_model: Type, *, include: Dict[str, type] = None, exclude: Container[str] = None
) -> Type[BaseModel]:
"""
Mostly copied from https://github.com/tiangolo/pydantic-sqlalchemy
"""
if exclude is None:
exclude = []
mapper = inspect(db_model)
fields = {}
for attr in mapper.attrs:
if isinstance(attr, ColumnProperty):
if attr.columns:
column = attr.columns[0]
python_type = column.type.python_type
name = attr.key
if name in exclude:
continue
default = None
if column.default is None and not column.nullable:
default = ...
fields[name] = (python_type, default)
if bool(include):
for name, python_type in include.items():
default = None
fields[name] = (python_type, default)
pydantic_model = create_model(
db_model.__name__, **fields # type: ignore
)
return pydantic_model
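# Usage sketch (hypothetical `Users` model): build a schema from the mapped
# columns, dropping sensitive ones by name:
#   UserSchema = sqlalchemy_to_pydantic(Users, exclude=["password"])
#   UserSchema(id=1, name="alice")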
|
temasek/android_external_chromium_org
|
refs/heads/cm-11.0
|
third_party/protobuf/python/google/protobuf/message.py
|
227
|
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# TODO(robinson): We should just make these methods all "pure-virtual" and move
# all implementation out, into reflection.py for now.
"""Contains an abstract base class for protocol messages."""
__author__ = '[email protected] (Will Robinson)'
class Error(Exception): pass
class DecodeError(Error): pass
class EncodeError(Error): pass
class Message(object):
"""Abstract base class for protocol messages.
Protocol message classes are almost always generated by the protocol
compiler. These generated types subclass Message and implement the methods
shown below.
TODO(robinson): Link to an HTML document here.
TODO(robinson): Document that instances of this class will also
have an Extensions attribute with __getitem__ and __setitem__.
Again, not sure how to best convey this.
TODO(robinson): Document that the class must also have a static
RegisterExtension(extension_field) method.
Not sure how to best express at this point.
"""
# TODO(robinson): Document these fields and methods.
__slots__ = []
DESCRIPTOR = None
def __deepcopy__(self, memo=None):
clone = type(self)()
clone.MergeFrom(self)
return clone
def __eq__(self, other_msg):
"""Recursively compares two messages by value and structure."""
raise NotImplementedError
def __ne__(self, other_msg):
# Can't just say self != other_msg, since that would infinitely recurse. :)
return not self == other_msg
def __hash__(self):
raise TypeError('unhashable object')
def __str__(self):
"""Outputs a human-readable representation of the message."""
raise NotImplementedError
def __unicode__(self):
"""Outputs a human-readable representation of the message."""
raise NotImplementedError
def MergeFrom(self, other_msg):
"""Merges the contents of the specified message into current message.
This method merges the contents of the specified message into the current
message. Singular fields that are set in the specified message overwrite
the corresponding fields in the current message. Repeated fields are
appended. Singular sub-messages and groups are recursively merged.
Args:
other_msg: Message to merge into the current message.
"""
raise NotImplementedError
def CopyFrom(self, other_msg):
"""Copies the content of the specified message into the current message.
The method clears the current message and then merges the specified
message using MergeFrom.
Args:
other_msg: Message to copy into the current one.
"""
if self is other_msg:
return
self.Clear()
self.MergeFrom(other_msg)
def Clear(self):
"""Clears all data that was set in the message."""
raise NotImplementedError
def SetInParent(self):
"""Mark this as present in the parent.
This normally happens automatically when you assign a field of a
sub-message, but sometimes you want to make the sub-message
present while keeping it empty. If you find yourself using this,
you may want to reconsider your design."""
raise NotImplementedError
def IsInitialized(self):
"""Checks if the message is initialized.
Returns:
The method returns True if the message is initialized (i.e. all of its
required fields are set).
"""
raise NotImplementedError
# TODO(robinson): MergeFromString() should probably return None and be
# implemented in terms of a helper that returns the # of bytes read. Our
# deserialization routines would use the helper when recursively
# deserializing, but the end user would almost always just want the no-return
# MergeFromString().
def MergeFromString(self, serialized):
"""Merges serialized protocol buffer data into this message.
When we find a field in |serialized| that is already present
in this message:
- If it's a "repeated" field, we append to the end of our list.
- Else, if it's a scalar, we overwrite our field.
- Else, (it's a nonrepeated composite), we recursively merge
into the existing composite.
TODO(robinson): Document handling of unknown fields.
Args:
serialized: Any object that allows us to call buffer(serialized)
to access a string of bytes using the buffer interface.
TODO(robinson): When we switch to a helper, this will return None.
Returns:
The number of bytes read from |serialized|.
For non-group messages, this will always be len(serialized),
but for messages which are actually groups, this will
generally be less than len(serialized), since we must
stop when we reach an END_GROUP tag. Note that if
we *do* stop because of an END_GROUP tag, the number
of bytes returned does not include the bytes
for the END_GROUP tag information.
"""
raise NotImplementedError
def ParseFromString(self, serialized):
"""Like MergeFromString(), except we clear the object first."""
self.Clear()
self.MergeFromString(serialized)
def SerializeToString(self):
"""Serializes the protocol message to a binary string.
Returns:
A binary string representation of the message if all of the required
fields in the message are set (i.e. the message is initialized).
Raises:
message.EncodeError if the message isn't initialized.
"""
raise NotImplementedError
def SerializePartialToString(self):
"""Serializes the protocol message to a binary string.
This method is similar to SerializeToString but doesn't check if the
message is initialized.
Returns:
A string representation of the partial message.
"""
raise NotImplementedError
# TODO(robinson): Decide whether we like these better
# than auto-generated has_foo() and clear_foo() methods
# on the instances themselves. This way is less consistent
# with C++, but it makes reflection-type access easier and
# reduces the number of magically autogenerated things.
#
# TODO(robinson): Be sure to document (and test) exactly
# which field names are accepted here. Are we case-sensitive?
# What do we do with fields that share names with Python keywords
# like 'lambda' and 'yield'?
#
# nnorwitz says:
# """
# Typically (in python), an underscore is appended to names that are
# keywords. So they would become lambda_ or yield_.
# """
def ListFields(self):
"""Returns a list of (FieldDescriptor, value) tuples for all
fields in the message which are not empty. A singular field is non-empty
if HasField() would return true, and a repeated field is non-empty if
it contains at least one element. The fields are ordered by field
number"""
raise NotImplementedError
def HasField(self, field_name):
"""Checks if a certain field is set for the message. Note if the
field_name is not defined in the message descriptor, ValueError will be
raised."""
raise NotImplementedError
def ClearField(self, field_name):
raise NotImplementedError
def HasExtension(self, extension_handle):
raise NotImplementedError
def ClearExtension(self, extension_handle):
raise NotImplementedError
def ByteSize(self):
"""Returns the serialized size of this message.
Recursively calls ByteSize() on all contained messages.
"""
raise NotImplementedError
def _SetListener(self, message_listener):
"""Internal method used by the protocol message implementation.
Clients should not call this directly.
Sets a listener that this message will call on certain state transitions.
The purpose of this method is to register back-edges from children to
parents at runtime, for the purpose of setting "has" bits and
byte-size-dirty bits in the parent and ancestor objects whenever a child or
descendant object is modified.
If the client wants to disconnect this Message from the object tree, she
explicitly sets callback to None.
If message_listener is None, unregisters any existing listener. Otherwise,
message_listener must implement the MessageListener interface in
internal/message_listener.py, and we discard any listener registered
via a previous _SetListener() call.
"""
raise NotImplementedError
def __getstate__(self):
"""Support the pickle protocol."""
return dict(serialized=self.SerializePartialToString())
def __setstate__(self, state):
"""Support the pickle protocol."""
self.__init__()
self.ParseFromString(state['serialized'])
|
lucychambers/lucychambers.github.io
|
refs/heads/master
|
.bundle/ruby/2.0.0/gems/pygments.rb-0.6.0/vendor/simplejson/simplejson/tests/test_check_circular.py
|
414
|
from unittest import TestCase
import simplejson as json
def default_iterable(obj):
return list(obj)
class TestCheckCircular(TestCase):
def test_circular_dict(self):
dct = {}
dct['a'] = dct
self.assertRaises(ValueError, json.dumps, dct)
def test_circular_list(self):
lst = []
lst.append(lst)
self.assertRaises(ValueError, json.dumps, lst)
def test_circular_composite(self):
dct2 = {}
dct2['a'] = []
dct2['a'].append(dct2)
self.assertRaises(ValueError, json.dumps, dct2)
def test_circular_default(self):
json.dumps([set()], default=default_iterable)
self.assertRaises(TypeError, json.dumps, [set()])
def test_circular_off_default(self):
json.dumps([set()], default=default_iterable, check_circular=False)
self.assertRaises(TypeError, json.dumps, [set()], check_circular=False)
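# --- Illustrative sketch (not part of the original test file) ---
# The tests above pin down simplejson's cycle detection. The trade-off:
# check_circular=False skips the bookkeeping, which is safe (and slightly
# faster) for shared-but-acyclic structures, while a genuinely
# self-referential container would recurse without bound instead of
# raising ValueError.
def _demo_check_circular():
    shared = [1, 2]
    data = [shared, shared]  # the same list twice, but no cycle
    # both modes accept shared references and produce identical output
    assert json.dumps(data) == json.dumps(data, check_circular=False)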
|
rx2130/Leetcode
|
refs/heads/master
|
python/280 Wiggle Sort.py
|
1
|
class Solution(object):
# Op1: O(n log(n))
def wiggleSort(self, nums):
"""
:type nums: List[int]
:rtype: void Do not return anything, modify nums in-place instead.
"""
s = sorted(nums)
l, r = 0, len(s) - 1
i = 0
while i < len(nums):
nums[i] = s[l]
l += 1
i += 2
j = 1
while j < len(nums):
nums[j] = s[r]
r -= 1
j += 2
    def wiggleSort2(self, nums):
        # O(n) single pass; start at 1 so nums[i - 1] never wraps to nums[-1]
        for i in range(1, len(nums)):
            a = nums[i - 1]
            if (i % 2 == 1) == (a > nums[i]):
                nums[i - 1] = nums[i]
                nums[i] = a
def wiggleSort3(self, nums):
for i in range(1, len(nums)):
if (i & 1) == (nums[i - 1] > nums[i]):
nums[i - 1], nums[i] = nums[i], nums[i - 1]
    def wiggleSort4(self, nums):
        # O(n): the same idea spelled out with explicit even/odd comparisons
        n = len(nums)
        for i in range(n - 1):
            if (i % 2 == 0 and nums[i] > nums[i + 1]) or \
                    (i % 2 == 1 and nums[i] < nums[i + 1]):
                nums[i], nums[i + 1] = nums[i + 1], nums[i]
nums = [3, 5, 2, 1, 6, 4]
#nums = [1, 2, 3, 4, 5, 6]
test = Solution()
test.wiggleSort3(nums)
print(nums)
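# --- Illustrative check (not part of the original solution) ---
# A small verifier for the wiggle property nums[0] <= nums[1] >= nums[2] <= ...
# that any of the methods above must produce.
def is_wiggled(nums):
    for i in range(1, len(nums)):
        if i % 2 == 1 and nums[i - 1] > nums[i]:  # odd index: must not descend
            return False
        if i % 2 == 0 and nums[i - 1] < nums[i]:  # even index: must not ascend
            return False
    return True
assert is_wiggled(nums)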
|
Maccimo/intellij-community
|
refs/heads/master
|
plugins/hg4idea/testData/bin/mercurial/streamclone.py
|
88
|
# streamclone.py - streaming clone server support for mercurial
#
# Copyright 2006 Vadim Gelfer <[email protected]>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
import util, error
from i18n import _
from mercurial import store
class StreamException(Exception):
def __init__(self, code):
Exception.__init__(self)
self.code = code
def __str__(self):
return '%i\n' % self.code
# if server supports streaming clone, it advertises "stream"
# capability with value that is version+flags of repo it is serving.
# client only streams if it can read that repo format.
# stream file format is simple.
#
# server writes out line that says how many files, how many total
# bytes. separator is ascii space, byte counts are strings.
#
# then for each file:
#
# server writes out line that says filename, how many bytes in
# file. separator is ascii nul, byte count is string.
#
# server writes out raw file data.
def allowed(ui):
return ui.configbool('server', 'uncompressed', True, untrusted=True)
def stream_out(repo):
'''stream out all metadata files in repository.
writes to file-like object, must support write() and optional flush().'''
if not allowed(repo.ui):
raise StreamException(1)
entries = []
total_bytes = 0
try:
# get consistent snapshot of repo, lock during scan
lock = repo.lock()
try:
repo.ui.debug('scanning\n')
for name, ename, size in repo.store.walk():
entries.append((name, size))
total_bytes += size
finally:
lock.release()
except error.LockError:
raise StreamException(2)
yield '0\n'
repo.ui.debug('%d files, %d bytes to transfer\n' %
(len(entries), total_bytes))
yield '%d %d\n' % (len(entries), total_bytes)
for name, size in entries:
repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
# partially encode name over the wire for backwards compat
yield '%s\0%d\n' % (store.encodedir(name), size)
for chunk in util.filechunkiter(repo.sopener(name), limit=size):
yield chunk
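# --- Illustrative sketch (not part of the original module) ---
# A minimal client-side parser for the stream body documented above (the
# part after the leading status line), shown here over an in-memory string.
# A real client would also run store.decodedir() on each name; that step is
# omitted in this sketch.
def parse_stream(data):
    '''Return a list of (filename, raw bytes) pairs from a stream body.'''
    header, _, rest = data.partition('\n')
    count, total_bytes = map(int, header.split(' '))  # total is informational
    entries = []
    for _ in range(count):
        meta, _, rest = rest.partition('\n')
        name, _, size = meta.partition('\0')
        size = int(size)
        entries.append((name, rest[:size]))
        rest = rest[size:]
    return entries
assert parse_stream('1 5\nfoo\x005\nhello') == [('foo', 'hello')]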
|
indictranstech/fbd_frappe
|
refs/heads/develop
|
frappe/desk/form/__init__.py
|
1829
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
|
eeshangarg/oh-mainline
|
refs/heads/master
|
vendor/packages/distribute/setuptools/tests/server.py
|
62
|
"""Basic http server for tests to simulate PyPI or custom indexes
"""
import urllib2
import sys
from threading import Thread
from BaseHTTPServer import HTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
class IndexServer(HTTPServer):
"""Basic single-threaded http server simulating a package index
You can use this server in unittest like this::
s = IndexServer()
s.start()
index_url = s.base_url() + 'mytestindex'
# do some test requests to the index
# The index files should be located in setuptools/tests/indexes
s.stop()
"""
def __init__(self):
HTTPServer.__init__(self, ('', 0), SimpleHTTPRequestHandler)
self._run = True
def serve(self):
while True:
self.handle_request()
if not self._run: break
def start(self):
self.thread = Thread(target=self.serve)
self.thread.start()
def stop(self):
"""self.shutdown is not supported on python < 2.6"""
self._run = False
try:
if sys.version > '2.6':
urllib2.urlopen('http://127.0.0.1:%s/' % self.server_port,
None, 5)
else:
urllib2.urlopen('http://127.0.0.1:%s/' % self.server_port)
except urllib2.URLError:
pass
self.thread.join()
def base_url(self):
port = self.server_port
return 'http://127.0.0.1:%s/setuptools/tests/indexes/' % port
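# --- Illustrative sketch (not part of the original module) ---
# The class docstring shows inline usage; inside a TestCase the same
# pattern fits naturally into setUp/tearDown, so the server is stopped
# even when an assertion fails:
#
#   import unittest
#
#   class IndexServerExample(unittest.TestCase):
#       def setUp(self):
#           self.server = IndexServer()
#           self.server.start()
#           self.index_url = self.server.base_url() + 'mytestindex'
#       def tearDown(self):
#           self.server.stop()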
|
pozdnyakov/chromium-crosswalk
|
refs/heads/master
|
build/android/pylib/base/sharded_tests_queue.py
|
11
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A module that contains a queue for running sharded tests."""
import multiprocessing
class ShardedTestsQueue(object):
"""A queue for managing pending tests across different runners.
This class should only be used when sharding.
Attributes:
num_devices: an integer; the number of attached Android devices.
tests: a list of tests to be run.
tests_queue: if sharding, a JoinableQueue object that holds tests from
|tests|. Otherwise, a list holding tests.
results_queue: a Queue object to hold TestRunResults objects.
"""
_STOP_SENTINEL = 'STOP' # sentinel value for iter()
def __init__(self, num_devices, tests):
self.num_devices = num_devices
self.tests_queue = multiprocessing.Queue()
for test in tests:
self.tests_queue.put(test)
for _ in xrange(self.num_devices):
self.tests_queue.put(ShardedTestsQueue._STOP_SENTINEL)
def __iter__(self):
"""Returns an iterator with the test cases."""
return iter(self.tests_queue.get, ShardedTestsQueue._STOP_SENTINEL)
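# --- Illustrative sketch (not part of the original module) ---
# Each test-runner process simply iterates the shared queue; because one
# _STOP_SENTINEL was enqueued per device, every consumer's loop ends
# cleanly without extra coordination.
def _example_worker(sharded_queue):
  for test in sharded_queue:  # iteration stops at the STOP sentinel
    pass  # a real runner would execute |test| and report its result here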
|
oursu/Gem_code
|
refs/heads/master
|
inputs/prepare_input_for_SAMNet.py
|
1
|
from optparse import OptionParser
import os
import networkx as nx
import sys
'''
Author: Oana Ursu
'''
def main():
parser=OptionParser()
parser.add_option('--input',dest='input',help='Input')
parser.add_option('--input_type',dest='input_type',help='Input type. Can be phen or DE')
parser.add_option('--PPI',dest='ppi',help='PPI pkl',
default='/nfs/vendata/oursu/oana/Gem_paper/data/interactome/9606.mitab.01192011.uniq_miscore-localirefindex3-20110831.digraphno_UBC,EP300_symbol.pkl')
parser.add_option('--expressed',dest='expressed',help='Expressed genes',default='/nfs/vendata/oursu/oana/paper_analysis/networks/2013-04-30/_FDR_0.05thresh0.3_vehMin_0.5_lfc_0.585_minFPKM_0.1_p_0.05_pseudocounts_exprProtsFPKM_0.1/0.1FPKM.genes')
parser.add_option('--TFgene',dest='TFgene',help='TFgene network')
parser.add_option('--out',dest='out',help='Out name')
opts,args=parser.parse_args()
#Get PPI nodes
if opts.input_type=='phen' or opts.input_type=='TFgene':
ppi=nx.read_gpickle(opts.ppi)
ppi_nodes=ppi.nodes()
#Get expressed
expressed=set()
for line in open(opts.expressed,'r').readlines():
expressed.add(line.strip())
#setup the analysis for TFgene files here
if opts.input_type=='TFgene':
print 'processing tfgene'
        out=open(opts.out,'w')
for line in open(opts.input,'r').readlines():
items=line.strip().split()
tf=items[0]
gene=items[1]
score=items[2]
if tf not in ppi_nodes:
if tf=='RARB':
print 'losing '+tf+' because not in PPI'
continue
#if gene not in expressed:
# continue
if tf not in expressed:
if tf=='RARB':
print 'losing '+tf+' because not expressed'
continue
out.write(tf+'\t'+gene+'\t'+score+'\n')
out.close()
sys.exit()
#Get TFgene
TFgene_genes=set()
if opts.input_type=='DE':
for line in open(opts.TFgene,'r').readlines():
TFgene_genes.add(line.strip().split('\t')[1])
#Read in input
input_genes={}
for line in open(opts.input,'r').readlines():
items=line.strip().split('\t')
if items[0]=='NA':
continue
input_genes[items[0]]={}
input_genes[items[0]]['score']=items[1]
print 'New dataset ---------------------------------'
#Check if expressed
for input_gene in input_genes.keys():
input_gene_split=input_gene.split('_')
#print input_gene_split
for gene in input_gene_split:
#print 'checking '+gene
if gene in expressed:
print gene+' is expressed'
input_genes[input_gene]['expressed']=True
else:
print gene+' is NOT expressed'
#For phen, check if in interactome
if opts.input_type=='phen':
for input_gene in input_genes.keys():
genes=input_gene.split('_')
#Find first gene name that is in interactome and keep it
for gene in genes:
if gene in ppi_nodes:
if 'inNet' not in input_genes[input_gene].keys():
input_genes[input_gene]['inNet']=[]
#ONLY ADD IT IF EXPRESSED TOO
if gene in expressed:
input_genes[input_gene]['inNet'].append(gene)
#For DE genes, check it is in the TFgene network
if opts.input_type=='DE':
for input_gene in input_genes.keys():
if input_gene in TFgene_genes:
if 'inNet' not in input_genes[input_gene].keys():
input_genes[input_gene]['inNet']=[]
input_genes[input_gene]['inNet'].append(input_gene)
#If multiple inputs are in the network under the same name, take the one with the highest score
gene_to_highest_scoring_input={}
for input_gene in input_genes.keys():
if 'inNet' not in input_genes[input_gene].keys():
continue
else:
genes=input_genes[input_gene]['inNet']
for gene in genes:
if gene not in gene_to_highest_scoring_input.keys():
gene_to_highest_scoring_input[gene]=input_gene
if float(input_genes[input_gene]['score'])>float(input_genes[gene_to_highest_scoring_input[gene]]['score']):
gene_to_highest_scoring_input[gene]=input_gene
out=open(opts.out,'w')
#Write down the input to keep
for input_gene in input_genes.keys():
#print input_gene
#print input_genes[input_gene]
written=False
if 'expressed' in input_genes[input_gene].keys():
if input_genes[input_gene]['expressed']==True:
if 'inNet' in input_genes[input_gene].keys():
for gene in input_genes[input_gene]['inNet']:
if not written:
if gene_to_highest_scoring_input[gene]==input_gene:
out.write(gene+'\t'+input_genes[input_gene]['score']+'\n')
written=True
out.close()
if __name__=='__main__':
main()
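# --- Illustrative invocation (not part of the original script) ---
# Example command line with placeholder file names (the defaults above
# point at site-specific NFS paths):
#
#   python prepare_input_for_SAMNet.py \
#       --input phen_scores.txt --input_type phen \
#       --PPI interactome.pkl \
#       --expressed expressed_genes.txt \
#       --out phen_input_for_SAMNet.txt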
|
wrh3c/Maslow-GroundControl
|
refs/heads/master
|
UIElements/viewMenu.py
|
1
|
from kivy.uix.gridlayout import GridLayout
from UIElements.loadDialog import LoadDialog
from UIElements.pageableTextPopup import PageableTextPopup
from kivy.uix.popup import Popup
import re
from DataStructures.makesmithInitFuncs import MakesmithInitFuncs
from os import path
class ViewMenu(GridLayout, MakesmithInitFuncs):
page = 1
def openFile(self):
'''
Open The Pop-up To Load A File
Creates a new pop-up which can be used to open a file.
'''
content = LoadDialog(load=self.load, cancel=self.dismiss_popup)
content.path = path.dirname(self.data.gcodeFile)
if content.path is "":
content.path = path.expanduser('~')
self._popup = Popup(title="Load file", content=content,
size_hint=(0.9, 0.9))
self._popup.open()
def reloadGcode(self):
'''
Trigger a reloading of the gcode file
'''
filePath = self.data.gcodeFile
self.data.gcodeFile = ""
self.data.gcodeFile = filePath
#close the parent popup
self.parentWidget.close()
def load(self, filePath, filename):
'''
Load A File (Any Type)
Takes in a file path (from pop-up) and handles the file appropriately for the given file-type.
'''
#close the open file popup
self.dismiss_popup()
#locate the file
filename = filename[0]
fileExtension = path.splitext(filename)[1]
validExtensions = self.data.config.get('Ground Control Settings', 'validExtensions').replace(" ", "").split(',')
if fileExtension in validExtensions:
self.data.gcodeFile = filename
self.data.config.set('Maslow Settings', 'openFile', str(self.data.gcodeFile))
self.data.config.write()
else:
self.data.message_queue.put("Message: Ground control can only open gcode files with extensions: " + self.data.config.get('Ground Control Settings', 'validExtensions'))
#close the parent popup
self.parentWidget.close()
def resetView(self):
'''
Reset the gcode canvas view. Most of the work is done in the .kv file.
'''
#close the parent popup
self.parentWidget.close()
def show_gcode(self):
'''
Display the currently loaded gcode in a popup
It would be cool if you could run the program stepping through using this popup
'''
popupText = ""
if len(self.data.gcode) is 0:
popupText = "No gcode to display"
else:
if self.page<=1:
line = 0
else:
line = (self.page-1)*447
popupText = "...\n...\n...\n"
if line>len(self.data.gcode):
line = len(self.data.gcode)-447
for lineNum, gcodeLine in enumerate(self.data.gcode):
if lineNum>=line and lineNum<line+447:
popupText = popupText + str(lineNum+1) + ': ' + gcodeLine + "\n"
elif lineNum>=line+447:
popupText = popupText + "...\n...\n...\n"
break
content = PageableTextPopup(cancel = self.dismiss_popup,
prev = self.show_gcode_prev,
next = self.show_gcode_next,
text = popupText)
titleString = 'Gcode File: ' + self.data.gcodeFile +'\nLines: '+str(line+1)+' - '+str(lineNum)+' of '+str(len(self.data.gcode))
self._popup = Popup(title=titleString, content=content,
size_hint=(0.9, 0.9))
self._popup.open()
def show_gcode_next(self,*args):
if (self.page)*447<len(self.data.gcode):
self.page += 1
self._popup.dismiss()
self.show_gcode()
def show_gcode_prev(self,*args):
if self.page > 1:
self.page -= 1
self._popup.dismiss()
self.show_gcode()
def dismiss_popup(self):
'''
Close The Pop-up
'''
self.page = 1
self._popup.dismiss()
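# --- Illustrative sketch (not part of the original module) ---
# show_gcode() above pages through the loaded gcode 447 lines at a time.
# The window for a 1-based page number reduces to simple arithmetic; this
# helper additionally clamps the start at zero, which the original leaves
# to its callers (show_gcode_next refuses to page past the end).
def _page_window(page, total, per_page=447):
    '''Return the (start, end) half-open line range shown for a page.'''
    start = 0 if page <= 1 else (page - 1) * per_page
    if start > total:
        start = max(total - per_page, 0)
    return start, min(start + per_page, total)

assert _page_window(1, 1000) == (0, 447)
assert _page_window(3, 1000) == (894, 1000)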
|
vivekanand1101/neutron
|
refs/heads/master
|
neutron/services/service_base.py
|
16
|
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import importutils
import six
from neutron.api import extensions
from neutron.db import servicetype_db as sdb
from neutron.i18n import _LE, _LI
from neutron.services import provider_configuration as pconf
LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class ServicePluginBase(extensions.PluginInterface):
"""Define base interface for any Advanced Service plugin."""
supported_extension_aliases = []
@abc.abstractmethod
def get_plugin_type(self):
"""Return one of predefined service types.
See neutron/plugins/common/constants.py
"""
pass
@abc.abstractmethod
def get_plugin_description(self):
"""Return string description of the plugin."""
pass
def get_workers(self):
"""Returns a collection of NeutronWorkers"""
return ()
def load_drivers(service_type, plugin):
"""Loads drivers for specific service.
Passes plugin instance to driver's constructor
"""
service_type_manager = sdb.ServiceTypeManager.get_instance()
providers = (service_type_manager.
get_service_providers(
None,
filters={'service_type': [service_type]})
)
if not providers:
msg = (_("No providers specified for '%s' service, exiting") %
service_type)
LOG.error(msg)
raise SystemExit(1)
drivers = {}
for provider in providers:
try:
drivers[provider['name']] = importutils.import_object(
provider['driver'], plugin
)
LOG.debug("Loaded '%(provider)s' provider for service "
"%(service_type)s",
{'provider': provider['driver'],
'service_type': service_type})
except ImportError:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Error loading provider '%(provider)s' for "
"service %(service_type)s"),
{'provider': provider['driver'],
'service_type': service_type})
default_provider = None
try:
provider = service_type_manager.get_default_service_provider(
None, service_type)
default_provider = provider['name']
except pconf.DefaultServiceProviderNotFound:
LOG.info(_LI("Default provider is not specified for service type %s"),
service_type)
return drivers, default_provider
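# --- Illustrative sketch (not part of the original module) ---
# A service plugin would typically call load_drivers() in its constructor
# and keep the returned mapping plus the default provider name. Everything
# below is hypothetical (the plugin class and 'MY_SERVICE' type are
# placeholders):
#
#   class MyServicePlugin(ServicePluginBase):
#       supported_extension_aliases = ['my-service']
#
#       def __init__(self):
#           super(MyServicePlugin, self).__init__()
#           self.drivers, self.default_provider = load_drivers(
#               'MY_SERVICE', self)
#
#       def get_plugin_type(self):
#           return 'MY_SERVICE'
#
#       def get_plugin_description(self):
#           return "Example advanced service plugin"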
|
lucafavatella/intellij-community
|
refs/heads/cli-wip
|
python/helpers/profiler/thrift/transport/TSSLSocket.py
|
9
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
import os
import socket
import ssl
from thrift.transport import TSocket
from thrift.transport.TTransport import TTransportException
class TSSLSocket(TSocket.TSocket):
"""
SSL implementation of client-side TSocket
This class creates outbound sockets wrapped using the
python standard ssl module for encrypted connections.
The protocol used is set using the class variable
SSL_VERSION, which must be one of ssl.PROTOCOL_* and
defaults to ssl.PROTOCOL_TLSv1 for greatest security.
"""
SSL_VERSION = ssl.PROTOCOL_TLSv1
def __init__(self,
host='localhost',
port=9090,
validate=True,
ca_certs=None,
keyfile=None,
certfile=None,
unix_socket=None):
"""Create SSL TSocket
@param validate: Set to False to disable SSL certificate validation
@type validate: bool
@param ca_certs: Filename to the Certificate Authority pem file, possibly a
file downloaded from: http://curl.haxx.se/ca/cacert.pem This is passed to
the ssl_wrap function as the 'ca_certs' parameter.
@type ca_certs: str
@param keyfile: The private key
@type keyfile: str
@param certfile: The cert file
@type certfile: str
Raises an IOError exception if validate is True and the ca_certs file is
None, not present or unreadable.
"""
self.validate = validate
self.is_valid = False
self.peercert = None
if not validate:
self.cert_reqs = ssl.CERT_NONE
else:
self.cert_reqs = ssl.CERT_REQUIRED
self.ca_certs = ca_certs
self.keyfile = keyfile
self.certfile = certfile
if validate:
if ca_certs is None or not os.access(ca_certs, os.R_OK):
raise IOError('Certificate Authority ca_certs file "%s" '
'is not readable, cannot validate SSL '
'certificates.' % (ca_certs))
TSocket.TSocket.__init__(self, host, port, unix_socket)
def open(self):
try:
res0 = self._resolveAddr()
for res in res0:
sock_family, sock_type = res[0:2]
ip_port = res[4]
plain_sock = socket.socket(sock_family, sock_type)
self.handle = ssl.wrap_socket(plain_sock,
ssl_version=self.SSL_VERSION,
do_handshake_on_connect=True,
ca_certs=self.ca_certs,
keyfile=self.keyfile,
certfile=self.certfile,
cert_reqs=self.cert_reqs)
self.handle.settimeout(self._timeout)
try:
self.handle.connect(ip_port)
except socket.error, e:
if res is not res0[-1]:
continue
else:
raise e
break
except socket.error, e:
if self._unix_socket:
message = 'Could not connect to secure socket %s: %s' \
% (self._unix_socket, e)
else:
message = 'Could not connect to %s:%d: %s' % (self.host, self.port, e)
raise TTransportException(type=TTransportException.NOT_OPEN,
message=message)
if self.validate:
self._validate_cert()
def _validate_cert(self):
"""internal method to validate the peer's SSL certificate, and to check the
commonName of the certificate to ensure it matches the hostname we
used to make this connection. Does not support subjectAltName records
in certificates.
raises TTransportException if the certificate fails validation.
"""
cert = self.handle.getpeercert()
self.peercert = cert
if 'subject' not in cert:
raise TTransportException(
type=TTransportException.NOT_OPEN,
message='No SSL certificate found from %s:%s' % (self.host, self.port))
fields = cert['subject']
for field in fields:
# ensure structure we get back is what we expect
if not isinstance(field, tuple):
continue
cert_pair = field[0]
if len(cert_pair) < 2:
continue
cert_key, cert_value = cert_pair[0:2]
if cert_key != 'commonName':
continue
certhost = cert_value
# this check should be performed by some sort of Access Manager
if certhost == self.host:
# success, cert commonName matches desired hostname
self.is_valid = True
return
else:
raise TTransportException(
type=TTransportException.UNKNOWN,
message='Hostname we connected to "%s" doesn\'t match certificate '
'provided commonName "%s"' % (self.host, certhost))
raise TTransportException(
type=TTransportException.UNKNOWN,
message='Could not validate SSL certificate from '
'host "%s". Cert=%s' % (self.host, cert))
class TSSLServerSocket(TSocket.TServerSocket):
"""SSL implementation of TServerSocket
This uses the ssl module's wrap_socket() method to provide SSL
negotiated encryption.
"""
SSL_VERSION = ssl.PROTOCOL_TLSv1
def __init__(self,
host=None,
port=9090,
certfile='cert.pem',
unix_socket=None):
"""Initialize a TSSLServerSocket
@param certfile: filename of the server certificate, defaults to cert.pem
@type certfile: str
@param host: The hostname or IP to bind the listen socket to,
i.e. 'localhost' for only allowing local network connections.
Pass None to bind to all interfaces.
@type host: str
@param port: The port to listen on for inbound connections.
@type port: int
"""
self.setCertfile(certfile)
TSocket.TServerSocket.__init__(self, host, port)
def setCertfile(self, certfile):
"""Set or change the server certificate file used to wrap new connections.
@param certfile: The filename of the server certificate,
i.e. '/etc/certs/server.pem'
@type certfile: str
Raises an IOError exception if the certfile is not present or unreadable.
"""
if not os.access(certfile, os.R_OK):
raise IOError('No such certfile found: %s' % (certfile))
self.certfile = certfile
def accept(self):
plain_client, addr = self.handle.accept()
try:
client = ssl.wrap_socket(plain_client, certfile=self.certfile,
server_side=True, ssl_version=self.SSL_VERSION)
except ssl.SSLError, ssl_exc:
# failed handshake/ssl wrap, close socket to client
plain_client.close()
# raise ssl_exc
# We can't raise the exception, because it kills most TServer derived
# serve() methods.
# Instead, return None, and let the TServer instance deal with it in
# other exception handling. (but TSimpleServer dies anyway)
return None
result = TSocket.TSocket()
result.setHandle(client)
return result
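# --- Illustrative sketch (not part of the original module) ---
# Typical client-side wiring, assuming a thrift-generated service client
# and a CA bundle on disk (MyService and the cert path are placeholders):
#
#   from thrift.transport import TTransport
#   from thrift.protocol import TBinaryProtocol
#
#   sock = TSSLSocket(host='example.com', port=9090,
#                     validate=True, ca_certs='/etc/ssl/cacert.pem')
#   transport = TTransport.TBufferedTransport(sock)
#   protocol = TBinaryProtocol.TBinaryProtocol(transport)
#   # client = MyService.Client(protocol)
#   transport.open()  # connects, performs the SSL handshake, validates cert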
|
TangXT/edx-platform
|
refs/heads/master
|
lms/djangoapps/instructor/tests/test_api.py
|
8
|
# -*- coding: utf-8 -*-
"""
Unit tests for instructor.api methods.
"""
# pylint: disable=E1111
import unittest
import json
import requests
import datetime
import ddt
import random
from urllib import quote
from django.test import TestCase
from nose.tools import raises
from mock import Mock, patch
from django.conf import settings
from django.test.utils import override_settings
from django.core.urlresolvers import reverse
from django.http import HttpRequest, HttpResponse
from django_comment_common.models import FORUM_ROLE_COMMUNITY_TA, Role
from django_comment_common.utils import seed_permissions_roles
from django.core import mail
from django.utils.timezone import utc
from django.test import RequestFactory
from django.contrib.auth.models import User
from courseware.tests.modulestore_config import TEST_DATA_MIXED_MODULESTORE
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from courseware.tests.helpers import LoginEnrollmentTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from student.tests.factories import UserFactory
from courseware.tests.factories import StaffFactory, InstructorFactory, BetaTesterFactory
from student.roles import CourseBetaTesterRole
from microsite_configuration import microsite
from instructor.tests.utils import FakeContentTask, FakeEmail, FakeEmailInfo
from student.models import CourseEnrollment, CourseEnrollmentAllowed
from courseware.models import StudentModule
# modules which are mocked in test cases.
import instructor_task.api
from instructor.access import allow_access
import instructor.views.api
from instructor.views.api import _split_input_list, common_exceptions_400
from instructor_task.api_helper import AlreadyRunningError
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from shoppingcart.models import CourseRegistrationCode, RegistrationCodeRedemption, Order, PaidCourseRegistration, Coupon
from course_modes.models import CourseMode
from .test_tools import msk_from_problem_urlname, get_extended_due
@common_exceptions_400
def view_success(request): # pylint: disable=W0613
"A dummy view for testing that returns a simple HTTP response"
return HttpResponse('success')
@common_exceptions_400
def view_user_doesnotexist(request): # pylint: disable=W0613
"A dummy view that raises a User.DoesNotExist exception"
raise User.DoesNotExist()
@common_exceptions_400
def view_alreadyrunningerror(request): # pylint: disable=W0613
"A dummy view that raises an AlreadyRunningError exception"
raise AlreadyRunningError()
class TestCommonExceptions400(unittest.TestCase):
"""
Testing the common_exceptions_400 decorator.
"""
def setUp(self):
self.request = Mock(spec=HttpRequest)
self.request.META = {}
def test_happy_path(self):
resp = view_success(self.request)
self.assertEqual(resp.status_code, 200)
def test_user_doesnotexist(self):
self.request.is_ajax.return_value = False
resp = view_user_doesnotexist(self.request)
self.assertEqual(resp.status_code, 400)
self.assertIn("User does not exist", resp.content)
def test_user_doesnotexist_ajax(self):
self.request.is_ajax.return_value = True
resp = view_user_doesnotexist(self.request)
self.assertEqual(resp.status_code, 400)
result = json.loads(resp.content)
self.assertIn("User does not exist", result["error"])
def test_alreadyrunningerror(self):
self.request.is_ajax.return_value = False
resp = view_alreadyrunningerror(self.request)
self.assertEqual(resp.status_code, 400)
self.assertIn("Task is already running", resp.content)
def test_alreadyrunningerror_ajax(self):
self.request.is_ajax.return_value = True
resp = view_alreadyrunningerror(self.request)
self.assertEqual(resp.status_code, 400)
result = json.loads(resp.content)
self.assertIn("Task is already running", result["error"])
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
@patch.dict(settings.FEATURES, {'ENABLE_INSTRUCTOR_EMAIL': True, 'REQUIRE_COURSE_EMAIL_AUTH': False})
class TestInstructorAPIDenyLevels(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Ensure that users cannot access endpoints they shouldn't be able to.
"""
def setUp(self):
self.course = CourseFactory.create()
self.user = UserFactory.create()
CourseEnrollment.enroll(self.user, self.course.id)
self.problem_location = msk_from_problem_urlname(
self.course.id,
'robot-some-problem-urlname'
)
self.problem_urlname = self.problem_location.to_deprecated_string()
_module = StudentModule.objects.create(
student=self.user,
course_id=self.course.id,
module_state_key=self.problem_location,
state=json.dumps({'attempts': 10}),
)
# Endpoints that only Staff or Instructors can access
self.staff_level_endpoints = [
('students_update_enrollment', {'identifiers': '[email protected]', 'action': 'enroll'}),
('get_grading_config', {}),
('get_students_features', {}),
('get_distribution', {}),
('get_student_progress_url', {'unique_student_identifier': self.user.username}),
('reset_student_attempts', {'problem_to_reset': self.problem_urlname, 'unique_student_identifier': self.user.email}),
('update_forum_role_membership', {'unique_student_identifier': self.user.email, 'rolename': 'Moderator', 'action': 'allow'}),
('list_forum_members', {'rolename': FORUM_ROLE_COMMUNITY_TA}),
('proxy_legacy_analytics', {'aname': 'ProblemGradeDistribution'}),
('send_email', {'send_to': 'staff', 'subject': 'test', 'message': 'asdf'}),
('list_instructor_tasks', {}),
('list_background_email_tasks', {}),
('list_report_downloads', {}),
('calculate_grades_csv', {}),
]
# Endpoints that only Instructors can access
self.instructor_level_endpoints = [
('bulk_beta_modify_access', {'identifiers': '[email protected]', 'action': 'add'}),
('modify_access', {'unique_student_identifier': self.user.email, 'rolename': 'beta', 'action': 'allow'}),
('list_course_role_members', {'rolename': 'beta'}),
('rescore_problem', {'problem_to_reset': self.problem_urlname, 'unique_student_identifier': self.user.email}),
]
def _access_endpoint(self, endpoint, args, status_code, msg):
"""
Asserts that accessing the given `endpoint` gets a response of `status_code`.
endpoint: string, endpoint for instructor dash API
args: dict, kwargs for `reverse` call
status_code: expected HTTP status code response
msg: message to display if assertion fails.
"""
url = reverse(endpoint, kwargs={'course_id': self.course.id.to_deprecated_string()})
if endpoint in ['send_email']:
response = self.client.post(url, args)
else:
response = self.client.get(url, args)
self.assertEqual(
response.status_code,
status_code,
msg=msg
)
def test_student_level(self):
"""
Ensure that an enrolled student can't access staff or instructor endpoints.
"""
self.client.login(username=self.user.username, password='test')
for endpoint, args in self.staff_level_endpoints:
self._access_endpoint(
endpoint,
args,
403,
"Student should not be allowed to access endpoint " + endpoint
)
for endpoint, args in self.instructor_level_endpoints:
self._access_endpoint(
endpoint,
args,
403,
"Student should not be allowed to access endpoint " + endpoint
)
def test_staff_level(self):
"""
Ensure that a staff member can't access instructor endpoints.
"""
staff_member = StaffFactory(course_key=self.course.id)
CourseEnrollment.enroll(staff_member, self.course.id)
self.client.login(username=staff_member.username, password='test')
# Try to promote to forums admin - not working
# update_forum_role(self.course.id, staff_member, FORUM_ROLE_ADMINISTRATOR, 'allow')
for endpoint, args in self.staff_level_endpoints:
# TODO: make these work
if endpoint in ['update_forum_role_membership', 'proxy_legacy_analytics', 'list_forum_members']:
continue
self._access_endpoint(
endpoint,
args,
200,
"Staff member should be allowed to access endpoint " + endpoint
)
for endpoint, args in self.instructor_level_endpoints:
self._access_endpoint(
endpoint,
args,
403,
"Staff member should not be allowed to access endpoint " + endpoint
)
def test_instructor_level(self):
"""
Ensure that an instructor member can access all endpoints.
"""
inst = InstructorFactory(course_key=self.course.id)
CourseEnrollment.enroll(inst, self.course.id)
self.client.login(username=inst.username, password='test')
for endpoint, args in self.staff_level_endpoints:
# TODO: make these work
if endpoint in ['update_forum_role_membership', 'proxy_legacy_analytics']:
continue
self._access_endpoint(
endpoint,
args,
200,
"Instructor should be allowed to access endpoint " + endpoint
)
for endpoint, args in self.instructor_level_endpoints:
# TODO: make this work
if endpoint in ['rescore_problem']:
continue
self._access_endpoint(
endpoint,
args,
200,
"Instructor should be allowed to access endpoint " + endpoint
)
@ddt.ddt
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestInstructorAPIEnrollment(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test enrollment modification endpoint.
This test does NOT exhaustively test state changes, that is the
job of test_enrollment. This tests the response and action switch.
"""
def setUp(self):
self.request = RequestFactory().request()
self.course = CourseFactory.create()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
self.enrolled_student = UserFactory(username='EnrolledStudent', first_name='Enrolled', last_name='Student')
CourseEnrollment.enroll(
self.enrolled_student,
self.course.id
)
self.notenrolled_student = UserFactory(username='NotEnrolledStudent', first_name='NotEnrolled', last_name='Student')
# Create invited, but not registered, user
cea = CourseEnrollmentAllowed(email='[email protected]', course_id=self.course.id)
cea.save()
self.allowed_email = '[email protected]'
self.notregistered_email = '[email protected]'
self.assertEqual(User.objects.filter(email=self.notregistered_email).count(), 0)
# Email URL values
self.site_name = microsite.get_value(
'SITE_NAME',
settings.SITE_NAME
)
self.about_path = '/courses/MITx/999/Robot_Super_Course/about'
self.course_path = '/courses/MITx/999/Robot_Super_Course/'
        # uncomment to enable printing of large diffs
# from failed assertions in the event of a test failure.
# (comment because pylint C0103)
# self.maxDiff = None
def tearDown(self):
"""
Undo all patches.
"""
patch.stopall()
def test_missing_params(self):
""" Test missing all query parameters. """
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url)
self.assertEqual(response.status_code, 400)
def test_bad_action(self):
""" Test with an invalid action. """
action = 'robot-not-an-action'
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {'identifiers': self.enrolled_student.email, 'action': action})
self.assertEqual(response.status_code, 400)
def test_invalid_email(self):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {'identifiers': 'percivaloctavius@', 'action': 'enroll', 'email_students': False})
self.assertEqual(response.status_code, 200)
# test the response data
expected = {
"action": "enroll",
'auto_enroll': False,
"results": [
{
"identifier": 'percivaloctavius@',
"invalidIdentifier": True,
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_invalid_username(self):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {'identifiers': 'percivaloctavius', 'action': 'enroll', 'email_students': False})
self.assertEqual(response.status_code, 200)
# test the response data
expected = {
"action": "enroll",
'auto_enroll': False,
"results": [
{
"identifier": 'percivaloctavius',
"invalidIdentifier": True,
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_enroll_with_username(self):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {'identifiers': self.notenrolled_student.username, 'action': 'enroll', 'email_students': False})
self.assertEqual(response.status_code, 200)
# test the response data
expected = {
"action": "enroll",
'auto_enroll': False,
"results": [
{
"identifier": self.notenrolled_student.username,
"before": {
"enrollment": False,
"auto_enroll": False,
"user": True,
"allowed": False,
},
"after": {
"enrollment": True,
"auto_enroll": False,
"user": True,
"allowed": False,
}
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_enroll_without_email(self):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {'identifiers': self.notenrolled_student.email, 'action': 'enroll', 'email_students': False})
print "type(self.notenrolled_student.email): {}".format(type(self.notenrolled_student.email))
self.assertEqual(response.status_code, 200)
# test that the user is now enrolled
user = User.objects.get(email=self.notenrolled_student.email)
self.assertTrue(CourseEnrollment.is_enrolled(user, self.course.id))
# test the response data
expected = {
"action": "enroll",
"auto_enroll": False,
"results": [
{
"identifier": self.notenrolled_student.email,
"before": {
"enrollment": False,
"auto_enroll": False,
"user": True,
"allowed": False,
},
"after": {
"enrollment": True,
"auto_enroll": False,
"user": True,
"allowed": False,
}
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 0)
@ddt.data('http', 'https')
def test_enroll_with_email(self, protocol):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
params = {'identifiers': self.notenrolled_student.email, 'action': 'enroll', 'email_students': True}
environ = {'wsgi.url_scheme': protocol}
response = self.client.get(url, params, **environ)
print "type(self.notenrolled_student.email): {}".format(type(self.notenrolled_student.email))
self.assertEqual(response.status_code, 200)
# test that the user is now enrolled
user = User.objects.get(email=self.notenrolled_student.email)
self.assertTrue(CourseEnrollment.is_enrolled(user, self.course.id))
# test the response data
expected = {
"action": "enroll",
"auto_enroll": False,
"results": [
{
"identifier": self.notenrolled_student.email,
"before": {
"enrollment": False,
"auto_enroll": False,
"user": True,
"allowed": False,
},
"after": {
"enrollment": True,
"auto_enroll": False,
"user": True,
"allowed": False,
}
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been enrolled in Robot Super Course'
)
self.assertEqual(
mail.outbox[0].body,
"Dear NotEnrolled Student\n\nYou have been enrolled in Robot Super Course "
"at edx.org by a member of the course staff. "
"The course should now appear on your edx.org dashboard.\n\n"
"To start accessing course materials, please visit "
"{proto}://{site}{course_path}\n\n----\n"
"This email was automatically sent from edx.org to NotEnrolled Student".format(
proto=protocol, site=self.site_name, course_path=self.course_path
)
)
@ddt.data('http', 'https')
def test_enroll_with_email_not_registered(self, protocol):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True}
environ = {'wsgi.url_scheme': protocol}
response = self.client.get(url, params, **environ)
self.assertEqual(response.status_code, 200)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been invited to register for Robot Super Course'
)
self.assertEqual(
mail.outbox[0].body,
"Dear student,\n\nYou have been invited to join Robot Super Course at edx.org by a member of the course staff.\n\n"
"To finish your registration, please visit {proto}://{site}/register and fill out the "
"registration form making sure to use [email protected] in the E-mail field.\n"
"Once you have registered and activated your account, "
"visit {proto}://{site}{about_path} to join the course.\n\n----\n"
"This email was automatically sent from edx.org to [email protected]".format(
proto=protocol, site=self.site_name, about_path=self.about_path
)
)
@ddt.data('http', 'https')
@patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': True})
def test_enroll_email_not_registered_mktgsite(self, protocol):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True}
environ = {'wsgi.url_scheme': protocol}
response = self.client.get(url, params, **environ)
self.assertEqual(response.status_code, 200)
self.assertEqual(
mail.outbox[0].body,
"Dear student,\n\nYou have been invited to join Robot Super Course at edx.org by a member of the course staff.\n\n"
"To finish your registration, please visit {proto}://{site}/register and fill out the registration form "
"making sure to use [email protected] in the E-mail field.\n"
"You can then enroll in Robot Super Course.\n\n----\n"
"This email was automatically sent from edx.org to [email protected]".format(
proto=protocol, site=self.site_name
)
)
@ddt.data('http', 'https')
def test_enroll_with_email_not_registered_autoenroll(self, protocol):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True, 'auto_enroll': True}
environ = {'wsgi.url_scheme': protocol}
response = self.client.get(url, params, **environ)
print "type(self.notregistered_email): {}".format(type(self.notregistered_email))
self.assertEqual(response.status_code, 200)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been invited to register for Robot Super Course'
)
self.assertEqual(
mail.outbox[0].body,
"Dear student,\n\nYou have been invited to join Robot Super Course at edx.org by a member of the course staff.\n\n"
"To finish your registration, please visit {proto}://{site}/register and fill out the registration form "
"making sure to use [email protected] in the E-mail field.\n"
"Once you have registered and activated your account, you will see Robot Super Course listed on your dashboard.\n\n----\n"
"This email was automatically sent from edx.org to [email protected]".format(
proto=protocol, site=self.site_name
)
)
def test_unenroll_without_email(self):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {'identifiers': self.enrolled_student.email, 'action': 'unenroll', 'email_students': False})
print "type(self.enrolled_student.email): {}".format(type(self.enrolled_student.email))
self.assertEqual(response.status_code, 200)
# test that the user is now unenrolled
user = User.objects.get(email=self.enrolled_student.email)
self.assertFalse(CourseEnrollment.is_enrolled(user, self.course.id))
# test the response data
expected = {
"action": "unenroll",
"auto_enroll": False,
"results": [
{
"identifier": self.enrolled_student.email,
"before": {
"enrollment": True,
"auto_enroll": False,
"user": True,
"allowed": False,
},
"after": {
"enrollment": False,
"auto_enroll": False,
"user": True,
"allowed": False,
}
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 0)
def test_unenroll_with_email(self):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {'identifiers': self.enrolled_student.email, 'action': 'unenroll', 'email_students': True})
print "type(self.enrolled_student.email): {}".format(type(self.enrolled_student.email))
self.assertEqual(response.status_code, 200)
# test that the user is now unenrolled
user = User.objects.get(email=self.enrolled_student.email)
self.assertFalse(CourseEnrollment.is_enrolled(user, self.course.id))
# test the response data
expected = {
"action": "unenroll",
"auto_enroll": False,
"results": [
{
"identifier": self.enrolled_student.email,
"before": {
"enrollment": True,
"auto_enroll": False,
"user": True,
"allowed": False,
},
"after": {
"enrollment": False,
"auto_enroll": False,
"user": True,
"allowed": False,
}
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been un-enrolled from Robot Super Course'
)
self.assertEqual(
mail.outbox[0].body,
"Dear Enrolled Student\n\nYou have been un-enrolled in Robot Super Course "
"at edx.org by a member of the course staff. "
"The course will no longer appear on your edx.org dashboard.\n\n"
"Your other courses have not been affected.\n\n----\n"
"This email was automatically sent from edx.org to Enrolled Student"
)
def test_unenroll_with_email_allowed_student(self):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {'identifiers': self.allowed_email, 'action': 'unenroll', 'email_students': True})
print "type(self.allowed_email): {}".format(type(self.allowed_email))
self.assertEqual(response.status_code, 200)
# test the response data
expected = {
"action": "unenroll",
"auto_enroll": False,
"results": [
{
"identifier": self.allowed_email,
"before": {
"enrollment": False,
"auto_enroll": False,
"user": False,
"allowed": True,
},
"after": {
"enrollment": False,
"auto_enroll": False,
"user": False,
"allowed": False,
}
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been un-enrolled from Robot Super Course'
)
self.assertEqual(
mail.outbox[0].body,
"Dear Student,\n\nYou have been un-enrolled from course Robot Super Course by a member of the course staff. "
"Please disregard the invitation previously sent.\n\n----\n"
"This email was automatically sent from edx.org to [email protected]"
)
@ddt.data('http', 'https')
@patch('instructor.enrollment.uses_shib')
def test_enroll_with_email_not_registered_with_shib(self, protocol, mock_uses_shib):
mock_uses_shib.return_value = True
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True}
environ = {'wsgi.url_scheme': protocol}
response = self.client.get(url, params, **environ)
self.assertEqual(response.status_code, 200)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been invited to register for Robot Super Course'
)
self.assertEqual(
mail.outbox[0].body,
"Dear student,\n\nYou have been invited to join Robot Super Course at edx.org by a member of the course staff.\n\n"
"To access the course visit {proto}://{site}{about_path} and register for the course.\n\n----\n"
"This email was automatically sent from edx.org to [email protected]".format(
proto=protocol, site=self.site_name, about_path=self.about_path
)
)
@patch('instructor.enrollment.uses_shib')
@patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': True})
def test_enroll_email_not_registered_shib_mktgsite(self, mock_uses_shib):
# Try with marketing site enabled and shib on
mock_uses_shib.return_value = True
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
# Try with marketing site enabled
with patch.dict('django.conf.settings.FEATURES', {'ENABLE_MKTG_SITE': True}):
response = self.client.get(url, {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True})
self.assertEqual(response.status_code, 200)
self.assertEqual(
mail.outbox[0].body,
"Dear student,\n\nYou have been invited to join Robot Super Course at edx.org by a member of the course staff.\n\n----\n"
"This email was automatically sent from edx.org to [email protected]"
)
@ddt.data('http', 'https')
@patch('instructor.enrollment.uses_shib')
def test_enroll_with_email_not_registered_with_shib_autoenroll(self, protocol, mock_uses_shib):
mock_uses_shib.return_value = True
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True, 'auto_enroll': True}
environ = {'wsgi.url_scheme': protocol}
response = self.client.get(url, params, **environ)
print "type(self.notregistered_email): {}".format(type(self.notregistered_email))
self.assertEqual(response.status_code, 200)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been invited to register for Robot Super Course'
)
self.assertEqual(
mail.outbox[0].body,
"Dear student,\n\nYou have been invited to join Robot Super Course at edx.org by a member of the course staff.\n\n"
"To access the course visit {proto}://{site}{course_path} and login.\n\n----\n"
"This email was automatically sent from edx.org to [email protected]".format(
proto=protocol, site=self.site_name, course_path=self.course_path
)
)
@ddt.ddt
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestInstructorAPIBulkBetaEnrollment(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test bulk beta modify access endpoint.
"""
def setUp(self):
self.course = CourseFactory.create()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
self.beta_tester = BetaTesterFactory(course_key=self.course.id)
CourseEnrollment.enroll(
self.beta_tester,
self.course.id
)
self.assertTrue(CourseBetaTesterRole(self.course.id).has_user(self.beta_tester))
self.notenrolled_student = UserFactory(username='NotEnrolledStudent')
self.notregistered_email = '[email protected]'
self.assertEqual(User.objects.filter(email=self.notregistered_email).count(), 0)
self.request = RequestFactory().request()
# Email URL values
self.site_name = microsite.get_value(
'SITE_NAME',
settings.SITE_NAME
)
self.about_path = '/courses/MITx/999/Robot_Super_Course/about'
self.course_path = '/courses/MITx/999/Robot_Super_Course/'
        # uncomment to enable printing of large diffs
# from failed assertions in the event of a test failure.
# (comment because pylint C0103)
# self.maxDiff = None
def test_missing_params(self):
""" Test missing all query parameters. """
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url)
self.assertEqual(response.status_code, 400)
def test_bad_action(self):
""" Test with an invalid action. """
action = 'robot-not-an-action'
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {'identifiers': self.beta_tester.email, 'action': action})
self.assertEqual(response.status_code, 400)
def add_notenrolled(self, response, identifier):
"""
Test Helper Method (not a test, called by other tests)
Takes a client response from a call to bulk_beta_modify_access with 'email_students': False,
and the student identifier (email or username) given as 'identifiers' in the request.
        Asserts the response returns cleanly, that the student was added as a beta tester, and the
response properly contains their identifier, 'error': False, and 'userDoesNotExist': False.
Additionally asserts no email was sent.
"""
self.assertEqual(response.status_code, 200)
self.assertTrue(CourseBetaTesterRole(self.course.id).has_user(self.notenrolled_student))
# test the response data
expected = {
"action": "add",
"results": [
{
"identifier": identifier,
"error": False,
"userDoesNotExist": False
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 0)
def test_add_notenrolled_email(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': False})
self.add_notenrolled(response, self.notenrolled_student.email)
self.assertFalse(CourseEnrollment.is_enrolled(self.notenrolled_student, self.course.id))
def test_add_notenrolled_email_autoenroll(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': False, 'auto_enroll': True})
self.add_notenrolled(response, self.notenrolled_student.email)
self.assertTrue(CourseEnrollment.is_enrolled(self.notenrolled_student, self.course.id))
def test_add_notenrolled_username(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {'identifiers': self.notenrolled_student.username, 'action': 'add', 'email_students': False})
self.add_notenrolled(response, self.notenrolled_student.username)
self.assertFalse(CourseEnrollment.is_enrolled(self.notenrolled_student, self.course.id))
def test_add_notenrolled_username_autoenroll(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {'identifiers': self.notenrolled_student.username, 'action': 'add', 'email_students': False, 'auto_enroll': True})
self.add_notenrolled(response, self.notenrolled_student.username)
self.assertTrue(CourseEnrollment.is_enrolled(self.notenrolled_student, self.course.id))
@ddt.data('http', 'https')
def test_add_notenrolled_with_email(self, protocol):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
params = {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': True}
environ = {'wsgi.url_scheme': protocol}
response = self.client.get(url, params, **environ)
self.assertEqual(response.status_code, 200)
self.assertTrue(CourseBetaTesterRole(self.course.id).has_user(self.notenrolled_student))
# test the response data
expected = {
"action": "add",
"results": [
{
"identifier": self.notenrolled_student.email,
"error": False,
"userDoesNotExist": False
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been invited to a beta test for Robot Super Course'
)
self.assertEqual(
mail.outbox[0].body,
u"Dear {student_name}\n\nYou have been invited to be a beta tester "
"for Robot Super Course at edx.org by a member of the course staff.\n\n"
"Visit {proto}://{site}{about_path} to join "
"the course and begin the beta test.\n\n----\n"
"This email was automatically sent from edx.org to {student_email}".format(
student_name=self.notenrolled_student.profile.name,
student_email=self.notenrolled_student.email,
proto=protocol,
site=self.site_name,
about_path=self.about_path
)
)
@ddt.data('http', 'https')
def test_add_notenrolled_with_email_autoenroll(self, protocol):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
params = {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': True, 'auto_enroll': True}
environ = {'wsgi.url_scheme': protocol}
response = self.client.get(url, params, **environ)
self.assertEqual(response.status_code, 200)
self.assertTrue(CourseBetaTesterRole(self.course.id).has_user(self.notenrolled_student))
# test the response data
expected = {
"action": "add",
"results": [
{
"identifier": self.notenrolled_student.email,
"error": False,
"userDoesNotExist": False
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been invited to a beta test for Robot Super Course'
)
self.assertEqual(
mail.outbox[0].body,
u"Dear {student_name}\n\nYou have been invited to be a beta tester "
"for Robot Super Course at edx.org by a member of the course staff.\n\n"
"To start accessing course materials, please visit "
"{proto}://{site}{course_path}\n\n----\n"
"This email was automatically sent from edx.org to {student_email}".format(
student_name=self.notenrolled_student.profile.name,
student_email=self.notenrolled_student.email,
proto=protocol,
site=self.site_name,
course_path=self.course_path
)
)
@patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': True})
def test_add_notenrolled_email_mktgsite(self):
# Try with marketing site enabled
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': True})
self.assertEqual(response.status_code, 200)
self.assertEqual(
mail.outbox[0].body,
u"Dear {0}\n\nYou have been invited to be a beta tester "
"for Robot Super Course at edx.org by a member of the course staff.\n\n"
"Visit edx.org to enroll in the course and begin the beta test.\n\n----\n"
"This email was automatically sent from edx.org to {1}".format(
self.notenrolled_student.profile.name,
self.notenrolled_student.email,
)
)
def test_enroll_with_email_not_registered(self):
# User doesn't exist
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {'identifiers': self.notregistered_email, 'action': 'add', 'email_students': True})
self.assertEqual(response.status_code, 200)
# test the response data
expected = {
"action": "add",
"results": [
{
"identifier": self.notregistered_email,
"error": True,
"userDoesNotExist": True
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 0)
def test_remove_without_email(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {'identifiers': self.beta_tester.email, 'action': 'remove', 'email_students': False})
self.assertEqual(response.status_code, 200)
        # Work around a caching bug which supposedly can't happen in prod: this
        # instance is not the same object as the one the view used, so its cached
        # roles were never cleared.
if hasattr(self.beta_tester, '_roles'):
del self.beta_tester._roles
self.assertFalse(CourseBetaTesterRole(self.course.id).has_user(self.beta_tester))
# test the response data
expected = {
"action": "remove",
"results": [
{
"identifier": self.beta_tester.email,
"error": False,
"userDoesNotExist": False
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 0)
def test_remove_with_email(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {'identifiers': self.beta_tester.email, 'action': 'remove', 'email_students': True})
self.assertEqual(response.status_code, 200)
        # Work around a caching bug which supposedly can't happen in prod: this
        # instance is not the same object as the one the view used, so its cached
        # roles were never cleared.
if hasattr(self.beta_tester, '_roles'):
del self.beta_tester._roles
self.assertFalse(CourseBetaTesterRole(self.course.id).has_user(self.beta_tester))
# test the response data
expected = {
"action": "remove",
"results": [
{
"identifier": self.beta_tester.email,
"error": False,
"userDoesNotExist": False
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been removed from a beta test for Robot Super Course'
)
self.assertEqual(
mail.outbox[0].body,
"Dear {full_name}\n\nYou have been removed as a beta tester for "
"Robot Super Course at edx.org by a member of the course staff. "
"The course will remain on your dashboard, but you will no longer "
"be part of the beta testing group.\n\n"
"Your other courses have not been affected.\n\n----\n"
"This email was automatically sent from edx.org to {email_address}".format(
full_name=self.beta_tester.profile.name,
email_address=self.beta_tester.email
)
)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestInstructorAPILevelsAccess(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test endpoints whereby instructors can change permissions
of other users.
    This test does NOT check whether the actions had an effect on the
    database; that is the job of test_access.
    This tests the response and the action switch.
    modify_access does not yet return a very meaningful response, so
    only the status code is tested.
"""
def setUp(self):
self.course = CourseFactory.create()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
self.other_instructor = InstructorFactory(course_key=self.course.id)
self.other_staff = StaffFactory(course_key=self.course.id)
self.other_user = UserFactory()
def test_modify_access_noparams(self):
""" Test missing all query parameters. """
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url)
self.assertEqual(response.status_code, 400)
def test_modify_access_bad_action(self):
""" Test with an invalid action parameter. """
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'unique_student_identifier': self.other_staff.email,
'rolename': 'staff',
'action': 'robot-not-an-action',
})
self.assertEqual(response.status_code, 400)
def test_modify_access_bad_role(self):
""" Test with an invalid action parameter. """
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'unique_student_identifier': self.other_staff.email,
'rolename': 'robot-not-a-roll',
'action': 'revoke',
})
self.assertEqual(response.status_code, 400)
def test_modify_access_allow(self):
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'unique_student_identifier': self.other_user.email,
'rolename': 'staff',
'action': 'allow',
})
self.assertEqual(response.status_code, 200)
def test_modify_access_allow_with_uname(self):
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'unique_student_identifier': self.other_instructor.username,
'rolename': 'staff',
'action': 'allow',
})
self.assertEqual(response.status_code, 200)
def test_modify_access_revoke(self):
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'unique_student_identifier': self.other_staff.email,
'rolename': 'staff',
'action': 'revoke',
})
self.assertEqual(response.status_code, 200)
def test_modify_access_revoke_with_username(self):
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'unique_student_identifier': self.other_staff.username,
'rolename': 'staff',
'action': 'revoke',
})
self.assertEqual(response.status_code, 200)
def test_modify_access_with_fake_user(self):
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'unique_student_identifier': 'GandalfTheGrey',
'rolename': 'staff',
'action': 'revoke',
})
self.assertEqual(response.status_code, 200)
expected = {
'unique_student_identifier': 'GandalfTheGrey',
'userDoesNotExist': True,
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_modify_access_with_inactive_user(self):
self.other_user.is_active = False
self.other_user.save() # pylint: disable=no-member
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'unique_student_identifier': self.other_user.username,
'rolename': 'beta',
'action': 'allow',
})
self.assertEqual(response.status_code, 200)
expected = {
'unique_student_identifier': self.other_user.username,
'inactiveUser': True,
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_modify_access_revoke_not_allowed(self):
""" Test revoking access that a user does not have. """
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'unique_student_identifier': self.other_staff.email,
'rolename': 'instructor',
'action': 'revoke',
})
self.assertEqual(response.status_code, 200)
def test_modify_access_revoke_self(self):
"""
        Test that an instructor cannot remove instructor privileges from themselves.
"""
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'unique_student_identifier': self.instructor.email,
'rolename': 'instructor',
'action': 'revoke',
})
self.assertEqual(response.status_code, 200)
# check response content
expected = {
'unique_student_identifier': self.instructor.username,
'rolename': 'instructor',
'action': 'revoke',
'removingSelfAsInstructor': True,
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_list_course_role_members_noparams(self):
""" Test missing all query parameters. """
url = reverse('list_course_role_members', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url)
self.assertEqual(response.status_code, 400)
def test_list_course_role_members_bad_rolename(self):
""" Test with an invalid rolename parameter. """
url = reverse('list_course_role_members', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'rolename': 'robot-not-a-rolename',
})
self.assertEqual(response.status_code, 400)
def test_list_course_role_members_staff(self):
url = reverse('list_course_role_members', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'rolename': 'staff',
})
self.assertEqual(response.status_code, 200)
# check response content
expected = {
'course_id': self.course.id.to_deprecated_string(),
'staff': [
{
'username': self.other_staff.username,
'email': self.other_staff.email,
'first_name': self.other_staff.first_name,
'last_name': self.other_staff.last_name,
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_list_course_role_members_beta(self):
url = reverse('list_course_role_members', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'rolename': 'beta',
})
self.assertEqual(response.status_code, 200)
# check response content
expected = {
'course_id': self.course.id.to_deprecated_string(),
'beta': []
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_update_forum_role_membership(self):
"""
Test update forum role membership with user's email and username.
"""
# Seed forum roles for course.
seed_permissions_roles(self.course.id)
# Test add discussion admin with email.
self.assert_update_forum_role_membership(self.other_user.email, "Administrator", "allow")
# Test revoke discussion admin with email.
self.assert_update_forum_role_membership(self.other_user.email, "Administrator", "revoke")
# Test add discussion moderator with username.
self.assert_update_forum_role_membership(self.other_user.username, "Moderator", "allow")
# Test revoke discussion moderator with username.
self.assert_update_forum_role_membership(self.other_user.username, "Moderator", "revoke")
# Test add discussion community TA with email.
self.assert_update_forum_role_membership(self.other_user.email, "Community TA", "allow")
# Test revoke discussion community TA with username.
self.assert_update_forum_role_membership(self.other_user.username, "Community TA", "revoke")
def assert_update_forum_role_membership(self, unique_student_identifier, rolename, action):
"""
        Update the forum role membership for the given unique_student_identifier,
        rolename and action, and verify the resulting role assignment.
"""
url = reverse('update_forum_role_membership', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(
url,
{
'unique_student_identifier': unique_student_identifier,
'rolename': rolename,
'action': action,
}
)
# Status code should be 200.
self.assertEqual(response.status_code, 200)
user_roles = self.other_user.roles.filter(course_id=self.course.id).values_list("name", flat=True)
if action == 'allow':
self.assertIn(rolename, user_roles)
elif action == 'revoke':
self.assertNotIn(rolename, user_roles)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestInstructorAPILevelsDataDump(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test endpoints that show data without side effects.
"""
def setUp(self):
self.course = CourseFactory.create()
self.course_mode = CourseMode(course_id=self.course.id,
mode_slug="honor",
mode_display_name="honor cert",
min_price=40)
self.course_mode.save()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
self.cart = Order.get_cart_for_user(self.instructor)
self.coupon_code = 'abcde'
self.coupon = Coupon(code=self.coupon_code, description='testing code', course_id=self.course.id,
percentage_discount=10, created_by=self.instructor, is_active=True)
self.coupon.save()
self.students = [UserFactory() for _ in xrange(6)]
for student in self.students:
CourseEnrollment.enroll(student, self.course.id)
def test_get_ecommerce_purchase_features_csv(self):
"""
Test that the response from get_purchase_transaction is in csv format.
"""
PaidCourseRegistration.add_to_order(self.cart, self.course.id)
self.cart.purchase(first='FirstNameTesting123', street1='StreetTesting123')
url = reverse('get_purchase_transaction', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url + '/csv', {})
self.assertEqual(response['Content-Type'], 'text/csv')
def test_get_ecommerce_purchase_features_with_coupon_info(self):
"""
        Test that a minimum set of information is formatted
        correctly in the response to get_purchase_transaction.
"""
PaidCourseRegistration.add_to_order(self.cart, self.course.id)
url = reverse('get_purchase_transaction', kwargs={'course_id': self.course.id.to_deprecated_string()})
# using coupon code
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code})
self.assertEqual(resp.status_code, 200)
self.cart.purchase(first='FirstNameTesting123', street1='StreetTesting123')
response = self.client.get(url, {})
res_json = json.loads(response.content)
self.assertIn('students', res_json)
for res in res_json['students']:
self.validate_purchased_transaction_response(res, self.cart, self.instructor, self.coupon_code)
def test_get_ecommerce_purchases_features_without_coupon_info(self):
"""
        Test that a minimum set of information is formatted
        correctly in the response to get_purchase_transaction.
"""
url = reverse('get_purchase_transaction', kwargs={'course_id': self.course.id.to_deprecated_string()})
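        # The generator expression below yields two independent empty lists;
        # a plain `[[]] * 2` would alias the same list object twice.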
carts, instructors = ([] for i in range(2))
# purchasing the course by different users
for _ in xrange(3):
test_instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=test_instructor.username, password='test')
cart = Order.get_cart_for_user(test_instructor)
carts.append(cart)
instructors.append(test_instructor)
PaidCourseRegistration.add_to_order(cart, self.course.id)
cart.purchase(first='FirstNameTesting123', street1='StreetTesting123')
response = self.client.get(url, {})
res_json = json.loads(response.content)
self.assertIn('students', res_json)
for res, i in zip(res_json['students'], xrange(3)):
self.validate_purchased_transaction_response(res, carts[i], instructors[i], 'None')
def validate_purchased_transaction_response(self, res, cart, user, code):
"""
        Validate purchased transaction attribute values against the response object
"""
item = cart.orderitem_set.all().select_subclasses()[0]
self.assertEqual(res['coupon_code'], code)
self.assertEqual(res['username'], user.username)
self.assertEqual(res['email'], user.email)
self.assertEqual(res['list_price'], item.list_price)
self.assertEqual(res['unit_cost'], item.unit_cost)
self.assertEqual(res['order_id'], cart.id)
self.assertEqual(res['orderitem_id'], item.id)
def test_get_students_features(self):
"""
        Test that a minimum set of information is formatted
        correctly in the response to get_students_features.
"""
url = reverse('get_students_features', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {})
res_json = json.loads(response.content)
self.assertIn('students', res_json)
for student in self.students:
student_json = [
x for x in res_json['students']
if x['username'] == student.username
][0]
self.assertEqual(student_json['username'], student.username)
self.assertEqual(student_json['email'], student.email)
@patch.object(instructor.views.api, 'anonymous_id_for_user', Mock(return_value='42'))
@patch.object(instructor.views.api, 'unique_id_for_user', Mock(return_value='41'))
def test_get_anon_ids(self):
"""
Test the CSV output for the anonymized user ids.
"""
url = reverse('get_anon_ids', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {})
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
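        # setUp enrolled six students after creating the instructor, so their
        # auto-incremented user ids are expected to run from 2 through 7.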
self.assertTrue(body.startswith(
'"User ID","Anonymized User ID","Course Specific Anonymized User ID"'
'\n"2","41","42"\n'
))
self.assertTrue(body.endswith('"7","41","42"\n'))
def test_list_report_downloads(self):
url = reverse('list_report_downloads', kwargs={'course_id': self.course.id.to_deprecated_string()})
with patch('instructor_task.models.LocalFSReportStore.links_for') as mock_links_for:
mock_links_for.return_value = [
('mock_file_name_1', 'https://1.mock.url'),
('mock_file_name_2', 'https://2.mock.url'),
]
response = self.client.get(url, {})
expected_response = {
"downloads": [
{
"url": "https://1.mock.url",
"link": "<a href=\"https://1.mock.url\">mock_file_name_1</a>",
"name": "mock_file_name_1"
},
{
"url": "https://2.mock.url",
"link": "<a href=\"https://2.mock.url\">mock_file_name_2</a>",
"name": "mock_file_name_2"
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected_response)
def test_calculate_grades_csv_success(self):
url = reverse('calculate_grades_csv', kwargs={'course_id': self.course.id.to_deprecated_string()})
with patch('instructor_task.api.submit_calculate_grades_csv') as mock_cal_grades:
mock_cal_grades.return_value = True
response = self.client.get(url, {})
success_status = "Your grade report is being generated! You can view the status of the generation task in the 'Pending Instructor Tasks' section."
self.assertIn(success_status, response.content)
def test_calculate_grades_csv_already_running(self):
url = reverse('calculate_grades_csv', kwargs={'course_id': self.course.id.to_deprecated_string()})
with patch('instructor_task.api.submit_calculate_grades_csv') as mock_cal_grades:
mock_cal_grades.side_effect = AlreadyRunningError()
response = self.client.get(url, {})
already_running_status = "A grade report generation task is already in progress. Check the 'Pending Instructor Tasks' table for the status of the task. When completed, the report will be available for download in the table below."
self.assertIn(already_running_status, response.content)
def test_get_students_features_csv(self):
"""
        Test that the response from get_students_features is in csv format.
"""
url = reverse('get_students_features', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url + '/csv', {})
self.assertEqual(response['Content-Type'], 'text/csv')
def test_get_distribution_no_feature(self):
"""
Test that get_distribution lists available features
when supplied no feature parameter.
"""
url = reverse('get_distribution', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
res_json = json.loads(response.content)
self.assertEqual(type(res_json['available_features']), list)
url = reverse('get_distribution', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url + u'?feature=')
self.assertEqual(response.status_code, 200)
res_json = json.loads(response.content)
self.assertEqual(type(res_json['available_features']), list)
def test_get_distribution_unavailable_feature(self):
"""
Test that get_distribution fails gracefully with
an unavailable feature.
"""
url = reverse('get_distribution', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {'feature': 'robot-not-a-real-feature'})
self.assertEqual(response.status_code, 400)
def test_get_distribution_gender(self):
"""
        Test that get_distribution returns the gender distribution
        for the enrolled students.
"""
url = reverse('get_distribution', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {'feature': 'gender'})
self.assertEqual(response.status_code, 200)
res_json = json.loads(response.content)
self.assertEqual(res_json['feature_results']['data']['m'], 6)
self.assertEqual(res_json['feature_results']['choices_display_names']['m'], 'Male')
self.assertEqual(res_json['feature_results']['data']['no_data'], 0)
self.assertEqual(res_json['feature_results']['choices_display_names']['no_data'], 'No Data')
def test_get_student_progress_url(self):
""" Test that progress_url is in the successful response. """
url = reverse('get_student_progress_url', kwargs={'course_id': self.course.id.to_deprecated_string()})
url += "?unique_student_identifier={}".format(
quote(self.students[0].email.encode("utf-8"))
)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
res_json = json.loads(response.content)
self.assertIn('progress_url', res_json)
def test_get_student_progress_url_from_uname(self):
""" Test that progress_url is in the successful response. """
url = reverse('get_student_progress_url', kwargs={'course_id': self.course.id.to_deprecated_string()})
url += "?unique_student_identifier={}".format(
quote(self.students[0].username.encode("utf-8"))
)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
res_json = json.loads(response.content)
self.assertIn('progress_url', res_json)
def test_get_student_progress_url_noparams(self):
""" Test that the endpoint 404's without the required query params. """
url = reverse('get_student_progress_url', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url)
self.assertEqual(response.status_code, 400)
def test_get_student_progress_url_nostudent(self):
""" Test that the endpoint 400's when requesting an unknown email. """
url = reverse('get_student_progress_url', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url)
self.assertEqual(response.status_code, 400)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestInstructorAPIRegradeTask(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test endpoints whereby instructors can change student grades.
This includes resetting attempts and starting rescore tasks.
    This test does NOT check whether the actions had an effect on the
    database; that is the job of the task tests and test_enrollment.
"""
def setUp(self):
self.course = CourseFactory.create()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
self.student = UserFactory()
CourseEnrollment.enroll(self.student, self.course.id)
self.problem_location = msk_from_problem_urlname(
self.course.id,
'robot-some-problem-urlname'
)
self.problem_urlname = self.problem_location.to_deprecated_string()
self.module_to_reset = StudentModule.objects.create(
student=self.student,
course_id=self.course.id,
module_state_key=self.problem_location,
state=json.dumps({'attempts': 10}),
)
    def test_reset_student_attempts_delete_all(self):
        """ Make sure no one can delete all students' state on a problem. """
url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'problem_to_reset': self.problem_urlname,
'all_students': True,
'delete_module': True,
})
self.assertEqual(response.status_code, 400)
def test_reset_student_attempts_single(self):
""" Test reset single student attempts. """
url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'problem_to_reset': self.problem_urlname,
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 200)
# make sure problem attempts have been reset.
changed_module = StudentModule.objects.get(pk=self.module_to_reset.pk)
self.assertEqual(
json.loads(changed_module.state)['attempts'],
0
)
# mock out the function which should be called to execute the action.
@patch.object(instructor_task.api, 'submit_reset_problem_attempts_for_all_students')
def test_reset_student_attempts_all(self, act):
""" Test reset all student attempts. """
url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'problem_to_reset': self.problem_urlname,
'all_students': True,
})
self.assertEqual(response.status_code, 200)
self.assertTrue(act.called)
def test_reset_student_attempts_missingmodule(self):
""" Test reset for non-existant problem. """
url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'problem_to_reset': 'robot-not-a-real-module',
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 400)
def test_reset_student_attempts_delete(self):
""" Test delete single student state. """
url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'problem_to_reset': self.problem_urlname,
'unique_student_identifier': self.student.email,
'delete_module': True,
})
self.assertEqual(response.status_code, 200)
# make sure the module has been deleted
self.assertEqual(
StudentModule.objects.filter(
student=self.module_to_reset.student,
course_id=self.module_to_reset.course_id,
# module_id=self.module_to_reset.module_id,
).count(),
0
)
def test_reset_student_attempts_nonsense(self):
""" Test failure with both unique_student_identifier and all_students. """
url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'problem_to_reset': self.problem_urlname,
'unique_student_identifier': self.student.email,
'all_students': True,
})
self.assertEqual(response.status_code, 400)
@patch.object(instructor_task.api, 'submit_rescore_problem_for_student')
def test_rescore_problem_single(self, act):
""" Test rescoring of a single student. """
url = reverse('rescore_problem', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'problem_to_reset': self.problem_urlname,
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 200)
self.assertTrue(act.called)
@patch.object(instructor_task.api, 'submit_rescore_problem_for_student')
def test_rescore_problem_single_from_uname(self, act):
""" Test rescoring of a single student. """
url = reverse('rescore_problem', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'problem_to_reset': self.problem_urlname,
'unique_student_identifier': self.student.username,
})
self.assertEqual(response.status_code, 200)
self.assertTrue(act.called)
@patch.object(instructor_task.api, 'submit_rescore_problem_for_all_students')
def test_rescore_problem_all(self, act):
""" Test rescoring for all students. """
url = reverse('rescore_problem', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'problem_to_reset': self.problem_urlname,
'all_students': True,
})
self.assertEqual(response.status_code, 200)
self.assertTrue(act.called)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
@patch.dict(settings.FEATURES, {'ENABLE_INSTRUCTOR_EMAIL': True, 'REQUIRE_COURSE_EMAIL_AUTH': False})
class TestInstructorSendEmail(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Checks that only instructors have access to email endpoints, and that
these endpoints are only accessible with courses that actually exist,
only with valid email messages.
"""
def setUp(self):
self.course = CourseFactory.create()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
test_subject = u'\u1234 test subject'
test_message = u'\u6824 test message'
self.full_test_message = {
'send_to': 'staff',
'subject': test_subject,
'message': test_message,
}
def test_send_email_as_logged_in_instructor(self):
url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, self.full_test_message)
self.assertEqual(response.status_code, 200)
def test_send_email_but_not_logged_in(self):
self.client.logout()
url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, self.full_test_message)
self.assertEqual(response.status_code, 403)
def test_send_email_but_not_staff(self):
self.client.logout()
student = UserFactory()
self.client.login(username=student.username, password='test')
url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, self.full_test_message)
self.assertEqual(response.status_code, 403)
def test_send_email_but_course_not_exist(self):
url = reverse('send_email', kwargs={'course_id': 'GarbageCourse/DNE/NoTerm'})
response = self.client.post(url, self.full_test_message)
self.assertNotEqual(response.status_code, 200)
def test_send_email_no_sendto(self):
url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {
'subject': 'test subject',
'message': 'test message',
})
self.assertEqual(response.status_code, 400)
def test_send_email_no_subject(self):
url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {
'send_to': 'staff',
'message': 'test message',
})
self.assertEqual(response.status_code, 400)
def test_send_email_no_message(self):
url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {
'send_to': 'staff',
'subject': 'test subject',
})
self.assertEqual(response.status_code, 400)
class MockCompletionInfo(object):
"""Mock for get_task_completion_info"""
times_called = 0
def mock_get_task_completion_info(self, *args): # pylint: disable=unused-argument
"""Mock for get_task_completion_info"""
self.times_called += 1
if self.times_called % 2 == 0:
return True, 'Task Completed'
return False, 'Task Errored In Some Way'
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestInstructorAPITaskLists(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test instructor task list endpoint.
"""
class FakeTask(object):
""" Fake task object """
FEATURES = [
'task_type',
'task_input',
'task_id',
'requester',
'task_state',
'created',
'status',
'task_message',
'duration_sec'
]
def __init__(self, completion):
for feature in self.FEATURES:
setattr(self, feature, 'expected')
# created needs to be a datetime
self.created = datetime.datetime(2013, 10, 25, 11, 42, 35)
# set 'status' and 'task_message' attrs
success, task_message = completion()
if success:
self.status = "Complete"
else:
self.status = "Incomplete"
self.task_message = task_message
# Set 'task_output' attr, which will be parsed to the 'duration_sec' attr.
self.task_output = '{"duration_ms": 1035000}'
self.duration_sec = 1035000 / 1000.0
def make_invalid_output(self):
"""Munge task_output to be invalid json"""
self.task_output = 'HI MY NAME IS INVALID JSON'
# This should be given the value of 'unknown' if the task output
# can't be properly parsed
self.duration_sec = 'unknown'
def to_dict(self):
""" Convert fake task to dictionary representation. """
attr_dict = {key: getattr(self, key) for key in self.FEATURES}
attr_dict['created'] = attr_dict['created'].isoformat()
return attr_dict
def setUp(self):
self.course = CourseFactory.create()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
self.student = UserFactory()
CourseEnrollment.enroll(self.student, self.course.id)
self.problem_location = msk_from_problem_urlname(
self.course.id,
'robot-some-problem-urlname'
)
self.problem_urlname = self.problem_location.to_deprecated_string()
self.module = StudentModule.objects.create(
student=self.student,
course_id=self.course.id,
module_state_key=self.problem_location,
state=json.dumps({'attempts': 10}),
)
mock_factory = MockCompletionInfo()
self.tasks = [self.FakeTask(mock_factory.mock_get_task_completion_info) for _ in xrange(7)]
self.tasks[-1].make_invalid_output()
def tearDown(self):
"""
Undo all patches.
"""
patch.stopall()
@patch.object(instructor_task.api, 'get_running_instructor_tasks')
def test_list_instructor_tasks_running(self, act):
""" Test list of all running tasks. """
act.return_value = self.tasks
url = reverse('list_instructor_tasks', kwargs={'course_id': self.course.id.to_deprecated_string()})
mock_factory = MockCompletionInfo()
with patch('instructor.views.instructor_task_helpers.get_task_completion_info') as mock_completion_info:
mock_completion_info.side_effect = mock_factory.mock_get_task_completion_info
response = self.client.get(url, {})
self.assertEqual(response.status_code, 200)
# check response
self.assertTrue(act.called)
expected_tasks = [ftask.to_dict() for ftask in self.tasks]
actual_tasks = json.loads(response.content)['tasks']
for exp_task, act_task in zip(expected_tasks, actual_tasks):
self.assertDictEqual(exp_task, act_task)
self.assertEqual(actual_tasks, expected_tasks)
@patch.object(instructor_task.api, 'get_instructor_task_history')
def test_list_background_email_tasks(self, act):
"""Test list of background email tasks."""
act.return_value = self.tasks
url = reverse('list_background_email_tasks', kwargs={'course_id': self.course.id.to_deprecated_string()})
mock_factory = MockCompletionInfo()
with patch('instructor.views.instructor_task_helpers.get_task_completion_info') as mock_completion_info:
mock_completion_info.side_effect = mock_factory.mock_get_task_completion_info
response = self.client.get(url, {})
self.assertEqual(response.status_code, 200)
# check response
self.assertTrue(act.called)
expected_tasks = [ftask.to_dict() for ftask in self.tasks]
actual_tasks = json.loads(response.content)['tasks']
for exp_task, act_task in zip(expected_tasks, actual_tasks):
self.assertDictEqual(exp_task, act_task)
self.assertEqual(actual_tasks, expected_tasks)
@patch.object(instructor_task.api, 'get_instructor_task_history')
def test_list_instructor_tasks_problem(self, act):
""" Test list task history for problem. """
act.return_value = self.tasks
url = reverse('list_instructor_tasks', kwargs={'course_id': self.course.id.to_deprecated_string()})
mock_factory = MockCompletionInfo()
with patch('instructor.views.instructor_task_helpers.get_task_completion_info') as mock_completion_info:
mock_completion_info.side_effect = mock_factory.mock_get_task_completion_info
response = self.client.get(url, {
'problem_location_str': self.problem_urlname,
})
self.assertEqual(response.status_code, 200)
# check response
self.assertTrue(act.called)
expected_tasks = [ftask.to_dict() for ftask in self.tasks]
actual_tasks = json.loads(response.content)['tasks']
for exp_task, act_task in zip(expected_tasks, actual_tasks):
self.assertDictEqual(exp_task, act_task)
self.assertEqual(actual_tasks, expected_tasks)
@patch.object(instructor_task.api, 'get_instructor_task_history')
def test_list_instructor_tasks_problem_student(self, act):
""" Test list task history for problem AND student. """
act.return_value = self.tasks
url = reverse('list_instructor_tasks', kwargs={'course_id': self.course.id.to_deprecated_string()})
mock_factory = MockCompletionInfo()
with patch('instructor.views.instructor_task_helpers.get_task_completion_info') as mock_completion_info:
mock_completion_info.side_effect = mock_factory.mock_get_task_completion_info
response = self.client.get(url, {
'problem_location_str': self.problem_urlname,
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 200)
# check response
self.assertTrue(act.called)
expected_tasks = [ftask.to_dict() for ftask in self.tasks]
actual_tasks = json.loads(response.content)['tasks']
for exp_task, act_task in zip(expected_tasks, actual_tasks):
self.assertDictEqual(exp_task, act_task)
self.assertEqual(actual_tasks, expected_tasks)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
@patch.object(instructor_task.api, 'get_instructor_task_history')
class TestInstructorEmailContentList(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test the instructor email content history endpoint.
"""
def setUp(self):
self.course = CourseFactory.create()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
self.tasks = {}
self.emails = {}
self.emails_info = {}
def tearDown(self):
"""
Undo all patches.
"""
patch.stopall()
def setup_fake_email_info(self, num_emails):
""" Initialize the specified number of fake emails """
for email_id in range(num_emails):
num_sent = random.randint(1, 15401)
self.tasks[email_id] = FakeContentTask(email_id, num_sent, 'expected')
self.emails[email_id] = FakeEmail(email_id)
self.emails_info[email_id] = FakeEmailInfo(self.emails[email_id], num_sent)
def get_matching_mock_email(self, **kwargs):
""" Returns the matching mock emails for the given id """
email_id = kwargs.get('id', 0)
return self.emails[email_id]
def get_email_content_response(self, num_emails, task_history_request):
""" Calls the list_email_content endpoint and returns the repsonse """
self.setup_fake_email_info(num_emails)
task_history_request.return_value = self.tasks.values()
url = reverse('list_email_content', kwargs={'course_id': self.course.id.to_deprecated_string()})
with patch('instructor.views.api.CourseEmail.objects.get') as mock_email_info:
mock_email_info.side_effect = self.get_matching_mock_email
response = self.client.get(url, {})
self.assertEqual(response.status_code, 200)
return response
def test_content_list_one_email(self, task_history_request):
""" Test listing of bulk emails when email list has one email """
response = self.get_email_content_response(1, task_history_request)
self.assertTrue(task_history_request.called)
email_info = json.loads(response.content)['emails']
# Emails list should have one email
self.assertEqual(len(email_info), 1)
# Email content should be what's expected
expected_message = self.emails[0].html_message
returned_email_info = email_info[0]
received_message = returned_email_info[u'email'][u'html_message']
self.assertEqual(expected_message, received_message)
def test_content_list_no_emails(self, task_history_request):
""" Test listing of bulk emails when email list empty """
response = self.get_email_content_response(0, task_history_request)
self.assertTrue(task_history_request.called)
email_info = json.loads(response.content)['emails']
# Emails list should be empty
self.assertEqual(len(email_info), 0)
def test_content_list_email_content_many(self, task_history_request):
""" Test listing of bulk emails sent large amount of emails """
response = self.get_email_content_response(50, task_history_request)
self.assertTrue(task_history_request.called)
expected_email_info = [email_info.to_dict() for email_info in self.emails_info.values()]
actual_email_info = json.loads(response.content)['emails']
self.assertEqual(len(actual_email_info), 50)
for exp_email, act_email in zip(expected_email_info, actual_email_info):
self.assertDictEqual(exp_email, act_email)
self.assertEqual(actual_email_info, expected_email_info)
def test_list_email_content_error(self, task_history_request):
""" Test handling of error retrieving email """
invalid_task = FakeContentTask(0, 0, 'test')
invalid_task.make_invalid_input()
task_history_request.return_value = [invalid_task]
url = reverse('list_email_content', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {})
self.assertEqual(response.status_code, 200)
self.assertTrue(task_history_request.called)
returned_email_info = json.loads(response.content)['emails']
self.assertEqual(len(returned_email_info), 1)
returned_info = returned_email_info[0]
for info in ['created', 'sent_to', 'email', 'number_sent']:
self.assertEqual(returned_info[info], None)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
@override_settings(ANALYTICS_SERVER_URL="http://robotanalyticsserver.netbot:900/")
@override_settings(ANALYTICS_API_KEY="robot_api_key")
class TestInstructorAPIAnalyticsProxy(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test instructor analytics proxy endpoint.
"""
class FakeProxyResponse(object):
""" Fake successful requests response object. """
def __init__(self):
self.status_code = requests.status_codes.codes.OK
self.content = '{"test_content": "robot test content"}'
class FakeBadProxyResponse(object):
""" Fake strange-failed requests response object. """
def __init__(self):
self.status_code = 'notok.'
self.content = '{"test_content": "robot test content"}'
def setUp(self):
self.course = CourseFactory.create()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
@patch.object(instructor.views.api.requests, 'get')
def test_analytics_proxy_url(self, act):
""" Test legacy analytics proxy url generation. """
act.return_value = self.FakeProxyResponse()
url = reverse('proxy_legacy_analytics', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'aname': 'ProblemGradeDistribution'
})
self.assertEqual(response.status_code, 200)
# check request url
expected_url = "{url}get?aname={aname}&course_id={course_id!s}&apikey={api_key}".format(
url="http://robotanalyticsserver.netbot:900/",
aname="ProblemGradeDistribution",
course_id=self.course.id.to_deprecated_string(),
api_key="robot_api_key",
)
act.assert_called_once_with(expected_url)
@patch.object(instructor.views.api.requests, 'get')
def test_analytics_proxy(self, act):
"""
        Test legacy analytics content proxying.
"""
act.return_value = self.FakeProxyResponse()
url = reverse('proxy_legacy_analytics', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'aname': 'ProblemGradeDistribution'
})
self.assertEqual(response.status_code, 200)
# check response
self.assertTrue(act.called)
expected_res = {'test_content': "robot test content"}
self.assertEqual(json.loads(response.content), expected_res)
@patch.object(instructor.views.api.requests, 'get')
def test_analytics_proxy_reqfailed(self, act):
""" Test proxy when server reponds with failure. """
act.return_value = self.FakeBadProxyResponse()
url = reverse('proxy_legacy_analytics', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'aname': 'ProblemGradeDistribution'
})
self.assertEqual(response.status_code, 500)
@patch.object(instructor.views.api.requests, 'get')
def test_analytics_proxy_missing_param(self, act):
""" Test proxy when missing the aname query parameter. """
act.return_value = self.FakeProxyResponse()
url = reverse('proxy_legacy_analytics', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {})
self.assertEqual(response.status_code, 400)
self.assertFalse(act.called)
class TestInstructorAPIHelpers(TestCase):
""" Test helpers for instructor.api """
def test_split_input_list(self):
strings = []
lists = []
strings.append("[email protected], [email protected]\[email protected]\r [email protected]\r, [email protected]")
lists.append(['[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]'])
for (stng, lst) in zip(strings, lists):
self.assertEqual(_split_input_list(stng), lst)
def test_split_input_list_unicode(self):
self.assertEqual(_split_input_list('[email protected], [email protected]'), ['[email protected]', '[email protected]'])
self.assertEqual(_split_input_list(u'[email protected], [email protected]'), ['[email protected]', '[email protected]'])
self.assertEqual(_split_input_list(u'[email protected], [email protected]'), [u'[email protected]', '[email protected]'])
scary_unistuff = unichr(40960) + u'abcd' + unichr(1972)
self.assertEqual(_split_input_list(scary_unistuff), [scary_unistuff])
def test_msk_from_problem_urlname(self):
course_id = SlashSeparatedCourseKey('MITx', '6.002x', '2013_Spring')
name = 'L2Node1'
output = 'i4x://MITx/6.002x/problem/L2Node1'
self.assertEqual(msk_from_problem_urlname(course_id, name).to_deprecated_string(), output)
@raises(ValueError)
def test_msk_from_problem_urlname_error(self):
args = ('notagoodcourse', 'L2Node1')
msk_from_problem_urlname(*args)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestDueDateExtensions(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
    Test the due date extension endpoints.
"""
def setUp(self):
"""
Fixtures.
"""
due = datetime.datetime(2010, 5, 12, 2, 42, tzinfo=utc)
course = CourseFactory.create()
week1 = ItemFactory.create(due=due)
week2 = ItemFactory.create(due=due)
week3 = ItemFactory.create(due=due)
course.children = [week1.location.to_deprecated_string(), week2.location.to_deprecated_string(),
week3.location.to_deprecated_string()]
homework = ItemFactory.create(
parent_location=week1.location,
due=due
)
week1.children = [homework.location.to_deprecated_string()]
user1 = UserFactory.create()
StudentModule(
state='{}',
student_id=user1.id,
course_id=course.id,
module_state_key=week1.location).save()
StudentModule(
state='{}',
student_id=user1.id,
course_id=course.id,
module_state_key=week2.location).save()
StudentModule(
state='{}',
student_id=user1.id,
course_id=course.id,
module_state_key=week3.location).save()
StudentModule(
state='{}',
student_id=user1.id,
course_id=course.id,
module_state_key=homework.location).save()
user2 = UserFactory.create()
StudentModule(
state='{}',
student_id=user2.id,
course_id=course.id,
module_state_key=week1.location).save()
StudentModule(
state='{}',
student_id=user2.id,
course_id=course.id,
module_state_key=homework.location).save()
user3 = UserFactory.create()
StudentModule(
state='{}',
student_id=user3.id,
course_id=course.id,
module_state_key=week1.location).save()
StudentModule(
state='{}',
student_id=user3.id,
course_id=course.id,
module_state_key=homework.location).save()
self.course = course
self.week1 = week1
self.homework = homework
self.week2 = week2
self.user1 = user1
self.user2 = user2
self.instructor = InstructorFactory(course_key=course.id)
self.client.login(username=self.instructor.username, password='test')
def test_change_due_date(self):
url = reverse('change_due_date', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'student': self.user1.username,
'url': self.week1.location.to_deprecated_string(),
'due_datetime': '12/30/2013 00:00'
})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(datetime.datetime(2013, 12, 30, 0, 0, tzinfo=utc),
get_extended_due(self.course, self.week1, self.user1))
def test_reset_date(self):
self.test_change_due_date()
url = reverse('reset_due_date', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'student': self.user1.username,
'url': self.week1.location.to_deprecated_string(),
})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(None,
get_extended_due(self.course, self.week1, self.user1))
def test_show_unit_extensions(self):
self.test_change_due_date()
url = reverse('show_unit_extensions',
kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {'url': self.week1.location.to_deprecated_string()})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(json.loads(response.content), {
u'data': [{u'Extended Due Date': u'2013-12-30 00:00',
u'Full Name': self.user1.profile.name,
u'Username': self.user1.username}],
u'header': [u'Username', u'Full Name', u'Extended Due Date'],
u'title': u'Users with due date extensions for %s' %
self.week1.display_name})
def test_show_student_extensions(self):
self.test_change_due_date()
url = reverse('show_student_extensions',
kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {'student': self.user1.username})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(json.loads(response.content), {
u'data': [{u'Extended Due Date': u'2013-12-30 00:00',
u'Unit': self.week1.display_name}],
u'header': [u'Unit', u'Extended Due Date'],
u'title': u'Due date extensions for %s (%s)' % (
self.user1.profile.name, self.user1.username)})
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
@override_settings(REGISTRATION_CODE_LENGTH=8)
class TestCourseRegistrationCodes(ModuleStoreTestCase):
"""
Test data dumps for E-commerce Course Registration Codes.
"""
def setUp(self):
"""
Fixtures.
"""
self.course = CourseFactory.create()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
# Active Registration Codes
for i in range(12):
course_registration_code = CourseRegistrationCode(
code='MyCode0{}'.format(i), course_id=self.course.id.to_deprecated_string(),
transaction_group_name='Test Group', created_by=self.instructor
)
course_registration_code.save()
for i in range(5):
order = Order(user=self.instructor, status='purchased')
order.save()
# Spent(used) Registration Codes
for i in range(5):
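            # The registration codes and orders created above are assumed to have
            # 1-based auto-increment ids, so shift the zero-based loop index.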
i += 1
registration_code_redemption = RegistrationCodeRedemption(
order_id=i, registration_code_id=i, redeemed_by=self.instructor
)
registration_code_redemption.save()
def test_generate_course_registration_codes_csv(self):
"""
        Test generating a CSV response of all the generated course registration codes
"""
url = reverse('generate_registration_codes',
kwargs={'course_id': self.course.id.to_deprecated_string()})
data = {'course_registration_code_number': 15.0, 'transaction_group_name': 'Test Group'}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith('"code","course_id","transaction_group_name","created_by","redeemed_by"'))
self.assertEqual(len(body.split('\n')), 17)
@patch.object(instructor.views.api, 'random_code_generator', Mock(side_effect=['first', 'second', 'third', 'fourth']))
def test_generate_course_registration_codes_matching_existing_coupon_code(self):
"""
        Test code generation when a generated code already exists in the Coupon table
"""
url = reverse('generate_registration_codes',
kwargs={'course_id': self.course.id.to_deprecated_string()})
coupon = Coupon(code='first', course_id=self.course.id.to_deprecated_string(), created_by=self.instructor)
coupon.save()
data = {'course_registration_code_number': 3, 'transaction_group_name': 'Test Group'}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith('"code","course_id","transaction_group_name","created_by","redeemed_by"'))
        self.assertEqual(len(body.split('\n')), 5)  # 1 header row, 1 trailing newline, and 3 data rows
@patch.object(instructor.views.api, 'random_code_generator', Mock(side_effect=['first', 'first', 'second', 'third']))
def test_generate_course_registration_codes_integrity_error(self):
"""
        Test recovery from an IntegrityError when a duplicate code is generated
"""
url = reverse('generate_registration_codes',
kwargs={'course_id': self.course.id.to_deprecated_string()})
data = {'course_registration_code_number': 2, 'transaction_group_name': 'Test Group'}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith('"code","course_id","transaction_group_name","created_by","redeemed_by"'))
self.assertEqual(len(body.split('\n')), 4)
def test_spent_course_registration_codes_csv(self):
"""
        Test generating a CSV response of all the spent course registration codes
"""
url = reverse('spent_registration_codes',
kwargs={'course_id': self.course.id.to_deprecated_string()})
data = {'spent_transaction_group_name': ''}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith('"code","course_id","transaction_group_name","created_by","redeemed_by"'))
self.assertEqual(len(body.split('\n')), 7)
for i in range(9):
course_registration_code = CourseRegistrationCode(
code='TestCode{}'.format(i), course_id=self.course.id.to_deprecated_string(),
transaction_group_name='Group Alpha', created_by=self.instructor
)
course_registration_code.save()
for i in range(9):
order = Order(user=self.instructor, status='purchased')
order.save()
# Spent(used) Registration Codes
for i in range(9):
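            # The nine codes created above are assumed to have ids 13-21 (setUp made
            # codes 1-12), so offset the index to redeem the newly created codes.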
i += 13
registration_code_redemption = RegistrationCodeRedemption(
order_id=i, registration_code_id=i, redeemed_by=self.instructor
)
registration_code_redemption.save()
data = {'spent_transaction_group_name': 'Group Alpha'}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith('"code","course_id","transaction_group_name","created_by","redeemed_by"'))
self.assertEqual(len(body.split('\n')), 11)
def test_active_course_registration_codes_csv(self):
"""
        Test generating a CSV response of all the active course registration codes
"""
url = reverse('active_registration_codes',
kwargs={'course_id': self.course.id.to_deprecated_string()})
data = {'active_transaction_group_name': ''}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith('"code","course_id","transaction_group_name","created_by","redeemed_by"'))
self.assertEqual(len(body.split('\n')), 9)
for i in range(9):
course_registration_code = CourseRegistrationCode(
code='TestCode{}'.format(i), course_id=self.course.id.to_deprecated_string(),
transaction_group_name='Group Alpha', created_by=self.instructor
)
course_registration_code.save()
data = {'active_transaction_group_name': 'Group Alpha'}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith('"code","course_id","transaction_group_name","created_by","redeemed_by"'))
self.assertEqual(len(body.split('\n')), 11)
def test_get_all_course_registration_codes_csv(self):
"""
        Test generating a CSV response of all the course registration codes
"""
url = reverse('get_registration_codes',
kwargs={'course_id': self.course.id.to_deprecated_string()})
data = {'download_transaction_group_name': ''}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith('"code","course_id","transaction_group_name","created_by","redeemed_by"'))
self.assertEqual(len(body.split('\n')), 14)
for i in range(9):
course_registration_code = CourseRegistrationCode(
code='TestCode{}'.format(i), course_id=self.course.id.to_deprecated_string(),
transaction_group_name='Group Alpha', created_by=self.instructor
)
course_registration_code.save()
data = {'download_transaction_group_name': 'Group Alpha'}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith('"code","course_id","transaction_group_name","created_by","redeemed_by"'))
self.assertEqual(len(body.split('\n')), 11)
|
TwilioDevEd/api-snippets
|
refs/heads/master
|
rest/usage-triggers/list-post-example-1/list-post-example-1.6.x.py
|
1
|
# Download the Python helper library from twilio.com/docs/python/install
import os
from twilio.rest import Client
# Your Account Sid and Auth Token from twilio.com/user/account
# To set up environmental variables, see http://twil.io/secure
account_sid = os.environ['TWILIO_ACCOUNT_SID']
auth_token = os.environ['TWILIO_AUTH_TOKEN']
client = Client(account_sid, auth_token)
trigger = client.usage.triggers.create(
trigger_value="1000",
usage_category="sms",
callback_url="http://www.example.com/"
)
print(trigger.sid)
|
amith01994/intellij-community
|
refs/heads/master
|
python/testData/copyPaste/singleLine/Indent11.dst.py
|
747
|
class C:
def foo(self):
<caret>y = 2
|
ArcherSys/ArcherSys
|
refs/heads/master
|
skulpt/test/run/t230.py
|
1
|
def f(n):
for i in range(n):
yield i
g = f(5)
print g.next()
print g.next()
print g.next()
print g.next()
|
pcn/graphite-web
|
refs/heads/add_kairosdb_support
|
webapp/graphite/metrics/urls.py
|
8
|
"""Copyright 2009 Chris Davis
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
from django.conf.urls.defaults import *
urlpatterns = patterns('graphite.metrics.views',
  (r'^index\.json$', 'index_json'),
  (r'^search/?$', 'search_view'),
  (r'^find/?$', 'find_view'),
  (r'^expand/?$', 'expand_view'),
  (r'^get-metadata/?$', 'get_metadata_view'),
  (r'^set-metadata/?$', 'set_metadata_view'),
('', 'find_view'),
)
|
alsrgv/tensorflow
|
refs/heads/master
|
tensorflow/python/ops/distributions/normal.py
|
8
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Normal (Gaussian) distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.ops.distributions import special_math
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"Normal",
"NormalWithSoftplusScale",
]
@tf_export(v1=["distributions.Normal"])
class Normal(distribution.Distribution):
"""The Normal distribution with location `loc` and `scale` parameters.
#### Mathematical details
The probability density function (pdf) is,
```none
pdf(x; mu, sigma) = exp(-0.5 (x - mu)**2 / sigma**2) / Z
Z = (2 pi sigma**2)**0.5
```
  where `loc = mu` is the mean, `scale = sigma` is the std. deviation, and `Z`
is the normalization constant.
The Normal distribution is a member of the [location-scale family](
https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
constructed as,
```none
X ~ Normal(loc=0, scale=1)
Y = loc + scale * X
```
#### Examples
Examples of initialization of one or a batch of distributions.
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Define a single scalar Normal distribution.
dist = tfd.Normal(loc=0., scale=3.)
# Evaluate the cdf at 1, returning a scalar.
dist.cdf(1.)
# Define a batch of two scalar valued Normals.
# The first has mean 1 and standard deviation 11, the second 2 and 22.
dist = tfd.Normal(loc=[1, 2.], scale=[11, 22.])
# Evaluate the pdf of the first distribution on 0, and the second on 1.5,
# returning a length two tensor.
dist.prob([0, 1.5])
# Get 3 samples, returning a 3 x 2 tensor.
dist.sample([3])
```
Arguments are broadcast when possible.
```python
# Define a batch of two scalar valued Normals.
# Both have mean 1, but different standard deviations.
dist = tfd.Normal(loc=1., scale=[11, 22.])
# Evaluate the pdf of both distributions on the same point, 3.0,
# returning a length 2 tensor.
dist.prob(3.0)
```
"""
@deprecation.deprecated(
"2019-01-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.distributions`.",
warn_once=True)
def __init__(self,
loc,
scale,
validate_args=False,
allow_nan_stats=True,
name="Normal"):
"""Construct Normal distributions with mean and stddev `loc` and `scale`.
The parameters `loc` and `scale` must be shaped in a way that supports
broadcasting (e.g. `loc + scale` is a valid operation).
Args:
loc: Floating point tensor; the means of the distribution(s).
scale: Floating point tensor; the stddevs of the distribution(s).
Must contain only positive values.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
TypeError: if `loc` and `scale` have different `dtype`.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[loc, scale]) as name:
with ops.control_dependencies([check_ops.assert_positive(scale)] if
validate_args else []):
self._loc = array_ops.identity(loc, name="loc")
self._scale = array_ops.identity(scale, name="scale")
check_ops.assert_same_float_dtype([self._loc, self._scale])
super(Normal, self).__init__(
dtype=self._scale.dtype,
reparameterization_type=distribution.FULLY_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._loc, self._scale],
name=name)
@staticmethod
def _param_shapes(sample_shape):
return dict(
zip(("loc", "scale"), ([ops.convert_to_tensor(
sample_shape, dtype=dtypes.int32)] * 2)))
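  # Usage sketch (comments only; `param_shapes` is the public classmethod
  # that dispatches to `_param_shapes` above):
  #
  #   Normal.param_shapes([3])
  #   # -> {"loc": <int32 Tensor [3]>, "scale": <int32 Tensor [3]>}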
@property
def loc(self):
"""Distribution parameter for the mean."""
return self._loc
@property
def scale(self):
"""Distribution parameter for standard deviation."""
return self._scale
def _batch_shape_tensor(self):
return array_ops.broadcast_dynamic_shape(
array_ops.shape(self.loc),
array_ops.shape(self.scale))
def _batch_shape(self):
return array_ops.broadcast_static_shape(
self.loc.get_shape(),
self.scale.get_shape())
def _event_shape_tensor(self):
return constant_op.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.scalar()
def _sample_n(self, n, seed=None):
shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)
sampled = random_ops.random_normal(
shape=shape, mean=0., stddev=1., dtype=self.loc.dtype, seed=seed)
return sampled * self.scale + self.loc
def _log_prob(self, x):
return self._log_unnormalized_prob(x) - self._log_normalization()
def _log_cdf(self, x):
return special_math.log_ndtr(self._z(x))
def _cdf(self, x):
return special_math.ndtr(self._z(x))
def _log_survival_function(self, x):
return special_math.log_ndtr(-self._z(x))
def _survival_function(self, x):
return special_math.ndtr(-self._z(x))
def _log_unnormalized_prob(self, x):
return -0.5 * math_ops.square(self._z(x))
def _log_normalization(self):
return 0.5 * math.log(2. * math.pi) + math_ops.log(self.scale)
def _entropy(self):
# Use broadcasting rules to calculate the full broadcast scale.
scale = self.scale * array_ops.ones_like(self.loc)
return 0.5 * math.log(2. * math.pi * math.e) + math_ops.log(scale)
def _mean(self):
return self.loc * array_ops.ones_like(self.scale)
def _quantile(self, p):
return self._inv_z(special_math.ndtri(p))
def _stddev(self):
return self.scale * array_ops.ones_like(self.loc)
def _mode(self):
return self._mean()
def _z(self, x):
"""Standardize input `x` to a unit normal."""
with ops.name_scope("standardize", values=[x]):
return (x - self.loc) / self.scale
def _inv_z(self, z):
"""Reconstruct input `x` from a its normalized version."""
with ops.name_scope("reconstruct", values=[z]):
return z * self.scale + self.loc
class NormalWithSoftplusScale(Normal):
"""Normal with softplus applied to `scale`."""
@deprecation.deprecated(
"2019-01-01",
"Use `tfd.Normal(loc, tf.nn.softplus(scale)) "
"instead.",
warn_once=True)
def __init__(self,
loc,
scale,
validate_args=False,
allow_nan_stats=True,
name="NormalWithSoftplusScale"):
parameters = dict(locals())
with ops.name_scope(name, values=[scale]) as name:
super(NormalWithSoftplusScale, self).__init__(
loc=loc,
scale=nn.softplus(scale, name="softplus_scale"),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
self._parameters = parameters
@kullback_leibler.RegisterKL(Normal, Normal)
def _kl_normal_normal(n_a, n_b, name=None):
"""Calculate the batched KL divergence KL(n_a || n_b) with n_a and n_b Normal.
Args:
n_a: instance of a Normal distribution object.
n_b: instance of a Normal distribution object.
    name: (optional) Name to use for created operations.
      Default is "kl_normal_normal".
Returns:
Batchwise KL(n_a || n_b)
"""
with ops.name_scope(name, "kl_normal_normal", [n_a.loc, n_b.loc]):
one = constant_op.constant(1, dtype=n_a.dtype)
two = constant_op.constant(2, dtype=n_a.dtype)
half = constant_op.constant(0.5, dtype=n_a.dtype)
s_a_squared = math_ops.square(n_a.scale)
s_b_squared = math_ops.square(n_b.scale)
ratio = s_a_squared / s_b_squared
return (math_ops.squared_difference(n_a.loc, n_b.loc) / (two * s_b_squared)
+ half * (ratio - one - math_ops.log(ratio)))
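
# Minimal usage sketch (not part of the original module; assumes TF 1.x
# graph mode and a local Session, consistent with the deprecated
# tf.distributions API above).
if __name__ == "__main__":
  from tensorflow.python.client import session as _session_lib

  _n_a = Normal(loc=0., scale=1.)
  _n_b = Normal(loc=1., scale=2.)
  # kl_divergence dispatches to the registered _kl_normal_normal above.
  _kl = kullback_leibler.kl_divergence(_n_a, _n_b)
  with _session_lib.Session() as _sess:
    print(_sess.run(_kl))  # closed-form KL(N(0, 1) || N(1, 4))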
|
aboutsajjad/Bridge
|
refs/heads/master
|
app_packages/werkzeug/wrappers.py
|
84
|
# -*- coding: utf-8 -*-
"""
werkzeug.wrappers
~~~~~~~~~~~~~~~~~
The wrappers are simple request and response objects which you can
subclass to do whatever you want them to do. The request object contains
the information transmitted by the client (webbrowser) and the response
object contains all the information sent back to the browser.
An important detail is that the request object is created with the WSGI
environ and will act as high-level proxy whereas the response object is an
actual WSGI application.
Like everything else in Werkzeug these objects will work correctly with
    unicode data. Incoming form data parsed by the request object will be
    decoded into a unicode object if possible and if it makes sense.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from functools import update_wrapper
from datetime import datetime, timedelta
from werkzeug.http import HTTP_STATUS_CODES, \
parse_accept_header, parse_cache_control_header, parse_etags, \
parse_date, generate_etag, is_resource_modified, unquote_etag, \
quote_etag, parse_set_header, parse_authorization_header, \
parse_www_authenticate_header, remove_entity_headers, \
parse_options_header, dump_options_header, http_date, \
parse_if_range_header, parse_cookie, dump_cookie, \
parse_range_header, parse_content_range_header, dump_header
from werkzeug.urls import url_decode, iri_to_uri, url_join
from werkzeug.formparser import FormDataParser, default_stream_factory
from werkzeug.utils import cached_property, environ_property, \
header_property, get_content_type
from werkzeug.wsgi import get_current_url, get_host, \
ClosingIterator, get_input_stream, get_content_length, _RangeWrapper
from werkzeug.datastructures import MultiDict, CombinedMultiDict, Headers, \
EnvironHeaders, ImmutableMultiDict, ImmutableTypeConversionDict, \
ImmutableList, MIMEAccept, CharsetAccept, LanguageAccept, \
ResponseCacheControl, RequestCacheControl, CallbackDict, \
ContentRange, iter_multi_items
from werkzeug._internal import _get_environ
from werkzeug._compat import to_bytes, string_types, text_type, \
integer_types, wsgi_decoding_dance, wsgi_get_bytes, \
to_unicode, to_native, BytesIO
def _run_wsgi_app(*args):
"""This function replaces itself to ensure that the test module is not
imported unless required. DO NOT USE!
"""
global _run_wsgi_app
from werkzeug.test import run_wsgi_app as _run_wsgi_app
return _run_wsgi_app(*args)
def _warn_if_string(iterable):
"""Helper for the response objects to check if the iterable returned
to the WSGI server is not a string.
"""
if isinstance(iterable, string_types):
from warnings import warn
warn(Warning('response iterable was set to a string. This appears '
'to work but means that the server will send the '
                     'data to the client char by char. This is almost '
                     'never intended behavior; use response.data to assign '
'strings to the response object.'), stacklevel=2)
def _assert_not_shallow(request):
if request.shallow:
raise RuntimeError('A shallow request tried to consume '
'form data. If you really want to do '
'that, set `shallow` to False.')
def _iter_encoded(iterable, charset):
for item in iterable:
if isinstance(item, text_type):
yield item.encode(charset)
else:
yield item
def _clean_accept_ranges(accept_ranges):
if accept_ranges is True:
return "bytes"
elif accept_ranges is False:
return "none"
elif isinstance(accept_ranges, text_type):
return to_native(accept_ranges)
raise ValueError("Invalid accept_ranges value")
class BaseRequest(object):
"""Very basic request object. This does not implement advanced stuff like
entity tag parsing or cache controls. The request object is created with
the WSGI environment as first argument and will add itself to the WSGI
environment as ``'werkzeug.request'`` unless it's created with
`populate_request` set to False.
There are a couple of mixins available that add additional functionality
to the request object, there is also a class called `Request` which
subclasses `BaseRequest` and all the important mixins.
It's a good idea to create a custom subclass of the :class:`BaseRequest`
    and add missing functionality either via mixins or direct implementation.
    Here is an example of such a subclass::
from werkzeug.wrappers import BaseRequest, ETagRequestMixin
class Request(BaseRequest, ETagRequestMixin):
pass
Request objects are **read only**. As of 0.5 modifications are not
allowed in any place. Unlike the lower level parsing functions the
request object will use immutable objects everywhere possible.
Per default the request object will assume all the text data is `utf-8`
encoded. Please refer to `the unicode chapter <unicode.txt>`_ for more
details about customizing the behavior.
Per default the request object will be added to the WSGI
environment as `werkzeug.request` to support the debugging system.
If you don't want that, set `populate_request` to `False`.
    If `shallow` is `True` the environment is initialized as a shallow
    object around the environ. Every operation that would modify the
environ in any way (such as consuming form data) raises an exception
unless the `shallow` attribute is explicitly set to `False`. This
is useful for middlewares where you don't want to consume the form
data by accident. A shallow request is not populated to the WSGI
environment.
.. versionchanged:: 0.5
read-only mode was enforced by using immutables classes for all
data.
"""
#: the charset for the request, defaults to utf-8
charset = 'utf-8'
#: the error handling procedure for errors, defaults to 'replace'
encoding_errors = 'replace'
#: the maximum content length. This is forwarded to the form data
#: parsing function (:func:`parse_form_data`). When set and the
#: :attr:`form` or :attr:`files` attribute is accessed and the
#: parsing fails because more than the specified value is transmitted
#: a :exc:`~werkzeug.exceptions.RequestEntityTooLarge` exception is raised.
#:
#: Have a look at :ref:`dealing-with-request-data` for more details.
#:
#: .. versionadded:: 0.5
max_content_length = None
#: the maximum form field size. This is forwarded to the form data
#: parsing function (:func:`parse_form_data`). When set and the
#: :attr:`form` or :attr:`files` attribute is accessed and the
#: data in memory for post data is longer than the specified value a
#: :exc:`~werkzeug.exceptions.RequestEntityTooLarge` exception is raised.
#:
#: Have a look at :ref:`dealing-with-request-data` for more details.
#:
#: .. versionadded:: 0.5
max_form_memory_size = None
#: the class to use for `args` and `form`. The default is an
#: :class:`~werkzeug.datastructures.ImmutableMultiDict` which supports
#: multiple values per key. alternatively it makes sense to use an
#: :class:`~werkzeug.datastructures.ImmutableOrderedMultiDict` which
#: preserves order or a :class:`~werkzeug.datastructures.ImmutableDict`
#: which is the fastest but only remembers the last key. It is also
#: possible to use mutable structures, but this is not recommended.
#:
#: .. versionadded:: 0.6
parameter_storage_class = ImmutableMultiDict
#: the type to be used for list values from the incoming WSGI environment.
#: By default an :class:`~werkzeug.datastructures.ImmutableList` is used
#: (for example for :attr:`access_list`).
#:
#: .. versionadded:: 0.6
list_storage_class = ImmutableList
#: the type to be used for dict values from the incoming WSGI environment.
#: By default an
#: :class:`~werkzeug.datastructures.ImmutableTypeConversionDict` is used
#: (for example for :attr:`cookies`).
#:
#: .. versionadded:: 0.6
dict_storage_class = ImmutableTypeConversionDict
    #: The form data parser that should be used. Can be replaced to customize
    #: the form data parsing.
form_data_parser_class = FormDataParser
#: Optionally a list of hosts that is trusted by this request. By default
    #: all hosts are trusted, which means that whatever host the client
    #: sends will be accepted.
#:
#: This is the recommended setup as a webserver should manually be set up
#: to only route correct hosts to the application, and remove the
#: `X-Forwarded-Host` header if it is not being used (see
#: :func:`werkzeug.wsgi.get_host`).
#:
#: .. versionadded:: 0.9
trusted_hosts = None
#: Indicates whether the data descriptor should be allowed to read and
#: buffer up the input stream. By default it's enabled.
#:
#: .. versionadded:: 0.9
disable_data_descriptor = False
def __init__(self, environ, populate_request=True, shallow=False):
self.environ = environ
if populate_request and not shallow:
self.environ['werkzeug.request'] = self
self.shallow = shallow
def __repr__(self):
# make sure the __repr__ even works if the request was created
# from an invalid WSGI environment. If we display the request
# in a debug session we don't want the repr to blow up.
args = []
try:
args.append("'%s'" % to_native(self.url, self.url_charset))
args.append('[%s]' % self.method)
except Exception:
args.append('(invalid WSGI environ)')
return '<%s %s>' % (
self.__class__.__name__,
' '.join(args)
)
@property
def url_charset(self):
"""The charset that is assumed for URLs. Defaults to the value
of :attr:`charset`.
.. versionadded:: 0.6
"""
return self.charset
@classmethod
def from_values(cls, *args, **kwargs):
"""Create a new request object based on the values provided. If
        environ is given, missing values are filled from there. This method is
        useful for small scripts when you need to simulate a request from a URL.
        Do not use this method for unit testing; there is a full-featured client
        object (:class:`Client`) that allows you to create multipart requests,
        with support for cookies, etc.
This accepts the same options as the
:class:`~werkzeug.test.EnvironBuilder`.
.. versionchanged:: 0.5
This method now accepts the same arguments as
:class:`~werkzeug.test.EnvironBuilder`. Because of this the
`environ` parameter is now called `environ_overrides`.
:return: request object
"""
from werkzeug.test import EnvironBuilder
charset = kwargs.pop('charset', cls.charset)
kwargs['charset'] = charset
builder = EnvironBuilder(*args, **kwargs)
try:
return builder.get_request(cls)
finally:
builder.close()
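    # Usage sketch (comments only, not from the original source):
    #
    #   req = BaseRequest.from_values(
    #       '/search?q=werkzeug', method='POST', data={'page': '2'})
    #   req.args['q']    -> u'werkzeug'  (parsed query string)
    #   req.form['page'] -> u'2'         (parsed form body)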
@classmethod
def application(cls, f):
"""Decorate a function as responder that accepts the request as first
argument. This works like the :func:`responder` decorator but the
function is passed the request object as first argument and the
request object will be closed automatically::
@Request.application
def my_wsgi_app(request):
return Response('Hello World!')
:param f: the WSGI callable to decorate
:return: a new WSGI callable
"""
#: return a callable that wraps the -2nd argument with the request
#: and calls the function with all the arguments up to that one and
#: the request. The return value is then called with the latest
#: two arguments. This makes it possible to use this decorator for
#: both methods and standalone WSGI functions.
def application(*args):
request = cls(args[-2])
with request:
return f(*args[:-2] + (request,))(*args[-2:])
return update_wrapper(application, f)
def _get_file_stream(self, total_content_length, content_type, filename=None,
content_length=None):
"""Called to get a stream for the file upload.
This must provide a file-like class with `read()`, `readline()`
and `seek()` methods that is both writeable and readable.
The default implementation returns a temporary file if the total
content length is higher than 500KB. Because many browsers do not
        provide a content length for the files, only the total content
length matters.
:param total_content_length: the total content length of all the
data in the request combined. This value
is guaranteed to be there.
:param content_type: the mimetype of the uploaded file.
:param filename: the filename of the uploaded file. May be `None`.
:param content_length: the length of this file. This value is usually
not provided because webbrowsers do not provide
this value.
"""
return default_stream_factory(total_content_length, content_type,
filename, content_length)
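    # Subclassing sketch (comments only; the `SpooledTemporaryFile` choice is
    # an illustrative assumption, not the library default):
    #
    #   class MyRequest(BaseRequest):
    #       def _get_file_stream(self, total_content_length, content_type,
    #                            filename=None, content_length=None):
    #           from tempfile import SpooledTemporaryFile
    #           return SpooledTemporaryFile(max_size=1024 * 1024, mode='wb+')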
@property
def want_form_data_parsed(self):
"""Returns True if the request method carries content. As of
Werkzeug 0.9 this will be the case if a content type is transmitted.
.. versionadded:: 0.8
"""
return bool(self.environ.get('CONTENT_TYPE'))
def make_form_data_parser(self):
"""Creates the form data parser. Instanciates the
:attr:`form_data_parser_class` with some parameters.
.. versionadded:: 0.8
"""
return self.form_data_parser_class(self._get_file_stream,
self.charset,
self.encoding_errors,
self.max_form_memory_size,
self.max_content_length,
self.parameter_storage_class)
def _load_form_data(self):
"""Method used internally to retrieve submitted data. After calling
this sets `form` and `files` on the request object to multi dicts
filled with the incoming form data. As a matter of fact the input
stream will be empty afterwards. You can also call this method to
force the parsing of the form data.
.. versionadded:: 0.8
"""
# abort early if we have already consumed the stream
if 'form' in self.__dict__:
return
_assert_not_shallow(self)
if self.want_form_data_parsed:
content_type = self.environ.get('CONTENT_TYPE', '')
content_length = get_content_length(self.environ)
mimetype, options = parse_options_header(content_type)
parser = self.make_form_data_parser()
data = parser.parse(self._get_stream_for_parsing(),
mimetype, content_length, options)
else:
data = (self.stream, self.parameter_storage_class(),
self.parameter_storage_class())
# inject the values into the instance dict so that we bypass
# our cached_property non-data descriptor.
d = self.__dict__
d['stream'], d['form'], d['files'] = data
def _get_stream_for_parsing(self):
"""This is the same as accessing :attr:`stream` with the difference
that if it finds cached data from calling :meth:`get_data` first it
will create a new stream out of the cached data.
.. versionadded:: 0.9.3
"""
cached_data = getattr(self, '_cached_data', None)
if cached_data is not None:
return BytesIO(cached_data)
return self.stream
def close(self):
"""Closes associated resources of this request object. This
closes all file handles explicitly. You can also use the request
object in a with statement which will automatically close it.
.. versionadded:: 0.9
"""
files = self.__dict__.get('files')
for key, value in iter_multi_items(files or ()):
value.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
self.close()
@cached_property
def stream(self):
"""
If the incoming form data was not encoded with a known mimetype
the data is stored unmodified in this stream for consumption. Most
of the time it is a better idea to use :attr:`data` which will give
you that data as a string. The stream only returns the data once.
        Unlike :attr:`input_stream` this stream is properly guarded so that you
can't accidentally read past the length of the input. Werkzeug will
internally always refer to this stream to read data which makes it
possible to wrap this object with a stream that does filtering.
.. versionchanged:: 0.9
This stream is now always available but might be consumed by the
form parser later on. Previously the stream was only set if no
parsing happened.
"""
_assert_not_shallow(self)
return get_input_stream(self.environ)
input_stream = environ_property('wsgi.input', """
The WSGI input stream.
In general it's a bad idea to use this one because you can easily read past
the boundary. Use the :attr:`stream` instead.
""")
@cached_property
def args(self):
"""The parsed URL parameters (the part in the URL after the question
mark).
By default an
:class:`~werkzeug.datastructures.ImmutableMultiDict`
is returned from this function. This can be changed by setting
:attr:`parameter_storage_class` to a different type. This might
be necessary if the order of the form data is important.
"""
return url_decode(wsgi_get_bytes(self.environ.get('QUERY_STRING', '')),
self.url_charset, errors=self.encoding_errors,
cls=self.parameter_storage_class)
@cached_property
def data(self):
"""
Contains the incoming request data as string in case it came with
a mimetype Werkzeug does not handle.
"""
if self.disable_data_descriptor:
raise AttributeError('data descriptor is disabled')
# XXX: this should eventually be deprecated.
# We trigger form data parsing first which means that the descriptor
# will not cache the data that would otherwise be .form or .files
# data. This restores the behavior that was there in Werkzeug
# before 0.9. New code should use :meth:`get_data` explicitly as
# this will make behavior explicit.
return self.get_data(parse_form_data=True)
def get_data(self, cache=True, as_text=False, parse_form_data=False):
"""This reads the buffered incoming data from the client into one
bytestring. By default this is cached but that behavior can be
changed by setting `cache` to `False`.
Usually it's a bad idea to call this method without checking the
content length first as a client could send dozens of megabytes or more
to cause memory problems on the server.
Note that if the form data was already parsed this method will not
return anything as form data parsing does not cache the data like
this method does. To implicitly invoke form data parsing function
set `parse_form_data` to `True`. When this is done the return value
of this method will be an empty string if the form parser handles
        the data. This generally is not necessary, as if the whole data is
        cached (which is the default) the form parser will use the cached
data to parse the form data. Please be generally aware of checking
the content length first in any case before calling this method
to avoid exhausting server memory.
If `as_text` is set to `True` the return value will be a decoded
unicode string.
.. versionadded:: 0.9
"""
rv = getattr(self, '_cached_data', None)
if rv is None:
if parse_form_data:
self._load_form_data()
rv = self.stream.read()
if cache:
self._cached_data = rv
if as_text:
rv = rv.decode(self.charset, self.encoding_errors)
return rv
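    # Usage sketch (comments only): check the declared length before reading
    # the whole body into memory, as the docstring above recommends.
    #
    #   if (get_content_length(request.environ) or 0) <= 4 * 1024 * 1024:
    #       payload = request.get_data(as_text=True)  # cached for reuse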
@cached_property
def form(self):
"""The form parameters. By default an
:class:`~werkzeug.datastructures.ImmutableMultiDict`
is returned from this function. This can be changed by setting
:attr:`parameter_storage_class` to a different type. This might
be necessary if the order of the form data is important.
Please keep in mind that file uploads will not end up here, but instead
in the :attr:`files` attribute.
.. versionchanged:: 0.9
Previous to Werkzeug 0.9 this would only contain form data for POST
and PUT requests.
"""
self._load_form_data()
return self.form
@cached_property
def values(self):
"""A :class:`werkzeug.datastructures.CombinedMultiDict` that combines
:attr:`args` and :attr:`form`."""
args = []
for d in self.args, self.form:
if not isinstance(d, MultiDict):
d = MultiDict(d)
args.append(d)
return CombinedMultiDict(args)
@cached_property
def files(self):
""":class:`~werkzeug.datastructures.MultiDict` object containing
all uploaded files. Each key in :attr:`files` is the name from the
``<input type="file" name="">``. Each value in :attr:`files` is a
Werkzeug :class:`~werkzeug.datastructures.FileStorage` object.
It basically behaves like a standard file object you know from Python,
with the difference that it also has a
:meth:`~werkzeug.datastructures.FileStorage.save` function that can
store the file on the filesystem.
Note that :attr:`files` will only contain data if the request method was
POST, PUT or PATCH and the ``<form>`` that posted to the request had
``enctype="multipart/form-data"``. It will be empty otherwise.
See the :class:`~werkzeug.datastructures.MultiDict` /
:class:`~werkzeug.datastructures.FileStorage` documentation for
more details about the used data structure.
"""
self._load_form_data()
return self.files
@cached_property
def cookies(self):
"""A :class:`dict` with the contents of all cookies transmitted with
the request."""
return parse_cookie(self.environ, self.charset,
self.encoding_errors,
cls=self.dict_storage_class)
@cached_property
def headers(self):
"""The headers from the WSGI environ as immutable
:class:`~werkzeug.datastructures.EnvironHeaders`.
"""
return EnvironHeaders(self.environ)
@cached_property
def path(self):
"""Requested path as unicode. This works a bit like the regular path
info in the WSGI environment but will always include a leading slash,
even if the URL root is accessed.
"""
raw_path = wsgi_decoding_dance(self.environ.get('PATH_INFO') or '',
self.charset, self.encoding_errors)
return '/' + raw_path.lstrip('/')
@cached_property
def full_path(self):
"""Requested path as unicode, including the query string."""
return self.path + u'?' + to_unicode(self.query_string, self.url_charset)
@cached_property
def script_root(self):
"""The root path of the script without the trailing slash."""
raw_path = wsgi_decoding_dance(self.environ.get('SCRIPT_NAME') or '',
self.charset, self.encoding_errors)
return raw_path.rstrip('/')
@cached_property
def url(self):
"""The reconstructed current URL as IRI.
See also: :attr:`trusted_hosts`.
"""
return get_current_url(self.environ,
trusted_hosts=self.trusted_hosts)
@cached_property
def base_url(self):
"""Like :attr:`url` but without the querystring
See also: :attr:`trusted_hosts`.
"""
return get_current_url(self.environ, strip_querystring=True,
trusted_hosts=self.trusted_hosts)
@cached_property
def url_root(self):
"""The full URL root (with hostname), this is the application
root as IRI.
See also: :attr:`trusted_hosts`.
"""
return get_current_url(self.environ, True,
trusted_hosts=self.trusted_hosts)
@cached_property
def host_url(self):
"""Just the host with scheme as IRI.
See also: :attr:`trusted_hosts`.
"""
return get_current_url(self.environ, host_only=True,
trusted_hosts=self.trusted_hosts)
@cached_property
def host(self):
"""Just the host including the port if available.
See also: :attr:`trusted_hosts`.
"""
return get_host(self.environ, trusted_hosts=self.trusted_hosts)
query_string = environ_property(
'QUERY_STRING', '', read_only=True,
load_func=wsgi_get_bytes, doc='The URL parameters as raw bytestring.')
method = environ_property(
'REQUEST_METHOD', 'GET', read_only=True,
load_func=lambda x: x.upper(),
doc="The request method. (For example ``'GET'`` or ``'POST'``).")
@cached_property
def access_route(self):
"""If a forwarded header exists this is a list of all ip addresses
from the client ip to the last proxy server.
"""
if 'HTTP_X_FORWARDED_FOR' in self.environ:
addr = self.environ['HTTP_X_FORWARDED_FOR'].split(',')
return self.list_storage_class([x.strip() for x in addr])
elif 'REMOTE_ADDR' in self.environ:
return self.list_storage_class([self.environ['REMOTE_ADDR']])
return self.list_storage_class()
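    # Example (comments only): with
    #   environ['HTTP_X_FORWARDED_FOR'] = '203.0.113.5, 10.0.0.1'
    # access_route evaluates to ['203.0.113.5', '10.0.0.1'] -- the client
    # address first, then each proxy that forwarded the request.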
@property
def remote_addr(self):
"""The remote address of the client."""
return self.environ.get('REMOTE_ADDR')
remote_user = environ_property('REMOTE_USER', doc='''
If the server supports user authentication, and the script is
protected, this attribute contains the username the user has
authenticated as.''')
scheme = environ_property('wsgi.url_scheme', doc='''
URL scheme (http or https).
.. versionadded:: 0.7''')
is_xhr = property(lambda x: x.environ.get('HTTP_X_REQUESTED_WITH', '')
.lower() == 'xmlhttprequest', doc='''
True if the request was triggered via a JavaScript XMLHttpRequest.
This only works with libraries that support the `X-Requested-With`
header and set it to "XMLHttpRequest". Libraries that do that are
prototype, jQuery and Mochikit and probably some more.''')
is_secure = property(lambda x: x.environ['wsgi.url_scheme'] == 'https',
doc='`True` if the request is secure.')
is_multithread = environ_property('wsgi.multithread', doc='''
boolean that is `True` if the application is served by
a multithreaded WSGI server.''')
is_multiprocess = environ_property('wsgi.multiprocess', doc='''
boolean that is `True` if the application is served by
a WSGI server that spawns multiple processes.''')
is_run_once = environ_property('wsgi.run_once', doc='''
boolean that is `True` if the application will be executed only
once in a process lifetime. This is the case for CGI for example,
but it's not guaranteed that the execution only happens one time.''')
class BaseResponse(object):
"""Base response class. The most important fact about a response object
is that it's a regular WSGI application. It's initialized with a couple
of response parameters (headers, body, status code etc.) and will start a
valid WSGI response when called with the environ and start response
callable.
    Because it's a WSGI application itself, processing usually ends before the
actual response is sent to the server. This helps debugging systems
because they can catch all the exceptions before responses are started.
Here a small example WSGI application that takes advantage of the
response objects::
from werkzeug.wrappers import BaseResponse as Response
def index():
return Response('Index page')
def application(environ, start_response):
path = environ.get('PATH_INFO') or '/'
if path == '/':
response = index()
else:
response = Response('Not Found', status=404)
return response(environ, start_response)
    Like :class:`BaseRequest`, this object lacks a lot of functionality that is
    implemented in mixins. This gives you better control over the actual
API of your response objects, so you can create subclasses and add custom
functionality. A full featured response object is available as
:class:`Response` which implements a couple of useful mixins.
To enforce a new type of already existing responses you can use the
:meth:`force_type` method. This is useful if you're working with different
subclasses of response objects and you want to post process them with a
known interface.
Per default the response object will assume all the text data is `utf-8`
encoded. Please refer to `the unicode chapter <unicode.txt>`_ for more
details about customizing the behavior.
Response can be any kind of iterable or string. If it's a string it's
    considered to be an iterable with one item, which is the string passed.
Headers can be a list of tuples or a
:class:`~werkzeug.datastructures.Headers` object.
Special note for `mimetype` and `content_type`: For most mime types
`mimetype` and `content_type` work the same, the difference affects
only 'text' mimetypes. If the mimetype passed with `mimetype` is a
mimetype starting with `text/`, the charset parameter of the response
object is appended to it. In contrast the `content_type` parameter is
always added as header unmodified.
.. versionchanged:: 0.5
the `direct_passthrough` parameter was added.
:param response: a string or response iterable.
:param status: a string with a status or an integer with the status code.
:param headers: a list of headers or a
:class:`~werkzeug.datastructures.Headers` object.
:param mimetype: the mimetype for the response. See notice above.
:param content_type: the content type for the response. See notice above.
:param direct_passthrough: if set to `True` :meth:`iter_encoded` is not
called before iteration which makes it
possible to pass special iterators through
unchanged (see :func:`wrap_file` for more
details.)
"""
#: the charset of the response.
charset = 'utf-8'
#: the default status if none is provided.
default_status = 200
#: the default mimetype if none is provided.
default_mimetype = 'text/plain'
#: if set to `False` accessing properties on the response object will
#: not try to consume the response iterator and convert it into a list.
#:
#: .. versionadded:: 0.6.2
#:
#: That attribute was previously called `implicit_seqence_conversion`.
#: (Notice the typo). If you did use this feature, you have to adapt
#: your code to the name change.
implicit_sequence_conversion = True
#: Should this response object correct the location header to be RFC
#: conformant? This is true by default.
#:
#: .. versionadded:: 0.8
autocorrect_location_header = True
#: Should this response object automatically set the content-length
#: header if possible? This is true by default.
#:
#: .. versionadded:: 0.8
automatically_set_content_length = True
def __init__(self, response=None, status=None, headers=None,
mimetype=None, content_type=None, direct_passthrough=False):
if isinstance(headers, Headers):
self.headers = headers
elif not headers:
self.headers = Headers()
else:
self.headers = Headers(headers)
if content_type is None:
if mimetype is None and 'content-type' not in self.headers:
mimetype = self.default_mimetype
if mimetype is not None:
mimetype = get_content_type(mimetype, self.charset)
content_type = mimetype
if content_type is not None:
self.headers['Content-Type'] = content_type
if status is None:
status = self.default_status
if isinstance(status, integer_types):
self.status_code = status
else:
self.status = status
self.direct_passthrough = direct_passthrough
self._on_close = []
# we set the response after the headers so that if a class changes
# the charset attribute, the data is set in the correct charset.
if response is None:
self.response = []
elif isinstance(response, (text_type, bytes, bytearray)):
self.set_data(response)
else:
self.response = response
def call_on_close(self, func):
"""Adds a function to the internal list of functions that should
be called as part of closing down the response. Since 0.7 this
function also returns the function that was passed so that this
can be used as a decorator.
.. versionadded:: 0.6
"""
self._on_close.append(func)
return func
def __repr__(self):
if self.is_sequence:
body_info = '%d bytes' % sum(map(len, self.iter_encoded()))
else:
body_info = 'streamed' if self.is_streamed else 'likely-streamed'
return '<%s %s [%s]>' % (
self.__class__.__name__,
body_info,
self.status
)
@classmethod
def force_type(cls, response, environ=None):
"""Enforce that the WSGI response is a response object of the current
type. Werkzeug will use the :class:`BaseResponse` internally in many
situations like the exceptions. If you call :meth:`get_response` on an
exception you will get back a regular :class:`BaseResponse` object, even
if you are using a custom subclass.
This method can enforce a given response type, and it will also
convert arbitrary WSGI callables into response objects if an environ
is provided::
# convert a Werkzeug response object into an instance of the
# MyResponseClass subclass.
response = MyResponseClass.force_type(response)
# convert any WSGI application into a response object
response = MyResponseClass.force_type(response, environ)
This is especially useful if you want to post-process responses in
the main dispatcher and use functionality provided by your subclass.
Keep in mind that this will modify response objects in place if
possible!
:param response: a response object or wsgi application.
:param environ: a WSGI environment object.
:return: a response object.
"""
if not isinstance(response, BaseResponse):
if environ is None:
raise TypeError('cannot convert WSGI application into '
'response objects without an environ')
response = BaseResponse(*_run_wsgi_app(response, environ))
response.__class__ = cls
return response
@classmethod
def from_app(cls, app, environ, buffered=False):
"""Create a new response object from an application output. This
works best if you pass it an application that returns a generator all
the time. Sometimes applications may use the `write()` callable
returned by the `start_response` function. This tries to resolve such
edge cases automatically. But if you don't get the expected output
you should set `buffered` to `True` which enforces buffering.
:param app: the WSGI application to execute.
:param environ: the WSGI environment to execute against.
:param buffered: set to `True` to enforce buffering.
:return: a response object.
"""
return cls(*_run_wsgi_app(app, environ, buffered))
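    # Usage sketch (comments only; `create_environ` is from werkzeug.test and
    # `my_wsgi_app` is a hypothetical application):
    #
    #   from werkzeug.test import create_environ
    #   resp = BaseResponse.from_app(my_wsgi_app, create_environ('/index'))
    #   resp.status_code, resp.get_data(as_text=True)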
def _get_status_code(self):
return self._status_code
def _set_status_code(self, code):
self._status_code = code
try:
self._status = '%d %s' % (code, HTTP_STATUS_CODES[code].upper())
except KeyError:
self._status = '%d UNKNOWN' % code
status_code = property(_get_status_code, _set_status_code,
doc='The HTTP Status code as number')
del _get_status_code, _set_status_code
def _get_status(self):
return self._status
def _set_status(self, value):
self._status = to_native(value)
try:
self._status_code = int(self._status.split(None, 1)[0])
except ValueError:
self._status_code = 0
self._status = '0 %s' % self._status
status = property(_get_status, _set_status, doc='The HTTP Status code')
del _get_status, _set_status
def get_data(self, as_text=False):
"""The string representation of the request body. Whenever you call
this property the request iterable is encoded and flattened. This
can lead to unwanted behavior if you stream big data.
This behavior can be disabled by setting
:attr:`implicit_sequence_conversion` to `False`.
If `as_text` is set to `True` the return value will be a decoded
unicode string.
.. versionadded:: 0.9
"""
self._ensure_sequence()
rv = b''.join(self.iter_encoded())
if as_text:
rv = rv.decode(self.charset)
return rv
def set_data(self, value):
"""Sets a new string as response. The value set must either by a
unicode or bytestring. If a unicode string is set it's encoded
automatically to the charset of the response (utf-8 by default).
.. versionadded:: 0.9
"""
        # if a unicode string is set, it's encoded directly so that we
# can set the content length
if isinstance(value, text_type):
value = value.encode(self.charset)
else:
value = bytes(value)
self.response = [value]
if self.automatically_set_content_length:
self.headers['Content-Length'] = str(len(value))
data = property(get_data, set_data, doc='''
A descriptor that calls :meth:`get_data` and :meth:`set_data`. This
should not be used and will eventually get deprecated.
''')
def calculate_content_length(self):
"""Returns the content length if available or `None` otherwise."""
try:
self._ensure_sequence()
except RuntimeError:
return None
return sum(len(x) for x in self.response)
def _ensure_sequence(self, mutable=False):
"""This method can be called by methods that need a sequence. If
`mutable` is true, it will also ensure that the response sequence
is a standard Python list.
.. versionadded:: 0.6
"""
if self.is_sequence:
# if we need a mutable object, we ensure it's a list.
if mutable and not isinstance(self.response, list):
self.response = list(self.response)
return
if self.direct_passthrough:
raise RuntimeError('Attempted implicit sequence conversion '
'but the response object is in direct '
'passthrough mode.')
if not self.implicit_sequence_conversion:
raise RuntimeError('The response object required the iterable '
'to be a sequence, but the implicit '
'conversion was disabled. Call '
'make_sequence() yourself.')
self.make_sequence()
def make_sequence(self):
"""Converts the response iterator in a list. By default this happens
automatically if required. If `implicit_sequence_conversion` is
disabled, this method is not automatically called and some properties
might raise exceptions. This also encodes all the items.
.. versionadded:: 0.6
"""
if not self.is_sequence:
# if we consume an iterable we have to ensure that the close
# method of the iterable is called if available when we tear
# down the response
close = getattr(self.response, 'close', None)
self.response = list(self.iter_encoded())
if close is not None:
self.call_on_close(close)
def iter_encoded(self):
"""Iter the response encoded with the encoding of the response.
If the response object is invoked as WSGI application the return
value of this method is used as application iterator unless
:attr:`direct_passthrough` was activated.
"""
if __debug__:
_warn_if_string(self.response)
# Encode in a separate function so that self.response is fetched
# early. This allows us to wrap the response with the return
# value from get_app_iter or iter_encoded.
return _iter_encoded(self.response, self.charset)
def set_cookie(self, key, value='', max_age=None, expires=None,
path='/', domain=None, secure=False, httponly=False):
"""Sets a cookie. The parameters are the same as in the cookie `Morsel`
object in the Python standard library but it accepts unicode data, too.
:param key: the key (name) of the cookie to be set.
:param value: the value of the cookie.
:param max_age: should be a number of seconds, or `None` (default) if
the cookie should last only as long as the client's
browser session.
:param expires: should be a `datetime` object or UNIX timestamp.
:param path: limits the cookie to a given path, per default it will
span the whole domain.
:param domain: if you want to set a cross-domain cookie. For example,
``domain=".example.com"`` will set a cookie that is
readable by the domain ``www.example.com``,
``foo.example.com`` etc. Otherwise, a cookie will only
be readable by the domain that set it.
:param secure: If `True`, the cookie will only be available via HTTPS
:param httponly: disallow JavaScript to access the cookie. This is an
extension to the cookie standard and probably not
supported by all browsers.
"""
self.headers.add('Set-Cookie', dump_cookie(key,
value=value,
max_age=max_age,
expires=expires,
path=path,
domain=domain,
secure=secure,
httponly=httponly,
charset=self.charset))
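    # Usage sketch (comments only): a session cookie limited to HTTPS and
    # hidden from JavaScript; the resulting header is roughly
    # `Set-Cookie: session_id=abc123; Max-Age=3600; Secure; HttpOnly; Path=/`.
    #
    #   resp.set_cookie('session_id', 'abc123', max_age=3600,
    #                   secure=True, httponly=True)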
def delete_cookie(self, key, path='/', domain=None):
"""Delete a cookie. Fails silently if key doesn't exist.
:param key: the key (name) of the cookie to be deleted.
:param path: if the cookie that should be deleted was limited to a
path, the path has to be defined here.
:param domain: if the cookie that should be deleted was limited to a
domain, that domain has to be defined here.
"""
self.set_cookie(key, expires=0, max_age=0, path=path, domain=domain)
@property
def is_streamed(self):
"""If the response is streamed (the response is not an iterable with
        length information) this property is `True`. In this case streamed
means that there is no information about the number of iterations.
This is usually `True` if a generator is passed to the response object.
This is useful for checking before applying some sort of post
filtering that should not take place for streamed responses.
"""
try:
len(self.response)
except (TypeError, AttributeError):
return True
return False
@property
def is_sequence(self):
"""If the iterator is buffered, this property will be `True`. A
response object will consider an iterator to be buffered if the
response attribute is a list or tuple.
.. versionadded:: 0.6
"""
return isinstance(self.response, (tuple, list))
def close(self):
"""Close the wrapped response if possible. You can also use the object
in a with statement which will automatically close it.
.. versionadded:: 0.9
Can now be used in a with statement.
"""
if hasattr(self.response, 'close'):
self.response.close()
for func in self._on_close:
func()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
self.close()
def freeze(self):
"""Call this method if you want to make your response object ready for
being pickled. This buffers the generator if there is one. It will
also set the `Content-Length` header to the length of the body.
.. versionchanged:: 0.6
The `Content-Length` header is now set.
"""
# we explicitly set the length to a list of the *encoded* response
# iterator. Even if the implicit sequence conversion is disabled.
self.response = list(self.iter_encoded())
self.headers['Content-Length'] = str(sum(map(len, self.response)))
def get_wsgi_headers(self, environ):
"""This is automatically called right before the response is started
and returns headers modified for the given environment. It returns a
copy of the headers from the response with some modifications applied
if necessary.
For example the location header (if present) is joined with the root
URL of the environment. Also the content length is automatically set
to zero here for certain status codes.
.. versionchanged:: 0.6
Previously that function was called `fix_headers` and modified
the response object in place. Also since 0.6, IRIs in location
and content-location headers are handled properly.
Also starting with 0.6, Werkzeug will attempt to set the content
length if it is able to figure it out on its own. This is the
case if all the strings in the response iterable are already
encoded and the iterable is buffered.
:param environ: the WSGI environment of the request.
:return: returns a new :class:`~werkzeug.datastructures.Headers`
object.
"""
headers = Headers(self.headers)
location = None
content_location = None
content_length = None
status = self.status_code
        # iterate over the headers to find all values in one go. Because
        # get_wsgi_headers is used for each response, this gives us a tiny
        # speedup.
for key, value in headers:
ikey = key.lower()
if ikey == u'location':
location = value
elif ikey == u'content-location':
content_location = value
elif ikey == u'content-length':
content_length = value
# make sure the location header is an absolute URL
if location is not None:
old_location = location
if isinstance(location, text_type):
# Safe conversion is necessary here as we might redirect
# to a broken URI scheme (for instance itms-services).
location = iri_to_uri(location, safe_conversion=True)
if self.autocorrect_location_header:
current_url = get_current_url(environ, root_only=True)
if isinstance(current_url, text_type):
current_url = iri_to_uri(current_url)
location = url_join(current_url, location)
if location != old_location:
headers['Location'] = location
# make sure the content location is a URL
if content_location is not None and \
isinstance(content_location, text_type):
headers['Content-Location'] = iri_to_uri(content_location)
# remove entity headers and set content length to zero if needed.
# Also update content_length accordingly so that the automatic
# content length detection does not trigger in the following
# code.
if 100 <= status < 200 or status == 204:
headers['Content-Length'] = content_length = u'0'
elif status == 304:
remove_entity_headers(headers)
# if we can determine the content length automatically, we
# should try to do that. But only if this does not involve
# flattening the iterator or encoding of unicode strings in
# the response. We however should not do that if we have a 304
# response.
if self.automatically_set_content_length and \
self.is_sequence and content_length is None and status != 304:
try:
content_length = sum(len(to_bytes(x, 'ascii'))
for x in self.response)
except UnicodeError:
# aha, something non-bytestringy in there, too bad, we
# can't safely figure out the length of the response.
pass
else:
headers['Content-Length'] = str(content_length)
return headers
def get_app_iter(self, environ):
"""Returns the application iterator for the given environ. Depending
on the request method and the current status code the return value
        might be an empty iterable rather than the one from the response.
If the request method is `HEAD` or the status code is in a range
where the HTTP specification requires an empty response, an empty
iterable is returned.
.. versionadded:: 0.6
:param environ: the WSGI environment of the request.
:return: a response iterable.
"""
status = self.status_code
if environ['REQUEST_METHOD'] == 'HEAD' or \
100 <= status < 200 or status in (204, 304):
iterable = ()
elif self.direct_passthrough:
if __debug__:
_warn_if_string(self.response)
return self.response
else:
iterable = self.iter_encoded()
return ClosingIterator(iterable, self.close)
def get_wsgi_response(self, environ):
"""Returns the final WSGI response as tuple. The first item in
the tuple is the application iterator, the second the status and
the third the list of headers. The response returned is created
specially for the given environment. For example if the request
method in the WSGI environment is ``'HEAD'`` the response will
be empty and only the headers and status code will be present.
.. versionadded:: 0.6
:param environ: the WSGI environment of the request.
:return: an ``(app_iter, status, headers)`` tuple.
"""
headers = self.get_wsgi_headers(environ)
app_iter = self.get_app_iter(environ)
return app_iter, self.status, headers.to_wsgi_list()
def __call__(self, environ, start_response):
"""Process this response as WSGI application.
:param environ: the WSGI environment.
:param start_response: the response callable provided by the WSGI
server.
:return: an application iterator
"""
app_iter, status, headers = self.get_wsgi_response(environ)
start_response(status, headers)
return app_iter
class AcceptMixin(object):
"""A mixin for classes with an :attr:`~BaseResponse.environ` attribute
to get all the HTTP accept headers as
:class:`~werkzeug.datastructures.Accept` objects (or subclasses
thereof).
"""
@cached_property
def accept_mimetypes(self):
"""List of mimetypes this client supports as
:class:`~werkzeug.datastructures.MIMEAccept` object.
"""
return parse_accept_header(self.environ.get('HTTP_ACCEPT'), MIMEAccept)
@cached_property
def accept_charsets(self):
"""List of charsets this client supports as
:class:`~werkzeug.datastructures.CharsetAccept` object.
"""
return parse_accept_header(self.environ.get('HTTP_ACCEPT_CHARSET'),
CharsetAccept)
@cached_property
def accept_encodings(self):
"""List of encodings this client accepts. Encodings in a HTTP term
are compression encodings such as gzip. For charsets have a look at
:attr:`accept_charset`.
"""
return parse_accept_header(self.environ.get('HTTP_ACCEPT_ENCODING'))
@cached_property
def accept_languages(self):
"""List of languages this client accepts as
:class:`~werkzeug.datastructures.LanguageAccept` object.
.. versionchanged 0.5
In previous versions this was a regular
:class:`~werkzeug.datastructures.Accept` object.
"""
return parse_accept_header(self.environ.get('HTTP_ACCEPT_LANGUAGE'),
LanguageAccept)
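# Usage sketch (comments only; assumes a request class that mixes in
# AcceptMixin, as the full `Request` class does):
#
#   best = request.accept_mimetypes.best_match(
#       ['application/json', 'text/html'], default='text/html')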
class ETagRequestMixin(object):
"""Add entity tag and cache descriptors to a request object or object with
a WSGI environment available as :attr:`~BaseRequest.environ`. This not
only provides access to etags but also to the cache control header.
"""
@cached_property
def cache_control(self):
"""A :class:`~werkzeug.datastructures.RequestCacheControl` object
for the incoming cache control headers.
"""
cache_control = self.environ.get('HTTP_CACHE_CONTROL')
return parse_cache_control_header(cache_control, None,
RequestCacheControl)
@cached_property
def if_match(self):
"""An object containing all the etags in the `If-Match` header.
:rtype: :class:`~werkzeug.datastructures.ETags`
"""
return parse_etags(self.environ.get('HTTP_IF_MATCH'))
@cached_property
def if_none_match(self):
"""An object containing all the etags in the `If-None-Match` header.
:rtype: :class:`~werkzeug.datastructures.ETags`
"""
return parse_etags(self.environ.get('HTTP_IF_NONE_MATCH'))
@cached_property
def if_modified_since(self):
"""The parsed `If-Modified-Since` header as datetime object."""
return parse_date(self.environ.get('HTTP_IF_MODIFIED_SINCE'))
@cached_property
def if_unmodified_since(self):
"""The parsed `If-Unmodified-Since` header as datetime object."""
return parse_date(self.environ.get('HTTP_IF_UNMODIFIED_SINCE'))
@cached_property
def if_range(self):
"""The parsed `If-Range` header.
.. versionadded:: 0.7
:rtype: :class:`~werkzeug.datastructures.IfRange`
"""
return parse_if_range_header(self.environ.get('HTTP_IF_RANGE'))
@cached_property
def range(self):
"""The parsed `Range` header.
.. versionadded:: 0.7
:rtype: :class:`~werkzeug.datastructures.Range`
"""
return parse_range_header(self.environ.get('HTTP_RANGE'))
class UserAgentMixin(object):
"""Adds a `user_agent` attribute to the request object which contains the
parsed user agent of the browser that triggered the request as a
:class:`~werkzeug.useragents.UserAgent` object.
"""
@cached_property
def user_agent(self):
"""The current user agent."""
from werkzeug.useragents import UserAgent
return UserAgent(self.environ)
class AuthorizationMixin(object):
"""Adds an :attr:`authorization` property that represents the parsed
value of the `Authorization` header as
:class:`~werkzeug.datastructures.Authorization` object.
"""
@cached_property
def authorization(self):
"""The `Authorization` object in parsed form."""
header = self.environ.get('HTTP_AUTHORIZATION')
return parse_authorization_header(header)
class StreamOnlyMixin(object):
"""If mixed in before the request object this will change the bahavior
of it to disable handling of form parsing. This disables the
:attr:`files`, :attr:`form` attributes and will just provide a
:attr:`stream` attribute that however is always available.
.. versionadded:: 0.9
"""
disable_data_descriptor = True
want_form_data_parsed = False
class ETagResponseMixin(object):
"""Adds extra functionality to a response object for etag and cache
handling. This mixin requires an object with at least a `headers`
object that implements a dict like interface similar to
:class:`~werkzeug.datastructures.Headers`.
    If you want the :meth:`freeze` method to automatically add an etag, you
    have to mix this class in before the response base class. The default
    response class does not do that.
"""
@property
def cache_control(self):
"""The Cache-Control general-header field is used to specify
directives that MUST be obeyed by all caching mechanisms along the
request/response chain.
"""
def on_update(cache_control):
if not cache_control and 'cache-control' in self.headers:
del self.headers['cache-control']
elif cache_control:
self.headers['Cache-Control'] = cache_control.to_header()
return parse_cache_control_header(self.headers.get('cache-control'),
on_update,
ResponseCacheControl)
def _wrap_response(self, start, length):
"""Wrap existing Response in case of Range Request context."""
if self.status_code == 206:
self.response = _RangeWrapper(self.response, start, length)
def _is_range_request_processable(self, environ):
"""Return ``True`` if `Range` header is present and if underlying
resource is considered unchanged when compared with `If-Range` header.
"""
return (
'HTTP_IF_RANGE' not in environ
or not is_resource_modified(
environ, self.headers.get('etag'), None,
self.headers.get('last-modified'), ignore_if_range=False
)
) and 'HTTP_RANGE' in environ
def _process_range_request(self, environ, complete_length=None, accept_ranges=None):
"""Handle Range Request related headers (RFC7233). If `Accept-Ranges`
header is valid, and Range Request is processable, we set the headers
as described by the RFC, and wrap the underlying response in a
RangeWrapper.
Returns ``True`` if Range Request can be fulfilled, ``False`` otherwise.
:raises: :class:`~werkzeug.exceptions.RequestedRangeNotSatisfiable`
if `Range` header could not be parsed or satisfied.
"""
from werkzeug.exceptions import RequestedRangeNotSatisfiable
if accept_ranges is None:
return False
self.headers['Accept-Ranges'] = accept_ranges
if not self._is_range_request_processable(environ) or complete_length is None:
return False
parsed_range = parse_range_header(environ.get('HTTP_RANGE'))
if parsed_range is None:
raise RequestedRangeNotSatisfiable(complete_length)
range_tuple = parsed_range.range_for_length(complete_length)
content_range_header = parsed_range.to_content_range_header(complete_length)
if range_tuple is None or content_range_header is None:
raise RequestedRangeNotSatisfiable(complete_length)
content_length = range_tuple[1] - range_tuple[0]
        # Be sure not to send a 206 response
        # if the requested range is the full content.
if content_length != complete_length:
self.headers['Content-Length'] = content_length
self.content_range = content_range_header
self.status_code = 206
self._wrap_response(range_tuple[0], content_length)
return True
return False
def make_conditional(self, request_or_environ, accept_ranges=False,
complete_length=None):
"""Make the response conditional to the request. This method works
best if an etag was defined for the response already. The `add_etag`
method can be used to do that. If called without etag just the date
header is set.
This does nothing if the request method in the request or environ is
anything but GET or HEAD.
For optimal performance when handling range requests, it's recommended
that your response data object implements `seekable`, `seek` and `tell`
methods as described by :py:class:`io.IOBase`. Objects returned by
:meth:`~werkzeug.wsgi.wrap_file` automatically implement those methods.
It does not remove the body of the response because that's something
the :meth:`__call__` function does for us automatically.
Returns self so that you can do ``return resp.make_conditional(req)``
but modifies the object in-place.
:param request_or_environ: a request object or WSGI environment to be
used to make the response conditional
against.
:param accept_ranges: This parameter dictates the value of
`Accept-Ranges` header. If ``False`` (default),
the header is not set. If ``True``, it will be set
to ``"bytes"``. If ``None``, it will be set to
``"none"``. If it's a string, it will use this
value.
:param complete_length: Will be used only in valid Range Requests.
It will set `Content-Range` complete length
value and compute `Content-Length` real value.
This parameter is mandatory for successful
Range Requests completion.
:raises: :class:`~werkzeug.exceptions.RequestedRangeNotSatisfiable`
if `Range` header could not be parsed or satisfied.
"""
environ = _get_environ(request_or_environ)
if environ['REQUEST_METHOD'] in ('GET', 'HEAD'):
            # If the date is not in the headers, add it now. We however
            # will not override an already existing header. Unfortunately
            # this header will be overridden by many WSGI servers including
            # wsgiref.
if 'date' not in self.headers:
self.headers['Date'] = http_date()
accept_ranges = _clean_accept_ranges(accept_ranges)
is206 = self._process_range_request(environ, complete_length, accept_ranges)
if not is206 and not is_resource_modified(
environ, self.headers.get('etag'), None, self.headers.get('last-modified')
):
self.status_code = 304
if self.automatically_set_content_length and 'content-length' not in self.headers:
length = self.calculate_content_length()
if length is not None:
self.headers['Content-Length'] = length
return self
def add_etag(self, overwrite=False, weak=False):
"""Add an etag for the current response if there is none yet."""
if overwrite or 'etag' not in self.headers:
self.set_etag(generate_etag(self.get_data()), weak)
def set_etag(self, etag, weak=False):
"""Set the etag, and override the old one if there was one."""
self.headers['ETag'] = quote_etag(etag, weak)
def get_etag(self):
"""Return a tuple in the form ``(etag, is_weak)``. If there is no
ETag the return value is ``(None, None)``.
"""
return unquote_etag(self.headers.get('ETag'))
def freeze(self, no_etag=False):
"""Call this method if you want to make your response object ready for
pickeling. This buffers the generator if there is one. This also
sets the etag unless `no_etag` is set to `True`.
"""
if not no_etag:
self.add_etag()
super(ETagResponseMixin, self).freeze()
accept_ranges = header_property('Accept-Ranges', doc='''
The `Accept-Ranges` header. Even though the name would indicate
that multiple values are supported, it must be one string token only.
The values ``'bytes'`` and ``'none'`` are common.
.. versionadded:: 0.7''')
def _get_content_range(self):
def on_update(rng):
if not rng:
del self.headers['content-range']
else:
self.headers['Content-Range'] = rng.to_header()
rv = parse_content_range_header(self.headers.get('content-range'),
on_update)
# always provide a content range object to make the descriptor
# more user friendly. It provides an unset() method that can be
# used to remove the header quickly.
if rv is None:
rv = ContentRange(None, None, None, on_update=on_update)
return rv
def _set_content_range(self, value):
if not value:
del self.headers['content-range']
elif isinstance(value, string_types):
self.headers['Content-Range'] = value
else:
self.headers['Content-Range'] = value.to_header()
content_range = property(_get_content_range, _set_content_range, doc='''
The `Content-Range` header as
:class:`~werkzeug.datastructures.ContentRange` object. Even if the
        header is not set, it will provide such an object for easier
manipulation.
.. versionadded:: 0.7''')
del _get_content_range, _set_content_range
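# Illustrative sketch (a _demo helper, not part of this module's API) of
# the conditional machinery above: add an etag, replay it in
# If-None-Match, and make_conditional() downgrades the reply to 304.
def _demo_etag_response_mixin():
    from werkzeug.test import create_environ
    response = Response('cached body')
    response.add_etag()
    etag, _weak = response.get_etag()
    environ = create_environ('/', headers=[
        ('If-None-Match', '"%s"' % etag),
    ])
    response.make_conditional(environ)
    assert response.status_code == 304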
class ResponseStream(object):
"""A file descriptor like object used by the :class:`ResponseStreamMixin` to
represent the body of the stream. It directly pushes into the response
iterable of the response object.
"""
mode = 'wb+'
def __init__(self, response):
self.response = response
self.closed = False
def write(self, value):
if self.closed:
raise ValueError('I/O operation on closed file')
self.response._ensure_sequence(mutable=True)
self.response.response.append(value)
self.response.headers.pop('Content-Length', None)
def writelines(self, seq):
for item in seq:
self.write(item)
def close(self):
self.closed = True
def flush(self):
if self.closed:
raise ValueError('I/O operation on closed file')
def isatty(self):
if self.closed:
raise ValueError('I/O operation on closed file')
return False
@property
def encoding(self):
return self.response.charset
class ResponseStreamMixin(object):
"""Mixin for :class:`BaseRequest` subclasses. Classes that inherit from
this mixin will automatically get a :attr:`stream` property that provides
a write-only interface to the response iterable.
"""
@cached_property
def stream(self):
"""The response iterable as write-only stream."""
return ResponseStream(self)
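# Brief sketch (a _demo helper, not part of this module's API): every
# write() on the stream appends directly to the response iterable and
# drops any previously computed Content-Length header.
def _demo_response_stream_mixin():
    response = Response()
    response.stream.write(b'chunk one, ')
    response.stream.writelines([b'chunk two'])
    assert response.get_data() == b'chunk one, chunk two'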
class CommonRequestDescriptorsMixin(object):
"""A mixin for :class:`BaseRequest` subclasses. Request objects that
mix this class in will automatically get descriptors for a couple of
HTTP headers with automatic type conversion.
.. versionadded:: 0.5
"""
content_type = environ_property('CONTENT_TYPE', doc='''
The Content-Type entity-header field indicates the media type of
the entity-body sent to the recipient or, in the case of the HEAD
method, the media type that would have been sent had the request
been a GET.''')
@cached_property
def content_length(self):
"""The Content-Length entity-header field indicates the size of the
entity-body in bytes or, in the case of the HEAD method, the size of
the entity-body that would have been sent had the request been a
GET.
"""
return get_content_length(self.environ)
content_encoding = environ_property('HTTP_CONTENT_ENCODING', doc='''
The Content-Encoding entity-header field is used as a modifier to the
media-type. When present, its value indicates what additional content
codings have been applied to the entity-body, and thus what decoding
mechanisms must be applied in order to obtain the media-type
referenced by the Content-Type header field.
.. versionadded:: 0.9''')
content_md5 = environ_property('HTTP_CONTENT_MD5', doc='''
The Content-MD5 entity-header field, as defined in RFC 1864, is an
MD5 digest of the entity-body for the purpose of providing an
end-to-end message integrity check (MIC) of the entity-body. (Note:
a MIC is good for detecting accidental modification of the
entity-body in transit, but is not proof against malicious attacks.)
.. versionadded:: 0.9''')
referrer = environ_property('HTTP_REFERER', doc='''
The Referer[sic] request-header field allows the client to specify,
for the server's benefit, the address (URI) of the resource from which
the Request-URI was obtained (the "referrer", although the header
field is misspelled).''')
date = environ_property('HTTP_DATE', None, parse_date, doc='''
The Date general-header field represents the date and time at which
the message was originated, having the same semantics as orig-date
in RFC 822.''')
max_forwards = environ_property('HTTP_MAX_FORWARDS', None, int, doc='''
The Max-Forwards request-header field provides a mechanism with the
TRACE and OPTIONS methods to limit the number of proxies or gateways
that can forward the request to the next inbound server.''')
def _parse_content_type(self):
if not hasattr(self, '_parsed_content_type'):
self._parsed_content_type = \
parse_options_header(self.environ.get('CONTENT_TYPE', ''))
@property
def mimetype(self):
"""Like :attr:`content_type`, but without parameters (eg, without
charset, type etc.) and always lowercase. For example if the content
type is ``text/HTML; charset=utf-8`` the mimetype would be
``'text/html'``.
"""
self._parse_content_type()
return self._parsed_content_type[0].lower()
@property
def mimetype_params(self):
"""The mimetype parameters as dict. For example if the content
type is ``text/html; charset=utf-8`` the params would be
``{'charset': 'utf-8'}``.
"""
self._parse_content_type()
return self._parsed_content_type[1]
@cached_property
def pragma(self):
"""The Pragma general-header field is used to include
implementation-specific directives that might apply to any recipient
along the request/response chain. All pragma directives specify
optional behavior from the viewpoint of the protocol; however, some
systems MAY require that behavior be consistent with the directives.
"""
return parse_set_header(self.environ.get('HTTP_PRAGMA', ''))
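# Illustrative sketch (a _demo helper, not part of this module's API) of
# the descriptors above: mimetype normalises the Content-Type header and
# mimetype_params exposes its parameters as a dict.
def _demo_common_request_descriptors():
    from werkzeug.test import create_environ
    environ = create_environ('/', method='POST', data=b'{}',
                             content_type='application/JSON; charset=utf-8')
    request = Request(environ)
    assert request.mimetype == 'application/json'
    assert request.mimetype_params['charset'] == 'utf-8'
    assert request.content_length == 2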
class CommonResponseDescriptorsMixin(object):
"""A mixin for :class:`BaseResponse` subclasses. Response objects that
mix this class in will automatically get descriptors for a couple of
HTTP headers with automatic type conversion.
"""
def _get_mimetype(self):
ct = self.headers.get('content-type')
if ct:
return ct.split(';')[0].strip()
def _set_mimetype(self, value):
self.headers['Content-Type'] = get_content_type(value, self.charset)
def _get_mimetype_params(self):
def on_update(d):
self.headers['Content-Type'] = \
dump_options_header(self.mimetype, d)
d = parse_options_header(self.headers.get('content-type', ''))[1]
return CallbackDict(d, on_update)
mimetype = property(_get_mimetype, _set_mimetype, doc='''
The mimetype (content type without charset etc.)''')
mimetype_params = property(_get_mimetype_params, doc='''
The mimetype parameters as dict. For example if the content
type is ``text/html; charset=utf-8`` the params would be
``{'charset': 'utf-8'}``.
.. versionadded:: 0.5
''')
location = header_property('Location', doc='''
The Location response-header field is used to redirect the recipient
to a location other than the Request-URI for completion of the request
or identification of a new resource.''')
age = header_property('Age', None, parse_date, http_date, doc='''
The Age response-header field conveys the sender's estimate of the
amount of time since the response (or its revalidation) was
generated at the origin server.
Age values are non-negative decimal integers, representing time in
seconds.''')
content_type = header_property('Content-Type', doc='''
The Content-Type entity-header field indicates the media type of the
entity-body sent to the recipient or, in the case of the HEAD method,
the media type that would have been sent had the request been a GET.
''')
content_length = header_property('Content-Length', None, int, str, doc='''
The Content-Length entity-header field indicates the size of the
entity-body, in decimal number of OCTETs, sent to the recipient or,
in the case of the HEAD method, the size of the entity-body that would
have been sent had the request been a GET.''')
content_location = header_property('Content-Location', doc='''
The Content-Location entity-header field MAY be used to supply the
resource location for the entity enclosed in the message when that
entity is accessible from a location separate from the requested
resource's URI.''')
content_encoding = header_property('Content-Encoding', doc='''
The Content-Encoding entity-header field is used as a modifier to the
media-type. When present, its value indicates what additional content
codings have been applied to the entity-body, and thus what decoding
mechanisms must be applied in order to obtain the media-type
referenced by the Content-Type header field.''')
content_md5 = header_property('Content-MD5', doc='''
The Content-MD5 entity-header field, as defined in RFC 1864, is an
MD5 digest of the entity-body for the purpose of providing an
end-to-end message integrity check (MIC) of the entity-body. (Note:
a MIC is good for detecting accidental modification of the
entity-body in transit, but is not proof against malicious attacks.)
''')
date = header_property('Date', None, parse_date, http_date, doc='''
The Date general-header field represents the date and time at which
the message was originated, having the same semantics as orig-date
in RFC 822.''')
expires = header_property('Expires', None, parse_date, http_date, doc='''
The Expires entity-header field gives the date/time after which the
response is considered stale. A stale cache entry may not normally be
returned by a cache.''')
last_modified = header_property('Last-Modified', None, parse_date,
http_date, doc='''
The Last-Modified entity-header field indicates the date and time at
which the origin server believes the variant was last modified.''')
def _get_retry_after(self):
value = self.headers.get('retry-after')
if value is None:
return
elif value.isdigit():
return datetime.utcnow() + timedelta(seconds=int(value))
return parse_date(value)
def _set_retry_after(self, value):
if value is None:
if 'retry-after' in self.headers:
del self.headers['retry-after']
return
elif isinstance(value, datetime):
value = http_date(value)
else:
value = str(value)
self.headers['Retry-After'] = value
retry_after = property(_get_retry_after, _set_retry_after, doc='''
The Retry-After response-header field can be used with a 503 (Service
Unavailable) response to indicate how long the service is expected
to be unavailable to the requesting client.
Time in seconds until expiration or date.''')
def _set_property(name, doc=None):
def fget(self):
def on_update(header_set):
if not header_set and name in self.headers:
del self.headers[name]
elif header_set:
self.headers[name] = header_set.to_header()
return parse_set_header(self.headers.get(name), on_update)
def fset(self, value):
if not value:
del self.headers[name]
elif isinstance(value, string_types):
self.headers[name] = value
else:
self.headers[name] = dump_header(value)
return property(fget, fset, doc=doc)
vary = _set_property('Vary', doc='''
The Vary field value indicates the set of request-header fields that
fully determines, while the response is fresh, whether a cache is
permitted to use the response to reply to a subsequent request
without revalidation.''')
content_language = _set_property('Content-Language', doc='''
The Content-Language entity-header field describes the natural
language(s) of the intended audience for the enclosed entity. Note
that this might not be equivalent to all the languages used within
the entity-body.''')
allow = _set_property('Allow', doc='''
The Allow entity-header field lists the set of methods supported
by the resource identified by the Request-URI. The purpose of this
field is strictly to inform the recipient of valid methods
associated with the resource. An Allow header field MUST be
present in a 405 (Method Not Allowed) response.''')
del _set_property, _get_mimetype, _set_mimetype, _get_retry_after, \
_set_retry_after
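# Brief sketch (a _demo helper, not part of this module's API) of the
# response-side descriptors: retry_after accepts a datetime or a number
# of seconds, and set-like headers such as Vary take any iterable.
def _demo_common_response_descriptors():
    response = Response()
    response.retry_after = 120  # stored as the string "120"
    response.vary = ['Accept-Encoding', 'Cookie']
    assert response.headers['Retry-After'] == '120'
    assert 'accept-encoding' in response.vary  # HeaderSet ignores case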
class WWWAuthenticateMixin(object):
"""Adds a :attr:`www_authenticate` property to a response object."""
@property
def www_authenticate(self):
"""The `WWW-Authenticate` header in a parsed form."""
def on_update(www_auth):
if not www_auth and 'www-authenticate' in self.headers:
del self.headers['www-authenticate']
elif www_auth:
self.headers['WWW-Authenticate'] = www_auth.to_header()
header = self.headers.get('www-authenticate')
return parse_www_authenticate_header(header, on_update)
class Request(BaseRequest, AcceptMixin, ETagRequestMixin,
UserAgentMixin, AuthorizationMixin,
CommonRequestDescriptorsMixin):
"""Full featured request object implementing the following mixins:
- :class:`AcceptMixin` for accept header parsing
- :class:`ETagRequestMixin` for etag and cache control handling
- :class:`UserAgentMixin` for user agent introspection
- :class:`AuthorizationMixin` for http auth handling
- :class:`CommonRequestDescriptorsMixin` for common headers
"""
class PlainRequest(StreamOnlyMixin, Request):
"""A request object without special form parsing capabilities.
.. versionadded:: 0.9
"""
class Response(BaseResponse, ETagResponseMixin, ResponseStreamMixin,
CommonResponseDescriptorsMixin,
WWWAuthenticateMixin):
"""Full featured response object implementing the following mixins:
- :class:`ETagResponseMixin` for etag and cache control handling
- :class:`ResponseStreamMixin` to add support for the `stream` property
- :class:`CommonResponseDescriptorsMixin` for various HTTP descriptors
- :class:`WWWAuthenticateMixin` for HTTP authentication support
"""
|
Immortalin/python-for-android
|
refs/heads/master
|
python3-alpha/python3-src/Lib/subprocess.py
|
43
|
# subprocess - Subprocesses with accessible I/O streams
#
# For more information about this module, see PEP 324.
#
# Copyright (c) 2003-2005 by Peter Astrand <[email protected]>
#
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/2.4/license for licensing details.
r"""subprocess - Subprocesses with accessible I/O streams
This module allows you to spawn processes, connect to their
input/output/error pipes, and obtain their return codes. This module
intends to replace several other, older modules and functions, like:
os.system
os.spawn*
Information about how the subprocess module can be used to replace these
modules and functions can be found below.
Using the subprocess module
===========================
This module defines one class called Popen:
class Popen(args, bufsize=0, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=True, shell=False,
cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0,
restore_signals=True, start_new_session=False, pass_fds=()):
Arguments are:
args should be a string, or a sequence of program arguments. The
program to execute is normally the first item in the args sequence or
string, but can be explicitly set by using the executable argument.
On POSIX, with shell=False (default): In this case, the Popen class
uses os.execvp() to execute the child program. args should normally
be a sequence. A string will be treated as a sequence with the string
as the only item (the program to execute).
On POSIX, with shell=True: If args is a string, it specifies the
command string to execute through the shell. If args is a sequence,
the first item specifies the command string, and any additional items
will be treated as additional shell arguments.
On Windows: the Popen class uses CreateProcess() to execute the child
program, which operates on strings. If args is a sequence, it will be
converted to a string using the list2cmdline method. Please note that
not all MS Windows applications interpret the command line the same
way: The list2cmdline is designed for applications using the same
rules as the MS C runtime.
bufsize, if given, has the same meaning as the corresponding argument
to the built-in open() function: 0 means unbuffered, 1 means line
buffered, any other positive value means use a buffer of
(approximately) that size. A negative bufsize means to use the system
default, which usually means fully buffered. The default value for
bufsize is 0 (unbuffered).
stdin, stdout and stderr specify the executed programs' standard
input, standard output and standard error file handles, respectively.
Valid values are PIPE, an existing file descriptor (a positive
integer), an existing file object, and None. PIPE indicates that a
new pipe to the child should be created. With None, no redirection
will occur; the child's file handles will be inherited from the
parent. Additionally, stderr can be STDOUT, which indicates that the
stderr data from the applications should be captured into the same
file handle as for stdout.
On POSIX, if preexec_fn is set to a callable object, this object will be
called in the child process just before the child is executed. The use
of preexec_fn is not thread safe, using it in the presence of threads
could lead to a deadlock in the child process before the new executable
is executed.
If close_fds is true, all file descriptors except 0, 1 and 2 will be
closed before the child process is executed. The default for close_fds
varies by platform: Always true on POSIX. True when stdin/stdout/stderr
are None on Windows, false otherwise.
pass_fds is an optional sequence of file descriptors to keep open between the
parent and child. Providing any pass_fds implicitly sets close_fds to true.
If shell is true, the specified command will be executed through the
shell.
If cwd is not None, the current directory will be changed to cwd
before the child is executed.
On POSIX, if restore_signals is True all signals that Python sets to
SIG_IGN are restored to SIG_DFL in the child process before the exec.
Currently this includes the SIGPIPE, SIGXFZ and SIGXFSZ signals. This
parameter does nothing on Windows.
On POSIX, if start_new_session is True, the setsid() system call will be made
in the child process prior to executing the command.
If env is not None, it defines the environment variables for the new
process.
If universal_newlines is true, the file objects stdout and stderr are
opened as text files, but lines may be terminated by any of '\n',
the Unix end-of-line convention, '\r', the old Macintosh convention or
'\r\n', the Windows convention. All of these external representations
are seen as '\n' by the Python program. Note: This feature is only
available if Python is built with universal newline support (the
default). Also, the newlines attribute of the file objects stdout,
stdin and stderr are not updated by the communicate() method.
The startupinfo and creationflags, if given, will be passed to the
underlying CreateProcess() function. They can specify things such as
appearance of the main window and priority for the new process.
(Windows only)
This module also defines some shortcut functions:
call(*popenargs, **kwargs):
Run command with arguments. Wait for command to complete, then
return the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
>>> retcode = subprocess.call(["ls", "-l"])
check_call(*popenargs, **kwargs):
Run command with arguments. Wait for command to complete. If the
exit code was zero then return, otherwise raise
CalledProcessError. The CalledProcessError object will have the
return code in the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
>>> subprocess.check_call(["ls", "-l"])
0
getstatusoutput(cmd):
Return (status, output) of executing cmd in a shell.
Execute the string 'cmd' in a shell with os.popen() and return a 2-tuple
(status, output). cmd is actually run as '{ cmd ; } 2>&1', so that the
returned output will contain output or error messages. A trailing newline
is stripped from the output. The exit status for the command can be
interpreted according to the rules for the C function wait(). Example:
>>> subprocess.getstatusoutput('ls /bin/ls')
(0, '/bin/ls')
>>> subprocess.getstatusoutput('cat /bin/junk')
(256, 'cat: /bin/junk: No such file or directory')
>>> subprocess.getstatusoutput('/bin/junk')
(256, 'sh: /bin/junk: not found')
getoutput(cmd):
Return output (stdout or stderr) of executing cmd in a shell.
Like getstatusoutput(), except the exit status is ignored and the return
value is a string containing the command's output. Example:
>>> subprocess.getoutput('ls /bin/ls')
'/bin/ls'
check_output(*popenargs, **kwargs):
Run command with arguments and return its output as a byte string.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
>>> output = subprocess.check_output(["ls", "-l", "/dev/null"])
Exceptions
----------
Exceptions raised in the child process, before the new program has
started to execute, will be re-raised in the parent. Additionally,
the exception object will have one extra attribute called
'child_traceback', which is a string containing traceback information
from the child's point of view.
The most common exception raised is OSError. This occurs, for
example, when trying to execute a non-existent file. Applications
should prepare for OSErrors.
A ValueError will be raised if Popen is called with invalid arguments.
check_call() and check_output() will raise CalledProcessError, if the
called process returns a non-zero return code.
Security
--------
Unlike some other popen functions, this implementation will never call
/bin/sh implicitly. This means that all characters, including shell
metacharacters, can safely be passed to child processes.
Popen objects
=============
Instances of the Popen class have the following methods:
poll()
Check if child process has terminated. Returns returncode
attribute.
wait()
Wait for child process to terminate. Returns returncode attribute.
communicate(input=None)
Interact with process: Send data to stdin. Read data from stdout
and stderr, until end-of-file is reached. Wait for process to
terminate. The optional input argument should be a string to be
sent to the child process, or None, if no data should be sent to
the child.
communicate() returns a tuple (stdout, stderr).
Note: The data read is buffered in memory, so do not use this
method if the data size is large or unlimited.
The following attributes are also available:
stdin
If the stdin argument is PIPE, this attribute is a file object
that provides input to the child process. Otherwise, it is None.
stdout
If the stdout argument is PIPE, this attribute is a file object
that provides output from the child process. Otherwise, it is
None.
stderr
If the stderr argument is PIPE, this attribute is file object that
provides error output from the child process. Otherwise, it is
None.
pid
The process ID of the child process.
returncode
The child return code. A None value indicates that the process
hasn't terminated yet. A negative value -N indicates that the
child was terminated by signal N (POSIX only).
Replacing older functions with the subprocess module
====================================================
In this section, "a ==> b" means that b can be used as a replacement
for a.
Note: All functions in this section fail (more or less) silently if
the executed program cannot be found; this module raises an OSError
exception.
In the following examples, we assume that the subprocess module is
imported with "from subprocess import *".
Replacing /bin/sh shell backquote
---------------------------------
output=`mycmd myarg`
==>
output = Popen(["mycmd", "myarg"], stdout=PIPE).communicate()[0]
Replacing shell pipe line
-------------------------
output=`dmesg | grep hda`
==>
p1 = Popen(["dmesg"], stdout=PIPE)
p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE)
output = p2.communicate()[0]
Replacing os.system()
---------------------
sts = os.system("mycmd" + " myarg")
==>
p = Popen("mycmd" + " myarg", shell=True)
pid, sts = os.waitpid(p.pid, 0)
Note:
* Calling the program through the shell is usually not required.
* It's easier to look at the returncode attribute than the
exitstatus.
A more real-world example would look like this:
try:
retcode = call("mycmd" + " myarg", shell=True)
if retcode < 0:
print("Child was terminated by signal", -retcode, file=sys.stderr)
else:
print("Child returned", retcode, file=sys.stderr)
except OSError as e:
print("Execution failed:", e, file=sys.stderr)
Replacing os.spawn*
-------------------
P_NOWAIT example:
pid = os.spawnlp(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg")
==>
pid = Popen(["/bin/mycmd", "myarg"]).pid
P_WAIT example:
retcode = os.spawnlp(os.P_WAIT, "/bin/mycmd", "mycmd", "myarg")
==>
retcode = call(["/bin/mycmd", "myarg"])
Vector example:
os.spawnvp(os.P_NOWAIT, path, args)
==>
Popen([path] + args[1:])
Environment example:
os.spawnlpe(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg", env)
==>
Popen(["/bin/mycmd", "myarg"], env={"PATH": "/usr/bin"})
"""
import sys
mswindows = (sys.platform == "win32")
import io
import os
import traceback
import gc
import signal
import builtins
import warnings
import errno
# Exception classes used by this module.
class CalledProcessError(Exception):
"""This exception is raised when a process run by check_call() or
check_output() returns a non-zero exit status.
The exit status will be stored in the returncode attribute;
check_output() will also store the output in the output attribute.
"""
def __init__(self, returncode, cmd, output=None):
self.returncode = returncode
self.cmd = cmd
self.output = output
def __str__(self):
return "Command '%s' returned non-zero exit status %d" % (self.cmd, self.returncode)
if mswindows:
import threading
import msvcrt
import _subprocess
class STARTUPINFO:
dwFlags = 0
hStdInput = None
hStdOutput = None
hStdError = None
wShowWindow = 0
class pywintypes:
error = IOError
else:
import select
_has_poll = hasattr(select, 'poll')
import fcntl
import pickle
try:
import _posixsubprocess
except ImportError:
_posixsubprocess = None
warnings.warn("The _posixsubprocess module is not being used. "
"Child process reliability may suffer if your "
"program uses threads.", RuntimeWarning)
# When select or poll has indicated that the file is writable,
# we can write up to _PIPE_BUF bytes without risk of blocking.
# POSIX defines PIPE_BUF as >= 512.
_PIPE_BUF = getattr(select, 'PIPE_BUF', 512)
_FD_CLOEXEC = getattr(fcntl, 'FD_CLOEXEC', 1)
def _set_cloexec(fd, cloexec):
old = fcntl.fcntl(fd, fcntl.F_GETFD)
if cloexec:
fcntl.fcntl(fd, fcntl.F_SETFD, old | _FD_CLOEXEC)
else:
fcntl.fcntl(fd, fcntl.F_SETFD, old & ~_FD_CLOEXEC)
if _posixsubprocess:
_create_pipe = _posixsubprocess.cloexec_pipe
else:
def _create_pipe():
fds = os.pipe()
_set_cloexec(fds[0], True)
_set_cloexec(fds[1], True)
return fds
__all__ = ["Popen", "PIPE", "STDOUT", "call", "check_call", "getstatusoutput",
"getoutput", "check_output", "CalledProcessError"]
if mswindows:
from _subprocess import (CREATE_NEW_CONSOLE, CREATE_NEW_PROCESS_GROUP,
STD_INPUT_HANDLE, STD_OUTPUT_HANDLE,
STD_ERROR_HANDLE, SW_HIDE,
STARTF_USESTDHANDLES, STARTF_USESHOWWINDOW)
__all__.extend(["CREATE_NEW_CONSOLE", "CREATE_NEW_PROCESS_GROUP",
"STD_INPUT_HANDLE", "STD_OUTPUT_HANDLE",
"STD_ERROR_HANDLE", "SW_HIDE",
"STARTF_USESTDHANDLES", "STARTF_USESHOWWINDOW"])
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except:
MAXFD = 256
_active = []
def _cleanup():
for inst in _active[:]:
res = inst._internal_poll(_deadstate=sys.maxsize)
if res is not None and res >= 0:
try:
_active.remove(inst)
except ValueError:
# This can happen if two threads create a new Popen instance.
# It's harmless that it was already removed, so ignore.
pass
PIPE = -1
STDOUT = -2
def _eintr_retry_call(func, *args):
while True:
try:
return func(*args)
except (OSError, IOError) as e:
if e.errno == errno.EINTR:
continue
raise
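# Minimal usage sketch for the helper above (fd is a hypothetical pipe
# descriptor): a read interrupted by a signal (EINTR) is retried
# transparently instead of surfacing a spurious OSError.
def _demo_eintr_retry(fd):
    return _eintr_retry_call(os.read, fd, 4096)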
def call(*popenargs, **kwargs):
"""Run command with arguments. Wait for command to complete, then
return the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
retcode = call(["ls", "-l"])
"""
return Popen(*popenargs, **kwargs).wait()
def check_call(*popenargs, **kwargs):
"""Run command with arguments. Wait for command to complete. If
the exit code was zero then return, otherwise raise
CalledProcessError. The CalledProcessError object will have the
return code in the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
check_call(["ls", "-l"])
"""
retcode = call(*popenargs, **kwargs)
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise CalledProcessError(retcode, cmd)
return 0
def check_output(*popenargs, **kwargs):
r"""Run command with arguments and return its output as a byte string.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
>>> check_output(["ls", "-l", "/dev/null"])
b'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
The stdout argument is not allowed as it is used internally.
To capture standard error in the result, use stderr=STDOUT.
>>> check_output(["/bin/sh", "-c",
... "ls -l non_existent_file ; exit 0"],
... stderr=STDOUT)
b'ls: non_existent_file: No such file or directory\n'
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = Popen(*popenargs, stdout=PIPE, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise CalledProcessError(retcode, cmd, output=output)
return output
def list2cmdline(seq):
"""
Translate a sequence of arguments into a command line
string, using the same rules as the MS C runtime:
1) Arguments are delimited by white space, which is either a
space or a tab.
2) A string surrounded by double quotation marks is
interpreted as a single argument, regardless of white space
contained within. A quoted string can be embedded in an
argument.
3) A double quotation mark preceded by a backslash is
interpreted as a literal double quotation mark.
4) Backslashes are interpreted literally, unless they
immediately precede a double quotation mark.
5) If backslashes immediately precede a double quotation mark,
every pair of backslashes is interpreted as a literal
backslash. If the number of backslashes is odd, the last
backslash escapes the next double quotation mark as
described in rule 3.
"""
# See
# http://msdn.microsoft.com/en-us/library/17w5ykft.aspx
# or search http://msdn.microsoft.com for
# "Parsing C++ Command-Line Arguments"
result = []
needquote = False
for arg in seq:
bs_buf = []
# Add a space to separate this argument from the others
if result:
result.append(' ')
needquote = (" " in arg) or ("\t" in arg) or not arg
if needquote:
result.append('"')
for c in arg:
if c == '\\':
# Don't know if we need to double yet.
bs_buf.append(c)
elif c == '"':
# Double backslashes.
result.append('\\' * len(bs_buf)*2)
bs_buf = []
result.append('\\"')
else:
# Normal char
if bs_buf:
result.extend(bs_buf)
bs_buf = []
result.append(c)
# Add remaining backslashes, if any.
if bs_buf:
result.extend(bs_buf)
if needquote:
result.extend(bs_buf)
result.append('"')
return ''.join(result)
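# Illustrative sketch of the quoting rules implemented above: whitespace
# forces quoting, embedded double quotes are backslash-escaped, and plain
# arguments pass through untouched.
def _demo_list2cmdline():
    assert list2cmdline(['simple']) == 'simple'
    assert list2cmdline(['with space']) == '"with space"'
    assert list2cmdline(['say "hi"']) == '"say \\"hi\\""'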
# Various tools for executing commands and looking at their output and status.
#
# NB This only works (and is only relevant) for POSIX.
def getstatusoutput(cmd):
"""Return (status, output) of executing cmd in a shell.
Execute the string 'cmd' in a shell with os.popen() and return a 2-tuple
(status, output). cmd is actually run as '{ cmd ; } 2>&1', so that the
returned output will contain output or error messages. A trailing newline
is stripped from the output. The exit status for the command can be
interpreted according to the rules for the C function wait(). Example:
>>> import subprocess
>>> subprocess.getstatusoutput('ls /bin/ls')
(0, '/bin/ls')
>>> subprocess.getstatusoutput('cat /bin/junk')
(256, 'cat: /bin/junk: No such file or directory')
>>> subprocess.getstatusoutput('/bin/junk')
(256, 'sh: /bin/junk: not found')
"""
pipe = os.popen('{ ' + cmd + '; } 2>&1', 'r')
text = pipe.read()
sts = pipe.close()
if sts is None: sts = 0
if text[-1:] == '\n': text = text[:-1]
return sts, text
def getoutput(cmd):
"""Return output (stdout or stderr) of executing cmd in a shell.
Like getstatusoutput(), except the exit status is ignored and the return
value is a string containing the command's output. Example:
>>> import subprocess
>>> subprocess.getoutput('ls /bin/ls')
'/bin/ls'
"""
return getstatusoutput(cmd)[1]
_PLATFORM_DEFAULT_CLOSE_FDS = object()
class Popen(object):
def __init__(self, args, bufsize=0, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=_PLATFORM_DEFAULT_CLOSE_FDS,
shell=False, cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0,
restore_signals=True, start_new_session=False,
pass_fds=()):
"""Create new Popen instance."""
_cleanup()
self._child_created = False
if bufsize is None:
bufsize = 0 # Restore default
if not isinstance(bufsize, int):
raise TypeError("bufsize must be an integer")
if mswindows:
if preexec_fn is not None:
raise ValueError("preexec_fn is not supported on Windows "
"platforms")
any_stdio_set = (stdin is not None or stdout is not None or
stderr is not None)
if close_fds is _PLATFORM_DEFAULT_CLOSE_FDS:
if any_stdio_set:
close_fds = False
else:
close_fds = True
elif close_fds and any_stdio_set:
raise ValueError(
"close_fds is not supported on Windows platforms"
" if you redirect stdin/stdout/stderr")
else:
# POSIX
if close_fds is _PLATFORM_DEFAULT_CLOSE_FDS:
close_fds = True
if pass_fds and not close_fds:
warnings.warn("pass_fds overriding close_fds.", RuntimeWarning)
close_fds = True
if startupinfo is not None:
raise ValueError("startupinfo is only supported on Windows "
"platforms")
if creationflags != 0:
raise ValueError("creationflags is only supported on Windows "
"platforms")
self.stdin = None
self.stdout = None
self.stderr = None
self.pid = None
self.returncode = None
self.universal_newlines = universal_newlines
# Input and output objects. The general principle is like
# this:
#
# Parent Child
# ------ -----
# p2cwrite ---stdin---> p2cread
# c2pread <--stdout--- c2pwrite
# errread <--stderr--- errwrite
#
# On POSIX, the child objects are file descriptors. On
# Windows, these are Windows file handles. The parent objects
# are file descriptors on both platforms. The parent objects
# are -1 when not using PIPEs. The child objects are -1
# when not redirecting.
(p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite) = self._get_handles(stdin, stdout, stderr)
# We wrap OS handles *before* launching the child, otherwise a
# quickly terminating child could make our fds unwrappable
# (see #8458).
if mswindows:
if p2cwrite != -1:
p2cwrite = msvcrt.open_osfhandle(p2cwrite.Detach(), 0)
if c2pread != -1:
c2pread = msvcrt.open_osfhandle(c2pread.Detach(), 0)
if errread != -1:
errread = msvcrt.open_osfhandle(errread.Detach(), 0)
if p2cwrite != -1:
self.stdin = io.open(p2cwrite, 'wb', bufsize)
if self.universal_newlines:
self.stdin = io.TextIOWrapper(self.stdin, write_through=True)
if c2pread != -1:
self.stdout = io.open(c2pread, 'rb', bufsize)
if universal_newlines:
self.stdout = io.TextIOWrapper(self.stdout)
if errread != -1:
self.stderr = io.open(errread, 'rb', bufsize)
if universal_newlines:
self.stderr = io.TextIOWrapper(self.stderr)
try:
self._execute_child(args, executable, preexec_fn, close_fds,
pass_fds, cwd, env, universal_newlines,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite,
restore_signals, start_new_session)
except:
# Cleanup if the child failed starting
for f in filter(None, [self.stdin, self.stdout, self.stderr]):
try:
f.close()
except EnvironmentError:
# Ignore EBADF or other errors
pass
raise
def _translate_newlines(self, data, encoding):
data = data.replace(b"\r\n", b"\n").replace(b"\r", b"\n")
return data.decode(encoding)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
if self.stdout:
self.stdout.close()
if self.stderr:
self.stderr.close()
if self.stdin:
self.stdin.close()
# Wait for the process to terminate, to avoid zombies.
self.wait()
def __del__(self, _maxsize=sys.maxsize, _active=_active):
# If __init__ hasn't had a chance to execute (e.g. if it
# was passed an undeclared keyword argument), we don't
# have a _child_created attribute at all.
if not getattr(self, '_child_created', False):
# We didn't get to successfully create a child process.
return
# In case the child hasn't been waited on, check if it's done.
self._internal_poll(_deadstate=_maxsize)
if self.returncode is None and _active is not None:
# Child is still running, keep us alive until we can wait on it.
_active.append(self)
def communicate(self, input=None):
"""Interact with process: Send data to stdin. Read data from
stdout and stderr, until end-of-file is reached. Wait for
process to terminate. The optional input argument should be a
string to be sent to the child process, or None, if no data
should be sent to the child.
communicate() returns a tuple (stdout, stderr)."""
# Optimization: If we are only using one pipe, or no pipe at
# all, using select() or threads is unnecessary.
if [self.stdin, self.stdout, self.stderr].count(None) >= 2:
stdout = None
stderr = None
if self.stdin:
if input:
try:
self.stdin.write(input)
except IOError as e:
if e.errno != errno.EPIPE and e.errno != errno.EINVAL:
raise
self.stdin.close()
elif self.stdout:
stdout = _eintr_retry_call(self.stdout.read)
self.stdout.close()
elif self.stderr:
stderr = _eintr_retry_call(self.stderr.read)
self.stderr.close()
self.wait()
return (stdout, stderr)
return self._communicate(input)
def poll(self):
return self._internal_poll()
if mswindows:
#
# Windows methods
#
def _get_handles(self, stdin, stdout, stderr):
"""Construct and return tuple with IO objects:
p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
"""
if stdin is None and stdout is None and stderr is None:
return (-1, -1, -1, -1, -1, -1)
p2cread, p2cwrite = -1, -1
c2pread, c2pwrite = -1, -1
errread, errwrite = -1, -1
if stdin is None:
p2cread = _subprocess.GetStdHandle(_subprocess.STD_INPUT_HANDLE)
if p2cread is None:
p2cread, _ = _subprocess.CreatePipe(None, 0)
elif stdin == PIPE:
p2cread, p2cwrite = _subprocess.CreatePipe(None, 0)
elif isinstance(stdin, int):
p2cread = msvcrt.get_osfhandle(stdin)
else:
# Assuming file-like object
p2cread = msvcrt.get_osfhandle(stdin.fileno())
p2cread = self._make_inheritable(p2cread)
if stdout is None:
c2pwrite = _subprocess.GetStdHandle(_subprocess.STD_OUTPUT_HANDLE)
if c2pwrite is None:
_, c2pwrite = _subprocess.CreatePipe(None, 0)
elif stdout == PIPE:
c2pread, c2pwrite = _subprocess.CreatePipe(None, 0)
elif isinstance(stdout, int):
c2pwrite = msvcrt.get_osfhandle(stdout)
else:
# Assuming file-like object
c2pwrite = msvcrt.get_osfhandle(stdout.fileno())
c2pwrite = self._make_inheritable(c2pwrite)
if stderr is None:
errwrite = _subprocess.GetStdHandle(_subprocess.STD_ERROR_HANDLE)
if errwrite is None:
_, errwrite = _subprocess.CreatePipe(None, 0)
elif stderr == PIPE:
errread, errwrite = _subprocess.CreatePipe(None, 0)
elif stderr == STDOUT:
errwrite = c2pwrite
elif isinstance(stderr, int):
errwrite = msvcrt.get_osfhandle(stderr)
else:
# Assuming file-like object
errwrite = msvcrt.get_osfhandle(stderr.fileno())
errwrite = self._make_inheritable(errwrite)
return (p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite)
def _make_inheritable(self, handle):
"""Return a duplicate of handle, which is inheritable"""
return _subprocess.DuplicateHandle(_subprocess.GetCurrentProcess(),
handle, _subprocess.GetCurrentProcess(), 0, 1,
_subprocess.DUPLICATE_SAME_ACCESS)
def _find_w9xpopen(self):
"""Find and return absolut path to w9xpopen.exe"""
w9xpopen = os.path.join(
os.path.dirname(_subprocess.GetModuleFileName(0)),
"w9xpopen.exe")
if not os.path.exists(w9xpopen):
# Eeek - file-not-found - possibly an embedding
# situation - see if we can locate it in sys.exec_prefix
w9xpopen = os.path.join(os.path.dirname(sys.exec_prefix),
"w9xpopen.exe")
if not os.path.exists(w9xpopen):
raise RuntimeError("Cannot locate w9xpopen.exe, which is "
"needed for Popen to work with your "
"shell or platform.")
return w9xpopen
def _execute_child(self, args, executable, preexec_fn, close_fds,
pass_fds, cwd, env, universal_newlines,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite,
unused_restore_signals, unused_start_new_session):
"""Execute program (MS Windows version)"""
assert not pass_fds, "pass_fds not supported on Windows."
if not isinstance(args, str):
args = list2cmdline(args)
# Process startup details
if startupinfo is None:
startupinfo = STARTUPINFO()
if -1 not in (p2cread, c2pwrite, errwrite):
startupinfo.dwFlags |= _subprocess.STARTF_USESTDHANDLES
startupinfo.hStdInput = p2cread
startupinfo.hStdOutput = c2pwrite
startupinfo.hStdError = errwrite
if shell:
startupinfo.dwFlags |= _subprocess.STARTF_USESHOWWINDOW
startupinfo.wShowWindow = _subprocess.SW_HIDE
comspec = os.environ.get("COMSPEC", "cmd.exe")
                args = '{} /c "{}"'.format(comspec, args)
if (_subprocess.GetVersion() >= 0x80000000 or
os.path.basename(comspec).lower() == "command.com"):
# Win9x, or using command.com on NT. We need to
# use the w9xpopen intermediate program. For more
# information, see KB Q150956
# (http://web.archive.org/web/20011105084002/http://support.microsoft.com/support/kb/articles/Q150/9/56.asp)
w9xpopen = self._find_w9xpopen()
args = '"%s" %s' % (w9xpopen, args)
# Not passing CREATE_NEW_CONSOLE has been known to
# cause random failures on win9x. Specifically a
# dialog: "Your program accessed mem currently in
# use at xxx" and a hopeful warning about the
# stability of your system. Cost is Ctrl+C won't
# kill children.
creationflags |= _subprocess.CREATE_NEW_CONSOLE
# Start the process
try:
hp, ht, pid, tid = _subprocess.CreateProcess(executable, args,
# no special security
None, None,
int(not close_fds),
creationflags,
env,
cwd,
startupinfo)
except pywintypes.error as e:
# Translate pywintypes.error to WindowsError, which is
# a subclass of OSError. FIXME: We should really
# translate errno using _sys_errlist (or similar), but
# how can this be done from Python?
raise WindowsError(*e.args)
finally:
# Child is launched. Close the parent's copy of those pipe
# handles that only the child should have open. You need
# to make sure that no handles to the write end of the
# output pipe are maintained in this process or else the
# pipe will not close when the child process exits and the
# ReadFile will hang.
if p2cread != -1:
p2cread.Close()
if c2pwrite != -1:
c2pwrite.Close()
if errwrite != -1:
errwrite.Close()
# Retain the process handle, but close the thread handle
self._child_created = True
self._handle = hp
self.pid = pid
ht.Close()
def _internal_poll(self, _deadstate=None,
_WaitForSingleObject=_subprocess.WaitForSingleObject,
_WAIT_OBJECT_0=_subprocess.WAIT_OBJECT_0,
_GetExitCodeProcess=_subprocess.GetExitCodeProcess):
"""Check if child process has terminated. Returns returncode
attribute.
This method is called by __del__, so it can only refer to objects
in its local scope.
"""
if self.returncode is None:
if _WaitForSingleObject(self._handle, 0) == _WAIT_OBJECT_0:
self.returncode = _GetExitCodeProcess(self._handle)
return self.returncode
def wait(self):
"""Wait for child process to terminate. Returns returncode
attribute."""
if self.returncode is None:
_subprocess.WaitForSingleObject(self._handle,
_subprocess.INFINITE)
self.returncode = _subprocess.GetExitCodeProcess(self._handle)
return self.returncode
def _readerthread(self, fh, buffer):
buffer.append(fh.read())
fh.close()
def _communicate(self, input):
stdout = None # Return
stderr = None # Return
if self.stdout:
stdout = []
stdout_thread = threading.Thread(target=self._readerthread,
args=(self.stdout, stdout))
stdout_thread.daemon = True
stdout_thread.start()
if self.stderr:
stderr = []
stderr_thread = threading.Thread(target=self._readerthread,
args=(self.stderr, stderr))
stderr_thread.daemon = True
stderr_thread.start()
if self.stdin:
if input is not None:
try:
self.stdin.write(input)
except IOError as e:
if e.errno != errno.EPIPE:
raise
self.stdin.close()
if self.stdout:
stdout_thread.join()
if self.stderr:
stderr_thread.join()
# All data exchanged. Translate lists into strings.
if stdout is not None:
stdout = stdout[0]
if stderr is not None:
stderr = stderr[0]
self.wait()
return (stdout, stderr)
def send_signal(self, sig):
"""Send a signal to the process
"""
if sig == signal.SIGTERM:
self.terminate()
elif sig == signal.CTRL_C_EVENT:
os.kill(self.pid, signal.CTRL_C_EVENT)
elif sig == signal.CTRL_BREAK_EVENT:
os.kill(self.pid, signal.CTRL_BREAK_EVENT)
else:
raise ValueError("Unsupported signal: {}".format(sig))
def terminate(self):
"""Terminates the process
"""
_subprocess.TerminateProcess(self._handle, 1)
kill = terminate
else:
#
# POSIX methods
#
def _get_handles(self, stdin, stdout, stderr):
"""Construct and return tuple with IO objects:
p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
"""
p2cread, p2cwrite = -1, -1
c2pread, c2pwrite = -1, -1
errread, errwrite = -1, -1
if stdin is None:
pass
elif stdin == PIPE:
p2cread, p2cwrite = _create_pipe()
elif isinstance(stdin, int):
p2cread = stdin
else:
# Assuming file-like object
p2cread = stdin.fileno()
if stdout is None:
pass
elif stdout == PIPE:
c2pread, c2pwrite = _create_pipe()
elif isinstance(stdout, int):
c2pwrite = stdout
else:
# Assuming file-like object
c2pwrite = stdout.fileno()
if stderr is None:
pass
elif stderr == PIPE:
errread, errwrite = _create_pipe()
elif stderr == STDOUT:
errwrite = c2pwrite
elif isinstance(stderr, int):
errwrite = stderr
else:
# Assuming file-like object
errwrite = stderr.fileno()
return (p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite)
def _close_fds(self, fds_to_keep):
start_fd = 3
for fd in sorted(fds_to_keep):
if fd >= start_fd:
os.closerange(start_fd, fd)
start_fd = fd + 1
if start_fd <= MAXFD:
os.closerange(start_fd, MAXFD)
def _execute_child(self, args, executable, preexec_fn, close_fds,
pass_fds, cwd, env, universal_newlines,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite,
restore_signals, start_new_session):
"""Execute program (POSIX version)"""
if isinstance(args, str):
args = [args]
else:
args = list(args)
if shell:
args = ["/system/bin/sh", "-c"] + args # Android Hack 5-Apr-2012 ... there should be a more elegant approach.
if executable:
args[0] = executable
if executable is None:
executable = args[0]
# For transferring possible exec failure from child to parent.
# Data format: "exception name:hex errno:description"
# Pickle is not used; it is complex and involves memory allocation.
errpipe_read, errpipe_write = _create_pipe()
try:
try:
if _posixsubprocess:
# We must avoid complex work that could involve
# malloc or free in the child process to avoid
# potential deadlocks, thus we do all this here.
# and pass it to fork_exec()
if env is not None:
env_list = [os.fsencode(k) + b'=' + os.fsencode(v)
for k, v in env.items()]
else:
env_list = None # Use execv instead of execve.
executable = os.fsencode(executable)
if os.path.dirname(executable):
executable_list = (executable,)
else:
# This matches the behavior of os._execvpe().
executable_list = tuple(
os.path.join(os.fsencode(dir), executable)
for dir in os.get_exec_path(env))
fds_to_keep = set(pass_fds)
fds_to_keep.add(errpipe_write)
self.pid = _posixsubprocess.fork_exec(
args, executable_list,
close_fds, sorted(fds_to_keep), cwd, env_list,
p2cread, p2cwrite, c2pread, c2pwrite,
errread, errwrite,
errpipe_read, errpipe_write,
restore_signals, start_new_session, preexec_fn)
else:
# Pure Python implementation: It is not thread safe.
# This implementation may deadlock in the child if your
# parent process has any other threads running.
gc_was_enabled = gc.isenabled()
# Disable gc to avoid bug where gc -> file_dealloc ->
# write to stderr -> hang. See issue1336
gc.disable()
try:
self.pid = os.fork()
except:
if gc_was_enabled:
gc.enable()
raise
self._child_created = True
if self.pid == 0:
# Child
try:
# Close parent's pipe ends
if p2cwrite != -1:
os.close(p2cwrite)
if c2pread != -1:
os.close(c2pread)
if errread != -1:
os.close(errread)
os.close(errpipe_read)
                                # When duplicating fds below, if one of the
                                # target fds is 0, 1 or 2, it could otherwise
                                # be overwritten (#12607).
if c2pwrite == 0:
c2pwrite = os.dup(c2pwrite)
if errwrite == 0 or errwrite == 1:
errwrite = os.dup(errwrite)
# Dup fds for child
def _dup2(a, b):
# dup2() removes the CLOEXEC flag but
# we must do it ourselves if dup2()
# would be a no-op (issue #10806).
if a == b:
_set_cloexec(a, False)
elif a != -1:
os.dup2(a, b)
_dup2(p2cread, 0)
_dup2(c2pwrite, 1)
_dup2(errwrite, 2)
# Close pipe fds. Make sure we don't close the
# same fd more than once, or standard fds.
closed = set()
for fd in [p2cread, c2pwrite, errwrite]:
if fd > 2 and fd not in closed:
os.close(fd)
closed.add(fd)
# Close all other fds, if asked for
if close_fds:
fds_to_keep = set(pass_fds)
fds_to_keep.add(errpipe_write)
self._close_fds(fds_to_keep)
if cwd is not None:
os.chdir(cwd)
# This is a copy of Python/pythonrun.c
# _Py_RestoreSignals(). If that were exposed
# as a sys._py_restoresignals func it would be
# better.. but this pure python implementation
# isn't likely to be used much anymore.
if restore_signals:
signals = ('SIGPIPE', 'SIGXFZ', 'SIGXFSZ')
for sig in signals:
if hasattr(signal, sig):
signal.signal(getattr(signal, sig),
signal.SIG_DFL)
if start_new_session and hasattr(os, 'setsid'):
os.setsid()
if preexec_fn:
preexec_fn()
if env is None:
os.execvp(executable, args)
else:
os.execvpe(executable, args, env)
except:
try:
exc_type, exc_value = sys.exc_info()[:2]
if isinstance(exc_value, OSError):
errno_num = exc_value.errno
else:
errno_num = 0
message = '%s:%x:%s' % (exc_type.__name__,
errno_num, exc_value)
message = message.encode(errors="surrogatepass")
os.write(errpipe_write, message)
except Exception:
# We MUST not allow anything odd happening
# above to prevent us from exiting below.
pass
# This exitcode won't be reported to applications
# so it really doesn't matter what we return.
os._exit(255)
# Parent
if gc_was_enabled:
gc.enable()
finally:
# be sure the FD is closed no matter what
os.close(errpipe_write)
if p2cread != -1 and p2cwrite != -1:
os.close(p2cread)
if c2pwrite != -1 and c2pread != -1:
os.close(c2pwrite)
if errwrite != -1 and errread != -1:
os.close(errwrite)
# Wait for exec to fail or succeed; possibly raising an
# exception (limited in size)
data = bytearray()
while True:
part = _eintr_retry_call(os.read, errpipe_read, 50000)
data += part
if not part or len(data) > 50000:
break
finally:
# be sure the FD is closed no matter what
os.close(errpipe_read)
if data:
try:
_eintr_retry_call(os.waitpid, self.pid, 0)
except OSError as e:
if e.errno != errno.ECHILD:
raise
try:
exception_name, hex_errno, err_msg = data.split(b':', 2)
except ValueError:
print('Bad exception data:', repr(data))
exception_name = b'RuntimeError'
hex_errno = b'0'
err_msg = b'Unknown'
child_exception_type = getattr(
builtins, exception_name.decode('ascii'),
RuntimeError)
for fd in (p2cwrite, c2pread, errread):
if fd != -1:
os.close(fd)
err_msg = err_msg.decode(errors="surrogatepass")
if issubclass(child_exception_type, OSError) and hex_errno:
errno_num = int(hex_errno, 16)
if errno_num != 0:
err_msg = os.strerror(errno_num)
if errno_num == errno.ENOENT:
err_msg += ': ' + repr(args[0])
raise child_exception_type(errno_num, err_msg)
raise child_exception_type(err_msg)
def _handle_exitstatus(self, sts, _WIFSIGNALED=os.WIFSIGNALED,
_WTERMSIG=os.WTERMSIG, _WIFEXITED=os.WIFEXITED,
_WEXITSTATUS=os.WEXITSTATUS):
        # This method is called (indirectly) by __del__, so it cannot
        # refer to anything outside of its local scope.
if _WIFSIGNALED(sts):
self.returncode = -_WTERMSIG(sts)
elif _WIFEXITED(sts):
self.returncode = _WEXITSTATUS(sts)
else:
# Should never happen
raise RuntimeError("Unknown child exit status!")
def _internal_poll(self, _deadstate=None, _waitpid=os.waitpid,
_WNOHANG=os.WNOHANG, _os_error=os.error):
"""Check if child process has terminated. Returns returncode
attribute.
This method is called by __del__, so it cannot reference anything
outside of the local scope (nor can any methods it calls).
"""
if self.returncode is None:
try:
pid, sts = _waitpid(self.pid, _WNOHANG)
if pid == self.pid:
self._handle_exitstatus(sts)
except _os_error:
if _deadstate is not None:
self.returncode = _deadstate
return self.returncode
def wait(self):
"""Wait for child process to terminate. Returns returncode
attribute."""
if self.returncode is None:
try:
pid, sts = _eintr_retry_call(os.waitpid, self.pid, 0)
except OSError as e:
if e.errno != errno.ECHILD:
raise
# This happens if SIGCLD is set to be ignored or waiting
# for child processes has otherwise been disabled for our
# process. This child is dead, we can't get the status.
sts = 0
self._handle_exitstatus(sts)
return self.returncode
def _communicate(self, input):
if self.stdin:
# Flush stdio buffer. This might block, if the user has
# been writing to .stdin in an uncontrolled fashion.
self.stdin.flush()
if not input:
self.stdin.close()
if _has_poll:
stdout, stderr = self._communicate_with_poll(input)
else:
stdout, stderr = self._communicate_with_select(input)
# All data exchanged. Translate lists into strings.
if stdout is not None:
stdout = b''.join(stdout)
if stderr is not None:
stderr = b''.join(stderr)
# Translate newlines, if requested.
# This also turns bytes into strings.
if self.universal_newlines:
if stdout is not None:
stdout = self._translate_newlines(stdout,
self.stdout.encoding)
if stderr is not None:
stderr = self._translate_newlines(stderr,
self.stderr.encoding)
self.wait()
return (stdout, stderr)
def _communicate_with_poll(self, input):
stdout = None # Return
stderr = None # Return
fd2file = {}
fd2output = {}
poller = select.poll()
def register_and_append(file_obj, eventmask):
poller.register(file_obj.fileno(), eventmask)
fd2file[file_obj.fileno()] = file_obj
def close_unregister_and_remove(fd):
poller.unregister(fd)
fd2file[fd].close()
fd2file.pop(fd)
if self.stdin and input:
register_and_append(self.stdin, select.POLLOUT)
select_POLLIN_POLLPRI = select.POLLIN | select.POLLPRI
if self.stdout:
register_and_append(self.stdout, select_POLLIN_POLLPRI)
fd2output[self.stdout.fileno()] = stdout = []
if self.stderr:
register_and_append(self.stderr, select_POLLIN_POLLPRI)
fd2output[self.stderr.fileno()] = stderr = []
input_offset = 0
while fd2file:
try:
ready = poller.poll()
except select.error as e:
if e.args[0] == errno.EINTR:
continue
raise
# XXX Rewrite these to use non-blocking I/O on the
# file objects; they are no longer using C stdio!
for fd, mode in ready:
if mode & select.POLLOUT:
chunk = input[input_offset : input_offset + _PIPE_BUF]
try:
input_offset += os.write(fd, chunk)
except OSError as e:
if e.errno == errno.EPIPE:
close_unregister_and_remove(fd)
else:
raise
else:
if input_offset >= len(input):
close_unregister_and_remove(fd)
elif mode & select_POLLIN_POLLPRI:
data = os.read(fd, 4096)
if not data:
close_unregister_and_remove(fd)
fd2output[fd].append(data)
else:
# Ignore hang up or errors.
close_unregister_and_remove(fd)
return (stdout, stderr)
def _communicate_with_select(self, input):
read_set = []
write_set = []
stdout = None # Return
stderr = None # Return
if self.stdin and input:
write_set.append(self.stdin)
if self.stdout:
read_set.append(self.stdout)
stdout = []
if self.stderr:
read_set.append(self.stderr)
stderr = []
input_offset = 0
while read_set or write_set:
try:
rlist, wlist, xlist = select.select(read_set, write_set, [])
except select.error as e:
if e.args[0] == errno.EINTR:
continue
raise
# XXX Rewrite these to use non-blocking I/O on the
# file objects; they are no longer using C stdio!
if self.stdin in wlist:
chunk = input[input_offset : input_offset + _PIPE_BUF]
try:
bytes_written = os.write(self.stdin.fileno(), chunk)
except OSError as e:
if e.errno == errno.EPIPE:
self.stdin.close()
write_set.remove(self.stdin)
else:
raise
else:
input_offset += bytes_written
if input_offset >= len(input):
self.stdin.close()
write_set.remove(self.stdin)
if self.stdout in rlist:
data = os.read(self.stdout.fileno(), 1024)
if not data:
self.stdout.close()
read_set.remove(self.stdout)
stdout.append(data)
if self.stderr in rlist:
data = os.read(self.stderr.fileno(), 1024)
if not data:
self.stderr.close()
read_set.remove(self.stderr)
stderr.append(data)
return (stdout, stderr)
def send_signal(self, sig):
"""Send a signal to the process
"""
os.kill(self.pid, sig)
def terminate(self):
"""Terminate the process with SIGTERM
"""
self.send_signal(signal.SIGTERM)
def kill(self):
"""Kill the process with SIGKILL
"""
self.send_signal(signal.SIGKILL)
def _demo_posix():
#
# Example 1: Simple redirection: Get process list
#
plist = Popen(["ps"], stdout=PIPE).communicate()[0]
print("Process list:")
print(plist)
#
# Example 2: Change uid before executing child
#
if os.getuid() == 0:
p = Popen(["id"], preexec_fn=lambda: os.setuid(100))
p.wait()
#
# Example 3: Connecting several subprocesses
#
print("Looking for 'hda'...")
p1 = Popen(["dmesg"], stdout=PIPE)
p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE)
print(repr(p2.communicate()[0]))
#
# Example 4: Catch execution error
#
print()
print("Trying a weird file...")
try:
print(Popen(["/this/path/does/not/exist"]).communicate())
except OSError as e:
if e.errno == errno.ENOENT:
print("The file didn't exist. I thought so...")
print("Child traceback:")
print(e.child_traceback)
else:
print("Error", e.errno)
else:
print("Gosh. No error.", file=sys.stderr)
def _demo_windows():
#
# Example 1: Connecting several subprocesses
#
print("Looking for 'PROMPT' in set output...")
p1 = Popen("set", stdout=PIPE, shell=True)
p2 = Popen('find "PROMPT"', stdin=p1.stdout, stdout=PIPE)
print(repr(p2.communicate()[0]))
#
# Example 2: Simple execution of program
#
print("Executing calc...")
p = Popen("calc")
p.wait()
if __name__ == "__main__":
if mswindows:
_demo_windows()
else:
_demo_posix()
|
vipins/ccccms
|
refs/heads/master
|
env/Lib/site-packages/setuptools/script template.py
|
486
|
# EASY-INSTALL-SCRIPT: %(spec)r,%(script_name)r
__requires__ = """%(spec)r"""
import pkg_resources
pkg_resources.run_script("""%(spec)r""", """%(script_name)r""")
|
jabbalaci/jabbapylib
|
refs/heads/master
|
tests/text/test_utils.py
|
1
|
# -*- coding: utf-8 -*-
from jabbapylib.text import utils
def test_inc_string():
assert utils.inc_string('a') == 'b'
assert utils.inc_string('f') == 'g'
assert utils.inc_string('z') == 'aa'
assert utils.inc_string('zz') == 'aaa'
assert utils.inc_string('af') == 'ag'
assert utils.inc_string('ajhfsdhgf') == 'ajhfsdhgg'
assert utils.inc_string('ajhfsdhgz') == 'ajhfsdhha'
|
dfalt974/SickRage
|
refs/heads/master
|
lib/future/types/newrange.py
|
56
|
"""
Nearly identical to xrange.py, by Dan Crosta, from
https://github.com/dcrosta/xrange.git
This is included here in the ``future`` package rather than pointed to as
a dependency because there is no package for ``xrange`` on PyPI. It is
also tweaked to appear like a regular Python 3 ``range`` object rather
than a Python 2 xrange.
From Dan Crosta's README:
"A pure-Python implementation of Python 2.7's xrange built-in, with
some features backported from the Python 3.x range built-in (which
replaced xrange) in that version."
Read more at
https://late.am/post/2012/06/18/what-the-heck-is-an-xrange
"""
from __future__ import absolute_import
from collections import Sequence, Iterator
from itertools import islice
from future.backports.misc import count # with step parameter on Py2.6
# For backward compatibility with python-future versions < 0.14.4:
_count = count
class newrange(Sequence):
"""
Pure-Python backport of Python 3's range object. See `the CPython
documentation for details:
<http://docs.python.org/py3k/library/functions.html#range>`_
"""
def __init__(self, *args):
if len(args) == 1:
start, stop, step = 0, args[0], 1
elif len(args) == 2:
start, stop, step = args[0], args[1], 1
elif len(args) == 3:
start, stop, step = args
else:
raise TypeError('range() requires 1-3 int arguments')
try:
start, stop, step = int(start), int(stop), int(step)
except ValueError:
raise TypeError('an integer is required')
if step == 0:
raise ValueError('range() arg 3 must not be zero')
elif step < 0:
stop = min(stop, start)
else:
stop = max(stop, start)
self._start = start
self._stop = stop
self._step = step
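        # The length is the ceiling of (stop - start) / step: floor division
        # plus one whenever there is a nonzero remainder.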
self._len = (stop - start) // step + bool((stop - start) % step)
@property
def start(self):
return self._start
@property
def stop(self):
return self._stop
@property
def step(self):
return self._step
def __repr__(self):
if self._step == 1:
return 'range(%d, %d)' % (self._start, self._stop)
return 'range(%d, %d, %d)' % (self._start, self._stop, self._step)
def __eq__(self, other):
return (isinstance(other, newrange) and
(self._len == 0 == other._len or
(self._start, self._step, self._len) ==
                 (other._start, other._step, other._len)))
def __len__(self):
return self._len
def index(self, value):
"""Return the 0-based position of integer `value` in
the sequence this range represents."""
diff = value - self._start
quotient, remainder = divmod(diff, self._step)
if remainder == 0 and 0 <= quotient < self._len:
return abs(quotient)
raise ValueError('%r is not in range' % value)
def count(self, value):
"""Return the number of ocurrences of integer `value`
in the sequence this range represents."""
# a value can occur exactly zero or one times
return int(value in self)
def __contains__(self, value):
"""Return ``True`` if the integer `value` occurs in
the sequence this range represents."""
try:
self.index(value)
return True
except ValueError:
return False
def __reversed__(self):
return iter(self[::-1])
def __getitem__(self, index):
"""Return the element at position ``index`` in the sequence
this range represents, or raise :class:`IndexError` if the
position is out of range."""
if isinstance(index, slice):
return self.__getitem_slice(index)
if index < 0:
# negative indexes access from the end
index = self._len + index
if index < 0 or index >= self._len:
raise IndexError('range object index out of range')
return self._start + index * self._step
def __getitem_slice(self, slce):
"""Return a range which represents the requested slce
of the sequence represented by this range.
"""
scaled_indices = (self._step * n for n in slce.indices(self._len))
start_offset, stop_offset, new_step = scaled_indices
return newrange(self._start + start_offset,
self._start + stop_offset,
new_step)
def __iter__(self):
"""Return an iterator which enumerates the elements of the
sequence this range represents."""
return range_iterator(self)
class range_iterator(Iterator):
"""An iterator for a :class:`range`.
"""
def __init__(self, range_):
self._stepper = islice(count(range_.start, range_.step), len(range_))
def __iter__(self):
return self
def next(self):
return next(self._stepper)
__all__ = ['newrange']
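# Minimal usage sketch for the backported range above (illustrative only;
# iteration relies on the Python 2 style `next` method of range_iterator):
#
#   r = newrange(0, 10, 3)
#   assert list(r) == [0, 3, 6, 9]
#   assert len(r) == 4 and r.index(6) == 2
#   assert 9 in r and 10 not in r
#   assert list(r[::-1]) == [9, 6, 3, 0]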
|
pkexcellent/luigi
|
refs/heads/master
|
test/contrib/_webhdfs_test.py
|
22
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import os
import posixpath
import time
from helpers import unittest
from luigi.contrib import webhdfs
class TestWebHdfsTarget(unittest.TestCase):
'''
    This test requires a running Hadoop cluster with WebHdfs enabled.
    It also requires the luigi.cfg file to have a `hdfs` section
with the namenode_host, namenode_port and user settings.
'''
def setUp(self):
self.testDir = "/tmp/luigi-test".format()
self.path = os.path.join(self.testDir, 'out.txt')
self.client = webhdfs.WebHdfsClient()
self.target = webhdfs.WebHdfsTarget(self.path)
def tearDown(self):
if self.client.exists(self.testDir):
self.client.remove(self.testDir, recursive=True)
def test_write(self):
self.assertFalse(self.client.exists(self.path))
output = self.target.open('w')
output.write('this is line 1\n')
output.write('this is line #2\n')
output.close()
self.assertTrue(self.client.exists(self.path))
def test_read(self):
self.test_write()
input_ = self.target.open('r')
all_test = 'this is line 1\nthis is line #2\n'
self.assertEqual(all_test, input_.read())
input_.close()
def test_read_lines(self):
self.test_write()
input_ = self.target.open('r')
lines = list(input_.readlines())
self.assertEqual(lines[0], 'this is line 1')
self.assertEqual(lines[1], 'this is line #2')
input_.close()
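# Example luigi.cfg section assumed by these tests (placeholder values;
# 50070 is the default WebHDFS/namenode HTTP port):
#
#   [hdfs]
#   namenode_host = localhost
#   namenode_port = 50070
#   user = luigi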
|
tashaxe/Red-DiscordBot
|
refs/heads/develop
|
lib/websockets/test_client_server.py
|
8
|
import asyncio
import logging
import os
import ssl
import unittest
import unittest.mock
from .client import *
from .exceptions import ConnectionClosed, InvalidHandshake
from .http import USER_AGENT, read_response
from .server import *
# Avoid displaying stack traces at the ERROR logging level.
logging.basicConfig(level=logging.CRITICAL)
testcert = os.path.join(os.path.dirname(__file__), 'testcert.pem')
@asyncio.coroutine
def handler(ws, path):
if path == '/attributes':
yield from ws.send(repr((ws.host, ws.port, ws.secure)))
elif path == '/headers':
yield from ws.send(str(ws.request_headers))
yield from ws.send(str(ws.response_headers))
elif path == '/raw_headers':
yield from ws.send(repr(ws.raw_request_headers))
yield from ws.send(repr(ws.raw_response_headers))
elif path == '/subprotocol':
yield from ws.send(repr(ws.subprotocol))
else:
yield from ws.send((yield from ws.recv()))
class ClientServerTests(unittest.TestCase):
secure = False
def setUp(self):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
def tearDown(self):
self.loop.close()
def run_loop_once(self):
# Process callbacks scheduled with call_soon by appending a callback
# to stop the event loop then running it until it hits that callback.
self.loop.call_soon(self.loop.stop)
self.loop.run_forever()
def start_server(self, **kwds):
server = serve(handler, 'localhost', 8642, **kwds)
self.server = self.loop.run_until_complete(server)
def start_client(self, path='', **kwds):
client = connect('ws://localhost:8642/' + path, **kwds)
self.client = self.loop.run_until_complete(client)
def stop_client(self):
try:
self.loop.run_until_complete(
asyncio.wait_for(self.client.worker_task, timeout=1))
except asyncio.TimeoutError: # pragma: no cover
self.fail("Client failed to stop")
def stop_server(self):
self.server.close()
try:
self.loop.run_until_complete(
asyncio.wait_for(self.server.wait_closed(), timeout=1))
except asyncio.TimeoutError: # pragma: no cover
self.fail("Server failed to stop")
def test_basic(self):
self.start_server()
self.start_client()
self.loop.run_until_complete(self.client.send("Hello!"))
reply = self.loop.run_until_complete(self.client.recv())
self.assertEqual(reply, "Hello!")
self.stop_client()
self.stop_server()
def test_server_close_while_client_connected(self):
self.start_server()
self.start_client()
self.stop_server()
def test_explicit_event_loop(self):
self.start_server(loop=self.loop)
self.start_client(loop=self.loop)
self.loop.run_until_complete(self.client.send("Hello!"))
reply = self.loop.run_until_complete(self.client.recv())
self.assertEqual(reply, "Hello!")
self.stop_client()
self.stop_server()
def test_protocol_attributes(self):
self.start_server()
self.start_client('attributes')
expected_attrs = ('localhost', 8642, self.secure)
client_attrs = (self.client.host, self.client.port, self.client.secure)
self.assertEqual(client_attrs, expected_attrs)
server_attrs = self.loop.run_until_complete(self.client.recv())
self.assertEqual(server_attrs, repr(expected_attrs))
self.stop_client()
self.stop_server()
def test_protocol_headers(self):
self.start_server()
self.start_client('headers')
client_req = self.client.request_headers
client_resp = self.client.response_headers
self.assertEqual(client_req['User-Agent'], USER_AGENT)
self.assertEqual(client_resp['Server'], USER_AGENT)
server_req = self.loop.run_until_complete(self.client.recv())
server_resp = self.loop.run_until_complete(self.client.recv())
self.assertEqual(server_req, str(client_req))
self.assertEqual(server_resp, str(client_resp))
self.stop_client()
self.stop_server()
def test_protocol_raw_headers(self):
self.start_server()
self.start_client('raw_headers')
client_req = self.client.raw_request_headers
client_resp = self.client.raw_response_headers
self.assertEqual(dict(client_req)['User-Agent'], USER_AGENT)
self.assertEqual(dict(client_resp)['Server'], USER_AGENT)
server_req = self.loop.run_until_complete(self.client.recv())
server_resp = self.loop.run_until_complete(self.client.recv())
self.assertEqual(server_req, repr(client_req))
self.assertEqual(server_resp, repr(client_resp))
self.stop_client()
self.stop_server()
def test_protocol_custom_request_headers_dict(self):
self.start_server()
self.start_client('raw_headers', extra_headers={'X-Spam': 'Eggs'})
req_headers = self.loop.run_until_complete(self.client.recv())
self.loop.run_until_complete(self.client.recv())
self.assertIn("('X-Spam', 'Eggs')", req_headers)
self.stop_client()
self.stop_server()
def test_protocol_custom_request_headers_list(self):
self.start_server()
self.start_client('raw_headers', extra_headers=[('X-Spam', 'Eggs')])
req_headers = self.loop.run_until_complete(self.client.recv())
self.loop.run_until_complete(self.client.recv())
self.assertIn("('X-Spam', 'Eggs')", req_headers)
self.stop_client()
self.stop_server()
def test_protocol_custom_response_headers_callable_dict(self):
self.start_server(extra_headers=lambda p, r: {'X-Spam': 'Eggs'})
self.start_client('raw_headers')
self.loop.run_until_complete(self.client.recv())
resp_headers = self.loop.run_until_complete(self.client.recv())
self.assertIn("('X-Spam', 'Eggs')", resp_headers)
self.stop_client()
self.stop_server()
def test_protocol_custom_response_headers_callable_list(self):
self.start_server(extra_headers=lambda p, r: [('X-Spam', 'Eggs')])
self.start_client('raw_headers')
self.loop.run_until_complete(self.client.recv())
resp_headers = self.loop.run_until_complete(self.client.recv())
self.assertIn("('X-Spam', 'Eggs')", resp_headers)
self.stop_client()
self.stop_server()
def test_protocol_custom_response_headers_dict(self):
self.start_server(extra_headers={'X-Spam': 'Eggs'})
self.start_client('raw_headers')
self.loop.run_until_complete(self.client.recv())
resp_headers = self.loop.run_until_complete(self.client.recv())
self.assertIn("('X-Spam', 'Eggs')", resp_headers)
self.stop_client()
self.stop_server()
def test_protocol_custom_response_headers_list(self):
self.start_server(extra_headers=[('X-Spam', 'Eggs')])
self.start_client('raw_headers')
self.loop.run_until_complete(self.client.recv())
resp_headers = self.loop.run_until_complete(self.client.recv())
self.assertIn("('X-Spam', 'Eggs')", resp_headers)
self.stop_client()
self.stop_server()
def test_no_subprotocol(self):
self.start_server()
self.start_client('subprotocol')
server_subprotocol = self.loop.run_until_complete(self.client.recv())
self.assertEqual(server_subprotocol, repr(None))
self.assertEqual(self.client.subprotocol, None)
self.stop_client()
self.stop_server()
def test_subprotocol_found(self):
self.start_server(subprotocols=['superchat', 'chat'])
self.start_client('subprotocol', subprotocols=['otherchat', 'chat'])
server_subprotocol = self.loop.run_until_complete(self.client.recv())
self.assertEqual(server_subprotocol, repr('chat'))
self.assertEqual(self.client.subprotocol, 'chat')
self.stop_client()
self.stop_server()
def test_subprotocol_not_found(self):
self.start_server(subprotocols=['superchat'])
self.start_client('subprotocol', subprotocols=['otherchat'])
server_subprotocol = self.loop.run_until_complete(self.client.recv())
self.assertEqual(server_subprotocol, repr(None))
self.assertEqual(self.client.subprotocol, None)
self.stop_client()
self.stop_server()
def test_subprotocol_not_offered(self):
self.start_server()
self.start_client('subprotocol', subprotocols=['otherchat', 'chat'])
server_subprotocol = self.loop.run_until_complete(self.client.recv())
self.assertEqual(server_subprotocol, repr(None))
self.assertEqual(self.client.subprotocol, None)
self.stop_client()
self.stop_server()
def test_subprotocol_not_requested(self):
self.start_server(subprotocols=['superchat', 'chat'])
self.start_client('subprotocol')
server_subprotocol = self.loop.run_until_complete(self.client.recv())
self.assertEqual(server_subprotocol, repr(None))
self.assertEqual(self.client.subprotocol, None)
self.stop_client()
self.stop_server()
@unittest.mock.patch.object(WebSocketServerProtocol, 'select_subprotocol')
def test_subprotocol_error(self, _select_subprotocol):
_select_subprotocol.return_value = 'superchat'
self.start_server(subprotocols=['superchat'])
with self.assertRaises(InvalidHandshake):
self.start_client('subprotocol', subprotocols=['otherchat'])
self.run_loop_once()
self.stop_server()
@unittest.mock.patch('websockets.server.read_request')
def test_server_receives_malformed_request(self, _read_request):
_read_request.side_effect = ValueError("read_request failed")
self.start_server()
with self.assertRaises(InvalidHandshake):
self.start_client()
self.stop_server()
@unittest.mock.patch('websockets.client.read_response')
def test_client_receives_malformed_response(self, _read_response):
_read_response.side_effect = ValueError("read_response failed")
self.start_server()
with self.assertRaises(InvalidHandshake):
self.start_client()
self.run_loop_once()
self.stop_server()
@unittest.mock.patch('websockets.client.build_request')
def test_client_sends_invalid_handshake_request(self, _build_request):
def wrong_build_request(set_header):
return '42'
_build_request.side_effect = wrong_build_request
self.start_server()
with self.assertRaises(InvalidHandshake):
self.start_client()
self.stop_server()
@unittest.mock.patch('websockets.server.build_response')
def test_server_sends_invalid_handshake_response(self, _build_response):
def wrong_build_response(set_header, key):
return build_response(set_header, '42')
_build_response.side_effect = wrong_build_response
self.start_server()
with self.assertRaises(InvalidHandshake):
self.start_client()
self.stop_server()
@unittest.mock.patch('websockets.client.read_response')
def test_server_does_not_switch_protocols(self, _read_response):
@asyncio.coroutine
def wrong_read_response(stream):
code, headers = yield from read_response(stream)
return 400, headers
_read_response.side_effect = wrong_read_response
self.start_server()
with self.assertRaises(InvalidHandshake):
self.start_client()
self.run_loop_once()
self.stop_server()
@unittest.mock.patch('websockets.server.WebSocketServerProtocol.send')
def test_server_handler_crashes(self, send):
send.side_effect = ValueError("send failed")
self.start_server()
self.start_client()
self.loop.run_until_complete(self.client.send("Hello!"))
with self.assertRaises(ConnectionClosed):
self.loop.run_until_complete(self.client.recv())
self.stop_client()
self.stop_server()
# Connection ends with an unexpected error.
self.assertEqual(self.client.close_code, 1011)
@unittest.mock.patch('websockets.server.WebSocketServerProtocol.close')
def test_server_close_crashes(self, close):
close.side_effect = ValueError("close failed")
self.start_server()
self.start_client()
self.loop.run_until_complete(self.client.send("Hello!"))
reply = self.loop.run_until_complete(self.client.recv())
self.assertEqual(reply, "Hello!")
self.stop_client()
self.stop_server()
# Connection ends with an abnormal closure.
self.assertEqual(self.client.close_code, 1006)
@unittest.mock.patch.object(WebSocketClientProtocol, 'handshake')
def test_client_closes_connection_before_handshake(self, handshake):
self.start_server()
self.start_client()
# We have mocked the handshake() method to prevent the client from
# performing the opening handshake. Force it to close the connection.
self.loop.run_until_complete(self.client.close_connection(force=True))
self.stop_client()
# The server should stop properly anyway. It used to hang because the
# worker handling the connection was waiting for the opening handshake.
self.stop_server()
@unittest.mock.patch('websockets.server.read_request')
def test_server_shuts_down_during_opening_handshake(self, _read_request):
_read_request.side_effect = asyncio.CancelledError
self.start_server()
self.server.closing = True
with self.assertRaises(InvalidHandshake) as raised:
self.start_client()
self.stop_server()
# Opening handshake fails with 503 Service Unavailable
self.assertEqual(str(raised.exception), "Bad status code: 503")
def test_server_shuts_down_during_connection_handling(self):
self.start_server()
self.start_client()
self.server.close()
with self.assertRaises(ConnectionClosed):
self.loop.run_until_complete(self.client.recv())
self.stop_client()
self.stop_server()
# Websocket connection terminates with 1001 Going Away.
self.assertEqual(self.client.close_code, 1001)
@unittest.skipUnless(os.path.exists(testcert), "test certificate is missing")
class SSLClientServerTests(ClientServerTests):
secure = True
@property
def server_context(self):
ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ssl_context.load_cert_chain(testcert)
return ssl_context
@property
def client_context(self):
ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ssl_context.load_verify_locations(testcert)
ssl_context.verify_mode = ssl.CERT_REQUIRED
return ssl_context
def start_server(self, *args, **kwds):
kwds['ssl'] = self.server_context
server = serve(handler, 'localhost', 8642, **kwds)
self.server = self.loop.run_until_complete(server)
def start_client(self, path='', **kwds):
kwds['ssl'] = self.client_context
client = connect('wss://localhost:8642/' + path, **kwds)
self.client = self.loop.run_until_complete(client)
def test_ws_uri_is_rejected(self):
self.start_server()
client = connect('ws://localhost:8642/', ssl=self.client_context)
with self.assertRaises(ValueError):
self.loop.run_until_complete(client)
self.stop_server()
class ClientServerOriginTests(unittest.TestCase):
def setUp(self):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
def tearDown(self):
self.loop.close()
def test_checking_origin_succeeds(self):
server = self.loop.run_until_complete(
serve(handler, 'localhost', 8642, origins=['http://localhost']))
client = self.loop.run_until_complete(
connect('ws://localhost:8642/', origin='http://localhost'))
self.loop.run_until_complete(client.send("Hello!"))
self.assertEqual(self.loop.run_until_complete(client.recv()), "Hello!")
self.loop.run_until_complete(client.close())
server.close()
self.loop.run_until_complete(server.wait_closed())
def test_checking_origin_fails(self):
server = self.loop.run_until_complete(
serve(handler, 'localhost', 8642, origins=['http://localhost']))
with self.assertRaisesRegex(InvalidHandshake, "Bad status code: 403"):
self.loop.run_until_complete(
connect('ws://localhost:8642/', origin='http://otherhost'))
server.close()
self.loop.run_until_complete(server.wait_closed())
def test_checking_lack_of_origin_succeeds(self):
server = self.loop.run_until_complete(
serve(handler, 'localhost', 8642, origins=['']))
client = self.loop.run_until_complete(connect('ws://localhost:8642/'))
self.loop.run_until_complete(client.send("Hello!"))
self.assertEqual(self.loop.run_until_complete(client.recv()), "Hello!")
self.loop.run_until_complete(client.close())
server.close()
self.loop.run_until_complete(server.wait_closed())
try:
from .py35.client_server import ClientServerContextManager
except (SyntaxError, ImportError): # pragma: no cover
pass
else:
class ClientServerContextManagerTests(ClientServerContextManager,
unittest.TestCase):
pass
|
gwind/YWeb
|
refs/heads/master
|
yweb/yweb/utils/markup.py
|
1
|
# coding: utf-8
from markdown import Markdown
import docutils.core
# python-pygments must be installed on the system
YMK = Markdown( extensions=['fenced_code', 'tables', 'codehilite'],
extension_configs={
'codehilite': [
# ('force_linenos', True),
],
},
safe_mode='escape' )
def rst2html(body):
'''
http://stackoverflow.com/questions/6654519/parsing-restructuredtext-into-html
publish_string, publish_parts
>>> from docutils.core import publish_string
>>> publish_string("*anurag*",writer_name='html')
>>> print publish_parts("*anurag*",writer_name='html')['html_body']
<p><em>anurag</em></p>
'''
return docutils.core.publish_parts(body, writer_name='html')['html_body']
def generate_html(body, markup_language=1):
if markup_language == 1: # Markdown
return YMK.convert( body )
elif markup_language == 2: # reStructuredText
return rst2html( body )
else:
return body
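# Hypothetical usage sketch for the helpers above:
#   generate_html('**bold**', markup_language=1)  # -> u'<p><strong>bold</strong></p>'
#   generate_html('*anurag*', markup_language=2)  # reStructuredText via rst2html()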
|
obi-two/Rebelion
|
refs/heads/master
|
data/scripts/templates/object/tangible/furniture/all/shared_frn_all_lamp_tbl_s02.py
|
2
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/furniture/all/shared_frn_all_lamp_tbl_s02.iff"
result.attribute_template_id = 6
result.stfName("frn_n","frn_lamp_table")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
unreal666/outwiker
|
refs/heads/master
|
plugins/externaltools/externaltools/controller.py
|
3
|
# -*- coding: utf-8 -*-
from outwiker.gui.preferences.preferencepanelinfo import PreferencePanelInfo
from outwiker.pages.wiki.wikipage import WikiWikiPage
from outwiker.pages.wiki.defines import MENU_WIKI
from outwiker.utilites.actionsguicontroller import (ActionsGUIController,
ActionGUIInfo)
from .i18n import get_
from .menutoolscontroller import MenuToolsController
from .commandexec.commandcontroller import CommandController
from .commandexec.actions import (
CommandExecAction,
MacrosPageAction,
MacrosHtmlAction,
MacrosAttachAction,
MacrosFolderAction
)
from . import defines
class Controller(object):
"""
    This class is responsible for the plugin's core functionality
"""
def __init__(self, plugin, application):
self._plugin = plugin
self._application = application
self._page = None
self._menuToolsController = MenuToolsController(self._application)
self._commandController = CommandController(self._application)
self._GUIController = ActionsGUIController(
self._application,
WikiWikiPage.getTypeString(),
)
def initialize(self):
global _
_ = get_()
self._menuToolsController.initialize()
self._commandController.initialize()
self._initialize_guicontroller()
self._application.onPreferencesDialogCreate += self.__onPreferencesDialogCreate
def _initialize_guicontroller(self):
action_gui_info = [
ActionGUIInfo(CommandExecAction(self._application),
defines.MENU_EXTERNALTOOLS
),
ActionGUIInfo(MacrosPageAction(self._application),
defines.MENU_EXTERNALTOOLS
),
ActionGUIInfo(MacrosHtmlAction(self._application),
defines.MENU_EXTERNALTOOLS
),
ActionGUIInfo(MacrosAttachAction(self._application),
defines.MENU_EXTERNALTOOLS
),
ActionGUIInfo(MacrosFolderAction(self._application),
defines.MENU_EXTERNALTOOLS
),
]
new_menus = [(defines.MENU_EXTERNALTOOLS, _('ExternalTools'), MENU_WIKI)]
if self._application.mainWindow is not None:
self._GUIController.initialize(action_gui_info,
new_menus=new_menus)
def destroy(self):
self._menuToolsController.destroy()
self._commandController.destroy()
self._destroy_guicontroller()
self._application.onPreferencesDialogCreate -= self.__onPreferencesDialogCreate
def _destroy_guicontroller(self):
if self._application.mainWindow is not None:
self._GUIController.destroy()
def __onPreferencesDialogCreate(self, dialog):
from .preferencespanel import PreferencesPanel
prefPanel = PreferencesPanel(dialog.treeBook, self._application.config)
panelName = _(u"External Tools [Plugin]")
panelsList = [PreferencePanelInfo(prefPanel, panelName)]
dialog.appendPreferenceGroup(panelName, panelsList)
|
jobscore/sync-engine
|
refs/heads/master
|
migrations/versions/010_store_raw_contact_data.py
|
11
|
"""Store raw contact data.
Revision ID: 3b511977a01f
Revises: 169cac0cd87e
Create Date: 2014-04-16 15:36:22.188971
"""
# revision identifiers, used by Alembic.
revision = '3b511977a01f'
down_revision = '169cac0cd87e'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('contact', sa.Column('raw_data', sa.Text(), nullable=True))
def downgrade():
op.drop_column('contact', 'raw_data')
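# Typical invocations for this migration (assuming a configured alembic.ini):
#   alembic upgrade 3b511977a01f     # apply: add contact.raw_data
#   alembic downgrade 169cac0cd87e   # revert: drop contact.raw_data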
|
ytmimi/Thesis2017
|
refs/heads/refactor
|
setup.py
|
1
|
from setuptools import setup
setup(name='ma_option_vol',
version='1.0.6',
      #project description
description='Thesis Code',
long_description='Code to help with the collection and analysis of data for my Fall 2017 Grossman School of Business Honors Thesis',
#author info
author='Yacin Tmimi',
author_email='[email protected]',
#homepage for the project
url='https://github.com/ytmimi/Thesis2017',
#open source license
license='MIT',
#local packages to be installed
packages=['ma_option_vol', 'company_data'],
      #third-party packages that the code needs to run properly
      #(datetime, os and re are standard-library modules and are not listed)
      install_requires=['openpyxl'],
#non python files to be included with the source distribution
# package_data={
# 'company_data': ['sample/*.xlsx'],
# },
#files stored in a directory that doesn't include an __init__.py file
include_package_data=True,
data_files=[('', ['LICENSE.txt','README.md']),
('company_data',['sample/*.xlsx','Treasury Rates.xlsx'])],
)
|
Pafcholini/emotion_kernel_tw_p
|
refs/heads/master
|
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py
|
4653
|
# EventClass.py
#
# This is a library defining some event type classes, which can
# be used by other scripts to analyze the perf samples.
#
# Currently there are just a few classes defined as examples:
# PerfEvent is the base class for all perf event samples, PebsEvent
# is a HW-based Intel x86 PEBS event, and users can add more SW/HW
# event classes based on their requirements.
import struct
# Event types, user could add more here
EVTYPE_GENERIC = 0
EVTYPE_PEBS = 1 # Basic PEBS event
EVTYPE_PEBS_LL = 2 # PEBS event with load latency info
EVTYPE_IBS = 3
#
# Currently we don't have a good way to tell the event type other than
# by the size of the raw buffer: a raw PEBS event with load latency data
# is 176 bytes, while a pure PEBS event is 144 bytes.
#
def create_event(name, comm, dso, symbol, raw_buf):
if (len(raw_buf) == 144):
event = PebsEvent(name, comm, dso, symbol, raw_buf)
elif (len(raw_buf) == 176):
event = PebsNHM(name, comm, dso, symbol, raw_buf)
else:
event = PerfEvent(name, comm, dso, symbol, raw_buf)
return event
class PerfEvent(object):
event_num = 0
def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_GENERIC):
self.name = name
self.comm = comm
self.dso = dso
self.symbol = symbol
self.raw_buf = raw_buf
self.ev_type = ev_type
PerfEvent.event_num += 1
def show(self):
print "PMU event: name=%12s, symbol=%24s, comm=%8s, dso=%12s" % (self.name, self.symbol, self.comm, self.dso)
#
# Basic Intel PEBS (Precise Event-based Sampling) event, whose raw buffer
# contains the context info when that event happened: the EFLAGS and
# linear IP info, as well as all the registers.
#
class PebsEvent(PerfEvent):
pebs_num = 0
def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS):
tmp_buf=raw_buf[0:80]
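        # The first 80 bytes are ten native 64-bit unsigned values: EFLAGS,
        # the linear IP, then the eight general-purpose registers, in the
        # order unpacked below.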
flags, ip, ax, bx, cx, dx, si, di, bp, sp = struct.unpack('QQQQQQQQQQ', tmp_buf)
self.flags = flags
self.ip = ip
self.ax = ax
self.bx = bx
self.cx = cx
self.dx = dx
self.si = si
self.di = di
self.bp = bp
self.sp = sp
PerfEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
PebsEvent.pebs_num += 1
del tmp_buf
#
# Intel Nehalem and Westmere support PEBS plus Load Latency info which lie
# in the four 64 bit words write after the PEBS data:
# Status: records the IA32_PERF_GLOBAL_STATUS register value
# DLA: Data Linear Address (EIP)
# DSE: Data Source Encoding, where the latency happens, hit or miss
# in L1/L2/L3 or IO operations
# LAT: the actual latency in cycles
#
class PebsNHM(PebsEvent):
pebs_nhm_num = 0
def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS_LL):
tmp_buf=raw_buf[144:176]
status, dla, dse, lat = struct.unpack('QQQQ', tmp_buf)
self.status = status
self.dla = dla
self.dse = dse
self.lat = lat
PebsEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
PebsNHM.pebs_nhm_num += 1
del tmp_buf
|
jorik041/glances
|
refs/heads/master
|
glances/exports/glances_statsd.py
|
11
|
# -*- coding: utf-8 -*-
#
# This file is part of Glances.
#
# Copyright (C) 2015 Nicolargo <[email protected]>
#
# Glances is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Glances is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Statsd interface class."""
# Import sys libs
import sys
from numbers import Number
try:
from configparser import NoOptionError, NoSectionError
except ImportError: # Python 2
from ConfigParser import NoOptionError, NoSectionError
# Import Glances lib
from glances.core.glances_logging import logger
from glances.exports.glances_export import GlancesExport
from statsd import StatsClient
class Export(GlancesExport):
"""This class manages the Statsd export module."""
def __init__(self, config=None, args=None):
"""Init the Statsd export IF."""
GlancesExport.__init__(self, config=config, args=args)
        # Load the Statsd configuration from the Glances configuration file
self.host = None
self.port = None
self.prefix = None
self.export_enable = self.load_conf()
if not self.export_enable:
sys.exit(2)
# Default prefix for stats is 'glances'
if self.prefix is None:
self.prefix = 'glances'
# Init the Statsd client
self.client = StatsClient(self.host,
int(self.port),
prefix=self.prefix)
def load_conf(self, section="statsd"):
"""Load the Statsd configuration in the Glances configuration file."""
if self.config is None:
return False
try:
self.host = self.config.get_value(section, 'host')
self.port = self.config.get_value(section, 'port')
except NoSectionError:
logger.critical("No Statsd configuration found")
return False
except NoOptionError as e:
logger.critical("Error in the Statsd configuration (%s)" % e)
return False
else:
logger.debug("Load Statsd from the Glances configuration file")
# Prefix is optional
try:
self.prefix = self.config.get_value(section, 'prefix')
except NoOptionError:
pass
return True
def init(self, prefix='glances'):
"""Init the connection to the Statsd server."""
if not self.export_enable:
return None
return StatsClient(self.host,
self.port,
prefix=prefix)
def export(self, name, columns, points):
"""Export the stats to the Statsd server."""
for i in range(0, len(columns)):
if not isinstance(points[i], Number):
continue
stat_name = '{0}.{1}'.format(name, columns[i])
stat_value = points[i]
try:
self.client.gauge(stat_name, stat_value)
except Exception as e:
logger.error("Can not export stats to Statsd (%s)" % e)
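# Minimal sketch of the underlying call made by Export.export() above
# (hypothetical host/port/metric values; StatsClient comes from the
# `statsd` package already imported by this module):
#
#   client = StatsClient('localhost', 8125, prefix='glances')
#   client.gauge('cpu.total', 42.0)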
|
jimi-c/ansible
|
refs/heads/devel
|
test/integration/targets/plugin_loader/override/filter_plugins/core.py
|
147
|
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
def do_flag(myval):
return 'flagged'
class FilterModule(object):
''' Ansible core jinja2 filters '''
def filters(self):
return {
# jinja2 overrides
'flag': do_flag,
'flatten': do_flag,
}
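# Hypothetical Jinja2 usage of the overrides above:
#   {{ 'anything' | flag }}   -> 'flagged'
#   {{ [1, [2]] | flatten }}  -> 'flagged'  (shadows the core flatten filter)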
|
chrisxue815/leetcode_python
|
refs/heads/master
|
problems/test_0027_swap.py
|
1
|
import unittest
class Solution:
def removeElement(self, nums, val):
"""
:type nums: List[int]
:type val: int
:rtype: int
"""
lo = 0
hi = len(nums) - 1
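        # Two-pointer sweep: values equal to `val` are swapped to the tail
        # (shrinking hi); lo counts the kept prefix. Element order may change.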
while lo <= hi:
if nums[lo] == val:
nums[lo], nums[hi] = nums[hi], nums[lo]
hi -= 1
else:
lo += 1
return lo
class Test(unittest.TestCase):
def test(self):
self._test([3, 2, 2, 3], 3, [2, 2])
def _test(self, nums, val, expected):
actual = Solution().removeElement(nums, val)
self.assertEqual(len(expected), actual)
self.assertEqual(expected, nums[:actual])
if __name__ == '__main__':
unittest.main()
|
woozzu/pylearn2
|
refs/heads/master
|
pylearn2/utils/tests/test_general.py
|
45
|
"""
Tests for pylearn2.utils.general functions.
"""
from pylearn2.utils import contains_nan, contains_inf, isfinite
import numpy as np
def test_contains_nan():
"""
Tests that pylearn2.utils.contains_nan correctly
identifies `np.nan` values in an array.
"""
arr = np.random.random(100)
assert not contains_nan(arr)
arr[0] = np.nan
assert contains_nan(arr)
def test_contains_inf():
"""
Tests that pylearn2.utils.contains_inf correctly
identifies `np.inf` values in an array.
"""
arr = np.random.random(100)
assert not contains_inf(arr)
arr[0] = np.nan
assert not contains_inf(arr)
arr[1] = np.inf
assert contains_inf(arr)
arr[1] = -np.inf
assert contains_inf(arr)
def test_isfinite():
"""
Tests that pylearn2.utils.isfinite correctly
identifies `np.nan` and `np.inf` values in an array.
"""
arr = np.random.random(100)
assert isfinite(arr)
arr[0] = np.nan
assert not isfinite(arr)
arr[0] = np.inf
assert not isfinite(arr)
arr[0] = -np.inf
assert not isfinite(arr)
|
Lekanich/intellij-community
|
refs/heads/master
|
python/testData/findUsages/ClassUsages.py
|
83
|
class C<caret>ow:
def __init__(self):
pass
c = Cow()
|
Celthi/youtube-dl-GUI
|
refs/heads/master
|
youtube_dl/extractor/comedycentral.py
|
20
|
from __future__ import unicode_literals
import re
from .mtv import MTVServicesInfoExtractor
from ..compat import (
compat_str,
compat_urllib_parse,
)
from ..utils import (
ExtractorError,
float_or_none,
unified_strdate,
)
class ComedyCentralIE(MTVServicesInfoExtractor):
_VALID_URL = r'''(?x)https?://(?:www\.)?cc\.com/
(video-clips|episodes|cc-studios|video-collections|full-episodes)
/(?P<title>.*)'''
_FEED_URL = 'http://comedycentral.com/feeds/mrss/'
_TEST = {
'url': 'http://www.cc.com/video-clips/kllhuv/stand-up-greg-fitzsimmons--uncensored---too-good-of-a-mother',
'md5': 'c4f48e9eda1b16dd10add0744344b6d8',
'info_dict': {
'id': 'cef0cbb3-e776-4bc9-b62e-8016deccb354',
'ext': 'mp4',
'title': 'CC:Stand-Up|Greg Fitzsimmons: Life on Stage|Uncensored - Too Good of a Mother',
'description': 'After a certain point, breastfeeding becomes c**kblocking.',
},
}
class ComedyCentralShowsIE(MTVServicesInfoExtractor):
IE_DESC = 'The Daily Show / The Colbert Report'
# urls can be abbreviations like :thedailyshow or :colbert
# urls for episodes like:
# or urls for clips like: http://www.thedailyshow.com/watch/mon-december-10-2012/any-given-gun-day
# or: http://www.colbertnation.com/the-colbert-report-videos/421667/november-29-2012/moon-shattering-news
# or: http://www.colbertnation.com/the-colbert-report-collections/422008/festival-of-lights/79524
_VALID_URL = r'''(?x)^(:(?P<shortname>tds|thedailyshow|cr|colbert|colbertnation|colbertreport)
|https?://(:www\.)?
(?P<showname>thedailyshow|thecolbertreport)\.(?:cc\.)?com/
((?:full-)?episodes/(?:[0-9a-z]{6}/)?(?P<episode>.*)|
(?P<clip>
(?:(?:guests/[^/]+|videos|video-playlists|special-editions|news-team/[^/]+)/[^/]+/(?P<videotitle>[^/?#]+))
|(the-colbert-report-(videos|collections)/(?P<clipID>[0-9]+)/[^/]*/(?P<cntitle>.*?))
|(watch/(?P<date>[^/]*)/(?P<tdstitle>.*))
)|
(?P<interview>
extended-interviews/(?P<interID>[0-9a-z]+)/(?:playlist_tds_extended_)?(?P<interview_title>.*?)(/.*?)?)))
'''
_TESTS = [{
'url': 'http://thedailyshow.cc.com/watch/thu-december-13-2012/kristen-stewart',
'md5': '4e2f5cb088a83cd8cdb7756132f9739d',
'info_dict': {
'id': 'ab9ab3e7-5a98-4dbe-8b21-551dc0523d55',
'ext': 'mp4',
'upload_date': '20121213',
'description': 'Kristen Stewart learns to let loose in "On the Road."',
'uploader': 'thedailyshow',
'title': 'thedailyshow kristen-stewart part 1',
}
}, {
'url': 'http://thedailyshow.cc.com/extended-interviews/xm3fnq/andrew-napolitano-extended-interview',
'only_matching': True,
}, {
'url': 'http://thecolbertreport.cc.com/videos/29w6fx/-realhumanpraise-for-fox-news',
'only_matching': True,
}, {
'url': 'http://thecolbertreport.cc.com/videos/gh6urb/neil-degrasse-tyson-pt--1?xrs=eml_col_031114',
'only_matching': True,
}, {
'url': 'http://thedailyshow.cc.com/guests/michael-lewis/3efna8/exclusive---michael-lewis-extended-interview-pt--3',
'only_matching': True,
}, {
'url': 'http://thedailyshow.cc.com/episodes/sy7yv0/april-8--2014---denis-leary',
'only_matching': True,
}, {
'url': 'http://thecolbertreport.cc.com/episodes/8ase07/april-8--2014---jane-goodall',
'only_matching': True,
}, {
'url': 'http://thedailyshow.cc.com/video-playlists/npde3s/the-daily-show-19088-highlights',
'only_matching': True,
}, {
'url': 'http://thedailyshow.cc.com/video-playlists/t6d9sg/the-daily-show-20038-highlights/be3cwo',
'only_matching': True,
}, {
'url': 'http://thedailyshow.cc.com/special-editions/2l8fdb/special-edition---a-look-back-at-food',
'only_matching': True,
}, {
'url': 'http://thedailyshow.cc.com/news-team/michael-che/7wnfel/we-need-to-talk-about-israel',
'only_matching': True,
}]
_available_formats = ['3500', '2200', '1700', '1200', '750', '400']
_video_extensions = {
'3500': 'mp4',
'2200': 'mp4',
'1700': 'mp4',
'1200': 'mp4',
'750': 'mp4',
'400': 'mp4',
}
_video_dimensions = {
'3500': (1280, 720),
'2200': (960, 540),
'1700': (768, 432),
'1200': (640, 360),
'750': (512, 288),
'400': (384, 216),
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj.group('shortname'):
if mobj.group('shortname') in ('tds', 'thedailyshow'):
url = 'http://thedailyshow.cc.com/full-episodes/'
else:
url = 'http://thecolbertreport.cc.com/full-episodes/'
mobj = re.match(self._VALID_URL, url, re.VERBOSE)
assert mobj is not None
if mobj.group('clip'):
if mobj.group('videotitle'):
epTitle = mobj.group('videotitle')
elif mobj.group('showname') == 'thedailyshow':
epTitle = mobj.group('tdstitle')
else:
epTitle = mobj.group('cntitle')
dlNewest = False
elif mobj.group('interview'):
epTitle = mobj.group('interview_title')
dlNewest = False
else:
dlNewest = not mobj.group('episode')
if dlNewest:
epTitle = mobj.group('showname')
else:
epTitle = mobj.group('episode')
show_name = mobj.group('showname')
webpage, htmlHandle = self._download_webpage_handle(url, epTitle)
if dlNewest:
url = htmlHandle.geturl()
mobj = re.match(self._VALID_URL, url, re.VERBOSE)
if mobj is None:
raise ExtractorError('Invalid redirected URL: ' + url)
if mobj.group('episode') == '':
raise ExtractorError('Redirected URL is still not specific: ' + url)
epTitle = (mobj.group('episode') or mobj.group('videotitle')).rpartition('/')[-1]
mMovieParams = re.findall('(?:<param name="movie" value="|var url = ")(http://media.mtvnservices.com/([^"]*(?:episode|video).*?:.*?))"', webpage)
if len(mMovieParams) == 0:
            # The Colbert Report embeds the information in a data-mgid
            # attribute without a URL prefix; so extract the alternate
            # reference and then add the URL prefix manually.
altMovieParams = re.findall('data-mgid="([^"]*(?:episode|video|playlist).*?:.*?)"', webpage)
if len(altMovieParams) == 0:
raise ExtractorError('unable to find Flash URL in webpage ' + url)
else:
mMovieParams = [("http://media.mtvnservices.com/" + altMovieParams[0], altMovieParams[0])]
uri = mMovieParams[0][1]
# Correct cc.com in uri
uri = re.sub(r'(episode:[^.]+)(\.cc)?\.com', r'\1.cc.com', uri)
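        # e.g. '...episode:thedailyshow.com:...' -> '...episode:thedailyshow.cc.com:...'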
index_url = 'http://%s.cc.com/feeds/mrss?%s' % (show_name, compat_urllib_parse.urlencode({'uri': uri}))
idoc = self._download_xml(
index_url, epTitle,
'Downloading show index', 'Unable to download episode index')
title = idoc.find('./channel/title').text
description = idoc.find('./channel/description').text
entries = []
item_els = idoc.findall('.//item')
for part_num, itemEl in enumerate(item_els):
upload_date = unified_strdate(itemEl.findall('./pubDate')[0].text)
thumbnail = itemEl.find('.//{http://search.yahoo.com/mrss/}thumbnail').attrib.get('url')
content = itemEl.find('.//{http://search.yahoo.com/mrss/}content')
duration = float_or_none(content.attrib.get('duration'))
mediagen_url = content.attrib['url']
guid = itemEl.find('./guid').text.rpartition(':')[-1]
cdoc = self._download_xml(
mediagen_url, epTitle,
'Downloading configuration for segment %d / %d' % (part_num + 1, len(item_els)))
turls = []
for rendition in cdoc.findall('.//rendition'):
finfo = (rendition.attrib['bitrate'], rendition.findall('./src')[0].text)
turls.append(finfo)
formats = []
for format, rtmp_video_url in turls:
w, h = self._video_dimensions.get(format, (None, None))
formats.append({
'format_id': 'vhttp-%s' % format,
'url': self._transform_rtmp_url(rtmp_video_url),
'ext': self._video_extensions.get(format, 'mp4'),
'height': h,
'width': w,
})
formats.append({
'format_id': 'rtmp-%s' % format,
'url': rtmp_video_url.replace('viacomccstrm', 'viacommtvstrm'),
'ext': self._video_extensions.get(format, 'mp4'),
'height': h,
'width': w,
})
self._sort_formats(formats)
virtual_id = show_name + ' ' + epTitle + ' part ' + compat_str(part_num + 1)
entries.append({
'id': guid,
'title': virtual_id,
'formats': formats,
'uploader': show_name,
'upload_date': upload_date,
'duration': duration,
'thumbnail': thumbnail,
'description': description,
})
return {
'_type': 'playlist',
'entries': entries,
'title': show_name + ' ' + title,
'description': description,
}
|
trishnaguha/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/f5/bigip_sys_daemon_log_tmm.py
|
14
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_sys_daemon_log_tmm
short_description: Manage BIG-IP tmm daemon log settings
description:
- Manage BIG-IP tmm log settings.
version_added: 2.8
options:
arp_log_level:
description:
- Specifies the lowest level of ARP messages from the tmm daemon
to include in the system log.
choices:
- debug
- error
- informational
- notice
- warning
http_compression_log_level:
description:
- Specifies the lowest level of HTTP compression messages from the tmm daemon
to include in the system log.
choices:
- debug
- error
- informational
- notice
- warning
http_log_level:
description:
- Specifies the lowest level of HTTP messages from the tmm daemon
to include in the system log.
choices:
- debug
- error
- informational
- notice
- warning
ip_log_level:
description:
- Specifies the lowest level of IP address messages from the tmm daemon
to include in the system log.
choices:
- debug
- informational
- notice
- warning
irule_log_level:
description:
- Specifies the lowest level of iRule messages from the tmm daemon
to include in the system log.
choices:
- debug
- error
- informational
- notice
- warning
layer4_log_level:
description:
- Specifies the lowest level of Layer 4 messages from the tmm daemon
to include in the system log.
choices:
- debug
- informational
- notice
net_log_level:
description:
- Specifies the lowest level of network messages from the tmm daemon
to include in the system log.
choices:
- critical
- debug
- error
- informational
- notice
- warning
os_log_level:
description:
- Specifies the lowest level of operating system messages from the tmm daemon
to include in the system log.
choices:
- alert
- critical
- debug
- emergency
- error
- informational
- notice
- warning
pva_log_level:
description:
- Specifies the lowest level of PVA messages from the tmm daemon
to include in the system log.
choices:
- debug
- informational
- notice
ssl_log_level:
description:
- Specifies the lowest level of SSL messages from the tmm daemon
to include in the system log.
choices:
- alert
- critical
- debug
- emergency
- error
- informational
- notice
- warning
state:
description:
- The state of the log level on the system. When C(present), guarantees
that an existing log level is set to C(value).
default: present
choices:
- present
extends_documentation_fragment: f5
author:
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Set SSL log level to debug
bigip_sys_daemon_log_tmm:
provider:
password: secret
server: lb.mydomain.com
user: admin
ssl_log_level: debug
delegate_to: localhost
'''
RETURN = r'''
arp_log_level:
description: Lowest level of ARP messages from the tmm daemon to log.
returned: changed
type: str
sample: error
http_compression_log_level:
description: Lowest level of HTTP compression messages from the tmm daemon to log.
returned: changed
type: str
sample: debug
http_log_level:
description: Lowest level of HTTP messages from the tmm daemon to log.
returned: changed
type: str
sample: notice
ip_log_level:
description: Lowest level of IP address messages from the tmm daemon to log.
returned: changed
type: str
sample: warning
irule_log_level:
description: Lowest level of iRule messages from the tmm daemon to log.
returned: changed
type: str
sample: error
layer4_log_level:
description: Lowest level of Layer 4 messages from the tmm daemon to log.
returned: changed
type: str
sample: notice
net_log_level:
description: Lowest level of network messages from the tmm daemon to log.
returned: changed
type: str
sample: critical
os_log_level:
description: Lowest level of operating system messages from the tmm daemon to log.
returned: changed
type: str
sample: critical
pva_log_level:
description: Lowest level of PVA messages from the tmm daemon to log.
returned: changed
type: str
sample: debug
ssl_log_level:
description: Lowest level of SSL messages from the tmm daemon to log.
returned: changed
type: str
sample: critical
'''
from ansible.module_utils.basic import AnsibleModule
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import exit_json
from library.module_utils.network.f5.common import fail_json
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import exit_json
from ansible.module_utils.network.f5.common import fail_json
class Parameters(AnsibleF5Parameters):
api_map = {
'arpLogLevel': 'arp_log_level',
'httpCompressionLogLevel': 'http_compression_log_level',
'httpLogLevel': 'http_log_level',
'ipLogLevel': 'ip_log_level',
'iruleLogLevel': 'irule_log_level',
'layer4LogLevel': 'layer4_log_level',
'netLogLevel': 'net_log_level',
'osLogLevel': 'os_log_level',
'pvaLogLevel': 'pva_log_level',
'sslLogLevel': 'ssl_log_level',
}
api_attributes = [
'arpLogLevel',
'httpCompressionLogLevel',
'httpLogLevel',
'ipLogLevel',
'iruleLogLevel',
'layer4LogLevel',
'netLogLevel',
'osLogLevel',
'pvaLogLevel',
'sslLogLevel',
]
returnables = [
'arp_log_level',
'http_compression_log_level',
'http_log_level',
'ip_log_level',
'irule_log_level',
'layer4_log_level',
'net_log_level',
'os_log_level',
'pva_log_level',
'ssl_log_level',
]
updatables = [
'arp_log_level',
'http_compression_log_level',
'http_log_level',
'ip_log_level',
'irule_log_level',
'layer4_log_level',
'net_log_level',
'os_log_level',
'pva_log_level',
'ssl_log_level',
]
class ApiParameters(Parameters):
pass
class ModuleParameters(Parameters):
pass
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
pass
class ReportableChanges(Changes):
pass
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.want = ModuleParameters(params=self.module.params)
self.have = ApiParameters()
self.changes = UsableChanges()
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def exec_module(self):
result = dict()
changed = self.present()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.client.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def present(self):
return self.update()
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def update_on_device(self):
params = self.changes.api_params()
uri = "https://{0}:{1}/mgmt/tm/sys/daemon-log-settings/tmm".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def read_current_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/sys/daemon-log-settings/tmm".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return ApiParameters(params=response)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
self.choices_min = ['debug', 'informational', 'notice']
self.choices_common = self.choices_min + ['warning', 'error']
self.choices_all = self.choices_common + ['alert', 'critical', 'emergency']
argument_spec = dict(
arp_log_level=dict(
choices=self.choices_common
),
http_compression_log_level=dict(
choices=self.choices_common
),
http_log_level=dict(
choices=self.choices_common
),
ip_log_level=dict(
choices=self.choices_min + ['warning']
),
irule_log_level=dict(
choices=self.choices_common
),
layer4_log_level=dict(
choices=self.choices_min
),
net_log_level=dict(
choices=self.choices_common + ['critical']
),
os_log_level=dict(
choices=self.choices_all
),
pva_log_level=dict(
choices=self.choices_min
),
ssl_log_level=dict(
choices=self.choices_all
),
state=dict(default='present', choices=['present'])
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
)
client = F5RestClient(**module.params)
try:
mm = ModuleManager(module=module, client=client)
results = mm.exec_module()
cleanup_tokens(client)
exit_json(module, results, client)
except F5ModuleError as ex:
cleanup_tokens(client)
fail_json(module, ex, client)
if __name__ == '__main__':
main()
|
plotly/python-api
|
refs/heads/master
|
packages/python/plotly/plotly/validators/indicator/gauge/step/line/__init__.py
|
24
|
import sys
if sys.version_info < (3, 7):
from ._width import WidthValidator
from ._color import ColorValidator
else:
from _plotly_utils.importers import relative_import
__all__, __getattr__, __dir__ = relative_import(
__name__, [], ["._width.WidthValidator", "._color.ColorValidator"]
)
|
Schizo/MediaBrowser
|
refs/heads/master
|
python/Temp/sandboxShit.py
|
1
|
#!/usr/bin/env python
############################################################################
#
# Copyright (C) 2004-2005 Trolltech AS. All rights reserved.
#
# This file is part of the example classes of the Qt Toolkit.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file LICENSE.GPL included in the packaging of
# this file. Please review the following information to ensure GNU
# General Public Licensing requirements will be met:
# http://www.trolltech.com/products/qt/opensource.html
#
# If you are unsure which license is appropriate for your use, please
# review the following information:
# http://www.trolltech.com/products/qt/licensing.html or contact the
# sales department at [email protected].
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
############################################################################
# This is only needed for Python v2 but is harmless for Python v3.
import sip
sip.setapi('QString', 2)
sip.setapi('QVariant', 2)
from PyQt4 import QtCore, QtGui
import pixelator_rc
ItemSize = 256
class PixelDelegate(QtGui.QAbstractItemDelegate):
def __init__(self, parent=None):
super(PixelDelegate, self).__init__(parent)
self.pixelSize = 12
def paint(self, painter, option, index):
if option.state & QtGui.QStyle.State_Selected:
painter.fillRect(option.rect, option.palette.highlight())
size = min(option.rect.width(), option.rect.height())
brightness = index.model().data(index, QtCore.Qt.DisplayRole)
radius = (size/2.0) - (brightness/255.0 * size/2.0)
if radius == 0.0:
return
painter.save()
painter.setRenderHint(QtGui.QPainter.Antialiasing)
painter.setPen(QtCore.Qt.NoPen)
if option.state & QtGui.QStyle.State_Selected:
painter.setBrush(option.palette.highlightedText())
else:
painter.setBrush(QtGui.QBrush(QtCore.Qt.black))
painter.drawEllipse(QtCore.QRectF(
option.rect.x() + option.rect.width()/2 - radius,
option.rect.y() + option.rect.height()/2 - radius,
2*radius, 2*radius))
painter.restore()
def sizeHint(self, option, index):
return QtCore.QSize(self.pixelSize, self.pixelSize)
def setPixelSize(self, size):
self.pixelSize = size
class ImageModel(QtCore.QAbstractTableModel):
def __init__(self, parent=None):
super(ImageModel, self).__init__(parent)
self.modelImage = QtGui.QImage()
def setImage(self, image):
self.modelImage = QtGui.QImage(image)
self.reset()
def rowCount(self, parent):
return self.modelImage.height()
def columnCount(self, parent):
return self.modelImage.width()
def data(self, index, role):
if not index.isValid() or role != QtCore.Qt.DisplayRole:
return None
return QtGui.qGray(self.modelImage.pixel(index.column(), index.row()))
def headerData(self, section, orientation, role):
if role == QtCore.Qt.SizeHintRole:
return QtCore.QSize(1, 1)
return None
class MainWindow(QtGui.QMainWindow):
def __init__(self):
super(MainWindow, self).__init__()
self.currentPath = QtCore.QDir.homePath()
self.model = ImageModel(self)
centralWidget = QtGui.QWidget()
self.view = QtGui.QTableView()
self.view.setShowGrid(False)
self.view.horizontalHeader().hide()
self.view.verticalHeader().hide()
self.view.horizontalHeader().setMinimumSectionSize(1)
self.view.verticalHeader().setMinimumSectionSize(1)
self.view.setModel(self.model)
delegate = PixelDelegate(self)
self.view.setItemDelegate(delegate)
pixelSizeLabel = QtGui.QLabel("Pixel size:")
pixelSizeSpinBox = QtGui.QSpinBox()
pixelSizeSpinBox.setMinimum(4)
pixelSizeSpinBox.setMaximum(32)
pixelSizeSpinBox.setValue(12)
fileMenu = QtGui.QMenu("&File", self)
openAction = fileMenu.addAction("&Open...")
openAction.setShortcut("Ctrl+O")
self.printAction = fileMenu.addAction("&Print...")
self.printAction.setEnabled(False)
self.printAction.setShortcut("Ctrl+P")
quitAction = fileMenu.addAction("E&xit")
quitAction.setShortcut("Ctrl+Q")
helpMenu = QtGui.QMenu("&Help", self)
aboutAction = helpMenu.addAction("&About")
self.menuBar().addMenu(fileMenu)
self.menuBar().addSeparator()
self.menuBar().addMenu(helpMenu)
openAction.triggered.connect(self.chooseImage)
self.printAction.triggered.connect(self.printImage)
quitAction.triggered.connect(QtGui.qApp.quit)
aboutAction.triggered.connect(self.showAboutBox)
pixelSizeSpinBox.valueChanged.connect(delegate.setPixelSize)
pixelSizeSpinBox.valueChanged.connect(self.updateView)
controlsLayout = QtGui.QHBoxLayout()
controlsLayout.addWidget(pixelSizeLabel)
controlsLayout.addWidget(pixelSizeSpinBox)
controlsLayout.addStretch(1)
mainLayout = QtGui.QVBoxLayout()
mainLayout.addWidget(self.view)
mainLayout.addLayout(controlsLayout)
centralWidget.setLayout(mainLayout)
self.setCentralWidget(centralWidget)
self.setWindowTitle("Pixelator")
self.resize(640, 480)
def chooseImage(self):
fileName = QtGui.QFileDialog.getOpenFileName(self, "Choose an Image",
self.currentPath, '*')
if fileName:
self.openImage(fileName)
def openImage(self, fileName):
image = QtGui.QImage()
if image.load(fileName):
self.model.setImage(image)
if not fileName.startswith(':/'):
self.currentPath = fileName
self.setWindowTitle("%s - Pixelator" % self.currentPath)
self.printAction.setEnabled(True)
self.updateView()
def printImage(self):
if self.model.rowCount(QtCore.QModelIndex()) * self.model.columnCount(QtCore.QModelIndex()) > 90000:
answer = QtGui.QMessageBox.question(self, "Large Image Size",
"The printed image may be very large. Are you sure that "
"you want to print it?",
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No)
if answer == QtGui.QMessageBox.No:
return
printer = QtGui.QPrinter(QtGui.QPrinter.HighResolution)
dlg = QtGui.QPrintDialog(printer, self)
dlg.setWindowTitle("Print Image")
if dlg.exec_() != QtGui.QDialog.Accepted:
return
painter = QtGui.QPainter()
painter.begin(printer)
rows = self.model.rowCount(QtCore.QModelIndex())
columns = self.model.columnCount(QtCore.QModelIndex())
sourceWidth = (columns+1) * ItemSize
sourceHeight = (rows+1) * ItemSize
painter.save()
xscale = printer.pageRect().width() / float(sourceWidth)
yscale = printer.pageRect().height() / float(sourceHeight)
scale = min(xscale, yscale)
painter.translate(printer.pageRect().x()+printer.pageRect().width()/2,
printer.pageRect().y()+printer.pageRect().height()/2)
painter.scale(scale, scale)
        painter.translate(-sourceWidth/2, -sourceHeight/2)
option = QtGui.QStyleOptionViewItem()
parent = QtCore.QModelIndex()
progress = QtGui.QProgressDialog("Printing...", "Cancel", 0, rows,
self)
y = ItemSize / 2.0
for row in range(rows):
progress.setValue(row)
QtGui.qApp.processEvents()
if progress.wasCanceled():
break
x = ItemSize / 2.0
for col in range(columns):
option.rect = QtCore.QRect(x, y, ItemSize, ItemSize)
                self.view.itemDelegate().paint(painter, option,
                        self.model.index(row, col, parent))
x = x + ItemSize
y = y + ItemSize
progress.setValue(rows)
painter.restore()
painter.end()
if progress.wasCanceled():
QtGui.QMessageBox.information(self, "Printing canceled",
"The printing process was canceled.",
QtGui.QMessageBox.Cancel)
def showAboutBox(self):
QtGui.QMessageBox.about(self, "About the Pixelator example",
"This example demonstrates how a standard view and a custom\n"
"delegate can be used to produce a specialized "
"representation\nof data in a simple custom model.")
def updateView(self):
self.view.resizeColumnsToContents()
self.view.resizeRowsToContents()
if __name__ == '__main__':
import sys
app = QtGui.QApplication(sys.argv)
window = MainWindow()
window.show()
window.openImage(':/images/qt.png')
sys.exit(app.exec_())
|
The-Compiler/pytest-qt
|
refs/heads/master
|
src/pytestqt/__init__.py
|
2
|
# _version is automatically generated by setuptools_scm
from pytestqt._version import version
__version__ = version
|
nviennot/rethinkdb
|
refs/heads/next
|
lib/retester/cloud_retester.py
|
4
|
# Copyright 2010-2012 RethinkDB, all rights reserved.
import subprocess, shlex, signal, os, time, shutil, tempfile, sys, traceback, types, gitroot, random, atexit, stat
base_directory = os.path.dirname(os.path.join(os.getcwd(), sys.argv[0])) + "/../test"
use_local_retester = os.getenv("USE_CLOUD", "false") == "false"
# The following functions are for external use: setup_testing_nodes(), terminate_testing_nodes(), do_test_cloud(), report_cloud()
# + the following functions imported from retester are compatible and can be used in combination with cloud tests: do_test()
# In order to enable running tests in Amazon's EC2, set the USE_CLOUD environment variable
# Please configure in ec2_configuration.py!
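#
# A minimal usage sketch (a hedged illustration, not part of this module;
# the test script name and its arguments are hypothetical):
#
#   setup_testing_nodes()
#   do_test_cloud("integration/serial_mix.py", {"duration": 30}, repeat=3, timeout=120)
#   report_cloud()
#   terminate_testing_nodes()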
from cloud_config import ec2_configuration
import cloud_node_data
testing_nodes_ec2_instance_type = ec2_configuration.testing_nodes_ec2_instance_type
testing_nodes_ec2_count = ec2_configuration.testing_nodes_ec2_count
testing_nodes_ec2_image_name = ec2_configuration.testing_nodes_ec2_image_name
testing_nodes_ec2_image_user_name = ec2_configuration.testing_nodes_ec2_image_user_name
testing_nodes_ec2_key_pair_name = ec2_configuration.testing_nodes_ec2_key_pair_name
testing_nodes_ec2_security_group_name = ec2_configuration.testing_nodes_ec2_security_group_name
testing_nodes_ec2_region = ec2_configuration.testing_nodes_ec2_region
testing_nodes_ec2_access_key = ec2_configuration.testing_nodes_ec2_access_key
testing_nodes_ec2_private_key = ec2_configuration.testing_nodes_ec2_private_key
private_ssh_key_filename = ec2_configuration.private_ssh_key_filename
round_robin_locking_timeout = 2
wrapper_script_filename = "cloud_retester_run_test_wrapper.py" # must be just the name of the file, no path!
# END of configuration options
from stat import *
import paramiko # Using Paramiko for SSH2
import boto, boto.ec2 # Using Boto for AWS commands
from vcoptparse import *
from retester import *
import retester
reports = []
test_references = []
testing_nodes_ec2_reservations = []
testing_nodes = []
remaining_nodes_to_allocate = testing_nodes_ec2_count
next_node_to_issue_to = 0
node_allocation_tries_count = 0
def put_file_compressed(ssh_transport, local_path, destination_path, retry = 3):
gzip = None
try:
assert not not local_path
assert not not destination_path
session = ssh_transport.open_session()
try:
gzip = subprocess.Popen(["gzip", "-c", local_path], stdout=subprocess.PIPE)
session.exec_command('gzip -cd > "%s" && chmod %s "%s"\n' % (destination_path, oct(os.stat(local_path).st_mode)[-4:], destination_path))
while True:
buf = gzip.stdout.read(65536)
if not buf:
break
else:
session.sendall(buf)
gzip.stdout.close()
finally:
session.close()
except (IOError, EOFError, paramiko.SSHException, Exception) as e:
if retry > 0:
return put_file_compressed(ssh_transport, local_path, destination_path, retry-1)
else:
raise e
finally:
        if gzip and gzip.poll() is None:
gzip.kill()
def get_file_compressed(ssh_transport, remote_path, destination_path, retry = 3):
gzip = None
try:
assert not not remote_path
assert not not destination_path
session = ssh_transport.open_session()
try:
with open(destination_path, "wb") as out:
gzip = subprocess.Popen(["gzip", "-cd"], stdin=subprocess.PIPE, stdout=out)
session.exec_command('gzip -c "%s"\n' % remote_path)
while True:
buf = session.recv(65536)
if not buf:
break
else:
gzip.stdin.write(buf)
gzip.stdin.close()
gzip.wait()
finally:
session.close()
except (IOError, EOFError, paramiko.SSHException, Exception) as e:
if retry > 0:
return get_file_compressed(ssh_transport, remote_path, destination_path, retry-1)
else:
raise e
finally:
        if gzip and gzip.poll() is None:
gzip.kill()
class TestReference:
def __init__(self, command):
self.single_runs = []
self.command = command
self.results = []
class TestingNode:
def __init__(self, hostname, port, username, private_ssh_key_filename):
self.hostname = hostname
self.port = port
self.username = username
#print "Created TestingNode with hostname %s, port %i, username %s" % (hostname, port, username)
# read private key from file to get access to the node
if True: # Always use RSA for now
self.private_ssh_key = paramiko.RSAKey(filename=private_ssh_key_filename)
else:
self.private_ssh_key = paramiko.DSSKey(filename=private_ssh_key_filename)
self.global_lock_file = "/tmp/cloudtest_lock"
system_random = random.SystemRandom()
        self.global_build_path = "/tmp/cloudtest_build_" + str(system_random.randint(10000000, 99999999))
        self.global_bench_path = "/tmp/cloudtest_bench_" + str(system_random.randint(10000000, 99999999))
        self.global_test_path = "/tmp/cloudtest_test_" + str(system_random.randint(10000000, 99999999))
#print "Installing build into %s\n" % self.global_build_path
self.basedata_installed = False
self.ssh_transport = None
def __del__(self):
if self.ssh_transport != None:
self.ssh_transport.close()
def get_transport(self, retry = 3):
if self.ssh_transport != None:
return self.ssh_transport
try:
# open SSH transport
self.ssh_transport = paramiko.Transport((self.hostname, self.port))
self.ssh_transport.use_compression()
self.ssh_transport.set_keepalive(60)
self.ssh_transport.connect(username=self.username, pkey=self.private_ssh_key)
except (IOError, EOFError, paramiko.SSHException, Exception) as e:
self.ssh_transport = None
time.sleep(90) # Wait a bit in case the network needs time to recover
if retry > 0:
return self.get_transport(retry-1)
else:
raise e
return self.ssh_transport
    # returns a tuple (return code, output)
def run_command(self, command, retry = 3):
ssh_transport = self.get_transport()
try:
# open SSH channel
ssh_channel = ssh_transport.open_session()
# issue the command to the node
ssh_channel.exec_command(command)
# read back command result:
# do not timeout while reading (probably default anyway?)
ssh_channel.settimeout(None)
# read output until we get an EOF
command_output = ""
output_read = ssh_channel.recv(4096) # No do-while loops in Python?
while len(output_read) > 0:
command_output += output_read
output_read = ssh_channel.recv(4096)
# retrieve exit code
command_exit_status = ssh_channel.recv_exit_status() # side effect: waits until command has finished
ssh_channel.close()
#self.ssh_transport.close()
return (command_exit_status, command_output)
except (IOError, EOFError, paramiko.SSHException, Exception) as e:
self.ssh_transport = None
if retry > 0:
return self.run_command(command, retry-1)
else:
raise e
def put_file(self, local_path, destination_path, retry = 3):
print "Sending file %r -> %r to cloud..." % (local_path, destination_path)
put_file_compressed(self.get_transport(), local_path, destination_path, retry)
#self.put_file_sftp(local_path, destination_path, retry)
def put_file_sftp(self, local_path, destination_path, retry = 3):
ssh_transport = self.get_transport()
try:
# open SFTP session
sftp_session = paramiko.SFTPClient.from_transport(ssh_transport)
# do the operation
sftp_session.put(local_path, destination_path)
sftp_session.chmod(destination_path, os.stat(local_path)[ST_MODE])
sftp_session.close()
except (IOError, EOFError, paramiko.SSHException, Exception) as e:
self.ssh_transport = None
if retry > 0:
return self.put_file(local_path, destination_path, retry-1)
else:
raise e
def get_file(self, remote_path, destination_path, retry = 3):
print "Getting file %r -> %r from cloud..." % (remote_path, destination_path)
get_file_compressed(self.get_transport(), remote_path, destination_path, retry)
#self.get_file_sftp(remote_path, destination_path, retry)
def get_file_sftp(self, remote_path, destination_path, retry = 3):
ssh_transport = self.get_transport()
try:
# open SFTP session
sftp_session = paramiko.SFTPClient.from_transport(ssh_transport)
# do the operation
sftp_session.get(remote_path, destination_path)
sftp_session.close()
except (IOError, EOFError, paramiko.SSHException, Exception) as e:
self.ssh_transport = None
if retry > 0:
return self.get_file(remote_path, destination_path, retry-1)
else:
raise e
def get_file_gz(self, remote_path, destination_path, retry = 3):
f = open(destination_path, "w")
result = self.run_command("gzip -c \"%s\"" % remote_path) # TODO: Escape filename
if not result[0] == 0:
print "gzip returned an error"
# TODO: Handle properly
return
f.write(result[1])
f.close()
def put_directory(self, local_path, destination_path, retry = 3):
print "Sending directory %r to cloud..." % local_path
ssh_transport = self.get_transport()
try:
# open SFTP session
sftp_session = paramiko.SFTPClient.from_transport(ssh_transport)
# do the operation
for root, dirs, files in os.walk(local_path):
for name in files:
sftp_session.put(os.path.join(root, name), os.path.join(destination_path + root[len(local_path):], name))
sftp_session.chmod(os.path.join(destination_path + root[len(local_path):], name), os.stat(os.path.join(root, name))[ST_MODE])
for name in dirs:
#print "mk remote dir %s" % os.path.join(destination_path + root[len(local_path):], name)
sftp_session.mkdir(os.path.join(destination_path + root[len(local_path):], name))
sftp_session.close()
except (IOError, EOFError, paramiko.SSHException, Exception) as e:
self.ssh_transport = None
if retry > 0:
return self.put_directory(local_path, destination_path, retry-1)
else:
raise e
def list_directory(self, remote_path, retry = 3):
ssh_transport = self.get_transport()
try:
sftp_session = paramiko.SFTPClient.from_transport(ssh_transport)
            dir_list = sftp_session.listdir_attr(remote_path)
            sftp_session.close()
            return dir_list
except (IOError, EOFError, paramiko.SSHException, Exception) as e:
self.ssh_transport = None
if retry > 0:
return self.list_directory(remote_path, retry-1)
else:
raise e
def make_directory(self, remote_path, retry = 3):
ssh_transport = self.get_transport()
try:
# open SFTP session
sftp_session = paramiko.SFTPClient.from_transport(ssh_transport)
# do the operation
sftp_session.mkdir(remote_path)
sftp_session.close()
except (IOError, EOFError, paramiko.SSHException, Exception) as e:
self.ssh_transport = None
if retry > 0:
return self.make_directory(remote_path, retry-1)
else:
raise e
def make_directory_recursively(self, remote_path):
# rely on mkdir command to do the work...
mkdir_result = self.run_command("mkdir -p %s" % remote_path.replace(" ", "\\ "))
if mkdir_result[0] != 0:
print ("Unable to create directory")
            # TODO: Throw exception or something...
def acquire_lock(self, locking_timeout = 0):
lock_sleeptime = 1
lock_command = "lockfile -%i -r -1 %s" % (lock_sleeptime, self.global_lock_file.replace(" ", "\' "))
if locking_timeout > 0:
lock_command = "lockfile -%i -r %i %s" % (lock_sleeptime, locking_timeout / lock_sleeptime, self.global_lock_file.replace(" ", "\' "))
locking_result = self.run_command(lock_command)
return locking_result[0] == 0
def get_release_lock_command(self):
return "rm -f %s" % self.global_lock_file.replace(" ", "\' ")
def release_lock(self):
command_result = self.run_command(self.get_release_lock_command())
if command_result[0] != 0:
print "Unable to release lock (maybe the node wasn't locked before?). Ignoring this."
def create_testing_nodes_from_reservation(ec2_reservation):
global testing_nodes
global testing_nodes_ec2_image_user_name
global private_ssh_key_filename
for instance in ec2_reservation.instances:
if instance.state == "running":
new_testing_node = TestingNode(instance.public_dns_name, 22, testing_nodes_ec2_image_user_name, private_ssh_key_filename)
testing_nodes.append(new_testing_node)
def setup_testing_nodes():
global testing_nodes
global use_local_retester
if use_local_retester:
return
start_testing_nodes()
def start_testing_nodes():
global testing_nodes
global testing_nodes_ec2_reservations
global remaining_nodes_to_allocate
global testing_nodes_ec2_image_name
global testing_nodes_ec2_instance_type
global testing_nodes_ec2_key_pair_name
global testing_nodes_ec2_security_group_name
global testing_nodes_ec2_region
global testing_nodes_ec2_access_key
global testing_nodes_ec2_private_key
global node_allocation_tries_count
if remaining_nodes_to_allocate == 0:
return
# Reserve nodes in EC2
print "Trying to allocate %i testing nodes" % remaining_nodes_to_allocate
try:
ec2_connection = boto.ec2.connect_to_region(testing_nodes_ec2_region, aws_access_key_id=testing_nodes_ec2_access_key, aws_secret_access_key=testing_nodes_ec2_private_key)
# Query AWS to start all instances
ec2_image = ec2_connection.get_image(testing_nodes_ec2_image_name)
ec2_reservation = ec2_image.run(min_count=1, max_count=remaining_nodes_to_allocate, key_name=testing_nodes_ec2_key_pair_name, security_groups=[testing_nodes_ec2_security_group_name], instance_type=testing_nodes_ec2_instance_type)
testing_nodes_ec2_reservations.append(ec2_reservation)
# query AWS to wait for all instances to be available
for instance in ec2_reservation.instances:
while instance.state != "running":
time.sleep(5)
instance.update()
if instance.state == "terminated":
# Something went wrong :-(
print "Could not allocate the requested number of nodes. Retrying later..."
break
# Got a node running
remaining_nodes_to_allocate -= 1
create_testing_nodes_from_reservation(ec2_reservation)
# Give it another 120 seconds to start up...
time.sleep(120)
except (Exception) as e:
print "An exception occured while trying to request a node from EC: \n%s" % e
# Check that all testing nodes are up
nodes_to_remove = []
for node in testing_nodes:
# send a testing command
try:
command_result = node.run_command("echo -n Are you up?")
if command_result[1] != "Are you up?":
print "Node %s is misfunctioning." % node.hostname
nodes_to_remove.append(node)
else:
print "Node %s is up" % node.hostname
except (IOError, EOFError, paramiko.SSHException, Exception) as e:
print "Node %s is not responding." % node.hostname
nodes_to_remove.append(node)
for node_to_remove in nodes_to_remove:
testing_nodes.remove(node_to_remove)
remaining_nodes_to_allocate += 1
if len(testing_nodes) == 0:
terminate_testing_nodes()
node_allocation_tries_count += 1
if node_allocation_tries_count > 5:
raise Exception("Could not allocate any testing nodes after %d tries." % node_allocation_tries_count)
else:
print "Could not allocate any nodes, retrying..."
time.sleep(120)
start_testing_nodes()
return
def terminate_testing_nodes():
global testing_nodes
global testing_nodes_ec2_reservations
global testing_nodes_ec2_region
global testing_nodes_ec2_access_key
global testing_nodes_ec2_private_key
for testing_nodes_ec2_reservation in testing_nodes_ec2_reservations:
if testing_nodes_ec2_reservation:
print "Terminating EC2 nodes"
ec2_connection = boto.ec2.connect_to_region(testing_nodes_ec2_region, aws_access_key_id=testing_nodes_ec2_access_key, aws_secret_access_key=testing_nodes_ec2_private_key)
# Query AWS to stop all instances
testing_nodes_ec2_reservation.stop_all()
testing_nodes_ec2_reservations = []
testing_nodes = None
def cleanup_testing_node(node):
node.run_command("rm -rf " + node.global_build_path)
node.run_command("rm -rf " + node.global_bench_path)
node.run_command("rm -rf " + node.global_test_path)
def scp_basedata_to_testing_node(source_node, target_node):
# Put private SSH key to source_node...
source_node.run_command("rm -f private_ssh_key.pem")
source_node.put_file(private_ssh_key_filename, "private_ssh_key.pem")
command_result = source_node.run_command("chmod 500 private_ssh_key.pem")
if command_result[0] != 0:
print "Unable to change access mode of private SSH key on remote node"
# Scp stuff to target node
for path_to_copy in [("/tmp/cloudtest_libs", "/tmp/cloudtest_libs"), ("/tmp/cloudtest_bin", "/tmp/cloudtest_bin"), ("/tmp/cloudtest_python", "/tmp/cloudtest_python"), (source_node.global_build_path, target_node.global_build_path), (source_node.global_bench_path, target_node.global_bench_path), (source_node.global_test_path, target_node.global_test_path)]:
command_result = source_node.run_command("scp -r -C -q -o stricthostkeychecking=no -P %i -i private_ssh_key.pem %s %s@%s:%s" % (target_node.port, path_to_copy[0], target_node.username, target_node.hostname, path_to_copy[1]))
if command_result[0] != 0:
print "Failed using scp to copy data from %s to %s: %s" % (source_node.hostname, target_node.hostname, command_result[1])
return False
target_node.basedata_installed = True
return True
def copy_basedata_to_testing_node(node):
global testing_nodes
print "Sending base data to node %s" % node.hostname
# Check if we can use scp_basedata_to_testing_node instead:
for source_node in testing_nodes:
if source_node.basedata_installed:
print "Scp-ing base data from source node " + source_node.hostname
if scp_basedata_to_testing_node(source_node, node):
return
node.basedata_installed = True
# Copy dependencies as specified in ec2_configuration
node.make_directory_recursively("/tmp/cloudtest_libs")
for (source_path, target_path) in ec2_configuration.cloudtest_lib_dependencies:
node.make_directory_recursively("/tmp/cloudtest_libs/" + os.path.dirname(target_path))
node.put_file(source_path, "/tmp/cloudtest_libs/" + target_path)
node.make_directory_recursively("/tmp/cloudtest_bin")
for (source_path, target_path) in ec2_configuration.cloudtest_bin_dependencies:
node.make_directory_recursively("/tmp/cloudtest_bin/" + os.path.dirname(target_path))
node.put_file(source_path, "/tmp/cloudtest_bin/" + target_path)
command_result = node.run_command("chmod +x /tmp/cloudtest_bin/*")
if command_result[0] != 0:
print "Unable to make cloudtest_bin files executable"
node.make_directory_recursively("/tmp/cloudtest_python")
for (source_path, target_path) in ec2_configuration.cloudtest_python_dependencies:
node.make_directory_recursively("/tmp/cloudtest_python/" + os.path.dirname(target_path))
node.put_file(source_path, "/tmp/cloudtest_python/" + target_path)
# Copy build hierarchy
node.make_directory(node.global_build_path)
#node.put_directory(base_directory + "/../build", node.global_build_path)
# Just copy essential files to save time...
for config in os.listdir(base_directory + "/../build"):
if os.path.isdir(base_directory + "/../build/" + config):
try:
node.make_directory(node.global_build_path + "/" + config)
node.put_file(base_directory + "/../build/" + config + "/rethinkdb", node.global_build_path + "/" + config + "/rethinkdb")
#node.put_file(base_directory + "/../build/" + config + "/rethinkdb-extract", node.global_build_path + "/" + config + "/rethinkdb-extract")
#node.put_file(base_directory + "/../build/" + config + "/rethinkdb-fsck", node.global_build_path + "/" + config + "/rethinkdb-fsck")
command_result = node.run_command("chmod +x " + node.global_build_path + "/" + config + "/*")
if command_result[0] != 0:
print "Unable to make rethinkdb executable"
except:
print "RethinkDB configuration %s could not be installed" % config
# Copy benchmark stuff
node.make_directory(node.global_bench_path)
node.make_directory(node.global_bench_path + "/stress-client")
node.put_file(base_directory + "/../bench/stress-client/stress", node.global_bench_path + "/stress-client/stress")
command_result = node.run_command("chmod +x " + node.global_bench_path + "/*/*")
if command_result[0] != 0:
print "Unable to make bench files executable"
try:
node.put_file(base_directory + "/../bench/stress-client/libstress.so", node.global_bench_path + "/stress-client/libstress.so")
node.put_file(base_directory + "/../bench/stress-client/stress.py", node.global_bench_path + "/stress-client/stress.py")
except Exception as e:
print "Failed copying stress auxiliary files: %s" % e
# Copy test hierarchy
node.make_directory(node.global_test_path)
node.put_directory(base_directory, node.global_test_path)
# Install the wrapper script
node.put_file(os.path.dirname(cloud_node_data.__file__) + "/" + wrapper_script_filename, "%s/%s" % (node.global_test_path, wrapper_script_filename));
def copy_per_test_data_to_testing_node(node, test_reference):
# Link build hierarchy
command_result = node.run_command("ln -s %s cloud_retest/%s/build" % (node.global_build_path, test_reference))
if command_result[0] != 0:
print "Unable to link build environment"
raise Exception("Unable to link build environment")
# Link bench hierarchy
command_result = node.run_command("ln -s %s cloud_retest/%s/bench" % (node.global_bench_path, test_reference))
if command_result[0] != 0:
print "Unable to link bench environment"
raise Exception("Unable to link bench environment")
# copy over the global test hierarchy
node.make_directory_recursively("cloud_retest/%s/test" % test_reference)
command_result = node.run_command("cp -af %s/* cloud_retest/%s/test" % (node.global_test_path, test_reference))
if command_result[0] != 0:
print "Unable to copy test environment"
raise Exception("Unable to copy test environment")
def retrieve_results_from_node(node):
global testing_nodes
global reports
global test_references
for test_reference in test_references:
single_runs_to_remove = []
for single_run in test_reference.single_runs:
run_node = single_run[0]
if run_node == node:
try:
test_reference.results.append(get_report_for_test(single_run))
single_runs_to_remove.append(single_run)
# Clean test
run_node.run_command("rm -rf cloud_retest/%s" % single_run[1])
except (IOError, EOFError, paramiko.SSHException, Exception) as e:
print "Unable to retrieve result for %s from node %s:" % (single_run[1], single_run[0].hostname)
traceback.print_exc()
for single_run_to_remove in single_runs_to_remove:
test_reference.single_runs.remove(single_run_to_remove)
def start_test_on_node(node, test_command, test_timeout = None, locking_timeout = 0):
global testing_nodes
global remaining_nodes_to_allocate
    if locking_timeout is None:
        locking_timeout = 0
    #print ("trying to acquire lock with timeout %i" % locking_timeout)
    if not node.acquire_lock(locking_timeout):
        return False
return False
#print ("Got lock!")
# Check if we can retrieve previous test results for this node now
retrieve_results_from_node(node)
try:
# Initialize node if not happened before...
        if not node.basedata_installed:
copy_basedata_to_testing_node(node)
# Generate random reference
system_random = random.SystemRandom()
test_reference = "cloudtest_" + str(system_random.randint(10000000, 99999999))
# Create test directory and check that it isn't taken
directory_created = False
while not directory_created:
node.make_directory_recursively("cloud_retest")
try:
node.make_directory("cloud_retest/%s" % test_reference)
directory_created = True
except IOError:
directory_created = False
test_reference = "cloudtest_" + str(system_random.randint(10000000, 99999999)) # Try another reference
print "Starting test with test reference %s on node %s" % (test_reference, node.hostname)
# Prepare for test...
copy_per_test_data_to_testing_node(node, test_reference)
# Store test_command and test_timeout into files on the remote node for the wrapper script to pick it up
command_result = node.run_command("echo -n %s > cloud_retest/%s/test/test_command" % \
(retester.shell_escape(test_command), test_reference))
if command_result[0] != 0:
print "Unable to store command"
# TODO: Throw an exception
if test_timeout == None:
command_result = node.run_command("echo -n \"\" > cloud_retest/%s/test/test_timeout" % (test_reference))
else:
command_result = node.run_command("echo -n %i > cloud_retest/%s/test/test_timeout" % (test_timeout, test_reference))
if command_result[0] != 0:
print "Unable to store timeout"
# TODO: Throw an exception
# Run test and release lock after it has finished
command_result = node.run_command("sh -c \"nohup sh -c \\\"(cd %s; LD_LIBRARY_PATH=/tmp/cloudtest_libs:$LD_LIBRARY_PATH PATH=/tmp/cloudtest_bin:$PATH PYTHONPATH=/tmp/cloudtest_python:$PYTHONPATH VALGRIND_LIB=/tmp/cloudtest_libs/valgrind python %s; %s)&\\\" > /dev/null 2> /dev/null\"" % ("cloud_retest/%s/test" % test_reference, wrapper_script_filename.replace(" ", "\\ "), node.get_release_lock_command()))
except (IOError, EOFError, paramiko.SSHException, Exception) as e:
print "Starting test failed: %s" % e
test_reference = "Failed"
try:
node.release_lock()
except (IOError, EOFError, paramiko.SSHException, Exception):
print "Unable to release lock on node %s. Node is now defunct." % node.hostname
testing_nodes.remove(node)
remaining_nodes_to_allocate += 1
return (node, test_reference)
# TODO: Move budget to configuration file
gz_budget_left = 512 * 1024 * 1024 # 512 MiB
def get_report_for_test(test_reference):
print "Downloading results for test %s" % test_reference[1]
if test_reference[1] == "Failed":
result = Result(0.0, "fail", "The test could not be started on EC2.")
return result
node = test_reference[0]
result_result = node.run_command("cat cloud_retest/" + test_reference[1] + "/test/result_result")[1]
result_description = node.run_command("cat cloud_retest/" + test_reference[1] + "/test/result_description")[1]
if result_description == "":
result_description = None
result = Result(0.0, result_result, result_description)
# Get running time
try:
result.running_time = float(node.run_command("cat cloud_retest/" + test_reference[1] + "/test/result_running_time")[1])
except ValueError:
print "Got invalid start_time for test %s" % test_reference[1]
result.running_time = 0.0
# Collect a few additional results into a temporary directory
result.output_dir = SmartTemporaryDirectory("out_")
def get_directory(remote_path, destination_path):
global gz_budget_left
assert os.path.isdir(destination_path)
for file in node.list_directory(remote_path):
max_file_size = 100000
r_path = os.path.join(remote_path, file.filename)
d_path = os.path.join(destination_path, file.filename)
assert not os.path.exists(d_path)
if stat.S_ISDIR(file.st_mode):
os.mkdir(d_path)
get_directory(r_path, d_path)
elif file.st_size <= max_file_size:
node.get_file(r_path, d_path)
elif gz_budget_left > 0:
# Retrieve complete gzipped file as long as we have some budget left for this
node.get_file_gz(r_path, "%s.gz" % d_path)
gz_budget_left -= (os.stat("%s.gz" % d_path))[ST_SIZE]
else:
f = open(d_path, "w")
res = node.run_command("head -c %d \"%s\"" % (max_file_size / 2, r_path))
if res[0] == 0: f.write(res[1])
else: f.write("[cloud_retester failed to retrieve part of this file: %s]" % res[0])
f.write("\n\n[cloud_retester omitted %d bytes of this file]\n\n" % (file.st_size - max_file_size + (max_file_size % 2)))
res = node.run_command("tail -c %d \"%s\"" % (max_file_size / 2, r_path))
if res[0] == 0: f.write(res[1])
else: f.write("[cloud_retester failed to retrieve part of this file: %s]" % res[0])
f.close()
if result_result == "fail":
get_directory(os.path.join("cloud_retest", test_reference[1], "test", "output_from_test"), result.output_dir.path)
else:
for file_name in ["server_output.txt", "creator_output.txt", "test_output.txt", "fsck_output.txt"]:
command_result = node.run_command("cat 'cloud_retest/" + test_reference[1] + "/test/output_from_test/" + file_name + "'")
if command_result[0] == 0:
open(result.output_dir.path + "/" + file_name, 'w').write(command_result[1])
return result
def issue_test_to_some_node(test_command, test_timeout = 0):
global testing_nodes
global next_node_to_issue_to
global round_robin_locking_timeout
# Start remaining nodes
setup_testing_nodes()
test_successfully_issued = False
while test_successfully_issued == False:
# wait for a limited amount of time until that node is free to get work
test_reference = start_test_on_node(testing_nodes[next_node_to_issue_to], test_command, test_timeout, round_robin_locking_timeout)
if test_reference != False:
test_successfully_issued = True
# use next node for the next try
next_node_to_issue_to = (next_node_to_issue_to + 1) % len(testing_nodes)
# return the reference required to retrieve results later, contains node and report dir
return test_reference
def wait_for_nodes_to_finish():
global testing_nodes
print "Waiting for testing nodes to finish"
for node in testing_nodes:
try:
node.acquire_lock()
node.release_lock()
except (IOError, EOFError, paramiko.SSHException, Exception) as e:
print "Node %s is broken" % node.hostname
def collect_reports_from_nodes():
global testing_nodes
global reports
global test_references
print "Collecting reports"
for test_reference in test_references:
single_runs_to_remove = []
for single_run in test_reference.single_runs:
try:
test_reference.results.append(get_report_for_test(single_run))
single_runs_to_remove.append(single_run)
# Clean test
node = single_run[0]
node.run_command("rm -rf cloud_retest/%s" % single_run[1])
except (IOError, EOFError, paramiko.SSHException, Exception) as e:
print "Unable to retrieve result for %s from node %s:" % (single_run[1], single_run[0].hostname)
traceback.print_exc()
for single_run_to_remove in single_runs_to_remove:
test_reference.single_runs.remove(single_run_to_remove)
# Generate report object for all results of this test
reports.append((test_reference.command, test_reference.results))
# Clean node
for node in testing_nodes:
try:
cleanup_testing_node(node)
except (IOError, EOFError, paramiko.SSHException, Exception) as e:
print "Unable to cleanup node %s:" % node.hostname
traceback.print_exc()
terminate_testing_nodes()
# Safety stuff... (make sure that nodes get destroyed in EC2 eventually)
# This is not 100% fool-proof (i.e. it does not catch all ways of killing the process), take care!
atexit.register(terminate_testing_nodes)
# modified variant of plain retester function...
# returns as soon as all repetitions of the test have been issued to some testing node
def do_test_cloud(cmd, cmd_args={}, cmd_format="gnu", repeat=1, timeout=60):
global test_references
global use_local_retester
if use_local_retester:
return do_test(cmd, cmd_args, cmd_format, repeat, timeout)
# Build up the command line
command = cmd
cmd_args_keys = [k for k in cmd_args]
cmd_args_keys.sort()
for arg in cmd_args_keys:
command += " "
# GNU cmd line builder
if cmd_format == "gnu":
if(isinstance(cmd_args[arg], types.BooleanType)):
if cmd_args[arg]:
command += "--%s" % arg
else:
command += "--%s \"%s\"" % (arg, retester.shell_escape(str(cmd_args[arg])))
# Make cmd line builder
elif cmd_format == "make":
command += "%s=%s" % (arg, str(cmd_args[arg]))
# Invalid cmd line builder
else:
print "Invalid command line formatter"
raise NameError()
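    # Illustration (a hedged sketch, hypothetical arguments): with
    # cmd_format="gnu", cmd="stress" and cmd_args={"duration": 30, "verbose": True},
    # the loop above yields: stress --duration "30" --verbose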
# Run the test
if repeat == 1: print "Running %r..." % command
else: print "Running %r (repeating %d times)..." % (command, repeat)
if timeout > 60: print "(This test may take up to %d seconds each time.)" % timeout
test_reference = TestReference(command)
for i in xrange(repeat):
test_reference.single_runs.append(issue_test_to_some_node(command, timeout))
test_references.append(test_reference)
# modified variant of plain retester function...
def report_cloud():
global use_local_retester
if use_local_retester:
return report()
wait_for_nodes_to_finish()
# fill reports list
collect_reports_from_nodes()
# Invoke report() from plain retester to do the rest of the work...
retester.reports.extend(reports)
report()
|
djabber/Dashboard
|
refs/heads/master
|
bottle/dash/lib/python2.7/encodings/gb18030.py
|
816
|
#
# gb18030.py: Python Unicode Codec for GB18030
#
# Written by Hye-Shik Chang <[email protected]>
#
import _codecs_cn, codecs
import _multibytecodec as mbc
codec = _codecs_cn.getcodec('gb18030')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
codecs.IncrementalDecoder):
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
codec = codec
def getregentry():
return codecs.CodecInfo(
name='gb18030',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
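# A minimal usage sketch (Python 2 literals; the two characters below encode
# to the same bytes as in GBK, with which GB18030 is backward compatible):
#
#   u"\u4f60\u597d".encode("gb18030")     # -> '\xc4\xe3\xba\xc3'
#   '\xc4\xe3\xba\xc3'.decode("gb18030")  # -> u"\u4f60\u597d"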
|
kaplun/invenio
|
refs/heads/master
|
modules/websubmit/lib/websubmit_icon_creator.py
|
3
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2008, 2009, 2010, 2011, 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""This is websubmit_icon_creator.py
This tool is used to create an icon of a picture file.
+ Python API:
Please see create_icon().
+ CLI API:
## $ python ~invenio/lib/python/invenio/websubmit_icon_creator.py \\
## --icon-scale=200 \\
## --icon-name=test-icon \\
## --icon-file-format=jpg \\
## test-image.jpg
## $ python ~invenio/lib/python/invenio/websubmit_icon_creator.py \\
## --icon-scale=200 \\
## --icon-name=test-icon2 \\
## --icon-file-format=gif \\
## --multipage-icon \\
## --multipage-icon-delay=50 \\
## test-image2.pdf
"""
__revision__ = "$Id$"
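## A minimal usage sketch of the Python API (hedged; the input file name is a
## placeholder and the option values merely echo the CLI examples above; see
## create_icon() below for the full documentation of each option):
##
##   from invenio.websubmit_icon_creator import create_icon
##   create_icon({'input-file': "test-image.jpg",
##                'icon-name': "test-icon",
##                'icon-file-format': "jpg",
##                'multipage-icon': False,
##                'multipage-icon-delay': 100,
##                'icon-scale': "200",
##                'verbosity': 0})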
import os.path, sys, getopt, shutil, tempfile, re
from invenio.config import \
CFG_TMPDIR, \
CFG_PATH_PS2PDF, \
CFG_PATH_PDFTK, \
CFG_PATH_CONVERT
from invenio.shellutils import escape_shell_arg
from invenio.websubmit_config import InvenioWebSubmitIconCreatorError
CFG_ALLOWED_FILE_EXTENSIONS = ["pdf", "gif", "jpg", \
"jpeg", "ps", "png", "bmp", \
"eps", "epsi", "epsf", \
"tiff", "tif"]
## ***** Functions related to the icon creation process: *****
# Accepted format for the ImageMagick 'scale' parameter:
re_imagemagic_scale_parameter_format = re.compile(r'x?\d+(x\d*)?(\^|!|>|<|@|%)?$')
def create_working_directory():
"""Create a "working directory" in which the files related to the icon-
creation process can be stored, and return the full path to it.
The working directory will be created in ~invenio/var/tmp.
If it cannot be created there, an exception
(InvenioWebSubmitIconCreatorError) will be raised.
The working directory will have the prefix
"websubmit_icon_creator_", and could be given a name something like:
- websubmit_icon_creator_Tzs3St
@return: (string) - the full path to the working directory.
@Exceptions raised: InvenioWebSubmitIconCreatorError.
"""
## Create the temporary directory in which to place the files related to
## icon creation in ~invenio/var/tmp:
path_workingdir = None
try:
path_workingdir = tempfile.mkdtemp(prefix="websubmit_icon_creator_", \
dir="%s" % CFG_TMPDIR)
except OSError, err:
## Unable to create the temporary directory in ~invenio/var/tmp
msg = "Error: Unable to create a temporary working directory in " \
"which to carry out the icon creation process. An attempt was " \
"made to create the directory in [%s]; the error encountered " \
"was <%s>. Icon creation has failed." % (CFG_TMPDIR, str(err))
raise InvenioWebSubmitIconCreatorError(msg)
## return the path to the working-directory:
return path_workingdir
def copy_file_to_directory(source_file, destination_dir):
"""Attempt to copy an ordinary file from one location to a destination
directory, returning the name of the copied file if successful.
@param source_file: (string) - the name of the file to be copied
to the destination directory.
@param destination_dir: (string) - the path of the directory into
which the source file is to be copied.
@return: (string) - the name of the source file after it has been
copied to the destination directory (i.e. no leading path information.)
@Exceptions raised: (IOError) - upon failure to successfully copy the
source file to the destination directory.
"""
## Divide the input filename into path and basename:
(dummy, name_source_file) = os.path.split(source_file)
if name_source_file == "":
## The source file is just a path - not a valid filename.
msg = """Error: the name of the file to be copied was invalid."""
raise IOError(msg)
## Test to see whether source file is a real file and is readable:
if os.access("%s" % source_file, os.R_OK):
## File is readable. Copy it locally to the destination directory:
try:
shutil.copyfile("%s" % source_file, \
"%s/%s" % (destination_dir, name_source_file))
except IOError:
## Unable to copy the source file to the destination directory.
msg = """Error: Unable to copy source file [%s] to """ \
"""the destination directory [%s].""" \
% (source_file, destination_dir)
raise IOError(msg)
else:
## Unable to read the source file.
msg = """Error: Unable to copy source file [%s] to """ \
"""destination directory [%s]. (File not readable.)""" \
% (source_file, destination_dir)
raise IOError(msg)
## Now that the source file has been successfully copied to the destination
## directory, return its base name:
return name_source_file
def build_icon(path_workingdir,
source_filename,
source_filetype,
icon_name,
icon_filetype,
multipage_icon,
multipage_icon_delay,
icon_scale):
"""Whereas create_icon acts as the API for icon creation and therefore
deals with argument washing, temporary working directory creation,
etc, the build_icon function takes care of the actual creation of the
icon file itself by calling various shell tools.
To accomplish this, it relies upon the following parameters:
@param path_workingdir: (string) - the path to the working directory
in which all files related to the icon creation are stored.
@param source_filename: (string) - the filename of the original image
file.
@param source_filetype: (string) - the file type of the original image
file.
@param icon_name: (string) - the name that is to be given to the icon.
@param icon_filetype: (string) - the file type of the icon that is
to be created.
@param multipage_icon: (boolean) - a flag indicating whether or not
an icon with multiple pages (i.e. an animated gif icon) should be
created.
@param multipage_icon_delay: (integer) - the delay to be used between
frame changing for an icon with multiple pages (i.e. an animated gif.)
@param icon_scale: (integer) - the scaling information for the created
icon.
@return: (string) - the name of the created icon file (which will have
been created in the working directory "path_workingdir".)
@Exceptions raised: (InvenioWebSubmitIconCreatorError) - raised when
the icon creation process fails.
"""
##
## If the source file is a PS, convert it into a PDF:
if source_filetype == "ps":
## Convert the subject file from PostScript to PDF:
if source_filename[-3:].lower() == ".ps":
## The name of the file to be stamped has a PostScript extension.
## Strip it and give the name of the PDF file to be created a
## PDF extension:
created_pdfname = "%s.pdf" % source_filename[:-3]
elif len(source_filename.split(".")) > 1:
## The file name has an extension - strip it and add a PDF
## extension:
raw_name = source_filename[:source_filename.rfind(".")]
if raw_name != "":
created_pdfname = "%s.pdf" % raw_name
else:
## It would appear that the file had no extension and that its
## name started with a period. Just use the original name with
## a .pdf suffix:
created_pdfname = "%s.pdf" % source_filename
else:
## No extension - use the original name with a .pdf suffix:
created_pdfname = "%s.pdf" % source_filename
## Build the distilling command:
cmd_distill = """%(distiller)s %(ps-file-path)s """ \
"""%(pdf-file-path)s 2>/dev/null""" % \
{ 'distiller' : CFG_PATH_PS2PDF,
'ps-file-path' : escape_shell_arg("%s/%s" % \
(path_workingdir, \
source_filename)),
'pdf-file-path' : escape_shell_arg("%s/%s" % \
(path_workingdir, \
created_pdfname)),
}
## Distill the PS into a PDF:
errcode_distill = os.system(cmd_distill)
## Test to see whether the PS was distilled into a PDF without error:
if errcode_distill or \
not os.access("%s/%s" % (path_workingdir, created_pdfname), os.F_OK):
## The PDF file was not correctly created in the working directory.
## Unable to continue.
msg = "Error: Unable to correctly convert PostScript file [%s] to" \
" PDF. Cannot create icon." % source_filename
raise InvenioWebSubmitIconCreatorError(msg)
## Now assign the name of the created PDF file to subject_file:
source_filename = created_pdfname
##
## Treat the name of the icon:
if icon_name in (None, ""):
## Since no name has been provided for the icon, give it the same name
## as the source file, but with the prefix "icon-":
icon_name = "icon-%s" % source_filename
## Now if the icon name has an extension, strip it and add that of the
## icon file type:
if len(icon_name.split(".")) > 1:
## The icon file name has an extension - strip it and add the icon
## file type extension:
raw_name = icon_name[:icon_name.rfind(".")]
if raw_name != "":
icon_name = "%s.%s" % (raw_name, icon_filetype)
else:
## It would appear that the file had no extension and that its
## name started with a period. Just use the original name with
## the icon file type's suffix:
icon_name = "%s.%s" % (icon_name, icon_filetype)
else:
## The icon name had no extension. Use the original name with the
## icon file type's suffix:
icon_name = "%s.%s" % (icon_name, icon_filetype)
##
## If the source file type is PS or PDF, it may be necessary to separate
## the first page from the rest of the document and keep it for use as
## the icon. Do this if necessary:
if source_filetype in ("ps", "pdf") and \
(icon_filetype != "gif" or not multipage_icon):
## Either (a) the icon type isn't GIF (in which case it cannot
## be animated and must therefore be created _only_ from the
## document's first page; or (b) the icon type is GIF, but the
## icon is to be created from the first page of the document only.
## The first page of the PDF document must be separated and is to
## be used for icon creation:
source_file_first_page = "p1-%s" % source_filename
## Perform the separation:
cmd_get_first_page = \
"%(pdftk)s A=%(source-file-path)s " \
"cat A1 output %(first-page-path)s " \
"2>/dev/null" \
% { 'pdftk' : CFG_PATH_PDFTK,
'source-file-path' : escape_shell_arg("%s/%s" % \
(path_workingdir, source_filename)),
'first-page-path' : escape_shell_arg("%s/%s" % \
(path_workingdir, \
source_file_first_page)),
}
errcode_get_first_page = os.system(cmd_get_first_page)
## Check that the separation was successful:
if errcode_get_first_page or \
not os.access("%s/%s" % (path_workingdir, \
source_file_first_page), os.F_OK):
## Separation was unsuccessful.
msg = "Error: Unable to create an icon for file [%s/%s] - it " \
"wasn't possible to separate the first page from the " \
"rest of the document (error code [%s].)" \
% (path_workingdir, source_filename, errcode_get_first_page)
raise InvenioWebSubmitIconCreatorError(msg)
else:
## Successfully extracted the first page. Treat it as the source
## file for icon creation from now on:
source_filename = source_file_first_page
##
## Create the icon:
## If a delay is necessary for an animated gif icon, create the
## delay string:
delay_info = ""
if source_filetype in ("ps", "pdf") and \
icon_filetype == "gif" and multipage_icon:
## Include delay information:
delay_info = "-delay %s" % escape_shell_arg(str(multipage_icon_delay))
## Command for icon creation:
cmd_create_icon = "%(convert)s -colorspace rgb -auto-orient -scale %(scale)s %(delay)s " \
"%(source-file-path)s %(icon-file-path)s 2>/dev/null" \
% { 'convert' : CFG_PATH_CONVERT,
'scale' : \
escape_shell_arg(icon_scale),
'delay' : delay_info,
'source-file-path' : \
escape_shell_arg("%s/%s" \
% (path_workingdir, \
source_filename)),
'icon-file-path' : \
escape_shell_arg("%s/%s" \
% (path_workingdir, \
icon_name)),
}
errcode_create_icon = os.system(cmd_create_icon)
## Check that the icon creation was successful:
if errcode_create_icon or \
not os.access("%s/%s" % (path_workingdir, icon_name), os.F_OK):
## Icon creation was unsuccessful.
msg = "Error: Unable to create an icon for file [%s/%s] (error " \
"code [%s].)" \
% (path_workingdir, source_filename, errcode_create_icon)
raise InvenioWebSubmitIconCreatorError(msg)
##
## The icon was successfully created. Return its name:
return icon_name
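## For illustration, the icon-creation command built above expands to
## something like the following (the paths and file names here are
## hypothetical):
##
##   /usr/bin/convert -colorspace rgb -auto-orient -scale 180  \
##       /tmp/websubmit-icons/p1-report.pdf \
##       /tmp/websubmit-icons/icon-report.gif 2>/dev/null
##
## with "-delay 100" inserted after the scale argument when an animated
## gif icon has been requested.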
def create_icon(options):
"""The driver for the icon creation process. This is effectively the
function that is responsible for coordinating the icon creation.
It is the API for the creation of an icon.
@param options: (dictionary) - a dictionary of options that are required
by the function in order to carry out the icon-creation process.
The dictionary must have the following structure:
+ input-file: (string) - the path to the input file (i.e. that
          from which the icon is to be created);
+ icon-name: (string) - the name of the icon that is to be created
by the program. This is optional - if not provided,
a default name will be applied to the icon file instead;
+ multipage-icon: (boolean) - used only when the original file
is a PDF or PS file. If False, the created icon will feature ONLY
the first page of the PDF. If True, ALL pages of the PDF will
be included in the created icon. Note: If the icon type is not
gif, this flag will be forced as False.
+ multipage-icon-delay: (integer) - used only when the original
file is a PDF or PS AND use-first-page-only is False AND
the icon type is gif.
This allows the user to specify the delay between "pages"
of a multi-page (animated) icon.
+ icon-scale: ('geometry') - the scaling information to be used for the
creation of the new icon. Type 'geometry' as defined in ImageMagick.
         (e.g. 320 or 320x240 or 100> or 5%)
+ icon-file-format: (string) - the file format of the icon that is
to be created. Legal values are:
* pdf
* gif
* jpg
* jpeg
* ps
* png
* bmp
+ verbosity: (integer) - the verbosity level under which the program
is to run;
So, an example of the returned dictionary could be something like:
{ 'input-file' : "demo-picture-file.jpg",
'icon-name' : "icon-demo-picture-file",
'icon-file-format' : "gif",
'multipage-icon' : True,
'multipage-icon-delay' : 100,
      'icon-scale' : "180",
'verbosity' : 0,
}
@return: (tuple) - consisting of two strings:
1. the path to the working directory in which all files related to
icon creation are stored;
2. The name of the "icon" file;
    @Exceptions raised: (InvenioWebSubmitIconCreatorError) - will
be raised or propagated by this function when the icon creation process
fails for one reason or another.
"""
## SANITY CHECKS:
## Does the options dictionary contain all expected keys?
##
## A list of the names of the expected options:
expected_option_names = ['input-file', \
'icon-name', \
'icon-file-format', \
'multipage-icon', \
'multipage-icon-delay', \
'icon-scale', \
'verbosity']
expected_option_names.sort()
## A list of the option names that have been received:
received_option_names = options.keys()
received_option_names.sort()
if expected_option_names != received_option_names:
        ## Error: the dictionary of options had an illegal structure:
msg = """Error: Unexpected value received for "options" parameter."""
raise InvenioWebSubmitIconCreatorError(msg)
## Do we have an input file to work on?
if options["input-file"] in (None, ""):
## No input file - stop the icon creation:
msg = "Error: unable to determine the name of the file from which " \
"the icon is to be created."
raise InvenioWebSubmitIconCreatorError(msg)
else:
## Get the file type of the input file:
tmp_file_extension = options["input-file"].split(".")[-1]
## allow also Invenio files that use the format: filename.ext;format;subformat;version
tmp_file_extension = tmp_file_extension.split(';')[0]
if tmp_file_extension.lower() not in CFG_ALLOWED_FILE_EXTENSIONS:
            ## Illegal input file type.
            msg = "Error: icons can only be created from %s files, " \
"not [%s]." % (str(CFG_ALLOWED_FILE_EXTENSIONS), \
tmp_file_extension.lower())
raise InvenioWebSubmitIconCreatorError(msg)
else:
subject_filetype = tmp_file_extension.lower()
## Wash the requested icon name:
if type(options["icon-name"]) is not str:
options["icon-name"] = ""
else:
(dummy, name_iconfile) = os.path.split(options["icon-name"])
if name_iconfile != "":
## Take just the basename component of the icon file:
options["icon-name"] = name_iconfile
## Do we have an icon file format?
icon_format = options["icon-file-format"]
if icon_format in (None, ""):
## gif by default:
options["icon-file-format"] = "gif"
elif str(icon_format).lower() not in CFG_ALLOWED_FILE_EXTENSIONS:
## gif if an invalid icon type was supplied:
options["icon-file-format"] = "gif"
else:
## Use the provided icon type:
options["icon-file-format"] = icon_format.lower()
## Wash the use-first-page-only flag according to the type of the
## requested icon:
if options["icon-file-format"] != "gif":
        ## Since the requested icon isn't a gif file, it can't be animated
## and should be created from the first "page" of the original file:
options["multipage-icon"] = False
else:
## The requested icon is a gif. Verify that the multipage-icon
## flag is a boolean value. If not, set it to False by default:
if type(options["multipage-icon"]) is not bool:
## Non-boolean value: default to False:
options["multipage-icon"] = False
## Wash the delay time for frames in an animated gif icon:
if type(options["multipage-icon-delay"]) is not int:
## Invalid value - set it to default:
options["multipage-icon-delay"] = 100
elif options["multipage-icon-delay"] < 0:
## Can't have negative delays:
options["multipage-icon-delay"] = 100
## Wash the icon scaling information:
if not re_imagemagic_scale_parameter_format.match(options["icon-scale"]):
        ## Invalid value - set it to default:
options["icon-scale"] = "180"
## OK. Begin the icon creation process:
##
## Create a working directory for the icon creation process and get the
## full path to it:
path_workingdir = create_working_directory()
## Copy the file from which the icon is to be created into the
## working directory:
try:
basename_source_file = \
copy_file_to_directory(options["input-file"], path_workingdir)
except IOError, err:
## Unable to copy the source file to the working directory.
msg = "Icon creation failed: unable to copy the source image file " \
"to the working directory. Got this error: [%s]" % str(err)
raise InvenioWebSubmitIconCreatorError(msg)
## Create the icon and get its name:
icon_name = build_icon(path_workingdir, \
basename_source_file, \
subject_filetype, \
options["icon-name"], \
options["icon-file-format"], \
options["multipage-icon"], \
options["multipage-icon-delay"], \
options["icon-scale"])
## Return a tuple containing the working directory and the name of the
## icon file to the caller:
return (path_workingdir, icon_name)
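## A minimal usage sketch for the create_icon() API above (illustrative
## only - the input file name is hypothetical and the working directory
## is created by create_icon itself):
##
##   options = { 'input-file'           : "demo-picture-file.jpg",
##               'icon-name'            : "",
##               'icon-file-format'     : "gif",
##               'multipage-icon'       : False,
##               'multipage-icon-delay' : 100,
##               'icon-scale'           : "180",
##               'verbosity'            : 0,
##             }
##   (workdir, icon) = create_icon(options)
##   print "Icon created at: %s/%s" % (workdir, icon)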
## ***** Functions Specific to CLI calling of the program: *****
def usage(wmsg="", err_code=0):
"""Print a "usage" message (along with an optional additional warning/error
message) to stderr and exit with a given error code.
@param wmsg: (string) - some kind of warning message for the user.
@param err_code: (integer) - an error code to be passed to sys.exit,
which is called after the usage message has been printed.
@return: None.
"""
## Wash the warning message:
if wmsg != "":
wmsg = wmsg.strip() + "\n"
## The usage message:
msg = """ Usage:
python ~invenio/lib/python/invenio/websubmit_icon_creator.py \\
[options] input-file.jpg
websubmit_icon_creator.py is used to create an icon for an image.
Options:
-h, --help Print this help.
-V, --version Print version information.
  -v, --verbosity=LEVEL   Verbose level (0=min, 1=default, 9=max).
[NOT IMPLEMENTED]
  -s, --icon-scale=GEOMETRY
                          Scaling information for the icon that is to
                          be created. Must be of type 'geometry', as
                          understood by ImageMagick (e.g. 320, 320x240
                          or 100>). Defaults to 180.
-m, --multipage-icon
A flag to indicate that the icon should
consist of multiple pages. Will only be
respected if the requested icon type is GIF
and the input file is a PS or PDF consisting
of several pages.
-d, --multipage-icon-delay=VAL
If the icon consists of several pages and is
an animated GIF, a delay between frames can
be specified. Must be an integer. Defaults
to 100.
-f, --icon-file-format=FORMAT
The file format of the icon to be created.
Must be one of:
[pdf, gif, jpg, jpeg, ps, png, bmp]
Defaults to gif.
-o, --icon-name=XYZ
The optional name to be given to the created
icon file. If this is omitted, the icon file
will be given the same name as the input
file, but will be prefixed by "icon-";
Examples:
python ~invenio/lib/python/invenio/websubmit_icon_creator.py \\
--icon-scale=200 \\
--icon-name=test-icon \\
--icon-file-format=jpg \\
test-image.jpg
python ~invenio/lib/python/invenio/websubmit_icon_creator.py \\
--icon-scale=200 \\
--icon-name=test-icon2 \\
--icon-file-format=gif \\
--multipage-icon \\
--multipage-icon-delay=50 \\
test-image2.pdf
"""
sys.stderr.write(wmsg + msg)
sys.exit(err_code)
def get_cli_options():
"""From the options and arguments supplied by the user via the CLI,
build a dictionary of options to drive websubmit-icon-creator.
For reference, the CLI options available to the user are as follows:
-h, --help -> Display help/usage message and exit;
-V, --version -> Display version information and exit;
-v, --verbose= -> Set verbosity level (0=min, 1=default,
9=max).
-s, --icon-scale -> Scaling information for the icon that
is to be created. Must be of
type 'geometry', as understood
by ImageMagick (Eg. 320 or
320x240 or 100>). Defaults to
180.
-m, --multipage-icon -> A flag to indicate that the icon should
consist of multiple pages. Will only be
respected if the requested icon type is
GIF and the input file is a PS or PDF
consisting of several pages.
-d, --multipage-icon-delay= -> If the icon consists of several pages
and is an animated GIF, a delay between
frames can be specified. Must be an
integer. Defaults to 100.
-f, --icon-file-format= -> The file format of the icon to be
created. Must be one of:
[pdf, gif, jpg, jpeg, ps, png, bmp]
Defaults to gif.
-o, --icon-name= -> The optional name to be given to the
created icon file. If this is omitted,
the icon file will be given the same
name as the input file, but will be
prefixed by "icon-";
@return: (dictionary) of input options and flags, set as
appropriate. The dictionary has the following structure:
+ input-file: (string) - the path to the input file (i.e. that
          from which the icon is to be created);
+ icon-name: (string) - the name of the icon that is to be created
by the program. This is optional - if not provided,
a default name will be applied to the icon file instead;
+ multipage-icon: (boolean) - used only when the original file
is a PDF or PS file. If False, the created icon will feature ONLY
the first page of the PDF. If True, ALL pages of the PDF will
be included in the created icon. Note: If the icon type is not
gif, this flag will be forced as False.
+ multipage-icon-delay: (integer) - used only when the original
file is a PDF or PS AND use-first-page-only is False AND
the icon type is gif.
This allows the user to specify the delay between "pages"
of a multi-page (animated) icon.
       + icon-scale: ('geometry') - the scaling information to be used
          for the creation of the new icon, as understood by ImageMagick
          (e.g. 320 or 320x240 or 100>).
+ icon-file-format: (string) - the file format of the icon that is
to be created. Legal values are:
[pdf, gif, jpg, jpeg, ps, png, bmp]
+ verbosity: (integer) - the verbosity level under which the program
is to run;
So, an example of the returned dictionary could be something like:
{ 'input-file' : "demo-picture-file.jpg",
'icon-name' : "icon-demo-picture-file",
'icon-file-format' : "gif",
'multipage-icon' : True,
'multipage-icon-delay' : 100,
      'icon-scale' : "180",
'verbosity' : 0,
}
"""
## dictionary of important values relating to cli call of program:
options = { 'input-file' : "",
'icon-name' : "",
'icon-file-format' : "",
'multipage-icon' : False,
'multipage-icon-delay' : 100,
                'icon-scale' : "180",
'verbosity' : 0,
}
## Get the options and arguments provided by the user via the CLI:
try:
myoptions, myargs = getopt.getopt(sys.argv[1:], "hVv:s:md:f:o:", \
["help",
"version",
"verbosity=",
"icon-scale=",
"multipage-icon",
"multipage-icon-delay=",
"icon-file-format=",
"icon-name="])
except getopt.GetoptError, err:
## Invalid option provided - usage message
usage(wmsg="Error: %(msg)s." % { 'msg' : str(err) })
## Get the input file from the arguments list (it should be the
## first argument):
if len(myargs) > 0:
options["input-file"] = myargs[0]
## Extract the details of the options:
for opt in myoptions:
if opt[0] in ("-V","--version"):
## version message and exit
sys.stdout.write("%s\n" % __revision__)
sys.stdout.flush()
sys.exit(0)
elif opt[0] in ("-h","--help"):
## help message and exit
usage()
elif opt[0] in ("-v", "--verbosity"):
## Get verbosity level:
if not opt[1].isdigit():
options['verbosity'] = 0
elif int(opt[1]) not in xrange(0, 10):
options['verbosity'] = 0
else:
options['verbosity'] = int(opt[1])
elif opt[0] in ("-o", "--icon-name"):
## Get the name of the icon that is to be created:
options["icon-name"] = opt[1]
elif opt[0] in ("-f", "--icon-file-format"):
## The file format of the icon file:
if str(opt[1]).lower() not in CFG_ALLOWED_FILE_EXTENSIONS:
## Illegal file format requested for icon:
usage()
else:
                ## Valid icon file format supplied - use it (lowercased):
options["icon-file-format"] = str(opt[1]).lower()
elif opt[0] in ("-m","--multipage-icon"):
## The user would like a multipage (animated) icon:
options['multipage-icon'] = True
elif opt[0] in ("-d", "--multipage-icon-delay"):
## The delay to be used in the case of a multipage (animated) icon:
try:
frame_delay = int(opt[1])
except ValueError:
## Invalid value for delay supplied. Usage message.
usage()
else:
if frame_delay >= 0:
options['multipage-icon-delay'] = frame_delay
elif opt[0] in ("-s", "--icon-scale"):
## The scaling information for the icon:
if re_imagemagic_scale_parameter_format.match(opt[1]):
options['icon-scale'] = opt[1]
else:
usage()
##
## Done. Return the dictionary of options:
return options
def create_icon_cli():
"""The function responsible for triggering the icon creation process when
called via the CLI.
This function will effectively get the CLI options, then pass them to
function that is responsible for coordinating the icon creation process
itself.
Once stamping has been completed, an attempt will be made to copy the
icon file to the current working directory. If this can't be done, the
path to the icon will be printed to stdout instead.
"""
## Get CLI options and arguments:
input_options = get_cli_options()
## Create the icon file and obtain the name of the working directory in
## which the icon file is situated and the name of the icon file:
try:
(working_dir, icon_file) = create_icon(input_options)
except InvenioWebSubmitIconCreatorError, err:
## Something went wrong:
sys.stderr.write("Icon creation failed: [%s]\n" % str(err))
sys.stderr.flush()
sys.exit(1)
if not os.access("./%s" % icon_file, os.F_OK):
## Copy the icon file into the current directory:
try:
shutil.copyfile("%s/%s" % (working_dir, icon_file), \
"./%s" % icon_file)
except IOError:
## Report that it wasn't possible to copy the icon file locally
## and offer the user a path to it:
msg = "It was not possible to copy the icon file to the " \
"current working directory.\nYou can find it here: " \
"[%s/%s].\n" \
% (working_dir, icon_file)
sys.stderr.write(msg)
sys.stderr.flush()
else:
## A file exists in curdir with the same name as the final icon file.
## Just print out a message stating this fact, along with the path to
## the icon file in the temporary working directory:
msg = "The icon file [%s] has not been copied to the current " \
"working directory because a file with this name already " \
"existed there.\nYou can find the icon file here: " \
"[%s/%s].\n" % (icon_file, working_dir, icon_file)
sys.stderr.write(msg)
sys.stderr.flush()
## Start proceedings for CLI calls:
if __name__ == "__main__":
create_icon_cli()
|
tchernomax/ansible
|
refs/heads/devel
|
lib/ansible/plugins/lookup/conjur_variable.py
|
28
|
# (c) 2018, Jason Vanderhoof <[email protected]>, Oren Ben Meir <[email protected]>
# (c) 2018 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
lookup: conjur_variable
version_added: "2.5"
short_description: Fetch credentials from CyberArk Conjur.
description:
- "Retrieves credentials from Conjur using the controlling host's Conjur identity. Conjur info: U(https://www.conjur.org/)."
requirements:
- 'The controlling host running Ansible has a Conjur identity. (More: U(https://developer.conjur.net/key_concepts/machine_identity.html))'
options:
_term:
description: Variable path
required: True
identity_file:
description: Path to the Conjur identity file. The identity file follows the netrc file format convention.
type: path
default: /etc/conjur.identity
required: False
ini:
      - section: conjur
key: identity_file_path
env:
- name: CONJUR_IDENTITY_FILE
config_file:
description: Path to the Conjur configuration file. The configuration file is a YAML file.
type: path
default: /etc/conjur.conf
required: False
ini:
      - section: conjur
key: config_file_path
env:
- name: CONJUR_CONFIG_FILE
"""
EXAMPLES = """
- debug:
msg: "{{ lookup('conjur_variable', '/path/to/secret') }}"
"""
RETURN = """
_raw:
description:
- Value stored in Conjur.
"""
import os.path
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
from base64 import b64encode
from netrc import netrc
from os import environ
from time import time
from ansible.module_utils.six.moves.urllib.parse import quote_plus
import yaml
from ansible.module_utils.urls import open_url
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
# Load configuration and return as dictionary if file is present on file system
def _load_conf_from_file(conf_path):
display.vvv('conf file: {0}'.format(conf_path))
if not os.path.exists(conf_path):
raise AnsibleError('Conjur configuration file `{0}` was not found on the controlling host'
.format(conf_path))
display.vvvv('Loading configuration from: {0}'.format(conf_path))
with open(conf_path) as f:
config = yaml.safe_load(f.read())
if 'account' not in config or 'appliance_url' not in config:
raise AnsibleError('{0} on the controlling host must contain an `account` and `appliance_url` entry'
.format(conf_path))
return config
# Load identity and return as dictionary if file is present on file system
def _load_identity_from_file(identity_path, appliance_url):
display.vvvv('identity file: {0}'.format(identity_path))
if not os.path.exists(identity_path):
raise AnsibleError('Conjur identity file `{0}` was not found on the controlling host'
.format(identity_path))
display.vvvv('Loading identity from: {0} for {1}'.format(identity_path, appliance_url))
conjur_authn_url = '{0}/authn'.format(appliance_url)
identity = netrc(identity_path)
if identity.authenticators(conjur_authn_url) is None:
raise AnsibleError('The netrc file on the controlling host does not contain an entry for: {0}'
.format(conjur_authn_url))
id, account, api_key = identity.authenticators(conjur_authn_url)
if not id or not api_key:
raise AnsibleError('{0} on the controlling host must contain a `login` and `password` entry for {1}'
.format(identity_path, appliance_url))
return {'id': id, 'api_key': api_key}
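# For reference, a matching identity file entry follows the netrc
# convention and is keyed on the appliance's /authn URL. The values in
# this sketch are purely illustrative:
#
#   machine https://conjur.example.com/authn
#     login host/ansible/controller
#     password f9yykd2r0dajkbcvssfp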
# Use credentials to retrieve temporary authorization token
def _fetch_conjur_token(conjur_url, account, username, api_key):
conjur_url = '{0}/authn/{1}/{2}/authenticate'.format(conjur_url, account, username)
display.vvvv('Authentication request to Conjur at: {0}, with user: {1}'.format(conjur_url, username))
response = open_url(conjur_url, data=api_key, method='POST')
code = response.getcode()
if code != 200:
raise AnsibleError('Failed to authenticate as \'{0}\' (got {1} response)'
.format(username, code))
return response.read()
# Retrieve Conjur variable using the temporary token
def _fetch_conjur_variable(conjur_variable, token, conjur_url, account):
token = b64encode(token)
headers = {'Authorization': 'Token token="{0}"'.format(token)}
display.vvvv('Header: {0}'.format(headers))
url = '{0}/secrets/{1}/variable/{2}'.format(conjur_url, account, quote_plus(conjur_variable))
display.vvvv('Conjur Variable URL: {0}'.format(url))
response = open_url(url, headers=headers, method='GET')
if response.getcode() == 200:
display.vvvv('Conjur variable {0} was successfully retrieved'.format(conjur_variable))
return [response.read()]
if response.getcode() == 401:
raise AnsibleError('Conjur request has invalid authorization credentials')
if response.getcode() == 403:
raise AnsibleError('The controlling host\'s Conjur identity does not have authorization to retrieve {0}'
.format(conjur_variable))
if response.getcode() == 404:
raise AnsibleError('The variable {0} does not exist'.format(conjur_variable))
return {}
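# As a worked example of the URL construction above, fetching the variable
# 'db/password' for account 'myorg' against a (hypothetical) appliance at
# https://conjur.example.com issues:
#
#   GET https://conjur.example.com/secrets/myorg/variable/db%2Fpassword
#
# with the base64-encoded token carried in the Authorization header.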
class LookupModule(LookupBase):
def run(self, terms, variables=None, **kwargs):
conf_file = self.get_option('config_file')
conf = _load_conf_from_file(conf_file)
identity_file = self.get_option('identity_file')
identity = _load_identity_from_file(identity_file, conf['appliance_url'])
token = _fetch_conjur_token(conf['appliance_url'], conf['account'], identity['id'], identity['api_key'])
return _fetch_conjur_variable(terms[0], token, conf['appliance_url'], conf['account'])
|
umlfri/umlfri2
|
refs/heads/master
|
umlfri2/types/geometry/line.py
|
1
|
from .point import Point
class Line:
def __init__(self, x1, y1, x2, y2):
self.__x1 = x1
self.__y1 = y1
self.__x2 = x2
self.__y2 = y2
@staticmethod
def from_point_point(p1, p2):
return Line(p1.x, p1.y, p2.x, p2.y)
@property
def first(self):
return Point(self.__x1, self.__y1)
@property
def second(self):
return Point(self.__x2, self.__y2)
def get_abc(self):
# computed by wolframalpha
# http://www.wolframalpha.com/input/?i=solve+x1%2Bb*y1%2Bc%3D0%2Cx2%2Bb*y2%2Bc%3D0+for+a%2Cb%2Cc
if self.__y1 == self.__y2:
return 0, 1, -self.__y1
else:
return 1, \
(self.__x2 - self.__x1) / (self.__y1 - self.__y2), \
(self.__x1*self.__y2 - self.__x2*self.__y1) / (self.__y1 - self.__y2)
def intersect(self, other):
if isinstance(other, Line):
a1, b1, c1 = self.get_abc()
a2, b2, c2 = other.get_abc()
# computed by wolframalpha
# http://www.wolframalpha.com/input/?i=solve+a1*x%2Bb1*y%2Bc1%3D0%2Ca2*x%2Bb2*y%2Bc2%3D0+for+x%2Cy
if a2*b1 == a1*b2:
return
x = int(round((b2*c1 - b1*c2)/(a2*b1 - a1*b2)))
y = int(round((a2*c1 - a1*c2)/(a1*b2 - a2*b1)))
if min(self.__x1, self.__x2) <= x <= max(self.__x1, self.__x2) and \
min(other.__x1, other.__x2) <= x <= max(other.__x1, other.__x2) and \
min(self.__y1, self.__y2) <= y <= max(self.__y1, self.__y2) and \
min(other.__y1, other.__y2) <= y <= max(other.__y1, other.__y2):
yield Point(x, y)
else:
yield from other.intersect(self)
def get_nearest_point_to(self, other):
if isinstance(other, Point):
a, b, c = self.get_abc()
x = other.x
y = other.y
t = a*a + b*b
# closest point according to wikipedia
# https://en.wikipedia.org/wiki/Distance_from_a_point_to_a_line
closest_x = (b*(b*x - a*y) - a*c) / t
closest_y = (a*(-b*x + a*y) - b*c) / t
if self.__x2 > self.__x1 > closest_x:
return self.first
if self.__x1 > self.__x2 > closest_x:
return self.second
if self.__x2 < self.__x1 < closest_x:
return self.first
if self.__x1 < self.__x2 < closest_x:
return self.second
if self.__y2 > self.__y1 > closest_y:
return self.first
if self.__y1 > self.__y2 > closest_y:
return self.second
if self.__y2 < self.__y1 < closest_y:
return self.first
if self.__y1 < self.__y2 < closest_y:
return self.second
return Point(closest_x, closest_y)
else:
return other.get_distance_to(self)
def get_distance_to(self, other):
if isinstance(other, Point):
return (other - self.get_nearest_point_to(other)).length
else:
return other.get_distance_to(self)
def __str__(self):
return "[{0}, {1}], [{2}, {3}]".format(self.__x1, self.__y1, self.__x2, self.__y2)
def __repr__(self):
return "<Line {0}>".format(self)
|
chanceraine/nupic
|
refs/heads/master
|
tests/unit/nupic/regions/knn_anomaly_classifier_region_test.py
|
35
|
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unit tests for the clamodel module."""
import sys
import copy
from datetime import datetime
import unittest2 as unittest
import numpy
from mock import Mock, patch, ANY, call
from nupic.support.unittesthelpers.testcasebase import (unittest,
TestOptionParser)
from nupic.frameworks.opf.opfutils import InferenceType
from nupic.regions.KNNAnomalyClassifierRegion import (
KNNAnomalyClassifierRegion,
_CLAClassificationRecord)
from nupic.frameworks.opf.exceptions import (CLAModelInvalidRangeError)
class KNNAnomalyClassifierRegionTest(unittest.TestCase):
"""KNNAnomalyClassifierRegion unit tests."""
def setUp(self):
self.params = dict(
trainRecords=10,
anomalyThreshold=1.1,
cacheSize=10000,
k=1,
distanceMethod='rawOverlap',
distanceNorm=1,
doBinarization=1,
replaceDuplicates=0,
maxStoredPatterns=1000)
self.helper = KNNAnomalyClassifierRegion(**self.params)
def testInit(self):
params = dict(
trainRecords=100,
anomalyThreshold=101,
cacheSize=102,
classificationVectorType=1,
k=1,
distanceMethod='rawOverlap',
distanceNorm=1,
doBinarization=1,
replaceDuplicates=0,
maxStoredPatterns=1000)
helper = KNNAnomalyClassifierRegion(**params)
self.assertEqual(helper.trainRecords, params['trainRecords'])
self.assertEqual(helper.anomalyThreshold, params['anomalyThreshold'])
self.assertEqual(helper.cacheSize, params['cacheSize'])
self.assertEqual(helper.classificationVectorType,
params['classificationVectorType'])
@patch.object(KNNAnomalyClassifierRegion, 'classifyState')
@patch.object(KNNAnomalyClassifierRegion, 'getParameter')
@patch.object(KNNAnomalyClassifierRegion, 'constructClassificationRecord')
  def testComputeLabelResults(self, constructRecord, getParam, classifyState):
params = {
'trainRecords': 0
}
getParam.side_effect = params.get
state = {
"ROWID": 0,
"anomalyScore": 1.0,
"anomalyVector": [1,4,5],
"anomalyLabel": "Label"
}
record = _CLAClassificationRecord(**state)
constructRecord.return_value = record
self.helper.compute(dict(), dict())
classifyState.assert_called_once_with(record)
self.assertEqual(self.helper.labelResults, state['anomalyLabel'])
def testGetLabels(self):
# No _recordsCache
self.helper._recordsCache = []
self.assertEqual(self.helper.getLabels(), \
{'isProcessing': False, 'recordLabels': []})
# Invalid ranges
self.helper._recordsCache = [Mock(ROWID=10)]
self.assertRaises(CLAModelInvalidRangeError,
self.helper.getLabels, start=100, end=100)
self.helper._recordsCache = [Mock(ROWID=10)]
self.assertRaises(CLAModelInvalidRangeError,
self.helper.getLabels, start=-100, end=-100)
self.helper._recordsCache = [Mock(ROWID=10)]
self.assertRaises(CLAModelInvalidRangeError,
self.helper.getLabels, start=100, end=-100)
# Valid no threshold labels
values = {
'categoryRecencyList': [4, 5, 7],
}
self.helper.saved_categories = ['TestCategory']
categoryList = [1, 1, 1]
classifier = self.helper._knnclassifier
classifier.getParameter = Mock(side_effect=values.get)
classifier._knn._categoryList = categoryList
results = self.helper.getLabels()
self.assertTrue('isProcessing' in results)
self.assertTrue('recordLabels' in results)
self.assertEqual(len(results['recordLabels']),
len(values['categoryRecencyList']))
for record in results['recordLabels']:
self.assertTrue(record['ROWID'] in values['categoryRecencyList'])
self.assertEqual(record['labels'], self.helper.saved_categories)
@patch.object(KNNAnomalyClassifierRegion, '_getStateAnomalyVector')
@patch.object(KNNAnomalyClassifierRegion, 'constructClassificationRecord')
@patch.object(KNNAnomalyClassifierRegion, 'classifyState')
def testAddLabel(self, classifyState, constructVector, getVector):
# Setup Mocks
getVector.return_value = numpy.array([0, 0, 0, 1, 0, 0, 1])
knn = self.helper._knnclassifier._knn
knn.learn = Mock()
# Invalid ranges
self.helper._recordsCache = []
self.assertRaises(CLAModelInvalidRangeError,
self.helper.addLabel, start=100, end=100, labelName="test")
self.helper._recordsCache = [Mock(ROWID=10)]
self.assertRaises(CLAModelInvalidRangeError,
self.helper.addLabel, start=100, end=100, labelName="test")
self.helper._recordsCache = [Mock(ROWID=10)]
self.assertRaises(CLAModelInvalidRangeError,
self.helper.addLabel, start=-100, end=-100, labelName="test")
self.helper._recordsCache = [Mock(ROWID=10)]
self.assertRaises(CLAModelInvalidRangeError,
self.helper.addLabel, start=100, end=-100, labelName="test")
# Valid no threshold labels
self.helper._recordsCache = [
Mock(ROWID=10, anomalyLabel=["Test"], setByUser=False),
Mock(ROWID=11, anomalyLabel=[], setByUser=False),
Mock(ROWID=12, anomalyLabel=["Test"], setByUser=True)]
results = self.helper.addLabel(11, 12, "Added")
# Verifies records were updated
self.assertEqual(results, None)
self.assertTrue('Added' in self.helper._recordsCache[1].anomalyLabel)
self.assertTrue(self.helper._recordsCache[1].setByUser)
# Verifies record added to KNN classifier
knn.learn.assert_called_once_with(ANY, ANY, rowID=11)
    # Verifies that records after the added label are recomputed
classifyState.assert_called_once_with(self.helper._recordsCache[2])
@patch.object(KNNAnomalyClassifierRegion, 'constructClassificationRecord')
@patch.object(KNNAnomalyClassifierRegion, 'classifyState')
def testRemoveLabel(self, classifyState, constructClassificationRecord):
knn = self.helper._knnclassifier._knn
knn._numPatterns = 3
knn._categoryRecencyList = [10, 11, 12]
knn.removeIds = Mock(side_effect = self.mockRemoveIds)
self.helper._recordsCache = []
self.assertRaises(CLAModelInvalidRangeError,
                      self.helper.removeLabels)
# Invalid ranges
self.helper._recordsCache = [Mock(ROWID=10)]
self.assertRaises(CLAModelInvalidRangeError,
self.helper.removeLabels, start=100, end=100)
self.helper._recordsCache = [Mock(ROWID=10)]
self.assertRaises(CLAModelInvalidRangeError,
self.helper.removeLabels, start=-100, end=-100)
self.helper._recordsCache = [Mock(ROWID=10)]
self.assertRaises(CLAModelInvalidRangeError,
self.helper.removeLabels, start=100, end=-100)
# Valid no threshold labels
self.helper._recordsCache = [
Mock(ROWID=10, anomalyLabel=["Test"], setByUser=False),
Mock(ROWID=11, anomalyLabel=["Test"], setByUser=False),
Mock(ROWID=12, anomalyLabel=["Test"], setByUser=True)]
results = self.helper.removeLabels(11, 12, "Test")
self.assertEqual(results, None)
self.assertTrue('Test' not in self.helper._recordsCache[1].anomalyLabel)
# Verifies records removed from KNN classifier
self.assertEqual(knn.removeIds.mock_calls, [call([11]), call([])])
# Verifies records after removed record are updated
classifyState.assert_called_once_with(self.helper._recordsCache[2])
@patch.object(KNNAnomalyClassifierRegion, 'constructClassificationRecord')
@patch.object(KNNAnomalyClassifierRegion, 'classifyState')
def testRemoveLabelNoFilter(self, classifyState,
constructClassificationRecord):
knn = self.helper._knnclassifier._knn
knn._numPatterns = 3
knn._categoryRecencyList = [10, 11, 12]
knn.removeIds = Mock(side_effect=self.mockRemoveIds)
# Valid no threshold labels
self.helper._recordsCache = [
Mock(ROWID=10, anomalyLabel=["Test"], setByUser=False),
Mock(ROWID=11, anomalyLabel=["Test"], setByUser=False),
Mock(ROWID=12, anomalyLabel=["Test"], setByUser=True)]
results = self.helper.removeLabels(11, 12)
self.assertEqual(results, None)
self.assertTrue('Test' not in self.helper._recordsCache[1].anomalyLabel)
# Verifies records removed from KNN classifier
self.assertEqual(knn.removeIds.mock_calls, [call([11]), call([])])
# Verifies records after removed record are updated
classifyState.assert_called_once_with(self.helper._recordsCache[2])
@patch.object(KNNAnomalyClassifierRegion, 'classifyState')
def testSetGetThreshold(self, classifyState):
self.helper._recordsCache = [Mock(), Mock(), Mock()]
self.helper.setParameter('anomalyThreshold', None, 1.0)
self.assertAlmostEqual(self.helper.anomalyThreshold, 1.0)
self.assertEqual(len(classifyState.mock_calls),
len(self.helper._recordsCache))
self.assertAlmostEqual(self.helper.getParameter('anomalyThreshold'), 1.0)
self.assertRaises(Exception, self.helper.setParameter,
'anomalyThreshold', None, 'invalid')
@patch.object(KNNAnomalyClassifierRegion, 'classifyState')
def testSetGetWaitRecords(self, classifyState):
self.helper._recordsCache = [
Mock(ROWID=10, anomalyLabel=["Test"], setByUser=False),
Mock(ROWID=11, anomalyLabel=["Test"], setByUser=False),
Mock(ROWID=12, anomalyLabel=["Test"], setByUser=True)]
self.helper.setParameter('trainRecords', None, 20)
self.assertEqual(self.helper.trainRecords, 20)
self.assertEqual(len(classifyState.mock_calls),
len(self.helper._recordsCache))
self.assertEqual(self.helper.getParameter('trainRecords'), 20)
# Test invalid parameter type
self.assertRaises(Exception, self.helper.setParameter,
'trainRecords', None, 'invalid')
# Test invalid value before first record ROWID in cache
state = {
"ROWID": 1000,
"anomalyScore": 1.0,
"anomalyVector": [1,4,5],
"anomalyLabel": "Label"
}
record = _CLAClassificationRecord(**state)
self.helper._recordsCache = [state]
self.assertRaises(Exception, self.helper.setParameter,
'trainRecords', None, 0)
@patch.object(KNNAnomalyClassifierRegion, 'constructClassificationRecord')
def testSetGetWaitRecordsRecalculate(self, getRecord):
"""
    This test ensures that records in the classifier are removed once they
    are no longer used, after trainRecords is set.
"""
self.helper.cacheSize = 5
self.helper.anomalyThreshold = 0.8
self.helper._anomalyVectorLength = 20
records = [
Mock(ROWID=10, anomalyLabel=["Test"], anomalyScore=1, setByUser=False, anomalyVector=numpy.array([1,4])),
Mock(ROWID=11, anomalyLabel=["Test"], anomalyScore=0, setByUser=False, anomalyVector=numpy.array([1,2])),
Mock(ROWID=12, anomalyLabel=["Test"], anomalyScore=0, setByUser=False, anomalyVector=numpy.array([1,4])),
Mock(ROWID=13, anomalyLabel=["Test"], anomalyScore=0, setByUser=False, anomalyVector=numpy.array([1,2,6,7])),
Mock(ROWID=14, anomalyLabel=["Test"], anomalyScore=1, setByUser=False, anomalyVector=numpy.array([1,10])),
Mock(ROWID=15, anomalyLabel=["Test"], anomalyScore=0, setByUser=False, anomalyVector=numpy.array([1,3])),
Mock(ROWID=16, anomalyLabel=["Test"], anomalyScore=0, setByUser=False, anomalyVector=numpy.array([1,4])),
Mock(ROWID=17, anomalyLabel=["Test"], anomalyScore=0, setByUser=False, anomalyVector=numpy.array([10])),
Mock(ROWID=18, anomalyLabel=["Test"], anomalyScore=0, setByUser=False, anomalyVector=numpy.array([1,4]))]
getRecord.side_effect = records
for i in records:
self.helper.compute(dict(), dict())
self.assertEqual(self.helper._knnclassifier._knn._numPatterns, 6)
self.assertEqual(
self.helper._knnclassifier.getParameter('categoryRecencyList'),
[10, 12, 14, 16, 17, 18],
"Classifier incorrectly classified test records."
)
# Now set trainRecords and should remove the labels outside of cache
# and relabel points.
self.helper.setParameter('trainRecords', None, 14)
self.assertEqual(self.helper._knnclassifier._knn._numPatterns, 2)
self.assertEqual(
self.helper._knnclassifier.getParameter('categoryRecencyList'),
[14, 17],
"Classifier incorrectly reclassified test records after setting "
"trainRecords")
@patch.object(KNNAnomalyClassifierRegion, '_addRecordToKNN')
@patch.object(KNNAnomalyClassifierRegion, '_deleteRecordsFromKNN')
@patch.object(KNNAnomalyClassifierRegion, '_recomputeRecordFromKNN')
@patch.object(KNNAnomalyClassifierRegion, '_categoryToLabelList')
def testUpdateState(self, toLabelList, recompute, deleteRecord, addRecord):
record = {
"ROWID": 0,
"anomalyScore": 1.0,
"anomalyVector": "",
"anomalyLabel": ["Label"],
"setByUser": False
}
# Test record not labeled and not above threshold
deleteRecord.reset_mock()
addRecord.reset_mock()
self.helper.trainRecords = 0
self.helper.anomalyThreshold = 1.1
toLabelList.return_value = []
state = _CLAClassificationRecord(**record)
self.helper.classifyState(state)
self.assertEqual(state.anomalyLabel, [])
deleteRecord.assert_called_once_with([state])
# Test record not labeled and above threshold
deleteRecord.reset_mock()
addRecord.reset_mock()
self.helper.anomalyThreshold = 0.5
toLabelList.return_value = []
state = _CLAClassificationRecord(**record)
self.helper.classifyState(state)
self.assertEqual(state.anomalyLabel, \
[KNNAnomalyClassifierRegion.AUTO_THRESHOLD_CLASSIFIED_LABEL])
addRecord.assert_called_once_with(state)
# Test record not labeled and above threshold during wait period
deleteRecord.reset_mock()
addRecord.reset_mock()
self.helper.trainRecords = 10
self.helper.anomalyThreshold = 0.5
toLabelList.return_value = []
state = _CLAClassificationRecord(**record)
self.helper.classifyState(state)
self.assertEqual(state.anomalyLabel, [])
self.assertTrue(not addRecord.called)
# Test record labeled and not above threshold
deleteRecord.reset_mock()
addRecord.reset_mock()
self.helper.trainRecords = 0
self.helper.anomalyThreshold = 1.1
toLabelList.return_value = ["Label"]
state = _CLAClassificationRecord(**record)
self.helper.classifyState(state)
self.assertEqual(state.anomalyLabel, ["Label"])
self.assertTrue(not addRecord.called)
# Test setByUser
deleteRecord.reset_mock()
addRecord.reset_mock()
self.helper.anomalyThreshold = 1.1
toLabelList.return_value = ["Label 2"]
recordCopy = copy.deepcopy(record)
recordCopy['setByUser'] = True
state = _CLAClassificationRecord(**recordCopy)
self.helper.classifyState(state)
self.assertEqual(state.anomalyLabel,
[recordCopy["anomalyLabel"][0], toLabelList.return_value[0]])
addRecord.assert_called_once_with(state)
# Test removal of above threshold
deleteRecord.reset_mock()
addRecord.reset_mock()
self.helper.anomalyThreshold = 1.1
toLabelList.return_value = []
recordCopy = copy.deepcopy(record)
recordCopy['setByUser'] = True
recordCopy['anomalyLabel'] = \
[KNNAnomalyClassifierRegion.AUTO_THRESHOLD_CLASSIFIED_LABEL,
KNNAnomalyClassifierRegion.AUTO_THRESHOLD_CLASSIFIED_LABEL + \
KNNAnomalyClassifierRegion.AUTO_TAG]
state = _CLAClassificationRecord(**recordCopy)
self.helper.classifyState(state)
self.assertEqual(state.anomalyLabel, [])
# Auto classified threshold
deleteRecord.reset_mock()
addRecord.reset_mock()
self.helper.anomalyThreshold = 1.1
toLabelList.return_value = \
[KNNAnomalyClassifierRegion.AUTO_THRESHOLD_CLASSIFIED_LABEL]
recordCopy = copy.deepcopy(record)
recordCopy['setByUser'] = True
recordCopy['anomalyLabel'] = \
[KNNAnomalyClassifierRegion.AUTO_THRESHOLD_CLASSIFIED_LABEL]
state = _CLAClassificationRecord(**recordCopy)
self.helper.classifyState(state)
self.assertEqual(state.anomalyLabel,
[KNNAnomalyClassifierRegion.AUTO_THRESHOLD_CLASSIFIED_LABEL + \
KNNAnomalyClassifierRegion.AUTO_TAG])
addRecord.assert_called_once_with(state)
# Test precedence of threshold label above auto threshold label
deleteRecord.reset_mock()
addRecord.reset_mock()
self.helper.anomalyThreshold = 0.8
toLabelList.return_value = \
[KNNAnomalyClassifierRegion.AUTO_THRESHOLD_CLASSIFIED_LABEL,
KNNAnomalyClassifierRegion.AUTO_THRESHOLD_CLASSIFIED_LABEL + \
KNNAnomalyClassifierRegion.AUTO_TAG]
recordCopy = copy.deepcopy(record)
recordCopy['setByUser'] = True
recordCopy['anomalyLabel'] = \
[KNNAnomalyClassifierRegion.AUTO_THRESHOLD_CLASSIFIED_LABEL]
state = _CLAClassificationRecord(**recordCopy)
self.helper.classifyState(state)
self.assertEqual(state.anomalyLabel,
[KNNAnomalyClassifierRegion.AUTO_THRESHOLD_CLASSIFIED_LABEL])
addRecord.assert_called_once_with(state)
@patch.object(KNNAnomalyClassifierRegion, '_getStateAnomalyVector')
def testAddRecordToKNN(self, getAnomalyVector):
getAnomalyVector.return_value = numpy.array([0, 1, 0, 0, 1, 0, 1, 1])
values = {
'categoryRecencyList': [1, 2, 3]
}
classifier = self.helper._knnclassifier
classifier.getParameter = Mock(side_effect=values.get)
classifier._knn.learn = Mock()
classifier._knn.prototypeSetCategory = Mock()
state = {
"ROWID": 5,
"anomalyScore": 1.0,
"anomalyVector": numpy.array([1, 5, 7, 8]),
"anomalyLabel": ["Label"],
"setByUser": False
}
record = _CLAClassificationRecord(**state)
# Test with record not already in KNN
self.helper._addRecordToKNN(record)
classifier._knn.learn.assert_called_once_with(getAnomalyVector.return_value,
ANY, rowID=state['ROWID'])
self.assertTrue(not classifier._knn.prototypeSetCategory.called)
classifier._knn.learn.reset_mock()
# Test with record already in KNN
values = {
'categoryRecencyList': [1, 2, 3, 5]
}
classifier.getParameter.side_effect = values.get
self.helper._addRecordToKNN(record)
classifier._knn.prototypeSetCategory.assert_called_once_with(
state['ROWID'], ANY)
self.assertTrue(not classifier._knn.learn.called)
@patch.object(KNNAnomalyClassifierRegion, '_getStateAnomalyVector')
def testDeleteRangeFromKNN(self, getAnomalyVector):
getAnomalyVector.return_value = "Vector"
values = {
'categoryRecencyList': [1, 2, 3]
}
classifier = self.helper._knnclassifier
classifier.getParameter = Mock(side_effect=values.get)
classifier._knn._numPatterns = len(values['categoryRecencyList'])
classifier._knn.removeIds = Mock(side_effect=self.mockRemoveIds)
    # Test deleting a bounded range of ids: [start, end)
self.helper._deleteRangeFromKNN(start=1, end=3)
classifier._knn.removeIds.assert_called_once_with([1, 2])
classifier._knn.removeIds.reset_mock()
    # Test deleting an open-ended range (no end specified)
values = {
'categoryRecencyList': [1, 2, 3, 5]
}
classifier.getParameter.side_effect = values.get
self.helper._deleteRangeFromKNN(start=1)
classifier._knn.removeIds.assert_called_once_with([1, 2, 3, 5])
@patch.object(KNNAnomalyClassifierRegion, '_getStateAnomalyVector')
def testRecomputeRecordFromKNN(self, getAnomalyVector):
getAnomalyVector.return_value = "Vector"
self.helper.trainRecords = 0
values = {
'categoryRecencyList': [1, 2, 3, 5, 6, 7, 8, 9],
'latestDists': numpy.array([0.7, 0.2, 0.5, 1, 0.3, 0.2, 0.1]),
'categories': ['A','B','C','D','E','F','G']
}
classifier = self.helper._knnclassifier
classifier.getLatestDistances = Mock(return_value=values['latestDists'])
classifier.getCategoryList = Mock(return_value=values['categories'])
classifier.getParameter = Mock(side_effect=values.get)
classifier.setParameter = Mock()
classifier.compute = Mock()
state = {
"ROWID": 5,
"anomalyScore": 1.0,
"anomalyVector": "",
"anomalyLabel": ["Label"],
"setByUser": False
}
record = _CLAClassificationRecord(**state)
# Test finding best category before record - exists
self.helper._classificationMaxDist = 0.4
self.helper._autoDetectWaitRecords = 0
result = self.helper._recomputeRecordFromKNN(record)
self.assertEqual(result, 'B')
    # Test finding best category before record - does not exist
self.helper._classificationMaxDist = 0.1
result = self.helper._recomputeRecordFromKNN(record)
self.assertEqual(result, None)
    # Test finding best category before record - no record before
record.ROWID = 0
self.helper._classificationMaxDist = 0.1
result = self.helper._recomputeRecordFromKNN(record)
self.assertEqual(result, None)
def testConstructClassificationVector(self):
modelParams = {
'__numRunCalls': 0
}
spVals = {
'params': {
'activeOutputCount': 5
},
'output': {
'bottomUpOut': numpy.array([1, 1, 0, 0, 1])
}
}
tpVals = {
'params': {
'cellsPerColumn': 2,
'columnCount': 2
},
'output': {
'lrnActive': numpy.array([1, 0, 0, 1]),
'topDownOut': numpy.array([1, 0, 0, 0, 1])
}
}
inputs = dict(
spBottomUpOut=spVals['output']['bottomUpOut'],
tpTopDownOut=tpVals['output']['topDownOut'],
tpLrnActiveStateT=tpVals['output']['lrnActive']
)
self.helper._activeColumnCount = 5
# Test TP Cell vector
self.helper.classificationVectorType = 1
vector = self.helper.constructClassificationRecord(inputs)
self.assertEqual(vector.anomalyVector,
tpVals['output']['lrnActive'].nonzero()[0].tolist())
# Test SP and TP Column Error vector
self.helper.classificationVectorType = 2
self.helper._prevPredictedColumns = numpy.array(
[1, 0, 0, 0, 1]).nonzero()[0]
vector = self.helper.constructClassificationRecord(inputs)
self.assertEqual(vector.anomalyVector, [0, 1, 4])
self.helper._prevPredictedColumns = numpy.array(
[1, 0, 1, 0, 0]).nonzero()[0]
vector = self.helper.constructClassificationRecord(inputs)
self.assertEqual(vector.anomalyVector, [0, 1, 4, 7])
self.helper.classificationVectorType = 3
self.assertRaises(TypeError, self.helper.constructClassificationRecord,
inputs)
  @patch.object(KNNAnomalyClassifierRegion, 'classifyState')
@patch.object(KNNAnomalyClassifierRegion, 'constructClassificationRecord')
def testCompute(self, createRecord, updateState):
state = {
"ROWID": 0,
"anomalyScore": 1.0,
"anomalyVector": numpy.array([1, 0, 0, 0, 1]),
"anomalyLabel": "Label"
}
record = _CLAClassificationRecord(**state)
createRecord.return_value = record
inputs = dict()
    outputs = dict()
# Test add first record
self.helper.cacheSize = 10
self.helper.trainRecords = 0
self.helper._recordsCache = []
self.helper.compute(inputs, outputs)
self.assertEqual(self.helper._recordsCache[-1], record)
self.assertEqual(len(self.helper._recordsCache), 1)
updateState.assert_called_once_with(self.helper._recordsCache[-1])
# Test add record before wait records
updateState.reset_mock()
self.helper.cacheSize = 10
self.helper.trainRecords = 10
self.helper._recordsCache = []
self.helper.compute(inputs, outputs)
self.assertEqual(self.helper._recordsCache[-1], record)
self.assertEqual(len(self.helper._recordsCache), 1)
self.helper.compute(inputs, outputs)
self.assertEqual(self.helper._recordsCache[-1], record)
self.assertEqual(len(self.helper._recordsCache), 2)
self.assertTrue(not updateState.called)
# Test exceeded cache length
updateState.reset_mock()
self.helper.cacheSize = 1
self.helper._recordsCache = []
self.helper.compute(inputs, outputs)
self.assertEqual(self.helper._recordsCache[-1], record)
self.assertEqual(len(self.helper._recordsCache), 1)
self.helper.compute(inputs, outputs)
self.assertEqual(self.helper._recordsCache[-1], record)
self.assertEqual(len(self.helper._recordsCache), 1)
self.assertTrue(not updateState.called)
def testCategoryToList(self):
result = self.helper._categoryToLabelList(None)
self.assertEqual(result, [])
self.helper.saved_categories = ['A', 'B', 'C']
result = self.helper._categoryToLabelList(1)
self.assertEqual(result, ['A'])
result = self.helper._categoryToLabelList(4)
self.assertEqual(result, ['C'])
result = self.helper._categoryToLabelList(5)
self.assertEqual(result, ['A','C'])
def testGetAnomalyVector(self):
state = {
"ROWID": 0,
"anomalyScore": 1.0,
"anomalyVector": [1,4,5],
"anomalyLabel": "Label"
}
record = _CLAClassificationRecord(**state)
self.helper._anomalyVectorLength = 10
vector = self.helper._getStateAnomalyVector(record)
self.assertEqual(len(vector), self.helper._anomalyVectorLength)
self.assertEqual(vector.nonzero()[0].tolist(), record.anomalyVector)
# Tests for configuration
# ===========================================================================
def testSetState(self):
# No Version set
state = dict(_classificationDelay=100)
state['_knnclassifierProps'] = self.params
self.helper._vectorType = None
self.helper.__setstate__(state)
self.assertEqual(self.helper.classificationVectorType, 1)
self.assertEqual(self.helper._version,
KNNAnomalyClassifierRegion.__VERSION__)
# Version 1
state = dict(_version=1, _classificationDelay=100)
state['_knnclassifierProps'] = self.params
self.helper.__setstate__(state)
self.assertEqual(self.helper._version,
KNNAnomalyClassifierRegion.__VERSION__)
# Invalid Version
state = dict(_version="invalid")
state['_knnclassifierProps'] = self.params
self.assertRaises(Exception, self.helper.__setstate__, state)
# Tests for _CLAClassificationRecord class
# ===========================================================================
def testCLAClassificationRecord(self):
record = {
"ROWID": 0,
"anomalyScore": 1.0,
"anomalyVector": "Vector",
"anomalyLabel": "Label"
}
state = _CLAClassificationRecord(**record)
self.assertEqual(state.ROWID, record['ROWID'])
self.assertEqual(state.anomalyScore, record['anomalyScore'])
self.assertEqual(state.anomalyVector, record['anomalyVector'])
self.assertEqual(state.anomalyLabel, record['anomalyLabel'])
self.assertEqual(state.setByUser, False)
record = {
"ROWID": 0,
"anomalyScore": 1.0,
"anomalyVector": "Vector",
"anomalyLabel": "Label",
"setByUser": True
}
state = _CLAClassificationRecord(**record)
self.assertEqual(state.ROWID, record['ROWID'])
self.assertEqual(state.anomalyScore, record['anomalyScore'])
self.assertEqual(state.anomalyVector, record['anomalyVector'])
self.assertEqual(state.anomalyLabel, record['anomalyLabel'])
self.assertEqual(state.setByUser, record['setByUser'])
def testCLAClassificationRecordGetState(self):
record = {
"ROWID": 0,
"anomalyScore": 1.0,
"anomalyVector": "Vector",
"anomalyLabel": "Label",
"setByUser": False
}
state = _CLAClassificationRecord(**record)
self.assertEqual(state.__getstate__(), record)
def testCLAClassificationRecordSetState(self):
record = {
"ROWID": None,
"anomalyScore": None,
"anomalyVector": None,
"anomalyLabel": None,
"setByUser": None
}
state = _CLAClassificationRecord(**record)
record = {
"ROWID": 0,
"anomalyScore": 1.0,
"anomalyVector": "Vector",
"anomalyLabel": "Label",
"setByUser": False
}
state.__setstate__(record)
self.assertEqual(state.ROWID, record['ROWID'])
self.assertEqual(state.anomalyScore, record['anomalyScore'])
self.assertEqual(state.anomalyVector, record['anomalyVector'])
self.assertEqual(state.anomalyLabel, record['anomalyLabel'])
self.assertEqual(state.setByUser, record['setByUser'])
def mockRemoveIds(self, ids):
self.helper._knnclassifier._knn._numPatterns -= len(ids)
knnClassifier = self.helper._knnclassifier
for idx in ids:
if idx in self.helper._knnclassifier.getParameter('categoryRecencyList'):
knnClassifier.getParameter('categoryRecencyList').remove(idx)
if __name__ == '__main__':
parser = TestOptionParser()
options, args = parser.parse_args()
# Form the command line for the unit test framework
args = [sys.argv[0]] + args
unittest.main(argv=args)
|
WatanabeYasumasa/edx-platform
|
refs/heads/gacco2/master
|
cms/urls_dev.py
|
201
|
"""
URLconf for development-only views.
This gets imported by urls.py and added to its URLconf if we are running in
development mode; otherwise, it is ignored.
"""
from django.conf.urls import url
urlpatterns = (
url(r'^dev_mode$', 'contentstore.views.dev.dev_mode', name='dev_mode'),
url(r'^template/(?P<template>.+)$', 'contentstore.views.dev.dev_show_template'),
)
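# A sketch of how the importing URLconf might splice these patterns in
# (hypothetical - the actual wiring lives in cms/urls.py, as the module
# docstring notes):
#
#   from django.conf import settings
#   if settings.DEBUG:
#       import urls_dev
#       urlpatterns += urls_dev.urlpatterns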
|
moijes12/treeherder
|
refs/heads/master
|
tests/webapp/api/test_bug_job_map_api.py
|
11
|
import json
import random
from time import time
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from rest_framework.test import APIClient
def test_create_bug_job_map_no_auth(eleven_jobs_stored, jm):
"""
    test creating a single bug_job_map via endpoint
"""
client = APIClient()
job = jm.get_job_list(0, 1)[0]
bug_job_map_obj = {
"job_id": job["id"],
"bug_id": 1,
"type": "manual"
}
resp = client.post(
reverse("bug-job-map-list", kwargs={"project": jm.project}),
bug_job_map_obj, expect_errors=True)
assert resp.status_code == 403
jm.disconnect()
def test_create_bug_job_map(eleven_jobs_stored, mock_message_broker, jm):
"""
    test creating a single bug_job_map via endpoint
"""
client = APIClient()
user = User.objects.create(username="MyName", email="[email protected]")
client.force_authenticate(user=user)
job = jm.get_job_list(0, 1)[0]
bug_job_map_obj = {
u"job_id": job["id"],
u"bug_id": 1L,
u"type": u"manual"
}
client.post(
reverse("bug-job-map-list", kwargs={"project": jm.project}),
bug_job_map_obj
)
bug_job_map_obj["who"] = user.email
user.delete()
actual_obj = jm.get_bug_job_map_list(0, 1)[0]
del actual_obj["submit_timestamp"]
assert bug_job_map_obj == actual_obj
jm.disconnect()
def test_create_bug_job_map_dupe(eleven_jobs_stored, mock_message_broker, jm):
"""
test creating the same bug map skips it
"""
client = APIClient()
user = User.objects.create(username="MyName", email="[email protected]")
client.force_authenticate(user=user)
job = jm.get_job_list(0, 1)[0]
bug_job_map_obj = {
u"job_id": job["id"],
u"bug_id": 1L,
u"type": u"manual",
}
client.post(
reverse("bug-job-map-list", kwargs={"project": jm.project}),
bug_job_map_obj
)
client.post(
reverse("bug-job-map-list", kwargs={"project": jm.project}),
bug_job_map_obj
)
bug_job_map_obj["who"] = user.email
user.delete()
actual_obj = jm.get_bug_job_map_list(0, 1)[0]
del actual_obj["submit_timestamp"]
assert bug_job_map_obj == actual_obj
jm.disconnect()
def test_bug_job_map_list(webapp, jm, eleven_jobs_stored):
"""
test retrieving a list of bug_job_map
"""
jobs = jm.get_job_list(0, 10)
bugs = [random.randint(0, 100) for i in range(0, len(jobs))]
submit_timestamp = int(time())
who = "[email protected]"
expected = list()
for i, v in enumerate(jobs):
jm.insert_bug_job_map(v["id"], bugs[i],
"manual", submit_timestamp, who)
expected.append({
"job_id": v["id"],
"bug_id": bugs[i],
"type": "manual",
"submit_timestamp": submit_timestamp,
"who": who
})
submit_timestamp += 1
resp = webapp.get(
reverse("bug-job-map-list", kwargs={"project": jm.project}))
# The order of the bug-job-map list is not guaranteed.
assert sorted(resp.json) == sorted(expected)
jm.disconnect()
def test_bug_job_map_detail(webapp, jm, eleven_jobs_stored):
"""
    test retrieving a single bug_job_map entry
"""
job_id = jm.get_job_list(0, 1)[0]["id"]
bug_id = random.randint(0, 100)
expected = list()
submit_timestamp = int(time())
who = "[email protected]"
jm.insert_bug_job_map(job_id, bug_id, "manual", submit_timestamp, who)
pk = "{0}-{1}".format(job_id, bug_id)
resp = webapp.get(
reverse("bug-job-map-detail", kwargs={
"project": jm.project,
"pk": pk
})
)
expected = {
"job_id": job_id,
"bug_id": bug_id,
"type": "manual",
"submit_timestamp": submit_timestamp,
"who": who}
assert resp.json == expected
jm.disconnect()
def test_bug_job_map_delete(webapp, eleven_jobs_stored,
jm, mock_message_broker):
"""
    test deleting a bug_job_map entry
"""
client = APIClient()
user = User.objects.create(username="MyName", is_staff=True)
client.force_authenticate(user=user)
job_id = jm.get_job_list(0, 1)[0]["id"]
bug_id = random.randint(0, 100)
submit_timestamp = int(time())
who = "[email protected]"
jm.insert_bug_job_map(job_id, bug_id,
"manual", submit_timestamp, who)
pk = "{0}-{1}".format(job_id, bug_id)
resp = client.delete(
reverse("bug-job-map-detail", kwargs={
"project": jm.project,
"pk": pk
})
)
user.delete()
content = json.loads(resp.content)
assert content == {"message": "Bug job map deleted"}
jm.disconnect()
def test_bug_job_map_delete_no_auth(jm, eleven_jobs_stored):
"""
    test that deleting a bug_job_map requires authentication
"""
client = APIClient()
job_id = jm.get_job_list(0, 1)[0]["id"]
bug_id = random.randint(0, 100)
submit_timestamp = int(time())
who = "[email protected]"
jm.insert_bug_job_map(job_id, bug_id, "manual",
submit_timestamp, who)
pk = "{0}-{1}".format(job_id, bug_id)
resp = client.delete(
reverse("bug-job-map-detail", kwargs={
"project": jm.project,
"pk": pk
})
)
assert resp.status_code == 403
jm.disconnect()
|
Beauhurst/django
|
refs/heads/master
|
django/contrib/flatpages/models.py
|
86
|
from django.contrib.sites.models import Site
from django.db import models
from django.urls import get_script_prefix
from django.utils.encoding import iri_to_uri
from django.utils.translation import gettext_lazy as _
class FlatPage(models.Model):
url = models.CharField(_('URL'), max_length=100, db_index=True)
title = models.CharField(_('title'), max_length=200)
content = models.TextField(_('content'), blank=True)
enable_comments = models.BooleanField(_('enable comments'), default=False)
template_name = models.CharField(
_('template name'),
max_length=70,
blank=True,
help_text=_(
"Example: 'flatpages/contact_page.html'. If this isn't provided, "
"the system will use 'flatpages/default.html'."
),
)
registration_required = models.BooleanField(
_('registration required'),
help_text=_("If this is checked, only logged-in users will be able to view the page."),
default=False,
)
sites = models.ManyToManyField(Site, verbose_name=_('sites'))
class Meta:
db_table = 'django_flatpage'
verbose_name = _('flat page')
verbose_name_plural = _('flat pages')
ordering = ('url',)
def __str__(self):
return "%s -- %s" % (self.url, self.title)
def get_absolute_url(self):
# Handle script prefix manually because we bypass reverse()
return iri_to_uri(get_script_prefix().rstrip('/') + self.url)
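# Illustrative sketch (not part of Django): get_absolute_url() prepends the
# script prefix so flatpages keep working when the site is mounted under a
# sub-path. The prefix value below is hypothetical.
#   from django.urls import set_script_prefix
#   set_script_prefix('/mysite/')
#   page = FlatPage(url='/about/', title='About')
#   page.get_absolute_url()  # -> '/mysite/about/'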
|
okwasi/gyp
|
refs/heads/master
|
test/same-gyp-name/gyptest-default.py
|
318
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Build a .gyp that depends on 2 gyp files with the same name.
"""
import TestGyp
test = TestGyp.TestGyp()
test.run_gyp('all.gyp', chdir='src')
test.relocate('src', 'relocate/src')
test.build('all.gyp', chdir='relocate/src')
expect1 = """\
Hello from main1.cc
"""
expect2 = """\
Hello from main2.cc
"""
if test.format == 'xcode':
chdir1 = 'relocate/src/subdir1'
chdir2 = 'relocate/src/subdir2'
else:
chdir1 = chdir2 = 'relocate/src'
test.run_built_executable('program1', chdir=chdir1, stdout=expect1)
test.run_built_executable('program2', chdir=chdir2, stdout=expect2)
test.pass_test()
|
alzeih/ava
|
refs/heads/master
|
manage.py
|
5
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ava.settings.base")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
dgzurita/odoo
|
refs/heads/8.0
|
addons/stock_dropshipping/wizard/stock_invoice_onshipping.py
|
270
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class stock_invoice_onshipping(osv.osv_memory):
_inherit = "stock.invoice.onshipping"
def _get_journal_type(self, cr, uid, context=None):
if context is None:
context = {}
res_ids = context and context.get('active_ids', [])
pick_obj = self.pool.get('stock.picking')
pickings = pick_obj.browse(cr, uid, res_ids, context=context)
pick = pickings and pickings[0]
src_usage = pick.move_lines[0].location_id.usage
dest_usage = pick.move_lines[0].location_dest_id.usage
if src_usage == 'supplier' and dest_usage == 'customer':
            pick_purchase = (
                pick.move_lines and
                pick.move_lines[0].purchase_line_id and
                pick.move_lines[0].purchase_line_id.order_id.invoice_method ==
                'picking')
if pick_purchase:
return 'purchase'
else:
return 'sale'
else:
return super(stock_invoice_onshipping, self)._get_journal_type(cr, uid, context=context)
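    # Illustrative decision table for the override above (a dropshipping
    # picking moves goods straight from a supplier location to a customer
    # location):
    #   src='supplier', dest='customer', PO invoiced on picking -> 'purchase'
    #   src='supplier', dest='customer', otherwise              -> 'sale'
    #   any other src/dest combination                          -> parent logic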
_defaults = {
'journal_type': _get_journal_type,
}
|
ebu/PlugIt
|
refs/heads/master
|
tests/service_external/__init__.py
|
1
|
"""Test the simple service from an external point of view (Using HTTP requests)"""
from test_service_from_http import *
|
IIIIIIIIll/sdy_notes_liaoxf
|
refs/heads/master
|
LiaoXueFeng/Advanced_properties/List_Comprehensions.py
|
1
|
# Build the list [1, 2, ..., 10] directly from a range.
L = list(range(1, 11))
print(L)
# Build the list of squares with an explicit loop ...
L = []
for x in range(1, 11):
    L.append(x * x)
print(L)
# ... and the squares of the even numbers only, as a one-line comprehension.
print([x * x for x in range(1, 11) if x % 2 == 0])
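# Two further comprehension forms, shown for comparison (standard Python,
# added here only as illustration):
print([x * x if x % 2 == 0 else -x for x in range(1, 11)])  # if/else expression
print([n for row in [[1, 2], [3, 4]] for n in row])  # nested: flattens the rows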
|
MuzammilKhan/Ventriloquy
|
refs/heads/master
|
mysite/wordclips/utils/inputparser.py
|
1
|
class InputParser:
"""
Class for parsing a string of words separated by a delimiter
"""
def __init__(self, delim):
self.delim = delim
def parse(self, s):
"""
Parsing the string using the delimiter
Parameters
------
s -- a string of tokens to be parse (separated)
Return
------
wl -- a list of token separated using the delimiter
"""
wl = s.split(self.delim)
return wl
def parseDefault(self, s):
"""
Parsing the string using the default delimiter, the strength of default
delimiter is that the TAB and carrige return will also be ignored.
Parameters
------
Return
------
wl -- a list of token separated using the delimiter
"""
wl = s.split()
return wl
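# A small usage sketch (not part of the original module):
if __name__ == "__main__":
    parser = InputParser(",")
    # Split on the configured delimiter.
    print(parser.parse("alpha,beta,gamma"))           # ['alpha', 'beta', 'gamma']
    # The default parser splits on any whitespace, including tabs and newlines.
    print(parser.parseDefault("alpha\tbeta\ngamma"))  # ['alpha', 'beta', 'gamma']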
|
sovietspy2/uzletiProject
|
refs/heads/master
|
python/Lib/MimeWriter.py
|
67
|
"""Generic MIME writer.
This module defines the class MimeWriter. The MimeWriter class implements
a basic formatter for creating MIME multi-part files. It doesn't seek around
the output file nor does it use large amounts of buffer space. You must write
the parts out in the order that they should occur in the final file.
MimeWriter does buffer the headers you add, allowing you to rearrange their
order.
"""
import mimetools
__all__ = ["MimeWriter"]
import warnings
warnings.warn("the MimeWriter module is deprecated; use the email package instead",
DeprecationWarning, 2)
class MimeWriter:
"""Generic MIME writer.
Methods:
__init__()
addheader()
flushheaders()
startbody()
startmultipartbody()
nextpart()
lastpart()
A MIME writer is much more primitive than a MIME parser. It
doesn't seek around on the output file, and it doesn't use large
amounts of buffer space, so you have to write the parts in the
order they should occur on the output file. It does buffer the
headers you add, allowing you to rearrange their order.
General usage is:
f = <open the output file>
w = MimeWriter(f)
...call w.addheader(key, value) 0 or more times...
followed by either:
f = w.startbody(content_type)
...call f.write(data) for body data...
or:
w.startmultipartbody(subtype)
for each part:
subwriter = w.nextpart()
...use the subwriter's methods to create the subpart...
w.lastpart()
The subwriter is another MimeWriter instance, and should be
treated in the same way as the toplevel MimeWriter. This way,
writing recursive body parts is easy.
Warning: don't forget to call lastpart()!
XXX There should be more state so calls made in the wrong order
are detected.
Some special cases:
- startbody() just returns the file passed to the constructor;
but don't use this knowledge, as it may be changed.
- startmultipartbody() actually returns a file as well;
this can be used to write the initial 'if you can read this your
mailer is not MIME-aware' message.
- If you call flushheaders(), the headers accumulated so far are
written out (and forgotten); this is useful if you don't need a
body part at all, e.g. for a subpart of type message/rfc822
that's (mis)used to store some header-like information.
- Passing a keyword argument 'prefix=<flag>' to addheader(),
start*body() affects where the header is inserted; 0 means
append at the end, 1 means insert at the start; default is
append for addheader(), but insert for start*body(), which use
it to determine where the Content-Type header goes.
"""
def __init__(self, fp):
self._fp = fp
self._headers = []
def addheader(self, key, value, prefix=0):
"""Add a header line to the MIME message.
        The key is the name of the header; the value provides the header's
        contents. The optional argument prefix determines
where the header is inserted; 0 means append at the end, 1 means
insert at the start. The default is to append.
"""
lines = value.split("\n")
while lines and not lines[-1]: del lines[-1]
while lines and not lines[0]: del lines[0]
for i in range(1, len(lines)):
lines[i] = " " + lines[i].strip()
value = "\n".join(lines) + "\n"
line = key + ": " + value
if prefix:
self._headers.insert(0, line)
else:
self._headers.append(line)
def flushheaders(self):
"""Writes out and forgets all headers accumulated so far.
This is useful if you don't need a body part at all; for example,
for a subpart of type message/rfc822 that's (mis)used to store some
header-like information.
"""
self._fp.writelines(self._headers)
self._headers = []
def startbody(self, ctype, plist=[], prefix=1):
"""Returns a file-like object for writing the body of the message.
The content-type is set to the provided ctype, and the optional
parameter, plist, provides additional parameters for the
content-type declaration. The optional argument prefix determines
where the header is inserted; 0 means append at the end, 1 means
insert at the start. The default is to insert at the start.
"""
for name, value in plist:
ctype = ctype + ';\n %s=\"%s\"' % (name, value)
self.addheader("Content-Type", ctype, prefix=prefix)
self.flushheaders()
self._fp.write("\n")
return self._fp
def startmultipartbody(self, subtype, boundary=None, plist=[], prefix=1):
"""Returns a file-like object for writing the body of the message.
Additionally, this method initializes the multi-part code, where the
subtype parameter provides the multipart subtype, the boundary
parameter may provide a user-defined boundary specification, and the
plist parameter provides optional parameters for the subtype. The
optional argument, prefix, determines where the header is inserted;
0 means append at the end, 1 means insert at the start. The default
is to insert at the start. Subparts should be created using the
nextpart() method.
"""
self._boundary = boundary or mimetools.choose_boundary()
return self.startbody("multipart/" + subtype,
[("boundary", self._boundary)] + plist,
prefix=prefix)
def nextpart(self):
"""Returns a new instance of MimeWriter which represents an
individual part in a multipart message.
This may be used to write the part as well as used for creating
recursively complex multipart messages. The message must first be
initialized with the startmultipartbody() method before using the
nextpart() method.
"""
self._fp.write("\n--" + self._boundary + "\n")
return self.__class__(self._fp)
def lastpart(self):
"""This is used to designate the last part of a multipart message.
It should always be used when writing multipart messages.
"""
self._fp.write("\n--" + self._boundary + "--\n")
if __name__ == '__main__':
import test.test_MimeWriter
|
anderson7ru/bienestarues
|
refs/heads/master
|
cuentas_usuarioapp/admin.py
|
1
|
from django.contrib import admin
from cuentas_usuarioapp.models import UsuarioEmpleado
# Register your models here.
class UsuarioAdmin(admin.ModelAdmin):
list_display = ('codigoEmpleado','codigoUsuario')
admin.site.register(UsuarioEmpleado,UsuarioAdmin)
|
ice9js/servo
|
refs/heads/master
|
tests/wpt/css-tests/tools/html5lib/html5lib/treewalkers/pulldom.py
|
1729
|
from __future__ import absolute_import, division, unicode_literals
from xml.dom.pulldom import START_ELEMENT, END_ELEMENT, \
COMMENT, IGNORABLE_WHITESPACE, CHARACTERS
from . import _base
from ..constants import voidElements
class TreeWalker(_base.TreeWalker):
def __iter__(self):
ignore_until = None
previous = None
for event in self.tree:
if previous is not None and \
(ignore_until is None or previous[1] is ignore_until):
if previous[1] is ignore_until:
ignore_until = None
for token in self.tokens(previous, event):
yield token
if token["type"] == "EmptyTag":
ignore_until = previous[1]
previous = event
if ignore_until is None or previous[1] is ignore_until:
for token in self.tokens(previous, None):
yield token
elif ignore_until is not None:
raise ValueError("Illformed DOM event stream: void element without END_ELEMENT")
def tokens(self, event, next):
type, node = event
if type == START_ELEMENT:
name = node.nodeName
namespace = node.namespaceURI
attrs = {}
for attr in list(node.attributes.keys()):
attr = node.getAttributeNode(attr)
attrs[(attr.namespaceURI, attr.localName)] = attr.value
if name in voidElements:
for token in self.emptyTag(namespace,
name,
attrs,
not next or next[1] is not node):
yield token
else:
yield self.startTag(namespace, name, attrs)
elif type == END_ELEMENT:
name = node.nodeName
namespace = node.namespaceURI
if name not in voidElements:
yield self.endTag(namespace, name)
elif type == COMMENT:
yield self.comment(node.nodeValue)
elif type in (IGNORABLE_WHITESPACE, CHARACTERS):
for token in self.text(node.nodeValue):
yield token
else:
yield self.unknown(type)
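# Usage sketch (illustrative only; assumes just the stdlib pulldom API): the
# walker consumes a pulldom event stream and yields html5lib-style tokens.
#   from xml.dom.pulldom import parseString
#   for token in TreeWalker(parseString('<p class="x">hi<br/></p>')):
#       print(token)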
|
JackDanger/sentry
|
refs/heads/master
|
src/sentry/web/frontend/group_plugin_action.py
|
6
|
from __future__ import absolute_import, division
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import get_object_or_404
from sudo.utils import is_safe_url
from sentry.models import Group, GroupMeta
from sentry.plugins import plugins
from sentry.web.frontend.base import ProjectView
class GroupPluginActionView(ProjectView):
required_scope = 'event:read'
def handle(self, request, organization, team, project, group_id, slug):
group = get_object_or_404(Group, pk=group_id, project=project)
try:
plugin = plugins.get(slug)
except KeyError:
raise Http404('Plugin not found')
GroupMeta.objects.populate_cache([group])
response = plugin.get_view_response(request, group)
if response:
return response
redirect = request.META.get('HTTP_REFERER', '')
if not is_safe_url(redirect, host=request.get_host()):
redirect = '/{}/{}/'.format(
organization.slug,
group.project.slug,
)
return HttpResponseRedirect(redirect)
|
xin3liang/platform_external_chromium_org_tools_gyp
|
refs/heads/master
|
test/mac/gyptest-app-error.py
|
164
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that invalid strings files cause the build to fail.
"""
import TestCmd
import TestGyp
import sys
if sys.platform == 'darwin':
expected_error = 'Old-style plist parser: missing semicolon in dictionary'
saw_expected_error = [False] # Python2 has no "nonlocal" keyword.
def match(a, b):
if a == b:
return True
if not TestCmd.is_List(a):
a = a.split('\n')
if not TestCmd.is_List(b):
b = b.split('\n')
if expected_error in '\n'.join(a) + '\n'.join(b):
saw_expected_error[0] = True
return True
return False
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'], match=match)
test.run_gyp('test-error.gyp', chdir='app-bundle')
test.build('test-error.gyp', test.ALL, chdir='app-bundle')
# Ninja pipes stderr of subprocesses to stdout.
if test.format == 'ninja' and expected_error in test.stdout():
saw_expected_error[0] = True
if saw_expected_error[0]:
test.pass_test()
else:
test.fail_test()
|
openhatch/oh-mainline
|
refs/heads/master
|
vendor/packages/docutils/test/functional/tests/standalone_rst_pseudoxml.py
|
18
|
exec(open('functional/tests/_standalone_rst_defaults.py').read())
# Source and destination file names.
test_source = "standalone_rst_pseudoxml.txt"
test_destination = "standalone_rst_pseudoxml.txt"
# Keyword parameters passed to publish_file.
writer_name = "pseudoxml"
# Settings
# enable INFO-level system messages in this test:
settings_overrides['report_level'] = 1
|
siosio/intellij-community
|
refs/heads/master
|
python/helpers/py2only/docutils/transforms/misc.py
|
183
|
# $Id: misc.py 6314 2010-04-26 10:04:17Z milde $
# Author: David Goodger <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
Miscellaneous transforms.
"""
__docformat__ = 'reStructuredText'
from docutils import nodes
from docutils.transforms import Transform, TransformError
class CallBack(Transform):
"""
Inserts a callback into a document. The callback is called when the
transform is applied, which is determined by its priority.
For use with `nodes.pending` elements. Requires a ``details['callback']``
entry, a bound method or function which takes one parameter: the pending
node. Other data can be stored in the ``details`` attribute or in the
object hosting the callback method.
"""
default_priority = 990
def apply(self):
pending = self.startnode
pending.details['callback'](pending)
pending.parent.remove(pending)
class ClassAttribute(Transform):
"""
Move the "class" attribute specified in the "pending" node into the
immediately following non-comment element.
"""
default_priority = 210
def apply(self):
pending = self.startnode
parent = pending.parent
child = pending
while parent:
# Check for appropriate following siblings:
for index in range(parent.index(child) + 1, len(parent)):
element = parent[index]
if (isinstance(element, nodes.Invisible) or
isinstance(element, nodes.system_message)):
continue
element['classes'] += pending.details['class']
pending.parent.remove(pending)
return
else:
# At end of section or container; apply to sibling
child = parent
parent = parent.parent
error = self.document.reporter.error(
'No suitable element following "%s" directive'
% pending.details['directive'],
nodes.literal_block(pending.rawsource, pending.rawsource),
line=pending.line)
pending.replace_self(error)
class Transitions(Transform):
"""
Move transitions at the end of sections up the tree. Complain
on transitions after a title, at the beginning or end of the
document, and after another transition.
For example, transform this::
<section>
...
<transition>
<section>
...
into this::
<section>
...
<transition>
<section>
...
"""
default_priority = 830
def apply(self):
for node in self.document.traverse(nodes.transition):
self.visit_transition(node)
def visit_transition(self, node):
index = node.parent.index(node)
error = None
if (index == 0 or
isinstance(node.parent[0], nodes.title) and
(index == 1 or
isinstance(node.parent[1], nodes.subtitle) and
index == 2)):
assert (isinstance(node.parent, nodes.document) or
isinstance(node.parent, nodes.section))
error = self.document.reporter.error(
'Document or section may not begin with a transition.',
source=node.source, line=node.line)
elif isinstance(node.parent[index - 1], nodes.transition):
error = self.document.reporter.error(
'At least one body element must separate transitions; '
'adjacent transitions are not allowed.',
source=node.source, line=node.line)
if error:
# Insert before node and update index.
node.parent.insert(index, error)
index += 1
assert index < len(node.parent)
if index != len(node.parent) - 1:
# No need to move the node.
return
# Node behind which the transition is to be moved.
sibling = node
# While sibling is the last node of its parent.
while index == len(sibling.parent) - 1:
sibling = sibling.parent
# If sibling is the whole document (i.e. it has no parent).
if sibling.parent is None:
# Transition at the end of document. Do not move the
# transition up, and place an error behind.
error = self.document.reporter.error(
'Document may not end with a transition.',
line=node.line)
node.parent.insert(node.parent.index(node) + 1, error)
return
index = sibling.parent.index(sibling)
# Remove the original transition node.
node.parent.remove(node)
# Insert the transition after the sibling.
sibling.parent.insert(index + 1, node)
|
Universal-Model-Converter/UMC3.0a
|
refs/heads/master
|
data/Python/x86/Lib/site-packages/scipy/sparse/linalg/eigen/arpack/setup.py
|
3
|
#!/usr/bin/env python
from __future__ import division, print_function, absolute_import
from os.path import join
from scipy._build_utils import needs_g77_abi_wrapper
def configuration(parent_package='',top_path=None):
from numpy.distutils.system_info import get_info, NotFoundError
from numpy.distutils.misc_util import Configuration
    lapack_opt = get_info('lapack_opt')
    if not lapack_opt:
        raise NotFoundError('no lapack/blas resources found')
    config = Configuration('arpack', parent_package, top_path)
arpack_sources=[join('ARPACK','SRC', '*.f')]
arpack_sources.extend([join('ARPACK','UTIL', '*.f')])
arpack_sources.extend([join('ARPACK','LAPACK', '*.f')])
if needs_g77_abi_wrapper(lapack_opt):
arpack_sources += [join('ARPACK', 'FWRAPPERS', 'veclib_cabi_f.f'),
join('ARPACK', 'FWRAPPERS', 'veclib_cabi_c.c')]
else:
arpack_sources += [join('ARPACK', 'FWRAPPERS', 'dummy.f')]
config.add_library('arpack_scipy', sources=arpack_sources,
include_dirs=[join('ARPACK', 'SRC')],
depends = [join('ARPACK', 'FWRAPPERS',
'veclib_cabi_f.f'),
join('ARPACK', 'FWRAPPERS',
'veclib_cabi_c.c'),
join('ARPACK', 'FWRAPPERS',
'dummy.f')])
config.add_extension('_arpack',
sources='arpack.pyf.src',
libraries=['arpack_scipy'],
extra_info = lapack_opt
)
config.add_data_dir('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
|
victorbergelin/scikit-learn
|
refs/heads/master
|
sklearn/preprocessing/tests/test_function_transformer.py
|
176
|
from nose.tools import assert_equal
import numpy as np
from sklearn.preprocessing import FunctionTransformer
def _make_func(args_store, kwargs_store, func=lambda X, *a, **k: X):
def _func(X, *args, **kwargs):
args_store.append(X)
args_store.extend(args)
kwargs_store.update(kwargs)
return func(X)
return _func
def test_delegate_to_func():
# (args|kwargs)_store will hold the positional and keyword arguments
# passed to the function inside the FunctionTransformer.
args_store = []
kwargs_store = {}
X = np.arange(10).reshape((5, 2))
np.testing.assert_array_equal(
FunctionTransformer(_make_func(args_store, kwargs_store)).transform(X),
X,
'transform should have returned X unchanged',
)
    # The function should only have received X.
assert_equal(
args_store,
[X],
'Incorrect positional arguments passed to func: {args}'.format(
args=args_store,
),
)
assert_equal(
kwargs_store,
{},
'Unexpected keyword arguments passed to func: {args}'.format(
args=kwargs_store,
),
)
# reset the argument stores.
args_store[:] = [] # python2 compatible inplace list clear.
kwargs_store.clear()
y = object()
np.testing.assert_array_equal(
FunctionTransformer(
_make_func(args_store, kwargs_store),
pass_y=True,
).transform(X, y),
X,
'transform should have returned X unchanged',
)
    # The function should have received X and y.
assert_equal(
args_store,
[X, y],
'Incorrect positional arguments passed to func: {args}'.format(
args=args_store,
),
)
assert_equal(
kwargs_store,
{},
'Unexpected keyword arguments passed to func: {args}'.format(
args=kwargs_store,
),
)
def test_np_log():
X = np.arange(10).reshape((5, 2))
# Test that the numpy.log example still works.
np.testing.assert_array_equal(
FunctionTransformer(np.log1p).transform(X),
np.log1p(X),
)
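# For reference, a minimal stand-alone use of FunctionTransformer outside the
# test harness, exercising the same np.log1p path as test_np_log above:
#   import numpy as np
#   from sklearn.preprocessing import FunctionTransformer
#   X = np.arange(10).reshape((5, 2))
#   np.testing.assert_array_equal(
#       FunctionTransformer(np.log1p).transform(X), np.log1p(X))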
|
valexandersaulys/prudential_insurance_kaggle
|
refs/heads/master
|
venv/lib/python2.7/site-packages/sklearn/linear_model/tests/test_ridge.py
|
6
|
import numpy as np
import scipy.sparse as sp
from scipy import linalg
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns
from sklearn import datasets
from sklearn.metrics import mean_squared_error
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.ridge import ridge_regression
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.ridge import _RidgeGCV
from sklearn.linear_model.ridge import RidgeCV
from sklearn.linear_model.ridge import RidgeClassifier
from sklearn.linear_model.ridge import RidgeClassifierCV
from sklearn.linear_model.ridge import _solve_cholesky
from sklearn.linear_model.ridge import _solve_cholesky_kernel
from sklearn.datasets import make_regression
from sklearn.grid_search import GridSearchCV
from sklearn.cross_validation import KFold
diabetes = datasets.load_diabetes()
X_diabetes, y_diabetes = diabetes.data, diabetes.target
ind = np.arange(X_diabetes.shape[0])
rng = np.random.RandomState(0)
rng.shuffle(ind)
ind = ind[:200]
X_diabetes, y_diabetes = X_diabetes[ind], y_diabetes[ind]
iris = datasets.load_iris()
X_iris = sp.csr_matrix(iris.data)
y_iris = iris.target
DENSE_FILTER = lambda X: X
SPARSE_FILTER = lambda X: sp.csr_matrix(X)
def test_ridge():
# Ridge regression convergence test using score
# TODO: for this test to be robust, we should use a dataset instead
# of np.random.
rng = np.random.RandomState(0)
alpha = 1.0
for solver in ("svd", "sparse_cg", "cholesky", "lsqr", "sag"):
# With more samples than features
n_samples, n_features = 6, 5
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (X.shape[1], ))
assert_greater(ridge.score(X, y), 0.47)
if solver in ("cholesky", "sag"):
# Currently the only solvers to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.47)
# With more features than samples
n_samples, n_features = 5, 10
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), .9)
if solver in ("cholesky", "sag"):
# Currently the only solvers to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.9)
def test_primal_dual_relationship():
y = y_diabetes.reshape(-1, 1)
coef = _solve_cholesky(X_diabetes, y, alpha=[1e-2])
K = np.dot(X_diabetes, X_diabetes.T)
dual_coef = _solve_cholesky_kernel(K, y, alpha=[1e-2])
coef2 = np.dot(X_diabetes.T, dual_coef).T
assert_array_almost_equal(coef, coef2)
def test_ridge_singular():
# test on a singular matrix
rng = np.random.RandomState(0)
n_samples, n_features = 6, 6
y = rng.randn(n_samples // 2)
y = np.concatenate((y, y))
X = rng.randn(n_samples // 2, n_features)
X = np.concatenate((X, X), axis=0)
ridge = Ridge(alpha=0)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), 0.9)
def test_ridge_sample_weights():
rng = np.random.RandomState(0)
for solver in ("cholesky", ):
for n_samples, n_features in ((6, 5), (5, 10)):
for alpha in (1.0, 1e-2):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1 + rng.rand(n_samples)
coefs = ridge_regression(X, y,
alpha=alpha,
sample_weight=sample_weight,
solver=solver)
# Sample weight can be implemented via a simple rescaling
# for the square loss.
coefs2 = ridge_regression(
X * np.sqrt(sample_weight)[:, np.newaxis],
y * np.sqrt(sample_weight),
alpha=alpha, solver=solver)
assert_array_almost_equal(coefs, coefs2)
# Test for fit_intercept = True
est = Ridge(alpha=alpha, solver=solver)
est.fit(X, y, sample_weight=sample_weight)
# Check using Newton's Method
# Quadratic function should be solved in a single step.
# Initialize
sample_weight = np.sqrt(sample_weight)
X_weighted = sample_weight[:, np.newaxis] * (
np.column_stack((np.ones(n_samples), X)))
y_weighted = y * sample_weight
# Gradient is (X*coef-y)*X + alpha*coef_[1:]
# Remove coef since it is initialized to zero.
grad = -np.dot(y_weighted, X_weighted)
# Hessian is (X.T*X) + alpha*I except that the first
# diagonal element should be zero, since there is no
# penalization of intercept.
diag = alpha * np.ones(n_features + 1)
diag[0] = 0.
hess = np.dot(X_weighted.T, X_weighted)
hess.flat[::n_features + 2] += diag
coef_ = - np.dot(linalg.inv(hess), grad)
assert_almost_equal(coef_[0], est.intercept_)
assert_array_almost_equal(coef_[1:], est.coef_)
def test_ridge_shapes():
# Test shape of coef_ and intercept_
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y1 = y[:, np.newaxis]
Y = np.c_[y, 1 + y]
ridge = Ridge()
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (n_features,))
assert_equal(ridge.intercept_.shape, ())
ridge.fit(X, Y1)
assert_equal(ridge.coef_.shape, (1, n_features))
assert_equal(ridge.intercept_.shape, (1, ))
ridge.fit(X, Y)
assert_equal(ridge.coef_.shape, (2, n_features))
assert_equal(ridge.intercept_.shape, (2, ))
def test_ridge_intercept():
# Test intercept with multiple targets GH issue #708
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y = np.c_[y, 1. + y]
ridge = Ridge()
ridge.fit(X, y)
intercept = ridge.intercept_
ridge.fit(X, Y)
assert_almost_equal(ridge.intercept_[0], intercept)
assert_almost_equal(ridge.intercept_[1], intercept + 1.)
def test_toy_ridge_object():
# Test BayesianRegression ridge classifier
# TODO: test also n_samples > n_features
X = np.array([[1], [2]])
Y = np.array([1, 2])
clf = Ridge(alpha=0.0)
clf.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_almost_equal(clf.predict(X_test), [1., 2, 3, 4])
assert_equal(len(clf.coef_.shape), 1)
assert_equal(type(clf.intercept_), np.float64)
Y = np.vstack((Y, Y)).T
clf.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_equal(len(clf.coef_.shape), 2)
assert_equal(type(clf.intercept_), np.ndarray)
def test_ridge_vs_lstsq():
# On alpha=0., Ridge and OLS yield the same solution.
rng = np.random.RandomState(0)
# we need more samples than features
n_samples, n_features = 5, 4
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=0., fit_intercept=False)
ols = LinearRegression(fit_intercept=False)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
def test_ridge_individual_penalties():
# Tests the ridge object using individual penalties
rng = np.random.RandomState(42)
n_samples, n_features, n_targets = 20, 10, 5
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples, n_targets)
penalties = np.arange(n_targets)
coef_cholesky = np.array([
Ridge(alpha=alpha, solver="cholesky").fit(X, target).coef_
for alpha, target in zip(penalties, y.T)])
coefs_indiv_pen = [
Ridge(alpha=penalties, solver=solver, tol=1e-8).fit(X, y).coef_
for solver in ['svd', 'sparse_cg', 'lsqr', 'cholesky', 'sag']]
for coef_indiv_pen in coefs_indiv_pen:
assert_array_almost_equal(coef_cholesky, coef_indiv_pen)
# Test error is raised when number of targets and penalties do not match.
ridge = Ridge(alpha=penalties[:-1])
assert_raises(ValueError, ridge.fit, X, y)
def _test_ridge_loo(filter_):
# test that can work with both dense or sparse matrices
n_samples = X_diabetes.shape[0]
ret = []
ridge_gcv = _RidgeGCV(fit_intercept=False)
ridge = Ridge(alpha=1.0, fit_intercept=False)
# generalized cross-validation (efficient leave-one-out)
decomp = ridge_gcv._pre_compute(X_diabetes, y_diabetes)
errors, c = ridge_gcv._errors(1.0, y_diabetes, *decomp)
values, c = ridge_gcv._values(1.0, y_diabetes, *decomp)
# brute-force leave-one-out: remove one example at a time
errors2 = []
values2 = []
for i in range(n_samples):
sel = np.arange(n_samples) != i
X_new = X_diabetes[sel]
y_new = y_diabetes[sel]
ridge.fit(X_new, y_new)
value = ridge.predict([X_diabetes[i]])[0]
error = (y_diabetes[i] - value) ** 2
errors2.append(error)
values2.append(value)
# check that efficient and brute-force LOO give same results
assert_almost_equal(errors, errors2)
assert_almost_equal(values, values2)
# generalized cross-validation (efficient leave-one-out,
# SVD variation)
decomp = ridge_gcv._pre_compute_svd(X_diabetes, y_diabetes)
errors3, c = ridge_gcv._errors_svd(ridge.alpha, y_diabetes, *decomp)
values3, c = ridge_gcv._values_svd(ridge.alpha, y_diabetes, *decomp)
# check that efficient and SVD efficient LOO give same results
assert_almost_equal(errors, errors3)
assert_almost_equal(values, values3)
# check best alpha
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
alpha_ = ridge_gcv.alpha_
ret.append(alpha_)
# check that we get same best alpha with custom loss_func
f = ignore_warnings
scoring = make_scorer(mean_squared_error, greater_is_better=False)
ridge_gcv2 = RidgeCV(fit_intercept=False, scoring=scoring)
f(ridge_gcv2.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv2.alpha_, alpha_)
# check that we get same best alpha with custom score_func
func = lambda x, y: -mean_squared_error(x, y)
scoring = make_scorer(func)
ridge_gcv3 = RidgeCV(fit_intercept=False, scoring=scoring)
f(ridge_gcv3.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv3.alpha_, alpha_)
# check that we get same best alpha with a scorer
scorer = get_scorer('mean_squared_error')
ridge_gcv4 = RidgeCV(fit_intercept=False, scoring=scorer)
ridge_gcv4.fit(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv4.alpha_, alpha_)
# check that we get same best alpha with sample weights
ridge_gcv.fit(filter_(X_diabetes), y_diabetes,
sample_weight=np.ones(n_samples))
assert_equal(ridge_gcv.alpha_, alpha_)
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
ridge_gcv.fit(filter_(X_diabetes), Y)
Y_pred = ridge_gcv.predict(filter_(X_diabetes))
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge_gcv.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=5)
return ret
def _test_ridge_cv(filter_):
n_samples = X_diabetes.shape[0]
ridge_cv = RidgeCV()
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
cv = KFold(n_samples, 5)
ridge_cv.set_params(cv=cv)
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
def _test_ridge_diabetes(filter_):
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), y_diabetes)
return np.round(ridge.score(filter_(X_diabetes), y_diabetes), 5)
def _test_multi_ridge_diabetes(filter_):
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
n_features = X_diabetes.shape[1]
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), Y)
assert_equal(ridge.coef_.shape, (2, n_features))
Y_pred = ridge.predict(filter_(X_diabetes))
ridge.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=3)
def _test_ridge_classifiers(filter_):
n_classes = np.unique(y_iris).shape[0]
n_features = X_iris.shape[1]
for clf in (RidgeClassifier(), RidgeClassifierCV()):
clf.fit(filter_(X_iris), y_iris)
assert_equal(clf.coef_.shape, (n_classes, n_features))
y_pred = clf.predict(filter_(X_iris))
assert_greater(np.mean(y_iris == y_pred), .79)
n_samples = X_iris.shape[0]
cv = KFold(n_samples, 5)
clf = RidgeClassifierCV(cv=cv)
clf.fit(filter_(X_iris), y_iris)
y_pred = clf.predict(filter_(X_iris))
assert_true(np.mean(y_iris == y_pred) >= 0.8)
def _test_tolerance(filter_):
ridge = Ridge(tol=1e-5, fit_intercept=False)
ridge.fit(filter_(X_diabetes), y_diabetes)
score = ridge.score(filter_(X_diabetes), y_diabetes)
ridge2 = Ridge(tol=1e-3, fit_intercept=False)
ridge2.fit(filter_(X_diabetes), y_diabetes)
score2 = ridge2.score(filter_(X_diabetes), y_diabetes)
assert_true(score >= score2)
# ignore warning that solvers are changed to SAG for
# temporary fix
@ignore_warnings
def test_dense_sparse():
for test_func in (_test_ridge_loo,
_test_ridge_cv,
_test_ridge_diabetes,
_test_multi_ridge_diabetes,
_test_ridge_classifiers,
_test_tolerance):
# test dense matrix
ret_dense = test_func(DENSE_FILTER)
# test sparse matrix
ret_sparse = test_func(SPARSE_FILTER)
# test that the outputs are the same
if ret_dense is not None and ret_sparse is not None:
assert_array_almost_equal(ret_dense, ret_sparse, decimal=3)
def test_ridge_cv_sparse_svd():
X = sp.csr_matrix(X_diabetes)
ridge = RidgeCV(gcv_mode="svd")
assert_raises(TypeError, ridge.fit, X)
def test_ridge_sparse_svd():
X = sp.csc_matrix(rng.rand(100, 10))
y = rng.rand(100)
ridge = Ridge(solver='svd', fit_intercept=False)
assert_raises(TypeError, ridge.fit, X, y)
def test_class_weights():
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = RidgeClassifier(class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weights to class 1
clf = RidgeClassifier(class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
# check if class_weight = 'balanced' can handle negative labels.
clf = RidgeClassifier(class_weight='balanced')
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# class_weight = 'balanced', and class_weight = None should return
# same values when y has equal number of all labels
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0], [1.0, 1.0]])
y = [1, 1, -1, -1]
clf = RidgeClassifier(class_weight=None)
clf.fit(X, y)
clfa = RidgeClassifier(class_weight='balanced')
clfa.fit(X, y)
assert_equal(len(clfa.classes_), 2)
assert_array_almost_equal(clf.coef_, clfa.coef_)
assert_array_almost_equal(clf.intercept_, clfa.intercept_)
def test_class_weight_vs_sample_weight():
"""Check class_weights resemble sample_weights behavior."""
for clf in (RidgeClassifier, RidgeClassifierCV):
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = clf()
clf1.fit(iris.data, iris.target)
clf2 = clf(class_weight='balanced')
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.coef_, clf2.coef_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = clf()
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = clf(class_weight=class_weight)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.coef_, clf2.coef_)
# Check that sample_weight and class_weight are multiplicative
clf1 = clf()
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = clf(class_weight=class_weight)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.coef_, clf2.coef_)
def test_class_weights_cv():
# Test class weights for cross validated ridge classifier.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = RidgeClassifierCV(class_weight=None, alphas=[.01, .1, 1])
clf.fit(X, y)
# we give a small weights to class 1
clf = RidgeClassifierCV(class_weight={1: 0.001}, alphas=[.01, .1, 1, 10])
clf.fit(X, y)
assert_array_equal(clf.predict([[-.2, 2]]), np.array([-1]))
def test_ridgecv_store_cv_values():
# Test _RidgeCV's store_cv_values attribute.
    rng = np.random.RandomState(42)
n_samples = 8
n_features = 5
x = rng.randn(n_samples, n_features)
alphas = [1e-1, 1e0, 1e1]
n_alphas = len(alphas)
r = RidgeCV(alphas=alphas, store_cv_values=True)
# with len(y.shape) == 1
y = rng.randn(n_samples)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_alphas))
# with len(y.shape) == 2
n_responses = 3
y = rng.randn(n_samples, n_responses)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_responses, n_alphas))
def test_ridgecv_sample_weight():
rng = np.random.RandomState(0)
alphas = (0.1, 1.0, 10.0)
# There are different algorithms for n_samples > n_features
# and the opposite, so test them both.
for n_samples, n_features in ((6, 5), (5, 10)):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1 + rng.rand(n_samples)
cv = KFold(n_samples, 5)
ridgecv = RidgeCV(alphas=alphas, cv=cv)
ridgecv.fit(X, y, sample_weight=sample_weight)
# Check using GridSearchCV directly
parameters = {'alpha': alphas}
fit_params = {'sample_weight': sample_weight}
gs = GridSearchCV(Ridge(), parameters, fit_params=fit_params,
cv=cv)
gs.fit(X, y)
assert_equal(ridgecv.alpha_, gs.best_estimator_.alpha)
assert_array_almost_equal(ridgecv.coef_, gs.best_estimator_.coef_)
def test_raises_value_error_if_sample_weights_greater_than_1d():
# Sample weights must be either scalar or 1D
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights_OK = rng.randn(n_samples) ** 2 + 1
sample_weights_OK_1 = 1.
sample_weights_OK_2 = 2.
sample_weights_not_OK = sample_weights_OK[:, np.newaxis]
sample_weights_not_OK_2 = sample_weights_OK[np.newaxis, :]
ridge = Ridge(alpha=1)
# make sure the "OK" sample weights actually work
ridge.fit(X, y, sample_weights_OK)
ridge.fit(X, y, sample_weights_OK_1)
ridge.fit(X, y, sample_weights_OK_2)
def fit_ridge_not_ok():
ridge.fit(X, y, sample_weights_not_OK)
def fit_ridge_not_ok_2():
ridge.fit(X, y, sample_weights_not_OK_2)
assert_raise_message(ValueError,
"Sample weights must be 1D array or scalar",
fit_ridge_not_ok)
assert_raise_message(ValueError,
"Sample weights must be 1D array or scalar",
fit_ridge_not_ok_2)
def test_sparse_design_with_sample_weights():
# Sample weights must work with sparse matrices
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
sparse_matrix_converters = [sp.coo_matrix,
sp.csr_matrix,
sp.csc_matrix,
sp.lil_matrix,
sp.dok_matrix
]
sparse_ridge = Ridge(alpha=1., fit_intercept=False)
dense_ridge = Ridge(alpha=1., fit_intercept=False)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights = rng.randn(n_samples) ** 2 + 1
for sparse_converter in sparse_matrix_converters:
X_sparse = sparse_converter(X)
sparse_ridge.fit(X_sparse, y, sample_weight=sample_weights)
dense_ridge.fit(X, y, sample_weight=sample_weights)
assert_array_almost_equal(sparse_ridge.coef_, dense_ridge.coef_,
decimal=6)
def test_raises_value_error_if_solver_not_supported():
# Tests whether a ValueError is raised if a non-identified solver
# is passed to ridge_regression
wrong_solver = "This is not a solver (MagritteSolveCV QuantumBitcoin)"
exception = ValueError
message = "Solver %s not understood" % wrong_solver
def func():
X = np.eye(3)
y = np.ones(3)
ridge_regression(X, y, alpha=1., solver=wrong_solver)
assert_raise_message(exception, message, func)
def test_sparse_cg_max_iter():
reg = Ridge(solver="sparse_cg", max_iter=1)
reg.fit(X_diabetes, y_diabetes)
assert_equal(reg.coef_.shape[0], X_diabetes.shape[1])
@ignore_warnings
def test_n_iter():
# Test that self.n_iter_ is correct.
n_targets = 2
X, y = X_diabetes, y_diabetes
y_n = np.tile(y, (n_targets, 1)).T
for max_iter in range(1, 4):
for solver in ('sag', 'lsqr'):
reg = Ridge(solver=solver, max_iter=max_iter, tol=1e-12)
reg.fit(X, y_n)
assert_array_equal(reg.n_iter_, np.tile(max_iter, n_targets))
for solver in ('sparse_cg', 'svd', 'cholesky'):
reg = Ridge(solver=solver, max_iter=1, tol=1e-1)
reg.fit(X, y_n)
assert_equal(reg.n_iter_, None)
def test_ridge_fit_intercept_sparse():
X, y = make_regression(n_samples=1000, n_features=2, n_informative=2,
bias=10., random_state=42)
X_csr = sp.csr_matrix(X)
dense = Ridge(alpha=1., tol=1.e-15, solver='sag', fit_intercept=True)
sparse = Ridge(alpha=1., tol=1.e-15, solver='sag', fit_intercept=True)
dense.fit(X, y)
sparse.fit(X_csr, y)
assert_almost_equal(dense.intercept_, sparse.intercept_)
assert_array_almost_equal(dense.coef_, sparse.coef_)
# test the solver switch and the corresponding warning
sparse = Ridge(alpha=1., tol=1.e-15, solver='lsqr', fit_intercept=True)
assert_warns(UserWarning, sparse.fit, X_csr, y)
assert_almost_equal(dense.intercept_, sparse.intercept_)
assert_array_almost_equal(dense.coef_, sparse.coef_)
|
GoogleCloudPlatform/training-data-analyst
|
refs/heads/master
|
courses/machine_learning/deepdive2/structured/solutions/serving/application/lib/pyasn1/compat/dateandtime.py
|
26
|
#
# This file is part of pyasn1 software.
#
# Copyright (c) 2005-2019, Ilya Etingof <[email protected]>
# License: http://snmplabs.com/pyasn1/license.html
#
import time
from datetime import datetime
from sys import version_info
__all__ = ['strptime']
if version_info[:2] <= (2, 4):
def strptime(text, dateFormat):
return datetime(*(time.strptime(text, dateFormat)[0:6]))
else:
def strptime(text, dateFormat):
return datetime.strptime(text, dateFormat)
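# Usage sketch (illustrative): both branches above accept the same arguments.
#   strptime('20170916234254', '%Y%m%d%H%M%S')
#   # -> datetime.datetime(2017, 9, 16, 23, 42, 54)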
|
dvitme/odoo-addons
|
refs/heads/8.0
|
partner_credit_limit/__init__.py
|
40
|
# -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from . import sale
|
dionisos2/python_libs
|
refs/heads/master
|
checking.py
|
1
|
def is_all_instance(iterable, aclass):
    """Return True if every element of `iterable` is an instance of `aclass`."""
    ok = True
    for element in iterable:
        ok &= isinstance(element, aclass)
    return ok
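# Usage sketch (not part of the original module):
if __name__ == "__main__":
    assert is_all_instance([1, 2, 3], int)
    assert not is_all_instance([1, "2", 3], int)
    # Note: unlike the built-in all(), the loop above does not short-circuit.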
|
andrewleech/SickRage
|
refs/heads/master
|
lib/requests/packages/chardet/constants.py
|
3007
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
_debug = 0
eDetecting = 0
eFoundIt = 1
eNotMe = 2
eStart = 0
eError = 1
eItsMe = 2
SHORTCUT_THRESHOLD = 0.95
|
ryfeus/lambda-packs
|
refs/heads/master
|
Tensorflow_Pandas_Numpy/source3.6/tensorflow/contrib/learn/python/learn/estimators/run_config.py
|
33
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Run Config."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import os
import six
from tensorflow.contrib.framework.python.framework import experimental
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.estimator import run_config as core_run_config
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
# A list of the property names in RunConfig that the user is allowed to
# change. They will not affect the execution framework, so when the execution
# framework checks the `uid` of the RunConfig, they should be ignored.
_DEFAULT_UID_WHITE_LIST = [
'tf_random_seed',
'save_summary_steps',
'save_checkpoints_steps',
'save_checkpoints_secs',
'session_config',
'keep_checkpoint_max',
'keep_checkpoint_every_n_hours',
'log_step_count_steps',
]
class Environment(object):
# For running general distributed training.
CLOUD = 'cloud'
# For running Google-internal distributed training.
GOOGLE = 'google'
# For running on local desktop.
LOCAL = 'local'
class TaskType(object):
MASTER = 'master'
PS = 'ps'
WORKER = 'worker'
class ClusterConfig(object):
"""This class specifies the configurations for a distributed run.
If you're using an `Estimator`, you should probably use the subclass
RunConfig instead.
"""
def __init__(self, master=None, evaluation_master=None):
"""Constructor.
Sets the properties `cluster_spec`, `is_chief`, `master` (if `None` in the
args), `num_ps_replicas`, `task_id`, and `task_type` based on the
`TF_CONFIG` environment variable, if the pertinent information is
present. The `TF_CONFIG` environment variable is a JSON object with
attributes: `cluster`, `environment`, and `task`.
`cluster` is a JSON serialized version of `ClusterSpec`'s Python dict from
`server_lib.py`, mapping task types (usually one of the TaskType enums) to a
list of task addresses.
`environment` specifies the runtime environment for the job (usually one of
the `Environment` enums). Defaults to `LOCAL`.
`task` has two attributes: `type` and `index`, where `type` can be any of
the task types in `cluster`. When `TF_CONFIG` contains said information, the
following properties are set on this class:
* `task_type` is set to `TF_CONFIG['task']['type']`. Defaults to `None`.
* `task_id` is set to `TF_CONFIG['task']['index']`. Defaults to 0.
* `cluster_spec` is parsed from `TF_CONFIG['cluster']`. Defaults to {}.
* `master` is determined by looking up `task_type` and `task_id` in the
`cluster_spec`. Defaults to ''.
* `num_ps_replicas` is set by counting the number of nodes listed
in the `ps` attribute of `cluster_spec`. Defaults to 0.
* `num_worker_replicas` is set by counting the number of nodes listed
in the `worker` attribute of `cluster_spec`. Defaults to 0.
    * `is_chief` is determined based on `task_type`, `task_id`, and
      `environment`.
Example:
```
cluster = {'ps': ['host1:2222', 'host2:2222'],
'worker': ['host3:2222', 'host4:2222', 'host5:2222']}
os.environ['TF_CONFIG'] = json.dumps(
{'cluster': cluster,
'task': {'type': 'worker', 'index': 1}})
config = ClusterConfig()
assert config.master == 'host4:2222'
assert config.task_id == 1
assert config.num_ps_replicas == 2
assert config.num_worker_replicas == 3
assert config.cluster_spec == server_lib.ClusterSpec(cluster)
assert config.task_type == 'worker'
assert not config.is_chief
```
Args:
master: TensorFlow master. Defaults to empty string for local.
evaluation_master: The master on which to perform evaluation.
"""
# If not explicitly specified in the constructor and the TF_CONFIG
# environment variable is present, load cluster_spec from TF_CONFIG.
config = json.loads(os.environ.get('TF_CONFIG') or '{}')
# Set task_type and task_id if the TF_CONFIG environment variable is
# present. Otherwise, use the respective default (None / 0).
task_env = config.get('task', {})
self._task_type = task_env.get('type', None)
self._task_id = self.get_task_id()
self._cluster_spec = server_lib.ClusterSpec(config.get('cluster', {}))
self._master = (master if master is not None else
_get_master(self._cluster_spec, self._task_type,
self._task_id) or '')
self._num_ps_replicas = _count_ps(self._cluster_spec) or 0
self._num_worker_replicas = _count_worker(self._cluster_spec) or 0
# Set is_chief.
self._environment = config.get('environment', Environment.LOCAL)
self._is_chief = None
if self._task_type is None:
self._is_chief = (self._task_id == 0)
elif self._environment == Environment.CLOUD:
      # When the TF_CONFIG environment variable is set, is_chief defaults to
      # True when task_type is "master" and task_id is 0.
self._is_chief = (self._task_type == TaskType.MASTER and
self._task_id == 0)
else:
# Legacy behavior is that is_chief is None if task_id == 0.
self._is_chief = (self._task_type == TaskType.WORKER and
self._task_id == 0)
self._evaluation_master = evaluation_master or ''
@property
def cluster_spec(self):
return self._cluster_spec
@property
def environment(self):
return self._environment
@property
def evaluation_master(self):
return self._evaluation_master
@property
def is_chief(self):
return self._is_chief
@property
def master(self):
return self._master
@property
def num_ps_replicas(self):
return self._num_ps_replicas
@property
def num_worker_replicas(self):
return self._num_worker_replicas
@property
def task_id(self):
return self._task_id
@property
def task_type(self):
return self._task_type
@staticmethod
def get_task_id():
"""Returns task index from `TF_CONFIG` environmental variable.
If you have a ClusterConfig instance, you can just access its task_id
property instead of calling this function and re-parsing the environmental
variable.
Returns:
`TF_CONFIG['task']['index']`. Defaults to 0.
"""
config = json.loads(os.environ.get('TF_CONFIG') or '{}')
task_env = config.get('task', {})
task_index = task_env.get('index')
return int(task_index) if task_index else 0
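    # Usage sketch (illustrative):
    #   os.environ['TF_CONFIG'] = json.dumps({'task': {'type': 'worker',
    #                                                  'index': 2}})
    #   ClusterConfig.get_task_id()  # -> 2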
class RunConfig(ClusterConfig, core_run_config.RunConfig):
"""This class specifies the configurations for an `Estimator` run.
This class is the implementation of @{tf.estimator.RunConfig} interface.
"""
_USE_DEFAULT = 0
def __init__(self,
master=None,
num_cores=0,
log_device_placement=False,
gpu_memory_fraction=1,
tf_random_seed=None,
save_summary_steps=100,
save_checkpoints_secs=_USE_DEFAULT,
save_checkpoints_steps=None,
keep_checkpoint_max=5,
keep_checkpoint_every_n_hours=10000,
log_step_count_steps=100,
evaluation_master='',
model_dir=None,
session_config=None):
"""Constructor.
The superclass `ClusterConfig` may set properties like `cluster_spec`,
`is_chief`, `master` (if `None` in the args), `num_ps_replicas`, `task_id`,
and `task_type` based on the `TF_CONFIG` environment variable. See
`ClusterConfig` for more details.
    N.B.: If `save_checkpoints_steps` or `save_checkpoints_secs` is set,
    `keep_checkpoint_max` might need to be adjusted accordingly, especially in
    distributed training. For example, setting `save_checkpoints_secs` to 60
    without adjusting `keep_checkpoint_max` (defaults to 5) leads to a
    situation where checkpoints are garbage collected after 5 minutes. In
    distributed training, the evaluation job starts asynchronously and might
    fail to load or find the checkpoint due to a race condition.
Args:
master: TensorFlow master. Defaults to empty string for local.
num_cores: Number of cores to be used. If 0, the system picks an
appropriate number (default: 0).
log_device_placement: Log the op placement to devices (default: False).
gpu_memory_fraction: Fraction of GPU memory used by the process on
each GPU uniformly on the same machine.
tf_random_seed: Random seed for TensorFlow initializers.
Setting this value allows consistency between reruns.
save_summary_steps: Save summaries every this many steps.
save_checkpoints_secs: Save checkpoints every this many seconds. Can not
be specified with `save_checkpoints_steps`.
save_checkpoints_steps: Save checkpoints every this many steps. Can not be
specified with `save_checkpoints_secs`.
keep_checkpoint_max: The maximum number of recent checkpoint files to
keep. As new files are created, older files are deleted. If None or 0,
all checkpoint files are kept. Defaults to 5 (that is, the 5 most recent
checkpoint files are kept.)
keep_checkpoint_every_n_hours: Number of hours between each checkpoint
to be saved. The default value of 10,000 hours effectively disables
the feature.
log_step_count_steps: The frequency, in number of global steps, that the
global step/sec will be logged during training.
evaluation_master: the master on which to perform evaluation.
model_dir: directory where model parameters, graph etc are saved. If
`None`, will use `model_dir` property in `TF_CONFIG` environment
variable. If both are set, must have same value. If both are `None`, see
`Estimator` about where the model will be saved.
session_config: a ConfigProto used to set session parameters, or None.
Note - using this argument, it is easy to provide settings which break
otherwise perfectly good models. Use with care.
"""
super(RunConfig, self).__init__(
master=master, evaluation_master=evaluation_master)
gpu_options = config_pb2.GPUOptions(
per_process_gpu_memory_fraction=gpu_memory_fraction)
self._tf_config = config_pb2.ConfigProto(
log_device_placement=log_device_placement,
inter_op_parallelism_threads=num_cores,
intra_op_parallelism_threads=num_cores,
gpu_options=gpu_options)
self._tf_random_seed = tf_random_seed
self._save_summary_steps = save_summary_steps
self._save_checkpoints_secs = save_checkpoints_secs
self._log_step_count_steps = log_step_count_steps
self._session_config = session_config
if save_checkpoints_secs == RunConfig._USE_DEFAULT:
if save_checkpoints_steps is None:
self._save_checkpoints_secs = 600
else:
self._save_checkpoints_secs = None
self._save_checkpoints_steps = save_checkpoints_steps
# TODO(weiho): Remove these after ModelFn refactoring, when users can
# create Scaffold and Saver in their model_fn to set these.
self._keep_checkpoint_max = keep_checkpoint_max
self._keep_checkpoint_every_n_hours = keep_checkpoint_every_n_hours
self._model_dir = _get_model_dir(model_dir)
@experimental
def uid(self, whitelist=None):
"""Generates a 'Unique Identifier' based on all internal fields.
    Callers should use the uid string to check `RunConfig` instance integrity
    within one session use, but should not rely on the implementation details,
    which are subject to change.
Args:
      whitelist: A list of the string names of the properties uid should not
        include. If `None`, defaults to `_DEFAULT_UID_WHITE_LIST`, which
        includes most properties users are allowed to change.
Returns:
A uid string.
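    For example, a hypothetical call `config.uid(whitelist=['tf_random_seed'])`
    returns a uid that is insensitive to changes of `tf_random_seed`.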
"""
if whitelist is None:
whitelist = _DEFAULT_UID_WHITE_LIST
state = {k: v for k, v in self.__dict__.items() if not k.startswith('__')}
# Pop out the keys in whitelist.
for k in whitelist:
state.pop('_' + k, None)
ordered_state = collections.OrderedDict(
sorted(state.items(), key=lambda t: t[0]))
    # For class instances without __repr__, some special care is required.
    # Otherwise, the object address would be used.
if '_cluster_spec' in ordered_state:
ordered_state['_cluster_spec'] = collections.OrderedDict(
sorted(ordered_state['_cluster_spec'].as_dict().items(),
key=lambda t: t[0]))
return ', '.join(
'%s=%r' % (k, v) for (k, v) in six.iteritems(ordered_state))
@property
def model_dir(self):
return self._model_dir
@property
def tf_config(self):
return self._tf_config
@property
def tf_random_seed(self):
return self._tf_random_seed
@property
def save_summary_steps(self):
return self._save_summary_steps
@property
def save_checkpoints_secs(self):
return self._save_checkpoints_secs
@property
def save_checkpoints_steps(self):
return self._save_checkpoints_steps
@property
def session_config(self):
return self._session_config
@property
def keep_checkpoint_max(self):
return self._keep_checkpoint_max
@property
def keep_checkpoint_every_n_hours(self):
return self._keep_checkpoint_every_n_hours
@property
def log_step_count_steps(self):
return self._log_step_count_steps
def _count_ps(cluster_spec):
"""Counts the number of parameter servers in cluster_spec."""
return len(cluster_spec.as_dict().get('ps', [])) if cluster_spec else 0
def _count_worker(cluster_spec):
"""Counts the number of workers in cluster_spec.
Workers with TaskType.WORKER and TaskType.MASTER are included in the return
value.
Args:
cluster_spec: a ClusterSpec instance that describes current deployment.
Returns:
The total number of eligible workers.
If 'cluster_spec' was None, then 0 is returned.
"""
return (len(cluster_spec.as_dict().get('worker', [])) +
len(cluster_spec.as_dict().get('master', []))) if cluster_spec else 0
def _get_master(cluster_spec, task_type, task_id):
"""Returns the appropriate string for the TensorFlow master."""
if not cluster_spec:
return ''
# If there is only one node in the cluster, do things locally.
jobs = cluster_spec.jobs
if len(jobs) == 1 and len(cluster_spec.job_tasks(jobs[0])) == 1:
return ''
# Lookup the master in cluster_spec using task_type and task_id,
# if possible.
if task_type:
if task_type not in jobs:
raise ValueError(
'%s is not a valid task_type in the cluster_spec:\n'
'%s\n\n'
'Note that these values may be coming from the TF_CONFIG environment '
'variable.' % (task_type, cluster_spec))
addresses = cluster_spec.job_tasks(task_type)
if task_id >= len(addresses) or task_id < 0:
raise ValueError(
'%d is not a valid task_id for task_type %s in the '
'cluster_spec:\n'
'%s\n\n'
        'Note that these values may be coming from the TF_CONFIG environment '
'variable.' % (task_id, task_type, cluster_spec))
return 'grpc://' + addresses[task_id]
# For backwards compatibility, we return empty string if task_type was
# not set (task_type did not previously exist).
return ''
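# A sketch of `_get_master` with a hypothetical cluster spec:
#   spec = tf.train.ClusterSpec({'ps': ['ps0:2222'],
#                                'worker': ['w0:2222', 'w1:2222']})
#   _get_master(spec, 'worker', 1)  # -> 'grpc://w1:2222'
#   _get_master(None, 'worker', 1)  # -> '' (run locally)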
def _get_model_dir(model_dir):
"""Returns `model_dir` based user provided `model_dir` or `TF_CONFIG`."""
model_dir_in_tf_config = json.loads(
os.environ.get('TF_CONFIG') or '{}').get('model_dir', None)
if model_dir_in_tf_config is not None:
if model_dir is not None and model_dir_in_tf_config != model_dir:
raise ValueError(
        '`model_dir` provided in RunConfig constructor, if set, '
'must have the same value as the model_dir in TF_CONFIG. '
'model_dir: {}\nTF_CONFIG["model_dir"]: {}.\n'.format(
model_dir, model_dir_in_tf_config))
logging.info('Using model_dir in TF_CONFIG: %s', model_dir_in_tf_config)
return model_dir or model_dir_in_tf_config
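# A sketch of `_get_model_dir` resolution with hypothetical values:
#   os.environ['TF_CONFIG'] = '{"model_dir": "/tmp/model"}'
#   _get_model_dir(None)          # -> '/tmp/model' (taken from TF_CONFIG)
#   _get_model_dir('/tmp/model')  # -> '/tmp/model' (both sources agree)
#   _get_model_dir('/tmp/other')  # raises ValueError (conflicting values)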
|
aiorchestra/aiorchestra-openstack-plugin
|
refs/heads/master
|
openstack_plugin/compute/instances.py
|
1
|
# Author: Denys Makogon
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from aiorchestra.core import utils
COMPUTE_ACTIVE = 'ACTIVE'
COMPUTE_BUILD = 'BUILD'
COMPUTE_SHUTOFF = 'SHUTOFF'
SERVER_TASK_STATE_POWERING_ON = 'powering-on'
async def create(context, novaclient, glanceclient, name_or_id, flavor,
image, ssh_keyname=None, nics=None, use_existing=False,
files=None, config_drive=False, userdata=None):
"""
    Creates a compute instance.
    :param context: OrchestraContext
    :param novaclient: Authorized Nova client
    :param glanceclient: Authorized Glance client
    :param name_or_id: Instance name or ID
    :param flavor: Instance flavor
    :param image: Instance image
    :param ssh_keyname: name of SSH keypair to be injected
    :param nics: Neutron port definitions for an instance
    :param use_existing: whether to use an existing instance or create a new one
    :param files: dict of file injections
    :param config_drive: whether to use a config drive
    :param userdata: user data to pass to the instance at boot time
    :return: instance
"""
if not use_existing:
glanceclient.images.get(image)
context.logger.debug('Image "{0}" exists.'
.format(image))
novaclient.flavors.get(flavor)
context.logger.debug('Flavor "{0}" exists.'
.format(flavor))
instance = novaclient.servers.create(
name_or_id, image, flavor,
key_name=ssh_keyname,
nics=nics, files=files,
config_drive=config_drive,
userdata=userdata,
)
context.logger.info('Compute instance "{0}" created.'
.format(name_or_id))
else:
instance = novaclient.servers.get(name_or_id)
return instance
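# A hypothetical usage sketch (client objects, flavor and image names are
# assumptions, not part of this module):
#
#   instance = await create(context, novaclient, glanceclient,
#                           'my-server', 'm1.small', 'cirros-0.4.0',
#                           nics=[{'port-id': port_id}])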
async def start(context, novaclient, name_or_id,
use_existing=False,
task_retry_interval=None,
task_retries=None):
"""
    Starts a compute instance and waits until it becomes active.
    :param context: OrchestraContext
    :param novaclient: Authorized Nova client
    :param name_or_id: Instance name or ID
    :param use_existing: whether the instance is an external resource
    :param task_retry_interval: seconds between polling attempts
    :param task_retries: maximum number of polling attempts
"""
if use_existing:
context.logger.info('Using existing instance in its original state.')
return
async def wait_until_active():
instance = novaclient.servers.get(name_or_id)
server_task_state = getattr(instance, 'OS-EXT-STS:task_state')
if instance.status == COMPUTE_ACTIVE:
return True
        # A stopped instance must be powered on explicitly; an instance that
        # is still building or already powering on just needs more polling.
        if (instance.status == COMPUTE_SHUTOFF and
                server_task_state != SERVER_TASK_STATE_POWERING_ON):
            instance.start()
            return False
if (instance.status == COMPUTE_BUILD or
server_task_state == SERVER_TASK_STATE_POWERING_ON):
return False
await utils.retry(wait_until_active, exceptions=(Exception,),
task_retries=task_retries,
task_retry_interval=task_retry_interval)
    context.logger.info('Compute instance "{0}" started.'.format(name_or_id))
async def delete(context, novaclient, name_or_id,
use_existing=False,
task_retry_interval=None,
task_retries=None):
"""
    Deletes a compute instance and waits until it is gone.
    :param context: OrchestraContext
    :param novaclient: Authorized Nova client
    :param name_or_id: Instance name or ID
    :param use_existing: whether the instance is an external resource
    :param task_retry_interval: seconds between polling attempts
    :param task_retries: maximum number of polling attempts
:return:
"""
if use_existing:
context.logger.info('Compute instance "{0}" remains as is, '
'because it is external resource.'
.format(name_or_id))
return
instance = novaclient.servers.get(name_or_id)
try:
instance.delete()
except Exception as ex:
context.logger.debug(str(ex))
        # we don't really care whether the delete call succeeded here;
        # the retry loop below verifies that the instance is gone
        pass
async def is_gone():
try:
novaclient.servers.get(name_or_id)
return False
except Exception as ex:
context.logger.debug(str(ex))
return True
await utils.retry(is_gone, exceptions=(Exception,),
task_retries=task_retries,
task_retry_interval=task_retry_interval)
context.logger.info('Compute instance "{0}" deleted.'
.format(name_or_id))
async def stop(context, novaclient, name_or_id,
use_existing=False,
task_retry_interval=None,
task_retries=None):
"""
    Stops a compute instance and waits until its current task finishes.
    :param context: OrchestraContext
    :param novaclient: Authorized Nova client
    :param name_or_id: Instance name or ID
    :param use_existing: whether the instance is an external resource
    :param task_retry_interval: seconds between polling attempts
    :param task_retries: maximum number of polling attempts
:return:
"""
if use_existing:
context.logger.info('Leaving compute instance "{0}" as is because it '
'is external resource.'.format(name_or_id))
return
context.logger.info('Attempting to stop compute '
'instance "{0}".'.format(name_or_id))
try:
instance = novaclient.servers.get(name_or_id)
instance.stop()
except Exception as ex:
context.logger.debug(str(ex))
        # we don't really care whether the instance was stopped or not;
        # the next operation will delete it
pass
async def wait_until_task_finished():
instance = novaclient.servers.get(name_or_id)
server_task_state = getattr(instance, 'OS-EXT-STS:task_state')
return server_task_state is None
await utils.retry(wait_until_task_finished, exceptions=(Exception, ),
task_retry_interval=task_retry_interval,
task_retries=task_retries)
context.logger.info('Compute instance "{0}" stopped.'
.format(name_or_id))
|
karandesai-96/joblib
|
refs/heads/master
|
joblib/test/test_numpy_pickle_compat.py
|
8
|
"""Test the old numpy pickler, compatibility version."""
import random
# numpy_pickle is not a drop-in replacement of pickle, as it takes
# filenames instead of open files as arguments.
from joblib import numpy_pickle_compat
def test_z_file(tmpdir):
# Test saving and loading data with Zfiles.
filename = tmpdir.join('test.pkl').strpath
data = numpy_pickle_compat.asbytes('Foo, \n Bar, baz, \n\nfoobar')
with open(filename, 'wb') as f:
numpy_pickle_compat.write_zfile(f, data)
with open(filename, 'rb') as f:
data_read = numpy_pickle_compat.read_zfile(f)
assert data == data_read
|
dansimau/coke
|
refs/heads/master
|
docs/conf.py
|
1
|
# -*- coding: utf-8 -*-
#
# Coke documentation build configuration file, created by
# sphinx-quickstart on Tue Nov 26 23:26:59 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Coke'
copyright = u'2013, Daniel Simmons'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Cokedoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Coke.tex', u'Coke Documentation',
u'Daniel Simmons', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'coke', u'Coke Documentation',
[u'Daniel Simmons'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Coke', u'Coke Documentation',
u'Daniel Simmons', 'Coke', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
joshuajan/odoo
|
refs/heads/master
|
addons/mrp/wizard/change_production_qty.py
|
53
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
class change_production_qty(osv.osv_memory):
_name = 'change.production.qty'
_description = 'Change Quantity of Products'
_columns = {
'product_qty': fields.float('Product Qty', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
}
def default_get(self, cr, uid, fields, context=None):
""" To get default values for the object.
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param fields: List of fields for which we want default values
@param context: A standard dictionary
        @return: A dictionary of fields with their default values.
"""
if context is None:
context = {}
res = super(change_production_qty, self).default_get(cr, uid, fields, context=context)
prod_obj = self.pool.get('mrp.production')
prod = prod_obj.browse(cr, uid, context.get('active_id'), context=context)
if 'product_qty' in fields:
res.update({'product_qty': prod.product_qty})
return res
def _update_product_to_produce(self, cr, uid, prod, qty, context=None):
move_lines_obj = self.pool.get('stock.move')
for m in prod.move_created_ids:
move_lines_obj.write(cr, uid, [m.id], {'product_uom_qty': qty})
def change_prod_qty(self, cr, uid, ids, context=None):
"""
Changes the Quantity of Product.
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: List of IDs selected
@param context: A standard dictionary
@return:
"""
record_id = context and context.get('active_id',False)
assert record_id, _('Active Id not found')
prod_obj = self.pool.get('mrp.production')
bom_obj = self.pool.get('mrp.bom')
move_obj = self.pool.get('stock.move')
for wiz_qty in self.browse(cr, uid, ids, context=context):
prod = prod_obj.browse(cr, uid, record_id, context=context)
prod_obj.write(cr, uid, [prod.id], {'product_qty': wiz_qty.product_qty})
prod_obj.action_compute(cr, uid, [prod.id])
for move in prod.move_lines:
bom_point = prod.bom_id
bom_id = prod.bom_id.id
if not bom_point:
bom_id = bom_obj._bom_find(cr, uid, prod.product_uom.id, product_id=prod.product_id.id)
if not bom_id:
raise osv.except_osv(_('Error!'), _("Cannot find bill of material for this product."))
prod_obj.write(cr, uid, [prod.id], {'bom_id': bom_id})
bom_point = bom_obj.browse(cr, uid, [bom_id])[0]
if not bom_id:
raise osv.except_osv(_('Error!'), _("Cannot find bill of material for this product."))
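                # Convert the quantity to produce into the BoM's unit of
                # measure before exploding the BoM into component quantities.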
factor = prod.product_qty * prod.product_uom.factor / bom_point.product_uom.factor
product_details, workcenter_details = \
bom_obj._bom_explode(cr, uid, bom_point, prod.product_id, factor / bom_point.product_qty, [])
for r in product_details:
if r['product_id'] == move.product_id.id:
move_obj.write(cr, uid, [move.id], {'product_uom_qty': r['product_qty']})
if prod.move_prod_id:
move_obj.write(cr, uid, [prod.move_prod_id.id], {'product_uom_qty' : wiz_qty.product_qty})
self._update_product_to_produce(cr, uid, prod, wiz_qty.product_qty, context=context)
return {}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
osuripple/pep.py
|
refs/heads/master
|
constants/slotStatuses.py
|
1
|
FREE = 1
LOCKED = 2
NOT_READY = 4
READY = 8
NO_MAP = 16
PLAYING = 32
OCCUPIED = 124
PLAYING_QUIT = 128
|
helldorado/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/cloudengine/ce_evpn_bgp_rr.py
|
26
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ce_evpn_bgp_rr
version_added: "2.4"
short_description: Manages RR for the VXLAN Network on HUAWEI CloudEngine switches.
description:
- Configure an RR in BGP-EVPN address family view on HUAWEI CloudEngine switches.
author: Zhijin Zhou (@QijunPan)
notes:
    - Ensure that the BGP view exists.
- The peer, peer_type, and reflect_client arguments must all exist or not exist.
options:
as_number:
description:
- Specifies the number of the AS, in integer format.
The value is an integer that ranges from 1 to 4294967295.
required: true
bgp_instance:
description:
- Specifies the name of a BGP instance.
The value of instance-name can be an integer 1 or a string of 1 to 31.
bgp_evpn_enable:
description:
- Enable or disable the BGP-EVPN address family.
choices: ['enable','disable']
default: 'enable'
peer_type:
description:
- Specify the peer type.
choices: ['group_name','ipv4_address']
peer:
description:
- Specifies the IPv4 address or the group name of a peer.
reflect_client:
description:
- Configure the local device as the route reflector and the peer or peer group as the client of the route reflector.
choices: ['enable','disable']
policy_vpn_target:
description:
- Enable or disable the VPN-Target filtering.
choices: ['enable','disable']
'''
EXAMPLES = '''
- name: BGP RR test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: "Configure BGP-EVPN address family view and ensure that BGP view has existed."
ce_evpn_bgp_rr:
as_number: 20
bgp_evpn_enable: enable
provider: "{{ cli }}"
- name: "Configure reflect client and ensure peer has existed."
ce_evpn_bgp_rr:
as_number: 20
peer_type: ipv4_address
peer: 192.8.3.3
reflect_client: enable
provider: "{{ cli }}"
- name: "Configure the VPN-Target filtering."
ce_evpn_bgp_rr:
as_number: 20
policy_vpn_target: enable
provider: "{{ cli }}"
- name: "Configure an RR in BGP-EVPN address family view."
ce_evpn_bgp_rr:
as_number: 20
bgp_evpn_enable: enable
peer_type: ipv4_address
peer: 192.8.3.3
reflect_client: enable
policy_vpn_target: disable
provider: "{{ cli }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {
"as_number": "20",
"bgp_evpn_enable": "enable",
"bgp_instance": null,
"peer": "192.8.3.3",
"peer_type": "ipv4_address",
"policy_vpn_target": "disable",
"reflect_client": "enable"
}
existing:
description: k/v pairs of existing attributes on the device
returned: always
type: dict
sample: {
"as_number": "20",
"bgp_evpn_enable": "disable",
"bgp_instance": null,
"peer": null,
"peer_type": null,
"policy_vpn_target": "disable",
"reflect_client": "disable"
}
end_state:
description: k/v pairs of end attributes on the device
returned: always
type: dict
sample: {
"as_number": "20",
"bgp_evpn_enable": "enable",
"bgp_instance": null,
"peer": "192.8.3.3",
"peer_type": "ipv4_address",
"policy_vpn_target": "disable",
"reflect_client": "enable"
}
updates:
description: command list sent to the device
returned: always
type: list
sample: [
"bgp 20",
" l2vpn-family evpn",
" peer 192.8.3.3 enable",
" peer 192.8.3.3 reflect-client",
" undo policy vpn-target"
]
changed:
description: check to see if a change was made on the device
returned: always
type: bool
sample: true
'''
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cloudengine.ce import get_config, load_config, ce_argument_spec
def is_config_exist(cmp_cfg, test_cfg):
"""is configuration exist"""
if not cmp_cfg or not test_cfg:
return False
return bool(test_cfg in cmp_cfg)
class EvpnBgpRr(object):
"""Manange RR in BGP-EVPN address family view"""
def __init__(self, argument_spec):
self.spec = argument_spec
self.module = None
self.__init_module__()
# RR configuration parameters
self.as_number = self.module.params['as_number']
self.bgp_instance = self.module.params['bgp_instance']
self.peer_type = self.module.params['peer_type']
self.peer = self.module.params['peer']
self.bgp_evpn_enable = self.module.params['bgp_evpn_enable']
self.reflect_client = self.module.params['reflect_client']
self.policy_vpn_target = self.module.params['policy_vpn_target']
self.commands = list()
self.config = None
self.bgp_evpn_config = ""
self.cur_config = dict()
self.conf_exist = False
# state
self.changed = False
self.updates_cmd = list()
self.results = dict()
self.proposed = dict()
self.existing = dict()
self.end_state = dict()
def __init_module__(self):
"""Init module"""
self.module = AnsibleModule(
argument_spec=self.spec, supports_check_mode=True)
def cli_load_config(self, commands):
"""Load config by cli"""
if not self.module.check_mode:
load_config(self.module, commands)
def is_bgp_view_exist(self):
"""Judge whether BGP view has existed"""
if self.bgp_instance:
view_cmd = "bgp %s instance %s" % (
self.as_number, self.bgp_instance)
else:
view_cmd = "bgp %s" % self.as_number
return is_config_exist(self.config, view_cmd)
def is_l2vpn_family_evpn_exist(self):
"""Judge whether BGP-EVPN address family view has existed"""
view_cmd = "l2vpn-family evpn"
return is_config_exist(self.config, view_cmd)
def is_reflect_client_exist(self):
"""Judge whether reflect client is configured"""
view_cmd = "peer %s reflect-client" % self.peer
return is_config_exist(self.bgp_evpn_config, view_cmd)
def is_policy_vpn_target_exist(self):
"""Judge whether the VPN-Target filtering is enabled"""
view_cmd = "undo policy vpn-target"
        return not is_config_exist(self.bgp_evpn_config, view_cmd)
def get_config_in_bgp_view(self):
"""Get configuration in BGP view"""
flags = list()
exp = " | section include"
if self.as_number:
if self.bgp_instance:
exp += " bgp %s instance %s" % (self.as_number,
self.bgp_instance)
else:
exp += " bgp %s" % self.as_number
flags.append(exp)
config = get_config(self.module, flags)
return config
def get_config_in_bgp_evpn_view(self):
"""Get configuration in BGP_EVPN view"""
self.bgp_evpn_config = ""
if not self.config:
return ""
index = self.config.find("l2vpn-family evpn")
if index == -1:
return ""
return self.config[index:]
def get_current_config(self):
"""Get current configuration"""
if not self.as_number:
self.module.fail_json(msg='Error: The value of as-number cannot be empty.')
self.cur_config['bgp_exist'] = False
self.cur_config['bgp_evpn_enable'] = 'disable'
self.cur_config['reflect_client'] = 'disable'
self.cur_config['policy_vpn_target'] = 'disable'
self.cur_config['peer_type'] = None
self.cur_config['peer'] = None
self.config = self.get_config_in_bgp_view()
if not self.is_bgp_view_exist():
return
self.cur_config['bgp_exist'] = True
if not self.is_l2vpn_family_evpn_exist():
return
self.cur_config['bgp_evpn_enable'] = 'enable'
self.bgp_evpn_config = self.get_config_in_bgp_evpn_view()
if self.is_reflect_client_exist():
self.cur_config['reflect_client'] = 'enable'
self.cur_config['peer_type'] = self.peer_type
self.cur_config['peer'] = self.peer
if self.is_policy_vpn_target_exist():
self.cur_config['policy_vpn_target'] = 'enable'
def get_existing(self):
"""Get existing config"""
self.existing = dict(as_number=self.as_number,
bgp_instance=self.bgp_instance,
peer_type=self.cur_config['peer_type'],
peer=self.cur_config['peer'],
bgp_evpn_enable=self.cur_config[
'bgp_evpn_enable'],
reflect_client=self.cur_config['reflect_client'],
policy_vpn_target=self.cur_config[
'policy_vpn_target'])
def get_proposed(self):
"""Get proposed config"""
self.proposed = dict(as_number=self.as_number,
bgp_instance=self.bgp_instance,
peer_type=self.peer_type,
peer=self.peer,
bgp_evpn_enable=self.bgp_evpn_enable,
reflect_client=self.reflect_client,
policy_vpn_target=self.policy_vpn_target)
def get_end_state(self):
"""Get end config"""
self.get_current_config()
self.end_state = dict(as_number=self.as_number,
bgp_instance=self.bgp_instance,
peer_type=self.cur_config['peer_type'],
peer=self.cur_config['peer'],
bgp_evpn_enable=self.cur_config[
'bgp_evpn_enable'],
reflect_client=self.cur_config['reflect_client'],
policy_vpn_target=self.cur_config['policy_vpn_target'])
def show_result(self):
"""Show result"""
self.results['changed'] = self.changed
self.results['proposed'] = self.proposed
self.results['existing'] = self.existing
self.results['end_state'] = self.end_state
if self.changed:
self.results['updates'] = self.updates_cmd
else:
self.results['updates'] = list()
self.module.exit_json(**self.results)
def judge_if_config_exist(self):
"""Judge whether configuration has existed"""
if self.bgp_evpn_enable and self.bgp_evpn_enable != self.cur_config['bgp_evpn_enable']:
return False
if self.bgp_evpn_enable == 'disable' and self.cur_config['bgp_evpn_enable'] == 'disable':
return True
if self.reflect_client and self.reflect_client == 'enable':
if self.peer_type and self.peer_type != self.cur_config['peer_type']:
return False
if self.peer and self.peer != self.cur_config['peer']:
return False
if self.reflect_client and self.reflect_client != self.cur_config['reflect_client']:
return False
if self.policy_vpn_target and self.policy_vpn_target != self.cur_config['policy_vpn_target']:
return False
return True
def cli_add_command(self, command, undo=False):
"""Add command to self.update_cmd and self.commands"""
if undo and command.lower() not in ["quit", "return"]:
cmd = "undo " + command
else:
cmd = command
self.commands.append(cmd) # set to device
if command.lower() not in ["quit", "return"]:
self.updates_cmd.append(cmd) # show updates result
def config_rr(self):
"""Configure RR"""
if self.conf_exist:
return
if self.bgp_instance:
view_cmd = "bgp %s instance %s" % (
self.as_number, self.bgp_instance)
else:
view_cmd = "bgp %s" % self.as_number
self.cli_add_command(view_cmd)
if self.bgp_evpn_enable == 'disable':
self.cli_add_command(" undo l2vpn-family evpn")
else:
self.cli_add_command(" l2vpn-family evpn")
if self.reflect_client and self.reflect_client != self.cur_config['reflect_client']:
if self.reflect_client == 'enable':
self.cli_add_command(" peer %s enable" % self.peer)
self.cli_add_command(
" peer %s reflect-client" % self.peer)
else:
self.cli_add_command(
" undo peer %s reflect-client" % self.peer)
self.cli_add_command(" undo peer %s enable" % self.peer)
if self.cur_config['bgp_evpn_enable'] == 'enable':
if self.policy_vpn_target and self.policy_vpn_target != self.cur_config['policy_vpn_target']:
if self.policy_vpn_target == 'enable':
self.cli_add_command(" policy vpn-target")
else:
self.cli_add_command(" undo policy vpn-target")
else:
if self.policy_vpn_target and self.policy_vpn_target == 'disable':
self.cli_add_command(" undo policy vpn-target")
if self.commands:
self.cli_load_config(self.commands)
self.changed = True
def check_is_ipv4_addr(self):
"""Check ipaddress validate"""
rule1 = r'(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])\.'
rule2 = r'(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])'
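        # rule1/rule2 each match a single octet in 0-255; e.g. '192.8.3.3'
        # matches the assembled pattern while '256.1.1.1' does not.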
ipv4_regex = '%s%s%s%s%s%s' % ('^', rule1, rule1, rule1, rule2, '$')
return bool(re.match(ipv4_regex, self.peer))
def check_params(self):
"""Check all input params"""
        if not self.cur_config['bgp_exist']:
            self.module.fail_json(msg="Error: BGP view does not exist.")
if self.bgp_instance:
if len(self.bgp_instance) < 1 or len(self.bgp_instance) > 31:
self.module.fail_json(
msg="Error: The length of BGP instance-name must be between 1 or a string of 1 to and 31.")
if self.as_number:
if len(self.as_number) > 11 or len(self.as_number) == 0:
self.module.fail_json(
                    msg='Error: The length of as_number %s is out of the range [1 - 11].' % self.as_number)
tmp_dict1 = dict(peer_type=self.peer_type,
peer=self.peer,
reflect_client=self.reflect_client)
tmp_dict2 = dict((k, v)
for k, v in tmp_dict1.items() if v is not None)
if len(tmp_dict2) != 0 and len(tmp_dict2) != 3:
self.module.fail_json(
msg='Error: The peer, peer_type, and reflect_client arguments must all exist or not exist.')
if self.peer_type:
if self.peer_type == 'ipv4_address' and not self.check_is_ipv4_addr():
self.module.fail_json(msg='Error: Illegal IPv4 address.')
elif self.peer_type == 'group_name' and self.check_is_ipv4_addr():
self.module.fail_json(
msg='Error: Ip address cannot be configured as group-name.')
def work(self):
"""Excute task"""
self.get_current_config()
self.check_params()
self.get_existing()
self.get_proposed()
self.conf_exist = self.judge_if_config_exist()
self.config_rr()
self.get_end_state()
self.show_result()
def main():
"""Main function entry"""
argument_spec = dict(
as_number=dict(required=True, type='str'),
bgp_instance=dict(required=False, type='str'),
bgp_evpn_enable=dict(required=False, type='str',
default='enable', choices=['enable', 'disable']),
peer_type=dict(required=False, type='str', choices=[
'group_name', 'ipv4_address']),
peer=dict(required=False, type='str'),
reflect_client=dict(required=False, type='str',
choices=['enable', 'disable']),
policy_vpn_target=dict(required=False, choices=['enable', 'disable']),
)
argument_spec.update(ce_argument_spec)
evpn_bgp_rr = EvpnBgpRr(argument_spec)
evpn_bgp_rr.work()
if __name__ == '__main__':
main()
|
holmes/intellij-community
|
refs/heads/master
|
python/helpers/profiler/prof_util.py
|
45
|
__author__ = 'traff'
import threading
import os
import tempfile
from _prof_imports import Stats, FuncStat, Function
class ProfDaemonThread(threading.Thread):
def __init__(self):
super(ProfDaemonThread, self).__init__()
self.setDaemon(True)
self.killReceived = False
def run(self):
self.OnRun()
def OnRun(self):
pass
def generate_snapshot_filepath(basepath, local_temp_dir=False):
if basepath is None:
basepath = 'snapshot'
if local_temp_dir:
basepath = os.path.join(tempfile.gettempdir(), os.path.basename(basepath.replace('\\', '/')))
n = 0
path = basepath + '.pstat'
while os.path.exists(path):
n+=1
path = basepath + (str(n) if n>0 else '') + '.pstat'
return path
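# A sketch of the numbering scheme (hypothetical paths):
#   generate_snapshot_filepath('/tmp/run')  # -> '/tmp/run.pstat'
#   # called again while '/tmp/run.pstat' exists -> '/tmp/run1.pstat'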
def statsToResponse(stats, m):
ystats = Stats()
ystats.func_stats = []
m.ystats = ystats
for func, stat in stats.items():
path, line, func_name = func
cc, nc, tt, ct, callers = stat
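        # pstats entry layout: cc = primitive call count, nc = total call
        # count, tt = own (exclusive) time, ct = cumulative time.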
func = Function()
func_stat = FuncStat()
func.func_stat = func_stat
ystats.func_stats.append(func)
func_stat.file = path
func_stat.line = line
func_stat.func_name = func_name
func_stat.calls_count = nc
func_stat.total_time = ct
func_stat.own_time = tt
func.callers = []
for f, s in callers.items():
caller_stat = FuncStat()
func.callers.append(caller_stat)
path, line, func_name = f
cc, nc, tt, ct = s
caller_stat.file = path
caller_stat.line = line
caller_stat.func_name = func_name
caller_stat.calls_count = cc
caller_stat.total_time = ct
caller_stat.own_time = tt
m.validate()
|
awemulya/fieldsight-kobocat
|
refs/heads/master
|
onadata/apps/fsforms/viewsets/SiteFormsViewset.py
|
1
|
from __future__ import unicode_literals
from rest_framework import viewsets
from rest_framework import serializers
from onadata.apps.fsforms.models import FieldSightXF
from onadata.apps.fsforms.serializers.FieldSightXFormApiSerializer import FSXFormListSerializer
class SiteFormViewSet(viewsets.ReadOnlyModelViewSet):
"""
A simple ViewSet for viewing site forms.
"""
queryset = FieldSightXF.objects.all()
serializer_class = FSXFormListSerializer
def filter_queryset(self, queryset):
site_id = self.kwargs.get('site_id', None)
        if site_id is None:
            # If no site id is specified, the request must be authenticated
            if self.request.user.is_anonymous():
                # raises a permission denied exception, forces authentication
                self.permission_denied(self.request)
            return super(SiteFormViewSet, self).filter_queryset(queryset)
        try:
            int(site_id)
        except ValueError:
            raise serializers.ValidationError({'site': "Site Id Not Given."})
site_id = int(site_id)
queryset = queryset.filter(site__id=site_id)
return queryset
|
UIKit0/Radicale
|
refs/heads/master
|
radicale/auth/__init__.py
|
10
|
# -*- coding: utf-8 -*-
#
# This file is part of Radicale Server - Calendar Server
# Copyright © 2008 Nicolas Kandel
# Copyright © 2008 Pascal Halter
# Copyright © 2008-2013 Guillaume Ayoub
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Radicale. If not, see <http://www.gnu.org/licenses/>.
"""
Authentication management.
"""
import sys
from .. import config, log
def load():
"""Load list of available authentication managers."""
auth_type = config.get("auth", "type")
log.LOGGER.debug("Authentication type is %s" % auth_type)
if auth_type == "None":
return None
elif auth_type == 'custom':
auth_module = config.get("auth", "custom_handler")
__import__(auth_module)
module = sys.modules[auth_module]
else:
root_module = __import__(
"auth.%s" % auth_type, globals=globals(), level=2)
module = getattr(root_module, auth_type)
# Override auth.is_authenticated
sys.modules[__name__].is_authenticated = module.is_authenticated
return module
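# A minimal sketch of a custom handler module, assuming a configuration of
# [auth] type = custom and custom_handler = myauth (names are hypothetical).
# The module only needs to expose an is_authenticated(user, password) callable:
#
#   def is_authenticated(user, password):
#       return user == "admin" and password == "secret"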
def is_authenticated(user, password):
"""Check if the user is authenticated.
    This method is overridden if an auth module is loaded.
"""
return True # Default is always True: no authentication
|
whoi-acomms/pyacomms
|
refs/heads/master
|
bin/plotter.py
|
1
|
"""
This demo demonstrates how to embed a matplotlib (mpl) plot
into a PyQt4 GUI application, including:
* Using the navigation toolbar
* Adding data to the plot
* Dynamically modifying the plot's properties
* Processing mpl events
* Saving the plot to a file from a menu
The main goal is to serve as a basis for developing rich PyQt GUI
applications featuring mpl plots (using the mpl OO API).
Eli Bendersky ([email protected])
License: this code is in the public domain
Last modified: 19.01.2009
"""
import sys, os, random
from PySide.QtCore import *
from PySide.QtGui import *
from acomms import CycleStats, CycleStatsList
import magiccstbox
os.environ['QT_API'] = 'pyside'
import matplotlib
matplotlib.use('Qt4Agg')
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
from matplotlib.figure import Figure
class AppForm(QMainWindow):
def __init__(self, cst_list, parent=None):
QMainWindow.__init__(self, parent)
self.setWindowTitle('Magic CST GUI')
self.create_menu()
self.create_main_frame()
self.create_status_bar()
self.textbox.setText('1 2 3 4')
self.data = [1, 2, 3, 4]
#self.data = [result.cst['snr_in'] for result in results_list]
#self.data = cst_list;
self.cst_list = cst_list
self.cst_dol = cst_list.to_dict_of_lists()
if len(cst_list) > 0:
self.table.setColumnCount(len(cst_list[0]))
self.table.setRowCount(len(cst_list))
# Optional, set the labels that show on top
self.table.setHorizontalHeaderLabels(CycleStats.fields)
for row_num in range(len(cst_list)):
for col_num in range(len(CycleStats.fields)):
text = cst_list[row_num][CycleStats.fields[col_num]]
table_item = QTableWidgetItem(str(text))
self.table.setItem(row_num, col_num, table_item)
# Also optional. Will fit the cells to its contents.
self.table.resizeColumnsToContents()
self.on_draw()
def save_plot(self):
file_choices = "PNG (*.png)|*.png"
path = unicode(QFileDialog.getSaveFileName(self,
'Save file', '',
file_choices))
if path:
self.canvas.print_figure(path, dpi=self.dpi)
self.statusBar().showMessage('Saved to %s' % path, 2000)
def on_about(self):
msg = """ The Magic CST GUI
"""
QMessageBox.about(self, "About the demo", msg.strip())
def on_pick(self, event):
# The event received here is of the type
# matplotlib.backend_bases.PickEvent
#
# It carries lots of information, of which we're using
# only a small amount here.
#
box_points = event.artist.get_bbox().get_points()
msg = "You've clicked on a dot with coords:\n %s" % box_points
QMessageBox.information(self, "Click!", msg)
def on_draw(self):
""" Redraws the figure
"""
#str = unicode(self.textbox.text())
#self.data = map(int, str.split())
xdata = self.cst_dol['toa']
# clear the axes and redraw the plot anew
#
plot_fields = []
# Create subplots for each checked item
for index in xrange(self.field_listbox.count()):
item = self.field_listbox.item(index)
ischecked = bool(item.checkState())
if ischecked:
plot_fields.append(item.text())
if len(plot_fields) > 0:
nrows = len(plot_fields)
ncols = 1
self.fig.clear()
for row in xrange(nrows):
ydata = self.cst_dol[plot_fields[row]]
                # add_subplot indices are 1-based
                axes = self.fig.add_subplot(nrows, ncols, row + 1)
axes.plot(xdata, ydata, marker=".", linewidth=1)
axes.set_ylabel(plot_fields[row])
else:
# Pick the data based on the combo box value
self.axes = self.fig.add_subplot('111')
ydata = self.cst_dol[str(self.plot_combobox.currentText())]
self.axes.clear()
self.axes.grid(self.grid_cb.isChecked())
self.axes.plot(xdata, ydata, marker=".", linewidth=self.slider.value())
self.fig.autofmt_xdate()
self.canvas.draw()
def create_main_frame(self):
self.main_frame = QWidget()
# Create the mpl Figure and FigCanvas objects.
# 5x4 inches, 100 dots-per-inch
#
self.dpi = 100
self.fig = Figure(dpi=self.dpi)
self.canvas = FigureCanvas(self.fig)
self.canvas.setParent(self.main_frame)
# Since we have only one plot, we can use add_axes
# instead of add_subplot, but then the subplot
# configuration tool in the navigation toolbar wouldn't
# work.
#
# Bind the 'pick' event for clicking on one of the bars
#
self.canvas.mpl_connect('pick_event', self.on_pick)
# Create the navigation toolbar, tied to the canvas
#
self.mpl_toolbar = NavigationToolbar(self.canvas, self.main_frame)
# Other GUI controls
#
self.textbox = QLineEdit()
self.textbox.setMinimumWidth(200)
        self.connect(self.textbox, SIGNAL('editingFinished()'), self.on_draw)
self.draw_button = QPushButton("&Draw")
self.connect(self.draw_button, SIGNAL('clicked()'), self.on_draw)
self.grid_cb = QCheckBox("Show &Grid")
self.grid_cb.setChecked(False)
self.connect(self.grid_cb, SIGNAL('stateChanged(int)'), self.on_draw)
slider_label = QLabel('Line width:')
self.slider = QSlider(Qt.Horizontal)
self.slider.setRange(1, 10)
self.slider.setValue(2)
self.slider.setTracking(True)
self.slider.setTickPosition(QSlider.TicksBothSides)
self.connect(self.slider, SIGNAL('valueChanged(int)'), self.on_draw)
self.table = QTableWidget()
plot_cb_label = QLabel("Variable to plot:")
self.plot_combobox = QComboBox()
self.plot_combobox.addItems(CycleStats.fields)
self.connect(self.plot_combobox, SIGNAL('currentIndexChanged(int)'), self.on_draw)
self.field_listbox = QListWidget()
self.field_listbox.setSelectionMode(QAbstractItemView.NoSelection)
self.field_listbox.addItems(CycleStats.fields)
for index in xrange(self.field_listbox.count()):
item = self.field_listbox.item(index)
item.setCheckState(Qt.CheckState(0))
self.field_listbox.setMinimumWidth(self.field_listbox.sizeHintForColumn(0))
self.connect(self.field_listbox, SIGNAL('itemClicked(QListWidgetItem *)'), self.on_draw)
toparea = QSplitter()
toparea.addWidget(self.field_listbox)
toparea.addWidget(self.canvas)
#
# Layout with box sizers
#
hbox = QHBoxLayout()
for w in [ plot_cb_label, self.plot_combobox, self.grid_cb,
slider_label, self.slider]:
hbox.addWidget(w)
hbox.setAlignment(w, Qt.AlignVCenter)
plotsection = QVBoxLayout()
plotsection.addWidget(self.mpl_toolbar)
plotsection.addWidget(toparea)
#plotsection.addLayout(hbox)
plotsectionwidget = QWidget()
plotsectionwidget.setLayout(plotsection)
vbox = QSplitter(Qt.Orientation.Vertical)
vbox.addWidget(plotsectionwidget)
vbox.addWidget(self.table)
bigbox = QVBoxLayout()
bigbox.addWidget(vbox)
self.main_frame.setLayout(bigbox)
self.setCentralWidget(self.main_frame)
def create_status_bar(self):
self.status_text = QLabel("Loaded CSTs")
self.statusBar().addWidget(self.status_text, 1)
def create_menu(self):
self.file_menu = self.menuBar().addMenu("&File")
load_file_action = self.create_action("&Save plot",
shortcut="Ctrl+S", slot=self.save_plot,
tip="Save the plot")
quit_action = self.create_action("&Quit", slot=self.close,
shortcut="Ctrl+Q", tip="Close the application")
self.add_actions(self.file_menu,
(load_file_action, None, quit_action))
self.help_menu = self.menuBar().addMenu("&Help")
about_action = self.create_action("&About",
shortcut='F1', slot=self.on_about,
tip='About the demo')
self.add_actions(self.help_menu, (about_action,))
def add_actions(self, target, actions):
for action in actions:
if action is None:
target.addSeparator()
else:
target.addAction(action)
def create_action( self, text, slot=None, shortcut=None,
icon=None, tip=None, checkable=False,
signal="triggered()"):
action = QAction(text, self)
if icon is not None:
action.setIcon(QIcon(":/%s.png" % icon))
if shortcut is not None:
action.setShortcut(shortcut)
if tip is not None:
action.setToolTip(tip)
action.setStatusTip(tip)
if slot is not None:
self.connect(action, SIGNAL(signal), slot)
if checkable:
action.setCheckable(True)
return action
def plot_csts(cst_list):
app = QApplication(sys.argv)
form = AppForm(cst_list)
form.show()
app.exec_()
def main():
app = QApplication(sys.argv)
form = AppForm(None)
form.show()
app.exec_()
if __name__ == "__main__":
main()
|
wemanuel/smry
|
refs/heads/master
|
server-auth/ls/google-cloud-sdk/lib/googlecloudapis/container/v1beta1/__init__.py
|
11
|
"""Common imports for generated container client library."""
# pylint:disable=wildcard-import
import pkgutil
from googlecloudapis.apitools.base.py import *
from googlecloudapis.container.v1beta1.container_v1beta1_client import *
from googlecloudapis.container.v1beta1.container_v1beta1_messages import *
__path__ = pkgutil.extend_path(__path__, __name__)
|
ratelle/cpuset
|
refs/heads/master
|
cpuset/main.py
|
3
|
"""Front end command line tool for Linux cpusets
"""
__copyright__ = """
Copyright (C) 2007-2010 Novell Inc.
Author: Alex Tsariounov <[email protected]>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import sys, os
from optparse import OptionParser
from cpuset import config
import cpuset.commands
from cpuset.commands.common import CmdException
from cpuset.util import CpusetException
#
# The commands map
#
class Commands(dict):
"""Commands class. It performs on-demand module loading
"""
def canonical_cmd(self, key):
"""Return the canonical name for a possibly-shortenned
command name.
"""
candidates = [cmd for cmd in self.keys() if cmd.startswith(key)]
if not candidates:
log.error('Unknown command: %s', key)
log.error('Try "%s help" for a list of supported commands', prog)
sys.exit(1)
elif len(candidates) > 1:
log.error('Ambiguous command: %s', key)
log.error('Candidates are: %s', ', '.join(candidates))
sys.exit(1)
return candidates[0]
def __getitem__(self, key):
"""Return the command python module name based.
"""
global prog
cmd_mod = self.get(key) or self.get(self.canonical_cmd(key))
__import__('cpuset.commands.' + cmd_mod)
return getattr(cpuset.commands, cmd_mod)
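# Example (hypothetical): commands['sh'] resolves through canonical_cmd to
# 'shield' and returns the imported cpuset.commands.shield module.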
commands = Commands({
'shield': 'shield',
'set': 'set',
# 'mem': 'mem',
'proc': 'proc',
})
supercommands = (
'shield',
)
def _print_helpstring(cmd):
print ' ' + cmd + ' ' * (12 - len(cmd)) + commands[cmd].help
def print_help():
print 'Usage: %s [global options] <command> [command options]' % os.path.basename(sys.argv[0])
print
print 'Global options:'
print ' -l/--log <fname> output debugging log in fname'
print ' -m/--machine print machine readable output'
print ' -x/--tohex <CPUSPEC> convert a CPUSPEC to hex'
print
print 'Generic commands:'
print ' help print the detailed command usage'
print ' version display version information'
print ' copyright display copyright information'
cmds = commands.keys()
cmds.sort()
print
print 'Super commands (high-level and multi-function):'
for cmd in supercommands:
_print_helpstring(cmd)
print
print 'Regular commands:'
for cmd in cmds:
if not cmd in supercommands:
_print_helpstring(cmd)
def main():
# handle pipes better
import signal
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
global prog
prog = os.path.basename(sys.argv[0])
global logfile
logfile = None
if len(sys.argv) < 2:
print >> sys.stderr, 'usage: %s <command>' % prog
print >> sys.stderr, \
' Try "%s --help" for a list of supported commands' % prog
sys.exit(1)
# configure logging
import logging
console = logging.StreamHandler(sys.stdout)
console.setLevel(logging.INFO)
formatter = logging.Formatter(prog + ': %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
global log
log = logging.getLogger('')
log.setLevel(logging.DEBUG)
try:
debug_level = int(os.environ['CSET_DEBUG_LEVEL'])
except KeyError:
debug_level = 0
except ValueError:
log.error('Invalid CSET_DEBUG_LEVEL environment variable')
sys.exit(1)
while True:
if len(sys.argv) == 1:
log.error('no arguments, nothing to do!')
sys.exit(2)
cmd = sys.argv[1]
if cmd in ['-l', '--log']:
if len(sys.argv) < 3:
log.critical('not enough arguments')
sys.exit(1)
# FIXME: very fragile
logfile = sys.argv[2]
#trace = logging.FileHandler('/var/log/cset.log', 'w')
trace = logging.FileHandler(logfile, 'a')
trace.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s %(name)-6s %(levelname)-8s %(message)s',
'%y%m%d-%H:%M:%S')
trace.setFormatter(formatter)
logging.getLogger('').addHandler(trace)
log.debug("---------- STARTING ----------")
from cpuset.version import version
log.debug('Cpuset (cset) %s' % version)
del(sys.argv[2])
del(sys.argv[1])
continue
if cmd in ['-h', '--help']:
if len(sys.argv) >= 3:
cmd = commands.canonical_cmd(sys.argv[2])
sys.argv[2] = '--help'
else:
print_help()
sys.exit(0)
if cmd == 'help':
if len(sys.argv) == 3 and not sys.argv[2] in ['-h', '--help']:
cmd = commands.canonical_cmd(sys.argv[2])
if not cmd in commands:
log.error('help: "%s" command unknown' % cmd)
sys.exit(1)
sys.argv[0] += ' %s' % cmd
command = commands[cmd]
parser = OptionParser(usage = command.usage,
option_list = command.options)
from pydoc import pager
pager(parser.format_help())
else:
print_help()
sys.exit(0)
if cmd in ['-v', '--version', 'version']:
from cpuset.version import version
log.info('Cpuset (cset) %s' % version)
sys.exit(0)
if cmd in ['-c', 'copyright', 'copying']:
log.info(__copyright__)
sys.exit(0)
if cmd in ['-m', '--machine']:
config.mread = True
del(sys.argv[1])
continue
if cmd in ['-x', '--tohex']:
if len(sys.argv) < 3:
log.critical('not enough arguments')
sys.exit(1)
cpuspec = sys.argv[2]
import cset
try:
print cset.cpuspec_to_hex(cpuspec)
except (ValueError, OSError, IOError, CpusetException, CmdException), err:
log.critical('**> ' + str(err))
if debug_level:
raise
else:
sys.exit(2)
sys.exit(0)
break
# re-build the command line arguments
cmd = commands.canonical_cmd(cmd)
sys.argv[0] += ' %s' % cmd
del(sys.argv[1])
log.debug('cmdline: ' + ' '.join(sys.argv))
try:
# importing the cset class creates the model
log.debug("creating cpuset model")
import cpuset.cset
command = commands[cmd]
usage = command.usage.split('\n')[0].strip()
parser = OptionParser(usage = usage, option_list = command.options)
options, args = parser.parse_args()
command.func(parser, options, args)
except (ValueError, OSError, IOError, CpusetException, CmdException), err:
log.critical('**> ' + str(err))
if str(err).find('Permission denied') != -1:
log.critical('insufficient permissions, you probably need to be root')
if str(err).find('invalid literal') != -1:
log.critical('option not understood')
if debug_level:
raise
else:
sys.exit(2)
except KeyboardInterrupt:
sys.exit(1)
sys.exit(0)
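# Hedged sketch (the original dump ends at main(); the real cset package may
# install a console-script entry point instead):
if __name__ == '__main__':
    main()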
|
ojengwa/oh-mainline
|
refs/heads/master
|
vendor/packages/Pygments/pygments/formatters/img.py
|
268
|
# -*- coding: utf-8 -*-
"""
pygments.formatters.img
~~~~~~~~~~~~~~~~~~~~~~~
Formatter for Pixmap output.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys
from pygments.formatter import Formatter
from pygments.util import get_bool_opt, get_int_opt, \
get_list_opt, get_choice_opt
# Import this carefully
try:
from PIL import Image, ImageDraw, ImageFont
pil_available = True
except ImportError:
pil_available = False
try:
import _winreg
except ImportError:
_winreg = None
__all__ = ['ImageFormatter', 'GifImageFormatter', 'JpgImageFormatter',
'BmpImageFormatter']
# For some unknown reason every font calls it something different
STYLES = {
'NORMAL': ['', 'Roman', 'Book', 'Normal', 'Regular', 'Medium'],
'ITALIC': ['Oblique', 'Italic'],
'BOLD': ['Bold'],
'BOLDITALIC': ['Bold Oblique', 'Bold Italic'],
}
# A sane default for modern systems
DEFAULT_FONT_NAME_NIX = 'Bitstream Vera Sans Mono'
DEFAULT_FONT_NAME_WIN = 'Courier New'
class PilNotAvailable(ImportError):
"""When Python imaging library is not available"""
class FontNotFound(Exception):
"""When there are no usable fonts specified"""
class FontManager(object):
"""
Manages a set of fonts: normal, italic, bold, etc...
"""
def __init__(self, font_name, font_size=14):
self.font_name = font_name
self.font_size = font_size
self.fonts = {}
self.encoding = None
if sys.platform.startswith('win'):
if not font_name:
self.font_name = DEFAULT_FONT_NAME_WIN
self._create_win()
else:
if not font_name:
self.font_name = DEFAULT_FONT_NAME_NIX
self._create_nix()
def _get_nix_font_path(self, name, style):
from commands import getstatusoutput
exit, out = getstatusoutput('fc-list "%s:style=%s" file' %
(name, style))
if not exit:
lines = out.splitlines()
if lines:
path = lines[0].strip().strip(':')
return path
def _create_nix(self):
for name in STYLES['NORMAL']:
path = self._get_nix_font_path(self.font_name, name)
if path is not None:
self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size)
break
else:
raise FontNotFound('No usable fonts named: "%s"' %
self.font_name)
for style in ('ITALIC', 'BOLD', 'BOLDITALIC'):
for stylename in STYLES[style]:
path = self._get_nix_font_path(self.font_name, stylename)
if path is not None:
self.fonts[style] = ImageFont.truetype(path, self.font_size)
break
else:
if style == 'BOLDITALIC':
self.fonts[style] = self.fonts['BOLD']
else:
self.fonts[style] = self.fonts['NORMAL']
def _lookup_win(self, key, basename, styles, fail=False):
for suffix in ('', ' (TrueType)'):
for style in styles:
try:
valname = '%s%s%s' % (basename, style and ' '+style, suffix)
val, _ = _winreg.QueryValueEx(key, valname)
return val
except EnvironmentError:
continue
else:
if fail:
raise FontNotFound('Font %s (%s) not found in registry' %
(basename, styles[0]))
return None
def _create_win(self):
try:
key = _winreg.OpenKey(
_winreg.HKEY_LOCAL_MACHINE,
r'Software\Microsoft\Windows NT\CurrentVersion\Fonts')
except EnvironmentError:
try:
key = _winreg.OpenKey(
_winreg.HKEY_LOCAL_MACHINE,
r'Software\Microsoft\Windows\CurrentVersion\Fonts')
except EnvironmentError:
raise FontNotFound('Can\'t open Windows font registry key')
try:
path = self._lookup_win(key, self.font_name, STYLES['NORMAL'], True)
self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size)
for style in ('ITALIC', 'BOLD', 'BOLDITALIC'):
path = self._lookup_win(key, self.font_name, STYLES[style])
if path:
self.fonts[style] = ImageFont.truetype(path, self.font_size)
else:
if style == 'BOLDITALIC':
self.fonts[style] = self.fonts['BOLD']
else:
self.fonts[style] = self.fonts['NORMAL']
finally:
_winreg.CloseKey(key)
def get_char_size(self):
"""
Get the character size.
"""
return self.fonts['NORMAL'].getsize('M')
def get_font(self, bold, oblique):
"""
Get the font based on bold and italic flags.
"""
if bold and oblique:
return self.fonts['BOLDITALIC']
elif bold:
return self.fonts['BOLD']
elif oblique:
return self.fonts['ITALIC']
else:
return self.fonts['NORMAL']
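# Illustrative FontManager usage (a sketch, assuming a "DejaVu Sans Mono"
# font is installed; not part of the original module):
#
#   fm = FontManager('DejaVu Sans Mono', font_size=14)
#   char_width, char_height = fm.get_char_size()
#   bold_italic_font = fm.get_font(bold=True, oblique=True)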
class ImageFormatter(Formatter):
"""
Create a PNG image from source code. This uses the Python Imaging Library to
generate a pixmap from the source code.
*New in Pygments 0.10.*
Additional options accepted:
`image_format`
        An image format to output to that is recognised by PIL; these include:
* "PNG" (default)
* "JPEG"
* "BMP"
* "GIF"
`line_pad`
The extra spacing (in pixels) between each line of text.
Default: 2
`font_name`
        The font name to be used as the base font, from which others, such as
        the bold and italic fonts, will be generated. This really should be a
        monospaced font to look sane.
Default: "Bitstream Vera Sans Mono"
`font_size`
The font size in points to be used.
Default: 14
`image_pad`
The padding, in pixels to be used at each edge of the resulting image.
Default: 10
`line_numbers`
Whether line numbers should be shown: True/False
Default: True
`line_number_start`
The line number of the first line.
Default: 1
`line_number_step`
The step used when printing line numbers.
Default: 1
`line_number_bg`
The background colour (in "#123456" format) of the line number bar, or
None to use the style background color.
Default: "#eed"
`line_number_fg`
The text color of the line numbers (in "#123456"-like format).
Default: "#886"
`line_number_chars`
The number of columns of line numbers allowable in the line number
margin.
Default: 2
`line_number_bold`
Whether line numbers will be bold: True/False
Default: False
`line_number_italic`
Whether line numbers will be italicized: True/False
Default: False
`line_number_separator`
Whether a line will be drawn between the line number area and the
source code area: True/False
Default: True
`line_number_pad`
The horizontal padding (in pixels) between the line number margin, and
the source code area.
Default: 6
`hl_lines`
Specify a list of lines to be highlighted. *New in Pygments 1.2.*
Default: empty list
`hl_color`
Specify the color for highlighting lines. *New in Pygments 1.2.*
Default: highlight color of the selected style
"""
# Required by the pygments mapper
name = 'img'
aliases = ['img', 'IMG', 'png']
filenames = ['*.png']
unicodeoutput = False
default_image_format = 'png'
def __init__(self, **options):
"""
See the class docstring for explanation of options.
"""
if not pil_available:
raise PilNotAvailable(
'Python Imaging Library is required for this formatter')
Formatter.__init__(self, **options)
# Read the style
self.styles = dict(self.style)
if self.style.background_color is None:
self.background_color = '#fff'
else:
self.background_color = self.style.background_color
# Image options
self.image_format = get_choice_opt(
options, 'image_format', ['png', 'jpeg', 'gif', 'bmp'],
self.default_image_format, normcase=True)
self.image_pad = get_int_opt(options, 'image_pad', 10)
self.line_pad = get_int_opt(options, 'line_pad', 2)
# The fonts
fontsize = get_int_opt(options, 'font_size', 14)
self.fonts = FontManager(options.get('font_name', ''), fontsize)
self.fontw, self.fonth = self.fonts.get_char_size()
# Line number options
self.line_number_fg = options.get('line_number_fg', '#886')
self.line_number_bg = options.get('line_number_bg', '#eed')
self.line_number_chars = get_int_opt(options,
'line_number_chars', 2)
self.line_number_bold = get_bool_opt(options,
'line_number_bold', False)
self.line_number_italic = get_bool_opt(options,
'line_number_italic', False)
self.line_number_pad = get_int_opt(options, 'line_number_pad', 6)
self.line_numbers = get_bool_opt(options, 'line_numbers', True)
self.line_number_separator = get_bool_opt(options,
'line_number_separator', True)
self.line_number_step = get_int_opt(options, 'line_number_step', 1)
self.line_number_start = get_int_opt(options, 'line_number_start', 1)
if self.line_numbers:
self.line_number_width = (self.fontw * self.line_number_chars +
self.line_number_pad * 2)
else:
self.line_number_width = 0
self.hl_lines = []
hl_lines_str = get_list_opt(options, 'hl_lines', [])
for line in hl_lines_str:
try:
self.hl_lines.append(int(line))
except ValueError:
pass
self.hl_color = options.get('hl_color',
self.style.highlight_color) or '#f90'
self.drawables = []
def get_style_defs(self, arg=''):
raise NotImplementedError('The -S option is meaningless for the image '
'formatter. Use -O style=<stylename> instead.')
def _get_line_height(self):
"""
Get the height of a line.
"""
return self.fonth + self.line_pad
def _get_line_y(self, lineno):
"""
Get the Y coordinate of a line number.
"""
return lineno * self._get_line_height() + self.image_pad
def _get_char_width(self):
"""
Get the width of a character.
"""
return self.fontw
def _get_char_x(self, charno):
"""
Get the X coordinate of a character position.
"""
return charno * self.fontw + self.image_pad + self.line_number_width
def _get_text_pos(self, charno, lineno):
"""
Get the actual position for a character and line position.
"""
return self._get_char_x(charno), self._get_line_y(lineno)
def _get_linenumber_pos(self, lineno):
"""
Get the actual position for the start of a line number.
"""
return (self.image_pad, self._get_line_y(lineno))
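    # Worked example (illustrative): with fontw=8, fonth=14, line_pad=2,
    # image_pad=10 and a 22px line-number margin, character (charno=4,
    # lineno=3) lands at x = 4*8 + 10 + 22 = 64, y = 3*(14+2) + 10 = 58.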
def _get_text_color(self, style):
"""
Get the correct color for the token from the style.
"""
if style['color'] is not None:
fill = '#' + style['color']
else:
fill = '#000'
return fill
def _get_style_font(self, style):
"""
Get the correct font for the style.
"""
return self.fonts.get_font(style['bold'], style['italic'])
def _get_image_size(self, maxcharno, maxlineno):
"""
Get the required image size.
"""
return (self._get_char_x(maxcharno) + self.image_pad,
                self._get_line_y(maxlineno) + self.image_pad)
def _draw_linenumber(self, posno, lineno):
"""
Remember a line number drawable to paint later.
"""
self._draw_text(
self._get_linenumber_pos(posno),
str(lineno).rjust(self.line_number_chars),
font=self.fonts.get_font(self.line_number_bold,
self.line_number_italic),
fill=self.line_number_fg,
)
def _draw_text(self, pos, text, font, **kw):
"""
Remember a single drawable tuple to paint later.
"""
self.drawables.append((pos, text, font, kw))
def _create_drawables(self, tokensource):
"""
Create drawables for the token content.
"""
lineno = charno = maxcharno = 0
for ttype, value in tokensource:
while ttype not in self.styles:
ttype = ttype.parent
style = self.styles[ttype]
# TODO: make sure tab expansion happens earlier in the chain. It
# really ought to be done on the input, as to do it right here is
# quite complex.
value = value.expandtabs(4)
lines = value.splitlines(True)
for i, line in enumerate(lines):
temp = line.rstrip('\n')
if temp:
self._draw_text(
self._get_text_pos(charno, lineno),
temp,
font = self._get_style_font(style),
fill = self._get_text_color(style)
)
charno += len(temp)
maxcharno = max(maxcharno, charno)
if line.endswith('\n'):
# add a line for each extra line in the value
charno = 0
lineno += 1
self.maxcharno = maxcharno
self.maxlineno = lineno
def _draw_line_numbers(self):
"""
Create drawables for the line numbers.
"""
if not self.line_numbers:
return
for p in xrange(self.maxlineno):
n = p + self.line_number_start
if (n % self.line_number_step) == 0:
self._draw_linenumber(p, n)
def _paint_line_number_bg(self, im):
"""
Paint the line number background on the image.
"""
if not self.line_numbers:
return
if self.line_number_fg is None:
return
draw = ImageDraw.Draw(im)
recth = im.size[-1]
rectw = self.image_pad + self.line_number_width - self.line_number_pad
draw.rectangle([(0, 0),
(rectw, recth)],
fill=self.line_number_bg)
draw.line([(rectw, 0), (rectw, recth)], fill=self.line_number_fg)
del draw
def format(self, tokensource, outfile):
"""
Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
tuples and write it into ``outfile``.
This implementation calculates where it should draw each token on the
pixmap, then calculates the required pixmap size and draws the items.
"""
self._create_drawables(tokensource)
self._draw_line_numbers()
im = Image.new(
'RGB',
self._get_image_size(self.maxcharno, self.maxlineno),
self.background_color
)
self._paint_line_number_bg(im)
draw = ImageDraw.Draw(im)
# Highlight
if self.hl_lines:
x = self.image_pad + self.line_number_width - self.line_number_pad + 1
recth = self._get_line_height()
rectw = im.size[0] - x
for linenumber in self.hl_lines:
y = self._get_line_y(linenumber - 1)
draw.rectangle([(x, y), (x + rectw, y + recth)],
fill=self.hl_color)
for pos, value, font, kw in self.drawables:
draw.text(pos, value, font=font, **kw)
im.save(outfile, self.image_format.upper())
# Add one formatter per format, so that the "-f gif" option gives the correct result
# when used in pygmentize.
class GifImageFormatter(ImageFormatter):
"""
Create a GIF image from source code. This uses the Python Imaging Library to
generate a pixmap from the source code.
*New in Pygments 1.0.* (You could create GIF images before by passing a
suitable `image_format` option to the `ImageFormatter`.)
"""
name = 'img_gif'
aliases = ['gif']
filenames = ['*.gif']
default_image_format = 'gif'
class JpgImageFormatter(ImageFormatter):
"""
Create a JPEG image from source code. This uses the Python Imaging Library to
generate a pixmap from the source code.
*New in Pygments 1.0.* (You could create JPEG images before by passing a
suitable `image_format` option to the `ImageFormatter`.)
"""
name = 'img_jpg'
aliases = ['jpg', 'jpeg']
filenames = ['*.jpg']
default_image_format = 'jpeg'
class BmpImageFormatter(ImageFormatter):
"""
Create a bitmap image from source code. This uses the Python Imaging Library to
generate a pixmap from the source code.
*New in Pygments 1.0.* (You could create bitmap images before by passing a
suitable `image_format` option to the `ImageFormatter`.)
"""
name = 'img_bmp'
aliases = ['bmp', 'bitmap']
filenames = ['*.bmp']
default_image_format = 'bmp'
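# Hedged usage sketch (not part of the original module): render a snippet to a
# PNG file with line numbers, assuming Pygments and PIL are importable and a
# suitable monospaced font is installed.
if __name__ == '__main__':
    from pygments import highlight
    from pygments.lexers import PythonLexer
    with open('demo.png', 'wb') as fp:
        highlight('def double(x):\n    return x * 2\n', PythonLexer(),
                  ImageFormatter(font_size=14, line_numbers=True), fp)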
|
havard024/prego
|
refs/heads/master
|
venv/lib/python2.7/site-packages/django/utils/translation/trans_null.py
|
113
|
# These are versions of the functions in django.utils.translation.trans_real
# that don't actually do anything. This is purely for performance, so that
# settings.USE_I18N = False can use this module rather than trans_real.py.
from django.conf import settings
from django.utils.encoding import force_text
from django.utils.safestring import mark_safe, SafeData
def ngettext(singular, plural, number):
    if number == 1:
        return singular
return plural
ngettext_lazy = ngettext
def ungettext(singular, plural, number):
return force_text(ngettext(singular, plural, number))
def pgettext(context, message):
return ugettext(message)
def npgettext(context, singular, plural, number):
return ungettext(singular, plural, number)
activate = lambda x: None
deactivate = deactivate_all = lambda: None
get_language = lambda: settings.LANGUAGE_CODE
get_language_bidi = lambda: settings.LANGUAGE_CODE in settings.LANGUAGES_BIDI
check_for_language = lambda x: True
# Date formats shouldn't be looked up via gettext anymore. This
# is kept for backward compatibility.
TECHNICAL_ID_MAP = {
"DATE_WITH_TIME_FULL": settings.DATETIME_FORMAT,
"DATE_FORMAT": settings.DATE_FORMAT,
"DATETIME_FORMAT": settings.DATETIME_FORMAT,
"TIME_FORMAT": settings.TIME_FORMAT,
"YEAR_MONTH_FORMAT": settings.YEAR_MONTH_FORMAT,
"MONTH_DAY_FORMAT": settings.MONTH_DAY_FORMAT,
}
def gettext(message):
result = TECHNICAL_ID_MAP.get(message, message)
if isinstance(message, SafeData):
return mark_safe(result)
return result
def ugettext(message):
return force_text(gettext(message))
gettext_noop = gettext_lazy = _ = gettext
def to_locale(language):
p = language.find('-')
if p >= 0:
return language[:p].lower()+'_'+language[p+1:].upper()
else:
return language.lower()
def get_language_from_request(request, check_path=False):
return settings.LANGUAGE_CODE
def get_language_from_path(request):
return None
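# Behaviour sketch (illustrative, not part of the original module): with
# settings.USE_I18N = False these helpers are cheap pass-throughs, e.g.
#
#   ngettext('apple', 'apples', 1)   -> 'apple'
#   ngettext('apple', 'apples', 3)   -> 'apples'
#   to_locale('pt-br')               -> 'pt_BR'
#   get_language()                   -> settings.LANGUAGE_CODE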
|
kawasaki2013/python-for-android-x86
|
refs/heads/master
|
python3-alpha/python3-src/Doc/includes/sqlite3/connect_db_2.py
|
139
|
import sqlite3
con = sqlite3.connect(":memory:")
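# Illustrative follow-up (not part of the original include): verify the
# in-memory database is usable by querying the SQLite version.
cur = con.cursor()
cur.execute("select sqlite_version()")
print(cur.fetchone()[0])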
|
luxnovalabs/enjigo_door
|
refs/heads/master
|
web_interface/dbindexer/compiler.py
|
21
|
from .resolver import resolver
from django.utils.importlib import import_module
def __repr__(self):
return '<%s, %s, %s, %s>' % (self.alias, self.col, self.field.name,
self.field.model.__name__)
from django.db.models.sql.where import Constraint
Constraint.__repr__ = __repr__
# TODO: manipulate a copy of the query instead of the query itself. This has to
# be done because the caller can reuse the query afterwards, and manipulating
# the original can result in strange behavior in those cases!
# TODO: Add watching layer which gives suggestions for indexes via query inspection
# at runtime
class BaseCompiler(object):
def convert_filters(self):
resolver.convert_filters(self.query)
class SQLCompiler(BaseCompiler):
def execute_sql(self, *args, **kwargs):
self.convert_filters()
return super(SQLCompiler, self).execute_sql(*args, **kwargs)
def results_iter(self):
self.convert_filters()
return super(SQLCompiler, self).results_iter()
def has_results(self):
self.convert_filters()
return super(SQLCompiler, self).has_results()
class SQLInsertCompiler(BaseCompiler):
def execute_sql(self, return_id=False):
resolver.convert_insert_query(self.query)
return super(SQLInsertCompiler, self).execute_sql(return_id=return_id)
class SQLUpdateCompiler(BaseCompiler):
pass
class SQLDeleteCompiler(BaseCompiler):
pass
class SQLDateCompiler(BaseCompiler):
pass
class SQLDateTimeCompiler(BaseCompiler):
pass
class SQLAggregateCompiler(BaseCompiler):
pass
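# Hedged note (not in the original module): these classes are cooperative
# mixins, not complete compilers. A nonrel backend would be expected to put
# them in front of its own compiler classes so convert_filters() runs before
# the query executes, e.g. (hypothetical backend names):
#
#   from somebackend.compiler import SQLCompiler as BackendSQLCompiler
#
#   class MixedSQLCompiler(SQLCompiler, BackendSQLCompiler):
#       pass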
|