# mne/report.py
"""Generate self-contained HTML reports from MNE objects."""
# Authors: Alex Gramfort <[email protected]>
# Mainak Jas <[email protected]>
# Teon Brooks <[email protected]>
#
# License: BSD (3-clause)
import base64
from io import BytesIO
import os
import os.path as op
import fnmatch
import re
import codecs
from shutil import copyfile
import time
from glob import glob
import warnings
import webbrowser
import numpy as np
from . import read_evokeds, read_events, pick_types, read_cov
from .fixes import _get_img_fdata
from .io import read_raw_fif, read_info, _stamp_to_dt
from .io.pick import _DATA_CH_TYPES_SPLIT
from .utils import (logger, verbose, get_subjects_dir, warn, _import_mlab,
fill_doc, _check_option)
from .viz import plot_events, plot_alignment, plot_cov
from .viz._3d import _plot_mri_contours
from .forward import read_forward_solution
from .epochs import read_epochs
from .minimum_norm import read_inverse_operator
from .parallel import parallel_func, check_n_jobs
from .externals.tempita import HTMLTemplate, Template
from .externals.h5io import read_hdf5, write_hdf5
VALID_EXTENSIONS = ['raw.fif', 'raw.fif.gz', 'sss.fif', 'sss.fif.gz',
'-eve.fif', '-eve.fif.gz', '-cov.fif', '-cov.fif.gz',
'-trans.fif', '-trans.fif.gz', '-fwd.fif', '-fwd.fif.gz',
'-epo.fif', '-epo.fif.gz', '-inv.fif', '-inv.fif.gz',
'-ave.fif', '-ave.fif.gz', 'T1.mgz']
SECTION_ORDER = ['raw', 'events', 'epochs', 'evoked', 'covariance', 'trans',
'mri', 'forward', 'inverse']
###############################################################################
# PLOTTING FUNCTIONS
def _ndarray_to_fig(img):
"""Convert to MPL figure, adapted from matplotlib.image.imsave."""
figsize = np.array(img.shape[:2][::-1]) / 100.
fig = _figure_agg(dpi=100, figsize=figsize, frameon=False)
fig.figimage(img)
return fig
def _fig_to_img(fig, image_format='png', scale=None, **kwargs):
"""Plot figure and create a binary image."""
# fig can be ndarray, mpl Figure, Mayavi Figure, or callable that produces
# a mpl Figure
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
if isinstance(fig, np.ndarray):
fig = _ndarray_to_fig(fig)
elif callable(fig):
plt.close('all')
fig = fig(**kwargs)
elif not isinstance(fig, Figure):
mlab = None
try:
mlab = _import_mlab()
# on some systems importing Mayavi raises SystemExit (!)
except Exception:
is_mayavi = False
else:
import mayavi
is_mayavi = isinstance(fig, mayavi.core.scene.Scene)
if not is_mayavi:
raise TypeError('Each fig must be a matplotlib Figure, mayavi '
'Scene, or NumPy ndarray, got %s (type %s)'
% (fig, type(fig)))
if fig.scene is not None:
img = mlab.screenshot(figure=fig)
else: # Testing mode
img = np.zeros((2, 2, 3))
mlab.close(fig)
fig = _ndarray_to_fig(img)
output = BytesIO()
if scale is not None:
_scale_mpl_figure(fig, scale)
logger.debug('Saving figure %s with dpi %s'
% (fig.get_size_inches(), fig.get_dpi()))
with warnings.catch_warnings(record=True):
warnings.simplefilter('ignore') # incompatible axes
fig.savefig(output, format=image_format, dpi=fig.get_dpi(),
bbox_inches='tight')
plt.close(fig)
output = output.getvalue()
return (output.decode('utf-8') if image_format == 'svg' else
base64.b64encode(output).decode('ascii'))
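# Usage sketch (illustrative comment only, not executed): _fig_to_img accepts
# a matplotlib Figure, a NumPy image array, a Mayavi Scene, or a callable that
# returns a Figure; the figure built below is a made-up example.
#
#     import matplotlib.pyplot as plt
#     fig, ax = plt.subplots()
#     ax.plot([0, 1], [0, 1])
#     png_b64 = _fig_to_img(fig, image_format='png', scale=1.5)
#     svg_text = _fig_to_img(lambda: plt.figure(), image_format='svg')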
def _scale_mpl_figure(fig, scale):
"""Magic scaling helper.
Keeps font-size and artist sizes constant
0.5 : current font - 4pt
2.0 : current font + 4pt
XXX it's unclear why this works, but good to go for most cases
"""
scale = float(scale)
fig.set_size_inches(fig.get_size_inches() * scale)
fig.set_dpi(fig.get_dpi() * scale)
import matplotlib as mpl
if scale >= 1:
sfactor = scale ** 2
else:
sfactor = -((1. / scale) ** 2)
for text in fig.findobj(mpl.text.Text):
fs = text.get_fontsize()
new_size = fs + sfactor
if new_size <= 0:
raise ValueError('could not rescale matplotlib fonts, consider '
'increasing "scale"')
text.set_fontsize(new_size)
fig.canvas.draw()
def _figs_to_mrislices(sl, n_jobs, **kwargs):
import matplotlib.pyplot as plt
plt.close('all')
use_jobs = min(n_jobs, max(1, len(sl)))
parallel, p_fun, _ = parallel_func(_plot_mri_contours, use_jobs)
outs = parallel(p_fun(slices=s, **kwargs)
for s in np.array_split(sl, use_jobs))
for o in outs[1:]:
outs[0] += o
return outs[0]
def _iterate_trans_views(function, **kwargs):
"""Auxiliary function to iterate over views in trans fig."""
import matplotlib.pyplot as plt
from mayavi import mlab, core
from pyface.api import GUI
fig = function(**kwargs)
gui = GUI()
gui.process_events()
assert isinstance(fig, core.scene.Scene)
views = [(90, 90), (0, 90), (0, -90)]
fig2, axes = plt.subplots(1, len(views))
for view, ax in zip(views, axes):
mlab.view(view[0], view[1])
gui.process_events()
if fig.scene is not None:
im = mlab.screenshot(figure=fig)
else: # Testing mode
im = np.zeros((2, 2, 3))
ax.imshow(im)
ax.axis('off')
mlab.close(fig)
img = _fig_to_img(fig2, image_format='png')
return img
###############################################################################
# TOC FUNCTIONS
def _is_bad_fname(fname):
"""Identify bad file naming patterns and highlight them in the TOC."""
if fname.endswith('(whitened)'):
fname = fname[:-11]
if not fname.endswith(tuple(VALID_EXTENSIONS + ['bem', 'custom'])):
return 'red'
else:
return ''
def _get_fname(fname):
"""Get fname without -#-."""
if '-#-' in fname:
fname = fname.split('-#-')[0]
else:
fname = op.basename(fname)
fname = ' ... %s' % fname
return fname
def _get_toc_property(fname):
"""Assign class names to TOC elements to allow toggling with buttons."""
if fname.endswith(('-eve.fif', '-eve.fif.gz')):
div_klass = 'events'
tooltip = fname
text = op.basename(fname)
elif fname.endswith(('-ave.fif', '-ave.fif.gz')):
div_klass = 'evoked'
tooltip = fname
text = op.basename(fname)
elif fname.endswith(('-cov.fif', '-cov.fif.gz')):
div_klass = 'covariance'
tooltip = fname
text = op.basename(fname)
elif fname.endswith(('raw.fif', 'raw.fif.gz',
'sss.fif', 'sss.fif.gz')):
div_klass = 'raw'
tooltip = fname
text = op.basename(fname)
elif fname.endswith(('-trans.fif', '-trans.fif.gz')):
div_klass = 'trans'
tooltip = fname
text = op.basename(fname)
elif fname.endswith(('-fwd.fif', '-fwd.fif.gz')):
div_klass = 'forward'
tooltip = fname
text = op.basename(fname)
elif fname.endswith(('-inv.fif', '-inv.fif.gz')):
div_klass = 'inverse'
tooltip = fname
text = op.basename(fname)
elif fname.endswith(('-epo.fif', '-epo.fif.gz')):
div_klass = 'epochs'
tooltip = fname
text = op.basename(fname)
elif fname.endswith(('.nii', '.nii.gz', '.mgh', '.mgz')):
div_klass = 'mri'
tooltip = 'MRI'
text = 'MRI'
elif fname.endswith(('bem')):
div_klass = 'mri'
tooltip = 'MRI'
text = 'MRI'
elif fname.endswith('(whitened)'):
div_klass = 'evoked'
tooltip = fname
text = op.basename(fname[:-11]) + '(whitened)'
else:
div_klass = fname.split('-#-')[1]
tooltip = fname.split('-#-')[0]
text = fname.split('-#-')[0]
return div_klass, tooltip, text
def _iterate_files(report, fnames, info, cov, baseline, sfreq, on_error,
image_format):
"""Parallel process in batch mode."""
htmls, report_fnames, report_sectionlabels = [], [], []
def _update_html(html, report_fname, report_sectionlabel):
"""Update the lists above."""
htmls.append(html)
report_fnames.append(report_fname)
report_sectionlabels.append(report_sectionlabel)
for fname in fnames:
logger.info("Rendering : %s"
% op.join('...' + report.data_path[-20:],
fname))
try:
if fname.endswith(('raw.fif', 'raw.fif.gz',
'sss.fif', 'sss.fif.gz')):
html = report._render_raw(fname)
report_fname = fname
report_sectionlabel = 'raw'
elif fname.endswith(('-fwd.fif', '-fwd.fif.gz')):
html = report._render_forward(fname)
report_fname = fname
report_sectionlabel = 'forward'
elif fname.endswith(('-inv.fif', '-inv.fif.gz')):
html = report._render_inverse(fname)
report_fname = fname
report_sectionlabel = 'inverse'
elif fname.endswith(('-ave.fif', '-ave.fif.gz')):
if cov is not None:
html = report._render_whitened_evoked(fname, cov, baseline,
image_format)
report_fname = fname + ' (whitened)'
report_sectionlabel = 'evoked'
_update_html(html, report_fname, report_sectionlabel)
html = report._render_evoked(fname, baseline, image_format)
report_fname = fname
report_sectionlabel = 'evoked'
elif fname.endswith(('-eve.fif', '-eve.fif.gz')):
html = report._render_eve(fname, sfreq, image_format)
report_fname = fname
report_sectionlabel = 'events'
elif fname.endswith(('-epo.fif', '-epo.fif.gz')):
html = report._render_epochs(fname, image_format)
report_fname = fname
report_sectionlabel = 'epochs'
elif (fname.endswith(('-cov.fif', '-cov.fif.gz')) and
report.info_fname is not None):
html = report._render_cov(fname, info, image_format)
report_fname = fname
report_sectionlabel = 'covariance'
elif (fname.endswith(('-trans.fif', '-trans.fif.gz')) and
report.info_fname is not None and report.subjects_dir
is not None and report.subject is not None):
html = report._render_trans(fname, report.data_path, info,
report.subject,
report.subjects_dir)
report_fname = fname
report_sectionlabel = 'trans'
else:
html = None
report_fname = None
report_sectionlabel = None
except Exception as e:
if on_error == 'warn':
warn('Failed to process file %s:\n"%s"' % (fname, e))
elif on_error == 'raise':
raise
html = None
report_fname = None
report_sectionlabel = None
_update_html(html, report_fname, report_sectionlabel)
return htmls, report_fnames, report_sectionlabels
def open_report(fname, **params):
"""Read a saved report or, if it doesn't exist yet, create a new one.
The returned report can be used as a context manager, in which case any
changes to the report are saved when exiting the context block.
Parameters
----------
fname : str
The file containing the report, stored in the HDF5 format. If the file
does not exist yet, a new report is created that will be saved to the
specified file.
**params : kwargs
When creating a new report, any named parameters other than ``fname``
are passed to the ``__init__`` function of the `Report` object. When
reading an existing report, the parameters are checked with the
loaded report and an exception is raised when they don't match.
Returns
-------
report : instance of Report
The report.
"""
if op.exists(fname):
# Check **params with the loaded report
state = read_hdf5(fname, title='mnepython')
for param in params.keys():
if param not in state:
raise ValueError('The loaded report has no attribute %s' %
param)
if params[param] != state[param]:
raise ValueError("Attribute '%s' of loaded report does not "
"match the given parameter." % param)
report = Report()
report.__setstate__(state)
else:
report = Report(**params)
# Keep track of the filename in case the Report object is used as a context
# manager.
report._fname = fname
return report
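# Usage sketch (the file name and figure are hypothetical): when used as a
# context manager, the report is written back to the HDF5 file on exiting the
# block.
#
#     with open_report('sub-01-report.h5', title='Subject 01') as report:
#         report.add_figs_to_section(fig, captions='ERF', section='evoked')
#     # __exit__ calls report.save('sub-01-report.h5', overwrite=True)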
###############################################################################
# IMAGE FUNCTIONS
def _figure_agg(**kwargs):
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
fig = Figure(**kwargs)
FigureCanvas(fig)
return fig
def _build_image_png(data, cmap='gray'):
"""Build an image encoded in base64."""
import matplotlib.pyplot as plt
figsize = data.shape[::-1]
if figsize[0] == 1:
figsize = tuple(figsize[1:])
data = data[:, :, 0]
fig = _figure_agg(figsize=figsize, dpi=1.0, frameon=False)
cmap = getattr(plt.cm, cmap, plt.cm.gray)
fig.figimage(data, cmap=cmap)
output = BytesIO()
fig.savefig(output, dpi=fig.get_dpi(), format='png')
return base64.b64encode(output.getvalue()).decode('ascii')
def _iterate_sagittal_slices(array, limits=None):
"""Iterate sagittal slices."""
shape = array.shape[0]
for ind in range(shape):
if limits and ind not in limits:
continue
yield ind, array[ind, :, :]
def _iterate_axial_slices(array, limits=None):
"""Iterate axial slices."""
shape = array.shape[1]
for ind in range(shape):
if limits and ind not in limits:
continue
yield ind, array[:, ind, :]
def _iterate_coronal_slices(array, limits=None):
"""Iterate coronal slices."""
shape = array.shape[2]
for ind in range(shape):
if limits and ind not in limits:
continue
yield ind, np.flipud(np.rot90(array[:, :, ind]))
def _iterate_mri_slices(name, ind, global_id, slides_klass, data, cmap):
"""Auxiliary function for parallel processing of mri slices."""
img_klass = 'slideimg-%s' % name
caption = u'Slice %s %s' % (name, ind)
slice_id = '%s-%s-%s' % (name, global_id, ind)
div_klass = 'span12 %s' % slides_klass
img = _build_image_png(data, cmap=cmap)
first = True if ind == 0 else False
html = _build_html_image(img, slice_id, div_klass, img_klass, caption,
first, image_format='png')
return ind, html
###############################################################################
# HTML functions
def _build_html_image(img, id, div_klass, img_klass, caption=None,
show=True, image_format='png'):
"""Build a html image from a slice array."""
html = []
add_style = u'' if show else u'style="display: none"'
html.append(u'<li class="%s" id="%s" %s>' % (div_klass, id, add_style))
html.append(u'<div class="thumbnail">')
if image_format == 'png':
html.append(u'<img class="%s" alt="" style="width:90%%;" '
'src="data:image/png;base64,%s">'
% (img_klass, img))
else:
html.append(u'<div style="text-align:center;" class="%s">%s</div>'
% (img_klass, img))
html.append(u'</div>')
if caption:
html.append(u'<h4>%s</h4>' % caption)
html.append(u'</li>')
return u'\n'.join(html)
slider_template = HTMLTemplate(u"""
<script>$("#{{slider_id}}").slider({
range: "min",
/*orientation: "vertical",*/
min: {{minvalue}},
max: {{maxvalue}},
step: {{step}},
value: {{startvalue}},
create: function(event, ui) {
$(".{{klass}}").hide();
$("#{{klass}}-{{startvalue}}").show();},
stop: function(event, ui) {
var list_value = $("#{{slider_id}}").slider("value");
$(".{{klass}}").hide();
$("#{{klass}}-"+list_value).show();}
})</script>
""")
slider_full_template = Template(u"""
<li class="{{div_klass}}" id="{{id}}">
<h4>{{title}}</h4>
<div class="thumbnail">
<ul><li class="slider">
<div class="row">
<div class="col-md-6 col-md-offset-3">
<div id="{{slider_id}}"></div>
<ul class="thumbnail">
{{image_html}}
</ul>
{{html}}
</div>
</div>
</li></ul>
</div>
</li>
""")
def _build_html_slider(slices_range, slides_klass, slider_id,
start_value=None):
"""Build an html slider for a given slices range and a slices klass."""
if start_value is None:
start_value = slices_range[len(slices_range) // 2]
with warnings.catch_warnings(record=True):
warnings.simplefilter('ignore')
out = slider_template.substitute(
slider_id=slider_id, klass=slides_klass,
step=slices_range[1] - slices_range[0],
minvalue=slices_range[0], maxvalue=slices_range[-1],
startvalue=start_value)
return out
###############################################################################
# HTML scan renderer
header_template = Template(u"""
<!DOCTYPE html>
<html lang="{{lang}}">
<head>
{{include}}
<script type="text/javascript">
var toggle_state = false;
$(document).on('keydown', function (event) {
if (event.which == 84){
if (!toggle_state)
$('.has_toggle').trigger('click');
else if (toggle_state)
$('.has_toggle').trigger('click');
toggle_state = !toggle_state;
}
});
function togglebutton(class_name){
$(class_name).toggle();
if ($(class_name + '-btn').hasClass('active'))
$(class_name + '-btn').removeClass('active');
else
$(class_name + '-btn').addClass('active');
}
/* Scroll down on click to #id so that caption is not hidden
by navbar */
var shiftWindow = function() { scrollBy(0, -60) };
if (location.hash) shiftWindow();
window.addEventListener("hashchange", shiftWindow);
</script>
<style type="text/css">
body {
line-height: 1.5em;
font-family: arial, sans-serif;
}
h1 {
font-size: 30px;
text-align: center;
}
h4 {
text-align: center;
}
@link-color: @brand-primary;
@link-hover-color: darken(@link-color, 15%);
a{
color: @link-color;
&:hover {
color: @link-hover-color;
text-decoration: underline;
}
}
li{
list-style-type:none;
}
#wrapper {
text-align: left;
margin: 5em auto;
width: 700px;
}
#container{
position: relative;
}
#content{
margin-left: 22%;
margin-top: 60px;
width: 75%;
}
#toc {
margin-top: navbar-height;
position: fixed;
width: 20%;
height: 90%;
overflow: auto;
}
#toc li {
overflow: hidden;
padding-bottom: 2px;
margin-left: 20px;
}
#toc span {
float: left;
padding: 0 2px 3px 0;
}
div.footer {
background-color: #C0C0C0;
color: #000000;
padding: 3px 8px 3px 0;
clear: both;
font-size: 0.8em;
text-align: right;
}
</style>
</head>
<body>
<nav class="navbar navbar-inverse navbar-fixed-top" role="navigation">
<div class="container-fluid">
<div class="navbar-header navbar-left">
<ul class="nav nav-pills"><li class="active">
<a class="navbar-btn" data-toggle="collapse"
data-target="#viewnavbar" href="javascript:void(0)">
></a></li></ul>
</div>
<h3 class="navbar-text" style="color:white">{{title}}</h3>
<ul class="nav nav-pills navbar-right" style="margin-top: 7px;"
id="viewnavbar">
{{for section in sections}}
<li class="active {{sectionvars[section]}}-btn">
<a href="javascript:void(0)"
onclick="togglebutton('.{{sectionvars[section]}}')"
class="has_toggle">
{{section if section != 'mri' else 'MRI'}}
</a>
</li>
{{endfor}}
</ul>
</div>
</nav>
""")
footer_template = HTMLTemplate(u"""
</div></body>
<div class="footer">
© Copyright 2012-{{current_year}}, MNE Developers.
Created on {{date}}.
Powered by <a href="http://mne.tools/">MNE</a>.
</div>
</html>
""")
html_template = Template(u"""
<li class="{{div_klass}}" id="{{id}}">
<h4>{{caption}}</h4>
<div class="thumbnail">{{html}}</div>
</li>
""")
image_template = Template(u"""
{{default interactive = False}}
{{default width = 50}}
{{default id = False}}
{{default image_format = 'png'}}
{{default scale = None}}
{{default comment = None}}
<li class="{{div_klass}}" {{if id}}id="{{id}}"{{endif}}
{{if not show}}style="display: none"{{endif}}>
{{if caption}}
<h4>{{caption}}</h4>
{{endif}}
<div class="thumbnail">
{{if not interactive}}
{{if image_format == 'png'}}
{{if scale is not None}}
<img alt="" style="width:{{width}}%;"
src="data:image/png;base64,{{img}}">
{{else}}
<img alt=""
src="data:image/png;base64,{{img}}">
{{endif}}
{{elif image_format == 'gif'}}
{{if scale is not None}}
<img alt="" style="width:{{width}}%;"
src="data:image/gif;base64,{{img}}">
{{else}}
<img alt=""
src="data:image/gif;base64,{{img}}">
{{endif}}
{{elif image_format == 'svg'}}
<div style="text-align:center;">
{{img}}
</div>
{{endif}}
{{if comment is not None}}
<br><br>
<div style="text-align:center;">
<style>
p.test {word-wrap: break-word;}
</style>
<p class="test">
{{comment}}
</p>
</div>
{{endif}}
{{else}}
<center>{{interactive}}</center>
{{endif}}
</div>
</li>
""")
repr_template = Template(u"""
<li class="{{div_klass}}" id="{{id}}">
<h4>{{caption}}</h4><hr>
{{repr}}
<hr></li>
""")
raw_template = Template(u"""
<li class="{{div_klass}}" id="{{id}}">
<h4>{{caption}}</h4>
<table class="table table-hover">
<tr>
<th>Measurement date</th>
{{if meas_date is not None}}
<td>{{meas_date}}</td>
{{else}}<td>Unknown</td>{{endif}}
</tr>
<tr>
<th>Experimenter</th>
{{if info['experimenter'] is not None}}
<td>{{info['experimenter']}}</td>
{{else}}<td>Unknown</td>{{endif}}
</tr>
<tr>
<th>Digitized points</th>
{{if info['dig'] is not None}}
<td>{{len(info['dig'])}} points</td>
{{else}}
<td>Not available</td>
{{endif}}
</tr>
<tr>
<th>Good channels</th>
<td>{{n_mag}} magnetometer, {{n_grad}} gradiometer,
and {{n_eeg}} EEG channels</td>
</tr>
<tr>
<th>Bad channels</th>
{{if info['bads'] is not None}}
<td>{{', '.join(info['bads'])}}</td>
{{else}}<td>None</td>{{endif}}
</tr>
<tr>
<th>EOG channels</th>
<td>{{eog}}</td>
</tr>
<tr>
<th>ECG channels</th>
<td>{{ecg}}</td>
</tr>
<tr>
<th>Measurement time range</th>
<td>{{u'%0.2f' % tmin}} to {{u'%0.2f' % tmax}} sec.</td>
</tr>
<tr>
<th>Sampling frequency</th>
<td>{{u'%0.2f' % info['sfreq']}} Hz</td>
</tr>
<tr>
<th>Highpass</th>
<td>{{u'%0.2f' % info['highpass']}} Hz</td>
</tr>
<tr>
<th>Lowpass</th>
<td>{{u'%0.2f' % info['lowpass']}} Hz</td>
</tr>
</table>
</li>
""")
toc_list = Template(u"""
<li class="{{div_klass}}">
{{if id}}
<a href="javascript:void(0)" onclick="window.location.hash={{id}};">
{{endif}}
<span title="{{tooltip}}" style="color:{{color}}"> {{text}}</span>
{{if id}}</a>{{endif}}
</li>
""")
def _check_scale(scale):
"""Ensure valid scale value is passed."""
if np.isscalar(scale) and scale <= 0:
raise ValueError('scale must be positive, not %s' % scale)
def _check_image_format(rep, image_format):
"""Ensure fmt is valid."""
if rep is None:
_check_option('image_format', image_format, ['png', 'svg'])
elif image_format is not None:
_check_option('image_format', image_format, ['png', 'svg', None])
else: # rep is not None and image_format is None
image_format = rep.image_format
return image_format
@fill_doc
class Report(object):
r"""Object for rendering HTML.
Parameters
----------
info_fname : str
Name of the file containing the info dictionary.
subjects_dir : str | None
Path to the SUBJECTS_DIR. If None, the path is obtained by using
the environment variable SUBJECTS_DIR.
subject : str | None
Subject name.
title : str
Title of the report.
cov_fname : str
Name of the file containing the noise covariance.
baseline : None or tuple of length 2 (default (None, 0))
The time interval to apply baseline correction for evokeds.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
The baseline (a, b) includes both endpoints, i.e. all
timepoints t such that a <= t <= b.
image_format : str
Default image format to use (default is 'png').
SVG uses vector graphics, so fidelity is higher but can increase
file size and browser image rendering time as well.
.. versionadded:: 0.15
raw_psd : bool | dict
If True, include PSD plots for raw files. Can be False (default) to
omit, True to plot, or a dict to pass as ``kwargs`` to
:meth:`mne.io.Raw.plot_psd`.
.. versionadded:: 0.17
%(verbose)s
Notes
-----
See :ref:`tut-report` for an introduction to using ``mne.Report``, and
:ref:`this example <ex-report>` for an example of customizing the report
with a slider.
.. versionadded:: 0.8.0
"""
def __init__(self, info_fname=None, subjects_dir=None,
subject=None, title=None, cov_fname=None, baseline=None,
image_format='png', raw_psd=False, verbose=None):
self.info_fname = info_fname
self.cov_fname = cov_fname
self.baseline = baseline
self.subjects_dir = get_subjects_dir(subjects_dir, raise_error=False)
self.subject = subject
self.title = title
self.image_format = _check_image_format(None, image_format)
self.verbose = verbose
self.initial_id = 0
self.html = []
self.fnames = [] # List of file names rendered
self.sections = [] # List of sections
self.lang = 'en-us' # language setting for the HTML file
self._sectionlabels = [] # Section labels
self._sectionvars = {} # Section variable names in js
# boolean to specify if sections should be ordered in natural
# order of processing (raw -> events ... -> inverse)
self._sort_sections = False
if not isinstance(raw_psd, bool) and not isinstance(raw_psd, dict):
raise TypeError('raw_psd must be bool or dict, got %s'
% (type(raw_psd),))
self.raw_psd = raw_psd
self._init_render() # Initialize the renderer
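# Construction sketch (paths are hypothetical): providing info_fname, subject
# and subjects_dir up front lets parse_folder() render covariance, trans and
# BEM sections later on.
#
#     report = Report(title='Sample dataset',
#                     info_fname='sample_audvis_raw.fif',
#                     subject='sample', subjects_dir='/path/to/subjects',
#                     image_format='svg', raw_psd=True)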
def __repr__(self):
"""Print useful info about report."""
s = '<Report | %d items' % len(self.fnames)
if self.title is not None:
s += ' | %s' % self.title
fnames = [_get_fname(f) for f in self.fnames]
if len(self.fnames) > 4:
s += '\n%s' % '\n'.join(fnames[:2])
s += '\n ...\n'
s += '\n'.join(fnames[-2:])
elif len(self.fnames) > 0:
s += '\n%s' % '\n'.join(fnames)
s += '\n>'
return s
def __len__(self):
"""Return the number of items in report."""
return len(self.fnames)
def _get_id(self):
"""Get id of plot."""
self.initial_id += 1
return self.initial_id
def _validate_input(self, items, captions, section, comments=None):
"""Validate input."""
if not isinstance(items, (list, tuple)):
items = [items]
if not isinstance(captions, (list, tuple)):
captions = [captions]
if not isinstance(comments, (list, tuple)):
if comments is None:
comments = [comments] * len(captions)
else:
comments = [comments]
if len(comments) != len(items):
raise ValueError('Comments and report items must have the same '
'length or comments should be None, got %d and %d'
% (len(comments), len(items)))
elif len(captions) != len(items):
raise ValueError('Captions and report items must have the same '
'length, got %d and %d'
% (len(captions), len(items)))
# Book-keeping of section names
if section not in self.sections:
self.sections.append(section)
self._sectionvars[section] = _clean_varnames(section)
return items, captions, comments
def remove(self, caption, section=None):
"""Remove a figure from the report.
The figure to remove is searched for by its caption. When searching by
caption, the section label can be specified as well to narrow down the
search. If multiple figures match the search criteria, the last one
will be removed.
Any empty sections will be removed as well.
Parameters
----------
caption : str
If set, search for the figure by caption.
section : str | None
If set, limit the search to the section with the given label.
Returns
-------
removed_index : int | None
The integer index of the figure that was removed, or ``None`` if no
figure matched the search criteria.
"""
# Construct the search pattern
pattern = r'^%s-#-.*-#-custom$' % caption
# Search for figures matching the search pattern, regardless of
# section
matches = [i for i, fname_ in enumerate(self.fnames)
if re.match(pattern, fname_)]
if section is not None:
# Narrow down the search to the given section
svar = self._sectionvars[section]
matches = [i for i in matches
if self._sectionlabels[i] == svar]
if len(matches) == 0:
return None
# Remove last occurrence
index = max(matches)
# Remove the figure
del self.fnames[index]
del self._sectionlabels[index]
del self.html[index]
# Remove any (now) empty sections.
# We use a list() to copy the _sectionvars dictionary, since we are
# removing elements during the loop.
for section_, sectionlabel_ in list(self._sectionvars.items()):
if sectionlabel_ not in self._sectionlabels:
self.sections.remove(section_)
del self._sectionvars[section_]
return index
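# Usage sketch (caption and section are hypothetical): the last figure whose
# caption matches is removed and its index returned, or None if nothing
# matched.
#
#     idx = report.remove(caption='ERF', section='evoked')
#     if idx is None:
#         print('no matching figure found')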
def _add_or_replace(self, fname, sectionlabel, html, replace=False):
"""Append a figure to the report, or replace it if it already exists.
Parameters
----------
fname : str
A unique identifier for the figure. If a figure with this
identifier has already been added, it will be replaced.
sectionlabel : str
The section to place the figure in.
html : str
The HTML that contains the figure.
replace : bool
Existing figures are only replaced if this is set to ``True``.
Defaults to ``False``.
"""
assert isinstance(html, str) # otherwise later will break
if replace and fname in self.fnames:
# Find last occurrence of the figure
ind = max([i for i, existing in enumerate(self.fnames)
if existing == fname])
self.fnames[ind] = fname
self._sectionlabels[ind] = sectionlabel
self.html[ind] = html
else:
# Append new record
self.fnames.append(fname)
self._sectionlabels.append(sectionlabel)
self.html.append(html)
def add_figs_to_section(self, figs, captions, section='custom',
scale=None, image_format=None, comments=None,
replace=False):
"""Append custom user-defined figures.
Parameters
----------
figs : matplotlib.figure.Figure | mlab.Figure | array | list
A figure or a list of figures to add to the report. Each figure in
the list can be an instance of :class:`matplotlib.figure.Figure`,
:class:`mayavi.core.api.Scene`, or :class:`numpy.ndarray`.
captions : str | list of str
A caption or a list of captions to the figures.
section : str
Name of the section to place the figure in. If section already
exists, the figures will be appended to the end of the section.
scale : float | None | callable
Scale the images maintaining the aspect ratio.
If None, no scaling is applied. If float, scale will determine
the relative scaling (might not work for scale <= 1 depending on
font sizes). If function, should take a figure object as input
parameter. Defaults to None.
image_format : str | None
The image format to be used for the report; can be 'png' or 'svg'.
None (default) will use the default specified during Report
class construction.
comments : None | str | list of str
A string of text or a list of strings of text to be appended after
the figure.
replace : bool
If ``True``, figures already present that have the same caption
will be replaced. Defaults to ``False``.
"""
figs, captions, comments = self._validate_input(figs, captions,
section, comments)
image_format = _check_image_format(self, image_format)
_check_scale(scale)
for fig, caption, comment in zip(figs, captions, comments):
caption = 'custom plot' if caption == '' else caption
sectionvar = self._sectionvars[section]
global_id = self._get_id()
div_klass = self._sectionvars[section]
img_klass = self._sectionvars[section]
img = _fig_to_img(fig, image_format, scale)
html = image_template.substitute(img=img, id=global_id,
div_klass=div_klass,
img_klass=img_klass,
caption=caption,
show=True,
image_format=image_format,
comment=comment)
self._add_or_replace('%s-#-%s-#-custom' % (caption, sectionvar),
sectionvar, html, replace)
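# Usage sketch (the evoked object and captions are hypothetical): any
# matplotlib Figure, Mayavi Scene or image array can be appended this way.
#
#     fig = evoked.plot(show=False)
#     report.add_figs_to_section(fig, captions='Auditory evoked response',
#                                section='evoked', comments='run 1')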
def add_images_to_section(self, fnames, captions, scale=None,
section='custom', comments=None, replace=False):
"""Append custom user-defined images.
Parameters
----------
fnames : str | list of str
A filename or a list of filenames from which images are read.
Images can be PNG, GIF or SVG.
captions : str | list of str
A caption or a list of captions to the images.
scale : float | None
Scale the images maintaining the aspect ratio.
Defaults to None. If None, no scaling will be applied.
section : str
Name of the section. If section already exists, the images
will be appended to the end of the section.
comments : None | str | list of str
A string of text or a list of strings of text to be appended after
the image.
replace : bool
If ``True``, figures already present that have the same caption
will be replaced. Defaults to ``False``.
"""
# Note: using scipy.misc is equivalent because scipy internally
# imports PIL anyway. It's not possible to redirect image output
# to binary string using scipy.misc.
fnames, captions, comments = self._validate_input(fnames, captions,
section, comments)
_check_scale(scale)
for fname, caption, comment in zip(fnames, captions, comments):
caption = 'custom plot' if caption == '' else caption
sectionvar = self._sectionvars[section]
global_id = self._get_id()
div_klass = self._sectionvars[section]
img_klass = self._sectionvars[section]
image_format = os.path.splitext(fname)[1][1:]
image_format = image_format.lower()
_check_option('image_format', image_format, ['png', 'gif', 'svg'])
# Convert image to binary string.
with open(fname, 'rb') as f:
img = base64.b64encode(f.read()).decode('ascii')
html = image_template.substitute(img=img, id=global_id,
image_format=image_format,
div_klass=div_klass,
img_klass=img_klass,
caption=caption,
width=scale,
comment=comment,
show=True)
self._add_or_replace('%s-#-%s-#-custom' % (caption, sectionvar),
sectionvar, html, replace)
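# Usage sketch (file names are hypothetical); scale is used as the display
# width in percent.
#
#     report.add_images_to_section(['topomap.png', 'butterfly.png'],
#                                  captions=['Topomap', 'Butterfly plot'],
#                                  section='evoked', scale=80)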
def add_htmls_to_section(self, htmls, captions, section='custom',
replace=False):
"""Append htmls to the report.
Parameters
----------
htmls : str | list of str
An html str or a list of html str.
captions : str | list of str
A caption or a list of captions to the htmls.
section : str
Name of the section. If section already exists, the images
will be appended to the end of the section.
replace : bool
If ``True``, figures already present that have the same caption
will be replaced. Defaults to ``False``.
Notes
-----
.. versionadded:: 0.9.0
"""
htmls, captions, _ = self._validate_input(htmls, captions, section)
for html, caption in zip(htmls, captions):
caption = 'custom plot' if caption == '' else caption
sectionvar = self._sectionvars[section]
global_id = self._get_id()
div_klass = self._sectionvars[section]
self._add_or_replace(
'%s-#-%s-#-custom' % (caption, sectionvar), sectionvar,
html_template.substitute(div_klass=div_klass, id=global_id,
caption=caption, html=html), replace)
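# Usage sketch (the HTML snippet is made up): raw HTML strings are wrapped in
# the generic html_template and placed in the requested section.
#
#     table = '<table><tr><td>n_epochs</td><td>240</td></tr></table>'
#     report.add_htmls_to_section(table, captions='Epoch counts',
#                                 section='summary')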
@fill_doc
def add_bem_to_section(self, subject, caption='BEM', section='bem',
decim=2, n_jobs=1, subjects_dir=None,
replace=False):
"""Render a bem slider html str.
Parameters
----------
subject : str
Subject name.
caption : str
A caption for the bem.
section : str
Name of the section. If section already exists, the bem
will be appended to the end of the section.
decim : int
Use this decimation factor for generating MRI/BEM images
(since it can be time consuming).
%(n_jobs)s
subjects_dir : str | None
Path to the SUBJECTS_DIR. If None, the path is obtained by using
the environment variable SUBJECTS_DIR.
replace : bool
If ``True``, figures already present that have the same caption
will be replaced. Defaults to ``False``.
Notes
-----
.. versionadded:: 0.9.0
"""
caption = 'custom plot' if caption == '' else caption
html = self._render_bem(subject=subject, subjects_dir=subjects_dir,
decim=decim, n_jobs=n_jobs, section=section,
caption=caption)
html, caption, _ = self._validate_input(html, caption, section)
sectionvar = self._sectionvars[section]
# convert list->str
assert isinstance(html, list)
html = u''.join(html)
self._add_or_replace('%s-#-%s-#-custom' % (caption[0], sectionvar),
sectionvar, html)
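# Usage sketch (subject and path are hypothetical): requires a FreeSurfer
# subjects directory containing T1.mgz and the BEM surfaces.
#
#     report.add_bem_to_section(subject='sample',
#                               subjects_dir='/path/to/subjects',
#                               decim=4, n_jobs=2)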
def add_slider_to_section(self, figs, captions=None, section='custom',
title='Slider', scale=None, image_format=None,
replace=False):
"""Render a slider of figs to the report.
Parameters
----------
figs : list of figures.
Each figure in the list can be an instance of
:class:`matplotlib.figure.Figure`,
:class:`mayavi.core.api.Scene`, or :class:`numpy.ndarray`.
Must have at least 2 elements.
captions : list of str | list of float | None
A list of captions to the figures. If float, a str will be
constructed as `%0.3f s`. If None, it will default to
`Data slice %d`.
section : str
Name of the section. If section already exists, the figures
will be appended to the end of the section.
title : str
The title of the slider.
scale : float | None | callable
Scale the images maintaining the aspect ratio.
If None, no scaling is applied. If float, scale will determine
the relative scaling (might not work for scale <= 1 depending on
font sizes). If function, should take a figure object as input
parameter. Defaults to None.
image_format : str | None
The image format to be used for the report; can be 'png' or 'svg'.
None (default) will use the default specified during Report
class construction.
replace : bool
If ``True``, figures already present that have the same caption
will be replaced. Defaults to ``False``.
Notes
-----
.. versionadded:: 0.10.0
"""
_check_scale(scale)
image_format = _check_image_format(self, image_format)
if isinstance(figs[0], list):
raise NotImplementedError('`add_slider_to_section` '
'can only add one slider at a time.')
if len(figs) < 2:
raise ValueError('figs must be at least length 2, got %s'
% (len(figs),))
figs = [figs]
figs, _, _ = self._validate_input(figs, section, section)
figs = figs[0]
sectionvar = self._sectionvars[section]
global_id = self._get_id()
name = 'slider'
html = []
slides_klass = '%s-%s' % (name, global_id)
div_klass = 'span12 %s' % slides_klass
sl = np.arange(0, len(figs))
slices = []
img_klass = 'slideimg-%s' % name
if captions is None:
captions = ['Data slice %d' % ii for ii in sl]
elif isinstance(captions, (list, tuple, np.ndarray)):
if len(figs) != len(captions):
raise ValueError('Captions must be the same length as the '
'number of slides.')
if isinstance(captions[0], (float, int)):
captions = ['%0.3f s' % caption for caption in captions]
else:
raise TypeError('Captions must be None or an iterable of '
'float, int, or str, got %s' % type(captions))
for ii, (fig, caption) in enumerate(zip(figs, captions)):
img = _fig_to_img(fig, image_format, scale)
slice_id = '%s-%s-%s' % (name, global_id, sl[ii])
first = True if ii == 0 else False
slices.append(_build_html_image(img, slice_id, div_klass,
img_klass, caption, first,
image_format=image_format))
# Render the slider
slider_id = 'select-%s-%s' % (name, global_id)
# Render the slices
image_html = u'\n'.join(slices)
html.append(_build_html_slider(sl, slides_klass, slider_id,
start_value=0))
html = '\n'.join(html)
slider_klass = sectionvar
self._add_or_replace(
'%s-#-%s-#-custom' % (title, sectionvar), sectionvar,
slider_full_template.substitute(id=global_id, title=title,
div_klass=slider_klass,
slider_id=slider_id, html=html,
image_html=image_html))
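# Usage sketch (the evoked object and times are hypothetical): at least two
# figures are required, and float captions are rendered as '%0.3f s'.
#
#     times = [0.05, 0.10, 0.15]
#     figs = [evoked.plot_topomap(times=[t], show=False) for t in times]
#     report.add_slider_to_section(figs, captions=times, title='Topomaps',
#                                  section='evoked')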
###########################################################################
# HTML rendering
def _render_one_axis(self, slices_iter, name, global_id, cmap,
n_elements, n_jobs):
"""Render one axis of the array."""
global_id = global_id or name
html = []
html.append(u'<div class="col-xs-6 col-md-4">')
slides_klass = '%s-%s' % (name, global_id)
use_jobs = min(n_jobs, max(1, n_elements))
parallel, p_fun, _ = parallel_func(_iterate_mri_slices, use_jobs)
r = parallel(p_fun(name, ind, global_id, slides_klass, data, cmap)
for ind, data in slices_iter)
slices_range, slices = zip(*r)
# Render the slider
slider_id = 'select-%s-%s' % (name, global_id)
html.append(u'<div id="%s"></div>' % slider_id)
html.append(u'<ul class="thumbnail">')
# Render the slices
html.append(u'\n'.join(slices))
html.append(u'</ul>')
html.append(_build_html_slider(slices_range, slides_klass, slider_id))
html.append(u'</div>')
return '\n'.join(html)
###########################################################################
# global rendering functions
@verbose
def _init_render(self, verbose=None):
"""Initialize the renderer."""
inc_fnames = ['jquery.js', 'jquery-ui.min.js',
'bootstrap.min.js', 'jquery-ui.min.css',
'bootstrap.min.css']
include = list()
for inc_fname in inc_fnames:
logger.info('Embedding : %s' % inc_fname)
fname = op.join(op.dirname(__file__), 'html', inc_fname)
with open(fname, 'rb') as fid:
file_content = fid.read().decode('utf-8')
if inc_fname.endswith('.js'):
include.append(u'<script type="text/javascript">' +
file_content + u'</script>')
elif inc_fname.endswith('.css'):
include.append(u'<style type="text/css">' +
file_content + u'</style>')
self.include = ''.join(include)
@verbose
def parse_folder(self, data_path, pattern='*.fif', n_jobs=1, mri_decim=2,
sort_sections=True, on_error='warn', image_format=None,
render_bem=True, verbose=None):
r"""Render all the files in the folder.
Parameters
----------
data_path : str
Path to the folder containing data whose HTML report will be
created.
pattern : str | list of str
Filename pattern(s) to include in the report.
Example: [\*raw.fif, \*ave.fif] will include Raw as well as Evoked
files.
%(n_jobs)s
mri_decim : int
Use this decimation factor for generating MRI/BEM images
(since it can be time consuming).
sort_sections : bool
If True, sort sections in the order: raw -> events -> epochs
-> evoked -> covariance -> trans -> mri -> forward -> inverse.
on_error : str
What to do if a file cannot be rendered. Can be 'ignore',
'warn' (default), or 'raise'.
image_format : str | None
The image format to be used for the report; can be 'png' or 'svg'.
None (default) will use the default specified during Report
class construction.
.. versionadded:: 0.15
render_bem : bool
If True (default), try to render the BEM.
.. versionadded:: 0.16
%(verbose_meth)s
"""
image_format = _check_image_format(self, image_format)
_check_option('on_error', on_error, ['ignore', 'warn', 'raise'])
self._sort = sort_sections
n_jobs = check_n_jobs(n_jobs)
self.data_path = data_path
if self.title is None:
self.title = 'MNE Report for ...%s' % self.data_path[-20:]
if not isinstance(pattern, (list, tuple)):
pattern = [pattern]
# iterate through the possible patterns
fnames = list()
for p in pattern:
fnames.extend(sorted(_recursive_search(self.data_path, p)))
if self.info_fname is not None:
info = read_info(self.info_fname, verbose=False)
sfreq = info['sfreq']
else:
# only warn if relevant
if any(fname.endswith(('-cov.fif', '-cov.fif.gz'))
for fname in fnames):
warn('`info_fname` not provided. Cannot render '
'-cov.fif(.gz) files.')
if any(fname.endswith(('-trans.fif', '-trans.fif.gz'))
for fname in fnames):
warn('`info_fname` not provided. Cannot render '
'-trans.fif(.gz) files.')
info, sfreq = None, None
cov = None
if self.cov_fname is not None:
cov = read_cov(self.cov_fname)
baseline = self.baseline
# render plots in parallel; check that n_jobs <= # of files
logger.info('Iterating over %s potential files (this may take some '
'time)' % len(fnames))
use_jobs = min(n_jobs, max(1, len(fnames)))
parallel, p_fun, _ = parallel_func(_iterate_files, use_jobs)
r = parallel(p_fun(self, fname, info, cov, baseline, sfreq, on_error,
image_format)
for fname in np.array_split(fnames, use_jobs))
htmls, report_fnames, report_sectionlabels = zip(*r)
# combine results from n_jobs discarding plots not rendered
self.html = [html for html in sum(htmls, []) if html is not None]
self.fnames = [fname for fname in sum(report_fnames, []) if
fname is not None]
self._sectionlabels = [slabel for slabel in
sum(report_sectionlabels, [])
if slabel is not None]
# find unique section labels
self.sections = sorted(set(self._sectionlabels))
self._sectionvars = dict(zip(self.sections, self.sections))
# render mri
if render_bem:
if self.subjects_dir is not None and self.subject is not None:
logger.info('Rendering BEM')
self.html.append(self._render_bem(
self.subject, self.subjects_dir, mri_decim, n_jobs))
self.fnames.append('bem')
self._sectionlabels.append('mri')
else:
warn('`subjects_dir` and `subject` not provided. Cannot '
'render MRI and -trans.fif(.gz) files.')
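# Usage sketch (paths and patterns are hypothetical): parse_folder() walks
# data_path recursively and renders every file matching the pattern(s).
#
#     report = Report(info_fname='sample_audvis_raw.fif',
#                     subject='sample', subjects_dir='/path/to/subjects')
#     report.parse_folder('/path/to/MEG/sample',
#                         pattern=['*raw.fif', '*-ave.fif'],
#                         render_bem=False, n_jobs=2)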
def _get_state_params(self):
"""Obtain all fields that are in the state dictionary of this object.
Returns
-------
non_opt_params : list of str
All parameters that must be present in the state dictionary.
opt_params : list of str
All parameters that are optionally present in the state dictionary.
"""
# Note: self._fname is not part of the state
return (['baseline', 'cov_fname', 'fnames', 'html', 'include',
'image_format', 'info_fname', 'initial_id', 'raw_psd',
'_sectionlabels', 'sections', '_sectionvars',
'_sort_sections', 'subjects_dir', 'subject', 'title',
'verbose'],
['data_path', 'lang', '_sort'])
def __getstate__(self):
"""Get the state of the report as a dictionary."""
state = dict()
non_opt_params, opt_params = self._get_state_params()
for param in non_opt_params:
state[param] = getattr(self, param)
for param in opt_params:
if hasattr(self, param):
state[param] = getattr(self, param)
return state
def __setstate__(self, state):
"""Set the state of the report."""
non_opt_params, opt_params = self._get_state_params()
for param in non_opt_params:
setattr(self, param, state[param])
for param in opt_params:
if param in state:
setattr(self, param, state[param])
return state
def save(self, fname=None, open_browser=True, overwrite=False):
"""Save the report and optionally open it in browser.
Parameters
----------
fname : str | None
File name of the report. If the file name ends in '.h5' or '.hdf5',
the report is saved in HDF5 format, so it can later be loaded again
with :func:`open_report`. If the file name ends in anything else,
the report is rendered to HTML. If ``None``, the report is saved as
'report.html' in ``data_path``, or in the current working directory if
``data_path`` has not been set.
Defaults to ``None``.
open_browser : bool
When saving to HTML, open the rendered HTML file in the browser after
saving if True. Defaults to True.
overwrite : bool
If True, overwrite report if it already exists. Defaults to False.
Returns
-------
fname : str
The file name to which the report was saved.
"""
if fname is None:
if not hasattr(self, 'data_path'):
self.data_path = os.getcwd()
warn('`data_path` not provided. Using %s instead'
% self.data_path)
fname = op.realpath(op.join(self.data_path, 'report.html'))
else:
fname = op.realpath(fname)
if not overwrite and op.isfile(fname):
msg = ('Report already exists at location %s. '
'Overwrite it (y/[n])? '
% fname)
answer = input(msg)
if answer.lower() == 'y':
overwrite = True
_, ext = op.splitext(fname)
is_hdf5 = ext.lower() in ['.h5', '.hdf5']
if overwrite or not op.isfile(fname):
logger.info('Saving report to location %s' % fname)
if is_hdf5:
write_hdf5(fname, self.__getstate__(), overwrite=overwrite,
title='mnepython')
else:
self._render_toc()
# Annotate the HTML with a TOC and footer.
with warnings.catch_warnings(record=True):
warnings.simplefilter('ignore')
html = footer_template.substitute(
date=time.strftime("%B %d, %Y"),
current_year=time.strftime("%Y"))
self.html.append(html)
# Writing to disk may fail. However, we need to make sure that
# the header, TOC, and footer are removed regardless, otherwise they
# will be duplicated when the user attempts to save again.
try:
# Write HTML
with codecs.open(fname, 'w', 'utf-8') as fobj:
fobj.write(_fix_global_ids(u''.join(self.html)))
finally:
self.html.pop(0)
self.html.pop(0)
self.html.pop()
building_doc = os.getenv('_MNE_BUILDING_DOC', '').lower() == 'true'
if open_browser and not is_hdf5 and not building_doc:
webbrowser.open_new_tab('file://' + fname)
self.fname = fname
return fname
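# Usage sketch (file names are hypothetical): an '.html' target renders the
# report, while '.h5'/'.hdf5' stores the state for later open_report().
#
#     report.save('report.html', open_browser=False, overwrite=True)
#     report.save('report.h5', overwrite=True)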
def __enter__(self):
"""Do nothing when entering the context block."""
return self
def __exit__(self, type, value, traceback):
"""Save the report when leaving the context block."""
if self._fname is not None:
self.save(self._fname, open_browser=False, overwrite=True)
@verbose
def _render_toc(self, verbose=None):
"""Render the Table of Contents."""
logger.info('Rendering : Table of Contents')
html_toc = u'<div id="container">'
html_toc += u'<div id="toc"><center><h4>CONTENTS</h4></center>'
global_id = 1
# Reorder self.sections to reflect natural ordering
if self._sort_sections:
sections = list(set(self.sections) & set(SECTION_ORDER))
custom = [section for section in self.sections if section
not in SECTION_ORDER]
order = [sections.index(section) for section in SECTION_ORDER if
section in sections]
self.sections = np.array(sections)[order].tolist() + custom
# Sort by section
html, fnames, sectionlabels = [], [], []
for section in self.sections:
logger.info('%s' % section)
for sectionlabel, this_html, fname in (zip(self._sectionlabels,
self.html, self.fnames)):
if self._sectionvars[section] == sectionlabel:
html.append(this_html)
fnames.append(fname)
sectionlabels.append(sectionlabel)
logger.info(_get_fname(fname))
color = _is_bad_fname(fname)
div_klass, tooltip, text = _get_toc_property(fname)
# loop through conditions for evoked
if fname.endswith(('-ave.fif', '-ave.fif.gz',
'(whitened)')):
text = os.path.basename(fname)
if fname.endswith('(whitened)'):
fname = fname[:-11]
# XXX: remove redundant read_evokeds
evokeds = read_evokeds(fname, verbose=False)
html_toc += toc_list.substitute(
div_klass=div_klass, id=None, tooltip=fname,
color='#428bca', text=text)
html_toc += u'<li class="evoked"><ul>'
for ev in evokeds:
html_toc += toc_list.substitute(
div_klass=div_klass, id=global_id,
tooltip=fname, color=color, text=ev.comment)
global_id += 1
html_toc += u'</ul></li>'
elif fname.endswith(tuple(VALID_EXTENSIONS +
['bem', 'custom'])):
html_toc += toc_list.substitute(div_klass=div_klass,
id=global_id,
tooltip=tooltip,
color=color,
text=text)
global_id += 1
html_toc += u'\n</ul></div>'
html_toc += u'<div id="content">'
# The sorted html (according to section)
self.html = html
self.fnames = fnames
self._sectionlabels = sectionlabels
lang = getattr(self, 'lang', 'en-us')
html_header = header_template.substitute(
title=self.title, include=self.include, lang=lang,
sections=self.sections, sectionvars=self._sectionvars)
self.html.insert(0, html_header) # Insert header at position 0
self.html.insert(1, html_toc) # insert TOC
def _render_array(self, array, global_id=None, cmap='gray',
limits=None, n_jobs=1):
"""Render mri without bem contours (only PNG)."""
html = []
html.append(u'<div class="thumbnail">')
# Axial
limits = limits or {}
axial_limit = limits.get('axial')
axial_slices_gen = _iterate_axial_slices(array, axial_limit)
html.append(
self._render_one_axis(axial_slices_gen, 'axial',
global_id, cmap, array.shape[1], n_jobs))
# Sagittal
sagittal_limit = limits.get('sagittal')
sagittal_slices_gen = _iterate_sagittal_slices(array, sagittal_limit)
html.append(
self._render_one_axis(sagittal_slices_gen, 'sagittal',
global_id, cmap, array.shape[1], n_jobs))
# Coronal
coronal_limit = limits.get('coronal')
coronal_slices_gen = _iterate_coronal_slices(array, coronal_limit)
html.append(
self._render_one_axis(coronal_slices_gen, 'coronal',
global_id, cmap, array.shape[1], n_jobs))
# Close section
html.append(u'</div>')
return '\n'.join(html)
def _render_one_bem_axis(self, mri_fname, surf_fnames, global_id,
shape, orientation='coronal', decim=2, n_jobs=1):
"""Render one axis of bem contours (only PNG)."""
orientation_name2axis = dict(sagittal=0, axial=1, coronal=2)
orientation_axis = orientation_name2axis[orientation]
n_slices = shape[orientation_axis]
orig_size = np.roll(shape, orientation_axis)[[1, 2]]
name = orientation
html = []
html.append(u'<div class="col-xs-6 col-md-4">')
slides_klass = '%s-%s' % (name, global_id)
sl = np.arange(0, n_slices, decim)
kwargs = dict(mri_fname=mri_fname, surf_fnames=surf_fnames, show=False,
orientation=orientation, img_output=orig_size)
imgs = _figs_to_mrislices(sl, n_jobs, **kwargs)
slices = []
img_klass = 'slideimg-%s' % name
div_klass = 'span12 %s' % slides_klass
for ii, img in enumerate(imgs):
slice_id = '%s-%s-%s' % (name, global_id, sl[ii])
caption = u'Slice %s %s' % (name, sl[ii])
first = True if ii == 0 else False
slices.append(_build_html_image(img, slice_id, div_klass,
img_klass, caption, first,
image_format='png'))
# Render the slider
slider_id = 'select-%s-%s' % (name, global_id)
html.append(u'<div id="%s"></div>' % slider_id)
html.append(u'<ul class="thumbnail">')
# Render the slices
html.append(u'\n'.join(slices))
html.append(u'</ul>')
html.append(_build_html_slider(sl, slides_klass, slider_id))
html.append(u'</div>')
return '\n'.join(html)
def _render_image_png(self, image, cmap='gray', n_jobs=1):
"""Render one slice of mri without bem as a PNG."""
import nibabel as nib
global_id = self._get_id()
if 'mri' not in self.sections:
self.sections.append('mri')
self._sectionvars['mri'] = 'mri'
nim = nib.load(image)
data = _get_img_fdata(nim)
shape = data.shape
limits = {'sagittal': range(0, shape[0], 2),
'axial': range(0, shape[1], 2),
'coronal': range(0, shape[2], 2)}
name = op.basename(image)
html = u'<li class="mri" id="%d">\n' % global_id
html += u'<h4>%s</h4>\n' % name
html += self._render_array(data, global_id=global_id,
cmap=cmap, limits=limits, n_jobs=n_jobs)
html += u'</li>\n'
return html
def _render_raw(self, raw_fname):
"""Render raw (only text)."""
import matplotlib.pyplot as plt
global_id = self._get_id()
raw = read_raw_fif(raw_fname, allow_maxshield='yes')
extra = ' (MaxShield on)' if raw.info.get('maxshield', False) else ''
caption = u'Raw : %s%s' % (raw_fname, extra)
n_eeg = len(pick_types(raw.info, meg=False, eeg=True))
n_grad = len(pick_types(raw.info, meg='grad'))
n_mag = len(pick_types(raw.info, meg='mag'))
pick_eog = pick_types(raw.info, meg=False, eog=True)
if len(pick_eog) > 0:
eog = ', '.join(np.array(raw.info['ch_names'])[pick_eog])
else:
eog = 'Not available'
pick_ecg = pick_types(raw.info, meg=False, ecg=True)
if len(pick_ecg) > 0:
ecg = ', '.join(np.array(raw.info['ch_names'])[pick_ecg])
else:
ecg = 'Not available'
meas_date = raw.info['meas_date']
if meas_date is not None:
meas_date = _stamp_to_dt(meas_date).strftime("%B %d, %Y") + ' GMT'
html = raw_template.substitute(
div_klass='raw', id=global_id, caption=caption, info=raw.info,
meas_date=meas_date, n_eeg=n_eeg, n_grad=n_grad, n_mag=n_mag,
eog=eog, ecg=ecg, tmin=raw._first_time, tmax=raw._last_time)
raw_psd = {} if self.raw_psd is True else self.raw_psd
if isinstance(raw_psd, dict):
from matplotlib.backends.backend_agg import FigureCanvasAgg
n_ax = sum(kind in raw for kind in _DATA_CH_TYPES_SPLIT)
fig, axes = plt.subplots(n_ax, 1, figsize=(6, 1 + 1.5 * n_ax),
dpi=92)
FigureCanvasAgg(fig)
img = _fig_to_img(raw.plot_psd, self.image_format,
ax=axes, **raw_psd)
new_html = image_template.substitute(
img=img, div_klass='raw', img_klass='raw',
caption='PSD', show=True, image_format=self.image_format)
html += '\n\n' + new_html
return html
def _render_forward(self, fwd_fname):
"""Render forward."""
div_klass = 'forward'
caption = u'Forward: %s' % fwd_fname
fwd = read_forward_solution(fwd_fname)
repr_fwd = re.sub('>', '', re.sub('<', '', repr(fwd)))
global_id = self._get_id()
html = repr_template.substitute(div_klass=div_klass,
id=global_id,
caption=caption,
repr=repr_fwd)
return html
def _render_inverse(self, inv_fname):
"""Render inverse."""
div_klass = 'inverse'
caption = u'Inverse: %s' % inv_fname
inv = read_inverse_operator(inv_fname)
repr_inv = re.sub('>', '', re.sub('<', '', repr(inv)))
global_id = self._get_id()
html = repr_template.substitute(div_klass=div_klass,
id=global_id,
caption=caption,
repr=repr_inv)
return html
def _render_evoked(self, evoked_fname, baseline, image_format):
"""Render evoked."""
logger.debug('Evoked: Reading %s' % evoked_fname)
evokeds = read_evokeds(evoked_fname, baseline=baseline, verbose=False)
html = []
for ei, ev in enumerate(evokeds):
global_id = self._get_id()
kwargs = dict(show=False)
logger.debug('Evoked: Plotting instance %s/%s'
% (ei + 1, len(evokeds)))
img = _fig_to_img(ev.plot, image_format, **kwargs)
caption = u'Evoked : %s (%s)' % (evoked_fname, ev.comment)
html.append(image_template.substitute(
img=img, id=global_id, div_klass='evoked',
img_klass='evoked', caption=caption, show=True,
image_format=image_format))
has_types = []
if len(pick_types(ev.info, meg=False, eeg=True)) > 0:
has_types.append('eeg')
if len(pick_types(ev.info, meg='grad', eeg=False)) > 0:
has_types.append('grad')
if len(pick_types(ev.info, meg='mag', eeg=False)) > 0:
has_types.append('mag')
for ch_type in has_types:
logger.debug(' Topomap type %s' % ch_type)
img = _fig_to_img(ev.plot_topomap, image_format,
ch_type=ch_type, **kwargs)
caption = u'Topomap (ch_type = %s)' % ch_type
html.append(image_template.substitute(
img=img, div_klass='evoked', img_klass='evoked',
caption=caption, show=True, image_format=image_format))
logger.debug('Evoked: done')
return '\n'.join(html)
def _render_eve(self, eve_fname, sfreq, image_format):
"""Render events."""
global_id = self._get_id()
events = read_events(eve_fname)
kwargs = dict(events=events, sfreq=sfreq, show=False)
img = _fig_to_img(plot_events, image_format, **kwargs)
caption = 'Events : ' + eve_fname
html = image_template.substitute(
img=img, id=global_id, div_klass='events', img_klass='events',
caption=caption, show=True, image_format=image_format)
return html
def _render_epochs(self, epo_fname, image_format):
"""Render epochs."""
global_id = self._get_id()
epochs = read_epochs(epo_fname)
kwargs = dict(subject=self.subject, show=False)
img = _fig_to_img(epochs.plot_drop_log, image_format, **kwargs)
caption = 'Epochs : ' + epo_fname
show = True
html = image_template.substitute(
img=img, id=global_id, div_klass='epochs', img_klass='epochs',
caption=caption, show=show, image_format=image_format)
return html
def _render_cov(self, cov_fname, info_fname, image_format, show_svd=True):
"""Render cov."""
global_id = self._get_id()
cov = read_cov(cov_fname)
fig, svd = plot_cov(cov, info_fname, show=False, show_svd=show_svd)
html = []
figs = [fig]
captions = ['Covariance : %s (n_samples: %s)' % (cov_fname, cov.nfree)]
if svd is not None:
figs.append(svd)
captions.append('Singular values of the noise covariance')
for fig, caption in zip(figs, captions):
img = _fig_to_img(fig, image_format)
show = True
html.append(image_template.substitute(
img=img, id=global_id, div_klass='covariance',
img_klass='covariance', caption=caption, show=show,
image_format=image_format))
return '\n'.join(html)
def _render_whitened_evoked(self, evoked_fname, noise_cov, baseline,
image_format):
"""Render whitened evoked."""
evokeds = read_evokeds(evoked_fname, verbose=False)
html = []
for ev in evokeds:
ev = read_evokeds(evoked_fname, ev.comment, baseline=baseline,
verbose=False)
global_id = self._get_id()
kwargs = dict(noise_cov=noise_cov, show=False)
img = _fig_to_img(ev.plot_white, image_format, **kwargs)
caption = u'Whitened evoked : %s (%s)' % (evoked_fname, ev.comment)
show = True
html.append(image_template.substitute(
img=img, id=global_id, div_klass='evoked',
img_klass='evoked', caption=caption, show=show,
image_format=image_format))
return '\n'.join(html)
def _render_trans(self, trans, path, info, subject, subjects_dir):
"""Render trans (only PNG)."""
kwargs = dict(info=info, trans=trans, subject=subject,
subjects_dir=subjects_dir)
try:
img = _iterate_trans_views(function=plot_alignment, **kwargs)
except IOError:
img = _iterate_trans_views(function=plot_alignment,
surfaces=['head'], **kwargs)
if img is not None:
global_id = self._get_id()
html = image_template.substitute(
img=img, id=global_id, div_klass='trans',
img_klass='trans', caption='Trans : ' + trans, width=75,
show=True, image_format='png')
return html
def _render_bem(self, subject, subjects_dir, decim, n_jobs,
section='mri', caption='BEM'):
"""Render mri+bem (only PNG)."""
import nibabel as nib
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
# Get the MRI filename
mri_fname = op.join(subjects_dir, subject, 'mri', 'T1.mgz')
if not op.isfile(mri_fname):
warn('MRI file "%s" does not exist' % mri_fname)
# Get the BEM surface filenames
bem_path = op.join(subjects_dir, subject, 'bem')
if not op.isdir(bem_path):
warn('Subject bem directory "%s" does not exist' % bem_path)
return self._render_image_png(mri_fname, cmap='gray',
n_jobs=n_jobs)
surf_fnames = []
for surf_name in ['*inner_skull', '*outer_skull', '*outer_skin']:
surf_fname = glob(op.join(bem_path, surf_name + '.surf'))
if len(surf_fname) > 0:
surf_fnames.append(surf_fname[0])
else:
warn('No surface found for %s.' % surf_name)
continue
if len(surf_fnames) == 0:
warn('No surfaces found at all, rendering empty MRI')
return self._render_image_png(mri_fname, cmap='gray',
n_jobs=n_jobs)
# XXX : find a better way to get max range of slices
nim = nib.load(mri_fname)
data = _get_img_fdata(nim)
shape = data.shape
del data # free up memory
html = []
global_id = self._get_id()
if section == 'mri' and 'mri' not in self.sections:
self.sections.append('mri')
self._sectionvars['mri'] = 'mri'
name = caption
html += u'<li class="mri" id="%d">\n' % global_id
html += u'<h4>%s</h4>\n' % name # all other captions are h4
html += self._render_one_bem_axis(mri_fname, surf_fnames, global_id,
shape, 'axial', decim, n_jobs)
html += self._render_one_bem_axis(mri_fname, surf_fnames, global_id,
shape, 'sagittal', decim, n_jobs)
html += self._render_one_bem_axis(mri_fname, surf_fnames, global_id,
shape, 'coronal', decim, n_jobs)
html += u'</li>\n'
return ''.join(html)
def _clean_varnames(s):
# Remove invalid characters
s = re.sub('[^0-9a-zA-Z_]', '', s)
# add report_ at the beginning so that the javascript class names
# are valid ones
return 'report_' + s
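# For illustration (added comment, not in the original module): _clean_varnames
# strips everything except alphanumerics and underscores and adds the prefix,
# so 'sample_audvis-ave.fif' becomes 'report_sample_audvisavefif'.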
def _recursive_search(path, pattern):
"""Auxiliary function for recursive_search of the directory."""
filtered_files = list()
for dirpath, dirnames, files in os.walk(path):
for f in fnmatch.filter(files, pattern):
# only the following file types are supported
# this ensures equitable distribution of jobs
if f.endswith(tuple(VALID_EXTENSIONS)):
filtered_files.append(op.realpath(op.join(dirpath, f)))
return filtered_files
def _fix_global_ids(html):
"""Fix the global_ids after reordering in _render_toc()."""
html = re.sub(r'id="\d+"', 'id="###"', html)
global_id = 1
while len(re.findall('id="###"', html)) > 0:
html = re.sub('id="###"', 'id="%s"' % global_id, html, count=1)
global_id += 1
return html
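# Rough illustration (added comment, not in the original source): given HTML
# containing id="7" ... id="42" ... id="3", _fix_global_ids renumbers the
# anchors sequentially as id="1", id="2", id="3" in order of appearance.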
###############################################################################
# Scraper for sphinx-gallery
_SCRAPER_TEXT = '''
.. only:: builder_html
.. container:: row
.. rubric:: The `HTML document <{0}>`__ written by :meth:`mne.Report.save`:
.. raw:: html
<iframe class="sg_report" sandbox="allow-scripts" src="{0}"></iframe>
''' # noqa: E501
# Adapted from fa-file-code
_FA_FILE_CODE = '<svg class="sg_report" role="img" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 384 512"><path fill="#dec" d="M149.9 349.1l-.2-.2-32.8-28.9 32.8-28.9c3.6-3.2 4-8.8.8-12.4l-.2-.2-17.4-18.6c-3.4-3.6-9-3.7-12.4-.4l-57.7 54.1c-3.7 3.5-3.7 9.4 0 12.8l57.7 54.1c1.6 1.5 3.8 2.4 6 2.4 2.4 0 4.8-1 6.4-2.8l17.4-18.6c3.3-3.5 3.1-9.1-.4-12.4zm220-251.2L286 14C277 5 264.8-.1 252.1-.1H48C21.5 0 0 21.5 0 48v416c0 26.5 21.5 48 48 48h288c26.5 0 48-21.5 48-48V131.9c0-12.7-5.1-25-14.1-34zM256 51.9l76.1 76.1H256zM336 464H48V48h160v104c0 13.3 10.7 24 24 24h104zM209.6 214c-4.7-1.4-9.5 1.3-10.9 6L144 408.1c-1.4 4.7 1.3 9.6 6 10.9l24.4 7.1c4.7 1.4 9.6-1.4 10.9-6L240 231.9c1.4-4.7-1.3-9.6-6-10.9zm24.5 76.9l.2.2 32.8 28.9-32.8 28.9c-3.6 3.2-4 8.8-.8 12.4l.2.2 17.4 18.6c3.3 3.5 8.9 3.7 12.4.4l57.7-54.1c3.7-3.5 3.7-9.4 0-12.8l-57.7-54.1c-3.5-3.3-9.1-3.2-12.4.4l-17.4 18.6c-3.3 3.5-3.1 9.1.4 12.4z" class=""></path></svg>' # noqa: E501
class _ReportScraper(object):
"""Scrape Report outputs.
Only works properly if conf.py is configured properly and the file
is written to the same directory as the example script.
"""
def __init__(self):
self.app = None
self.files = dict()
def __repr__(self):
return '<ReportScraper>'
def __call__(self, block, block_vars, gallery_conf):
for report in block_vars['example_globals'].values():
if (isinstance(report, Report) and hasattr(report, 'fname') and
report.fname.endswith('.html') and
gallery_conf['builder_name'] == 'html'):
# Thumbnail
image_path_iterator = block_vars['image_path_iterator']
img_fname = next(image_path_iterator)
img_fname = img_fname.replace('.png', '.svg')
with open(img_fname, 'w') as fid:
fid.write(_FA_FILE_CODE)
# copy HTML file
html_fname = op.basename(report.fname)
out_fname = op.join(
self.app.builder.outdir,
op.relpath(op.dirname(block_vars['target_file']),
self.app.builder.srcdir), html_fname)
self.files[report.fname] = out_fname
# embed links/iframe
data = _SCRAPER_TEXT.format(html_fname)
return data
return ''
def copyfiles(self, *args, **kwargs):
for key, value in self.files.items():
copyfile(key, value)
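# Rough sketch (assumed wiring, not part of this module) of how the scraper
# might be hooked into a sphinx-gallery ``conf.py``; the option values and the
# ``setup(app)`` hook below are assumptions:
#
#     report_scraper = _ReportScraper()
#     sphinx_gallery_conf = {'image_scrapers': ('matplotlib', report_scraper)}
#
#     def setup(app):
#         report_scraper.app = app
#         app.connect('build-finished', report_scraper.copyfiles)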
| [] | [] | ["_MNE_BUILDING_DOC"] | [] | ["_MNE_BUILDING_DOC"] | python | 1 | 0 |
controllers/tangserver_controller_client.go | /*
Copyright 2021.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"bytes"
"fmt"
"io"
"os"
"path/filepath"
"strings"
core_v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/tools/remotecommand"
)
// GetClusterClientConfig first tries to get a config object which uses the service account kubernetes gives to pods,
// if it is called from a process running in a kubernetes environment.
// Otherwise, it falls back to building the config from the default kubeconfig filepath ($HOME/.kube/config); if that also fails, an error is returned.
// Once it gets the config, it returns it.
func GetClusterClientConfig() (*rest.Config, error) {
config, err := rest.InClusterConfig()
if err != nil {
err1 := err
kubeconfig := filepath.Join(os.Getenv("HOME"), ".kube", "config")
config, err = clientcmd.BuildConfigFromFlags("", kubeconfig)
if err != nil {
err = fmt.Errorf("InClusterConfig as well as BuildConfigFromFlags Failed. Error in InClusterConfig: %+v\nError in BuildConfigFromFlags: %+v", err1, err)
return nil, err
}
}
return config, nil
}
// GetClientsetFromClusterConfig takes REST config and Create a clientset based on that and return that clientset
func GetClientsetFromClusterConfig(config *rest.Config) (*kubernetes.Clientset, error) {
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
err = fmt.Errorf("failed creating clientset. Error: %+v", err)
return nil, err
}
return clientset, nil
}
// GetClusterClientset first tries to get a config object which uses the service account kubernetes gives to pods,
// if it is called from a process running in a kubernetes environment.
// Otherwise, it falls back to building the config from the default kubeconfig filepath ($HOME/.kube/config); if that also fails, an error is returned.
// Once it gets the config, it creates a new Clientset for the given config and returns the clientset.
func GetClusterClientset() (*kubernetes.Clientset, error) {
config, err := GetClusterClientConfig()
if err != nil {
return nil, err
}
return GetClientsetFromClusterConfig(config)
}
// GetRESTClient first tries to get a config object which uses the service account kubernetes gives to pods,
// if it is called from a process running in a kubernetes environment.
// Otherwise, it falls back to building the config from the default kubeconfig filepath ($HOME/.kube/config); if that also fails, an error is returned.
// Once it gets the config, it creates and returns a REST client for that config.
func GetRESTClient() (*rest.RESTClient, error) {
config, err := GetClusterClientConfig()
if err != nil {
return &rest.RESTClient{}, err
}
return rest.RESTClientFor(config)
}
// podCommandExec non-interactively executes a command in the specified pod.
// :param string command: the command to run (it is passed to `/bin/bash -c`).
// :param string pod_name: Pod name.
// :param string namespace: namespace of the Pod.
// :param io.Reader stdin: Standard Input if necessary, otherwise `nil`
// :return: string: Output of the command. (STDOUT)
// string: Errors. (STDERR)
// error: If any error has occurred otherwise `nil`
func podCommandExec(command, containerName, podName, namespace string, stdin io.Reader) (string, string, error) {
config, err := GetClusterClientConfig()
if err != nil {
return "", "", err
}
if config == nil {
err = fmt.Errorf("nil config")
return "", "", err
}
clientset, err := GetClientsetFromClusterConfig(config)
if err != nil {
return "", "", err
}
if clientset == nil {
err = fmt.Errorf("nil clientset")
return "", "", err
}
req := clientset.CoreV1().RESTClient().Post().
Resource("pods").
Name(podName).
Namespace(namespace).
SubResource("exec")
scheme := runtime.NewScheme()
if err := core_v1.AddToScheme(scheme); err != nil {
return "", "", fmt.Errorf("error adding to scheme: %v", err)
}
parameterCodec := runtime.NewParameterCodec(scheme)
req.VersionedParams(&core_v1.PodExecOptions{
Command: []string{"/bin/bash", "-c", command},
Container: containerName,
Stdin: stdin != nil,
Stdout: true,
Stderr: true,
TTY: false,
}, parameterCodec)
exec, spdyerr := remotecommand.NewSPDYExecutor(config, "POST", req.URL())
if spdyerr != nil {
return "", "", fmt.Errorf("error while creating Executor: %v, Command: %s", err, strings.Fields(command))
}
var stdout, stderr bytes.Buffer
err = exec.Stream(remotecommand.StreamOptions{
Stdin: stdin,
Stdout: &stdout,
Stderr: &stderr,
Tty: false,
})
if err != nil {
return "", "", fmt.Errorf("error in Stream: %v, Command: %s", err, strings.Fields(command))
}
return stdout.String(), stderr.String(), nil
}
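// Hedged usage sketch (added comment, not in the original file); the command,
// container, pod and namespace names below are placeholders:
//
//	stdout, stderr, err := podCommandExec("ls /", "tangserver", "tangserver-pod-0", "default", nil)
//	if err != nil {
//		log.Printf("exec failed: %v (stderr: %s)", err, stderr)
//	} else {
//		log.Print(stdout)
//	}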
| ["\"HOME\""] | [] | ["HOME"] | [] | ["HOME"] | go | 1 | 0 |
build/diff.go | // Copyright 2020 Daniel Erat <[email protected]>.
// All rights reserved.
package build
import (
"bufio"
"bytes"
"context"
"fmt"
"io"
"net/http"
"os"
"os/exec"
"strings"
"syscall"
)
const serverAddr = ":8888"
// prompt displays differences between directories a and b and
// prompts the user to accept the changes. The user's response is returned.
// If serveB is true, an HTTP server is started at serveAddr to serve the contents of b.
func prompt(ctx context.Context, a, b string, serveB bool) (ok bool, err error) {
var msg string
var srv *http.Server
var sch <-chan error
if serveB {
msg = fmt.Sprintf("Serving %v at %v\n\n", b, serverAddr)
srv, sch = startServer(b, serverAddr)
}
ok, perr := showDiffAndPrompt(ctx, a, b, msg)
var serr error
if srv != nil {
serr = srv.Shutdown(ctx)
if err := <-sch; err != nil && err != http.ErrServerClosed {
serr = err
}
}
if perr != nil {
return ok, perr
}
return ok, serr
}
// startServer starts a new HTTP server at addr to serve the files in dir.
// The return value from ListenAndServe will be written to the returned channel.
func startServer(dir, addr string) (*http.Server, <-chan error) {
srv := &http.Server{
Addr: addr,
Handler: http.FileServer(http.Dir(dir)),
}
ch := make(chan error, 1)
go func() {
ch <- srv.ListenAndServe()
}()
return srv, ch
}
// showDiffAndPrompt displays differences between directories a and b and
// prompts the user to accept the changes. The user's response is returned.
// msg is printed above the diff.
func showDiffAndPrompt(ctx context.Context, a, b, msg string) (ok bool, err error) {
for {
if err := showDiff(ctx, a, b, msg); err != nil {
return false, err
}
r := bufio.NewReader(os.Stdin)
fmt.Print("Replace output dir (y/N/diff)? ")
s, _ := r.ReadString('\n')
s = strings.ToLower(s)
switch {
case strings.HasPrefix(s, "y"):
return true, nil
case strings.HasPrefix(s, "d"):
continue // show diff again
default:
return false, nil
}
}
}
// showDiff displays differences between directories a and b.
// header is written above the diff.
func showDiff(ctx context.Context, a, b, header string) error {
pager := os.Getenv("PAGER")
if pager == "" {
pager = "less"
}
pagerCmd := exec.CommandContext(ctx, pager)
pagerStdin, err := pagerCmd.StdinPipe()
if err != nil {
return err
}
pagerCmd.Stdout = os.Stdout
pagerCmd.Stderr = os.Stderr
if err := pagerCmd.Start(); err != nil {
return fmt.Errorf("failed starting %q: %v", strings.Join(pagerCmd.Args, " "), err)
}
io.WriteString(pagerStdin, header)
diffCmd := exec.CommandContext(ctx, "diff", "-r", "-u", "--color=always", a, b)
diffCmd.Stdout = pagerStdin
var diffStderr bytes.Buffer
diffCmd.Stderr = &diffStderr
// diff(1): "Exit status is 0 if inputs are the same, 1 if different, 2 if trouble."
diffErr := diffCmd.Run()
if diffErr == nil {
io.WriteString(pagerStdin, "No differences.\n")
} else if exitErr, ok := diffErr.(*exec.ExitError); ok {
if exitErr.ExitCode() == 1 {
diffErr = nil // differences found
} else if ws, ok := exitErr.Sys().(syscall.WaitStatus); ok && ws.Signal() == syscall.SIGPIPE {
diffErr = nil // pager exited before it read entire diff
}
}
if diffErr != nil {
io.WriteString(pagerStdin, diffStderr.String())
diffErr = fmt.Errorf("%q failed: %v", strings.Join(diffCmd.Args, " "), diffErr)
}
if err := pagerStdin.Close(); err != nil {
return err
}
if err := pagerCmd.Wait(); err != nil {
return fmt.Errorf("failed waiting for %q: %v", strings.Join(pagerCmd.Args, " "), err)
}
return diffErr
}
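// Hedged usage sketch (added comment, not in the original source); the
// directory names are placeholders:
//
//	ok, err := prompt(context.Background(), "out", "out.new", true)
//	if err != nil {
//		log.Fatal(err)
//	}
//	if ok {
//		// safe to replace "out" with "out.new"
//	}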
| ["\"PAGER\""] | [] | ["PAGER"] | [] | ["PAGER"] | go | 1 | 0 |
Cancer Web App/cancer/wsgi.py | """
WSGI config for cancer project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'cancer.settings')
application = get_wsgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 |
tests/test_build.py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import os
import sys
from mock import patch
from oslo_log import fixture as log_fixture
from oslo_log import log as logging
from oslotest import base
import testtools
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname(__file__), '../tools')))
from kolla.image import build
LOG = logging.getLogger(__name__)
class BuildTest(object):
excluded_images = abc.abstractproperty()
def setUp(self):
super(BuildTest, self).setUp()
self.useFixture(log_fixture.SetLogLevel([__name__],
logging.logging.INFO))
self.build_args = [__name__, "--debug", '--threads', '4']
@testtools.skipUnless(os.environ.get('DOCKER_BUILD_TEST'),
'Skip the docker build test')
def runTest(self):
with patch.object(sys, 'argv', self.build_args):
LOG.info("Running with args %s", self.build_args)
bad_results, good_results, unmatched_results = build.run_build()
failures = 0
for image, result in bad_results.items():
if image in self.excluded_images:
                if result == 'error':
continue
failures = failures + 1
LOG.warning(">>> Expected image '%s' to fail, please update"
" the excluded_images in source file above if the"
" image build has been fixed.", image)
else:
                if result != 'error':
continue
failures = failures + 1
LOG.critical(">>> Expected image '%s' to succeed!", image)
for image in unmatched_results.keys():
LOG.warning(">>> Image '%s' was not matched", image)
self.assertEqual(failures, 0, "%d failure(s) occurred" % failures)
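# Note (added for illustration): the build test above only runs when the
# DOCKER_BUILD_TEST environment variable is set, e.g. a hypothetical invocation
# such as ``DOCKER_BUILD_TEST=1 python -m pytest tests/test_build.py``.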
class BuildTestCentosBinary(BuildTest, base.BaseTestCase):
excluded_images = ["karbor-base",
"kuryr-base",
"neutron-sfc-agent",
"searchlight-base",
"senlin-base",
"solum-base",
"vmtp",
"manila-data",
"watcher-base",
"congress-base",
"bifrost-base",
"cloudkitty-base",
"freezer-base",
"tacker"]
def setUp(self):
super(BuildTestCentosBinary, self).setUp()
self.build_args.extend(["--base", "centos",
"--type", "binary"])
class BuildTestCentosSource(BuildTest, base.BaseTestCase):
excluded_images = ["mistral-base"]
def setUp(self):
super(BuildTestCentosSource, self).setUp()
self.build_args.extend(["--base", "centos",
"--type", "source"])
class BuildTestUbuntuBinary(BuildTest, base.BaseTestCase):
excluded_images = ["karbor-base",
"kuryr-base",
"octavia-base",
"neutron-sfc-agent",
"searchlight-base",
"senlin-base",
"solum-base",
"vmtp",
"zaqar",
"watcher-base",
"congress-base",
"bifrost-base",
"cloudkitty-base",
"freezer-base",
"panko-base",
"tacker"]
def setUp(self):
super(BuildTestUbuntuBinary, self).setUp()
self.build_args.extend(["--base", "ubuntu",
"--type", "binary"])
class BuildTestUbuntuSource(BuildTest, base.BaseTestCase):
excluded_images = []
def setUp(self):
super(BuildTestUbuntuSource, self).setUp()
self.build_args.extend(["--base", "ubuntu",
"--type", "source"])
class BuildTestOracleLinuxBinary(BuildTest, base.BaseTestCase):
excluded_images = ["karbor-base",
"kuryr-base",
"neutron-sfc-agent",
"searchlight-base",
"senlin-base",
"solum-base",
"vmtp",
"manila-data",
"watcher-base",
"congress-base",
"bifrost-base",
"cloudkitty-base",
"freezer-base",
"tacker"]
def setUp(self):
super(BuildTestOracleLinuxBinary, self).setUp()
self.build_args.extend(["--base", "oraclelinux",
"--type", "binary"])
class BuildTestOracleLinuxSource(BuildTest, base.BaseTestCase):
excluded_images = []
def setUp(self):
super(BuildTestOracleLinuxSource, self).setUp()
self.build_args.extend(["--base", "oraclelinux",
"--type", "source"])
class DeployTestCentosBinary(BuildTestCentosBinary):
def setUp(self):
super(DeployTestCentosBinary, self).setUp()
self.build_args.extend(["--profile", "gate"])
class DeployTestCentosSource(BuildTestCentosSource):
def setUp(self):
super(DeployTestCentosSource, self).setUp()
self.build_args.extend(["--profile", "gate"])
class DeployTestOracleLinuxBinary(BuildTestOracleLinuxBinary):
def setUp(self):
super(DeployTestOracleLinuxBinary, self).setUp()
self.build_args.extend(["--profile", "gate"])
class DeployTestOracleLinuxSource(BuildTestOracleLinuxSource):
def setUp(self):
super(DeployTestOracleLinuxSource, self).setUp()
self.build_args.extend(["--profile", "gate"])
class DeployTestUbuntuBinary(BuildTestUbuntuBinary):
def setUp(self):
super(DeployTestUbuntuBinary, self).setUp()
self.build_args.extend(["--profile", "gate"])
class DeployTestUbuntuSource(BuildTestUbuntuSource):
def setUp(self):
super(DeployTestUbuntuSource, self).setUp()
self.build_args.extend(["--profile", "gate"])
| [] | [] | ["DOCKER_BUILD_TEST"] | [] | ["DOCKER_BUILD_TEST"] | python | 1 | 0 |
tests/strategies/test_ddp_fully_sharded_with_full_state_dict.py | import os
from typing import Any, Dict, Optional
from unittest import mock
import pytest
import torch
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.plugins import FullyShardedNativeMixedPrecisionPlugin
from pytorch_lightning.strategies import DDPFullyShardedStrategy
from pytorch_lightning.utilities import _FAIRSCALE_FULLY_SHARDED_AVAILABLE
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from tests.helpers.boring_model import BoringModel
from tests.helpers.runif import RunIf
if _FAIRSCALE_FULLY_SHARDED_AVAILABLE:
from fairscale.nn import FullyShardedDataParallel, wrap
def test_invalid_on_cpu(tmpdir):
"""Test to ensure that to raise Misconfiguration for FSDP on CPU."""
with pytest.raises(
MisconfigurationException, match="You selected strategy to be `ddp_fully_sharded`, but GPU is not available."
):
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True, strategy="fsdp")
assert isinstance(trainer.strategy, DDPFullyShardedStrategy)
trainer.strategy.setup_environment()
@mock.patch.dict(os.environ, {"CUDA_VISIBLE_DEVICES": "0"})
@mock.patch("torch.cuda.device_count", return_value=1)
@mock.patch("torch.cuda.is_available", return_value=True)
@RunIf(fairscale_fully_sharded=True)
def test_fsdp_with_sharded_amp(device_count_mock, mock_cuda_available, tmpdir):
"""Test to ensure that plugin native amp plugin is correctly chosen when using sharded."""
trainer = Trainer(
default_root_dir=tmpdir, fast_dev_run=True, strategy="fsdp", accelerator="gpu", devices=1, precision=16
)
assert isinstance(trainer.strategy, DDPFullyShardedStrategy)
assert isinstance(trainer.strategy.precision_plugin, FullyShardedNativeMixedPrecisionPlugin)
class TestFSDPModel(BoringModel):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.layer: Optional[torch.nn.Module] = None
def _init_model(self) -> None:
self.layer = torch.nn.Sequential(torch.nn.Linear(32, 32), torch.nn.ReLU(), torch.nn.Linear(32, 2))
def setup(self, stage: str) -> None:
if self.layer is None:
self._init_model()
def configure_sharded_model(self) -> None:
# the model is already wrapped with FSDP: no need to wrap again!
if isinstance(self.layer, FullyShardedDataParallel):
return
for i, layer in enumerate(self.layer):
if i % 2 == 0:
self.layer[i] = wrap(layer)
self.layer = wrap(self.layer)
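        # Note (added comment, best-effort explanation): fairscale's ``wrap`` only
        # wraps a module in FSDP when it is called inside an ``enable_wrap`` context,
        # which the Lightning strategy provides around ``configure_sharded_model``;
        # outside such a context ``wrap`` should return the module unchanged.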
def on_load_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
# when loading full state dict, we first need to create a new unwrapped model
self._init_model()
def configure_optimizers(self):
return torch.optim.SGD(self.layer.parameters(), lr=0.1)
def on_train_start(self) -> None:
self._assert_layer_fsdp_instance()
def on_test_start(self) -> None:
self._assert_layer_fsdp_instance()
def on_validation_start(self) -> None:
self._assert_layer_fsdp_instance()
    def on_predict_start(self) -> None:
self._assert_layer_fsdp_instance()
def _assert_layer_fsdp_instance(self) -> None:
assert isinstance(self.layer, FullyShardedDataParallel)
assert isinstance(self.layer.module[0], FullyShardedDataParallel)
assert isinstance(self.layer.module[2], FullyShardedDataParallel)
# Assert that the nested layers are set reshard_after_forward to True
assert self.layer.module[0].reshard_after_forward is True
assert self.layer.module[2].reshard_after_forward is True
if isinstance(self.trainer.precision_plugin, FullyShardedNativeMixedPrecisionPlugin):
assert self.layer.mixed_precision
assert self.layer.module[0].mixed_precision
assert self.layer.module[2].mixed_precision
@RunIf(min_gpus=1, skip_windows=True, standalone=True, fairscale_fully_sharded=True)
def test_fully_sharded_strategy_checkpoint(tmpdir):
"""Test to ensure that checkpoint is saved correctly when using a single GPU, and all stages can be run."""
model = TestFSDPModel()
trainer = Trainer(
default_root_dir=tmpdir,
accelerator="gpu",
devices=1,
strategy="fsdp",
precision=16,
max_epochs=1,
enable_progress_bar=False,
enable_model_summary=False,
)
_run_multiple_stages(trainer, model, os.path.join(tmpdir, "last.ckpt"))
@RunIf(min_gpus=2, skip_windows=True, standalone=True, fairscale_fully_sharded=True)
def test_fully_sharded_strategy_checkpoint_multi_gpus(tmpdir):
"""Test to ensure that checkpoint is saved correctly when using multiple GPUs, and all stages can be run."""
model = TestFSDPModel()
ck = ModelCheckpoint(save_last=True)
trainer = Trainer(
default_root_dir=tmpdir,
accelerator="gpu",
devices=2,
strategy="fsdp",
precision=16,
max_epochs=1,
callbacks=[ck],
enable_progress_bar=False,
enable_model_summary=False,
)
_run_multiple_stages(trainer, model)
def _assert_save_equality(trainer, ckpt_path, cls=TestFSDPModel):
# Use FullySharded to get the state dict for the sake of comparison
model_state_dict = trainer.strategy.lightning_module_state_dict()
if trainer.is_global_zero:
saved_model = cls.load_from_checkpoint(ckpt_path)
# Assert model parameters are identical after loading
for ddp_param, shard_param in zip(model_state_dict.values(), saved_model.state_dict().values()):
assert torch.equal(ddp_param.float().cpu(), shard_param)
def _run_multiple_stages(trainer, model, model_path: Optional[str] = None):
trainer.fit(model)
model_path = model_path if model_path else trainer.checkpoint_callback.last_model_path
trainer.save_checkpoint(model_path, weights_only=True)
_assert_save_equality(trainer, model_path, cls=TestFSDPModel)
# Test entry point
    trainer.test(model)  # model is wrapped, will not call configure_sharded_model
    # provide model path, will create a new unwrapped model and load and then call configure_sharded_model to wrap
trainer.test(ckpt_path=model_path)
@RunIf(min_gpus=1, skip_windows=True, standalone=True, fairscale_fully_sharded=True)
def test_fsdp_gradient_clipping_raises(tmpdir):
"""Test to ensure that an exception is raised when clipping gradients by value with FSDP."""
model = BoringModel()
trainer = Trainer(
default_root_dir=tmpdir,
strategy="fsdp",
fast_dev_run=True,
accelerator="gpu",
devices=1,
precision=16,
gradient_clip_val=1,
gradient_clip_algorithm="norm",
enable_progress_bar=False,
enable_model_summary=False,
)
with pytest.raises(
MisconfigurationException, match="gradient_clip_algorithm='norm'` is currently not supported for `FullySharded"
):
trainer.fit(model)
| [] | [] | [] | [] | [] | python | 0 | 0 |
nilearn/image/tests/test_resampling.py | """
Test the resampling code.
"""
import os
import copy
import math
from nose import SkipTest
from nose.tools import assert_equal, assert_raises, \
assert_false, assert_true, assert_almost_equal, assert_not_equal
from numpy.testing import assert_array_equal, assert_array_almost_equal
import numpy as np
from nibabel import Nifti1Image
from nilearn.image.resampling import resample_img, resample_to_img, reorder_img
from nilearn.image.resampling import from_matrix_vector, coord_transform
from nilearn.image.resampling import BoundingBoxError
from nilearn._utils import testing, compat
###############################################################################
# Helper function
def rotation(theta, phi):
""" Returns a rotation 3x3 matrix.
"""
cos = np.cos
sin = np.sin
a1 = np.array([[cos(theta), -sin(theta), 0],
[sin(theta), cos(theta), 0],
[0, 0, 1]])
a2 = np.array([[1, 0, 0],
[0, cos(phi), -sin(phi)],
[0, sin(phi), cos(phi)]])
return np.dot(a1, a2)
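# Added note (not in the original test module): ``rotation(theta, phi)`` composes
# a rotation by ``theta`` about the z-axis (a1) with a rotation by ``phi`` about
# the x-axis (a2), i.e. R = Rz(theta) @ Rx(phi).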
def pad(array, *args):
"""Pad an ndarray with zeros of quantity specified
in args as follows args = (x1minpad, x1maxpad, x2minpad,
x2maxpad, x3minpad, ...)
"""
if len(args) % 2 != 0:
raise ValueError("Please specify as many max paddings as min"
" paddings. You have specified %d arguments" %
len(args))
all_paddings = np.zeros([array.ndim, 2], dtype=np.int64)
all_paddings[:len(args) // 2] = np.array(args).reshape(-1, 2)
lower_paddings, upper_paddings = all_paddings.T
new_shape = np.array(array.shape) + upper_paddings + lower_paddings
padded = np.zeros(new_shape, dtype=array.dtype)
source_slices = [slice(max(-lp, 0), min(s + up, s))
for lp, up, s in zip(lower_paddings,
upper_paddings,
array.shape)]
target_slices = [slice(max(lp, 0), min(s - up, s))
for lp, up, s in zip(lower_paddings,
upper_paddings,
new_shape)]
padded[target_slices] = array[source_slices].copy()
return padded
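# Worked example (added for clarity, not in the original module): with
# ``a = np.ones((2, 3))``, ``pad(a, 1, 1)`` adds one plane of zeros on each side
# of the first axis only, giving shape (4, 3) with ``a`` in the middle rows.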
###############################################################################
# Tests
def test_identity_resample():
""" Test resampling with an identity affine.
"""
shape = (3, 2, 5, 2)
data = np.random.randint(0, 10, shape)
affine = np.eye(4)
affine[:3, -1] = 0.5 * np.array(shape[:3])
rot_img = resample_img(Nifti1Image(data, affine),
target_affine=affine, interpolation='nearest')
np.testing.assert_almost_equal(data, rot_img.get_data())
# Smoke-test with a list affine
rot_img = resample_img(Nifti1Image(data, affine),
target_affine=affine.tolist(),
interpolation='nearest')
# Test with a 3x3 affine
rot_img = resample_img(Nifti1Image(data, affine),
target_affine=affine[:3, :3],
interpolation='nearest')
np.testing.assert_almost_equal(data, rot_img.get_data())
# Test with non native endian data
# Test with big endian data ('>f8')
for interpolation in ['nearest', 'linear', 'continuous']:
rot_img = resample_img(Nifti1Image(data.astype('>f8'), affine),
target_affine=affine.tolist(),
interpolation=interpolation)
np.testing.assert_almost_equal(data, rot_img.get_data())
# Test with little endian data ('<f8')
for interpolation in ['nearest', 'linear', 'continuous']:
rot_img = resample_img(Nifti1Image(data.astype('<f8'), affine),
target_affine=affine.tolist(),
interpolation=interpolation)
np.testing.assert_almost_equal(data, rot_img.get_data())
def test_downsample():
""" Test resampling with a 1/2 down-sampling affine.
"""
rand_gen = np.random.RandomState(0)
shape = (6, 3, 6, 2)
data = rand_gen.random_sample(shape)
affine = np.eye(4)
rot_img = resample_img(Nifti1Image(data, affine),
target_affine=2 * affine, interpolation='nearest')
downsampled = data[::2, ::2, ::2, ...]
x, y, z = downsampled.shape[:3]
np.testing.assert_almost_equal(downsampled,
rot_img.get_data()[:x, :y, :z, ...])
# Test with non native endian data
    # Test to check that giving non-native endian data as input works as
    # expected and returns the same output as the tests above.
# Big endian data ('>f8')
for copy in [True, False]:
rot_img = resample_img(Nifti1Image(data.astype('>f8'), affine),
target_affine=2 * affine,
interpolation='nearest',
copy=copy)
np.testing.assert_almost_equal(downsampled,
rot_img.get_data()[:x, :y, :z, ...])
# Little endian data
for copy in [True, False]:
rot_img = resample_img(Nifti1Image(data.astype('<f8'), affine),
target_affine=2 * affine,
interpolation='nearest',
copy=copy)
np.testing.assert_almost_equal(downsampled,
rot_img.get_data()[:x, :y, :z, ...])
def test_resampling_with_affine():
""" Test resampling with a given rotation part of the affine.
"""
prng = np.random.RandomState(10)
data_3d = prng.randint(4, size=(1, 4, 4))
data_4d = prng.randint(4, size=(1, 4, 4, 3))
for data in [data_3d, data_4d]:
for angle in (0, np.pi, np.pi / 2., np.pi / 4., np.pi / 3.):
rot = rotation(0, angle)
rot_img = resample_img(Nifti1Image(data, np.eye(4)),
target_affine=rot,
interpolation='nearest')
assert_equal(np.max(data),
np.max(rot_img.get_data()))
assert_equal(rot_img.get_data().dtype, data.dtype)
# We take the same rotation logic as above and test with nonnative endian
# data as input
for data in [data_3d, data_4d]:
img = Nifti1Image(data.astype('>f8'), np.eye(4))
for angle in (0, np.pi, np.pi / 2., np.pi / 4., np.pi / 3.):
rot = rotation(0, angle)
rot_img = resample_img(img, target_affine=rot,
interpolation='nearest')
assert_equal(np.max(data),
np.max(rot_img.get_data()))
def test_resampling_continuous_with_affine():
prng = np.random.RandomState(10)
data_3d = prng.randint(1, 4, size=(1, 10, 10))
data_4d = prng.randint(1, 4, size=(1, 10, 10, 3))
for data in [data_3d, data_4d]:
for angle in (0, np.pi / 2., np.pi, 3 * np.pi / 2.):
rot = rotation(0, angle)
img = Nifti1Image(data, np.eye(4))
rot_img = resample_img(
img,
target_affine=rot,
interpolation='continuous')
rot_img_back = resample_img(
rot_img,
target_affine=np.eye(4),
interpolation='continuous')
center = slice(1, 9)
# values on the edges are wrong for some reason
mask = (0, center, center)
np.testing.assert_allclose(
img.get_data()[mask],
rot_img_back.get_data()[mask])
assert_equal(rot_img.get_data().dtype,
np.dtype(data.dtype.name.replace('int', 'float')))
def test_resampling_error_checks():
shape = (3, 2, 5, 2)
target_shape = (5, 3, 2)
affine = np.eye(4)
data = np.random.randint(0, 10, shape)
img = Nifti1Image(data, affine)
# Correct parameters: no exception
resample_img(img, target_shape=target_shape, target_affine=affine)
resample_img(img, target_affine=affine)
with testing.write_tmp_imgs(img) as filename:
resample_img(filename, target_shape=target_shape, target_affine=affine)
# Missing parameter
assert_raises(ValueError, resample_img, img, target_shape=target_shape)
# Invalid shape
assert_raises(ValueError, resample_img, img, target_shape=(2, 3),
target_affine=affine)
# Invalid interpolation
interpolation = 'an_invalid_interpolation'
pattern = "interpolation must be either.+{0}".format(interpolation)
testing.assert_raises_regex(ValueError, pattern,
resample_img, img, target_shape=target_shape,
target_affine=affine,
interpolation="an_invalid_interpolation")
# Noop
target_shape = shape[:3]
img_r = resample_img(img, copy=False)
assert_equal(img_r, img)
img_r = resample_img(img, copy=True)
assert_false(np.may_share_memory(img_r.get_data(), img.get_data()))
np.testing.assert_almost_equal(img_r.get_data(), img.get_data())
np.testing.assert_almost_equal(compat.get_affine(img_r), compat.get_affine(img))
img_r = resample_img(img, target_affine=affine, target_shape=target_shape,
copy=False)
assert_equal(img_r, img)
img_r = resample_img(img, target_affine=affine, target_shape=target_shape,
copy=True)
assert_false(np.may_share_memory(img_r.get_data(), img.get_data()))
np.testing.assert_almost_equal(img_r.get_data(), img.get_data())
np.testing.assert_almost_equal(compat.get_affine(img_r), compat.get_affine(img))
def test_4d_affine_bounding_box_error():
small_data = np.ones([4, 4, 4])
small_data_4D_affine = np.eye(4)
small_data_4D_affine[:3, -1] = np.array([5, 4, 5])
small_img = Nifti1Image(small_data,
small_data_4D_affine)
bigger_data_4D_affine = np.eye(4)
bigger_data = np.zeros([10, 10, 10])
bigger_img = Nifti1Image(bigger_data,
bigger_data_4D_affine)
# We would like to check whether all/most of the data
# will be contained in the resampled image
# The measure will be the l2 norm, since some resampling
# schemes approximately conserve it
def l2_norm(arr):
return (arr ** 2).sum()
# resample using 4D affine and specified target shape
small_to_big_with_shape = resample_img(
small_img,
target_affine=compat.get_affine(bigger_img),
target_shape=bigger_img.shape)
# resample using 3D affine and no target shape
small_to_big_without_shape_3D_affine = resample_img(
small_img,
target_affine=compat.get_affine(bigger_img)[:3, :3])
# resample using 4D affine and no target shape
small_to_big_without_shape = resample_img(
small_img,
target_affine=compat.get_affine(bigger_img))
# The first 2 should pass
assert_almost_equal(l2_norm(small_data),
l2_norm(small_to_big_with_shape.get_data()))
assert_almost_equal(l2_norm(small_data),
l2_norm(small_to_big_without_shape_3D_affine.get_data()))
# After correcting decision tree for 4x4 affine given + no target shape
# from "use initial shape" to "calculate minimal bounding box respecting
# the affine anchor and the data"
assert_almost_equal(l2_norm(small_data),
l2_norm(small_to_big_without_shape.get_data()))
assert_array_equal(small_to_big_without_shape.shape,
small_data_4D_affine[:3, -1] + np.array(small_img.shape))
def test_raises_upon_3x3_affine_and_no_shape():
img = Nifti1Image(np.zeros([8, 9, 10]),
affine=np.eye(4))
exception = ValueError
message = ("Given target shape without anchor "
"vector: Affine shape should be \(4, 4\) and "
"not \(3, 3\)")
testing.assert_raises_regex(
exception, message,
resample_img, img, target_affine=np.eye(3) * 2,
target_shape=(10, 10, 10))
def test_3x3_affine_bbox():
# Test that the bounding-box is properly computed when
# transforming with a negative affine component
# This is specifically to test for a change in behavior between
# scipy < 0.18 and scipy >= 0.18, which is an interaction between
# offset and a diagonal affine
image = np.ones((20, 30))
source_affine = np.eye(4)
# Give the affine an offset
source_affine[:2, 3] = np.array([96, 64])
# We need to turn this data into a nibabel image
img = Nifti1Image(image[:, :, np.newaxis], affine=source_affine)
target_affine_3x3 = np.eye(3) * 2
# One negative axes
target_affine_3x3[1] *= -1
img_3d_affine = resample_img(img, target_affine=target_affine_3x3)
# If the bounding box is computed wrong, the image will be only
# zeros
np.testing.assert_allclose(img_3d_affine.get_data().max(), image.max())
def test_raises_bbox_error_if_data_outside_box():
# Make some cases which should raise exceptions
# original image
data = np.zeros([8, 9, 10])
affine = np.eye(4)
affine_offset = np.array([1, 1, 1])
affine[:3, 3] = affine_offset
img = Nifti1Image(data, affine)
# some axis flipping affines
axis_flips = np.array(list(map(np.diag,
[[-1, 1, 1, 1],
[1, -1, 1, 1],
[1, 1, -1, 1],
[-1, -1, 1, 1],
[-1, 1, -1, 1],
[1, -1, -1, 1]])))
# some in plane 90 degree rotations base on these
# (by permuting two lines)
af = axis_flips
rotations = np.array([af[0][[1, 0, 2, 3]],
af[0][[2, 1, 0, 3]],
af[1][[1, 0, 2, 3]],
af[1][[0, 2, 1, 3]],
af[2][[2, 1, 0, 3]],
af[2][[0, 2, 1, 3]]])
new_affines = np.concatenate([axis_flips,
rotations])
new_offset = np.array([0., 0., 0.])
new_affines[:, :3, 3] = new_offset[np.newaxis, :]
for new_affine in new_affines:
exception = BoundingBoxError
message = ("The field of view given "
"by the target affine does "
"not contain any of the data")
testing.assert_raises_regex(
exception, message,
resample_img, img, target_affine=new_affine)
def test_resampling_result_axis_permutation():
# Transform real data using easily checkable transformations
# For now: axis permutations
# create a cuboid full of deterministic data, padded with one
# voxel thickness of zeros
core_shape = (3, 5, 4)
core_data = np.arange(np.prod(core_shape)).reshape(core_shape)
full_data_shape = np.array(core_shape) + 2
full_data = np.zeros(full_data_shape)
full_data[[slice(1, 1 + s) for s in core_shape]] = core_data
source_img = Nifti1Image(full_data, np.eye(4))
axis_permutations = [[0, 1, 2],
[1, 0, 2],
[2, 1, 0],
[0, 2, 1]]
# check 3x3 transformation matrix
for ap in axis_permutations:
target_affine = np.eye(3)[ap]
resampled_img = resample_img(source_img,
target_affine=target_affine)
resampled_data = resampled_img.get_data()
what_resampled_data_should_be = full_data.transpose(ap)
assert_array_almost_equal(resampled_data,
what_resampled_data_should_be)
# check 4x4 transformation matrix
offset = np.array([-2, 1, -3])
for ap in axis_permutations:
target_affine = np.eye(4)
target_affine[:3, :3] = np.eye(3)[ap]
target_affine[:3, 3] = offset
resampled_img = resample_img(source_img,
target_affine=target_affine)
resampled_data = resampled_img.get_data()
offset_cropping = np.vstack([-offset[ap][np.newaxis, :],
np.zeros([1, 3])]
).T.ravel().astype(int)
what_resampled_data_should_be = pad(full_data.transpose(ap),
*list(offset_cropping))
assert_array_almost_equal(resampled_data,
what_resampled_data_should_be)
def test_resampling_nan():
# Test that when the data has NaNs they do not propagate to the
# whole image
for core_shape in [(3, 5, 4), (3, 5, 4, 2)]:
# create deterministic data, padded with one
# voxel thickness of zeros
core_data = np.arange(np.prod(core_shape)
).reshape(core_shape).astype(np.float)
# Introduce a nan
core_data[2, 2:4, 1] = np.nan
full_data_shape = np.array(core_shape) + 2
full_data = np.zeros(full_data_shape)
full_data[[slice(1, 1 + s) for s in core_shape]] = core_data
source_img = Nifti1Image(full_data, np.eye(4))
# Transform real data using easily checkable transformations
# For now: axis permutations
axis_permutation = [0, 1, 2]
# check 3x3 transformation matrix
target_affine = np.eye(3)[axis_permutation]
resampled_img = testing.assert_warns(
RuntimeWarning, resample_img, source_img,
target_affine=target_affine)
resampled_data = resampled_img.get_data()
if full_data.ndim == 4:
axis_permutation.append(3)
what_resampled_data_should_be = full_data.transpose(axis_permutation)
non_nan = np.isfinite(what_resampled_data_should_be)
# Check that the input data hasn't been modified:
assert_false(np.all(non_nan))
# Check that for finite value resampling works without problems
assert_array_almost_equal(resampled_data[non_nan],
what_resampled_data_should_be[non_nan])
# Check that what was not finite is still not finite
assert_false(np.any(np.isfinite(
resampled_data[np.logical_not(non_nan)])))
# Test with an actual resampling, in the case of a bigish hole
# This checks the extrapolation mechanism: if we don't do any
# extrapolation before resampling, the hole creates big
# artefacts
data = 10 * np.ones((10, 10, 10))
data[4:6, 4:6, 4:6] = np.nan
source_img = Nifti1Image(data, 2 * np.eye(4))
resampled_img = testing.assert_warns(
RuntimeWarning, resample_img, source_img,
target_affine=np.eye(4))
resampled_data = resampled_img.get_data()
np.testing.assert_allclose(10, resampled_data[np.isfinite(resampled_data)])
def test_resample_to_img():
# Testing resample to img function
rand_gen = np.random.RandomState(0)
shape = (6, 3, 6, 3)
data = rand_gen.random_sample(shape)
source_affine = np.eye(4)
source_img = Nifti1Image(data, source_affine)
target_affine = 2 * source_affine
target_img = Nifti1Image(data, target_affine)
result_img = resample_to_img(source_img, target_img,
interpolation='nearest')
downsampled = data[::2, ::2, ::2, ...]
x, y, z = downsampled.shape[:3]
np.testing.assert_almost_equal(downsampled,
result_img.get_data()[:x, :y, :z, ...])
def test_reorder_img():
# We need to test on a square array, as rotation does not change
# shape, whereas reordering does.
shape = (5, 5, 5, 2, 2)
rng = np.random.RandomState(42)
data = rng.rand(*shape)
affine = np.eye(4)
affine[:3, -1] = 0.5 * np.array(shape[:3])
ref_img = Nifti1Image(data, affine)
# Test with purely positive matrices and compare to a rotation
for theta, phi in np.random.randint(4, size=(5, 2)):
rot = rotation(theta * np.pi / 2, phi * np.pi / 2)
rot[np.abs(rot) < 0.001] = 0
rot[rot > 0.9] = 1
rot[rot < -0.9] = 1
b = 0.5 * np.array(shape[:3])
new_affine = from_matrix_vector(rot, b)
rot_img = resample_img(ref_img, target_affine=new_affine)
np.testing.assert_array_equal(compat.get_affine(rot_img), new_affine)
np.testing.assert_array_equal(rot_img.get_data().shape, shape)
reordered_img = reorder_img(rot_img)
np.testing.assert_array_equal(compat.get_affine(reordered_img)[:3, :3],
np.eye(3))
np.testing.assert_almost_equal(reordered_img.get_data(),
data)
# Create a non-diagonal affine, and check that we raise a sensible
# exception
affine[1, 0] = 0.1
ref_img = Nifti1Image(data, affine)
testing.assert_raises_regex(ValueError, 'Cannot reorder the axes',
reorder_img, ref_img)
# Test that no exception is raised when resample='continuous'
reorder_img(ref_img, resample='continuous')
# Test that resample args gets passed to resample_img
interpolation = 'nearest'
reordered_img = reorder_img(ref_img, resample=interpolation)
resampled_img = resample_img(ref_img,
target_affine=compat.get_affine(reordered_img),
interpolation=interpolation)
np.testing.assert_array_equal(reordered_img.get_data(),
resampled_img.get_data())
# Make sure invalid resample argument is included in the error message
interpolation = 'an_invalid_interpolation'
pattern = "interpolation must be either.+{0}".format(interpolation)
testing.assert_raises_regex(ValueError, pattern,
reorder_img, ref_img,
resample=interpolation)
# Test flipping an axis
data = rng.rand(*shape)
for i in (0, 1, 2):
# Make a diagonal affine with a negative axis, and check that
# can be reordered, also vary the shape
shape = (i + 1, i + 2, 3 - i)
affine = np.eye(4)
affine[i, i] *= -1
img = Nifti1Image(data, affine)
orig_img = copy.copy(img)
#x, y, z = img.get_world_coords()
#sample = img.values_in_world(x, y, z)
img2 = reorder_img(img)
# Check that img has not been changed
np.testing.assert_array_equal(compat.get_affine(img),
compat.get_affine(orig_img))
np.testing.assert_array_equal(img.get_data(),
orig_img.get_data())
# Test that the affine is indeed diagonal:
np.testing.assert_array_equal(compat.get_affine(img2)[:3, :3],
np.diag(np.diag(
compat.get_affine(img2)[:3, :3])))
assert_true(np.all(np.diag(compat.get_affine(img2)) >= 0))
def test_reorder_img_non_native_endianness():
def _get_resampled_img(dtype):
data = np.ones((10, 10, 10), dtype=dtype)
data[3:7, 3:7, 3:7] = 2
affine = np.eye(4)
theta = math.pi / 6.
c = math.cos(theta)
s = math.sin(theta)
affine = np.array([[1, 0, 0, 0],
[0, c, -s, 0],
[0, s, c, 0],
[0, 0, 0, 1]])
img = Nifti1Image(data, affine)
return resample_img(img, target_affine=np.eye(4))
img_1 = _get_resampled_img('<f8')
img_2 = _get_resampled_img('>f8')
np.testing.assert_equal(img_1.get_data(), img_2.get_data())
def test_coord_transform_trivial():
sform = np.eye(4)
x = np.random.random((10,))
y = np.random.random((10,))
z = np.random.random((10,))
x_, y_, z_ = coord_transform(x, y, z, sform)
np.testing.assert_array_equal(x, x_)
np.testing.assert_array_equal(y, y_)
np.testing.assert_array_equal(z, z_)
sform[:, -1] = 1
x_, y_, z_ = coord_transform(x, y, z, sform)
np.testing.assert_array_equal(x + 1, x_)
np.testing.assert_array_equal(y + 1, y_)
np.testing.assert_array_equal(z + 1, z_)
# Test the output in case of one item array
x, y, z = x[:1], y[:1], z[:1]
x_, y_, z_ = coord_transform(x, y, z, sform)
np.testing.assert_array_equal(x + 1, x_)
np.testing.assert_array_equal(y + 1, y_)
np.testing.assert_array_equal(z + 1, z_)
# Test the output in case of simple items
x, y, z = x[0], y[0], z[0]
x_, y_, z_ = coord_transform(x, y, z, sform)
np.testing.assert_array_equal(x + 1, x_)
np.testing.assert_array_equal(y + 1, y_)
np.testing.assert_array_equal(z + 1, z_)
def test_resample_img_segmentation_fault():
if os.environ.get('APPVEYOR') == 'True':
raise SkipTest('This test too slow (7-8 minutes) on AppVeyor')
# see https://github.com/nilearn/nilearn/issues/346
shape_in = (64, 64, 64)
aff_in = np.diag([2., 2., 2., 1.])
aff_out = np.diag([3., 3., 3., 1.])
# fourth_dim = 1024 works fine but for 1025 creates a segmentation
# fault with scipy < 0.14.1
fourth_dim = 1025
try:
data = np.ones(shape_in + (fourth_dim, ), dtype=np.float64)
except MemoryError:
# This can happen on AppVeyor and for 32-bit Python on Windows
raise SkipTest('Not enough RAM to run this test')
img_in = Nifti1Image(data, aff_in)
resample_img(img_in,
target_affine=aff_out,
interpolation='nearest')
def test_resampling_with_int_types_no_crash():
affine = np.eye(4)
data = np.zeros((2, 2, 2))
for dtype in [np.int, np.int8, np.int16, np.int32, np.int64,
np.uint, np.uint8, np.uint16, np.uint32, np.uint64,
np.float32, np.float64, np.float, '>i8', '<i8']:
img = Nifti1Image(data.astype(dtype), affine)
resample_img(img, target_affine=2. * affine)
| [] | [] | ["APPVEYOR"] | [] | ["APPVEYOR"] | python | 1 | 0 |
synapse/app/_base.py | # -*- coding: utf-8 -*-
# Copyright 2017 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import logging
import signal
import sys
import traceback
import psutil
from daemonize import Daemonize
from twisted.internet import defer, error, reactor
from twisted.protocols.tls import TLSMemoryBIOFactory
import synapse
from synapse.app import check_bind_error
from synapse.crypto import context_factory
from synapse.util import PreserveLoggingContext
from synapse.util.async_helpers import Linearizer
from synapse.util.rlimit import change_resource_limit
from synapse.util.versionstring import get_version_string
logger = logging.getLogger(__name__)
_sighup_callbacks = []
def register_sighup(func):
"""
Register a function to be called when a SIGHUP occurs.
Args:
func (function): Function to be called when sent a SIGHUP signal.
Will be called with a single argument, the homeserver.
"""
_sighup_callbacks.append(func)
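# Illustrative usage (added comment; ``_log_sighup`` is a hypothetical callback,
# not part of this module):
#
#     def _log_sighup(hs):
#         logger.info("Caught SIGHUP for %s", hs.hostname)
#
#     register_sighup(_log_sighup)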
def start_worker_reactor(appname, config):
""" Run the reactor in the main process
Daemonizes if necessary, and then configures some resources, before starting
the reactor. Pulls configuration from the 'worker' settings in 'config'.
Args:
appname (str): application name which will be sent to syslog
config (synapse.config.Config): config object
"""
logger = logging.getLogger(config.worker_app)
start_reactor(
appname,
soft_file_limit=config.soft_file_limit,
gc_thresholds=config.gc_thresholds,
pid_file=config.worker_pid_file,
daemonize=config.worker_daemonize,
cpu_affinity=config.worker_cpu_affinity,
print_pidfile=config.print_pidfile,
logger=logger,
)
def start_reactor(
appname,
soft_file_limit,
gc_thresholds,
pid_file,
daemonize,
cpu_affinity,
print_pidfile,
logger,
):
""" Run the reactor in the main process
Daemonizes if necessary, and then configures some resources, before starting
the reactor
Args:
appname (str): application name which will be sent to syslog
soft_file_limit (int):
gc_thresholds:
pid_file (str): name of pid file to write to if daemonize is True
daemonize (bool): true to run the reactor in a background process
cpu_affinity (int|None): cpu affinity mask
print_pidfile (bool): whether to print the pid file, if daemonize is True
logger (logging.Logger): logger instance to pass to Daemonize
"""
install_dns_limiter(reactor)
def run():
# make sure that we run the reactor with the sentinel log context,
# otherwise other PreserveLoggingContext instances will get confused
# and complain when they see the logcontext arbitrarily swapping
# between the sentinel and `run` logcontexts.
with PreserveLoggingContext():
logger.info("Running")
if cpu_affinity is not None:
# Turn the bitmask into bits, reverse it so we go from 0 up
mask_to_bits = bin(cpu_affinity)[2:][::-1]
cpus = []
cpu_num = 0
for i in mask_to_bits:
if i == "1":
cpus.append(cpu_num)
cpu_num += 1
p = psutil.Process()
p.cpu_affinity(cpus)
change_resource_limit(soft_file_limit)
if gc_thresholds:
gc.set_threshold(*gc_thresholds)
reactor.run()
if daemonize:
if print_pidfile:
print(pid_file)
daemon = Daemonize(
app=appname,
pid=pid_file,
action=run,
auto_close_fds=False,
verbose=True,
logger=logger,
)
daemon.start()
else:
run()
def quit_with_error(error_string):
message_lines = error_string.split("\n")
line_length = max([len(l) for l in message_lines if len(l) < 80]) + 2
sys.stderr.write("*" * line_length + '\n')
for line in message_lines:
sys.stderr.write(" %s\n" % (line.rstrip(),))
sys.stderr.write("*" * line_length + '\n')
sys.exit(1)
def listen_metrics(bind_addresses, port):
"""
Start Prometheus metrics server.
"""
from synapse.metrics import RegistryProxy
from prometheus_client import start_http_server
for host in bind_addresses:
logger.info("Starting metrics listener on %s:%d", host, port)
start_http_server(port, addr=host, registry=RegistryProxy)
def listen_tcp(bind_addresses, port, factory, reactor=reactor, backlog=50):
"""
Create a TCP socket for a port and several addresses
Returns:
list[twisted.internet.tcp.Port]: listening for TCP connections
"""
r = []
for address in bind_addresses:
try:
r.append(
reactor.listenTCP(
port,
factory,
backlog,
address
)
)
except error.CannotListenError as e:
check_bind_error(e, address, bind_addresses)
return r
def listen_ssl(
bind_addresses, port, factory, context_factory, reactor=reactor, backlog=50
):
"""
    Create a TLS-over-TCP socket for a port and several addresses
Returns:
list of twisted.internet.tcp.Port listening for TLS connections
"""
r = []
for address in bind_addresses:
try:
r.append(
reactor.listenSSL(
port,
factory,
context_factory,
backlog,
address
)
)
except error.CannotListenError as e:
check_bind_error(e, address, bind_addresses)
return r
def refresh_certificate(hs):
"""
Refresh the TLS certificates that Synapse is using by re-reading them from
disk and updating the TLS context factories to use them.
"""
if not hs.config.has_tls_listener():
# attempt to reload the certs for the good of the tls_fingerprints
hs.config.read_certificate_from_disk(require_cert_and_key=False)
return
hs.config.read_certificate_from_disk(require_cert_and_key=True)
hs.tls_server_context_factory = context_factory.ServerContextFactory(hs.config)
if hs._listening_services:
logger.info("Updating context factories...")
for i in hs._listening_services:
# When you listenSSL, it doesn't make an SSL port but a TCP one with
# a TLS wrapping factory around the factory you actually want to get
# requests. This factory attribute is public but missing from
# Twisted's documentation.
if isinstance(i.factory, TLSMemoryBIOFactory):
addr = i.getHost()
logger.info(
"Replacing TLS context factory on [%s]:%i", addr.host, addr.port,
)
# We want to replace TLS factories with a new one, with the new
# TLS configuration. We do this by reaching in and pulling out
# the wrappedFactory, and then re-wrapping it.
i.factory = TLSMemoryBIOFactory(
hs.tls_server_context_factory,
False,
i.factory.wrappedFactory
)
logger.info("Context factories updated.")
def start(hs, listeners=None):
"""
Start a Synapse server or worker.
Args:
hs (synapse.server.HomeServer)
listeners (list[dict]): Listener configuration ('listeners' in homeserver.yaml)
"""
try:
# Set up the SIGHUP machinery.
if hasattr(signal, "SIGHUP"):
def handle_sighup(*args, **kwargs):
for i in _sighup_callbacks:
i(hs)
signal.signal(signal.SIGHUP, handle_sighup)
register_sighup(refresh_certificate)
# Load the certificate from disk.
refresh_certificate(hs)
# It is now safe to start your Synapse.
hs.start_listening(listeners)
hs.get_datastore().start_profiling()
setup_sentry(hs)
except Exception:
traceback.print_exc(file=sys.stderr)
reactor = hs.get_reactor()
if reactor.running:
reactor.stop()
sys.exit(1)
def setup_sentry(hs):
"""Enable sentry integration, if enabled in configuration
Args:
hs (synapse.server.HomeServer)
"""
if not hs.config.sentry_enabled:
return
import sentry_sdk
sentry_sdk.init(
dsn=hs.config.sentry_dsn,
release=get_version_string(synapse),
)
# We set some default tags that give some context to this instance
with sentry_sdk.configure_scope() as scope:
scope.set_tag("matrix_server_name", hs.config.server_name)
app = hs.config.worker_app if hs.config.worker_app else "synapse.app.homeserver"
name = hs.config.worker_name if hs.config.worker_name else "master"
scope.set_tag("worker_app", app)
scope.set_tag("worker_name", name)
def install_dns_limiter(reactor, max_dns_requests_in_flight=100):
"""Replaces the resolver with one that limits the number of in flight DNS
requests.
This is to workaround https://twistedmatrix.com/trac/ticket/9620, where we
can run out of file descriptors and infinite loop if we attempt to do too
many DNS queries at once
"""
new_resolver = _LimitedHostnameResolver(
reactor.nameResolver, max_dns_requests_in_flight,
)
reactor.installNameResolver(new_resolver)
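# Added note (best-effort explanation): _LimitedHostnameResolver below relies on
# ``Linearizer(max_count=N)`` to allow at most N resolutions in flight at once;
# additional lookups queue on ``self._limiter.queue(())`` until a slot frees up.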
class _LimitedHostnameResolver(object):
"""Wraps a IHostnameResolver, limiting the number of in-flight DNS lookups.
"""
def __init__(self, resolver, max_dns_requests_in_flight):
self._resolver = resolver
self._limiter = Linearizer(
name="dns_client_limiter", max_count=max_dns_requests_in_flight,
)
def resolveHostName(self, resolutionReceiver, hostName, portNumber=0,
addressTypes=None, transportSemantics='TCP'):
# Note this is happening deep within the reactor, so we don't need to
# worry about log contexts.
# We need this function to return `resolutionReceiver` so we do all the
# actual logic involving deferreds in a separate function.
self._resolve(
resolutionReceiver, hostName, portNumber,
addressTypes, transportSemantics,
)
return resolutionReceiver
@defer.inlineCallbacks
def _resolve(self, resolutionReceiver, hostName, portNumber=0,
addressTypes=None, transportSemantics='TCP'):
with (yield self._limiter.queue(())):
# resolveHostName doesn't return a Deferred, so we need to hook into
# the receiver interface to get told when resolution has finished.
deferred = defer.Deferred()
receiver = _DeferredResolutionReceiver(resolutionReceiver, deferred)
self._resolver.resolveHostName(
receiver, hostName, portNumber,
addressTypes, transportSemantics,
)
yield deferred
class _DeferredResolutionReceiver(object):
"""Wraps a IResolutionReceiver and simply resolves the given deferred when
resolution is complete
"""
def __init__(self, receiver, deferred):
self._receiver = receiver
self._deferred = deferred
def resolutionBegan(self, resolutionInProgress):
self._receiver.resolutionBegan(resolutionInProgress)
def addressResolved(self, address):
self._receiver.addressResolved(address)
def resolutionComplete(self):
self._deferred.callback(())
self._receiver.resolutionComplete()
| []
| []
| []
| [] | [] | python | null | null | null |
docs/conf.py | # This file is execfile()d with the current directory set to its containing dir.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
import shutil
# -- Path setup --------------------------------------------------------------
__location__ = os.path.dirname(__file__)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.join(__location__, "../src"))
# -- Run sphinx-apidoc -------------------------------------------------------
# This hack is necessary since RTD does not issue `sphinx-apidoc` before running
# `sphinx-build -b html . _build/html`. See Issue:
# https://github.com/readthedocs/readthedocs.org/issues/1139
# DON'T FORGET: Check the box "Install your project inside a virtualenv using
# setup.py install" in the RTD Advanced Settings.
# Additionally, it helps us avoid running apidoc manually.
try: # for Sphinx >= 1.7
from sphinx.ext import apidoc
except ImportError:
from sphinx import apidoc
output_dir = os.path.join(__location__, "api")
module_dir = os.path.join(__location__, "../src/streaming_stats")
try:
shutil.rmtree(output_dir)
except FileNotFoundError:
pass
try:
import sphinx
cmd_line = f"sphinx-apidoc --implicit-namespaces -f -o {output_dir} {module_dir}"
args = cmd_line.split(" ")
if tuple(sphinx.__version__.split(".")) >= ("1", "7"):
# This is a rudimentary parse_version to avoid external dependencies
args = args[1:]
apidoc.main(args)
except Exception as e:
print("Running `sphinx-apidoc` failed!\n{}".format(e))
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.autosummary",
"sphinx.ext.viewcode",
"sphinx.ext.coverage",
"sphinx.ext.doctest",
"sphinx.ext.ifconfig",
"sphinx.ext.mathjax",
"sphinx.ext.napoleon",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# Configure AutoStructify
# https://recommonmark.readthedocs.io/en/latest/auto_structify.html
def setup(app):
from recommonmark.transform import AutoStructify
params = {
"enable_auto_toc_tree": True,
"auto_toc_tree_section": "Contents",
"auto_toc_maxdepth": 2,
"enable_eval_rst": True,
"enable_math": True,
"enable_inline_math": True,
}
app.add_config_value("recommonmark_config", params, True)
app.add_transform(AutoStructify)
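# With the AutoStructify settings above, Markdown sources gain extra features; for
# example (illustrative), enable_eval_rst lets an .md file embed reStructuredText in a
# fenced code block whose language is set to eval_rst, and the auto_toc_tree options
# turn a "Contents" section of links into a Sphinx toctree.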
# Enable markdown
extensions.append("recommonmark")
# The suffix of source filenames.
source_suffix = [".rst", ".md"]
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "streaming_stats"
copyright = "2022, Will Fitzgerald"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# version: The short X.Y version.
# release: The full version, including alpha/beta/rc tags.
# If you don’t need the separation provided between version and release,
# just set them both to the same value.
try:
from streaming_stats import __version__ as version
except ImportError:
version = ""
if not version or version.lower() == "unknown":
version = os.getenv("READTHEDOCS_VERSION", "unknown") # automatically set by RTD
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", ".venv"]
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If this is True, todo emits a warning for each TODO entry. The default is False.
todo_emit_warnings = True
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"sidebar_width": "300px",
"page_width": "1200px"
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = ""
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "streaming_stats-doc"
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ("letterpaper" or "a4paper").
# "papersize": "letterpaper",
# The font size ("10pt", "11pt" or "12pt").
# "pointsize": "10pt",
# Additional stuff for the LaTeX preamble.
# "preamble": "",
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
("index", "user_guide.tex", "streaming_stats Documentation", "Will Fitzgerald", "manual")
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = ""
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- External mapping --------------------------------------------------------
python_version = ".".join(map(str, sys.version_info[0:2]))
intersphinx_mapping = {
"sphinx": ("https://www.sphinx-doc.org/en/master", None),
"python": ("https://docs.python.org/" + python_version, None),
"matplotlib": ("https://matplotlib.org", None),
"numpy": ("https://numpy.org/doc/stable", None),
"sklearn": ("https://scikit-learn.org/stable", None),
"pandas": ("https://pandas.pydata.org/pandas-docs/stable", None),
"scipy": ("https://docs.scipy.org/doc/scipy/reference", None),
"setuptools": ("https://setuptools.pypa.io/en/stable/", None),
"pyscaffold": ("https://pyscaffold.org/en/stable", None),
}
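# With the mapping above, intersphinx lets prose and docstrings cross-reference the
# external projects directly; for example (illustrative), :class:`numpy.ndarray` or
# :func:`pandas.read_csv` in an .rst file will link to the mapped documentation.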
print(f"loading configurations for {project} {version} ...", file=sys.stderr) | []
| []
| [
"READTHEDOCS_VERSION"
]
| [] | ["READTHEDOCS_VERSION"] | python | 1 | 0 | |
drone/tello/step6/main.go | package main
import (
"fmt"
"io"
"os/exec"
"runtime"
"time"
"os"
"sync/atomic"
"gobot.io/x/gobot"
"gobot.io/x/gobot/platforms/dji/tello"
"gobot.io/x/gobot/platforms/joystick"
)
var drone = tello.NewDriver("8888")
type pair struct {
x float64
y float64
}
var leftX, leftY, rightX, rightY atomic.Value
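// offset mirrors the maximum magnitude of the int16 axis values reported by the
// joystick driver (see the LeftX/LeftY/RightX/RightY handlers below); it is passed
// to tello.ValidatePitch to normalise raw stick readings against the full axis range.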
const offset = 32767.0
func main() {
// configLocation will get set at runtime based on OS
var configLocation string
switch runtime.GOOS {
case "darwin":
configLocation = fmt.Sprintf("%s/src/gobot.io/x/gobot/platforms/joystick/configs/dualshock3.json", os.Getenv("GOPATH"))
case "linux":
configLocation = "dualshock3"
default:
fmt.Sprintf("Unsupported OS: %s", runtime.GOOS)
}
var joystickAdaptor = joystick.NewAdaptor()
var stick = joystick.NewDriver(joystickAdaptor, configLocation)
var currentFlightData *tello.FlightData
work := func() {
leftX.Store(float64(0.0))
leftY.Store(float64(0.0))
rightX.Store(float64(0.0))
rightY.Store(float64(0.0))
configureStickEvents(stick)
mplayer := exec.Command("mplayer", "-fps", "25", "-")
mplayerIn, _ := mplayer.StdinPipe()
configureVideoEvents(mplayerIn)
if err := mplayer.Start(); err != nil {
fmt.Println(err)
return
}
drone.On(tello.FlightDataEvent, func(data interface{}) {
fd := data.(*tello.FlightData)
currentFlightData = fd
})
gobot.Every(1*time.Second, func() {
printFlightData(currentFlightData)
})
gobot.Every(50*time.Millisecond, func() {
rightStick := getRightStick()
switch {
case rightStick.y < -10:
drone.Forward(tello.ValidatePitch(rightStick.y, offset))
case rightStick.y > 10:
drone.Backward(tello.ValidatePitch(rightStick.y, offset))
default:
drone.Forward(0)
}
switch {
case rightStick.x > 10:
drone.Right(tello.ValidatePitch(rightStick.x, offset))
case rightStick.x < -10:
drone.Left(tello.ValidatePitch(rightStick.x, offset))
default:
drone.Right(0)
}
})
gobot.Every(50*time.Millisecond, func() {
leftStick := getLeftStick()
switch {
case leftStick.y < -10:
drone.Up(tello.ValidatePitch(leftStick.y, offset))
case leftStick.y > 10:
drone.Down(tello.ValidatePitch(leftStick.y, offset))
default:
drone.Up(0)
}
switch {
case leftStick.x > 20:
drone.Clockwise(tello.ValidatePitch(leftStick.x, offset))
case leftStick.x < -20:
drone.CounterClockwise(tello.ValidatePitch(leftStick.x, offset))
default:
drone.Clockwise(0)
}
})
}
robot := gobot.NewRobot("tello",
[]gobot.Connection{joystickAdaptor},
[]gobot.Device{drone, stick},
work,
)
robot.Start()
}
func configureVideoEvents(mplayerIn io.WriteCloser) {
drone.On(tello.ConnectedEvent, func(data interface{}) {
fmt.Println("Connected")
drone.StartVideo()
drone.SetVideoEncoderRate(tello.VideoBitRateAuto)
gobot.Every(100*time.Millisecond, func() {
drone.StartVideo()
})
})
drone.On(tello.VideoFrameEvent, func(data interface{}) {
pkt := data.([]byte)
if _, err := mplayerIn.Write(pkt); err != nil {
fmt.Println(err)
}
})
}
func configureStickEvents(stick *joystick.Driver) {
stick.On(joystick.TrianglePress, func(data interface{}) {
fmt.Println("taking off...")
drone.TakeOff()
})
stick.On(joystick.XPress, func(data interface{}) {
fmt.Println("landing...")
drone.Land()
})
stick.On(joystick.UpPress, func(data interface{}) {
fmt.Println("FrontFlip")
drone.FrontFlip()
})
stick.On(joystick.DownPress, func(data interface{}) {
fmt.Println("BackFlip")
drone.BackFlip()
})
stick.On(joystick.RightPress, func(data interface{}) {
fmt.Println("RightFlip")
drone.RightFlip()
})
stick.On(joystick.LeftPress, func(data interface{}) {
fmt.Println("LeftFlip")
drone.LeftFlip()
})
stick.On(joystick.LeftX, func(data interface{}) {
val := float64(data.(int16))
leftX.Store(val)
})
stick.On(joystick.LeftY, func(data interface{}) {
val := float64(data.(int16))
leftY.Store(val)
})
stick.On(joystick.RightX, func(data interface{}) {
val := float64(data.(int16))
rightX.Store(val)
})
stick.On(joystick.RightY, func(data interface{}) {
val := float64(data.(int16))
rightY.Store(val)
})
}
func printFlightData(d *tello.FlightData) {
// Guard against the 1-second ticker firing before the first FlightDataEvent arrives.
if d == nil {
return
}
if d.BatteryLow {
fmt.Printf(" -- Battery low: %d%% --\n", d.BatteryPercentage)
}
displayData := `
Height: %d
Ground Speed: %d
Light Strength: %d
`
fmt.Printf(displayData, d.Height, d.GroundSpeed, d.LightStrength)
}
func getLeftStick() pair {
s := pair{x: 0, y: 0}
s.x = leftX.Load().(float64)
s.y = leftY.Load().(float64)
return s
}
func getRightStick() pair {
s := pair{x: 0, y: 0}
s.x = rightX.Load().(float64)
s.y = rightY.Load().(float64)
return s
}
| [
"\"GOPATH\""
]
| []
| [
"GOPATH"
]
| [] | ["GOPATH"] | go | 1 | 0 | |
test_ms.py | import numpy as np
import torch
import torch.optim as optim
import torch.nn as nn
from torch.autograd import Variable
import time
import re
import os
import sys
import cv2
import bdcn
from datasets.dataset import Data
import argparse
import cfg
def test(model, args):
test_root = cfg.config_test[args.dataset]['data_root']
test_lst = cfg.config_test[args.dataset]['data_lst']
test_name_lst = os.path.join(test_root, 'voc_valtest.txt')
if 'Multicue' in args.dataset:
test_lst = test_lst % args.k
test_name_lst = os.path.join(test_root, 'test%d_id.txt' % args.k)
mean_bgr = np.array(cfg.config_test[args.dataset]['mean_bgr'])
test_img = Data(test_root, test_lst, mean_bgr=mean_bgr, scale=[0.5, 1, 1.5])
testloader = torch.utils.data.DataLoader(
test_img, batch_size=1, shuffle=False, num_workers=8)
nm = np.loadtxt(test_name_lst, dtype=str)
assert len(testloader) == len(nm)
save_dir = args.res_dir
if not os.path.exists(save_dir):
os.mkdir(save_dir)
if args.cuda:
model.cuda()
model.eval()
data_iter = iter(testloader)
iter_per_epoch = len(testloader)
start_time = time.time()
for i, (ms_data, label) in enumerate(testloader):
ms_fuse = np.zeros((label.size()[2], label.size()[3]))
for data in ms_data:
if args.cuda:
data = data.cuda()
data = Variable(data, volatile=True)
out = model(data)
fuse = torch.sigmoid(out[-1]).cpu().data.numpy()[0, 0, :, :]
fuse = cv2.resize(fuse, (label.size()[3], label.size()[2]), interpolation=cv2.INTER_LINEAR)
ms_fuse += fuse
ms_fuse /= len(ms_data)
if not os.path.exists(os.path.join(save_dir, 'fuse')):
os.mkdir(os.path.join(save_dir, 'fuse'))
cv2.imwrite(os.path.join(save_dir, 'fuse', '%s.jpg' % nm[i]), 255-ms_fuse*255)
print('Overall Time use: ', time.time() - start_time)
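# Note: the loop above performs multi-scale testing -- each image is evaluated at the
# scales configured in Data (0.5, 1, 1.5), the sigmoid edge maps are resized back to
# the label resolution, and the per-scale predictions are averaged before saving.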
def main():
import time
print(time.localtime())
args = parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
model = bdcn.BDCN()
model.load_state_dict(torch.load('%s' % (args.model)))
test(model, args)
def parse_args():
parser = argparse.ArgumentParser('test BDCN')
parser.add_argument('-d', '--dataset', type=str, choices=cfg.config_test.keys(), default='bsds500', help='The dataset to train')
parser.add_argument('-c', '--cuda', action='store_true', help='whether use gpu to train network')
parser.add_argument('-g', '--gpu', type=str, default='0', help='the gpu id to train net')
parser.add_argument('-m', '--model', type=str, default='params/bdcn_40000.pth', help='the model to test')
parser.add_argument('--res-dir', type=str, default='result', help='the dir to store result')
parser.add_argument('-k', type=int, default=1, help='the k-th split set of multicue')
return parser.parse_args()
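# Example invocation (illustrative, using the argparse defaults above):
#   python test_ms.py -d bsds500 -c -g 0 -m params/bdcn_40000.pth --res-dir result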
if __name__ == '__main__':
main()
| []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
tools/releasetool/releasetool.py | #
# LC7 Release Tool
#
########################################################################################################################
from __future__ import print_function
import sys
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
import os
import argparse
import json
try:
import boto
import boto.s3.connection
except ImportError:
eprint("Please install boto. `pip install boto`")
sys.exit(1)
#try:
# import xmltodict
#except ImportError:
# alert("Please install xmltodict. `pip install xmltodict`")
# sys.exit(1)
import platform
import hashlib
import subprocess
import urlparse
import shlex
import errno
import datetime
import fnmatch
import distutils.spawn
import shutil
import zipfile
import s3engine
## COLORS ##
try:
import colorama
except ImportError:
eprint("Please install colorama. `pip install colorama`")
sys.exit(1)
from colorama import Fore, Back, Style
colorama.init()
########################################################################################################################
## LC7 ENVIRONMENTS ##
VERSIONFILE = r"..\..\lc7\include\appversion.h"
MANIFESTS = r"..\..\dist"
LINKS_BUCKET = r"s3://installers.lc7/"
ENVS = {
"windows": {
"release32": {
"suffix": ".exe",
"version_string": r"$$VERSION_NUMBER$$ Win32",
"installerdir": r"win32",
"root": r"..\..\build_win32\dist\RelWithDebInfo",
"plugins": r"..\..\build_win32\dist\RelWithDebInfo\lcplugins",
"output": r"..\..\build_win32\output\RelWithDebInfo_Win32",
"exclude": "*.pdb;Thumbs.db;*.lib;*.exp;*.PreARM;*.manifest",
"https_installers": "https://s3.amazonaws.com/installers.lc7/lc7/win32/release",
"s3_installers": "s3://installers.lc7/lc7/win32/release",
"https_updates": "https://s3.amazonaws.com/updates.lc7/lc7/win32/release",
"s3_updates": "s3://updates.lc7/lc7/win32/release",
"https_debugs": "https://s3.amazonaws.com/debugs.lc7/lc7/win32/release",
"s3_debugs": "s3://debugs.lc7/lc7/win32/release",
"https_plugins": "https://s3.amazonaws.com/updates.lc7/lc7/lc7/win32/release/lcplugins",
"s3_plugins": "s3://updates.lc7/lc7/win32/release/lcplugins",
"download_prefixes": [ "win32", "win32/release" ],
"buildcommand": [
"$$MSBUILD$$",
"..\\..\\build_win32\\L0phtcrack 7.sln",
"/t:Build",
"/p:Configuration=RelWithDebInfo",
"/p:Platform=Win32",
],
"rebuildcommand": [
"$$MSBUILD$$",
"..\\..\\build_win32\\L0phtcrack 7.sln",
"/t:Rebuild",
"/p:Configuration=RelWithDebInfo",
"/p:Platform=Win32"
],
"makeinstaller": [
r"$$NSISDIR$$\makensis.exe",
r"/DVERSION_STRING=$$VERSION_STRING$$",
r"/DVERSION_NUMBER=$$VERSION_NUMBER$$.$$VERSION_DATE$$$$VERSION_TIME$$",
r"/DINPUTDIR=$$INPUTDIR$$",
r"/DOUTFILE=$$OUTFILE$$",
r"/V4",
r"setup.nsi"
],
"makeupdate": [
r"$$NSISDIR$$\makensis.exe",
r"/DVERSION_STRING=$$VERSION_STRING$$",
r"/DVERSION_NUMBER=$$VERSION_NUMBER$$.$$VERSION_DATE$$$$VERSION_TIME$$",
r"/DINPUTDIR=$$INPUTDIR$$",
r"/DOUTFILE=$$OUTFILE$$",
r"/DFULL=$$FULL$$",
r"/V4",
r"update.nsi"
],
"signtool": [
r"signtool",
r"sign",
r"/t",
r"http://timestamp.digicert.com",
r"/a",
r"$$EXECUTABLE$$"
],
"signfilepatterns": [ "*.exe", "*.dll" ]
},
"beta32": {
"suffix": ".exe",
"version_string": r"$$VERSION_NUMBER$$ Win32 BETA $$VERSION_DATE$$$$VERSION_TIME$$",
"installerdir": r"win32",
"root": r"..\..\build_win32\dist\Beta",
"plugins": r"..\..\build_win32\dist\Beta\lcplugins",
"output": r"..\..\build_win32\output\Beta_Win32",
"exclude": "*.pdb;Thumbs.db;*.lib;*.exp;*.PreARM;*.manifest",
"https_installers": "https://s3.amazonaws.com/installers.lc7/lc7/win32/beta",
"s3_installers": "s3://installers.lc7/lc7/win32/beta",
"https_updates": "https://s3.amazonaws.com/updates.lc7/lc7/win32/beta",
"s3_updates": "s3://updates.lc7/lc7/win32/beta",
"https_debugs": "https://s3.amazonaws.com/debugs.lc7/lc7/win32/beta",
"s3_debugs": "s3://debugs.lc7/lc7/win32/beta",
"https_plugins": "https://s3.amazonaws.com/updates.lc7/lc7/lc7/win32/beta/lcplugins",
"s3_plugins": "s3://updates.lc7/lc7/win32/beta/lcplugins",
"download_prefixes": [ "win32/beta" ],
"buildcommand": [
"$$MSBUILD$$",
"..\\..\\build_win32\\L0phtcrack 7.sln",
"/t:Build",
"/p:Configuration=Beta",
"/p:Platform=Win32"
],
"rebuildcommand": [
"$$MSBUILD$$",
"..\\..\\build_win32\\L0phtcrack 7.sln",
"/t:Rebuild",
"/p:Configuration=Beta",
"/p:Platform=Win32"
],
"makeinstaller": [
r"$$NSISDIR$$\makensis.exe",
r"/DVERSION_STRING=$$VERSION_STRING$$",
r"/DVERSION_NUMBER=$$VERSION_NUMBER$$.$$VERSION_DATE$$$$VERSION_TIME$$",
r"/DINPUTDIR=$$INPUTDIR$$",
r"/DOUTFILE=$$OUTFILE$$",
r"/V4",
r"setup.nsi"
],
"makeupdate": [
r"$$NSISDIR$$\makensis.exe",
r"/DVERSION_STRING=$$VERSION_STRING$$",
r"/DVERSION_NUMBER=$$VERSION_NUMBER$$.$$VERSION_DATE$$$$VERSION_TIME$$",
r"/DINPUTDIR=$$INPUTDIR$$",
r"/DOUTFILE=$$OUTFILE$$",
r"/DFULL=$$FULL$$",
r"/V4",
r"update.nsi"
],
"signtool": [
r"signtool",
r"sign",
r"/t",
r"http://timestamp.digicert.com",
r"/a",
r"$$EXECUTABLE$$"
],
"signfilepatterns": [ "*.exe", "*.dll" ]
},
"release64": {
"suffix": ".exe",
"version_string": r"$$VERSION_NUMBER$$ Win64",
"installerdir": r"win64",
"root": r"..\..\build_win64\dist\RelWithDebInfo",
"plugins": r"..\..\build_win64\dist\RelWithDebInfo\lcplugins",
"output": r"..\..\build_win64\output\RelWithDebInfo_Win64",
"exclude": "*.pdb;Thumbs.db;*.lib;*.exp;*.PreARM;*.manifest",
"https_installers": "https://s3.amazonaws.com/installers.lc7/lc7/win64/release",
"s3_installers": "s3://installers.lc7/lc7/win64/release",
"https_updates": "https://s3.amazonaws.com/updates.lc7/lc7/win64/release",
"s3_updates": "s3://updates.lc7/lc7/win64/release",
"https_debugs": "https://s3.amazonaws.com/debugs.lc7/lc7/win64/release",
"s3_debugs": "s3://debugs.lc7/lc7/win64/release",
"https_plugins": "https://s3.amazonaws.com/updates.lc7/lc7/lc7/win64/release/lcplugins",
"s3_plugins": "s3://updates.lc7/lc7/win64/release/lcplugins",
"download_prefixes": [ "win64", "win64/release" ],
"buildcommand": [
"$$MSBUILD$$",
"..\\..\\build_win64\\L0phtcrack 7.sln",
"/t:Build",
"/p:Configuration=RelWithDebInfo",
"/p:Platform=x64"
],
"rebuildcommand": [
"$$MSBUILD$$",
"..\\..\\build_win64\\L0phtcrack 7.sln",
"/t:Rebuild",
"/p:Configuration=RelWithDebInfo",
"/p:Platform=x64"
],
"makeinstaller": [
r"$$NSISDIR$$\makensis.exe",
r"/DVERSION_STRING=$$VERSION_STRING$$",
r"/DVERSION_NUMBER=$$VERSION_NUMBER$$.$$VERSION_DATE$$$$VERSION_TIME$$",
r"/DINPUTDIR=$$INPUTDIR$$",
r"/DOUTFILE=$$OUTFILE$$",
r"/V4",
r"setup.nsi"
],
"makeupdate": [
r"$$NSISDIR$$\makensis.exe",
r"/DVERSION_STRING=$$VERSION_STRING$$",
r"/DVERSION_NUMBER=$$VERSION_NUMBER$$.$$VERSION_DATE$$$$VERSION_TIME$$",
r"/DINPUTDIR=$$INPUTDIR$$",
r"/DOUTFILE=$$OUTFILE$$",
r"/DFULL=$$FULL$$",
r"/V4",
r"update.nsi"
],
"signtool": [
r"signtool",
r"sign",
r"/t",
r"http://timestamp.digicert.com",
r"/a",
r"$$EXECUTABLE$$"
],
"signfilepatterns": [ "*.exe", "*.dll" ]
},
"beta64": {
"suffix": ".exe",
"version_string": r"$$VERSION_NUMBER$$ Win64 BETA $$VERSION_DATE$$$$VERSION_TIME$$",
"installerdir": r"win64",
"root": r"..\..\build_win64\dist\Beta",
"plugins": r"..\..\build_win64\dist\Beta\lcplugins",
"output": r"..\..\build_win64\output\Beta_Win64",
"exclude": "*.pdb;Thumbs.db;*.lib;*.exp;*.PreARM;*.manifest",
"https_installers": "https://s3.amazonaws.com/installers.lc7/lc7/win64/beta",
"s3_installers": "s3://installers.lc7/lc7/win64/beta",
"https_updates": "https://s3.amazonaws.com/updates.lc7/lc7/win64/beta",
"s3_updates": "s3://updates.lc7/lc7/win64/beta",
"https_debugs": "https://s3.amazonaws.com/debugs.lc7/lc7/win64/beta",
"s3_debugs": "s3://debugs.lc7/lc7/win64/beta",
"https_plugins": "https://s3.amazonaws.com/updates.lc7/lc7/lc7/win64/beta/lcplugins",
"s3_plugins": "s3://updates.lc7/lc7/win64/beta/lcplugins",
"download_prefixes": [ "win64/beta" ],
"buildcommand": [
"$$MSBUILD$$",
"..\\..\\build_win64\\L0phtcrack 7.sln",
"/t:Build",
"/p:Configuration=Beta",
"/p:Platform=x64"
],
"rebuildcommand": [
"$$MSBUILD$$",
"..\\..\\build_win64\\L0phtcrack 7.sln",
"/t:Rebuild",
"/p:Configuration=Beta",
"/p:Platform=x64"
],
"makeinstaller": [
r"$$NSISDIR$$\makensis.exe",
r"/DVERSION_STRING=$$VERSION_STRING$$",
r"/DVERSION_NUMBER=$$VERSION_NUMBER$$.$$VERSION_DATE$$$$VERSION_TIME$$",
r"/DINPUTDIR=$$INPUTDIR$$",
r"/DOUTFILE=$$OUTFILE$$",
r"/V4",
r"setup.nsi"
],
"makeupdate": [
r"$$NSISDIR$$\makensis.exe",
r"/DVERSION_STRING=$$VERSION_STRING$$",
r"/DVERSION_NUMBER=$$VERSION_NUMBER$$.$$VERSION_DATE$$$$VERSION_TIME$$",
r"/DINPUTDIR=$$INPUTDIR$$",
r"/DOUTFILE=$$OUTFILE$$",
r"/DFULL=$$FULL$$",
r"/V4",
r"update.nsi"
],
"signtool": [
r"signtool",
r"sign",
r"/t",
r"http://timestamp.digicert.com",
r"/a",
r"$$EXECUTABLE$$"
],
"signfilepatterns": [ "*.exe", "*.dll" ]
}
}
}
# Detect environment
if platform.architecture()[1]=="WindowsPE":
print("Selecting Windows environment")
BUILDS = ENVS["windows"]
# Get NSIS directory
NSISDIR = os.getenv("NSISDIR",None)
if not NSISDIR or not os.path.exists(NSISDIR):
if os.path.exists("C:\\Program Files (x86)\\NSIS"):
NSISDIR = "C:\\Program Files (x86)\\NSIS"
elif os.path.exists("D:\\NSIS"):
NSISDIR = "D:\\NSIS"
else:
print("Unknown NSIS location. Specify NSISDIR environment variable.")
sys.exit(1)
# Get MSBUILD location
MSBUILD = os.getenv("MSBUILD", None)
if not MSBUILD or not os.path.exists(MSBUILD):
MSBUILD = distutils.spawn.find_executable("MSBuild.exe")
if not MSBUILD:
MSBUILD = r"C:\Program Files (x86)\MSBuild\12.0\Bin\amd64\MSBuild.exe"
if not os.path.exists(MSBUILD):
print("Unknown MSBuild location. Specify MSBUILD environment variable.")
sys.exit(1)
else:
BUILDS = None
print("Unknown environment!")
sys.exit(1)
# Get boto variables
LC7_RELEASE_ACCESS_KEY_ID = os.getenv("LC7_RELEASE_ACCESS_KEY_ID",None)
LC7_RELEASE_SECRET_ACCESS_KEY = os.getenv("LC7_RELEASE_SECRET_ACCESS_KEY",None)
########################################################################################################################
## S3 Utilities ##
def get_s3_connection():
conn = boto.s3.connection.S3Connection(LC7_RELEASE_ACCESS_KEY_ID, LC7_RELEASE_SECRET_ACCESS_KEY, is_secure=False)
if not conn:
print("Unable to connect to S3")
sys.exit(3)
return conn
def get_bucket_path(s3url):
conn = get_s3_connection()
parts = urlparse.urlparse(s3url)
if parts.scheme!="s3":
print("Invalid s3url")
sys.exit(3)
bucket = parts.netloc
path = parts.path.strip("/")
bucket = conn.get_bucket(bucket)
if not bucket:
print("Unable to get bucket")
sys.exit(4)
return bucket, path
def s3status(cur, max):
done = (cur*20)/max
notdone = 20-done
sys.stdout.write("\r["+("#"*done)+("."*notdone)+"] %.2f%%" % (cur*100.0/max))
sys.stdout.flush()
########################################################################################################################
## LC7 Utilities ##
def find_matching_files(rootdir, wildcard):
matches = []
for root, dirnames, filenames in os.walk(rootdir):
for filename in fnmatch.filter(filenames, wildcard):
matches.append(os.path.join(root, filename))
return matches
def get_app_version():
f = open(VERSIONFILE, "r")
versionnumber = None
versiondate = None
versiontime = None
for l in f.readlines():
if "VERSION_NUMBER" in l:
versionnumber = l.split()[2].strip("\"")
if "VERSION_DATE" in l:
versiondate = l.split()[2].strip("\"")
if "VERSION_TIME" in l:
versiontime = l.split()[2].strip("\"")
if "END_EXTERNAL" in l:
break
if versionnumber is None:
print("Unable to get current app version")
sys.exit(5)
return (versionnumber, versiondate, versiontime)
def compare_version(a, b):
an=[int(n) for n in a.split(".")]
bn=[int(n) for n in b.split(".")]
if len(an)!=len(bn):
print("Version number length inconsistent!")
sys.exit(7)
p=0
while an[p] == bn[p]:
p+=1
if p==len(an):
return 0
if an[p]>bn[p]:
return 1
return -1
def parse_replacements(s, **kwargs):
for kw in kwargs.iteritems():
s = s.replace("$$"+kw[0].upper()+"$$",kw[1])
return s
def parse_command(cmd, **kwargs):
subcmd=[]
for arg in cmd:
arg = parse_replacements(arg, **kwargs)
subcmd.append(arg)
return subcmd
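# Illustrative example of the $$TOKEN$$ substitution performed above:
#   parse_command([r"/DOUTFILE=$$OUTFILE$$", "setup.nsi"], outfile=r"C:\out\lc7setup.exe")
#   -> [r"/DOUTFILE=C:\out\lc7setup.exe", "setup.nsi"]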
def get_digests(infile):
md5 = hashlib.md5()
md5.update(open(infile,"rb").read())
md5digest = md5.hexdigest()
sha256 = hashlib.sha256()
sha256.update(open(infile,"rb").read())
sha256digest = sha256.hexdigest()
return (md5digest, sha256digest)
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else: raise
def sign_one_executable(signtool, signfile):
cmd = parse_command(signtool, executable=signfile)
retval = subprocess.call(cmd)
if retval != 0:
print("Code signing error")
sys.exit(retval)
def sign_all_executables(inputdir, signtool, signfilepatterns):
for sfpat in signfilepatterns:
signfiles = find_matching_files(inputdir, sfpat)
for sf in signfiles:
# skip dump dlls
if sf.endswith("lc7dump.dll") or sf.endswith("lc7dump64.dll"):
continue
sign_one_executable(signtool, sf)
########################################################################################################################
## LC7 COMMANDS ##
def cmd_pushplugin(args):
pass
def cmd_pushplugins(args):
pass
def cmd_bumpversion(args):
# Get in-code version number
(version_number, version_date, version_time) = get_app_version()
print("Current Version Number: "+version_number)
print("Current Version Date: "+version_date)
print("Current Version Time: "+version_time)
# Bump minor version number if requested
if args.newversion:
version_parts = [int(n) for n in version_number.split(".")]
version_parts[1] += 1
version_parts[2] = 0
version_number = ".".join([str(n) for n in version_parts])
else:
version_parts = [int(n) for n in version_number.split(".")]
version_parts[2] += 1
version_number = ".".join([str(n) for n in version_parts])
now = datetime.datetime.now()
version_date = now.strftime("%Y%m%d")
version_time = now.strftime("%H%M%S")
# Replace version in appversion.h
infile = open(VERSIONFILE, "r")
outlines = []
replacing = False
for l in infile.readlines():
if replacing:
if "// END_EXTERNAL" in l:
outlines.append(l)
replacing = False
elif "// EXTERNAL" in l:
outlines.append(l)
outlines.append("#define VERSION_NUMBER \""+version_number+"\"\n")
outlines.append("#define VERSION_DATE \""+version_date+"\"\n")
outlines.append("#define VERSION_TIME \""+version_time+"\"\n")
replacing = True
else:
outlines.append(l)
infile.close()
outfile = open(VERSIONFILE, "w")
outfile.writelines(outlines)
outfile.close()
# Replace version in system plugin manifests
manifests = find_matching_files(MANIFESTS, "manifest.json")
for m in manifests:
mf = json.loads(open(m, "r").read())
if mf['isSystemLibrary']:
print("Updating '"+m+"'")
mf['internalVersion'] += 1
mf['displayVersion'] = version_number+" ("+version_date+version_time+")"
mf['releaseDate'] = now.isoformat()
else:
print("Skipping '"+m+"'")
open(m, "w").write(json.dumps(mf, indent=4, sort_keys=True))
# Done
print("New Version Number: "+version_number)
print("New Version Date: "+version_date)
print("New Version Time: "+version_time)
print("Build required for changes to take effect.")
def cmd_build(args):
global BUILDS
# Get build
if args.build not in BUILDS:
print("Unknown build!")
sys.exit(1)
ENV = BUILDS[args.build]
if args.rebuild:
cmd = ENV["rebuildcommand"]
else:
cmd = ENV["buildcommand"]
cmd = parse_command(cmd, msbuild=MSBUILD)
retval = subprocess.call(cmd)
if retval != 0:
print("Build error")
sys.exit(retval)
def cmd_buildinstaller(args):
global BUILDS
# Get build
if args.build not in BUILDS:
print("Unknown build!")
sys.exit(1)
ENV = BUILDS[args.build]
# Get in-code version number
(version_number, version_date, version_time) = get_app_version()
version_string = parse_replacements(ENV["version_string"], version_number=version_number, version_date=version_date, version_time=version_time)
print("App Version: "+version_string)
# Get currently released version number
if LC7_RELEASE_ACCESS_KEY_ID is not None and LC7_RELEASE_SECRET_ACCESS_KEY is not None:
bucket, path = get_bucket_path(ENV["s3_installers"])
key = bucket.get_key(path+"/current.json")
if key is not None and key.exists():
current = json.loads(key.get_contents_as_string())
print("Current Version: "+str(current['version']))
# Verify we aren't building the current version without force option (check we updated version numbers)
if not args.force and current["version"] == version_string:
print("You need to bump the version number and rebuild.")
sys.exit(6)
else:
print("Current Version: none")
inputdir = os.path.abspath(ENV["root"])
print("Input directory: "+inputdir)
#### Sign all executables
sign_all_executables(inputdir, ENV["signtool"], ENV["signfilepatterns"])
#### Build full installer
outname = "lc7setup_v"+version_string.replace(" ", "_")+ENV["suffix"]
outfile = os.path.join(os.path.abspath(ENV["output"]), outname)
print("Installer file: "+outfile)
cmd = parse_command(ENV["makeinstaller"], nsisdir=NSISDIR, version_string=version_string, version_number=version_number, version_date=version_date, version_time=version_time, inputdir=inputdir, outfile=outfile)
#print("Command: "+str(cmd))
# Make output directory
mkdir_p(os.path.abspath(ENV["output"]))
# Switch to environment directory
os.chdir("installer")
os.chdir(ENV["installerdir"])
retval = subprocess.call(cmd)
if retval != 0:
print("Error building installer")
sys.exit(retval)
os.chdir("..")
os.chdir("..")
sign_one_executable(ENV["signtool"], outfile)
#### Build Updater
update_outname = "lc7update_v"+version_string.replace(" ", "_")+ENV["suffix"]
update_outfile = os.path.join(os.path.abspath(ENV["output"]), update_outname)
# Build Partial Updater
print("Partial Update file: "+update_outfile)
cmd = parse_command(ENV["makeupdate"], full=("0" if args.partial else "1"), nsisdir=NSISDIR, version_string=version_string, version_number=version_number, version_date=version_date, version_time=version_time, inputdir=inputdir, outfile=update_outfile)
#print("Command: "+str(cmd))
# Make output directory
mkdir_p(os.path.abspath(ENV["output"]))
# Switch to environment directory
os.chdir("installer")
os.chdir(ENV["installerdir"])
retval = subprocess.call(cmd)
if retval != 0:
print("Error building updater")
sys.exit(retval)
os.chdir("..")
os.chdir("..")
sign_one_executable(ENV["signtool"], update_outfile)
# Write debug files
debug_outname = "lc7debug_v"+version_string.replace(" ", "_")+".zip"
debug_outfile = os.path.join(os.path.abspath(ENV["output"]), debug_outname)
print("Debug file: "+debug_outfile)
zf = zipfile.ZipFile(debug_outfile, "w")
for root, folders, files in os.walk(inputdir):
for f in files:
if f.lower().endswith(".pdb"):
zf.write(os.path.join(root, f), f)
zf.close()
print(Fore.CYAN)
print(os.path.basename(outfile))
(md5digest, sha256digest) = get_digests(outfile)
print("MD5 Digest: \t"+md5digest)
print("SHA256 Digest: \t"+sha256digest)
print("")
print(os.path.basename(update_outfile))
(md5digest, sha256digest) = get_digests(update_outfile)
print("MD5 Digest: \t"+md5digest)
print("SHA256 Digest: \t"+sha256digest)
print(Fore.RESET)
def cmd_pushinstaller(args):
# Get build
if args.build not in BUILDS:
print("Unknown build!")
sys.exit(1)
ENV = BUILDS[args.build]
# Get release notes
try:
releasenotes = open(args.releasenotes, "r").read()
except:
print("Missing release notes")
sys.exit(1)
# Get in-code version number
(version_number, version_date, version_time) = get_app_version()
version_string = parse_replacements(ENV["version_string"], version_number=version_number, version_date=version_date, version_time=version_time)
print("App Version: "+version_string)
###################### INSTALLER
# Find built executable
outname = "lc7setup_v"+version_string.replace(" ", "_")+ENV["suffix"]
outfile = os.path.join(os.path.abspath(ENV["output"]), outname)
print("Installer file: "+outfile)
if not os.path.exists(outfile):
print("Installer version "+version_string+" is not built, exiting.")
sys.exit(1)
(md5digest, sha256digest) = get_digests(outfile)
print("MD5 Digest: \t"+md5digest)
print("SHA256 Digest: \t"+sha256digest)
print("Release Notes:\n"+releasenotes)
# Upload release to S3
bucket, path = get_bucket_path(ENV["s3_installers"])
key = bucket.get_key(path+"/"+outname)
if not key:
key = bucket.new_key(path+"/"+outname)
#key.set_contents_from_filename(outfile, cb=s3status, num_cb=100)
s3 = s3engine.S3Engine(get_s3_connection(), progress_callback=s3engine.ConsoleProgressCallback())
s3.put(outfile, ENV["s3_installers"]+"/"+outname, parallel=True)
print(" Done.")
# Print URL
key.make_public()
installer_url = ENV["https_installers"]+"/"+outname
print(Fore.MAGENTA + "Installer URL:\n"+installer_url + Fore.RESET)
# Get currently released version number
currentkey = bucket.get_key(path+"/current.json")
if not currentkey:
currentkey = bucket.new_key(path+"/current.json")
current = {"version": version_string, "md5digest": md5digest, "sha256digest": sha256digest, "url": installer_url, "releasenotes": releasenotes }
currentkey.set_contents_from_string(json.dumps(current))
currentkey.make_public()
###################### UPDATER
# Find built executable
outname = "lc7update_v"+version_string.replace(" ", "_")+ENV["suffix"]
outfile = os.path.join(os.path.abspath(ENV["output"]), outname)
print("Updater file: "+outfile)
if not os.path.exists(outfile):
print("Updater version "+version_string+" is not built, exiting.")
sys.exit(1)
(md5digest, sha256digest) = get_digests(outfile)
print("MD5 Digest: \t"+md5digest)
print("SHA256 Digest: \t"+sha256digest)
print("Release Notes:\n"+releasenotes)
# Upload release to S3
bucket, path = get_bucket_path(ENV["s3_updates"])
key = bucket.get_key(path+"/"+outname)
if not key:
key = bucket.new_key(path+"/"+outname)
#key.set_contents_from_filename(outfile, cb=s3status, num_cb=100)
s3 = s3engine.S3Engine(get_s3_connection(), progress_callback=s3engine.ConsoleProgressCallback())
s3.put(outfile, ENV["s3_updates"]+"/"+outname, parallel=True)
print(" Done.")
# Print URL
key.make_public()
update_url = ENV["https_updates"]+"/"+outname
print(Fore.MAGENTA + "Update URL:\n"+update_url + Fore.RESET)
# Get currently released version number
currentkey = bucket.get_key(path+"/current.json")
if not currentkey:
currentkey = bucket.new_key(path+"/current.json")
current = {"version": version_string, "md5digest": md5digest, "sha256digest": sha256digest, "url": update_url, "releasenotes": releasenotes }
currentkey.set_contents_from_string(json.dumps(current))
currentkey.make_public()
###################### DEBUGS
# Find built zipfile
outname = "lc7debug_v"+version_string.replace(" ", "_")+".zip"
outfile = os.path.join(os.path.abspath(ENV["output"]), outname)
print("Debug file: "+outfile)
if not os.path.exists(outfile):
print("Debugs for version "+version_string+" are not built, exiting.")
sys.exit(1)
# Upload debugs to S3
bucket, path = get_bucket_path(ENV["s3_debugs"])
key = bucket.get_key(path+"/"+outname)
if not key:
key = bucket.new_key(path+"/"+outname)
#key.set_contents_from_filename(outfile, cb=s3status, num_cb=100)
s3 = s3engine.S3Engine(get_s3_connection(), progress_callback=s3engine.ConsoleProgressCallback())
s3.put(outfile, ENV["s3_debugs"]+"/"+outname, parallel=True)
print(" Done.")
# Print URL
# key.make_public()
debugs_url = ENV["https_debugs"]+"/"+outname
print("Debug URL:\n"+debugs_url)
# Get currently released version number
currentkey = bucket.get_key(path+"/current.json")
if not currentkey:
currentkey = bucket.new_key(path+"/current.json")
current = {"version": version_string, "url": debugs_url }
currentkey.set_contents_from_string(json.dumps(current))
def cmd_updatelinks(args):
global BUILDS
global LINKS_BUCKET
# Collect all download prefixes, and pair with links from current.json
download_prefixes={}
for build_key in BUILDS:
ENV = BUILDS[build_key]
bucket, path = get_bucket_path(ENV["s3_installers"])
key = bucket.get_key(path+"/current.json")
if key is not None and key.exists():
current = json.loads(key.get_contents_as_string())
for prefix in ENV["download_prefixes"]:
download_prefixes[prefix] = current["url"]
print("{0} -> {1}".format(prefix, download_prefixes[prefix]))
else:
print("No download for build '{0}'".format(build_key))
# Sort download prefixes from longest to shortest
keys = sorted(download_prefixes.keys(), key=len, reverse=True)
# Write routing rules out in order
rules = boto.s3.website.RoutingRules()
for key in keys:
newkey=download_prefixes[key]
if newkey.startswith("https://s3.amazonaws.com/"):
newkey=newkey[25:]
rules.add_rule(boto.s3.website.RoutingRule.when(key_prefix = key).then_redirect(
hostname = "s3.amazonaws.com",
protocol = "https",
http_redirect_code = 307,
replace_key = newkey
))
# Configure website
bucket, path = get_bucket_path(LINKS_BUCKET)
if bucket.configure_website(suffix = "index.html", error_key = "error.html", routing_rules = rules):
print("Links updated successfully.")
else:
print("There was an error updating the links.")
########################################################################################################################
## MAIN ENTRYPOINT ##
if __name__ == "__main__":
parser = argparse.ArgumentParser(prog=Fore.CYAN+sys.argv[0]+Fore.RESET, description=Fore.GREEN + "LC7 Release Tool" + Fore.RESET)
subparsers = parser.add_subparsers()
parser_bumpversion = subparsers.add_parser("bumpversion")
parser_bumpversion.add_argument("-n", "--newversion", action="store_true", help="new minor version number")
parser_bumpversion.set_defaults(func=cmd_bumpversion)
parser_build = subparsers.add_parser("build")
parser_build.add_argument("-r", "--rebuild", action="store_true")
parser_build.add_argument("build", type=str)
parser_build.set_defaults(func=cmd_build)
parser_buildinstaller = subparsers.add_parser("buildinstaller")
parser_buildinstaller.add_argument("-f", "--force", action="store_true")
parser_buildinstaller.add_argument("-p", "--partial", action="store_true", help="make update incremental/partial install")
parser_buildinstaller.add_argument("build", type=str)
parser_buildinstaller.set_defaults(func=cmd_buildinstaller)
if LC7_RELEASE_ACCESS_KEY_ID is not None and LC7_RELEASE_SECRET_ACCESS_KEY is not None:
parser_pushplugin = subparsers.add_parser("pushplugin")
parser_pushplugin.add_argument("name", type=str)
parser_pushplugin.set_defaults(func=cmd_pushplugin)
parser_pushplugins = subparsers.add_parser("pushplugins")
parser_pushplugins.set_defaults(func=cmd_pushplugins)
parser_pushinstaller = subparsers.add_parser("pushinstaller")
parser_pushinstaller.add_argument("build", type=str)
parser_pushinstaller.add_argument("releasenotes", type=str)
parser_pushinstaller.set_defaults(func=cmd_pushinstaller)
# parser_revert = subparsers.add_parser("revert")
# parser_revert.add_argument("build", type=str)
# parser_revert.add_argument("version", type=str)
# parser_revert.add_argument("releasenotes", type=str)
# parser_revert.set_defaults(func=cmd_revert)
parser_updatelinks = subparsers.add_parser("updatelinks")
parser_updatelinks.set_defaults(func=cmd_updatelinks)
args = parser.parse_args()
args.func(args)
| []
| []
| [
"NSISDIR",
"LC7_RELEASE_SECRET_ACCESS_KEY",
"LC7_RELEASE_ACCESS_KEY_ID",
"MSBUILD"
]
| [] | ["NSISDIR", "LC7_RELEASE_SECRET_ACCESS_KEY", "LC7_RELEASE_ACCESS_KEY_ID", "MSBUILD"] | python | 4 | 0 | |
ansible_runner/__main__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import ast
import pkg_resources
import threading
import argparse
import logging
import signal
import errno
import json
import stat
import os
import shutil
from contextlib import contextmanager
from uuid import uuid4
from yaml import safe_load
from ansible_runner import run
from ansible_runner import output
from ansible_runner.utils import dump_artifact, Bunch
from ansible_runner.runner import Runner
from ansible_runner.exceptions import AnsibleRunnerException
VERSION = pkg_resources.require("ansible_runner")[0].version
DEFAULT_ROLES_PATH = os.getenv('ANSIBLE_ROLES_PATH', None)
DEFAULT_RUNNER_BINARY = os.getenv('RUNNER_BINARY', None)
DEFAULT_RUNNER_PLAYBOOK = os.getenv('RUNNER_PLAYBOOK', None)
DEFAULT_RUNNER_ROLE = os.getenv('RUNNER_ROLE', None)
DEFAULT_RUNNER_MODULE = os.getenv('RUNNER_MODULE', None)
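# The DEFAULT_* values above let the matching CLI flags be omitted when the
# corresponding environment variables are set, e.g. (illustrative):
#   RUNNER_PLAYBOOK=test.yml ansible-runner run /tmp/demo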
logger = logging.getLogger('ansible-runner')
@contextmanager
def role_manager(args):
if args.role:
role = {'name': args.role}
if args.role_vars:
role_vars = {}
for item in args.role_vars.split():
key, value = item.split('=')
try:
role_vars[key] = ast.literal_eval(value)
except Exception:
role_vars[key] = value
role['vars'] = role_vars
kwargs = Bunch(**args.__dict__)
kwargs.update(private_data_dir=args.private_data_dir,
json_mode=args.json,
ignore_logging=False,
rotate_artifacts=args.rotate_artifacts)
if args.artifact_dir:
kwargs.artifact_dir = args.artifact_dir
project_path = os.path.join(args.private_data_dir, 'project')
project_exists = os.path.exists(project_path)
env_path = os.path.join(args.private_data_dir, 'env')
env_exists = os.path.exists(env_path)
envvars_path = os.path.join(args.private_data_dir, 'env/envvars')
envvars_exists = os.path.exists(envvars_path)
if args.cmdline:
kwargs.cmdline = args.cmdline
playbook = None
tmpvars = None
play = [{'hosts': args.hosts if args.hosts is not None else "all",
'gather_facts': not args.role_skip_facts,
'roles': [role]}]
filename = str(uuid4().hex)
playbook = dump_artifact(json.dumps(play), project_path, filename)
kwargs.playbook = playbook
output.debug('using playbook file %s' % playbook)
if args.inventory:
inventory_file = os.path.join(args.private_data_dir, 'inventory', args.inventory)
if not os.path.exists(inventory_file):
raise AnsibleRunnerException('location specified by --inventory does not exist')
kwargs.inventory = inventory_file
output.debug('using inventory file %s' % inventory_file)
roles_path = args.roles_path or os.path.join(args.private_data_dir, 'roles')
roles_path = os.path.abspath(roles_path)
output.debug('setting ANSIBLE_ROLES_PATH to %s' % roles_path)
envvars = {}
if envvars_exists:
with open(envvars_path, 'rb') as f:
tmpvars = f.read()
new_envvars = safe_load(tmpvars)
if new_envvars:
envvars = new_envvars
envvars['ANSIBLE_ROLES_PATH'] = roles_path
kwargs.envvars = envvars
else:
kwargs = args
yield kwargs
if args.role:
if not project_exists and os.path.exists(project_path):
logger.debug('removing dynamically generated project folder')
shutil.rmtree(project_path)
elif playbook and os.path.isfile(playbook):
logger.debug('removing dynamically generated playbook')
os.remove(playbook)
# if a previous envvars existed in the private_data_dir,
# restore the original file contents
if tmpvars:
with open(envvars_path, 'wb') as f:
f.write(tmpvars)
elif not envvars_exists and os.path.exists(envvars_path):
logger.debug('removing dynamically generated envvars folder')
os.remove(envvars_path)
# since ansible-runner created the env folder, remove it
if not env_exists and os.path.exists(env_path):
logger.debug('removing dynamically generated env folder')
shutil.rmtree(env_path)
def main(sys_args=None):
parser = argparse.ArgumentParser(description='manage ansible execution')
parser.add_argument('--version', action='version', version=VERSION)
parser.add_argument('command', choices=['run', 'start', 'stop', 'is-alive'])
parser.add_argument('private_data_dir',
help='Base directory containing Runner metadata (project, inventory, etc.)')
group = parser.add_mutually_exclusive_group()
group.add_argument("-m", "--module", default=DEFAULT_RUNNER_MODULE,
help="Invoke an Ansible module directly without a playbook")
group.add_argument("-p", "--playbook", default=DEFAULT_RUNNER_PLAYBOOK,
help="The name of the playbook to execute")
group.add_argument("-r", "--role", default=DEFAULT_RUNNER_ROLE,
help="Invoke an Ansible role directly without a playbook")
parser.add_argument("-b", "--binary", default=DEFAULT_RUNNER_BINARY,
help="The full path to ansible[-playbook] binary")
parser.add_argument("--hosts",
help="Define the set of hosts to execute against")
parser.add_argument("-i", "--ident",
default=uuid4(),
help="An identifier that will be used when generating the"
"artifacts directory and can be used to uniquely identify a playbook run")
parser.add_argument("--rotate-artifacts",
default=0,
type=int,
help="Automatically clean up old artifact directories after a given number has been created, the default is 0 which disables rotation")
parser.add_argument("--roles-path", default=DEFAULT_ROLES_PATH,
help="Path to the Ansible roles directory")
parser.add_argument("--role-vars",
help="Variables to pass to the role at runtime")
parser.add_argument("--role-skip-facts", action="store_true", default=False,
help="Disable fact collection when executing a role directly")
parser.add_argument("--artifact-dir",
help="Optional Path for the artifact root directory, by default it is located inside the private data dir")
parser.add_argument("--inventory",
help="Override the default inventory location in private_data_dir")
parser.add_argument("-j", "--json", action="store_true",
help="Output the json event structure to stdout instead of Ansible output")
parser.add_argument("-v", action="count",
help="Increase the verbosity with multiple v's (up to 5) of the ansible-playbook output")
parser.add_argument("-q", "--quiet", action="store_true",
help="Disable all output")
parser.add_argument("--cmdline",
help="Command line options to pass to ansible-playbook at execution time")
parser.add_argument("--debug", action="store_true",
help="Enable Runner debug output logging")
parser.add_argument("--logfile",
help="Log output messages to a file")
parser.add_argument("-a", "--args", dest='module_args',
help="Module arguments")
args = parser.parse_args(sys_args)
output.configure()
# enable or disable debug mode
output.set_debug('enable' if args.debug else 'disable')
# set the output logfile
if args.logfile:
output.set_logfile(args.logfile)
output.debug('starting debug logging')
# get the absolute path for start since it is a daemon
args.private_data_dir = os.path.abspath(args.private_data_dir)
pidfile = os.path.join(args.private_data_dir, 'pid')
try:
os.makedirs(args.private_data_dir)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(args.private_data_dir):
pass
else:
raise
if args.command != 'run':
stderr_path = os.path.join(args.private_data_dir, 'daemon.log')
if not os.path.exists(stderr_path):
os.close(os.open(stderr_path, os.O_CREAT, stat.S_IRUSR | stat.S_IWUSR))
stderr = open(stderr_path, 'w+')
if args.command in ('start', 'run'):
if args.command == 'start':
import daemon
from daemon.pidfile import TimeoutPIDLockFile
context = daemon.DaemonContext(
pidfile=TimeoutPIDLockFile(pidfile),
stderr=stderr
)
else:
context = threading.Lock()
with context:
with role_manager(args) as args:
if args.inventory:
with open(args.inventory) as f:
inventory_data = f.read()
else:
inventory_data = None
run_options = dict(private_data_dir=args.private_data_dir,
ident=args.ident,
binary=args.binary,
playbook=args.playbook,
module=args.module,
module_args=args.module_args,
host_pattern=args.hosts,
verbosity=args.v,
quiet=args.quiet,
rotate_artifacts=args.rotate_artifacts,
ignore_logging=False,
json_mode=args.json,
inventory=inventory_data,
roles_path=[args.roles_path] if args.roles_path else None)
if args.cmdline:
run_options['cmdline'] = args.cmdline
res = run(**run_options)
return(res.rc)
try:
with open(pidfile, 'r') as f:
pid = int(f.readline())
except IOError:
return(1)
if args.command == 'stop':
Runner.handle_termination(pid)
return (0)
elif args.command == 'is-alive':
try:
os.kill(pid, signal.SIG_DFL)
return(0)
except OSError:
return(1)
| []
| []
| [
"RUNNER_PLAYBOOK",
"RUNNER_BINARY",
"RUNNER_ROLE",
"ANSIBLE_ROLES_PATH",
"RUNNER_MODULE"
]
| [] | ["RUNNER_PLAYBOOK", "RUNNER_BINARY", "RUNNER_ROLE", "ANSIBLE_ROLES_PATH", "RUNNER_MODULE"] | python | 5 | 0 | |
postgres_exporter_integration_test.go | // These are specialized integration tests. We only build them when we're doing
// a lot of additional work to keep the external docker environment they require
// working.
// +build integration
package main
import (
"os"
"testing"
. "gopkg.in/check.v1"
"database/sql"
"fmt"
_ "github.com/lib/pq"
"github.com/prometheus/client_golang/prometheus"
)
// Hook up gocheck into the "go test" runner.
func Test(t *testing.T) { TestingT(t) }
type IntegrationSuite struct {
e *Exporter
}
var _ = Suite(&IntegrationSuite{})
func (s *IntegrationSuite) SetUpSuite(c *C) {
dsn := os.Getenv("DATA_SOURCE_NAME")
c.Assert(dsn, Not(Equals), "")
exporter := NewExporter(dsn, "")
c.Assert(exporter, NotNil)
// Assign the exporter to the suite
s.e = exporter
prometheus.MustRegister(exporter)
}
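// These suite tests run only with the "integration" build tag (see the header above)
// and expect DATA_SOURCE_NAME to point at a reachable PostgreSQL instance, e.g.
// (illustrative):
//   DATA_SOURCE_NAME="postgresql://postgres:postgres@localhost:5432/postgres?sslmode=disable" \
//     go test -tags integration ./...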
// TODO: it would be nice if we didn't mostly just recreate the scrape function
func (s *IntegrationSuite) TestAllNamespacesReturnResults(c *C) {
// Setup a dummy channel to consume metrics
ch := make(chan prometheus.Metric, 100)
go func() {
for range ch {
}
}()
// Open a database connection
db, err := sql.Open("postgres", s.e.dsn)
c.Assert(db, NotNil)
c.Assert(err, IsNil)
defer db.Close()
// Do a version update
err = s.e.checkMapVersions(ch, db)
c.Assert(err, IsNil)
err = querySettings(ch, db)
if !c.Check(err, Equals, nil) {
fmt.Println("## ERRORS FOUND")
fmt.Println(err)
}
// This should never happen in our test cases.
errMap := queryNamespaceMappings(ch, db, s.e.metricMap, s.e.queryOverrides)
if !c.Check(len(errMap), Equals, 0) {
fmt.Println("## NAMESPACE ERRORS FOUND")
for namespace, err := range errMap {
fmt.Println(namespace, ":", err)
}
}
}
// TestInvalidDsnDoesntCrash tests that specifying an invalid DSN doesn't crash
// the exporter. Related to https://github.com/wrouesnel/postgres_exporter/issues/93
// although not a replication of the scenario.
func (s *IntegrationSuite) TestInvalidDsnDoesntCrash(c *C) {
// Setup a dummy channel to consume metrics
ch := make(chan prometheus.Metric, 100)
go func() {
for range ch {
}
}()
// Send a bad DSN
exporter := NewExporter("invalid dsn", *queriesPath)
c.Assert(exporter, NotNil)
exporter.scrape(ch)
// Send a DSN to a non-listening port.
exporter = NewExporter("postgresql://nothing:[email protected]:1/nothing", *queriesPath)
c.Assert(exporter, NotNil)
exporter.scrape(ch)
}
// TestUnknownMetricParsingDoesntCrash deliberately deletes all the column maps out
// of an exporter to test that the default metric handling code can cope with unknown columns.
func (s *IntegrationSuite) TestUnknownMetricParsingDoesntCrash(c *C) {
// Setup a dummy channel to consume metrics
ch := make(chan prometheus.Metric, 100)
go func() {
for range ch {
}
}()
dsn := os.Getenv("DATA_SOURCE_NAME")
c.Assert(dsn, Not(Equals), "")
exporter := NewExporter(dsn, "")
c.Assert(exporter, NotNil)
// Convert the default maps into a list of empty maps.
emptyMaps := make(map[string]map[string]ColumnMapping, 0)
for k := range exporter.builtinMetricMaps {
emptyMaps[k] = map[string]ColumnMapping{}
}
exporter.builtinMetricMaps = emptyMaps
// scrape the exporter and make sure it works
exporter.scrape(ch)
}
| [
"\"DATA_SOURCE_NAME\"",
"\"DATA_SOURCE_NAME\""
]
| []
| [
"DATA_SOURCE_NAME"
]
| [] | ["DATA_SOURCE_NAME"] | go | 1 | 0 | |
backend/app/app/settings.py | """
Django settings for app project.
Generated by 'django-admin startproject' using Django 2.2.12.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '20ih0p+bc@=yj)i7sc@019em71ghf=s8ur+@4z-p7oep#0*!q6'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['localhost', '127.0.0.1', '[::1]', os.environ.get('HOST')]
# Application definition
INSTALLED_APPS = [
'channels',
'conference',
'rest_framework',
'corsheaders',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
'TEST': {
'NAME': os.path.join(BASE_DIR, 'db_test.sqlite3')
}
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
# Channels
ASGI_APPLICATION = 'app.routing.application'
CHANNEL_LAYERS = {
'default': {
'BACKEND': 'channels_redis.core.RedisChannelLayer',
'CONFIG': {
"hosts": [(os.environ.get('CHANNELS_REDIS_HOST'), 6379)],
},
},
}
# Allow all hosts
CORS_ORIGIN_ALLOW_ALL = True | []
| []
| [
"HOST",
"CHANNELS_REDIS_HOST"
]
| [] | ["HOST", "CHANNELS_REDIS_HOST"] | python | 2 | 0 | |
molecule/test/unit/command/conftest.py | # Copyright (c) 2015-2018 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import os
import pytest
@pytest.fixture
def command_patched_ansible_create(mocker):
return mocker.patch("molecule.provisioner.ansible.Ansible.create")
@pytest.fixture
def command_driver_delegated_section_data():
x = {
"driver": {
"name": "delegated",
"options": {
"managed": False,
"login_cmd_template": "docker exec -ti {instance} bash",
"ansible_connection_options": {"ansible_connection": "docker"},
},
}
}
if "DOCKER_HOST" in os.environ:
x["driver"]["options"]["ansible_docker_extra_args"] = "-H={}".format(
os.environ["DOCKER_HOST"]
)
return x
@pytest.fixture
def command_driver_delegated_managed_section_data():
return {"driver": {"name": "delegated", "managed": True}}
| []
| []
| [
"DOCKER_HOST"
]
| [] | ["DOCKER_HOST"] | python | 1 | 0 | |
mlearner/__init__.py |
"""Jaime Sendra Berenguer-2020.
MLearner Machine Learning Library Extensions
Author:Jaime Sendra Berenguer<www.linkedin.com/in/jaisenbe>
License: MIT
Machine Learning for Data Engineers
====================================
MLearner is Python module based on scikit-learn.
It is based on high-level libraries for data engineers.
See https://jaisenbe58r.github.io/MLearner/ for complete documentation.
"""
import logging
import os
logger = logging.getLogger(__name__)
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.INFO)
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.2.8'
# On OSX, we can get a runtime error due to multiple OpenMP libraries loaded
# simultaneously. This can happen for instance when calling BLAS inside a
# prange. Setting the following environment variable allows multiple OpenMP
# libraries to be loaded. It should not degrade performances since we manually
# take care of potential over-subscription performance issues, in sections of
# the code where nested OpenMP loops can happen, by dynamically reconfiguring
# the inner OpenMP runtime to temporarily disable it while under the scope of
# the outer OpenMP parallel section.
os.environ.setdefault("KMP_DUPLICATE_LIB_OK", "True")
# Workaround issue discovered in intel-openmp 2019.5:
# https://github.com/ContinuumIO/anaconda-issues/issues/11294
os.environ.setdefault("KMP_INIT_AT_FORK", "FALSE")
__all__ = ["classifier", "data", "evaluation", "externals",
"feature_selection", "load", "images", "models", "neural", "preprocessing", "plotly",
"training", "utils", "stacking"]
| []
| []
| []
| [] | [] | python | 0 | 0 | |
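Both OpenMP-related variables above are set with os.environ.setdefault, so a value exported by the user always wins over the library default. A quick illustration of that behaviour (the pre-set value is invented for the example):

    import os

    os.environ["KMP_INIT_AT_FORK"] = "TRUE"                 # pretend the user exported this
    os.environ.setdefault("KMP_INIT_AT_FORK", "FALSE")      # no effect: the key already exists
    os.environ.setdefault("KMP_DUPLICATE_LIB_OK", "True")   # applied: the key was absent

    print(os.environ["KMP_INIT_AT_FORK"])      # TRUE  (user value preserved)
    print(os.environ["KMP_DUPLICATE_LIB_OK"])  # True  (library default applied)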
setup.py | #!/usr/bin/env python3
# python setup.py sdist --format=zip,gztar
from setuptools import setup
import os
import sys
import platform
import imp
import argparse
with open('contrib/requirements/requirements.txt') as f:
requirements = f.read().splitlines()
with open('contrib/requirements/requirements-hw.txt') as f:
requirements_hw = f.read().splitlines()
version = imp.load_source('version', 'lib/version.py')
if sys.version_info[:3] < (3, 4, 0):
sys.exit("Error: Electrum-Zilla requires Python version >= 3.4.0...")
data_files = []
if platform.system() in ['Linux', 'FreeBSD', 'DragonFly']:
parser = argparse.ArgumentParser()
parser.add_argument('--root=', dest='root_path', metavar='dir', default='/')
opts, _ = parser.parse_known_args(sys.argv[1:])
usr_share = os.path.join(sys.prefix, "share")
icons_dirname = 'pixmaps'
if not os.access(opts.root_path + usr_share, os.W_OK) and \
not os.access(opts.root_path, os.W_OK):
icons_dirname = 'icons'
if 'XDG_DATA_HOME' in os.environ.keys():
usr_share = os.environ['XDG_DATA_HOME']
else:
usr_share = os.path.expanduser('~/.local/share')
data_files += [
(os.path.join(usr_share, 'applications/'), ['electrum.desktop']),
(os.path.join(usr_share, icons_dirname), ['icons/electrum.png'])
]
extras_require = {
'hardware': requirements_hw,
'fast': ['pycryptodomex'],
':python_version < "3.5"': ['typing>=3.0.0'],
}
extras_require['full'] = extras_require['hardware'] + extras_require['fast']
setup(
name="Electrum-Zilla",
version=version.ELECTRUM_VERSION,
install_requires=requirements,
extras_require=extras_require,
packages=[
'electrum',
'electrum_gui',
'electrum_gui.qt',
'electrum_plugins',
'electrum_plugins.audio_modem',
'electrum_plugins.cosigner_pool',
'electrum_plugins.email_requests',
'electrum_plugins.greenaddress_instant',
'electrum_plugins.hw_wallet',
'electrum_plugins.keepkey',
'electrum_plugins.labels',
'electrum_plugins.ledger',
'electrum_plugins.revealer',
'electrum_plugins.trezor',
'electrum_plugins.digitalbitbox',
'electrum_plugins.trustedcoin',
'electrum_plugins.virtualkeyboard',
],
package_dir={
'electrum': 'lib',
'electrum_gui': 'gui',
'electrum_plugins': 'plugins',
},
package_data={
'': ['*.txt', '*.json', '*.ttf', '*.otf'],
'electrum': [
'wordlist/*.txt',
'locale/*/LC_MESSAGES/electrum.mo',
],
},
scripts=['electrum'],
data_files=data_files,
description="Lightweight Bitcoin Wallet",
author="Thomas Voegtlin",
author_email="[email protected]",
license="MIT Licence",
url="https://electrum.org",
long_description="""Lightweight Bitcoin Wallet"""
)
| []
| []
| [
"XDG_DATA_HOME"
]
| [] | ["XDG_DATA_HOME"] | python | 1 | 0 | |
deeppavlov/evolve.py | """
Copyright 2017 Neural Networks and Deep Learning lab, MIPT
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
from pathlib import Path
import sys
import os
import json
from subprocess import Popen
import pandas as pd
from deeppavlov.core.common.errors import ConfigError
from deeppavlov.models.evolution.evolution_param_generator import ParamsEvolution
from deeppavlov.core.common.file import read_json, save_json
from deeppavlov.core.common.log import get_logger
from deeppavlov.core.commands.utils import set_deeppavlov_root, expand_path
log = get_logger(__name__)
parser = argparse.ArgumentParser()
parser.add_argument("config_path", help="path to a pipeline json config", type=str)
parser.add_argument('--key_main_model', help='key inserted in dictionary of main model in pipe', default="main")
parser.add_argument('--p_cross', help='probability of crossover', type=float, default=0.2)
parser.add_argument('--pow_cross', help='crossover power', type=float, default=0.1)
parser.add_argument('--p_mut', help='probability of mutation', type=float, default=1.)
parser.add_argument('--pow_mut', help='mutation power', type=float, default=0.1)
parser.add_argument('--p_size', help='population size', type=int, default=10)
parser.add_argument('--gpus', help='visible GPUs divided by comma <<,>>', default="-1")
parser.add_argument('--train_partition',
help='partition of splitted train file', default=1)
parser.add_argument('--start_from_population',
help='population number to start from. 0 means from scratch', default=0)
parser.add_argument('--path_to_population',
help='path to population to start from', default="")
parser.add_argument('--elitism_with_weights',
help='whether to save elite models with weights or without', action='store_true')
parser.add_argument('--iterations', help='Number of iterations', type=int, default=-1)
def find_config(pipeline_config_path: str):
if not Path(pipeline_config_path).is_file():
configs = [c for c in Path(__file__).parent.glob(f'configs/**/{pipeline_config_path}.json')
if str(c.with_suffix('')).endswith(pipeline_config_path)] # a simple way to not allow * and ?
if configs:
log.info(f"Interpreting '{pipeline_config_path}' as '{configs[0]}'")
pipeline_config_path = str(configs[0])
return pipeline_config_path
def main():
args = parser.parse_args()
pipeline_config_path = find_config(args.config_path)
key_main_model = args.key_main_model
population_size = args.p_size
gpus = [int(gpu) for gpu in args.gpus.split(",")]
train_partition = int(args.train_partition)
start_from_population = int(args.start_from_population)
path_to_population = args.path_to_population
elitism_with_weights = args.elitism_with_weights
iterations = int(args.iterations)
p_crossover = args.p_cross
pow_crossover = args.pow_cross
p_mutation = args.p_mut
pow_mutation = args.pow_mut
if os.environ.get("CUDA_VISIBLE_DEVICES") is None:
pass
else:
cvd = [int(gpu) for gpu in os.environ.get("CUDA_VISIBLE_DEVICES").split(",")]
if gpus == [-1]:
gpus = cvd
else:
try:
gpus = [cvd[gpu] for gpu in gpus]
except IndexError:
raise ConfigError("Can not use gpus `{}` with CUDA_VISIBLE_DEVICES='{}'".format(
",".join(map(str, gpus)), ",".join(map(str, cvd))
))
basic_params = read_json(pipeline_config_path)
log.info("Given basic params: {}\n".format(json.dumps(basic_params, indent=2)))
# Initialize evolution
evolution = ParamsEvolution(population_size=population_size,
p_crossover=p_crossover, crossover_power=pow_crossover,
p_mutation=p_mutation, mutation_power=pow_mutation,
key_main_model=key_main_model,
seed=42,
train_partition=train_partition,
elitism_with_weights=elitism_with_weights,
**basic_params)
considered_metrics = evolution.get_value_from_config(evolution.basic_config,
list(evolution.find_model_path(
evolution.basic_config, "metrics"))[0] + ["metrics"])
considered_metrics = [metric['name'] if isinstance(metric, dict) else metric for metric in considered_metrics]
log.info(considered_metrics)
evolve_metric = considered_metrics[0]
# Create table variable for gathering results
set_deeppavlov_root(evolution.basic_config)
expand_path(Path(evolution.get_value_from_config(
evolution.basic_config, evolution.main_model_path + ["save_path"]))).mkdir(parents=True, exist_ok=True)
result_file = expand_path(Path(evolution.get_value_from_config(evolution.basic_config,
evolution.main_model_path + ["save_path"])
).joinpath("result_table.csv"))
result_table_columns = []
result_table_dict = {}
for el in considered_metrics:
result_table_dict[el + "_valid"] = []
result_table_dict[el + "_test"] = []
result_table_columns.extend([el + "_valid", el + "_test"])
result_table_dict["params"] = []
result_table_columns.append("params")
if start_from_population == 0:
# if starting evolution from scratch
iters = 0
result_table = pd.DataFrame(result_table_dict)
# write down result table file
result_table.loc[:, result_table_columns].to_csv(result_file, index=False, sep='\t')
log.info("Iteration #{} starts".format(iters))
# randomly generate the first population
population = evolution.first_generation()
else:
# if starting evolution from already existing population
iters = start_from_population
log.info("Iteration #{} starts".format(iters))
population = []
for i in range(population_size):
population.append(read_json(expand_path(Path(path_to_population).joinpath(
"model_" + str(i)).joinpath("config.json"))))
population[i] = evolution.insert_value_or_dict_into_config(
population[i], evolution.main_model_path + ["save_path"],
str(Path(
evolution.get_value_from_config(evolution.basic_config, evolution.main_model_path + ["save_path"])
).joinpath(
"population_" + str(start_from_population)).joinpath(
"model_" + str(i)).joinpath(
"model")))
population[i] = evolution.insert_value_or_dict_into_config(
population[i], evolution.main_model_path + ["load_path"],
str(Path(
evolution.get_value_from_config(population[i], evolution.main_model_path + ["load_path"]))))
for path_id, path_ in enumerate(evolution.paths_to_fiton_dicts):
population[i] = evolution.insert_value_or_dict_into_config(
population[i], path_ + ["save_path"],
str(Path(evolution.get_value_from_config(evolution.basic_config,
evolution.main_model_path + ["save_path"])
).joinpath("population_" + str(iters)).joinpath("model_" + str(i)).joinpath(
"fitted_model_" + str(path_id))))
for path_id, path_ in enumerate(evolution.paths_to_fiton_dicts):
population[i] = evolution.insert_value_or_dict_into_config(
population[i], path_ + ["load_path"],
str(Path(evolution.get_value_from_config(
population[i], path_ + ["load_path"]))))
run_population(population, evolution, gpus)
population_scores = results_to_table(population, evolution, considered_metrics,
result_file, result_table_columns)[evolve_metric]
log.info("Population scores: {}".format(population_scores))
log.info("Iteration #{} was done".format(iters))
iters += 1
while True:
if iterations != -1 and start_from_population + iterations == iters:
log.info("End of evolution on iteration #{}".format(iters))
break
log.info("Iteration #{} starts".format(iters))
population = evolution.next_generation(population, population_scores, iters)
run_population(population, evolution, gpus)
population_scores = results_to_table(population, evolution, considered_metrics,
result_file, result_table_columns)[evolve_metric]
log.info("Population scores: {}".format(population_scores))
log.info("Iteration #{} was done".format(iters))
iters += 1
def run_population(population, evolution, gpus):
"""
Change save and load paths for obtained population, save config.json with model config,
run population via current python executor (with which evolve.py already run)
    and on given devices (-1 means CPU, other integers are the GPUs visible to evolve.py)
Args:
population: list of dictionaries - configs of current population
evolution: ParamsEvolution
gpus: list of given devices (list of integers)
Returns:
None
"""
population_size = len(population)
for k in range(population_size // len(gpus) + 1):
procs = []
for j in range(len(gpus)):
i = k * len(gpus) + j
if i < population_size:
save_path = expand_path(Path(evolution.get_value_from_config(
population[i], evolution.main_model_path + ["save_path"])).parent)
save_path.mkdir(parents=True, exist_ok=True)
f_name = save_path.joinpath("config.json")
save_json(population[i], f_name)
with save_path.joinpath('out.txt').open('w', encoding='utf8') as outlog,\
save_path.joinpath('err.txt').open('w', encoding='utf8') as errlog:
env = dict(os.environ)
if len(gpus) > 1 or gpus[0] != -1:
env['CUDA_VISIBLE_DEVICES'] = str(gpus[j])
procs.append(Popen("{} -m deeppavlov train {}".format(sys.executable, str(f_name)),
shell=True, stdout=outlog, stderr=errlog, env=env))
for j, proc in enumerate(procs):
i = k * len(gpus) + j
log.info(f'Waiting on {i}th proc')
if proc.wait() != 0:
save_path = expand_path(Path(evolution.get_value_from_config(
population[i], evolution.main_model_path + ["save_path"])).parent)
with save_path.joinpath('err.txt').open(encoding='utf8') as errlog:
log.warning(f'Population {i} returned an error code {proc.returncode} and an error log:\n' +
errlog.read())
return None
def results_to_table(population, evolution, considered_metrics, result_file, result_table_columns):
population_size = len(population)
validate_best = evolution.get_value_from_config(evolution.basic_config,
list(evolution.find_model_path(
evolution.basic_config, "validate_best"))[0]
+ ["validate_best"])
test_best = evolution.get_value_from_config(evolution.basic_config,
list(evolution.find_model_path(
evolution.basic_config, "test_best"))[0]
+ ["test_best"])
if (not validate_best) and test_best:
log.info("Validate_best is set to False. Tuning parameters on test")
elif (not validate_best) and (not test_best):
raise ConfigError("Validate_best and test_best are set to False. Can not evolve.")
population_metrics = {}
for m in considered_metrics:
population_metrics[m] = []
for i in range(population_size):
with open(str(expand_path(Path(evolution.get_value_from_config(
population[i],
evolution.main_model_path + ["save_path"])).parent.joinpath("out.txt"))), "r", encoding='utf8') as fout:
reports_data = fout.read().splitlines()[-2:]
reports = []
for j in range(2):
try:
reports.append(json.loads(reports_data[j]))
except:
pass
val_results = {}
test_results = {}
for m in considered_metrics:
val_results[m] = None
test_results[m] = None
if len(reports) == 2 and "valid" in reports[0].keys() and "test" in reports[1].keys():
val_results = reports[0]["valid"]["metrics"]
test_results = reports[1]["test"]["metrics"]
elif len(reports) == 2 and "valid" in reports[0].keys() and "valid" in reports[1].keys():
val_results = reports[1]["valid"]["metrics"]
elif len(reports) == 2 and "test" in reports[0].keys() and "test" in reports[1].keys():
val_results = reports[1]["test"]["metrics"]
elif len(reports) == 2 and "train" in reports[0].keys() and "valid" in reports[1].keys():
val_results = reports[1]["valid"]["metrics"]
elif len(reports) == 2 and "train" in reports[0].keys() and "test" in reports[1].keys():
val_results = reports[1]["test"]["metrics"]
elif len(reports) == 2 and "train" in reports[0].keys() and "train" in reports[1].keys():
val_results = reports[1]["train"]["metrics"]
elif len(reports) == 1 and "valid" in reports[0].keys():
val_results = reports[0]["valid"]["metrics"]
elif len(reports) == 1 and "test" in reports[0].keys():
test_results = reports[0]["test"]["metrics"]
else:
raise ConfigError("Can not proceed output files: didn't find valid and/or test results")
result_table_dict = {}
for el in result_table_columns:
result_table_dict[el] = []
for m in considered_metrics:
result_table_dict[m + "_valid"].append(val_results[m])
result_table_dict[m + "_test"].append(test_results[m])
if validate_best:
population_metrics[m].append(val_results[m])
elif test_best:
population_metrics[m].append(test_results[m])
result_table_dict[result_table_columns[-1]] = [population[i]]
result_table = pd.DataFrame(result_table_dict)
result_table.loc[:, result_table_columns].to_csv(result_file, index=False, sep='\t', mode='a', header=None)
return population_metrics
if __name__ == "__main__":
main()
| []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
daily-cost-by-account/lambda_function.py | from logging import getLogger, INFO
import os
import datetime
import boto3
import pandas
from botocore.exceptions import ClientError
logger = getLogger()
logger.setLevel(INFO)
def upload_s3(output, key, bucket):
try:
s3_resource = boto3.resource('s3')
s3_bucket = s3_resource.Bucket(bucket)
s3_bucket.upload_file(output, key, ExtraArgs={'ACL': 'bucket-owner-full-control'})
except ClientError as err:
logger.error(err.response['Error']['Message'])
raise
def get_ou_ids(org, parent_id):
ou_ids = []
try:
paginator = org.get_paginator('list_children')
iterator = paginator.paginate(
ParentId=parent_id,
ChildType='ORGANIZATIONAL_UNIT'
)
for page in iterator:
for ou in page['Children']:
ou_ids.append(ou['Id'])
ou_ids.extend(get_ou_ids(org, ou['Id']))
except ClientError as err:
logger.error(err.response['Error']['Message'])
raise
else:
return ou_ids
def list_accounts():
org = boto3.client('organizations')
root_id = 'r-xxxx'
ou_id_list = [root_id]
ou_id_list.extend(get_ou_ids(org, root_id))
accounts = []
try:
for ou_id in ou_id_list:
paginator = org.get_paginator('list_accounts_for_parent')
page_iterator = paginator.paginate(ParentId=ou_id)
for page in page_iterator:
for account in page['Accounts']:
item = [
account['Id'],
account['Name'],
]
accounts.append(item)
except ClientError as err:
logger.error(err.response['Error']['Message'])
raise
else:
return accounts
def get_cost_json(start, end):
ce = boto3.client('ce')
response = ce.get_cost_and_usage(
TimePeriod={
'Start': start,
'End' : end,
},
Granularity='DAILY',
Metrics=[
'NetUnblendedCost'
],
GroupBy=[
{
'Type': 'DIMENSION',
'Key': 'LINKED_ACCOUNT'
}
]
)
return response['ResultsByTime']
def lambda_handler(event, context):
today = datetime.date.today()
start = today.replace(day=1).strftime('%Y-%m-%d')
end = today.strftime('%Y-%m-%d')
key = 'daily-cost-' + today.strftime('%Y-%m') + '.csv'
output_file = '/tmp/output.csv'
bucket = os.environ['BUCKET']
account_list = pandas.DataFrame(list_accounts(), columns=['Account Id', 'Account Name'])
daily_cost_list = get_cost_json(start, end)
merged_cost = pandas.DataFrame(
index=[],
columns=['Account Id']
)
for index, item in enumerate(daily_cost_list):
normalized_json = pandas.json_normalize(item['Groups'])
split_keys = pandas.DataFrame(
normalized_json['Keys'].tolist(),
columns=['Account Id']
)
cost = pandas.concat(
[split_keys, normalized_json['Metrics.NetUnblendedCost.Amount']],
axis=1
)
renamed_cost = cost.rename(
columns={'Metrics.NetUnblendedCost.Amount': item['TimePeriod']['Start']}
)
merged_cost = pandas.merge(merged_cost, renamed_cost, on='Account Id', how='outer')
daily_cost = pandas.merge(account_list, merged_cost, on='Account Id', how='right')
daily_cost.to_csv(output_file, index=False)
upload_s3(output_file, key, bucket)
| []
| []
| [
"BUCKET"
]
| [] | ["BUCKET"] | python | 1 | 0 | |
azure/score.py | import json
import numpy as np
import os
import joblib
import mlflow
def init():
global model
# AZUREML_MODEL_DIR is an environment variable created during deployment.
# It is the path to the model folder (./azureml-models/$MODEL_NAME/$VERSION)
# For multiple models, it points to the folder containing all deployed models (./azureml-models)
model_path = os.path.join(os.getenv("AZUREML_MODEL_DIR"), "model")
model = mlflow.pyfunc.load_model(model_path)
def run(raw_data):
result = model.predict(json.loads(raw_data)["input_data"]["data"])
return result.tolist()
| []
| []
| [
"AZUREML_MODEL_DIR"
]
| [] | ["AZUREML_MODEL_DIR"] | python | 1 | 0 | |
toolium/behave/environment.py | # -*- coding: utf-8 -*-
u"""
Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
This file is part of Toolium.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import os
import re
try:
from behave_pytest.hook import install_pytest_asserts
except ImportError:
def install_pytest_asserts():
pass
from toolium.config_files import ConfigFiles
from toolium.driver_wrapper import DriverWrappersPool
from toolium.jira import add_jira_status, change_all_jira_status, save_jira_conf
from toolium.visual_test import VisualTest
from toolium.pageelements import PageElement
from toolium.behave.env_utils import DynamicEnvironment
def before_all(context):
"""Initialization method that will be executed before the test execution
:param context: behave context
"""
# Use pytest asserts if behave_pytest is installed
install_pytest_asserts()
# Get 'Config_environment' property from user input (e.g. -D Config_environment=ios)
env = context.config.userdata.get('Config_environment')
# Deprecated: Get 'env' property from user input (e.g. -D env=ios)
env = env if env else context.config.userdata.get('env')
if env:
os.environ['Config_environment'] = env
if not hasattr(context, 'config_files'):
context.config_files = ConfigFiles()
context.config_files = DriverWrappersPool.initialize_config_files(context.config_files)
# By default config directory is located in environment path
if not context.config_files.config_directory:
context.config_files.set_config_directory(DriverWrappersPool.get_default_config_directory())
context.global_status = {'test_passed': True}
create_and_configure_wrapper(context)
# Behave dynamic environment
context.dyn_env = DynamicEnvironment(logger=context.logger)
def before_feature(context, feature):
"""Feature initialization
:param context: behave context
:param feature: running feature
"""
context.global_status = {'test_passed': True}
# Start driver if it should be reused in feature
context.reuse_driver_from_tags = 'reuse_driver' in feature.tags
if context.toolium_config.getboolean_optional('Driver', 'reuse_driver') or context.reuse_driver_from_tags:
no_driver = 'no_driver' in feature.tags
start_driver(context, no_driver)
# Behave dynamic environment
context.dyn_env.get_steps_from_feature_description(feature.description)
context.dyn_env.execute_before_feature_steps(context)
def before_scenario(context, scenario):
"""Scenario initialization
:param context: behave context
:param scenario: running scenario
"""
# Configure reset properties from behave tags
if 'no_reset_app' in scenario.tags:
os.environ["AppiumCapabilities_noReset"] = 'true'
os.environ["AppiumCapabilities_fullReset"] = 'false'
elif 'reset_app' in scenario.tags:
os.environ["AppiumCapabilities_noReset"] = 'false'
os.environ["AppiumCapabilities_fullReset"] = 'false'
elif 'full_reset_app' in scenario.tags:
os.environ["AppiumCapabilities_noReset"] = 'false'
os.environ["AppiumCapabilities_fullReset"] = 'true'
# Force to reset driver before each scenario if it has @reset_driver tag
if 'reset_driver' in scenario.tags:
DriverWrappersPool.stop_drivers()
DriverWrappersPool.download_videos('multiple tests', context.global_status['test_passed'])
DriverWrappersPool.save_all_ggr_logs('multiple tests', context.global_status['test_passed'])
DriverWrappersPool.remove_drivers()
context.global_status['test_passed'] = True
# Skip android_only or ios_only scenarios
if 'android_only' in scenario.tags and context.driver_wrapper.is_ios_test():
scenario.skip('Android scenario')
return
elif 'ios_only' in scenario.tags and context.driver_wrapper.is_android_test():
scenario.skip('iOS scenario')
return
# Read @no_driver tag
no_driver = 'no_driver' in scenario.tags or 'no_driver' in scenario.feature.tags
bdd_common_before_scenario(context, scenario, no_driver)
# Behave dynamic environment
context.dyn_env.execute_before_scenario_steps(context)
def bdd_common_before_scenario(context_or_world, scenario, no_driver=False):
"""Common scenario initialization in behave or lettuce
:param context_or_world: behave context or lettuce world
:param scenario: running scenario
:param no_driver: True if this is an api test and driver should not be started
"""
# Initialize and connect driver wrapper
start_driver(context_or_world, no_driver)
# Add assert screenshot methods with scenario configuration
add_assert_screenshot_methods(context_or_world, scenario)
# Configure Jira properties
save_jira_conf()
context_or_world.logger.info("Running new scenario: %s", scenario.name)
def create_and_configure_wrapper(context_or_world):
"""Create and configure driver wrapper in behave or lettuce tests
:param context_or_world: behave context or lettuce world
"""
# Create default driver wrapper
context_or_world.driver_wrapper = DriverWrappersPool.get_default_wrapper()
context_or_world.utils = context_or_world.driver_wrapper.utils
# Get behave userdata properties to override config properties
try:
behave_properties = context_or_world.config.userdata
except AttributeError:
behave_properties = None
# Configure wrapper
context_or_world.driver_wrapper.configure(context_or_world.config_files, behave_properties=behave_properties)
# Copy config object
context_or_world.toolium_config = context_or_world.driver_wrapper.config
# Configure logger
context_or_world.logger = logging.getLogger(__name__)
def connect_wrapper(context_or_world):
"""Connect driver in behave or lettuce tests
:param context_or_world: behave context or lettuce world
"""
# Create driver if it is not already created
if context_or_world.driver_wrapper.driver:
context_or_world.driver = context_or_world.driver_wrapper.driver
else:
context_or_world.driver = context_or_world.driver_wrapper.connect()
# Copy app_strings object
context_or_world.app_strings = context_or_world.driver_wrapper.app_strings
def add_assert_screenshot_methods(context_or_world, scenario):
"""Add assert screenshot methods to behave or lettuce object
:param context_or_world: behave context or lettuce world
:param scenario: running scenario
"""
file_suffix = scenario.name
def assert_screenshot(element_or_selector, filename, threshold=0, exclude_elements=[], driver_wrapper=None,
force=False):
VisualTest(driver_wrapper, force).assert_screenshot(element_or_selector, filename, file_suffix, threshold,
exclude_elements)
def assert_full_screenshot(filename, threshold=0, exclude_elements=[], driver_wrapper=None, force=False):
VisualTest(driver_wrapper, force).assert_screenshot(None, filename, file_suffix, threshold, exclude_elements)
# Monkey patching assert_screenshot method in PageElement to use the correct test name
def assert_screenshot_page_element(self, filename, threshold=0, exclude_elements=[], force=False):
VisualTest(self.driver_wrapper, force).assert_screenshot(self.web_element, filename, file_suffix, threshold,
exclude_elements)
context_or_world.assert_screenshot = assert_screenshot
context_or_world.assert_full_screenshot = assert_full_screenshot
PageElement.assert_screenshot = assert_screenshot_page_element
def after_scenario(context, scenario):
"""Clean method that will be executed after each scenario
:param context: behave context
:param scenario: running scenario
"""
bdd_common_after_scenario(context, scenario, scenario.status)
def bdd_common_after_scenario(context_or_world, scenario, status):
"""Clean method that will be executed after each scenario in behave or lettuce
:param context_or_world: behave context or lettuce world
:param scenario: running scenario
:param status: scenario status (passed, failed or skipped)
"""
if status == 'skipped':
return
elif status == 'passed':
test_status = 'Pass'
test_comment = None
context_or_world.logger.info("The scenario '%s' has passed", scenario.name)
else:
test_status = 'Fail'
test_comment = "The scenario '%s' has failed" % scenario.name
context_or_world.logger.error("The scenario '%s' has failed", scenario.name)
context_or_world.global_status['test_passed'] = False
# Close drivers
DriverWrappersPool.close_drivers(scope='function', test_name=scenario.name, test_passed=status == 'passed',
context=context_or_world)
# Save test status to be updated later
add_jira_status(get_jira_key_from_scenario(scenario), test_status, test_comment)
def get_jira_key_from_scenario(scenario):
"""Extract Jira Test Case key from scenario tags.
Two tag formats are allowed:
@jira('PROJECT-32')
@jira=PROJECT-32
:param scenario: behave scenario
:returns: Jira test case key
"""
jira_regex = re.compile(r'jira[=\(\']*([A-Z]+\-[0-9]+)[\'\)]*$')
for tag in scenario.tags:
match = jira_regex.search(tag)
if match:
return match.group(1)
return None
def after_feature(context, feature):
"""Clean method that will be executed after each feature
:param context: behave context
:param feature: running feature
"""
# Behave dynamic environment
context.dyn_env.execute_after_feature_steps(context)
# Close drivers
DriverWrappersPool.close_drivers(scope='module', test_name=feature.name,
test_passed=context.global_status['test_passed'])
def after_all(context):
"""Clean method that will be executed after all features are finished
:param context: behave context
"""
bdd_common_after_all(context)
def bdd_common_after_all(context_or_world):
"""Common after all method in behave or lettuce
:param context_or_world: behave context or lettuce world
"""
# Close drivers
DriverWrappersPool.close_drivers(scope='session', test_name='multiple_tests',
test_passed=context_or_world.global_status['test_passed'])
# Update tests status in Jira
change_all_jira_status()
def start_driver(context, no_driver):
"""Start driver with configured values
:param context: behave context
:param no_driver: True if this is an api test and driver should not be started
"""
create_and_configure_wrapper(context)
if not no_driver:
connect_wrapper(context)
| []
| []
| [
"AppiumCapabilities_noReset",
"Config_environment",
"AppiumCapabilities_fullReset"
]
| [] | ["AppiumCapabilities_noReset", "Config_environment", "AppiumCapabilities_fullReset"] | python | 3 | 0 | |
django_falcon/django_falcon/settings/dev.py | """
Django settings for django_falcon project.
Generated by 'django-admin startproject' using Django 4.0.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/4.0/ref/settings/
"""
import os
from pathlib import Path
from dotenv import load_dotenv
load_dotenv()
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/4.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.getenv('DJANGO_SECRET')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_filters',
'rest_framework',
'rest_framework.authtoken',
'system.apps.SystemConfig'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'django_falcon.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'django_falcon.wsgi.application'
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'
}
}
# Database
# https://docs.djangoproject.com/en/4.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': os.getenv('DB_NAME'),
'USER': os.getenv('DB_USER'),
'PASSWORD': os.getenv('DB_PASSWORD'),
'HOST': os.getenv('DB_HOST'),
'PORT': os.getenv('DB_PORT'),
}
}
# Password validation
# https://docs.djangoproject.com/en/4.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/4.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/4.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
# Default primary key field type
# https://docs.djangoproject.com/en/4.0/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.IsAuthenticated'
],
'DEFAULT_AUTHENTICATION_CLASSES': [
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.TokenAuthentication'
],
'DEFAULT_THROTTLE_CLASSES': [
'rest_framework.throttling.AnonRateThrottle',
'rest_framework.throttling.UserRateThrottle'
],
'DEFAULT_THROTTLE_RATES': {
'anon': '10000/day',
'user': '1000/minute'
},
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
'PAGE_SIZE': 100,
'DEFAULT_FILTER_BACKENDS': [
'django_filters.rest_framework.DjangoFilterBackend',
'rest_framework.filters.SearchFilter',
'rest_framework.filters.OrderingFilter'
]
}
| []
| []
| [
"DB_PASSWORD",
"DB_HOST",
"DB_PORT",
"DJANGO_SECRET",
"DB_NAME",
"DB_USER"
]
| [] | ["DB_PASSWORD", "DB_HOST", "DB_PORT", "DJANGO_SECRET", "DB_NAME", "DB_USER"] | python | 6 | 0 | |
main.go | package main
import (
"math/rand"
"os"
"strings"
"time"
"github.com/DarthSim/overmind/start"
"github.com/joho/godotenv"
"gopkg.in/urfave/cli.v1"
)
const version = "2.0.2"
func setupStartCmd() cli.Command {
c := start.Handler{}
return cli.Command{
Name: "start",
Aliases: []string{"s"},
Usage: "Run procfile",
Action: c.Run,
Flags: []cli.Flag{
cli.StringFlag{Name: "title, w", EnvVar: "OVERMIND_TITLE", Usage: "Specify a title of the application", Destination: &c.Title},
cli.StringFlag{Name: "procfile, f", EnvVar: "OVERMIND_PROCFILE", Usage: "Specify a Procfile to load", Value: "./Procfile", Destination: &c.Procfile},
cli.StringFlag{Name: "processes, l", EnvVar: "OVERMIND_PROCESSES", Usage: "Specify process names to launch. Divide names with comma", Destination: &c.ProcNames},
cli.StringFlag{Name: "root, d", Usage: "Specify a working directory of application. Default: directory containing the Procfile", Destination: &c.Root},
cli.IntFlag{Name: "timeout, t", EnvVar: "OVERMIND_TIMEOUT", Usage: "Specify the amount of time (in seconds) processes have to shut down gracefully before being brutally killed", Value: 5, Destination: &c.Timeout},
cli.IntFlag{Name: "port, p", EnvVar: "OVERMIND_PORT,PORT", Usage: "Specify a port to use as the base", Value: 5000, Destination: &c.PortBase},
cli.IntFlag{Name: "port-step, P", EnvVar: "OVERMIND_PORT_STEP", Usage: "Specify a step to increase port number", Value: 100, Destination: &c.PortStep},
cli.StringFlag{Name: "socket, s", EnvVar: "OVERMIND_SOCKET", Usage: "Specify a path to the command center socket", Value: "./.overmind.sock", Destination: &c.SocketPath},
cli.StringFlag{Name: "can-die, c", EnvVar: "OVERMIND_CAN_DIE", Usage: "Specify names of process which can die without interrupting the other processes. Divide names with comma", Destination: &c.CanDie},
cli.StringFlag{Name: "colors, b", EnvVar: "OVERMIND_COLORS", Usage: "Specify the xterm color codes that will be used to colorize process names. Divide codes with comma"},
cli.StringFlag{Name: "formation, m", EnvVar: "OVERMIND_FORMATION", Usage: "Specify the number of each process type to run. The value passed in should be in the format process=num,process=num. Use 'all' as a process name to set value for all processes"},
cli.IntFlag{Name: "formation-port-step", EnvVar: "OVERMIND_FORMATION_PORT_STEP", Usage: "Specify a step to increase port number for the next instance of a process", Value: 10, Destination: &c.FormationPortStep},
cli.StringFlag{Name: "stop-signals, i", EnvVar: "OVERMIND_STOP_SIGNALS", Usage: "Specify a signal that will be sent to each process when Overmind will try to stop them. The value passed in should be in the format process=signal,process=signal. Supported signals are: ABRT, INT, KILL, QUIT, STOP, TERM, USR1, USR2"},
},
}
}
func setupRestartCmd() cli.Command {
c := cmdRestartHandler{}
return cli.Command{
Name: "restart",
Aliases: []string{"r"},
Usage: "Restart specified processes",
Action: c.Run,
ArgsUsage: "[process name...]",
Flags: []cli.Flag{
cli.StringFlag{Name: "socket, s", EnvVar: "OVERMIND_SOCKET", Usage: "Path to overmind socket", Value: "./.overmind.sock", Destination: &c.SocketPath},
},
}
}
func setupStopCmd() cli.Command {
c := cmdStopHandler{}
return cli.Command{
Name: "stop",
Aliases: []string{"interrupt", "i"},
Usage: "Stop specified processes",
Action: c.Run,
ArgsUsage: "[process name...]",
Flags: []cli.Flag{
cli.StringFlag{Name: "socket, s", EnvVar: "OVERMIND_SOCKET", Usage: "Path to overmind socket", Value: "./.overmind.sock", Destination: &c.SocketPath},
},
}
}
func setupConnectCmd() cli.Command {
c := cmdConnectHandler{}
return cli.Command{
Name: "connect",
Aliases: []string{"c"},
Usage: "Connect to the tmux session of the specified process",
Action: c.Run,
ArgsUsage: "[process name]",
Flags: []cli.Flag{
cli.BoolFlag{Name: "control-mode, c", EnvVar: "OVERMIND_CONTROL_MODE", Usage: "Connect to the tmux session in control mode", Destination: &c.ControlMode},
cli.StringFlag{Name: "socket, s", EnvVar: "OVERMIND_SOCKET", Usage: "Path to overmind socket", Value: "./.overmind.sock", Destination: &c.SocketPath},
},
}
}
func setupKillCmd() cli.Command {
c := cmdKillHandler{}
return cli.Command{
Name: "kill",
Aliases: []string{"k"},
Usage: "Kills all processes",
Action: c.Run,
Flags: []cli.Flag{
cli.StringFlag{Name: "socket, s", EnvVar: "OVERMIND_SOCKET", Usage: "Path to overmind socket", Value: "./.overmind.sock", Destination: &c.SocketPath},
},
}
}
func setupRunCmd() cli.Command {
c := cmdRunHandler{}
return cli.Command{
Name: "run",
Aliases: []string{"exec", "e"},
Usage: "Runs provided command within the Overmind environment",
Action: c.Run,
SkipFlagParsing: true,
}
}
func init() {
rand.Seed(time.Now().UTC().UnixNano())
}
func main() {
loadEnvFiles()
app := cli.NewApp()
app.Name = "Overmind"
app.HelpName = "overmind"
app.Usage = "The mind to rule processes of your development environment"
app.Description = strings.Join([]string{
"Overmind runs commands specified in procfile in a tmux session.",
"This allows to connect to each process and manage processes on fly.",
}, " ")
app.Author = "Sergey \"DarthSim\" Alexandrovich"
app.Email = "[email protected]"
app.Version = version
app.Commands = []cli.Command{
setupStartCmd(),
setupRestartCmd(),
setupStopCmd(),
setupConnectCmd(),
setupKillCmd(),
setupRunCmd(),
}
app.Run(os.Args)
}
func loadEnvFiles() {
// First load the specifically named overmind env files
godotenv.Overload("~/.overmind.env")
godotenv.Overload("./.overmind.env")
_, skipEnv := os.LookupEnv("OVERMIND_SKIP_ENV")
if !skipEnv {
godotenv.Overload("./.env")
}
if f := os.Getenv("OVERMIND_ENV"); len(f) > 0 {
godotenv.Overload(f)
}
}
| [
"\"OVERMIND_ENV\""
]
| []
| [
"OVERMIND_ENV"
]
| [] | ["OVERMIND_ENV"] | go | 1 | 0 | |
main.go | // Enhanced Markdown template processor.
package main
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"path/filepath"
"regexp"
"time"
"github.com/mh-cbon/emd/cli"
"github.com/mh-cbon/emd/deprecated"
"github.com/mh-cbon/emd/emd"
gostd "github.com/mh-cbon/emd/go"
gononstd "github.com/mh-cbon/emd/go-nonstd"
"github.com/mh-cbon/emd/provider"
"github.com/mh-cbon/emd/std"
)
// VERSION defines the running build id.
var VERSION = "1.0.2"
var program = cli.NewProgram("emd", VERSION)
var verbose bool
func logMsg(f string, args ...interface{}) {
if verbose {
log.Printf(f+"\n", args...)
}
}
func main() {
program.Bind()
if err := program.Run(os.Args); err != nil {
log.Println(err)
os.Exit(1)
}
}
// gen sub command
type gencommand struct {
*cli.Command
in mFlags
out string
data string
help bool
shortHelp bool
}
// init sub command
type initcommand struct {
*cli.Command
help bool
shortHelp bool
out string
force bool
}
func init() {
gen := &gencommand{Command: cli.NewCommand("gen", "Process an emd file.", Generate)}
gen.Set.Var(&gen.in, "in", "Input src file")
gen.Set.StringVar(&gen.out, "out", "-", "Output destination, defaults to stdout")
gen.Set.StringVar(&gen.data, "data", "", "JSON map of data")
gen.Set.BoolVar(&gen.help, "help", false, "Show help")
gen.Set.BoolVar(&gen.shortHelp, "h", false, "Show help")
program.Add(gen)
ini := &initcommand{Command: cli.NewCommand("init", "Init a basic emd file.", InitFile)}
ini.Set.BoolVar(&ini.help, "help", false, "Show help")
ini.Set.BoolVar(&ini.shortHelp, "h", false, "Show help")
ini.Set.BoolVar(&ini.force, "force", false, "Force write")
ini.Set.StringVar(&ini.out, "out", "README.e.md", "Out file")
program.Add(ini)
verbose = os.Getenv("VERBOSE") != ""
}
// Generate is the cli command implementation of gen.
func Generate(s cli.Commander) error {
cmd, ok := s.(*gencommand)
if ok == false {
return fmt.Errorf("Invalid command type %T", s)
}
if cmd.help || cmd.shortHelp {
return program.ShowCmdUsage(cmd)
}
out, err := getStdout(cmd.out)
if err != nil {
return err
}
if x, ok := out.(io.Closer); ok {
defer x.Close()
}
projectPath, err := getProjectPath()
if err != nil {
return err
}
logMsg("projectPath %q", projectPath)
plugins := getPlugins()
data, err := getData(projectPath)
if err != nil {
return err
}
gen := emd.NewGenerator()
gen.SetDataMap(data)
if len(cmd.in) == 0 {
b := tryReadOsStdin()
if b != nil && b.Len() > 0 {
gen.AddTemplate(b.String())
} else {
if s, err := os.Stat("README.e.md"); !os.IsNotExist(err) && s.IsDir() == false {
err := gen.AddFileTemplate("README.e.md")
if err != nil {
return err
}
} else {
gen.AddTemplate(defTemplate)
}
}
}
for name, plugin := range plugins {
if err := plugin(gen); err != nil {
return fmt.Errorf("Failed to register %v package: %v", name, err)
}
}
if cmd.data != "" {
jData := map[string]interface{}{}
if err := json.Unmarshal([]byte(cmd.data), &jData); err != nil {
return fmt.Errorf("Cannot decode JSON data string: %v", err)
}
gen.SetDataMap(jData)
}
if len(cmd.in) == 0 {
if err := gen.Execute(out); err != nil {
return fmt.Errorf("Generator failed: %v", err)
}
} else {
for _, val := range cmd.in {
if err := gen.AddFileTemplate(val); err != nil {
return err
}
if err := gen.Execute(out); err != nil {
return fmt.Errorf("Generator failed: %v", err)
}
}
}
return nil
}
func tryReadOsStdin() *bytes.Buffer {
copied := make(chan bool)
timedout := make(chan bool)
var ret bytes.Buffer
go func() {
io.Copy(&ret, os.Stdin)
copied <- true
}()
go func() {
<-time.After(time.Millisecond * 10)
timedout <- ret.Len() == 0
}()
select {
case empty := <-timedout:
if empty {
return nil
}
<-copied
case <-copied:
}
return &ret
}
func getProjectPath() (string, error) {
originalCwd, err := os.Getwd()
if err != nil {
return "", err
}
logMsg("cwd %q", originalCwd)
// regular go package
{
projectPath, err := matchProjectPath(originalCwd)
if err == nil {
return projectPath, nil
}
}
// symlinked go package
{
cwd, err := filepath.EvalSymlinks(originalCwd)
if err == nil {
projectPath, err := matchProjectPath(cwd)
if err == nil {
return projectPath, nil
}
}
}
// all other cases
return originalCwd, nil
}
var re = regexp.MustCompile("(src/[^/]+[.](com|org|net)/.+)")
func matchProjectPath(p string) (string, error) {
res := re.FindAllString(p, -1)
if len(res) > 0 {
return res[0][3:], nil
}
return "", fmt.Errorf("Invalid working directory %q", p)
}
func getData(cwd string) (map[string]interface{}, error) {
p := provider.Default(cwd)
return map[string]interface{}{
"Name": p.GetProjectName(),
"User": p.GetUserName(),
"ProviderURL": p.GetProviderURL(),
"ProviderName": p.GetProviderID(),
"URL": p.GetURL(),
"ProjectURL": p.GetProjectURL(),
"Branch": "master",
}, nil
}
func getPlugins() map[string]func(*emd.Generator) error {
return map[string]func(*emd.Generator) error{
"std": std.Register,
"gostd": gostd.Register,
"gononstd": gononstd.Register,
"deprecated": deprecated.Register,
}
}
func getStdout(out string) (io.Writer, error) {
ret := os.Stdout
if out != "-" {
f, err := os.Create(out)
if err != nil {
f, err = os.Open(out)
if err != nil {
return nil, fmt.Errorf("Cannot open out destination: %v", err)
}
}
ret = f
}
return ret, nil
}
// InitFile creates a basic emd file if none exists.
func InitFile(s cli.Commander) error {
cmd, ok := s.(*initcommand)
if ok == false {
return fmt.Errorf("Invalid command type %T", s)
}
if cmd.help || cmd.shortHelp {
return program.ShowCmdUsage(cmd)
}
out := cmd.out
if cmd.out == "" {
out = "README.e.md"
}
if _, err := os.Stat(out); !cmd.force && !os.IsNotExist(err) {
return fmt.Errorf("File exists at %q", out)
}
return ioutil.WriteFile(out, []byte(defTemplate), os.ModePerm)
}
var defTemplate = `# {{.Name}}
{{template "badge/goreport" .}} {{template "badge/godoc" .}}
{{pkgdoc}}
# {{toc 5}}
# Install
{{template "gh/releases" .}}
#### go
{{template "go/install" .}}
`
| [
"\"VERBOSE\""
]
| []
| [
"VERBOSE"
]
| [] | ["VERBOSE"] | go | 1 | 0 | |
dragDropInstall.py | """Requires Python 3"""
# General imports
import os, sys, shutil
# Third-Party imports
from PySide2 import QtCore
import maya.cmds as cmds
from maya.app.startup import basic
import maya.utils
# Base path definitions
MODULENAME = "depthOfFieldTool"
DRAGGEDFROMPATH = os.path.dirname(__file__)
DEFAULTMODULEPATH = f"{os.environ['MAYA_APP_DIR']}/modules"
DEFAULTSCRIPTSPATH = f"{os.environ['MAYA_APP_DIR']}/scripts"
# Custom module path definitions
MODULESCRIPTSPATH = f"{DEFAULTMODULEPATH}/{MODULENAME}/scripts"
# List of required files to install
INSTALLATIONPACKAGE = [
f"{DRAGGEDFROMPATH}/{MODULENAME}/plug-ins/windows/2022/{MODULENAME}.mll",
f"{DRAGGEDFROMPATH}/{MODULENAME}/plug-ins/windows/2020/{MODULENAME}.mll",
f"{DRAGGEDFROMPATH}/{MODULENAME}/plug-ins/linux/2022/{MODULENAME}.so",
f"{DRAGGEDFROMPATH}/{MODULENAME}/scripts/{MODULENAME}Properties.mel",
f"{DRAGGEDFROMPATH}/{MODULENAME}/scripts/{MODULENAME}Values.mel",
f"{DRAGGEDFROMPATH}/{MODULENAME}/scripts/depthOfField.mel",
f"{DRAGGEDFROMPATH}/{MODULENAME}/scripts/userSetup.py",
f"{DRAGGEDFROMPATH}/{MODULENAME}/icons/{MODULENAME}.png",
f"{DRAGGEDFROMPATH}/{MODULENAME}.mod"
]
def validatePythonVersion():
"""Required python version validation function."""
if os.environ['MAYA_PYTHON_VERSION'] == "2":
raise RuntimeError("Drag and drop installer requires Python 3, aborting installation!")
def _validateInstallationFiles():
"""Checks if all required installation files exist in source."""
missingFilesList = []
for pkg in INSTALLATIONPACKAGE:
if not QtCore.QFileInfo(pkg).exists():
missingFilesList.append(pkg)
if missingFilesList:
raise RuntimeError(
f"Installation package reported missing files: {missingFilesList}, aborting!"
)
def _removePreviousModule():
installationDestination = QtCore.QDir(f"{DEFAULTMODULEPATH}/{MODULENAME}")
if installationDestination.exists():
installationDestination.removeRecursively()
previousModFile = QtCore.QFile(f"{DEFAULTMODULEPATH}/{MODULENAME}.mod")
if previousModFile.exists():
previousModFile.remove()
def _createDirsForCopying():
"""TODO: Create a proper recrusive functrion for copying files over - temp workaround
but at least we don't have to deal with '\\' '/' slashes
"""
modulePath = QtCore.QDir(DEFAULTMODULEPATH)
modulePath.mkpath(f"{MODULENAME}/plug-ins/windows/2022/")
modulePath.mkpath(f"{MODULENAME}/plug-ins/windows/2020/")
modulePath.mkpath(f"{MODULENAME}/plug-ins/linux/2020/")
modulePath.mkpath(f"{MODULENAME}/scripts/")
modulePath.mkpath(f"{MODULENAME}/icons/")
def clearMemory():
"""Clean the current sys.path and sys.modules from anything to do with MODULENAME."""
pathsList = sys.path[:]
for index, path in enumerate(pathsList[::-1]):
if MODULENAME in path.lower():
sys.path.remove(path)
for module in list(sys.modules):
if MODULENAME in module:
del sys.modules[module]
def createDialog(message="Default Message", title="Default Title", icon="question",
buttons=["Install", "Cancel"], cancelButton="Cancel") -> str:
"""Convinience wrapper method for creating confirmDialogs."""
return(
cmds.confirmDialog(
title=title,
message=message,
icon=icon,
button=buttons,
cancelButton=cancelButton,
dismissString=cancelButton
)
)
def _finalizeInstallation():
"""Performs final installation procedures."""
clearMemory()
# Add path if its not already there
if not MODULESCRIPTSPATH in sys.path:
sys.path.append(MODULESCRIPTSPATH)
# Reload all the modules
cmds.loadModule(scan=True)
cmds.loadModule(allModules=True)
# Reload userSetup files
basic.executeUserSetup()
def onMayaDroppedPythonFile(*args, **kwargs):
"""Main function that runs when dragging the file into Maya.
Installation is performed by copying the module to the user preferences and creating
a module file.
"""
validatePythonVersion()
_validateInstallationFiles()
# Create install dialog
input = createDialog(
message=f"This will install SpeedLocator in:\n{DEFAULTMODULEPATH}",
title="SpeedLocator Installer"
)
if input == "Cancel": # Installation was cancelled
raise RuntimeError("Installation of SpeedLocator has been cancelled!")
else: # Installation continues
_createDirsForCopying()
        finished = True
        for pkg in INSTALLATIONPACKAGE:
            pkgQt = QtCore.QFile(pkg)
            # Require every copy to succeed, not just the last one, before finalizing
            finished = pkgQt.copy(pkg.replace(DRAGGEDFROMPATH, DEFAULTMODULEPATH)) and finished
if finished:
_finalizeInstallation()
| []
| []
| [
"MAYA_APP_DIR",
"MAYA_PYTHON_VERSION"
]
| [] | ["MAYA_APP_DIR", "MAYA_PYTHON_VERSION"] | python | 2 | 0 | |
interactionrouter/interactionrouter_test.go | package interactionrouter_test
import (
"bytes"
"context"
"encoding/hex"
"fmt"
"net/http"
"net/http/httptest"
"net/url"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/pkg/errors"
"github.com/slack-go/slack"
routererrors "github.com/genkami/go-slack-event-router/errors"
ir "github.com/genkami/go-slack-event-router/interactionrouter"
"github.com/genkami/go-slack-event-router/internal/testutils"
)
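// The specs below cover the router's predicates (Type, BlockAction, CallbackID),
// construction options (WithSigningSecret, InsecureSkipVerification), request
// signature verification, and handler dispatch with fallback behavior.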
var _ = Describe("InteractionRouter", func() {
Describe("Type", func() {
var (
numHandlerCalled int
innerHandler = ir.HandlerFunc(func(_ context.Context, _ *slack.InteractionCallback) error {
numHandlerCalled++
return nil
})
ctx context.Context
)
BeforeEach(func() {
numHandlerCalled = 0
ctx = context.Background()
})
Context("when the type of the interaction callback matches to the predicate's", func() {
It("calls the inner handler", func() {
h := ir.Type(slack.InteractionTypeBlockActions).Wrap(innerHandler)
callback := &slack.InteractionCallback{
Type: slack.InteractionTypeBlockActions,
}
err := h.HandleInteraction(ctx, callback)
Expect(err).NotTo(HaveOccurred())
Expect(numHandlerCalled).To(Equal(1))
})
})
Context("when the type of the interaction callback differs from the predicate's", func() {
It("calls the inner handler", func() {
h := ir.Type(slack.InteractionTypeBlockActions).Wrap(innerHandler)
callback := &slack.InteractionCallback{
Type: slack.InteractionTypeViewSubmission,
}
err := h.HandleInteraction(ctx, callback)
Expect(err).To(Equal(routererrors.NotInterested))
Expect(numHandlerCalled).To(Equal(0))
})
})
})
Describe("BlockAction", func() {
var (
numHandlerCalled int
innerHandler = ir.HandlerFunc(func(_ context.Context, _ *slack.InteractionCallback) error {
numHandlerCalled++
return nil
})
ctx context.Context
)
BeforeEach(func() {
numHandlerCalled = 0
ctx = context.Background()
})
Context("when the interaction callback has the block_action specified by the predicate", func() {
It("calls the inner handler", func() {
h := ir.BlockAction("BLOCK_ID", "ACTION_ID").Wrap(innerHandler)
callback := &slack.InteractionCallback{
Type: slack.InteractionTypeBlockActions,
ActionCallback: slack.ActionCallbacks{
BlockActions: []*slack.BlockAction{
{BlockID: "BLOCK_ID", ActionID: "ACTION_ID"},
},
},
}
err := h.HandleInteraction(ctx, callback)
Expect(err).NotTo(HaveOccurred())
Expect(numHandlerCalled).To(Equal(1))
})
})
Context("when one of the block_acsions that the interaction callback has is the one specified by the predicate", func() {
It("calls the inner handler", func() {
h := ir.BlockAction("BLOCK_ID", "ACTION_ID").Wrap(innerHandler)
callback := &slack.InteractionCallback{
Type: slack.InteractionTypeBlockActions,
ActionCallback: slack.ActionCallbacks{
BlockActions: []*slack.BlockAction{
{BlockID: "ANOTHER_BLOCK_ID", ActionID: "ANOTHER_ACTION_ID"},
{BlockID: "BLOCK_ID", ActionID: "ACTION_ID"},
},
},
}
err := h.HandleInteraction(ctx, callback)
Expect(err).NotTo(HaveOccurred())
Expect(numHandlerCalled).To(Equal(1))
})
})
Context("when the interaction callback does not have any block_action", func() {
It("does not call the inner handler", func() {
h := ir.BlockAction("BLOCK_ID", "ACTION_ID").Wrap(innerHandler)
callback := &slack.InteractionCallback{
Type: slack.InteractionTypeBlockActions,
ActionCallback: slack.ActionCallbacks{
BlockActions: []*slack.BlockAction{},
},
}
err := h.HandleInteraction(ctx, callback)
Expect(err).To(Equal(routererrors.NotInterested))
Expect(numHandlerCalled).To(Equal(0))
})
})
Context("when the block_action in the interaction callback is not what the predicate expects", func() {
It("does not call the inner handler", func() {
h := ir.BlockAction("BLOCK_ID", "ACTION_ID").Wrap(innerHandler)
callback := &slack.InteractionCallback{
Type: slack.InteractionTypeBlockActions,
ActionCallback: slack.ActionCallbacks{
BlockActions: []*slack.BlockAction{
{BlockID: "ANOTHER_BLOCK_ID", ActionID: "ANOTHER_ACTION_ID"},
},
},
}
err := h.HandleInteraction(ctx, callback)
Expect(err).To(Equal(routererrors.NotInterested))
Expect(numHandlerCalled).To(Equal(0))
})
})
Context("when the block_id in the block_action is the same as the predicate expected but the action_id isn't", func() {
It("does not call the inner handler", func() {
h := ir.BlockAction("BLOCK_ID", "ACTION_ID").Wrap(innerHandler)
callback := &slack.InteractionCallback{
Type: slack.InteractionTypeBlockActions,
ActionCallback: slack.ActionCallbacks{
BlockActions: []*slack.BlockAction{
{BlockID: "BLOCK_ID", ActionID: "ANOTHER_ACTION_ID"},
},
},
}
err := h.HandleInteraction(ctx, callback)
Expect(err).To(Equal(routererrors.NotInterested))
Expect(numHandlerCalled).To(Equal(0))
})
})
Context("when the action_id in the block_action is the same as the predicate expected but the block_id isn't", func() {
It("does not call the inner handler", func() {
h := ir.BlockAction("BLOCK_ID", "ACTION_ID").Wrap(innerHandler)
callback := &slack.InteractionCallback{
Type: slack.InteractionTypeBlockActions,
ActionCallback: slack.ActionCallbacks{
BlockActions: []*slack.BlockAction{
{BlockID: "ANOTHER_BLOCK_ID", ActionID: "ACTION_ID"},
},
},
}
err := h.HandleInteraction(ctx, callback)
Expect(err).To(Equal(routererrors.NotInterested))
Expect(numHandlerCalled).To(Equal(0))
})
})
})
Describe("CallbackID", func() {
var (
numHandlerCalled int
innerHandler = ir.HandlerFunc(func(_ context.Context, _ *slack.InteractionCallback) error {
numHandlerCalled++
return nil
})
ctx context.Context
)
BeforeEach(func() {
numHandlerCalled = 0
ctx = context.Background()
})
Context("when the callback_id in the interaction callback matches to the predicate's", func() {
It("calls the inner handler", func() {
h := ir.CallbackID("CALLBACK_ID").Wrap(innerHandler)
callback := &slack.InteractionCallback{
Type: slack.InteractionTypeBlockActions,
CallbackID: "CALLBACK_ID",
}
err := h.HandleInteraction(ctx, callback)
Expect(err).NotTo(HaveOccurred())
Expect(numHandlerCalled).To(Equal(1))
})
})
Context("when the callback_id in the interaction callback differs from the predicate's", func() {
It("does not call the inner handler", func() {
h := ir.CallbackID("CALLBACK_ID").Wrap(innerHandler)
callback := &slack.InteractionCallback{
Type: slack.InteractionTypeBlockActions,
CallbackID: "ANOTHER_CALLBACK_ID",
}
err := h.HandleInteraction(ctx, callback)
Expect(err).To(Equal(routererrors.NotInterested))
Expect(numHandlerCalled).To(Equal(0))
})
})
})
Describe("New", func() {
Context("when neither WithSigningSecret nor InsecureSkipVerification is given", func() {
It("returns an error", func() {
_, err := ir.New()
Expect(err).To(MatchError(MatchRegexp("WithSigningSecret")))
})
})
Context("when InsecureSkipVerification is given", func() {
It("returns a new Router", func() {
r, err := ir.New(ir.InsecureSkipVerification())
Expect(err).NotTo(HaveOccurred())
Expect(r).NotTo(BeNil())
})
})
Context("when WithSigningSecret is given", func() {
It("returns a new Router", func() {
r, err := ir.New(ir.WithSigningSecret("THE_TOKEN"))
Expect(err).NotTo(HaveOccurred())
Expect(r).NotTo(BeNil())
})
})
Context("when both WithSigningSecret and InsecureSkipVerification are given", func() {
It("returns an error", func() {
_, err := ir.New(ir.InsecureSkipVerification(), ir.WithSigningSecret("THE_TOKEN"))
Expect(err).To(MatchError(MatchRegexp("WithSigningSecret")))
})
})
})
Describe("WithSigningSecret", func() {
var (
r *ir.Router
token = "THE_TOKEN"
content = `
{
"type": "shortcut",
"token": "XXXXXXXXXXXXX",
"action_ts": "1581106241.371594",
"team": {
"id": "TXXXXXXXX",
"domain": "shortcuts-test"
},
"user": {
"id": "UXXXXXXXXX",
"username": "aman",
"team_id": "TXXXXXXXX"
},
"callback_id": "shortcut_create_task",
"trigger_id": "944799105734.773906753841.38b5894552bdd4a780554ee59d1f3638"
}`
)
BeforeEach(func() {
var err error
r, err = ir.New(ir.WithSigningSecret(token), ir.VerboseResponse())
Expect(err).NotTo(HaveOccurred())
})
Context("when the signature is valid", func() {
It("responds with 200", func() {
req, err := NewSignedRequest(token, content, nil)
Expect(err).NotTo(HaveOccurred())
w := httptest.NewRecorder()
r.ServeHTTP(w, req)
resp := w.Result()
Expect(resp.StatusCode).To(Equal(http.StatusOK))
})
})
Context("when the signature is invalid", func() {
It("responds with Unauthorized", func() {
req, err := NewSignedRequest(token, content, nil)
Expect(err).NotTo(HaveOccurred())
req.Header.Set(testutils.HeaderSignature, "v0="+hex.EncodeToString([]byte("INVALID_SIGNATURE")))
w := httptest.NewRecorder()
r.ServeHTTP(w, req)
resp := w.Result()
Expect(resp.StatusCode).To(Equal(http.StatusUnauthorized))
})
})
Context("when the timestamp is too old", func() {
It("responds with BadRequest", func() {
ts := time.Now().Add(-1 * time.Hour)
req, err := NewSignedRequest(token, content, &ts)
Expect(err).NotTo(HaveOccurred())
w := httptest.NewRecorder()
r.ServeHTTP(w, req)
resp := w.Result()
Expect(resp.StatusCode).To(Equal(http.StatusBadRequest))
})
})
})
Describe("InsecureSkipVerification", func() {
var (
r *ir.Router
token = "THE_TOKEN"
content = `
{
"type": "shortcut",
"token": "XXXXXXXXXXXXX",
"action_ts": "1581106241.371594",
"team": {
"id": "TXXXXXXXX",
"domain": "shortcuts-test"
},
"user": {
"id": "UXXXXXXXXX",
"username": "aman",
"team_id": "TXXXXXXXX"
},
"callback_id": "shortcut_create_task",
"trigger_id": "944799105734.773906753841.38b5894552bdd4a780554ee59d1f3638"
}`
)
BeforeEach(func() {
var err error
r, err = ir.New(ir.InsecureSkipVerification(), ir.VerboseResponse())
Expect(err).NotTo(HaveOccurred())
})
Context("when the signature is valid", func() {
It("responds with 200", func() {
req, err := NewSignedRequest(token, content, nil)
Expect(err).NotTo(HaveOccurred())
w := httptest.NewRecorder()
r.ServeHTTP(w, req)
resp := w.Result()
Expect(resp.StatusCode).To(Equal(http.StatusOK))
})
})
Context("when the signature is invalid", func() {
It("responds with 200", func() {
req, err := NewSignedRequest(token, content, nil)
Expect(err).NotTo(HaveOccurred())
req.Header.Set(testutils.HeaderSignature, "v0="+hex.EncodeToString([]byte("INVALID_SIGNATURE")))
w := httptest.NewRecorder()
r.ServeHTTP(w, req)
resp := w.Result()
Expect(resp.StatusCode).To(Equal(http.StatusOK))
})
})
Context("when the timestamp is too old", func() {
It("responds with 200", func() {
ts := time.Now().Add(-1 * time.Hour)
req, err := NewSignedRequest(token, content, &ts)
Expect(err).NotTo(HaveOccurred())
w := httptest.NewRecorder()
r.ServeHTTP(w, req)
resp := w.Result()
Expect(resp.StatusCode).To(Equal(http.StatusOK))
})
})
})
Describe("On", func() {
var (
r *ir.Router
content = `
{
"type": "shortcut",
"token": "XXXXXXXXXXXXX",
"action_ts": "1581106241.371594",
"team": {
"id": "TXXXXXXXX",
"domain": "shortcuts-test"
},
"user": {
"id": "UXXXXXXXXX",
"username": "aman",
"team_id": "TXXXXXXXX"
},
"callback_id": "shortcut_create_task",
"trigger_id": "944799105734.773906753841.38b5894552bdd4a780554ee59d1f3638"
}`
numHandlerCalled = 0
handler = ir.HandlerFunc(func(_ context.Context, _ *slack.InteractionCallback) error {
numHandlerCalled++
return nil
})
)
BeforeEach(func() {
numHandlerCalled = 0
var err error
r, err = ir.New(ir.InsecureSkipVerification(), ir.VerboseResponse())
Expect(err).NotTo(HaveOccurred())
})
Context("when no handler is registered", func() {
It("just responds with 200", func() {
req, err := NewRequest(content)
Expect(err).NotTo(HaveOccurred())
w := httptest.NewRecorder()
r.ServeHTTP(w, req)
resp := w.Result()
Expect(resp.StatusCode).To(Equal(http.StatusOK))
Expect(numHandlerCalled).To(Equal(0))
})
})
Context("when a matching handler is registered", func() {
It("calls the handler and responds with 200", func() {
r.On(slack.InteractionTypeShortcut, handler)
req, err := NewRequest(content)
Expect(err).NotTo(HaveOccurred())
w := httptest.NewRecorder()
r.ServeHTTP(w, req)
resp := w.Result()
Expect(resp.StatusCode).To(Equal(http.StatusOK))
Expect(numHandlerCalled).To(Equal(1))
})
})
Context("when a matching handler is registered to a different type of events", func() {
It("does not call the handler and responds with 200", func() {
r.On("other_interaction_type", handler)
req, err := NewRequest(content)
Expect(err).NotTo(HaveOccurred())
w := httptest.NewRecorder()
r.ServeHTTP(w, req)
resp := w.Result()
Expect(resp.StatusCode).To(Equal(http.StatusOK))
Expect(numHandlerCalled).To(Equal(0))
})
})
Context("when a handler returned an error", func() {
It("responds with InternalServerError", func() {
r.On(slack.InteractionTypeShortcut, ir.HandlerFunc(func(_ context.Context, _ *slack.InteractionCallback) error {
return fmt.Errorf("something wrong happened")
}))
req, err := NewRequest(content)
Expect(err).NotTo(HaveOccurred())
w := httptest.NewRecorder()
r.ServeHTTP(w, req)
resp := w.Result()
Expect(resp.StatusCode).To(Equal(http.StatusInternalServerError))
})
})
Context("when a handler returned NotInterested", func() {
It("responds with 200", func() {
r.On(slack.InteractionTypeShortcut, ir.HandlerFunc(func(_ context.Context, _ *slack.InteractionCallback) error {
return routererrors.NotInterested
}))
req, err := NewRequest(content)
Expect(err).NotTo(HaveOccurred())
w := httptest.NewRecorder()
r.ServeHTTP(w, req)
resp := w.Result()
Expect(resp.StatusCode).To(Equal(http.StatusOK))
})
})
Context("when a handler returned an error that equals to NotInterested using errors.Is", func() {
It("responds with 200", func() {
r.On(slack.InteractionTypeShortcut, ir.HandlerFunc(func(_ context.Context, _ *slack.InteractionCallback) error {
return errors.WithMessage(routererrors.NotInterested, "not interested")
}))
req, err := NewRequest(content)
Expect(err).NotTo(HaveOccurred())
w := httptest.NewRecorder()
r.ServeHTTP(w, req)
resp := w.Result()
Expect(resp.StatusCode).To(Equal(http.StatusOK))
})
})
Context("when a handler returned an HttpError", func() {
It("responds with a corresponding status code", func() {
code := http.StatusUnauthorized
r.On(slack.InteractionTypeShortcut, ir.HandlerFunc(func(_ context.Context, _ *slack.InteractionCallback) error {
return routererrors.HttpError(code)
}))
req, err := NewRequest(content)
Expect(err).NotTo(HaveOccurred())
w := httptest.NewRecorder()
r.ServeHTTP(w, req)
resp := w.Result()
Expect(resp.StatusCode).To(Equal(code))
})
})
Context("when a handler returned an error that equals to HttpError using errors.As", func() {
It("responds with a corresponding status code", func() {
code := http.StatusUnauthorized
r.On(slack.InteractionTypeShortcut, ir.HandlerFunc(func(_ context.Context, _ *slack.InteractionCallback) error {
return errors.WithMessage(routererrors.HttpError(code), "you ain't authorized")
}))
req, err := NewRequest(content)
Expect(err).NotTo(HaveOccurred())
w := httptest.NewRecorder()
r.ServeHTTP(w, req)
resp := w.Result()
Expect(resp.StatusCode).To(Equal(code))
})
})
Describe("Fallback", func() {
var (
numFirstHandlerCalled int
numSecondHandlerCalled int
numFallbackCalled int
firstError error
secondError error
fallbackError error
)
BeforeEach(func() {
numFirstHandlerCalled = 0
numSecondHandlerCalled = 0
numFallbackCalled = 0
r.On(slack.InteractionTypeShortcut, ir.HandlerFunc(func(_ context.Context, _ *slack.InteractionCallback) error {
numFirstHandlerCalled++
return firstError
}))
r.On(slack.InteractionTypeShortcut, ir.HandlerFunc(func(_ context.Context, _ *slack.InteractionCallback) error {
numSecondHandlerCalled++
return secondError
}))
r.SetFallback(ir.HandlerFunc(func(_ context.Context, _ *slack.InteractionCallback) error {
numFallbackCalled++
return fallbackError
}))
})
Context("when a first handler returned nil", func() {
It("does not fall back to other handlers", func() {
firstError = nil
secondError = nil
fallbackError = nil
req, err := NewRequest(content)
Expect(err).NotTo(HaveOccurred())
w := httptest.NewRecorder()
r.ServeHTTP(w, req)
resp := w.Result()
Expect(resp.StatusCode).To(Equal(http.StatusOK))
Expect(numFirstHandlerCalled).To(Equal(1))
Expect(numSecondHandlerCalled).To(Equal(0))
Expect(numFallbackCalled).To(Equal(0))
})
})
Context("when a first handler returned an error", func() {
It("responds with InternalServerError and does not fall back to other handlers", func() {
firstError = errors.New("error in the first handler")
secondError = nil
fallbackError = nil
req, err := NewRequest(content)
Expect(err).NotTo(HaveOccurred())
w := httptest.NewRecorder()
r.ServeHTTP(w, req)
resp := w.Result()
Expect(resp.StatusCode).To(Equal(http.StatusInternalServerError))
Expect(numFirstHandlerCalled).To(Equal(1))
Expect(numSecondHandlerCalled).To(Equal(0))
Expect(numFallbackCalled).To(Equal(0))
})
})
Context("when a first handler returned NotInterested", func() {
It("falls back to another handler", func() {
firstError = routererrors.NotInterested
secondError = nil
fallbackError = nil
req, err := NewRequest(content)
Expect(err).NotTo(HaveOccurred())
w := httptest.NewRecorder()
r.ServeHTTP(w, req)
resp := w.Result()
Expect(resp.StatusCode).To(Equal(http.StatusOK))
Expect(numFirstHandlerCalled).To(Equal(1))
Expect(numSecondHandlerCalled).To(Equal(1))
Expect(numFallbackCalled).To(Equal(0))
})
})
Context("when a first handler returned an error that equals to NotInterested using errors.Is", func() {
It("falls back to another handler", func() {
firstError = errors.WithMessage(routererrors.NotInterested, "not interested")
secondError = nil
fallbackError = nil
req, err := NewRequest(content)
Expect(err).NotTo(HaveOccurred())
w := httptest.NewRecorder()
r.ServeHTTP(w, req)
resp := w.Result()
Expect(resp.StatusCode).To(Equal(http.StatusOK))
Expect(numFirstHandlerCalled).To(Equal(1))
Expect(numSecondHandlerCalled).To(Equal(1))
Expect(numFallbackCalled).To(Equal(0))
})
})
Context("when the last handler returned NotInterested", func() {
It("falls back to fallback handler", func() {
firstError = routererrors.NotInterested
secondError = routererrors.NotInterested
fallbackError = nil
req, err := NewRequest(content)
Expect(err).NotTo(HaveOccurred())
w := httptest.NewRecorder()
r.ServeHTTP(w, req)
resp := w.Result()
Expect(resp.StatusCode).To(Equal(http.StatusOK))
Expect(numFirstHandlerCalled).To(Equal(1))
Expect(numSecondHandlerCalled).To(Equal(1))
Expect(numFallbackCalled).To(Equal(1))
})
})
Context("when the last handler returned an error that equals to NotInterested using errors.Is", func() {
It("falls back to fallback handler", func() {
firstError = routererrors.NotInterested
secondError = errors.WithMessage(routererrors.NotInterested, "not interested")
fallbackError = nil
req, err := NewRequest(content)
Expect(err).NotTo(HaveOccurred())
w := httptest.NewRecorder()
r.ServeHTTP(w, req)
resp := w.Result()
Expect(resp.StatusCode).To(Equal(http.StatusOK))
Expect(numFirstHandlerCalled).To(Equal(1))
Expect(numSecondHandlerCalled).To(Equal(1))
Expect(numFallbackCalled).To(Equal(1))
})
})
})
Context("when no handler except for fallback is registered", func() {
It("calls fallback handler", func() {
numCalled := 0
r.SetFallback(ir.HandlerFunc(func(_ context.Context, _ *slack.InteractionCallback) error {
numCalled++
return nil
}))
req, err := NewRequest(content)
Expect(err).NotTo(HaveOccurred())
w := httptest.NewRecorder()
r.ServeHTTP(w, req)
resp := w.Result()
Expect(resp.StatusCode).To(Equal(http.StatusOK))
Expect(numCalled).To(Equal(1))
})
})
Context("when more than one fallback handlers are registered", func() {
It("uses the last one", func() {
numFirstHandlerCalled := 0
r.SetFallback(ir.HandlerFunc(func(_ context.Context, _ *slack.InteractionCallback) error {
numFirstHandlerCalled++
return nil
}))
numLastHandlerCalled := 0
r.SetFallback(ir.HandlerFunc(func(_ context.Context, _ *slack.InteractionCallback) error {
numLastHandlerCalled++
return nil
}))
req, err := NewRequest(content)
Expect(err).NotTo(HaveOccurred())
w := httptest.NewRecorder()
r.ServeHTTP(w, req)
resp := w.Result()
Expect(resp.StatusCode).To(Equal(http.StatusOK))
Expect(numFirstHandlerCalled).To(Equal(0))
Expect(numLastHandlerCalled).To(Equal(1))
})
})
})
})
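// NewRequest builds a POST request carrying the interaction payload as a
// form-encoded "payload" field, mimicking how Slack delivers interaction callbacks.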
func NewRequest(payload string) (*http.Request, error) {
body := buildRequestBody(payload)
req, err := http.NewRequest(http.MethodPost, "http://example.com/path/to/callback", bytes.NewReader([]byte(body)))
if err != nil {
return nil, err
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
return req, nil
}
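// NewSignedRequest builds the same request as NewRequest and signs its body with
// the given signing secret, using ts as the request timestamp (nil means "now").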
func NewSignedRequest(signingSecret string, payload string, ts *time.Time) (*http.Request, error) {
var now time.Time
if ts == nil {
now = time.Now()
} else {
now = *ts
}
req, err := NewRequest(payload)
if err != nil {
return nil, err
}
body := buildRequestBody(payload)
if err := testutils.AddSignature(req.Header, []byte(signingSecret), []byte(body), now); err != nil {
return nil, err
}
return req, nil
}
func buildRequestBody(payload string) []byte {
form := url.Values{}
form.Set("payload", payload)
return []byte(form.Encode())
}
| []
| []
| []
| [] | [] | go | null | null | null |
Mesmer/manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Mesmer.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
pkg/cluster/nodes/create.go | /*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package nodes
import (
"fmt"
"net"
"os"
"github.com/pkg/errors"
"github.com/mitar/kind/pkg/cluster/config"
"github.com/mitar/kind/pkg/cluster/constants"
"github.com/mitar/kind/pkg/cluster/internal/haproxy"
"github.com/mitar/kind/pkg/cluster/internal/kubeadm"
"github.com/mitar/kind/pkg/docker"
)
// FromID creates a node handle from the node (container's) ID
func FromID(id string) *Node {
return &Node{
nameOrID: id,
}
}
// helper used to get a free TCP port for the API server
func getPort() (int, error) {
dummyListener, err := net.Listen("tcp", ":0")
if err != nil {
return 0, err
}
defer dummyListener.Close()
port := dummyListener.Addr().(*net.TCPAddr).Port
return port, nil
}
// CreateControlPlaneNode creates a control-plane node
// and gets it ready for exposing the API server
func CreateControlPlaneNode(name, image, clusterLabel string) (node *Node, err error) {
// gets a random host port for the API server
port, err := getPort()
if err != nil {
return nil, errors.Wrap(err, "failed to get port for API server")
}
node, err = createNode(name, image, clusterLabel, config.ControlPlaneRole,
// publish selected port for the API server
"--expose", fmt.Sprintf("%d", port),
"-p", fmt.Sprintf("%d:%d", port, kubeadm.APIServerPort),
)
if err != nil {
return node, err
}
// stores the port mapping into the node internal state
node.ports = map[int]int{kubeadm.APIServerPort: port}
return node, nil
}
// CreateExternalLoadBalancerNode creates an external load balancer node
// and gets it ready for exposing the API server and the load balancer admin console
func CreateExternalLoadBalancerNode(name, image, clusterLabel string) (node *Node, err error) {
// gets a random host port for control-plane load balancer
port, err := getPort()
if err != nil {
return nil, errors.Wrap(err, "failed to get port for control-plane load balancer")
}
node, err = createNode(name, image, clusterLabel, config.ExternalLoadBalancerRole,
// publish selected port for the control plane
"--expose", fmt.Sprintf("%d", port),
"-p", fmt.Sprintf("%d:%d", port, haproxy.ControlPlanePort),
)
if err != nil {
return node, err
}
// stores the port mapping into the node internal state
node.ports = map[int]int{haproxy.ControlPlanePort: port}
return node, nil
}
// CreateWorkerNode creates a worker node
func CreateWorkerNode(name, image, clusterLabel string) (node *Node, err error) {
node, err = createNode(name, image, clusterLabel, config.WorkerRole)
if err != nil {
return node, err
}
return node, nil
}
// createNode `docker run`s the node image, note that due to
// images/node/entrypoint being the entrypoint, this container will
// effectively be paused until we call actuallyStartNode(...)
func createNode(name, image, clusterLabel string, role config.NodeRole, extraArgs ...string) (handle *Node, err error) {
runArgs := []string{
"-d", // run the container detached
// running containers in a container requires privileged
		// NOTE: we could try to replicate this with --cap-add and use fewer
		// privileges, but this flag also changes some mounts that are necessary,
		// including some that docker would otherwise do by default.
		// For now this is what we want; in the future we may revisit this.
"--privileged",
"--security-opt", "seccomp=unconfined", // also ignore seccomp
"--tmpfs", "/tmp", // various things depend on working /tmp
"--tmpfs", "/run", // systemd wants a writable /run
// some k8s things want /lib/modules
"-v", "/lib/modules:/lib/modules:ro",
"--hostname", name, // make hostname match container name
"--name", name, // ... and set the container name
// label the node with the cluster ID
"--label", clusterLabel,
// label the node with the role ID
"--label", fmt.Sprintf("%s=%s", constants.ClusterRoleKey, role),
// explicitly set the entrypoint
"--entrypoint=/usr/local/bin/entrypoint",
// mount D3M data and CI directories
"-v", "/tmp/ci:/tmp/ci",
"-v", "/data:/data:ro",
}
	// pass proxy environment variables to be used by the node's docker daemon
httpProxy := os.Getenv("HTTP_PROXY")
if httpProxy != "" {
runArgs = append(runArgs, "-e", "HTTP_PROXY="+httpProxy)
}
httpsProxy := os.Getenv("HTTPS_PROXY")
if httpsProxy != "" {
runArgs = append(runArgs, "-e", "HTTPS_PROXY="+httpsProxy)
}
// adds node specific args
runArgs = append(runArgs, extraArgs...)
if docker.UsernsRemap() {
// We need this argument in order to make this command work
// in systems that have userns-remap enabled on the docker daemon
runArgs = append(runArgs, "--userns=host")
}
id, err := docker.Run(
image,
runArgs,
[]string{
// explicitly pass the entrypoint argument
"/sbin/init",
},
)
// if there is a returned ID then we did create a container
// we should return a handle so the caller can clean it up
// we'll return a handle with the nice name though
if id != "" {
handle = &Node{
nameOrID: name,
}
}
if err != nil {
return handle, errors.Wrap(err, "docker run error")
}
// Deletes the machine-id embedded in the node image and regenerate a new one.
// This is necessary because both kubelet and other components like weave net
// use machine-id internally to distinguish nodes.
if err := handle.Command("rm", "-f", "/etc/machine-id").Run(); err != nil {
return handle, errors.Wrap(err, "machine-id-setup error")
}
if err := handle.Command("systemd-machine-id-setup").Run(); err != nil {
return handle, errors.Wrap(err, "machine-id-setup error")
}
return handle, nil
}
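// Illustrative usage (a sketch, not code from this package): callers typically pair
// these constructors roughly as follows, with image and clusterLabel supplied by the
// cluster configuration:
//
//	controlPlane, err := CreateControlPlaneNode("kind-control-plane", image, clusterLabel)
//	if err != nil {
//		return err
//	}
//	worker, err := CreateWorkerNode("kind-worker", image, clusterLabel)
//	if err != nil {
//		return err
//	}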
| [
"\"HTTP_PROXY\"",
"\"HTTPS_PROXY\""
]
| []
| [
"HTTP_PROXY",
"HTTPS_PROXY"
]
| [] | ["HTTP_PROXY", "HTTPS_PROXY"] | go | 2 | 0 | |
pkg/proc/proc_test.go | package proc_test
import (
"bytes"
"encoding/binary"
"flag"
"fmt"
"go/ast"
"go/constant"
"go/token"
"io/ioutil"
"math/rand"
"net"
"net/http"
"os"
"os/exec"
"path/filepath"
"reflect"
"runtime"
"sort"
"strconv"
"strings"
"testing"
"time"
"github.com/go-delve/delve/pkg/dwarf/frame"
"github.com/go-delve/delve/pkg/dwarf/op"
"github.com/go-delve/delve/pkg/dwarf/regnum"
"github.com/go-delve/delve/pkg/goversion"
"github.com/go-delve/delve/pkg/logflags"
"github.com/go-delve/delve/pkg/proc"
"github.com/go-delve/delve/pkg/proc/core"
"github.com/go-delve/delve/pkg/proc/gdbserial"
"github.com/go-delve/delve/pkg/proc/native"
protest "github.com/go-delve/delve/pkg/proc/test"
"github.com/go-delve/delve/service/api"
)
var normalLoadConfig = proc.LoadConfig{true, 1, 64, 64, -1, 0}
var testBackend, buildMode string
func init() {
runtime.GOMAXPROCS(4)
os.Setenv("GOMAXPROCS", "4")
}
func TestMain(m *testing.M) {
flag.StringVar(&testBackend, "backend", "", "selects backend")
flag.StringVar(&buildMode, "test-buildmode", "", "selects build mode")
var logConf string
flag.StringVar(&logConf, "log", "", "configures logging")
flag.Parse()
protest.DefaultTestBackend(&testBackend)
if buildMode != "" && buildMode != "pie" {
fmt.Fprintf(os.Stderr, "unknown build mode %q", buildMode)
os.Exit(1)
}
logflags.Setup(logConf != "", logConf, "")
os.Exit(protest.RunTestsWithFixtures(m))
}
func matchSkipConditions(conditions ...string) bool {
for _, cond := range conditions {
condfound := false
for _, s := range []string{runtime.GOOS, runtime.GOARCH, testBackend, buildMode} {
if s == cond {
condfound = true
break
}
}
if !condfound {
return false
}
}
return true
}
func skipOn(t testing.TB, reason string, conditions ...string) {
if matchSkipConditions(conditions...) {
t.Skipf("skipped on %s: %s", strings.Join(conditions, "/"), reason)
}
}
func skipUnlessOn(t testing.TB, reason string, conditions ...string) {
if !matchSkipConditions(conditions...) {
t.Skipf("skipped on %s: %s", strings.Join(conditions, "/"), reason)
}
}
func withTestProcess(name string, t testing.TB, fn func(p *proc.Target, fixture protest.Fixture)) {
withTestProcessArgs(name, t, ".", []string{}, 0, fn)
}
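// withTestProcessArgs builds the named fixture (optionally as PIE) and launches it
// under the backend selected by the -backend flag (native, lldb, or rr), then runs
// fn against the resulting target and detaches when fn returns.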
func withTestProcessArgs(name string, t testing.TB, wd string, args []string, buildFlags protest.BuildFlags, fn func(p *proc.Target, fixture protest.Fixture)) {
if buildMode == "pie" {
buildFlags |= protest.BuildModePIE
}
fixture := protest.BuildFixture(name, buildFlags)
var p *proc.Target
var err error
var tracedir string
switch testBackend {
case "native":
p, err = native.Launch(append([]string{fixture.Path}, args...), wd, 0, []string{}, "", [3]string{})
case "lldb":
p, err = gdbserial.LLDBLaunch(append([]string{fixture.Path}, args...), wd, 0, []string{}, "", [3]string{})
case "rr":
protest.MustHaveRecordingAllowed(t)
t.Log("recording")
p, tracedir, err = gdbserial.RecordAndReplay(append([]string{fixture.Path}, args...), wd, true, []string{}, [3]string{})
t.Logf("replaying %q", tracedir)
default:
t.Fatal("unknown backend")
}
if err != nil {
t.Fatal("Launch():", err)
}
defer func() {
p.Detach(true)
}()
fn(p, fixture)
}
func getRegisters(p *proc.Target, t *testing.T) proc.Registers {
regs, err := p.CurrentThread().Registers()
if err != nil {
t.Fatal("Registers():", err)
}
return regs
}
func dataAtAddr(thread proc.MemoryReadWriter, addr uint64) ([]byte, error) {
data := make([]byte, 1)
_, err := thread.ReadMemory(data, addr)
return data, err
}
func assertNoError(err error, t testing.TB, s string) {
if err != nil {
_, file, line, _ := runtime.Caller(1)
fname := filepath.Base(file)
t.Fatalf("failed assertion at %s:%d: %s - %s\n", fname, line, s, err)
}
}
func currentPC(p *proc.Target, t *testing.T) uint64 {
regs, err := p.CurrentThread().Registers()
if err != nil {
t.Fatal(err)
}
return regs.PC()
}
func currentLineNumber(p *proc.Target, t *testing.T) (string, int) {
pc := currentPC(p, t)
f, l, _ := p.BinInfo().PCToLine(pc)
return f, l
}
func assertLineNumber(p *proc.Target, t *testing.T, lineno int, descr string) (string, int) {
f, l := currentLineNumber(p, t)
if l != lineno {
_, callerFile, callerLine, _ := runtime.Caller(1)
t.Fatalf("%s expected line :%d got %s:%d\n\tat %s:%d", descr, lineno, f, l, callerFile, callerLine)
}
return f, l
}
func TestExit(t *testing.T) {
protest.AllowRecording(t)
withTestProcess("continuetestprog", t, func(p *proc.Target, fixture protest.Fixture) {
err := p.Continue()
pe, ok := err.(proc.ErrProcessExited)
if !ok {
t.Fatalf("Continue() returned unexpected error type %s", err)
}
if pe.Status != 0 {
t.Errorf("Unexpected error status: %d", pe.Status)
}
if pe.Pid != p.Pid() {
t.Errorf("Unexpected process id: %d", pe.Pid)
}
})
}
func TestExitAfterContinue(t *testing.T) {
protest.AllowRecording(t)
withTestProcess("continuetestprog", t, func(p *proc.Target, fixture protest.Fixture) {
setFunctionBreakpoint(p, t, "main.sayhi")
assertNoError(p.Continue(), t, "First Continue()")
err := p.Continue()
pe, ok := err.(proc.ErrProcessExited)
if !ok {
t.Fatalf("Continue() returned unexpected error type %s", pe)
}
if pe.Status != 0 {
t.Errorf("Unexpected error status: %d", pe.Status)
}
if pe.Pid != p.Pid() {
t.Errorf("Unexpected process id: %d", pe.Pid)
}
})
}
func setFunctionBreakpoint(p *proc.Target, t testing.TB, fname string) *proc.Breakpoint {
_, f, l, _ := runtime.Caller(1)
f = filepath.Base(f)
addrs, err := proc.FindFunctionLocation(p, fname, 0)
if err != nil {
t.Fatalf("%s:%d: FindFunctionLocation(%s): %v", f, l, fname, err)
}
if len(addrs) != 1 {
t.Fatalf("%s:%d: setFunctionBreakpoint(%s): too many results %v", f, l, fname, addrs)
}
bp, err := p.SetBreakpoint(addrs[0], proc.UserBreakpoint, nil)
if err != nil {
t.Fatalf("%s:%d: FindFunctionLocation(%s): %v", f, l, fname, err)
}
return bp
}
func setFileBreakpoint(p *proc.Target, t testing.TB, path string, lineno int) *proc.Breakpoint {
_, f, l, _ := runtime.Caller(1)
f = filepath.Base(f)
addrs, err := proc.FindFileLocation(p, path, lineno)
if err != nil {
t.Fatalf("%s:%d: FindFileLocation(%s, %d): %v", f, l, path, lineno, err)
}
if len(addrs) != 1 {
t.Fatalf("%s:%d: setFileLineBreakpoint(%s, %d): too many (or not enough) results %v", f, l, path, lineno, addrs)
}
bp, err := p.SetBreakpoint(addrs[0], proc.UserBreakpoint, nil)
if err != nil {
t.Fatalf("%s:%d: SetBreakpoint: %v", f, l, err)
}
return bp
}
func findFunctionLocation(p *proc.Target, t *testing.T, fnname string) uint64 {
_, f, l, _ := runtime.Caller(1)
f = filepath.Base(f)
addrs, err := proc.FindFunctionLocation(p, fnname, 0)
if err != nil {
t.Fatalf("%s:%d: FindFunctionLocation(%s): %v", f, l, fnname, err)
}
if len(addrs) != 1 {
t.Fatalf("%s:%d: FindFunctionLocation(%s): too many results %v", f, l, fnname, addrs)
}
return addrs[0]
}
func findFileLocation(p *proc.Target, t *testing.T, file string, lineno int) uint64 {
_, f, l, _ := runtime.Caller(1)
f = filepath.Base(f)
addrs, err := proc.FindFileLocation(p, file, lineno)
if err != nil {
t.Fatalf("%s:%d: FindFileLocation(%s, %d): %v", f, l, file, lineno, err)
}
if len(addrs) != 1 {
t.Fatalf("%s:%d: FindFileLocation(%s, %d): too many results %v", f, l, file, lineno, addrs)
}
return addrs[0]
}
func TestHalt(t *testing.T) {
stopChan := make(chan interface{}, 1)
withTestProcess("loopprog", t, func(p *proc.Target, fixture protest.Fixture) {
setFunctionBreakpoint(p, t, "main.loop")
assertNoError(p.Continue(), t, "Continue")
resumeChan := make(chan struct{}, 1)
go func() {
<-resumeChan
time.Sleep(100 * time.Millisecond)
stopChan <- p.RequestManualStop()
}()
p.ResumeNotify(resumeChan)
assertNoError(p.Continue(), t, "Continue")
retVal := <-stopChan
if err, ok := retVal.(error); ok && err != nil {
t.Fatal()
}
})
}
func TestStep(t *testing.T) {
protest.AllowRecording(t)
withTestProcess("testprog", t, func(p *proc.Target, fixture protest.Fixture) {
setFunctionBreakpoint(p, t, "main.helloworld")
assertNoError(p.Continue(), t, "Continue()")
regs := getRegisters(p, t)
rip := regs.PC()
err := p.CurrentThread().StepInstruction()
assertNoError(err, t, "Step()")
regs = getRegisters(p, t)
if rip >= regs.PC() {
t.Errorf("Expected %#v to be greater than %#v", regs.PC(), rip)
}
})
}
func TestBreakpoint(t *testing.T) {
protest.AllowRecording(t)
withTestProcess("testprog", t, func(p *proc.Target, fixture protest.Fixture) {
bp := setFunctionBreakpoint(p, t, "main.helloworld")
assertNoError(p.Continue(), t, "Continue()")
regs, err := p.CurrentThread().Registers()
assertNoError(err, t, "Registers")
pc := regs.PC()
if bp.UserBreaklet().TotalHitCount != 1 {
t.Fatalf("Breakpoint should be hit once, got %d\n", bp.UserBreaklet().TotalHitCount)
}
if pc-1 != bp.Addr && pc != bp.Addr {
f, l, _ := p.BinInfo().PCToLine(pc)
t.Fatalf("Break not respected:\nPC:%#v %s:%d\nFN:%#v \n", pc, f, l, bp.Addr)
}
})
}
func TestBreakpointInSeparateGoRoutine(t *testing.T) {
protest.AllowRecording(t)
withTestProcess("testthreads", t, func(p *proc.Target, fixture protest.Fixture) {
setFunctionBreakpoint(p, t, "main.anotherthread")
assertNoError(p.Continue(), t, "Continue")
regs, err := p.CurrentThread().Registers()
assertNoError(err, t, "Registers")
pc := regs.PC()
f, l, _ := p.BinInfo().PCToLine(pc)
if f != "testthreads.go" && l != 8 {
t.Fatal("Program did not hit breakpoint")
}
})
}
func TestBreakpointWithNonExistantFunction(t *testing.T) {
withTestProcess("testprog", t, func(p *proc.Target, fixture protest.Fixture) {
_, err := p.SetBreakpoint(0, proc.UserBreakpoint, nil)
if err == nil {
t.Fatal("Should not be able to break at non existant function")
}
})
}
func TestClearBreakpointBreakpoint(t *testing.T) {
withTestProcess("testprog", t, func(p *proc.Target, fixture protest.Fixture) {
bp := setFunctionBreakpoint(p, t, "main.sleepytime")
err := p.ClearBreakpoint(bp.Addr)
assertNoError(err, t, "ClearBreakpoint()")
data, err := dataAtAddr(p.Memory(), bp.Addr)
assertNoError(err, t, "dataAtAddr")
int3 := []byte{0xcc}
if bytes.Equal(data, int3) {
t.Fatalf("Breakpoint was not cleared data: %#v, int3: %#v", data, int3)
}
if countBreakpoints(p) != 0 {
t.Fatal("Breakpoint not removed internally")
}
})
}
type nextTest struct {
begin, end int
}
func countBreakpoints(p *proc.Target) int {
bpcount := 0
for _, bp := range p.Breakpoints().M {
if bp.LogicalID() >= 0 {
bpcount++
}
}
return bpcount
}
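// contFunc identifies which stepping operation (continue, next, step, step-out,
// their reverse variants, or continue-to-breakpoint) testseq2 applies at each
// point of a test sequence.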
type contFunc int
const (
contContinue contFunc = iota
contNext
contStep
contStepout
contReverseNext
contReverseStep
contReverseStepout
contContinueToBreakpoint
)
type seqTest struct {
cf contFunc
pos interface{}
}
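// testseq converts a list of begin/end line pairs into a seqTest sequence: it
// continues to the first begin line, then applies contFunc repeatedly, checking
// that each operation stops on the expected end line.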
func testseq(program string, contFunc contFunc, testcases []nextTest, initialLocation string, t *testing.T) {
seqTestcases := make([]seqTest, len(testcases)+1)
seqTestcases[0] = seqTest{contContinue, testcases[0].begin}
for i := range testcases {
if i > 0 {
if testcases[i-1].end != testcases[i].begin {
panic(fmt.Errorf("begin/end mismatch at index %d", i))
}
}
seqTestcases[i+1] = seqTest{contFunc, testcases[i].end}
}
testseq2(t, program, initialLocation, seqTestcases)
}
const traceTestseq2 = true
func testseq2(t *testing.T, program string, initialLocation string, testcases []seqTest) {
testseq2Args(".", []string{}, 0, t, program, initialLocation, testcases)
}
func testseq2Args(wd string, args []string, buildFlags protest.BuildFlags, t *testing.T, program string, initialLocation string, testcases []seqTest) {
protest.AllowRecording(t)
withTestProcessArgs(program, t, wd, args, buildFlags, func(p *proc.Target, fixture protest.Fixture) {
var bp *proc.Breakpoint
if initialLocation != "" {
bp = setFunctionBreakpoint(p, t, initialLocation)
} else if testcases[0].cf == contContinue {
bp = setFileBreakpoint(p, t, fixture.Source, testcases[0].pos.(int))
} else {
panic("testseq2 can not set initial breakpoint")
}
if traceTestseq2 {
t.Logf("initial breakpoint %v", bp)
}
regs, err := p.CurrentThread().Registers()
assertNoError(err, t, "Registers")
f, ln := currentLineNumber(p, t)
for i, tc := range testcases {
switch tc.cf {
case contNext:
if traceTestseq2 {
t.Log("next")
}
assertNoError(p.Next(), t, "Next() returned an error")
case contStep:
if traceTestseq2 {
t.Log("step")
}
assertNoError(p.Step(), t, "Step() returned an error")
case contStepout:
if traceTestseq2 {
t.Log("stepout")
}
assertNoError(p.StepOut(), t, "StepOut() returned an error")
case contContinue:
if traceTestseq2 {
t.Log("continue")
}
assertNoError(p.Continue(), t, "Continue() returned an error")
if i == 0 {
if traceTestseq2 {
t.Log("clearing initial breakpoint")
}
err := p.ClearBreakpoint(bp.Addr)
assertNoError(err, t, "ClearBreakpoint() returned an error")
}
case contReverseNext:
if traceTestseq2 {
t.Log("reverse-next")
}
assertNoError(p.ChangeDirection(proc.Backward), t, "direction switch")
assertNoError(p.Next(), t, "reverse Next() returned an error")
assertNoError(p.ChangeDirection(proc.Forward), t, "direction switch")
case contReverseStep:
if traceTestseq2 {
t.Log("reverse-step")
}
assertNoError(p.ChangeDirection(proc.Backward), t, "direction switch")
assertNoError(p.Step(), t, "reverse Step() returned an error")
assertNoError(p.ChangeDirection(proc.Forward), t, "direction switch")
case contReverseStepout:
if traceTestseq2 {
t.Log("reverse-stepout")
}
assertNoError(p.ChangeDirection(proc.Backward), t, "direction switch")
assertNoError(p.StepOut(), t, "reverse StepOut() returned an error")
assertNoError(p.ChangeDirection(proc.Forward), t, "direction switch")
case contContinueToBreakpoint:
bp := setFileBreakpoint(p, t, fixture.Source, tc.pos.(int))
if traceTestseq2 {
t.Log("continue")
}
assertNoError(p.Continue(), t, "Continue() returned an error")
err := p.ClearBreakpoint(bp.Addr)
assertNoError(err, t, "ClearBreakpoint() returned an error")
}
f, ln = currentLineNumber(p, t)
regs, _ = p.CurrentThread().Registers()
pc := regs.PC()
if traceTestseq2 {
t.Logf("at %#x %s:%d", pc, f, ln)
fmt.Printf("at %#x %s:%d\n", pc, f, ln)
}
switch pos := tc.pos.(type) {
case int:
if pos >= 0 && ln != pos {
t.Fatalf("Program did not continue to correct next location expected %d was %s:%d (%#x) (testcase %d)", pos, filepath.Base(f), ln, pc, i)
}
case string:
v := strings.Split(pos, ":")
tgtln, _ := strconv.Atoi(v[1])
if !strings.HasSuffix(f, v[0]) || (ln != tgtln) {
t.Fatalf("Program did not continue to correct next location, expected %s was %s:%d (%#x) (testcase %d)", pos, filepath.Base(f), ln, pc, i)
}
}
}
if countBreakpoints(p) != 0 {
t.Fatal("Not all breakpoints were cleaned up", len(p.Breakpoints().M))
}
})
}
func TestNextGeneral(t *testing.T) {
var testcases []nextTest
ver, _ := goversion.Parse(runtime.Version())
if ver.Major < 0 || ver.AfterOrEqual(goversion.GoVersion{Major: 1, Minor: 7, Rev: -1}) {
testcases = []nextTest{
{17, 19},
{19, 20},
{20, 23},
{23, 24},
{24, 26},
{26, 31},
{31, 23},
{23, 24},
{24, 26},
{26, 31},
{31, 23},
{23, 24},
{24, 26},
{26, 27},
{27, 28},
{28, 34},
}
} else {
testcases = []nextTest{
{17, 19},
{19, 20},
{20, 23},
{23, 24},
{24, 26},
{26, 31},
{31, 23},
{23, 24},
{24, 26},
{26, 31},
{31, 23},
{23, 24},
{24, 26},
{26, 27},
{27, 34},
}
}
testseq("testnextprog", contNext, testcases, "main.testnext", t)
}
func TestNextConcurrent(t *testing.T) {
skipOn(t, "broken", "freebsd")
testcases := []nextTest{
{8, 9},
{9, 10},
{10, 11},
}
protest.AllowRecording(t)
withTestProcess("parallel_next", t, func(p *proc.Target, fixture protest.Fixture) {
bp := setFunctionBreakpoint(p, t, "main.sayhi")
assertNoError(p.Continue(), t, "Continue")
f, ln := currentLineNumber(p, t)
initV := evalVariable(p, t, "n")
initVval, _ := constant.Int64Val(initV.Value)
err := p.ClearBreakpoint(bp.Addr)
assertNoError(err, t, "ClearBreakpoint()")
for _, tc := range testcases {
g, err := proc.GetG(p.CurrentThread())
assertNoError(err, t, "GetG()")
if p.SelectedGoroutine().ID != g.ID {
t.Fatalf("SelectedGoroutine not CurrentThread's goroutine: %d %d", g.ID, p.SelectedGoroutine().ID)
}
if ln != tc.begin {
t.Fatalf("Program not stopped at correct spot expected %d was %s:%d", tc.begin, filepath.Base(f), ln)
}
assertNoError(p.Next(), t, "Next() returned an error")
f, ln = assertLineNumber(p, t, tc.end, "Program did not continue to the expected location")
v := evalVariable(p, t, "n")
vval, _ := constant.Int64Val(v.Value)
if vval != initVval {
t.Fatal("Did not end up on same goroutine")
}
}
})
}
func TestNextConcurrentVariant2(t *testing.T) {
skipOn(t, "broken", "freebsd")
	// Just like TestNextConcurrent, but instead of removing the initial breakpoint we check that, when it is hit, it is hit by other goroutines
testcases := []nextTest{
{8, 9},
{9, 10},
{10, 11},
}
protest.AllowRecording(t)
withTestProcess("parallel_next", t, func(p *proc.Target, fixture protest.Fixture) {
setFunctionBreakpoint(p, t, "main.sayhi")
assertNoError(p.Continue(), t, "Continue")
f, ln := currentLineNumber(p, t)
initV := evalVariable(p, t, "n")
initVval, _ := constant.Int64Val(initV.Value)
for _, tc := range testcases {
t.Logf("test case %v", tc)
g, err := proc.GetG(p.CurrentThread())
assertNoError(err, t, "GetG()")
if p.SelectedGoroutine().ID != g.ID {
t.Fatalf("SelectedGoroutine not CurrentThread's goroutine: %d %d", g.ID, p.SelectedGoroutine().ID)
}
if ln != tc.begin {
t.Fatalf("Program not stopped at correct spot expected %d was %s:%d", tc.begin, filepath.Base(f), ln)
}
assertNoError(p.Next(), t, "Next() returned an error")
var vval int64
for {
v := evalVariable(p, t, "n")
for _, thread := range p.ThreadList() {
proc.GetG(thread)
}
vval, _ = constant.Int64Val(v.Value)
if bpstate := p.CurrentThread().Breakpoint(); bpstate.Breakpoint == nil {
if vval != initVval {
t.Fatal("Did not end up on same goroutine")
}
break
} else {
if vval == initVval {
t.Fatal("Initial breakpoint triggered twice for the same goroutine")
}
assertNoError(p.Continue(), t, "Continue 2")
}
}
f, ln = assertLineNumber(p, t, tc.end, "Program did not continue to the expected location")
}
})
}
func TestNextFunctionReturn(t *testing.T) {
testcases := []nextTest{
{13, 14},
{14, 15},
{15, 35},
}
protest.AllowRecording(t)
testseq("testnextprog", contNext, testcases, "main.helloworld", t)
}
func TestNextFunctionReturnDefer(t *testing.T) {
var testcases []nextTest
ver, _ := goversion.Parse(runtime.Version())
if ver.Major < 0 || ver.AfterOrEqual(goversion.GoVersion{Major: 1, Minor: 9, Rev: -1}) {
testcases = []nextTest{
{5, 6},
{6, 9},
{9, 10},
}
} else {
testcases = []nextTest{
{5, 8},
{8, 9},
{9, 10},
}
}
protest.AllowRecording(t)
testseq("testnextdefer", contNext, testcases, "main.main", t)
}
func TestNextNetHTTP(t *testing.T) {
testcases := []nextTest{
{11, 12},
{12, 13},
}
withTestProcess("testnextnethttp", t, func(p *proc.Target, fixture protest.Fixture) {
go func() {
// Wait for program to start listening.
for {
conn, err := net.Dial("tcp", "127.0.0.1:9191")
if err == nil {
conn.Close()
break
}
time.Sleep(50 * time.Millisecond)
}
http.Get("http://127.0.0.1:9191")
}()
if err := p.Continue(); err != nil {
t.Fatal(err)
}
f, ln := currentLineNumber(p, t)
for _, tc := range testcases {
if ln != tc.begin {
t.Fatalf("Program not stopped at correct spot expected %d was %s:%d", tc.begin, filepath.Base(f), ln)
}
assertNoError(p.Next(), t, "Next() returned an error")
f, ln = assertLineNumber(p, t, tc.end, "Program did not continue to correct next location")
}
})
}
func TestRuntimeBreakpoint(t *testing.T) {
withTestProcess("testruntimebreakpoint", t, func(p *proc.Target, fixture protest.Fixture) {
err := p.Continue()
if err != nil {
t.Fatal(err)
}
regs, err := p.CurrentThread().Registers()
assertNoError(err, t, "Registers")
pc := regs.PC()
f, l, _ := p.BinInfo().PCToLine(pc)
if l != 10 {
t.Fatalf("did not respect breakpoint %s:%d", f, l)
}
})
}
func returnAddress(thread proc.Thread) (uint64, error) {
locations, err := proc.ThreadStacktrace(thread, 2)
if err != nil {
return 0, err
}
if len(locations) < 2 {
return 0, fmt.Errorf("no return address for function: %s", locations[0].Current.Fn.BaseName())
}
return locations[1].Current.PC, nil
}
func TestFindReturnAddress(t *testing.T) {
protest.AllowRecording(t)
withTestProcess("testnextprog", t, func(p *proc.Target, fixture protest.Fixture) {
setFileBreakpoint(p, t, fixture.Source, 24)
err := p.Continue()
if err != nil {
t.Fatal(err)
}
addr, err := returnAddress(p.CurrentThread())
if err != nil {
t.Fatal(err)
}
_, l, _ := p.BinInfo().PCToLine(addr)
if l != 40 {
t.Fatalf("return address not found correctly, expected line 40")
}
})
}
func TestFindReturnAddressTopOfStackFn(t *testing.T) {
protest.AllowRecording(t)
withTestProcess("testreturnaddress", t, func(p *proc.Target, fixture protest.Fixture) {
fnName := "runtime.rt0_go"
setFunctionBreakpoint(p, t, fnName)
if err := p.Continue(); err != nil {
t.Fatal(err)
}
if _, err := returnAddress(p.CurrentThread()); err == nil {
t.Fatal("expected error to be returned")
}
})
}
func TestSwitchThread(t *testing.T) {
protest.AllowRecording(t)
withTestProcess("testnextprog", t, func(p *proc.Target, fixture protest.Fixture) {
// With invalid thread id
err := p.SwitchThread(-1)
if err == nil {
t.Fatal("Expected error for invalid thread id")
}
setFunctionBreakpoint(p, t, "main.main")
err = p.Continue()
if err != nil {
t.Fatal(err)
}
var nt int
ct := p.CurrentThread().ThreadID()
for _, thread := range p.ThreadList() {
if thread.ThreadID() != ct {
nt = thread.ThreadID()
break
}
}
if nt == 0 {
t.Fatal("could not find thread to switch to")
}
// With valid thread id
err = p.SwitchThread(nt)
if err != nil {
t.Fatal(err)
}
if p.CurrentThread().ThreadID() != nt {
t.Fatal("Did not switch threads")
}
})
}
func TestCGONext(t *testing.T) {
// Test if one can do 'next' in a cgo binary
// On OSX with Go < 1.5 CGO is not supported due to: https://github.com/golang/go/issues/8973
if !goversion.VersionAfterOrEqual(runtime.Version(), 1, 5) {
skipOn(t, "upstream issue", "darwin")
}
protest.MustHaveCgo(t)
skipOn(t, "broken - cgo stacktraces", "darwin", "arm64")
protest.AllowRecording(t)
withTestProcess("cgotest", t, func(p *proc.Target, fixture protest.Fixture) {
setFunctionBreakpoint(p, t, "main.main")
assertNoError(p.Continue(), t, "Continue()")
assertNoError(p.Next(), t, "Next()")
})
}
type loc struct {
line int
fn string
}
func (l1 *loc) match(l2 proc.Stackframe) bool {
if l1.line >= 0 {
if l1.line != l2.Call.Line {
return false
}
}
return l1.fn == l2.Call.Fn.Name
}
func TestStacktrace(t *testing.T) {
stacks := [][]loc{
{{4, "main.stacktraceme"}, {8, "main.func1"}, {16, "main.main"}},
{{4, "main.stacktraceme"}, {8, "main.func1"}, {12, "main.func2"}, {17, "main.main"}},
}
protest.AllowRecording(t)
withTestProcess("stacktraceprog", t, func(p *proc.Target, fixture protest.Fixture) {
bp := setFunctionBreakpoint(p, t, "main.stacktraceme")
for i := range stacks {
assertNoError(p.Continue(), t, "Continue()")
locations, err := proc.ThreadStacktrace(p.CurrentThread(), 40)
assertNoError(err, t, "Stacktrace()")
if len(locations) != len(stacks[i])+2 {
t.Fatalf("Wrong stack trace size %d %d\n", len(locations), len(stacks[i])+2)
}
t.Logf("Stacktrace %d:\n", i)
for i := range locations {
t.Logf("\t%s:%d\n", locations[i].Call.File, locations[i].Call.Line)
}
for j := range stacks[i] {
if !stacks[i][j].match(locations[j]) {
t.Fatalf("Wrong stack trace pos %d\n", j)
}
}
}
p.ClearBreakpoint(bp.Addr)
p.Continue()
})
}
func TestStacktrace2(t *testing.T) {
withTestProcess("retstack", t, func(p *proc.Target, fixture protest.Fixture) {
assertNoError(p.Continue(), t, "Continue()")
locations, err := proc.ThreadStacktrace(p.CurrentThread(), 40)
assertNoError(err, t, "Stacktrace()")
if !stackMatch([]loc{{-1, "main.f"}, {16, "main.main"}}, locations, false) {
for i := range locations {
t.Logf("\t%s:%d [%s]\n", locations[i].Call.File, locations[i].Call.Line, locations[i].Call.Fn.Name)
}
t.Fatalf("Stack error at main.f()\n%v\n", locations)
}
assertNoError(p.Continue(), t, "Continue()")
locations, err = proc.ThreadStacktrace(p.CurrentThread(), 40)
assertNoError(err, t, "Stacktrace()")
if !stackMatch([]loc{{-1, "main.g"}, {17, "main.main"}}, locations, false) {
for i := range locations {
t.Logf("\t%s:%d [%s]\n", locations[i].Call.File, locations[i].Call.Line, locations[i].Call.Fn.Name)
}
t.Fatalf("Stack error at main.g()\n%v\n", locations)
}
})
}
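// stackMatch reports whether the frames in stack appear, in order, within
// locations; when skipRuntime is set, frames from the runtime package are ignored.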
func stackMatch(stack []loc, locations []proc.Stackframe, skipRuntime bool) bool {
if len(stack) > len(locations) {
return false
}
i := 0
for j := range locations {
if i >= len(stack) {
break
}
if skipRuntime {
if locations[j].Call.Fn == nil || strings.HasPrefix(locations[j].Call.Fn.Name, "runtime.") {
continue
}
}
if !stack[i].match(locations[j]) {
return false
}
i++
}
return i >= len(stack)
}
func TestStacktraceGoroutine(t *testing.T) {
skipOn(t, "broken - cgo stacktraces", "darwin", "arm64")
mainStack := []loc{{14, "main.stacktraceme"}, {29, "main.main"}}
if goversion.VersionAfterOrEqual(runtime.Version(), 1, 11) {
mainStack[0].line = 15
}
agoroutineStacks := [][]loc{
{{8, "main.agoroutine"}},
{{9, "main.agoroutine"}},
{{10, "main.agoroutine"}},
}
lenient := 0
if runtime.GOOS == "windows" {
lenient = 1
}
protest.AllowRecording(t)
withTestProcess("goroutinestackprog", t, func(p *proc.Target, fixture protest.Fixture) {
bp := setFunctionBreakpoint(p, t, "main.stacktraceme")
assertNoError(p.Continue(), t, "Continue()")
gs, _, err := proc.GoroutinesInfo(p, 0, 0)
assertNoError(err, t, "GoroutinesInfo")
agoroutineCount := 0
mainCount := 0
for i, g := range gs {
locations, err := g.Stacktrace(40, 0)
if err != nil {
// On windows we do not have frame information for goroutines doing system calls.
t.Logf("Could not retrieve goroutine stack for goid=%d: %v", g.ID, err)
continue
}
if stackMatch(mainStack, locations, false) {
mainCount++
}
found := false
for _, agoroutineStack := range agoroutineStacks {
if stackMatch(agoroutineStack, locations, true) {
found = true
}
}
if found {
agoroutineCount++
} else {
t.Logf("Non-goroutine stack: %d (%d)", i, len(locations))
for i := range locations {
name := ""
if locations[i].Call.Fn != nil {
name = locations[i].Call.Fn.Name
}
t.Logf("\t%s:%d %s (%#x) %x %v\n", locations[i].Call.File, locations[i].Call.Line, name, locations[i].Current.PC, locations[i].FrameOffset(), locations[i].SystemStack)
}
}
}
if mainCount != 1 {
t.Fatalf("Main goroutine stack not found %d", mainCount)
}
if agoroutineCount < 10-lenient {
t.Fatalf("Goroutine stacks not found (%d)", agoroutineCount)
}
p.ClearBreakpoint(bp.Addr)
p.Continue()
})
}
func TestKill(t *testing.T) {
skipOn(t, "N/A", "lldb") // k command presumably works but leaves the process around?
withTestProcess("testprog", t, func(p *proc.Target, fixture protest.Fixture) {
if err := p.Detach(true); err != nil {
t.Fatal(err)
}
if valid, _ := p.Valid(); valid {
t.Fatal("expected process to have exited")
}
if runtime.GOOS == "linux" {
if runtime.GOARCH == "arm64" {
				// there is no synchronization between the signal being sent (and handled by the tracee) and opening /proc/%d/, so this may fail on arm64
return
}
_, err := os.Open(fmt.Sprintf("/proc/%d/", p.Pid()))
if err == nil {
t.Fatal("process has not exited", p.Pid())
}
}
})
}
func testGSupportFunc(name string, t *testing.T, p *proc.Target, fixture protest.Fixture) {
bp := setFunctionBreakpoint(p, t, "main.main")
assertNoError(p.Continue(), t, name+": Continue()")
g, err := proc.GetG(p.CurrentThread())
assertNoError(err, t, name+": GetG()")
if g == nil {
t.Fatal(name + ": g was nil")
}
t.Logf(name+": g is: %v", g)
p.ClearBreakpoint(bp.Addr)
}
func TestGetG(t *testing.T) {
withTestProcess("testprog", t, func(p *proc.Target, fixture protest.Fixture) {
testGSupportFunc("nocgo", t, p, fixture)
})
// On OSX with Go < 1.5 CGO is not supported due to: https://github.com/golang/go/issues/8973
if !goversion.VersionAfterOrEqual(runtime.Version(), 1, 5) {
skipOn(t, "upstream issue", "darwin")
}
protest.MustHaveCgo(t)
protest.AllowRecording(t)
withTestProcess("cgotest", t, func(p *proc.Target, fixture protest.Fixture) {
testGSupportFunc("cgo", t, p, fixture)
})
}
func TestContinueMulti(t *testing.T) {
protest.AllowRecording(t)
withTestProcess("integrationprog", t, func(p *proc.Target, fixture protest.Fixture) {
bp1 := setFunctionBreakpoint(p, t, "main.main")
bp2 := setFunctionBreakpoint(p, t, "main.sayhi")
mainCount := 0
sayhiCount := 0
for {
err := p.Continue()
if valid, _ := p.Valid(); !valid {
break
}
assertNoError(err, t, "Continue()")
if bp := p.CurrentThread().Breakpoint(); bp.LogicalID() == bp1.LogicalID() {
mainCount++
}
if bp := p.CurrentThread().Breakpoint(); bp.LogicalID() == bp2.LogicalID() {
sayhiCount++
}
}
if mainCount != 1 {
t.Fatalf("Main breakpoint hit wrong number of times: %d\n", mainCount)
}
if sayhiCount != 3 {
t.Fatalf("Sayhi breakpoint hit wrong number of times: %d\n", sayhiCount)
}
})
}
func TestBreakpointOnFunctionEntry(t *testing.T) {
testseq2(t, "testprog", "main.main", []seqTest{{contContinue, 17}})
}
func TestProcessReceivesSIGCHLD(t *testing.T) {
protest.AllowRecording(t)
withTestProcess("sigchldprog", t, func(p *proc.Target, fixture protest.Fixture) {
err := p.Continue()
_, ok := err.(proc.ErrProcessExited)
if !ok {
t.Fatalf("Continue() returned unexpected error type %v", err)
}
})
}
func TestIssue239(t *testing.T) {
withTestProcess("is sue239", t, func(p *proc.Target, fixture protest.Fixture) {
setFileBreakpoint(p, t, fixture.Source, 17)
assertNoError(p.Continue(), t, "Continue()")
})
}
func findFirstNonRuntimeFrame(p *proc.Target) (proc.Stackframe, error) {
frames, err := proc.ThreadStacktrace(p.CurrentThread(), 10)
if err != nil {
return proc.Stackframe{}, err
}
for _, frame := range frames {
if frame.Current.Fn != nil && !strings.HasPrefix(frame.Current.Fn.Name, "runtime.") {
return frame, nil
}
}
return proc.Stackframe{}, fmt.Errorf("non-runtime frame not found")
}
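// evalVariableOrError evaluates symbol in the scope of the current goroutine; with the "rr" backend the scope is built from the first non-runtime frame instead.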
func evalVariableOrError(p *proc.Target, symbol string) (*proc.Variable, error) {
var scope *proc.EvalScope
var err error
if testBackend == "rr" {
var frame proc.Stackframe
frame, err = findFirstNonRuntimeFrame(p)
if err == nil {
scope = proc.FrameToScope(p, p.Memory(), nil, frame)
}
} else {
scope, err = proc.GoroutineScope(p, p.CurrentThread())
}
if err != nil {
return nil, err
}
return scope.EvalExpression(symbol, normalLoadConfig)
}
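// evalVariable is like evalVariableOrError but fails the test if the evaluation returns an error.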
func evalVariable(p *proc.Target, t testing.TB, symbol string) *proc.Variable {
v, err := evalVariableOrError(p, symbol)
if err != nil {
_, file, line, _ := runtime.Caller(1)
fname := filepath.Base(file)
t.Fatalf("%s:%d: EvalVariable(%q): %v", fname, line, symbol, err)
}
return v
}
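// setVariable assigns value to symbol in the scope of the current goroutine.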
func setVariable(p *proc.Target, symbol, value string) error {
scope, err := proc.GoroutineScope(p, p.CurrentThread())
if err != nil {
return err
}
return scope.SetVariable(symbol, value)
}
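// TestVariableEvaluation checks the kind, value, length, capacity and number of children reported for a set of variables in the testvariables fixture.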
func TestVariableEvaluation(t *testing.T) {
protest.AllowRecording(t)
testcases := []struct {
name string
st reflect.Kind
value interface{}
length, cap int64
childrenlen int
}{
{"a1", reflect.String, "foofoofoofoofoofoo", 18, 0, 0},
{"a11", reflect.Array, nil, 3, -1, 3},
{"a12", reflect.Slice, nil, 2, 2, 2},
{"a13", reflect.Slice, nil, 3, 3, 3},
{"a2", reflect.Int, int64(6), 0, 0, 0},
{"a3", reflect.Float64, float64(7.23), 0, 0, 0},
{"a4", reflect.Array, nil, 2, -1, 2},
{"a5", reflect.Slice, nil, 5, 5, 5},
{"a6", reflect.Struct, nil, 2, 0, 2},
{"a7", reflect.Ptr, nil, 1, 0, 1},
{"a8", reflect.Struct, nil, 2, 0, 2},
{"a9", reflect.Ptr, nil, 1, 0, 1},
{"baz", reflect.String, "bazburzum", 9, 0, 0},
{"neg", reflect.Int, int64(-1), 0, 0, 0},
{"f32", reflect.Float32, float64(float32(1.2)), 0, 0, 0},
{"c64", reflect.Complex64, complex128(complex64(1 + 2i)), 0, 0, 0},
{"c128", reflect.Complex128, complex128(2 + 3i), 0, 0, 0},
{"a6.Baz", reflect.Int, int64(8), 0, 0, 0},
{"a7.Baz", reflect.Int, int64(5), 0, 0, 0},
{"a8.Baz", reflect.String, "feh", 3, 0, 0},
{"a8", reflect.Struct, nil, 2, 0, 2},
{"i32", reflect.Array, nil, 2, -1, 2},
{"b1", reflect.Bool, true, 0, 0, 0},
{"b2", reflect.Bool, false, 0, 0, 0},
{"f", reflect.Func, "main.barfoo", 0, 0, 0},
{"ba", reflect.Slice, nil, 200, 200, 64},
}
withTestProcess("testvariables", t, func(p *proc.Target, fixture protest.Fixture) {
assertNoError(p.Continue(), t, "Continue() returned an error")
for _, tc := range testcases {
v := evalVariable(p, t, tc.name)
if v.Kind != tc.st {
t.Fatalf("%s simple type: expected: %s got: %s", tc.name, tc.st, v.Kind.String())
}
if v.Value == nil && tc.value != nil {
t.Fatalf("%s value: expected: %v got: %v", tc.name, tc.value, v.Value)
} else {
switch v.Kind {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
x, _ := constant.Int64Val(v.Value)
if y, ok := tc.value.(int64); !ok || x != y {
t.Fatalf("%s value: expected: %v got: %v", tc.name, tc.value, v.Value)
}
case reflect.Float32, reflect.Float64:
x, _ := constant.Float64Val(v.Value)
if y, ok := tc.value.(float64); !ok || x != y {
t.Fatalf("%s value: expected: %v got: %v", tc.name, tc.value, v.Value)
}
case reflect.Complex64, reflect.Complex128:
xr, _ := constant.Float64Val(constant.Real(v.Value))
xi, _ := constant.Float64Val(constant.Imag(v.Value))
if y, ok := tc.value.(complex128); !ok || complex(xr, xi) != y {
t.Fatalf("%s value: expected: %v got: %v", tc.name, tc.value, v.Value)
}
case reflect.String:
if y, ok := tc.value.(string); !ok || constant.StringVal(v.Value) != y {
t.Fatalf("%s value: expected: %v got: %v", tc.name, tc.value, v.Value)
}
}
}
if v.Len != tc.length {
t.Fatalf("%s len: expected: %d got: %d", tc.name, tc.length, v.Len)
}
if v.Cap != tc.cap {
t.Fatalf("%s cap: expected: %d got: %d", tc.name, tc.cap, v.Cap)
}
if len(v.Children) != tc.childrenlen {
t.Fatalf("%s children len: expected %d got: %d", tc.name, tc.childrenlen, len(v.Children))
}
}
})
}
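// TestFrameEvaluation checks that expressions can be evaluated in the scope of goroutines other than the current one and in the scope of non-topmost stack frames.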
func TestFrameEvaluation(t *testing.T) {
protest.AllowRecording(t)
lenient := false
if runtime.GOOS == "windows" {
lenient = true
}
withTestProcess("goroutinestackprog", t, func(p *proc.Target, fixture protest.Fixture) {
setFunctionBreakpoint(p, t, "main.stacktraceme")
assertNoError(p.Continue(), t, "Continue()")
t.Logf("stopped on thread %d, goroutine: %#v", p.CurrentThread().ThreadID(), p.SelectedGoroutine())
// Testing evaluation on goroutines
gs, _, err := proc.GoroutinesInfo(p, 0, 0)
assertNoError(err, t, "GoroutinesInfo")
found := make([]bool, 10)
for _, g := range gs {
frame := -1
frames, err := g.Stacktrace(40, 0)
if err != nil {
t.Logf("could not stacktrace goroutine %d: %v\n", g.ID, err)
continue
}
t.Logf("Goroutine %d %#v", g.ID, g.Thread)
logStacktrace(t, p, frames)
for i := range frames {
if frames[i].Call.Fn != nil && frames[i].Call.Fn.Name == "main.agoroutine" {
frame = i
break
}
}
if frame < 0 {
t.Logf("Goroutine %d: could not find correct frame", g.ID)
continue
}
scope, err := proc.ConvertEvalScope(p, g.ID, frame, 0)
assertNoError(err, t, "ConvertEvalScope()")
t.Logf("scope = %v", scope)
v, err := scope.EvalExpression("i", normalLoadConfig)
t.Logf("v = %v", v)
if err != nil {
t.Logf("Goroutine %d: %v\n", g.ID, err)
continue
}
vval, _ := constant.Int64Val(v.Value)
found[vval] = true
}
for i := range found {
if !found[i] {
if lenient {
lenient = false
} else {
t.Fatalf("Goroutine %d not found\n", i)
}
}
}
// Testing evaluation on frames
assertNoError(p.Continue(), t, "Continue() 2")
g, err := proc.GetG(p.CurrentThread())
assertNoError(err, t, "GetG()")
frames, err := g.Stacktrace(40, 0)
t.Logf("Goroutine %d %#v", g.ID, g.Thread)
logStacktrace(t, p, frames)
for i := 0; i <= 3; i++ {
scope, err := proc.ConvertEvalScope(p, g.ID, i+1, 0)
assertNoError(err, t, fmt.Sprintf("ConvertEvalScope() on frame %d", i+1))
v, err := scope.EvalExpression("n", normalLoadConfig)
assertNoError(err, t, fmt.Sprintf("EvalVariable() on frame %d", i+1))
n, _ := constant.Int64Val(v.Value)
t.Logf("frame %d n %d\n", i+1, n)
if n != int64(3-i) {
t.Fatalf("On frame %d value of n is %d (not %d)", i+1, n, 3-i)
}
}
})
}
func TestThreadFrameEvaluation(t *testing.T) {
skipOn(t, "upstream issue - https://github.com/golang/go/issues/29322", "pie")
deadlockBp := proc.FatalThrow
if !goversion.VersionAfterOrEqual(runtime.Version(), 1, 11) {
t.SkipNow()
}
withTestProcess("testdeadlock", t, func(p *proc.Target, fixture protest.Fixture) {
assertNoError(p.Continue(), t, "Continue()")
bp := p.CurrentThread().Breakpoint()
if bp.Breakpoint == nil || bp.Name != deadlockBp {
t.Fatalf("did not stop at deadlock breakpoint %v", bp)
}
// There is no selected goroutine during a deadlock, so the scope will
// be a thread scope.
scope, err := proc.ConvertEvalScope(p, 0, 0, 0)
assertNoError(err, t, "ConvertEvalScope() on frame 0")
_, err = scope.EvalExpression("s", normalLoadConfig)
assertNoError(err, t, "EvalVariable(\"s\") on frame 0")
})
}
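// TestPointerSetting checks that a pointer variable can be retargeted with setVariable and that changes to the pointee are visible through it.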
func TestPointerSetting(t *testing.T) {
withTestProcess("testvariables2", t, func(p *proc.Target, fixture protest.Fixture) {
assertNoError(p.Continue(), t, "Continue() returned an error")
pval := func(n int64) {
variable := evalVariable(p, t, "p1")
c0val, _ := constant.Int64Val(variable.Children[0].Value)
if c0val != n {
t.Fatalf("Wrong value of p1, *%d expected *%d", c0val, n)
}
}
pval(1)
// change p1 to point to i2
scope, err := proc.GoroutineScope(p, p.CurrentThread())
assertNoError(err, t, "Scope()")
i2addr, err := scope.EvalExpression("i2", normalLoadConfig)
assertNoError(err, t, "EvalExpression()")
assertNoError(setVariable(p, "p1", fmt.Sprintf("(*int)(0x%x)", i2addr.Addr)), t, "SetVariable()")
pval(2)
// change the value of i2 check that p1 also changes
assertNoError(setVariable(p, "i2", "5"), t, "SetVariable()")
pval(5)
})
}
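// TestVariableFunctionScoping checks that a variable can no longer be evaluated once execution has moved past its scope.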
func TestVariableFunctionScoping(t *testing.T) {
withTestProcess("testvariables", t, func(p *proc.Target, fixture protest.Fixture) {
err := p.Continue()
assertNoError(err, t, "Continue() returned an error")
evalVariable(p, t, "a1")
evalVariable(p, t, "a2")
// Move scopes; a1 still exists here but a2 does not.
err = p.Continue()
assertNoError(err, t, "Continue() returned an error")
evalVariable(p, t, "a1")
_, err = evalVariableOrError(p, "a2")
if err == nil {
t.Fatalf("Can eval out of scope variable a2")
}
})
}
func TestRecursiveStructure(t *testing.T) {
protest.AllowRecording(t)
withTestProcess("testvariables2", t, func(p *proc.Target, fixture protest.Fixture) {
assertNoError(p.Continue(), t, "Continue()")
v := evalVariable(p, t, "aas")
t.Logf("v: %v\n", v)
})
}
func TestIssue316(t *testing.T) {
// A pointer loop that includes one interface should not send dlv into an infinite loop
protest.AllowRecording(t)
withTestProcess("testvariables2", t, func(p *proc.Target, fixture protest.Fixture) {
assertNoError(p.Continue(), t, "Continue()")
evalVariable(p, t, "iface5")
})
}
func TestIssue325(t *testing.T) {
// nil pointer dereference when evaluating interfaces to function pointers
protest.AllowRecording(t)
withTestProcess("testvariables2", t, func(p *proc.Target, fixture protest.Fixture) {
assertNoError(p.Continue(), t, "Continue()")
iface2fn1v := evalVariable(p, t, "iface2fn1")
t.Logf("iface2fn1: %v\n", iface2fn1v)
iface2fn2v := evalVariable(p, t, "iface2fn2")
t.Logf("iface2fn2: %v\n", iface2fn2v)
})
}
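// TestBreakpointCounts checks that TotalHitCount and the per-goroutine HitCount map are updated correctly for a breakpoint hit 100 times by each of two goroutines.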
func TestBreakpointCounts(t *testing.T) {
skipOn(t, "broken", "freebsd")
protest.AllowRecording(t)
withTestProcess("bpcountstest", t, func(p *proc.Target, fixture protest.Fixture) {
bp := setFileBreakpoint(p, t, fixture.Source, 12)
for {
if err := p.Continue(); err != nil {
if _, exited := err.(proc.ErrProcessExited); exited {
break
}
assertNoError(err, t, "Continue()")
}
}
t.Logf("TotalHitCount: %d", bp.UserBreaklet().TotalHitCount)
if bp.UserBreaklet().TotalHitCount != 200 {
t.Fatalf("Wrong TotalHitCount for the breakpoint (%d)", bp.UserBreaklet().TotalHitCount)
}
if len(bp.UserBreaklet().HitCount) != 2 {
t.Fatalf("Wrong number of goroutines for breakpoint (%d)", len(bp.UserBreaklet().HitCount))
}
for _, v := range bp.UserBreaklet().HitCount {
if v != 100 {
t.Fatalf("Wrong HitCount for breakpoint (%v)", bp.UserBreaklet().HitCount)
}
}
})
}
func BenchmarkArray(b *testing.B) {
// Each bencharr struct is 128 bytes; bencharr is 64 elements long.
b.SetBytes(int64(64 * 128))
withTestProcess("testvariables2", b, func(p *proc.Target, fixture protest.Fixture) {
assertNoError(p.Continue(), b, "Continue()")
b.ResetTimer()
for i := 0; i < b.N; i++ {
evalVariable(p, b, "bencharr")
}
})
}
const doTestBreakpointCountsWithDetection = false
func TestBreakpointCountsWithDetection(t *testing.T) {
if !doTestBreakpointCountsWithDetection {
return
}
m := map[int64]int64{}
protest.AllowRecording(t)
withTestProcess("bpcountstest", t, func(p *proc.Target, fixture protest.Fixture) {
bp := setFileBreakpoint(p, t, fixture.Source, 12)
for {
if err := p.Continue(); err != nil {
if _, exited := err.(proc.ErrProcessExited); exited {
break
}
assertNoError(err, t, "Continue()")
}
for _, th := range p.ThreadList() {
if bp := th.Breakpoint(); bp.Breakpoint == nil {
continue
}
scope, err := proc.GoroutineScope(p, th)
assertNoError(err, t, "Scope()")
v, err := scope.EvalExpression("i", normalLoadConfig)
assertNoError(err, t, "evalVariable")
i, _ := constant.Int64Val(v.Value)
v, err = scope.EvalExpression("id", normalLoadConfig)
assertNoError(err, t, "evalVariable")
id, _ := constant.Int64Val(v.Value)
m[id] = i
}
total := int64(0)
for i := range m {
total += m[i] + 1
}
if uint64(total) != bp.UserBreaklet().TotalHitCount {
t.Fatalf("Mismatched total count %d %d\n", total, bp.UserBreaklet().TotalHitCount)
}
}
t.Logf("TotalHitCount: %d", bp.UserBreaklet().TotalHitCount)
if bp.UserBreaklet().TotalHitCount != 200 {
t.Fatalf("Wrong TotalHitCount for the breakpoint (%d)", bp.UserBreaklet().TotalHitCount)
}
if len(bp.UserBreaklet().HitCount) != 2 {
t.Fatalf("Wrong number of goroutines for breakpoint (%d)", len(bp.UserBreaklet().HitCount))
}
for _, v := range bp.UserBreaklet().HitCount {
if v != 100 {
t.Fatalf("Wrong HitCount for breakpoint (%v)", bp.UserBreaklet().HitCount)
}
}
})
}
func BenchmarkArrayPointer(b *testing.B) {
// Each bencharr struct is 128 bytes; benchparr is an array of 64 pointers to bencharr.
// Each read loads 64 bencharr structs plus the 64 pointers of benchparr.
b.SetBytes(int64(64*128 + 64*8))
withTestProcess("testvariables2", b, func(p *proc.Target, fixture protest.Fixture) {
assertNoError(p.Continue(), b, "Continue()")
b.ResetTimer()
for i := 0; i < b.N; i++ {
evalVariable(p, b, "bencharr")
}
})
}
func BenchmarkMap(b *testing.B) {
// m1 contains 41 entries; each value consists of 2 int fields (2 * 8 bytes) and each key is a string
// with an average length of 9 characters.
// Reading the strings and the map structure imposes an overhead that we ignore here.
b.SetBytes(int64(41 * (2*8 + 9)))
withTestProcess("testvariables2", b, func(p *proc.Target, fixture protest.Fixture) {
assertNoError(p.Continue(), b, "Continue()")
b.ResetTimer()
for i := 0; i < b.N; i++ {
evalVariable(p, b, "m1")
}
})
}
func BenchmarkGoroutinesInfo(b *testing.B) {
withTestProcess("testvariables2", b, func(p *proc.Target, fixture protest.Fixture) {
assertNoError(p.Continue(), b, "Continue()")
b.ResetTimer()
for i := 0; i < b.N; i++ {
p.ClearCaches()
_, _, err := proc.GoroutinesInfo(p, 0, 0)
assertNoError(err, b, "GoroutinesInfo")
}
})
}
func TestIssue262(t *testing.T) {
// Continue does not work when the current breakpoint is set on a NOP instruction
protest.AllowRecording(t)
withTestProcess("issue262", t, func(p *proc.Target, fixture protest.Fixture) {
setFileBreakpoint(p, t, fixture.Source, 11)
assertNoError(p.Continue(), t, "Continue()")
err := p.Continue()
if err == nil {
t.Fatalf("No error on second continue")
}
_, exited := err.(proc.ErrProcessExited)
if !exited {
t.Fatalf("Process did not exit after second continue: %v", err)
}
})
}
func TestIssue305(t *testing.T) {
// If 'next' hits a breakpoint on the goroutine it's stepping through,
// the internal breakpoints aren't cleared, preventing further use of
// the 'next' command.
protest.AllowRecording(t)
withTestProcess("issue305", t, func(p *proc.Target, fixture protest.Fixture) {
setFileBreakpoint(p, t, fixture.Source, 5)
assertNoError(p.Continue(), t, "Continue()")
assertNoError(p.Next(), t, "Next() 1")
assertNoError(p.Next(), t, "Next() 2")
assertNoError(p.Next(), t, "Next() 3")
assertNoError(p.Next(), t, "Next() 4")
assertNoError(p.Next(), t, "Next() 5")
})
}
func TestPointerLoops(t *testing.T) {
// Pointer loops through map entries, pointers and slices
// Regression test for issue #341
protest.AllowRecording(t)
withTestProcess("testvariables2", t, func(p *proc.Target, fixture protest.Fixture) {
assertNoError(p.Continue(), t, "Continue()")
for _, expr := range []string{"mapinf", "ptrinf", "sliceinf"} {
t.Logf("requesting %s", expr)
v := evalVariable(p, t, expr)
t.Logf("%s: %v\n", expr, v)
}
})
}
func BenchmarkLocalVariables(b *testing.B) {
withTestProcess("testvariables", b, func(p *proc.Target, fixture protest.Fixture) {
assertNoError(p.Continue(), b, "Continue() returned an error")
scope, err := proc.GoroutineScope(p, p.CurrentThread())
assertNoError(err, b, "Scope()")
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := scope.LocalVariables(normalLoadConfig)
assertNoError(err, b, "LocalVariables()")
}
})
}
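// TestCondBreakpoint checks that a breakpoint with the condition n == 7 only stops execution when n is 7.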
func TestCondBreakpoint(t *testing.T) {
skipOn(t, "broken", "freebsd")
protest.AllowRecording(t)
withTestProcess("parallel_next", t, func(p *proc.Target, fixture protest.Fixture) {
bp := setFileBreakpoint(p, t, fixture.Source, 9)
bp.UserBreaklet().Cond = &ast.BinaryExpr{
Op: token.EQL,
X: &ast.Ident{Name: "n"},
Y: &ast.BasicLit{Kind: token.INT, Value: "7"},
}
assertNoError(p.Continue(), t, "Continue()")
nvar := evalVariable(p, t, "n")
n, _ := constant.Int64Val(nvar.Value)
if n != 7 {
t.Fatalf("Stopped on wrong goroutine %d\n", n)
}
})
}
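// TestCondBreakpointError checks the error returned by Continue when a breakpoint condition references a nonexistent variable, and that execution proceeds normally once the condition is fixed.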
func TestCondBreakpointError(t *testing.T) {
skipOn(t, "broken", "freebsd")
protest.AllowRecording(t)
withTestProcess("parallel_next", t, func(p *proc.Target, fixture protest.Fixture) {
bp := setFileBreakpoint(p, t, fixture.Source, 9)
bp.UserBreaklet().Cond = &ast.BinaryExpr{
Op: token.EQL,
X: &ast.Ident{Name: "nonexistentvariable"},
Y: &ast.BasicLit{Kind: token.INT, Value: "7"},
}
err := p.Continue()
if err == nil {
t.Fatalf("No error on first Continue()")
}
if err.Error() != "error evaluating expression: could not find symbol value for nonexistentvariable" && err.Error() != "multiple errors evaluating conditions" {
t.Fatalf("Unexpected error on first Continue(): %v", err)
}
bp.UserBreaklet().Cond = &ast.BinaryExpr{
Op: token.EQL,
X: &ast.Ident{Name: "n"},
Y: &ast.BasicLit{Kind: token.INT, Value: "7"},
}
err = p.Continue()
if err != nil {
if _, exited := err.(proc.ErrProcessExited); !exited {
t.Fatalf("Unexpected error on second Continue(): %v", err)
}
} else {
nvar := evalVariable(p, t, "n")
n, _ := constant.Int64Val(nvar.Value)
if n != 7 {
t.Fatalf("Stopped on wrong goroutine %d\n", n)
}
}
})
}
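// TestHitCondBreakpointEQ checks a breakpoint with hit condition == 3: it should trigger only on its third hit.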
func TestHitCondBreakpointEQ(t *testing.T) {
withTestProcess("break", t, func(p *proc.Target, fixture protest.Fixture) {
bp := setFileBreakpoint(p, t, fixture.Source, 7)
bp.UserBreaklet().HitCond = &struct {
Op token.Token
Val int
}{token.EQL, 3}
assertNoError(p.Continue(), t, "Continue()")
ivar := evalVariable(p, t, "i")
i, _ := constant.Int64Val(ivar.Value)
if i != 3 {
t.Fatalf("Stopped on wrong hitcount %d\n", i)
}
err := p.Continue()
if _, exited := err.(proc.ErrProcessExited); !exited {
t.Fatalf("Unexpected error on Continue(): %v", err)
}
})
}
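// TestHitCondBreakpointGEQ checks a breakpoint with hit condition >= 3: it should trigger on every hit from the third one onwards.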
func TestHitCondBreakpointGEQ(t *testing.T) {
protest.AllowRecording(t)
withTestProcess("break", t, func(p *proc.Target, fixture protest.Fixture) {
bp := setFileBreakpoint(p, t, fixture.Source, 7)
bp.UserBreaklet().HitCond = &struct {
Op token.Token
Val int
}{token.GEQ, 3}
for it := 3; it <= 10; it++ {
assertNoError(p.Continue(), t, "Continue()")
ivar := evalVariable(p, t, "i")
i, _ := constant.Int64Val(ivar.Value)
if int(i) != it {
t.Fatalf("Stopped on wrong hitcount %d\n", i)
}
}
assertNoError(p.Continue(), t, "Continue()")
})
}
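// TestHitCondBreakpointREM checks a breakpoint with hit condition % 2: it should trigger only on even hit counts.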
func TestHitCondBreakpointREM(t *testing.T) {
protest.AllowRecording(t)
withTestProcess("break", t, func(p *proc.Target, fixture protest.Fixture) {
bp := setFileBreakpoint(p, t, fixture.Source, 7)
bp.UserBreaklet().HitCond = &struct {
Op token.Token
Val int
}{token.REM, 2}
for it := 2; it <= 10; it += 2 {
assertNoError(p.Continue(), t, "Continue()")
ivar := evalVariable(p, t, "i")
i, _ := constant.Int64Val(ivar.Value)
if int(i) != it {
t.Fatalf("Stopped on wrong hitcount %d\n", i)
}
}
err := p.Continue()
if _, exited := err.(proc.ErrProcessExited); !exited {
t.Fatalf("Unexpected error on Continue(): %v", err)
}
})
}
func TestIssue356(t *testing.T) {
// slice with a typedef does not get printed correctly
protest.AllowRecording(t)
withTestProcess("testvariables2", t, func(p *proc.Target, fixture protest.Fixture) {
assertNoError(p.Continue(), t, "Continue() returned an error")
mmvar := evalVariable(p, t, "mainMenu")
if mmvar.Kind != reflect.Slice {
t.Fatalf("Wrong kind for mainMenu: %v\n", mmvar.Kind)
}
})
}
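// TestStepIntoFunction checks that Step enters main.callme and stops past the prologue, on line 8 of the fixture.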
func TestStepIntoFunction(t *testing.T) {
withTestProcess("teststep", t, func(p *proc.Target, fixture protest.Fixture) {
// Continue until breakpoint
assertNoError(p.Continue(), t, "Continue() returned an error")
// Step into function
assertNoError(p.Step(), t, "Step() returned an error")
// We should now be inside the function.
loc, err := p.CurrentThread().Location()
if err != nil {
t.Fatal(err)
}
if loc.Fn.Name != "main.callme" {
t.Fatalf("expected to be within the 'callme' function, was in %s instead", loc.Fn.Name)
}
if !strings.Contains(loc.File, "teststep") {
t.Fatalf("debugger stopped at incorrect location: %s:%d", loc.File, loc.Line)
}
if loc.Line != 8 {
t.Fatalf("debugger stopped at incorrect line: %d", loc.Line)
}
})
}
func TestIssue332_Part1(t *testing.T) {
// Next shouldn't step inside a function call
protest.AllowRecording(t)
withTestProcess("issue332", t, func(p *proc.Target, fixture protest.Fixture) {
setFileBreakpoint(p, t, fixture.Source, 8)
assertNoError(p.Continue(), t, "Continue()")
assertNoError(p.Next(), t, "first Next()")
locations, err := proc.ThreadStacktrace(p.CurrentThread(), 2)
assertNoError(err, t, "Stacktrace()")
if locations[0].Call.Fn == nil {
t.Fatalf("Not on a function")
}
if locations[0].Call.Fn.Name != "main.main" {
t.Fatalf("Not on main.main after Next: %s (%s:%d)", locations[0].Call.Fn.Name, locations[0].Call.File, locations[0].Call.Line)
}
if locations[0].Call.Line != 9 {
t.Fatalf("Not on line 9 after Next: %s (%s:%d)", locations[0].Call.Fn.Name, locations[0].Call.File, locations[0].Call.Line)
}
})
}
func TestIssue332_Part2(t *testing.T) {
// Step should skip a function's prologue
// In some parts of the prologue, for some functions, the FDE data is incorrect
// which leads to 'next' and 'stack' failing with error "could not find FDE for PC: <garbage>"
// because the incorrect FDE data leads to reading the wrong stack address as the return address
protest.AllowRecording(t)
withTestProcess("issue332", t, func(p *proc.Target, fixture protest.Fixture) {
setFileBreakpoint(p, t, fixture.Source, 8)
assertNoError(p.Continue(), t, "Continue()")
// step until we enter changeMe
for {
assertNoError(p.Step(), t, "Step()")
locations, err := proc.ThreadStacktrace(p.CurrentThread(), 2)
assertNoError(err, t, "Stacktrace()")
if locations[0].Call.Fn == nil {
t.Fatalf("Not on a function")
}
if locations[0].Call.Fn.Name == "main.changeMe" {
break
}
}
regs, err := p.CurrentThread().Registers()
assertNoError(err, t, "Registers()")
pc := regs.PC()
pcAfterPrologue := findFunctionLocation(p, t, "main.changeMe")
if pcAfterPrologue == p.BinInfo().LookupFunc["main.changeMe"].Entry {
t.Fatalf("main.changeMe and main.changeMe:0 are the same (%x)", pcAfterPrologue)
}
if pc != pcAfterPrologue {
t.Fatalf("Step did not skip the prologue: current pc: %x, first instruction after prologue: %x", pc, pcAfterPrologue)
}
assertNoError(p.Next(), t, "first Next()")
assertNoError(p.Next(), t, "second Next()")
assertNoError(p.Next(), t, "third Next()")
err = p.Continue()
if _, exited := err.(proc.ErrProcessExited); !exited {
assertNoError(err, t, "final Continue()")
}
})
}
func TestIssue414(t *testing.T) {
skipOn(t, "broken", "linux", "386", "pie") // test occasionally hangs on linux/386/pie
// Stepping until the program exits
protest.AllowRecording(t)
withTestProcess("math", t, func(p *proc.Target, fixture protest.Fixture) {
setFileBreakpoint(p, t, fixture.Source, 9)
assertNoError(p.Continue(), t, "Continue()")
for {
pc := currentPC(p, t)
f, ln := currentLineNumber(p, t)
t.Logf("at %s:%d %#x\n", f, ln, pc)
var err error
// Stepping through the runtime is not generally safe so after we are out
// of main.main just use Next.
// See: https://github.com/go-delve/delve/pull/2082
if f == fixture.Source {
err = p.Step()
} else {
err = p.Next()
}
if err != nil {
if _, exited := err.(proc.ErrProcessExited); exited {
break
}
}
assertNoError(err, t, "Step()")
}
})
}
func TestPackageVariables(t *testing.T) {
var skippedVariable = map[string]bool{
"runtime.uint16Eface": true,
"runtime.uint32Eface": true,
"runtime.uint64Eface": true,
"runtime.stringEface": true,
"runtime.sliceEface": true,
}
protest.AllowRecording(t)
withTestProcess("testvariables", t, func(p *proc.Target, fixture protest.Fixture) {
err := p.Continue()
assertNoError(err, t, "Continue()")
scope, err := proc.GoroutineScope(p, p.CurrentThread())
assertNoError(err, t, "Scope()")
vars, err := scope.PackageVariables(normalLoadConfig)
assertNoError(err, t, "PackageVariables()")
failed := false
for _, v := range vars {
if skippedVariable[v.Name] {
continue
}
if v.Unreadable != nil && v.Unreadable.Error() != "no location attribute Location" {
failed = true
t.Logf("Unreadable variable %s: %v", v.Name, v.Unreadable)
}
}
if failed {
t.Fatalf("previous errors")
}
})
}
func TestIssue149(t *testing.T) {
ver, _ := goversion.Parse(runtime.Version())
if ver.Major > 0 && !ver.AfterOrEqual(goversion.GoVersion{Major: 1, Minor: 7, Rev: -1}) {
return
}
// setting breakpoint on break statement
withTestProcess("break", t, func(p *proc.Target, fixture protest.Fixture) {
findFileLocation(p, t, fixture.Source, 8)
})
}
func TestPanicBreakpoint(t *testing.T) {
protest.AllowRecording(t)
withTestProcess("panic", t, func(p *proc.Target, fixture protest.Fixture) {
assertNoError(p.Continue(), t, "Continue()")
bp := p.CurrentThread().Breakpoint()
if bp.Breakpoint == nil || bp.Name != proc.UnrecoveredPanic {
t.Fatalf("not on unrecovered-panic breakpoint: %v", bp)
}
})
}
func TestCmdLineArgs(t *testing.T) {
expectSuccess := func(p *proc.Target, fixture protest.Fixture) {
err := p.Continue()
bp := p.CurrentThread().Breakpoint()
if bp.Breakpoint != nil && bp.Name == proc.UnrecoveredPanic {
t.Fatalf("testing args failed on unrecovered-panic breakpoint: %v", bp)
}
exit, exited := err.(proc.ErrProcessExited)
if !exited {
t.Fatalf("Process did not exit: %v", err)
} else {
if exit.Status != 0 {
t.Fatalf("process exited with invalid status %d", exit.Status)
}
}
}
expectPanic := func(p *proc.Target, fixture protest.Fixture) {
p.Continue()
bp := p.CurrentThread().Breakpoint()
if bp.Breakpoint == nil || bp.Name != proc.UnrecoveredPanic {
t.Fatalf("not on unrecovered-panic breakpoint: %v", bp)
}
}
// make sure multiple arguments (including one with spaces) are passed to the binary correctly
withTestProcessArgs("testargs", t, ".", []string{"test"}, 0, expectSuccess)
withTestProcessArgs("testargs", t, ".", []string{"-test"}, 0, expectPanic)
withTestProcessArgs("testargs", t, ".", []string{"test", "pass flag"}, 0, expectSuccess)
// check that arguments with spaces are *only* passed correctly when correctly called
withTestProcessArgs("testargs", t, ".", []string{"test pass", "flag"}, 0, expectPanic)
withTestProcessArgs("testargs", t, ".", []string{"test", "pass", "flag"}, 0, expectPanic)
withTestProcessArgs("testargs", t, ".", []string{"test pass flag"}, 0, expectPanic)
// and that invalid cases (wrong arguments or no arguments) panic
withTestProcess("testargs", t, expectPanic)
withTestProcessArgs("testargs", t, ".", []string{"invalid"}, 0, expectPanic)
withTestProcessArgs("testargs", t, ".", []string{"test", "invalid"}, 0, expectPanic)
withTestProcessArgs("testargs", t, ".", []string{"invalid", "pass flag"}, 0, expectPanic)
}
func TestIssue462(t *testing.T) {
skipOn(t, "broken", "windows") // Stacktrace of Goroutine 0 fails with an error
withTestProcess("testnextnethttp", t, func(p *proc.Target, fixture protest.Fixture) {
go func() {
// Wait for program to start listening.
for {
conn, err := net.Dial("tcp", "127.0.0.1:9191")
if err == nil {
conn.Close()
break
}
time.Sleep(50 * time.Millisecond)
}
p.RequestManualStop()
}()
assertNoError(p.Continue(), t, "Continue()")
_, err := proc.ThreadStacktrace(p.CurrentThread(), 40)
assertNoError(err, t, "Stacktrace()")
})
}
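// TestNextParked checks that Next works when the selected goroutine is parked (has no associated thread).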
func TestNextParked(t *testing.T) {
skipOn(t, "broken", "freebsd")
protest.AllowRecording(t)
withTestProcess("parallel_next", t, func(p *proc.Target, fixture protest.Fixture) {
bp := setFunctionBreakpoint(p, t, "main.sayhi")
// continue until a parked goroutine exists
var parkedg *proc.G
for parkedg == nil {
err := p.Continue()
if _, exited := err.(proc.ErrProcessExited); exited {
t.Log("could not find parked goroutine")
return
}
assertNoError(err, t, "Continue()")
gs, _, err := proc.GoroutinesInfo(p, 0, 0)
assertNoError(err, t, "GoroutinesInfo()")
// Search for a parked goroutine that we know for sure will have to be
// resumed before the program can exit. This is a parked goroutine that:
// 1. is executing main.sayhi
// 2. hasn't called wg.Done yet
for _, g := range gs {
if g.Thread != nil {
continue
}
frames, _ := g.Stacktrace(5, 0)
for _, frame := range frames {
// line 11 is the line where wg.Done is called
if frame.Current.Fn != nil && frame.Current.Fn.Name == "main.sayhi" && frame.Current.Line < 11 {
parkedg = g
break
}
}
if parkedg != nil {
break
}
}
}
assertNoError(p.SwitchGoroutine(parkedg), t, "SwitchGoroutine()")
p.ClearBreakpoint(bp.Addr)
assertNoError(p.Next(), t, "Next()")
if p.SelectedGoroutine().ID != parkedg.ID {
t.Fatalf("Next did not continue on the selected goroutine, expected %d got %d", parkedg.ID, p.SelectedGoroutine().ID)
}
})
}
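// TestStepParked checks that Step works when the selected goroutine is parked.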
func TestStepParked(t *testing.T) {
skipOn(t, "broken", "freebsd")
protest.AllowRecording(t)
withTestProcess("parallel_next", t, func(p *proc.Target, fixture protest.Fixture) {
bp := setFunctionBreakpoint(p, t, "main.sayhi")
// continue until a parked goroutine exists
var parkedg *proc.G
LookForParkedG:
for {
err := p.Continue()
if _, exited := err.(proc.ErrProcessExited); exited {
t.Log("could not find parked goroutine")
return
}
assertNoError(err, t, "Continue()")
gs, _, err := proc.GoroutinesInfo(p, 0, 0)
assertNoError(err, t, "GoroutinesInfo()")
for _, g := range gs {
if g.Thread == nil && g.CurrentLoc.Fn != nil && g.CurrentLoc.Fn.Name == "main.sayhi" {
parkedg = g
break LookForParkedG
}
}
}
t.Logf("Parked g is: %v\n", parkedg)
frames, _ := parkedg.Stacktrace(20, 0)
for _, frame := range frames {
name := ""
if frame.Call.Fn != nil {
name = frame.Call.Fn.Name
}
t.Logf("\t%s:%d in %s (%#x)", frame.Call.File, frame.Call.Line, name, frame.Current.PC)
}
assertNoError(p.SwitchGoroutine(parkedg), t, "SwitchGoroutine()")
p.ClearBreakpoint(bp.Addr)
assertNoError(p.Step(), t, "Step()")
if p.SelectedGoroutine().ID != parkedg.ID {
t.Fatalf("Step did not continue on the selected goroutine, expected %d got %d", parkedg.ID, p.SelectedGoroutine().ID)
}
})
}
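// TestUnsupportedArch checks that launching a binary built for an unsupported architecture fails with ErrUnsupportedArch.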
func TestUnsupportedArch(t *testing.T) {
ver, _ := goversion.Parse(runtime.Version())
if ver.Major < 0 || !ver.AfterOrEqual(goversion.GoVersion{Major: 1, Minor: 6, Rev: -1}) || ver.AfterOrEqual(goversion.GoVersion{Major: 1, Minor: 7, Rev: -1}) {
// cross compile (with -N?) works only on select versions of go
return
}
fixturesDir := protest.FindFixturesDir()
infile := filepath.Join(fixturesDir, "math.go")
outfile := filepath.Join(fixturesDir, "_math_debug_386")
cmd := exec.Command("go", "build", "-gcflags=-N -l", "-o", outfile, infile)
for _, v := range os.Environ() {
if !strings.HasPrefix(v, "GOARCH=") {
cmd.Env = append(cmd.Env, v)
}
}
cmd.Env = append(cmd.Env, "GOARCH=386")
out, err := cmd.CombinedOutput()
if err != nil {
t.Fatalf("go build failed: %v: %v", err, string(out))
}
defer os.Remove(outfile)
var p *proc.Target
switch testBackend {
case "native":
p, err = native.Launch([]string{outfile}, ".", 0, []string{}, "", [3]string{})
case "lldb":
p, err = gdbserial.LLDBLaunch([]string{outfile}, ".", 0, []string{}, "", [3]string{})
default:
t.Skip("test not valid for this backend")
}
if err == nil {
p.Detach(true)
t.Fatal("Launch is expected to fail, but succeeded")
}
if _, ok := err.(*proc.ErrUnsupportedArch); ok {
// all good
return
}
t.Fatal(err)
}
func TestIssue573(t *testing.T) {
// calls to runtime.duffzero and runtime.duffcopy jump directly into the middle
// of the function, so the internal breakpoint set by StepInto may be missed.
protest.AllowRecording(t)
withTestProcess("issue573", t, func(p *proc.Target, fixture protest.Fixture) {
setFunctionBreakpoint(p, t, "main.foo")
assertNoError(p.Continue(), t, "Continue()")
assertNoError(p.Step(), t, "Step() #1")
assertNoError(p.Step(), t, "Step() #2") // Bug exits here.
assertNoError(p.Step(), t, "Step() #3") // Third step ought to be possible; program ought not have exited.
})
}
func TestTestvariables2Prologue(t *testing.T) {
withTestProcess("testvariables2", t, func(p *proc.Target, fixture protest.Fixture) {
addrEntry := p.BinInfo().LookupFunc["main.main"].Entry
addrPrologue := findFunctionLocation(p, t, "main.main")
if addrEntry == addrPrologue {
t.Fatalf("Prologue detection failed on testvariables2.go/main.main")
}
})
}
func TestNextDeferReturnAndDirectCall(t *testing.T) {
// Next should not step into a deferred function if it is called
// directly, only if it is called through a panic or a deferreturn.
// Here we test the case where the function is called by a deferreturn
testseq("defercall", contNext, []nextTest{
{9, 10},
{10, 11},
{11, 12},
{12, 13},
{13, 28}}, "main.callAndDeferReturn", t)
}
func TestNextPanicAndDirectCall(t *testing.T) {
// Next should not step into a deferred function if it is called
// directly, only if it is called through a panic or a deferreturn.
// Here we test the case where the function is called by a panic
if goversion.VersionAfterOrEqual(runtime.Version(), 1, 11) {
testseq("defercall", contNext, []nextTest{
{15, 16},
{16, 17},
{17, 18},
{18, 6}}, "main.callAndPanic2", t)
} else {
testseq("defercall", contNext, []nextTest{
{15, 16},
{16, 17},
{17, 18},
{18, 5}}, "main.callAndPanic2", t)
}
}
func TestStepCall(t *testing.T) {
testseq("testnextprog", contStep, []nextTest{
{34, 13},
{13, 14}}, "", t)
}
func TestStepCallPtr(t *testing.T) {
// Tests that Step works correctly when calling functions with a
// function pointer.
if goversion.VersionAfterOrEqual(runtime.Version(), 1, 11) && !protest.RegabiSupported() {
testseq("teststepprog", contStep, []nextTest{
{9, 10},
{10, 6},
{6, 7},
{7, 11}}, "", t)
} else {
testseq("teststepprog", contStep, []nextTest{
{9, 10},
{10, 5},
{5, 6},
{6, 7},
{7, 11}}, "", t)
}
}
func TestStepReturnAndPanic(t *testing.T) {
// Tests that Step works correctly when returning from functions
// and when a deferred function is called while panicking.
switch {
case goversion.VersionAfterOrEqual(runtime.Version(), 1, 11):
testseq("defercall", contStep, []nextTest{
{17, 6},
{6, 7},
{7, 18},
{18, 6},
{6, 7}}, "", t)
case goversion.VersionAfterOrEqual(runtime.Version(), 1, 10):
testseq("defercall", contStep, []nextTest{
{17, 5},
{5, 6},
{6, 7},
{7, 18},
{18, 5},
{5, 6},
{6, 7}}, "", t)
case goversion.VersionAfterOrEqual(runtime.Version(), 1, 9):
testseq("defercall", contStep, []nextTest{
{17, 5},
{5, 6},
{6, 7},
{7, 17},
{17, 18},
{18, 5},
{5, 6},
{6, 7}}, "", t)
default:
testseq("defercall", contStep, []nextTest{
{17, 5},
{5, 6},
{6, 7},
{7, 18},
{18, 5},
{5, 6},
{6, 7}}, "", t)
}
}
func TestStepDeferReturn(t *testing.T) {
// Tests that Step works correctly when a deferred function is
// called during a return.
if goversion.VersionAfterOrEqual(runtime.Version(), 1, 11) {
testseq("defercall", contStep, []nextTest{
{11, 6},
{6, 7},
{7, 12},
{12, 13},
{13, 6},
{6, 7},
{7, 13},
{13, 28}}, "", t)
} else {
testseq("defercall", contStep, []nextTest{
{11, 5},
{5, 6},
{6, 7},
{7, 12},
{12, 13},
{13, 5},
{5, 6},
{6, 7},
{7, 13},
{13, 28}}, "", t)
}
}
func TestStepIgnorePrivateRuntime(t *testing.T) {
// Tests that Step will ignore calls to private runtime functions
// (such as runtime.convT2E in this case)
switch {
case goversion.VersionAfterOrEqual(runtime.Version(), 1, 17) && protest.RegabiSupported():
testseq("teststepprog", contStep, []nextTest{
{21, 13},
{13, 14},
{14, 15},
{15, 17},
{17, 22}}, "", t)
case goversion.VersionAfterOrEqual(runtime.Version(), 1, 17):
testseq("teststepprog", contStep, []nextTest{
{21, 14},
{14, 15},
{15, 17},
{17, 22}}, "", t)
case goversion.VersionAfterOrEqual(runtime.Version(), 1, 11):
testseq("teststepprog", contStep, []nextTest{
{21, 14},
{14, 15},
{15, 22}}, "", t)
case goversion.VersionAfterOrEqual(runtime.Version(), 1, 10):
testseq("teststepprog", contStep, []nextTest{
{21, 13},
{13, 14},
{14, 15},
{15, 22}}, "", t)
case goversion.VersionAfterOrEqual(runtime.Version(), 1, 7):
testseq("teststepprog", contStep, []nextTest{
{21, 13},
{13, 14},
{14, 15},
{15, 14},
{14, 17},
{17, 22}}, "", t)
default:
testseq("teststepprog", contStep, []nextTest{
{21, 13},
{13, 14},
{14, 15},
{15, 17},
{17, 22}}, "", t)
}
}
func TestIssue561(t *testing.T) {
// Step fails to make progress when PC is at a CALL instruction
// where a breakpoint is also set.
protest.AllowRecording(t)
withTestProcess("issue561", t, func(p *proc.Target, fixture protest.Fixture) {
setFileBreakpoint(p, t, fixture.Source, 10)
assertNoError(p.Continue(), t, "Continue()")
assertNoError(p.Step(), t, "Step()")
assertLineNumber(p, t, 5, "wrong line number after Step,")
})
}
func TestGoroutineLabels(t *testing.T) {
withTestProcess("goroutineLabels", t, func(p *proc.Target, fixture protest.Fixture) {
assertNoError(p.Continue(), t, "Continue()")
g, err := proc.GetG(p.CurrentThread())
assertNoError(err, t, "GetG()")
if len(g.Labels()) != 0 {
t.Fatalf("No labels expected")
}
assertNoError(p.Continue(), t, "Continue()")
g, err = proc.GetG(p.CurrentThread())
assertNoError(err, t, "GetG()")
labels := g.Labels()
if v := labels["k1"]; v != "v1" {
t.Errorf("Unexpected label value k1=%v", v)
}
if v := labels["k2"]; v != "v2" {
t.Errorf("Unexpected label value k2=%v", v)
}
})
}
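// TestStepOut checks that, after stopping inside main.helloworld, StepOut returns to the caller at line 35.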
func TestStepOut(t *testing.T) {
testseq2(t, "testnextprog", "main.helloworld", []seqTest{{contContinue, 13}, {contStepout, 35}})
}
func TestStepConcurrentDirect(t *testing.T) {
skipOn(t, "broken", "freebsd")
protest.AllowRecording(t)
withTestProcess("teststepconcurrent", t, func(p *proc.Target, fixture protest.Fixture) {
bp := setFileBreakpoint(p, t, fixture.Source, 37)
assertNoError(p.Continue(), t, "Continue()")
err := p.ClearBreakpoint(bp.Addr)
assertNoError(err, t, "ClearBreakpoint()")
for _, b := range p.Breakpoints().M {
if b.Name == proc.UnrecoveredPanic {
err := p.ClearBreakpoint(b.Addr)
assertNoError(err, t, "ClearBreakpoint(unrecovered-panic)")
break
}
}
gid := p.SelectedGoroutine().ID
seq := []int{37, 38, 13, 15, 16, 38}
i := 0
count := 0
for {
anyerr := false
if p.SelectedGoroutine().ID != gid {
t.Errorf("Step switched to different goroutine %d %d\n", gid, p.SelectedGoroutine().ID)
anyerr = true
}
f, ln := currentLineNumber(p, t)
if ln != seq[i] {
if i == 1 && ln == 40 {
// loop exited
break
}
frames, err := proc.ThreadStacktrace(p.CurrentThread(), 20)
if err != nil {
t.Errorf("Could not get stacktrace of goroutine %d\n", p.SelectedGoroutine().ID)
} else {
t.Logf("Goroutine %d (thread: %d):", p.SelectedGoroutine().ID, p.CurrentThread().ThreadID())
for _, frame := range frames {
t.Logf("\t%s:%d (%#x)", frame.Call.File, frame.Call.Line, frame.Current.PC)
}
}
t.Errorf("Program did not continue at expected location (%d) %s:%d [i %d count %d]", seq[i], f, ln, i, count)
anyerr = true
}
if anyerr {
t.FailNow()
}
i = (i + 1) % len(seq)
if i == 0 {
count++
}
assertNoError(p.Step(), t, "Step()")
}
if count != 100 {
t.Fatalf("Program did not loop expected number of times: %d", count)
}
})
}
func TestStepConcurrentPtr(t *testing.T) {
skipOn(t, "broken", "freebsd")
protest.AllowRecording(t)
withTestProcess("teststepconcurrent", t, func(p *proc.Target, fixture protest.Fixture) {
setFileBreakpoint(p, t, fixture.Source, 24)
for _, b := range p.Breakpoints().M {
if b.Name == proc.UnrecoveredPanic {
err := p.ClearBreakpoint(b.Addr)
assertNoError(err, t, "ClearBreakpoint(unrecovered-panic)")
break
}
}
kvals := map[int]int64{}
count := 0
for {
err := p.Continue()
_, exited := err.(proc.ErrProcessExited)
if exited {
break
}
assertNoError(err, t, "Continue()")
f, ln := currentLineNumber(p, t)
if ln != 24 {
for _, th := range p.ThreadList() {
t.Logf("thread %d stopped on breakpoint %v", th.ThreadID(), th.Breakpoint())
}
curbp := p.CurrentThread().Breakpoint()
t.Fatalf("Program did not continue at expected location (24): %s:%d %#x [%v] (gid %d count %d)", f, ln, currentPC(p, t), curbp, p.SelectedGoroutine().ID, count)
}
gid := p.SelectedGoroutine().ID
kvar := evalVariable(p, t, "k")
k, _ := constant.Int64Val(kvar.Value)
if oldk, ok := kvals[gid]; ok {
if oldk >= k {
t.Fatalf("Goroutine %d did not make progress?", gid)
}
}
kvals[gid] = k
assertNoError(p.Step(), t, "Step()")
for p.Breakpoints().HasSteppingBreakpoints() {
if p.SelectedGoroutine().ID == gid {
t.Fatalf("step did not step into function call (but internal breakpoints still active?) (%d %d)", gid, p.SelectedGoroutine().ID)
}
assertNoError(p.Continue(), t, "Continue()")
}
if p.SelectedGoroutine().ID != gid {
t.Fatalf("Step switched goroutines (wanted: %d got: %d)", gid, p.SelectedGoroutine().ID)
}
f, ln = assertLineNumber(p, t, 13, "Step did not step into function call")
count++
if count > 50 {
// This test could potentially go on for 10000 cycles; since that's
// too slow we cut the execution short after 50 cycles.
break
}
}
if count == 0 {
t.Fatalf("Breakpoint never hit")
}
})
}
func TestStepOutBreakpoint(t *testing.T) {
protest.AllowRecording(t)
withTestProcess("testnextprog", t, func(p *proc.Target, fixture protest.Fixture) {
bp := setFileBreakpoint(p, t, fixture.Source, 13)
assertNoError(p.Continue(), t, "Continue()")
p.ClearBreakpoint(bp.Addr)
// StepOut should be interrupted by a breakpoint on the same goroutine.
setFileBreakpoint(p, t, fixture.Source, 14)
assertNoError(p.StepOut(), t, "StepOut()")
assertLineNumber(p, t, 14, "wrong line number")
if p.Breakpoints().HasSteppingBreakpoints() {
t.Fatal("has internal breakpoints after hitting breakpoint on same goroutine")
}
})
}
func TestNextBreakpoint(t *testing.T) {
protest.AllowRecording(t)
withTestProcess("testnextprog", t, func(p *proc.Target, fixture protest.Fixture) {
bp := setFileBreakpoint(p, t, fixture.Source, 34)
assertNoError(p.Continue(), t, "Continue()")
p.ClearBreakpoint(bp.Addr)
// Next should be interrupted by a breakpoint on the same goroutine.
setFileBreakpoint(p, t, fixture.Source, 14)
assertNoError(p.Next(), t, "Next()")
assertLineNumber(p, t, 14, "wrong line number")
if p.Breakpoints().HasSteppingBreakpoints() {
t.Fatal("has internal breakpoints after hitting breakpoint on same goroutine")
}
})
}
func TestNextBreakpointKeepsSteppingBreakpoints(t *testing.T) {
protest.AllowRecording(t)
withTestProcess("testnextprog", t, func(p *proc.Target, fixture protest.Fixture) {
p.KeepSteppingBreakpoints = proc.TracepointKeepsSteppingBreakpoints
bp := setFileBreakpoint(p, t, fixture.Source, 34)
assertNoError(p.Continue(), t, "Continue()")
p.ClearBreakpoint(bp.Addr)
// Next should be interrupted by a tracepoint on the same goroutine.
bp = setFileBreakpoint(p, t, fixture.Source, 14)
bp.Tracepoint = true
assertNoError(p.Next(), t, "Next()")
assertLineNumber(p, t, 14, "wrong line number")
if !p.Breakpoints().HasSteppingBreakpoints() {
t.Fatal("does not have internal breakpoints after hitting tracepoint on same goroutine")
}
// Continue to complete next.
assertNoError(p.Continue(), t, "Continue()")
assertLineNumber(p, t, 35, "wrong line number")
if p.Breakpoints().HasSteppingBreakpoints() {
t.Fatal("has internal breakpoints after completing next")
}
})
}
func TestStepOutDefer(t *testing.T) {
protest.AllowRecording(t)
withTestProcess("testnextdefer", t, func(p *proc.Target, fixture protest.Fixture) {
bp := setFileBreakpoint(p, t, fixture.Source, 9)
assertNoError(p.Continue(), t, "Continue()")
p.ClearBreakpoint(bp.Addr)
assertLineNumber(p, t, 9, "wrong line number")
assertNoError(p.StepOut(), t, "StepOut()")
f, l, _ := p.BinInfo().PCToLine(currentPC(p, t))
if f == fixture.Source || l == 6 {
t.Fatalf("wrong location %s:%d, expected to end somewhere in runtime", f, l)
}
})
}
func TestStepOutDeferReturnAndDirectCall(t *testing.T) {
// StepOut should not step into a deferred function if it is called
// directly, only if it is called through a panic.
// Here we test the case where the function is called by a deferreturn
testseq2(t, "defercall", "", []seqTest{
{contContinue, 11},
{contStepout, 28}})
}
func TestStepOnCallPtrInstr(t *testing.T) {
protest.AllowRecording(t)
withTestProcess("teststepprog", t, func(p *proc.Target, fixture protest.Fixture) {
setFileBreakpoint(p, t, fixture.Source, 10)
assertNoError(p.Continue(), t, "Continue()")
found := false
for {
_, ln := currentLineNumber(p, t)
if ln != 10 {
break
}
regs, err := p.CurrentThread().Registers()
assertNoError(err, t, "Registers()")
pc := regs.PC()
text, err := proc.Disassemble(p.Memory(), regs, p.Breakpoints(), p.BinInfo(), pc, pc+uint64(p.BinInfo().Arch.MaxInstructionLength()))
assertNoError(err, t, "Disassemble()")
if text[0].IsCall() {
found = true
break
}
assertNoError(p.StepInstruction(), t, "StepInstruction()")
}
if !found {
t.Fatal("Could not find CALL instruction")
}
assertNoError(p.Step(), t, "Step()")
if goversion.VersionAfterOrEqual(runtime.Version(), 1, 11) && !protest.RegabiSupported() {
assertLineNumber(p, t, 6, "Step continued to wrong line,")
} else {
assertLineNumber(p, t, 5, "Step continued to wrong line,")
}
})
}
func TestIssue594(t *testing.T) {
skipOn(t, "upstream issue", "darwin", "lldb")
// debugserver will receive an EXC_BAD_ACCESS for this, at that point
// there is no way to reconvert this exception into a unix signal and send
// it to the process.
// This is a bug in debugserver/lldb:
// https://bugs.llvm.org//show_bug.cgi?id=22868
// Exceptions that aren't caused by breakpoints should be propagated
// back to the target.
// In particular the target should be able to cause a nil pointer
// dereference panic and recover from it.
protest.AllowRecording(t)
withTestProcess("issue594", t, func(p *proc.Target, fixture protest.Fixture) {
assertNoError(p.Continue(), t, "Continue()")
var f string
var ln int
if testBackend == "rr" {
frame, err := findFirstNonRuntimeFrame(p)
assertNoError(err, t, "findFirstNonRuntimeFrame")
f, ln = frame.Current.File, frame.Current.Line
} else {
f, ln = currentLineNumber(p, t)
}
if ln != 21 {
t.Fatalf("Program stopped at %s:%d, expected :21", f, ln)
}
})
}
func TestStepOutPanicAndDirectCall(t *testing.T) {
// StepOut should not step into a deferred function if it is called
// directly, only if it is called through a panic.
// Here we test the case where the function is called by a panic
if goversion.VersionAfterOrEqual(runtime.Version(), 1, 11) {
testseq2(t, "defercall", "", []seqTest{
{contContinue, 17},
{contStepout, 6}})
} else {
testseq2(t, "defercall", "", []seqTest{
{contContinue, 17},
{contStepout, 5}})
}
}
func TestWorkDir(t *testing.T) {
wd := os.TempDir()
// For Darwin `os.TempDir()` returns `/tmp`, which is a symlink to `/private/tmp`.
if runtime.GOOS == "darwin" {
wd = "/private/tmp"
}
protest.AllowRecording(t)
withTestProcessArgs("workdir", t, wd, []string{}, 0, func(p *proc.Target, fixture protest.Fixture) {
setFileBreakpoint(p, t, fixture.Source, 14)
p.Continue()
v := evalVariable(p, t, "pwd")
str := constant.StringVal(v.Value)
if wd != str {
t.Fatalf("Expected %s got %s\n", wd, str)
}
})
}
func TestNegativeIntEvaluation(t *testing.T) {
testcases := []struct {
name string
typ string
value interface{}
}{
{"ni8", "int8", int64(-5)},
{"ni16", "int16", int64(-5)},
{"ni32", "int32", int64(-5)},
}
protest.AllowRecording(t)
withTestProcess("testvariables2", t, func(p *proc.Target, fixture protest.Fixture) {
assertNoError(p.Continue(), t, "Continue()")
for _, tc := range testcases {
v := evalVariable(p, t, tc.name)
if typ := v.RealType.String(); typ != tc.typ {
t.Fatalf("Wrong type for variable %q: %q (expected: %q)", tc.name, typ, tc.typ)
}
if val, _ := constant.Int64Val(v.Value); val != tc.value {
t.Fatalf("Wrong value for variable %q: %v (expected: %v)", tc.name, val, tc.value)
}
}
})
}
func TestIssue683(t *testing.T) {
// Step panics when source file can not be found
protest.AllowRecording(t)
withTestProcess("issue683", t, func(p *proc.Target, fixture protest.Fixture) {
setFunctionBreakpoint(p, t, "main.main")
assertNoError(p.Continue(), t, "First Continue()")
for i := 0; i < 20; i++ {
// Eventually an error about the source file not being found will be
// returned; the important thing is that we shouldn't panic.
err := p.Step()
if err != nil {
break
}
}
})
}
func TestIssue664(t *testing.T) {
protest.AllowRecording(t)
withTestProcess("issue664", t, func(p *proc.Target, fixture protest.Fixture) {
setFileBreakpoint(p, t, fixture.Source, 4)
assertNoError(p.Continue(), t, "Continue()")
assertNoError(p.Next(), t, "Next()")
assertLineNumber(p, t, 5, "Did not continue to correct location,")
})
}
// Benchmarks (*Process).Continue + (*Scope).FunctionArguments
func BenchmarkTrace(b *testing.B) {
withTestProcess("traceperf", b, func(p *proc.Target, fixture protest.Fixture) {
setFunctionBreakpoint(p, b, "main.PerfCheck")
b.ResetTimer()
for i := 0; i < b.N; i++ {
assertNoError(p.Continue(), b, "Continue()")
s, err := proc.GoroutineScope(p, p.CurrentThread())
assertNoError(err, b, "Scope()")
_, err = s.FunctionArguments(proc.LoadConfig{false, 0, 64, 0, 3, 0})
assertNoError(err, b, "FunctionArguments()")
}
b.StopTimer()
})
}
func TestNextInDeferReturn(t *testing.T) {
// runtime.deferreturn updates the G struct in a way that, for one
// instruction, leaves the curg._defer field non-nil but with the
// curg._defer.fn field nil.
// We need to deal with this without panicking.
protest.AllowRecording(t)
withTestProcess("defercall", t, func(p *proc.Target, fixture protest.Fixture) {
setFunctionBreakpoint(p, t, "runtime.deferreturn")
assertNoError(p.Continue(), t, "First Continue()")
// Set a breakpoint on the deferred function so that the following loop
// cannot step out of runtime.deferreturn and all the way to the
// point where the target program panics.
setFunctionBreakpoint(p, t, "main.sampleFunction")
for i := 0; i < 20; i++ {
loc, err := p.CurrentThread().Location()
assertNoError(err, t, "CurrentThread().Location()")
t.Logf("at %#x %s:%d", loc.PC, loc.File, loc.Line)
if loc.Fn != nil && loc.Fn.Name == "main.sampleFunction" {
break
}
assertNoError(p.Next(), t, fmt.Sprintf("Next() %d", i))
}
})
}
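// getg returns the goroutine with ID goid from gs, or nil if there is no such goroutine.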
func getg(goid int, gs []*proc.G) *proc.G {
for _, g := range gs {
if g.ID == goid {
return g
}
}
return nil
}
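// TestAttachDetach attaches to an already running fixture process, waits for it to stop, then detaches without killing it and checks that it keeps serving HTTP requests.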
func TestAttachDetach(t *testing.T) {
if testBackend == "lldb" && runtime.GOOS == "linux" {
bs, _ := ioutil.ReadFile("/proc/sys/kernel/yama/ptrace_scope")
if bs == nil || strings.TrimSpace(string(bs)) != "0" {
t.Logf("can not run TestAttachDetach: %v\n", bs)
return
}
}
if testBackend == "rr" {
return
}
var buildFlags protest.BuildFlags
if buildMode == "pie" {
buildFlags |= protest.BuildModePIE
}
fixture := protest.BuildFixture("testnextnethttp", buildFlags)
cmd := exec.Command(fixture.Path)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
assertNoError(cmd.Start(), t, "starting fixture")
// wait for testnextnethttp to start listening
t0 := time.Now()
for {
conn, err := net.Dial("tcp", "127.0.0.1:9191")
if err == nil {
conn.Close()
break
}
time.Sleep(50 * time.Millisecond)
if time.Since(t0) > 10*time.Second {
t.Fatal("fixture did not start")
}
}
var p *proc.Target
var err error
switch testBackend {
case "native":
p, err = native.Attach(cmd.Process.Pid, []string{})
case "lldb":
path := ""
if runtime.GOOS == "darwin" {
path = fixture.Path
}
p, err = gdbserial.LLDBAttach(cmd.Process.Pid, path, []string{})
default:
err = fmt.Errorf("unknown backend %q", testBackend)
}
assertNoError(err, t, "Attach")
go func() {
time.Sleep(1 * time.Second)
http.Get("http://127.0.0.1:9191")
}()
assertNoError(p.Continue(), t, "Continue")
assertLineNumber(p, t, 11, "Did not continue to correct location,")
assertNoError(p.Detach(false), t, "Detach")
if runtime.GOOS != "darwin" {
// Debugserver will sometimes leave a zombie process after detaching; this
// seems to be a bug in debugserver.
resp, err := http.Get("http://127.0.0.1:9191/nobp")
assertNoError(err, t, "Page request after detach")
bs, err := ioutil.ReadAll(resp.Body)
assertNoError(err, t, "Reading /nobp page")
if out := string(bs); !strings.Contains(out, "hello, world!") {
t.Fatalf("/nobp page does not contain \"hello, world!\": %q", out)
}
}
cmd.Process.Kill()
}
func TestVarSum(t *testing.T) {
protest.AllowRecording(t)
withTestProcess("testvariables2", t, func(p *proc.Target, fixture protest.Fixture) {
assertNoError(p.Continue(), t, "Continue()")
sumvar := evalVariable(p, t, "s1[0] + s1[1]")
sumvarstr := constant.StringVal(sumvar.Value)
if sumvarstr != "onetwo" {
t.Fatalf("s1[0] + s1[1] == %q (expected \"onetwo\")", sumvarstr)
}
if sumvar.Len != int64(len(sumvarstr)) {
t.Fatalf("sumvar.Len == %d (expected %d)", sumvar.Len, len(sumvarstr))
}
})
}
func TestPackageWithPathVar(t *testing.T) {
protest.AllowRecording(t)
withTestProcess("pkgrenames", t, func(p *proc.Target, fixture protest.Fixture) {
assertNoError(p.Continue(), t, "Continue()")
evalVariable(p, t, "pkg.SomeVar")
evalVariable(p, t, "pkg.SomeVar.X")
})
}
func TestEnvironment(t *testing.T) {
protest.AllowRecording(t)
os.Setenv("SOMEVAR", "bah")
withTestProcess("testenv", t, func(p *proc.Target, fixture protest.Fixture) {
assertNoError(p.Continue(), t, "Continue()")
v := evalVariable(p, t, "x")
vv := constant.StringVal(v.Value)
t.Logf("v = %q", vv)
if vv != "bah" {
t.Fatalf("value of v is %q (expected \"bah\")", vv)
}
})
}
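// getFrameOff returns the value of runtime.frameoff evaluated in the current scope.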
func getFrameOff(p *proc.Target, t *testing.T) int64 {
frameoffvar := evalVariable(p, t, "runtime.frameoff")
frameoff, _ := constant.Int64Val(frameoffvar.Value)
return frameoff
}
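// TestRecursiveNext checks Next, Step and StepOut inside a recursive function, using frame offsets to verify that Step enters a new frame and StepOut returns to the caller's frame.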
func TestRecursiveNext(t *testing.T) {
protest.AllowRecording(t)
testcases := []nextTest{
{6, 7},
{7, 10},
{10, 11},
{11, 17},
}
testseq("increment", contNext, testcases, "main.Increment", t)
withTestProcess("increment", t, func(p *proc.Target, fixture protest.Fixture) {
bp := setFunctionBreakpoint(p, t, "main.Increment")
assertNoError(p.Continue(), t, "Continue")
err := p.ClearBreakpoint(bp.Addr)
assertNoError(err, t, "ClearBreakpoint")
assertNoError(p.Next(), t, "Next 1")
assertNoError(p.Next(), t, "Next 2")
assertNoError(p.Next(), t, "Next 3")
frameoff0 := getFrameOff(p, t)
assertNoError(p.Step(), t, "Step")
frameoff1 := getFrameOff(p, t)
if frameoff0 == frameoff1 {
t.Fatalf("did not step into function?")
}
assertLineNumber(p, t, 6, "program did not continue to expected location,")
assertNoError(p.Next(), t, "Next 4")
assertLineNumber(p, t, 7, "program did not continue to expected location,")
assertNoError(p.StepOut(), t, "StepOut")
assertLineNumber(p, t, 11, "program did not continue to expected location,")
frameoff2 := getFrameOff(p, t)
if frameoff0 != frameoff2 {
t.Fatalf("frame offset mismatch %x != %x", frameoff0, frameoff2)
}
})
}
// TestIssue877 ensures that the environment variables starting with DYLD_ and LD_
// are passed when executing the binary on OSX via debugserver
func TestIssue877(t *testing.T) {
if runtime.GOOS != "darwin" && testBackend == "lldb" {
return
}
if os.Getenv("TRAVIS") == "true" && runtime.GOOS == "darwin" {
// Something changed on Travis' side that makes the Go compiler fail if
// DYLD_LIBRARY_PATH is set.
t.Skip("broken")
}
const envval = "/usr/local/lib"
os.Setenv("DYLD_LIBRARY_PATH", envval)
withTestProcess("issue877", t, func(p *proc.Target, fixture protest.Fixture) {
assertNoError(p.Continue(), t, "Continue()")
v := evalVariable(p, t, "dyldenv")
vv := constant.StringVal(v.Value)
t.Logf("v = %q", vv)
if vv != envval {
t.Fatalf("value of v is %q (expected %q)", vv, envval)
}
})
}
func TestIssue893(t *testing.T) {
// Test what happens when next is called immediately after launching the
// executable; acceptable behaviors are: (a) no error, (b) no source at PC
// error, (c) program runs to completion.
protest.AllowRecording(t)
withTestProcess("increment", t, func(p *proc.Target, fixture protest.Fixture) {
err := p.Next()
if err == nil {
return
}
if _, ok := err.(*frame.ErrNoFDEForPC); ok {
return
}
if _, ok := err.(*proc.ErrNoSourceForPC); ok {
return
}
if _, ok := err.(proc.ErrProcessExited); ok {
return
}
assertNoError(err, t, "Next")
})
}
func TestStepInstructionNoGoroutine(t *testing.T) {
protest.AllowRecording(t)
withTestProcess("increment", t, func(p *proc.Target, fixture protest.Fixture) {
// Call StepInstruction immediately after launching the program; it should
// work even though no goroutine is selected.
assertNoError(p.StepInstruction(), t, "StepInstruction")
})
}
func TestIssue871(t *testing.T) {
protest.AllowRecording(t)
withTestProcess("issue871", t, func(p *proc.Target, fixture protest.Fixture) {
assertNoError(p.Continue(), t, "Continue")
var scope *proc.EvalScope
var err error
if testBackend == "rr" {
var frame proc.Stackframe
frame, err = findFirstNonRuntimeFrame(p)
if err == nil {
scope = proc.FrameToScope(p, p.Memory(), nil, frame)
}
} else {
scope, err = proc.GoroutineScope(p, p.CurrentThread())
}
assertNoError(err, t, "scope")
locals, err := scope.LocalVariables(normalLoadConfig)
assertNoError(err, t, "LocalVariables")
foundA, foundB := false, false
for _, v := range locals {
t.Logf("local %v", v)
switch v.Name {
case "a":
foundA = true
if v.Flags&proc.VariableEscaped == 0 {
t.Errorf("variable a not flagged as escaped")
}
case "b":
foundB = true
}
}
if !foundA {
t.Errorf("variable a not found")
}
if !foundB {
t.Errorf("variable b not found")
}
})
}
func TestShadowedFlag(t *testing.T) {
if ver, _ := goversion.Parse(runtime.Version()); ver.Major >= 0 && !ver.AfterOrEqual(goversion.GoVersion{Major: 1, Minor: 9, Rev: -1}) {
return
}
withTestProcess("testshadow", t, func(p *proc.Target, fixture protest.Fixture) {
assertNoError(p.Continue(), t, "Continue")
scope, err := proc.GoroutineScope(p, p.CurrentThread())
assertNoError(err, t, "GoroutineScope")
locals, err := scope.LocalVariables(normalLoadConfig)
assertNoError(err, t, "LocalVariables")
foundShadowed := false
foundNonShadowed := false
for _, v := range locals {
if v.Flags&proc.VariableShadowed != 0 {
if v.Name != "a" {
t.Errorf("wrong shadowed variable %s", v.Name)
}
foundShadowed = true
if n, _ := constant.Int64Val(v.Value); n != 0 {
t.Errorf("wrong value for shadowed variable a: %d", n)
}
} else {
if v.Name != "a" {
t.Errorf("wrong non-shadowed variable %s", v.Name)
}
foundNonShadowed = true
if n, _ := constant.Int64Val(v.Value); n != 1 {
t.Errorf("wrong value for non-shadowed variable a: %d", n)
}
}
}
if !foundShadowed {
t.Error("could not find any shadowed variable")
}
if !foundNonShadowed {
t.Error("could not find any non-shadowed variable")
}
})
}
func TestAttachStripped(t *testing.T) {
if testBackend == "lldb" && runtime.GOOS == "linux" {
bs, _ := ioutil.ReadFile("/proc/sys/kernel/yama/ptrace_scope")
if bs == nil || strings.TrimSpace(string(bs)) != "0" {
t.Logf("can not run TestAttachStripped: %v\n", bs)
return
}
}
if testBackend == "rr" {
return
}
if runtime.GOOS == "darwin" {
t.Log("-s does not produce stripped executables on macOS")
return
}
if buildMode != "" {
t.Skip("not enabled with buildmode=PIE")
}
fixture := protest.BuildFixture("testnextnethttp", protest.LinkStrip)
cmd := exec.Command(fixture.Path)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
assertNoError(cmd.Start(), t, "starting fixture")
// wait for testnextnethttp to start listening
t0 := time.Now()
for {
conn, err := net.Dial("tcp", "127.0.0.1:9191")
if err == nil {
conn.Close()
break
}
time.Sleep(50 * time.Millisecond)
if time.Since(t0) > 10*time.Second {
t.Fatal("fixture did not start")
}
}
var p *proc.Target
var err error
switch testBackend {
case "native":
p, err = native.Attach(cmd.Process.Pid, []string{})
case "lldb":
path := ""
if runtime.GOOS == "darwin" {
path = fixture.Path
}
p, err = gdbserial.LLDBAttach(cmd.Process.Pid, path, []string{})
default:
t.Fatalf("unknown backend %q", testBackend)
}
t.Logf("error is %v", err)
if err == nil {
p.Detach(true)
t.Fatalf("expected error after attach, got nothing")
} else {
cmd.Process.Kill()
}
os.Remove(fixture.Path)
}
func TestIssue844(t *testing.T) {
// Conditional breakpoints should not prevent next from working if their
// condition isn't met.
withTestProcess("nextcond", t, func(p *proc.Target, fixture protest.Fixture) {
setFileBreakpoint(p, t, fixture.Source, 9)
condbp := setFileBreakpoint(p, t, fixture.Source, 10)
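// Build the breakpoint condition as the AST for the expression `n == 11`.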
condbp.UserBreaklet().Cond = &ast.BinaryExpr{
Op: token.EQL,
X: &ast.Ident{Name: "n"},
Y: &ast.BasicLit{Kind: token.INT, Value: "11"},
}
assertNoError(p.Continue(), t, "Continue")
assertNoError(p.Next(), t, "Next")
assertLineNumber(p, t, 10, "continued to wrong location,")
})
}
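// logStacktrace logs the given frames, one per line, including call PC,
// frame offset, frame pointer offset, the (possibly inlined) function name
// and call location, followed by the topmost deferred call and the list of
// defers associated with each frame.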
func logStacktrace(t *testing.T, p *proc.Target, frames []proc.Stackframe) {
for j := range frames {
name := "?"
if frames[j].Current.Fn != nil {
name = frames[j].Current.Fn.Name
}
if frames[j].Call.Fn != nil && frames[j].Current.Fn != frames[j].Call.Fn {
name = fmt.Sprintf("%s inlined in %s", frames[j].Call.Fn.Name, frames[j].Current.Fn.Name)
}
t.Logf("\t%#x %#x %#x %s at %s:%d\n", frames[j].Call.PC, frames[j].FrameOffset(), frames[j].FramePointerOffset(), name, filepath.Base(frames[j].Call.File), frames[j].Call.Line)
if frames[j].TopmostDefer != nil {
_, _, fn := frames[j].TopmostDefer.DeferredFunc(p)
fnname := ""
if fn != nil {
fnname = fn.Name
}
t.Logf("\t\ttopmost defer: %#x %s\n", frames[j].TopmostDefer.DwrapPC, fnname)
}
for deferIdx, _defer := range frames[j].Defers {
_, _, fn := _defer.DeferredFunc(p)
fnname := ""
if fn != nil {
fnname = fn.Name
}
t.Logf("\t\t%d defer: %#x %s\n", deferIdx, _defer.DwrapPC, fnname)
}
}
}
// stacktraceCheck checks that all the functions listed in tc appear in
// frames in the same order.
// Checks that all the functions in tc starting with "C." or with "!" are in
// a systemstack frame.
// Returns a slice m where m[i] is the index in frames of the function tc[i]
// or nil if any check fails.
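//
// For example, stacktraceCheck(t, []string{"!runtime.newstack", "main.main"}, frames)
// returns []int{i, j} with i < j, where frames[i] is a systemstack frame whose
// Current.Fn.Name is "runtime.newstack" and frames[j] is the frame for "main.main".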
func stacktraceCheck(t *testing.T, tc []string, frames []proc.Stackframe) []int {
m := make([]int, len(tc))
i, j := 0, 0
for i < len(tc) {
tcname := tc[i]
tcsystem := strings.HasPrefix(tcname, "C.")
if tcname[0] == '!' {
tcsystem = true
tcname = tcname[1:]
}
for j < len(frames) {
name := "?"
if frames[j].Current.Fn != nil {
name = frames[j].Current.Fn.Name
}
if name == tcname {
m[i] = j
if tcsystem != frames[j].SystemStack {
t.Logf("system stack check failed for frame %d (expected %v got %v)", j, tcsystem, frames[j].SystemStack)
t.Logf("expected: %v\n", tc)
return nil
}
break
}
j++
}
if j >= len(frames) {
t.Logf("couldn't find frame %d %s", i, tc)
t.Logf("expected: %v\n", tc)
return nil
}
i++
}
return m
}
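// frameInFile reports whether both the Current and Call locations of frame
// are in the given file (matched by path suffix) and have a valid line number.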
func frameInFile(frame proc.Stackframe, file string) bool {
for _, loc := range []proc.Location{frame.Current, frame.Call} {
if !strings.HasSuffix(loc.File, file) && !strings.HasSuffix(loc.File, "/"+file) && !strings.HasSuffix(loc.File, "\\"+file) {
return false
}
if loc.Line <= 0 {
return false
}
}
return true
}
func TestCgoStacktrace(t *testing.T) {
if runtime.GOOS == "windows" {
ver, _ := goversion.Parse(runtime.Version())
if ver.Major > 0 && !ver.AfterOrEqual(goversion.GoVersion{Major: 1, Minor: 9, Rev: -1}) {
t.Skip("disabled on windows with go before version 1.9")
}
}
if runtime.GOOS == "darwin" {
ver, _ := goversion.Parse(runtime.Version())
if ver.Major > 0 && !ver.AfterOrEqual(goversion.GoVersion{Major: 1, Minor: 8, Rev: -1}) {
t.Skip("disabled on macOS with go before version 1.8")
}
}
skipOn(t, "broken - cgo stacktraces", "386")
skipOn(t, "broken - cgo stacktraces", "linux", "arm64")
protest.MustHaveCgo(t)
// Tests that:
// a) we correctly identify the goroutine while we are executing cgo code
// b) that we can stitch together the system stack (where cgo code
// executes) and the normal goroutine stack
// Each test case describes how the stack trace should appear after a
// continue. The first function in each test case is the topmost function
// that should be found on the stack; the actual stack trace can have more
// frames than those listed here, but all the frames listed must appear in
// the specified order.
testCases := [][]string{
[]string{"main.main"},
[]string{"C.helloworld_pt2", "C.helloworld", "main.main"},
[]string{"main.helloWorldS", "main.helloWorld", "C.helloworld_pt2", "C.helloworld", "main.main"},
[]string{"C.helloworld_pt4", "C.helloworld_pt3", "main.helloWorldS", "main.helloWorld", "C.helloworld_pt2", "C.helloworld", "main.main"},
[]string{"main.helloWorld2", "C.helloworld_pt4", "C.helloworld_pt3", "main.helloWorldS", "main.helloWorld", "C.helloworld_pt2", "C.helloworld", "main.main"}}
var gid int
frameOffs := map[string]int64{}
framePointerOffs := map[string]int64{}
withTestProcess("cgostacktest/", t, func(p *proc.Target, fixture protest.Fixture) {
for itidx, tc := range testCases {
assertNoError(p.Continue(), t, fmt.Sprintf("Continue at iteration step %d", itidx))
g, err := proc.GetG(p.CurrentThread())
assertNoError(err, t, fmt.Sprintf("GetG at iteration step %d", itidx))
if itidx == 0 {
gid = g.ID
} else {
if gid != g.ID {
t.Fatalf("wrong goroutine id at iteration step %d (expected %d got %d)", itidx, gid, g.ID)
}
}
frames, err := g.Stacktrace(100, 0)
assertNoError(err, t, fmt.Sprintf("Stacktrace at iteration step %d", itidx))
t.Logf("iteration step %d", itidx)
logStacktrace(t, p, frames)
m := stacktraceCheck(t, tc, frames)
mismatch := (m == nil)
for i, j := range m {
if strings.HasPrefix(tc[i], "C.hellow") {
if !frameInFile(frames[j], "hello.c") {
t.Logf("position in %q is %s:%d (call %s:%d)", tc[i], frames[j].Current.File, frames[j].Current.Line, frames[j].Call.File, frames[j].Call.Line)
mismatch = true
break
}
}
if frameOff, ok := frameOffs[tc[i]]; ok {
if frameOff != frames[j].FrameOffset() {
t.Logf("frame %s offset mismatch", tc[i])
}
if framePointerOffs[tc[i]] != frames[j].FramePointerOffset() {
t.Logf("frame %s pointer offset mismatch", tc[i])
}
} else {
frameOffs[tc[i]] = frames[j].FrameOffset()
framePointerOffs[tc[i]] = frames[j].FramePointerOffset()
}
}
// also check that ThreadStacktrace produces the same list of frames
threadFrames, err := proc.ThreadStacktrace(p.CurrentThread(), 100)
assertNoError(err, t, fmt.Sprintf("ThreadStacktrace at iteration step %d", itidx))
if len(threadFrames) != len(frames) {
mismatch = true
} else {
for j := range frames {
if frames[j].Current.File != threadFrames[j].Current.File || frames[j].Current.Line != threadFrames[j].Current.Line {
t.Logf("stack mismatch between goroutine stacktrace and thread stacktrace")
t.Logf("thread stacktrace:")
logStacktrace(t, p, threadFrames)
mismatch = true
break
}
}
}
if mismatch {
t.Fatal("see previous loglines")
}
}
})
}
func TestCgoSources(t *testing.T) {
if runtime.GOOS == "windows" {
ver, _ := goversion.Parse(runtime.Version())
if ver.Major > 0 && !ver.AfterOrEqual(goversion.GoVersion{Major: 1, Minor: 9, Rev: -1}) {
t.Skip("disabled on windows with go before version 1.9")
}
}
if runtime.GOARCH == "386" {
t.Skip("cgo stacktraces not supported on i386 for now")
}
protest.MustHaveCgo(t)
withTestProcess("cgostacktest/", t, func(p *proc.Target, fixture protest.Fixture) {
sources := p.BinInfo().Sources
for _, needle := range []string{"main.go", "hello.c"} {
found := false
for _, k := range sources {
if strings.HasSuffix(k, needle) || strings.HasSuffix(k, "/"+needle) || strings.HasSuffix(k, "\\"+needle) {
found = true
break
}
}
if !found {
t.Errorf("File %s not found", needle)
}
}
})
}
func TestSystemstackStacktrace(t *testing.T) {
// check that we can follow a stack switch initiated by runtime.systemstack()
withTestProcess("panic", t, func(p *proc.Target, fixture protest.Fixture) {
setFunctionBreakpoint(p, t, "runtime.startpanic_m")
assertNoError(p.Continue(), t, "first continue")
assertNoError(p.Continue(), t, "second continue")
g, err := proc.GetG(p.CurrentThread())
assertNoError(err, t, "GetG")
frames, err := g.Stacktrace(100, 0)
assertNoError(err, t, "stacktrace")
logStacktrace(t, p, frames)
m := stacktraceCheck(t, []string{"!runtime.startpanic_m", "runtime.gopanic", "main.main"}, frames)
if m == nil {
t.Fatal("see previous loglines")
}
})
}
func TestSystemstackOnRuntimeNewstack(t *testing.T) {
// The bug being tested here manifests as follows:
// - set a breakpoint somewhere or interrupt the program with Ctrl-C
// - try to look at stacktraces of other goroutines
// If one of the other goroutines is resizing its own stack the stack
// command won't work for it.
withTestProcess("binarytrees", t, func(p *proc.Target, fixture protest.Fixture) {
setFunctionBreakpoint(p, t, "main.main")
assertNoError(p.Continue(), t, "first continue")
g, err := proc.GetG(p.CurrentThread())
assertNoError(err, t, "GetG")
mainGoroutineID := g.ID
setFunctionBreakpoint(p, t, "runtime.newstack")
for {
assertNoError(p.Continue(), t, "second continue")
g, err = proc.GetG(p.CurrentThread())
assertNoError(err, t, "GetG")
if g.ID == mainGoroutineID {
break
}
}
frames, err := g.Stacktrace(100, 0)
assertNoError(err, t, "stacktrace")
logStacktrace(t, p, frames)
m := stacktraceCheck(t, []string{"!runtime.newstack", "main.main"}, frames)
if m == nil {
t.Fatal("see previous loglines")
}
})
}
func TestIssue1034(t *testing.T) {
skipOn(t, "broken - cgo stacktraces", "386")
protest.MustHaveCgo(t)
// The external linker on macOS produces an abbrev for DW_TAG_subprogram
// without the "has children" flag, we should support this.
withTestProcess("cgostacktest/", t, func(p *proc.Target, fixture protest.Fixture) {
setFunctionBreakpoint(p, t, "main.main")
assertNoError(p.Continue(), t, "Continue()")
frames, err := p.SelectedGoroutine().Stacktrace(10, 0)
assertNoError(err, t, "Stacktrace")
scope := proc.FrameToScope(p, p.Memory(), nil, frames[2:]...)
args, err := scope.FunctionArguments(normalLoadConfig)
assertNoError(err, t, "FunctionArguments()")
if len(args) > 0 {
t.Fatalf("wrong number of arguments for frame %v (%d)", frames[2], len(args))
}
})
}
func TestIssue1008(t *testing.T) {
skipOn(t, "broken - cgo stacktraces", "386")
protest.MustHaveCgo(t)
// The external linker on macOS inserts "end of sequence" extended opcodes
// in debug_line, which we should support correctly.
withTestProcess("cgostacktest/", t, func(p *proc.Target, fixture protest.Fixture) {
setFunctionBreakpoint(p, t, "main.main")
assertNoError(p.Continue(), t, "Continue()")
loc, err := p.CurrentThread().Location()
assertNoError(err, t, "CurrentThread().Location()")
t.Logf("location %v\n", loc)
if !strings.HasSuffix(loc.File, "/main.go") {
t.Errorf("unexpected location %s:%d\n", loc.File, loc.Line)
}
if loc.Line > 31 {
t.Errorf("unexpected location %s:%d (file only has 30 lines)\n", loc.File, loc.Line)
}
})
}
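// testDeclLineCount checks that the process is stopped at lineno and that the
// local variables visible in the current scope are exactly tgtvars (in any order).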
func testDeclLineCount(t *testing.T, p *proc.Target, lineno int, tgtvars []string) {
sort.Strings(tgtvars)
assertLineNumber(p, t, lineno, "Program did not continue to correct next location")
scope, err := proc.GoroutineScope(p, p.CurrentThread())
assertNoError(err, t, fmt.Sprintf("GoroutineScope (:%d)", lineno))
vars, err := scope.Locals(0)
assertNoError(err, t, fmt.Sprintf("Locals (:%d)", lineno))
if len(vars) != len(tgtvars) {
t.Fatalf("wrong number of variables %d (:%d)", len(vars), lineno)
}
outvars := make([]string, len(vars))
for i, v := range vars {
outvars[i] = v.Name
}
sort.Strings(outvars)
for i := range outvars {
if tgtvars[i] != outvars[i] {
t.Fatalf("wrong variables, got: %q expected %q\n", outvars, tgtvars)
}
}
}
func TestDeclLine(t *testing.T) {
ver, _ := goversion.Parse(runtime.Version())
if ver.Major > 0 && !ver.AfterOrEqual(goversion.GoVersion{Major: 1, Minor: 10, Rev: -1}) {
t.Skip("go 1.9 and prior versions do not emit DW_AT_decl_line")
}
withTestProcess("decllinetest", t, func(p *proc.Target, fixture protest.Fixture) {
setFileBreakpoint(p, t, fixture.Source, 8)
setFileBreakpoint(p, t, fixture.Source, 9)
setFileBreakpoint(p, t, fixture.Source, 10)
setFileBreakpoint(p, t, fixture.Source, 11)
setFileBreakpoint(p, t, fixture.Source, 14)
assertNoError(p.Continue(), t, "Continue 1")
if goversion.VersionAfterOrEqual(runtime.Version(), 1, 15) {
testDeclLineCount(t, p, 8, []string{})
} else {
testDeclLineCount(t, p, 8, []string{"a"})
}
assertNoError(p.Continue(), t, "Continue 2")
testDeclLineCount(t, p, 9, []string{"a"})
assertNoError(p.Continue(), t, "Continue 3")
if goversion.VersionAfterOrEqual(runtime.Version(), 1, 15) {
testDeclLineCount(t, p, 10, []string{"a"})
} else {
testDeclLineCount(t, p, 10, []string{"a", "b"})
}
assertNoError(p.Continue(), t, "Continue 4")
testDeclLineCount(t, p, 11, []string{"a", "b"})
assertNoError(p.Continue(), t, "Continue 5")
testDeclLineCount(t, p, 14, []string{"a", "b"})
})
}
func TestIssue1137(t *testing.T) {
withTestProcess("dotpackagesiface", t, func(p *proc.Target, fixture protest.Fixture) {
assertNoError(p.Continue(), t, "Continue()")
v := evalVariable(p, t, "iface")
assertNoError(v.Unreadable, t, "iface unreadable")
v2 := evalVariable(p, t, "iface2")
assertNoError(v2.Unreadable, t, "iface2 unreadable")
})
}
func TestIssue1101(t *testing.T) {
// If a breakpoint is hit close to process death on a thread that isn't the
// group leader the process could die while we are trying to stop it.
//
// This can be easily reproduced by having the goroutine that's executing
// main.main (which will almost always run on the thread group leader) wait
// for a second goroutine before exiting, then setting a breakpoint on the
// second goroutine and stepping through it (see TestIssue1101 in
// proc_test.go).
//
// When stepping over the return instruction of main.f the deferred
// wg.Done() call will be executed which will cause the main goroutine to
// resume and proceed to exit. Both the temporary breakpoint on wg.Done and
// the temporary breakpoint on the return address of main.f will be in
// close proximity to main.main calling os.Exit() and causing the death of
// the thread group leader.
withTestProcess("issue1101", t, func(p *proc.Target, fixture protest.Fixture) {
setFunctionBreakpoint(p, t, "main.f")
assertNoError(p.Continue(), t, "Continue()")
assertNoError(p.Next(), t, "Next() 1")
assertNoError(p.Next(), t, "Next() 2")
lastCmd := "Next() 3"
exitErr := p.Next()
if exitErr == nil {
lastCmd = "final Continue()"
exitErr = p.Continue()
}
if pexit, exited := exitErr.(proc.ErrProcessExited); exited {
if pexit.Status != 2 && testBackend != "lldb" && (runtime.GOOS != "linux" || runtime.GOARCH != "386") {
// Looks like there's a bug with debugserver on macOS that sometimes
// will report exit status 0 instead of the proper exit status.
//
// Also it seems that sometimes on linux/386 we will not receive the
// exit status. This happens if the process exits at the same time as it
// receives a signal.
t.Fatalf("process exited status %d (expected 2)", pexit.Status)
}
} else {
assertNoError(exitErr, t, lastCmd)
t.Fatalf("process did not exit after %s", lastCmd)
}
})
}
func TestIssue1145(t *testing.T) {
withTestProcess("sleep", t, func(p *proc.Target, fixture protest.Fixture) {
setFileBreakpoint(p, t, fixture.Source, 18)
assertNoError(p.Continue(), t, "Continue()")
resumeChan := make(chan struct{}, 1)
p.ResumeNotify(resumeChan)
go func() {
<-resumeChan
time.Sleep(100 * time.Millisecond)
p.RequestManualStop()
}()
assertNoError(p.Next(), t, "Next()")
if p.Breakpoints().HasSteppingBreakpoints() {
t.Fatal("has internal breakpoints after manual stop request")
}
})
}
func TestHaltKeepsSteppingBreakpoints(t *testing.T) {
withTestProcess("sleep", t, func(p *proc.Target, fixture protest.Fixture) {
p.KeepSteppingBreakpoints = proc.HaltKeepsSteppingBreakpoints
setFileBreakpoint(p, t, fixture.Source, 18)
assertNoError(p.Continue(), t, "Continue()")
resumeChan := make(chan struct{}, 1)
p.ResumeNotify(resumeChan)
go func() {
<-resumeChan
time.Sleep(100 * time.Millisecond)
p.RequestManualStop()
}()
assertNoError(p.Next(), t, "Next()")
if !p.Breakpoints().HasSteppingBreakpoints() {
t.Fatal("does not have internal breakpoints after manual stop request")
}
})
}
func TestDisassembleGlobalVars(t *testing.T) {
skipOn(t, "broken - global variable symbolication", "arm64") // On ARM64 symLookup can't look up variables due to how they are loaded, see issue #1778
// On linux/386, when building in PIE mode, the generated code uses
// __x86.get_pc_thunk to stay position independent, locating global
// variables dynamically with a sequence like:
// 	CALL __x86.get_pc_thunk.ax(SB) 0xb0f7f
// 	LEAL 0xc0a19(AX), AX
if runtime.GOARCH == "386" && runtime.GOOS == "linux" && buildMode == "pie" {
t.Skip("On 386 linux when pie, symLookup can't look up global variables")
}
withTestProcess("teststepconcurrent", t, func(p *proc.Target, fixture protest.Fixture) {
mainfn := p.BinInfo().LookupFunc["main.main"]
regs, _ := p.CurrentThread().Registers()
text, err := proc.Disassemble(p.Memory(), regs, p.Breakpoints(), p.BinInfo(), mainfn.Entry, mainfn.End)
assertNoError(err, t, "Disassemble")
found := false
for i := range text {
if strings.Index(text[i].Text(proc.IntelFlavour, p.BinInfo()), "main.v") > 0 {
found = true
break
}
}
if !found {
t.Fatalf("could not find main.v reference in disassembly")
}
})
}
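// checkFrame returns an error if frame is not a call to fnname at file:line
// (the file:line check is skipped when file is empty) with the given inlined status.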
func checkFrame(frame proc.Stackframe, fnname, file string, line int, inlined bool) error {
if frame.Call.Fn == nil || frame.Call.Fn.Name != fnname {
return fmt.Errorf("wrong function name: %s", fnname)
}
if file != "" {
if frame.Call.File != file || frame.Call.Line != line {
return fmt.Errorf("wrong file:line %s:%d", frame.Call.File, frame.Call.Line)
}
}
if frame.Inlined != inlined {
if inlined {
return fmt.Errorf("not inlined")
} else {
return fmt.Errorf("inlined")
}
}
return nil
}
func TestAllPCsForFileLines(t *testing.T) {
if ver, _ := goversion.Parse(runtime.Version()); ver.Major >= 0 && !ver.AfterOrEqual(goversion.GoVersion{Major: 1, Minor: 10, Rev: -1}) {
// Versions of go before 1.10 do not have DWARF information for inlined calls
t.Skip("inlining not supported")
}
withTestProcessArgs("testinline", t, ".", []string{}, protest.EnableInlining, func(p *proc.Target, fixture protest.Fixture) {
l2pcs := p.BinInfo().AllPCsForFileLines(fixture.Source, []int{7, 20})
if len(l2pcs) != 2 {
t.Fatalf("expected two map entries for %s:{%d,%d} (got %d: %v)", fixture.Source, 7, 20, len(l2pcs), l2pcs)
}
pcs := l2pcs[20]
if len(pcs) < 1 {
t.Fatalf("expected at least one location for %s:%d (got %d: %#x)", fixture.Source, 20, len(pcs), pcs)
}
pcs = l2pcs[7]
if len(pcs) < 2 {
t.Fatalf("expected at least two locations for %s:%d (got %d: %#x)", fixture.Source, 7, len(pcs), pcs)
}
})
}
func TestInlinedStacktraceAndVariables(t *testing.T) {
if ver, _ := goversion.Parse(runtime.Version()); ver.Major >= 0 && !ver.AfterOrEqual(goversion.GoVersion{Major: 1, Minor: 10, Rev: -1}) {
// Versions of go before 1.10 do not have DWARF information for inlined calls
t.Skip("inlining not supported")
}
firstCallCheck := &scopeCheck{
line: 7,
ok: false,
varChecks: []varCheck{
varCheck{
name: "a",
typ: "int",
kind: reflect.Int,
hasVal: true,
intVal: 3,
},
varCheck{
name: "z",
typ: "int",
kind: reflect.Int,
hasVal: true,
intVal: 9,
},
},
}
secondCallCheck := &scopeCheck{
line: 7,
ok: false,
varChecks: []varCheck{
varCheck{
name: "a",
typ: "int",
kind: reflect.Int,
hasVal: true,
intVal: 4,
},
varCheck{
name: "z",
typ: "int",
kind: reflect.Int,
hasVal: true,
intVal: 16,
},
},
}
withTestProcessArgs("testinline", t, ".", []string{}, protest.EnableInlining, func(p *proc.Target, fixture protest.Fixture) {
pcs, err := proc.FindFileLocation(p, fixture.Source, 7)
assertNoError(err, t, "LineToPC")
if len(pcs) < 2 {
t.Fatalf("expected at least two locations for %s:%d (got %d: %#x)", fixture.Source, 7, len(pcs), pcs)
}
for _, pc := range pcs {
t.Logf("setting breakpoint at %#x\n", pc)
_, err := p.SetBreakpoint(pc, proc.UserBreakpoint, nil)
assertNoError(err, t, fmt.Sprintf("SetBreakpoint(%#x)", pc))
}
// first inlined call
assertNoError(p.Continue(), t, "Continue")
frames, err := proc.ThreadStacktrace(p.CurrentThread(), 20)
assertNoError(err, t, "ThreadStacktrace")
t.Logf("Stacktrace:\n")
for i := range frames {
t.Logf("\t%s at %s:%d (%#x)\n", frames[i].Call.Fn.Name, frames[i].Call.File, frames[i].Call.Line, frames[i].Current.PC)
}
if err := checkFrame(frames[0], "main.inlineThis", fixture.Source, 7, true); err != nil {
t.Fatalf("Wrong frame 0: %v", err)
}
if err := checkFrame(frames[1], "main.main", fixture.Source, 18, false); err != nil {
t.Fatalf("Wrong frame 1: %v", err)
}
if avar, _ := constant.Int64Val(evalVariable(p, t, "a").Value); avar != 3 {
t.Fatalf("value of 'a' variable is not 3 (%d)", avar)
}
if zvar, _ := constant.Int64Val(evalVariable(p, t, "z").Value); zvar != 9 {
t.Fatalf("value of 'z' variable is not 9 (%d)", zvar)
}
if _, ok := firstCallCheck.checkLocalsAndArgs(p, t); !ok {
t.Fatalf("exiting for past errors")
}
// second inlined call
assertNoError(p.Continue(), t, "Continue")
frames, err = proc.ThreadStacktrace(p.CurrentThread(), 20)
assertNoError(err, t, "ThreadStacktrace (2)")
t.Logf("Stacktrace 2:\n")
for i := range frames {
t.Logf("\t%s at %s:%d (%#x)\n", frames[i].Call.Fn.Name, frames[i].Call.File, frames[i].Call.Line, frames[i].Current.PC)
}
if err := checkFrame(frames[0], "main.inlineThis", fixture.Source, 7, true); err != nil {
t.Fatalf("Wrong frame 0: %v", err)
}
if err := checkFrame(frames[1], "main.main", fixture.Source, 19, false); err != nil {
t.Fatalf("Wrong frame 1: %v", err)
}
if avar, _ := constant.Int64Val(evalVariable(p, t, "a").Value); avar != 4 {
t.Fatalf("value of 'a' variable is not 3 (%d)", avar)
}
if zvar, _ := constant.Int64Val(evalVariable(p, t, "z").Value); zvar != 16 {
t.Fatalf("value of 'z' variable is not 9 (%d)", zvar)
}
if bvar, err := evalVariableOrError(p, "b"); err == nil {
t.Fatalf("expected error evaluating 'b', but it succeeded instead: %v", bvar)
}
if _, ok := secondCallCheck.checkLocalsAndArgs(p, t); !ok {
t.Fatalf("exiting for past errors")
}
})
}
func TestInlineStep(t *testing.T) {
if ver, _ := goversion.Parse(runtime.Version()); ver.Major >= 0 && !ver.AfterOrEqual(goversion.GoVersion{Major: 1, Minor: 10, Rev: -1}) {
// Versions of go before 1.10 do not have DWARF information for inlined calls
t.Skip("inlining not supported")
}
testseq2Args(".", []string{}, protest.EnableInlining, t, "testinline", "", []seqTest{
{contContinue, 18},
{contStep, 6},
{contStep, 7},
{contStep, 18},
{contStep, 19},
})
}
func TestInlineNext(t *testing.T) {
if ver, _ := goversion.Parse(runtime.Version()); ver.Major >= 0 && !ver.AfterOrEqual(goversion.GoVersion{Major: 1, Minor: 10, Rev: -1}) {
// Versions of go before 1.10 do not have DWARF information for inlined calls
t.Skip("inlining not supported")
}
testseq2Args(".", []string{}, protest.EnableInlining, t, "testinline", "", []seqTest{
{contContinue, 18},
{contStep, 6},
{contNext, 7},
{contNext, 18},
{contNext, 19},
})
}
func TestInlineStepOver(t *testing.T) {
if ver, _ := goversion.Parse(runtime.Version()); ver.Major >= 0 && !ver.AfterOrEqual(goversion.GoVersion{Major: 1, Minor: 10, Rev: -1}) {
// Versions of go before 1.10 do not have DWARF information for inlined calls
t.Skip("inlining not supported")
}
testseq2Args(".", []string{}, protest.EnableInlining, t, "testinline", "", []seqTest{
{contContinue, 18},
{contNext, 19},
{contNext, 20},
})
}
func TestInlineStepOut(t *testing.T) {
if ver, _ := goversion.Parse(runtime.Version()); ver.Major >= 0 && !ver.AfterOrEqual(goversion.GoVersion{Major: 1, Minor: 10, Rev: -1}) {
// Versions of go before 1.10 do not have DWARF information for inlined calls
t.Skip("inlining not supported")
}
testseq2Args(".", []string{}, protest.EnableInlining, t, "testinline", "", []seqTest{
{contContinue, 18},
{contStep, 6},
{contStepout, 18},
})
}
func TestInlineFunctionList(t *testing.T) {
// We should be able to list all functions, even inlined ones.
if ver, _ := goversion.Parse(runtime.Version()); ver.Major >= 0 && !ver.AfterOrEqual(goversion.GoVersion{Major: 1, Minor: 10, Rev: -1}) {
// Versions of go before 1.10 do not have DWARF information for inlined calls
t.Skip("inlining not supported")
}
withTestProcessArgs("testinline", t, ".", []string{}, protest.EnableInlining|protest.EnableOptimization, func(p *proc.Target, fixture protest.Fixture) {
var found bool
for _, fn := range p.BinInfo().Functions {
if strings.Contains(fn.Name, "inlineThis") {
found = true
break
}
}
if !found {
t.Fatal("inline function not returned")
}
})
}
func TestInlineBreakpoint(t *testing.T) {
// We should be able to set a breakpoint on the call site of an inlined function.
if ver, _ := goversion.Parse(runtime.Version()); ver.Major >= 0 && !ver.AfterOrEqual(goversion.GoVersion{Major: 1, Minor: 10, Rev: -1}) {
// Versions of go before 1.10 do not have DWARF information for inlined calls
t.Skip("inlining not supported")
}
withTestProcessArgs("testinline", t, ".", []string{}, protest.EnableInlining|protest.EnableOptimization, func(p *proc.Target, fixture protest.Fixture) {
pcs, err := proc.FindFileLocation(p, fixture.Source, 17)
assertNoError(err, t, "FindFileLocation")
t.Logf("%#v\n", pcs)
if len(pcs) != 1 {
t.Fatalf("unable to get PC for inlined function call: %v", pcs)
}
fn := p.BinInfo().PCToFunc(pcs[0])
expectedFn := "main.main"
if fn.Name != expectedFn {
t.Fatalf("incorrect function returned, expected %s, got %s", expectedFn, fn.Name)
}
_, err = p.SetBreakpoint(pcs[0], proc.UserBreakpoint, nil)
if err != nil {
t.Fatalf("unable to set breakpoint: %v", err)
}
})
}
func TestIssue951(t *testing.T) {
if ver, _ := goversion.Parse(runtime.Version()); ver.Major >= 0 && !ver.AfterOrEqual(goversion.GoVersion{Major: 1, Minor: 9, Rev: -1}) {
t.Skip("scopes not implemented in <=go1.8")
}
withTestProcess("issue951", t, func(p *proc.Target, fixture protest.Fixture) {
assertNoError(p.Continue(), t, "Continue()")
scope, err := proc.GoroutineScope(p, p.CurrentThread())
assertNoError(err, t, "GoroutineScope")
args, err := scope.FunctionArguments(normalLoadConfig)
assertNoError(err, t, "FunctionArguments")
t.Logf("%#v", args[0])
if args[0].Flags&proc.VariableShadowed == 0 {
t.Error("argument is not shadowed")
}
vars, err := scope.LocalVariables(normalLoadConfig)
assertNoError(err, t, "LocalVariables")
shadowed, notShadowed := 0, 0
for i := range vars {
t.Logf("var %d: %#v\n", i, vars[i])
if vars[i].Flags&proc.VariableShadowed != 0 {
shadowed++
} else {
notShadowed++
}
}
if shadowed != 1 || notShadowed != 1 {
t.Errorf("Wrong number of shadowed/non-shadowed local variables: %d %d", shadowed, notShadowed)
}
})
}
func TestDWZCompression(t *testing.T) {
// If dwz is not available in the system, skip this test
if _, err := exec.LookPath("dwz"); err != nil {
t.Skip("dwz not installed")
}
withTestProcessArgs("dwzcompression", t, ".", []string{}, protest.EnableDWZCompression, func(p *proc.Target, fixture protest.Fixture) {
setFunctionBreakpoint(p, t, "C.fortytwo")
assertNoError(p.Continue(), t, "first Continue()")
val := evalVariable(p, t, "stdin")
if val.RealType == nil {
t.Errorf("Can't find type for \"stdin\" global variable")
}
})
}
func TestMapLoadConfigWithReslice(t *testing.T) {
// Check that load configuration is respected for resliced maps.
withTestProcess("testvariables2", t, func(p *proc.Target, fixture protest.Fixture) {
zolotovLoadCfg := proc.LoadConfig{FollowPointers: true, MaxStructFields: -1, MaxVariableRecurse: 3, MaxStringLen: 10, MaxArrayValues: 10}
assertNoError(p.Continue(), t, "First Continue()")
scope, err := proc.GoroutineScope(p, p.CurrentThread())
assertNoError(err, t, "GoroutineScope")
m1, err := scope.EvalExpression("m1", zolotovLoadCfg)
assertNoError(err, t, "EvalVariable")
t.Logf("m1 returned children %d (%d)", len(m1.Children)/2, m1.Len)
expr := fmt.Sprintf("(*(*%q)(%d))[10:]", m1.DwarfType.String(), m1.Addr)
t.Logf("expr %q\n", expr)
m1cont, err := scope.EvalExpression(expr, zolotovLoadCfg)
assertNoError(err, t, "EvalVariable")
t.Logf("m1cont returned children %d", len(m1cont.Children)/2)
if len(m1cont.Children) != 20 {
t.Fatalf("wrong number of children returned %d\n", len(m1cont.Children)/2)
}
})
}
func TestStepOutReturn(t *testing.T) {
ver, _ := goversion.Parse(runtime.Version())
if ver.Major >= 0 && !ver.AfterOrEqual(goversion.GoVersion{Major: 1, Minor: 10, Rev: -1}) {
t.Skip("return variables aren't marked on 1.9 or earlier")
}
withTestProcess("stepoutret", t, func(p *proc.Target, fixture protest.Fixture) {
setFunctionBreakpoint(p, t, "main.stepout")
assertNoError(p.Continue(), t, "Continue")
assertNoError(p.StepOut(), t, "StepOut")
ret := p.CurrentThread().Common().ReturnValues(normalLoadConfig)
if len(ret) != 2 {
t.Fatalf("wrong number of return values %v", ret)
}
stridx := 0
numidx := 1
if !goversion.VersionAfterOrEqual(runtime.Version(), 1, 12) {
// in 1.11 and earlier the order of return values in DWARF is
// unspecified, in 1.12 and later it follows the order of definition
// specified by the user
for i := range ret {
if ret[i].Name == "str" {
stridx = i
numidx = 1 - i
break
}
}
}
if ret[stridx].Name != "str" {
t.Fatalf("(str) bad return value name %s", ret[stridx].Name)
}
if ret[stridx].Kind != reflect.String {
t.Fatalf("(str) bad return value kind %v", ret[stridx].Kind)
}
if s := constant.StringVal(ret[stridx].Value); s != "return 47" {
t.Fatalf("(str) bad return value %q", s)
}
if ret[numidx].Name != "num" {
t.Fatalf("(num) bad return value name %s", ret[numidx].Name)
}
if ret[numidx].Kind != reflect.Int {
t.Fatalf("(num) bad return value kind %v", ret[numidx].Kind)
}
if n, _ := constant.Int64Val(ret[numidx].Value); n != 48 {
t.Fatalf("(num) bad return value %d", n)
}
})
}
func TestOptimizationCheck(t *testing.T) {
withTestProcess("continuetestprog", t, func(p *proc.Target, fixture protest.Fixture) {
fn := p.BinInfo().LookupFunc["main.main"]
if fn.Optimized() {
t.Fatalf("main.main is optimized")
}
})
if goversion.VersionAfterOrEqual(runtime.Version(), 1, 10) {
withTestProcessArgs("continuetestprog", t, ".", []string{}, protest.EnableOptimization|protest.EnableInlining, func(p *proc.Target, fixture protest.Fixture) {
fn := p.BinInfo().LookupFunc["main.main"]
if !fn.Optimized() {
t.Fatalf("main.main is not optimized")
}
})
}
}
func TestIssue1264(t *testing.T) {
// It should be possible to set a breakpoint condition that consists only
// of evaluating a single boolean variable.
withTestProcess("issue1264", t, func(p *proc.Target, fixture protest.Fixture) {
bp := setFileBreakpoint(p, t, fixture.Source, 8)
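// The condition is just the identifier `equalsTwo`: stop only when that boolean variable is true.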
bp.UserBreaklet().Cond = &ast.Ident{Name: "equalsTwo"}
assertNoError(p.Continue(), t, "Continue()")
assertLineNumber(p, t, 8, "after continue")
})
}
func TestReadDefer(t *testing.T) {
withTestProcess("deferstack", t, func(p *proc.Target, fixture protest.Fixture) {
assertNoError(p.Continue(), t, "Continue")
frames, err := p.SelectedGoroutine().Stacktrace(10, proc.StacktraceReadDefers)
assertNoError(err, t, "Stacktrace")
logStacktrace(t, p, frames)
examples := []struct {
frameIdx int
topmostDefer string
defers []string
}{
// main.call3 (defers nothing, topmost defer main.f2)
{0, "main.f2", []string{}},
// main.call2 (defers main.f2, main.f3, topmost defer main.f2)
{1, "main.f2", []string{"main.f2", "main.f3"}},
// main.call1 (defers main.f1, main.f2, topmost defer main.f1)
{2, "main.f1", []string{"main.f1", "main.f2"}},
// main.main (defers nothing)
{3, "", []string{}}}
defercheck := func(d *proc.Defer, deferName, tgt string, frameIdx int) {
if d == nil {
t.Fatalf("expected %q as %s of frame %d, got nothing", tgt, deferName, frameIdx)
}
if d.Unreadable != nil {
t.Fatalf("expected %q as %s of frame %d, got unreadable defer: %v", tgt, deferName, frameIdx, d.Unreadable)
}
_, _, dfn := d.DeferredFunc(p)
if dfn == nil {
t.Fatalf("expected %q as %s of frame %d, got %#x", tgt, deferName, frameIdx, d.DwrapPC)
}
if dfn.Name != tgt {
t.Fatalf("expected %q as %s of frame %d, got %q", tgt, deferName, frameIdx, dfn.Name)
}
}
for _, example := range examples {
frame := &frames[example.frameIdx]
if example.topmostDefer != "" {
defercheck(frame.TopmostDefer, "topmost defer", example.topmostDefer, example.frameIdx)
}
if len(example.defers) != len(frames[example.frameIdx].Defers) {
t.Fatalf("expected %d defers for %d, got %v", len(example.defers), example.frameIdx, frame.Defers)
}
for deferIdx := range example.defers {
defercheck(frame.Defers[deferIdx], fmt.Sprintf("defer %d", deferIdx), example.defers[deferIdx], example.frameIdx)
}
}
})
}
func TestNextUnknownInstr(t *testing.T) {
skipUnlessOn(t, "amd64 only", "amd64")
if !goversion.VersionAfterOrEqual(runtime.Version(), 1, 10) {
t.Skip("versions of Go before 1.10 can't assemble the instruction VPUNPCKLWD")
}
withTestProcess("nodisasm/", t, func(p *proc.Target, fixture protest.Fixture) {
setFunctionBreakpoint(p, t, "main.asmFunc")
assertNoError(p.Continue(), t, "Continue()")
assertNoError(p.Next(), t, "Next()")
})
}
func TestReadDeferArgs(t *testing.T) {
if goversion.VersionAfterOrEqual(runtime.Version(), 1, 17) {
// When regabi is enabled in Go 1.17 and later, reading arguments of
// deferred functions becomes significantly more complicated because of
// the autogenerated code used to unpack the argument frame stored in
// runtime._defer into registers.
// We either need to know how to do the translation, implementing the ABI1
// rules in Delve, or have some assistance from the compiler (for example
// have the dwrap function contain entries for each of the captured
// variables with a location describing their offset from DX).
// Ultimately this feature is unimportant enough that we can leave it
// disabled for now.
t.Skip("unsupported")
}
var tests = []struct {
frame, deferCall int
a, b int64
}{
{1, 1, 42, 61},
{2, 2, 1, -1},
}
withTestProcess("deferstack", t, func(p *proc.Target, fixture protest.Fixture) {
assertNoError(p.Continue(), t, "Continue()")
for _, test := range tests {
scope, err := proc.ConvertEvalScope(p, -1, test.frame, test.deferCall)
assertNoError(err, t, fmt.Sprintf("ConvertEvalScope(-1, %d, %d)", test.frame, test.deferCall))
if !goversion.VersionAfterOrEqual(runtime.Version(), 1, 17) {
// In Go 1.17 deferred function calls can end up inside a wrapper, and
// the scope for this evaluation needs to be the wrapper.
if scope.Fn.Name != "main.f2" {
t.Fatalf("expected function \"main.f2\" got %q", scope.Fn.Name)
}
}
avar, err := scope.EvalExpression("a", normalLoadConfig)
if err != nil {
t.Fatal(err)
}
bvar, err := scope.EvalExpression("b", normalLoadConfig)
if err != nil {
t.Fatal(err)
}
a, _ := constant.Int64Val(avar.Value)
b, _ := constant.Int64Val(bvar.Value)
if a != test.a {
t.Errorf("value of argument 'a' at frame %d, deferred call %d: %d (expected %d)", test.frame, test.deferCall, a, test.a)
}
if b != test.b {
t.Errorf("value of argument 'b' at frame %d, deferred call %d: %d (expected %d)", test.frame, test.deferCall, b, test.b)
}
}
})
}
func TestIssue1374(t *testing.T) {
// Continue did not work when stopped at a breakpoint immediately after calling CallFunction.
protest.MustSupportFunctionCalls(t, testBackend)
withTestProcess("issue1374", t, func(p *proc.Target, fixture protest.Fixture) {
setFileBreakpoint(p, t, fixture.Source, 7)
assertNoError(p.Continue(), t, "First Continue")
assertLineNumber(p, t, 7, "Did not continue to correct location (first continue),")
assertNoError(proc.EvalExpressionWithCalls(p, p.SelectedGoroutine(), "getNum()", normalLoadConfig, true), t, "Call")
err := p.Continue()
if _, isexited := err.(proc.ErrProcessExited); !isexited {
regs, _ := p.CurrentThread().Registers()
f, l, _ := p.BinInfo().PCToLine(regs.PC())
t.Fatalf("expected process exited error got %v at %s:%d", err, f, l)
}
})
}
func TestIssue1432(t *testing.T) {
// Check that taking the address of a struct, casting it into a pointer to
// the struct's type and then accessing a member field will still:
// - perform auto-dereferencing on struct member access
// - yield a Variable that's ultimately assignable (i.e. has an address)
withTestProcess("issue1432", t, func(p *proc.Target, fixture protest.Fixture) {
assertNoError(p.Continue(), t, "Continue")
svar := evalVariable(p, t, "s")
t.Logf("%#x", svar.Addr)
scope, err := proc.GoroutineScope(p, p.CurrentThread())
assertNoError(err, t, "GoroutineScope()")
err = scope.SetVariable(fmt.Sprintf("(*\"main.s\")(%#x).i", svar.Addr), "10")
assertNoError(err, t, "SetVariable")
})
}
func TestGoroutinesInfoLimit(t *testing.T) {
withTestProcess("teststepconcurrent", t, func(p *proc.Target, fixture protest.Fixture) {
setFileBreakpoint(p, t, fixture.Source, 37)
assertNoError(p.Continue(), t, "Continue()")
gcount := 0
nextg := 0
const goroutinesInfoLimit = 10
for nextg >= 0 {
oldnextg := nextg
var gs []*proc.G
var err error
gs, nextg, err = proc.GoroutinesInfo(p, nextg, goroutinesInfoLimit)
assertNoError(err, t, fmt.Sprintf("GoroutinesInfo(%d, %d)", oldnextg, goroutinesInfoLimit))
gcount += len(gs)
t.Logf("got %d goroutines\n", len(gs))
}
t.Logf("number of goroutines: %d\n", gcount)
gs, _, err := proc.GoroutinesInfo(p, 0, 0)
assertNoError(err, t, "GoroutinesInfo(0, 0)")
t.Logf("number of goroutines (full scan): %d\n", gcount)
if len(gs) != gcount {
t.Fatalf("mismatch in the number of goroutines %d %d\n", gcount, len(gs))
}
})
}
func TestIssue1469(t *testing.T) {
withTestProcess("issue1469", t, func(p *proc.Target, fixture protest.Fixture) {
setFileBreakpoint(p, t, fixture.Source, 13)
assertNoError(p.Continue(), t, "Continue()")
gid2thread := make(map[int][]proc.Thread)
for _, thread := range p.ThreadList() {
g, _ := proc.GetG(thread)
if g == nil {
continue
}
gid2thread[g.ID] = append(gid2thread[g.ID], thread)
}
for gid := range gid2thread {
if len(gid2thread[gid]) > 1 {
t.Logf("too many threads running goroutine %d", gid)
for _, thread := range gid2thread[gid] {
t.Logf("\tThread %d", thread.ThreadID())
frames, err := proc.ThreadStacktrace(thread, 20)
if err != nil {
t.Logf("\t\tcould not get stacktrace %v", err)
}
for _, frame := range frames {
t.Logf("\t\t%#x at %s:%d (systemstack: %v)", frame.Call.PC, frame.Call.File, frame.Call.Line, frame.SystemStack)
}
}
}
}
})
}
func TestDeadlockBreakpoint(t *testing.T) {
skipOn(t, "upstream issue - https://github.com/golang/go/issues/29322", "pie")
deadlockBp := proc.FatalThrow
if !goversion.VersionAfterOrEqual(runtime.Version(), 1, 11) {
deadlockBp = proc.UnrecoveredPanic
}
withTestProcess("testdeadlock", t, func(p *proc.Target, fixture protest.Fixture) {
assertNoError(p.Continue(), t, "Continue()")
bp := p.CurrentThread().Breakpoint()
if bp.Breakpoint == nil || bp.Name != deadlockBp {
t.Fatalf("did not stop at deadlock breakpoint %v", bp)
}
})
}
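// findSource reports whether source appears in the sources slice.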
func findSource(source string, sources []string) bool {
for _, s := range sources {
if s == source {
return true
}
}
return false
}
func TestListImages(t *testing.T) {
pluginFixtures := protest.WithPlugins(t, protest.AllNonOptimized, "plugin1/", "plugin2/")
withTestProcessArgs("plugintest", t, ".", []string{pluginFixtures[0].Path, pluginFixtures[1].Path}, protest.AllNonOptimized, func(p *proc.Target, fixture protest.Fixture) {
if !findSource(fixture.Source, p.BinInfo().Sources) {
t.Fatalf("could not find %s in sources: %q\n", fixture.Source, p.BinInfo().Sources)
}
assertNoError(p.Continue(), t, "first continue")
f, l := currentLineNumber(p, t)
plugin1Found := false
t.Logf("Libraries before %s:%d:", f, l)
for _, image := range p.BinInfo().Images {
t.Logf("\t%#x %q err:%v", image.StaticBase, image.Path, image.LoadError())
if image.Path == pluginFixtures[0].Path {
plugin1Found = true
}
}
if !plugin1Found {
t.Fatalf("Could not find plugin1")
}
if !findSource(fixture.Source, p.BinInfo().Sources) {
// Source files for the base program must be available even after a plugin is loaded. Issue #2074.
t.Fatalf("could not find %s in sources (after loading plugin): %q\n", fixture.Source, p.BinInfo().Sources)
}
assertNoError(p.Continue(), t, "second continue")
f, l = currentLineNumber(p, t)
plugin1Found, plugin2Found := false, false
t.Logf("Libraries after %s:%d:", f, l)
for _, image := range p.BinInfo().Images {
t.Logf("\t%#x %q err:%v", image.StaticBase, image.Path, image.LoadError())
switch image.Path {
case pluginFixtures[0].Path:
plugin1Found = true
case pluginFixtures[1].Path:
plugin2Found = true
}
}
if !plugin1Found {
t.Fatalf("Could not find plugin1")
}
if !plugin2Found {
t.Fatalf("Could not find plugin2")
}
})
}
func TestAncestors(t *testing.T) {
if !goversion.VersionAfterOrEqual(runtime.Version(), 1, 11) {
t.Skip("not supported on Go <= 1.10")
}
savedGodebug := os.Getenv("GODEBUG")
os.Setenv("GODEBUG", "tracebackancestors=100")
defer os.Setenv("GODEBUG", savedGodebug)
withTestProcess("testnextprog", t, func(p *proc.Target, fixture protest.Fixture) {
setFunctionBreakpoint(p, t, "main.testgoroutine")
assertNoError(p.Continue(), t, "Continue()")
as, err := proc.Ancestors(p, p.SelectedGoroutine(), 1000)
assertNoError(err, t, "Ancestors")
t.Logf("ancestors: %#v\n", as)
if len(as) != 1 {
t.Fatalf("expected only one ancestor got %d", len(as))
}
mainFound := false
for i, a := range as {
astack, err := a.Stack(100)
assertNoError(err, t, fmt.Sprintf("Ancestor %d stack", i))
t.Logf("ancestor %d\n", i)
logStacktrace(t, p, astack)
for _, frame := range astack {
if frame.Current.Fn != nil && frame.Current.Fn.Name == "main.main" {
mainFound = true
}
}
}
if !mainFound {
t.Fatal("could not find main.main function in ancestors")
}
})
}
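// testCallConcurrentCheckReturns counts the threads running goroutine gid1 or
// gid2 that have return values from an injected call, checking that the call
// injected on gid1 returned 11 and the one injected on gid2 returned 12.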
func testCallConcurrentCheckReturns(p *proc.Target, t *testing.T, gid1, gid2 int) int {
found := 0
for _, thread := range p.ThreadList() {
g, _ := proc.GetG(thread)
if g == nil || (g.ID != gid1 && g.ID != gid2) {
continue
}
retvals := thread.Common().ReturnValues(normalLoadConfig)
if len(retvals) == 0 {
continue
}
n, _ := constant.Int64Val(retvals[0].Value)
t.Logf("injection on goroutine %d (thread %d) returned %v\n", g.ID, thread.ThreadID(), n)
switch g.ID {
case gid1:
if n != 11 {
t.Errorf("wrong return value for goroutine %d", g.ID)
}
found++
case gid2:
if n != 12 {
t.Errorf("wrong return value for goroutine %d", g.ID)
}
found++
}
}
return found
}
func TestCallConcurrent(t *testing.T) {
skipOn(t, "broken", "freebsd")
protest.MustSupportFunctionCalls(t, testBackend)
withTestProcess("teststepconcurrent", t, func(p *proc.Target, fixture protest.Fixture) {
bp := setFileBreakpoint(p, t, fixture.Source, 24)
assertNoError(p.Continue(), t, "Continue()")
//_, err := p.ClearBreakpoint(bp.Addr)
//assertNoError(err, t, "ClearBreakpoint() returned an error")
gid1 := p.SelectedGoroutine().ID
t.Logf("starting injection in %d / %d", p.SelectedGoroutine().ID, p.CurrentThread().ThreadID())
assertNoError(proc.EvalExpressionWithCalls(p, p.SelectedGoroutine(), "Foo(10, 1)", normalLoadConfig, false), t, "EvalExpressionWithCalls()")
returned := testCallConcurrentCheckReturns(p, t, gid1, -1)
curthread := p.CurrentThread()
if curbp := curthread.Breakpoint(); curbp.Breakpoint == nil || curbp.LogicalID() != bp.LogicalID() || returned > 0 {
t.Logf("skipping test, the call injection terminated before we hit a breakpoint in a different thread")
return
}
err := p.ClearBreakpoint(bp.Addr)
assertNoError(err, t, "ClearBreakpoint() returned an error")
gid2 := p.SelectedGoroutine().ID
t.Logf("starting second injection in %d / %d", p.SelectedGoroutine().ID, p.CurrentThread().ThreadID())
assertNoError(proc.EvalExpressionWithCalls(p, p.SelectedGoroutine(), "Foo(10, 2)", normalLoadConfig, false), t, "EvalExpressioniWithCalls")
for {
returned += testCallConcurrentCheckReturns(p, t, gid1, gid2)
if returned >= 2 {
break
}
t.Logf("Continuing... %d", returned)
assertNoError(p.Continue(), t, "Continue()")
}
p.Continue()
})
}
func TestPluginStepping(t *testing.T) {
pluginFixtures := protest.WithPlugins(t, protest.AllNonOptimized, "plugin1/", "plugin2/")
testseq2Args(".", []string{pluginFixtures[0].Path, pluginFixtures[1].Path}, protest.AllNonOptimized, t, "plugintest2", "", []seqTest{
{contContinue, 41},
{contStep, "plugin1.go:9"},
{contStep, "plugin1.go:10"},
{contStep, "plugin1.go:11"},
{contNext, "plugin1.go:12"},
{contNext, "plugintest2.go:41"},
{contNext, "plugintest2.go:42"},
{contStep, "plugin2.go:22"},
{contNext, "plugin2.go:23"},
{contNext, "plugin2.go:26"},
{contNext, "plugintest2.go:42"}})
}
func TestIssue1601(t *testing.T) {
protest.MustHaveCgo(t)
// Tests that recursive types involving C qualifiers and typedefs are parsed correctly.
withTestProcess("issue1601", t, func(p *proc.Target, fixture protest.Fixture) {
assertNoError(p.Continue(), t, "Continue")
evalVariable(p, t, "C.globalq")
})
}
func TestIssue1615(t *testing.T) {
// A breakpoint condition that tests for string equality with a constant string shouldn't fail with a 'string too long for comparison' error
withTestProcess("issue1615", t, func(p *proc.Target, fixture protest.Fixture) {
bp := setFileBreakpoint(p, t, fixture.Source, 19)
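// Build the condition AST comparing the variable s against the long constant string below.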
bp.UserBreaklet().Cond = &ast.BinaryExpr{
Op: token.EQL,
X: &ast.Ident{Name: "s"},
Y: &ast.BasicLit{Kind: token.STRING, Value: `"projects/my-gcp-project-id-string/locations/us-central1/queues/my-task-queue-name"`},
}
assertNoError(p.Continue(), t, "Continue")
assertLineNumber(p, t, 19, "")
})
}
func TestCgoStacktrace2(t *testing.T) {
skipOn(t, "upstream issue", "windows")
skipOn(t, "broken", "386")
skipOn(t, "broken", "arm64")
protest.MustHaveCgo(t)
// If a panic happens during cgo execution the stacktrace should show the C
// function that caused the problem.
withTestProcess("cgosigsegvstack", t, func(p *proc.Target, fixture protest.Fixture) {
p.Continue()
frames, err := proc.ThreadStacktrace(p.CurrentThread(), 100)
assertNoError(err, t, "Stacktrace()")
logStacktrace(t, p, frames)
m := stacktraceCheck(t, []string{"C.sigsegv", "C.testfn", "main.main"}, frames)
if m == nil {
t.Fatal("see previous loglines")
}
})
}
func TestIssue1656(t *testing.T) {
skipUnlessOn(t, "amd64 only", "amd64")
withTestProcess("issue1656/", t, func(p *proc.Target, fixture protest.Fixture) {
setFileBreakpoint(p, t, filepath.ToSlash(filepath.Join(fixture.BuildDir, "main.s")), 5)
assertNoError(p.Continue(), t, "Continue()")
t.Logf("step1\n")
assertNoError(p.Step(), t, "Step()")
assertLineNumber(p, t, 8, "wrong line number after first step")
t.Logf("step2\n")
assertNoError(p.Step(), t, "Step()")
assertLineNumber(p, t, 9, "wrong line number after second step")
})
}
func TestBreakpointConfusionOnResume(t *testing.T) {
// Checks that SetCurrentBreakpoint, (*Thread).StepInstruction and
// native.(*Thread).singleStep all agree on which breakpoint the thread is
// stopped at.
// This test checks for a regression introduced when fixing Issue #1656
skipUnlessOn(t, "amd64 only", "amd64")
withTestProcess("nopbreakpoint/", t, func(p *proc.Target, fixture protest.Fixture) {
maindots := filepath.ToSlash(filepath.Join(fixture.BuildDir, "main.s"))
maindotgo := filepath.ToSlash(filepath.Join(fixture.BuildDir, "main.go"))
setFileBreakpoint(p, t, maindots, 5) // line immediately after the NOP
assertNoError(p.Continue(), t, "First Continue")
assertLineNumber(p, t, 5, "not on main.s:5")
setFileBreakpoint(p, t, maindots, 4) // sets a breakpoint on the NOP line, which will be one byte before the breakpoint we currently are stopped at.
setFileBreakpoint(p, t, maindotgo, 18) // set one extra breakpoint so that we can recover execution and check the global variable g
assertNoError(p.Continue(), t, "Second Continue")
gvar := evalVariable(p, t, "g")
if n, _ := constant.Int64Val(gvar.Value); n != 1 {
t.Fatalf("wrong value of global variable 'g': %v (expected 1)", gvar.Value)
}
})
}
func TestIssue1736(t *testing.T) {
withTestProcess("testvariables2", t, func(p *proc.Target, fixture protest.Fixture) {
assertNoError(p.Continue(), t, "Continue()")
ch1BufVar := evalVariable(p, t, "*(ch1.buf)")
q := fmt.Sprintf("*(*%q)(%d)", ch1BufVar.DwarfType.Common().Name, ch1BufVar.Addr)
t.Logf("%s", q)
ch1BufVar2 := evalVariable(p, t, q)
if ch1BufVar2.Unreadable != nil {
t.Fatal(ch1BufVar2.Unreadable)
}
})
}
func TestIssue1817(t *testing.T) {
// Setting a breakpoint on a line that doesn't have any PC addresses marked
// as is_stmt should work.
withTestProcess("issue1817", t, func(p *proc.Target, fixture protest.Fixture) {
setFileBreakpoint(p, t, fixture.Source, 16)
})
}
func TestListPackagesBuildInfo(t *testing.T) {
withTestProcess("pkgrenames", t, func(p *proc.Target, fixture protest.Fixture) {
pkgs := p.BinInfo().ListPackagesBuildInfo(true)
t.Logf("returned %d", len(pkgs))
if len(pkgs) < 10 {
t.Errorf("very few packages returned")
}
for _, pkg := range pkgs {
t.Logf("%q %q", pkg.ImportPath, pkg.DirectoryPath)
const _fixtures = "_fixtures"
fidx := strings.Index(pkg.ImportPath, _fixtures)
if fidx < 0 {
continue
}
if !strings.HasSuffix(strings.Replace(pkg.DirectoryPath, "\\", "/", -1), pkg.ImportPath[fidx:]) {
t.Errorf("unexpected suffix: %q %q", pkg.ImportPath, pkg.DirectoryPath)
}
}
})
}
func TestIssue1795(t *testing.T) {
// When doing midstack inlining the Go compiler sometimes (always?) emits
// the toplevel inlined call with ranges that do not cover the inlining of
// other nested inlined calls.
// For example if a function A calls B which calls C and both the calls to
// B and C are inlined the DW_AT_inlined_subroutine entry for A might have
// ranges that do not cover the ranges of the inlined call to C.
// This is probably a violation of the DWARF standard (it's unclear) but we
// might as well support it as best as possible anyway.
if !goversion.VersionAfterOrEqual(runtime.Version(), 1, 13) {
t.Skip("Test not relevant to Go < 1.13")
}
withTestProcessArgs("issue1795", t, ".", []string{}, protest.EnableInlining|protest.EnableOptimization, func(p *proc.Target, fixture protest.Fixture) {
assertNoError(p.Continue(), t, "Continue()")
assertLineNumber(p, t, 12, "wrong line number after Continue,")
assertNoError(p.Next(), t, "Next()")
assertLineNumber(p, t, 13, "wrong line number after Next,")
})
withTestProcessArgs("issue1795", t, ".", []string{}, protest.EnableInlining|protest.EnableOptimization, func(p *proc.Target, fixture protest.Fixture) {
setFunctionBreakpoint(p, t, "regexp.(*Regexp).doExecute")
assertNoError(p.Continue(), t, "Continue()")
assertLineNumber(p, t, 12, "wrong line number after Continue (1),")
assertNoError(p.Continue(), t, "Continue()")
frames, err := proc.ThreadStacktrace(p.CurrentThread(), 40)
assertNoError(err, t, "ThreadStacktrace()")
logStacktrace(t, p, frames)
if err := checkFrame(frames[0], "regexp.(*Regexp).doExecute", "", 0, false); err != nil {
t.Errorf("Wrong frame 0: %v", err)
}
if err := checkFrame(frames[1], "regexp.(*Regexp).doMatch", "", 0, true); err != nil {
t.Errorf("Wrong frame 1: %v", err)
}
if err := checkFrame(frames[2], "regexp.(*Regexp).MatchString", "", 0, true); err != nil {
t.Errorf("Wrong frame 2: %v", err)
}
if err := checkFrame(frames[3], "main.main", fixture.Source, 12, false); err != nil {
t.Errorf("Wrong frame 3: %v", err)
}
})
}
func BenchmarkConditionalBreakpoints(b *testing.B) {
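// Force a single pass: the benchmark body runs the fixture to completion once,
// evaluating the breakpoint condition (chosen so it never matches) at every hit
// of the breakpoint on line 12, which is the cost being measured.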
b.N = 1
withTestProcess("issue1549", b, func(p *proc.Target, fixture protest.Fixture) {
bp := setFileBreakpoint(p, b, fixture.Source, 12)
bp.UserBreaklet().Cond = &ast.BinaryExpr{
Op: token.EQL,
X: &ast.Ident{Name: "value"},
Y: &ast.BasicLit{Kind: token.INT, Value: "-1"},
}
err := p.Continue()
if _, exited := err.(proc.ErrProcessExited); !exited {
b.Fatalf("Unexpected error on Continue(): %v", err)
}
})
}
func TestBackwardNextGeneral(t *testing.T) {
if testBackend != "rr" {
t.Skip("Reverse stepping test needs rr")
}
testseq2(t, "testnextprog", "main.helloworld", []seqTest{
{contContinue, 13},
{contNext, 14},
{contReverseNext, 13},
{contReverseNext, 34},
{contReverseNext, 28},
{contReverseNext, 27},
{contReverseNext, 26},
{contReverseNext, 24},
{contReverseNext, 23},
{contReverseNext, 31},
{contReverseNext, 26},
{contReverseNext, 24},
{contReverseNext, 23},
{contReverseNext, 31},
{contReverseNext, 26},
{contReverseNext, 24},
{contReverseNext, 23},
{contReverseNext, 20},
{contReverseNext, 19},
{contReverseNext, 17},
{contReverseNext, 39},
{contReverseNext, 38},
{contReverseNext, 37},
})
}
func TestBackwardStepOutGeneral(t *testing.T) {
if testBackend != "rr" {
t.Skip("Reverse stepping test needs rr")
}
testseq2(t, "testnextprog", "main.helloworld", []seqTest{
{contContinue, 13},
{contNext, 14},
{contReverseStepout, 34},
{contReverseStepout, 39},
})
}
func TestBackwardStepGeneral(t *testing.T) {
if testBackend != "rr" {
t.Skip("Reverse stepping test needs rr")
}
testseq2(t, "testnextprog", "main.helloworld", []seqTest{
{contContinue, 13},
{contNext, 14},
{contReverseStep, 13},
{contReverseStep, 34},
{contReverseStep, 28},
{contReverseNext, 27}, // skip fmt.Printf
{contReverseStep, 26},
{contReverseStep, 24},
{contReverseStep, 23},
{contReverseStep, 11},
{contReverseNext, 10}, // skip time.Sleep
{contReverseStep, 9},
{contReverseStep, 31},
{contReverseStep, 26},
{contReverseStep, 24},
{contReverseStep, 23},
{contReverseStep, 11},
{contReverseNext, 10}, // skip time.Sleep
{contReverseStep, 9},
{contReverseStep, 31},
{contReverseStep, 26},
{contReverseStep, 24},
{contReverseStep, 23},
{contReverseStep, 20},
{contReverseStep, 19},
{contReverseStep, 17},
{contReverseStep, 39},
{contReverseStep, 38},
{contReverseStep, 37},
})
}
func TestBackwardNextDeferPanic(t *testing.T) {
if testBackend != "rr" {
t.Skip("Reverse stepping test needs rr")
}
if goversion.VersionAfterOrEqual(runtime.Version(), 1, 18) {
testseq2(t, "defercall", "", []seqTest{
{contContinue, 12},
{contReverseNext, 11},
{contReverseNext, 10},
{contReverseNext, 9},
{contReverseNext, 27},
{contContinueToBreakpoint, 12}, // skip first call to sampleFunction
{contContinueToBreakpoint, 6}, // go to call to sampleFunction through deferreturn
{contReverseNext, -1}, // runtime.deferreturn, maybe we should try to skip this
{contReverseStepout, 13},
{contReverseNext, 12},
{contReverseNext, 11},
{contReverseNext, 10},
{contReverseNext, 9},
{contReverseNext, 27},
{contContinueToBreakpoint, 18}, // go to panic call
{contNext, 6}, // panic so the deferred call happens
{contReverseNext, 18},
{contReverseNext, 17},
{contReverseNext, 16},
{contReverseNext, 15},
{contReverseNext, 23},
{contReverseNext, 22},
{contReverseNext, 21},
{contReverseNext, 28},
})
} else {
testseq2(t, "defercall", "", []seqTest{
{contContinue, 12},
{contReverseNext, 11},
{contReverseNext, 10},
{contReverseNext, 9},
{contReverseNext, 27},
{contContinueToBreakpoint, 12}, // skip first call to sampleFunction
{contContinueToBreakpoint, 6}, // go to call to sampleFunction through deferreturn
{contReverseNext, 13},
{contReverseNext, 12},
{contReverseNext, 11},
{contReverseNext, 10},
{contReverseNext, 9},
{contReverseNext, 27},
{contContinueToBreakpoint, 18}, // go to panic call
{contNext, 6}, // panic so the deferred call happens
{contReverseNext, 18},
{contReverseNext, 17},
{contReverseNext, 16},
{contReverseNext, 15},
{contReverseNext, 23},
{contReverseNext, 22},
{contReverseNext, 21},
{contReverseNext, 28},
})
}
}
func TestIssue1925(t *testing.T) {
// Calling a function should not leave cached goroutine information in an
// inconsistent state.
// In particular the stepInstructionOut function called at the end of a
// 'call' procedure should clean the G cache like every other function
// altering the state of the target process.
protest.MustSupportFunctionCalls(t, testBackend)
withTestProcess("testvariables2", t, func(p *proc.Target, fixture protest.Fixture) {
assertNoError(p.Continue(), t, "Continue()")
assertNoError(proc.EvalExpressionWithCalls(p, p.SelectedGoroutine(), "afunc(2)", normalLoadConfig, true), t, "Call")
t.Logf("%v\n", p.SelectedGoroutine().CurrentLoc)
if loc := p.SelectedGoroutine().CurrentLoc; loc.File != fixture.Source {
t.Errorf("wrong location for selected goroutine after call: %s:%d", loc.File, loc.Line)
}
})
}
func TestStepIntoWrapperForEmbeddedPointer(t *testing.T) {
skipOn(t, "N/A", "linux", "386", "pie") // skipping wrappers doesn't work on linux/386/PIE due to the use of get_pc_thunk
// Under some circumstances (when using an interface to call a method on an
// embedded field, see _fixtures/ifaceembcall.go) the compiler will
// autogenerate a wrapper function that uses a tail call (i.e. it ends in
// an unconditional jump instruction to a different function).
// Delve should be able to step into this tail call.
testseq2(t, "ifaceembcall", "", []seqTest{
{contContinue, 28}, // main.main, the line calling iface.PtrReceiver()
{contStep, 18}, // main.(*A).PtrReceiver
{contStep, 19},
{contStepout, 28},
{contContinueToBreakpoint, 29}, // main.main, the line calling iface.NonPtrReceiver()
{contStep, 22}, // main.(A).NonPtrReceiver
{contStep, 23},
{contStepout, 29}})
// same test but with next instead of stepout
if goversion.VersionAfterOrEqual(runtime.Version(), 1, 14) && runtime.GOARCH != "386" && !goversion.VersionAfterOrEqualRev(runtime.Version(), 1, 15, 4) {
// Line numbers generated for versions 1.14 through 1.15.3 on any system except linux/386
testseq2(t, "ifaceembcall", "", []seqTest{
{contContinue, 28}, // main.main, the line calling iface.PtrReceiver()
{contStep, 18}, // main.(*A).PtrReceiver
{contNext, 19},
{contNext, 19},
{contNext, 28},
{contContinueToBreakpoint, 29}, // main.main, the line calling iface.NonPtrReceiver()
{contStep, 22},
{contNext, 23},
{contNext, 23},
{contNext, 29}})
} else {
testseq2(t, "ifaceembcall", "", []seqTest{
{contContinue, 28}, // main.main, the line calling iface.PtrReceiver()
{contStep, 18}, // main.(*A).PtrReceiver
{contNext, 19},
{contNext, 28},
{contContinueToBreakpoint, 29}, // main.main, the line calling iface.NonPtrReceiver()
{contStep, 22},
{contNext, 23},
{contNext, 29}})
}
}
func TestRefreshCurThreadSelGAfterContinueOnceError(t *testing.T) {
// Issue #2078:
// Tests that on macOS/lldb the current thread/selected goroutine are
// refreshed after ContinueOnce returns an error due to a segmentation
// fault.
skipUnlessOn(t, "N/A", "darwin", "lldb")
withTestProcess("issue2078", t, func(p *proc.Target, fixture protest.Fixture) {
setFileBreakpoint(p, t, fixture.Source, 4)
assertNoError(p.Continue(), t, "Continue() (first)")
if p.Continue() == nil {
t.Fatalf("Second continue did not return an error")
}
g := p.SelectedGoroutine()
if g.CurrentLoc.Line != 9 {
t.Fatalf("wrong current location %s:%d (expected :9)", g.CurrentLoc.File, g.CurrentLoc.Line)
}
})
}
func TestStepoutOneliner(t *testing.T) {
// The heuristic detecting autogenerated wrappers when stepping out should
// not skip oneliner functions.
withTestProcess("issue2086", t, func(p *proc.Target, fixture protest.Fixture) {
assertNoError(p.Continue(), t, "Continue()")
assertLineNumber(p, t, 15, "after first continue")
assertNoError(p.StepOut(), t, "StepOut()")
if fn := p.BinInfo().PCToFunc(currentPC(p, t)); fn == nil || fn.Name != "main.T.m" {
t.Fatalf("wrong function after stepout %#v", fn)
}
assertNoError(p.StepOut(), t, "second StepOut()")
if fn := p.BinInfo().PCToFunc(currentPC(p, t)); fn == nil || fn.Name != "main.main" {
t.Fatalf("wrong fnuction after second stepout %#v", fn)
}
})
}
func TestRequestManualStopWhileStopped(t *testing.T) {
// Requesting a manual stop while stopped shouldn't cause problems (issue #2138).
withTestProcess("issue2138", t, func(p *proc.Target, fixture protest.Fixture) {
resumed := make(chan struct{})
setFileBreakpoint(p, t, fixture.Source, 8)
assertNoError(p.Continue(), t, "Continue() 1")
p.ResumeNotify(resumed)
go func() {
<-resumed
time.Sleep(1 * time.Second)
p.RequestManualStop()
}()
t.Logf("at time.Sleep call")
assertNoError(p.Continue(), t, "Continue() 2")
t.Logf("manually stopped")
p.RequestManualStop()
p.RequestManualStop()
p.RequestManualStop()
resumed = make(chan struct{})
p.ResumeNotify(resumed)
go func() {
<-resumed
time.Sleep(1 * time.Second)
p.RequestManualStop()
}()
t.Logf("resuming sleep")
assertNoError(p.Continue(), t, "Continue() 3")
t.Logf("done")
})
}
func TestStepOutPreservesGoroutine(t *testing.T) {
// Checks that StepOut preserves the currently selected goroutine.
skipOn(t, "broken", "freebsd")
rand.Seed(time.Now().Unix())
withTestProcess("issue2113", t, func(p *proc.Target, fixture protest.Fixture) {
assertNoError(p.Continue(), t, "Continue()")
logState := func() {
g := p.SelectedGoroutine()
var goid int = -42
if g != nil {
goid = g.ID
}
pc := currentPC(p, t)
f, l, fn := p.BinInfo().PCToLine(pc)
var fnname string = "???"
if fn != nil {
fnname = fn.Name
}
t.Logf("goroutine %d at %s:%d in %s", goid, f, l, fnname)
}
logState()
gs, _, err := proc.GoroutinesInfo(p, 0, 0)
assertNoError(err, t, "GoroutinesInfo")
candg := []*proc.G{}
bestg := []*proc.G{}
for _, g := range gs {
frames, err := g.Stacktrace(20, 0)
assertNoError(err, t, "Stacktrace")
for _, frame := range frames {
if frame.Call.Fn != nil && frame.Call.Fn.Name == "main.coroutine" {
candg = append(candg, g)
if g.Thread != nil && frames[0].Call.Fn != nil && strings.HasPrefix(frames[0].Call.Fn.Name, "runtime.") {
bestg = append(bestg, g)
}
break
}
}
}
var pickg *proc.G
if len(bestg) > 0 {
pickg = bestg[rand.Intn(len(bestg))]
t.Logf("selected goroutine %d (best)\n", pickg.ID)
} else {
pickg = candg[rand.Intn(len(candg))]
t.Logf("selected goroutine %d\n", pickg.ID)
}
goid := pickg.ID
assertNoError(p.SwitchGoroutine(pickg), t, "SwitchGoroutine")
logState()
err = p.StepOut()
if err != nil {
_, isexited := err.(proc.ErrProcessExited)
if !isexited {
assertNoError(err, t, "StepOut()")
} else {
return
}
}
logState()
g2 := p.SelectedGoroutine()
if g2 == nil {
t.Fatalf("no selected goroutine after stepout")
} else if g2.ID != goid {
t.Fatalf("unexpected selected goroutine %d", g2.ID)
}
})
}
func TestIssue2319(t *testing.T) {
// Check to make sure we don't crash on startup when the target is
// a binary with a mix of DWARF-5 C++ compilation units and
// DWARF-4 Go compilation units.
// Require CGO, since we need to use the external linker for this test.
protest.MustHaveCgo(t)
// The test fixture uses linux/amd64 assembly and a *.syso file
// that is linux/amd64, so skip for other architectures.
if runtime.GOOS != "linux" || runtime.GOARCH != "amd64" {
t.Skipf("skipping since not linux/amd64")
}
// Skip unless on 1.14 or later. The test fixture uses a *.syso
// file, which in 1.13 is not loaded unless we're in internal
// linking mode (we need external linking here).
if !goversion.VersionAfterOrEqual(runtime.Version(), 1, 14) {
t.Skip("test contains fixture that is specific to go 1.14+")
}
fixture := protest.BuildFixture("issue2319/", protest.BuildModeExternalLinker)
// Load up the binary and make sure there are no crashes.
bi := proc.NewBinaryInfo("linux", "amd64")
assertNoError(bi.LoadBinaryInfo(fixture.Path, 0, nil), t, "LoadBinaryInfo")
}
func TestDump(t *testing.T) {
if runtime.GOOS == "freebsd" || (runtime.GOOS == "darwin" && testBackend == "native") {
t.Skip("not supported")
}
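// convertRegisters renders every DWARF register of a thread as a " name=value" string so register state can be compared between the live process and the core dump.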
convertRegisters := func(arch *proc.Arch, dregs op.DwarfRegisters) string {
dregs.Reg(^uint64(0))
buf := new(bytes.Buffer)
for i := 0; i < dregs.CurrentSize(); i++ {
reg := dregs.Reg(uint64(i))
if reg == nil {
continue
}
name, _, repr := arch.DwarfRegisterToString(i, reg)
fmt.Fprintf(buf, " %s=%s", name, repr)
}
return buf.String()
}
convertThread := func(thread proc.Thread) string {
regs, err := thread.Registers()
assertNoError(err, t, fmt.Sprintf("Thread registers %d", thread.ThreadID()))
arch := thread.BinInfo().Arch
dregs := arch.RegistersToDwarfRegisters(0, regs)
return fmt.Sprintf("%08d %s", thread.ThreadID(), convertRegisters(arch, *dregs))
}
convertThreads := func(threads []proc.Thread) []string {
r := make([]string, len(threads))
for i := range threads {
r[i] = convertThread(threads[i])
}
sort.Strings(r)
return r
}
convertGoroutine := func(g *proc.G) string {
threadID := 0
if g.Thread != nil {
threadID = g.Thread.ThreadID()
}
return fmt.Sprintf("%d pc=%#x sp=%#x bp=%#x lr=%#x gopc=%#x startpc=%#x systemstack=%v thread=%d", g.ID, g.PC, g.SP, g.BP, g.LR, g.GoPC, g.StartPC, g.SystemStack, threadID)
}
convertFrame := func(arch *proc.Arch, frame *proc.Stackframe) string {
return fmt.Sprintf("currentPC=%#x callPC=%#x frameOff=%#x\n", frame.Current.PC, frame.Call.PC, frame.FrameOffset())
}
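// makeDump writes a core dump of the target process to corePath and reopens it as a core target.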
makeDump := func(p *proc.Target, corePath, exePath string, flags proc.DumpFlags) *proc.Target {
fh, err := os.Create(corePath)
assertNoError(err, t, "Create()")
var state proc.DumpState
p.Dump(fh, flags, &state)
assertNoError(state.Err, t, "Dump()")
if state.ThreadsDone != state.ThreadsTotal || state.MemDone != state.MemTotal || !state.AllDone || state.Dumping || state.Canceled {
t.Fatalf("bad DumpState %#v", &state)
}
c, err := core.OpenCore(corePath, exePath, nil)
assertNoError(err, t, "OpenCore()")
return c
}
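// testDump compares pid, threads, goroutines, stack frames and local variables between the live process p and the core dump target c.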
testDump := func(p, c *proc.Target) {
if p.Pid() != c.Pid() {
t.Errorf("Pid mismatch %x %x", p.Pid(), c.Pid())
}
threads := convertThreads(p.ThreadList())
cthreads := convertThreads(c.ThreadList())
if len(threads) != len(cthreads) {
t.Errorf("Thread number mismatch %d %d", len(threads), len(cthreads))
}
for i := range threads {
if threads[i] != cthreads[i] {
t.Errorf("Thread mismatch\nlive:\t%s\ncore:\t%s", threads[i], cthreads[i])
}
}
gos, _, err := proc.GoroutinesInfo(p, 0, 0)
assertNoError(err, t, "GoroutinesInfo() - live process")
cgos, _, err := proc.GoroutinesInfo(c, 0, 0)
assertNoError(err, t, "GoroutinesInfo() - core dump")
if len(gos) != len(cgos) {
t.Errorf("Goroutine number mismatch %d %d", len(gos), len(cgos))
}
var scope, cscope *proc.EvalScope
for i := range gos {
if convertGoroutine(gos[i]) != convertGoroutine(cgos[i]) {
t.Errorf("Goroutine mismatch\nlive:\t%s\ncore:\t%s", convertGoroutine(gos[i]), convertGoroutine(cgos[i]))
}
frames, err := gos[i].Stacktrace(20, 0)
assertNoError(err, t, fmt.Sprintf("Stacktrace for goroutine %d - live process", gos[i].ID))
cframes, err := cgos[i].Stacktrace(20, 0)
assertNoError(err, t, fmt.Sprintf("Stacktrace for goroutine %d - core dump", gos[i].ID))
if len(frames) != len(cframes) {
t.Errorf("Frame number mismatch for goroutine %d: %d %d", gos[i].ID, len(frames), len(cframes))
}
for j := range frames {
if convertFrame(p.BinInfo().Arch, &frames[j]) != convertFrame(p.BinInfo().Arch, &cframes[j]) {
t.Errorf("Frame mismatch %d.%d\nlive:\t%s\ncore:\t%s", gos[i].ID, j, convertFrame(p.BinInfo().Arch, &frames[j]), convertFrame(p.BinInfo().Arch, &cframes[j]))
}
if frames[j].Call.Fn != nil && frames[j].Call.Fn.Name == "main.main" {
scope = proc.FrameToScope(p, p.Memory(), gos[i], frames[j:]...)
cscope = proc.FrameToScope(c, c.Memory(), cgos[i], cframes[j:]...)
}
}
}
vars, err := scope.LocalVariables(normalLoadConfig)
assertNoError(err, t, "LocalVariables - live process")
cvars, err := cscope.LocalVariables(normalLoadConfig)
assertNoError(err, t, "LocalVariables - core dump")
if len(vars) != len(cvars) {
t.Errorf("Variable number mismatch %d %d", len(vars), len(cvars))
}
for i := range vars {
varstr := vars[i].Name + "=" + api.ConvertVar(vars[i]).SinglelineString()
cvarstr := cvars[i].Name + "=" + api.ConvertVar(cvars[i]).SinglelineString()
if strings.Contains(varstr, "(unreadable") {
// errors reading from unmapped memory differ between live process and core
continue
}
if varstr != cvarstr {
t.Errorf("Variable mismatch %s %s", varstr, cvarstr)
}
}
}
withTestProcess("testvariables2", t, func(p *proc.Target, fixture protest.Fixture) {
assertNoError(p.Continue(), t, "Continue()")
corePath := filepath.Join(fixture.BuildDir, "coredump")
corePathPlatIndep := filepath.Join(fixture.BuildDir, "coredump-indep")
t.Logf("testing normal dump")
c := makeDump(p, corePath, fixture.Path, 0)
defer os.Remove(corePath)
testDump(p, c)
if runtime.GOOS == "linux" && runtime.GOARCH == "amd64" {
// No reason to do this test on other goos/goarch because they use the
// platform-independent format anyway.
t.Logf("testing platform-independent dump")
c2 := makeDump(p, corePathPlatIndep, fixture.Path, proc.DumpPlatformIndependent)
defer os.Remove(corePathPlatIndep)
testDump(p, c2)
}
})
}
func TestCompositeMemoryWrite(t *testing.T) {
if runtime.GOARCH != "amd64" {
t.Skip("only valid on amd64")
}
skipOn(t, "not implemented", "freebsd")
withTestProcess("fputest/", t, func(p *proc.Target, fixture protest.Fixture) {
getregs := func() (pc, rax, xmm1 uint64) {
regs, err := p.CurrentThread().Registers()
assertNoError(err, t, "Registers")
fmtregs, err := regs.Slice(true)
assertNoError(err, t, "register slice")
var xmm1buf []byte
for _, reg := range fmtregs {
switch strings.ToLower(reg.Name) {
case "rax":
rax = reg.Reg.Uint64Val
case "xmm1":
xmm1buf = reg.Reg.Bytes
}
}
xmm1 = binary.LittleEndian.Uint64(xmm1buf[:8])
return regs.PC(), rax, xmm1
}
const fakeAddress = 0xbeef0000
getmem := func(mem proc.MemoryReader) uint64 {
buf := make([]byte, 8)
_, err := mem.ReadMemory(buf, fakeAddress)
assertNoError(err, t, "ReadMemory")
return binary.LittleEndian.Uint64(buf)
}
assertNoError(p.Continue(), t, "Continue()")
oldPc, oldRax, oldXmm1 := getregs()
t.Logf("PC %#x AX %#x XMM1 %#x", oldPc, oldRax, oldXmm1)
memRax, err := proc.NewCompositeMemory(p, []op.Piece{{Size: 0, Val: 0, Kind: op.RegPiece}}, fakeAddress)
assertNoError(err, t, "NewCompositeMemory (rax)")
memXmm1, err := proc.NewCompositeMemory(p, []op.Piece{{Size: 0, Val: 18, Kind: op.RegPiece}}, fakeAddress)
assertNoError(err, t, "NewCompositeMemory (xmm1)")
if memRax := getmem(memRax); memRax != oldRax {
t.Errorf("reading rax memory, expected %#x got %#x", oldRax, memRax)
}
if memXmm1 := getmem(memXmm1); memXmm1 != oldXmm1 {
t.Errorf("reading xmm1 memory, expected %#x got %#x", oldXmm1, memXmm1)
}
_, err = memRax.WriteMemory(0xbeef0000, []byte{0xef, 0xbe, 0x0d, 0xf0, 0xef, 0xbe, 0x0d, 0xf0})
assertNoError(err, t, "WriteMemory (rax)")
_, err = memXmm1.WriteMemory(0xbeef0000, []byte{0xef, 0xbe, 0x0d, 0xf0, 0xef, 0xbe, 0x0d, 0xf0})
assertNoError(err, t, "WriteMemory (xmm1)")
newPc, newRax, newXmm1 := getregs()
t.Logf("PC %#x AX %#x XMM1 %#x", newPc, newRax, newXmm1)
const tgt = 0xf00dbeeff00dbeef
if newRax != tgt {
t.Errorf("reading rax register, expected %#x, got %#x", uint64(tgt), newRax)
}
if newXmm1 != tgt {
t.Errorf("reading xmm1 register, expected %#x, got %#x", uint64(tgt), newXmm1)
}
})
}
func TestVariablesWithExternalLinking(t *testing.T) {
protest.MustHaveCgo(t)
// Tests that macOSDebugFrameBugWorkaround works.
// See:
// https://github.com/golang/go/issues/25841
// https://github.com/go-delve/delve/issues/2346
withTestProcessArgs("testvariables2", t, ".", []string{}, protest.BuildModeExternalLinker, func(p *proc.Target, fixture protest.Fixture) {
assertNoError(p.Continue(), t, "Continue()")
str1Var := evalVariable(p, t, "str1")
if str1Var.Unreadable != nil {
t.Fatalf("variable str1 is unreadable: %v", str1Var.Unreadable)
}
t.Logf("%#v", str1Var)
if constant.StringVal(str1Var.Value) != "01234567890" {
t.Fatalf("wrong value for str1: %v", str1Var.Value)
}
})
}
func TestWatchpointsBasic(t *testing.T) {
skipOn(t, "not implemented", "freebsd")
skipOn(t, "not implemented", "386")
skipOn(t, "see https://github.com/go-delve/delve/issues/2768", "windows")
protest.AllowRecording(t)
position1 := 19
position5 := 41
if runtime.GOARCH == "arm64" {
position1 = 18
position5 = 40
}
withTestProcess("databpeasy", t, func(p *proc.Target, fixture protest.Fixture) {
setFunctionBreakpoint(p, t, "main.main")
setFileBreakpoint(p, t, fixture.Source, 21) // Position 2 breakpoint
setFileBreakpoint(p, t, fixture.Source, 27) // Position 4 breakpoint
assertNoError(p.Continue(), t, "Continue 0")
assertLineNumber(p, t, 13, "Continue 0") // Position 0
scope, err := proc.GoroutineScope(p, p.CurrentThread())
assertNoError(err, t, "GoroutineScope")
bp, err := p.SetWatchpoint(scope, "globalvar1", proc.WatchWrite, nil)
assertNoError(err, t, "SetDataBreakpoint(write-only)")
assertNoError(p.Continue(), t, "Continue 1")
assertLineNumber(p, t, position1, "Continue 1") // Position 1
if curbp := p.CurrentThread().Breakpoint().Breakpoint; curbp == nil || (curbp.LogicalID() != bp.LogicalID()) {
t.Fatal("breakpoint not set")
}
assertNoError(p.ClearBreakpoint(bp.Addr), t, "ClearBreakpoint")
assertNoError(p.Continue(), t, "Continue 2")
assertLineNumber(p, t, 21, "Continue 2") // Position 2
_, err = p.SetWatchpoint(scope, "globalvar1", proc.WatchWrite|proc.WatchRead, nil)
assertNoError(err, t, "SetDataBreakpoint(read-write)")
assertNoError(p.Continue(), t, "Continue 3")
assertLineNumber(p, t, 22, "Continue 3") // Position 3
p.ClearBreakpoint(bp.Addr)
assertNoError(p.Continue(), t, "Continue 4")
assertLineNumber(p, t, 27, "Continue 4") // Position 4
t.Logf("setting final breakpoint")
_, err = p.SetWatchpoint(scope, "globalvar1", proc.WatchWrite, nil)
assertNoError(err, t, "SetDataBreakpoint(write-only, again)")
assertNoError(p.Continue(), t, "Continue 5")
assertLineNumber(p, t, position5, "Continue 5") // Position 5
})
}
func TestWatchpointCounts(t *testing.T) {
skipOn(t, "not implemented", "freebsd")
skipOn(t, "not implemented", "386")
skipOn(t, "see https://github.com/go-delve/delve/issues/2768", "windows")
protest.AllowRecording(t)
withTestProcess("databpcountstest", t, func(p *proc.Target, fixture protest.Fixture) {
setFunctionBreakpoint(p, t, "main.main")
assertNoError(p.Continue(), t, "Continue 0")
scope, err := proc.GoroutineScope(p, p.CurrentThread())
assertNoError(err, t, "GoroutineScope")
bp, err := p.SetWatchpoint(scope, "globalvar1", proc.WatchWrite, nil)
assertNoError(err, t, "SetWatchpoint(write-only)")
for {
if err := p.Continue(); err != nil {
if _, exited := err.(proc.ErrProcessExited); exited {
break
}
assertNoError(err, t, "Continue()")
}
}
t.Logf("TotalHitCount: %d", bp.UserBreaklet().TotalHitCount)
if bp.UserBreaklet().TotalHitCount != 200 {
t.Fatalf("Wrong TotalHitCount for the breakpoint (%d)", bp.UserBreaklet().TotalHitCount)
}
if len(bp.UserBreaklet().HitCount) != 2 {
t.Fatalf("Wrong number of goroutines for breakpoint (%d)", len(bp.UserBreaklet().HitCount))
}
for _, v := range bp.UserBreaklet().HitCount {
if v != 100 {
t.Fatalf("Wrong HitCount for breakpoint (%v)", bp.UserBreaklet().HitCount)
}
}
})
}
func TestManualStopWhileStopped(t *testing.T) {
// Checks that RequestManualStop sent to a stopped thread does not cause the target process to die.
withTestProcess("loopprog", t, func(p *proc.Target, fixture protest.Fixture) {
asyncCont := func(done chan struct{}) {
defer close(done)
err := p.Continue()
t.Logf("%v\n", err)
if err != nil {
panic(err)
}
for _, th := range p.ThreadList() {
if th.Breakpoint().Breakpoint != nil {
t.Logf("unexpected stop at breakpoint: %v", th.Breakpoint().Breakpoint)
panic("unexpected stop at breakpoint")
}
}
}
const (
repeatsSlow = 3
repeatsFast = 5
)
for i := 0; i < repeatsSlow; i++ {
t.Logf("Continue %d (slow)", i)
done := make(chan struct{})
go asyncCont(done)
time.Sleep(1 * time.Second)
p.RequestManualStop()
time.Sleep(1 * time.Second)
p.RequestManualStop()
time.Sleep(1 * time.Second)
<-done
}
for i := 0; i < repeatsFast; i++ {
t.Logf("Continue %d (fast)", i)
rch := make(chan struct{})
done := make(chan struct{})
p.ResumeNotify(rch)
go asyncCont(done)
<-rch
p.RequestManualStop()
p.RequestManualStop()
<-done
}
})
}
func TestDwrapStartLocation(t *testing.T) {
// Tests that the start location of a goroutine is unwrapped in Go 1.17 and later.
withTestProcess("goroutinestackprog", t, func(p *proc.Target, fixture protest.Fixture) {
setFunctionBreakpoint(p, t, "main.stacktraceme")
assertNoError(p.Continue(), t, "Continue()")
gs, _, err := proc.GoroutinesInfo(p, 0, 0)
assertNoError(err, t, "GoroutinesInfo")
found := false
for _, g := range gs {
startLoc := g.StartLoc(p)
if startLoc.Fn == nil {
continue
}
t.Logf("%#v\n", startLoc.Fn.Name)
if startLoc.Fn.Name == "main.agoroutine" {
found = true
break
}
}
if !found {
t.Errorf("could not find any goroutine with a start location of main.agoroutine")
}
})
}
func TestWatchpointStack(t *testing.T) {
skipOn(t, "not implemented", "freebsd")
skipOn(t, "not implemented", "386")
skipOn(t, "see https://github.com/go-delve/delve/issues/2768", "windows")
protest.AllowRecording(t)
position1 := 17
if runtime.GOARCH == "arm64" {
position1 = 16
}
withTestProcess("databpstack", t, func(p *proc.Target, fixture protest.Fixture) {
setFileBreakpoint(p, t, fixture.Source, 11) // Position 0 breakpoint
clearlen := len(p.Breakpoints().M)
assertNoError(p.Continue(), t, "Continue 0")
assertLineNumber(p, t, 11, "Continue 0") // Position 0
scope, err := proc.GoroutineScope(p, p.CurrentThread())
assertNoError(err, t, "GoroutineScope")
_, err = p.SetWatchpoint(scope, "w", proc.WatchWrite, nil)
assertNoError(err, t, "SetDataBreakpoint(write-only)")
watchbpnum := 3
if recorded, _ := p.Recorded(); recorded {
watchbpnum = 4
}
if len(p.Breakpoints().M) != clearlen+watchbpnum {
// want 1 watchpoint, 1 stack resize breakpoint, 1 out of scope sentinel (2 if recorded)
t.Errorf("wrong number of breakpoints after setting watchpoint: %d", len(p.Breakpoints().M)-clearlen)
}
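// Find the address of the watchpoint's out-of-scope sentinel breakpoint (roughly the return address of the frame owning the watched variable).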
var retaddr uint64
for _, bp := range p.Breakpoints().M {
for _, breaklet := range bp.Breaklets {
if breaklet.Kind&proc.WatchOutOfScopeBreakpoint != 0 {
retaddr = bp.Addr
break
}
}
}
// Note: for recorded processes retaddr will not always be the return
// address, ~50% of the times it will be the address of the CALL
// instruction preceding the return address, this does not matter for this
// test.
_, err = p.SetBreakpoint(retaddr, proc.UserBreakpoint, nil)
assertNoError(err, t, "SetBreakpoint")
if len(p.Breakpoints().M) != clearlen+watchbpnum {
// want 1 watchpoint, 1 stack resize breakpoint, 1 out of scope sentinel (which is also a user breakpoint) (and another out of scope sentinel if recorded)
t.Errorf("wrong number of breakpoints after setting watchpoint: %d", len(p.Breakpoints().M)-clearlen)
}
assertNoError(p.Continue(), t, "Continue 1")
assertLineNumber(p, t, position1, "Continue 1") // Position 1
assertNoError(p.Continue(), t, "Continue 2")
t.Logf("%#v", p.CurrentThread().Breakpoint().Breakpoint)
assertLineNumber(p, t, 24, "Continue 2") // Position 2 (watchpoint gone out of scope)
if len(p.Breakpoints().M) != clearlen+1 {
// want 1 user breakpoint set at retaddr
t.Errorf("wrong number of breakpoints after watchpoint goes out of scope: %d", len(p.Breakpoints().M)-clearlen)
}
if len(p.Breakpoints().WatchOutOfScope) != 1 {
t.Errorf("wrong number of out-of-scope watchpoints after watchpoint goes out of scope: %d", len(p.Breakpoints().WatchOutOfScope))
}
err = p.ClearBreakpoint(retaddr)
assertNoError(err, t, "ClearBreakpoint")
if len(p.Breakpoints().M) != clearlen {
// want no additional breakpoints after removing the user breakpoint
t.Errorf("wrong number of breakpoints after removing user breakpoint: %d", len(p.Breakpoints().M)-clearlen)
}
})
}
func TestWatchpointStackBackwardsOutOfScope(t *testing.T) {
skipUnlessOn(t, "only for recorded targets", "rr")
protest.AllowRecording(t)
withTestProcess("databpstack", t, func(p *proc.Target, fixture protest.Fixture) {
setFileBreakpoint(p, t, fixture.Source, 11) // Position 0 breakpoint
clearlen := len(p.Breakpoints().M)
assertNoError(p.Continue(), t, "Continue 0")
assertLineNumber(p, t, 11, "Continue 0") // Position 0
scope, err := proc.GoroutineScope(p, p.CurrentThread())
assertNoError(err, t, "GoroutineScope")
_, err = p.SetWatchpoint(scope, "w", proc.WatchWrite, nil)
assertNoError(err, t, "SetDataBreakpoint(write-only)")
assertNoError(p.Continue(), t, "Continue 1")
assertLineNumber(p, t, 17, "Continue 1") // Position 1
p.ChangeDirection(proc.Backward)
assertNoError(p.Continue(), t, "Continue 2")
t.Logf("%#v", p.CurrentThread().Breakpoint().Breakpoint)
assertLineNumber(p, t, 16, "Continue 2") // Position 1 again (because of inverted movement)
assertNoError(p.Continue(), t, "Continue 3")
t.Logf("%#v", p.CurrentThread().Breakpoint().Breakpoint)
assertLineNumber(p, t, 11, "Continue 3") // Position 0 (breakpoint 1 hit)
assertNoError(p.Continue(), t, "Continue 4")
t.Logf("%#v", p.CurrentThread().Breakpoint().Breakpoint)
assertLineNumber(p, t, 23, "Continue 4") // Position 2 (watchpoint gone out of scope)
if len(p.Breakpoints().M) != clearlen {
t.Errorf("wrong number of breakpoints after watchpoint goes out of scope: %d", len(p.Breakpoints().M)-clearlen)
}
if len(p.Breakpoints().WatchOutOfScope) != 1 {
t.Errorf("wrong number of out-of-scope watchpoints after watchpoint goes out of scope: %d", len(p.Breakpoints().WatchOutOfScope))
}
if len(p.Breakpoints().M) != clearlen {
// no additional breakpoints should remain (no user breakpoint was set in this test)
t.Errorf("wrong number of breakpoints after watchpoint went out of scope: %d", len(p.Breakpoints().M)-clearlen)
}
})
}
func TestSetOnFunctions(t *testing.T) {
// Assigning one function variable to another with the set command should fail with an error.
// Issue #2691
withTestProcess("goroutinestackprog", t, func(p *proc.Target, fixture protest.Fixture) {
setFunctionBreakpoint(p, t, "main.main")
assertNoError(p.Continue(), t, "Continue()")
scope, err := proc.GoroutineScope(p, p.CurrentThread())
assertNoError(err, t, "GoroutineScope")
err = scope.SetVariable("main.func1", "main.func2")
if err == nil {
t.Fatal("expected error when assigning between function variables")
}
})
}
func TestSetYMMRegister(t *testing.T) {
skipUnlessOn(t, "N/A", "darwin", "amd64")
// Checks that setting an XMM register works. This exercises the
// workaround for a bug in debugserver.
// See issue #2767.
withTestProcess("setymmreg/", t, func(p *proc.Target, fixture protest.Fixture) {
setFunctionBreakpoint(p, t, "main.asmFunc")
assertNoError(p.Continue(), t, "Continue()")
getReg := func(pos string) *op.DwarfRegister {
regs := getRegisters(p, t)
arch := p.BinInfo().Arch
dregs := arch.RegistersToDwarfRegisters(0, regs)
r := dregs.Reg(regnum.AMD64_XMM0)
t.Logf("%s: %#v", pos, r)
return r
}
getReg("before")
p.CurrentThread().SetReg(regnum.AMD64_XMM0, op.DwarfRegisterFromBytes([]byte{
0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44,
0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44,
0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44,
0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44}))
assertNoError(p.CurrentThread().StepInstruction(), t, "StepInstruction")
xmm0 := getReg("after")
for i := range xmm0.Bytes {
if xmm0.Bytes[i] != 0x44 {
t.Fatalf("wrong register value")
}
}
})
}
func TestNilPtrDerefInBreakInstr(t *testing.T) {
// Checks that having a breakpoint on the exact instruction that causes a
// nil pointer dereference does not cause problems.
var asmfile string
switch runtime.GOARCH {
case "amd64":
asmfile = "main_amd64.s"
case "arm64":
asmfile = "main_arm64.s"
case "386":
asmfile = "main_386.s"
default:
t.Fatalf("assembly file for %s not provided", runtime.GOARCH)
}
withTestProcess("asmnilptr/", t, func(p *proc.Target, fixture protest.Fixture) {
f := filepath.Join(fixture.BuildDir, asmfile)
f = strings.Replace(f, "\\", "/", -1)
setFileBreakpoint(p, t, f, 5)
t.Logf("first continue")
assertNoError(p.Continue(), t, "Continue()")
t.Logf("second continue")
err := p.Continue()
if runtime.GOOS == "darwin" && err != nil && err.Error() == "bad access" {
// this is also ok
return
}
assertNoError(err, t, "Continue()")
bp := p.CurrentThread().Breakpoint()
if bp != nil {
t.Logf("%#v\n", bp.Breakpoint)
}
if bp == nil || (bp.Name != proc.UnrecoveredPanic) {
t.Fatalf("no breakpoint hit or wrong breakpoint hit: %#v", bp)
}
})
}
testing/acceptance_test.py
import os
import sys
import textwrap
import types
import attr
import py
import pytest
from _pytest.compat import importlib_metadata
from _pytest.main import ExitCode
def prepend_pythonpath(*dirs):
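"""Prepend the given directories to PYTHONPATH, keeping any existing value at the end."""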
cur = os.getenv("PYTHONPATH")
if cur:
dirs += (cur,)
return os.pathsep.join(str(p) for p in dirs)
class TestGeneralUsage:
def test_config_error(self, testdir):
testdir.copy_example("conftest_usageerror/conftest.py")
result = testdir.runpytest(testdir.tmpdir)
assert result.ret == ExitCode.USAGE_ERROR
result.stderr.fnmatch_lines(["*ERROR: hello"])
result.stdout.fnmatch_lines(["*pytest_unconfigure_called"])
def test_root_conftest_syntax_error(self, testdir):
testdir.makepyfile(conftest="raise SyntaxError\n")
result = testdir.runpytest()
result.stderr.fnmatch_lines(["*raise SyntaxError*"])
assert result.ret != 0
def test_early_hook_error_issue38_1(self, testdir):
testdir.makeconftest(
"""
def pytest_sessionstart():
0 / 0
"""
)
result = testdir.runpytest(testdir.tmpdir)
assert result.ret != 0
# tracestyle is native by default for hook failures
result.stdout.fnmatch_lines(
["*INTERNALERROR*File*conftest.py*line 2*", "*0 / 0*"]
)
result = testdir.runpytest(testdir.tmpdir, "--fulltrace")
assert result.ret != 0
# tracestyle is native by default for hook failures
result.stdout.fnmatch_lines(
["*INTERNALERROR*def pytest_sessionstart():*", "*INTERNALERROR*0 / 0*"]
)
def test_early_hook_configure_error_issue38(self, testdir):
testdir.makeconftest(
"""
def pytest_configure():
0 / 0
"""
)
result = testdir.runpytest(testdir.tmpdir)
assert result.ret != 0
# here we get it on stderr
result.stderr.fnmatch_lines(
["*INTERNALERROR*File*conftest.py*line 2*", "*0 / 0*"]
)
def test_file_not_found(self, testdir):
result = testdir.runpytest("asd")
assert result.ret != 0
result.stderr.fnmatch_lines(["ERROR: file not found*asd"])
def test_file_not_found_unconfigure_issue143(self, testdir):
testdir.makeconftest(
"""
def pytest_configure():
print("---configure")
def pytest_unconfigure():
print("---unconfigure")
"""
)
result = testdir.runpytest("-s", "asd")
assert result.ret == ExitCode.USAGE_ERROR
result.stderr.fnmatch_lines(["ERROR: file not found*asd"])
result.stdout.fnmatch_lines(["*---configure", "*---unconfigure"])
def test_config_preparse_plugin_option(self, testdir):
testdir.makepyfile(
pytest_xyz="""
def pytest_addoption(parser):
parser.addoption("--xyz", dest="xyz", action="store")
"""
)
testdir.makepyfile(
test_one="""
def test_option(pytestconfig):
assert pytestconfig.option.xyz == "123"
"""
)
result = testdir.runpytest("-p", "pytest_xyz", "--xyz=123", syspathinsert=True)
assert result.ret == 0
result.stdout.fnmatch_lines(["*1 passed*"])
@pytest.mark.parametrize("load_cov_early", [True, False])
def test_early_load_setuptools_name(self, testdir, monkeypatch, load_cov_early):
testdir.makepyfile(mytestplugin1_module="")
testdir.makepyfile(mytestplugin2_module="")
testdir.makepyfile(mycov_module="")
testdir.syspathinsert()
loaded = []
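# Fake importlib.metadata entry points/distributions so the test controls which setuptools plugins exist and records their load order.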
@attr.s
class DummyEntryPoint:
name = attr.ib()
module = attr.ib()
group = "pytest11"
def load(self):
__import__(self.module)
loaded.append(self.name)
return sys.modules[self.module]
entry_points = [
DummyEntryPoint("myplugin1", "mytestplugin1_module"),
DummyEntryPoint("myplugin2", "mytestplugin2_module"),
DummyEntryPoint("mycov", "mycov_module"),
]
@attr.s
class DummyDist:
entry_points = attr.ib()
files = ()
def my_dists():
return (DummyDist(entry_points),)
monkeypatch.setattr(importlib_metadata, "distributions", my_dists)
params = ("-p", "mycov") if load_cov_early else ()
testdir.runpytest_inprocess(*params)
if load_cov_early:
assert loaded == ["mycov", "myplugin1", "myplugin2"]
else:
assert loaded == ["myplugin1", "myplugin2", "mycov"]
def test_assertion_magic(self, testdir):
p = testdir.makepyfile(
"""
def test_this():
x = 0
assert x
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(["> assert x", "E assert 0"])
assert result.ret == 1
def test_nested_import_error(self, testdir):
p = testdir.makepyfile(
"""
import import_fails
def test_this():
assert import_fails.a == 1
"""
)
testdir.makepyfile(import_fails="import does_not_work")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
[
"ImportError while importing test module*",
"*No module named *does_not_work*",
]
)
assert result.ret == 2
def test_not_collectable_arguments(self, testdir):
p1 = testdir.makepyfile("")
p2 = testdir.makefile(".pyc", "123")
result = testdir.runpytest(p1, p2)
assert result.ret == ExitCode.USAGE_ERROR
result.stderr.fnmatch_lines(
[
"ERROR: not found: {}".format(p2),
"(no name {!r} in any of [[][]])".format(str(p2)),
"",
]
)
@pytest.mark.filterwarnings("default")
def test_better_reporting_on_conftest_load_failure(self, testdir, request):
"""Show a user-friendly traceback on conftest import failures (#486, #3332)"""
testdir.makepyfile("")
testdir.makeconftest(
"""
def foo():
import qwerty
foo()
"""
)
result = testdir.runpytest("--help")
result.stdout.fnmatch_lines(
"""
*--version*
*warning*conftest.py*
"""
)
result = testdir.runpytest()
dirname = request.node.name + "0"
exc_name = (
"ModuleNotFoundError" if sys.version_info >= (3, 6) else "ImportError"
)
result.stderr.fnmatch_lines(
[
"ImportError while loading conftest '*{sep}{dirname}{sep}conftest.py'.".format(
dirname=dirname, sep=os.sep
),
"conftest.py:3: in <module>",
" foo()",
"conftest.py:2: in foo",
" import qwerty",
"E {}: No module named 'qwerty'".format(exc_name),
]
)
def test_early_skip(self, testdir):
testdir.mkdir("xyz")
testdir.makeconftest(
"""
import pytest
def pytest_collect_directory():
pytest.skip("early")
"""
)
result = testdir.runpytest()
assert result.ret == ExitCode.NO_TESTS_COLLECTED
result.stdout.fnmatch_lines(["*1 skip*"])
def test_issue88_initial_file_multinodes(self, testdir):
testdir.copy_example("issue88_initial_file_multinodes")
p = testdir.makepyfile("def test_hello(): pass")
result = testdir.runpytest(p, "--collect-only")
result.stdout.fnmatch_lines(["*MyFile*test_issue88*", "*Module*test_issue88*"])
def test_issue93_initialnode_importing_capturing(self, testdir):
testdir.makeconftest(
"""
import sys
print("should not be seen")
sys.stderr.write("stder42\\n")
"""
)
result = testdir.runpytest()
assert result.ret == ExitCode.NO_TESTS_COLLECTED
result.stdout.no_fnmatch_line("*should not be seen*")
assert "stderr42" not in result.stderr.str()
def test_conftest_printing_shows_if_error(self, testdir):
testdir.makeconftest(
"""
print("should be seen")
assert 0
"""
)
result = testdir.runpytest()
assert result.ret != 0
assert "should be seen" in result.stdout.str()
@pytest.mark.skipif(
not hasattr(py.path.local, "mksymlinkto"),
reason="symlink not available on this platform",
)
def test_chdir(self, testdir):
testdir.tmpdir.join("py").mksymlinkto(py._pydir)
p = testdir.tmpdir.join("main.py")
p.write(
textwrap.dedent(
"""\
import sys, os
sys.path.insert(0, '')
import py
print(py.__file__)
print(py.__path__)
os.chdir(os.path.dirname(os.getcwd()))
print(py.log)
"""
)
)
result = testdir.runpython(p)
assert not result.ret
def test_issue109_sibling_conftests_not_loaded(self, testdir):
sub1 = testdir.mkdir("sub1")
sub2 = testdir.mkdir("sub2")
sub1.join("conftest.py").write("assert 0")
result = testdir.runpytest(sub2)
assert result.ret == ExitCode.NO_TESTS_COLLECTED
sub2.ensure("__init__.py")
p = sub2.ensure("test_hello.py")
result = testdir.runpytest(p)
assert result.ret == ExitCode.NO_TESTS_COLLECTED
result = testdir.runpytest(sub1)
assert result.ret == ExitCode.USAGE_ERROR
def test_directory_skipped(self, testdir):
testdir.makeconftest(
"""
import pytest
def pytest_ignore_collect():
pytest.skip("intentional")
"""
)
testdir.makepyfile("def test_hello(): pass")
result = testdir.runpytest()
assert result.ret == ExitCode.NO_TESTS_COLLECTED
result.stdout.fnmatch_lines(["*1 skipped*"])
def test_multiple_items_per_collector_byid(self, testdir):
c = testdir.makeconftest(
"""
import pytest
class MyItem(pytest.Item):
def runtest(self):
pass
class MyCollector(pytest.File):
def collect(self):
return [MyItem(name="xyz", parent=self)]
def pytest_collect_file(path, parent):
if path.basename.startswith("conftest"):
return MyCollector(path, parent)
"""
)
result = testdir.runpytest(c.basename + "::" + "xyz")
assert result.ret == 0
result.stdout.fnmatch_lines(["*1 pass*"])
def test_skip_on_generated_funcarg_id(self, testdir):
testdir.makeconftest(
"""
import pytest
def pytest_generate_tests(metafunc):
metafunc.parametrize('x', [3], ids=['hello-123'])
def pytest_runtest_setup(item):
print(item.keywords)
if 'hello-123' in item.keywords:
pytest.skip("hello")
assert 0
"""
)
p = testdir.makepyfile("""def test_func(x): pass""")
res = testdir.runpytest(p)
assert res.ret == 0
res.stdout.fnmatch_lines(["*1 skipped*"])
def test_direct_addressing_selects(self, testdir):
p = testdir.makepyfile(
"""
def pytest_generate_tests(metafunc):
metafunc.parametrize('i', [1, 2], ids=["1", "2"])
def test_func(i):
pass
"""
)
res = testdir.runpytest(p.basename + "::" + "test_func[1]")
assert res.ret == 0
res.stdout.fnmatch_lines(["*1 passed*"])
def test_direct_addressing_notfound(self, testdir):
p = testdir.makepyfile(
"""
def test_func():
pass
"""
)
res = testdir.runpytest(p.basename + "::" + "test_notfound")
assert res.ret
res.stderr.fnmatch_lines(["*ERROR*not found*"])
def test_docstring_on_hookspec(self):
from _pytest import hookspec
for name, value in vars(hookspec).items():
if name.startswith("pytest_"):
assert value.__doc__, "no docstring for %s" % name
def test_initialization_error_issue49(self, testdir):
testdir.makeconftest(
"""
def pytest_configure():
x
"""
)
result = testdir.runpytest()
assert result.ret == 3 # internal error
result.stderr.fnmatch_lines(["INTERNAL*pytest_configure*", "INTERNAL*x*"])
assert "sessionstarttime" not in result.stderr.str()
@pytest.mark.parametrize("lookfor", ["test_fun.py::test_a"])
def test_issue134_report_error_when_collecting_member(self, testdir, lookfor):
testdir.makepyfile(
test_fun="""
def test_a():
pass
def"""
)
result = testdir.runpytest(lookfor)
result.stdout.fnmatch_lines(["*SyntaxError*"])
if "::" in lookfor:
result.stderr.fnmatch_lines(["*ERROR*"])
assert result.ret == 4 # usage error only if item not found
def test_report_all_failed_collections_initargs(self, testdir):
testdir.makeconftest(
"""
from _pytest.main import ExitCode
def pytest_sessionfinish(exitstatus):
assert exitstatus == ExitCode.USAGE_ERROR
print("pytest_sessionfinish_called")
"""
)
testdir.makepyfile(test_a="def", test_b="def")
result = testdir.runpytest("test_a.py::a", "test_b.py::b")
result.stderr.fnmatch_lines(["*ERROR*test_a.py::a*", "*ERROR*test_b.py::b*"])
result.stdout.fnmatch_lines(["pytest_sessionfinish_called"])
assert result.ret == ExitCode.USAGE_ERROR
@pytest.mark.usefixtures("recwarn")
def test_namespace_import_doesnt_confuse_import_hook(self, testdir):
"""
Ref #383. Python 3.3's namespace package messed with our import hooks
Importing a module that didn't exist, even if the ImportError was
gracefully handled, would make our test crash.
Use recwarn here to silence this warning in Python 2.7:
ImportWarning: Not importing directory '...\not_a_package': missing __init__.py
"""
testdir.mkdir("not_a_package")
p = testdir.makepyfile(
"""
try:
from not_a_package import doesnt_exist
except ImportError:
# We handle the import error gracefully here
pass
def test_whatever():
pass
"""
)
res = testdir.runpytest(p.basename)
assert res.ret == 0
def test_unknown_option(self, testdir):
result = testdir.runpytest("--qwlkej")
result.stderr.fnmatch_lines(
"""
*unrecognized*
"""
)
def test_getsourcelines_error_issue553(self, testdir, monkeypatch):
monkeypatch.setattr("inspect.getsourcelines", None)
p = testdir.makepyfile(
"""
def raise_error(obj):
raise IOError('source code not available')
import inspect
inspect.getsourcelines = raise_error
def test_foo(invalid_fixture):
pass
"""
)
res = testdir.runpytest(p)
res.stdout.fnmatch_lines(
["*source code not available*", "E*fixture 'invalid_fixture' not found"]
)
def test_plugins_given_as_strings(self, tmpdir, monkeypatch, _sys_snapshot):
"""test that str values passed to main() as `plugins` arg
are interpreted as module names to be imported and registered.
#855.
"""
with pytest.raises(ImportError) as excinfo:
pytest.main([str(tmpdir)], plugins=["invalid.module"])
assert "invalid" in str(excinfo.value)
p = tmpdir.join("test_test_plugins_given_as_strings.py")
p.write("def test_foo(): pass")
mod = types.ModuleType("myplugin")
monkeypatch.setitem(sys.modules, "myplugin", mod)
assert pytest.main(args=[str(tmpdir)], plugins=["myplugin"]) == 0
def test_parametrized_with_bytes_regex(self, testdir):
p = testdir.makepyfile(
"""
import re
import pytest
@pytest.mark.parametrize('r', [re.compile(b'foo')])
def test_stuff(r):
pass
"""
)
res = testdir.runpytest(p)
res.stdout.fnmatch_lines(["*1 passed*"])
def test_parametrized_with_null_bytes(self, testdir):
"""Test parametrization with values that contain null bytes and unicode characters (#2644, #2957)"""
p = testdir.makepyfile(
"""\
import pytest
@pytest.mark.parametrize("data", [b"\\x00", "\\x00", 'ação'])
def test_foo(data):
assert data
"""
)
res = testdir.runpytest(p)
res.assert_outcomes(passed=3)
class TestInvocationVariants:
def test_earlyinit(self, testdir):
p = testdir.makepyfile(
"""
import pytest
assert hasattr(pytest, 'mark')
"""
)
result = testdir.runpython(p)
assert result.ret == 0
def test_pydoc(self, testdir):
for name in ("py.test", "pytest"):
result = testdir.runpython_c("import {};help({})".format(name, name))
assert result.ret == 0
s = result.stdout.str()
assert "MarkGenerator" in s
def test_import_star_py_dot_test(self, testdir):
p = testdir.makepyfile(
"""
from py.test import *
#collect
#cmdline
#Item
# assert collect.Item is Item
# assert collect.Collector is Collector
main
skip
xfail
"""
)
result = testdir.runpython(p)
assert result.ret == 0
def test_import_star_pytest(self, testdir):
p = testdir.makepyfile(
"""
from pytest import *
#Item
#File
main
skip
xfail
"""
)
result = testdir.runpython(p)
assert result.ret == 0
def test_double_pytestcmdline(self, testdir):
p = testdir.makepyfile(
run="""
import pytest
pytest.main()
pytest.main()
"""
)
testdir.makepyfile(
"""
def test_hello():
pass
"""
)
result = testdir.runpython(p)
result.stdout.fnmatch_lines(["*1 passed*", "*1 passed*"])
def test_python_minus_m_invocation_ok(self, testdir):
p1 = testdir.makepyfile("def test_hello(): pass")
res = testdir.run(sys.executable, "-m", "pytest", str(p1))
assert res.ret == 0
def test_python_minus_m_invocation_fail(self, testdir):
p1 = testdir.makepyfile("def test_fail(): 0/0")
res = testdir.run(sys.executable, "-m", "pytest", str(p1))
assert res.ret == 1
def test_python_pytest_package(self, testdir):
p1 = testdir.makepyfile("def test_pass(): pass")
res = testdir.run(sys.executable, "-m", "pytest", str(p1))
assert res.ret == 0
res.stdout.fnmatch_lines(["*1 passed*"])
def test_equivalence_pytest_pytest(self):
assert pytest.main == py.test.cmdline.main
def test_invoke_with_invalid_type(self, capsys):
with pytest.raises(
TypeError, match="expected to be a list or tuple of strings, got: '-h'"
):
pytest.main("-h")
def test_invoke_with_path(self, tmpdir, capsys):
retcode = pytest.main(tmpdir)
assert retcode == ExitCode.NO_TESTS_COLLECTED
out, err = capsys.readouterr()
def test_invoke_plugin_api(self, testdir, capsys):
class MyPlugin:
def pytest_addoption(self, parser):
parser.addoption("--myopt")
pytest.main(["-h"], plugins=[MyPlugin()])
out, err = capsys.readouterr()
assert "--myopt" in out
def test_pyargs_importerror(self, testdir, monkeypatch):
monkeypatch.delenv("PYTHONDONTWRITEBYTECODE", False)
path = testdir.mkpydir("tpkg")
path.join("test_hello.py").write("raise ImportError")
result = testdir.runpytest("--pyargs", "tpkg.test_hello", syspathinsert=True)
assert result.ret != 0
result.stdout.fnmatch_lines(["collected*0*items*/*1*error"])
def test_pyargs_only_imported_once(self, testdir):
pkg = testdir.mkpydir("foo")
pkg.join("test_foo.py").write("print('hello from test_foo')\ndef test(): pass")
pkg.join("conftest.py").write(
"def pytest_configure(config): print('configuring')"
)
result = testdir.runpytest("--pyargs", "foo.test_foo", "-s", syspathinsert=True)
# should only import once
assert result.outlines.count("hello from test_foo") == 1
# should only configure once
assert result.outlines.count("configuring") == 1
def test_pyargs_filename_looks_like_module(self, testdir):
testdir.tmpdir.join("conftest.py").ensure()
testdir.tmpdir.join("t.py").write("def test(): pass")
result = testdir.runpytest("--pyargs", "t.py")
assert result.ret == ExitCode.OK
def test_cmdline_python_package(self, testdir, monkeypatch):
import warnings
monkeypatch.delenv("PYTHONDONTWRITEBYTECODE", False)
path = testdir.mkpydir("tpkg")
path.join("test_hello.py").write("def test_hello(): pass")
path.join("test_world.py").write("def test_world(): pass")
result = testdir.runpytest("--pyargs", "tpkg")
assert result.ret == 0
result.stdout.fnmatch_lines(["*2 passed*"])
result = testdir.runpytest("--pyargs", "tpkg.test_hello", syspathinsert=True)
assert result.ret == 0
result.stdout.fnmatch_lines(["*1 passed*"])
empty_package = testdir.mkpydir("empty_package")
monkeypatch.setenv("PYTHONPATH", str(empty_package), prepend=os.pathsep)
# the path which is not a package raises a warning on pypy;
# no idea why only pypy and not normal python warn about it here
with warnings.catch_warnings():
warnings.simplefilter("ignore", ImportWarning)
result = testdir.runpytest("--pyargs", ".")
assert result.ret == 0
result.stdout.fnmatch_lines(["*2 passed*"])
monkeypatch.setenv("PYTHONPATH", str(testdir), prepend=os.pathsep)
result = testdir.runpytest("--pyargs", "tpkg.test_missing", syspathinsert=True)
assert result.ret != 0
result.stderr.fnmatch_lines(["*not*found*test_missing*"])
def test_cmdline_python_namespace_package(self, testdir, monkeypatch):
"""
test --pyargs option with namespace packages (#1567)
Ref: https://packaging.python.org/guides/packaging-namespace-packages/
"""
monkeypatch.delenv("PYTHONDONTWRITEBYTECODE", raising=False)
search_path = []
for dirname in "hello", "world":
d = testdir.mkdir(dirname)
search_path.append(d)
ns = d.mkdir("ns_pkg")
ns.join("__init__.py").write(
"__import__('pkg_resources').declare_namespace(__name__)"
)
lib = ns.mkdir(dirname)
lib.ensure("__init__.py")
lib.join("test_{}.py".format(dirname)).write(
"def test_{}(): pass\ndef test_other():pass".format(dirname)
)
# The structure of the test directory is now:
# .
# ├── hello
# │ └── ns_pkg
# │ ├── __init__.py
# │ └── hello
# │ ├── __init__.py
# │ └── test_hello.py
# └── world
# └── ns_pkg
# ├── __init__.py
# └── world
# ├── __init__.py
# └── test_world.py
# NOTE: the different/reversed ordering is intentional here.
monkeypatch.setenv("PYTHONPATH", prepend_pythonpath(*search_path))
for p in search_path:
monkeypatch.syspath_prepend(p)
# mixed module and filenames:
monkeypatch.chdir("world")
result = testdir.runpytest("--pyargs", "-v", "ns_pkg.hello", "ns_pkg/world")
assert result.ret == 0
result.stdout.fnmatch_lines(
[
"test_hello.py::test_hello*PASSED*",
"test_hello.py::test_other*PASSED*",
"ns_pkg/world/test_world.py::test_world*PASSED*",
"ns_pkg/world/test_world.py::test_other*PASSED*",
"*4 passed in*",
]
)
# specify tests within a module
testdir.chdir()
result = testdir.runpytest(
"--pyargs", "-v", "ns_pkg.world.test_world::test_other"
)
assert result.ret == 0
result.stdout.fnmatch_lines(
["*test_world.py::test_other*PASSED*", "*1 passed*"]
)
def test_invoke_test_and_doctestmodules(self, testdir):
p = testdir.makepyfile(
"""
def test():
pass
"""
)
result = testdir.runpytest(str(p) + "::test", "--doctest-modules")
result.stdout.fnmatch_lines(["*1 passed*"])
def test_cmdline_python_package_symlink(self, testdir, monkeypatch):
"""
test --pyargs option with packages with path containing symlink can
have conftest.py in their package (#2985)
"""
# dummy check that we can actually create symlinks: on Windows `os.symlink` is available,
# but normal users require special admin privileges to create symlinks.
if sys.platform == "win32":
try:
os.symlink(
str(testdir.tmpdir.ensure("tmpfile")),
str(testdir.tmpdir.join("tmpfile2")),
)
except OSError as e:
pytest.skip(str(e.args[0]))
monkeypatch.delenv("PYTHONDONTWRITEBYTECODE", raising=False)
dirname = "lib"
d = testdir.mkdir(dirname)
foo = d.mkdir("foo")
foo.ensure("__init__.py")
lib = foo.mkdir("bar")
lib.ensure("__init__.py")
lib.join("test_bar.py").write(
"def test_bar(): pass\ndef test_other(a_fixture):pass"
)
lib.join("conftest.py").write(
"import pytest\[email protected]\ndef a_fixture():pass"
)
d_local = testdir.mkdir("local")
symlink_location = os.path.join(str(d_local), "lib")
os.symlink(str(d), symlink_location, target_is_directory=True)
# The structure of the test directory is now:
# .
# ├── local
# │ └── lib -> ../lib
# └── lib
# └── foo
# ├── __init__.py
# └── bar
# ├── __init__.py
# ├── conftest.py
# └── test_bar.py
# NOTE: the different/reversed ordering is intentional here.
search_path = ["lib", os.path.join("local", "lib")]
monkeypatch.setenv("PYTHONPATH", prepend_pythonpath(*search_path))
for p in search_path:
monkeypatch.syspath_prepend(p)
# module picked up in symlink-ed directory:
# It picks up local/lib/foo/bar (symlink) via sys.path.
result = testdir.runpytest("--pyargs", "-v", "foo.bar")
testdir.chdir()
assert result.ret == 0
if hasattr(py.path.local, "mksymlinkto"):
result.stdout.fnmatch_lines(
[
"lib/foo/bar/test_bar.py::test_bar PASSED*",
"lib/foo/bar/test_bar.py::test_other PASSED*",
"*2 passed*",
]
)
else:
result.stdout.fnmatch_lines(
[
"*lib/foo/bar/test_bar.py::test_bar PASSED*",
"*lib/foo/bar/test_bar.py::test_other PASSED*",
"*2 passed*",
]
)
def test_cmdline_python_package_not_exists(self, testdir):
result = testdir.runpytest("--pyargs", "tpkgwhatv")
assert result.ret
result.stderr.fnmatch_lines(["ERROR*file*or*package*not*found*"])
@pytest.mark.xfail(reason="decide: feature or bug")
def test_noclass_discovery_if_not_testcase(self, testdir):
testpath = testdir.makepyfile(
"""
import unittest
class TestHello(object):
def test_hello(self):
assert self.attr
class RealTest(unittest.TestCase, TestHello):
attr = 42
"""
)
reprec = testdir.inline_run(testpath)
reprec.assertoutcome(passed=1)
def test_doctest_id(self, testdir):
testdir.makefile(
".txt",
"""
>>> x=3
>>> x
4
""",
)
testid = "test_doctest_id.txt::test_doctest_id.txt"
expected_lines = [
"*= FAILURES =*",
"*_ ?doctest? test_doctest_id.txt _*",
"FAILED test_doctest_id.txt::test_doctest_id.txt",
"*= 1 failed in*",
]
result = testdir.runpytest(testid, "-rf", "--tb=short")
result.stdout.fnmatch_lines(expected_lines)
# Ensure that re-running it will still handle it as
# doctest.DocTestFailure, which was not the case before when
# re-importing doctest, but not creating a new RUNNER_CLASS.
result = testdir.runpytest(testid, "-rf", "--tb=short")
result.stdout.fnmatch_lines(expected_lines)
def test_core_backward_compatibility(self):
"""Test backward compatibility for get_plugin_manager function. See #787."""
import _pytest.config
assert (
type(_pytest.config.get_plugin_manager())
is _pytest.config.PytestPluginManager
)
def test_has_plugin(self, request):
"""Test hasplugin function of the plugin manager (#932)."""
assert request.config.pluginmanager.hasplugin("python")
class TestDurations:
source = """
import time
frag = 0.002
def test_something():
pass
def test_2():
time.sleep(frag*5)
def test_1():
time.sleep(frag)
def test_3():
time.sleep(frag*10)
"""
def test_calls(self, testdir):
testdir.makepyfile(self.source)
result = testdir.runpytest("--durations=10")
assert result.ret == 0
result.stdout.fnmatch_lines_random(
["*durations*", "*call*test_3*", "*call*test_2*"]
)
result.stdout.fnmatch_lines(
["(0.00 durations hidden. Use -vv to show these durations.)"]
)
def test_calls_show_2(self, testdir):
testdir.makepyfile(self.source)
result = testdir.runpytest("--durations=2")
assert result.ret == 0
lines = result.stdout.get_lines_after("*slowest*durations*")
assert "4 passed" in lines[2]
def test_calls_showall(self, testdir):
testdir.makepyfile(self.source)
result = testdir.runpytest("--durations=0")
assert result.ret == 0
for x in "23":
for y in ("call",): # 'setup', 'call', 'teardown':
for line in result.stdout.lines:
if ("test_%s" % x) in line and y in line:
break
else:
raise AssertionError("not found {} {}".format(x, y))
def test_calls_showall_verbose(self, testdir):
testdir.makepyfile(self.source)
result = testdir.runpytest("--durations=0", "-vv")
assert result.ret == 0
for x in "123":
for y in ("call",): # 'setup', 'call', 'teardown':
for line in result.stdout.lines:
if ("test_%s" % x) in line and y in line:
break
else:
raise AssertionError("not found {} {}".format(x, y))
def test_with_deselected(self, testdir):
testdir.makepyfile(self.source)
result = testdir.runpytest("--durations=2", "-k test_2")
assert result.ret == 0
result.stdout.fnmatch_lines(["*durations*", "*call*test_2*"])
def test_with_failing_collection(self, testdir):
testdir.makepyfile(self.source)
testdir.makepyfile(test_collecterror="""xyz""")
result = testdir.runpytest("--durations=2", "-k test_1")
assert result.ret == 2
result.stdout.fnmatch_lines(["*Interrupted: 1 error during collection*"])
# Collection errors abort test execution, therefore no duration is
# output
result.stdout.no_fnmatch_line("*duration*")
def test_with_not(self, testdir):
testdir.makepyfile(self.source)
result = testdir.runpytest("-k not 1")
assert result.ret == 0
class TestDurationWithFixture:
source = """
import pytest
import time
frag = 0.01
@pytest.fixture
def setup_fixt():
time.sleep(frag)
def test_1(setup_fixt):
time.sleep(frag)
"""
def test_setup_function(self, testdir):
testdir.makepyfile(self.source)
result = testdir.runpytest("--durations=10")
assert result.ret == 0
result.stdout.fnmatch_lines_random(
"""
*durations*
* setup *test_1*
* call *test_1*
"""
)
def test_zipimport_hook(testdir, tmpdir):
"""Test package loader is being used correctly (see #1837)."""
zipapp = pytest.importorskip("zipapp")
testdir.tmpdir.join("app").ensure(dir=1)
testdir.makepyfile(
**{
"app/foo.py": """
import pytest
def main():
pytest.main(['--pyargs', 'foo'])
"""
}
)
target = tmpdir.join("foo.zip")
zipapp.create_archive(str(testdir.tmpdir.join("app")), str(target), main="foo:main")
result = testdir.runpython(target)
assert result.ret == 0
result.stderr.fnmatch_lines(["*not found*foo*"])
result.stdout.no_fnmatch_line("*INTERNALERROR>*")
def test_import_plugin_unicode_name(testdir):
testdir.makepyfile(myplugin="")
testdir.makepyfile("def test(): pass")
testdir.makeconftest("pytest_plugins = ['myplugin']")
r = testdir.runpytest()
assert r.ret == 0
def test_pytest_plugins_as_module(testdir):
"""Do not raise an error if pytest_plugins attribute is a module (#3899)"""
testdir.makepyfile(
**{
"__init__.py": "",
"pytest_plugins.py": "",
"conftest.py": "from . import pytest_plugins",
"test_foo.py": "def test(): pass",
}
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["* 1 passed in *"])
def test_deferred_hook_checking(testdir):
"""
Check hooks as late as possible (#1821).
"""
testdir.syspathinsert()
testdir.makepyfile(
**{
"plugin.py": """
class Hooks(object):
def pytest_my_hook(self, config):
pass
def pytest_configure(config):
config.pluginmanager.add_hookspecs(Hooks)
""",
"conftest.py": """
pytest_plugins = ['plugin']
def pytest_my_hook(config):
return 40
""",
"test_foo.py": """
def test(request):
assert request.config.hook.pytest_my_hook(config=request.config) == [40]
""",
}
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["* 1 passed *"])
def test_fixture_values_leak(testdir):
"""Ensure that fixture objects are properly destroyed by the garbage collector at the end of their expected
lifetimes (#2981).
"""
testdir.makepyfile(
"""
import attr
import gc
import pytest
import weakref
@attr.s
class SomeObj(object):
name = attr.ib()
fix_of_test1_ref = None
session_ref = None
@pytest.fixture(scope='session')
def session_fix():
global session_ref
obj = SomeObj(name='session-fixture')
session_ref = weakref.ref(obj)
return obj
@pytest.fixture
def fix(session_fix):
global fix_of_test1_ref
obj = SomeObj(name='local-fixture')
fix_of_test1_ref = weakref.ref(obj)
return obj
def test1(fix):
assert fix_of_test1_ref() is fix
def test2():
gc.collect()
# fixture "fix" created during test1 must have been destroyed by now
assert fix_of_test1_ref() is None
"""
)
# Running on subprocess does not activate the HookRecorder
# which holds itself a reference to objects in case of the
# pytest_assert_reprcompare hook
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines(["* 2 passed *"])
def test_fixture_order_respects_scope(testdir):
"""Ensure that fixtures are created according to scope order, regression test for #2405
"""
testdir.makepyfile(
"""
import pytest
data = {}
@pytest.fixture(scope='module')
def clean_data():
data.clear()
@pytest.fixture(autouse=True)
def add_data():
data.update(value=True)
@pytest.mark.usefixtures('clean_data')
def test_value():
assert data.get('value')
"""
)
result = testdir.runpytest()
assert result.ret == 0
def test_frame_leak_on_failing_test(testdir):
"""pytest would leak garbage referencing the frames of tests that failed that could never be reclaimed (#2798)
Unfortunately it was not possible to remove the actual circles because most of them
are made of traceback objects which cannot be weakly referenced. Those objects at least
can be eventually claimed by the garbage collector.
"""
testdir.makepyfile(
"""
import gc
import weakref
class Obj:
pass
ref = None
def test1():
obj = Obj()
global ref
ref = weakref.ref(obj)
assert 0
def test2():
gc.collect()
assert ref() is None
"""
)
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines(["*1 failed, 1 passed in*"])
def test_fixture_mock_integration(testdir):
"""Test that decorators applied to fixture are left working (#3774)"""
p = testdir.copy_example("acceptance/fixture_mock_integration.py")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(["*1 passed*"])
def test_usage_error_code(testdir):
result = testdir.runpytest("-unknown-option-")
assert result.ret == ExitCode.USAGE_ERROR
@pytest.mark.filterwarnings("default")
def test_warn_on_async_function(testdir):
testdir.makepyfile(
test_async="""
async def test_1():
pass
async def test_2():
pass
def test_3():
return test_2()
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(
[
"test_async.py::test_1",
"test_async.py::test_2",
"test_async.py::test_3",
"*async def functions are not natively supported*",
"*3 skipped, 3 warnings in*",
]
)
# ensure our warning message appears only once
assert (
result.stdout.str().count("async def functions are not natively supported") == 1
)
@pytest.mark.filterwarnings("default")
@pytest.mark.skipif(
sys.version_info < (3, 6), reason="async gen syntax available in Python 3.6+"
)
def test_warn_on_async_gen_function(testdir):
testdir.makepyfile(
test_async="""
async def test_1():
yield
async def test_2():
yield
def test_3():
return test_2()
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(
[
"test_async.py::test_1",
"test_async.py::test_2",
"test_async.py::test_3",
"*async def functions are not natively supported*",
"*3 skipped, 3 warnings in*",
]
)
# ensure our warning message appears only once
assert (
result.stdout.str().count("async def functions are not natively supported") == 1
)
def test_pdb_can_be_rewritten(testdir):
testdir.makepyfile(
**{
"conftest.py": """
import pytest
pytest.register_assert_rewrite("pdb")
""",
"__init__.py": "",
"pdb.py": """
def check():
assert 1 == 2
""",
"test_pdb.py": """
def test():
import pdb
assert pdb.check()
""",
}
)
# Disable debugging plugin itself to avoid:
# > INTERNALERROR> AttributeError: module 'pdb' has no attribute 'set_trace'
result = testdir.runpytest_subprocess("-p", "no:debugging", "-vv")
result.stdout.fnmatch_lines(
[
" def check():",
"> assert 1 == 2",
"E assert 1 == 2",
"E -1",
"E +2",
"",
"pdb.py:2: AssertionError",
"*= 1 failed in *",
]
)
assert result.ret == 1
def test_tee_stdio_captures_and_live_prints(testdir):
testpath = testdir.makepyfile(
"""
import sys
def test_simple():
print ("@this is stdout@")
print ("@this is stderr@", file=sys.stderr)
"""
)
result = testdir.runpytest_subprocess(
testpath, "--capture=tee-sys", "--junitxml=output.xml"
)
# ensure stdout/stderr were 'live printed'
result.stdout.fnmatch_lines(["*@this is stdout@*"])
result.stderr.fnmatch_lines(["*@this is stderr@*"])
# now ensure the output is in the junitxml
with open(os.path.join(testdir.tmpdir.strpath, "output.xml"), "r") as f:
fullXml = f.read()
assert "<system-out>@this is stdout@\n</system-out>" in fullXml
assert "<system-err>@this is stderr@\n</system-err>" in fullXml
| [] | [] | ["PYTHONPATH"] | [] | ["PYTHONPATH"] | python | 1 | 0 | |
src/somes/wsgi.py | """
WSGI config for somes project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/dev/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "somes.settings.production")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Wrap werkzeug debugger if DEBUG is on
from django.conf import settings
if settings.DEBUG:
try:
import django.views.debug
import six
from werkzeug.debug import DebuggedApplication
def null_technical_500_response(request, exc_type, exc_value, tb):
six.reraise(exc_type, exc_value, tb)
django.views.debug.technical_500_response = null_technical_500_response
application = DebuggedApplication(application, evalex=True,
# Turning off pin security as DEBUG is True
pin_security=False)
except ImportError:
pass
| [] | [] | [] | [] | [] | python | 0 | 0 | |
pkg/installer/installer.go | package installer
import (
"bufio"
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"regexp"
"strings"
"github.com/sirupsen/logrus"
"gopkg.in/yaml.v3"
"github.com/ekristen/cast/pkg/saltstack"
)
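// Mode identifies the kind of operation (install, upgrade, or remove) and whether it is performed locally or remotely.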
type Mode int
const (
LocalInstallMode Mode = iota
LocalUpgradeMode
LocalRemoveMode
RemoteInstallMode
RemoteUpgradeMode
RemoteRemoveMode
)
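// Installer drives a SaltStack-based installation according to its Config.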
type Installer struct {
ctx context.Context
log *logrus.Entry
Mode Mode
config *Config
configRoot string
logRoot string
logFile string
outFile string
command string
pillarJSON string
}
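// Config holds the caller-supplied options that control an installation run.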
type Config struct {
Mode Mode
CachePath string
NoRootCheck bool
SaltStackUser string
SaltStackState string
SaltStackTest bool
SaltStackLogLevel string
SaltStackFileRoot string
SaltStackInstallMode saltstack.Mode
SaltStackPillars map[string]string
}
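// New returns an Installer bound to the given context and config, defaulting to the salt-call command.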
func New(ctx context.Context, config *Config) *Installer {
return &Installer{
ctx: ctx,
log: logrus.WithField("component", "installer"),
config: config,
command: "salt-call",
}
}
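// Run performs pre-flight checks, prepares pillar data, installs SaltStack, and then dispatches to the mode-specific install routine.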
func (i *Installer) Run() (err error) {
i.log.Info("checking if install can progress")
if err := i.checks(); err != nil {
return err
}
i.log.Debug("configuring the installer")
if err := i.setup(); err != nil {
return err
}
i.log.Info("preparing pillar data")
pillarJSON, err := json.Marshal(i.config.SaltStackPillars)
if err != nil {
return err
}
i.pillarJSON = string(pillarJSON)
i.log.Debug("configuring saltstack installer")
sconfig := saltstack.NewConfig()
sconfig.Path = filepath.Join(i.config.CachePath, "saltstack")
i.log.Info("running saltstack installer")
sinstaller := saltstack.New(sconfig)
sinstaller.SetMode(saltstack.Package)
if err := sinstaller.Run(i.ctx); err != nil {
return err
}
i.command = sinstaller.GetBinary()
if i.command == "" {
return fmt.Errorf("unable to resolve salt binary to use")
}
i.log.Info("running cast installer")
i.log.Infof("installing as user: %s", i.config.SaltStackUser)
switch i.config.Mode {
case LocalInstallMode:
i.log.Info("performing local install")
return i.localRun()
default:
return fmt.Errorf("unsupported install mode: %d", i.Mode)
}
}
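// checks verifies that a target user can be determined when running as root: either --user must be provided or SUDO_USER must be set.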
func (i *Installer) checks() error {
if os.Geteuid() == 0 {
sudoUser := os.Getenv("SUDO_USER")
if i.config.SaltStackUser == "" && sudoUser == "" {
return fmt.Errorf("--user was not provided, or install was not ran with sudo")
}
}
return nil
}
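// setup creates the log and salt configuration directories and writes a minimal minion config file.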
func (i *Installer) setup() error {
i.logRoot = filepath.Join(i.config.CachePath, "logs")
if err := os.MkdirAll(i.logRoot, 0755); err != nil {
return err
}
i.logFile = filepath.Join(i.logRoot, "saltstack.log")
i.outFile = filepath.Join(i.logRoot, "results.yaml")
i.configRoot = filepath.Join(i.config.CachePath, "salt")
if err := os.MkdirAll(i.configRoot, 0755); err != nil {
return err
}
data := []byte("enable_fqdns_grains: False\n")
if err := os.WriteFile(filepath.Join(i.configRoot, "minion"), data, 0644); err != nil {
return err
}
return nil
}
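// localRun applies the SaltStack states on the local machine.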
func (i *Installer) localRun() error {
if err := i.runSaltstack(); err != nil {
return err
}
return nil
}
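// runSaltstack executes salt-call with the configured arguments, parses its stderr log stream into structured state-progress logs, writes the raw output and results to disk, and interprets the exit code.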
func (i *Installer) runSaltstack() error {
i.log.Info("starting saltstack run")
beginRegexp := regexp.MustCompile(`\[INFO\s+\] Running state \[(.+)\] at time (.*)$`)
endRegexp := regexp.MustCompile(`\[INFO\s+\] Completed state \[(.*)\] at time (.*) (\()?duration_in_ms=([\d.]+)(\))?$`)
execRegexp := regexp.MustCompile(`\[INFO\s+\] Executing state \[(.*)\] for \[(.*)\]$`)
resRegexp := regexp.MustCompile(`^\[.*$`)
args := []string{
"--config-dir", i.configRoot,
"--local",
"--retcode-passthrough",
"-l", i.config.SaltStackLogLevel,
"--out", "yaml",
"--file-root", i.config.SaltStackFileRoot,
"--no-color",
"state.apply",
i.config.SaltStackState,
fmt.Sprintf(`pillar=%s`, i.pillarJSON),
}
if i.config.SaltStackTest {
args = append(args, "test=True")
}
if !strings.HasSuffix(i.command, "-call") {
args = append([]string{"call"}, args...)
}
i.log.Debugf("running command %s %s", i.command, args)
logFile, err := os.OpenFile(i.logFile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
if err != nil {
i.log.WithError(err).Error("unable to open log file for writing")
return err
}
defer logFile.Close()
var out bytes.Buffer
cmd := exec.CommandContext(i.ctx, i.command, args...)
cmd.Stdout = &out
stderr, err := cmd.StderrPipe()
if err != nil {
return err
}
teeStderr := io.TeeReader(stderr, logFile)
scanner := bufio.NewScanner(teeStderr)
done := make(chan struct{})
/*
go func() {
<-i.ctx.Done()
if cmd == nil || cmd.Process == nil {
return
}
log := i.log.WithField("pid", cmd.Process.Pid)
log.Warn("parent context signaled done, killing salt-call process")
if err := cmd.Process.Kill(); err != nil {
log.Fatal(err)
return
}
log.Warn("salt-call killed")
log.WithField("log", i.logFile).Info("log file location")
}()
*/
go func() {
inStateExecution := false
inStateFailure := false
inStateStartTime := ""
for scanner.Scan() {
m := strings.TrimPrefix(scanner.Text(), "# ")
log := i.log.WithField("component", "saltstack")
if !inStateExecution {
if beginRegexp.MatchString(m) {
matches := beginRegexp.FindAllStringSubmatch(m, -1)
fields := logrus.Fields{
"state": matches[0][1],
"time_begin": matches[0][2],
}
inStateStartTime = matches[0][2]
log.WithFields(fields).Trace(m)
i.log.WithFields(fields).Debug("running state")
inStateExecution = true
} else {
i.log.WithField("component", "saltstack").Trace(m)
}
} else {
if m == "" {
continue
}
if execRegexp.MatchString(m) {
matches := execRegexp.FindAllStringSubmatch(m, -1)
log.Trace(m)
i.log.Infof("Executing %s for %s", matches[0][1], matches[0][2])
} else if !resRegexp.MatchString(m) {
log.Trace(m)
if strings.HasSuffix(m, "Failure!") {
inStateFailure = true
i.log.Warnf("Result: %s", m)
} else {
i.log.Debugf("Result: %s", m)
}
} else if endRegexp.MatchString(m) {
matches := endRegexp.FindAllStringSubmatch(m, -1)
duration := matches[0][3]
if len(matches[0]) > 3 {
duration = matches[0][4]
}
fields := logrus.Fields{
"state": matches[0][1],
"time_begin": inStateStartTime,
"time_end": matches[0][2],
"duration": duration,
}
inStateStartTime = ""
log.WithFields(fields).Trace(m)
if inStateFailure {
i.log.WithFields(fields).Error("state failed")
} else {
i.log.WithFields(fields).Info("state completed")
}
inStateExecution = false
inStateFailure = false
} else {
i.log.WithField("component", "saltstack").Trace(m)
}
}
}
i.log.Debug("signaling stderr read is complete")
done <- struct{}{}
}()
i.log.Debug("executing cmd.start")
if err := cmd.Start(); err != nil {
return err
}
i.log.Debug("waiting for stderr read to complete")
<-done
i.log.Debug("reading from stderr is done")
	// Note: we do not check the error here because
	// we handle it via the exit code below
cmd.Wait()
// TODO: write out to a file
if _, err := logFile.Write(out.Bytes()); err != nil {
i.log.WithError(err).Error("unable to write to log file")
}
if err := ioutil.WriteFile(i.outFile, out.Bytes(), 0640); err != nil {
i.log.WithError(err).Error("unable to write to out file")
}
i.log.WithField("file", i.logFile).Info("log file location")
i.log.WithField("file", i.outFile).Info("results file location")
switch code := cmd.ProcessState.ExitCode(); {
// This is hit when salt-call encounters an error
case code == 1:
i.log.WithField("code", code).Error("salt-call finished with errors")
var results saltstack.LocalResultsErrors
if err := yaml.Unmarshal(out.Bytes(), &results); err != nil {
fmt.Println(out.String())
return err
}
i.log.Warn(out.String())
	// This is hit when we kill salt-call because of a signal
	// handler trap on the main CLI process
case code == -1:
i.log.Warn("salt-call terminated")
case code == 2:
if err := i.parseAndLogResults(out.Bytes()); err != nil {
return err
}
i.log.Info("salt-call completed but had failed states")
case code == 0:
if err := i.parseAndLogResults(out.Bytes()); err != nil {
return err
}
i.log.Info("salt-call completed successfully")
}
return nil
}
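// parseAndLogResults unmarshals salt-call's YAML output and logs the first failed state plus overall success/failure statistics.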
func (i *Installer) parseAndLogResults(in []byte) error {
var results saltstack.LocalResults
if err := yaml.Unmarshal(in, &results); err != nil {
fmt.Println(in)
return err
}
success, failed := 0, 0
var firstFailedState saltstack.Result
for _, r := range results.Local {
switch r.Result {
case true:
success++
case false:
if failed == 0 {
firstFailedState = r
}
failed++
}
}
if failed > 0 {
space := regexp.MustCompile(`\s+`)
i.log.WithFields(logrus.Fields{
"sls": firstFailedState.SLS,
"run_num": firstFailedState.RunNumber,
"comment": strings.ReplaceAll(fmt.Sprintf("%q", space.ReplaceAllString(firstFailedState.Comment, " ")), `"`, ""),
}).Warn("first failed state")
}
i.log.WithFields(logrus.Fields{
"total": len(results.Local),
"success": success,
"failed": failed,
}).Info("statistics")
return nil
}
| ["\"SUDO_USER\""] | [] | ["SUDO_USER"] | [] | ["SUDO_USER"] | go | 1 | 0 | |
porch/apiserver/cmd/porch/main.go | // Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"fmt"
"net/http"
"os"
"github.com/GoogleContainerTools/kpt/porch/apiserver/pkg/cmd/server"
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/exporters/otlp"
"go.opentelemetry.io/otel/exporters/otlp/otlpgrpc"
"go.opentelemetry.io/otel/exporters/stdout"
"go.opentelemetry.io/otel/propagation"
"go.opentelemetry.io/otel/sdk/metric/controller/basic"
"go.opentelemetry.io/otel/sdk/trace"
"google.golang.org/grpc"
genericapiserver "k8s.io/apiserver/pkg/server"
"k8s.io/component-base/cli"
"k8s.io/klog/v2"
)
func main() {
code := run()
os.Exit(code)
}
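// run sets up telemetry, instruments the default HTTP transport with OpenTelemetry, and runs the Porch API server until it exits.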
func run() int {
t := &telemetry{}
t.Start()
defer t.Stop()
http.DefaultTransport = otelhttp.NewTransport(http.DefaultClient.Transport)
http.DefaultClient.Transport = http.DefaultTransport
ctx := genericapiserver.SetupSignalContext()
options := server.NewPorchServerOptions(os.Stdout, os.Stderr)
cmd := server.NewCommandStartPorchServer(ctx, options)
code := cli.Run(cmd)
return code
}
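// telemetry bundles the optional OpenTelemetry tracer provider, metric pusher, and span exporter.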
type telemetry struct {
tp *trace.TracerProvider
pusher *basic.Controller
exporter trace.SpanExporter
}
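// Start configures telemetry based on the OTEL environment variable: unset disables it, "stdout" installs the stdout pipeline, and "otel" exports over OTLP gRPC to localhost:4317.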
func (t *telemetry) Start() error {
config := os.Getenv("OTEL")
if config == "" {
return nil
}
if config == "stdout" {
exportOpts := []stdout.Option{
stdout.WithPrettyPrint(),
}
// Registers both a trace and meter Provider globally.
tracerProvider, pusher, err := stdout.InstallNewPipeline(exportOpts, nil)
if err != nil {
return fmt.Errorf("error initializing stdout exporter: %w", err)
}
t.tp = tracerProvider
t.pusher = pusher
return nil
}
if config == "otel" {
ctx := context.Background()
// See https://github.com/open-telemetry/opentelemetry-go/issues/1484
driver := otlpgrpc.NewDriver(
otlpgrpc.WithInsecure(),
otlpgrpc.WithEndpoint("localhost:4317"),
otlpgrpc.WithDialOption(grpc.WithBlock()), // useful for testing
)
// set global propagator to tracecontext (the default is no-op).
otel.SetTextMapPropagator(propagation.TraceContext{})
// Registers both a trace and meter Provider globally.
exporter, tracerProvider, pusher, err := otlp.InstallNewPipeline(ctx, driver)
if err != nil {
return fmt.Errorf("error initializing otel exporter: %w", err)
}
t.tp = tracerProvider
t.pusher = pusher
t.exporter = exporter
return nil
}
return fmt.Errorf("unknown OTEL configuration %q", config)
}
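// Stop shuts down the metric pusher, tracer provider, and exporter, logging any errors encountered.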
func (t *telemetry) Stop() {
if t.pusher != nil {
if err := t.pusher.Stop(context.Background()); err != nil {
klog.Warningf("failed to shut down telemetry: %v", err)
}
t.pusher = nil
}
if t.tp != nil {
if err := t.tp.Shutdown(context.Background()); err != nil {
klog.Warningf("failed to shut down telemetry: %v", err)
}
t.tp = nil
}
if t.exporter != nil {
if err := t.exporter.Shutdown(context.Background()); err != nil {
klog.Warningf("failed to shut down telemetry exporter: %v", err)
}
t.exporter = nil
}
}
| ["\"OTEL\""] | [] | ["OTEL"] | [] | ["OTEL"] | go | 1 | 0 | |
pkg/scalers/azure_servicebus_scaler_test.go | package scalers
import (
"context"
"os"
"testing"
)
const (
topicName = "testtopic"
subscriptionName = "testsubscription"
queueName = "testqueue"
)
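// parseServiceBusMetadataTestData describes one metadata parsing test case and its expected outcome.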
type parseServiceBusMetadataTestData struct {
metadata map[string]string
isError bool
entityType EntityType
}
// not testing connections so it doesn't matter what the resolved env value is for this
var sampleResolvedEnv = map[string]string{
defaultConnectionSetting: "none",
}
var parseServiceBusMetadataDataset = []parseServiceBusMetadataTestData{
{map[string]string{}, true, None},
// properly formed queue
{map[string]string{"queueName": queueName}, false, Queue},
// properly formed topic & subscription
{map[string]string{"topicName": topicName, "subscriptionName": subscriptionName}, false, Subscription},
// queue and topic specified
{map[string]string{"queueName": queueName, "topicName": topicName}, true, None},
// queue and subscription specified
{map[string]string{"queueName": queueName, "subscriptionName": subscriptionName}, true, None},
	// topic but no subscription specified
{map[string]string{"topicName": topicName}, true, None},
// subscription but no topic specified
{map[string]string{"subscriptionName": subscriptionName}, true, None},
}
var getServiceBusLengthTestScalers = []azureServiceBusScaler{
{metadata: &azureServiceBusMetadata{
entityType: Queue,
queueName: queueName,
}},
{metadata: &azureServiceBusMetadata{
entityType: Subscription,
topicName: topicName,
subscriptionName: subscriptionName,
}},
}
func TestParseServiceBusMetadata(t *testing.T) {
for _, testData := range parseServiceBusMetadataDataset {
meta, err := parseAzureServiceBusMetadata(sampleResolvedEnv, testData.metadata)
if err != nil && !testData.isError {
t.Error("Expected success but got error", err)
}
if testData.isError && err == nil {
t.Error("Expected error but got success")
}
if meta != nil && meta.entityType != testData.entityType {
t.Errorf("Expected entity type %v but got %v\n", testData.entityType, meta.entityType)
}
}
}
func TestGetServiceBusLength(t *testing.T) {
t.Log("This test will use the environment variable SERVICEBUS_CONNECTION_STRING if it is set")
t.Log("If set, it will connect to the servicebus namespace specified by the connection string & check:")
t.Logf("\tQueue '%s' has 1 message\n", queueName)
t.Logf("\tTopic '%s' with subscription '%s' has 1 message\n", topicName, subscriptionName)
connection_string := os.Getenv("SERVICEBUS_CONNECTION_STRING")
for _, scaler := range getServiceBusLengthTestScalers {
if connection_string != "" {
// Can actually test that numbers return
scaler.metadata.connection = connection_string
length, err := scaler.GetAzureServiceBusLength(context.TODO())
if err != nil {
t.Errorf("Expected success but got error: %s", err)
}
if length != 1 {
t.Errorf("Expected 1 message, got %d", length)
}
} else {
// Just test error message
length, err := scaler.GetAzureServiceBusLength(context.TODO())
if length != -1 || err == nil {
t.Errorf("Expected error but got success")
}
}
}
}
| ["\"SERVICEBUS_CONNECTION_STRING\""] | [] | ["SERVICEBUS_CONNECTION_STRING"] | [] | ["SERVICEBUS_CONNECTION_STRING"] | go | 1 | 0 | |
tests/test_config.py | import os
import pytest
import configparser
from spectools.config import SDSSConfig, MaNGAConfig
@pytest.fixture()
def config_path(tmpdir):
yield tmpdir.mkdir
@pytest.fixture()
def config_file(tmpdir):
tmp_config = tmpdir.mkdir('spectools').join('.spectools')
yield tmp_config
@pytest.fixture()
def goodconfig(config_file):
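    """Write a config file with valid sas and manga sections and return its path."""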
config = configparser.ConfigParser()
config['sas'] = {'base_dir': '/tmp/SAS'}
config['manga'] = {}
manga = config['manga']
manga['drp_version'] = 'v1_0_0'
manga['dap_version'] = '1.0.0'
# config.write(config_file)
with open(config_file, 'w') as configfile:
config.write(configfile)
return config_file
@pytest.fixture()
def badconfig(config_file):
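    """Write a config file with an unrecognized section name and return its path."""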
config = configparser.ConfigParser()
config['sas2'] = {'base_dir': '/tmp/SAS'}
with open(config_file, 'w') as configfile:
config.write(configfile)
return config_file
def test_sdss_goodconfig(goodconfig):
sdss_config = SDSSConfig(config_file=goodconfig)
os.environ["SAS_BASE_DIR"] = ""
assert sdss_config.sas_base_dir == '/tmp/SAS'
def test_sdss_badconfig(badconfig):
sdss_config = SDSSConfig(config_file=badconfig)
os.environ["SAS_BASE_DIR"] = ""
assert sdss_config.sas_base_dir == os.path.join(os.path.expanduser('~'),
'SAS')
def test_sdss_environment():
sdss_config = SDSSConfig()
if os.getenv('SAS_BASE_DIR'):
assert sdss_config.sas_base_dir == os.getenv('SAS_BASE_DIR')
def test_manga(goodconfig):
manga_config = MaNGAConfig(config_file=goodconfig)
assert manga_config.drp_version == 'v1_0_0'
assert manga_config.dap_version == '1.0.0'
| [] | [] | ["SAS_BASE_DIR"] | [] | ["SAS_BASE_DIR"] | python | 1 | 0 | |
tests/integration/mongodb/results/user.py | from typing import Type, Tuple, Any
from bson import ObjectId
from JellyBot.api.static import result
from flags import Platform
from models import Model, APIUserModel, OnPlatformUserModel, RootUserModel
from mongodb.factory.results import (
ModelResult, APIUserRegistrationResult, OnPlatformUserRegistrationResult, GetRootUserDataResult,
RootUserRegistrationResult, RootUserUpdateResult, GetOutcome, WriteOutcome
)
from tests.base import TestOnModelResult
__all__ = ["TestOnSiteUserRegistrationResult", "TestOnPlatformUserRegistrationResult",
"TestGetRootUserDataResult", "TestRootUserRegistrationResult", "TestRootUserUpdateResult"]
class TestOnSiteUserRegistrationResult(TestOnModelResult.TestClass):
@classmethod
def get_result_class(cls) -> Type[ModelResult]:
return APIUserRegistrationResult
@classmethod
def result_args_no_error(cls) -> Tuple[Any, ...]:
return "ABCD",
@classmethod
def result_args_has_error(cls) -> Tuple[Any, ...]:
return "EFGH",
@classmethod
def default_serialized(cls):
d = super().default_serialized()
d.update({result.UserManagementResponse.TOKEN: "ABCD"})
return d
@classmethod
def default_serialized_error(cls):
d = super().default_serialized_error()
d.update({result.UserManagementResponse.TOKEN: "EFGH"})
return d
@classmethod
def get_constructed_model(cls) -> Model:
return APIUserModel(Email="[email protected]", GoogleUid="123456789", Token="A" * APIUserModel.API_TOKEN_LENGTH)
class TestOnPlatformUserRegistrationResult(TestOnModelResult.TestClass):
@classmethod
def get_result_class(cls) -> Type[ModelResult]:
return OnPlatformUserRegistrationResult
@classmethod
def get_constructed_model(cls) -> Model:
return OnPlatformUserModel(Token="ABC", Platform=Platform.LINE)
class TestGetRootUserDataResult(TestOnModelResult.TestClass):
API_OID = ObjectId()
@classmethod
def get_result_class(cls) -> Type[ModelResult]:
return GetRootUserDataResult
@classmethod
def get_constructed_model(cls) -> RootUserModel:
return RootUserModel(ApiOid=TestGetRootUserDataResult.API_OID)
def test_get_extra(self):
mdl_api = APIUserModel(Email="[email protected]", GoogleUid="123456789", Token="A" * APIUserModel.API_TOKEN_LENGTH)
mdl_onplat = [OnPlatformUserModel(Token="ABC", Platform=Platform.LINE)]
r = GetRootUserDataResult(GetOutcome.O_ADDED, None, self.get_constructed_model(), mdl_api, mdl_onplat)
self.assertEqual(r.model_api, mdl_api)
self.assertEqual(r.model_onplat_list, mdl_onplat)
class TestRootUserRegistrationResult(TestOnModelResult.TestClass):
@classmethod
def get_result_class(cls):
return RootUserRegistrationResult
@classmethod
def get_constructed_model(cls) -> Model:
return RootUserModel(ApiOid=TestGetRootUserDataResult.API_OID)
@classmethod
def result_args_no_error(cls) -> Tuple[Any, ...]:
return \
WriteOutcome.O_INSERTED, \
OnPlatformUserRegistrationResult(
WriteOutcome.O_MISC, None, OnPlatformUserModel(Token="ABC", Platform=Platform.LINE))
@classmethod
def result_args_has_error(cls) -> Tuple[Any, ...]:
return WriteOutcome.X_NOT_EXECUTED, None
@classmethod
def default_serialized(cls):
d = super().default_serialized()
d.update({result.UserManagementResponse.CONN_OUTCOME: WriteOutcome.O_INSERTED,
result.UserManagementResponse.REG_RESULT: OnPlatformUserRegistrationResult(
WriteOutcome.O_MISC,
None,
OnPlatformUserModel(Token="ABC", Platform=Platform.LINE)).serialize()})
return d
@classmethod
def default_serialized_error(cls):
d = super().default_serialized_error()
d.update({result.UserManagementResponse.CONN_OUTCOME: WriteOutcome.X_NOT_EXECUTED,
result.UserManagementResponse.REG_RESULT: None})
return d
class TestRootUserUpdateResult(TestOnModelResult.TestClass):
@classmethod
def get_result_class(cls):
return RootUserUpdateResult
@classmethod
def get_constructed_model(cls) -> Model:
return RootUserModel(ApiOid=TestGetRootUserDataResult.API_OID)
| [] | [] | [] | [] | [] | python | null | null | null |
setup.py | from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext as _build_ext
import os
import re
import codecs
import platform
from distutils.sysconfig import get_config_var
from distutils.version import LooseVersion
import sys
# to publish use:
# > python setup.py sdist bdist_wheel upload
# which depends on ~/.pypirc
# This is copied from @robbuckley's fix for Pandas
# For Mac, ensure extensions are built for macOS 10.9 when compiling on a
# 10.9 system or above, overriding distutils behavior, which is to target
# the version that Python was built for. This may be overridden by setting
# MACOSX_DEPLOYMENT_TARGET before calling setup.py
if sys.platform == 'darwin':
if 'MACOSX_DEPLOYMENT_TARGET' not in os.environ:
current_system = LooseVersion(platform.mac_ver()[0])
python_target = LooseVersion(get_config_var('MACOSX_DEPLOYMENT_TARGET'))
if python_target < '10.9' and current_system >= '10.9':
os.environ['MACOSX_DEPLOYMENT_TARGET'] = '10.9'
here = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
with codecs.open(os.path.join(here, *parts), 'r') as fp:
return fp.read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
# Extend the default build_ext class to bootstrap the numpy installation
# that is needed to build C extensions.
# see https://stackoverflow.com/questions/19919905/how-to-bootstrap-numpy-installation-in-setup-py
class build_ext(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
if isinstance(__builtins__, dict):
__builtins__["__NUMPY_SETUP__"] = False
else:
setattr(__builtins__, "__NUMPY_SETUP__", False)
import numpy
print("numpy.get_include()", numpy.get_include())
self.include_dirs.append(numpy.get_include())
def run_setup(with_binary=True, test_xgboost=True, test_lightgbm=True):
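    """ Run setuptools.setup for shap, optionally building the C extension and
    declaring xgboost/lightgbm as test dependencies.
    """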
ext_modules = []
if with_binary:
ext_modules.append(
Extension('shap._cext', sources=['shap/_cext.cc'])
)
if test_xgboost and test_lightgbm:
tests_require = ['nose', 'xgboost', 'lightgbm']
elif test_xgboost:
tests_require = ['nose', 'xgboost']
elif test_lightgbm:
tests_require = ['nose', 'lightgbm']
else:
tests_require = ['nose']
setup(
name='shap',
version=find_version("shap", "__init__.py"),
description='A unified approach to explain the output of any machine learning model.',
long_description="SHAP (SHapley Additive exPlanations) is a unified approach to explain the output of " + \
"any machine learning model. SHAP connects game theory with local explanations, uniting " + \
"several previous methods and representing the only possible consistent and locally accurate " + \
"additive feature attribution method based on expectations.",
long_description_content_type="text/markdown",
url='http://github.com/slundberg/shap',
author='Scott Lundberg',
author_email='[email protected]',
license='MIT',
packages=[
'shap', 'shap.explainers', 'shap.explainers.other', 'shap.explainers.deep',
'shap.plots', 'shap.benchmark'
],
package_data={'shap': ['plots/resources/*', 'tree_shap.h']},
cmdclass={'build_ext': build_ext},
setup_requires=['numpy'],
install_requires=['numpy', 'scipy', 'scikit-learn', 'matplotlib', 'pandas', 'tqdm', 'ipython', 'scikit-image'],
test_suite='nose.collector',
tests_require=tests_require,
ext_modules=ext_modules,
zip_safe=False
)
def try_run_setup(**kwargs):
""" Fails gracefully when various install steps don't work.
"""
try:
run_setup(**kwargs)
except Exception as e:
print(str(e))
if "xgboost" in str(e).lower():
kwargs["test_xgboost"] = False
print("Couldn't install XGBoost for testing!")
try_run_setup(**kwargs)
elif "lightgbm" in str(e).lower():
kwargs["test_lightgbm"] = False
print("Couldn't install LightGBM for testing!")
try_run_setup(**kwargs)
elif kwargs["with_binary"]:
kwargs["with_binary"] = False
print("WARNING: The C extension could not be compiled, sklearn tree models not supported.")
try_run_setup(**kwargs)
else:
print("ERROR: Failed to build!")
# we seem to need this import guard for appveyor
if __name__ == "__main__":
try_run_setup(with_binary=True, test_xgboost=True, test_lightgbm=True)
| [] | [] | ["MACOSX_DEPLOYMENT_TARGET"] | [] | ["MACOSX_DEPLOYMENT_TARGET"] | python | 1 | 0 | |
lib/srv/regular/sshserver_test.go | /*
Copyright 2015-2020 Gravitational, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package regular
import (
"context"
"fmt"
"io"
"net"
"net/http"
"net/http/httptest"
"net/url"
"os"
"os/exec"
"os/user"
"strconv"
"strings"
"sync"
"testing"
"time"
"golang.org/x/crypto/ssh"
"golang.org/x/crypto/ssh/agent"
"github.com/gravitational/teleport"
"github.com/gravitational/teleport/api/client/proto"
"github.com/gravitational/teleport/api/constants"
apidefaults "github.com/gravitational/teleport/api/defaults"
"github.com/gravitational/teleport/api/types"
"github.com/gravitational/teleport/lib/auth"
"github.com/gravitational/teleport/lib/bpf"
"github.com/gravitational/teleport/lib/limiter"
"github.com/gravitational/teleport/lib/pam"
restricted "github.com/gravitational/teleport/lib/restrictedsession"
"github.com/gravitational/teleport/lib/reversetunnel"
"github.com/gravitational/teleport/lib/services"
sess "github.com/gravitational/teleport/lib/session"
"github.com/gravitational/teleport/lib/srv"
"github.com/gravitational/teleport/lib/sshutils"
"github.com/gravitational/teleport/lib/sshutils/x11"
"github.com/gravitational/teleport/lib/utils"
"github.com/google/uuid"
"github.com/gravitational/trace"
"github.com/jonboulle/clockwork"
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/require"
)
// teleportTestUser is additional user used for tests
const teleportTestUser = "teleport-test"
// wildcardAllow is used in tests to allow access to all labels.
var wildcardAllow = types.Labels{
types.Wildcard: []string{types.Wildcard},
}
// TestMain will re-execute Teleport to run a command if "exec" is passed to
// it as an argument. Otherwise it will run tests as normal.
func TestMain(m *testing.M) {
utils.InitLoggerForTests()
if srv.IsReexec() {
srv.RunAndExit(os.Args[1])
return
}
code := m.Run()
os.Exit(code)
}
type sshInfo struct {
srv *Server
srvAddress string
srvPort string
srvHostPort string
clt *ssh.Client
cltConfig *ssh.ClientConfig
assertCltClose require.ErrorAssertionFunc
}
type sshTestFixture struct {
ssh sshInfo
up *upack
signer ssh.Signer
user string
clock clockwork.FakeClock
testSrv *auth.TestServer
}
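// newFixture creates a default test fixture: a test auth server, a running SSH node, and a connected client.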
func newFixture(t *testing.T) *sshTestFixture {
return newCustomFixture(t, func(*auth.TestServerConfig) {})
}
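// newCustomFixture starts a test auth server (optionally mutated by mutateCfg), a running SSH node with the given options, and an SSH client with agent forwarding set up.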
func newCustomFixture(t *testing.T, mutateCfg func(*auth.TestServerConfig), sshOpts ...ServerOption) *sshTestFixture {
ctx := context.Background()
u, err := user.Current()
require.NoError(t, err)
clock := clockwork.NewFakeClock()
serverCfg := auth.TestServerConfig{
Auth: auth.TestAuthServerConfig{
ClusterName: "localhost",
Dir: t.TempDir(),
Clock: clock,
},
}
mutateCfg(&serverCfg)
testServer, err := auth.NewTestServer(serverCfg)
require.NoError(t, err)
t.Cleanup(func() { require.NoError(t, testServer.Shutdown(ctx)) })
priv, pub, err := testServer.Auth().GenerateKeyPair("")
require.NoError(t, err)
tlsPub, err := auth.PrivateKeyToPublicKeyTLS(priv)
require.NoError(t, err)
certs, err := testServer.Auth().GenerateHostCerts(ctx,
&proto.HostCertsRequest{
HostID: hostID,
NodeName: testServer.ClusterName(),
Role: types.RoleNode,
PublicSSHKey: pub,
PublicTLSKey: tlsPub,
})
require.NoError(t, err)
// set up user CA and set up a user that has access to the server
signer, err := sshutils.NewSigner(priv, certs.SSH)
require.NoError(t, err)
nodeID := uuid.New().String()
nodeClient, err := testServer.NewClient(auth.TestIdentity{
I: auth.BuiltinRole{
Role: types.RoleNode,
Username: nodeID,
},
})
require.NoError(t, err)
nodeDir := t.TempDir()
serverOptions := []ServerOption{
SetUUID(nodeID),
SetNamespace(apidefaults.Namespace),
SetEmitter(nodeClient),
SetShell("/bin/sh"),
SetSessionServer(nodeClient),
SetPAMConfig(&pam.Config{Enabled: false}),
SetLabels(
map[string]string{"foo": "bar"},
services.CommandLabels{
"baz": &types.CommandLabelV2{
Period: types.NewDuration(time.Millisecond),
Command: []string{"expr", "1", "+", "3"},
},
},
),
SetBPF(&bpf.NOP{}),
SetRestrictedSessionManager(&restricted.NOP{}),
SetClock(clock),
SetLockWatcher(newLockWatcher(ctx, t, nodeClient)),
SetX11ForwardingConfig(&x11.ServerConfig{}),
}
serverOptions = append(serverOptions, sshOpts...)
sshSrv, err := New(
utils.NetAddr{AddrNetwork: "tcp", Addr: "127.0.0.1:0"},
testServer.ClusterName(),
[]ssh.Signer{signer},
nodeClient,
nodeDir,
"",
utils.NetAddr{},
nil,
serverOptions...)
require.NoError(t, err)
require.NoError(t, auth.CreateUploaderDir(nodeDir))
require.NoError(t, sshSrv.Start())
t.Cleanup(func() { require.NoError(t, sshSrv.Close()) })
require.NoError(t, sshSrv.heartbeat.ForceSend(time.Second))
sshSrvAddress := sshSrv.Addr()
_, sshSrvPort, err := net.SplitHostPort(sshSrvAddress)
require.NoError(t, err)
sshSrvHostPort := fmt.Sprintf("%v:%v", testServer.ClusterName(), sshSrvPort)
// set up SSH client using the user private key for signing
up, err := newUpack(testServer, u.Username, []string{u.Username}, wildcardAllow)
require.NoError(t, err)
// set up an agent server and a client that uses agent for forwarding
keyring := agent.NewKeyring()
addedKey := agent.AddedKey{
PrivateKey: up.pkey,
Certificate: up.pcert,
}
require.NoError(t, keyring.Add(addedKey))
cltConfig := &ssh.ClientConfig{
User: u.Username,
Auth: []ssh.AuthMethod{ssh.PublicKeys(up.certSigner)},
HostKeyCallback: ssh.FixedHostKey(signer.PublicKey()),
}
client, err := ssh.Dial("tcp", sshSrv.Addr(), cltConfig)
require.NoError(t, err)
f := &sshTestFixture{
ssh: sshInfo{
srv: sshSrv,
srvAddress: sshSrvAddress,
srvPort: sshSrvPort,
srvHostPort: sshSrvHostPort,
clt: client,
cltConfig: cltConfig,
assertCltClose: require.NoError,
},
up: up,
signer: signer,
user: u.Username,
clock: clock,
testSrv: testServer,
}
t.Cleanup(func() { f.ssh.assertCltClose(t, client.Close()) })
require.NoError(t, agent.ForwardToAgent(client, keyring))
return f
}
func newProxyClient(t *testing.T, testSvr *auth.TestServer) (*auth.Client, string) {
// create proxy client used in some tests
proxyID := uuid.New().String()
proxyClient, err := testSvr.NewClient(auth.TestIdentity{
I: auth.BuiltinRole{
Role: types.RoleProxy,
Username: proxyID,
},
})
require.NoError(t, err)
return proxyClient, proxyID
}
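// newNodeClient returns an auth client authenticated as a node, along with the generated node ID.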
func newNodeClient(t *testing.T, testSvr *auth.TestServer) (*auth.Client, string) {
nodeID := uuid.New().String()
nodeClient, err := testSvr.NewClient(auth.TestIdentity{
I: auth.BuiltinRole{
Role: types.RoleNode,
Username: nodeID,
},
})
require.NoError(t, err)
return nodeClient, nodeID
}
const hostID = "00000000-0000-0000-0000-000000000000"
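// startReadAll reads r to EOF in the background and delivers the collected bytes on the returned channel.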
func startReadAll(r io.Reader) <-chan []byte {
ch := make(chan []byte)
go func() {
data, _ := io.ReadAll(r)
ch <- data
}()
return ch
}
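// waitForBytes waits up to five seconds for data to arrive on ch.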
func waitForBytes(ch <-chan []byte) ([]byte, error) {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
select {
case data := <-ch:
return data, nil
case <-ctx.Done():
return nil, ctx.Err()
}
}
func TestInactivityTimeout(t *testing.T) {
const timeoutMessage = "You snooze, you lose."
// Given
// * a running auth server configured with a 5s inactivity timeout,
// * a running SSH server configured with a given disconnection message
// * a client connected to the SSH server,
// * an SSH session running over the client connection
mutateCfg := func(cfg *auth.TestServerConfig) {
networkCfg := types.DefaultClusterNetworkingConfig()
networkCfg.SetClientIdleTimeout(5 * time.Second)
networkCfg.SetClientIdleTimeoutMessage(timeoutMessage)
cfg.Auth.ClusterNetworkingConfig = networkCfg
}
f := newCustomFixture(t, mutateCfg)
// If all goes well, the client will be closed by the time cleanup happens,
// so change the assertion on closing the client to expect it to fail
f.ssh.assertCltClose = require.Error
se, err := f.ssh.clt.NewSession()
require.NoError(t, err)
defer se.Close()
stderr, err := se.StderrPipe()
require.NoError(t, err)
stdErrCh := startReadAll(stderr)
endCh := make(chan error)
go func() { endCh <- f.ssh.clt.Wait() }()
// When I let the session idle (with the clock running at approx 10x speed)...
sessionHasFinished := func() bool {
f.clock.Advance(1 * time.Second)
select {
case <-endCh:
return true
default:
return false
}
}
require.Eventually(t, sessionHasFinished, 6*time.Second, 100*time.Millisecond,
"Timed out waiting for session to finish")
// Expect that the idle timeout has been delivered via stderr
text, err := waitForBytes(stdErrCh)
require.NoError(t, err)
require.Equal(t, timeoutMessage, string(text))
}
func TestLockInForce(t *testing.T) {
t.Parallel()
ctx := context.Background()
f := newFixture(t)
// If all goes well, the client will be closed by the time cleanup happens,
// so change the assertion on closing the client to expect it to fail.
f.ssh.assertCltClose = require.Error
se, err := f.ssh.clt.NewSession()
require.NoError(t, err)
stderr, err := se.StderrPipe()
require.NoError(t, err)
stdErrCh := startReadAll(stderr)
endCh := make(chan error)
go func() { endCh <- f.ssh.clt.Wait() }()
lock, err := types.NewLock("test-lock", types.LockSpecV2{
Target: types.LockTarget{Login: f.user},
})
require.NoError(t, err)
require.NoError(t, f.testSrv.Auth().UpsertLock(ctx, lock))
// When I let the session idle (with the clock running at approx 10x speed)...
sessionHasFinished := func() bool {
f.clock.Advance(1 * time.Second)
select {
case <-endCh:
return true
default:
return false
}
}
require.Eventually(t, sessionHasFinished, 1*time.Second, 100*time.Millisecond,
"Timed out waiting for session to finish")
// Expect the lock-in-force message to have been delivered via stderr.
lockInForceMsg := services.LockInForceAccessDenied(lock).Error()
text, err := waitForBytes(stdErrCh)
require.NoError(t, err)
require.Equal(t, lockInForceMsg, string(text))
// As long as the lock is in force, new sessions cannot be opened.
newClient, err := ssh.Dial("tcp", f.ssh.srvAddress, f.ssh.cltConfig)
require.NoError(t, err)
t.Cleanup(func() {
// The client is expected to be closed by the lock monitor therefore expect
// an error on this second attempt.
require.Error(t, newClient.Close())
})
_, err = newClient.NewSession()
require.Error(t, err)
require.Contains(t, err.Error(), lockInForceMsg)
// Once the lock is lifted, new sessions should go through without error.
require.NoError(t, f.testSrv.Auth().DeleteLock(ctx, "test-lock"))
newClient2, err := ssh.Dial("tcp", f.ssh.srvAddress, f.ssh.cltConfig)
require.NoError(t, err)
t.Cleanup(func() { require.NoError(t, newClient2.Close()) })
_, err = newClient2.NewSession()
require.NoError(t, err)
}
// TestDirectTCPIP ensures that the server can create a "direct-tcpip"
// channel to the target address. The "direct-tcpip" channel is what port
// forwarding is built upon.
func TestDirectTCPIP(t *testing.T) {
t.Parallel()
f := newFixture(t)
// Startup a test server that will reply with "hello, world\n"
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintln(w, "hello, world")
}))
defer ts.Close()
// Extract the host:port the test HTTP server is running on.
u, err := url.Parse(ts.URL)
require.NoError(t, err)
// Build a http.Client that will dial through the server to establish the
// connection. That's why a custom dialer is used and the dialer uses
// s.clt.Dial (which performs the "direct-tcpip" request).
httpClient := http.Client{
Transport: &http.Transport{
Dial: func(network string, addr string) (net.Conn, error) {
return f.ssh.clt.Dial("tcp", u.Host)
},
},
}
// Perform a HTTP GET to the test HTTP server through a "direct-tcpip" request.
resp, err := httpClient.Get(ts.URL)
require.NoError(t, err)
defer resp.Body.Close()
// Make sure the response is what was expected.
body, err := io.ReadAll(resp.Body)
require.NoError(t, err)
require.Equal(t, []byte("hello, world\n"), body)
}
func TestAdvertiseAddr(t *testing.T) {
t.Parallel()
f := newFixture(t)
// No advertiseAddr was set in fixture, should default to srvAddress.
require.Equal(t, f.ssh.srv.Addr(), f.ssh.srv.AdvertiseAddr())
var (
advIP = utils.MustParseAddr("10.10.10.1")
advIPPort = utils.MustParseAddr("10.10.10.1:1234")
advBadAddr = &utils.NetAddr{Addr: "localhost:badport", AddrNetwork: "tcp"}
)
// IP-only advertiseAddr should use the port from srvAddress.
f.ssh.srv.setAdvertiseAddr(advIP)
require.Equal(t, fmt.Sprintf("%s:%s", advIP, f.ssh.srvPort), f.ssh.srv.AdvertiseAddr())
// IP and port advertiseAddr should fully override srvAddress.
f.ssh.srv.setAdvertiseAddr(advIPPort)
require.Equal(t, advIPPort.String(), f.ssh.srv.AdvertiseAddr())
// nil advertiseAddr should default to srvAddress.
f.ssh.srv.setAdvertiseAddr(nil)
require.Equal(t, f.ssh.srvAddress, f.ssh.srv.AdvertiseAddr())
// Invalid advertiseAddr should fall back to srvAddress.
f.ssh.srv.setAdvertiseAddr(advBadAddr)
require.Equal(t, f.ssh.srvAddress, f.ssh.srv.AdvertiseAddr())
}
// TestAgentForwardPermission makes sure if RBAC rules don't allow agent
// forwarding, we don't start an agent even if requested.
func TestAgentForwardPermission(t *testing.T) {
t.Parallel()
f := newFixture(t)
ctx := context.Background()
// make sure the role does not allow agent forwarding
roleName := services.RoleNameForUser(f.user)
role, err := f.testSrv.Auth().GetRole(ctx, roleName)
require.NoError(t, err)
roleOptions := role.GetOptions()
roleOptions.ForwardAgent = types.NewBool(false)
role.SetOptions(roleOptions)
require.NoError(t, f.testSrv.Auth().UpsertRole(ctx, role))
se, err := f.ssh.clt.NewSession()
require.NoError(t, err)
t.Cleanup(func() { se.Close() })
	// To interoperate with OpenSSH, requests for agent forwarding always succeed.
	// However, that does not mean the user's agent will actually be forwarded.
require.NoError(t, agent.RequestAgentForwarding(se))
	// check the output of env; we should not see SSH_AUTH_SOCK in it
output, err := se.Output("env")
require.NoError(t, err)
require.NotContains(t, string(output), "SSH_AUTH_SOCK")
}
// TestMaxSessions makes sure that MaxSessions RBAC rules prevent
// too many concurrent sessions.
func TestMaxSessions(t *testing.T) {
t.Parallel()
const maxSessions int64 = 2
f := newFixture(t)
ctx := context.Background()
// make sure the role does not allow agent forwarding
roleName := services.RoleNameForUser(f.user)
role, err := f.testSrv.Auth().GetRole(ctx, roleName)
require.NoError(t, err)
roleOptions := role.GetOptions()
roleOptions.MaxSessions = maxSessions
role.SetOptions(roleOptions)
err = f.testSrv.Auth().UpsertRole(ctx, role)
require.NoError(t, err)
for i := int64(0); i < maxSessions; i++ {
se, err := f.ssh.clt.NewSession()
require.NoError(t, err)
defer se.Close()
}
_, err = f.ssh.clt.NewSession()
require.Error(t, err)
require.Contains(t, err.Error(), "too many session channels")
// verify that max sessions does not affect max connections.
for i := int64(0); i <= maxSessions; i++ {
clt, err := ssh.Dial("tcp", f.ssh.srv.Addr(), f.ssh.cltConfig)
require.NoError(t, err)
require.NoError(t, clt.Close())
}
}
// TestExecLongCommand makes sure that commands that are longer than the
// maximum pipe size on the OS can still be started. This tests the reexec
// functionality of Teleport, as Teleport will reexec itself when launching a
// command and pass the command to launch through a pipe.
func TestExecLongCommand(t *testing.T) {
t.Parallel()
f := newFixture(t)
// Get the path to where the "echo" command is on disk.
echoPath, err := exec.LookPath("echo")
require.NoError(t, err)
se, err := f.ssh.clt.NewSession()
require.NoError(t, err)
defer se.Close()
// Write a message that larger than the maximum pipe size.
_, err = se.Output(fmt.Sprintf("%v %v", echoPath, strings.Repeat("a", maxPipeSize)))
require.NoError(t, err)
}
// TestOpenExecSessionSetsSession tests that OpenExecSession()
// sets ServerContext session.
func TestOpenExecSessionSetsSession(t *testing.T) {
t.Parallel()
f := newFixture(t)
se, err := f.ssh.clt.NewSession()
require.NoError(t, err)
defer se.Close()
// This will trigger an exec request, which will start a non-interactive session,
// which then triggers setting env for SSH_SESSION_ID.
output, err := se.Output("env")
require.NoError(t, err)
require.Contains(t, string(output), teleport.SSHSessionID)
}
// TestAgentForward tests agent forwarding via unix sockets
func TestAgentForward(t *testing.T) {
t.Parallel()
f := newFixture(t)
ctx := context.Background()
roleName := services.RoleNameForUser(f.user)
role, err := f.testSrv.Auth().GetRole(ctx, roleName)
require.NoError(t, err)
roleOptions := role.GetOptions()
roleOptions.ForwardAgent = types.NewBool(true)
role.SetOptions(roleOptions)
err = f.testSrv.Auth().UpsertRole(ctx, role)
require.NoError(t, err)
se, err := f.ssh.clt.NewSession()
require.NoError(t, err)
defer se.Close()
err = agent.RequestAgentForwarding(se)
require.NoError(t, err)
// prepare to send virtual "keyboard input" into the shell:
keyboard, err := se.StdinPipe()
require.NoError(t, err)
// start interactive SSH session (new shell):
err = se.Shell()
require.NoError(t, err)
// create a temp file to collect the shell output into:
tmpFile, err := os.CreateTemp(os.TempDir(), "teleport-agent-forward-test")
require.NoError(t, err)
tmpFile.Close()
defer os.Remove(tmpFile.Name())
	// type 'printenv SSH_AUTH_SOCK > /path/to/tmp/file' into the session (dumping the value of SSH_AUTH_SOCK into the temp file)
_, err = keyboard.Write([]byte(fmt.Sprintf("printenv %v >> %s\n\r", teleport.SSHAuthSock, tmpFile.Name())))
require.NoError(t, err)
// wait for the output
var socketPath string
require.Eventually(t, func() bool {
output, err := os.ReadFile(tmpFile.Name())
if err == nil && len(output) != 0 {
socketPath = strings.TrimSpace(string(output))
return true
}
return false
}, 5*time.Second, 10*time.Millisecond, "failed to read socket path")
// try dialing the ssh agent socket:
file, err := net.Dial("unix", socketPath)
require.NoError(t, err)
clientAgent := agent.NewClient(file)
signers, err := clientAgent.Signers()
require.NoError(t, err)
sshConfig := &ssh.ClientConfig{
User: f.user,
Auth: []ssh.AuthMethod{ssh.PublicKeys(signers...)},
HostKeyCallback: ssh.FixedHostKey(f.signer.PublicKey()),
}
client, err := ssh.Dial("tcp", f.ssh.srv.Addr(), sshConfig)
require.NoError(t, err)
err = client.Close()
require.NoError(t, err)
// make sure the socket persists after the session is closed.
// (agents are started from specific sessions, but apply to all
// sessions on the connection).
err = se.Close()
require.NoError(t, err)
// Pause to allow closure to propagate.
time.Sleep(150 * time.Millisecond)
_, err = net.Dial("unix", socketPath)
require.NoError(t, err)
// make sure the socket is gone after we closed the connection. Note that
// we now expect the client close to fail during the test cleanup, so we
// change the assertion accordingly
require.NoError(t, f.ssh.clt.Close())
f.ssh.assertCltClose = require.Error
// clt must be nullified to prevent double-close during test cleanup
f.ssh.clt = nil
for i := 0; i < 4; i++ {
_, err = net.Dial("unix", socketPath)
if err != nil {
return
}
time.Sleep(50 * time.Millisecond)
}
require.FailNow(t, "expected socket to be closed, still could dial after 150 ms")
}
// TestX11Forward tests x11 forwarding via unix sockets
func TestX11Forward(t *testing.T) {
if os.Getenv("TELEPORT_XAUTH_TEST") == "" {
t.Skip("Skipping test as xauth is not enabled")
}
t.Parallel()
f := newFixture(t)
f.ssh.srv.x11 = &x11.ServerConfig{
Enabled: true,
DisplayOffset: x11.DefaultDisplayOffset,
MaxDisplay: x11.DefaultMaxDisplays,
}
ctx := context.Background()
roleName := services.RoleNameForUser(f.user)
role, err := f.testSrv.Auth().GetRole(ctx, roleName)
require.NoError(t, err)
roleOptions := role.GetOptions()
roleOptions.PermitX11Forwarding = types.NewBool(true)
role.SetOptions(roleOptions)
err = f.testSrv.Auth().UpsertRole(ctx, role)
require.NoError(t, err)
// Open two x11 sessions, the server should handle multiple
// concurrent X11 sessions.
serverDisplay := x11EchoSession(ctx, t, f.ssh.clt)
client2, err := ssh.Dial("tcp", f.ssh.srv.Addr(), f.ssh.cltConfig)
require.NoError(t, err)
serverDisplay2 := x11EchoSession(ctx, t, client2)
// Create multiple XServer requests, the server should
// handle multiple concurrent XServer requests.
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
x11EchoRequest(t, serverDisplay)
}()
wg.Add(1)
go func() {
defer wg.Done()
x11EchoRequest(t, serverDisplay)
}()
wg.Add(1)
go func() {
defer wg.Done()
x11EchoRequest(t, serverDisplay2)
}()
wg.Add(1)
go func() {
defer wg.Done()
x11EchoRequest(t, serverDisplay2)
}()
wg.Wait()
}
// x11EchoSession creates a new ssh session and handles x11 forwarding for the session,
// echoing XServer requests received back to the client. Returns the Display opened on the
// session, which is set in $DISPLAY.
func x11EchoSession(ctx context.Context, t *testing.T, clt *ssh.Client) x11.Display {
se, err := clt.NewSession()
require.NoError(t, err)
t.Cleanup(func() { se.Close() })
// Create a fake client XServer listener which echos
// back whatever it receives.
fakeClientDisplay, err := net.Listen("tcp", ":0")
require.NoError(t, err)
go func() {
for {
conn, err := fakeClientDisplay.Accept()
if err != nil {
return
}
_, err = io.Copy(conn, conn)
require.NoError(t, err)
conn.Close()
}
}()
t.Cleanup(func() { fakeClientDisplay.Close() })
// Handle any x11 channel requests received from the server
// and start x11 forwarding to the client display.
err = x11.ServeChannelRequests(ctx, clt, func(ctx context.Context, nch ssh.NewChannel) {
sch, sin, err := nch.Accept()
require.NoError(t, err)
defer sch.Close()
clientConn, err := net.Dial("tcp", fakeClientDisplay.Addr().String())
require.NoError(t, err)
clientXConn, ok := clientConn.(*net.TCPConn)
require.True(t, ok)
defer clientConn.Close()
go func() {
err := sshutils.ForwardRequests(ctx, sin, se)
if err != nil {
log.WithError(err).Debug("Failed to forward ssh request from server during X11 forwarding")
}
}()
err = x11.Forward(ctx, clientXConn, sch)
require.NoError(t, err)
})
require.NoError(t, err)
// Client requests x11 forwarding for the server session.
clientXAuthEntry, err := x11.NewFakeXAuthEntry(x11.Display{})
require.NoError(t, err)
err = x11.RequestForwarding(se, clientXAuthEntry)
require.NoError(t, err)
// prepare to send virtual "keyboard input" into the shell:
keyboard, err := se.StdinPipe()
require.NoError(t, err)
// start interactive SSH session with x11 forwarding enabled (new shell):
err = se.Shell()
require.NoError(t, err)
// create a temp file to collect the shell output into:
tmpFile, err := os.CreateTemp(os.TempDir(), "teleport-x11-forward-test")
require.NoError(t, err)
t.Cleanup(func() {
os.Remove(tmpFile.Name())
})
// type 'printenv DISPLAY > /path/to/tmp/file' into the session (dumping the value of DISPLAY into the temp file)
_, err = keyboard.Write([]byte(fmt.Sprintf("printenv %v >> %s\n\r", x11.DisplayEnv, tmpFile.Name())))
require.NoError(t, err)
// wait for the output
var display string
require.Eventually(t, func() bool {
output, err := os.ReadFile(tmpFile.Name())
if err == nil && len(output) != 0 {
display = strings.TrimSpace(string(output))
return true
}
return false
}, 5*time.Second, 10*time.Millisecond, "failed to read display")
// Make a new connection to the XServer proxy, the client
// XServer should echo back anything written on it.
serverDisplay, err := x11.ParseDisplay(display)
require.NoError(t, err)
return serverDisplay
}
// x11EchoRequest sends a message to the serverDisplay and expects the
// server to echo the message back to it.
func x11EchoRequest(t *testing.T, serverDisplay x11.Display) {
conn, err := serverDisplay.Dial()
require.NoError(t, err)
defer conn.Close()
msg := "msg"
_, err = conn.Write([]byte(msg))
require.NoError(t, err)
err = conn.CloseWrite()
require.NoError(t, err)
echo, err := io.ReadAll(conn)
require.NoError(t, err)
require.Equal(t, msg, string(echo))
}
func TestAllowedUsers(t *testing.T) {
t.Parallel()
f := newFixture(t)
up, err := newUpack(f.testSrv, f.user, []string{f.user}, wildcardAllow)
require.NoError(t, err)
sshConfig := &ssh.ClientConfig{
User: f.user,
Auth: []ssh.AuthMethod{ssh.PublicKeys(up.certSigner)},
HostKeyCallback: ssh.FixedHostKey(f.signer.PublicKey()),
}
client, err := ssh.Dial("tcp", f.ssh.srv.Addr(), sshConfig)
require.NoError(t, err)
require.NoError(t, client.Close())
// now remove OS user from valid principals
up, err = newUpack(f.testSrv, f.user, []string{"otheruser"}, wildcardAllow)
require.NoError(t, err)
sshConfig = &ssh.ClientConfig{
User: f.user,
Auth: []ssh.AuthMethod{ssh.PublicKeys(up.certSigner)},
HostKeyCallback: ssh.FixedHostKey(f.signer.PublicKey()),
}
_, err = ssh.Dial("tcp", f.ssh.srv.Addr(), sshConfig)
require.Error(t, err)
}
func TestAllowedLabels(t *testing.T) {
t.Parallel()
f := newFixture(t)
tests := []struct {
desc string
inLabelMap types.Labels
outError bool
}{
// Valid static label.
{
desc: "Valid Static",
inLabelMap: types.Labels{"foo": []string{"bar"}},
outError: false,
},
// Invalid static label.
{
desc: "Invalid Static",
inLabelMap: types.Labels{"foo": []string{"baz"}},
outError: true,
},
// Valid dynamic label.
{
desc: "Valid Dynamic",
inLabelMap: types.Labels{"baz": []string{"4"}},
outError: false,
},
// Invalid dynamic label.
{
desc: "Invalid Dynamic",
inLabelMap: types.Labels{"baz": []string{"5"}},
outError: true,
},
}
for _, tt := range tests {
t.Run(tt.desc, func(t *testing.T) {
up, err := newUpack(f.testSrv, f.user, []string{f.user}, tt.inLabelMap)
require.NoError(t, err)
sshConfig := &ssh.ClientConfig{
User: f.user,
Auth: []ssh.AuthMethod{ssh.PublicKeys(up.certSigner)},
HostKeyCallback: ssh.FixedHostKey(f.signer.PublicKey()),
}
_, err = ssh.Dial("tcp", f.ssh.srv.Addr(), sshConfig)
if tt.outError {
require.Error(t, err)
} else {
require.NoError(t, err)
}
})
}
}
// TestKeyAlgorithms makes sure Teleport does not accept invalid user
// certificates. The main check is the certificate algorithms.
func TestKeyAlgorithms(t *testing.T) {
t.Parallel()
f := newFixture(t)
_, ellipticSigner, err := utils.CreateEllipticCertificate("foo", ssh.UserCert)
require.NoError(t, err)
sshConfig := &ssh.ClientConfig{
User: f.user,
Auth: []ssh.AuthMethod{ssh.PublicKeys(ellipticSigner)},
HostKeyCallback: ssh.FixedHostKey(f.signer.PublicKey()),
}
_, err = ssh.Dial("tcp", f.ssh.srv.Addr(), sshConfig)
require.Error(t, err)
}
func TestInvalidSessionID(t *testing.T) {
t.Parallel()
f := newFixture(t)
session, err := f.ssh.clt.NewSession()
require.NoError(t, err)
err = session.Setenv(sshutils.SessionEnvVar, "foo")
require.NoError(t, err)
err = session.Shell()
require.Error(t, err)
}
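// TestSessionHijack verifies that a second user cannot join another user's
// session by setting the same session ID.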
func TestSessionHijack(t *testing.T) {
t.Parallel()
_, err := user.Lookup(teleportTestUser)
if err != nil {
t.Skip(fmt.Sprintf("user %v is not found, skipping test", teleportTestUser))
}
f := newFixture(t)
// user 1 has access to the server
up, err := newUpack(f.testSrv, f.user, []string{f.user}, wildcardAllow)
require.NoError(t, err)
// login with first user
sshConfig := &ssh.ClientConfig{
User: f.user,
Auth: []ssh.AuthMethod{ssh.PublicKeys(up.certSigner)},
HostKeyCallback: ssh.FixedHostKey(f.signer.PublicKey()),
}
client, err := ssh.Dial("tcp", f.ssh.srv.Addr(), sshConfig)
require.NoError(t, err)
defer func() {
err := client.Close()
require.NoError(t, err)
}()
se, err := client.NewSession()
require.NoError(t, err)
defer se.Close()
firstSessionID := string(sess.NewID())
err = se.Setenv(sshutils.SessionEnvVar, firstSessionID)
require.NoError(t, err)
err = se.Shell()
require.NoError(t, err)
// user 2 does not have f.user as a listed principal
up2, err := newUpack(f.testSrv, teleportTestUser, []string{teleportTestUser}, wildcardAllow)
require.NoError(t, err)
sshConfig2 := &ssh.ClientConfig{
User: teleportTestUser,
Auth: []ssh.AuthMethod{ssh.PublicKeys(up2.certSigner)},
HostKeyCallback: ssh.FixedHostKey(f.signer.PublicKey()),
}
client2, err := ssh.Dial("tcp", f.ssh.srv.Addr(), sshConfig2)
require.NoError(t, err)
defer func() {
err := client2.Close()
require.NoError(t, err)
}()
se2, err := client2.NewSession()
require.NoError(t, err)
defer se2.Close()
err = se2.Setenv(sshutils.SessionEnvVar, firstSessionID)
require.NoError(t, err)
// attempt to hijack, should return error
err = se2.Shell()
require.Error(t, err)
}
// testClient dials targetAddr via proxyAddr and runs a simple echo command to verify the connection
func testClient(t *testing.T, f *sshTestFixture, proxyAddr, targetAddr, remoteAddr string, sshConfig *ssh.ClientConfig) {
// Connect to node using registered address
client, err := ssh.Dial("tcp", proxyAddr, sshConfig)
require.NoError(t, err)
defer client.Close()
se, err := client.NewSession()
require.NoError(t, err)
defer se.Close()
writer, err := se.StdinPipe()
require.NoError(t, err)
reader, err := se.StdoutPipe()
require.NoError(t, err)
// Request opening TCP connection to the remote host
require.NoError(t, se.RequestSubsystem(fmt.Sprintf("proxy:%v", targetAddr)))
local, err := utils.ParseAddr("tcp://" + proxyAddr)
require.NoError(t, err)
remote, err := utils.ParseAddr("tcp://" + remoteAddr)
require.NoError(t, err)
pipeNetConn := utils.NewPipeNetConn(
reader,
writer,
se,
local,
remote,
)
defer pipeNetConn.Close()
// Open SSH connection via TCP
conn, chans, reqs, err := ssh.NewClientConn(pipeNetConn,
f.ssh.srv.Addr(), sshConfig)
require.NoError(t, err)
defer conn.Close()
// using this connection as regular SSH
client2 := ssh.NewClient(conn, chans, reqs)
require.NoError(t, err)
defer client2.Close()
se2, err := client2.NewSession()
require.NoError(t, err)
defer se2.Close()
out, err := se2.Output("echo hello")
require.NoError(t, err)
require.Equal(t, "hello\n", string(out))
}
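// mustListen opens a TCP listener on a random localhost port and fails the
// test on error.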
func mustListen(t *testing.T) (net.Listener, utils.NetAddr) {
l, err := net.Listen("tcp", "127.0.0.1:0")
require.NoError(t, err)
addr := utils.NetAddr{AddrNetwork: "tcp", Addr: l.Addr().String()}
return l, addr
}
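// noCache is a caching access point constructor that performs no caching and
// returns the auth client directly.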
func noCache(clt auth.ClientI, cacheName []string) (auth.RemoteProxyAccessPoint, error) {
return clt, nil
}
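// TestProxyRoundRobin verifies that connections through the proxy are load
// balanced across reverse tunnel agents and keep working after one agent is
// closed.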
func TestProxyRoundRobin(t *testing.T) {
t.Parallel()
log.Infof("[TEST START] TestProxyRoundRobin")
f := newFixture(t)
ctx := context.Background()
proxyClient, _ := newProxyClient(t, f.testSrv)
nodeClient, _ := newNodeClient(t, f.testSrv)
logger := logrus.WithField("test", "TestProxyRoundRobin")
listener, reverseTunnelAddress := mustListen(t)
defer listener.Close()
lockWatcher := newLockWatcher(ctx, t, proxyClient)
reverseTunnelServer, err := reversetunnel.NewServer(reversetunnel.Config{
ClusterName: f.testSrv.ClusterName(),
ClientTLS: proxyClient.TLSConfig(),
ID: hostID,
Listener: listener,
HostSigners: []ssh.Signer{f.signer},
LocalAuthClient: proxyClient,
LocalAccessPoint: proxyClient,
NewCachingAccessPoint: noCache,
NewCachingAccessPointOldProxy: noCache,
DirectClusters: []reversetunnel.DirectCluster{{Name: f.testSrv.ClusterName(), Client: proxyClient}},
DataDir: t.TempDir(),
Emitter: proxyClient,
Log: logger,
LockWatcher: lockWatcher,
})
require.NoError(t, err)
logger.WithField("tun-addr", reverseTunnelAddress.String()).Info("Created reverse tunnel server.")
require.NoError(t, reverseTunnelServer.Start())
defer reverseTunnelServer.Close()
proxy, err := New(
utils.NetAddr{AddrNetwork: "tcp", Addr: "localhost:0"},
f.testSrv.ClusterName(),
[]ssh.Signer{f.signer},
proxyClient,
t.TempDir(),
"",
utils.NetAddr{},
nil,
SetProxyMode(reverseTunnelServer, proxyClient),
SetSessionServer(proxyClient),
SetEmitter(nodeClient),
SetNamespace(apidefaults.Namespace),
SetPAMConfig(&pam.Config{Enabled: false}),
SetBPF(&bpf.NOP{}),
SetRestrictedSessionManager(&restricted.NOP{}),
SetClock(f.clock),
SetLockWatcher(lockWatcher),
)
require.NoError(t, err)
require.NoError(t, proxy.Start())
defer proxy.Close()
// set up SSH client using the user private key for signing
up, err := newUpack(f.testSrv, f.user, []string{f.user}, wildcardAllow)
require.NoError(t, err)
// start agent and load balance requests
eventsC := make(chan string, 2)
rsAgent, err := reversetunnel.NewAgent(reversetunnel.AgentConfig{
Context: ctx,
Addr: reverseTunnelAddress,
ClusterName: "remote",
Username: fmt.Sprintf("%v.%v", hostID, f.testSrv.ClusterName()),
Signer: f.signer,
Client: proxyClient,
AccessPoint: proxyClient,
EventsC: eventsC,
Log: logger,
})
require.NoError(t, err)
rsAgent.Start()
rsAgent2, err := reversetunnel.NewAgent(reversetunnel.AgentConfig{
Context: ctx,
Addr: reverseTunnelAddress,
ClusterName: "remote",
Username: fmt.Sprintf("%v.%v", hostID, f.testSrv.ClusterName()),
Signer: f.signer,
Client: proxyClient,
AccessPoint: proxyClient,
EventsC: eventsC,
Log: logger,
})
require.NoError(t, err)
rsAgent2.Start()
defer rsAgent2.Close()
timeout := time.After(time.Second)
for i := 0; i < 2; i++ {
select {
case event := <-eventsC:
require.Equal(t, reversetunnel.ConnectedEvent, event)
case <-timeout:
require.FailNow(t, "timeout waiting for clusters to connect")
}
}
sshConfig := &ssh.ClientConfig{
User: f.user,
Auth: []ssh.AuthMethod{ssh.PublicKeys(up.certSigner)},
HostKeyCallback: ssh.FixedHostKey(f.signer.PublicKey()),
}
_, err = newUpack(f.testSrv, "user1", []string{f.user}, wildcardAllow)
require.NoError(t, err)
for i := 0; i < 3; i++ {
testClient(t, f, proxy.Addr(), f.ssh.srvAddress, f.ssh.srv.Addr(), sshConfig)
}
// close first connection, and test it again
rsAgent.Close()
for i := 0; i < 3; i++ {
testClient(t, f, proxy.Addr(), f.ssh.srvAddress, f.ssh.srv.Addr(), sshConfig)
}
}
// TestProxyDirectAccess tests direct access via the proxy, bypassing the
// reverse tunnel.
func TestProxyDirectAccess(t *testing.T) {
t.Parallel()
f := newFixture(t)
ctx := context.Background()
listener, _ := mustListen(t)
logger := logrus.WithField("test", "TestProxyDirectAccess")
proxyClient, _ := newProxyClient(t, f.testSrv)
lockWatcher := newLockWatcher(ctx, t, proxyClient)
reverseTunnelServer, err := reversetunnel.NewServer(reversetunnel.Config{
ClientTLS: proxyClient.TLSConfig(),
ID: hostID,
ClusterName: f.testSrv.ClusterName(),
Listener: listener,
HostSigners: []ssh.Signer{f.signer},
LocalAuthClient: proxyClient,
LocalAccessPoint: proxyClient,
NewCachingAccessPoint: noCache,
NewCachingAccessPointOldProxy: noCache,
DirectClusters: []reversetunnel.DirectCluster{{Name: f.testSrv.ClusterName(), Client: proxyClient}},
DataDir: t.TempDir(),
Emitter: proxyClient,
Log: logger,
LockWatcher: lockWatcher,
})
require.NoError(t, err)
require.NoError(t, reverseTunnelServer.Start())
defer reverseTunnelServer.Close()
nodeClient, _ := newNodeClient(t, f.testSrv)
proxy, err := New(
utils.NetAddr{AddrNetwork: "tcp", Addr: "localhost:0"},
f.testSrv.ClusterName(),
[]ssh.Signer{f.signer},
proxyClient,
t.TempDir(),
"",
utils.NetAddr{},
nil,
SetProxyMode(reverseTunnelServer, proxyClient),
SetSessionServer(proxyClient),
SetEmitter(nodeClient),
SetNamespace(apidefaults.Namespace),
SetPAMConfig(&pam.Config{Enabled: false}),
SetBPF(&bpf.NOP{}),
SetRestrictedSessionManager(&restricted.NOP{}),
SetClock(f.clock),
SetLockWatcher(lockWatcher),
)
require.NoError(t, err)
require.NoError(t, proxy.Start())
defer proxy.Close()
// set up SSH client using the user private key for signing
up, err := newUpack(f.testSrv, f.user, []string{f.user}, wildcardAllow)
require.NoError(t, err)
sshConfig := &ssh.ClientConfig{
User: f.user,
Auth: []ssh.AuthMethod{ssh.PublicKeys(up.certSigner)},
HostKeyCallback: ssh.FixedHostKey(f.signer.PublicKey()),
}
_, err = newUpack(f.testSrv, "user1", []string{f.user}, wildcardAllow)
require.NoError(t, err)
testClient(t, f, proxy.Addr(), f.ssh.srvAddress, f.ssh.srv.Addr(), sshConfig)
}
// TestPTY requests a PTY for an interactive session
func TestPTY(t *testing.T) {
t.Parallel()
f := newFixture(t)
se, err := f.ssh.clt.NewSession()
require.NoError(t, err)
defer se.Close()
// request PTY with valid size
require.NoError(t, se.RequestPty("xterm", 30, 30, ssh.TerminalModes{}))
// request PTY with invalid size, should still work (selects defaults)
require.NoError(t, se.RequestPty("xterm", 0, 0, ssh.TerminalModes{}))
}
// TestEnv requests setting environment variables. (We are currently ignoring these requests)
func TestEnv(t *testing.T) {
t.Parallel()
f := newFixture(t)
se, err := f.ssh.clt.NewSession()
require.NoError(t, err)
defer se.Close()
require.NoError(t, se.Setenv("HOME", "/"))
}
// TestNoAuth tries to log in with no auth methods and should be rejected
func TestNoAuth(t *testing.T) {
t.Parallel()
f := newFixture(t)
_, err := ssh.Dial("tcp", f.ssh.srv.Addr(), &ssh.ClientConfig{})
require.Error(t, err)
}
// TestPasswordAuth tries to log in with empty pass and should be rejected
func TestPasswordAuth(t *testing.T) {
t.Parallel()
f := newFixture(t)
config := &ssh.ClientConfig{
Auth: []ssh.AuthMethod{ssh.Password("")},
HostKeyCallback: ssh.FixedHostKey(f.signer.PublicKey()),
}
_, err := ssh.Dial("tcp", f.ssh.srv.Addr(), config)
require.Error(t, err)
}
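// TestClientDisconnect verifies that a client connection can be closed while
// a shell session is active without producing an error.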
func TestClientDisconnect(t *testing.T) {
t.Parallel()
f := newFixture(t)
config := &ssh.ClientConfig{
User: f.user,
Auth: []ssh.AuthMethod{ssh.PublicKeys(f.up.certSigner)},
HostKeyCallback: ssh.FixedHostKey(f.signer.PublicKey()),
}
clt, err := ssh.Dial("tcp", f.ssh.srv.Addr(), config)
require.NoError(t, err)
require.NotNil(t, clt)
se, err := f.ssh.clt.NewSession()
require.NoError(t, err)
require.NoError(t, se.Shell())
require.NoError(t, clt.Close())
}
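// TestLimiter verifies that the connection and request rate limits configured
// on the node are enforced.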
func TestLimiter(t *testing.T) {
t.Parallel()
f := newFixture(t)
ctx := context.Background()
limiter, err := limiter.NewLimiter(
limiter.Config{
MaxConnections: 2,
Rates: []limiter.Rate{
{
Period: 10 * time.Second,
Average: 1,
Burst: 3,
},
{
Period: 40 * time.Millisecond,
Average: 10,
Burst: 30,
},
},
},
)
require.NoError(t, err)
nodeClient, _ := newNodeClient(t, f.testSrv)
nodeStateDir := t.TempDir()
srv, err := New(
utils.NetAddr{AddrNetwork: "tcp", Addr: "127.0.0.1:0"},
f.testSrv.ClusterName(),
[]ssh.Signer{f.signer},
nodeClient,
nodeStateDir,
"",
utils.NetAddr{},
nil,
SetLimiter(limiter),
SetShell("/bin/sh"),
SetSessionServer(nodeClient),
SetEmitter(nodeClient),
SetNamespace(apidefaults.Namespace),
SetPAMConfig(&pam.Config{Enabled: false}),
SetBPF(&bpf.NOP{}),
SetRestrictedSessionManager(&restricted.NOP{}),
SetClock(f.clock),
SetLockWatcher(newLockWatcher(ctx, t, nodeClient)),
)
require.NoError(t, err)
require.NoError(t, srv.Start())
require.NoError(t, auth.CreateUploaderDir(nodeStateDir))
defer srv.Close()
// The limiter above allows at most two concurrent connections to this server,
// so a third concurrent connection should be rejected.
config := &ssh.ClientConfig{
User: f.user,
Auth: []ssh.AuthMethod{ssh.PublicKeys(f.up.certSigner)},
HostKeyCallback: ssh.FixedHostKey(f.signer.PublicKey()),
}
clt0, err := ssh.Dial("tcp", srv.Addr(), config)
require.NoError(t, err)
require.NotNil(t, clt0)
se0, err := clt0.NewSession()
require.NoError(t, err)
require.NoError(t, se0.Shell())
// current connections = 2
clt, err := ssh.Dial("tcp", srv.Addr(), config)
require.NotNil(t, clt)
require.NoError(t, err)
se, err := clt.NewSession()
require.NoError(t, err)
require.NoError(t, se.Shell())
// current connections = 3
_, err = ssh.Dial("tcp", srv.Addr(), config)
require.Error(t, err)
require.NoError(t, se.Close())
require.NoError(t, clt.Close())
time.Sleep(50 * time.Millisecond)
// current connections = 2
clt, err = ssh.Dial("tcp", srv.Addr(), config)
require.NotNil(t, clt)
require.NoError(t, err)
se, err = clt.NewSession()
require.NoError(t, err)
require.NoError(t, se.Shell())
// current connections = 3
_, err = ssh.Dial("tcp", srv.Addr(), config)
require.Error(t, err)
require.NoError(t, se.Close())
require.NoError(t, clt.Close())
time.Sleep(50 * time.Millisecond)
// current connections = 2
// the request rate limit should be exceeded now
clt, err = ssh.Dial("tcp", srv.Addr(), config)
require.NotNil(t, clt)
require.NoError(t, err)
_, err = clt.NewSession()
require.Error(t, err)
clt.Close()
}
// TestServerAliveInterval simulates ServerAliveInterval and OpenSSH
// interoperability by sending a [email protected] global request to the
// server and expecting a response in return.
func TestServerAliveInterval(t *testing.T) {
t.Parallel()
f := newFixture(t)
ok, _, err := f.ssh.clt.SendRequest(teleport.KeepAliveReqType, true, nil)
require.NoError(t, err)
require.True(t, ok)
}
// TestGlobalRequestRecordingProxy simulates sending a global out-of-band
// [email protected] request.
func TestGlobalRequestRecordingProxy(t *testing.T) {
t.Parallel()
ctx := context.Background()
f := newFixture(t)
// set cluster config to record at the node
recConfig, err := types.NewSessionRecordingConfigFromConfigFile(types.SessionRecordingConfigSpecV2{
Mode: types.RecordAtNode,
})
require.NoError(t, err)
err = f.testSrv.Auth().SetSessionRecordingConfig(ctx, recConfig)
require.NoError(t, err)
// send a recording proxy request; since the cluster config records at the
// node, the parsed response should be false.
ok, responseBytes, err := f.ssh.clt.SendRequest(teleport.RecordingProxyReqType, true, nil)
require.NoError(t, err)
require.True(t, ok)
response, err := strconv.ParseBool(string(responseBytes))
require.NoError(t, err)
require.False(t, response)
// set cluster config to record at the proxy
recConfig, err = types.NewSessionRecordingConfigFromConfigFile(types.SessionRecordingConfigSpecV2{
Mode: types.RecordAtProxy,
})
require.NoError(t, err)
err = f.testSrv.Auth().SetSessionRecordingConfig(ctx, recConfig)
require.NoError(t, err)
// send the request again; now that the cluster config records at the proxy,
// the request should succeed and the parsed payload should be true.
ok, responseBytes, err = f.ssh.clt.SendRequest(teleport.RecordingProxyReqType, true, nil)
require.NoError(t, err)
require.True(t, ok)
response, err = strconv.ParseBool(string(responseBytes))
require.NoError(t, err)
require.True(t, response)
}
// rawNode is a basic non-Teleport node which holds a valid Teleport cert and
// allows any client to connect. Useful for simulating the basic behavior of
// OpenSSH nodes.
type rawNode struct {
listener net.Listener
cfg ssh.ServerConfig
addr string
errCh chan error
}
// accept an incoming connection and perform a basic ssh handshake
func (r *rawNode) accept() (*ssh.ServerConn, <-chan ssh.NewChannel, <-chan *ssh.Request, error) {
netConn, err := r.listener.Accept()
if err != nil {
return nil, nil, nil, trace.Wrap(err)
}
srvConn, chs, reqs, err := ssh.NewServerConn(netConn, &r.cfg)
if err != nil {
netConn.Close()
return nil, nil, nil, trace.Wrap(err)
}
return srvConn, chs, reqs, nil
}
func (r *rawNode) Close() error {
return trace.Wrap(r.listener.Close())
}
// newRawNode constructs a new raw node instance.
func newRawNode(t *testing.T, authSrv *auth.Server) *rawNode {
hostname, err := os.Hostname()
require.NoError(t, err)
priv, pub, err := authSrv.GenerateKeyPair("")
require.NoError(t, err)
tlsPub, err := auth.PrivateKeyToPublicKeyTLS(priv)
require.NoError(t, err)
// Create host key and certificate for node.
certs, err := authSrv.GenerateHostCerts(context.Background(),
&proto.HostCertsRequest{
HostID: "raw-node",
NodeName: "raw-node",
Role: types.RoleNode,
AdditionalPrincipals: []string{hostname},
DNSNames: []string{hostname},
PublicSSHKey: pub,
PublicTLSKey: tlsPub,
})
require.NoError(t, err)
signer, err := sshutils.NewSigner(priv, certs.SSH)
require.NoError(t, err)
// configure a server which allows any client to connect
cfg := ssh.ServerConfig{
NoClientAuth: true,
PasswordCallback: func(conn ssh.ConnMetadata, password []byte) (*ssh.Permissions, error) {
return &ssh.Permissions{}, nil
},
PublicKeyCallback: func(conn ssh.ConnMetadata, key ssh.PublicKey) (*ssh.Permissions, error) {
return &ssh.Permissions{}, nil
},
}
cfg.AddHostKey(signer)
listener, err := net.Listen("tcp", ":0")
require.NoError(t, err)
_, port, err := net.SplitHostPort(listener.Addr().String())
require.NoError(t, err)
addr := net.JoinHostPort(hostname, port)
return &rawNode{
listener: listener,
cfg: cfg,
addr: addr,
errCh: make(chan error),
}
}
// startX11EchoServer starts a fake node which, for each incoming SSH connection, accepts an
// X11 forwarding request and then dials a single X11 channel which echoes all bytes written
// to it. Used to verify the behavior of X11 forwarding in recording proxies. Returns a
// node and an error channel that can be monitored for asynchronous failures.
func startX11EchoServer(ctx context.Context, t *testing.T, authSrv *auth.Server) (*rawNode, <-chan error) {
node := newRawNode(t, authSrv)
sessionMain := func(ctx context.Context, conn *ssh.ServerConn, chs <-chan ssh.NewChannel) error {
defer conn.Close()
// expect client to open a session channel
var nch ssh.NewChannel
select {
case nch = <-chs:
case <-time.After(time.Second * 3):
return trace.LimitExceeded("Timeout waiting for session channel")
case <-ctx.Done():
return nil
}
if nch.ChannelType() != teleport.ChanSession {
return trace.BadParameter("Unexpected channel type: %q", nch.ChannelType())
}
sch, creqs, err := nch.Accept()
if err != nil {
return trace.Wrap(err)
}
defer sch.Close()
// expect client to send an X11 forwarding request
var req *ssh.Request
select {
case req = <-creqs:
case <-time.After(time.Second * 3):
return trace.LimitExceeded("Timeout waiting for X11 forwarding request")
case <-ctx.Done():
return nil
}
if req.Type != sshutils.X11ForwardRequest {
return trace.BadParameter("Unexpected request type %q", req.Type)
}
if err = req.Reply(true, nil); err != nil {
return trace.Wrap(err)
}
// start a fake X11 channel
xch, _, err := conn.OpenChannel(sshutils.X11ChannelRequest, nil)
if err != nil {
return trace.Wrap(err)
}
defer xch.Close()
// echo all bytes back across the X11 channel
_, err = io.Copy(xch, xch)
if err == nil {
xch.CloseWrite()
} else {
log.Errorf("X11 channel error: %v", err)
}
return nil
}
errorCh := make(chan error, 1)
nodeMain := func() {
for {
conn, chs, _, err := node.accept()
if err != nil {
log.Warnf("X11 echo server closing: %v", err)
return
}
go func() {
if err := sessionMain(ctx, conn, chs); err != nil {
errorCh <- err
}
}()
}
}
go nodeMain()
return node, errorCh
}
// startGatheringErrors starts a goroutine that pulls error values from a
// channel and aggregates them. Returns a channel where the goroutine will post
// the aggregated errors when the routine is stopped.
func startGatheringErrors(ctx context.Context, errCh <-chan error) <-chan []error {
doneGathering := make(chan []error)
go func() {
errors := []error{}
for {
select {
case err := <-errCh:
errors = append(errors, err)
case <-ctx.Done():
doneGathering <- errors
return
}
}
}()
return doneGathering
}
// requireNoErrors waits for any aggregated errors to appear on the supplied channel
// and asserts that the aggregation is empty
func requireNoErrors(t *testing.T, errsCh <-chan []error) {
timeoutCtx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
defer cancel()
select {
case errs := <-errsCh:
require.Empty(t, errs)
case <-timeoutCtx.Done():
require.Fail(t, "Timed out waiting for errors")
}
}
// TestX11ProxySupport verifies that recording proxies correctly forward
// X11 request/channels.
func TestX11ProxySupport(t *testing.T) {
t.Parallel()
f := newFixture(t)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// set cluster config to record at the proxy
recConfig, err := types.NewSessionRecordingConfigFromConfigFile(types.SessionRecordingConfigSpecV2{
Mode: types.RecordAtProxy,
})
require.NoError(t, err)
err = f.testSrv.Auth().SetSessionRecordingConfig(ctx, recConfig)
require.NoError(t, err)
// verify that the proxy is in recording mode
ok, responseBytes, err := f.ssh.clt.SendRequest(teleport.RecordingProxyReqType, true, nil)
require.NoError(t, err)
require.True(t, ok)
response, err := strconv.ParseBool(string(responseBytes))
require.NoError(t, err)
require.True(t, response)
// setup our fake X11 echo server.
x11Ctx, x11Cancel := context.WithCancel(ctx)
node, errCh := startX11EchoServer(x11Ctx, t, f.testSrv.Auth())
// start gathering errors from the X11 server
doneGathering := startGatheringErrors(x11Ctx, errCh)
defer requireNoErrors(t, doneGathering)
// The error gathering routine needs this context to expire or it will wait
// forever for the X11 server to exit. Hence we defer the call to x11Cancel
// here rather than directly below the context creation.
defer x11Cancel()
// Create a direct TCP/IP connection from proxy to our X11 test server.
netConn, err := f.ssh.clt.Dial("tcp", node.addr)
require.NoError(t, err)
defer netConn.Close()
// make an insecure version of our client config (this test is only about X11 forwarding,
// so we don't bother to verify recording proxy key generation here).
cltConfig := *f.ssh.cltConfig
cltConfig.HostKeyCallback = ssh.InsecureIgnoreHostKey()
// Perform ssh handshake and setup client for X11 test server.
cltConn, chs, reqs, err := ssh.NewClientConn(netConn, node.addr, &cltConfig)
require.NoError(t, err)
clt := ssh.NewClient(cltConn, chs, reqs)
sess, err := clt.NewSession()
require.NoError(t, err)
// register X11 channel handler before requesting forwarding to avoid races
xchs := clt.HandleChannelOpen(sshutils.X11ChannelRequest)
require.NotNil(t, xchs)
// Send an X11 forwarding request to the server
ok, err = sess.SendRequest(sshutils.X11ForwardRequest, true, nil)
require.NoError(t, err)
require.True(t, ok)
// wait for server to start an X11 channel
var xnc ssh.NewChannel
select {
case xnc = <-xchs:
case <-time.After(time.Second * 3):
require.Fail(t, "Timeout waiting for X11 channel open from %v", node.addr)
}
require.NotNil(t, xnc)
require.Equal(t, sshutils.X11ChannelRequest, xnc.ChannelType())
xch, _, err := xnc.Accept()
require.NoError(t, err)
defer xch.Close()
// write some data to the channel
msg := []byte("testing!")
_, err = xch.Write(msg)
require.NoError(t, err)
// send EOF
require.NoError(t, xch.CloseWrite())
// expect node to successfully echo the data
rsp := make([]byte, len(msg))
_, err = io.ReadFull(xch, rsp)
require.NoError(t, err)
require.Equal(t, string(msg), string(rsp))
}
// upack holds all ssh signing artefacts needed for signing and checking user keys
type upack struct {
// key is a raw private user key
key []byte
// pkey is parsed private SSH key
pkey interface{}
// pub is a public user key
pub []byte
// cert is a certificate signed by user CA
cert []byte
// pcert is a parsed SSH Certificate
pcert *ssh.Certificate
// signer is a signer that answers signing challenges using private key
signer ssh.Signer
// certSigner is a signer that answers signing challenges using private
// key and a certificate issued by user certificate authority
certSigner ssh.Signer
}
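// newUpack generates a user key pair, creates a user and a role permitting
// the given logins, node labels and X11 forwarding, and returns the signing
// artifacts needed to authenticate as that user.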
func newUpack(testSvr *auth.TestServer, username string, allowedLogins []string, allowedLabels types.Labels) (*upack, error) {
ctx := context.Background()
auth := testSvr.Auth()
upriv, upub, err := auth.GenerateKeyPair("")
if err != nil {
return nil, trace.Wrap(err)
}
user, err := types.NewUser(username)
if err != nil {
return nil, trace.Wrap(err)
}
role := services.RoleForUser(user)
rules := role.GetRules(types.Allow)
rules = append(rules, types.NewRule(types.Wildcard, services.RW()))
role.SetRules(types.Allow, rules)
opts := role.GetOptions()
opts.PermitX11Forwarding = types.NewBool(true)
role.SetOptions(opts)
role.SetLogins(types.Allow, allowedLogins)
role.SetNodeLabels(types.Allow, allowedLabels)
err = auth.UpsertRole(ctx, role)
if err != nil {
return nil, trace.Wrap(err)
}
user.AddRole(role.GetName())
err = auth.UpsertUser(user)
if err != nil {
return nil, trace.Wrap(err)
}
ucert, err := testSvr.AuthServer.GenerateUserCert(upub, user.GetName(), 5*time.Minute, constants.CertificateFormatStandard)
if err != nil {
return nil, trace.Wrap(err)
}
upkey, err := ssh.ParseRawPrivateKey(upriv)
if err != nil {
return nil, trace.Wrap(err)
}
usigner, err := ssh.NewSignerFromKey(upkey)
if err != nil {
return nil, trace.Wrap(err)
}
pcert, _, _, _, err := ssh.ParseAuthorizedKey(ucert)
if err != nil {
return nil, trace.Wrap(err)
}
ucertSigner, err := ssh.NewCertSigner(pcert.(*ssh.Certificate), usigner)
if err != nil {
return nil, trace.Wrap(err)
}
return &upack{
key: upriv,
pkey: upkey,
pub: upub,
cert: ucert,
pcert: pcert.(*ssh.Certificate),
signer: usigner,
certSigner: ucertSigner,
}, nil
}
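// newLockWatcher creates a lock watcher backed by the given client and closes
// it when the test finishes.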
func newLockWatcher(ctx context.Context, t *testing.T, client types.Events) *services.LockWatcher {
lockWatcher, err := services.NewLockWatcher(ctx, services.LockWatcherConfig{
ResourceWatcherConfig: services.ResourceWatcherConfig{
Component: "test",
Client: client,
},
})
require.NoError(t, err)
t.Cleanup(lockWatcher.Close)
return lockWatcher
}
// maxPipeSize is one byte larger than the maximum pipe size on most operating
// systems, which appears to be 65536 bytes.
//
// The maximum pipe size for Linux could potentially be obtained, however
// getting it for macOS is much harder, and it is unclear if that is even
// possible. Therefore just hard code it.
//
// See the following links for more details.
//
// https://man7.org/linux/man-pages/man7/pipe.7.html
// https://github.com/afborchert/pipebuf
// https://unix.stackexchange.com/questions/11946/how-big-is-the-pipe-buffer
const maxPipeSize = 65536 + 1
| [
"\"TELEPORT_XAUTH_TEST\""
]
| []
| [
"TELEPORT_XAUTH_TEST"
]
| [] | ["TELEPORT_XAUTH_TEST"] | go | 1 | 0 | |
repo/fsrepo/migrations/migrations.go | package mfsr
import (
"bufio"
"bytes"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"os/exec"
"path/filepath"
"runtime"
"strconv"
"strings"
)
var DistPath = "https://ipfs.io/ipfs/QmYRLRDKobvg1AXTGeK5Xk6ntWTsjGiHbyNKhWfz7koGpa"
func init() {
if dist := os.Getenv("IPFS_DIST_PATH"); dist != "" {
DistPath = dist
}
}
const migrations = "fs-repo-migrations"
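// migrationsBinName returns the platform-specific file name of the
// fs-repo-migrations binary.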
func migrationsBinName() string {
switch runtime.GOOS {
case "windows":
return migrations + ".exe"
default:
return migrations
}
}
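// RunMigration finds (or downloads) an fs-repo-migrations binary that
// supports the target repo version and runs it to migrate the local repo.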
func RunMigration(newv int) error {
migrateBin := migrationsBinName()
fmt.Println(" => Looking for suitable fs-repo-migrations binary.")
var err error
migrateBin, err = exec.LookPath(migrateBin)
if err == nil {
// check to make sure migrations binary supports our target version
err = verifyMigrationSupportsVersion(migrateBin, newv)
}
if err != nil {
fmt.Println(" => None found, downloading.")
loc, err := GetMigrations()
if err != nil {
fmt.Println(" => Failed to download fs-repo-migrations.")
return err
}
err = verifyMigrationSupportsVersion(loc, newv)
if err != nil {
return fmt.Errorf("no fs-repo-migration binary found for version %d: %s", newv, err)
}
migrateBin = loc
}
cmd := exec.Command(migrateBin, "-to", fmt.Sprint(newv), "-y")
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
fmt.Printf(" => Running: %s -to %d -y\n", migrateBin, newv)
err = cmd.Run()
if err != nil {
fmt.Printf(" => Failed: %s -to %d -y\n", migrateBin, newv)
return fmt.Errorf("migration failed: %s", err)
}
fmt.Printf(" => Success: fs-repo has been migrated to version %d.\n", newv)
return nil
}
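// GetMigrations downloads the latest fs-repo-migrations release into a
// temporary directory and returns the path to the executable.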
func GetMigrations() (string, error) {
latest, err := GetLatestVersion(DistPath, migrations)
if err != nil {
return "", fmt.Errorf("failed to find latest fs-repo-migrations: %s", err)
}
dir, err := ioutil.TempDir("", "go-ipfs-migrate")
if err != nil {
return "", fmt.Errorf("failed to create fs-repo-migrations tempdir: %s", err)
}
out := filepath.Join(dir, migrationsBinName())
err = GetBinaryForVersion(migrations, migrations, DistPath, latest, out)
if err != nil {
return "", fmt.Errorf("failed to download latest fs-repo-migrations: %s", err)
}
err = os.Chmod(out, 0755)
if err != nil {
return "", err
}
return out, nil
}
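// verifyMigrationSupportsVersion checks that the given migrations binary
// supports migrating to at least version vn.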
func verifyMigrationSupportsVersion(fsrbin string, vn int) error {
sn, err := migrationsVersion(fsrbin)
if err != nil {
return err
}
if sn >= vn {
return nil
}
return fmt.Errorf("migrations binary doesn't support version %d: %s", vn, fsrbin)
}
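// migrationsVersion returns the version number reported by the migrations
// binary's -v flag.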
func migrationsVersion(bin string) (int, error) {
out, err := exec.Command(bin, "-v").CombinedOutput()
if err != nil {
return 0, fmt.Errorf("failed to check migrations version: %s", err)
}
vs := strings.Trim(string(out), " \n\t")
vn, err := strconv.Atoi(vs)
if err != nil {
return 0, fmt.Errorf("migrations binary version check did not return a number: %s", err)
}
return vn, nil
}
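// GetVersions fetches the list of released versions of dist from the
// distribution site.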
func GetVersions(ipfspath, dist string) ([]string, error) {
rc, err := httpFetch(ipfspath + "/" + dist + "/versions")
if err != nil {
return nil, err
}
defer rc.Close()
var out []string
scan := bufio.NewScanner(rc)
for scan.Scan() {
out = append(out, scan.Text())
}
return out, nil
}
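// GetLatestVersion returns the most recent non-dev version of dist.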
func GetLatestVersion(ipfspath, dist string) (string, error) {
vs, err := GetVersions(ipfspath, dist)
if err != nil {
return "", err
}
var latest string
for i := len(vs) - 1; i >= 0; i-- {
if !strings.Contains(vs[i], "-dev") {
latest = vs[i]
break
}
}
if latest == "" {
return "", fmt.Errorf("couldn't find a non dev version in the list")
}
return latest, nil
}
func httpGet(url string) (*http.Response, error) {
req, err := http.NewRequest(http.MethodGet, url, nil)
if err != nil {
return nil, fmt.Errorf("http.NewRequest error: %s", err)
}
req.Header.Set("User-Agent", "go-ipfs")
resp, err := http.DefaultClient.Do(req)
if err != nil {
return nil, fmt.Errorf("http.DefaultClient.Do error: %s", err)
}
return resp, nil
}
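// httpFetch performs a GET request and returns the response body, turning
// HTTP error statuses into errors.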
func httpFetch(url string) (io.ReadCloser, error) {
resp, err := httpGet(url)
if err != nil {
return nil, err
}
if resp.StatusCode >= 400 {
mes, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("error reading error body: %s", err)
}
return nil, fmt.Errorf("GET %s error: %s: %s", url, resp.Status, string(mes))
}
return resp.Body, nil
}
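// GetBinaryForVersion downloads the release archive for the given dist and
// version, and unpacks the named binary to out.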
func GetBinaryForVersion(distname, binnom, root, vers, out string) error {
dir, err := ioutil.TempDir("", "go-ipfs-auto-migrate")
if err != nil {
return err
}
var archive string
switch runtime.GOOS {
case "windows":
archive = "zip"
binnom += ".exe"
default:
archive = "tar.gz"
}
osv, err := osWithVariant()
if err != nil {
return err
}
if osv == "linux-musl" {
return fmt.Errorf("linux-musl not supported, you must build the binary from source for your platform")
}
finame := fmt.Sprintf("%s_%s_%s-%s.%s", distname, vers, osv, runtime.GOARCH, archive)
distpath := fmt.Sprintf("%s/%s/%s/%s", root, distname, vers, finame)
data, err := httpFetch(distpath)
if err != nil {
return err
}
arcpath := filepath.Join(dir, finame)
fi, err := os.Create(arcpath)
if err != nil {
return err
}
_, err = io.Copy(fi, data)
if err != nil {
return err
}
fi.Close()
return unpackArchive(distname, binnom, arcpath, out, archive)
}
// osWithVariant returns the OS name with optional variant.
// Currently returns either runtime.GOOS, or "linux-musl".
func osWithVariant() (string, error) {
if runtime.GOOS != "linux" {
return runtime.GOOS, nil
}
// ldd outputs the system's kind of libc.
// - on standard ubuntu: ldd (Ubuntu GLIBC 2.23-0ubuntu5) 2.23
// - on alpine: musl libc (x86_64)
//
// we use the combined stdout+stderr,
// because ldd --version prints differently on different OSes.
// - on standard ubuntu: stdout
// - on alpine: stderr (it probably doesn't know the --version flag)
//
// we suppress non-zero exit codes (see last point about alpine).
out, err := exec.Command("sh", "-c", "ldd --version || true").CombinedOutput()
if err != nil {
return "", err
}
// now just see if we can find "musl" somewhere in the output
scan := bufio.NewScanner(bytes.NewBuffer(out))
for scan.Scan() {
if strings.Contains(scan.Text(), "musl") {
return "linux-musl", nil
}
}
return "linux", nil
}
| [
"\"IPFS_DIST_PATH\""
]
| []
| [
"IPFS_DIST_PATH"
]
| [] | ["IPFS_DIST_PATH"] | go | 1 | 0 | |
src/main/java/com/google/devtools/build/lib/runtime/BlazeRuntime.java | // Copyright 2014 The Bazel Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.devtools.build.lib.runtime;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.eventbus.SubscriberExceptionContext;
import com.google.common.eventbus.SubscriberExceptionHandler;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.Uninterruptibles;
import com.google.devtools.build.lib.actions.ActionKeyContext;
import com.google.devtools.build.lib.analysis.BlazeDirectories;
import com.google.devtools.build.lib.analysis.BlazeVersionInfo;
import com.google.devtools.build.lib.analysis.ConfiguredRuleClassProvider;
import com.google.devtools.build.lib.analysis.ServerDirectories;
import com.google.devtools.build.lib.analysis.config.BuildOptions;
import com.google.devtools.build.lib.analysis.config.ConfigurationFragmentFactory;
import com.google.devtools.build.lib.analysis.test.CoverageReportActionFactory;
import com.google.devtools.build.lib.buildeventstream.PathConverter;
import com.google.devtools.build.lib.buildtool.BuildRequestOptions;
import com.google.devtools.build.lib.clock.BlazeClock;
import com.google.devtools.build.lib.clock.Clock;
import com.google.devtools.build.lib.events.Event;
import com.google.devtools.build.lib.events.OutputFilter;
import com.google.devtools.build.lib.exec.BinTools;
import com.google.devtools.build.lib.packages.Package;
import com.google.devtools.build.lib.packages.PackageFactory;
import com.google.devtools.build.lib.packages.RuleClassProvider;
import com.google.devtools.build.lib.profiler.AutoProfiler;
import com.google.devtools.build.lib.profiler.MemoryProfiler;
import com.google.devtools.build.lib.profiler.ProfilePhase;
import com.google.devtools.build.lib.profiler.Profiler;
import com.google.devtools.build.lib.profiler.Profiler.ProfiledTaskKinds;
import com.google.devtools.build.lib.profiler.ProfilerTask;
import com.google.devtools.build.lib.query2.AbstractBlazeQueryEnvironment;
import com.google.devtools.build.lib.query2.QueryEnvironmentFactory;
import com.google.devtools.build.lib.query2.engine.QueryEnvironment.QueryFunction;
import com.google.devtools.build.lib.query2.output.OutputFormatter;
import com.google.devtools.build.lib.runtime.BlazeCommandDispatcher.LockingMode;
import com.google.devtools.build.lib.runtime.commands.InfoItem;
import com.google.devtools.build.lib.runtime.proto.InvocationPolicyOuterClass.InvocationPolicy;
import com.google.devtools.build.lib.server.CommandProtos.EnvironmentVariable;
import com.google.devtools.build.lib.server.CommandProtos.ExecRequest;
import com.google.devtools.build.lib.server.RPCServer;
import com.google.devtools.build.lib.server.signal.InterruptSignalHandler;
import com.google.devtools.build.lib.shell.JavaSubprocessFactory;
import com.google.devtools.build.lib.shell.SubprocessBuilder;
import com.google.devtools.build.lib.shell.SubprocessFactory;
import com.google.devtools.build.lib.unix.UnixFileSystem;
import com.google.devtools.build.lib.util.AbruptExitException;
import com.google.devtools.build.lib.util.CustomExitCodePublisher;
import com.google.devtools.build.lib.util.ExitCode;
import com.google.devtools.build.lib.util.LoggingUtil;
import com.google.devtools.build.lib.util.OS;
import com.google.devtools.build.lib.util.Pair;
import com.google.devtools.build.lib.util.ProcessUtils;
import com.google.devtools.build.lib.util.ThreadUtils;
import com.google.devtools.build.lib.util.io.OutErr;
import com.google.devtools.build.lib.vfs.FileSystem;
import com.google.devtools.build.lib.vfs.JavaIoFileSystem;
import com.google.devtools.build.lib.vfs.Path;
import com.google.devtools.build.lib.vfs.PathFragment;
import com.google.devtools.build.lib.windows.WindowsFileSystem;
import com.google.devtools.build.lib.windows.WindowsSubprocessFactory;
import com.google.devtools.common.options.CommandNameCache;
import com.google.devtools.common.options.InvocationPolicyParser;
import com.google.devtools.common.options.OptionDefinition;
import com.google.devtools.common.options.OptionPriority.PriorityCategory;
import com.google.devtools.common.options.OptionsBase;
import com.google.devtools.common.options.OptionsClassProvider;
import com.google.devtools.common.options.OptionsParser;
import com.google.devtools.common.options.OptionsParsingException;
import com.google.devtools.common.options.OptionsProvider;
import com.google.devtools.common.options.TriState;
import java.io.BufferedOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.lang.reflect.Type;
import java.nio.charset.StandardCharsets;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Optional;
import java.util.UUID;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Function;
import java.util.logging.Handler;
import java.util.logging.Level;
import java.util.logging.LogRecord;
import java.util.logging.Logger;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import javax.annotation.Nullable;
/**
* The BlazeRuntime class encapsulates the immutable configuration of the current instance. These
* runtime settings and services are available to most parts of any Blaze application for the
* duration of the batch run or server lifetime.
*
* <p>The parts specific to the current command are stored in {@link CommandEnvironment}.
*/
public final class BlazeRuntime {
private static final Pattern suppressFromLog =
Pattern.compile("--client_env=([^=]*(?:auth|pass|cookie)[^=]*)=", Pattern.CASE_INSENSITIVE);
private static final Logger logger = Logger.getLogger(BlazeRuntime.class.getName());
private final FileSystem fileSystem;
private final Iterable<BlazeModule> blazeModules;
private final Map<String, BlazeCommand> commandMap = new LinkedHashMap<>();
private final Clock clock;
private final Runnable abruptShutdownHandler;
private final PackageFactory packageFactory;
private final ImmutableList<ConfigurationFragmentFactory> configurationFragmentFactories;
private final ConfiguredRuleClassProvider ruleClassProvider;
// For bazel info.
private final ImmutableMap<String, InfoItem> infoItems;
// For bazel query.
private final QueryEnvironmentFactory queryEnvironmentFactory;
private final ImmutableList<QueryFunction> queryFunctions;
private final ImmutableList<OutputFormatter> queryOutputFormatters;
private final AtomicInteger storedExitCode = new AtomicInteger();
// We pass this through here to make it available to the MasterLogWriter.
private final OptionsProvider startupOptionsProvider;
private final ProjectFile.Provider projectFileProvider;
@Nullable private final InvocationPolicy moduleInvocationPolicy;
private final String defaultsPackageContent;
private final SubscriberExceptionHandler eventBusExceptionHandler;
private final String productName;
private final PathConverter pathToUriConverter;
private final ActionKeyContext actionKeyContext;
// Workspace state (currently exactly one workspace per server)
private BlazeWorkspace workspace;
private BlazeRuntime(
FileSystem fileSystem,
QueryEnvironmentFactory queryEnvironmentFactory,
ImmutableList<QueryFunction> queryFunctions,
ImmutableList<OutputFormatter> queryOutputFormatters,
PackageFactory pkgFactory,
ConfiguredRuleClassProvider ruleClassProvider,
ImmutableList<ConfigurationFragmentFactory> configurationFragmentFactories,
ImmutableMap<String, InfoItem> infoItems,
ActionKeyContext actionKeyContext,
Clock clock,
Runnable abruptShutdownHandler,
OptionsProvider startupOptionsProvider,
Iterable<BlazeModule> blazeModules,
SubscriberExceptionHandler eventBusExceptionHandler,
ProjectFile.Provider projectFileProvider,
InvocationPolicy moduleInvocationPolicy,
Iterable<BlazeCommand> commands,
String productName,
PathConverter pathToUriConverter) {
// Server state
this.fileSystem = fileSystem;
this.blazeModules = blazeModules;
overrideCommands(commands);
this.packageFactory = pkgFactory;
this.projectFileProvider = projectFileProvider;
this.moduleInvocationPolicy = moduleInvocationPolicy;
this.ruleClassProvider = ruleClassProvider;
this.configurationFragmentFactories = configurationFragmentFactories;
this.infoItems = infoItems;
this.actionKeyContext = actionKeyContext;
this.clock = clock;
this.abruptShutdownHandler = abruptShutdownHandler;
this.startupOptionsProvider = startupOptionsProvider;
this.queryEnvironmentFactory = queryEnvironmentFactory;
this.queryFunctions = queryFunctions;
this.queryOutputFormatters = queryOutputFormatters;
this.eventBusExceptionHandler = eventBusExceptionHandler;
this.defaultsPackageContent =
ruleClassProvider.getDefaultsPackageContent(getModuleInvocationPolicy());
CommandNameCache.CommandNameCacheInstance.INSTANCE.setCommandNameCache(
new CommandNameCacheImpl(getCommandMap()));
this.productName = productName;
this.pathToUriConverter = pathToUriConverter;
}
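/**
* Initializes the single workspace for this server instance, giving each registered module a
* chance to contribute to the workspace before it is built.
*/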
public BlazeWorkspace initWorkspace(BlazeDirectories directories, BinTools binTools)
throws AbruptExitException {
Preconditions.checkState(this.workspace == null);
WorkspaceBuilder builder = new WorkspaceBuilder(directories, binTools);
for (BlazeModule module : blazeModules) {
module.workspaceInit(this, directories, builder);
}
this.workspace =
builder.build(this, packageFactory, ruleClassProvider, eventBusExceptionHandler);
return workspace;
}
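/**
* Returns the coverage report action factory contributed by a Blaze module, or null if no module
* provides one. At most one module may provide such a factory.
*/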
@Nullable public CoverageReportActionFactory getCoverageReportActionFactory(
OptionsClassProvider commandOptions) {
CoverageReportActionFactory firstFactory = null;
for (BlazeModule module : blazeModules) {
CoverageReportActionFactory factory = module.getCoverageReportFactory(commandOptions);
if (factory != null) {
Preconditions.checkState(firstFactory == null,
"only one Blaze Module can have a Coverage Report Factory");
firstFactory = factory;
}
}
return firstFactory;
}
/**
* Adds the given command under the given name to the map of commands.
*
* @throws AssertionError if the name is already used by another command.
*/
private void addCommand(BlazeCommand command) {
String name = command.getClass().getAnnotation(Command.class).name();
if (commandMap.containsKey(name)) {
throw new IllegalStateException("Command name or alias " + name + " is already used.");
}
commandMap.put(name, command);
}
final void overrideCommands(Iterable<BlazeCommand> commands) {
commandMap.clear();
for (BlazeCommand command : commands) {
addCommand(command);
}
}
@Nullable
public InvocationPolicy getModuleInvocationPolicy() {
return moduleInvocationPolicy;
}
/**
* Conditionally enable profiling.
*/
private final boolean initProfiler(CommandEnvironment env, CommonCommandOptions options,
UUID buildID, long execStartTimeNanos) {
OutputStream out = null;
boolean recordFullProfilerData = false;
ProfiledTaskKinds profiledTasks = ProfiledTaskKinds.NONE;
try {
if (options.profilePath != null) {
Path profilePath = env.getWorkspace().getRelative(options.profilePath);
recordFullProfilerData = options.recordFullProfilerData;
out = new BufferedOutputStream(profilePath.getOutputStream(), 1024 * 1024);
env.getReporter().handle(Event.info("Writing profile data to '" + profilePath + "'"));
profiledTasks = ProfiledTaskKinds.ALL;
} else if (options.alwaysProfileSlowOperations) {
recordFullProfilerData = false;
out = null;
profiledTasks = ProfiledTaskKinds.SLOWEST;
}
if (profiledTasks != ProfiledTaskKinds.NONE) {
Profiler.instance().start(profiledTasks, out,
getProductName() + " profile for " + env.getOutputBase() + " at " + new Date()
+ ", build ID: " + buildID,
recordFullProfilerData, clock, execStartTimeNanos);
return true;
}
} catch (IOException e) {
env.getReporter().handle(Event.error("Error while creating profile file: " + e.getMessage()));
}
return false;
}
public FileSystem getFileSystem() {
return fileSystem;
}
public BlazeWorkspace getWorkspace() {
return workspace;
}
public ActionKeyContext getActionKeyContext() {
return actionKeyContext;
}
/**
* The directory in which blaze stores the server state - that is, the socket
* file and a log.
*/
private Path getServerDirectory() {
return getWorkspace().getDirectories().getOutputBase().getChild("server");
}
/**
* Returns the {@link QueryEnvironmentFactory} that should be used to create a
* {@link AbstractBlazeQueryEnvironment}, whenever one is needed.
*/
public QueryEnvironmentFactory getQueryEnvironmentFactory() {
return queryEnvironmentFactory;
}
public ImmutableList<QueryFunction> getQueryFunctions() {
return queryFunctions;
}
public ImmutableList<OutputFormatter> getQueryOutputFormatters() {
return queryOutputFormatters;
}
/**
* Returns the package factory.
*/
public PackageFactory getPackageFactory() {
return packageFactory;
}
/**
* Returns the rule class provider.
*/
public ConfiguredRuleClassProvider getRuleClassProvider() {
return ruleClassProvider;
}
public ImmutableMap<String, InfoItem> getInfoItems() {
return infoItems;
}
public Iterable<BlazeModule> getBlazeModules() {
return blazeModules;
}
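/**
* Returns the default build options contributed by the Blaze modules. Exactly one module must
* provide them, otherwise an exception is thrown.
*/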
public BuildOptions getDefaultBuildOptions() {
BuildOptions options = null;
for (BlazeModule module : blazeModules) {
BuildOptions optionsFromModule = module.getDefaultBuildOptions(this);
if (optionsFromModule != null) {
if (options == null) {
options = optionsFromModule;
} else {
throw new IllegalArgumentException(
"Two or more blaze modules contained default build options.");
}
}
}
if (options == null) {
throw new IllegalArgumentException("No default build options specified in any Blaze module");
}
return options;
}
@SuppressWarnings("unchecked")
public <T extends BlazeModule> T getBlazeModule(Class<T> moduleClass) {
for (BlazeModule module : blazeModules) {
if (module.getClass() == moduleClass) {
return (T) module;
}
}
return null;
}
public ImmutableList<ConfigurationFragmentFactory> getConfigurationFragmentFactories() {
return configurationFragmentFactories;
}
/**
* Returns a provider for project file objects. Can be null if no such provider was set by any of
* the modules.
*/
@Nullable
public ProjectFile.Provider getProjectFileProvider() {
return projectFileProvider;
}
public Path getOutputBase() {
return getWorkspace().getDirectories().getOutputBase();
}
/**
* Hook method called by the BlazeCommandDispatcher prior to the dispatch of
* each command.
*
* @param options The CommonCommandOptions used by every command.
* @throws AbruptExitException if this command is unsuitable to be run as specified
*/
void beforeCommand(CommandEnvironment env, CommonCommandOptions options, long execStartTimeNanos)
throws AbruptExitException {
// Conditionally enable profiling
// We need to compensate for launchTimeNanos (measurements taken outside of the jvm).
long startupTimeNanos = options.startupTime * 1000000L;
if (initProfiler(env, options, env.getCommandId(), execStartTimeNanos - startupTimeNanos)) {
Profiler profiler = Profiler.instance();
// Instead of logEvent() we're calling the low level function to pass the timings we took in
// the launcher. We're setting the INIT phase marker so that it follows immediately the LAUNCH
// phase.
profiler.logSimpleTaskDuration(
execStartTimeNanos - startupTimeNanos,
Duration.ZERO,
ProfilerTask.PHASE,
ProfilePhase.LAUNCH.description);
profiler.logSimpleTaskDuration(
execStartTimeNanos, Duration.ZERO, ProfilerTask.PHASE, ProfilePhase.INIT.description);
}
if (options.memoryProfilePath != null) {
Path memoryProfilePath = env.getWorkingDirectory().getRelative(options.memoryProfilePath);
MemoryProfiler.instance()
.setStableMemoryParameters(options.memoryProfileStableHeapParameters);
try {
MemoryProfiler.instance().start(memoryProfilePath.getOutputStream());
} catch (IOException e) {
env.getReporter().handle(
Event.error("Error while creating memory profile file: " + e.getMessage()));
}
}
// Initialize exit code to dummy value for afterCommand.
storedExitCode.set(ExitCode.RESERVED.getNumericExitCode());
}
/**
* Posts the {@link CommandCompleteEvent}, so that listeners can tidy up. Called by {@link
* #afterCommand}, and by BugReport when crashing from an exception in an async thread.
*/
void notifyCommandComplete(int exitCode) {
if (!storedExitCode.compareAndSet(ExitCode.RESERVED.getNumericExitCode(), exitCode)) {
// This command has already been called, presumably because there is a race between the main
// thread and a worker thread that crashed. Don't try to arbitrate the dispute. If the main
// thread won the race (unlikely, but possible), this may be incorrectly logged as a success.
return;
}
workspace.getSkyframeExecutor().getEventBus().post(new CommandCompleteEvent(exitCode));
}
/** Hook method called by the BlazeCommandDispatcher after the dispatch of each command. */
@VisibleForTesting
public void afterCommand(CommandEnvironment env, int exitCode) {
// Remove any filters that the command might have added to the reporter.
env.getReporter().setOutputFilter(OutputFilter.OUTPUT_EVERYTHING);
notifyCommandComplete(exitCode);
for (BlazeModule module : blazeModules) {
module.afterCommand();
}
// If the command just completed was a build command (or inherits from one), wipe the dependency
// graph if requested. This is sufficient, as this method is always run at the end of commands
// unless the server crashes, in which case no in-memory state will linger for the next build
// anyway.
BuildRequestOptions buildRequestOptions =
env.getOptions().getOptions(BuildRequestOptions.class);
if (buildRequestOptions != null && !buildRequestOptions.keepStateAfterBuild) {
workspace.getSkyframeExecutor().resetEvaluator();
}
env.getBlazeWorkspace().clearEventBus();
try {
Profiler.instance().stop();
MemoryProfiler.instance().stop();
} catch (IOException e) {
env.getReporter().handle(Event.error("Error while writing profile file: " + e.getMessage()));
}
env.getReporter().clearEventBus();
actionKeyContext.clear();
}
// Make sure we keep a strong reference to this logger, so that the
// configuration isn't lost when the gc kicks in.
private static Logger templateLogger = Logger.getLogger("com.google.devtools.build");
/**
* Configures "com.google.devtools.build.*" loggers to the given
* {@code level}. Note: This code relies on static state.
*/
public static void setupLogging(Level level) {
templateLogger.setLevel(level);
templateLogger.info("Log level: " + templateLogger.getLevel());
}
/**
* Returns the Clock-instance used for the entire build. Before,
* individual classes (such as Profiler) used to specify the type
* of clock (e.g. EpochClock) they wanted to use. This made it
* difficult to get Blaze working on Windows as some of the clocks
* available for Linux aren't (directly) available on Windows.
* Setting the Blaze-wide clock upon construction of BlazeRuntime
* allows injecting whatever Clock instance should be used from
* BlazeMain.
*
* @return The Blaze-wide clock
*/
public Clock getClock() {
return clock;
}
public OptionsProvider getStartupOptionsProvider() {
return startupOptionsProvider;
}
public Map<String, BlazeCommand> getCommandMap() {
return commandMap;
}
/** Invokes {@link BlazeModule#blazeShutdown()} on all registered modules. */
public void shutdown() {
for (BlazeModule module : blazeModules) {
module.blazeShutdown();
}
}
public void prepareForAbruptShutdown() {
if (abruptShutdownHandler != null) {
abruptShutdownHandler.run();
}
}
/** Invokes {@link BlazeModule#blazeShutdownOnCrash()} on all registered modules. */
public void shutdownOnCrash() {
for (BlazeModule module : blazeModules) {
module.blazeShutdownOnCrash();
}
}
/**
* Returns the defaults package for the default settings. Should only be called by commands that
* do <i>not</i> process {@link BuildOptions}, since build options can alter the contents of the
* defaults package, which will not be reflected here.
*/
public String getDefaultsPackageContent() {
return defaultsPackageContent;
}
/**
* Returns the defaults package for the given options taken from an optionsProvider.
*/
public String getDefaultsPackageContent(OptionsClassProvider optionsProvider) {
return ruleClassProvider.getDefaultsPackageContent(optionsProvider);
}
/**
* Creates a BuildOptions class for the given options taken from an optionsProvider.
*/
public BuildOptions createBuildOptions(OptionsClassProvider optionsProvider) {
return ruleClassProvider.createBuildOptions(optionsProvider);
}
/**
* An EventBus exception handler that will report the exception to a remote server, if a
* handler is registered.
*/
public static final class RemoteExceptionHandler implements SubscriberExceptionHandler {
@Override
public void handleException(Throwable exception, SubscriberExceptionContext context) {
logger.log(Level.SEVERE, "Failure in EventBus subscriber", exception);
LoggingUtil.logToRemote(Level.SEVERE, "Failure in EventBus subscriber.", exception);
}
}
/**
* An EventBus exception handler that will call BugReport.handleCrash, exiting
* the current thread.
*/
public static final class BugReportingExceptionHandler implements SubscriberExceptionHandler {
@Override
public void handleException(Throwable exception, SubscriberExceptionContext context) {
BugReport.handleCrash(exception);
}
}
/**
* Main method for the Blaze server startup. Note: This method logs
* exceptions to remote servers. Do not add this to a unittest.
*/
public static void main(Iterable<Class<? extends BlazeModule>> moduleClasses, String[] args) {
setupUncaughtHandler(args);
List<BlazeModule> modules = createModules(moduleClasses);
// blaze.cc will put --batch first if the user set it.
if (args.length >= 1 && args[0].equals("--batch")) {
// Run Blaze in batch mode.
System.exit(batchMain(modules, args));
}
logger.info(
"Starting Blaze server with "
+ maybeGetPidString()
+ "args "
+ Arrays.toString(args));
try {
// Run Blaze in server mode.
System.exit(serverMain(modules, OutErr.SYSTEM_OUT_ERR, args));
} catch (RuntimeException | Error e) { // A definite bug...
BugReport.printBug(OutErr.SYSTEM_OUT_ERR, e);
BugReport.sendBugReport(e, Arrays.asList(args));
System.exit(ExitCode.BLAZE_INTERNAL_ERROR.getNumericExitCode());
throw e; // Shouldn't get here.
}
}
@VisibleForTesting
public static List<BlazeModule> createModules(
Iterable<Class<? extends BlazeModule>> moduleClasses) {
ImmutableList.Builder<BlazeModule> result = ImmutableList.builder();
for (Class<? extends BlazeModule> moduleClass : moduleClasses) {
try {
BlazeModule module = moduleClass.getConstructor().newInstance();
result.add(module);
} catch (Throwable e) {
throw new IllegalStateException("Cannot instantiate module " + moduleClass.getName(), e);
}
}
return result.build();
}
/**
* Generates a string form of a request to be written to the logs, filtering the user environment
* to remove anything that looks private. The current filter criteria removes any variable whose
* name includes "auth", "pass", or "cookie".
*
* @param requestStrings
* @return the filtered request to write to the log.
*/
public static String getRequestLogString(List<String> requestStrings) {
StringBuilder buf = new StringBuilder();
buf.append('[');
String sep = "";
Matcher m = suppressFromLog.matcher("");
for (String s : requestStrings) {
buf.append(sep);
m.reset(s);
if (m.lookingAt()) {
buf.append(m.group());
buf.append("__private_value_removed__");
} else {
buf.append(s);
}
sep = ", ";
}
buf.append(']');
return buf.toString();
}
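  // A sketch of the expected behaviour (illustrative only, not taken from the original sources):
  // assuming the suppressFromLog pattern matches a leading "--...auth...=" style prefix, a call such
  // as
  //   getRequestLogString(ImmutableList.of("build", "--my_auth_token=secret", "//foo:bar"))
  // would yield roughly
  //   "[build, --my_auth_token=__private_value_removed__, //foo:bar]"
  // i.e. the matched prefix is kept and only the private value portion is replaced.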
/**
   * Command line options split into two parts: startup options and everything else.
*/
@VisibleForTesting
static class CommandLineOptions {
private final List<String> startupArgs;
private final List<String> otherArgs;
CommandLineOptions(List<String> startupArgs, List<String> otherArgs) {
this.startupArgs = ImmutableList.copyOf(startupArgs);
this.otherArgs = ImmutableList.copyOf(otherArgs);
}
public List<String> getStartupArgs() {
return startupArgs;
}
public List<String> getOtherArgs() {
return otherArgs;
}
}
/**
* Splits given options into two lists - arguments matching options defined in this class and
* everything else, while preserving order in each list.
*
* <p>Note that this method relies on the startup options always being in the
* <code>--flag=ARG</code> form (instead of <code>--flag ARG</code>). This is enforced by
* <code>GetArgumentArray()</code> in <code>blaze.cc</code> by reconstructing the startup
* options from their parsed versions instead of using <code>argv</code> verbatim.
*/
static CommandLineOptions splitStartupOptions(
Iterable<BlazeModule> modules, String... args) {
List<String> prefixes = new ArrayList<>();
List<OptionDefinition> startupOptions = Lists.newArrayList();
for (Class<? extends OptionsBase> defaultOptions
: BlazeCommandUtils.getStartupOptions(modules)) {
startupOptions.addAll(OptionsParser.getOptionDefinitions(defaultOptions));
}
for (OptionDefinition optionDefinition : startupOptions) {
Type optionType = optionDefinition.getField().getType();
prefixes.add("--" + optionDefinition.getOptionName());
if (optionType == boolean.class || optionType == TriState.class) {
prefixes.add("--no" + optionDefinition.getOptionName());
}
}
List<String> startupArgs = new ArrayList<>();
List<String> otherArgs = Lists.newArrayList(args);
for (Iterator<String> argi = otherArgs.iterator(); argi.hasNext(); ) {
String arg = argi.next();
if (!arg.startsWith("--")) {
break; // stop at command - all startup options would be specified before it.
}
for (String prefix : prefixes) {
if (arg.startsWith(prefix)) {
startupArgs.add(arg);
argi.remove();
break;
}
}
}
return new CommandLineOptions(startupArgs, otherArgs);
}
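  // Illustrative example of the split (a sketch, not taken from the original sources): for args
  //   {"--output_base=/tmp/out", "--noblock_for_lock", "build", "--keep_going", "//foo"}
  // the prefixes collected above route "--output_base=/tmp/out" (and "--noblock_for_lock", assuming
  // block_for_lock is a registered boolean startup option) into getStartupArgs(), while "build",
  // "--keep_going" and "//foo" stay in getOtherArgs(); both lists keep their original order.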
private static InterruptSignalHandler captureSigint() {
final Thread mainThread = Thread.currentThread();
final AtomicInteger numInterrupts = new AtomicInteger();
final Runnable interruptWatcher =
() -> {
int count = 0;
// Not an actual infinite loop because it's run in a daemon thread.
while (true) {
count++;
Uninterruptibles.sleepUninterruptibly(10, TimeUnit.SECONDS);
logger.warning("Slow interrupt number " + count + " in batch mode");
ThreadUtils.warnAboutSlowInterrupt();
}
};
return new InterruptSignalHandler() {
@Override
public void run() {
logger.info("User interrupt");
OutErr.SYSTEM_OUT_ERR.printErrLn("Blaze received an interrupt");
mainThread.interrupt();
int curNumInterrupts = numInterrupts.incrementAndGet();
if (curNumInterrupts == 1) {
Thread interruptWatcherThread = new Thread(interruptWatcher, "interrupt-watcher");
interruptWatcherThread.setDaemon(true);
interruptWatcherThread.start();
} else if (curNumInterrupts == 2) {
logger.warning("Second --batch interrupt: Reverting to JVM SIGINT handler");
uninstall();
}
}
};
}
/**
* A main method that runs blaze commands in batch mode. The return value indicates the desired
* exit status of the program.
*/
private static int batchMain(Iterable<BlazeModule> modules, String[] args) {
InterruptSignalHandler signalHandler = captureSigint();
CommandLineOptions commandLineOptions = splitStartupOptions(modules, args);
logger.info(
"Running Blaze in batch mode with "
+ maybeGetPidString()
+ "startup args "
+ commandLineOptions.getStartupArgs());
BlazeRuntime runtime;
InvocationPolicy policy;
try {
runtime = newRuntime(modules, commandLineOptions.getStartupArgs(), null);
policy = InvocationPolicyParser.parsePolicy(
runtime.getStartupOptionsProvider().getOptions(BlazeServerStartupOptions.class)
.invocationPolicy);
} catch (OptionsParsingException e) {
OutErr.SYSTEM_OUT_ERR.printErrLn(e.getMessage());
return ExitCode.COMMAND_LINE_ERROR.getNumericExitCode();
} catch (AbruptExitException e) {
OutErr.SYSTEM_OUT_ERR.printErrLn(e.getMessage());
return e.getExitCode().getNumericExitCode();
}
ImmutableList.Builder<Pair<String, String>> startupOptionsFromCommandLine =
ImmutableList.builder();
for (String option : commandLineOptions.getStartupArgs()) {
startupOptionsFromCommandLine.add(new Pair<>("", option));
}
BlazeCommandDispatcher dispatcher = new BlazeCommandDispatcher(runtime);
boolean shutdownDone = false;
try {
logger.info(getRequestLogString(commandLineOptions.getOtherArgs()));
BlazeCommandResult result = dispatcher.exec(
policy,
commandLineOptions.getOtherArgs(),
OutErr.SYSTEM_OUT_ERR,
LockingMode.ERROR_OUT,
"batch client",
runtime.getClock().currentTimeMillis(),
Optional.of(startupOptionsFromCommandLine.build()));
if (result.getExecRequest() == null) {
// Simple case: we are given an exit code
return result.getExitCode().getNumericExitCode();
}
// Not so simple case: we need to execute a binary on shutdown. exec() is not accessible from
// Java and is impossible on Windows in any case, so we just execute the binary after getting
// out of the way as completely as possible and forward its exit code.
// When this code is executed, no locks are held: the client lock is released by the client
// before it executes any command and the server lock is handled by BlazeCommandDispatcher,
// whose job is done by the time we get here.
runtime.shutdown();
dispatcher.shutdown();
shutdownDone = true;
signalHandler.uninstall();
ExecRequest request = result.getExecRequest();
String[] argv = new String[request.getArgvCount()];
for (int i = 0; i < argv.length; i++) {
argv[i] = request.getArgv(i).toString(StandardCharsets.ISO_8859_1);
}
String workingDirectory = request.getWorkingDirectory().toString(StandardCharsets.ISO_8859_1);
try {
ProcessBuilder process = new ProcessBuilder()
.command(argv)
.directory(new File(workingDirectory))
.inheritIO();
for (int i = 0; i < request.getEnvironmentVariableCount(); i++) {
EnvironmentVariable variable = request.getEnvironmentVariable(i);
process.environment().put(variable.getName().toString(StandardCharsets.ISO_8859_1),
variable.getValue().toString(StandardCharsets.ISO_8859_1));
}
return process.start().waitFor();
} catch (IOException e) {
// We are in batch mode, thus, stdout/stderr are the same as that of the client.
System.err.println("Cannot execute process for 'run' command: " + e.getMessage());
logger.log(Level.SEVERE, "Exception while executing binary from 'run' command", e);
return ExitCode.LOCAL_ENVIRONMENTAL_ERROR.getNumericExitCode();
}
} catch (InterruptedException e) {
// This is almost main(), so it's okay to just swallow it. We are exiting soon.
return ExitCode.INTERRUPTED.getNumericExitCode();
} finally {
if (!shutdownDone) {
runtime.shutdown();
dispatcher.shutdown();
}
}
}
/**
* A main method that does not send email. The return value indicates the desired exit status of
* the program.
*/
private static int serverMain(Iterable<BlazeModule> modules, OutErr outErr, String[] args) {
InterruptSignalHandler sigintHandler = null;
try {
final RPCServer[] rpcServer = new RPCServer[1];
Runnable prepareForAbruptShutdown = () -> rpcServer[0].prepareForAbruptShutdown();
BlazeRuntime runtime = newRuntime(modules, Arrays.asList(args), prepareForAbruptShutdown);
BlazeCommandDispatcher dispatcher = new BlazeCommandDispatcher(runtime);
BlazeServerStartupOptions startupOptions =
runtime.getStartupOptionsProvider().getOptions(BlazeServerStartupOptions.class);
try {
// This is necessary so that Bazel kind of works during bootstrapping, at which time the
// gRPC server is not compiled in so that we don't need gRPC for bootstrapping.
Class<?> factoryClass = Class.forName(
"com.google.devtools.build.lib.server.GrpcServerImpl$Factory");
RPCServer.Factory factory = (RPCServer.Factory) factoryClass.getConstructor().newInstance();
rpcServer[0] = factory.create(dispatcher, runtime.getClock(),
startupOptions.commandPort,
runtime.getWorkspace().getWorkspace(),
runtime.getServerDirectory(),
startupOptions.maxIdleSeconds);
} catch (ReflectiveOperationException | IllegalArgumentException e) {
throw new AbruptExitException("gRPC server not compiled in", ExitCode.BLAZE_INTERNAL_ERROR);
}
// Register the signal handler.
sigintHandler =
new InterruptSignalHandler() {
@Override
public void run() {
logger.severe("User interrupt");
rpcServer[0].interrupt();
}
};
rpcServer[0].serve();
runtime.shutdown();
dispatcher.shutdown();
return ExitCode.SUCCESS.getNumericExitCode();
} catch (OptionsParsingException e) {
outErr.printErr(e.getMessage());
return ExitCode.COMMAND_LINE_ERROR.getNumericExitCode();
} catch (IOException e) {
outErr.printErr("I/O Error: " + e.getMessage());
return ExitCode.BUILD_FAILURE.getNumericExitCode();
} catch (AbruptExitException e) {
outErr.printErr(e.getMessage());
return e.getExitCode().getNumericExitCode();
} finally {
if (sigintHandler != null) {
sigintHandler.uninstall();
}
}
}
private static FileSystem defaultFileSystemImplementation() {
if ("0".equals(System.getProperty("io.bazel.EnableJni"))) {
// Ignore UnixFileSystem, to be used for bootstrapping.
return OS.getCurrent() == OS.WINDOWS ? new WindowsFileSystem() : new JavaIoFileSystem();
}
// The JNI-based UnixFileSystem is faster, but on Windows it is not available.
return OS.getCurrent() == OS.WINDOWS ? new WindowsFileSystem() : new UnixFileSystem();
}
private static SubprocessFactory subprocessFactoryImplementation() {
if (!"0".equals(System.getProperty("io.bazel.EnableJni")) && OS.getCurrent() == OS.WINDOWS) {
return WindowsSubprocessFactory.INSTANCE;
} else {
return JavaSubprocessFactory.INSTANCE;
}
}
/**
* Parses the command line arguments into a {@link OptionsParser} object.
*
* <p>This function needs to parse the --option_sources option manually so that the real option
* parser can set the source for every option correctly. If that cannot be parsed or is missing,
* we just report an unknown source for every startup option.
*/
private static OptionsProvider parseOptions(
Iterable<BlazeModule> modules, List<String> args) throws OptionsParsingException {
ImmutableList<Class<? extends OptionsBase>> optionClasses =
BlazeCommandUtils.getStartupOptions(modules);
// First parse the command line so that we get the option_sources argument
OptionsParser parser = OptionsParser.newOptionsParser(optionClasses);
parser.setAllowResidue(false);
parser.parse(PriorityCategory.COMMAND_LINE, null, args);
Map<String, String> optionSources =
parser.getOptions(BlazeServerStartupOptions.class).optionSources;
Function<OptionDefinition, String> sourceFunction =
option ->
!optionSources.containsKey(option.getOptionName())
? "default"
: optionSources.get(option.getOptionName()).isEmpty()
? "command line"
: optionSources.get(option.getOptionName());
// Then parse the command line again, this time with the correct option sources
parser = OptionsParser.newOptionsParser(optionClasses);
parser.setAllowResidue(false);
parser.parseWithSourceFunction(PriorityCategory.COMMAND_LINE, sourceFunction, args);
return parser;
}
/**
* Creates a new blaze runtime, given the install and output base directories.
*
* <p>Note: This method can and should only be called once per startup, as it also creates the
* filesystem object that will be used for the runtime. So it should only ever be called from the
* main method of the Blaze program.
*
* @param args Blaze startup options.
*
   * @return a new BlazeRuntime instance initialized with the given filesystem and directories;
   *     fatal initialization failures are reported by throwing {@link AbruptExitException} rather
   *     than by returning an error string
*/
private static BlazeRuntime newRuntime(Iterable<BlazeModule> blazeModules, List<String> args,
Runnable abruptShutdownHandler)
throws AbruptExitException, OptionsParsingException {
OptionsProvider options = parseOptions(blazeModules, args);
for (BlazeModule module : blazeModules) {
module.globalInit(options);
}
BlazeServerStartupOptions startupOptions = options.getOptions(BlazeServerStartupOptions.class);
String productName = startupOptions.productName.toLowerCase(Locale.US);
PathFragment workspaceDirectory = startupOptions.workspaceDirectory;
PathFragment outputUserRoot = startupOptions.outputUserRoot;
PathFragment installBase = startupOptions.installBase;
PathFragment outputBase = startupOptions.outputBase;
maybeForceJNIByGettingPid(installBase); // Must be before first use of JNI.
// From the point of view of the Java program --install_base, --output_base, and
// --output_user_root are mandatory options, despite the comment in their declarations.
if (installBase == null || !installBase.isAbsolute()) { // (includes "" default case)
throw new IllegalArgumentException(
"Bad --install_base option specified: '" + installBase + "'");
}
if (outputUserRoot != null && !outputUserRoot.isAbsolute()) { // (includes "" default case)
throw new IllegalArgumentException(
"Bad --output_user_root option specified: '" + outputUserRoot + "'");
}
if (outputBase != null && !outputBase.isAbsolute()) { // (includes "" default case)
throw new IllegalArgumentException(
"Bad --output_base option specified: '" + outputBase + "'");
}
FileSystem fs = null;
for (BlazeModule module : blazeModules) {
FileSystem moduleFs = module.getFileSystem(options);
if (moduleFs != null) {
Preconditions.checkState(fs == null, "more than one module returns a file system");
fs = moduleFs;
}
}
if (fs == null) {
fs = defaultFileSystemImplementation();
}
Path.setFileSystemForSerialization(fs);
SubprocessBuilder.setSubprocessFactory(subprocessFactoryImplementation());
Path outputUserRootPath = fs.getPath(outputUserRoot);
Path installBasePath = fs.getPath(installBase);
Path outputBasePath = fs.getPath(outputBase);
Path workspaceDirectoryPath = null;
if (!workspaceDirectory.equals(PathFragment.EMPTY_FRAGMENT)) {
workspaceDirectoryPath = fs.getPath(workspaceDirectory);
}
ServerDirectories serverDirectories =
new ServerDirectories(
installBasePath, outputBasePath, outputUserRootPath, startupOptions.installMD5);
Clock clock = BlazeClock.instance();
BlazeRuntime.Builder runtimeBuilder =
new BlazeRuntime.Builder()
.setProductName(productName)
.setFileSystem(fs)
.setServerDirectories(serverDirectories)
.setActionKeyContext(new ActionKeyContext())
.setStartupOptionsProvider(options)
.setClock(clock)
.setAbruptShutdownHandler(abruptShutdownHandler)
// TODO(bazel-team): Make BugReportingExceptionHandler the default.
// See bug "Make exceptions in EventBus subscribers fatal"
.setEventBusExceptionHandler(
startupOptions.fatalEventBusExceptions
|| !BlazeVersionInfo.instance().isReleasedBlaze()
? new BlazeRuntime.BugReportingExceptionHandler()
: new BlazeRuntime.RemoteExceptionHandler());
if (System.getenv("TEST_TMPDIR") != null
&& System.getenv("NO_CRASH_ON_LOGGING_IN_TEST") == null) {
LoggingUtil.installRemoteLogger(getTestCrashLogger());
}
runtimeBuilder.addBlazeModule(new BuiltinCommandModule());
// This module needs to be registered before any module providing a SpawnCache implementation.
runtimeBuilder.addBlazeModule(new NoSpawnCacheModule());
runtimeBuilder.addBlazeModule(new CommandLogModule());
for (BlazeModule blazeModule : blazeModules) {
runtimeBuilder.addBlazeModule(blazeModule);
}
BlazeRuntime runtime = runtimeBuilder.build();
BlazeDirectories directories =
new BlazeDirectories(serverDirectories, workspaceDirectoryPath, productName);
BinTools binTools;
try {
binTools = BinTools.forProduction(directories);
} catch (IOException e) {
throw new AbruptExitException(
"Cannot enumerate embedded binaries: " + e.getMessage(),
ExitCode.LOCAL_ENVIRONMENTAL_ERROR);
}
runtime.initWorkspace(directories, binTools);
CustomExitCodePublisher.setAbruptExitStatusFileDir(serverDirectories.getOutputBase());
// Most static initializers for @SkylarkSignature-containing classes have already run by this
// point, but this will pick up the stragglers.
initSkylarkBuiltinsRegistry();
AutoProfiler.setClock(runtime.getClock());
BugReport.setRuntime(runtime);
return runtime;
}
/**
* Configures the Skylark builtins registry.
*
* <p>Any class containing {@link SkylarkSignature}-annotated fields should call
* {@link SkylarkSignatureProcessor#configureSkylarkFunctions} on itself. This serves two
* purposes: 1) it initializes those fields for use, and 2) it registers them with the Skylark
* builtins registry object
* ({@link com.google.devtools.build.lib.syntax.Runtime#getBuiltinRegistry}). Unfortunately
* there's some technical debt here: The registry object is static and the registration occurs
* inside static initializer blocks.
*
* <p>The registry supports concurrent read/write access, but read access is not actually
* efficient (lockless) until write access is disallowed by calling its
* {@link com.google.devtools.build.lib.syntax.Runtime.BuiltinRegistry#freeze freeze} method.
* We want to freeze before the build begins, but not before all classes have had a chance to run
* their static initializers.
*
* <p>Therefore, this method first ensures that the initializers have run, and then explicitly
* freezes the registry. It ensures initialization by calling a no-op static method on the class.
* Only classes whose initializers have been observed to cause {@code BuiltinRegistry} to throw an
* exception need to be included here, since that indicates that their initialization did not
* happen by this point in time.
*
* <p>Unit tests don't need to worry about registry freeze exceptions, since the registry isn't
* frozen at all for them. They just pay the cost of extra synchronization on every access.
*/
private static void initSkylarkBuiltinsRegistry() {
// Currently no classes need to be initialized here. The hook's still here because it's
// possible it may be needed again in the future.
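    // If a straggler class ever needs to be listed here again, the pattern described in the javadoc
    // above is a no-op static call placed before freeze(); for example (the class and method names
    // below are hypothetical):
    //   SomeSkylarkApiClass.forceStaticInitialization();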
com.google.devtools.build.lib.syntax.Runtime.getBuiltinRegistry().freeze();
}
private static String maybeGetPidString() {
Integer pid = maybeForceJNIByGettingPid(null);
return pid == null ? "" : "pid " + pid + " and ";
}
/** Loads JNI libraries, if necessary under the current platform. */
@Nullable
private static Integer maybeForceJNIByGettingPid(@Nullable PathFragment installBase) {
return jniLibsAvailable() ? getPidUsingJNI(installBase) : null;
}
private static boolean jniLibsAvailable() {
return !"0".equals(System.getProperty("io.bazel.EnableJni"));
}
// Force JNI linking at a moment when we have 'installBase' handy, and print
// an informative error if it fails.
private static int getPidUsingJNI(@Nullable PathFragment installBase) {
try {
return ProcessUtils.getpid(); // force JNI initialization
} catch (UnsatisfiedLinkError t) {
System.err.println(
"JNI initialization failed: "
+ t.getMessage()
+ ". "
+ "Possibly your installation has been corrupted"
+ (installBase == null
? ""
: "; if this problem persists, try 'rm -fr " + installBase + "'")
+ ".");
throw t;
}
}
/**
* Returns a logger that crashes as soon as it's written to, since tests should not cause events
* that would be logged.
*/
@VisibleForTesting
public static Future<Logger> getTestCrashLogger() {
Logger crashLogger = Logger.getAnonymousLogger();
crashLogger.addHandler(
new Handler() {
@Override
public void publish(LogRecord record) {
System.err.println("Remote logging disabled for testing, forcing abrupt shutdown.");
System.err.printf("%s#%s: %s\n",
record.getSourceClassName(),
record.getSourceMethodName(),
record.getMessage());
Throwable e = record.getThrown();
if (e != null) {
e.printStackTrace();
}
Runtime.getRuntime().halt(ExitCode.BLAZE_INTERNAL_ERROR.getNumericExitCode());
}
@Override
public void flush() {
throw new IllegalStateException();
}
@Override
public void close() {
throw new IllegalStateException();
}
});
return Futures.immediateFuture(crashLogger);
}
/**
* Make sure async threads cannot be orphaned. This method makes sure bugs are reported to
* telemetry and the proper exit code is reported.
*/
private static void setupUncaughtHandler(final String[] args) {
Thread.setDefaultUncaughtExceptionHandler(
(thread, throwable) -> BugReport.handleCrash(throwable, args));
}
public String getProductName() {
return productName;
}
public PathConverter getPathToUriConverter() {
return pathToUriConverter;
}
/**
   * A builder for {@link BlazeRuntime} objects. The only required fields are the product name,
   * the {@link ServerDirectories}, and the startup options provider (see the checks in
   * {@link #build}). All other fields have safe default values.
*
* <p>The default behavior of the BlazeRuntime's EventBus is to exit when a subscriber throws
* an exception. Please plan appropriately.
*/
public static class Builder {
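    // Minimal usage sketch (placeholder values; the real call site is newRuntime() above, which
    // also registers modules and an event-bus exception handler):
    //   BlazeRuntime runtime = new BlazeRuntime.Builder()
    //       .setProductName("bazel")
    //       .setFileSystem(fs)
    //       .setServerDirectories(serverDirectories)
    //       .setStartupOptionsProvider(startupOptions)
    //       .build();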
private FileSystem fileSystem;
private ServerDirectories serverDirectories;
private Clock clock;
private Runnable abruptShutdownHandler;
private OptionsProvider startupOptionsProvider;
private final List<BlazeModule> blazeModules = new ArrayList<>();
private SubscriberExceptionHandler eventBusExceptionHandler = new RemoteExceptionHandler();
private UUID instanceId;
private String productName;
private ActionKeyContext actionKeyContext;
public BlazeRuntime build() throws AbruptExitException {
Preconditions.checkNotNull(productName);
Preconditions.checkNotNull(serverDirectories);
Preconditions.checkNotNull(startupOptionsProvider);
ActionKeyContext actionKeyContext =
this.actionKeyContext != null ? this.actionKeyContext : new ActionKeyContext();
Clock clock = (this.clock == null) ? BlazeClock.instance() : this.clock;
UUID instanceId = (this.instanceId == null) ? UUID.randomUUID() : this.instanceId;
Preconditions.checkNotNull(clock);
for (BlazeModule module : blazeModules) {
module.blazeStartup(
startupOptionsProvider,
BlazeVersionInfo.instance(),
instanceId,
fileSystem,
serverDirectories,
clock);
}
ServerBuilder serverBuilder = new ServerBuilder();
serverBuilder.addQueryOutputFormatters(OutputFormatter.getDefaultFormatters());
for (BlazeModule module : blazeModules) {
module.serverInit(startupOptionsProvider, serverBuilder);
}
ConfiguredRuleClassProvider.Builder ruleClassBuilder =
new ConfiguredRuleClassProvider.Builder();
for (BlazeModule module : blazeModules) {
module.initializeRuleClasses(ruleClassBuilder);
}
ConfiguredRuleClassProvider ruleClassProvider = ruleClassBuilder.build();
Package.Builder.Helper packageBuilderHelper = null;
for (BlazeModule module : blazeModules) {
Package.Builder.Helper candidateHelper =
module.getPackageBuilderHelper(ruleClassProvider, fileSystem);
if (candidateHelper != null) {
Preconditions.checkState(packageBuilderHelper == null,
"more than one module defines a package builder helper");
packageBuilderHelper = candidateHelper;
}
}
if (packageBuilderHelper == null) {
packageBuilderHelper = Package.Builder.DefaultHelper.INSTANCE;
}
PackageFactory packageFactory =
new PackageFactory(
ruleClassProvider,
serverBuilder.getAttributeContainerFactory(),
serverBuilder.getEnvironmentExtensions(),
BlazeVersionInfo.instance().getVersion(),
packageBuilderHelper);
ProjectFile.Provider projectFileProvider = null;
for (BlazeModule module : blazeModules) {
ProjectFile.Provider candidate = module.createProjectFileProvider();
if (candidate != null) {
Preconditions.checkState(projectFileProvider == null,
"more than one module defines a project file provider");
projectFileProvider = candidate;
}
}
return new BlazeRuntime(
fileSystem,
serverBuilder.getQueryEnvironmentFactory(),
serverBuilder.getQueryFunctions(),
serverBuilder.getQueryOutputFormatters(),
packageFactory,
ruleClassProvider,
ruleClassProvider.getConfigurationFragments(),
serverBuilder.getInfoItems(),
actionKeyContext,
clock,
abruptShutdownHandler,
startupOptionsProvider,
ImmutableList.copyOf(blazeModules),
eventBusExceptionHandler,
projectFileProvider,
serverBuilder.getInvocationPolicy(),
serverBuilder.getCommands(),
productName,
serverBuilder.getPathToUriConverter());
}
public Builder setProductName(String productName) {
this.productName = productName;
return this;
}
public Builder setFileSystem(FileSystem fileSystem) {
this.fileSystem = fileSystem;
return this;
}
public Builder setServerDirectories(ServerDirectories serverDirectories) {
this.serverDirectories = serverDirectories;
return this;
}
public Builder setClock(Clock clock) {
this.clock = clock;
return this;
}
public Builder setAbruptShutdownHandler(Runnable handler) {
this.abruptShutdownHandler = handler;
return this;
}
public Builder setStartupOptionsProvider(OptionsProvider startupOptionsProvider) {
this.startupOptionsProvider = startupOptionsProvider;
return this;
}
public Builder addBlazeModule(BlazeModule blazeModule) {
blazeModules.add(blazeModule);
return this;
}
public Builder setInstanceId(UUID id) {
instanceId = id;
return this;
}
@VisibleForTesting
public Builder setEventBusExceptionHandler(
SubscriberExceptionHandler eventBusExceptionHandler) {
this.eventBusExceptionHandler = eventBusExceptionHandler;
return this;
}
public Builder setActionKeyContext(ActionKeyContext actionKeyContext) {
this.actionKeyContext = actionKeyContext;
return this;
}
}
}
| [
"\"TEST_TMPDIR\"",
"\"NO_CRASH_ON_LOGGING_IN_TEST\""
]
| []
| [
"TEST_TMPDIR",
"NO_CRASH_ON_LOGGING_IN_TEST"
]
| [] | ["TEST_TMPDIR", "NO_CRASH_ON_LOGGING_IN_TEST"] | java | 2 | 0 | |
plugins/modules/oracle_profile.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: oracle_profile
short_description: Manage profiles in an Oracle database
description:
- Manage profiles in an Oracle database
version_added: "0.8.0"
options:
name:
description:
- The name of the profile
required: true
default: None
aliases: ['profile']
state:
description:
- The intended state of the profile.
default: present
choices: ['present','absent']
attribute_name:
description:
            - The attribute name (e.g. PASSWORD_REUSE_TIME)
default: None
aliases: ['an']
attribute_value:
description:
            - The attribute value (e.g. 10)
default: None
aliases: ['av']
username:
description:
- The DB username
required: false
default: sys
aliases: ['un']
password:
description:
- The password for the DB user
required: false
default: None
aliases: ['pw']
service_name:
description:
            - The service name to connect to the database.
required: false
aliases: ['sn']
hostname:
description:
            - The host of the database
required: false
default: localhost
aliases: ['host']
port:
description:
            - The listener port to connect to the database
required: false
default: 1521
oracle_home:
description:
- The GI ORACLE_HOME
required: false
default: None
aliases: ['oh']
notes:
- cx_Oracle needs to be installed
requirements: [ "cx_Oracle" ]
author: Mikael Sandström, [email protected], @oravirt
'''
EXAMPLES = '''
# Create a profile
- hosts: dbserver
vars:
oracle_home: /u01/app/oracle/12.2.0.1/db1
hostname: "{{ inventory_hostname }}"
service_name: orclpdb
user: system
password: Oracle_123
oracle_env:
ORACLE_HOME: "{{ oracle_home }}"
LD_LIBRARY_PATH: "{{ oracle_home }}/lib"
profiles:
- name: profile1
attribute_name:
- password_reuse_max
- password_reuse_time
- sessions_per_user
attribute_value:
- 6
- 20
- 5
state: present
tasks:
- name: Manage profiles
oracle_profile:
name={{ item.name }}
attribute_name={{ item.attribute_name}}
attribute_value={{ item.attribute_value}}
state={{ item.state }}
hostname={{ hostname }}
service_name={{ service_name }}
user={{ user }}
password={{ password }}
environment: "{{oracle_env}}"
with_items: "{{ profiles }}"
'''
try:
import cx_Oracle
except ImportError:
cx_oracle_exists = False
else:
cx_oracle_exists = True
# Check if the profile exists
def check_profile_exists(cursor, module, msg, name):
sql = 'select count(*) from dba_profiles where lower (profile) = \'%s\'' % (name.lower())
result = execute_sql_get(module, msg, cursor, sql)
if result[0][0] > 0:
return True
else:
return False
def create_profile(cursor, module, msg, oracle_home, name, attribute_name, attribute_value):
    # Only add the attribute limits when none of the attribute names are missing,
    # i.e. neither a real None nor the string 'None'.
    add_attr = not any(x is None or x == 'None' for x in attribute_name)
if add_attr:
attributes = ' '.join(['' + str(n) + ' ' + str(v) + '' for n, v in zip(attribute_name, attribute_value)])
sql = 'create profile %s limit ' % name
if add_attr:
sql += ' %s' % (attributes.lower())
if execute_sql(module, msg, cursor, sql):
return True
else:
return False
def remove_profile(cursor, module, msg, oracle_home, name):
dropsql = 'drop profile %s' % name
if execute_sql(module, msg, cursor, dropsql):
return True
else:
return False
def ensure_profile_state(cursor, module, msg, name, state, attribute_name, attribute_value):
# pass
total_sql = []
profile_sql = 'alter profile %s ' % (name.upper())
# Deal with attribute differences
if attribute_name and attribute_value:
# Make sure attributes are lower case
attribute_name = [x.lower() for x in attribute_name]
attribute_value = [str(y).lower() for y in attribute_value]
        # Materialize the pairs so they can be iterated more than once below
        # (zip() is a single-use iterator on Python 3).
        wanted_attributes = list(zip(attribute_name, attribute_value))
# Check the current attributes
attribute_names_ = ','.join(['\'' + n[0] + '\'' for n in wanted_attributes])
if len(attribute_names_) != 0:
current_attributes = get_current_attributes(cursor, module, msg, name, attribute_names_)
# Convert to dict and compare current with wanted
if dict(current_attributes) != dict(wanted_attributes):
for i in wanted_attributes:
total_sql.append("alter profile %s limit %s %s " % (name, i[0], i[1]))
# module.exit_json(msg=total_sql, changed=True)
if len(total_sql) > 0:
if ensure_profile_state_sql(module, msg, cursor, total_sql):
msg = 'profile %s has been put in the intended state' % name
module.exit_json(msg=msg, changed=True)
else:
return False
else:
msg = 'Nothing to do'
module.exit_json(msg=msg, changed=False)
def ensure_profile_state_sql(module, msg, cursor, total_sql):
for sql in total_sql:
execute_sql(module, msg, cursor, sql)
return True
def get_current_attributes(cursor, module, msg, name, attribute_names_):
sql = 'select lower(resource_name),lower(limit) '
sql += 'from dba_profiles '
sql += 'where lower(profile) = \'%s\' ' % (name.lower())
sql += 'and lower(resource_name) in (%s) ' % (attribute_names_.lower())
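    # Illustrative example (hypothetical values): for name='profile1' and
    # attribute_names_ = "'password_reuse_max','sessions_per_user'", the statement built above is
    #   select lower(resource_name),lower(limit) from dba_profiles
    #   where lower(profile) = 'profile1'
    #   and lower(resource_name) in ('password_reuse_max','sessions_per_user')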
result = execute_sql_get(module, msg, cursor, sql)
return result
def execute_sql_get(module, msg, cursor, sql):
try:
cursor.execute(sql)
result = (cursor.fetchall())
except cx_Oracle.DatabaseError as exc:
error, = exc.args
msg = 'Something went wrong while executing sql_get - %s sql: %s' % (error.message, sql)
module.fail_json(msg=msg, changed=False)
return False
return result
def execute_sql(module, msg, cursor, sql):
try:
cursor.execute(sql)
except cx_Oracle.DatabaseError as exc:
error, = exc.args
msg = 'Something went wrong while executing sql - %s sql: %s' % (error.message, sql)
module.fail_json(msg=msg, changed=False)
return False
return True
def main():
msg = ['']
cursor = None
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True, aliases=['profile']),
attribute_name=dict(required=True, type='list', aliases=['an']),
attribute_value=dict(required=True, type='list', aliases=['av']),
state=dict(default="present", choices=["present", "absent"]),
user=dict(required=False, aliases=['un', 'username']),
password=dict(required=False, no_log=True, aliases=['pw']),
mode=dict(default='normal', choices=["normal", "sysdba"]),
hostname=dict(required=False, default='localhost', aliases=['host']),
port=dict(required=False, default=1521),
service_name=dict(required=False, aliases=['sn']),
oracle_home=dict(required=False, aliases=['oh']),
),
)
name = module.params["name"]
attribute_name = module.params["attribute_name"]
attribute_value = module.params["attribute_value"]
state = module.params["state"]
user = module.params["user"]
password = module.params["password"]
mode = module.params["mode"]
hostname = module.params["hostname"]
port = module.params["port"]
service_name = module.params["service_name"]
oracle_home = module.params["oracle_home"]
if not cx_oracle_exists:
msg = "The cx_Oracle module is required. 'pip install cx_Oracle' should do the trick. If cx_Oracle is installed, make sure ORACLE_HOME & LD_LIBRARY_PATH is set"
module.fail_json(msg=msg)
wallet_connect = '/@%s' % service_name
try:
if (
                not user and not password):  # If neither user nor password is supplied, the use of an oracle wallet is assumed
connect = wallet_connect
if mode == 'sysdba':
conn = cx_Oracle.connect(wallet_connect, mode=cx_Oracle.SYSDBA)
else:
conn = cx_Oracle.connect(wallet_connect)
elif user and password:
dsn = cx_Oracle.makedsn(host=hostname, port=port, service_name=service_name)
connect = dsn
if mode == 'sysdba':
conn = cx_Oracle.connect(user, password, dsn, mode=cx_Oracle.SYSDBA)
else:
conn = cx_Oracle.connect(user, password, dsn)
elif not user or not password:
module.fail_json(msg='Missing username or password for cx_Oracle')
except cx_Oracle.DatabaseError as exc:
error, = exc.args
msg = 'Could not connect to DB: %s, connect descriptor: %s, username: %s, pass: %s' % (
error.message, connect, user, password)
module.fail_json(msg=msg, changed=False)
cursor = conn.cursor()
if oracle_home is not None:
os.environ['ORACLE_HOME'] = oracle_home
elif 'ORACLE_HOME' in os.environ:
oracle_home = os.environ['ORACLE_HOME']
else:
msg = 'ORACLE_HOME variable not set. Please set it and re-run the command'
module.fail_json(msg=msg, changed=False)
if state == 'present':
if not check_profile_exists(cursor, module, msg, name):
if create_profile(cursor, module, msg, oracle_home, name, attribute_name, attribute_value):
msg = 'Successfully created profile %s ' % name
module.exit_json(msg=msg, changed=True)
else:
module.fail_json(msg=msg, changed=False)
else:
ensure_profile_state(cursor, module, msg, name, state, attribute_name, attribute_value)
elif state == 'absent':
if check_profile_exists(cursor, module, msg, name):
if remove_profile(cursor, module, msg, oracle_home, name):
msg = 'Profile %s successfully removed' % name
module.exit_json(msg=msg, changed=True)
else:
module.exit_json(msg=msg, changed=False)
else:
msg = 'Profile %s doesn\'t exist' % name
module.exit_json(msg=msg, changed=False)
module.exit_json(msg="Unhandled exit", changed=False)
from ansible.module_utils.basic import AnsibleModule, os
if __name__ == '__main__':
main()
| []
| []
| [
"ORACLE_HOME"
]
| [] | ["ORACLE_HOME"] | python | 1 | 0 | |
test/functional/test_runner.py | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Run regression test suite.
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts.
Functional tests are disabled on Windows by default. Use --force to run them anyway.
For a description of arguments recognized by test scripts, see
`test/functional/test_framework/test_framework.py:BitcoinTestFramework.main`.
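
Illustrative invocations (paths are relative to the repository root; adjust as needed):

  test/functional/test_runner.py                          # run the base test list
  test/functional/test_runner.py --extended --jobs=8      # also run the extended tests, 8 at a time
  test/functional/test_runner.py wallet.py mempool_limit  # run only the named tests
  test/functional/test_runner.py --exclude zmq_test,reindex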
"""
import argparse
import configparser
import datetime
import os
import time
import shutil
import signal
import sys
import subprocess
import tempfile
import re
import logging
# Formatting. Default colors to empty strings.
BOLD, BLUE, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "")
try:
# Make sure python thinks it can write unicode to its stdout
"\u2713".encode("utf_8").decode(sys.stdout.encoding)
TICK = "✓ "
CROSS = "✖ "
CIRCLE = "○ "
except UnicodeDecodeError:
TICK = "P "
CROSS = "x "
CIRCLE = "o "
if os.name == 'posix':
# primitive formatting on supported
# terminal via ANSI escape sequences:
BOLD = ('\033[0m', '\033[1m')
BLUE = ('\033[0m', '\033[0;34m')
RED = ('\033[0m', '\033[0;31m')
GREY = ('\033[0m', '\033[1;30m')
TEST_EXIT_PASSED = 0
TEST_EXIT_SKIPPED = 77
BASE_SCRIPTS = [
# Scripts that are run by the travis build process.
# Longest test should go first, to favor running tests in parallel
'wallet-hd.py',
'walletbackup.py',
# vv Tests less than 5m vv
'p2p-fullblocktest.py',
'fundrawtransaction.py',
'p2p-compactblocks.py',
'segwit.py',
# vv Tests less than 2m vv
'wallet.py',
'wallet-accounts.py',
'p2p-segwit.py',
'wallet-dump.py',
'listtransactions.py',
# vv Tests less than 60s vv
'sendheaders.py',
'zapwallettxes.py',
'importmulti.py',
'mempool_limit.py',
'merkle_blocks.py',
'receivedby.py',
'abandonconflict.py',
'bip68-112-113-p2p.py',
'rawtransactions.py',
'reindex.py',
# vv Tests less than 30s vv
'keypool-topup.py',
'zmq_test.py',
'mempool_resurrect_test.py',
'txn_doublespend.py --mineblock',
'txn_clone.py',
'getchaintips.py',
'rest.py',
'mempool_spendcoinbase.py',
'mempool_reorg.py',
'mempool_persist.py',
'multiwallet.py',
'httpbasics.py',
'multi_rpc.py',
'proxy_test.py',
'signrawtransactions.py',
'disconnect_ban.py',
'decodescript.py',
'blockchain.py',
'disablewallet.py',
'net.py',
'keypool.py',
'p2p-mempool.py',
'prioritise_transaction.py',
'invalidblockrequest.py',
'invalidtxrequest.py',
'p2p-versionbits-warning.py',
'preciousblock.py',
'test_script_address2.py',
'importprunedfunds.py',
'signmessages.py',
'nulldummy.py',
'import-rescan.py',
'mining.py',
'bumpfee.py',
'rpcnamedargs.py',
'listsinceblock.py',
'p2p-leaktests.py',
'wallet-encryption.py',
'bipdersig-p2p.py',
'bip65-cltv-p2p.py',
'uptime.py',
'resendwallettransactions.py',
]
EXTENDED_SCRIPTS = [
# These tests are not run by the travis build process.
# Longest test should go first, to favor running tests in parallel
'pruning.py',
# vv Tests less than 20m vv
'smartfees.py',
# vv Tests less than 5m vv
'maxuploadtarget.py',
'mempool_packages.py',
'dbcrash.py',
# vv Tests less than 2m vv
'bip68-sequence.py',
'getblocktemplate_longpoll.py',
'p2p-timeouts.py',
# vv Tests less than 60s vv
'bip9-softforks.py',
'p2p-feefilter.py',
'rpcbind_test.py',
# vv Tests less than 30s vv
'assumevalid.py',
'example_test.py',
'txn_doublespend.py',
'txn_clone.py --mineblock',
'forknotify.py',
'invalidateblock.py',
'p2p-acceptblock.py',
'replace-by-fee.py',
]
# Place EXTENDED_SCRIPTS first since it has the 3 longest running tests
ALL_SCRIPTS = EXTENDED_SCRIPTS + BASE_SCRIPTS
NON_SCRIPTS = [
# These are python files that live in the functional tests directory, but are not test scripts.
"combine_logs.py",
"create_cache.py",
"test_runner.py",
]
def main():
# Parse arguments and pass through unrecognised args
parser = argparse.ArgumentParser(add_help=False,
usage='%(prog)s [test_runner.py options] [script options] [scripts]',
description=__doc__,
epilog='''
Help text and arguments for individual test script:''',
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--coverage', action='store_true', help='generate a basic coverage report for the RPC interface')
    parser.add_argument('--exclude', '-x', help='specify a comma-separated list of scripts to exclude.')
parser.add_argument('--extended', action='store_true', help='run the extended test suite in addition to the basic tests')
parser.add_argument('--force', '-f', action='store_true', help='run tests even on platforms where they are disabled by default (e.g. windows).')
parser.add_argument('--help', '-h', '-?', action='store_true', help='print help text and exit')
parser.add_argument('--jobs', '-j', type=int, default=4, help='how many test scripts to run in parallel. Default=4.')
parser.add_argument('--keepcache', '-k', action='store_true', help='the default behavior is to flush the cache directory on startup. --keepcache retains the cache from the previous testrun.')
parser.add_argument('--quiet', '-q', action='store_true', help='only print results summary and failure logs')
parser.add_argument('--tmpdirprefix', '-t', default=tempfile.gettempdir(), help="Root directory for datadirs")
args, unknown_args = parser.parse_known_args()
# args to be passed on always start with two dashes; tests are the remaining unknown args
tests = [arg for arg in unknown_args if arg[:2] != "--"]
passon_args = [arg for arg in unknown_args if arg[:2] == "--"]
# Read config generated by configure.
config = configparser.ConfigParser()
configfile = os.path.abspath(os.path.dirname(__file__)) + "/../config.ini"
config.read_file(open(configfile))
passon_args.append("--configfile=%s" % configfile)
# Set up logging
logging_level = logging.INFO if args.quiet else logging.DEBUG
logging.basicConfig(format='%(message)s', level=logging_level)
# Create base test directory
tmpdir = "%s/farmcoin_test_runner_%s" % (args.tmpdirprefix, datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
os.makedirs(tmpdir)
logging.debug("Temporary test directory at %s" % tmpdir)
enable_wallet = config["components"].getboolean("ENABLE_WALLET")
enable_utils = config["components"].getboolean("ENABLE_UTILS")
enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND")
if config["environment"]["EXEEXT"] == ".exe" and not args.force:
# https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
# https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
print("Tests currently disabled on Windows by default. Use --force option to enable")
sys.exit(0)
if not (enable_wallet and enable_utils and enable_bitcoind):
print("No functional tests to run. Wallet, utils, and farmcoind must all be enabled")
        print("Rerun `configure` with --enable-wallet, --with-utils and --with-daemon and rerun make")
sys.exit(0)
# Build list of tests
if tests:
# Individual tests have been specified. Run specified tests that exist
# in the ALL_SCRIPTS list. Accept the name with or without .py extension.
        tests = [re.sub(r"\.py$", "", t) + ".py" for t in tests]
test_list = []
for t in tests:
if t in ALL_SCRIPTS:
test_list.append(t)
else:
print("{}WARNING!{} Test '{}' not found in full test list.".format(BOLD[1], BOLD[0], t))
else:
# No individual tests have been specified.
# Run all base tests, and optionally run extended tests.
test_list = BASE_SCRIPTS
if args.extended:
# place the EXTENDED_SCRIPTS first since the three longest ones
# are there and the list is shorter
test_list = EXTENDED_SCRIPTS + test_list
# Remove the test cases that the user has explicitly asked to exclude.
if args.exclude:
        tests_excl = [re.sub(r"\.py$", "", t) + ".py" for t in args.exclude.split(',')]
for exclude_test in tests_excl:
if exclude_test in test_list:
test_list.remove(exclude_test)
else:
print("{}WARNING!{} Test '{}' not found in current test list.".format(BOLD[1], BOLD[0], exclude_test))
if not test_list:
print("No valid test scripts specified. Check that your test is in one "
"of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests")
sys.exit(0)
if args.help:
# Print help for test_runner.py, then print help of the first script (with args removed) and exit.
parser.print_help()
subprocess.check_call([(config["environment"]["SRCDIR"] + '/test/functional/' + test_list[0].split()[0])] + ['-h'])
sys.exit(0)
check_script_list(config["environment"]["SRCDIR"])
if not args.keepcache:
shutil.rmtree("%s/test/cache" % config["environment"]["BUILDDIR"], ignore_errors=True)
run_tests(test_list, config["environment"]["SRCDIR"], config["environment"]["BUILDDIR"], config["environment"]["EXEEXT"], tmpdir, args.jobs, args.coverage, passon_args)
def run_tests(test_list, src_dir, build_dir, exeext, tmpdir, jobs=1, enable_coverage=False, args=[]):
# Warn if bitcoind is already running (unix only)
try:
if subprocess.check_output(["pidof", "farmcoind"]) is not None:
print("%sWARNING!%s There is already a farmcoind process running on this system. Tests may fail unexpectedly due to resource contention!" % (BOLD[1], BOLD[0]))
except (OSError, subprocess.SubprocessError):
pass
# Warn if there is a cache directory
cache_dir = "%s/test/cache" % build_dir
if os.path.isdir(cache_dir):
print("%sWARNING!%s There is a cache directory here: %s. If tests fail unexpectedly, try deleting the cache directory." % (BOLD[1], BOLD[0], cache_dir))
#Set env vars
if "FARMCOIND" not in os.environ:
os.environ["FARMCOIND"] = build_dir + '/src/farmcoind' + exeext
tests_dir = src_dir + '/test/functional/'
flags = ["--srcdir={}/src".format(build_dir)] + args
flags.append("--cachedir=%s" % cache_dir)
if enable_coverage:
coverage = RPCCoverage()
flags.append(coverage.flag)
logging.debug("Initializing coverage directory at %s" % coverage.dir)
else:
coverage = None
if len(test_list) > 1 and jobs > 1:
# Populate cache
subprocess.check_output([tests_dir + 'create_cache.py'] + flags + ["--tmpdir=%s/cache" % tmpdir])
#Run Tests
job_queue = TestHandler(jobs, tests_dir, tmpdir, test_list, flags)
time0 = time.time()
test_results = []
max_len_name = len(max(test_list, key=len))
for _ in range(len(test_list)):
test_result, stdout, stderr = job_queue.get_next()
test_results.append(test_result)
if test_result.status == "Passed":
logging.debug("\n%s%s%s passed, Duration: %s s" % (BOLD[1], test_result.name, BOLD[0], test_result.time))
elif test_result.status == "Skipped":
logging.debug("\n%s%s%s skipped" % (BOLD[1], test_result.name, BOLD[0]))
else:
print("\n%s%s%s failed, Duration: %s s\n" % (BOLD[1], test_result.name, BOLD[0], test_result.time))
print(BOLD[1] + 'stdout:\n' + BOLD[0] + stdout + '\n')
print(BOLD[1] + 'stderr:\n' + BOLD[0] + stderr + '\n')
print_results(test_results, max_len_name, (int(time.time() - time0)))
if coverage:
coverage.report_rpc_coverage()
logging.debug("Cleaning up coverage data")
coverage.cleanup()
# Clear up the temp directory if all subdirectories are gone
if not os.listdir(tmpdir):
os.rmdir(tmpdir)
all_passed = all(map(lambda test_result: test_result.was_successful, test_results))
sys.exit(not all_passed)
def print_results(test_results, max_len_name, runtime):
results = "\n" + BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "STATUS ", "DURATION") + BOLD[0]
test_results.sort(key=lambda result: result.name.lower())
all_passed = True
time_sum = 0
for test_result in test_results:
all_passed = all_passed and test_result.was_successful
time_sum += test_result.time
test_result.padding = max_len_name
results += str(test_result)
status = TICK + "Passed" if all_passed else CROSS + "Failed"
results += BOLD[1] + "\n%s | %s | %s s (accumulated) \n" % ("ALL".ljust(max_len_name), status.ljust(9), time_sum) + BOLD[0]
results += "Runtime: %s s\n" % (runtime)
print(results)
class TestHandler:
"""
    Trigger the test scripts passed in via the list.
"""
def __init__(self, num_tests_parallel, tests_dir, tmpdir, test_list=None, flags=None):
assert(num_tests_parallel >= 1)
self.num_jobs = num_tests_parallel
self.tests_dir = tests_dir
self.tmpdir = tmpdir
self.test_list = test_list
self.flags = flags
self.num_running = 0
# In case there is a graveyard of zombie bitcoinds, we can apply a
# pseudorandom offset to hopefully jump over them.
# (625 is PORT_RANGE/MAX_NODES)
self.portseed_offset = int(time.time() * 1000) % 625
self.jobs = []
def get_next(self):
while self.num_running < self.num_jobs and self.test_list:
# Add tests
self.num_running += 1
t = self.test_list.pop(0)
portseed = len(self.test_list) + self.portseed_offset
portseed_arg = ["--portseed={}".format(portseed)]
log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
test_argv = t.split()
            tmpdir = ["--tmpdir=%s/%s_%s" % (self.tmpdir, re.sub(r"\.py$", "", test_argv[0]), portseed)]
self.jobs.append((t,
time.time(),
subprocess.Popen([self.tests_dir + test_argv[0]] + test_argv[1:] + self.flags + portseed_arg + tmpdir,
universal_newlines=True,
stdout=log_stdout,
stderr=log_stderr),
log_stdout,
log_stderr))
if not self.jobs:
raise IndexError('pop from empty list')
while True:
# Return first proc that finishes
time.sleep(.5)
for j in self.jobs:
(name, time0, proc, log_out, log_err) = j
if os.getenv('TRAVIS') == 'true' and int(time.time() - time0) > 20 * 60:
                    # In travis, timeout individual tests after 20 minutes (to stop tests hanging
                    # and not providing useful output).
proc.send_signal(signal.SIGINT)
if proc.poll() is not None:
log_out.seek(0), log_err.seek(0)
[stdout, stderr] = [l.read().decode('utf-8') for l in (log_out, log_err)]
log_out.close(), log_err.close()
if proc.returncode == TEST_EXIT_PASSED and stderr == "":
status = "Passed"
elif proc.returncode == TEST_EXIT_SKIPPED:
status = "Skipped"
else:
status = "Failed"
self.num_running -= 1
self.jobs.remove(j)
return TestResult(name, status, int(time.time() - time0)), stdout, stderr
print('.', end='', flush=True)
class TestResult():
def __init__(self, name, status, time):
self.name = name
self.status = status
self.time = time
self.padding = 0
def __repr__(self):
if self.status == "Passed":
color = BLUE
glyph = TICK
elif self.status == "Failed":
color = RED
glyph = CROSS
elif self.status == "Skipped":
color = GREY
glyph = CIRCLE
return color[1] + "%s | %s%s | %s s\n" % (self.name.ljust(self.padding), glyph, self.status.ljust(7), self.time) + color[0]
@property
def was_successful(self):
return self.status != "Failed"
def check_script_list(src_dir):
"""Check scripts directory.
Check that there are no scripts in the functional tests directory which are
    not being run by this test runner."""
script_dir = src_dir + '/test/functional/'
python_files = set([t for t in os.listdir(script_dir) if t[-3:] == ".py"])
missed_tests = list(python_files - set(map(lambda x: x.split()[0], ALL_SCRIPTS + NON_SCRIPTS)))
if len(missed_tests) != 0:
print("%sWARNING!%s The following scripts are not being run: %s. Check the test lists in test_runner.py." % (BOLD[1], BOLD[0], str(missed_tests)))
if os.getenv('TRAVIS') == 'true':
# On travis this warning is an error to prevent merging incomplete commits into master
sys.exit(1)
class RPCCoverage(object):
"""
Coverage reporting utilities for test_runner.
Coverage calculation works by having each test script subprocess write
coverage files into a particular directory. These files contain the RPC
commands invoked during testing, as well as a complete listing of RPC
commands per `bitcoin-cli help` (`rpc_interface.txt`).
After all tests complete, the commands run are combined and diff'd against
the complete list to calculate uncovered RPC commands.
See also: test/functional/test_framework/coverage.py
"""
def __init__(self):
self.dir = tempfile.mkdtemp(prefix="coverage")
self.flag = '--coveragedir=%s' % self.dir
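    # Sketch of how run_tests() above uses this class (no additional API is implied; these are the
    # calls that already appear there, and the directory name is whatever mkdtemp() picks):
    #   coverage = RPCCoverage()              # creates the temp dir and the --coveragedir flag
    #   flags.append(coverage.flag)           # every test subprocess writes coverage.* files there
    #   coverage.report_rpc_coverage()        # after the run, print any RPC commands never hit
    #   coverage.cleanup()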
def report_rpc_coverage(self):
"""
Print out RPC commands that were unexercised by tests.
"""
uncovered = self._get_uncovered_rpc_commands()
if uncovered:
print("Uncovered RPC commands:")
print("".join((" - %s\n" % i) for i in sorted(uncovered)))
else:
print("All RPC commands covered.")
def cleanup(self):
return shutil.rmtree(self.dir)
def _get_uncovered_rpc_commands(self):
"""
Return a set of currently untested RPC commands.
"""
# This is shared from `test/functional/test-framework/coverage.py`
reference_filename = 'rpc_interface.txt'
coverage_file_prefix = 'coverage.'
coverage_ref_filename = os.path.join(self.dir, reference_filename)
coverage_filenames = set()
all_cmds = set()
covered_cmds = set()
if not os.path.isfile(coverage_ref_filename):
raise RuntimeError("No coverage reference found")
with open(coverage_ref_filename, 'r') as f:
all_cmds.update([i.strip() for i in f.readlines()])
for root, dirs, files in os.walk(self.dir):
for filename in files:
if filename.startswith(coverage_file_prefix):
coverage_filenames.add(os.path.join(root, filename))
for filename in coverage_filenames:
with open(filename, 'r') as f:
covered_cmds.update([i.strip() for i in f.readlines()])
return all_cmds - covered_cmds
if __name__ == '__main__':
main()
| []
| []
| [
"FARMCOIND",
"TRAVIS"
]
| [] | ["FARMCOIND", "TRAVIS"] | python | 2 | 0 | |
functional_test.go | //+build functional
package sarama
import (
"context"
"fmt"
"io"
"log"
"net"
"net/http"
"net/url"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"testing"
"time"
toxiproxy "github.com/Shopify/toxiproxy/client"
)
const (
uncomittedMsgJar = "https://github.com/FrancoisPoinsot/simplest-uncommitted-msg/releases/download/0.1/simplest-uncommitted-msg-0.1-jar-with-dependencies.jar"
)
var (
testTopicDetails = map[string]*TopicDetail{
"test.1": {
NumPartitions: 1,
ReplicationFactor: 3,
},
"test.4": {
NumPartitions: 4,
ReplicationFactor: 3,
},
"test.64": {
NumPartitions: 64,
ReplicationFactor: 3,
},
"uncommitted-topic-test-4": {
NumPartitions: 1,
ReplicationFactor: 3,
},
}
FunctionalTestEnv *testEnvironment
)
func TestMain(m *testing.M) {
// Functional tests for Sarama
//
// You can either set TOXIPROXY_ADDR, which points at a toxiproxy address
// already set up with 21801-21805 bound to zookeeper and 29091-29095
// bound to kafka. Alternatively, if TOXIPROXY_ADDR is not set, we'll try
// and use Docker to bring up a 5-node zookeeper cluster & 5-node kafka
// cluster, with toxiproxy configured as above.
//
// In either case, the following topics will be deleted (if they exist) and
// then created/pre-seeded with data for the functional test run:
	// * uncommitted-topic-test-4
// * test.1
// * test.4
// * test.64
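	//
	// Illustrative ways to run this suite (values are examples, not taken from this file):
	//
	//   KAFKA_VERSION=2.5.1 go test -tags functional ./...            # docker-compose managed cluster
	//
	//   TOXIPROXY_ADDR=http://localhost:8474 KAFKA_VERSION=2.5.1 \
	//     go test -tags functional ./...                              # reuse an existing cluster
	//
	// DEBUG=true additionally enables sarama's logger output.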
os.Exit(testMain(m))
}
func testMain(m *testing.M) int {
ctx := context.Background()
var env testEnvironment
if os.Getenv("DEBUG") == "true" {
Logger = log.New(os.Stdout, "[sarama] ", log.LstdFlags)
}
usingExisting, err := existingEnvironment(ctx, &env)
if err != nil {
panic(err)
}
if !usingExisting {
err := prepareDockerTestEnvironment(ctx, &env)
if err != nil {
_ = tearDownDockerTestEnvironment(ctx, &env)
panic(err)
}
defer tearDownDockerTestEnvironment(ctx, &env) // nolint:errcheck
}
if err := prepareTestTopics(ctx, &env); err != nil {
panic(err)
}
FunctionalTestEnv = &env
return m.Run()
}
type testEnvironment struct {
ToxiproxyClient *toxiproxy.Client
Proxies map[string]*toxiproxy.Proxy
KafkaBrokerAddrs []string
KafkaVersion string
}
func prepareDockerTestEnvironment(ctx context.Context, env *testEnvironment) error {
Logger.Println("bringing up docker-based test environment")
// Always (try to) tear down first.
if err := tearDownDockerTestEnvironment(ctx, env); err != nil {
return fmt.Errorf("failed to tear down existing env: %w", err)
}
if version, ok := os.LookupEnv("KAFKA_VERSION"); ok {
env.KafkaVersion = version
} else {
// We have cp-5.5.0 as the default in the docker-compose file, so that's kafka 2.5.0.
env.KafkaVersion = "2.5.0"
}
// the mapping of confluent platform docker image versions -> kafka versions can be
// found here: https://docs.confluent.io/current/installation/versions-interoperability.html
var confluentPlatformVersion string
	switch env.KafkaVersion {
	case "2.6.0":
		confluentPlatformVersion = "5.5.0"
	case "2.5.1":
		confluentPlatformVersion = "5.5.0"
	case "2.5.0":
		// The default KafkaVersion above; matches the cp-5.5.0 default in the docker-compose file.
		confluentPlatformVersion = "5.5.0"
	default:
		return fmt.Errorf("don't know what confluent platform version to use for kafka %s", env.KafkaVersion)
}
c := exec.Command("docker-compose", "up", "-d")
c.Stdout = os.Stdout
c.Stderr = os.Stderr
c.Env = append(os.Environ(), fmt.Sprintf("CONFLUENT_PLATFORM_VERSION=%s", confluentPlatformVersion))
err := c.Run()
if err != nil {
		return fmt.Errorf("failed to run docker-compose to start test environment: %w", err)
}
// Set up toxiproxy Proxies
env.ToxiproxyClient = toxiproxy.NewClient("localhost:8474")
env.Proxies = map[string]*toxiproxy.Proxy{}
for i := 1; i <= 5; i++ {
proxyName := fmt.Sprintf("kafka%d", i)
proxy, err := env.ToxiproxyClient.CreateProxy(
proxyName,
fmt.Sprintf("0.0.0.0:%d", 29090+i),
fmt.Sprintf("kafka-%d:%d", i, 29090+i),
)
if err != nil {
return fmt.Errorf("failed to create toxiproxy: %w", err)
}
env.Proxies[proxyName] = proxy
env.KafkaBrokerAddrs = append(env.KafkaBrokerAddrs, fmt.Sprintf("127.0.0.1:%d", 29090+i))
}
// Wait for the kafka broker to come up
allBrokersUp := false
for i := 0; i < 45 && !allBrokersUp; i++ {
Logger.Println("waiting for kafka brokers to come up")
time.Sleep(1 * time.Second)
config := NewTestConfig()
config.Version, err = ParseKafkaVersion(env.KafkaVersion)
if err != nil {
return err
}
config.Net.DialTimeout = 1 * time.Second
config.Net.ReadTimeout = 1 * time.Second
config.Net.WriteTimeout = 1 * time.Second
config.ClientID = "sarama-tests"
brokersOk := make([]bool, len(env.KafkaBrokerAddrs))
retryLoop:
for j, addr := range env.KafkaBrokerAddrs {
client, err := NewClient([]string{addr}, config)
if err != nil {
continue
}
err = client.RefreshMetadata()
if err != nil {
continue
}
brokers := client.Brokers()
if len(brokers) < 5 {
continue
}
for _, broker := range brokers {
err := broker.Open(client.Config())
if err != nil {
continue retryLoop
}
connected, err := broker.Connected()
if err != nil || !connected {
continue retryLoop
}
}
brokersOk[j] = true
}
allBrokersUp = true
for _, u := range brokersOk {
allBrokersUp = allBrokersUp && u
}
}
if !allBrokersUp {
return fmt.Errorf("timed out waiting for broker to come up")
}
return nil
}
func existingEnvironment(ctx context.Context, env *testEnvironment) (bool, error) {
toxiproxyAddr, ok := os.LookupEnv("TOXIPROXY_ADDR")
if !ok {
return false, nil
}
toxiproxyURL, err := url.Parse(toxiproxyAddr)
if err != nil {
return false, fmt.Errorf("$TOXIPROXY_ADDR not parseable as url")
}
toxiproxyHost := toxiproxyURL.Hostname()
env.ToxiproxyClient = toxiproxy.NewClient(toxiproxyAddr)
for i := 1; i <= 5; i++ {
proxyName := fmt.Sprintf("kafka%d", i)
proxy, err := env.ToxiproxyClient.Proxy(proxyName)
if err != nil {
return false, fmt.Errorf("no proxy kafka%d on toxiproxy: %w", i, err)
}
env.Proxies[proxyName] = proxy
// get the host:port from the proxy & toxiproxy addr, so we can do "$toxiproxy_addr:$proxy_port"
_, proxyPort, err := net.SplitHostPort(proxy.Listen)
if err != nil {
return false, fmt.Errorf("proxy.Listen not a host:port combo: %w", err)
}
env.KafkaBrokerAddrs = append(env.KafkaBrokerAddrs, fmt.Sprintf("%s:%s", toxiproxyHost, proxyPort))
}
env.KafkaVersion, ok = os.LookupEnv("KAFKA_VERSION")
if !ok {
return false, fmt.Errorf("KAFKA_VERSION needs to be provided with TOXIPROXY_ADDR")
}
return true, nil
}
func tearDownDockerTestEnvironment(ctx context.Context, env *testEnvironment) error {
c := exec.Command("docker-compose", "down", "--volumes")
c.Stdout = os.Stdout
c.Stderr = os.Stderr
downErr := c.Run()
c = exec.Command("docker-compose", "rm", "-v", "--force", "--stop")
c.Stdout = os.Stdout
c.Stderr = os.Stderr
rmErr := c.Run()
if downErr != nil {
return fmt.Errorf("failed to run docker-compose to stop test enviroment: %w", downErr)
}
if rmErr != nil {
return fmt.Errorf("failed to run docker-compose to rm test enviroment: %w", rmErr)
}
return nil
}
func prepareTestTopics(ctx context.Context, env *testEnvironment) error {
Logger.Println("creating test topics")
var testTopicNames []string
for topic := range testTopicDetails {
testTopicNames = append(testTopicNames, topic)
}
Logger.Println("Creating topics")
config := NewTestConfig()
config.Metadata.Retry.Max = 5
config.Metadata.Retry.Backoff = 10 * time.Second
config.ClientID = "sarama-tests"
var err error
config.Version, err = ParseKafkaVersion(env.KafkaVersion)
if err != nil {
return fmt.Errorf("failed to parse kafka version %s: %w", env.KafkaVersion, err)
}
client, err := NewClient(env.KafkaBrokerAddrs, config)
if err != nil {
return fmt.Errorf("failed to connect to kafka: %w", err)
}
defer client.Close()
controller, err := client.Controller()
if err != nil {
return fmt.Errorf("failed to connect to kafka controller: %w", err)
}
defer controller.Close()
// Start by deleting the test topics (if they already exist)
deleteRes, err := controller.DeleteTopics(&DeleteTopicsRequest{
Topics: testTopicNames,
Timeout: 30 * time.Second,
})
if err != nil {
return fmt.Errorf("failed to delete test topics: %w", err)
}
for topic, topicErr := range deleteRes.TopicErrorCodes {
if !isTopicNotExistsErrorOrOk(topicErr) {
return fmt.Errorf("failed to delete topic %s: %w", topic, topicErr)
}
}
// wait for the topics to _actually_ be gone - the delete is not guaranteed to be processed
// synchronously
var topicsOk bool
for i := 0; i < 20 && !topicsOk; i++ {
time.Sleep(1 * time.Second)
md, err := controller.GetMetadata(&MetadataRequest{
Topics: testTopicNames,
})
if err != nil {
return fmt.Errorf("failed to get metadata for test topics: %w", err)
}
topicsOk = true
for _, topicsMd := range md.Topics {
if !isTopicNotExistsErrorOrOk(topicsMd.Err) {
topicsOk = false
}
}
}
if !topicsOk {
return fmt.Errorf("timed out waiting for test topics to be gone")
}
// now create the topics empty
createRes, err := controller.CreateTopics(&CreateTopicsRequest{
TopicDetails: testTopicDetails,
Timeout: 30 * time.Second,
})
if err != nil {
return fmt.Errorf("failed to create test topics: %w", err)
}
for topic, topicErr := range createRes.TopicErrors {
if !isTopicExistsErrorOrOk(topicErr.Err) {
return fmt.Errorf("failed to create test topic %s: %w", topic, topicErr)
}
}
// This is kind of gross, but we don't actually have support for doing transactional publishing
// with sarama, so we need to use a java-based tool to publish uncommitted messages to
// the uncommitted-topic-test-4 topic
jarName := filepath.Base(uncomittedMsgJar)
if _, err := os.Stat(jarName); err != nil {
Logger.Printf("Downloading %s\n", uncomittedMsgJar)
req, err := http.NewRequest("GET", uncomittedMsgJar, nil)
if err != nil {
return fmt.Errorf("failed creating requst for uncomitted msg jar: %w", err)
}
res, err := http.DefaultClient.Do(req)
if err != nil {
return fmt.Errorf("failed fetching the uncommitted msg jar: %w", err)
}
defer res.Body.Close()
jarFile, err := os.OpenFile(jarName, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0644)
if err != nil {
return fmt.Errorf("failed opening the uncomitted msg jar: %w", err)
}
defer jarFile.Close()
_, err = io.Copy(jarFile, res.Body)
if err != nil {
return fmt.Errorf("failed writing the uncomitted msg jar: %w", err)
}
}
c := exec.Command("java", "-jar", jarName, "-b", env.KafkaBrokerAddrs[0], "-c", "4")
c.Stdout = os.Stdout
c.Stderr = os.Stderr
err = c.Run()
if err != nil {
return fmt.Errorf("failed running uncomitted msg jar: %w", err)
}
return nil
}
func isTopicNotExistsErrorOrOk(err KError) bool {
return err == ErrUnknownTopicOrPartition || err == ErrInvalidTopic || err == ErrNoError
}
func isTopicExistsErrorOrOk(err KError) bool {
return err == ErrTopicAlreadyExists || err == ErrNoError
}
func checkKafkaVersion(t testing.TB, requiredVersion string) {
kafkaVersion := FunctionalTestEnv.KafkaVersion
if kafkaVersion == "" {
t.Skipf("No KAFKA_VERSION set. This test requires Kafka version %s or higher. Continuing...", requiredVersion)
} else {
available := parseKafkaVersion(kafkaVersion)
required := parseKafkaVersion(requiredVersion)
if !available.satisfies(required) {
t.Skipf("Kafka version %s is required for this test; you have %s. Skipping...", requiredVersion, kafkaVersion)
}
}
}
func resetProxies(t testing.TB) {
if err := FunctionalTestEnv.ToxiproxyClient.ResetState(); err != nil {
t.Error(err)
}
}
func SaveProxy(t *testing.T, px string) {
if err := FunctionalTestEnv.Proxies[px].Save(); err != nil {
t.Fatal(err)
}
}
func setupFunctionalTest(t testing.TB) {
resetProxies(t)
}
func teardownFunctionalTest(t testing.TB) {
resetProxies(t)
}
type kafkaVersion []int
func (kv kafkaVersion) satisfies(other kafkaVersion) bool {
var ov int
for index, v := range kv {
if len(other) <= index {
ov = 0
} else {
ov = other[index]
}
if v < ov {
return false
} else if v > ov {
return true
}
}
return true
}
func parseKafkaVersion(version string) kafkaVersion {
numbers := strings.Split(version, ".")
result := make(kafkaVersion, 0, len(numbers))
for _, number := range numbers {
nr, _ := strconv.Atoi(number)
result = append(result, nr)
}
return result
}
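// For example, parseKafkaVersion("2.6.0") yields kafkaVersion{2, 6, 0}, and
// kafkaVersion{2, 6, 0}.satisfies(kafkaVersion{2, 5}) is true: segments are compared
// left to right, and any segment missing from the required version is treated as 0.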
| ["\"DEBUG\""] | [] | ["DEBUG"] | [] | ["DEBUG"] | go | 1 | 0 |
sdk/keyvault/azkeys/example_test.go | //go:build go1.16
// +build go1.16
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package azkeys_test
import (
"context"
"fmt"
"os"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
"github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys"
)
var client *azkeys.Client
func ExampleNewClient() {
vaultUrl := os.Getenv("AZURE_KEYVAULT_URL")
cred, err := azidentity.NewDefaultAzureCredential(nil)
if err != nil {
panic(err)
}
client, err = azkeys.NewClient(vaultUrl, cred, nil)
if err != nil {
panic(err)
}
}
func ExampleClient_CreateRSAKey() {
vaultUrl := os.Getenv("AZURE_KEYVAULT_URL")
cred, err := azidentity.NewDefaultAzureCredential(nil)
if err != nil {
panic(err)
}
client, err := azkeys.NewClient(vaultUrl, cred, nil)
if err != nil {
panic(err)
}
resp, err := client.CreateRSAKey(context.TODO(), "new-rsa-key", &azkeys.CreateRSAKeyOptions{KeySize: to.Int32Ptr(2048)})
if err != nil {
panic(err)
}
fmt.Println(*resp.Key.ID)
fmt.Println(*resp.Key.KeyType)
}
func ExampleClient_CreateECKey() {
vaultUrl := os.Getenv("AZURE_KEYVAULT_URL")
cred, err := azidentity.NewDefaultAzureCredential(nil)
if err != nil {
panic(err)
}
client, err := azkeys.NewClient(vaultUrl, cred, nil)
if err != nil {
panic(err)
}
resp, err := client.CreateECKey(context.TODO(), "new-ec-key", &azkeys.CreateECKeyOptions{CurveName: azkeys.JSONWebKeyCurveNameP256.ToPtr()})
if err != nil {
panic(err)
}
fmt.Println(*resp.Key.ID)
fmt.Println(*resp.Key.KeyType)
}
func ExampleClient_GetKey() {
vaultUrl := os.Getenv("AZURE_KEYVAULT_URL")
cred, err := azidentity.NewDefaultAzureCredential(nil)
if err != nil {
panic(err)
}
client, err := azkeys.NewClient(vaultUrl, cred, nil)
if err != nil {
panic(err)
}
resp, err := client.GetKey(context.TODO(), "key-to-retrieve", nil)
if err != nil {
panic(err)
}
fmt.Println(*resp.Key.ID)
}
func ExampleClient_UpdateKeyProperties() {
vaultUrl := os.Getenv("AZURE_KEYVAULT_URL")
cred, err := azidentity.NewDefaultAzureCredential(nil)
if err != nil {
panic(err)
}
client, err := azkeys.NewClient(vaultUrl, cred, nil)
if err != nil {
panic(err)
}
resp, err := client.UpdateKeyProperties(context.TODO(), "key-to-update", &azkeys.UpdateKeyPropertiesOptions{
Tags: map[string]*string{
"Tag1": to.StringPtr("val1"),
},
KeyAttributes: &azkeys.KeyAttributes{
RecoveryLevel: azkeys.DeletionRecoveryLevelCustomizedRecoverablePurgeable.ToPtr(),
},
})
if err != nil {
panic(err)
}
fmt.Println(*resp.Attributes.RecoveryLevel, *resp.Tags["Tag1"])
}
func ExampleClient_BeginDeleteKey() {
vaultUrl := os.Getenv("AZURE_KEYVAULT_URL")
cred, err := azidentity.NewDefaultAzureCredential(nil)
if err != nil {
panic(err)
}
client, err := azkeys.NewClient(vaultUrl, cred, nil)
if err != nil {
panic(err)
}
resp, err := client.BeginDeleteKey(context.TODO(), "key-to-delete", nil)
if err != nil {
panic(err)
}
pollResp, err := resp.PollUntilDone(context.TODO(), 1*time.Second)
if err != nil {
panic(err)
}
fmt.Printf("Successfully deleted key %s", *pollResp.Key.ID)
}
func ExampleClient_ListKeys() {
vaultUrl := os.Getenv("AZURE_KEYVAULT_URL")
cred, err := azidentity.NewDefaultAzureCredential(nil)
if err != nil {
panic(err)
}
client, err := azkeys.NewClient(vaultUrl, cred, nil)
if err != nil {
panic(err)
}
pager := client.ListKeys(nil)
for pager.NextPage(context.TODO()) {
for _, key := range pager.PageResponse().Keys {
fmt.Println(*key.KID)
}
}
if pager.Err() != nil {
panic(pager.Err())
}
}
| ["\"AZURE_KEYVAULT_URL\"", "\"AZURE_KEYVAULT_URL\"", "\"AZURE_KEYVAULT_URL\"", "\"AZURE_KEYVAULT_URL\"", "\"AZURE_KEYVAULT_URL\"", "\"AZURE_KEYVAULT_URL\"", "\"AZURE_KEYVAULT_URL\""] | [] | ["AZURE_KEYVAULT_URL"] | [] | ["AZURE_KEYVAULT_URL"] | go | 1 | 0 |
ai2thor/tests/test_controller.py | import ai2thor.controller
from ai2thor.server import Event
from ai2thor.platform import CloudRendering, Linux64
import pytest
import numpy as np
import warnings
import os
import math
def fake_linux64_exists(self):
if self.platform.name() == "Linux64":
return True
else:
return False
@classmethod
def fake_invalid_cr_validate(cls, request):
return ["Missing libvulkan1."]
@classmethod
def fake_invalid_linux64_validate(cls, request):
return ["No display found. "]
def fake_cr_exists(self):
if self.platform.name() == "CloudRendering":
return True
else:
return False
def fake_not_exists(self):
return False
def fake_find_platform_builds(self, candidate_platforms, request, commits, releases_dir, local_build):
return []
def fake_exists(self):
return True
def fake_linux_system():
return "Linux"
def fake_darwin_system():
return "Darwin"
def noop_download(self):
pass
def select_platforms_linux_cr(request):
return (Linux64, CloudRendering)
def select_platforms_cr(request):
return (CloudRendering, )
@classmethod
def fake_validate(cls, request):
return []
class FakeServer(object):
def __init__(self):
self.request_queue = FakeQueue()
self.response_queue = FakeQueue()
def send(self, action):
assert self.request_queue.empty()
self.response_queue.put_nowait(action)
def receive(self):
return self.request_queue.get(False, 0)
class FakeQueue(object):
def __init__(self):
self.value = None
def put_nowait(self, v):
assert self.value is None
self.value = v
def get(self, block=False, timeout=0):
v = self.value
self.value = None
return v
# always return empty so that we pass
def empty(self):
return True
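# FakeServer and FakeQueue stand in for the Unity side of the controller: a test
# preloads c.server.request_queue with the Event the controller should receive,
# calls c.step(...), and can then inspect the action the controller sent via
# c.server.response_queue.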
def controller(**args):
# delete display so the tests can run on Linux
if "DISPLAY" in os.environ:
del os.environ["DISPLAY"]
# during a ci-build we will get a warning that we are using a commit_id for the
# build instead of 'local'
default_args = dict(download_only=True)
default_args.update(args)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
c = ai2thor.controller.Controller(**default_args)
c.server = FakeServer()
return c
def test_osx_build_missing(mocker):
mocker.patch("ai2thor.controller.platform_system", fake_darwin_system)
mocker.patch("ai2thor.controller.Controller.find_platform_builds", fake_find_platform_builds)
with pytest.raises(Exception) as ex:
c = controller()
assert str(ex.value).startswith("No build exists for arch=Darwin platforms=OSXIntel64 and commits:")
def test_osx_build_invalid_commit_id(mocker):
mocker.patch("ai2thor.controller.platform_system", fake_darwin_system)
mocker.patch("ai2thor.controller.ai2thor.build.Build.exists", fake_not_exists)
fake_commit_id = "1234567TEST"
with pytest.raises(ValueError) as ex:
c = controller(commit_id=fake_commit_id)
assert (
str(ex.value)
== "Invalid commit_id: %s - no build exists for arch=Darwin platforms=OSXIntel64" % fake_commit_id
)
def test_osx_build(mocker):
mocker.patch("ai2thor.controller.platform_system", fake_darwin_system)
mocker.patch("ai2thor.controller.ai2thor.build.Build.exists", fake_exists)
mocker.patch("ai2thor.controller.ai2thor.build.Build.download", noop_download)
fake_commit_id = "1234567TEST"
c = controller(commit_id=fake_commit_id)
assert c._build.platform.name() == "OSXIntel64"
assert c._build.commit_id == fake_commit_id
def test_linux_explicit_xdisplay(mocker):
mocker.patch("ai2thor.controller.platform_system", fake_linux_system)
mocker.patch("ai2thor.controller.ai2thor.build.Build.exists", fake_exists)
mocker.patch("ai2thor.controller.ai2thor.build.Build.download", noop_download)
mocker.patch("ai2thor.controller.ai2thor.platform.Linux64.validate", fake_validate)
fake_commit_id = "1234567TEST"
c = controller(commit_id=fake_commit_id, x_display="75.9")
assert c._build.platform.name() == "Linux64"
assert c._build.commit_id == fake_commit_id
def test_linux_invalid_linux64_invalid_cr(mocker):
mocker.patch("ai2thor.controller.platform_system", fake_linux_system)
mocker.patch("ai2thor.controller.ai2thor.build.Build.exists", fake_exists)
mocker.patch("ai2thor.controller.ai2thor.build.Build.download", noop_download)
mocker.patch("ai2thor.controller.ai2thor.platform.select_platforms", select_platforms_linux_cr)
mocker.patch(
"ai2thor.controller.ai2thor.platform.CloudRendering.validate",
fake_invalid_cr_validate,
)
mocker.patch(
"ai2thor.controller.ai2thor.platform.Linux64.validate",
fake_invalid_linux64_validate,
)
fake_commit_id = "1234567TEST"
with pytest.raises(Exception) as excinfo:
c = controller(commit_id=fake_commit_id)
assert str(excinfo.value).startswith(
"The following builds were found, but had missing dependencies. Only one valid platform is required to run AI2-THOR."
)
def test_linux_invalid_linux64_valid_cr(mocker):
mocker.patch("ai2thor.controller.platform_system", fake_linux_system)
mocker.patch("ai2thor.controller.ai2thor.build.Build.exists", fake_exists)
mocker.patch("ai2thor.controller.ai2thor.build.Build.download", noop_download)
mocker.patch("ai2thor.controller.ai2thor.platform.select_platforms", select_platforms_linux_cr)
mocker.patch(
"ai2thor.controller.ai2thor.platform.CloudRendering.validate", fake_validate
)
mocker.patch(
"ai2thor.controller.ai2thor.platform.Linux64.validate",
fake_invalid_linux64_validate,
)
mocker.patch("ai2thor.platform.CloudRendering.enabled", True)
fake_commit_id = "1234567TEST"
c = controller(commit_id=fake_commit_id)
assert c._build.platform.name() == "CloudRendering"
assert c._build.commit_id == fake_commit_id
def test_linux_valid_linux64_valid_cloudrendering(mocker):
mocker.patch("ai2thor.controller.platform_system", fake_linux_system)
mocker.patch("ai2thor.controller.ai2thor.build.Build.exists", fake_exists)
mocker.patch("ai2thor.controller.ai2thor.build.Build.download", noop_download)
mocker.patch(
"ai2thor.controller.ai2thor.platform.CloudRendering.validate", fake_validate
)
mocker.patch("ai2thor.controller.ai2thor.platform.Linux64.validate", fake_validate)
fake_commit_id = "1234567TEST"
c = controller(commit_id=fake_commit_id)
assert c._build.platform.name() == "Linux64"
assert c._build.commit_id == fake_commit_id
def test_linux_valid_linux64_valid_cloudrendering_enabled_cr(mocker):
mocker.patch("ai2thor.controller.platform_system", fake_linux_system)
mocker.patch("ai2thor.controller.ai2thor.build.Build.exists", fake_exists)
mocker.patch("ai2thor.controller.ai2thor.build.Build.download", noop_download)
mocker.patch("ai2thor.controller.ai2thor.platform.select_platforms", select_platforms_cr)
mocker.patch(
"ai2thor.controller.ai2thor.platform.CloudRendering.validate", fake_validate
)
mocker.patch("ai2thor.controller.ai2thor.platform.Linux64.validate", fake_validate)
mocker.patch("ai2thor.platform.CloudRendering.enabled", True)
mocker.patch("ai2thor.platform.Linux64.enabled", False)
fake_commit_id = "1234567TEST"
c = controller(commit_id=fake_commit_id)
assert c._build.platform.name() == "CloudRendering"
assert c._build.commit_id == fake_commit_id
def test_linux_valid_linux64_invalid_cloudrendering(mocker):
mocker.patch("ai2thor.controller.platform_system", fake_linux_system)
mocker.patch("ai2thor.controller.ai2thor.build.Build.exists", fake_exists)
mocker.patch("ai2thor.controller.ai2thor.build.Build.download", noop_download)
mocker.patch(
"ai2thor.controller.ai2thor.platform.CloudRendering.validate",
fake_invalid_cr_validate,
)
mocker.patch("ai2thor.controller.ai2thor.platform.Linux64.validate", fake_validate)
fake_commit_id = "1234567TEST"
c = controller(commit_id=fake_commit_id)
assert c._build.platform.name() == "Linux64"
assert c._build.commit_id == fake_commit_id
def test_linux_missing_linux64(mocker):
mocker.patch("ai2thor.controller.platform_system", fake_linux_system)
mocker.patch("ai2thor.controller.ai2thor.build.Build.exists", fake_cr_exists)
mocker.patch("ai2thor.controller.ai2thor.build.Build.download", noop_download)
mocker.patch(
"ai2thor.controller.ai2thor.platform.CloudRendering.validate", fake_validate
)
mocker.patch("ai2thor.platform.CloudRendering.enabled", True)
mocker.patch("ai2thor.controller.ai2thor.platform.select_platforms", select_platforms_linux_cr)
fake_commit_id = "1234567TEST"
c = controller(commit_id=fake_commit_id)
assert c._build.platform.name() == "CloudRendering"
assert c._build.commit_id == fake_commit_id
def test_linux_missing_cloudrendering(mocker):
mocker.patch("ai2thor.controller.platform_system", fake_linux_system)
mocker.patch("ai2thor.controller.ai2thor.build.Build.exists", fake_linux64_exists)
mocker.patch("ai2thor.controller.ai2thor.build.Build.download", noop_download)
mocker.patch("ai2thor.controller.ai2thor.platform.Linux64.validate", fake_validate)
fake_commit_id = "1234567TEST"
c = controller(commit_id=fake_commit_id)
assert c._build.platform.name() == "Linux64"
assert c._build.commit_id == fake_commit_id
def test_distance():
point1 = dict(x=1.5, z=2.5)
point2 = dict(x=4.33, z=7.5)
point3 = dict(x=2.5, z=3.5)
assert ai2thor.controller.distance(point1, point2) == 5.745337239884183
assert ai2thor.controller.distance(point1, point1) == 0.0
assert ai2thor.controller.distance(point1, point3) == math.sqrt(2.0)
def test_key_for_point():
assert ai2thor.controller.key_for_point(2.567, -3.43) == "2.6 -3.4"
def test_invalid_commit(mocker):
caught_exception = False
try:
c = ai2thor.controller.Controller(commit_id="1234567x")
except ValueError as e:
caught_exception = True
assert caught_exception, "invalid commit id should throw ValueError"
def test_scene_names(mocker):
mocker.patch("ai2thor.controller.platform_system", fake_darwin_system)
mocker.patch("ai2thor.controller.ai2thor.build.Build.exists", fake_exists)
mocker.patch("ai2thor.controller.ai2thor.build.Build.download", noop_download)
c = controller()
assert len(c.scene_names()) == 195
assert len(c.ithor_scenes()) == 120
assert len(c.robothor_scenes()) == 195 - 120
def test_invalid_action(mocker):
mocker.patch("ai2thor.controller.platform_system", fake_darwin_system)
mocker.patch("ai2thor.controller.ai2thor.build.Build.exists", fake_exists)
mocker.patch("ai2thor.controller.ai2thor.build.Build.download", noop_download)
fake_event = Event(
dict(
screenWidth=300,
screenHeight=300,
colors=[],
lastActionSuccess=False,
errorCode="InvalidAction",
errorMessage="Invalid method: moveaheadbadmethod",
)
)
c = controller()
c.last_event = fake_event
action1 = dict(action="MoveaheadbadMethod")
c.server.request_queue.put_nowait(fake_event)
with pytest.raises(ValueError) as excinfo:
c.step(action1, raise_for_failure=True)
assert excinfo.value.args == ("Invalid method: moveaheadbadmethod",)
def test_fix_visibility_distance_env(mocker):
mocker.patch("ai2thor.controller.platform_system", fake_darwin_system)
mocker.patch("ai2thor.controller.ai2thor.build.Build.exists", fake_exists)
mocker.patch("ai2thor.controller.ai2thor.build.Build.download", noop_download)
try:
os.environ["AI2THOR_VISIBILITY_DISTANCE"] = "2.0"
fake_event = Event(
dict(screenWidth=300, screenHeight=300, colors=[], lastActionSuccess=True)
)
c = controller()
c.last_event = fake_event
action1 = dict(action="Initialize", gridSize=0.25)
c.server.request_queue.put_nowait(fake_event)
c.step(action1)
filtered_action = c.server.response_queue.get()
assert filtered_action == {
"action": "Initialize",
"gridSize": 0.25,
"visibilityDistance": 2.0,
}
finally:
del os.environ["AI2THOR_VISIBILITY_DISTANCE"]
def test_raise_for_failure(mocker):
mocker.patch("ai2thor.controller.platform_system", fake_darwin_system)
mocker.patch("ai2thor.controller.ai2thor.build.Build.exists", fake_exists)
mocker.patch("ai2thor.controller.ai2thor.build.Build.download", noop_download)
fake_event = Event(
dict(
screenWidth=300,
screenHeight=300,
colors=[],
lastActionSuccess=False,
errorCode="NotOpen",
)
)
c = controller()
c.last_event = fake_event
action1 = dict(action="MoveAhead")
c.server.request_queue.put_nowait(fake_event)
with pytest.raises(RuntimeError):
c.step(action1, raise_for_failure=True)
def test_failure(mocker):
mocker.patch("ai2thor.controller.platform_system", fake_darwin_system)
mocker.patch("ai2thor.controller.ai2thor.build.Build.exists", fake_exists)
mocker.patch("ai2thor.controller.ai2thor.build.Build.download", noop_download)
fake_event = Event(
dict(
screenWidth=300,
screenHeight=300,
colors=[],
lastActionSuccess=False,
errorCode="NotOpen",
)
)
c = controller()
c.last_event = fake_event
action1 = dict(action="MoveAhead")
c.server.request_queue.put_nowait(fake_event)
e = c.step(action1)
assert c.last_action == action1
assert not e.metadata["lastActionSuccess"]
def test_last_action(mocker):
mocker.patch("ai2thor.controller.platform_system", fake_darwin_system)
mocker.patch("ai2thor.controller.ai2thor.build.Build.exists", fake_exists)
mocker.patch("ai2thor.controller.ai2thor.build.Build.download", noop_download)
fake_event = Event(
dict(screenWidth=300, screenHeight=300, colors=[], lastActionSuccess=True)
)
c = controller()
c.last_event = fake_event
action1 = dict(action="RotateRight")
c.server.request_queue.put_nowait(fake_event)
e = c.step(action1)
assert c.last_action == action1
assert e.metadata["lastActionSuccess"]
c = controller()
c.last_event = fake_event
action2 = dict(action="RotateLeft")
c.server.request_queue.put_nowait(fake_event)
e = c.step(action2)
assert c.last_action == action2
assert e.metadata["lastActionSuccess"]
def test_unity_command_force_device_index(mocker):
mocker.patch("ai2thor.controller.platform_system", fake_linux_system)
mocker.patch("ai2thor.controller.ai2thor.build.Build.exists", fake_exists)
mocker.patch("ai2thor.controller.ai2thor.build.Build.download", noop_download)
mocker.patch("ai2thor.controller.ai2thor.platform.select_platforms", select_platforms_linux_cr)
mocker.patch(
"ai2thor.controller.ai2thor.platform.CloudRendering.validate", fake_validate
)
mocker.patch(
"ai2thor.controller.ai2thor.platform.Linux64.validate",
fake_invalid_linux64_validate,
)
mocker.patch("ai2thor.platform.CloudRendering.enabled", True)
original_visible_devices = os.environ.get("CUDA_VISIBLE_DEVICES")
try:
os.environ["CUDA_VISIBLE_DEVICES"] = "2,3,4"
c = controller(platform=CloudRendering, gpu_device=1)
assert c.unity_command(650, 550, False) == [
c._build.executable_path,
"-screen-fullscreen",
"0",
"-screen-quality",
"7",
"-screen-width",
"650",
"-screen-height",
"550",
'-force-device-index',
'4'
]
finally:
if original_visible_devices:
os.environ["CUDA_VISIBLE_DEVICES"] = original_visible_devices
else:
del os.environ["CUDA_VISIBLE_DEVICES"]
c = controller(platform=CloudRendering, gpu_device=5)
assert c.unity_command(650, 550, False) == [
c._build.executable_path,
"-screen-fullscreen",
"0",
"-screen-quality",
"7",
"-screen-width",
"650",
"-screen-height",
"550",
'-force-device-index',
'6'
]
c = controller(platform=CloudRendering, gpu_device=0)
assert c.unity_command(650, 550, False) == [
c._build.executable_path,
"-screen-fullscreen",
"0",
"-screen-quality",
"7",
"-screen-width",
"650",
"-screen-height",
"550",
'-force-device-index',
'0'
]
def test_unity_command(mocker):
mocker.patch("ai2thor.controller.platform_system", fake_linux_system)
mocker.patch("ai2thor.controller.ai2thor.build.Build.exists", fake_exists)
mocker.patch("ai2thor.controller.ai2thor.build.Build.download", noop_download)
mocker.patch("ai2thor.controller.ai2thor.platform.Linux64.validate", fake_validate)
c = controller()
assert c.unity_command(650, 550, False) == [
c._build.executable_path,
"-screen-fullscreen",
"0",
"-screen-quality",
"7",
"-screen-width",
"650",
"-screen-height",
"550",
]
c = controller(fullscreen=True, quality="Low")
assert c.unity_command(650, 550, False) == [
c._build.executable_path,
"-screen-fullscreen",
"1",
"-screen-quality",
"2",
"-screen-width",
"650",
"-screen-height",
"550",
]
| [] | [] | ["AI2THOR_VISIBILITY_DISTANCE", "CUDA_VISIBLE_DEVICES", "DISPLAY"] | [] | ["AI2THOR_VISIBILITY_DISTANCE", "CUDA_VISIBLE_DEVICES", "DISPLAY"] | python | 3 | 0 |
vendor/github.com/aliyun/terraform-provider-alicloud/alicloud/common.go | package alicloud
import (
"bytes"
"encoding/base64"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"os"
"os/user"
"path/filepath"
"reflect"
"runtime"
"strconv"
"strings"
"time"
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
"github.com/denverdino/aliyungo/cs"
"github.com/aliyun/aliyun-datahub-sdk-go/datahub"
sls "github.com/aliyun/aliyun-log-go-sdk"
"github.com/aliyun/aliyun-oss-go-sdk/oss"
"github.com/aliyun/aliyun-tablestore-go-sdk/tablestore"
"github.com/aliyun/fc-go-sdk"
"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
"gopkg.in/yaml.v2"
"math"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
"github.com/denverdino/aliyungo/common"
"github.com/google/uuid"
"github.com/mitchellh/go-homedir"
)
type InstanceNetWork string
const (
ClassicNet = InstanceNetWork("classic")
VpcNet = InstanceNetWork("vpc")
)
type PayType string
const (
PrePaid = PayType("PrePaid")
PostPaid = PayType("PostPaid")
Prepaid = PayType("Prepaid")
Postpaid = PayType("Postpaid")
)
const (
NormalMode = "normal"
SafetyMode = "safety"
)
type DdosbgpInsatnceType string
const (
Enterprise = DdosbgpInsatnceType("Enterprise")
Professional = DdosbgpInsatnceType("Professional")
)
type DdosbgpInstanceIpType string
const (
IPv4 = DdosbgpInstanceIpType("IPv4")
IPv6 = DdosbgpInstanceIpType("IPv6")
)
type NetType string
const (
Internet = NetType("Internet")
Intranet = NetType("Intranet")
)
type NetworkType string
const (
Classic = NetworkType("Classic")
Vpc = NetworkType("Vpc")
ClassicInternet = NetworkType("classic_internet")
ClassicIntranet = NetworkType("classic_intranet")
PUBLIC = NetworkType("PUBLIC")
PRIVATE = NetworkType("PRIVATE")
)
type NodeType string
const (
WORKER = NodeType("WORKER")
KIBANA = NodeType("KIBANA")
)
type ActionType string
const (
OPEN = ActionType("OPEN")
CLOSE = ActionType("CLOSE")
)
type TimeType string
const (
Hour = TimeType("Hour")
Day = TimeType("Day")
Week = TimeType("Week")
Month = TimeType("Month")
Year = TimeType("Year")
)
type IpVersion string
const (
IPV4 = IpVersion("ipv4")
IPV6 = IpVersion("ipv6")
)
type Status string
const (
Pending = Status("Pending")
Creating = Status("Creating")
Running = Status("Running")
Available = Status("Available")
Unavailable = Status("Unavailable")
Modifying = Status("Modifying")
Deleting = Status("Deleting")
Starting = Status("Starting")
Stopping = Status("Stopping")
Stopped = Status("Stopped")
Normal = Status("Normal")
Changing = Status("Changing")
Online = Status("online")
Configuring = Status("configuring")
Associating = Status("Associating")
Unassociating = Status("Unassociating")
InUse = Status("InUse")
DiskInUse = Status("In_use")
Active = Status("Active")
Inactive = Status("Inactive")
Idle = Status("Idle")
SoldOut = Status("SoldOut")
InService = Status("InService")
Removing = Status("Removing")
DisabledStatus = Status("Disabled")
Init = Status("Init")
Provisioning = Status("Provisioning")
Updating = Status("Updating")
FinancialLocked = Status("FinancialLocked")
PUBLISHED = Status("Published")
NOPUBLISHED = Status("NonPublished")
Deleted = Status("Deleted")
Null = Status("Null")
Enable = Status("Enable")
BINDED = Status("BINDED")
)
type IPType string
const (
Inner = IPType("Inner")
Private = IPType("Private")
Public = IPType("Public")
)
type ResourceType string
const (
ResourceTypeInstance = ResourceType("Instance")
ResourceTypeDisk = ResourceType("Disk")
ResourceTypeVSwitch = ResourceType("VSwitch")
ResourceTypeRds = ResourceType("Rds")
ResourceTypePolarDB = ResourceType("PolarDB")
IoOptimized = ResourceType("IoOptimized")
ResourceTypeRkv = ResourceType("KVStore")
ResourceTypeFC = ResourceType("FunctionCompute")
ResourceTypeElasticsearch = ResourceType("Elasticsearch")
ResourceTypeSlb = ResourceType("Slb")
ResourceTypeMongoDB = ResourceType("MongoDB")
ResourceTypeGpdb = ResourceType("Gpdb")
ResourceTypeHBase = ResourceType("HBase")
ResourceTypeAdb = ResourceType("ADB")
ResourceTypeCassandra = ResourceType("Cassandra")
)
type InternetChargeType string
const (
PayByBandwidth = InternetChargeType("PayByBandwidth")
PayByTraffic = InternetChargeType("PayByTraffic")
PayBy95 = InternetChargeType("PayBy95")
)
type AccountSite string
const (
DomesticSite = AccountSite("Domestic")
IntlSite = AccountSite("International")
)
const (
SnapshotCreatingInProcessing = Status("progressing")
SnapshotCreatingAccomplished = Status("accomplished")
SnapshotCreatingFailed = Status("failed")
SnapshotPolicyCreating = Status("Creating")
SnapshotPolicyAvailable = Status("available")
SnapshotPolicyNormal = Status("Normal")
)
// timeout for common product, ecs e.g.
const DefaultTimeout = 120
const Timeout5Minute = 300
const DefaultTimeoutMedium = 500
// timeout for long-running products, rds e.g.
const DefaultLongTimeout = 1000
const DefaultIntervalMini = 2
const DefaultIntervalShort = 5
const DefaultIntervalMedium = 10
const DefaultIntervalLong = 20
const (
PageSizeSmall = 10
PageSizeMedium = 20
PageSizeLarge = 50
PageSizeXLarge = 100
)
// Protocol represents network protocol
type Protocol string
// Constants of protocol definition
const (
Http = Protocol("http")
Https = Protocol("https")
Tcp = Protocol("tcp")
Udp = Protocol("udp")
All = Protocol("all")
Icmp = Protocol("icmp")
Gre = Protocol("gre")
)
// ValidProtocols network protocol list
var ValidProtocols = []Protocol{Http, Https, Tcp, Udp}
// simple array value check method, supports string type only
func isProtocolValid(value string) bool {
res := false
for _, v := range ValidProtocols {
if string(v) == value {
res = true
}
}
return res
}
// default region for all resource
const DEFAULT_REGION = "cn-beijing"
const INT_MAX = 2147483647
// symbol of multiIZ
const MULTI_IZ_SYMBOL = "MAZ"
const COMMA_SEPARATED = ","
const COLON_SEPARATED = ":"
const SLASH_SEPARATED = "/"
const LOCAL_HOST_IP = "127.0.0.1"
// Takes the result of flatmap.Expand for an array of strings
// and returns a []string
func expandStringList(configured []interface{}) []string {
vs := make([]string, 0, len(configured))
for _, v := range configured {
if v == nil {
continue
}
vs = append(vs, v.(string))
}
return vs
}
// Takes a list of strings and expands it into an array
// of raw values, returning a []interface{}
func convertListStringToListInterface(list []string) []interface{} {
vs := make([]interface{}, 0, len(list))
for _, v := range list {
vs = append(vs, v)
}
return vs
}
func expandIntList(configured []interface{}) []int {
vs := make([]int, 0, len(configured))
for _, v := range configured {
vs = append(vs, v.(int))
}
return vs
}
// Converts an array into a JSON string
func convertListToJsonString(configured []interface{}) string {
if len(configured) < 1 {
return ""
}
result := "["
for i, v := range configured {
if v == nil {
continue
}
result += "\"" + v.(string) + "\""
if i < len(configured)-1 {
result += ","
}
}
result += "]"
return result
}
func convertJsonStringToStringList(src interface{}) (result []interface{}) {
if err, ok := src.([]interface{}); !ok {
panic(err)
}
for _, v := range src.([]interface{}) {
result = append(result, fmt.Sprint(formatInt(v)))
}
return
}
func convertJsonStringToMap(configured string) (map[string]interface{}, error) {
result := make(map[string]interface{})
if err := json.Unmarshal([]byte(configured), &result); err != nil {
return nil, err
}
return result, nil
}
// Converts an array into a comma-separated string
func convertListToCommaSeparate(configured []interface{}) string {
if len(configured) < 1 {
return ""
}
result := ""
for i, v := range configured {
rail := ","
if i == len(configured)-1 {
rail = ""
}
result += v.(string) + rail
}
return result
}
func convertBoolToString(configured bool) string {
return strconv.FormatBool(configured)
}
func convertIntergerToString(configured int) string {
return strconv.Itoa(configured)
}
func convertFloat64ToString(configured float64) string {
return strconv.FormatFloat(configured, 'E', -1, 64)
}
func convertJsonStringToList(configured string) ([]interface{}, error) {
result := make([]interface{}, 0)
if err := json.Unmarshal([]byte(configured), &result); err != nil {
return nil, err
}
return result, nil
}
func convertMaptoJsonString(m map[string]interface{}) (string, error) {
//sm := make(map[string]string, len(m))
//for k, v := range m {
// sm[k] = v.(string)
//}
if result, err := json.Marshal(m); err != nil {
return "", err
} else {
return string(result), nil
}
}
func convertListMapToJsonString(configured []map[string]interface{}) (string, error) {
if len(configured) < 1 {
return "[]", nil
}
result := "["
for i, m := range configured {
if m == nil {
continue
}
sm := make(map[string]interface{}, len(m))
for k, v := range m {
sm[k] = v
}
item, err := json.Marshal(sm)
if err == nil {
result += string(item)
if i < len(configured)-1 {
result += ","
}
}
}
result += "]"
return result, nil
}
func convertMapFloat64ToJsonString(m map[string]interface{}) (string, error) {
sm := make(map[string]json.Number, len(m))
for k, v := range m {
sm[k] = v.(json.Number)
}
if result, err := json.Marshal(sm); err != nil {
return "", err
} else {
return string(result), nil
}
}
func StringPointer(s string) *string {
return &s
}
func BoolPointer(b bool) *bool {
return &b
}
func Int32Pointer(i int32) *int32 {
return &i
}
func Int64Pointer(i int64) *int64 {
return &i
}
func IntMin(x, y int) int {
if x < y {
return x
}
return y
}
const ServerSideEncryptionAes256 = "AES256"
const ServerSideEncryptionKMS = "KMS"
type OptimizedType string
const (
IOOptimized = OptimizedType("optimized")
NoneOptimized = OptimizedType("none")
)
type TagResourceType string
const (
TagResourceImage = TagResourceType("image")
TagResourceInstance = TagResourceType("instance")
TagResourceAcl = TagResourceType("acl")
TagResourceCertificate = TagResourceType("certificate")
TagResourceSnapshot = TagResourceType("snapshot")
TagResourceKeypair = TagResourceType("keypair")
TagResourceDisk = TagResourceType("disk")
TagResourceSecurityGroup = TagResourceType("securitygroup")
TagResourceEni = TagResourceType("eni")
TagResourceCdn = TagResourceType("DOMAIN")
TagResourceVpc = TagResourceType("VPC")
TagResourceVSwitch = TagResourceType("VSWITCH")
TagResourceRouteTable = TagResourceType("ROUTETABLE")
TagResourceEip = TagResourceType("EIP")
TagResourcePlugin = TagResourceType("plugin")
TagResourceApiGroup = TagResourceType("apiGroup")
TagResourceApp = TagResourceType("app")
TagResourceTopic = TagResourceType("topic")
TagResourceConsumerGroup = TagResourceType("consumergroup")
TagResourceCluster = TagResourceType("cluster")
)
type KubernetesNodeType string
const (
KubernetesNodeMaster = ResourceType("Master")
KubernetesNodeWorker = ResourceType("Worker")
)
func getPagination(pageNumber, pageSize int) (pagination common.Pagination) {
pagination.PageSize = pageSize
pagination.PageNumber = pageNumber
return
}
const CharityPageUrl = "http://promotion.alicdn.com/help/oss/error.html"
func userDataHashSum(user_data string) string {
// Check whether the user_data is not Base64 encoded.
// Always calculate hash of base64 decoded value since we
// check against double-encoding when setting it
v, base64DecodeError := base64.StdEncoding.DecodeString(user_data)
if base64DecodeError != nil {
v = []byte(user_data)
}
return string(v)
}
// Remove leading and trailing blanks from the string.
func Trim(v string) string {
if len(v) < 1 {
return v
}
return strings.Trim(v, " ")
}
func ConvertIntegerToInt(value requests.Integer) (v int, err error) {
if strings.TrimSpace(string(value)) == "" {
return
}
v, err = strconv.Atoi(string(value))
if err != nil {
return v, fmt.Errorf("Converting integer %s to int got an error: %#v.", value, err)
}
return
}
func GetUserHomeDir() (string, error) {
usr, err := user.Current()
if err != nil {
return "", fmt.Errorf("Get current user got an error: %#v.", err)
}
return usr.HomeDir, nil
}
func writeToFile(filePath string, data interface{}) error {
var out string
switch data.(type) {
case string:
out = data.(string)
break
case nil:
return nil
default:
bs, err := json.MarshalIndent(data, "", "\t")
if err != nil {
return fmt.Errorf("MarshalIndent data %#v got an error: %#v", data, err)
}
out = string(bs)
}
if strings.HasPrefix(filePath, "~") {
home, err := GetUserHomeDir()
if err != nil {
return err
}
if home != "" {
filePath = strings.Replace(filePath, "~", home, 1)
}
}
if _, err := os.Stat(filePath); err == nil {
if err := os.Remove(filePath); err != nil {
return err
}
}
return ioutil.WriteFile(filePath, []byte(out), 422)
}
type Invoker struct {
catchers []*Catcher
}
type Catcher struct {
Reason string
RetryCount int
RetryWaitSeconds int
}
var ClientErrorCatcher = Catcher{AliyunGoClientFailure, 10, 5}
var ServiceBusyCatcher = Catcher{"ServiceUnavailable", 10, 5}
var ThrottlingCatcher = Catcher{Throttling, 50, 2}
func NewInvoker() Invoker {
i := Invoker{}
i.AddCatcher(ClientErrorCatcher)
i.AddCatcher(ServiceBusyCatcher)
i.AddCatcher(ThrottlingCatcher)
return i
}
func (a *Invoker) AddCatcher(catcher Catcher) {
a.catchers = append(a.catchers, &catcher)
}
func (a *Invoker) Run(f func() error) error {
err := f()
if err == nil {
return nil
}
for _, catcher := range a.catchers {
if IsExpectedErrors(err, []string{catcher.Reason}) {
catcher.RetryCount--
if catcher.RetryCount <= 0 {
return fmt.Errorf("Retry timeout and got an error: %#v.", err)
} else {
time.Sleep(time.Duration(catcher.RetryWaitSeconds) * time.Second)
return a.Run(f)
}
}
}
return err
}
func buildClientToken(action string) string {
token := strings.TrimSpace(fmt.Sprintf("TF-%s-%d-%s", action, time.Now().Unix(), strings.Trim(uuid.New().String(), "-")))
if len(token) > 64 {
token = token[0:64]
}
return token
}
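// For example, a generated token looks like "TF-CreateVpc-1612345678-<uuid>" and is
// truncated to at most 64 characters.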
func getNextpageNumber(number requests.Integer) (requests.Integer, error) {
page, err := strconv.Atoi(string(number))
if err != nil {
return "", err
}
return requests.NewInteger(page + 1), nil
}
func terraformToAPI(field string) string {
var result string
for _, v := range strings.Split(field, "_") {
if len(v) > 0 {
result = fmt.Sprintf("%s%s%s", result, strings.ToUpper(string(v[0])), v[1:])
}
}
return result
}
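// For example, terraformToAPI("security_group_id") returns "SecurityGroupId".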
func compareJsonTemplateAreEquivalent(tem1, tem2 string) (bool, error) {
obj1 := make(map[string]interface{})
err := json.Unmarshal([]byte(tem1), &obj1)
if err != nil {
return false, err
}
canonicalJson1, _ := json.Marshal(obj1)
obj2 := make(map[string]interface{})
err = json.Unmarshal([]byte(tem2), &obj2)
if err != nil {
return false, err
}
canonicalJson2, _ := json.Marshal(obj2)
equal := bytes.Compare(canonicalJson1, canonicalJson2) == 0
if !equal {
log.Printf("[DEBUG] Canonical template are not equal.\nFirst: %s\nSecond: %s\n",
canonicalJson1, canonicalJson2)
}
return equal, nil
}
func compareYamlTemplateAreEquivalent(tem1, tem2 string) (bool, error) {
var obj1 interface{}
err := yaml.Unmarshal([]byte(tem1), &obj1)
if err != nil {
return false, err
}
canonicalYaml1, _ := yaml.Marshal(obj1)
var obj2 interface{}
err = yaml.Unmarshal([]byte(tem2), &obj2)
if err != nil {
return false, err
}
canonicalYaml2, _ := yaml.Marshal(obj2)
equal := bytes.Compare(canonicalYaml1, canonicalYaml2) == 0
if !equal {
log.Printf("[DEBUG] Canonical template are not equal.\nFirst: %s\nSecond: %s\n",
canonicalYaml1, canonicalYaml2)
}
return equal, nil
}
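// For example, compareJsonTemplateAreEquivalent(`{"a":1,"b":2}`, `{"b":2,"a":1}`)
// reports true: both templates are unmarshalled and re-marshalled into a canonical
// form before comparison, so key order and whitespace differences are ignored.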
// loadFileContent returns contents of a file in a given path
func loadFileContent(v string) ([]byte, error) {
filename, err := homedir.Expand(v)
if err != nil {
return nil, err
}
fileContent, err := ioutil.ReadFile(filename)
if err != nil {
return nil, err
}
return fileContent, nil
}
func debugOn() bool {
for _, part := range strings.Split(os.Getenv("DEBUG"), ",") {
if strings.TrimSpace(part) == "terraform" {
return true
}
}
return false
}
func addDebug(action, content interface{}, requestInfo ...interface{}) {
if debugOn() {
trace := "[DEBUG TRACE]:\n"
for skip := 1; skip < 5; skip++ {
_, filepath, line, _ := runtime.Caller(skip)
trace += fmt.Sprintf("%s:%d\n", filepath, line)
}
if len(requestInfo) > 0 {
var request = struct {
Domain string
Version string
UserAgent string
ActionName string
Method string
Product string
Region string
AK string
}{}
switch requestInfo[0].(type) {
case *requests.RpcRequest:
tmp := requestInfo[0].(*requests.RpcRequest)
request.Domain = tmp.GetDomain()
request.Version = tmp.GetVersion()
request.ActionName = tmp.GetActionName()
request.Method = tmp.GetMethod()
request.Product = tmp.GetProduct()
request.Region = tmp.GetRegionId()
case *requests.RoaRequest:
tmp := requestInfo[0].(*requests.RoaRequest)
request.Domain = tmp.GetDomain()
request.Version = tmp.GetVersion()
request.ActionName = tmp.GetActionName()
request.Method = tmp.GetMethod()
request.Product = tmp.GetProduct()
request.Region = tmp.GetRegionId()
case *requests.CommonRequest:
tmp := requestInfo[0].(*requests.CommonRequest)
request.Domain = tmp.GetDomain()
request.Version = tmp.GetVersion()
request.ActionName = tmp.GetActionName()
request.Method = tmp.GetMethod()
request.Product = tmp.GetProduct()
request.Region = tmp.GetRegionId()
case *fc.Client:
client := requestInfo[0].(*fc.Client)
request.Version = client.Config.APIVersion
request.Product = "FC"
request.ActionName = fmt.Sprintf("%s", action)
case *sls.Client:
request.Product = "LOG"
request.ActionName = fmt.Sprintf("%s", action)
case *tablestore.TableStoreClient:
request.Product = "OTS"
request.ActionName = fmt.Sprintf("%s", action)
case *oss.Client:
request.Product = "OSS"
request.ActionName = fmt.Sprintf("%s", action)
case *datahub.DataHub:
request.Product = "DataHub"
request.ActionName = fmt.Sprintf("%s", action)
case *cs.Client:
request.Product = "CS"
request.ActionName = fmt.Sprintf("%s", action)
}
requestContent := ""
if len(requestInfo) > 1 {
requestContent = fmt.Sprintf("%#v", requestInfo[1])
}
if len(requestInfo) == 1 {
if v, ok := requestInfo[0].(map[string]interface{}); ok {
if res, err := json.Marshal(&v); err == nil {
requestContent = string(res)
}
if res, err := json.Marshal(&content); err == nil {
content = string(res)
}
}
}
content = fmt.Sprintf("%vDomain:%v, Version:%v, ActionName:%v, Method:%v, Product:%v, Region:%v\n\n"+
"*************** %s Request ***************\n%#v\n",
content, request.Domain, request.Version, request.ActionName,
request.Method, request.Product, request.Region, request.ActionName, requestContent)
}
//fmt.Printf(DefaultDebugMsg, action, content, trace)
log.Printf(DefaultDebugMsg, action, content, trace)
}
}
// GetFunc returns the name of the function at the given call-stack level.
func GetFunc(level int) string {
pc, _, _, ok := runtime.Caller(level)
if !ok {
log.Printf("[ERROR] runtime.Caller error in GetFuncName.")
return ""
}
return strings.TrimPrefix(filepath.Ext(runtime.FuncForPC(pc).Name()), ".")
}
func ParseResourceId(id string, length int) (parts []string, err error) {
parts = strings.Split(id, ":")
if len(parts) != length {
err = WrapError(fmt.Errorf("Invalid Resource Id %s. Expected parts' length %d, got %d", id, length, len(parts)))
}
return parts, err
}
func ParseSlbListenerId(id string) (parts []string, err error) {
parts = strings.Split(id, ":")
if len(parts) != 2 && len(parts) != 3 {
err = WrapError(fmt.Errorf("Invalid alicloud_slb_listener Id %s. Expected Id format is <slb id>:<protocol>:< frontend>.", id))
}
return parts, err
}
func GetCenChildInstanceType(id string) (c string, e error) {
if strings.HasPrefix(id, "vpc") {
return ChildInstanceTypeVpc, nil
} else if strings.HasPrefix(id, "vbr") {
return ChildInstanceTypeVbr, nil
} else if strings.HasPrefix(id, "ccn") {
return ChildInstanceTypeCcn, nil
} else {
return c, fmt.Errorf("CEN child instance ID invalid. Now, it only supports VPC or VBR or CCN instance.")
}
}
func BuildStateConf(pending, target []string, timeout, delay time.Duration, f resource.StateRefreshFunc) *resource.StateChangeConf {
return &resource.StateChangeConf{
Pending: pending,
Target: target,
Refresh: f,
Timeout: timeout,
Delay: delay,
MinTimeout: 3 * time.Second,
}
}
func incrementalWait(firstDuration time.Duration, increaseDuration time.Duration) func() {
retryCount := 1
return func() {
var waitTime time.Duration
if retryCount == 1 {
waitTime = firstDuration
} else if retryCount > 1 {
waitTime += increaseDuration
}
time.Sleep(waitTime)
retryCount++
}
}
// If auto renew is enabled, the period computed from computePeriodByUnit may be changed.
// This method is used to compute a period according to the current period and unit.
func computePeriodByUnit(createTime, endTime interface{}, currentPeriod int, periodUnit string) (int, error) {
var createTimeStr, endTimeStr string
switch value := createTime.(type) {
case int64:
createTimeStr = time.Unix(createTime.(int64), 0).Format(time.RFC3339)
endTimeStr = time.Unix(endTime.(int64), 0).Format(time.RFC3339)
case string:
createTimeStr = createTime.(string)
endTimeStr = endTime.(string)
default:
return 0, WrapError(fmt.Errorf("Unsupported time type: %#v", value))
}
// currently, some time values are not formatted as standard RFC3339
UnStandardRFC3339 := "2006-01-02T15:04Z07:00"
create, err := time.Parse(time.RFC3339, createTimeStr)
if err != nil {
log.Printf("Parase the CreateTime %#v failed and error is: %#v.", createTime, err)
create, err = time.Parse(UnStandardRFC3339, createTimeStr)
if err != nil {
return 0, WrapError(err)
}
}
end, err := time.Parse(time.RFC3339, endTimeStr)
if err != nil {
log.Printf("Parase the EndTime %#v failed and error is: %#v.", endTime, err)
end, err = time.Parse(UnStandardRFC3339, endTimeStr)
if err != nil {
return 0, WrapError(err)
}
}
var period int
switch periodUnit {
case "Month":
period = int(math.Floor(end.Sub(create).Hours() / 24 / 30))
case "Week":
period = int(math.Floor(end.Sub(create).Hours() / 24 / 7))
case "Year":
period = int(math.Floor(end.Sub(create).Hours() / 24 / 365))
default:
err = fmt.Errorf("Unexpected period unit %s", periodUnit)
}
// The period is at least 1
if period < 1 {
period = 1
}
if period > 12 {
period = 12
}
// The period cannot be modified, so if the newly computed period differs, keep the previous one.
if currentPeriod > 0 && currentPeriod != period {
period = currentPeriod
}
return period, WrapError(err)
}
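// For example, a create time of "2020-01-01T00:00:00Z" and an end time of
// "2020-04-01T00:00:00Z" with periodUnit "Month" yield a period of 3 (91 days / 30,
// rounded down), clamped to the 1..12 range and overridden by currentPeriod when it is set.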
func checkWaitForReady(object interface{}, conditions map[string]interface{}) (bool, map[string]interface{}, error) {
if conditions == nil {
return false, nil, nil
}
objectType := reflect.TypeOf(object)
objectValue := reflect.ValueOf(object)
values := make(map[string]interface{})
for key, value := range conditions {
if _, ok := objectType.FieldByName(key); ok {
current := objectValue.FieldByName(key)
values[key] = current
if fmt.Sprintf("%v", current) != fmt.Sprintf("%v", value) {
return false, values, nil
}
} else {
return false, values, WrapError(fmt.Errorf("There is missing attribute %s in the object.", key))
}
}
return true, values, nil
}
// When using teadsl, we need to convert float, int64 and int32 to int for comparison.
func formatInt(src interface{}) int {
if src == nil {
return 0
}
attrType := reflect.TypeOf(src)
switch attrType.String() {
case "float64":
return int(src.(float64))
case "float32":
return int(src.(float32))
case "int64":
return int(src.(int64))
case "int32":
return int(src.(int32))
case "int":
return src.(int)
case "string":
v, err := strconv.Atoi(src.(string))
if err != nil {
panic(err)
}
return v
case "json.Number":
v, err := strconv.Atoi(src.(json.Number).String())
if err != nil {
panic(err)
}
return v
default:
panic(fmt.Sprintf("Not support type %s", attrType.String()))
}
}
func convertArrayObjectToJsonString(src interface{}) (string, error) {
res, err := json.Marshal(&src)
if err != nil {
return "", err
}
return string(res), nil
}
func convertArrayToString(src interface{}, sep string) string {
if src == nil {
return ""
}
items := make([]string, 0)
for _, v := range src.([]interface{}) {
items = append(items, fmt.Sprint(v))
}
return strings.Join(items, sep)
}
func splitMultiZoneId(id string) (ids []string) {
if !(strings.Contains(id, MULTI_IZ_SYMBOL) || strings.Contains(id, "(")) {
return
}
firstIndex := strings.Index(id, MULTI_IZ_SYMBOL)
secondIndex := strings.Index(id, "(")
for _, p := range strings.Split(id[secondIndex+1:len(id)-1], COMMA_SEPARATED) {
ids = append(ids, id[:firstIndex]+string(p))
}
return
}
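// For example, splitMultiZoneId("cn-hangzhou-MAZ5(b,e)") returns
// ["cn-hangzhou-b", "cn-hangzhou-e"], while an id without a multi-zone suffix returns nil.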
func Case2Camel(name string) string {
name = strings.Replace(name, "_", " ", -1)
name = strings.Title(name)
return strings.Replace(name, " ", "", -1)
}
func FirstLower(s string) string {
if s == "" {
return ""
}
return strings.ToLower(s[:1]) + s[1:]
}
// SplitSlice Divides the slice into blocks of the specified size
func SplitSlice(xs []interface{}, chunkSize int) [][]interface{} {
if len(xs) == 0 {
return nil
}
divided := make([][]interface{}, (len(xs)+chunkSize-1)/chunkSize)
prev := 0
i := 0
till := len(xs) - chunkSize
for prev < till {
next := prev + chunkSize
divided[i] = xs[prev:next]
prev = next
i++
}
divided[i] = xs[prev:]
return divided
}
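// For example, SplitSlice([]interface{}{1, 2, 3, 4, 5}, 2) returns
// [[1 2] [3 4] [5]]; the final chunk carries whatever remainder is left.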
func isPagingRequest(d *schema.ResourceData) bool {
v, ok := d.GetOk("page_number")
return ok && v.(int) > 0
}
func setPagingRequest(d *schema.ResourceData, request map[string]interface{}, maxPageSize int) {
if v, ok := d.GetOk("page_number"); ok && v.(int) > 0 {
request["PageNumber"] = v.(int)
} else {
request["PageNumber"] = 1
}
if v, ok := d.GetOk("page_size"); ok && v.(int) > 0 {
request["PageSize"] = v.(int)
} else {
request["PageSize"] = PageSizeLarge
}
return
}
| ["\"DEBUG\""] | [] | ["DEBUG"] | [] | ["DEBUG"] | go | 1 | 0 |
cmd/gen_static_prelude/prep.go | // prep the prelude for static inclusion with the
// `gi` binary.
package main
import (
"fmt"
"net/http"
"os"
"github.com/shurcooL/vfsgen"
)
func main() {
gopath := os.Getenv("GOPATH")
compiler := gopath + "/src/github.com/gijit/gi/pkg/compiler"
prelude := compiler + "/prelude"
gentarget := compiler + "/prelude_static.go"
var fs http.FileSystem = http.Dir(prelude)
err := vfsgen.Generate(fs, vfsgen.Options{
Filename: gentarget,
PackageName: "compiler",
//BuildTags: "!dev",
VariableName: "preludeFiles",
})
if err != nil {
panic(err)
}
fmt.Printf("gen_static_prelude '%s' ->\n '%s'\n", prelude, gentarget)
}
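// The generated prelude_static.go declares the preludeFiles variable as an
// http.FileSystem with the prelude directory's contents embedded, so the compiler
// package can serve the prelude without reading it from disk at runtime.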
| ["\"GOPATH\""] | [] | ["GOPATH"] | [] | ["GOPATH"] | go | 1 | 0 |
unbabel/api.py | """
Disclaimer: This is a slightly customized Unbabel API to make it compatible with Python 3.6.
Check /docs/ for more details.
"""
import json
import logging
import os
import requests
log = logging.getLogger()
import copy
UNBABEL_SANDBOX_API_URL = os.environ.get(
'UNBABEL_SANDOX_API_URL', 'https://sandbox.unbabel.com/tapi/v2/')
UNBABEL_API_URL = os.environ.get(
'UNBABEL_API_URL', 'https://unbabel.com/tapi/v2/')
class UnauthorizedException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class BadRequestException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class Language(object):
def __init__(self, shortname, name):
self.shortname = shortname
self.name = name
def __repr__(self):
return self.name
def __str__(self):
return self.name
class Tone(object):
def __init__(self, description, name):
self.description = description
self.name = name
def __repr__(self):
return self.name
def __str__(self):
return self.name
class Topic(object):
def __init__(self, name):
self.name = name
def __repr__(self):
return self.name
def __str__(self):
return self.name
class LangPair(object):
def __init__(self, source_language, target_language):
self.source_language = source_language
self.target_language = target_language
def __repr__(self):
return "%s_%s" % (
self.source_language.shortname, self.target_language.shortname)
def __str__(self):
return "%s_%s" % (
self.source_language.shortname, self.target_language.shortname)
class Translator(object):
def __init__(self, first_name="", last_name="", picture_url="",
profile_url=""):
self.first_name = first_name
self.last_name = last_name
self.picture_url = picture_url
self.profile_url = profile_url
@classmethod
def from_json(cls, json):
t = Translator(json["first_name"], json["last_name"],
json["picture_url"], json["profile_url"])
return t
class Translation(object):
def __init__(
self,
uid=-1,
text="",
translatedText=None,
target_language="",
source_language=None,
status=None,
translators=[],
topics=None,
price=None,
text_format='text',
origin=None,
price_plan=None,
balance=None,
client=None,
order_number=None):
self.uid = uid
self.text = text
self.translation = translatedText
self.source_language = source_language
self.target_language = target_language
self.status = status
self.translators = translators
self.topics = topics
self.price = price
self.text_format = text_format
self.origin = origin
self.price_plan = price_plan
self.client = client
self.balance = balance
self.order_number = order_number
def __repr__(self):
return "%s %s %s_%s" % (
self.uid, self.status, self.source_language, self.target_language)
def __str__(self):
return "%s %s %s_%s" % (
self.uid, self.status, self.source_language, self.target_language)
class MTTranslation(object):
def __init__(
self,
uid=-1,
text="",
translatedText=None,
target_language="",
source_language=None,
status=None,
topics=None,
text_format='text',
origin=None,
client=None):
self.uid = uid
self.text = text
self.translation = translatedText
self.source_language = source_language
self.target_language = target_language
self.status = status
self.topics = topics
self.text_format = text_format
self.origin = origin
self.client = client
def __repr__(self):
return "%s %s %s_%s" % (
self.uid, self.status, self.source_language, self.target_language)
def __str__(self):
return "%s %s %s_%s" % (
self.uid, self.status, self.source_language, self.target_language)
class Account(object):
def __init__(self, username, email, balance):
self.username = username
self.email = email
self.balance = balance
def __unicode__(self):
return u'email: {email}, balance: {balance}'.format(
email=self.email, balance=self.balance,
)
class Job(object):
def __init__(self, id, uid, order_id, status, source_language,
target_language,
text, price, tone, text_format):
self.id = id
self.uid = uid
self.order_id = order_id
self.status = status
self.text = text
self.price = price
self.source_language = source_language
self.target_language = target_language
self.tone = tone
self.text_format = text_format
def __unicode__(self):
return u'order_id: {}, id: {}, status: {}'.format(
self.order_id, self.id, self.status)
class Order(object):
def __init__(self, id, status, price):
self.id = id
self.status = status
self.price = price
def __unicode__(self):
return u'{id} - {status} - {price}'.format(
id=self.id,
status=self.status,
price=self.price,
)
class UnbabelApi(object):
def __init__(self, username, api_key, sandbox=False):
if sandbox:
api_url = UNBABEL_SANDBOX_API_URL
else:
api_url = UNBABEL_API_URL
self.username = username
self.api_key = api_key
self.api_url = api_url
self.is_bulk = False
self.headers = {
'Authorization': 'ApiKey {}:{}'.format(self.username,
self.api_key),
'content-type': 'application/json'}
def api_call(self, uri, data=None, internal_api_call=False):
api_url = self.api_url
if internal_api_call:
api_url = api_url.replace('/tapi/v2/', '/api/v1/')
url = "{}{}".format(api_url, uri)
if data is None:
return requests.get(url, headers=self.headers)
return requests.post(url, headers=self.headers, data=json.dumps(data))
def post_translations(self, text, target_language, source_language=None, type=None, tone=None, visibility=None,
public_url=None, callback_url=None, topics=None, instructions=None, uid=None,
text_format="text", target_text=None, origin=None, client_owner_email=None, context=None):
        data = {k: v for k, v in locals().items() if v not in (self, None)}
if self.is_bulk:
self.bulk_data.append(data)
return
return self._make_request(data)
def post_mt_translations(self, text, target_language, source_language=None, tone=None, callback_url=None,
topics=None, instructions=None, uid=None, text_format="text", origin=None,
client_owner_email=None):
        data = {k: v for k, v in locals().items() if v not in (self, None)}
result = requests.post("%smt_translation/" % self.api_url,
headers=self.headers, data=json.dumps(data))
if result.status_code in (201, 202):
json_object = json.loads(result.content)
toret = self._build_mt_translation_object(json_object)
return toret
elif result.status_code == 401:
raise UnauthorizedException(result.content)
elif result.status_code == 400:
raise BadRequestException(result.content)
else:
raise Exception("Unknown Error return status %d: %s",
result.status_code, result.content[0:100])
def _build_translation_object(self, json_object):
source_lang = json_object.get("source_language", None)
translation = json_object.get("translation", None)
status = json_object.get("status", None)
translators = [Translator.from_json(t) for t in
json_object.get("translators", [])]
translation = Translation(
uid=json_object["uid"],
text=json_object["text"],
target_language=json_object.get('target_language', None),
source_language=json_object.get('source_language', None),
translatedText=json_object.get('translatedText', None),
status=json_object.get('status', None),
translators=translators,
topics=json_object.get('topics', None),
price=json_object.get('price', None),
balance=json_object.get('balance', None),
text_format=json_object.get('text_format', "text"),
origin=json_object.get('origin', None),
price_plan=json_object.get('price_plan', None),
client=json_object.get('client', None),
)
return translation
def _build_mt_translation_object(self, json_object):
source_lang = json_object.get("source_language", None)
translation = json_object.get("translation", None)
status = json_object.get("status", None)
translation = MTTranslation(
uid=json_object["uid"],
text=json_object["text"],
target_language=json_object.get('target_language', None),
source_language=json_object.get('source_language', None),
translatedText=json_object.get('translatedText', None),
status=json_object.get('status', None),
topics=json_object.get('topics', None),
text_format=json_object.get('text_format', "text"),
origin=json_object.get('origin', None),
client=json_object.get('client', None),
)
return translation
def _make_request(self, data):
# headers={'Authorization': 'ApiKey %s:%s'%(self.username,
# self.api_key),'content-type': 'application/json'}
if self.is_bulk:
f = requests.patch
else:
f = requests.post
result = f("%stranslation/" % self.api_url, headers=self.headers,
data=json.dumps(data))
if result.status_code in (201, 202):
json_object = json.loads(result.content)
toret = None
if self.is_bulk:
toret = []
for obj in json_object['objects']:
toret.append(self._build_translation_object(obj))
else:
toret = self._build_translation_object(json_object)
return toret
elif result.status_code == 401:
raise UnauthorizedException(result.content)
elif result.status_code == 400:
raise BadRequestException(result.content)
else:
raise Exception("Unknown Error return status %d: %s",
result.status_code, result.content[0:100])
def start_bulk_transaction(self):
self.bulk_data = []
self.is_bulk = True
def _post_bulk(self):
data = {'objects': self.bulk_data}
return self._make_request(data=data)
def post_bulk_translations(self, translations):
self.start_bulk_transaction()
for obj in translations:
obj = copy.deepcopy(obj)
text, target_language = obj['text'], obj['target_language']
del obj['text']
del obj['target_language']
self.post_translations(text, target_language, **obj)
return self._post_bulk()
def get_translations(self, status=None):
'''
Returns the translations requested by the user
'''
if status is not None:
result = self.api_call('translation/?status=%s' % status)
else:
result = self.api_call('translation/')
if result.status_code == 200:
translations_json = json.loads(result.content)["objects"]
translations = [Translation(**tj) for tj in translations_json]
else:
log.critical(
'Error status when fetching translation from server: {'
'}!'.format(
result.status_code))
translations = []
return translations
def get_translation(self, uid):
'''
Returns a translation with the given id
'''
result = self.api_call('translation/{}/'.format(uid))
if result.status_code == 200:
translation = Translation(**json.loads(result.content))
else:
log.critical(
'Error status when fetching translation from server: {'
'}!'.format(
result.status_code))
raise ValueError(result.content)
return translation
def upgrade_mt_translation(self, uid, properties=None):
"""
:param uid:
        :param properties: This is supposed to be a dictionary with the new
        property values to be replaced on the upgraded job
:return:
"""
api_url = self.api_url
uri = 'mt_translation/{}/'.format(uid)
url = "{}{}".format(api_url, uri)
data = {"status": "upgrade", "properties": properties}
return requests.patch(url, headers=self.headers, data=json.dumps(data))
def get_mt_translations(self, status=None):
'''
Returns the translations requested by the user
'''
if status is not None:
result = self.api_call('mt_translation/?status=%s' % status)
else:
result = self.api_call('mt_translation/')
if result.status_code == 200:
translations_json = json.loads(result.content)["objects"]
translations = [Translation(**tj) for tj in translations_json]
else:
log.critical(
'Error status when fetching machine translation from server: '
'{}!'.format(
result.status_code))
translations = []
return translations
def get_mt_translation(self, uid):
'''
Returns a translation with the given id
'''
result = self.api_call('mt_translation/{}/'.format(uid))
if result.status_code == 200:
translation = Translation(**json.loads(result.content))
else:
log.critical(
'Error status when fetching machine translation from server: '
'{}!'.format(
result.status_code))
raise ValueError(result.content)
return translation
def get_language_pairs(self, train_langs=None):
'''
Returns the language pairs available on unbabel
'''
if train_langs is None:
result = self.api_call('language_pair/')
else:
result = self.api_call(
'language_pair/?train_langs={}'.format(train_langs))
try:
langs_json = json.loads(result.content)
if 'error' in langs_json:
return []
languages = [LangPair(Language(
shortname=lang_json["lang_pair"]["source_language"][
"shortname"],
name=lang_json["lang_pair"]["source_language"]["name"]),
Language(shortname=lang_json["lang_pair"][
"target_language"]["shortname"],
name=lang_json["lang_pair"][
"target_language"]["name"])
) for lang_json in langs_json["objects"]]
except Exception as e:
log.exception("Error decoding get language pairs")
raise e
return languages
def get_tones(self):
'''
Returns the tones available on unbabel
'''
result = self.api_call('tone/')
tones_json = json.loads(result.content)
tones = [Tone(name=tone_json["tone"]["name"],
description=tone_json["tone"]["description"])
for tone_json in tones_json["objects"]]
return tones
def get_topics(self):
'''
Returns the topics available on unbabel
'''
result = self.api_call('topic/')
topics_json = json.loads(result.content)
topics = [Topic(name=topic_json["topic"]["name"])
for topic_json in topics_json["objects"]]
return topics
def get_account(self):
result = self.api_call('account/')
account_json = json.loads(result.content)
account_data = account_json['objects'][0]['account']
account = Account(**account_data)
return account
def get_word_count(self, text):
result = self.api_call('wordcount/', {"text": text})
if result.status_code == 201:
json_object = json.loads(result.content)
return json_object["word_count"]
else:
log.debug('Got a HTTP Error [{}]'.format(result.status_code))
raise Exception("Unknown Error")
def get_user(self):
result = self.api_call('app/user/', internal_api_call=True)
if result.status_code == 200:
return json.loads(result.content)
else:
log.debug('Got a HTTP Error [{}]'.format(result.status_code))
raise Exception("Unknown Error: %s" % result.status_code)
__all__ = ['UnbabelApi']
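# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original client): a minimal example of
# how UnbabelApi might be driven. The credentials, languages and text below
# are placeholders, not real values, and the function is never invoked here.
def _example_post_translation():  # pragma: no cover
    api = UnbabelApi(username="demo-user", api_key="demo-key", sandbox=True)
    job = api.post_translations(
        text="Hello, world!",
        target_language="pt",
        source_language="en",
        text_format="text",
    )
    # post_translations returns a Translation object on HTTP 201/202.
    return job.uid, job.status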
| []
| []
| [
"UNBABEL_SANDOX_API_URL",
"UNBABEL_API_URL"
]
| [] | ["UNBABEL_SANDOX_API_URL", "UNBABEL_API_URL"] | python | 2 | 0 | |
typed/int64/mailbox.go | package mailbox
import (
"sync"
"sync/atomic"
)
// New returns a new instance of Mailbox
func New(sz int) *Mailbox {
mb := Mailbox{
cap: sz,
tail: -1,
s: make([]int64, sz),
}
// Initialize the conds
mb.sc = sync.NewCond(&mb.mux)
mb.rc = sync.NewCond(&mb.mux)
return &mb
}
// MailboxIface defines the behaviour of a mailbox; it can be implemented
// for element types other than int64.
type MailboxIface interface {
Send(msg int64)
Batch(msgs ...int64)
Receive() (msg int64, state StateCode)
Listen(fn func(msg int64) (end bool)) (state StateCode)
Close()
}
// Mailbox is used to send and receive messages
type Mailbox struct {
mux sync.Mutex
sc *sync.Cond
rc *sync.Cond
s []int64
len int
cap int
head int
tail int
closed int32
}
func (m *Mailbox) isClosed() bool {
return atomic.LoadInt32(&m.closed) == 1
}
// rWait is a wait function for receivers
func (m *Mailbox) rWait() (ok bool) {
START:
if m.len > 0 {
// We have at least one unread message, return true
return true
}
if m.isClosed() {
// We have an empty inbox AND we are closed, done bro - done.
return false
}
// Let's wait for a signal..
m.rc.Wait()
// Signal received, let's check again!
goto START
}
var empty int64
// receive is the internal function for receiving messages
func (m *Mailbox) receive() (msg int64, state StateCode) {
if !m.rWait() {
// Ok was returned as false, set state to closed and return
state = StateClosed
return
}
// Set message as the current head
msg = m.s[m.head]
// Empty the current head value to avoid any retainment issues
m.s[m.head] = empty
// Goto the next index
if m.head++; m.head == m.cap {
// Our increment falls out of the bounds of our internal slice, reset to 0
m.head = 0
}
// Decrement the length
if m.len--; m.len == m.cap-1 {
// Notify the senders that we have a vacant entry
m.sc.Broadcast()
}
return
}
// send is the internal function used for sending messages
func (m *Mailbox) send(msg int64) {
CHECKFREE:
if m.cap-m.len == 0 {
// There are no vacant spots in the inbox, time to wait
m.sc.Wait()
// We received a signal, check again!
goto CHECKFREE
}
// Goto the next index
if m.tail++; m.tail == m.cap {
// Our increment falls out of the bounds of our internal slice, reset to 0
m.tail = 0
}
// Send the new tail as the provided message
m.s[m.tail] = msg
// Increment the length
if m.len++; m.len == 1 {
		// Notify the receivers that we have a new message
m.rc.Broadcast()
}
}
// Send will send a message
func (m *Mailbox) Send(msg int64) {
m.mux.Lock()
if m.isClosed() {
goto END
}
m.send(msg)
END:
m.mux.Unlock()
}
// Batch will send a batch of messages
func (m *Mailbox) Batch(msgs ...int64) {
m.mux.Lock()
if m.isClosed() {
goto END
}
// Iterate through each message
for _, msg := range msgs {
m.send(msg)
}
END:
m.mux.Unlock()
}
// Receive will receive a message and state (See the "State" constants for more information)
func (m *Mailbox) Receive() (msg int64, state StateCode) {
m.mux.Lock()
msg, state = m.receive()
m.mux.Unlock()
return
}
// Listen will pass all current and inbound messages to fn until either:
//	- The mailbox is empty and closed
//	- The provided fn returns end == true
func (m *Mailbox) Listen(fn func(msg int64) (end bool)) (state StateCode) {
var msg int64
m.mux.Lock()
// Iterate until break is called
for {
// Get message and state
if msg, state = m.receive(); state != StateOK {
// Receiving was not successful, break
break
}
// Provide message to provided function
if fn(msg) {
// End was returned as true, set state accordingly and break
state = StateEnded
break
}
}
m.mux.Unlock()
return
}
// Close will close a mailbox
func (m *Mailbox) Close() {
// Attempt to set closed state to 1 (from 0)
if !atomic.CompareAndSwapInt32(&m.closed, 0, 1) {
// Already closed, return early
return
}
// Notify senders to attempt to send again
m.sc.Broadcast()
	// Notify receivers to attempt to receive again
m.rc.Broadcast()
}
// StateCode represents the state of a response
type StateCode uint8
const (
// StateOK is returned when the request was OK
StateOK StateCode = iota
// StateEmpty is returned when the request was empty
// Note: This will be used when the reject option is implemented
StateEmpty
// StateEnded is returned when the client ends a listening
StateEnded
// StateClosed is returned when the calling mailbox is closed
StateClosed
)
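// Hedged usage sketch (not part of the original file): the Send/Listen/Close
// flow described by MailboxIface above. The capacity and payload values are
// illustrative only.
func ExampleMailbox() {
	mb := New(4)
	go func() {
		// Producer: enqueue a few messages, then close the mailbox so that
		// Listen terminates once the queue drains.
		mb.Batch(1, 2, 3)
		mb.Close()
	}()
	// Consumer: Listen blocks until the mailbox is empty and closed, or the
	// callback returns true.
	sum := int64(0)
	_ = mb.Listen(func(msg int64) (end bool) {
		sum += msg
		return false
	})
	_ = sum // 6 once Listen returns with StateClosed
}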
| []
| []
| []
| [] | [] | go | null | null | null |
commands/list.go | package commands
import (
"context"
"flag"
"fmt"
"os"
"sort"
"github.com/google/subcommands"
c "github.com/kotakanbe/go-cve-dictionary/config"
"github.com/kotakanbe/go-cve-dictionary/db"
jvn "github.com/kotakanbe/go-cve-dictionary/fetcher/jvn/xml"
"github.com/kotakanbe/go-cve-dictionary/fetcher/nvd"
log "github.com/kotakanbe/go-cve-dictionary/log"
"github.com/kotakanbe/go-cve-dictionary/models"
"github.com/kotakanbe/go-cve-dictionary/util"
"github.com/olekukonko/tablewriter"
)
// ListCmd is Subcommand for fetch Nvd information.
type ListCmd struct {
debug bool
debugSQL bool
logDir string
logJSON bool
dbpath string
dbtype string
httpProxy string
}
// Name return subcommand name
func (*ListCmd) Name() string { return "list" }
// Synopsis return synopsis
func (*ListCmd) Synopsis() string { return "Show a list of fetched feeds" }
// Usage return usage
func (*ListCmd) Usage() string {
return `list:
	list
[-dbtype=mysql|postgres|sqlite3|redis]
[-dbpath=$PWD/cve.sqlite3 or connection string]
[-http-proxy=http://192.168.0.1:8080]
[-debug]
[-debug-sql]
[-log-dir=/path/to/log]
[-log-json]
`
}
// SetFlags set flag
func (p *ListCmd) SetFlags(f *flag.FlagSet) {
f.BoolVar(&p.debug, "debug", false, "debug mode")
f.BoolVar(&p.debugSQL, "debug-sql", false, "SQL debug mode")
defaultLogDir := util.GetDefaultLogDir()
f.StringVar(&p.logDir, "log-dir", defaultLogDir, "/path/to/log")
f.BoolVar(&p.logJSON, "log-json", false, "output log as JSON")
pwd := os.Getenv("PWD")
f.StringVar(&p.dbpath, "dbpath", pwd+"/cve.sqlite3",
"/path/to/sqlite3 or SQL connection string")
f.StringVar(&p.dbtype, "dbtype", "sqlite3",
"Database type to store data in (sqlite3, mysql, postgres or redis supported)")
f.StringVar(
&p.httpProxy,
"http-proxy",
"",
"http://proxy-url:port (default: empty)",
)
}
// Execute execute
func (p *ListCmd) Execute(_ context.Context, f *flag.FlagSet, _ ...interface{}) subcommands.ExitStatus {
c.Conf.Debug = p.debug
c.Conf.DebugSQL = p.debugSQL
c.Conf.DBPath = p.dbpath
c.Conf.DBType = p.dbtype
c.Conf.HTTPProxy = p.httpProxy
log.SetLogger(p.logDir, c.Conf.Quiet, c.Conf.Debug, p.logJSON)
if !c.Conf.Validate() {
return subcommands.ExitUsageError
}
driver, locked, err := db.NewDB(c.Conf.DBType, c.Conf.DBPath, c.Conf.DebugSQL)
if err != nil {
if locked {
log.Errorf("Failed to Open DB. Close DB connection: %s", err)
return subcommands.ExitFailure
}
log.Errorf("%s", err)
return subcommands.ExitFailure
}
defer func() {
_ = driver.CloseDB()
}()
jsonMetas, err := nvd.ListFetchedFeeds(driver)
if err != nil {
log.Errorf("%s", err)
return subcommands.ExitFailure
}
sort.Slice(jsonMetas, func(i, j int) bool {
return jsonMetas[i].URL < jsonMetas[j].URL
})
jvnMetas, err := jvn.ListFetchedFeeds(driver)
if err != nil {
log.Errorf("%s", err)
return subcommands.ExitFailure
}
sort.Slice(jvnMetas, func(i, j int) bool {
return jvnMetas[i].URL < jvnMetas[j].URL
})
metas := []models.FeedMeta{}
for _, mm := range [][]models.FeedMeta{jsonMetas, jvnMetas} {
metas = append(metas, mm...)
}
data := [][]string{}
for _, meta := range metas {
data = append(data, meta.ToTableWriterRow())
}
table := tablewriter.NewWriter(os.Stdout)
table.SetHeader([]string{"Source", "Year", "Status", "Fetched", "Latest"})
table.SetBorder(true)
table.SetHeaderColor(
tablewriter.Colors{tablewriter.Bold},
tablewriter.Colors{tablewriter.Bold},
tablewriter.Colors{tablewriter.Bold},
tablewriter.Colors{tablewriter.Bold},
tablewriter.Colors{tablewriter.Bold})
table.AppendBulk(data)
table.Render()
cmds := []string{}
for _, mm := range [][]models.FeedMeta{jsonMetas, jvnMetas} {
cmd := getUpdateCommand(mm)
if cmd != "" {
cmds = append(cmds, cmd)
}
}
if 0 < len(cmds) {
fmt.Printf("\nTo update feeds, execute the following commands.\n")
for _, cmd := range cmds {
fmt.Println(cmd)
}
}
return subcommands.ExitSuccess
}
func getUpdateCommand(metas []models.FeedMeta) string {
if len(metas) == 0 {
return ""
}
years := map[string]bool{}
latest := false
for _, meta := range metas {
if meta.OutDated() {
y, _, err := meta.Year()
if err != nil {
log.Errorf("err")
continue
}
switch y {
case "modified", "recent":
latest = true
default:
years[y] = true
}
}
}
opt := metas[0].FetchOption()
if len(years) == 0 && latest {
return fmt.Sprintf("$ go-cve-dictionary %s -latest", opt)
}
if len(years) == 0 {
return ""
}
opt += " -years"
for y := range years {
opt += " " + y
}
return fmt.Sprintf("$ go-cve-dictionary %s", opt)
}
| [
"\"PWD\""
]
| []
| [
"PWD"
]
| [] | ["PWD"] | go | 1 | 0 | |
3.image_classification/train_lenet.py | #coding:utf-8
'''
Created by huxiaoman 2017.11.27
train_lenet.py: train LeNet to classify the CIFAR-10 dataset
'''
import sys, os
import paddle.v2 as paddle
from lenet import lenet
with_gpu = os.getenv('WITH_GPU', '0') != '0'  # use GPU only when WITH_GPU is set to a non-zero value
def main():
datadim = 3 * 32 * 32
classdim = 10
# PaddlePaddle init
paddle.init(use_gpu=with_gpu, trainer_count=7)
image = paddle.layer.data(
name="image", type=paddle.data_type.dense_vector(datadim))
# Add neural network config
# option 1. resnet
# net = resnet_cifar10(image, depth=32)
    # option 2. lenet
net = lenet(image)
out = paddle.layer.fc(
input=net, size=classdim, act=paddle.activation.Softmax())
lbl = paddle.layer.data(
name="label", type=paddle.data_type.integer_value(classdim))
cost = paddle.layer.classification_cost(input=out, label=lbl)
# Create parameters
parameters = paddle.parameters.create(cost)
# Create optimizer
momentum_optimizer = paddle.optimizer.Momentum(
momentum=0.9,
regularization=paddle.optimizer.L2Regularization(rate=0.0002 * 128),
learning_rate=0.1 / 128.0,
learning_rate_decay_a=0.1,
learning_rate_decay_b=50000 * 100,
learning_rate_schedule='discexp')
# End batch and end pass event handler
def event_handler(event):
if isinstance(event, paddle.event.EndIteration):
if event.batch_id % 100 == 0:
print "\nPass %d, Batch %d, Cost %f, %s" % (
event.pass_id, event.batch_id, event.cost, event.metrics)
else:
sys.stdout.write('.')
sys.stdout.flush()
if isinstance(event, paddle.event.EndPass):
# save parameters
with open('params_pass_%d.tar' % event.pass_id, 'w') as f:
parameters.to_tar(f)
result = trainer.test(
reader=paddle.batch(
paddle.dataset.cifar.test10(), batch_size=128),
feeding={'image': 0,
'label': 1})
print "\nTest with Pass %d, %s" % (event.pass_id, result.metrics)
# Create trainer
trainer = paddle.trainer.SGD(
cost=cost, parameters=parameters, update_equation=momentum_optimizer)
# Save the inference topology to protobuf.
inference_topology = paddle.topology.Topology(layers=out)
with open("inference_topology.pkl", 'wb') as f:
inference_topology.serialize_for_inference(f)
trainer.train(
reader=paddle.batch(
paddle.reader.shuffle(
paddle.dataset.cifar.train10(), buf_size=50000),
batch_size=128),
num_passes=200,
event_handler=event_handler,
feeding={'image': 0,
'label': 1})
# inference
from PIL import Image
import numpy as np
import os
def load_image(file):
im = Image.open(file)
im = im.resize((32, 32), Image.ANTIALIAS)
im = np.array(im).astype(np.float32)
        # The storage order of the loaded image is W(width),
# H(height), C(channel). PaddlePaddle requires
# the CHW order, so transpose them.
im = im.transpose((2, 0, 1)) # CHW
# In the training phase, the channel order of CIFAR
# image is B(Blue), G(green), R(Red). But PIL open
# image in RGB mode. It must swap the channel order.
im = im[(2, 1, 0), :, :] # BGR
im = im.flatten()
im = im / 255.0
return im
test_data = []
cur_dir = os.path.dirname(os.path.realpath(__file__))
test_data.append((load_image(cur_dir + '/image/dog.png'), ))
# users can remove the comments and change the model name
# with open('params_pass_50.tar', 'r') as f:
# parameters = paddle.parameters.Parameters.from_tar(f)
probs = paddle.infer(
output_layer=out, parameters=parameters, input=test_data)
lab = np.argsort(-probs) # probs and lab are the results of one batch data
print "Label of image/dog.png is: %d" % lab[0][0]
if __name__ == '__main__':
main()
| []
| []
| [
"WITH_GPU"
]
| [] | ["WITH_GPU"] | python | 1 | 0 | |
mitmproxy/net/tls.py | import ipaddress
import os
import threading
from enum import Enum
from functools import lru_cache
from pathlib import Path
from typing import Any, BinaryIO, Callable, Iterable, Optional
import certifi
from OpenSSL.crypto import X509
from cryptography.hazmat.primitives.asymmetric import rsa
from OpenSSL import SSL, crypto
from mitmproxy import certs
# redeclared here for strict type checking
class Method(Enum):
TLS_SERVER_METHOD = SSL.TLS_SERVER_METHOD
TLS_CLIENT_METHOD = SSL.TLS_CLIENT_METHOD
try:
SSL._lib.TLS_server_method # type: ignore
except AttributeError as e: # pragma: no cover
raise RuntimeError(
"Your installation of the cryptography Python package is outdated."
) from e
class Version(Enum):
UNBOUNDED = 0
SSL3 = SSL.SSL3_VERSION
TLS1 = SSL.TLS1_VERSION
TLS1_1 = SSL.TLS1_1_VERSION
TLS1_2 = SSL.TLS1_2_VERSION
TLS1_3 = SSL.TLS1_3_VERSION
class Verify(Enum):
VERIFY_NONE = SSL.VERIFY_NONE
VERIFY_PEER = SSL.VERIFY_PEER
DEFAULT_MIN_VERSION = Version.TLS1_2
DEFAULT_MAX_VERSION = Version.UNBOUNDED
DEFAULT_OPTIONS = SSL.OP_CIPHER_SERVER_PREFERENCE | SSL.OP_NO_COMPRESSION
class MasterSecretLogger:
def __init__(self, filename: Path):
self.filename = filename.expanduser()
self.f: Optional[BinaryIO] = None
self.lock = threading.Lock()
# required for functools.wraps, which pyOpenSSL uses.
__name__ = "MasterSecretLogger"
def __call__(self, connection: SSL.Connection, keymaterial: bytes) -> None:
with self.lock:
if self.f is None:
self.filename.parent.mkdir(parents=True, exist_ok=True)
self.f = self.filename.open("ab")
self.f.write(b"\n")
self.f.write(keymaterial + b"\n")
self.f.flush()
def close(self):
with self.lock:
if self.f is not None:
self.f.close()
def make_master_secret_logger(filename: Optional[str]) -> Optional[MasterSecretLogger]:
if filename:
return MasterSecretLogger(Path(filename))
return None
log_master_secret = make_master_secret_logger(
os.getenv("MITMPROXY_SSLKEYLOGFILE") or os.getenv("SSLKEYLOGFILE")
)
def _create_ssl_context(
*,
method: Method,
min_version: Version,
max_version: Version,
cipher_list: Optional[Iterable[str]],
) -> SSL.Context:
context = SSL.Context(method.value)
ok = SSL._lib.SSL_CTX_set_min_proto_version(context._context, min_version.value) # type: ignore
ok += SSL._lib.SSL_CTX_set_max_proto_version(context._context, max_version.value) # type: ignore
if ok != 2:
raise RuntimeError(
f"Error setting TLS versions ({min_version=}, {max_version=}). "
"The version you specified may be unavailable in your libssl."
)
# Options
context.set_options(DEFAULT_OPTIONS)
# Cipher List
if cipher_list is not None:
try:
context.set_cipher_list(b":".join(x.encode() for x in cipher_list))
except SSL.Error as e:
raise RuntimeError(f"SSL cipher specification error: {e}") from e
# SSLKEYLOGFILE
if log_master_secret:
context.set_keylog_callback(log_master_secret)
return context
@lru_cache(256)
def create_proxy_server_context(
*,
min_version: Version,
max_version: Version,
cipher_list: Optional[tuple[str, ...]],
verify: Verify,
hostname: Optional[str],
ca_path: Optional[str],
ca_pemfile: Optional[str],
client_cert: Optional[str],
alpn_protos: Optional[tuple[bytes, ...]],
) -> SSL.Context:
context: SSL.Context = _create_ssl_context(
method=Method.TLS_CLIENT_METHOD,
min_version=min_version,
max_version=max_version,
cipher_list=cipher_list,
)
if verify is not Verify.VERIFY_NONE and hostname is None:
raise ValueError("Cannot validate certificate hostname without SNI")
context.set_verify(verify.value, None)
if hostname is not None:
assert isinstance(hostname, str)
# Manually enable hostname verification on the context object.
# https://wiki.openssl.org/index.php/Hostname_validation
param = SSL._lib.SSL_CTX_get0_param(context._context) # type: ignore
# Matching on the CN is disabled in both Chrome and Firefox, so we disable it, too.
# https://www.chromestatus.com/feature/4981025180483584
SSL._lib.X509_VERIFY_PARAM_set_hostflags( # type: ignore
param,
SSL._lib.X509_CHECK_FLAG_NO_PARTIAL_WILDCARDS | SSL._lib.X509_CHECK_FLAG_NEVER_CHECK_SUBJECT, # type: ignore
)
try:
ip: bytes = ipaddress.ip_address(hostname).packed
except ValueError:
SSL._openssl_assert( # type: ignore
SSL._lib.X509_VERIFY_PARAM_set1_host(param, hostname.encode(), len(hostname.encode())) == 1 # type: ignore
)
else:
SSL._openssl_assert( # type: ignore
SSL._lib.X509_VERIFY_PARAM_set1_ip(param, ip, len(ip)) == 1 # type: ignore
)
if ca_path is None and ca_pemfile is None:
ca_pemfile = certifi.where()
try:
context.load_verify_locations(ca_pemfile, ca_path)
except SSL.Error as e:
raise RuntimeError(
f"Cannot load trusted certificates ({ca_pemfile=}, {ca_path=})."
) from e
# Client Certs
if client_cert:
try:
context.use_privatekey_file(client_cert)
context.use_certificate_chain_file(client_cert)
except SSL.Error as e:
raise RuntimeError(f"Cannot load TLS client certificate: {e}") from e
if alpn_protos:
# advertise application layer protocols
context.set_alpn_protos(alpn_protos)
return context
@lru_cache(256)
def create_client_proxy_context(
*,
min_version: Version,
max_version: Version,
cipher_list: Optional[tuple[str, ...]],
cert: certs.Cert,
key: rsa.RSAPrivateKey,
chain_file: Optional[Path],
alpn_select_callback: Optional[Callable[[SSL.Connection, list[bytes]], Any]],
request_client_cert: bool,
extra_chain_certs: tuple[certs.Cert, ...],
dhparams: certs.DHParams,
) -> SSL.Context:
context: SSL.Context = _create_ssl_context(
method=Method.TLS_SERVER_METHOD,
min_version=min_version,
max_version=max_version,
cipher_list=cipher_list,
)
context.use_certificate(cert.to_pyopenssl())
context.use_privatekey(crypto.PKey.from_cryptography_key(key))
if chain_file is not None:
try:
context.load_verify_locations(str(chain_file), None)
except SSL.Error as e:
raise RuntimeError(f"Cannot load certificate chain ({chain_file}).") from e
if alpn_select_callback is not None:
assert callable(alpn_select_callback)
context.set_alpn_select_callback(alpn_select_callback)
if request_client_cert:
# The request_client_cert argument requires some explanation. We're
# supposed to be able to do this with no negative effects - if the
# client has no cert to present, we're notified and proceed as usual.
# Unfortunately, Android seems to have a bug (tested on 4.2.2) - when
# an Android client is asked to present a certificate it does not
# have, it hangs up, which is frankly bogus. Some time down the track
# we may be able to make the proper behaviour the default again, but
# until then we're conservative.
context.set_verify(Verify.VERIFY_PEER.value, accept_all)
else:
context.set_verify(Verify.VERIFY_NONE.value, None)
for i in extra_chain_certs:
context.add_extra_chain_cert(i.to_pyopenssl())
if dhparams:
SSL._lib.SSL_CTX_set_tmp_dh(context._context, dhparams) # type: ignore
return context
def accept_all(
conn_: SSL.Connection,
x509: X509,
errno: int,
err_depth: int,
is_cert_verified: int,
) -> bool:
# Return true to prevent cert verification error
return True
def is_tls_record_magic(d):
"""
Returns:
True, if the passed bytes start with the TLS record magic bytes.
False, otherwise.
"""
d = d[:3]
# TLS ClientHello magic, works for SSLv3, TLSv1.0, TLSv1.1, TLSv1.2, and TLSv1.3
# http://www.moserware.com/2009/06/first-few-milliseconds-of-https.html#client-hello
# https://tls13.ulfheim.net/
return len(d) == 3 and d[0] == 0x16 and d[1] == 0x03 and 0x0 <= d[2] <= 0x03
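# Hedged usage sketch (not part of mitmproxy itself): a tiny demonstration of
# the record-magic check above on hand-written byte strings. The payloads are
# illustrative only.
def _example_is_tls_record_magic() -> None:
    assert is_tls_record_magic(b"\x16\x03\x01")        # TLS ClientHello record
    assert not is_tls_record_magic(b"GET / HTTP/1.1")  # plaintext HTTP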
| []
| []
| [
"MITMPROXY_SSLKEYLOGFILE",
"SSLKEYLOGFILE"
]
| [] | ["MITMPROXY_SSLKEYLOGFILE", "SSLKEYLOGFILE"] | python | 2 | 0 | |
daemon/execdriver/lxc/init.go | package lxc
import (
"encoding/json"
"fmt"
"io/ioutil"
"net"
"os"
"strings"
"syscall"
"github.com/docker/libcontainer/netlink"
"github.com/dotcloud/docker/daemon/execdriver"
)
// Clear environment pollution introduced by lxc-start
func setupEnv(args *execdriver.InitArgs) error {
// Get env
var env []string
content, err := ioutil.ReadFile(".dockerenv")
if err != nil {
return fmt.Errorf("Unable to load environment variables: %v", err)
}
if err := json.Unmarshal(content, &env); err != nil {
return fmt.Errorf("Unable to unmarshal environment variables: %v", err)
}
// Propagate the plugin-specific container env variable
env = append(env, "container="+os.Getenv("container"))
args.Env = env
os.Clearenv()
for _, kv := range args.Env {
parts := strings.SplitN(kv, "=", 2)
if len(parts) == 1 {
parts = append(parts, "")
}
os.Setenv(parts[0], parts[1])
}
return nil
}
func setupHostname(args *execdriver.InitArgs) error {
hostname := getEnv(args, "HOSTNAME")
if hostname == "" {
return nil
}
return setHostname(hostname)
}
// Setup networking
func setupNetworking(args *execdriver.InitArgs) error {
if args.Ip != "" {
// eth0
iface, err := net.InterfaceByName("eth0")
if err != nil {
return fmt.Errorf("Unable to set up networking: %v", err)
}
ip, ipNet, err := net.ParseCIDR(args.Ip)
if err != nil {
return fmt.Errorf("Unable to set up networking: %v", err)
}
if err := netlink.NetworkLinkAddIp(iface, ip, ipNet); err != nil {
return fmt.Errorf("Unable to set up networking: %v", err)
}
if err := netlink.NetworkSetMTU(iface, args.Mtu); err != nil {
return fmt.Errorf("Unable to set MTU: %v", err)
}
if err := netlink.NetworkLinkUp(iface); err != nil {
return fmt.Errorf("Unable to set up networking: %v", err)
}
// loopback
iface, err = net.InterfaceByName("lo")
if err != nil {
return fmt.Errorf("Unable to set up networking: %v", err)
}
if err := netlink.NetworkLinkUp(iface); err != nil {
return fmt.Errorf("Unable to set up networking: %v", err)
}
}
if args.Gateway != "" {
gw := net.ParseIP(args.Gateway)
if gw == nil {
return fmt.Errorf("Unable to set up networking, %s is not a valid gateway IP", args.Gateway)
}
if err := netlink.AddDefaultGw(gw.String(), "eth0"); err != nil {
return fmt.Errorf("Unable to set up networking: %v", err)
}
}
return nil
}
// Setup working directory
func setupWorkingDirectory(args *execdriver.InitArgs) error {
if args.WorkDir == "" {
return nil
}
if err := syscall.Chdir(args.WorkDir); err != nil {
return fmt.Errorf("Unable to change dir to %v: %v", args.WorkDir, err)
}
return nil
}
func getEnv(args *execdriver.InitArgs, key string) string {
for _, kv := range args.Env {
parts := strings.SplitN(kv, "=", 2)
if parts[0] == key && len(parts) == 2 {
return parts[1]
}
}
return ""
}
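// Hedged usage sketch (not part of the original file): the order in which an
// lxc init path would typically call the helpers above. The InitArgs value is
// whatever the caller decoded from the driver; nothing here runs automatically.
func exampleContainerSetup(args *execdriver.InitArgs) error {
	if err := setupEnv(args); err != nil {
		return err
	}
	if err := setupHostname(args); err != nil {
		return err
	}
	if err := setupNetworking(args); err != nil {
		return err
	}
	return setupWorkingDirectory(args)
}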
| [
"\"container\""
]
| []
| [
"container"
]
| [] | ["container"] | go | 1 | 0 | |
pkg/api/gh/pkg/cmdutil/legacy.go | package cmdutil
import (
"fmt"
"os"
"github.com/scmn-dev/secman/pkg/api/gh/core/config"
)
// TODO: consider passing via Factory
// TODO: support per-hostname settings
func DetermineEditor(cf func() (config.Config, error)) (string, error) {
editorCommand := os.Getenv("GH_EDITOR")
if editorCommand == "" {
cfg, err := cf()
if err != nil {
return "", fmt.Errorf("could not read config: %w", err)
}
editorCommand, _ = cfg.Get("", "editor")
}
return editorCommand, nil
}
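// Hedged usage sketch (not part of the original file): how a command might
// resolve the editor. GH_EDITOR wins when set; otherwise the config closure
// is consulted. The zero-value config returned below is a stand-in for the
// factory's real config loader.
func exampleDetermineEditor() (string, error) {
	return DetermineEditor(func() (config.Config, error) {
		var cfg config.Config
		return cfg, fmt.Errorf("config loading elided in this sketch")
	})
}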
| [
"\"GH_EDITOR\""
]
| []
| [
"GH_EDITOR"
]
| [] | ["GH_EDITOR"] | go | 1 | 0 | |
doc/platutils.py | #!/usr/bin/python2.7
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Detailed platform detection functions. Differentiate Linux distributions
and MS Windows frameworks.
"""
from __future__ import print_function
import sys
import re
LINUX_RELEASE_FILES = [
"/etc/vmware-release",
"/etc/redhat-release",
"/etc/gentoo-release",
"/etc/lsb-release", # Ubuntu, possibly others
]
LINUX_RELEASE_RE = re.compile(r"(.*) release (.*)")
class OSInfo(object):
def __init__(self, **kwargs):
for name, value in kwargs.items():
setattr(self, name, value)
def __str__(self):
s = ["Platform info:"]
for name in ("platform", "arch", "osname", "osversion",
"distribution", "release"):
s.append("%15.15s: %s" % (name, getattr(self, name, "Unknown")))
return "\n".join(s)
def is_linux(self):
return self.platform.startswith("linux")
def is_windows(self):
return self.platform == "win32"
def is_cli(self):
return self.platform == "cli"
def is_gentoo(self):
return self.distribution.startswith("Gentoo")
def is_vmware(self):
return self.distribution.startswith("VMware")
def is_rhel(self):
return self.distribution.startswith("Red")
def is_centos(self):
return self.distribution.startswith("Cent")
def is_ubuntu(self):
return self.distribution.startswith("Ubu")
def is_redhat(self): # rpm-based
dist = self.distribution
return dist.startswith("Red") or dist.startswith("Cent")
def is_osx(self):
return self.distribution.startswith("Mac OS X")
def _get_linux_dist():
for fname in LINUX_RELEASE_FILES:
if os.path.exists(fname):
text = open(fname).read()
mo = LINUX_RELEASE_RE.search(text)
if mo:
return map(str.strip, mo.groups())
else:
pass
return "Unknown", "Unknown"
def _get_darwin_dist():
import subprocess
text = subprocess.check_output(["sw_vers"], shell=False)
return re.search(r"^ProductName:\t(.*)\nProductVersion:\t(.*)", text, re.M).groups()
def get_platform():
global os
rv = OSInfo()
rv.platform = sys.platform
if sys.platform.startswith("linux"):
import os # making this global breaks on IronPython
osname, _, kernel, _, arch = os.uname()
rv.arch = arch
rv.osname = osname
rv.osversion = kernel
rv.distribution, rv.release = _get_linux_dist()
elif sys.platform.startswith("darwin"):
import os # making this global breaks on IronPython
osname, _, kernel, _, arch = os.uname()
rv.arch = arch
rv.osname = osname
rv.osversion = kernel
rv.distribution, rv.release = _get_darwin_dist()
elif sys.platform in ("win32", "cli"):
import os
rv.arch = os.environ["PROCESSOR_ARCHITECTURE"]
rv.osname = os.environ["OS"]
rv.distribution = "Microsoft"
if sys.platform == "win32":
import win32api
major, minor, build, api, extra = win32api.GetVersionEx()
rv.osversion = "%d.%d.%d-%s" % (major, minor, build, extra)
rv.release = "Unknown"
elif sys.platform == "cli":
rv.osversion = "Unknown"
rv.release = "Unknown"
return rv
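# Hedged usage sketch: how calling code might branch on the detected platform
# using the OSInfo predicates above. The return strings are illustrative only.
def _example_platform_branch():
    info = get_platform()
    if info.is_linux() and (info.is_rhel() or info.is_centos()):
        return "rpm-based install"
    if info.is_windows():
        return "MSI install"
    return "generic install"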
if __name__ == "__main__":
print (get_platform())
| []
| []
| [
"OS",
"PROCESSOR_ARCHITECTURE"
]
| [] | ["OS", "PROCESSOR_ARCHITECTURE"] | python | 2 | 0 | |
internal/suites/utils.go | package suites
import (
"context"
"fmt"
"log"
"os"
"path/filepath"
"runtime"
"strings"
"time"
"github.com/go-rod/rod"
)
// GetLoginBaseURL returns the URL of the login portal and the path prefix if specified.
func GetLoginBaseURL() string {
if PathPrefix != "" {
return LoginBaseURL + PathPrefix
}
return LoginBaseURL
}
func (rs *RodSession) collectCoverage(page *rod.Page) {
coverageDir := "../../web/.nyc_output"
now := time.Now()
resp, err := page.Eval("JSON.stringify(window.__coverage__)")
if err != nil {
log.Fatal(err)
}
coverageData := fmt.Sprintf("%v", resp.Value)
_ = os.MkdirAll(coverageDir, 0775)
if coverageData != "<nil>" {
err = os.WriteFile(fmt.Sprintf("%s/coverage-%d.json", coverageDir, now.Unix()), []byte(coverageData), 0664) //nolint:gosec
if err != nil {
log.Fatal(err)
}
err = filepath.Walk("../../web/.nyc_output", fixCoveragePath)
if err != nil {
log.Fatal(err)
}
}
}
func (rs *RodSession) collectScreenshot(err error, page *rod.Page) {
if err == context.DeadlineExceeded && os.Getenv("CI") == t {
base := "/buildkite/screenshots"
build := os.Getenv("BUILDKITE_BUILD_NUMBER")
suite := strings.ToLower(os.Getenv("SUITE"))
job := os.Getenv("BUILDKITE_JOB_ID")
path := filepath.Join(fmt.Sprintf("%s/%s/%s/%s", base, build, suite, job)) //nolint: gocritic
if err := os.MkdirAll(path, 0755); err != nil {
log.Fatal(err)
}
pc, _, _, _ := runtime.Caller(2)
fn := runtime.FuncForPC(pc)
p := "github.com/authelia/authelia/v4/internal/suites."
r := strings.NewReplacer(p, "", "(", "", ")", "", "*", "", ".", "-")
page.MustScreenshotFullPage(fmt.Sprintf("%s/%s.jpg", path, r.Replace(fn.Name())))
}
}
func fixCoveragePath(path string, file os.FileInfo, err error) error {
if err != nil {
return err
}
if file.IsDir() {
return nil
}
coverage, err := filepath.Match("*.json", file.Name())
if err != nil {
return err
}
if coverage {
read, err := os.ReadFile(path)
if err != nil {
return err
}
wd, _ := os.Getwd()
ciPath := strings.TrimSuffix(wd, "internal/suites")
content := strings.ReplaceAll(string(read), "/node/src/app/", ciPath+"web/")
err = os.WriteFile(path, []byte(content), 0)
if err != nil {
return err
}
}
return nil
}
| [
"\"CI\"",
"\"BUILDKITE_BUILD_NUMBER\"",
"\"SUITE\"",
"\"BUILDKITE_JOB_ID\""
]
| []
| [
"BUILDKITE_BUILD_NUMBER",
"SUITE",
"CI",
"BUILDKITE_JOB_ID"
]
| [] | ["BUILDKITE_BUILD_NUMBER", "SUITE", "CI", "BUILDKITE_JOB_ID"] | go | 4 | 0 | |
src/main/java/com/zebrunner/agent/core/registrar/ci/CircleCiContextResolver.java | package com.zebrunner.agent.core.registrar.ci;
import com.zebrunner.agent.core.registrar.domain.CiContextDTO;
import com.zebrunner.agent.core.registrar.domain.CiType;
import lombok.AccessLevel;
import lombok.NoArgsConstructor;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;
@NoArgsConstructor(access = AccessLevel.PACKAGE)
class CircleCiContextResolver implements CiContextResolver {
// https://circleci.com/docs/2.0/env-vars/#built-in-environment-variables
private static final String CIRCLECI_ENV_VARIABLE = "CIRCLECI";
private static final List<String> ENV_VARIABLE_PREFIXES = Arrays.asList(
"CIRCLE",
"HOSTNAME"
);
@Override
public CiContextDTO resolve() {
Map<String, String> envVariables = System.getenv();
if (envVariables.containsKey(CIRCLECI_ENV_VARIABLE)) {
envVariables = collectEnvironmentVariables(envVariables);
return new CiContextDTO(CiType.CIRCLE_CI, envVariables);
}
return null;
}
private Map<String, String> collectEnvironmentVariables(Map<String, String> envVariables) {
return envVariables.keySet()
.stream()
.filter(key -> ENV_VARIABLE_PREFIXES.stream()
.anyMatch(key::startsWith))
.collect(Collectors.toMap(Function.identity(), envVariables::get));
}
}
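// Hedged usage sketch (not part of the original file): how the resolver above
// is typically consulted. On CircleCI the CIRCLECI variable is present, so
// resolve() returns a CiContextDTO carrying every CIRCLE*/HOSTNAME variable;
// on other machines it returns null and the caller moves on to the next
// resolver in the chain.
class CircleCiContextResolverUsageSketch {
    static CiContextDTO resolveOrNull() {
        return new CircleCiContextResolver().resolve();
    }
}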
| []
| []
| []
| [] | [] | java | 0 | 0 | |
Lib/idlelib/PyShell.py | #! /usr/bin/env python
from __future__ import print_function
import os
import os.path
import sys
import string
import getopt
import re
import socket
import time
import threading
import traceback
import types
import io
import linecache
from code import InteractiveInterpreter
from platform import python_version, system
try:
from Tkinter import *
except ImportError:
print("** IDLE can't import Tkinter.\n"
"Your Python may not be configured for Tk. **", file=sys.__stderr__)
sys.exit(1)
import tkMessageBox
from idlelib.EditorWindow import EditorWindow, fixwordbreaks
from idlelib.FileList import FileList
from idlelib.ColorDelegator import ColorDelegator
from idlelib.UndoDelegator import UndoDelegator
from idlelib.OutputWindow import OutputWindow
from idlelib.configHandler import idleConf
from idlelib import idlever
from idlelib import rpc
from idlelib import Debugger
from idlelib import RemoteDebugger
from idlelib import macosxSupport
IDENTCHARS = string.ascii_letters + string.digits + "_"
HOST = '127.0.0.1' # python execution server on localhost loopback
PORT = 0 # someday pass in host, port for remote debug capability
try:
from signal import SIGTERM
except ImportError:
SIGTERM = 15
# Override warnings module to write to warning_stream. Initialize to send IDLE
# internal warnings to the console. ScriptBinding.check_syntax() will
# temporarily redirect the stream to the shell window to display warnings when
# checking user's code.
warning_stream = sys.__stderr__ # None, at least on Windows, if no console.
import warnings
def idle_formatwarning(message, category, filename, lineno, line=None):
"""Format warnings the IDLE way."""
s = "\nWarning (from warnings module):\n"
s += ' File \"%s\", line %s\n' % (filename, lineno)
if line is None:
line = linecache.getline(filename, lineno)
line = line.strip()
if line:
s += " %s\n" % line
s += "%s: %s\n" % (category.__name__, message)
return s
def idle_showwarning(
message, category, filename, lineno, file=None, line=None):
"""Show Idle-format warning (after replacing warnings.showwarning).
The differences are the formatter called, the file=None replacement,
    which can be None, the capture of the consequent AttributeError,
and the output of a hard-coded prompt.
"""
if file is None:
file = warning_stream
try:
file.write(idle_formatwarning(
message, category, filename, lineno, line=line))
file.write(">>> ")
except (AttributeError, IOError):
pass # if file (probably __stderr__) is invalid, skip warning.
_warnings_showwarning = None
def capture_warnings(capture):
"Replace warning.showwarning with idle_showwarning, or reverse."
global _warnings_showwarning
if capture:
if _warnings_showwarning is None:
_warnings_showwarning = warnings.showwarning
warnings.showwarning = idle_showwarning
else:
if _warnings_showwarning is not None:
warnings.showwarning = _warnings_showwarning
_warnings_showwarning = None
capture_warnings(True)
def extended_linecache_checkcache(filename=None,
orig_checkcache=linecache.checkcache):
"""Extend linecache.checkcache to preserve the <pyshell#...> entries
Rather than repeating the linecache code, patch it to save the
<pyshell#...> entries, call the original linecache.checkcache()
(skipping them), and then restore the saved entries.
orig_checkcache is bound at definition time to the original
method, allowing it to be patched.
"""
cache = linecache.cache
save = {}
for key in list(cache):
if key[:1] + key[-1:] == '<>':
save[key] = cache.pop(key)
orig_checkcache(filename)
cache.update(save)
# Patch linecache.checkcache():
linecache.checkcache = extended_linecache_checkcache
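# Hedged illustration (not part of IDLE itself): the patched checkcache keeps
# pseudo-file entries such as "<pyshell#0>" alive while real files are still
# re-validated. The cache tuple below mimics linecache's
# (size, mtime, lines, fullname) layout and is illustrative only.
def _example_checkcache_preserves_shell_entries():
    linecache.cache["<pyshell#0>"] = (9, 0, ["print 1\n"], "<pyshell#0>")
    linecache.checkcache()
    assert "<pyshell#0>" in linecache.cache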
class PyShellEditorWindow(EditorWindow):
"Regular text edit window in IDLE, supports breakpoints"
def __init__(self, *args):
self.breakpoints = []
EditorWindow.__init__(self, *args)
self.text.bind("<<set-breakpoint-here>>", self.set_breakpoint_here)
self.text.bind("<<clear-breakpoint-here>>", self.clear_breakpoint_here)
self.text.bind("<<open-python-shell>>", self.flist.open_shell)
self.breakpointPath = os.path.join(idleConf.GetUserCfgDir(),
'breakpoints.lst')
# whenever a file is changed, restore breakpoints
def filename_changed_hook(old_hook=self.io.filename_change_hook,
self=self):
self.restore_file_breaks()
old_hook()
self.io.set_filename_change_hook(filename_changed_hook)
if self.io.filename:
self.restore_file_breaks()
self.color_breakpoint_text()
rmenu_specs = [
("Cut", "<<cut>>", "rmenu_check_cut"),
("Copy", "<<copy>>", "rmenu_check_copy"),
("Paste", "<<paste>>", "rmenu_check_paste"),
("Set Breakpoint", "<<set-breakpoint-here>>", None),
("Clear Breakpoint", "<<clear-breakpoint-here>>", None)
]
def color_breakpoint_text(self, color=True):
"Turn colorizing of breakpoint text on or off"
if self.io is None:
# possible due to update in restore_file_breaks
return
if color:
theme = idleConf.GetOption('main','Theme','name')
cfg = idleConf.GetHighlight(theme, "break")
else:
cfg = {'foreground': '', 'background': ''}
self.text.tag_config('BREAK', cfg)
def set_breakpoint(self, lineno):
text = self.text
filename = self.io.filename
text.tag_add("BREAK", "%d.0" % lineno, "%d.0" % (lineno+1))
try:
i = self.breakpoints.index(lineno)
except ValueError: # only add if missing, i.e. do once
self.breakpoints.append(lineno)
try: # update the subprocess debugger
debug = self.flist.pyshell.interp.debugger
debug.set_breakpoint_here(filename, lineno)
except: # but debugger may not be active right now....
pass
def set_breakpoint_here(self, event=None):
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
lineno = int(float(text.index("insert")))
self.set_breakpoint(lineno)
def clear_breakpoint_here(self, event=None):
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
lineno = int(float(text.index("insert")))
try:
self.breakpoints.remove(lineno)
except:
pass
text.tag_remove("BREAK", "insert linestart",\
"insert lineend +1char")
try:
debug = self.flist.pyshell.interp.debugger
debug.clear_breakpoint_here(filename, lineno)
except:
pass
def clear_file_breaks(self):
if self.breakpoints:
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
self.breakpoints = []
text.tag_remove("BREAK", "1.0", END)
try:
debug = self.flist.pyshell.interp.debugger
debug.clear_file_breaks(filename)
except:
pass
def store_file_breaks(self):
"Save breakpoints when file is saved"
# XXX 13 Dec 2002 KBK Currently the file must be saved before it can
# be run. The breaks are saved at that time. If we introduce
# a temporary file save feature the save breaks functionality
# needs to be re-verified, since the breaks at the time the
# temp file is created may differ from the breaks at the last
# permanent save of the file. Currently, a break introduced
# after a save will be effective, but not persistent.
# This is necessary to keep the saved breaks synched with the
# saved file.
#
# Breakpoints are set as tagged ranges in the text.
# Since a modified file has to be saved before it is
# run, and since self.breakpoints (from which the subprocess
# debugger is loaded) is updated during the save, the visible
# breaks stay synched with the subprocess even if one of these
# unexpected breakpoint deletions occurs.
breaks = self.breakpoints
filename = self.io.filename
try:
with open(self.breakpointPath,"r") as old_file:
lines = old_file.readlines()
except IOError:
lines = []
try:
with open(self.breakpointPath,"w") as new_file:
for line in lines:
if not line.startswith(filename + '='):
new_file.write(line)
self.update_breakpoints()
breaks = self.breakpoints
if breaks:
new_file.write(filename + '=' + str(breaks) + '\n')
except IOError as err:
if not getattr(self.root, "breakpoint_error_displayed", False):
self.root.breakpoint_error_displayed = True
tkMessageBox.showerror(title='IDLE Error',
message='Unable to update breakpoint list:\n%s'
% str(err),
parent=self.text)
def restore_file_breaks(self):
self.text.update() # this enables setting "BREAK" tags to be visible
if self.io is None:
# can happen if IDLE closes due to the .update() call
return
filename = self.io.filename
if filename is None:
return
if os.path.isfile(self.breakpointPath):
lines = open(self.breakpointPath,"r").readlines()
for line in lines:
if line.startswith(filename + '='):
breakpoint_linenumbers = eval(line[len(filename)+1:])
for breakpoint_linenumber in breakpoint_linenumbers:
self.set_breakpoint(breakpoint_linenumber)
def update_breakpoints(self):
"Retrieves all the breakpoints in the current window"
text = self.text
ranges = text.tag_ranges("BREAK")
linenumber_list = self.ranges_to_linenumbers(ranges)
self.breakpoints = linenumber_list
def ranges_to_linenumbers(self, ranges):
lines = []
for index in range(0, len(ranges), 2):
lineno = int(float(ranges[index].string))
end = int(float(ranges[index+1].string))
while lineno < end:
lines.append(lineno)
lineno += 1
return lines
# XXX 13 Dec 2002 KBK Not used currently
# def saved_change_hook(self):
# "Extend base method - clear breaks if module is modified"
# if not self.get_saved():
# self.clear_file_breaks()
# EditorWindow.saved_change_hook(self)
def _close(self):
"Extend base method - clear breaks when module is closed"
self.clear_file_breaks()
EditorWindow._close(self)
class PyShellFileList(FileList):
"Extend base class: IDLE supports a shell and breakpoints"
# override FileList's class variable, instances return PyShellEditorWindow
# instead of EditorWindow when new edit windows are created.
EditorWindow = PyShellEditorWindow
pyshell = None
def open_shell(self, event=None):
if self.pyshell:
self.pyshell.top.wakeup()
else:
self.pyshell = PyShell(self)
if self.pyshell:
if not self.pyshell.begin():
return None
return self.pyshell
class ModifiedColorDelegator(ColorDelegator):
"Extend base class: colorizer for the shell window itself"
def __init__(self):
ColorDelegator.__init__(self)
self.LoadTagDefs()
def recolorize_main(self):
self.tag_remove("TODO", "1.0", "iomark")
self.tag_add("SYNC", "1.0", "iomark")
ColorDelegator.recolorize_main(self)
def LoadTagDefs(self):
ColorDelegator.LoadTagDefs(self)
theme = idleConf.GetOption('main','Theme','name')
self.tagdefs.update({
"stdin": {'background':None,'foreground':None},
"stdout": idleConf.GetHighlight(theme, "stdout"),
"stderr": idleConf.GetHighlight(theme, "stderr"),
"console": idleConf.GetHighlight(theme, "console"),
})
def removecolors(self):
# Don't remove shell color tags before "iomark"
for tag in self.tagdefs:
self.tag_remove(tag, "iomark", "end")
class ModifiedUndoDelegator(UndoDelegator):
"Extend base class: forbid insert/delete before the I/O mark"
def insert(self, index, chars, tags=None):
try:
if self.delegate.compare(index, "<", "iomark"):
self.delegate.bell()
return
except TclError:
pass
UndoDelegator.insert(self, index, chars, tags)
def delete(self, index1, index2=None):
try:
if self.delegate.compare(index1, "<", "iomark"):
self.delegate.bell()
return
except TclError:
pass
UndoDelegator.delete(self, index1, index2)
class MyRPCClient(rpc.RPCClient):
def handle_EOF(self):
"Override the base class - just re-raise EOFError"
raise EOFError
class ModifiedInterpreter(InteractiveInterpreter):
def __init__(self, tkconsole):
self.tkconsole = tkconsole
locals = sys.modules['__main__'].__dict__
InteractiveInterpreter.__init__(self, locals=locals)
self.save_warnings_filters = None
self.restarting = False
self.subprocess_arglist = None
self.port = PORT
self.original_compiler_flags = self.compile.compiler.flags
_afterid = None
rpcclt = None
rpcpid = None
def spawn_subprocess(self):
if self.subprocess_arglist is None:
self.subprocess_arglist = self.build_subprocess_arglist()
args = self.subprocess_arglist
self.rpcpid = os.spawnv(os.P_NOWAIT, sys.executable, args)
def build_subprocess_arglist(self):
assert (self.port!=0), (
"Socket should have been assigned a port number.")
w = ['-W' + s for s in sys.warnoptions]
if 1/2 > 0: # account for new division
w.append('-Qnew')
# Maybe IDLE is installed and is being accessed via sys.path,
# or maybe it's not installed and the idle.py script is being
# run from the IDLE source directory.
del_exitf = idleConf.GetOption('main', 'General', 'delete-exitfunc',
default=False, type='bool')
if __name__ == 'idlelib.PyShell':
command = "__import__('idlelib.run').run.main(%r)" % (del_exitf,)
else:
command = "__import__('run').main(%r)" % (del_exitf,)
if sys.platform[:3] == 'win' and ' ' in sys.executable:
# handle embedded space in path by quoting the argument
decorated_exec = '"%s"' % sys.executable
else:
decorated_exec = sys.executable
return [decorated_exec] + w + ["-c", command, str(self.port)]
def start_subprocess(self):
addr = (HOST, self.port)
# GUI makes several attempts to acquire socket, listens for connection
for i in range(3):
time.sleep(i)
try:
self.rpcclt = MyRPCClient(addr)
break
except socket.error as err:
pass
else:
self.display_port_binding_error()
return None
# if PORT was 0, system will assign an 'ephemeral' port. Find it out:
self.port = self.rpcclt.listening_sock.getsockname()[1]
# if PORT was not 0, probably working with a remote execution server
if PORT != 0:
# To allow reconnection within the 2MSL wait (cf. Stevens TCP
# V1, 18.6), set SO_REUSEADDR. Note that this can be problematic
# on Windows since the implementation allows two active sockets on
# the same address!
self.rpcclt.listening_sock.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
self.spawn_subprocess()
#time.sleep(20) # test to simulate GUI not accepting connection
# Accept the connection from the Python execution server
self.rpcclt.listening_sock.settimeout(10)
try:
self.rpcclt.accept()
except socket.timeout as err:
self.display_no_subprocess_error()
return None
self.rpcclt.register("console", self.tkconsole)
self.rpcclt.register("stdin", self.tkconsole.stdin)
self.rpcclt.register("stdout", self.tkconsole.stdout)
self.rpcclt.register("stderr", self.tkconsole.stderr)
self.rpcclt.register("flist", self.tkconsole.flist)
self.rpcclt.register("linecache", linecache)
self.rpcclt.register("interp", self)
self.transfer_path(with_cwd=True)
self.poll_subprocess()
return self.rpcclt
def restart_subprocess(self, with_cwd=False):
if self.restarting:
return self.rpcclt
self.restarting = True
# close only the subprocess debugger
debug = self.getdebugger()
if debug:
try:
# Only close subprocess debugger, don't unregister gui_adap!
RemoteDebugger.close_subprocess_debugger(self.rpcclt)
except:
pass
# Kill subprocess, spawn a new one, accept connection.
self.rpcclt.close()
self.unix_terminate()
console = self.tkconsole
was_executing = console.executing
console.executing = False
self.spawn_subprocess()
try:
self.rpcclt.accept()
except socket.timeout as err:
self.display_no_subprocess_error()
return None
self.transfer_path(with_cwd=with_cwd)
console.stop_readline()
# annotate restart in shell window and mark it
console.text.delete("iomark", "end-1c")
if was_executing:
console.write('\n')
console.showprompt()
halfbar = ((int(console.width) - 16) // 2) * '='
console.write(halfbar + ' RESTART ' + halfbar)
console.text.mark_set("restart", "end-1c")
console.text.mark_gravity("restart", "left")
console.showprompt()
# restart subprocess debugger
if debug:
# Restarted debugger connects to current instance of debug GUI
gui = RemoteDebugger.restart_subprocess_debugger(self.rpcclt)
# reload remote debugger breakpoints for all PyShellEditWindows
debug.load_breakpoints()
self.compile.compiler.flags = self.original_compiler_flags
self.restarting = False
return self.rpcclt
def __request_interrupt(self):
self.rpcclt.remotecall("exec", "interrupt_the_server", (), {})
def interrupt_subprocess(self):
threading.Thread(target=self.__request_interrupt).start()
def kill_subprocess(self):
if self._afterid is not None:
self.tkconsole.text.after_cancel(self._afterid)
try:
self.rpcclt.close()
except AttributeError: # no socket
pass
self.unix_terminate()
self.tkconsole.executing = False
self.rpcclt = None
def unix_terminate(self):
"UNIX: make sure subprocess is terminated and collect status"
if hasattr(os, 'kill'):
try:
os.kill(self.rpcpid, SIGTERM)
except OSError:
# process already terminated:
return
else:
try:
os.waitpid(self.rpcpid, 0)
except OSError:
return
def transfer_path(self, with_cwd=False):
if with_cwd: # Issue 13506
path = [''] # include Current Working Directory
path.extend(sys.path)
else:
path = sys.path
self.runcommand("""if 1:
import sys as _sys
_sys.path = %r
del _sys
\n""" % (path,))
active_seq = None
def poll_subprocess(self):
clt = self.rpcclt
if clt is None:
return
try:
response = clt.pollresponse(self.active_seq, wait=0.05)
except (EOFError, IOError, KeyboardInterrupt):
# lost connection or subprocess terminated itself, restart
# [the KBI is from rpc.SocketIO.handle_EOF()]
if self.tkconsole.closing:
return
response = None
self.restart_subprocess()
if response:
self.tkconsole.resetoutput()
self.active_seq = None
how, what = response
console = self.tkconsole.console
if how == "OK":
if what is not None:
print(repr(what), file=console)
elif how == "EXCEPTION":
if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
self.remote_stack_viewer()
elif how == "ERROR":
errmsg = "PyShell.ModifiedInterpreter: Subprocess ERROR:\n"
print(errmsg, what, file=sys.__stderr__)
print(errmsg, what, file=console)
# we received a response to the currently active seq number:
try:
self.tkconsole.endexecuting()
except AttributeError: # shell may have closed
pass
# Reschedule myself
if not self.tkconsole.closing:
self._afterid = self.tkconsole.text.after(
self.tkconsole.pollinterval, self.poll_subprocess)
debugger = None
def setdebugger(self, debugger):
self.debugger = debugger
def getdebugger(self):
return self.debugger
def open_remote_stack_viewer(self):
"""Initiate the remote stack viewer from a separate thread.
This method is called from the subprocess, and by returning from this
method we allow the subprocess to unblock. After a bit the shell
requests the subprocess to open the remote stack viewer which returns a
static object looking at the last exception. It is queried through
the RPC mechanism.
"""
self.tkconsole.text.after(300, self.remote_stack_viewer)
return
def remote_stack_viewer(self):
from idlelib import RemoteObjectBrowser
oid = self.rpcclt.remotequeue("exec", "stackviewer", ("flist",), {})
if oid is None:
self.tkconsole.root.bell()
return
item = RemoteObjectBrowser.StubObjectTreeItem(self.rpcclt, oid)
from idlelib.TreeWidget import ScrolledCanvas, TreeNode
top = Toplevel(self.tkconsole.root)
theme = idleConf.GetOption('main','Theme','name')
background = idleConf.GetHighlight(theme, 'normal')['background']
sc = ScrolledCanvas(top, bg=background, highlightthickness=0)
sc.frame.pack(expand=1, fill="both")
node = TreeNode(sc.canvas, None, item)
node.expand()
# XXX Should GC the remote tree when closing the window
gid = 0
def execsource(self, source):
"Like runsource() but assumes complete exec source"
filename = self.stuffsource(source)
self.execfile(filename, source)
def execfile(self, filename, source=None):
"Execute an existing file"
        if source is None:
            with open(filename, "r") as f:
                source = f.read()
try:
code = compile(source, filename, "exec")
except (OverflowError, SyntaxError):
self.tkconsole.resetoutput()
print('*** Error in script or command!\n'
'Traceback (most recent call last):',
file=self.tkconsole.stderr)
InteractiveInterpreter.showsyntaxerror(self, filename)
self.tkconsole.showprompt()
else:
self.runcode(code)
def runsource(self, source):
"Extend base class method: Stuff the source in the line cache first"
filename = self.stuffsource(source)
self.more = 0
self.save_warnings_filters = warnings.filters[:]
warnings.filterwarnings(action="error", category=SyntaxWarning)
if isinstance(source, types.UnicodeType):
from idlelib import IOBinding
try:
source = source.encode(IOBinding.encoding)
except UnicodeError:
self.tkconsole.resetoutput()
self.write("Unsupported characters in input\n")
return
try:
# InteractiveInterpreter.runsource() calls its runcode() method,
# which is overridden (see below)
return InteractiveInterpreter.runsource(self, source, filename)
finally:
if self.save_warnings_filters is not None:
warnings.filters[:] = self.save_warnings_filters
self.save_warnings_filters = None
def stuffsource(self, source):
"Stuff source in the filename cache"
filename = "<pyshell#%d>" % self.gid
self.gid = self.gid + 1
lines = source.split("\n")
linecache.cache[filename] = len(source)+1, 0, lines, filename
return filename
def prepend_syspath(self, filename):
"Prepend sys.path with file's directory if not already included"
self.runcommand("""if 1:
_filename = %r
import sys as _sys
from os.path import dirname as _dirname
_dir = _dirname(_filename)
if not _dir in _sys.path:
_sys.path.insert(0, _dir)
del _filename, _sys, _dirname, _dir
\n""" % (filename,))
def showsyntaxerror(self, filename=None):
"""Extend base class method: Add Colorizing
Color the offending position instead of printing it and pointing at it
with a caret.
"""
text = self.tkconsole.text
stuff = self.unpackerror()
if stuff:
msg, lineno, offset, line = stuff
if lineno == 1:
pos = "iomark + %d chars" % (offset-1)
else:
pos = "iomark linestart + %d lines + %d chars" % \
(lineno-1, offset-1)
text.tag_add("ERROR", pos)
text.see(pos)
char = text.get(pos)
if char and char in IDENTCHARS:
text.tag_add("ERROR", pos + " wordstart", pos)
self.tkconsole.resetoutput()
self.write("SyntaxError: %s\n" % str(msg))
else:
self.tkconsole.resetoutput()
InteractiveInterpreter.showsyntaxerror(self, filename)
self.tkconsole.showprompt()
def unpackerror(self):
type, value, tb = sys.exc_info()
ok = type is SyntaxError
if ok:
try:
msg, (dummy_filename, lineno, offset, line) = value
if not offset:
offset = 0
except:
ok = 0
if ok:
return msg, lineno, offset, line
else:
return None
def showtraceback(self):
"Extend base class method to reset output properly"
self.tkconsole.resetoutput()
self.checklinecache()
InteractiveInterpreter.showtraceback(self)
if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
self.tkconsole.open_stack_viewer()
def checklinecache(self):
c = linecache.cache
for key in c.keys():
if key[:1] + key[-1:] != "<>":
del c[key]
def runcommand(self, code):
"Run the code without invoking the debugger"
# The code better not raise an exception!
if self.tkconsole.executing:
self.display_executing_dialog()
return 0
if self.rpcclt:
self.rpcclt.remotequeue("exec", "runcode", (code,), {})
else:
exec code in self.locals
return 1
def runcode(self, code):
"Override base class method"
if self.tkconsole.executing:
self.interp.restart_subprocess()
self.checklinecache()
if self.save_warnings_filters is not None:
warnings.filters[:] = self.save_warnings_filters
self.save_warnings_filters = None
debugger = self.debugger
try:
self.tkconsole.beginexecuting()
if not debugger and self.rpcclt is not None:
self.active_seq = self.rpcclt.asyncqueue("exec", "runcode",
(code,), {})
elif debugger:
debugger.run(code, self.locals)
else:
exec code in self.locals
except SystemExit:
if not self.tkconsole.closing:
if tkMessageBox.askyesno(
"Exit?",
"Do you want to exit altogether?",
default="yes",
master=self.tkconsole.text):
raise
else:
self.showtraceback()
else:
raise
except:
if use_subprocess:
print("IDLE internal error in runcode()",
file=self.tkconsole.stderr)
self.showtraceback()
self.tkconsole.endexecuting()
else:
if self.tkconsole.canceled:
self.tkconsole.canceled = False
print("KeyboardInterrupt", file=self.tkconsole.stderr)
else:
self.showtraceback()
finally:
if not use_subprocess:
try:
self.tkconsole.endexecuting()
except AttributeError: # shell may have closed
pass
def write(self, s):
"Override base class method"
self.tkconsole.stderr.write(s)
def display_port_binding_error(self):
tkMessageBox.showerror(
"Port Binding Error",
"IDLE can't bind to a TCP/IP port, which is necessary to "
"communicate with its Python execution server. This might be "
"because no networking is installed on this computer. "
"Run IDLE with the -n command line switch to start without a "
"subprocess and refer to Help/IDLE Help 'Running without a "
"subprocess' for further details.",
master=self.tkconsole.text)
def display_no_subprocess_error(self):
tkMessageBox.showerror(
"Subprocess Startup Error",
"IDLE's subprocess didn't make connection. Either IDLE can't "
"start a subprocess or personal firewall software is blocking "
"the connection.",
master=self.tkconsole.text)
def display_executing_dialog(self):
tkMessageBox.showerror(
"Already executing",
"The Python Shell window is already executing a command; "
"please wait until it is finished.",
master=self.tkconsole.text)
class PyShell(OutputWindow):
shell_title = "Python " + python_version() + " Shell"
# Override classes
ColorDelegator = ModifiedColorDelegator
UndoDelegator = ModifiedUndoDelegator
# Override menus
menu_specs = [
("file", "_File"),
("edit", "_Edit"),
("debug", "_Debug"),
("options", "_Options"),
("windows", "_Window"),
("help", "_Help"),
]
# New classes
from idlelib.IdleHistory import History
def __init__(self, flist=None):
if use_subprocess:
ms = self.menu_specs
if ms[2][0] != "shell":
ms.insert(2, ("shell", "She_ll"))
self.interp = ModifiedInterpreter(self)
if flist is None:
root = Tk()
fixwordbreaks(root)
root.withdraw()
flist = PyShellFileList(root)
#
OutputWindow.__init__(self, flist, None, None)
#
## self.config(usetabs=1, indentwidth=8, context_use_ps1=1)
self.usetabs = True
# indentwidth must be 8 when using tabs. See note in EditorWindow:
self.indentwidth = 8
self.context_use_ps1 = True
#
text = self.text
text.configure(wrap="char")
text.bind("<<newline-and-indent>>", self.enter_callback)
text.bind("<<plain-newline-and-indent>>", self.linefeed_callback)
text.bind("<<interrupt-execution>>", self.cancel_callback)
text.bind("<<end-of-file>>", self.eof_callback)
text.bind("<<open-stack-viewer>>", self.open_stack_viewer)
text.bind("<<toggle-debugger>>", self.toggle_debugger)
text.bind("<<toggle-jit-stack-viewer>>", self.toggle_jit_stack_viewer)
if use_subprocess:
text.bind("<<view-restart>>", self.view_restart_mark)
text.bind("<<restart-shell>>", self.restart_shell)
#
self.save_stdout = sys.stdout
self.save_stderr = sys.stderr
self.save_stdin = sys.stdin
from idlelib import IOBinding
self.stdin = PseudoInputFile(self, "stdin", IOBinding.encoding)
self.stdout = PseudoOutputFile(self, "stdout", IOBinding.encoding)
self.stderr = PseudoOutputFile(self, "stderr", IOBinding.encoding)
self.console = PseudoOutputFile(self, "console", IOBinding.encoding)
if not use_subprocess:
sys.stdout = self.stdout
sys.stderr = self.stderr
sys.stdin = self.stdin
#
self.history = self.History(self.text)
#
self.pollinterval = 50 # millisec
def get_standard_extension_names(self):
return idleConf.GetExtensions(shell_only=True)
reading = False
executing = False
canceled = False
endoffile = False
closing = False
_stop_readline_flag = False
def set_warning_stream(self, stream):
global warning_stream
warning_stream = stream
def get_warning_stream(self):
return warning_stream
def toggle_debugger(self, event=None):
if self.executing:
tkMessageBox.showerror("Don't debug now",
"You can only toggle the debugger when idle",
master=self.text)
self.set_debugger_indicator()
return "break"
else:
db = self.interp.getdebugger()
if db:
self.close_debugger()
else:
self.open_debugger()
def set_debugger_indicator(self):
db = self.interp.getdebugger()
self.setvar("<<toggle-debugger>>", not not db)
def toggle_jit_stack_viewer(self, event=None):
pass # All we need is the variable
def close_debugger(self):
db = self.interp.getdebugger()
if db:
self.interp.setdebugger(None)
db.close()
if self.interp.rpcclt:
RemoteDebugger.close_remote_debugger(self.interp.rpcclt)
self.resetoutput()
self.console.write("[DEBUG OFF]\n")
sys.ps1 = ">>> "
self.showprompt()
self.set_debugger_indicator()
def open_debugger(self):
if self.interp.rpcclt:
dbg_gui = RemoteDebugger.start_remote_debugger(self.interp.rpcclt,
self)
else:
dbg_gui = Debugger.Debugger(self)
self.interp.setdebugger(dbg_gui)
dbg_gui.load_breakpoints()
sys.ps1 = "[DEBUG ON]\n>>> "
self.showprompt()
self.set_debugger_indicator()
def beginexecuting(self):
"Helper for ModifiedInterpreter"
self.resetoutput()
self.executing = 1
def endexecuting(self):
"Helper for ModifiedInterpreter"
self.executing = 0
self.canceled = 0
self.showprompt()
def close(self):
"Extend EditorWindow.close()"
if self.executing:
response = tkMessageBox.askokcancel(
"Kill?",
"The program is still running!\n Do you want to kill it?",
default="ok",
parent=self.text)
if response is False:
return "cancel"
self.stop_readline()
self.canceled = True
self.closing = True
return EditorWindow.close(self)
def _close(self):
"Extend EditorWindow._close(), shut down debugger and execution server"
self.close_debugger()
if use_subprocess:
self.interp.kill_subprocess()
# Restore std streams
sys.stdout = self.save_stdout
sys.stderr = self.save_stderr
sys.stdin = self.save_stdin
# Break cycles
self.interp = None
self.console = None
self.flist.pyshell = None
self.history = None
EditorWindow._close(self)
def ispythonsource(self, filename):
"Override EditorWindow method: never remove the colorizer"
return True
def short_title(self):
return self.shell_title
COPYRIGHT = \
'Type "copyright", "credits" or "license()" for more information.'
def begin(self):
self.resetoutput()
if use_subprocess:
nosub = ''
client = self.interp.start_subprocess()
if not client:
self.close()
return False
else:
nosub = "==== No Subprocess ===="
self.write("Python %s on %s\n%s\n%s" %
(sys.version, sys.platform, self.COPYRIGHT, nosub))
self.showprompt()
import Tkinter
Tkinter._default_root = None # 03Jan04 KBK What's this?
return True
def stop_readline(self):
if not self.reading: # no nested mainloop to exit.
return
self._stop_readline_flag = True
self.top.quit()
def readline(self):
save = self.reading
try:
self.reading = 1
self.top.mainloop() # nested mainloop()
finally:
self.reading = save
if self._stop_readline_flag:
self._stop_readline_flag = False
return ""
line = self.text.get("iomark", "end-1c")
if len(line) == 0: # may be EOF if we quit our mainloop with Ctrl-C
line = "\n"
if isinstance(line, unicode):
from idlelib import IOBinding
try:
line = line.encode(IOBinding.encoding)
except UnicodeError:
pass
self.resetoutput()
if self.canceled:
self.canceled = 0
if not use_subprocess:
raise KeyboardInterrupt
if self.endoffile:
self.endoffile = 0
line = ""
return line
def isatty(self):
return True
def cancel_callback(self, event=None):
try:
if self.text.compare("sel.first", "!=", "sel.last"):
return # Active selection -- always use default binding
except:
pass
if not (self.executing or self.reading):
self.resetoutput()
self.interp.write("KeyboardInterrupt\n")
self.showprompt()
return "break"
self.endoffile = 0
self.canceled = 1
if (self.executing and self.interp.rpcclt):
if self.interp.getdebugger():
self.interp.restart_subprocess()
else:
self.interp.interrupt_subprocess()
if self.reading:
self.top.quit() # exit the nested mainloop() in readline()
return "break"
def eof_callback(self, event):
if self.executing and not self.reading:
return # Let the default binding (delete next char) take over
if not (self.text.compare("iomark", "==", "insert") and
self.text.compare("insert", "==", "end-1c")):
return # Let the default binding (delete next char) take over
if not self.executing:
self.resetoutput()
self.close()
else:
self.canceled = 0
self.endoffile = 1
self.top.quit()
return "break"
def linefeed_callback(self, event):
# Insert a linefeed without entering anything (still autoindented)
if self.reading:
self.text.insert("insert", "\n")
self.text.see("insert")
else:
self.newline_and_indent_event(event)
return "break"
def enter_callback(self, event):
if self.executing and not self.reading:
return # Let the default binding (insert '\n') take over
# If some text is selected, recall the selection
        # (but only if this is before the I/O mark)
try:
sel = self.text.get("sel.first", "sel.last")
if sel:
if self.text.compare("sel.last", "<=", "iomark"):
self.recall(sel, event)
return "break"
except:
pass
# If we're strictly before the line containing iomark, recall
# the current line, less a leading prompt, less leading or
# trailing whitespace
if self.text.compare("insert", "<", "iomark linestart"):
# Check if there's a relevant stdin range -- if so, use it
prev = self.text.tag_prevrange("stdin", "insert")
if prev and self.text.compare("insert", "<", prev[1]):
self.recall(self.text.get(prev[0], prev[1]), event)
return "break"
next = self.text.tag_nextrange("stdin", "insert")
if next and self.text.compare("insert lineend", ">=", next[0]):
self.recall(self.text.get(next[0], next[1]), event)
return "break"
# No stdin mark -- just get the current line, less any prompt
indices = self.text.tag_nextrange("console", "insert linestart")
if indices and \
self.text.compare(indices[0], "<=", "insert linestart"):
self.recall(self.text.get(indices[1], "insert lineend"), event)
else:
self.recall(self.text.get("insert linestart", "insert lineend"), event)
return "break"
# If we're between the beginning of the line and the iomark, i.e.
# in the prompt area, move to the end of the prompt
if self.text.compare("insert", "<", "iomark"):
self.text.mark_set("insert", "iomark")
# If we're in the current input and there's only whitespace
# beyond the cursor, erase that whitespace first
s = self.text.get("insert", "end-1c")
if s and not s.strip():
self.text.delete("insert", "end-1c")
# If we're in the current input before its last line,
# insert a newline right at the insert point
if self.text.compare("insert", "<", "end-1c linestart"):
self.newline_and_indent_event(event)
return "break"
# We're in the last line; append a newline and submit it
self.text.mark_set("insert", "end-1c")
if self.reading:
self.text.insert("insert", "\n")
self.text.see("insert")
else:
self.newline_and_indent_event(event)
self.text.tag_add("stdin", "iomark", "end-1c")
self.text.update_idletasks()
if self.reading:
self.top.quit() # Break out of recursive mainloop() in raw_input()
else:
self.runit()
return "break"
def recall(self, s, event):
# remove leading and trailing empty or whitespace lines
        s = re.sub(r'^\s*\n', '', s)
s = re.sub(r'\n\s*$', '', s)
lines = s.split('\n')
self.text.undo_block_start()
try:
self.text.tag_remove("sel", "1.0", "end")
self.text.mark_set("insert", "end-1c")
prefix = self.text.get("insert linestart", "insert")
if prefix.rstrip().endswith(':'):
self.newline_and_indent_event(event)
prefix = self.text.get("insert linestart", "insert")
self.text.insert("insert", lines[0].strip())
if len(lines) > 1:
orig_base_indent = re.search(r'^([ \t]*)', lines[0]).group(0)
new_base_indent = re.search(r'^([ \t]*)', prefix).group(0)
for line in lines[1:]:
if line.startswith(orig_base_indent):
# replace orig base indentation with new indentation
line = new_base_indent + line[len(orig_base_indent):]
self.text.insert('insert', '\n'+line.rstrip())
finally:
self.text.see("insert")
self.text.undo_block_stop()
def runit(self):
line = self.text.get("iomark", "end-1c")
# Strip off last newline and surrounding whitespace.
# (To allow you to hit return twice to end a statement.)
i = len(line)
while i > 0 and line[i-1] in " \t":
i = i-1
if i > 0 and line[i-1] == "\n":
i = i-1
while i > 0 and line[i-1] in " \t":
i = i-1
line = line[:i]
more = self.interp.runsource(line)
def open_stack_viewer(self, event=None):
if self.interp.rpcclt:
return self.interp.remote_stack_viewer()
try:
sys.last_traceback
except:
tkMessageBox.showerror("No stack trace",
"There is no stack trace yet.\n"
"(sys.last_traceback is not defined)",
master=self.text)
return
from idlelib.StackViewer import StackBrowser
sv = StackBrowser(self.root, self.flist)
def view_restart_mark(self, event=None):
self.text.see("iomark")
self.text.see("restart")
def restart_shell(self, event=None):
"Callback for Run/Restart Shell Cntl-F6"
self.interp.restart_subprocess(with_cwd=True)
def showprompt(self):
self.resetoutput()
try:
s = str(sys.ps1)
except:
s = ""
self.console.write(s)
self.text.mark_set("insert", "end-1c")
self.set_line_and_column()
self.io.reset_undo()
def resetoutput(self):
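        "Store any pending input in the history and move the iomark to the end."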
source = self.text.get("iomark", "end-1c")
if self.history:
self.history.store(source)
if self.text.get("end-2c") != "\n":
self.text.insert("end-1c", "\n")
self.text.mark_set("iomark", "end-1c")
self.set_line_and_column()
sys.stdout.softspace = 0
def write(self, s, tags=()):
try:
self.text.mark_gravity("iomark", "right")
OutputWindow.write(self, s, tags, "iomark")
self.text.mark_gravity("iomark", "left")
except:
pass
if self.canceled:
self.canceled = 0
if not use_subprocess:
raise KeyboardInterrupt
def rmenu_check_cut(self):
try:
if self.text.compare('sel.first', '<', 'iomark'):
return 'disabled'
except TclError: # no selection, so the index 'sel.first' doesn't exist
return 'disabled'
return super(PyShell, self).rmenu_check_cut()
def rmenu_check_paste(self):
if self.text.compare('insert', '<', 'iomark'):
return 'disabled'
return super(PyShell, self).rmenu_check_paste()
class PseudoFile(io.TextIOBase):
def __init__(self, shell, tags, encoding=None):
self.shell = shell
self.tags = tags
self.softspace = 0
self._encoding = encoding
@property
def encoding(self):
return self._encoding
@property
def name(self):
return '<%s>' % self.tags
def isatty(self):
return True
class PseudoOutputFile(PseudoFile):
def writable(self):
return True
def write(self, s):
if self.closed:
raise ValueError("write to closed file")
if type(s) not in (unicode, str, bytearray):
# See issue #19481
if isinstance(s, unicode):
s = unicode.__getitem__(s, slice(None))
elif isinstance(s, str):
s = str.__str__(s)
elif isinstance(s, bytearray):
s = bytearray.__str__(s)
else:
raise TypeError('must be string, not ' + type(s).__name__)
return self.shell.write(s, self.tags)
class PseudoInputFile(PseudoFile):
def __init__(self, shell, tags, encoding=None):
PseudoFile.__init__(self, shell, tags, encoding)
self._line_buffer = ''
def readable(self):
return True
def read(self, size=-1):
if self.closed:
raise ValueError("read from closed file")
if size is None:
size = -1
elif not isinstance(size, int):
raise TypeError('must be int, not ' + type(size).__name__)
result = self._line_buffer
self._line_buffer = ''
if size < 0:
while True:
line = self.shell.readline()
if not line: break
result += line
else:
while len(result) < size:
line = self.shell.readline()
if not line: break
result += line
self._line_buffer = result[size:]
result = result[:size]
return result
def readline(self, size=-1):
if self.closed:
raise ValueError("read from closed file")
if size is None:
size = -1
elif not isinstance(size, int):
raise TypeError('must be int, not ' + type(size).__name__)
line = self._line_buffer or self.shell.readline()
if size < 0:
size = len(line)
eol = line.find('\n', 0, size)
if eol >= 0:
size = eol + 1
self._line_buffer = line[size:]
return line[:size]
def close(self):
self.shell.close()
usage_msg = """\
USAGE: idle [-deins] [-t title] [file]*
idle [-dns] [-t title] (-c cmd | -r file) [arg]*
idle [-dns] [-t title] - [arg]*
-h print this help message and exit
-n run IDLE without a subprocess (see Help/IDLE Help for details)
The following options will override the IDLE 'settings' configuration:
-e open an edit window
-i open a shell window
The following options imply -i and will open a shell:
-c cmd run the command in a shell, or
-r file run script from file
-d enable the debugger
-s run $IDLESTARTUP or $PYTHONSTARTUP before anything else
-t title set title of shell window
A default edit window will be bypassed when -c, -r, or - are used.
[arg]* are passed to the command (-c) or script (-r) in sys.argv[1:].
Examples:
idle
Open an edit window or shell depending on IDLE's configuration.
idle foo.py foobar.py
Edit the files, also open a shell if configured to start with shell.
idle -est "Baz" foo.py
Run $IDLESTARTUP or $PYTHONSTARTUP, edit foo.py, and open a shell
window with the title "Baz".
idle -c "import sys; print sys.argv" "foo"
Open a shell window and run the command, passing "-c" in sys.argv[0]
and "foo" in sys.argv[1].
idle -d -s -r foo.py "Hello World"
Open a shell window, run a startup script, enable the debugger, and
run foo.py, passing "foo.py" in sys.argv[0] and "Hello World" in
sys.argv[1].
echo "import sys; print sys.argv" | idle - "foobar"
Open a shell window, run the script piped in, passing '' in sys.argv[0]
and "foobar" in sys.argv[1].
"""
def main():
global flist, root, use_subprocess
capture_warnings(True)
use_subprocess = True
enable_shell = False
enable_edit = False
debug = False
cmd = None
script = None
startup = False
try:
opts, args = getopt.getopt(sys.argv[1:], "c:deihnr:st:")
except getopt.error as msg:
print("Error: %s\n%s" % (msg, usage_msg), file=sys.stderr)
sys.exit(2)
for o, a in opts:
if o == '-c':
cmd = a
enable_shell = True
if o == '-d':
debug = True
enable_shell = True
if o == '-e':
enable_edit = True
if o == '-h':
sys.stdout.write(usage_msg)
sys.exit()
if o == '-i':
enable_shell = True
if o == '-n':
use_subprocess = False
if o == '-r':
script = a
if os.path.isfile(script):
pass
else:
print("No script file: ", script, file=sys.stderr)
sys.exit()
enable_shell = True
if o == '-s':
startup = True
enable_shell = True
if o == '-t':
PyShell.shell_title = a
enable_shell = True
if args and args[0] == '-':
cmd = sys.stdin.read()
enable_shell = True
# process sys.argv and sys.path:
for i in range(len(sys.path)):
sys.path[i] = os.path.abspath(sys.path[i])
if args and args[0] == '-':
sys.argv = [''] + args[1:]
elif cmd:
sys.argv = ['-c'] + args
elif script:
sys.argv = [script] + args
elif args:
enable_edit = True
pathx = []
for filename in args:
pathx.append(os.path.dirname(filename))
for dir in pathx:
dir = os.path.abspath(dir)
if dir not in sys.path:
sys.path.insert(0, dir)
else:
dir = os.getcwd()
        if dir not in sys.path:
sys.path.insert(0, dir)
# check the IDLE settings configuration (but command line overrides)
edit_start = idleConf.GetOption('main', 'General',
'editor-on-startup', type='bool')
enable_edit = enable_edit or edit_start
enable_shell = enable_shell or not enable_edit
# start editor and/or shell windows:
root = Tk(className="Idle")
# set application icon
icondir = os.path.join(os.path.dirname(__file__), 'Icons')
if system() == 'Windows':
iconfile = os.path.join(icondir, 'idle.ico')
root.wm_iconbitmap(default=iconfile)
elif TkVersion >= 8.5:
ext = '.png' if TkVersion >= 8.6 else '.gif'
iconfiles = [os.path.join(icondir, 'idle_%d%s' % (size, ext))
for size in (16, 32, 48)]
icons = [PhotoImage(file=iconfile) for iconfile in iconfiles]
root.tk.call('wm', 'iconphoto', str(root), "-default", *icons)
fixwordbreaks(root)
root.withdraw()
flist = PyShellFileList(root)
macosxSupport.setupApp(root, flist)
if enable_edit:
if not (cmd or script):
for filename in args[:]:
if flist.open(filename) is None:
                    # filename is actually a directory; ignore it
args.remove(filename)
if not args:
flist.new()
if enable_shell:
shell = flist.open_shell()
if not shell:
return # couldn't open shell
if macosxSupport.isAquaTk() and flist.dict:
# On OSX: when the user has double-clicked on a file that causes
# IDLE to be launched the shell window will open just in front of
# the file she wants to see. Lower the interpreter window when
# there are open files.
shell.top.lower()
else:
shell = flist.pyshell
# Handle remaining options. If any of these are set, enable_shell
# was set also, so shell must be true to reach here.
if debug:
shell.open_debugger()
if startup:
filename = os.environ.get("IDLESTARTUP") or \
os.environ.get("PYTHONSTARTUP")
if filename and os.path.isfile(filename):
shell.interp.execfile(filename)
if cmd or script:
shell.interp.runcommand("""if 1:
import sys as _sys
_sys.argv = %r
del _sys
\n""" % (sys.argv,))
if cmd:
shell.interp.execsource(cmd)
elif script:
shell.interp.prepend_syspath(script)
shell.interp.execfile(script)
elif shell:
# If there is a shell window and no cmd or script in progress,
# check for problematic OS X Tk versions and print a warning
# message in the IDLE shell window; this is less intrusive
# than always opening a separate window.
tkversionwarning = macosxSupport.tkVersionWarning(root)
if tkversionwarning:
shell.interp.runcommand("print('%s')" % tkversionwarning)
while flist.inversedict: # keep IDLE running while files are open.
root.mainloop()
root.destroy()
capture_warnings(False)
if __name__ == "__main__":
sys.modules['PyShell'] = sys.modules['__main__']
main()
capture_warnings(False) # Make sure turned off; see issue 18081
| []
| []
| [
"PYTHONSTARTUP",
"IDLESTARTUP"
]
| [] | ["PYTHONSTARTUP", "IDLESTARTUP"] | python | 2 | 0 | |
socialdistribution/socialdistribution/wsgi.py | """
WSGI config for socialdistribution project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'socialdistribution.settings')
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
core/core.go | /*
Package core implements the IpfsNode object and related methods.
Packages underneath core/ provide a (relatively) stable, low-level API
to carry out most IPFS-related tasks. For more details on the other
interfaces and how core/... fits into the bigger IPFS picture, see:
$ godoc github.com/ipfs/go-ipfs
*/
package core
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"strings"
"time"
version "github.com/ipfs/go-ipfs"
rp "github.com/ipfs/go-ipfs/exchange/reprovide"
filestore "github.com/ipfs/go-ipfs/filestore"
mount "github.com/ipfs/go-ipfs/fuse/mount"
mfs "github.com/ipfs/go-ipfs/mfs"
namesys "github.com/ipfs/go-ipfs/namesys"
ipnsrp "github.com/ipfs/go-ipfs/namesys/republisher"
p2p "github.com/ipfs/go-ipfs/p2p"
pin "github.com/ipfs/go-ipfs/pin"
repo "github.com/ipfs/go-ipfs/repo"
circuit "gx/ipfs/QmPMRK5yTc2KhnaxQN4R7vRqEfZo5hW1aF5x6W97RKnXZq/go-libp2p-circuit"
u "gx/ipfs/QmPdKqUcHGFdeSpvjVoaTRPPstGif9GBZb5Q56RVw9o69A/go-ipfs-util"
ic "gx/ipfs/QmPvyPwuCgJ7pDmrKDxRtsScJgBaM5h4EpRL2qQJsmXf4n/go-libp2p-crypto"
p2phost "gx/ipfs/QmQ1hwb95uSSZR8jSPJysnfHxBDQAykSXsmz5TwTzxjq2Z/go-libp2p-host"
config "gx/ipfs/QmQSG7YCizeUH2bWatzp6uK9Vm3m7LA5jpxGa9QqgpNKw4/go-ipfs-config"
bitswap "gx/ipfs/QmQk1Rqy5XSBzXykMSsgiXfnhivCSnFpykx4M2j6DD1nBH/go-bitswap"
bsnet "gx/ipfs/QmQk1Rqy5XSBzXykMSsgiXfnhivCSnFpykx4M2j6DD1nBH/go-bitswap/network"
merkledag "gx/ipfs/QmQzSpSjkdGHW6WFBhUG6P3t9K8yv7iucucT1cQaqJ6tgd/go-merkledag"
logging "gx/ipfs/QmRREK2CAZ5Re2Bd9zZFG6FeYDppUWt5cMgsoUEp3ktgSr/go-log"
psrouter "gx/ipfs/QmRXZNuxRue83mzP3tFafw1xa6sT9no5o3oN6WbwrKNFHe/go-libp2p-pubsub-router"
rhelpers "gx/ipfs/QmRe3KBUdY6dBCqGd5Fri5TC4jX9pxGFLigKvgxmLVwzFH/go-libp2p-routing-helpers"
nilrouting "gx/ipfs/QmRr8DpNhQMzsoqAitUrw43D82pyPXZkyUqarhSAfkrdaQ/go-ipfs-routing/none"
offroute "gx/ipfs/QmRr8DpNhQMzsoqAitUrw43D82pyPXZkyUqarhSAfkrdaQ/go-ipfs-routing/offline"
routing "gx/ipfs/QmSD6bSPcXaaR7LpQHjytLWQD7DrCsb415CWfpbd9Szemb/go-libp2p-routing"
goprocess "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess"
mamask "gx/ipfs/QmSMZwvs3n4GBikZ7hKzT17c3bk65FmyZo2JqtJ16swqCv/multiaddr-filter"
mafilter "gx/ipfs/QmSW4uNHbvQia8iZDXzbwjiyHQtnyo9aFqfQAMasj3TJ6Y/go-maddr-filter"
bserv "gx/ipfs/QmTZZrpd9o4vpYr9TEADW2EoJ9fzUtAgpXqjxZHbKR2T15/go-blockservice"
libp2p "gx/ipfs/QmUDzeFgYrRmHL2hUB6NZmqcBVQtUzETwmFRUc9onfSSHr/go-libp2p"
discovery "gx/ipfs/QmUDzeFgYrRmHL2hUB6NZmqcBVQtUzETwmFRUc9onfSSHr/go-libp2p/p2p/discovery"
p2pbhost "gx/ipfs/QmUDzeFgYrRmHL2hUB6NZmqcBVQtUzETwmFRUc9onfSSHr/go-libp2p/p2p/host/basic"
rhost "gx/ipfs/QmUDzeFgYrRmHL2hUB6NZmqcBVQtUzETwmFRUc9onfSSHr/go-libp2p/p2p/host/routed"
identify "gx/ipfs/QmUDzeFgYrRmHL2hUB6NZmqcBVQtUzETwmFRUc9onfSSHr/go-libp2p/p2p/protocol/identify"
ping "gx/ipfs/QmUDzeFgYrRmHL2hUB6NZmqcBVQtUzETwmFRUc9onfSSHr/go-libp2p/p2p/protocol/ping"
record "gx/ipfs/QmUTQSGgjs8CHm9yBcUHicpRs7C9abhyZiBwjzCUp1pNgX/go-libp2p-record"
ds "gx/ipfs/QmVG5gxteQNEMhrS8prJSmU2C9rebtFuTd3SYZ5kE3YZ5k/go-datastore"
floodsub "gx/ipfs/QmWEwoF9gZAGNevKxFo4q216DCo9ieui5ZAe5jSZCdesck/go-libp2p-floodsub"
"gx/ipfs/QmWMcvZbNvk5codeqbm7L89C9kqSwka4KaHnDb8HRnxsSL/go-path/resolver"
metrics "gx/ipfs/QmWne2EKHBvVpSTYuWuWch3D9KqAx78Te83UXWFKQDcksJ/go-libp2p-metrics"
ft "gx/ipfs/QmWv8MYwgPK4zXYv1et1snWJ6FWGqaL6xY2y9X1bRSKBxk/go-unixfs"
exchange "gx/ipfs/QmY2oJagiH65QDZqW4wMHcQAvAF3kbz6WtXvB1k9r4jQvP/go-ipfs-exchange-interface"
connmgr "gx/ipfs/QmY6ujWdgPoEnYPCTNYBBGD6gAj9fPfRZsDgKm9awpM1Tv/go-libp2p-connmgr"
smux "gx/ipfs/QmY9JXR3FupnYAYJWK9aMr9bCpqWKcToQ1tz8DVGTrHpHw/go-stream-muxer"
bstore "gx/ipfs/QmYBEfMSquSGnuxBthUoBJNs3F6p4VAPPvAgxq6XXGvTPh/go-ipfs-blockstore"
pstore "gx/ipfs/QmYLXCWN2myozZpx8Wx4UjrRuQuhY3YtWoMi6SHaXii6aM/go-libp2p-peerstore"
cid "gx/ipfs/QmYjnkEL7i731PirfVH1sis89evN7jt4otSHw5D2xXXwUV/go-cid"
ma "gx/ipfs/QmYmsdtJ3HsodkePE3eU3TsCaP2YvPZJ4LoXnNkDE5Tpt7/go-multiaddr"
pnet "gx/ipfs/QmZaQ3K9PRd5sYYoG1xbTGPtd3N7TYiKBRmcBUTsx8HVET/go-libp2p-pnet"
ipld "gx/ipfs/QmaA8GkXUYinkkndvg7T6Tx7gYXemhxjaxLisEPes7Rf1P/go-ipld-format"
peer "gx/ipfs/QmcZSzKEM5yDfpZbeEEZaVmaZ1zXm6JWTbrQZSB8hCVPzk/go-libp2p-peer"
yamux "gx/ipfs/QmcsgrV3nCAKjiHKZhKVXWc4oY3WBECJCqahXEMpHeMrev/go-smux-yamux"
dht "gx/ipfs/QmdP3wKxB6x6vJ57tDrewAJF2qv4ULejCZ6dspJRnk3993/go-libp2p-kad-dht"
dhtopts "gx/ipfs/QmdP3wKxB6x6vJ57tDrewAJF2qv4ULejCZ6dspJRnk3993/go-libp2p-kad-dht/opts"
mplex "gx/ipfs/QmdiBZzwGtN2yHJrWD9ojQ7ASS48nv7BcojWLkYd1ZtrV2/go-smux-multiplex"
ifconnmgr "gx/ipfs/QmeJbAMK4cZc1RMChb68h9t2jqvK8miqE8oQiwGAf4EdQq/go-libp2p-interface-connmgr"
)
const IpnsValidatorTag = "ipns"
const kReprovideFrequency = time.Hour * 12
const discoveryConnTimeout = time.Second * 30
var log = logging.Logger("core")
type mode int
const (
// zero value is not a valid mode, must be explicitly set
localMode mode = iota
offlineMode
onlineMode
)
func init() {
identify.ClientVersion = "go-ipfs/" + version.CurrentVersionNumber + "/" + version.CurrentCommit
}
// IpfsNode is IPFS Core module. It represents an IPFS instance.
type IpfsNode struct {
// Self
Identity peer.ID // the local node's identity
Repo repo.Repo
// Local node
Pinning pin.Pinner // the pinning manager
Mounts Mounts // current mount state, if any.
PrivateKey ic.PrivKey // the local node's private Key
PNetFingerprint []byte // fingerprint of private network
// Services
Peerstore pstore.Peerstore // storage for other Peer instances
Blockstore bstore.GCBlockstore // the block store (lower level)
Filestore *filestore.Filestore // the filestore blockstore
BaseBlocks bstore.Blockstore // the raw blockstore, no filestore wrapping
GCLocker bstore.GCLocker // the locker used to protect the blockstore during gc
Blocks bserv.BlockService // the block service, get/add blocks.
DAG ipld.DAGService // the merkle dag service, get/add objects.
Resolver *resolver.Resolver // the path resolution system
Reporter metrics.Reporter
Discovery discovery.Service
FilesRoot *mfs.Root
RecordValidator record.Validator
// Online
PeerHost p2phost.Host // the network host (server+client)
Bootstrapper io.Closer // the periodic bootstrapper
Routing routing.IpfsRouting // the routing system. recommend ipfs-dht
Exchange exchange.Interface // the block exchange + strategy (bitswap)
Namesys namesys.NameSystem // the name system, resolves paths to hashes
Ping *ping.PingService
Reprovider *rp.Reprovider // the value reprovider system
IpnsRepub *ipnsrp.Republisher
Floodsub *floodsub.PubSub
PSRouter *psrouter.PubsubValueStore
DHT *dht.IpfsDHT
P2P *p2p.P2P
proc goprocess.Process
ctx context.Context
mode mode
localModeSet bool
}
// Mounts defines what the node's mount state is. This should
// perhaps be moved to the daemon or mount. It's here because
// it needs to be accessible across daemon requests.
type Mounts struct {
Ipfs mount.Mount
Ipns mount.Mount
}
func (n *IpfsNode) startOnlineServices(ctx context.Context, routingOption RoutingOption, hostOption HostOption, do DiscoveryOption, pubsub, ipnsps, mplex bool) error {
if n.PeerHost != nil { // already online.
return errors.New("node already online")
}
// load private key
if err := n.LoadPrivateKey(); err != nil {
return err
}
// get undialable addrs from config
cfg, err := n.Repo.Config()
if err != nil {
return err
}
var libp2pOpts []libp2p.Option
for _, s := range cfg.Swarm.AddrFilters {
f, err := mamask.NewMask(s)
if err != nil {
return fmt.Errorf("incorrectly formatted address filter in config: %s", s)
}
libp2pOpts = append(libp2pOpts, libp2p.FilterAddresses(f))
}
if !cfg.Swarm.DisableBandwidthMetrics {
// Set reporter
n.Reporter = metrics.NewBandwidthCounter()
libp2pOpts = append(libp2pOpts, libp2p.BandwidthReporter(n.Reporter))
}
swarmkey, err := n.Repo.SwarmKey()
if err != nil {
return err
}
if swarmkey != nil {
protec, err := pnet.NewProtector(bytes.NewReader(swarmkey))
if err != nil {
return fmt.Errorf("failed to configure private network: %s", err)
}
n.PNetFingerprint = protec.Fingerprint()
go func() {
t := time.NewTicker(30 * time.Second)
<-t.C // swallow one tick
for {
select {
case <-t.C:
if ph := n.PeerHost; ph != nil {
if len(ph.Network().Peers()) == 0 {
log.Warning("We are in private network and have no peers.")
log.Warning("This might be configuration mistake.")
}
}
case <-n.Process().Closing():
t.Stop()
return
}
}
}()
libp2pOpts = append(libp2pOpts, libp2p.PrivateNetwork(protec))
}
addrsFactory, err := makeAddrsFactory(cfg.Addresses)
if err != nil {
return err
}
if !cfg.Swarm.DisableRelay {
addrsFactory = composeAddrsFactory(addrsFactory, filterRelayAddrs)
}
libp2pOpts = append(libp2pOpts, libp2p.AddrsFactory(addrsFactory))
connm, err := constructConnMgr(cfg.Swarm.ConnMgr)
if err != nil {
return err
}
libp2pOpts = append(libp2pOpts, libp2p.ConnectionManager(connm))
libp2pOpts = append(libp2pOpts, makeSmuxTransportOption(mplex))
if !cfg.Swarm.DisableNatPortMap {
libp2pOpts = append(libp2pOpts, libp2p.NATPortMap())
}
if !cfg.Swarm.DisableRelay {
var opts []circuit.RelayOpt
if cfg.Swarm.EnableRelayHop {
opts = append(opts, circuit.OptHop)
}
libp2pOpts = append(libp2pOpts, libp2p.EnableRelay(opts...))
}
peerhost, err := hostOption(ctx, n.Identity, n.Peerstore, libp2pOpts...)
if err != nil {
return err
}
if err := n.startOnlineServicesWithHost(ctx, peerhost, routingOption, pubsub, ipnsps); err != nil {
return err
}
// Ok, now we're ready to listen.
if err := startListening(n.PeerHost, cfg); err != nil {
return err
}
n.P2P = p2p.NewP2P(n.Identity, n.PeerHost, n.Peerstore)
// setup local discovery
if do != nil {
service, err := do(ctx, n.PeerHost)
if err != nil {
log.Error("mdns error: ", err)
} else {
service.RegisterNotifee(n)
n.Discovery = service
}
}
return n.Bootstrap(DefaultBootstrapConfig)
}
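// constructConnMgr builds the libp2p connection manager requested by the
// Swarm.ConnMgr config section ("basic", "none", or default settings when
// the type is unset).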
func constructConnMgr(cfg config.ConnMgr) (ifconnmgr.ConnManager, error) {
switch cfg.Type {
case "":
// 'default' value is the basic connection manager
return connmgr.NewConnManager(config.DefaultConnMgrLowWater, config.DefaultConnMgrHighWater, config.DefaultConnMgrGracePeriod), nil
case "none":
return nil, nil
case "basic":
grace, err := time.ParseDuration(cfg.GracePeriod)
if err != nil {
return nil, fmt.Errorf("parsing Swarm.ConnMgr.GracePeriod: %s", err)
}
return connmgr.NewConnManager(cfg.LowWater, cfg.HighWater, grace), nil
default:
return nil, fmt.Errorf("unrecognized ConnMgr.Type: %q", cfg.Type)
}
}
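// startLateOnlineServices starts the reprovider, choosing which keys to
// reannounce according to the Reprovider.Strategy and Reprovider.Interval
// config options.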
func (n *IpfsNode) startLateOnlineServices(ctx context.Context) error {
cfg, err := n.Repo.Config()
if err != nil {
return err
}
var keyProvider rp.KeyChanFunc
switch cfg.Reprovider.Strategy {
case "all":
fallthrough
case "":
keyProvider = rp.NewBlockstoreProvider(n.Blockstore)
case "roots":
keyProvider = rp.NewPinnedProvider(n.Pinning, n.DAG, true)
case "pinned":
keyProvider = rp.NewPinnedProvider(n.Pinning, n.DAG, false)
default:
return fmt.Errorf("unknown reprovider strategy '%s'", cfg.Reprovider.Strategy)
}
n.Reprovider = rp.NewReprovider(ctx, n.Routing, keyProvider)
reproviderInterval := kReprovideFrequency
if cfg.Reprovider.Interval != "" {
dur, err := time.ParseDuration(cfg.Reprovider.Interval)
if err != nil {
return err
}
reproviderInterval = dur
}
go n.Reprovider.Run(reproviderInterval)
return nil
}
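// makeAddrsFactory builds an AddrsFactory that applies the Addresses.Announce
// and Addresses.NoAnnounce config options to the host's advertised addresses.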
func makeAddrsFactory(cfg config.Addresses) (p2pbhost.AddrsFactory, error) {
var annAddrs []ma.Multiaddr
for _, addr := range cfg.Announce {
maddr, err := ma.NewMultiaddr(addr)
if err != nil {
return nil, err
}
annAddrs = append(annAddrs, maddr)
}
filters := mafilter.NewFilters()
noAnnAddrs := map[string]bool{}
for _, addr := range cfg.NoAnnounce {
f, err := mamask.NewMask(addr)
if err == nil {
filters.AddDialFilter(f)
continue
}
maddr, err := ma.NewMultiaddr(addr)
if err != nil {
return nil, err
}
noAnnAddrs[maddr.String()] = true
}
return func(allAddrs []ma.Multiaddr) []ma.Multiaddr {
var addrs []ma.Multiaddr
if len(annAddrs) > 0 {
addrs = annAddrs
} else {
addrs = allAddrs
}
var out []ma.Multiaddr
for _, maddr := range addrs {
// check for exact matches
			_, ok := noAnnAddrs[maddr.String()]
// check for /ipcidr matches
if !ok && !filters.AddrBlocked(maddr) {
out = append(out, maddr)
}
}
return out
}, nil
}
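// makeSmuxTransportOption configures the stream multiplexers (yamux and,
// when enabled, mplex); the LIBP2P_MUX_PREFS environment variable can
// override the preference order.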
func makeSmuxTransportOption(mplexExp bool) libp2p.Option {
const yamuxID = "/yamux/1.0.0"
const mplexID = "/mplex/6.7.0"
ymxtpt := &yamux.Transport{
AcceptBacklog: 512,
ConnectionWriteTimeout: time.Second * 10,
KeepAliveInterval: time.Second * 30,
EnableKeepAlive: true,
MaxStreamWindowSize: uint32(1024 * 512),
LogOutput: ioutil.Discard,
}
if os.Getenv("YAMUX_DEBUG") != "" {
ymxtpt.LogOutput = os.Stderr
}
muxers := map[string]smux.Transport{yamuxID: ymxtpt}
if mplexExp {
muxers[mplexID] = mplex.DefaultTransport
}
// Allow muxer preference order overriding
order := []string{yamuxID, mplexID}
if prefs := os.Getenv("LIBP2P_MUX_PREFS"); prefs != "" {
order = strings.Fields(prefs)
}
opts := make([]libp2p.Option, 0, len(order))
for _, id := range order {
tpt, ok := muxers[id]
if !ok {
log.Warning("unknown or duplicate muxer in LIBP2P_MUX_PREFS: %s", id)
continue
}
delete(muxers, id)
opts = append(opts, libp2p.Muxer(id, tpt))
}
return libp2p.ChainOptions(opts...)
}
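// setupDiscoveryOption returns a DiscoveryOption that starts mDNS discovery
// when it is enabled in the config.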
func setupDiscoveryOption(d config.Discovery) DiscoveryOption {
if d.MDNS.Enabled {
return func(ctx context.Context, h p2phost.Host) (discovery.Service, error) {
if d.MDNS.Interval == 0 {
d.MDNS.Interval = 5
}
return discovery.NewMdnsService(ctx, h, time.Duration(d.MDNS.Interval)*time.Second, discovery.ServiceTag)
}
}
return nil
}
// HandlePeerFound attempts to connect to the peer described by `PeerInfo`;
// if the connection fails, it logs a warning.
func (n *IpfsNode) HandlePeerFound(p pstore.PeerInfo) {
log.Warning("trying peer info: ", p)
ctx, cancel := context.WithTimeout(n.Context(), discoveryConnTimeout)
defer cancel()
if err := n.PeerHost.Connect(ctx, p); err != nil {
log.Warning("Failed to connect to peer found by discovery: ", err)
}
}
// startOnlineServicesWithHost is the set of services which need to be
// initialized with the host and _before_ we start listening.
func (n *IpfsNode) startOnlineServicesWithHost(ctx context.Context, host p2phost.Host, routingOption RoutingOption, pubsub bool, ipnsps bool) error {
// setup diagnostics service
n.Ping = ping.NewPingService(host)
if pubsub || ipnsps {
cfg, err := n.Repo.Config()
if err != nil {
return err
}
var service *floodsub.PubSub
switch cfg.Pubsub.Router {
case "":
fallthrough
case "floodsub":
service, err = floodsub.NewFloodSub(ctx, host)
case "gossipsub":
service, err = floodsub.NewGossipSub(ctx, host)
default:
err = fmt.Errorf("Unknown pubsub router %s", cfg.Pubsub.Router)
}
if err != nil {
return err
}
n.Floodsub = service
}
// setup routing service
r, err := routingOption(ctx, host, n.Repo.Datastore(), n.RecordValidator)
if err != nil {
return err
}
n.Routing = r
// TODO: I'm not a fan of type assertions like this but the
// `RoutingOption` system doesn't currently provide access to the
// IpfsNode.
//
// Ideally, we'd do something like:
//
// 1. Add some fancy method to introspect into tiered routers to extract
// things like the pubsub router or the DHT (complicated, messy,
// probably not worth it).
// 2. Pass the IpfsNode into the RoutingOption (would also remove the
	//    PSRouter case below).
// 3. Introduce some kind of service manager? (my personal favorite but
// that requires a fair amount of work).
if dht, ok := r.(*dht.IpfsDHT); ok {
n.DHT = dht
}
if ipnsps {
n.PSRouter = psrouter.NewPubsubValueStore(
ctx,
host,
n.Routing,
n.Floodsub,
n.RecordValidator,
)
n.Routing = rhelpers.Tiered{
// Always check pubsub first.
&rhelpers.Compose{
ValueStore: &rhelpers.LimitedValueStore{
ValueStore: n.PSRouter,
Namespaces: []string{"ipns"},
},
},
n.Routing,
}
}
// Wrap standard peer host with routing system to allow unknown peer lookups
n.PeerHost = rhost.Wrap(host, n.Routing)
// setup exchange service
bitswapNetwork := bsnet.NewFromIpfsHost(n.PeerHost, n.Routing)
n.Exchange = bitswap.New(ctx, bitswapNetwork, n.Blockstore)
size, err := n.getCacheSize()
if err != nil {
return err
}
// setup name system
n.Namesys = namesys.NewNameSystem(n.Routing, n.Repo.Datastore(), size)
// setup ipns republishing
return n.setupIpnsRepublisher()
}
// getCacheSize returns cache life and cache size
func (n *IpfsNode) getCacheSize() (int, error) {
cfg, err := n.Repo.Config()
if err != nil {
return 0, err
}
cs := cfg.Ipns.ResolveCacheSize
if cs == 0 {
cs = 128
}
if cs < 0 {
return 0, fmt.Errorf("cannot specify negative resolve cache size")
}
return cs, nil
}
func (n *IpfsNode) setupIpnsRepublisher() error {
cfg, err := n.Repo.Config()
if err != nil {
return err
}
n.IpnsRepub = ipnsrp.NewRepublisher(n.Namesys, n.Repo.Datastore(), n.PrivateKey, n.Repo.Keystore())
if cfg.Ipns.RepublishPeriod != "" {
d, err := time.ParseDuration(cfg.Ipns.RepublishPeriod)
if err != nil {
return fmt.Errorf("failure to parse config setting IPNS.RepublishPeriod: %s", err)
}
if !u.Debug && (d < time.Minute || d > (time.Hour*24)) {
return fmt.Errorf("config setting IPNS.RepublishPeriod is not between 1min and 1day: %s", d)
}
n.IpnsRepub.Interval = d
}
if cfg.Ipns.RecordLifetime != "" {
		d, err := time.ParseDuration(cfg.Ipns.RecordLifetime)
if err != nil {
return fmt.Errorf("failure to parse config setting IPNS.RecordLifetime: %s", err)
}
n.IpnsRepub.RecordLifetime = d
}
n.Process().Go(n.IpnsRepub.Run)
return nil
}
// Process returns the Process object
func (n *IpfsNode) Process() goprocess.Process {
return n.proc
}
// Close calls Close() on the Process object
func (n *IpfsNode) Close() error {
return n.proc.Close()
}
// Context returns the IpfsNode context
func (n *IpfsNode) Context() context.Context {
if n.ctx == nil {
n.ctx = context.TODO()
}
return n.ctx
}
// teardown closes owned children. If any errors occur, this function returns
// the first error.
func (n *IpfsNode) teardown() error {
log.Debug("core is shutting down...")
// owned objects are closed in this teardown to ensure that they're closed
// regardless of which constructor was used to add them to the node.
var closers []io.Closer
// NOTE: The order that objects are added(closed) matters, if an object
// needs to use another during its shutdown/cleanup process, it should be
// closed before that other object
if n.FilesRoot != nil {
closers = append(closers, n.FilesRoot)
}
if n.Exchange != nil {
closers = append(closers, n.Exchange)
}
if n.Mounts.Ipfs != nil && !n.Mounts.Ipfs.IsActive() {
closers = append(closers, mount.Closer(n.Mounts.Ipfs))
}
if n.Mounts.Ipns != nil && !n.Mounts.Ipns.IsActive() {
closers = append(closers, mount.Closer(n.Mounts.Ipns))
}
if n.DHT != nil {
closers = append(closers, n.DHT.Process())
}
if n.Blocks != nil {
closers = append(closers, n.Blocks)
}
if n.Bootstrapper != nil {
closers = append(closers, n.Bootstrapper)
}
if n.PeerHost != nil {
closers = append(closers, n.PeerHost)
}
// Repo closed last, most things need to preserve state here
closers = append(closers, n.Repo)
var errs []error
for _, closer := range closers {
if err := closer.Close(); err != nil {
errs = append(errs, err)
}
}
if len(errs) > 0 {
return errs[0]
}
return nil
}
// OnlineMode returns whether or not the IpfsNode is in OnlineMode.
func (n *IpfsNode) OnlineMode() bool {
return n.mode == onlineMode
}
// SetLocal will set the IpfsNode to local mode
func (n *IpfsNode) SetLocal(isLocal bool) {
if isLocal {
n.mode = localMode
}
n.localModeSet = true
}
// LocalMode returns whether or not the IpfsNode is in LocalMode
func (n *IpfsNode) LocalMode() bool {
if !n.localModeSet {
// programmer error should not happen
panic("local mode not set")
}
return n.mode == localMode
}
// Bootstrap will set and call the IpfsNodes bootstrap function.
func (n *IpfsNode) Bootstrap(cfg BootstrapConfig) error {
// TODO what should return value be when in offlineMode?
if n.Routing == nil {
return nil
}
if n.Bootstrapper != nil {
n.Bootstrapper.Close() // stop previous bootstrap process.
}
// if the caller did not specify a bootstrap peer function, get the
// freshest bootstrap peers from config. this responds to live changes.
if cfg.BootstrapPeers == nil {
cfg.BootstrapPeers = func() []pstore.PeerInfo {
ps, err := n.loadBootstrapPeers()
if err != nil {
log.Warning("failed to parse bootstrap peers from config")
return nil
}
return ps
}
}
var err error
n.Bootstrapper, err = Bootstrap(n, cfg)
return err
}
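// loadID reads the peer ID from the repo config, validates it, and stores it
// on the node.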
func (n *IpfsNode) loadID() error {
if n.Identity != "" {
return errors.New("identity already loaded")
}
cfg, err := n.Repo.Config()
if err != nil {
return err
}
cid := cfg.Identity.PeerID
if cid == "" {
return errors.New("identity was not set in config (was 'ipfs init' run?)")
}
if len(cid) == 0 {
return errors.New("no peer ID in config! (was 'ipfs init' run?)")
}
id, err := peer.IDB58Decode(cid)
if err != nil {
return fmt.Errorf("peer ID invalid: %s", err)
}
n.Identity = id
return nil
}
// GetKey will return a key from the Keystore with name `name`.
func (n *IpfsNode) GetKey(name string) (ic.PrivKey, error) {
if name == "self" {
return n.PrivateKey, nil
} else {
return n.Repo.Keystore().Get(name)
}
}
func (n *IpfsNode) LoadPrivateKey() error {
if n.Identity == "" || n.Peerstore == nil {
return errors.New("loaded private key out of order")
}
if n.PrivateKey != nil {
return errors.New("private key already loaded")
}
cfg, err := n.Repo.Config()
if err != nil {
return err
}
sk, err := loadPrivateKey(&cfg.Identity, n.Identity)
if err != nil {
return err
}
n.PrivateKey = sk
n.Peerstore.AddPrivKey(n.Identity, n.PrivateKey)
n.Peerstore.AddPubKey(n.Identity, sk.GetPublic())
return nil
}
func (n *IpfsNode) loadBootstrapPeers() ([]pstore.PeerInfo, error) {
cfg, err := n.Repo.Config()
if err != nil {
return nil, err
}
parsed, err := cfg.BootstrapPeers()
if err != nil {
return nil, err
}
return toPeerInfos(parsed), nil
}
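// loadFilesRoot loads the MFS files root from the datastore, creating an
// empty directory node on first use.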
func (n *IpfsNode) loadFilesRoot() error {
dsk := ds.NewKey("/local/filesroot")
pf := func(ctx context.Context, c *cid.Cid) error {
return n.Repo.Datastore().Put(dsk, c.Bytes())
}
var nd *merkledag.ProtoNode
val, err := n.Repo.Datastore().Get(dsk)
switch {
case err == ds.ErrNotFound || val == nil:
nd = ft.EmptyDirNode()
err := n.DAG.Add(n.Context(), nd)
if err != nil {
return fmt.Errorf("failure writing to dagstore: %s", err)
}
case err == nil:
c, err := cid.Cast(val)
if err != nil {
return err
}
rnd, err := n.DAG.Get(n.Context(), c)
if err != nil {
return fmt.Errorf("error loading filesroot from DAG: %s", err)
}
pbnd, ok := rnd.(*merkledag.ProtoNode)
if !ok {
return merkledag.ErrNotProtobuf
}
nd = pbnd
default:
return err
}
mr, err := mfs.NewRoot(n.Context(), n.DAG, nd, pf)
if err != nil {
return err
}
n.FilesRoot = mr
return nil
}
// SetupOfflineRouting instantiates a routing system in offline mode. This is
// primarily used for offline ipns modifications.
func (n *IpfsNode) SetupOfflineRouting() error {
if n.Routing != nil {
// Routing was already set up
return nil
}
// TODO: move this somewhere else.
err := n.LoadPrivateKey()
if err != nil {
return err
}
n.Routing = offroute.NewOfflineRouter(n.Repo.Datastore(), n.RecordValidator)
size, err := n.getCacheSize()
if err != nil {
return err
}
n.Namesys = namesys.NewNameSystem(n.Routing, n.Repo.Datastore(), size)
return nil
}
func loadPrivateKey(cfg *config.Identity, id peer.ID) (ic.PrivKey, error) {
sk, err := cfg.DecodePrivateKey("passphrase todo!")
if err != nil {
return nil, err
}
id2, err := peer.IDFromPrivateKey(sk)
if err != nil {
return nil, err
}
if id2 != id {
return nil, fmt.Errorf("private key in config does not match id: %s != %s", id, id2)
}
return sk, nil
}
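// listenAddresses parses the swarm listen addresses from the config.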
func listenAddresses(cfg *config.Config) ([]ma.Multiaddr, error) {
var listen []ma.Multiaddr
for _, addr := range cfg.Addresses.Swarm {
maddr, err := ma.NewMultiaddr(addr)
if err != nil {
return nil, fmt.Errorf("failure to parse config.Addresses.Swarm: %s", cfg.Addresses.Swarm)
}
listen = append(listen, maddr)
}
return listen, nil
}
type ConstructPeerHostOpts struct {
AddrsFactory p2pbhost.AddrsFactory
DisableNatPortMap bool
DisableRelay bool
EnableRelayHop bool
ConnectionManager ifconnmgr.ConnManager
}
type HostOption func(ctx context.Context, id peer.ID, ps pstore.Peerstore, options ...libp2p.Option) (p2phost.Host, error)
var DefaultHostOption HostOption = constructPeerHost
// constructPeerHost isolates the complex initialization steps of building a libp2p host.
func constructPeerHost(ctx context.Context, id peer.ID, ps pstore.Peerstore, options ...libp2p.Option) (p2phost.Host, error) {
pkey := ps.PrivKey(id)
if pkey == nil {
return nil, fmt.Errorf("missing private key for node ID: %s", id.Pretty())
}
options = append([]libp2p.Option{libp2p.Identity(pkey), libp2p.Peerstore(ps)}, options...)
return libp2p.New(ctx, options...)
}
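// filterRelayAddrs drops relay (p2p-circuit) addresses from the list.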
func filterRelayAddrs(addrs []ma.Multiaddr) []ma.Multiaddr {
var raddrs []ma.Multiaddr
for _, addr := range addrs {
_, err := addr.ValueForProtocol(circuit.P_CIRCUIT)
if err == nil {
continue
}
raddrs = append(raddrs, addr)
}
return raddrs
}
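// composeAddrsFactory chains two AddrsFactory functions, applying g first
// and then f.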
func composeAddrsFactory(f, g p2pbhost.AddrsFactory) p2pbhost.AddrsFactory {
return func(addrs []ma.Multiaddr) []ma.Multiaddr {
return f(g(addrs))
}
}
// startListening on the network addresses
func startListening(host p2phost.Host, cfg *config.Config) error {
listenAddrs, err := listenAddresses(cfg)
if err != nil {
return err
}
// Actually start listening:
if err := host.Network().Listen(listenAddrs...); err != nil {
return err
}
// list out our addresses
addrs, err := host.Network().InterfaceListenAddresses()
if err != nil {
return err
}
log.Infof("Swarm listening at: %s", addrs)
return nil
}
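// constructDHTRouting builds a DHT routing system backed by the given
// datastore and record validator.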
func constructDHTRouting(ctx context.Context, host p2phost.Host, dstore ds.Batching, validator record.Validator) (routing.IpfsRouting, error) {
return dht.New(
ctx, host,
dhtopts.Datastore(dstore),
dhtopts.Validator(validator),
)
}
func constructClientDHTRouting(ctx context.Context, host p2phost.Host, dstore ds.Batching, validator record.Validator) (routing.IpfsRouting, error) {
return dht.New(
ctx, host,
dhtopts.Client(true),
dhtopts.Datastore(dstore),
dhtopts.Validator(validator),
)
}
type RoutingOption func(context.Context, p2phost.Host, ds.Batching, record.Validator) (routing.IpfsRouting, error)
type DiscoveryOption func(context.Context, p2phost.Host) (discovery.Service, error)
var DHTOption RoutingOption = constructDHTRouting
var DHTClientOption RoutingOption = constructClientDHTRouting
var NilRouterOption RoutingOption = nilrouting.ConstructNilRouting
| [
"\"YAMUX_DEBUG\"",
"\"LIBP2P_MUX_PREFS\""
]
| []
| [
"YAMUX_DEBUG",
"LIBP2P_MUX_PREFS"
]
| [] | ["YAMUX_DEBUG", "LIBP2P_MUX_PREFS"] | go | 2 | 0 | |
src/ngrok/server/main.go | package server
import (
"crypto/tls"
"math/rand"
"github.com/grokker001/ngrok/src/ngrok/conn"
log "github.com/grokker001/ngrok/src/ngrok/log"
"github.com/grokker001/ngrok/src/ngrok/msg"
"github.com/grokker001/ngrok/src/ngrok/util"
"os"
"runtime/debug"
"time"
)
const (
registryCacheSize uint64 = 1024 * 1024 // 1 MB
connReadTimeout time.Duration = 10 * time.Second
)
// GLOBALS
var (
tunnelRegistry *TunnelRegistry
controlRegistry *ControlRegistry
// XXX: kill these global variables - they're only used in tunnel.go for constructing forwarding URLs
opts *Options
listeners map[string]*conn.Listener
)
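// NewProxy registers a newly accepted proxy connection with the control
// connection identified by regPxy.ClientId, closing the connection if
// registration fails.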
func NewProxy(pxyConn conn.Conn, regPxy *msg.RegProxy) {
// fail gracefully if the proxy connection fails to register
defer func() {
if r := recover(); r != nil {
pxyConn.Warn("Failed with error: %v", r)
pxyConn.Close()
}
}()
// set logging prefix
pxyConn.SetType("pxy")
// look up the control connection for this proxy
pxyConn.Info("Registering new proxy for %s", regPxy.ClientId)
ctl := controlRegistry.Get(regPxy.ClientId)
if ctl == nil {
panic("No client found for identifier: " + regPxy.ClientId)
}
ctl.RegisterProxy(pxyConn)
}
// Listen for incoming control and proxy connections
// We listen for incoming control and proxy connections on the same port
// for ease of deployment. The hope is that by running on port 443, using
// TLS and running all connections over the same port, we can bust through
// restrictive firewalls.
func tunnelListener(addr string, tlsConfig *tls.Config) {
// listen for incoming connections
listener, err := conn.Listen(addr, "tun", tlsConfig)
if err != nil {
panic(err)
}
log.Info("Listening for control and proxy connections on %s", listener.Addr.String())
for c := range listener.Conns {
go func(tunnelConn conn.Conn) {
// don't crash on panics
defer func() {
if r := recover(); r != nil {
tunnelConn.Info("tunnelListener failed with error %v: %s", r, debug.Stack())
}
}()
tunnelConn.SetReadDeadline(time.Now().Add(connReadTimeout))
var rawMsg msg.Message
if rawMsg, err = msg.ReadMsg(tunnelConn); err != nil {
tunnelConn.Warn("Failed to read message: %v", err)
tunnelConn.Close()
return
}
// don't timeout after the initial read, tunnel heartbeating will kill
// dead connections
tunnelConn.SetReadDeadline(time.Time{})
switch m := rawMsg.(type) {
case *msg.Auth:
NewControl(tunnelConn, m)
case *msg.RegProxy:
NewProxy(tunnelConn, m)
default:
tunnelConn.Close()
}
}(c)
}
}
func Main() {
// parse options
opts = parseArgs()
// init logging
log.LogTo(opts.logto, opts.loglevel)
// seed random number generator
seed, err := util.RandomSeed()
if err != nil {
panic(err)
}
rand.Seed(seed)
// init tunnel/control registry
registryCacheFile := os.Getenv("REGISTRY_CACHE_FILE")
tunnelRegistry = NewTunnelRegistry(registryCacheSize, registryCacheFile)
controlRegistry = NewControlRegistry()
// start listeners
listeners = make(map[string]*conn.Listener)
// load tls configuration
tlsConfig, err := LoadTLSConfig(opts.tlsCrt, opts.tlsKey)
if err != nil {
panic(err)
}
// listen for http
if opts.httpAddr != "" {
listeners["http"] = startHttpListener(opts.httpAddr, nil)
}
// listen for https
if opts.httpsAddr != "" {
listeners["https"] = startHttpListener(opts.httpsAddr, tlsConfig)
}
// ngrok clients
tunnelListener(opts.tunnelAddr, tlsConfig)
}
| [
"\"REGISTRY_CACHE_FILE\""
]
| []
| [
"REGISTRY_CACHE_FILE"
]
| [] | ["REGISTRY_CACHE_FILE"] | go | 1 | 0 | |
manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "swampytodo.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
backend/manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'StreamingServer.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
source/devel/_setup_util.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
'''This file generates shell code for the setup.SHELL scripts to set environment variables'''
from __future__ import print_function
import argparse
import copy
import errno
import os
import platform
import sys
CATKIN_MARKER_FILE = '.catkin'
system = platform.system()
IS_DARWIN = (system == 'Darwin')
IS_WINDOWS = (system == 'Windows')
# subfolder of workspace prepended to CMAKE_PREFIX_PATH
ENV_VAR_SUBFOLDERS = {
'CMAKE_PREFIX_PATH': '',
'LD_LIBRARY_PATH' if not IS_DARWIN else 'DYLD_LIBRARY_PATH': ['lib', os.path.join('lib', 'arm-linux-gnueabihf')],
'PATH': 'bin',
'PKG_CONFIG_PATH': [os.path.join('lib', 'pkgconfig'), os.path.join('lib', 'arm-linux-gnueabihf', 'pkgconfig')],
'PYTHONPATH': 'lib/python2.7/dist-packages',
}
def rollback_env_variables(environ, env_var_subfolders):
'''
Generate shell code to reset environment variables
by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH.
This does not cover modifications performed by environment hooks.
'''
lines = []
unmodified_environ = copy.copy(environ)
for key in sorted(env_var_subfolders.keys()):
subfolders = env_var_subfolders[key]
if not isinstance(subfolders, list):
subfolders = [subfolders]
value = _rollback_env_variable(unmodified_environ, key, subfolders)
if value is not None:
environ[key] = value
lines.append(assignment(key, value))
if lines:
lines.insert(0, comment('reset environment variables by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH'))
return lines
def _rollback_env_variable(environ, name, subfolders):
'''
For each catkin workspace in CMAKE_PREFIX_PATH remove the first entry from env[NAME] matching workspace + subfolder.
:param subfolders: list of str '' or subfoldername that may start with '/'
:returns: the updated value of the environment variable.
'''
value = environ[name] if name in environ else ''
env_paths = [path for path in value.split(os.pathsep) if path]
value_modified = False
for subfolder in subfolders:
if subfolder:
if subfolder.startswith(os.path.sep) or (os.path.altsep and subfolder.startswith(os.path.altsep)):
subfolder = subfolder[1:]
if subfolder.endswith(os.path.sep) or (os.path.altsep and subfolder.endswith(os.path.altsep)):
subfolder = subfolder[:-1]
for ws_path in _get_workspaces(environ, include_fuerte=True, include_non_existing=True):
path_to_find = os.path.join(ws_path, subfolder) if subfolder else ws_path
path_to_remove = None
for env_path in env_paths:
env_path_clean = env_path[:-1] if env_path and env_path[-1] in [os.path.sep, os.path.altsep] else env_path
if env_path_clean == path_to_find:
path_to_remove = env_path
break
if path_to_remove:
env_paths.remove(path_to_remove)
value_modified = True
new_value = os.pathsep.join(env_paths)
return new_value if value_modified else None
def _get_workspaces(environ, include_fuerte=False, include_non_existing=False):
'''
Based on CMAKE_PREFIX_PATH return all catkin workspaces.
    :param include_fuerte: Whether paths starting with '/opt/ros/fuerte' should be considered workspaces, ``bool``
'''
# get all cmake prefix paths
env_name = 'CMAKE_PREFIX_PATH'
value = environ[env_name] if env_name in environ else ''
paths = [path for path in value.split(os.pathsep) if path]
# remove non-workspace paths
workspaces = [path for path in paths if os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE)) or (include_fuerte and path.startswith('/opt/ros/fuerte')) or (include_non_existing and not os.path.exists(path))]
return workspaces
def prepend_env_variables(environ, env_var_subfolders, workspaces):
'''
Generate shell code to prepend environment variables
for the all workspaces.
'''
lines = []
lines.append(comment('prepend folders of workspaces to environment variables'))
paths = [path for path in workspaces.split(os.pathsep) if path]
prefix = _prefix_env_variable(environ, 'CMAKE_PREFIX_PATH', paths, '')
lines.append(prepend(environ, 'CMAKE_PREFIX_PATH', prefix))
for key in sorted([key for key in env_var_subfolders.keys() if key != 'CMAKE_PREFIX_PATH']):
subfolder = env_var_subfolders[key]
prefix = _prefix_env_variable(environ, key, paths, subfolder)
lines.append(prepend(environ, key, prefix))
return lines
def _prefix_env_variable(environ, name, paths, subfolders):
'''
    Return the prefix to prepend to the environment variable NAME, adding any path in ``paths`` (combined with each subfolder) without creating duplicate or empty items.
'''
value = environ[name] if name in environ else ''
environ_paths = [path for path in value.split(os.pathsep) if path]
checked_paths = []
for path in paths:
if not isinstance(subfolders, list):
subfolders = [subfolders]
for subfolder in subfolders:
path_tmp = path
if subfolder:
path_tmp = os.path.join(path_tmp, subfolder)
# skip nonexistent paths
if not os.path.exists(path_tmp):
continue
# exclude any path already in env and any path we already added
if path_tmp not in environ_paths and path_tmp not in checked_paths:
checked_paths.append(path_tmp)
prefix_str = os.pathsep.join(checked_paths)
if prefix_str != '' and environ_paths:
prefix_str += os.pathsep
return prefix_str
def assignment(key, value):
if not IS_WINDOWS:
return 'export %s="%s"' % (key, value)
else:
return 'set %s=%s' % (key, value)
def comment(msg):
if not IS_WINDOWS:
return '# %s' % msg
else:
return 'REM %s' % msg
def prepend(environ, key, prefix):
if key not in environ or not environ[key]:
return assignment(key, prefix)
if not IS_WINDOWS:
return 'export %s="%s$%s"' % (key, prefix, key)
else:
return 'set %s=%s%%%s%%' % (key, prefix, key)
def find_env_hooks(environ, cmake_prefix_path):
'''
Generate shell code with found environment hooks
for the all workspaces.
'''
lines = []
lines.append(comment('found environment hooks in workspaces'))
generic_env_hooks = []
generic_env_hooks_workspace = []
specific_env_hooks = []
specific_env_hooks_workspace = []
generic_env_hooks_by_filename = {}
specific_env_hooks_by_filename = {}
generic_env_hook_ext = 'bat' if IS_WINDOWS else 'sh'
specific_env_hook_ext = environ['CATKIN_SHELL'] if not IS_WINDOWS and 'CATKIN_SHELL' in environ and environ['CATKIN_SHELL'] else None
# remove non-workspace paths
workspaces = [path for path in cmake_prefix_path.split(os.pathsep) if path and os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE))]
for workspace in reversed(workspaces):
env_hook_dir = os.path.join(workspace, 'etc', 'catkin', 'profile.d')
if os.path.isdir(env_hook_dir):
for filename in sorted(os.listdir(env_hook_dir)):
if filename.endswith('.%s' % generic_env_hook_ext):
# remove previous env hook with same name if present
if filename in generic_env_hooks_by_filename:
i = generic_env_hooks.index(generic_env_hooks_by_filename[filename])
generic_env_hooks.pop(i)
generic_env_hooks_workspace.pop(i)
# append env hook
generic_env_hooks.append(os.path.join(env_hook_dir, filename))
generic_env_hooks_workspace.append(workspace)
generic_env_hooks_by_filename[filename] = generic_env_hooks[-1]
elif specific_env_hook_ext is not None and filename.endswith('.%s' % specific_env_hook_ext):
# remove previous env hook with same name if present
if filename in specific_env_hooks_by_filename:
i = specific_env_hooks.index(specific_env_hooks_by_filename[filename])
specific_env_hooks.pop(i)
specific_env_hooks_workspace.pop(i)
# append env hook
specific_env_hooks.append(os.path.join(env_hook_dir, filename))
specific_env_hooks_workspace.append(workspace)
specific_env_hooks_by_filename[filename] = specific_env_hooks[-1]
env_hooks = generic_env_hooks + specific_env_hooks
env_hooks_workspace = generic_env_hooks_workspace + specific_env_hooks_workspace
count = len(env_hooks)
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_COUNT', count))
for i in range(count):
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d' % i, env_hooks[i]))
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d_WORKSPACE' % i, env_hooks_workspace[i]))
return lines
def _parse_arguments(args=None):
parser = argparse.ArgumentParser(description='Generates code blocks for the setup.SHELL script.')
parser.add_argument('--extend', action='store_true', help='Skip unsetting previous environment variables to extend context')
return parser.parse_known_args(args=args)[0]
if __name__ == '__main__':
try:
try:
args = _parse_arguments()
except Exception as e:
print(e, file=sys.stderr)
sys.exit(1)
# environment at generation time
CMAKE_PREFIX_PATH = '/home/bonobono/catkin_ws/devel;/opt/ros/kinetic'.split(';')
# prepend current workspace if not already part of CPP
base_path = os.path.dirname(__file__)
if base_path not in CMAKE_PREFIX_PATH:
CMAKE_PREFIX_PATH.insert(0, base_path)
CMAKE_PREFIX_PATH = os.pathsep.join(CMAKE_PREFIX_PATH)
environ = dict(os.environ)
lines = []
if not args.extend:
lines += rollback_env_variables(environ, ENV_VAR_SUBFOLDERS)
lines += prepend_env_variables(environ, ENV_VAR_SUBFOLDERS, CMAKE_PREFIX_PATH)
lines += find_env_hooks(environ, CMAKE_PREFIX_PATH)
print('\n'.join(lines))
# need to explicitly flush the output
sys.stdout.flush()
except IOError as e:
# and catch potential "broken pipe" if stdout is not writable
# which can happen when piping the output to a file but the disk is full
if e.errno == errno.EPIPE:
print(e, file=sys.stderr)
sys.exit(2)
raise
sys.exit(0)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
client_test.go | package form3
import (
"context"
"encoding/json"
"fmt"
"net/http"
"os"
"testing"
"github.com/ahmedkamals/form3/internal/errors"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/suite"
)
type (
Form3TestSuite struct {
suite.Suite
apiClient *Client
seedAccounts []AccountData
toCreateAccounts []AccountData
}
fixturesData map[string][]AccountData
)
const (
seed string = "seed"
toCreate string = "toCreate"
	invalid  string = "invalid"
)
func (suite *Form3TestSuite) SetupTest() {
suite.seedAccounts = suite.loadFixtures()[seed]
ctx := context.Background()
for _, account := range suite.seedAccounts {
_, err := suite.apiClient.CreateAccount(ctx, account)
if err != nil {
suite.Error(err)
suite.Fail(err.Error())
}
}
}
func (suite *Form3TestSuite) TearDownTest() {
ctx := context.Background()
toDeleteAccounts := append(suite.toCreateAccounts, suite.seedAccounts...)
for _, accountToDelete := range toDeleteAccounts {
err := suite.apiClient.DeleteAccount(ctx, uuid.MustParse(accountToDelete.UUID), *accountToDelete.Version)
if err != nil && !errors.Is(errors.Kind(http.StatusNotFound), err) {
suite.Fail(err.Error())
}
}
suite.toCreateAccounts = []AccountData{}
}
func (suite *Form3TestSuite) TestCreateAccount() {
accounts := suite.loadFixtures()
suite.toCreateAccounts = accounts[toCreate]
invalidAccounts := accounts[invalid]
testCases := []struct {
id string
input AccountData
expected *AccountData
expectedError error
}{
{
id: "Should report that id is not a seed uuid",
input: invalidAccounts[0],
expected: nil,
expectedError: errors.Errorf("id in body must be of type uuid: \"invalid_uuid\""),
},
{
id: "Should not able to create account - duplicate entry",
input: invalidAccounts[1],
expected: nil,
expectedError: errors.Errorf(fmt.Sprintf("Account cannot be created as it violates a duplicate constraint")),
},
{
id: "Should be able to create the account",
input: suite.toCreateAccounts[0],
expected: &suite.toCreateAccounts[0],
expectedError: nil,
},
}
ctx := context.Background()
for _, testCase := range testCases {
suite.T().Run(testCase.id, func(t *testing.T) {
accountData, err := suite.apiClient.CreateAccount(ctx, testCase.input)
if testCase.expectedError == nil {
assert.Nil(t, err)
assert.Equal(t, testCase.expected, accountData)
return
}
assert.NotNil(t, err, err.Error())
assert.Contains(t, err.Error(), testCase.expectedError.Error())
})
}
}
func (suite *Form3TestSuite) TestFetchAccount() {
notExistAccountUUID, _ := uuid.NewUUID()
testCases := []struct {
id string
input uuid.UUID
expected *AccountData
expectedError error
}{
{
id: "Should not able to fetch account",
input: notExistAccountUUID,
expected: nil,
expectedError: errors.Errorf(fmt.Sprintf("record %s does not exist", notExistAccountUUID)),
},
{
id: "Should be able to fetch the account",
input: uuid.MustParse(suite.seedAccounts[0].UUID),
expected: &suite.seedAccounts[0],
expectedError: nil,
},
}
ctx := context.Background()
for _, testCase := range testCases {
suite.T().Run(testCase.id, func(t *testing.T) {
accountData, err := suite.apiClient.FetchAccount(ctx, testCase.input)
if testCase.expectedError == nil {
assert.Nil(t, err)
assert.Equal(t, testCase.expected, accountData)
return
}
assert.NotNil(t, err, err.Error())
assert.Equal(t, testCase.expectedError.Error(), err.Error())
})
}
}
func (suite *Form3TestSuite) TestDeleteAccount() {
notExistAccountUUID, _ := uuid.NewUUID()
testCases := []struct {
id string
input map[uuid.UUID]uint64
expectedError error
}{
{
id: "Should not able to delete account - UUID does not exist",
input: map[uuid.UUID]uint64{notExistAccountUUID: 0},
expectedError: errors.Errorf("EOF"),
},
{
id: "Should not able to delete account - version does not exist",
input: map[uuid.UUID]uint64{uuid.MustParse(suite.seedAccounts[0].UUID): 1},
expectedError: errors.Errorf("invalid version"),
},
{
id: "Should be able to find the account",
input: map[uuid.UUID]uint64{uuid.MustParse(suite.seedAccounts[0].UUID): 0},
expectedError: nil,
},
}
ctx := context.Background()
for _, testCase := range testCases {
suite.T().Run(testCase.id, func(t *testing.T) {
for uuidValue, version := range testCase.input {
err := suite.apiClient.DeleteAccount(ctx, uuidValue, version)
if testCase.expectedError == nil {
assert.Nil(t, err)
return
}
assert.NotNil(t, err, err.Error())
assert.Equal(t, testCase.expectedError.Error(), err.Error())
}
})
}
}
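// loadFixtures configures the API client from the API_ENDPOINT environment
// variable and decodes the seed/toCreate/invalid account fixtures from the
// JSON file named by FIXTURES_PATH.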
func (suite *Form3TestSuite) loadFixtures() fixturesData {
config := Config{
endpoint: os.Getenv("API_ENDPOINT"),
}
suite.apiClient = NewClient(config, &http.Client{})
fixtures, err := os.Open(os.Getenv("FIXTURES_PATH"))
if err != nil {
suite.Fail(err.Error())
}
var fixturesData fixturesData
err = json.NewDecoder(fixtures).Decode(&fixturesData)
if err != nil {
suite.Fail(err.Error())
}
return fixturesData
}
func TestForm3TestSuite(t *testing.T) {
suite.Run(t, new(Form3TestSuite))
}
| [
"\"API_ENDPOINT\"",
"\"FIXTURES_PATH\""
]
| []
| [
"FIXTURES_PATH",
"API_ENDPOINT"
]
| [] | ["FIXTURES_PATH", "API_ENDPOINT"] | go | 2 | 0 | |
apache/tests/test_apache.py | # (C) Datadog, Inc. 2010-2018
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
import pytest
import os
import subprocess
import requests
import time
import logging
from datadog_checks.apache import Apache
log = logging.getLogger('test_apache')
CHECK_NAME = 'apache'
HERE = os.path.dirname(os.path.abspath(__file__))
HOST = os.getenv('DOCKER_HOSTNAME', 'localhost')
PORT = '18180'
BASE_URL = "http://{0}:{1}".format(HOST, PORT)
STATUS_URL = "{0}/server-status".format(BASE_URL)
AUTO_STATUS_URL = "{0}?auto".format(STATUS_URL)
STATUS_CONFIG = {
'apache_status_url': STATUS_URL,
'tags': ['instance:first']
}
AUTO_CONFIG = {
'apache_status_url': AUTO_STATUS_URL,
'tags': ['instance:second']
}
BAD_CONFIG = {
'apache_status_url': 'http://localhost:1234/server-status',
}
APACHE_GAUGES = [
'apache.performance.idle_workers',
'apache.performance.busy_workers',
'apache.performance.cpu_load',
'apache.performance.uptime',
'apache.net.bytes',
'apache.net.hits',
'apache.conns_total',
'apache.conns_async_writing',
'apache.conns_async_keep_alive',
'apache.conns_async_closing'
]
APACHE_RATES = [
'apache.net.bytes_per_s',
'apache.net.request_per_s'
]
def wait_for_apache():
for _ in xrange(0, 100):
res = None
try:
res = requests.get(STATUS_URL)
            res.raise_for_status()
return
except Exception as e:
log.info("exception: {0} res: {1}".format(e, res))
time.sleep(2)
raise Exception("Cannot start up apache")
@pytest.fixture(scope="session")
def spin_up_apache():
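    """Session fixture: build and start the Apache container via docker-compose, tear it down when the session ends."""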
env = os.environ
env['APACHE_CONFIG'] = os.path.join(HERE, 'compose', 'httpd.conf')
env['APACHE_DOCKERFILE'] = os.path.join(HERE, 'compose', 'Dockerfile')
args = [
"docker-compose",
"-f", os.path.join(HERE, 'compose', 'apache.yaml')
]
subprocess.check_call(args + ["up", "-d", "--build"], env=env)
wait_for_apache()
for _ in xrange(0, 100):
requests.get(BASE_URL)
time.sleep(20)
yield
subprocess.check_call(args + ["down"], env=env)
@pytest.fixture
def aggregator():
from datadog_checks.stubs import aggregator
aggregator.reset()
return aggregator
def test_connection_failure(aggregator, spin_up_apache):
apache_check = Apache(CHECK_NAME, {}, {})
with pytest.raises(Exception):
apache_check.check(BAD_CONFIG)
assert aggregator.service_checks('apache.can_connect')[0].status == Apache.CRITICAL
assert len(aggregator._metrics) == 0
def test_check(aggregator, spin_up_apache):
apache_check = Apache(CHECK_NAME, {}, {})
apache_check.check(STATUS_CONFIG)
tags = STATUS_CONFIG['tags']
for mname in APACHE_GAUGES + APACHE_RATES:
aggregator.assert_metric(mname, tags=tags, count=1)
assert aggregator.service_checks('apache.can_connect')[0].status == Apache.OK
sc_tags = ['host:' + HOST, 'port:' + PORT] + tags
for sc in aggregator.service_checks('apache.can_connect'):
for tag in sc.tags:
assert tag in sc_tags
assert aggregator.metrics_asserted_pct == 100.0
def test_check_auto(aggregator, spin_up_apache):
apache_check = Apache(CHECK_NAME, {}, {})
apache_check.check(AUTO_CONFIG)
tags = AUTO_CONFIG['tags']
for mname in APACHE_GAUGES + APACHE_RATES:
aggregator.assert_metric(mname, tags=tags, count=1)
assert aggregator.service_checks('apache.can_connect')[0].status == Apache.OK
sc_tags = ['host:' + HOST, 'port:' + PORT] + tags
for sc in aggregator.service_checks('apache.can_connect'):
for tag in sc.tags:
assert tag in sc_tags
assert aggregator.metrics_asserted_pct == 100.0
aggregator.reset()
| []
| []
| [
"DOCKER_HOSTNAME"
]
| [] | ["DOCKER_HOSTNAME"] | python | 1 | 0 | |
cmd/gardener-extension-provider-packet/app/app.go | // Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package app
import (
"context"
"fmt"
"os"
druidv1alpha1 "github.com/gardener/etcd-druid/api/v1alpha1"
packetinstall "github.com/gardener/gardener-extension-provider-packet/pkg/apis/packet/install"
packetcmd "github.com/gardener/gardener-extension-provider-packet/pkg/cmd"
packetcontrolplane "github.com/gardener/gardener-extension-provider-packet/pkg/controller/controlplane"
"github.com/gardener/gardener-extension-provider-packet/pkg/controller/healthcheck"
packetinfrastructure "github.com/gardener/gardener-extension-provider-packet/pkg/controller/infrastructure"
packetworker "github.com/gardener/gardener-extension-provider-packet/pkg/controller/worker"
"github.com/gardener/gardener-extension-provider-packet/pkg/packet"
packetcontrolplaneexposure "github.com/gardener/gardener-extension-provider-packet/pkg/webhook/controlplaneexposure"
"github.com/gardener/gardener-extensions/pkg/controller"
controllercmd "github.com/gardener/gardener-extensions/pkg/controller/cmd"
"github.com/gardener/gardener-extensions/pkg/controller/worker"
"github.com/gardener/gardener-extensions/pkg/util"
webhookcmd "github.com/gardener/gardener-extensions/pkg/webhook/cmd"
machinev1alpha1 "github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1"
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/manager"
)
// NewControllerManagerCommand creates a new command for running a Packet provider controller.
func NewControllerManagerCommand(ctx context.Context) *cobra.Command {
var (
restOpts = &controllercmd.RESTOptions{}
mgrOpts = &controllercmd.ManagerOptions{
LeaderElection: true,
LeaderElectionID: controllercmd.LeaderElectionNameID(packet.Name),
LeaderElectionNamespace: os.Getenv("LEADER_ELECTION_NAMESPACE"),
WebhookServerPort: 443,
WebhookCertDir: "/tmp/gardener-extensions-cert",
}
configFileOpts = &packetcmd.ConfigOptions{}
// options for the controlplane controller
controlPlaneCtrlOpts = &controllercmd.ControllerOptions{
MaxConcurrentReconciles: 5,
}
// options for the infrastructure controller
infraCtrlOpts = &controllercmd.ControllerOptions{
MaxConcurrentReconciles: 5,
}
reconcileOpts = &controllercmd.ReconcilerOptions{}
// options for the worker controller
workerCtrlOpts = &controllercmd.ControllerOptions{
MaxConcurrentReconciles: 5,
}
workerReconcileOpts = &worker.Options{
DeployCRDs: true,
}
workerCtrlOptsUnprefixed = controllercmd.NewOptionAggregator(workerCtrlOpts, workerReconcileOpts)
// options for the health care controller
healthCheckCtrlOpts = &controllercmd.ControllerOptions{
MaxConcurrentReconciles: 1,
}
// options for the webhook server
webhookServerOptions = &webhookcmd.ServerOptions{
Namespace: os.Getenv("WEBHOOK_CONFIG_NAMESPACE"),
}
controllerSwitches = packetcmd.ControllerSwitchOptions()
webhookSwitches = packetcmd.WebhookSwitchOptions()
webhookOptions = webhookcmd.NewAddToManagerOptions(packet.Name, webhookServerOptions, webhookSwitches)
aggOption = controllercmd.NewOptionAggregator(
restOpts,
mgrOpts,
controllercmd.PrefixOption("controlplane-", controlPlaneCtrlOpts),
controllercmd.PrefixOption("infrastructure-", infraCtrlOpts),
controllercmd.PrefixOption("worker-", &workerCtrlOptsUnprefixed),
controllercmd.PrefixOption("healthcheck-", healthCheckCtrlOpts),
controllerSwitches,
configFileOpts,
reconcileOpts,
webhookOptions,
)
)
cmd := &cobra.Command{
Use: fmt.Sprintf("%s-controller-manager", packet.Name),
Run: func(cmd *cobra.Command, args []string) {
if err := aggOption.Complete(); err != nil {
controllercmd.LogErrAndExit(err, "Error completing options")
}
util.ApplyClientConnectionConfigurationToRESTConfig(configFileOpts.Completed().Config.ClientConnection, restOpts.Completed().Config)
if workerReconcileOpts.Completed().DeployCRDs {
if err := worker.ApplyMachineResourcesForConfig(ctx, restOpts.Completed().Config); err != nil {
controllercmd.LogErrAndExit(err, "Error ensuring the machine CRDs")
}
}
mgr, err := manager.New(restOpts.Completed().Config, mgrOpts.Completed().Options())
if err != nil {
controllercmd.LogErrAndExit(err, "Could not instantiate manager")
}
scheme := mgr.GetScheme()
if err := controller.AddToScheme(scheme); err != nil {
controllercmd.LogErrAndExit(err, "Could not update manager scheme")
}
if err := packetinstall.AddToScheme(scheme); err != nil {
controllercmd.LogErrAndExit(err, "Could not update manager scheme")
}
if err := druidv1alpha1.AddToScheme(scheme); err != nil {
controllercmd.LogErrAndExit(err, "Could not update manager scheme")
}
// add common meta types to schema for controller-runtime to use v1.ListOptions
metav1.AddToGroupVersion(scheme, machinev1alpha1.SchemeGroupVersion)
// add types required for Health check
scheme.AddKnownTypes(machinev1alpha1.SchemeGroupVersion,
&machinev1alpha1.MachineDeploymentList{},
)
configFileOpts.Completed().ApplyETCDStorage(&packetcontrolplaneexposure.DefaultAddOptions.ETCDStorage)
configFileOpts.Completed().ApplyHealthCheckConfig(&healthcheck.DefaultAddOptions.HealthCheckConfig)
controlPlaneCtrlOpts.Completed().Apply(&packetcontrolplane.DefaultAddOptions.Controller)
healthCheckCtrlOpts.Completed().Apply(&healthcheck.DefaultAddOptions.Controller)
infraCtrlOpts.Completed().Apply(&packetinfrastructure.DefaultAddOptions.Controller)
reconcileOpts.Completed().Apply(&packetinfrastructure.DefaultAddOptions.IgnoreOperationAnnotation)
reconcileOpts.Completed().Apply(&packetcontrolplane.DefaultAddOptions.IgnoreOperationAnnotation)
reconcileOpts.Completed().Apply(&packetworker.DefaultAddOptions.IgnoreOperationAnnotation)
workerCtrlOpts.Completed().Apply(&packetworker.DefaultAddOptions.Controller)
_, shootWebhooks, err := webhookOptions.Completed().AddToManager(mgr)
if err != nil {
controllercmd.LogErrAndExit(err, "Could not add webhooks to manager")
}
packetcontrolplane.DefaultAddOptions.ShootWebhooks = shootWebhooks
if err := controllerSwitches.Completed().AddToManager(mgr); err != nil {
controllercmd.LogErrAndExit(err, "Could not add controllers to manager")
}
if err := mgr.Start(ctx.Done()); err != nil {
controllercmd.LogErrAndExit(err, "Error running manager")
}
},
}
aggOption.AddFlags(cmd.Flags())
return cmd
}
| [
"\"LEADER_ELECTION_NAMESPACE\"",
"\"WEBHOOK_CONFIG_NAMESPACE\""
]
| []
| [
"LEADER_ELECTION_NAMESPACE",
"WEBHOOK_CONFIG_NAMESPACE"
]
| [] | ["LEADER_ELECTION_NAMESPACE", "WEBHOOK_CONFIG_NAMESPACE"] | go | 2 | 0 | |
setup.py | import os
import io
import sys
from setuptools import setup, find_packages
from pkg_resources import parse_version, get_distribution, DistributionNotFound
import subprocess
import distutils.command.clean
import distutils.spawn
import glob
import shutil
import torch
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension, CUDA_HOME
from torch.utils.hipify import hipify_python
def read(*names, **kwargs):
with io.open(
os.path.join(os.path.dirname(__file__), *names),
encoding=kwargs.get("encoding", "utf8")
) as fp:
return fp.read()
def get_dist(pkgname):
try:
return get_distribution(pkgname)
except DistributionNotFound:
return None
cwd = os.path.dirname(os.path.abspath(__file__))
version_txt = os.path.join(cwd, 'version.txt')
with open(version_txt, 'r') as f:
version = f.readline().strip()
sha = 'Unknown'
package_name = 'torchvision'
try:
sha = subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=cwd).decode('ascii').strip()
except Exception:
pass
if os.getenv('BUILD_VERSION'):
version = os.getenv('BUILD_VERSION')
elif sha != 'Unknown':
version += '+' + sha[:7]
def write_version_file():
version_path = os.path.join(cwd, 'torchvision', 'version.py')
with open(version_path, 'w') as f:
f.write("__version__ = '{}'\n".format(version))
f.write("git_version = {}\n".format(repr(sha)))
f.write("from torchvision.extension import _check_cuda_version\n")
f.write("if _check_cuda_version() > 0:\n")
f.write(" cuda = _check_cuda_version()\n")
pytorch_dep = 'torch'
if os.getenv('PYTORCH_VERSION'):
pytorch_dep += "==" + os.getenv('PYTORCH_VERSION')
requirements = [
'numpy',
pytorch_dep,
]
pillow_ver = ' >= 4.1.1'
pillow_req = 'pillow-simd' if get_dist('pillow-simd') is not None else 'pillow'
requirements.append(pillow_req + pillow_ver)
def find_library(name, vision_include):
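    """Look for a C library header (e.g. jpeglib.h) in TORCHVISION_INCLUDE, the
    package directory, conda prefixes and system include paths; returns
    (found, conda_installed, include_folder, lib_folder)."""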
this_dir = os.path.dirname(os.path.abspath(__file__))
build_prefix = os.environ.get('BUILD_PREFIX', None)
is_conda_build = build_prefix is not None
library_found = False
conda_installed = False
lib_folder = None
include_folder = None
library_header = '{0}.h'.format(name)
# Lookup in TORCHVISION_INCLUDE or in the package file
package_path = [os.path.join(this_dir, 'torchvision')]
for folder in vision_include + package_path:
candidate_path = os.path.join(folder, library_header)
library_found = os.path.exists(candidate_path)
if library_found:
break
if not library_found:
print('Running build on conda-build: {0}'.format(is_conda_build))
if is_conda_build:
# Add conda headers/libraries
if os.name == 'nt':
build_prefix = os.path.join(build_prefix, 'Library')
include_folder = os.path.join(build_prefix, 'include')
lib_folder = os.path.join(build_prefix, 'lib')
library_header_path = os.path.join(
include_folder, library_header)
library_found = os.path.isfile(library_header_path)
conda_installed = library_found
else:
# Check if using Anaconda to produce wheels
conda = distutils.spawn.find_executable('conda')
is_conda = conda is not None
print('Running build on conda: {0}'.format(is_conda))
if is_conda:
python_executable = sys.executable
py_folder = os.path.dirname(python_executable)
if os.name == 'nt':
env_path = os.path.join(py_folder, 'Library')
else:
env_path = os.path.dirname(py_folder)
lib_folder = os.path.join(env_path, 'lib')
include_folder = os.path.join(env_path, 'include')
library_header_path = os.path.join(
include_folder, library_header)
library_found = os.path.isfile(library_header_path)
conda_installed = library_found
if not library_found:
if sys.platform == 'linux':
library_found = os.path.exists('/usr/include/{0}'.format(
library_header))
library_found = library_found or os.path.exists(
'/usr/local/include/{0}'.format(library_header))
return library_found, conda_installed, include_folder, lib_folder
def get_extensions():
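    """Collect the extension modules to build: the core torchvision._C operators
    (CPU/CUDA), optional C++ model tests, and the image (PNG/JPEG) and
    video_reader (FFmpeg) extensions when their dependencies are found."""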
this_dir = os.path.dirname(os.path.abspath(__file__))
extensions_dir = os.path.join(this_dir, 'torchvision', 'csrc')
main_file = glob.glob(os.path.join(extensions_dir, '*.cpp')) + glob.glob(os.path.join(extensions_dir, 'ops',
'*.cpp'))
source_cpu = glob.glob(os.path.join(extensions_dir, 'ops', 'autograd', '*.cpp')) + glob.glob(
os.path.join(extensions_dir, 'ops', 'cpu', '*.cpp'))
is_rocm_pytorch = False
if torch.__version__ >= '1.5':
from torch.utils.cpp_extension import ROCM_HOME
is_rocm_pytorch = True if ((torch.version.hip is not None) and (ROCM_HOME is not None)) else False
if is_rocm_pytorch:
hipify_python.hipify(
project_directory=this_dir,
output_directory=this_dir,
includes="torchvision/csrc/ops/cuda/*",
show_detailed=True,
is_pytorch_extension=True,
)
source_cuda = glob.glob(os.path.join(extensions_dir, 'ops', 'hip', '*.hip'))
# Copy over additional files
for file in glob.glob(r"torchvision/csrc/ops/cuda/*.h"):
shutil.copy(file, "torchvision/csrc/ops/hip")
else:
source_cuda = glob.glob(os.path.join(extensions_dir, 'ops', 'cuda', '*.cu'))
source_cuda += glob.glob(os.path.join(extensions_dir, 'ops', 'autocast', '*.cpp'))
sources = main_file + source_cpu
extension = CppExtension
compile_cpp_tests = os.getenv('WITH_CPP_MODELS_TEST', '0') == '1'
if compile_cpp_tests:
test_dir = os.path.join(this_dir, 'test')
models_dir = os.path.join(this_dir, 'torchvision', 'csrc', 'models')
test_file = glob.glob(os.path.join(test_dir, '*.cpp'))
source_models = glob.glob(os.path.join(models_dir, '*.cpp'))
test_file = [os.path.join(test_dir, s) for s in test_file]
source_models = [os.path.join(models_dir, s) for s in source_models]
tests = test_file + source_models
tests_include_dirs = [test_dir, models_dir]
define_macros = []
extra_compile_args = {
'cxx': []
}
if (torch.cuda.is_available() and ((CUDA_HOME is not None) or is_rocm_pytorch)) \
or os.getenv('FORCE_CUDA', '0') == '1':
extension = CUDAExtension
sources += source_cuda
if not is_rocm_pytorch:
define_macros += [('WITH_CUDA', None)]
nvcc_flags = os.getenv('NVCC_FLAGS', '')
if nvcc_flags == '':
nvcc_flags = []
else:
nvcc_flags = nvcc_flags.split(' ')
else:
define_macros += [('WITH_HIP', None)]
nvcc_flags = []
extra_compile_args['nvcc'] = nvcc_flags
if sys.platform == 'win32':
define_macros += [('torchvision_EXPORTS', None)]
extra_compile_args['cxx'].append('/MP')
elif sys.platform == 'linux':
extra_compile_args['cxx'].append('-fopenmp')
debug_mode = os.getenv('DEBUG', '0') == '1'
if debug_mode:
print("Compile in debug mode")
extra_compile_args['cxx'].append("-g")
extra_compile_args['cxx'].append("-O0")
if "nvcc" in extra_compile_args:
# we have to remove "-OX" and "-g" flag if exists and append
nvcc_flags = extra_compile_args["nvcc"]
extra_compile_args["nvcc"] = [
f for f in nvcc_flags if not ("-O" in f or "-g" in f)
]
extra_compile_args["nvcc"].append("-O0")
extra_compile_args["nvcc"].append("-g")
sources = [os.path.join(extensions_dir, s) for s in sources]
include_dirs = [extensions_dir]
ext_modules = [
extension(
'torchvision._C',
sorted(sources),
include_dirs=include_dirs,
define_macros=define_macros,
extra_compile_args=extra_compile_args,
)
]
if compile_cpp_tests:
ext_modules.append(
extension(
'torchvision._C_tests',
tests,
include_dirs=tests_include_dirs,
define_macros=define_macros,
extra_compile_args=extra_compile_args,
)
)
# ------------------- Torchvision extra extensions ------------------------
vision_include = os.environ.get('TORCHVISION_INCLUDE', None)
vision_library = os.environ.get('TORCHVISION_LIBRARY', None)
vision_include = (vision_include.split(os.pathsep)
if vision_include is not None else [])
vision_library = (vision_library.split(os.pathsep)
if vision_library is not None else [])
include_dirs += vision_include
library_dirs = vision_library
# Image reading extension
image_macros = []
image_include = [extensions_dir]
image_library = []
image_link_flags = []
# Locating libPNG
libpng = distutils.spawn.find_executable('libpng-config')
pngfix = distutils.spawn.find_executable('pngfix')
png_found = libpng is not None or pngfix is not None
print('PNG found: {0}'.format(png_found))
if png_found:
if libpng is not None:
# Linux / Mac
png_version = subprocess.run([libpng, '--version'],
stdout=subprocess.PIPE)
png_version = png_version.stdout.strip().decode('utf-8')
print('libpng version: {0}'.format(png_version))
png_version = parse_version(png_version)
if png_version >= parse_version("1.6.0"):
print('Building torchvision with PNG image support')
png_lib = subprocess.run([libpng, '--libdir'],
stdout=subprocess.PIPE)
png_lib = png_lib.stdout.strip().decode('utf-8')
if 'disabled' not in png_lib:
image_library += [png_lib]
png_include = subprocess.run([libpng, '--I_opts'],
stdout=subprocess.PIPE)
png_include = png_include.stdout.strip().decode('utf-8')
_, png_include = png_include.split('-I')
print('libpng include path: {0}'.format(png_include))
image_include += [png_include]
image_link_flags.append('png')
else:
print('libpng installed version is less than 1.6.0, '
'disabling PNG support')
png_found = False
else:
# Windows
png_lib = os.path.join(
os.path.dirname(os.path.dirname(pngfix)), 'lib')
png_include = os.path.join(os.path.dirname(
os.path.dirname(pngfix)), 'include', 'libpng16')
image_library += [png_lib]
image_include += [png_include]
image_link_flags.append('libpng')
# Locating libjpeg
(jpeg_found, jpeg_conda,
jpeg_include, jpeg_lib) = find_library('jpeglib', vision_include)
print('JPEG found: {0}'.format(jpeg_found))
image_macros += [('PNG_FOUND', str(int(png_found)))]
image_macros += [('JPEG_FOUND', str(int(jpeg_found)))]
if jpeg_found:
print('Building torchvision with JPEG image support')
image_link_flags.append('jpeg')
if jpeg_conda:
image_library += [jpeg_lib]
image_include += [jpeg_include]
image_path = os.path.join(extensions_dir, 'io', 'image')
image_src = glob.glob(os.path.join(image_path, '*.cpp')) + glob.glob(os.path.join(image_path, 'cpu', '*.cpp'))
if png_found or jpeg_found:
ext_modules.append(extension(
'torchvision.image',
image_src,
include_dirs=image_include + include_dirs + [image_path],
library_dirs=image_library + library_dirs,
define_macros=image_macros,
libraries=image_link_flags,
extra_compile_args=extra_compile_args
))
ffmpeg_exe = distutils.spawn.find_executable('ffmpeg')
has_ffmpeg = ffmpeg_exe is not None
print("FFmpeg found: {}".format(has_ffmpeg))
if has_ffmpeg:
ffmpeg_libraries = {
'libavcodec',
'libavformat',
'libavutil',
'libswresample',
'libswscale'
}
ffmpeg_bin = os.path.dirname(ffmpeg_exe)
ffmpeg_root = os.path.dirname(ffmpeg_bin)
ffmpeg_include_dir = os.path.join(ffmpeg_root, 'include')
ffmpeg_library_dir = os.path.join(ffmpeg_root, 'lib')
gcc = distutils.spawn.find_executable('gcc')
platform_tag = subprocess.run(
[gcc, '-print-multiarch'], stdout=subprocess.PIPE)
platform_tag = platform_tag.stdout.strip().decode('utf-8')
if platform_tag:
# Most probably a Debian-based distribution
ffmpeg_include_dir = [
ffmpeg_include_dir,
os.path.join(ffmpeg_include_dir, platform_tag)
]
ffmpeg_library_dir = [
ffmpeg_library_dir,
os.path.join(ffmpeg_library_dir, platform_tag)
]
else:
ffmpeg_include_dir = [ffmpeg_include_dir]
ffmpeg_library_dir = [ffmpeg_library_dir]
has_ffmpeg = True
for library in ffmpeg_libraries:
library_found = False
for search_path in ffmpeg_include_dir + include_dirs:
full_path = os.path.join(search_path, library, '*.h')
library_found |= len(glob.glob(full_path)) > 0
if not library_found:
                print('{0} header files were not found, disabling ffmpeg '
                      'support'.format(library))
has_ffmpeg = False
if has_ffmpeg:
print("ffmpeg include path: {}".format(ffmpeg_include_dir))
print("ffmpeg library_dir: {}".format(ffmpeg_library_dir))
# TorchVision base decoder + video reader
video_reader_src_dir = os.path.join(this_dir, 'torchvision', 'csrc', 'io', 'video_reader')
video_reader_src = glob.glob(os.path.join(video_reader_src_dir, "*.cpp"))
base_decoder_src_dir = os.path.join(this_dir, 'torchvision', 'csrc', 'io', 'decoder')
base_decoder_src = glob.glob(
os.path.join(base_decoder_src_dir, "*.cpp"))
# Torchvision video API
videoapi_src_dir = os.path.join(this_dir, 'torchvision', 'csrc', 'io', 'video')
videoapi_src = glob.glob(os.path.join(videoapi_src_dir, "*.cpp"))
# exclude tests
base_decoder_src = [x for x in base_decoder_src if '_test.cpp' not in x]
combined_src = video_reader_src + base_decoder_src + videoapi_src
ext_modules.append(
CppExtension(
'torchvision.video_reader',
combined_src,
include_dirs=[
base_decoder_src_dir,
video_reader_src_dir,
videoapi_src_dir,
extensions_dir,
*ffmpeg_include_dir,
*include_dirs
],
library_dirs=ffmpeg_library_dir + library_dirs,
libraries=[
'avcodec',
'avformat',
'avutil',
'swresample',
'swscale',
],
extra_compile_args=["-std=c++14"] if os.name != 'nt' else ['/std:c++14', '/MP'],
extra_link_args=["-std=c++14" if os.name != 'nt' else '/std:c++14'],
)
)
return ext_modules
class clean(distutils.command.clean.clean):
def run(self):
with open('.gitignore', 'r') as f:
ignores = f.read()
for wildcard in filter(None, ignores.split('\n')):
for filename in glob.glob(wildcard):
try:
os.remove(filename)
except OSError:
shutil.rmtree(filename, ignore_errors=True)
# It's an old-style class in Python 2.7...
distutils.command.clean.clean.run(self)
if __name__ == "__main__":
print("Building wheel {}-{}".format(package_name, version))
write_version_file()
with open('README.rst') as f:
readme = f.read()
setup(
# Metadata
name=package_name,
version=version,
author='PyTorch Core Team',
author_email='[email protected]',
url='https://github.com/pytorch/vision',
description='image and video datasets and models for torch deep learning',
long_description=readme,
license='BSD',
# Package info
packages=find_packages(exclude=('test',)),
package_data={
package_name: ['*.dll', '*.dylib', '*.so']
},
zip_safe=False,
install_requires=requirements,
extras_require={
"scipy": ["scipy"],
},
ext_modules=get_extensions(),
cmdclass={
'build_ext': BuildExtension.with_options(no_python_abi_suffix=True),
'clean': clean,
}
)
| []
| []
| [
"TORCHVISION_LIBRARY",
"FORCE_CUDA",
"TORCHVISION_INCLUDE",
"WITH_CPP_MODELS_TEST",
"BUILD_VERSION",
"NVCC_FLAGS",
"DEBUG",
"BUILD_PREFIX",
"PYTORCH_VERSION"
]
| [] | ["TORCHVISION_LIBRARY", "FORCE_CUDA", "TORCHVISION_INCLUDE", "WITH_CPP_MODELS_TEST", "BUILD_VERSION", "NVCC_FLAGS", "DEBUG", "BUILD_PREFIX", "PYTORCH_VERSION"] | python | 9 | 0 | |
src/syscall/exec_linux_test.go | // Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux
package syscall_test
import (
"flag"
"fmt"
"internal/testenv"
"io"
"io/ioutil"
"os"
"os/exec"
"os/user"
"path/filepath"
"runtime"
"strconv"
"strings"
"syscall"
"testing"
"unsafe"
)
func isDocker() bool {
_, err := os.Stat("/.dockerenv")
return err == nil
}
func isLXC() bool {
return os.Getenv("container") == "lxc"
}
func skipInContainer(t *testing.T) {
if isDocker() {
t.Skip("skip this test in Docker container")
}
if isLXC() {
t.Skip("skip this test in LXC container")
}
}
func skipNoUserNamespaces(t *testing.T) {
if _, err := os.Stat("/proc/self/ns/user"); err != nil {
if os.IsNotExist(err) {
t.Skip("kernel doesn't support user namespaces")
}
if os.IsPermission(err) {
t.Skip("unable to test user namespaces due to permissions")
}
t.Fatalf("Failed to stat /proc/self/ns/user: %v", err)
}
}
func skipUnprivilegedUserClone(t *testing.T) {
// Skip the test if the sysctl that prevents unprivileged user
// from creating user namespaces is enabled.
data, errRead := ioutil.ReadFile("/proc/sys/kernel/unprivileged_userns_clone")
if errRead != nil || len(data) < 1 || data[0] == '0' {
t.Skip("kernel prohibits user namespace in unprivileged process")
}
}
// Check if we are in a chroot by checking if the inode of / is
// different from 2 (there is no better test available to non-root on
// linux).
func isChrooted(t *testing.T) bool {
root, err := os.Stat("/")
if err != nil {
t.Fatalf("cannot stat /: %v", err)
}
return root.Sys().(*syscall.Stat_t).Ino != 2
}
func checkUserNS(t *testing.T) {
skipInContainer(t)
skipNoUserNamespaces(t)
if isChrooted(t) {
// create_user_ns in the kernel (see
// https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/tree/kernel/user_namespace.c)
// forbids the creation of user namespaces when chrooted.
t.Skip("cannot create user namespaces when chrooted")
}
// On some systems, there is a sysctl setting.
if os.Getuid() != 0 {
skipUnprivilegedUserClone(t)
}
// On Centos 7 make sure they set the kernel parameter user_namespace=1
// See issue 16283 and 20796.
if _, err := os.Stat("/sys/module/user_namespace/parameters/enable"); err == nil {
buf, _ := ioutil.ReadFile("/sys/module/user_namespace/parameters/enabled")
if !strings.HasPrefix(string(buf), "Y") {
t.Skip("kernel doesn't support user namespaces")
}
}
// On Centos 7.5+, user namespaces are disabled if user.max_user_namespaces = 0
if _, err := os.Stat("/proc/sys/user/max_user_namespaces"); err == nil {
buf, errRead := ioutil.ReadFile("/proc/sys/user/max_user_namespaces")
if errRead == nil && buf[0] == '0' {
t.Skip("kernel doesn't support user namespaces")
}
}
// When running under the Go continuous build, skip tests for
// now when under Kubernetes. (where things are root but not quite)
// Both of these are our own environment variables.
// See Issue 12815.
if os.Getenv("GO_BUILDER_NAME") != "" && os.Getenv("IN_KUBERNETES") == "1" {
t.Skip("skipping test on Kubernetes-based builders; see Issue 12815")
}
}
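// whoamiCmd builds a `whoami` command that runs in a new user namespace with
// the given uid/gid mapped to root, so on success it should print "root".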
func whoamiCmd(t *testing.T, uid, gid int, setgroups bool) *exec.Cmd {
checkUserNS(t)
cmd := exec.Command("whoami")
cmd.SysProcAttr = &syscall.SysProcAttr{
Cloneflags: syscall.CLONE_NEWUSER,
UidMappings: []syscall.SysProcIDMap{
{ContainerID: 0, HostID: uid, Size: 1},
},
GidMappings: []syscall.SysProcIDMap{
{ContainerID: 0, HostID: gid, Size: 1},
},
GidMappingsEnableSetgroups: setgroups,
}
return cmd
}
func testNEWUSERRemap(t *testing.T, uid, gid int, setgroups bool) {
cmd := whoamiCmd(t, uid, gid, setgroups)
out, err := cmd.CombinedOutput()
if err != nil {
t.Fatalf("Cmd failed with err %v, output: %s", err, out)
}
sout := strings.TrimSpace(string(out))
want := "root"
if sout != want {
t.Fatalf("whoami = %q; want %q", out, want)
}
}
func TestCloneNEWUSERAndRemapRootDisableSetgroups(t *testing.T) {
if os.Getuid() != 0 {
t.Skip("skipping root only test")
}
testNEWUSERRemap(t, 0, 0, false)
}
func TestCloneNEWUSERAndRemapRootEnableSetgroups(t *testing.T) {
if os.Getuid() != 0 {
t.Skip("skipping root only test")
}
testNEWUSERRemap(t, 0, 0, true)
}
func TestCloneNEWUSERAndRemapNoRootDisableSetgroups(t *testing.T) {
if os.Getuid() == 0 {
t.Skip("skipping unprivileged user only test")
}
testNEWUSERRemap(t, os.Getuid(), os.Getgid(), false)
}
func TestCloneNEWUSERAndRemapNoRootSetgroupsEnableSetgroups(t *testing.T) {
if os.Getuid() == 0 {
t.Skip("skipping unprivileged user only test")
}
cmd := whoamiCmd(t, os.Getuid(), os.Getgid(), true)
err := cmd.Run()
if err == nil {
t.Skip("probably old kernel without security fix")
}
if !os.IsPermission(err) {
t.Fatalf("Unprivileged gid_map rewriting with GidMappingsEnableSetgroups must fail")
}
}
func TestEmptyCredGroupsDisableSetgroups(t *testing.T) {
cmd := whoamiCmd(t, os.Getuid(), os.Getgid(), false)
cmd.SysProcAttr.Credential = &syscall.Credential{}
if err := cmd.Run(); err != nil {
t.Fatal(err)
}
}
func TestUnshare(t *testing.T) {
skipInContainer(t)
// Make sure we are running as root so we have permissions to use unshare
// and create a network namespace.
if os.Getuid() != 0 {
t.Skip("kernel prohibits unshare in unprivileged process, unless using user namespace")
}
// When running under the Go continuous build, skip tests for
// now when under Kubernetes. (where things are root but not quite)
// Both of these are our own environment variables.
// See Issue 12815.
if os.Getenv("GO_BUILDER_NAME") != "" && os.Getenv("IN_KUBERNETES") == "1" {
t.Skip("skipping test on Kubernetes-based builders; see Issue 12815")
}
path := "/proc/net/dev"
if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) {
t.Skip("kernel doesn't support proc filesystem")
}
if os.IsPermission(err) {
t.Skip("unable to test proc filesystem due to permissions")
}
t.Fatal(err)
}
if _, err := os.Stat("/proc/self/ns/net"); err != nil {
if os.IsNotExist(err) {
t.Skip("kernel doesn't support net namespace")
}
t.Fatal(err)
}
orig, err := ioutil.ReadFile(path)
if err != nil {
t.Fatal(err)
}
origLines := strings.Split(strings.TrimSpace(string(orig)), "\n")
cmd := exec.Command("cat", path)
cmd.SysProcAttr = &syscall.SysProcAttr{
Unshareflags: syscall.CLONE_NEWNET,
}
out, err := cmd.CombinedOutput()
if err != nil {
if strings.Contains(err.Error(), "operation not permitted") {
// Issue 17206: despite all the checks above,
// this still reportedly fails for some users.
// (older kernels?). Just skip.
t.Skip("skipping due to permission error")
}
t.Fatalf("Cmd failed with err %v, output: %s", err, out)
}
// Check there is only the local network interface
sout := strings.TrimSpace(string(out))
if !strings.Contains(sout, "lo:") {
t.Fatalf("Expected lo network interface to exist, got %s", sout)
}
lines := strings.Split(sout, "\n")
if len(lines) >= len(origLines) {
t.Fatalf("Got %d lines of output, want <%d", len(lines), len(origLines))
}
}
func TestGroupCleanup(t *testing.T) {
if os.Getuid() != 0 {
t.Skip("we need root for credential")
}
cmd := exec.Command("id")
cmd.SysProcAttr = &syscall.SysProcAttr{
Credential: &syscall.Credential{
Uid: 0,
Gid: 0,
},
}
out, err := cmd.CombinedOutput()
if err != nil {
t.Fatalf("Cmd failed with err %v, output: %s", err, out)
}
strOut := strings.TrimSpace(string(out))
expected := "uid=0(root) gid=0(root)"
// Just check prefix because some distros reportedly output a
// context parameter; see https://golang.org/issue/16224.
// Alpine does not output groups; see https://golang.org/issue/19938.
if !strings.HasPrefix(strOut, expected) {
t.Errorf("id command output: %q, expected prefix: %q", strOut, expected)
}
}
func TestGroupCleanupUserNamespace(t *testing.T) {
if os.Getuid() != 0 {
t.Skip("we need root for credential")
}
checkUserNS(t)
cmd := exec.Command("id")
uid, gid := os.Getuid(), os.Getgid()
cmd.SysProcAttr = &syscall.SysProcAttr{
Cloneflags: syscall.CLONE_NEWUSER,
Credential: &syscall.Credential{
Uid: uint32(uid),
Gid: uint32(gid),
},
UidMappings: []syscall.SysProcIDMap{
{ContainerID: 0, HostID: uid, Size: 1},
},
GidMappings: []syscall.SysProcIDMap{
{ContainerID: 0, HostID: gid, Size: 1},
},
}
out, err := cmd.CombinedOutput()
if err != nil {
t.Fatalf("Cmd failed with err %v, output: %s", err, out)
}
strOut := strings.TrimSpace(string(out))
// Strings we've seen in the wild.
expected := []string{
"uid=0(root) gid=0(root) groups=0(root)",
"uid=0(root) gid=0(root) groups=0(root),65534(nobody)",
"uid=0(root) gid=0(root) groups=0(root),65534(nogroup)",
"uid=0(root) gid=0(root) groups=0(root),65534",
"uid=0(root) gid=0(root) groups=0(root),65534(nobody),65534(nobody),65534(nobody),65534(nobody),65534(nobody),65534(nobody),65534(nobody),65534(nobody),65534(nobody),65534(nobody)", // Alpine; see https://golang.org/issue/19938
}
for _, e := range expected {
if strOut == e {
return
}
}
t.Errorf("id command output: %q, expected one of %q", strOut, expected)
}
// TestUnshareHelperProcess isn't a real test. It's used as a helper process
// for TestUnshareMountNameSpace.
func TestUnshareMountNameSpaceHelper(*testing.T) {
if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
return
}
defer os.Exit(0)
if err := syscall.Mount("none", flag.Args()[0], "proc", 0, ""); err != nil {
fmt.Fprintf(os.Stderr, "unshare: mount %v failed: %v", os.Args, err)
os.Exit(2)
}
}
// Test for Issue 38471: unshare fails because systemd has forced / to be shared
func TestUnshareMountNameSpace(t *testing.T) {
skipInContainer(t)
// Make sure we are running as root so we have permissions to use unshare
// and create a network namespace.
if os.Getuid() != 0 {
t.Skip("kernel prohibits unshare in unprivileged process, unless using user namespace")
}
// When running under the Go continuous build, skip tests for
// now when under Kubernetes. (where things are root but not quite)
// Both of these are our own environment variables.
// See Issue 12815.
if os.Getenv("GO_BUILDER_NAME") != "" && os.Getenv("IN_KUBERNETES") == "1" {
t.Skip("skipping test on Kubernetes-based builders; see Issue 12815")
}
d, err := ioutil.TempDir("", "unshare")
if err != nil {
t.Fatalf("tempdir: %v", err)
}
cmd := exec.Command(os.Args[0], "-test.run=TestUnshareMountNameSpaceHelper", d)
cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"}
cmd.SysProcAttr = &syscall.SysProcAttr{Unshareflags: syscall.CLONE_NEWNS}
o, err := cmd.CombinedOutput()
if err != nil {
if strings.Contains(err.Error(), ": permission denied") {
t.Skipf("Skipping test (golang.org/issue/19698); unshare failed due to permissions: %s, %v", o, err)
}
t.Fatalf("unshare failed: %s, %v", o, err)
}
// How do we tell if the namespace was really unshared? It turns out
// to be simple: just try to remove the directory. If it's still mounted
// on the rm will fail with EBUSY. Then we have some cleanup to do:
// we must unmount it, then try to remove it again.
if err := os.Remove(d); err != nil {
t.Errorf("rmdir failed on %v: %v", d, err)
if err := syscall.Unmount(d, syscall.MNT_FORCE); err != nil {
t.Errorf("Can't unmount %v: %v", d, err)
}
if err := os.Remove(d); err != nil {
t.Errorf("rmdir after unmount failed on %v: %v", d, err)
}
}
}
// Test for Issue 20103: unshare fails when chroot is used
func TestUnshareMountNameSpaceChroot(t *testing.T) {
skipInContainer(t)
// Make sure we are running as root so we have permissions to use unshare
// and create a network namespace.
if os.Getuid() != 0 {
t.Skip("kernel prohibits unshare in unprivileged process, unless using user namespace")
}
// When running under the Go continuous build, skip tests for
// now when under Kubernetes. (where things are root but not quite)
// Both of these are our own environment variables.
// See Issue 12815.
if os.Getenv("GO_BUILDER_NAME") != "" && os.Getenv("IN_KUBERNETES") == "1" {
t.Skip("skipping test on Kubernetes-based builders; see Issue 12815")
}
d, err := ioutil.TempDir("", "unshare")
if err != nil {
t.Fatalf("tempdir: %v", err)
}
// Since we are doing a chroot, we need the binary there,
// and it must be statically linked.
x := filepath.Join(d, "syscall.test")
cmd := exec.Command(testenv.GoToolPath(t), "test", "-c", "-o", x, "syscall")
cmd.Env = append(os.Environ(), "CGO_ENABLED=0")
if o, err := cmd.CombinedOutput(); err != nil {
t.Fatalf("Build of syscall in chroot failed, output %v, err %v", o, err)
}
cmd = exec.Command("/syscall.test", "-test.run=TestUnshareMountNameSpaceHelper", "/")
cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"}
cmd.SysProcAttr = &syscall.SysProcAttr{Chroot: d, Unshareflags: syscall.CLONE_NEWNS}
o, err := cmd.CombinedOutput()
if err != nil {
if strings.Contains(err.Error(), ": permission denied") {
t.Skipf("Skipping test (golang.org/issue/19698); unshare failed due to permissions: %s, %v", o, err)
}
t.Fatalf("unshare failed: %s, %v", o, err)
}
// How do we tell if the namespace was really unshared? It turns out
// to be simple: just try to remove the executable. If it's still mounted
// on, the rm will fail. Then we have some cleanup to do:
// we must force unmount it, then try to remove it again.
if err := os.Remove(x); err != nil {
t.Errorf("rm failed on %v: %v", x, err)
if err := syscall.Unmount(d, syscall.MNT_FORCE); err != nil {
t.Fatalf("Can't unmount %v: %v", d, err)
}
if err := os.Remove(x); err != nil {
t.Fatalf("rm failed on %v: %v", x, err)
}
}
if err := os.Remove(d); err != nil {
t.Errorf("rmdir failed on %v: %v", d, err)
}
}
func TestUnshareUidGidMappingHelper(*testing.T) {
if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
return
}
defer os.Exit(0)
if err := syscall.Chroot(os.TempDir()); err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(2)
}
}
// Test for Issue 29789: unshare fails when uid/gid mapping is specified
func TestUnshareUidGidMapping(t *testing.T) {
if os.Getuid() == 0 {
t.Skip("test exercises unprivileged user namespace, fails with privileges")
}
checkUserNS(t)
cmd := exec.Command(os.Args[0], "-test.run=TestUnshareUidGidMappingHelper")
cmd.Env = append(os.Environ(), "GO_WANT_HELPER_PROCESS=1")
cmd.SysProcAttr = &syscall.SysProcAttr{
Unshareflags: syscall.CLONE_NEWNS | syscall.CLONE_NEWUSER,
GidMappingsEnableSetgroups: false,
UidMappings: []syscall.SysProcIDMap{
{
ContainerID: 0,
HostID: syscall.Getuid(),
Size: 1,
},
},
GidMappings: []syscall.SysProcIDMap{
{
ContainerID: 0,
HostID: syscall.Getgid(),
Size: 1,
},
},
}
out, err := cmd.CombinedOutput()
if err != nil {
t.Fatalf("Cmd failed with err %v, output: %s", err, out)
}
}
type capHeader struct {
version uint32
pid int32
}
type capData struct {
effective uint32
permitted uint32
inheritable uint32
}
const CAP_SYS_TIME = 25
const CAP_SYSLOG = 34
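// The capget syscall fills two 32-bit capData blocks; capabilities numbered 32
// and above (such as CAP_SYSLOG) live in the second block, hence the &31
// masking in the checks below.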
type caps struct {
hdr capHeader
data [2]capData
}
func getCaps() (caps, error) {
var c caps
// Get capability version
if _, _, errno := syscall.Syscall(syscall.SYS_CAPGET, uintptr(unsafe.Pointer(&c.hdr)), uintptr(unsafe.Pointer(nil)), 0); errno != 0 {
return c, fmt.Errorf("SYS_CAPGET: %v", errno)
}
// Get current capabilities
if _, _, errno := syscall.Syscall(syscall.SYS_CAPGET, uintptr(unsafe.Pointer(&c.hdr)), uintptr(unsafe.Pointer(&c.data[0])), 0); errno != 0 {
return c, fmt.Errorf("SYS_CAPGET: %v", errno)
}
return c, nil
}
func mustSupportAmbientCaps(t *testing.T) {
var uname syscall.Utsname
if err := syscall.Uname(&uname); err != nil {
t.Fatalf("Uname: %v", err)
}
var buf [65]byte
for i, b := range uname.Release {
buf[i] = byte(b)
}
ver := string(buf[:])
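// Trim the release string at the first NUL terminator.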
if i := strings.Index(ver, "\x00"); i != -1 {
ver = ver[:i]
}
if strings.HasPrefix(ver, "2.") ||
strings.HasPrefix(ver, "3.") ||
strings.HasPrefix(ver, "4.1.") ||
strings.HasPrefix(ver, "4.2.") {
t.Skipf("kernel version %q predates required 4.3; skipping test", ver)
}
}
// TestAmbientCapsHelper isn't a real test. It's used as a helper process for
// TestAmbientCaps.
func TestAmbientCapsHelper(*testing.T) {
if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
return
}
defer os.Exit(0)
caps, err := getCaps()
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(2)
}
if caps.data[0].effective&(1<<uint(CAP_SYS_TIME)) == 0 {
fmt.Fprintln(os.Stderr, "CAP_SYS_TIME unexpectedly not in the effective capability mask")
os.Exit(2)
}
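// CAP_SYSLOG (34) is above bit 31, so it lives in the second capability word; mask with 31 to get its bit position.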
if caps.data[1].effective&(1<<uint(CAP_SYSLOG&31)) == 0 {
fmt.Fprintln(os.Stderr, "CAP_SYSLOG unexpectedly not in the effective capability mask")
os.Exit(2)
}
}
func TestAmbientCaps(t *testing.T) {
// Make sure we are running as root so we have permission to switch credentials
// and raise ambient capabilities.
if os.Getuid() != 0 {
t.Skip("kernel prohibits unshare in unprivileged process, unless using user namespace")
}
testAmbientCaps(t, false)
}
func TestAmbientCapsUserns(t *testing.T) {
checkUserNS(t)
testAmbientCaps(t, true)
}
func testAmbientCaps(t *testing.T, userns bool) {
skipInContainer(t)
mustSupportAmbientCaps(t)
// When running under the Go continuous build, skip tests for
// now when under Kubernetes. (where things are root but not quite)
// Both of these are our own environment variables.
// See Issue 12815.
if os.Getenv("GO_BUILDER_NAME") != "" && os.Getenv("IN_KUBERNETES") == "1" {
t.Skip("skipping test on Kubernetes-based builders; see Issue 12815")
}
skipUnprivilegedUserClone(t)
// skip on android, due to lack of lookup support
if runtime.GOOS == "android" {
t.Skip("skipping test on android; see Issue 27327")
}
u, err := user.Lookup("nobody")
if err != nil {
t.Fatal(err)
}
uid, err := strconv.ParseInt(u.Uid, 0, 32)
if err != nil {
t.Fatal(err)
}
gid, err := strconv.ParseInt(u.Gid, 0, 32)
if err != nil {
t.Fatal(err)
}
// Copy the test binary to a temporary location which is readable by nobody.
f, err := ioutil.TempFile("", "gotest")
if err != nil {
t.Fatal(err)
}
defer os.Remove(f.Name())
defer f.Close()
e, err := os.Open(os.Args[0])
if err != nil {
t.Fatal(err)
}
defer e.Close()
if _, err := io.Copy(f, e); err != nil {
t.Fatal(err)
}
if err := f.Chmod(0755); err != nil {
t.Fatal(err)
}
if err := f.Close(); err != nil {
t.Fatal(err)
}
cmd := exec.Command(f.Name(), "-test.run=TestAmbientCapsHelper")
cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"}
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
cmd.SysProcAttr = &syscall.SysProcAttr{
Credential: &syscall.Credential{
Uid: uint32(uid),
Gid: uint32(gid),
},
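// Raise CAP_SYS_TIME and CAP_SYSLOG in the child's ambient capability set.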
AmbientCaps: []uintptr{CAP_SYS_TIME, CAP_SYSLOG},
}
if userns {
cmd.SysProcAttr.Cloneflags = syscall.CLONE_NEWUSER
const nobody = 65534
uid := os.Getuid()
gid := os.Getgid()
cmd.SysProcAttr.UidMappings = []syscall.SysProcIDMap{{
ContainerID: int(nobody),
HostID: int(uid),
Size: int(1),
}}
cmd.SysProcAttr.GidMappings = []syscall.SysProcIDMap{{
ContainerID: int(nobody),
HostID: int(gid),
Size: int(1),
}}
// Set credentials to run as user and group nobody.
cmd.SysProcAttr.Credential = &syscall.Credential{
Uid: nobody,
Gid: nobody,
}
}
if err := cmd.Run(); err != nil {
t.Fatal(err.Error())
}
}
| [
"\"container\"",
"\"GO_BUILDER_NAME\"",
"\"IN_KUBERNETES\"",
"\"GO_BUILDER_NAME\"",
"\"IN_KUBERNETES\"",
"\"GO_WANT_HELPER_PROCESS\"",
"\"GO_BUILDER_NAME\"",
"\"IN_KUBERNETES\"",
"\"GO_BUILDER_NAME\"",
"\"IN_KUBERNETES\"",
"\"GO_WANT_HELPER_PROCESS\"",
"\"GO_WANT_HELPER_PROCESS\"",
"\"GO_BUILDER_NAME\"",
"\"IN_KUBERNETES\""
]
| []
| [
"GO_BUILDER_NAME",
"GO_WANT_HELPER_PROCESS",
"IN_KUBERNETES",
"container"
]
| [] | ["GO_BUILDER_NAME", "GO_WANT_HELPER_PROCESS", "IN_KUBERNETES", "container"] | go | 4 | 0 | |
pilot/pkg/networking/core/v1alpha3/listener.go | // Copyright 2017 Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1alpha3
import (
"encoding/json"
"fmt"
"os"
"reflect"
"sort"
"strings"
"time"
xdsapi "github.com/envoyproxy/go-control-plane/envoy/api/v2"
"github.com/envoyproxy/go-control-plane/envoy/api/v2/auth"
"github.com/envoyproxy/go-control-plane/envoy/api/v2/core"
"github.com/envoyproxy/go-control-plane/envoy/api/v2/listener"
fileaccesslog "github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v2"
accesslog "github.com/envoyproxy/go-control-plane/envoy/config/filter/accesslog/v2"
http_conn "github.com/envoyproxy/go-control-plane/envoy/config/filter/network/http_connection_manager/v2"
tcp_proxy "github.com/envoyproxy/go-control-plane/envoy/config/filter/network/tcp_proxy/v2"
"github.com/envoyproxy/go-control-plane/envoy/type"
xdsutil "github.com/envoyproxy/go-control-plane/pkg/util"
google_protobuf "github.com/gogo/protobuf/types"
"github.com/prometheus/client_golang/prometheus"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/networking/plugin"
"istio.io/istio/pilot/pkg/networking/util"
"istio.io/istio/pkg/log"
)
const (
envoyListenerTLSInspector = "envoy.listener.tls_inspector"
// RDSHttpProxy is the special name for HTTP PROXY route
RDSHttpProxy = "http_proxy"
// VirtualListenerName is the name for traffic capture listener
VirtualListenerName = "virtual"
// WildcardAddress binds to all IP addresses
WildcardAddress = "0.0.0.0"
// LocalhostAddress for local binding
LocalhostAddress = "127.0.0.1"
// EnvoyHTTPLogFormat format for envoy access logs
EnvoyHTTPLogFormat = "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)%" +
"%PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% " +
"%DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" " +
"\"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\" \"%UPSTREAM_HOST%\" " +
"%UPSTREAM_CLUSTER% %UPSTREAM_LOCAL_ADDRESS% %DOWNSTREAM_LOCAL_ADDRESS% " +
"%DOWNSTREAM_REMOTE_ADDRESS% %REQUESTED_SERVER_NAME%\n"
// EnvoyTCPLogFormat format for envoy access logs
EnvoyTCPLogFormat = "[%START_TIME%] %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% " +
"%DURATION% \"%UPSTREAM_HOST%\" %UPSTREAM_CLUSTER% %UPSTREAM_LOCAL_ADDRESS% %DOWNSTREAM_LOCAL_ADDRESS% " +
"%DOWNSTREAM_REMOTE_ADDRESS% %REQUESTED_SERVER_NAME%\n"
)
var (
// Very verbose output in the logs - full LDS response logged for each sidecar.
// Use /debug/ldsz instead.
verboseDebug = os.Getenv("PILOT_DUMP_ALPHA3") != ""
// TODO: gauge should be reset on refresh, not the best way to represent errors but better
// than nothing.
// TODO: add dimensions - namespace of rule, service, rule name
invalidOutboundListeners = prometheus.NewGauge(prometheus.GaugeOpts{
Name: "pilot_invalid_out_listeners",
Help: "Number of invalid outbound listeners.",
})
filterChainsConflict = prometheus.NewGauge(prometheus.GaugeOpts{
Name: "pilot_conf_filter_chains",
Help: "Number of conflicting filter chains.",
})
)
func init() {
prometheus.MustRegister(invalidOutboundListeners)
prometheus.MustRegister(filterChainsConflict)
}
// ListenersALPNProtocols denotes the list of ALPN protocols that the listener
// should expose
var ListenersALPNProtocols = []string{"h2", "http/1.1"}
// BuildListeners produces a list of listeners and referenced clusters for all proxies
func (configgen *ConfigGeneratorImpl) BuildListeners(env *model.Environment, node *model.Proxy, push *model.PushContext) ([]*xdsapi.Listener, error) {
switch node.Type {
case model.Sidecar:
return configgen.buildSidecarListeners(env, node, push)
case model.Router, model.Ingress:
return configgen.buildGatewayListeners(env, node, push)
}
return nil, nil
}
// buildSidecarListeners produces a list of listeners for sidecar proxies
func (configgen *ConfigGeneratorImpl) buildSidecarListeners(env *model.Environment, node *model.Proxy,
push *model.PushContext) ([]*xdsapi.Listener, error) {
mesh := env.Mesh
managementPorts := env.ManagementPorts(node.IPAddress)
proxyInstances, err := env.GetProxyServiceInstances(node)
if err != nil {
return nil, err
}
services := push.Services
listeners := make([]*xdsapi.Listener, 0)
if mesh.ProxyListenPort > 0 {
inbound := configgen.buildSidecarInboundListeners(env, node, push, proxyInstances)
outbound := configgen.buildSidecarOutboundListeners(env, node, push, proxyInstances, services)
listeners = append(listeners, inbound...)
listeners = append(listeners, outbound...)
mgmtListeners := buildSidecarInboundMgmtListeners(env, managementPorts, node.IPAddress)
// If a management listener port and a service port are the same, bad things happen
// when running in kubernetes, as the probes stop responding. So, append
// non-overlapping listeners only.
for i := range mgmtListeners {
m := mgmtListeners[i]
l := util.GetByAddress(listeners, m.Address.String())
if l != nil {
log.Warnf("Omitting listener for management address %s (%s) due to collision with service listener %s (%s)",
m.Name, m.Address.String(), l.Name, l.Address.String())
continue
}
listeners = append(listeners, m)
}
// We need a dummy filter to fill in the filter stack for orig_dst listener
// TODO: Move to Listener filters and set up original dst filter there.
dummyTCPProxy := &tcp_proxy.TcpProxy{
StatPrefix: util.BlackHoleCluster,
Cluster: util.BlackHoleCluster,
}
var transparent *google_protobuf.BoolValue
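// When the sidecar intercepts traffic with TPROXY, the virtual listener must be marked transparent.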
if mode := node.Metadata["INTERCEPTION_MODE"]; mode == "TPROXY" {
transparent = &google_protobuf.BoolValue{Value: true}
}
// add an extra listener that binds to the port that is the recipient of the iptables redirect
listeners = append(listeners, &xdsapi.Listener{
Name: VirtualListenerName,
Address: util.BuildAddress(WildcardAddress, uint32(mesh.ProxyListenPort)),
Transparent: transparent,
UseOriginalDst: &google_protobuf.BoolValue{Value: true},
FilterChains: []listener.FilterChain{
{
Filters: []listener.Filter{
{
Name: xdsutil.TCPProxy,
Config: util.MessageToStruct(dummyTCPProxy),
},
},
},
},
})
}
// enable HTTP PROXY port if necessary; this will add an RDS route for this port
if mesh.ProxyHttpPort > 0 {
useRemoteAddress := false
traceOperation := http_conn.EGRESS
listenAddress := LocalhostAddress
if node.Type == model.Router {
useRemoteAddress = true
traceOperation = http_conn.INGRESS
listenAddress = WildcardAddress
}
opts := buildListenerOpts{
env: env,
proxy: node,
proxyInstances: proxyInstances,
ip: listenAddress,
port: int(mesh.ProxyHttpPort),
protocol: model.ProtocolHTTP,
filterChainOpts: []*filterChainOpts{{
httpOpts: &httpListenerOpts{
rds: RDSHttpProxy,
useRemoteAddress: useRemoteAddress,
direction: traceOperation,
connectionManager: &http_conn.HttpConnectionManager{
HttpProtocolOptions: &core.Http1ProtocolOptions{
AllowAbsoluteUrl: &google_protobuf.BoolValue{
Value: true,
},
},
},
},
}},
bindToPort: true,
}
l := buildListener(opts)
if err := marshalFilters(l, opts, []plugin.FilterChain{{}}); err != nil {
log.Warna("buildSidecarListeners ", err.Error())
} else {
listeners = append(listeners, l)
}
// TODO: need inbound listeners in HTTP_PROXY case, with dedicated ingress listener.
}
return listeners, nil
}
// buildSidecarInboundListeners creates listeners for the server-side (inbound)
// configuration for co-located service proxyInstances.
func (configgen *ConfigGeneratorImpl) buildSidecarInboundListeners(env *model.Environment, node *model.Proxy, push *model.PushContext,
proxyInstances []*model.ServiceInstance) []*xdsapi.Listener {
var listeners []*xdsapi.Listener
listenerMap := make(map[string]*model.ServiceInstance)
// inbound connections/requests are redirected to the endpoint address but appear to be sent
// to the service address.
for _, instance := range proxyInstances {
endpoint := instance.Endpoint
protocol := endpoint.ServicePort.Protocol
// Local service instances can be accessed through one of three
// addresses: localhost, endpoint IP, and service
// VIP. Localhost bypasses the proxy and doesn't need any TCP
// route config. Endpoint IP is handled below and Service IP is handled
// by outbound routes.
// Traffic sent to our service VIP is redirected by remote
// services' kubeproxy to our specific endpoint IP.
listenerOpts := buildListenerOpts{
env: env,
proxy: node,
proxyInstances: proxyInstances,
ip: endpoint.Address,
port: endpoint.Port,
protocol: protocol,
}
listenerMapKey := fmt.Sprintf("%s:%d", endpoint.Address, endpoint.Port)
if old, exists := listenerMap[listenerMapKey]; exists {
push.Add(model.ProxyStatusConflictInboundListener, node.ID, node,
fmt.Sprintf("Rejected %s, used %s for %s", instance.Service.Hostname, old.Service.Hostname, listenerMapKey))
// Skip building listener for the same ip port
continue
}
allChains := []plugin.FilterChain{}
var httpOpts *httpListenerOpts
var tcpNetworkFilters []listener.Filter
listenerType := plugin.ModelProtocolToListenerProtocol(protocol)
switch listenerType {
case plugin.ListenerProtocolHTTP:
httpOpts = &httpListenerOpts{
routeConfig: configgen.buildSidecarInboundHTTPRouteConfig(env, node, push, instance),
rds: "", // no RDS for inbound traffic
useRemoteAddress: false,
direction: http_conn.INGRESS,
connectionManager: &http_conn.HttpConnectionManager{
// Append and forward client cert to backend.
ForwardClientCertDetails: http_conn.APPEND_FORWARD,
},
}
case plugin.ListenerProtocolTCP:
tcpNetworkFilters = buildInboundNetworkFilters(env, instance)
default:
log.Warnf("Unsupported inbound protocol %v for port %#v", protocol, endpoint.ServicePort)
continue
}
for _, p := range configgen.Plugins {
params := &plugin.InputParams{
ListenerProtocol: listenerType,
Env: env,
Node: node,
ProxyInstances: proxyInstances,
ServiceInstance: instance,
Port: endpoint.ServicePort,
}
chains := p.OnInboundFilterChains(params)
if len(chains) == 0 {
continue
}
if len(allChains) != 0 {
log.Warnf("Found two plugin setups inbound filter chains for listeners, FilterChainMatch may not work as intended!")
}
allChains = append(allChains, chains...)
}
// Construct the default filter chain.
if len(allChains) == 0 {
log.Infof("Use default filter chain for %v", endpoint)
// add one empty entry to the list so we generate a default listener below
allChains = []plugin.FilterChain{{}}
}
for _, chain := range allChains {
listenerOpts.filterChainOpts = append(listenerOpts.filterChainOpts, &filterChainOpts{
httpOpts: httpOpts,
networkFilters: tcpNetworkFilters,
tlsContext: chain.TLSContext,
match: chain.FilterChainMatch,
listenerFilters: chain.RequiredListenerFilters,
})
}
// call plugins
l := buildListener(listenerOpts)
mutable := &plugin.MutableObjects{
Listener: l,
FilterChains: make([]plugin.FilterChain, len(l.FilterChains)),
}
for _, p := range configgen.Plugins {
params := &plugin.InputParams{
ListenerProtocol: listenerType,
Env: env,
Node: node,
ProxyInstances: proxyInstances,
ServiceInstance: instance,
Port: endpoint.ServicePort,
Push: push,
}
if err := p.OnInboundListener(params, mutable); err != nil {
log.Warn(err.Error())
}
}
// Filters are serialized one time into an opaque struct once we have the complete list.
if err := marshalFilters(mutable.Listener, listenerOpts, mutable.FilterChains); err != nil {
log.Warna("buildSidecarInboundListeners ", err.Error())
} else {
listeners = append(listeners, mutable.Listener)
listenerMap[listenerMapKey] = instance
}
}
return listeners
}
type listenerEntry struct {
// TODO: Clean this up
services []*model.Service
servicePort *model.Port
listener *xdsapi.Listener
}
func protocolName(p model.Protocol) string {
switch plugin.ModelProtocolToListenerProtocol(p) {
case plugin.ListenerProtocolHTTP:
return "HTTP"
case plugin.ListenerProtocolTCP:
return "TCP"
default:
return "UNKNOWN"
}
}
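// outboundListenerConflict records the information needed to report a conflicting outbound listener as a push metric.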
type outboundListenerConflict struct {
metric *model.PushMetric
env *model.Environment
node *model.Proxy
listenerName string
currentProtocol model.Protocol
currentServices []*model.Service
newHostname model.Hostname
newProtocol model.Protocol
}
func (c outboundListenerConflict) addMetric(push *model.PushContext) {
currentHostnames := make([]string, len(c.currentServices))
for i, s := range c.currentServices {
currentHostnames[i] = string(s.Hostname)
}
concatHostnames := strings.Join(currentHostnames, ",")
push.Add(c.metric,
c.listenerName,
c.node,
fmt.Sprintf("Listener=%s Accepted%s=%s Rejected%s=%s %sServices=%d",
c.listenerName,
protocolName(c.currentProtocol),
concatHostnames,
protocolName(c.newProtocol),
c.newHostname,
protocolName(c.currentProtocol),
len(c.currentServices)))
}
// buildSidecarOutboundListeners generates http and tcp listeners for outbound connections from the service instance
// TODO(github.com/istio/pilot/issues/237)
//
// Sharing tcp_proxy and http_connection_manager filters on the same port for
// different destination services doesn't work with Envoy (yet). When the
// tcp_proxy filter's route matching fails for the http service the connection
// is closed without falling back to the http_connection_manager.
//
// Temporary workaround is to add a listener for each service IP that requires
// TCP routing
//
// Connections to the ports of non-load balanced services are directed to
// the connection's original destination. This avoids costly queries of instance
// IPs and ports, but requires that ports of non-load balanced service be unique.
func (configgen *ConfigGeneratorImpl) buildSidecarOutboundListeners(env *model.Environment, node *model.Proxy, push *model.PushContext,
proxyInstances []*model.ServiceInstance, services []*model.Service) []*xdsapi.Listener {
var proxyLabels model.LabelsCollection
for _, w := range proxyInstances {
proxyLabels = append(proxyLabels, w.Labels)
}
meshGateway := map[string]bool{model.IstioMeshGateway: true}
configs := push.VirtualServices(meshGateway)
var tcpListeners, httpListeners []*xdsapi.Listener
// For conflict resolution
var currentListenerEntry *listenerEntry
listenerMap := make(map[string]*listenerEntry)
for _, service := range services {
for _, servicePort := range service.Ports {
listenAddress := WildcardAddress
var destinationIPAddress string
var listenerMapKey string
listenerOpts := buildListenerOpts{
env: env,
proxy: node,
proxyInstances: proxyInstances,
ip: WildcardAddress,
port: servicePort.Port,
protocol: servicePort.Protocol,
}
currentListenerEntry = nil
switch plugin.ModelProtocolToListenerProtocol(servicePort.Protocol) {
case plugin.ListenerProtocolHTTP:
listenerMapKey = fmt.Sprintf("%s:%d", listenAddress, servicePort.Port)
var exists bool
// Check if this HTTP listener conflicts with an existing wildcard TCP listener
// i.e. one of NONE resolution type, since we collapse all HTTP listeners into
// a single 0.0.0.0:port listener and use vhosts to distinguish individual http
// services in that port
if currentListenerEntry, exists = listenerMap[listenerMapKey]; exists {
if !currentListenerEntry.servicePort.Protocol.IsHTTP() {
outboundListenerConflict{
metric: model.ProxyStatusConflictOutboundListenerTCPOverHTTP,
env: env,
node: node,
listenerName: listenerMapKey,
currentServices: currentListenerEntry.services,
currentProtocol: currentListenerEntry.servicePort.Protocol,
newHostname: service.Hostname,
newProtocol: servicePort.Protocol,
}.addMetric(push)
}
// Skip building listener for the same http port
currentListenerEntry.services = append(currentListenerEntry.services, service)
continue
}
operation := http_conn.EGRESS
useRemoteAddress := false
listenerOpts.protocol = servicePort.Protocol
listenerOpts.filterChainOpts = []*filterChainOpts{{
httpOpts: &httpListenerOpts{
rds: fmt.Sprintf("%d", servicePort.Port),
useRemoteAddress: useRemoteAddress,
direction: operation,
},
}}
case plugin.ListenerProtocolTCP:
// Determine the listener address
// we listen on the service VIP if and only
// if the address is an IP address. If it's a CIDR, we listen on
// 0.0.0.0, and set up a filter chain match for the CIDR range.
// As a small optimization, CIDRs with /32 prefix will be converted
// into listener address so that there is a dedicated listener for this
// ip:port. This will reduce the impact of a listener reload
var svcListenAddress string
// This is to maintain backward compatibility with 0.8 envoy
if !util.Is1xProxy(node) {
if service.Resolution != model.Passthrough {
svcListenAddress = service.GetServiceAddressForProxy(node)
}
} else {
svcListenAddress = service.GetServiceAddressForProxy(node)
}
// We should never get an empty address.
// This is a safety guard, in case some platform adapter isn't doing things
// properly
if len(svcListenAddress) > 0 {
if !strings.Contains(svcListenAddress, "/") {
listenAddress = svcListenAddress
} else {
// Address is a CIDR. Fall back to 0.0.0.0 and
// filter chain match
destinationIPAddress = svcListenAddress
}
}
listenerMapKey = fmt.Sprintf("%s:%d", listenAddress, servicePort.Port)
var exists bool
// Check if this TCP listener conflicts with an existing HTTP listener on 0.0.0.0:Port
if currentListenerEntry, exists = listenerMap[listenerMapKey]; exists {
// Check for port collisions between TCP/TLS and HTTP.
// If configured correctly, TCP/TLS ports may not collide.
// We'll need to do additional work to find out if there is a collision within TCP/TLS.
if !currentListenerEntry.servicePort.Protocol.IsTCP() {
outboundListenerConflict{
metric: model.ProxyStatusConflictOutboundListenerHTTPOverTCP,
env: env,
node: node,
listenerName: listenerMapKey,
currentServices: currentListenerEntry.services,
currentProtocol: currentListenerEntry.servicePort.Protocol,
newHostname: service.Hostname,
newProtocol: servicePort.Protocol,
}.addMetric(push)
continue
}
// We have a collision with another TCP port.
// This can happen only if the service is listening on 0.0.0.0:<port>
// which is the case for headless services, or non-k8s services that do not have a VIP.
// Unfortunately we won't know if this is a real conflict or not
// until we process the VirtualServices, etc.
// The conflict resolution is done later in this code
}
listenerOpts.filterChainOpts = buildSidecarOutboundTCPTLSFilterChainOpts(env, node, push, configs,
destinationIPAddress, service, servicePort, proxyLabels, meshGateway)
default:
// UDP or other protocols: no need to log, it's too noisy
continue
}
// Even if we have a non-empty current listener, let's build the new listener with the filter chains
// In the end, we will merge the filter chains
// call plugins
listenerOpts.ip = listenAddress
l := buildListener(listenerOpts)
mutable := &plugin.MutableObjects{
Listener: l,
FilterChains: make([]plugin.FilterChain, len(l.FilterChains)),
}
for _, p := range configgen.Plugins {
params := &plugin.InputParams{
ListenerProtocol: plugin.ModelProtocolToListenerProtocol(servicePort.Protocol),
Env: env,
Node: node,
ProxyInstances: proxyInstances,
Service: service,
Port: servicePort,
Push: push,
}
if err := p.OnOutboundListener(params, mutable); err != nil {
log.Warn(err.Error())
}
}
// Filters are serialized one time into an opaque struct once we have the complete list.
if err := marshalFilters(mutable.Listener, listenerOpts, mutable.FilterChains); err != nil {
log.Warna("buildSidecarOutboundListeners: ", err.Error())
continue
}
// TODO(rshriram) merge multiple identical filter chains with just a single destination CIDR based
// filter chain match, into a single filter chain and an array of destination CIDR matches
// We checked TCP over HTTP, and HTTP over TCP conflicts above.
// The code below checks for TCP over TCP conflicts and merges listeners
if currentListenerEntry != nil {
// merge the newly built listener with the existing listener
// if and only if the filter chains have distinct conditions
// Extract the current filter chain matches
// For every new filter chain match being added, check if any previous match is same
// if so, skip adding this filter chain with a warning
// This is very unoptimized.
newFilterChains := make([]listener.FilterChain, 0,
len(currentListenerEntry.listener.FilterChains)+len(mutable.Listener.FilterChains))
newFilterChains = append(newFilterChains, currentListenerEntry.listener.FilterChains...)
for _, incomingFilterChain := range mutable.Listener.FilterChains {
conflictFound := false
compareWithExisting:
for _, existingFilterChain := range currentListenerEntry.listener.FilterChains {
if existingFilterChain.FilterChainMatch == nil {
// This is a catch all filter chain.
// We can only merge with a non-catch all filter chain
// Else mark it as conflict
if incomingFilterChain.FilterChainMatch == nil {
conflictFound = true
outboundListenerConflict{
metric: model.ProxyStatusConflictOutboundListenerTCPOverTCP,
env: env,
node: node,
listenerName: listenerMapKey,
currentServices: currentListenerEntry.services,
currentProtocol: currentListenerEntry.servicePort.Protocol,
newHostname: service.Hostname,
newProtocol: servicePort.Protocol,
}.addMetric(push)
break compareWithExisting
} else {
continue
}
}
if incomingFilterChain.FilterChainMatch == nil {
continue
}
// We have two non-catch all filter chains. Check for duplicates
if reflect.DeepEqual(*existingFilterChain.FilterChainMatch, *incomingFilterChain.FilterChainMatch) {
conflictFound = true
outboundListenerConflict{
metric: model.ProxyStatusConflictOutboundListenerTCPOverTCP,
env: env,
node: node,
listenerName: listenerMapKey,
currentServices: currentListenerEntry.services,
currentProtocol: currentListenerEntry.servicePort.Protocol,
newHostname: service.Hostname,
newProtocol: servicePort.Protocol,
}.addMetric(push)
break compareWithExisting
}
}
if !conflictFound {
// There is no conflict with any filter chain in the existing listener.
// So append the new filter chains to the existing listener's filter chains
newFilterChains = append(newFilterChains, incomingFilterChain)
lEntry := listenerMap[listenerMapKey]
lEntry.services = append(lEntry.services, service)
}
}
currentListenerEntry.listener.FilterChains = newFilterChains
} else {
listenerMap[listenerMapKey] = &listenerEntry{
services: []*model.Service{service},
servicePort: servicePort,
listener: mutable.Listener,
}
}
if log.DebugEnabled() && len(mutable.Listener.FilterChains) > 1 || currentListenerEntry != nil {
var numChains int
if currentListenerEntry != nil {
numChains = len(currentListenerEntry.listener.FilterChains)
} else {
numChains = len(mutable.Listener.FilterChains)
}
log.Debugf("buildSidecarOutboundListeners: multiple filter chain listener %s with %d chains", mutable.Listener.Name, numChains)
}
}
}
for name, l := range listenerMap {
if err := l.listener.Validate(); err != nil {
log.Warnf("buildSidecarOutboundListeners: error validating listener %s (type %v): %v", name, l.servicePort.Protocol, err)
invalidOutboundListeners.Add(1)
continue
}
if l.servicePort.Protocol.IsTCP() {
tcpListeners = append(tcpListeners, l.listener)
} else {
httpListeners = append(httpListeners, l.listener)
}
}
return append(tcpListeners, httpListeners...)
}
// buildSidecarInboundMgmtListeners creates inbound TCP only listeners for the management ports on
// server (inbound). Management port listeners are slightly different from standard Inbound listeners
// in that, they do not have mixer filters nor do they have inbound auth.
// N.B. If a given management port is same as the service instance's endpoint port
// the pod will fail to start in Kubernetes, because the mixer service tries to
// lookup the service associated with the Pod. Since the pod is yet to be started
// (and hence not bound to the service), the service lookup fails causing the mixer
// to fail the health check call. This results in a vicious cycle, where kubernetes
// restarts the unhealthy pod after successive failed health checks, and the mixer
// continues to reject the health checks as there is no service associated with
// the pod.
// So, if a user wants to use kubernetes probes with Istio, she should ensure
// that the health check ports are distinct from the service ports.
func buildSidecarInboundMgmtListeners(env *model.Environment, managementPorts model.PortList, managementIP string) []*xdsapi.Listener {
listeners := make([]*xdsapi.Listener, 0, len(managementPorts))
if managementIP == "" {
managementIP = "127.0.0.1"
}
// assumes that inbound connections/requests are sent to the endpoint address
for _, mPort := range managementPorts {
switch mPort.Protocol {
case model.ProtocolHTTP, model.ProtocolHTTP2, model.ProtocolGRPC, model.ProtocolTCP,
model.ProtocolHTTPS, model.ProtocolTLS, model.ProtocolMongo, model.ProtocolRedis:
instance := &model.ServiceInstance{
Endpoint: model.NetworkEndpoint{
Address: managementIP,
Port: mPort.Port,
ServicePort: mPort,
},
Service: &model.Service{
Hostname: ManagementClusterHostname,
},
}
listenerOpts := buildListenerOpts{
ip: managementIP,
port: mPort.Port,
protocol: model.ProtocolTCP,
filterChainOpts: []*filterChainOpts{{
networkFilters: buildInboundNetworkFilters(env, instance),
}},
}
l := buildListener(listenerOpts)
// TODO: should we call plugins for the admin port listeners too? We do everywhere else we construct listeners.
if err := marshalFilters(l, listenerOpts, []plugin.FilterChain{{}}); err != nil {
log.Warna("buildSidecarInboundMgmtListeners ", err.Error())
} else {
listeners = append(listeners, l)
}
default:
log.Warnf("Unsupported inbound protocol %v for management port %#v",
mPort.Protocol, mPort)
}
}
return listeners
}
// httpListenerOpts are options for an HTTP listener
type httpListenerOpts struct {
//nolint: maligned
routeConfig *xdsapi.RouteConfiguration
rds string
useRemoteAddress bool
direction http_conn.HttpConnectionManager_Tracing_OperationName
// If set, use this as a basis
connectionManager *http_conn.HttpConnectionManager
// stat prefix for the http connection manager
// DO not set this field. Will be overridden by marshalFilters
statPrefix string
}
// filterChainOpts describes a filter chain: a set of filters with the same TLS context
type filterChainOpts struct {
sniHosts []string
destinationCIDRs []string
tlsContext *auth.DownstreamTlsContext
httpOpts *httpListenerOpts
match *listener.FilterChainMatch
listenerFilters []listener.ListenerFilter
networkFilters []listener.Filter
}
// buildListenerOpts are the options required to build a Listener
type buildListenerOpts struct {
// nolint: maligned
env *model.Environment
proxy *model.Proxy
proxyInstances []*model.ServiceInstance
ip string
port int
protocol model.Protocol
bindToPort bool
filterChainOpts []*filterChainOpts
}
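// buildHTTPConnectionManager assembles the HttpConnectionManager config: it appends the CORS, fault and router HTTP filters, wires up either RDS or an inline route config, and optionally adds file access logging and tracing.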
func buildHTTPConnectionManager(env *model.Environment, node *model.Proxy, httpOpts *httpListenerOpts,
httpFilters []*http_conn.HttpFilter) *http_conn.HttpConnectionManager {
filters := append(httpFilters,
&http_conn.HttpFilter{Name: xdsutil.CORS},
&http_conn.HttpFilter{Name: xdsutil.Fault},
&http_conn.HttpFilter{Name: xdsutil.Router},
)
if httpOpts.connectionManager == nil {
httpOpts.connectionManager = &http_conn.HttpConnectionManager{}
}
connectionManager := httpOpts.connectionManager
connectionManager.CodecType = http_conn.AUTO
connectionManager.AccessLog = []*accesslog.AccessLog{}
connectionManager.HttpFilters = filters
connectionManager.StatPrefix = httpOpts.statPrefix
connectionManager.UseRemoteAddress = &google_protobuf.BoolValue{Value: httpOpts.useRemoteAddress}
if util.Is1xProxy(node) {
// Allow websocket upgrades
websocketUpgrade := &http_conn.HttpConnectionManager_UpgradeConfig{UpgradeType: "websocket"}
connectionManager.UpgradeConfigs = []*http_conn.HttpConnectionManager_UpgradeConfig{websocketUpgrade}
notimeout := 0 * time.Second
// Setting IdleTimeout to 0 seems to break most tests, causing
// envoy to disconnect.
// connectionManager.IdleTimeout = &notimeout
connectionManager.StreamIdleTimeout = &notimeout
}
if httpOpts.rds != "" {
rds := &http_conn.HttpConnectionManager_Rds{
Rds: &http_conn.Rds{
ConfigSource: core.ConfigSource{
ConfigSourceSpecifier: &core.ConfigSource_Ads{
Ads: &core.AggregatedConfigSource{},
},
},
RouteConfigName: httpOpts.rds,
},
}
connectionManager.RouteSpecifier = rds
} else {
connectionManager.RouteSpecifier = &http_conn.HttpConnectionManager_RouteConfig{RouteConfig: httpOpts.routeConfig}
}
if env.Mesh.AccessLogFile != "" {
fl := &fileaccesslog.FileAccessLog{
Path: env.Mesh.AccessLogFile,
Format: EnvoyHTTPLogFormat,
}
connectionManager.AccessLog = []*accesslog.AccessLog{
{
Config: util.MessageToStruct(fl),
Name: xdsutil.FileAccessLog,
},
}
}
if env.Mesh.EnableTracing {
tc := model.GetTraceConfig()
connectionManager.Tracing = &http_conn.HttpConnectionManager_Tracing{
OperationName: httpOpts.direction,
ClientSampling: &envoy_type.Percent{
Value: tc.ClientSampling,
},
RandomSampling: &envoy_type.Percent{
Value: tc.RandomSampling,
},
OverallSampling: &envoy_type.Percent{
Value: tc.OverallSampling,
},
}
connectionManager.GenerateRequestId = &google_protobuf.BoolValue{Value: true}
}
if verboseDebug {
connectionManagerJSON, _ := json.MarshalIndent(connectionManager, " ", " ")
log.Infof("LDS: %s \n", string(connectionManagerJSON))
}
return connectionManager
}
// buildListener builds and initializes a Listener proto based on the provided opts. It does not set any filters.
func buildListener(opts buildListenerOpts) *xdsapi.Listener {
filterChains := make([]listener.FilterChain, 0, len(opts.filterChainOpts))
// TODO(incfly): consider changing this to map to handle duplicated listener filters from different chains?
var listenerFilters []listener.ListenerFilter
// add a TLS inspector if we need to detect ServerName or ALPN
needTLSInspector := false
for _, chain := range opts.filterChainOpts {
needsALPN := chain.tlsContext != nil && chain.tlsContext.CommonTlsContext != nil && len(chain.tlsContext.CommonTlsContext.AlpnProtocols) > 0
if len(chain.sniHosts) > 0 || needsALPN {
needTLSInspector = true
break
}
}
if needTLSInspector {
listenerFilters = append(listenerFilters, listener.ListenerFilter{Name: envoyListenerTLSInspector})
}
for _, chain := range opts.filterChainOpts {
listenerFilters = append(listenerFilters, chain.listenerFilters...)
match := &listener.FilterChainMatch{}
needMatch := false
if chain.match != nil {
needMatch = true
match = chain.match
}
if len(chain.sniHosts) > 0 {
sort.Strings(chain.sniHosts)
fullWildcardFound := false
for _, h := range chain.sniHosts {
if h == "*" {
fullWildcardFound = true
// If we have a host with *, it effectively means match anything, i.e.
// no SNI based matching for this host.
break
}
}
if !fullWildcardFound {
match.ServerNames = chain.sniHosts
}
}
if len(chain.destinationCIDRs) > 0 {
sort.Strings(chain.destinationCIDRs)
for _, d := range chain.destinationCIDRs {
if len(d) == 0 {
continue
}
cidr := util.ConvertAddressToCidr(d)
if cidr != nil && cidr.AddressPrefix != model.UnspecifiedIP {
match.PrefixRanges = append(match.PrefixRanges, cidr)
}
}
}
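// If no match criteria were populated, drop the empty FilterChainMatch entirely.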
if !needMatch && reflect.DeepEqual(*match, listener.FilterChainMatch{}) {
match = nil
}
filterChains = append(filterChains, listener.FilterChain{
FilterChainMatch: match,
TlsContext: chain.tlsContext,
})
}
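// Listeners that should not bind to their port use the deprecated v1 config to disable binding.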
var deprecatedV1 *xdsapi.Listener_DeprecatedV1
if !opts.bindToPort {
deprecatedV1 = &xdsapi.Listener_DeprecatedV1{
BindToPort: boolFalse,
}
}
return &xdsapi.Listener{
Name: fmt.Sprintf("%s_%d", opts.ip, opts.port),
Address: util.BuildAddress(opts.ip, uint32(opts.port)),
ListenerFilters: listenerFilters,
FilterChains: filterChains,
DeprecatedV1: deprecatedV1,
}
}
// marshalFilters adds the provided TCP and HTTP filters to the provided Listener and serializes them.
//
// TODO: should we change this from []plugins.FilterChains to [][]listener.Filter, [][]*http_conn.HttpFilter?
// TODO: given how tightly tied listener.FilterChains, opts.filterChainOpts, and mutable.FilterChains are to each other
// we should encapsulate them some way to ensure they remain consistent (mainly that in each an index refers to the same
// chain)
func marshalFilters(l *xdsapi.Listener, opts buildListenerOpts, chains []plugin.FilterChain) error {
if len(opts.filterChainOpts) == 0 {
return fmt.Errorf("must have more than 0 chains in listener: %#v", l)
}
for i, chain := range chains {
opt := opts.filterChainOpts[i]
if len(chain.TCP) > 0 {
l.FilterChains[i].Filters = append(l.FilterChains[i].Filters, chain.TCP...)
}
if len(opt.networkFilters) > 0 {
l.FilterChains[i].Filters = append(l.FilterChains[i].Filters, opt.networkFilters...)
}
if log.DebugEnabled() {
log.Debugf("attached %d network filters to listener %q filter chain %d", len(chain.TCP)+len(opt.networkFilters), l.Name, i)
}
if opt.httpOpts != nil {
opt.httpOpts.statPrefix = l.Name
connectionManager := buildHTTPConnectionManager(opts.env, opts.proxy, opt.httpOpts, chain.HTTP)
l.FilterChains[i].Filters = append(l.FilterChains[i].Filters, listener.Filter{
Name: xdsutil.HTTPConnectionManager,
Config: util.MessageToStruct(connectionManager),
})
log.Debugf("attached HTTP filter with %d http_filter options to listener %q filter chain %d", 1+len(chain.HTTP), l.Name, i)
}
}
return nil
}
| [
"\"PILOT_DUMP_ALPHA3\""
]
| []
| [
"PILOT_DUMP_ALPHA3"
]
| [] | ["PILOT_DUMP_ALPHA3"] | go | 1 | 0 | |
office/v1/examples/call_queue_by_name/main.go | package main
import (
"context"
"fmt"
"log"
"net/http"
"os"
"github.com/antihax/optional"
"github.com/grokify/goauth/credentials"
"github.com/grokify/mogo/config"
"github.com/grokify/mogo/fmt/fmtutil"
"github.com/grokify/mogo/net/httputilmore"
"github.com/grokify/mogo/net/urlutil"
"github.com/jessevdk/go-flags"
rc "github.com/grokify/go-ringcentral-client/office/v1/client"
ru "github.com/grokify/go-ringcentral-client/office/v1/util"
)
type CliOptions struct {
EnvFile string `short:"e" long:"env" description:"Env filepath"`
To []string `short:"t" long:"to" description:"Recipients"`
Files []string `short:"f" long:"file" description:"Files to send"`
CoverPageText string `short:"c" long:"coverpagetext" description:"Cover Page Text"`
}
func sendFaxRaw(opts CliOptions, httpClient *http.Client) {
fax := ru.FaxRequest{
To: opts.To,
CoverPageText: opts.CoverPageText,
Resolution: "High",
FilePaths: opts.Files,
}
url := urlutil.JoinAbsolute(os.Getenv("RINGCENTRAL_SERVER_URL"), "/restapi/v1.0/account/~/extension/~/fax")
resp, err := fax.Post(httpClient, url)
if err != nil {
panic(err)
}
err = httputilmore.PrintResponse(resp, true)
if err != nil {
panic(err)
}
}
// example: $ go run fax_send.go -to=+16505550100 -file=$GOPATH/src/github.com/grokify/go-ringcentral-client/office/v1/examples/fax_send/test_file.pdf
func main() {
opts := CliOptions{}
_, err := flags.Parse(&opts)
if err != nil {
log.Fatal(err)
}
err = config.LoadDotEnvFirst(opts.EnvFile, os.Getenv("ENV_PATH"), "./.env")
if err != nil {
log.Fatal(err)
}
fmtutil.PrintJSON(opts)
/*
rcCfg, err := NewRingCentralConfigEnv()
if err != nil {
log.Fatal(err)
}
fmtutil.PrintJSON(rcCfg)
*/
apiClient, err := ru.NewApiClientPassword(
credentials.NewOAuth2CredentialsEnv("RINGCENTRAL_"))
if err != nil {
log.Fatal(err)
}
httpClient := apiClient.HTTPClient()
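// Manual toggles: flip the conditions below to exercise the raw HTTP fax path or the generated client path.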
if 1 == 0 {
sendFaxRaw(opts, httpClient)
}
if 1 == 1 {
fmt.Println(opts.Files[0])
file, err := os.Open(opts.Files[0])
if err != nil {
log.Fatal(err)
}
params := rc.SendFaxMessageOpts{}
if len(opts.CoverPageText) > 0 {
//params.FaxResolution = optional.NewString("High")
params.CoverPageText = optional.NewString(opts.CoverPageText)
}
fmtutil.PrintJSON(opts)
if 1 == 1 {
params.Attachment = optional.NewInterface(file)
}
info, resp, err := apiClient.MessagesApi.SendFaxMessage(
context.Background(),
"~",
"~",
opts.To,
&params)
if err != nil {
panic(err)
} else if resp.StatusCode > 299 {
panic(fmt.Errorf("API Status %v", resp.StatusCode))
}
fmtutil.PrintJSON(info)
}
fmt.Println("DONE")
}
| [
"\"RINGCENTRAL_SERVER_URL\"",
"\"ENV_PATH\""
]
| []
| [
"ENV_PATH",
"RINGCENTRAL_SERVER_URL"
]
| [] | ["ENV_PATH", "RINGCENTRAL_SERVER_URL"] | go | 2 | 0 | |
plugin/src/main/java/org/wildfly/plugins/bootablejar/maven/common/Utils.java | /*
* Copyright 2016-2019 Red Hat, Inc. and/or its affiliates
* and other contributors as indicated by the @author tags.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.wildfly.plugins.bootablejar.maven.common;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.List;
import java.util.Locale;
import org.apache.maven.plugin.MojoExecutionException;
import org.apache.maven.project.MavenProject;
import org.jboss.as.controller.client.ModelControllerClient;
import org.wildfly.plugin.core.ServerHelper;
import org.wildfly.plugins.bootablejar.maven.goals.BuildBootableJarMojo;
/**
*
* @author jdenise
*/
public class Utils {
public static String getBootableJarPath(MavenProject project, String goal) throws MojoExecutionException {
String finalName = project.getBuild().getFinalName();
String jarName = finalName + "-" + BuildBootableJarMojo.BOOTABLE_SUFFIX + "." + BuildBootableJarMojo.JAR;
String path = project.getBuild().getDirectory() + File.separator + jarName;
if (!Files.exists(Paths.get(path))) {
throw new MojoExecutionException("Cannot " + goal + " without a bootable jar; please run `mvn wildfly-jar:package` prior to invoking wildfly-jar:" + goal + " from the command-line");
}
return path;
}
public static void startBootableJar(String jarPath, List<String> jvmArguments,
List<String> arguments, boolean waitFor,
boolean checkStart,
ModelControllerClient client, long timeout) throws MojoExecutionException {
List<String> cmd = new ArrayList<>();
cmd.add(getJava());
cmd.addAll(jvmArguments);
cmd.add("-jar");
cmd.add(jarPath);
cmd.addAll(arguments);
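// Launch the bootable JAR, inheriting this process' stdin/stdout/stderr.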
ProcessBuilder builder = new ProcessBuilder(cmd).inheritIO();
try {
Process p = builder.start();
if (waitFor) {
p.waitFor();
} else {
if (checkStart) {
checkStarted(client, timeout);
}
}
} catch (Exception ex) {
throw new MojoExecutionException(ex.getLocalizedMessage(), ex);
}
}
private static void checkStarted(ModelControllerClient client, long timeout) throws Exception {
ServerHelper.waitForStandalone(null, client, timeout);
}
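// Resolve the java executable, preferring JAVA_HOME when it is set.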
private static String getJava() {
String exe = "java";
if (isWindows()) {
exe = "java.exe";
}
String javaHome = System.getenv("JAVA_HOME");
if (javaHome == null) {
return exe;
} else {
return javaHome + File.separator + "bin" + File.separator + exe;
}
}
private static boolean isWindows() {
return System.getProperty("os.name", null).toLowerCase(Locale.ENGLISH).contains("windows");
}
}
| [
"\"JAVA_HOME\""
]
| []
| [
"JAVA_HOME"
]
| [] | ["JAVA_HOME"] | java | 1 | 0 | |
cloudgenetics/s3uploader.go | package cloudgenetics
import (
"os"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"log"
"time"
"github.com/gin-gonic/gin"
"github.com/google/uuid"
)
type FileUpload struct {
// Name of the uploaded file
Name string `json:"name,omitempty"`
// MIME type of the file
Type string `json:"mime,omitempty"`
// UUID of the dataset the file belongs to
Uid string `json:"uuid,omitempty"`
}
func presignedUrl(c *gin.Context) (string, string) {
var file FileUpload
c.BindJSON(&file)
// Initialize a session; the SDK resolves credentials from the default chain
// (e.g. environment variables or ~/.aws/credentials).
awsregion := os.Getenv("AWS_REGION")
sess, err := session.NewSession(&aws.Config{
Region: aws.String(awsregion)},
)
if err != nil {
log.Println("Failed to create AWS session", err)
}
// Create S3 service client
svc := s3.New(sess)
bucket := os.Getenv("AWS_S3_BUCKET")
// Set UUID if not found in the request
datasetid := uuid.New().String()
if IsValidUUID(file.Uid) {
datasetid = file.Uid
}
filename := datasetid + "/" + file.Name
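// Build an unsent PutObject request; pre-signing it below yields a time-limited URL the client can use to upload directly to S3.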
req, _ := svc.PutObjectRequest(&s3.PutObjectInput{
Bucket: aws.String(bucket),
Key: aws.String(filename),
})
url, err := req.Presign(15 * time.Minute)
if err != nil {
log.Println("Failed to sign request", err)
}
return datasetid, url
}
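// IsValidUUID reports whether u parses as a valid UUID.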
func IsValidUUID(u string) bool {
_, err := uuid.Parse(u)
return err == nil
}
| [
"\"AWS_REGION\"",
"\"AWS_S3_BUCKET\""
]
| []
| [
"AWS_S3_BUCKET",
"AWS_REGION"
]
| [] | ["AWS_S3_BUCKET", "AWS_REGION"] | go | 2 | 0 | |
pkg/allocateip/allocateip.go | // Copyright (c) 2018-2021 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package allocateip
import (
"context"
"fmt"
gnet "net"
"os"
"reflect"
"time"
log "github.com/sirupsen/logrus"
api "github.com/projectcalico/api/pkg/apis/projectcalico/v3"
"github.com/projectcalico/libcalico-go/lib/apiconfig"
libapi "github.com/projectcalico/libcalico-go/lib/apis/v3"
bapi "github.com/projectcalico/libcalico-go/lib/backend/api"
"github.com/projectcalico/libcalico-go/lib/backend/model"
"github.com/projectcalico/libcalico-go/lib/backend/syncersv1/tunnelipsyncer"
client "github.com/projectcalico/libcalico-go/lib/clientv3"
cerrors "github.com/projectcalico/libcalico-go/lib/errors"
"github.com/projectcalico/libcalico-go/lib/ipam"
"github.com/projectcalico/libcalico-go/lib/net"
"github.com/projectcalico/libcalico-go/lib/options"
"github.com/projectcalico/typha/pkg/syncclientutils"
"github.com/projectcalico/typha/pkg/syncproto"
"github.com/projectcalico/node/buildinfo"
"github.com/projectcalico/node/pkg/calicoclient"
)
// This file contains the main processing and common logic for assigning tunnel addresses,
// used by calico/node to set the host's tunnel address if IPIP or VXLAN is enabled on the pool or
// wireguard is enabled on the node.
//
// It will assign an address if there are any available, and remove any tunnel addresses
// that are configured and should no longer be.
// Run runs the tunnel ip allocator. If done is nil, it runs in single-shot mode. If non-nil, it runs in daemon mode
// performing a reconciliation when IP pool or node configuration changes that may impact the allocations.
func Run(done <-chan struct{}) {
// This binary is only ever invoked _after_ the
// startup binary has been invoked and the modified environments have
// been sourced. Therefore, the NODENAME environment will always be
// set at this point.
nodename := os.Getenv("NODENAME")
if nodename == "" {
log.Panic("NODENAME environment is not set")
}
// Load the client config from environment.
cfg, c := calicoclient.CreateClient()
run(nodename, cfg, c, done)
}
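// run implements the allocation logic shared by single-shot and daemon modes.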
func run(nodename string, cfg *apiconfig.CalicoAPIConfig, c client.Interface, done <-chan struct{}) {
// If configured to use host-local IPAM, there is no need to configure tunnel addresses as they use the
// first IP of the pod CIDR - this is handled in the k8s backend code in libcalico-go.
if cfg.Spec.K8sUsePodCIDR {
log.Debug("Using host-local IPAM, no need to allocate a tunnel IP")
if done != nil {
// If a done channel is specified, only exit when this is closed.
<-done
}
return
}
if done == nil {
// Running in single shot mode, so assign addresses and exit.
reconcileTunnelAddrs(nodename, cfg, c)
return
}
// This is running as a daemon. Create a long-running reconciler.
r := &reconciler{
nodename: nodename,
cfg: cfg,
client: c,
ch: make(chan struct{}),
data: make(map[string]interface{}),
}
// Either create a typha syncclient or a local syncer depending on configuration. This calls back into the
// reconciler to trigger updates when necessary.
// Read Typha settings from the environment.
// When Typha is in use, there will already be variables prefixed with FELIX_, so it's
// convenient if we honor those as well as the CALICO variables.
typhaConfig := syncclientutils.ReadTyphaConfig([]string{"FELIX_", "CALICO_"})
if syncclientutils.MustStartSyncerClientIfTyphaConfigured(
&typhaConfig, syncproto.SyncerTypeTunnelIPAllocation,
buildinfo.GitVersion, nodename, fmt.Sprintf("tunnel-ip-allocation %s", buildinfo.GitVersion),
r,
) {
log.Debug("Using typha syncclient")
} else {
// Use the syncer locally.
log.Debug("Using local syncer")
syncer := tunnelipsyncer.New(c.(backendClientAccessor).Backend(), r, nodename)
syncer.Start()
}
// Run the reconciler.
r.run(done)
}
// reconciler watches IPPool and Node configuration and triggers a reconciliation of the Tunnel IP addresses whenever
// it spots a configuration change that may impact IP selection.
type reconciler struct {
nodename string
cfg *apiconfig.CalicoAPIConfig
client client.Interface
ch chan struct{}
data map[string]interface{}
inSync bool
}
// run is the main reconciliation loop, it loops until done.
func (r reconciler) run(done <-chan struct{}) {
// Loop forever, updating whenever we get a kick. The first kick will happen as soon as the syncer is in sync.
for {
select {
case <-r.ch:
// Received an update that requires reconciliation. If the reconciliation fails it will cause the daemon
// to exit; this is fine - it will be restarted, and the syncer will trigger a reconciliation when in-sync
// again.
reconcileTunnelAddrs(r.nodename, r.cfg, r.client)
case <-done:
return
}
}
}
// OnStatusUpdated handles the syncer status callback method.
func (r *reconciler) OnStatusUpdated(status bapi.SyncStatus) {
if status == bapi.InSync {
// We are in-sync, trigger an initial scan/update of the IP addresses.
r.inSync = true
r.ch <- struct{}{}
}
}
// OnUpdates handles the syncer resource updates.
func (r *reconciler) OnUpdates(updates []bapi.Update) {
var updated bool
for _, u := range updates {
switch u.UpdateType {
case bapi.UpdateTypeKVDeleted:
// Resource is deleted. If this resource is in our cache then trigger an update.
if _, ok := r.data[u.Key.String()]; ok {
updated = true
}
delete(r.data, u.Key.String())
case bapi.UpdateTypeKVNew, bapi.UpdateTypeKVUpdated:
// Resource is created or updated. Depending on the resource, we extract and cache the relevant data that
// we are monitoring. If the data has changed then trigger an update.
var data interface{}
switch v := u.Value.(type) {
case *model.IPPool:
// For pools just track the whole data.
log.Debugf("Updated pool resource: %s", u.Key)
data = v
case *libapi.Node:
// For nodes, we only care about our own node, *and* we only care about the wireguard public key.
if v.Name != r.nodename {
continue
}
log.Debugf("Updated node resource: %s", u.Key)
data = v.Status.WireguardPublicKey
default:
// We got an update for an unexpected resource type. Rather than ignore, just treat as updated so that
// we reconcile the addresses.
log.Warningf("Unexpected resource update: %s", u.Key)
updated = true
continue
}
if existing, ok := r.data[u.Key.String()]; !ok || !reflect.DeepEqual(existing, data) {
// Entry is new or associated data is modified. In either case update the data and flag as updated.
log.Debug("Stored data has been modified - trigger reconciliation")
updated = true
r.data[u.Key.String()] = data
}
}
}
if updated && r.inSync {
// We have updated data. Trigger a reconciliation, but don't block if there is already an update pending.
select {
case r.ch <- struct{}{}:
default:
}
}
}
// reconcileTunnelAddrs performs a single shot update of the tunnel IP allocations.
func reconcileTunnelAddrs(nodename string, cfg *apiconfig.CalicoAPIConfig, c client.Interface) {
ctx := context.Background()
// Get node resource for given nodename.
node, err := c.Nodes().Get(ctx, nodename, options.GetOptions{})
if err != nil {
log.WithError(err).Fatalf("failed to fetch node resource '%s'", nodename)
}
// Get list of ip pools
ipPoolList, err := c.IPPools().List(ctx, options.ListOptions{})
if err != nil {
log.WithError(err).Fatal("Unable to query IP pool configuration")
}
// If wireguard is enabled then allocate an IP for the wireguard device. We do this for all deployment types even
// when pod CIDRs are not managed by Calico.
if cidrs := determineEnabledPoolCIDRs(*node, *ipPoolList, ipam.AttributeTypeWireguard); len(cidrs) > 0 {
ensureHostTunnelAddress(ctx, c, nodename, cidrs, ipam.AttributeTypeWireguard)
} else {
removeHostTunnelAddr(ctx, c, nodename, ipam.AttributeTypeWireguard)
}
// Query the IPIP enabled pools and either configure the tunnel
// address, or remove it.
if cidrs := determineEnabledPoolCIDRs(*node, *ipPoolList, ipam.AttributeTypeIPIP); len(cidrs) > 0 {
ensureHostTunnelAddress(ctx, c, nodename, cidrs, ipam.AttributeTypeIPIP)
} else {
removeHostTunnelAddr(ctx, c, nodename, ipam.AttributeTypeIPIP)
}
// Query the VXLAN enabled pools and either configure the tunnel
// address, or remove it.
if cidrs := determineEnabledPoolCIDRs(*node, *ipPoolList, ipam.AttributeTypeVXLAN); len(cidrs) > 0 {
ensureHostTunnelAddress(ctx, c, nodename, cidrs, ipam.AttributeTypeVXLAN)
} else {
removeHostTunnelAddr(ctx, c, nodename, ipam.AttributeTypeVXLAN)
}
}
func ensureHostTunnelAddress(ctx context.Context, c client.Interface, nodename string, cidrs []net.IPNet, attrType string) {
logCtx := getLogger(attrType)
logCtx.WithField("Node", nodename).Debug("Ensure tunnel address is set")
// Get the currently configured address.
node, err := c.Nodes().Get(ctx, nodename, options.GetOptions{})
if err != nil {
logCtx.WithError(err).Fatalf("Unable to retrieve tunnel address. Error getting node '%s'", nodename)
}
// Get the address and ipam attribute string
var addr string
switch attrType {
case ipam.AttributeTypeVXLAN:
addr = node.Spec.IPv4VXLANTunnelAddr
case ipam.AttributeTypeIPIP:
if node.Spec.BGP != nil {
addr = node.Spec.BGP.IPv4IPIPTunnelAddr
}
case ipam.AttributeTypeWireguard:
if node.Spec.Wireguard != nil {
addr = node.Spec.Wireguard.InterfaceIPv4Address
}
}
// Work out if we need to assign a tunnel address.
// In most cases we should not release the current address, and should assign a new one.
release := false
assign := true
if addr == "" {
// The tunnel has no IP address assigned, assign one.
logCtx.Info("Assign a new tunnel address")
// Defensively release any IP addresses with this handle. This covers a theoretical case
// where the node object has lost its reference to its IP, but the allocation still exists
// in IPAM. For example, if the node object was manually edited.
release = true
} else {
// Go ahead checking status of current address.
ipAddr := gnet.ParseIP(addr)
if ipAddr == nil {
logCtx.WithError(err).Fatalf("Failed to parse the CIDR '%s'", addr)
}
// Check if we got correct assignment attributes.
attr, handle, err := c.IPAM().GetAssignmentAttributes(ctx, net.IP{IP: ipAddr})
if err == nil {
if attr[ipam.AttributeType] == attrType && attr[ipam.AttributeNode] == nodename {
// The tunnel address is still assigned to this node, but is it in the correct pool this time?
if !isIpInPool(addr, cidrs) {
// Wrong pool, release this address.
logCtx.WithField("currentAddr", addr).Info("Current address is not in a valid pool, release it and reassign")
release = true
} else {
// Correct pool, keep this address.
logCtx.WithField("currentAddr", addr).Info("Current address is still valid, do nothing")
assign = false
}
} else if len(attr) == 0 {
// No attributes means that this is an old address, assigned by code that didn't use
// allocation attributes. It might be a pod address, or it might be a node tunnel
// address. The only way to tell is by the existence of a handle, since workload
// addresses have always used a handle, whereas tunnel addresses didn't start
// using handles until the same time as they got allocation attributes.
if handle != nil {
// Handle exists, so this address belongs to a workload. We need to assign
// a new one for the node, but we shouldn't clean up the old address.
logCtx.WithField("currentAddr", addr).Info("Current address is occupied, assign a new one")
} else {
// Handle does not exist. This is just an old tunnel address that comes from
// a time before we used handles and allocation attributes. Attempt to
// reassign the same address, but now with metadata. It's possible that someone
// else takes the address while we do this, in which case we'll just
// need to assign a new address.
if err := correctAllocationWithHandle(ctx, c, addr, nodename, attrType); err != nil {
if _, ok := err.(cerrors.ErrorResourceAlreadyExists); !ok {
// Unknown error attempting to allocate the address. Exit.
logCtx.WithError(err).Fatal("Error correcting tunnel IP allocation")
}
// The address was taken by someone else. We need to assign a new one.
logCtx.WithError(err).Warn("Failed to correct missing attributes, will assign a new address")
} else {
// We corrected the address, we can just return.
logCtx.Info("Updated tunnel address with allocation attributes")
return
}
}
} else {
// The allocation has attributes, but doesn't belong to us. Assign a new one.
logCtx.WithField("currentAddr", addr).Info("Current address is occupied, assign a new one")
}
} else if _, ok := err.(cerrors.ErrorResourceDoesNotExist); ok {
// The tunnel address is not assigned, reassign it.
logCtx.WithField("currentAddr", addr).Info("Current address is not assigned, assign a new one")
// Defensively release any IP addresses with this handle. This covers a theoretical case
// where the node object has lost its reference to its correct IP, but the allocation still exists
// in IPAM. For example, if the node object was manually edited.
release = true
} else {
// Failed to get assignment attributes, datastore connection issues possible, panic
logCtx.WithError(err).Panicf("Failed to get assignment attributes for CIDR '%s'", addr)
}
}
if release {
logCtx.WithField("IP", addr).Info("Release any old tunnel addresses")
handle, _ := generateHandleAndAttributes(nodename, attrType)
if err := c.IPAM().ReleaseByHandle(ctx, handle); err != nil {
if _, ok := err.(cerrors.ErrorResourceDoesNotExist); !ok {
logCtx.WithError(err).Fatal("Failed to release old addresses")
}
// No existing allocations for this node.
}
}
if assign {
logCtx.WithField("IP", addr).Info("Assign new tunnel address")
assignHostTunnelAddr(ctx, c, nodename, cidrs, attrType)
}
}
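// correctAllocationWithHandle re-claims a legacy tunnel address that was allocated without a
// handle: it releases the bare allocation and immediately re-assigns the same IP, this time
// with a handle and allocation attributes, returning an error if the re-assignment fails.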
func correctAllocationWithHandle(ctx context.Context, c client.Interface, addr, nodename string, attrType string) error {
ipAddr := net.ParseIP(addr)
if ipAddr == nil {
log.Fatalf("Failed to parse node tunnel address '%s'", addr)
}
// Release the old allocation.
ipsToRelease := []net.IP{*ipAddr}
_, err := c.IPAM().ReleaseIPs(ctx, ipsToRelease)
if err != nil {
// If we fail to release the old allocation, we shouldn't continue any further. Just exit.
log.WithField("IP", ipAddr.String()).WithError(err).Fatal("Error releasing address")
}
// Attempt to re-assign the same address, but with a handle this time.
handle, attrs := generateHandleAndAttributes(nodename, attrType)
args := ipam.AssignIPArgs{
IP: *ipAddr,
HandleID: &handle,
Attrs: attrs,
Hostname: nodename,
}
// If we fail to allocate the same IP, return an error. We'll just
// have to allocate a new one.
return c.IPAM().AssignIP(ctx, args)
}
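// generateHandleAndAttributes builds the IPAM handle and allocation attributes used for a
// node's tunnel address. For example (the node name is illustrative), a VXLAN request for
// node "node-1" yields the handle "vxlan-tunnel-addr-node-1" together with the attributes
// {AttributeNode: "node-1", AttributeType: AttributeTypeVXLAN}.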
func generateHandleAndAttributes(nodename string, attrType string) (string, map[string]string) {
attrs := map[string]string{ipam.AttributeNode: nodename}
var handle string
switch attrType {
case ipam.AttributeTypeVXLAN:
handle = fmt.Sprintf("vxlan-tunnel-addr-%s", nodename)
case ipam.AttributeTypeIPIP:
handle = fmt.Sprintf("ipip-tunnel-addr-%s", nodename)
case ipam.AttributeTypeWireguard:
handle = fmt.Sprintf("wireguard-tunnel-addr-%s", nodename)
}
attrs[ipam.AttributeType] = attrType
return handle, attrs
}
// assignHostTunnelAddr claims an IP address from the first of the given pools
// with some space and stores the result on the node as its tunnel
// address. The kind of tunnel address (IPIP, VXLAN or Wireguard) is determined by attrType.
func assignHostTunnelAddr(ctx context.Context, c client.Interface, nodename string, cidrs []net.IPNet, attrType string) {
// Build attributes and handle for this allocation.
handle, attrs := generateHandleAndAttributes(nodename, attrType)
logCtx := getLogger(attrType)
args := ipam.AutoAssignArgs{
Num4: 1,
Num6: 0,
HandleID: &handle,
Attrs: attrs,
Hostname: nodename,
IPv4Pools: cidrs,
}
v4Assignments, _, err := c.IPAM().AutoAssign(ctx, args)
if err != nil {
logCtx.WithError(err).Fatal("Unable to autoassign an address")
}
if err := v4Assignments.PartialFulfillmentError(); err != nil {
logCtx.WithError(err).Fatal("Unable to autoassign an address")
}
// Update the node object with the assigned address.
ip := v4Assignments.IPs[0].IP.String()
if err = updateNodeWithAddress(ctx, c, nodename, ip, attrType); err != nil {
// We hit an error, so release the IP address before exiting.
if releaseErr := c.IPAM().ReleaseByHandle(ctx, handle); releaseErr != nil {
logCtx.WithError(releaseErr).WithField("IP", ip).Errorf("Error releasing IP address on failure")
}
// Log the original error and exit with exit code 1.
logCtx.WithError(err).WithField("IP", ip).Fatal("Unable to set tunnel address")
}
logCtx.WithField("IP", ip).Info("Assigned tunnel address to node")
}
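// updateNodeWithAddress stores the given tunnel address in the node field that corresponds to
// attrType, retrying up to 5 times when the update hits a resource conflict.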
func updateNodeWithAddress(ctx context.Context, c client.Interface, nodename string, addr string, attrType string) error {
// If the update fails with ResourceConflict error then retry 5 times with 1 second delay before failing.
for i := 0; i < 5; i++ {
node, err := c.Nodes().Get(ctx, nodename, options.GetOptions{})
if err != nil {
return err
}
switch attrType {
case ipam.AttributeTypeVXLAN:
node.Spec.IPv4VXLANTunnelAddr = addr
case ipam.AttributeTypeIPIP:
if node.Spec.BGP == nil {
node.Spec.BGP = &libapi.NodeBGPSpec{}
}
node.Spec.BGP.IPv4IPIPTunnelAddr = addr
case ipam.AttributeTypeWireguard:
if node.Spec.Wireguard == nil {
node.Spec.Wireguard = &libapi.NodeWireguardSpec{}
}
node.Spec.Wireguard.InterfaceIPv4Address = addr
}
_, err = c.Nodes().Update(ctx, node, options.SetOptions{})
if _, ok := err.(cerrors.ErrorResourceUpdateConflict); ok {
// Wait for a second and try again if there was a conflict during the resource update.
log.WithField("node", node.Name).WithError(err).Info("Error updating node, retrying.")
time.Sleep(1 * time.Second)
continue
}
return err
}
return fmt.Errorf("Too many retries attempting to update node with tunnel address")
}
// removeHostTunnelAddr removes any existing IP address for this host's
// tunnel device and releases the IP from IPAM. If no IP is assigned this function
// is a no-op.
func removeHostTunnelAddr(ctx context.Context, c client.Interface, nodename string, attrType string) {
var updateError error
logCtx := getLogger(attrType)
// If the update fails with ResourceConflict error then retry 5 times with 1 second delay before failing.
for i := 0; i < 5; i++ {
node, err := c.Nodes().Get(ctx, nodename, options.GetOptions{})
if err != nil {
logCtx.WithError(err).Fatalf("Unable to retrieve tunnel address for cleanup. Error getting node '%s'", nodename)
}
// Find out the currently assigned address and remove it from the node.
var ipAddrStr string
var ipAddr *net.IP
switch attrType {
case ipam.AttributeTypeVXLAN:
ipAddrStr = node.Spec.IPv4VXLANTunnelAddr
node.Spec.IPv4VXLANTunnelAddr = ""
case ipam.AttributeTypeIPIP:
if node.Spec.BGP != nil {
ipAddrStr = node.Spec.BGP.IPv4IPIPTunnelAddr
node.Spec.BGP.IPv4IPIPTunnelAddr = ""
// If removing the tunnel address causes the BGP spec to be empty, then nil it out.
// libcalico asserts that if a BGP spec is present, that it not be empty.
if reflect.DeepEqual(*node.Spec.BGP, libapi.NodeBGPSpec{}) {
logCtx.Debug("BGP spec is now empty, setting to nil")
node.Spec.BGP = nil
}
}
case ipam.AttributeTypeWireguard:
if node.Spec.Wireguard != nil {
ipAddrStr = node.Spec.Wireguard.InterfaceIPv4Address
node.Spec.Wireguard.InterfaceIPv4Address = ""
if reflect.DeepEqual(*node.Spec.Wireguard, libapi.NodeWireguardSpec{}) {
logCtx.Debug("Wireguard spec is now empty, setting to nil")
node.Spec.Wireguard = nil
}
}
}
if ipAddrStr != "" {
ipAddr = net.ParseIP(ipAddrStr)
}
// Release tunnel IP address(es) for the node.
handle, _ := generateHandleAndAttributes(nodename, attrType)
if err := c.IPAM().ReleaseByHandle(ctx, handle); err != nil {
if _, ok := err.(cerrors.ErrorResourceDoesNotExist); !ok {
// Unknown error releasing the address.
logCtx.WithError(err).WithFields(log.Fields{
"IP": ipAddrStr,
"Handle": handle,
}).Fatal("Error releasing address by handle")
}
if ipAddr != nil {
// There are no addresses with this handle. If there is an IP configured on the node, check to see if it
// belongs to us. If it has no handle and no attributes, then we can pretty confidently
// say that it belongs to us rather than a pod and should be cleaned up.
logCtx.WithField("handle", handle).Info("No IPs with handle, release exact IP")
attr, handle, err := c.IPAM().GetAssignmentAttributes(ctx, *ipAddr)
if err != nil {
if _, ok := err.(cerrors.ErrorResourceDoesNotExist); !ok {
logCtx.WithError(err).Fatal("Failed to query attributes")
}
// No allocation exists, we don't have anything to do.
} else if len(attr) == 0 && handle == nil {
// The IP is ours. Release it by passing the exact IP.
if _, err := c.IPAM().ReleaseIPs(ctx, []net.IP{*ipAddr}); err != nil {
logCtx.WithError(err).WithField("IP", ipAddr.String()).Fatal("Error releasing address from IPAM")
}
}
}
}
// Update the node object.
_, updateError = c.Nodes().Update(ctx, node, options.SetOptions{})
if _, ok := updateError.(cerrors.ErrorResourceUpdateConflict); ok {
// Wait for a second and try again if there was a conflict during the resource update.
logCtx.Infof("Error updating node %s: %s. Retrying.", node.Name, err)
time.Sleep(1 * time.Second)
continue
}
break
}
// Check to see if there was still an error after the retry loop,
// and log and exit if there was an error.
if updateError != nil {
// We hit an error, so release the IP address before exiting.
// Log the error and exit with exit code 1.
logCtx.WithError(updateError).Fatal("Unable to remove tunnel address")
}
}
// determineEnabledPoolCIDRs returns the CIDRs of all enabled IPv4 pools that select the given node and
// match the requested tunnel type (IPIP, VXLAN or Wireguard) given by attrType.
func determineEnabledPoolCIDRs(node libapi.Node, ipPoolList api.IPPoolList, attrType string) []net.IPNet {
// For wireguard, return no valid pools if the wireguard public key has not been set. Only once wireguard has been
// enabled *and* the wireguard device has been initialized do we require an IP address to be configured.
if attrType == ipam.AttributeTypeWireguard && node.Status.WireguardPublicKey == "" {
log.Debugf("Wireguard is not running on node %s", node.Name)
return nil
}
var cidrs []net.IPNet
for _, ipPool := range ipPoolList.Items {
_, poolCidr, err := net.ParseCIDR(ipPool.Spec.CIDR)
if err != nil {
log.WithError(err).Fatalf("Failed to parse CIDR '%s' for IPPool '%s'", ipPool.Spec.CIDR, ipPool.Name)
}
// Check if IP pool selects the node
if selects, err := ipam.SelectsNode(ipPool, node); err != nil {
log.WithError(err).Errorf("Failed to compare nodeSelector '%s' for IPPool '%s', skipping", ipPool.Spec.NodeSelector, ipPool.Name)
continue
} else if !selects {
log.Debugf("IPPool '%s' does not select Node '%s'", ipPool.Name, node.Name)
continue
}
// Check if the desired encap is enabled in the IP pool, the IP pool is not disabled, and it is an IPv4 pool,
// since we don't support encap with IPv6.
switch attrType {
case ipam.AttributeTypeVXLAN:
if (ipPool.Spec.VXLANMode == api.VXLANModeAlways || ipPool.Spec.VXLANMode == api.VXLANModeCrossSubnet) && !ipPool.Spec.Disabled && poolCidr.Version() == 4 {
cidrs = append(cidrs, *poolCidr)
}
case ipam.AttributeTypeIPIP:
// Check if IPIP is enabled in the IP pool, the IP pool is not disabled, and it is an IPv4 pool, since we don't support IPIP with IPv6.
if (ipPool.Spec.IPIPMode == api.IPIPModeCrossSubnet || ipPool.Spec.IPIPMode == api.IPIPModeAlways) && !ipPool.Spec.Disabled && poolCidr.Version() == 4 {
cidrs = append(cidrs, *poolCidr)
}
case ipam.AttributeTypeWireguard:
// Wireguard does not require a specific encap configuration on the pool.
if !ipPool.Spec.Disabled && poolCidr.Version() == 4 {
cidrs = append(cidrs, *poolCidr)
}
}
}
return cidrs
}
// isIpInPool reports whether the IP address is in one of the supplied pools.
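// For example (the address literal is illustrative), isIpInPool("10.65.0.1", cidrs) is true
// only if 10.65.0.1 falls inside one of the given pool CIDRs.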
func isIpInPool(ipAddrStr string, cidrs []net.IPNet) bool {
ipAddress := net.ParseIP(ipAddrStr)
for _, cidr := range cidrs {
if cidr.Contains(ipAddress.IP) {
return true
}
}
return false
}
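// getLogger returns a log entry tagged with the tunnel address type so that output from the
// IPIP, VXLAN and Wireguard code paths can be told apart.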
func getLogger(attrType string) *log.Entry {
switch attrType {
case ipam.AttributeTypeVXLAN:
return log.WithField("type", "vxlanTunnelAddress")
case ipam.AttributeTypeIPIP:
return log.WithField("type", "ipipTunnelAddress")
case ipam.AttributeTypeWireguard:
return log.WithField("type", "wireguardTunnelAddress")
}
return nil
}
// backendClientAccessor is an interface to access the backend client from the main v2 client.
type backendClientAccessor interface {
Backend() bapi.Client
}
| [
"\"NODENAME\""
]
| []
| [
"NODENAME"
]
| [] | ["NODENAME"] | go | 1 | 0 | |
main.go | package main
//module github.com/JimbiniBambini/exchanges_data_polling **latest**
import (
"data_polling/exchanges_data_polling/managers/api_manager"
"data_polling/exchanges_data_polling/managers/client_manager"
"data_polling/exchanges_data_polling/pinger"
"log"
"net/http"
"os"
"github.com/gorilla/mux"
)
/* ****************************************** ENV AND GLOBALS ****************************************** */
var GIT_DEV bool
// exchange --> asset_regular --> fiat --> exchange_specific_asset
var assetMapping map[string]map[string]map[string]string
/* ****************************************** TODO ****************************************** */
/*
NEXT Release
- correct the issue with panic while downloading a non-existent file
- add download scheme and proper api for exchanges (at least kraken)
- check the option for separate maps for clients and workers (workers can be accessed via client id as a map key) --> better scalability?
- switch to new architecture with separate module for API to clean up the main module
- add option for clients with multiple buckets at the same time
- ADD PROPER CONFIG FOR ASSETS
- ADD CONCAT-FILE ROUTINE AND A BUCKET FOR IT
*/
/* ****************************************** MAIN ****************************************** */
func main() {
log.Println("Dev_Env:", os.Getenv("GIT_DEV") == "true")
GIT_DEV = (os.Getenv("GIT_DEV") == "true")
clientManager := client_manager.NewClientManager()
assetMapping = make(map[string]map[string]map[string]string)
assetMapping["kraken"] = make(map[string]map[string]string)
assetMapping["kraken"]["btc"] = make(map[string]string)
assetMapping["kraken"]["btc"]["usd"] = "XXBTZUSD"
assetMapping["kraken"]["ada"] = make(map[string]string)
assetMapping["kraken"]["ada"]["usd"] = "ADAUSD"
r := mux.NewRouter()
r.HandleFunc("/storj_client_manager", func(w http.ResponseWriter, r *http.Request) {
api_manager.ManageClients(w, r, &clientManager)
})
r.HandleFunc("/storj_bucket_manager", func(w http.ResponseWriter, r *http.Request) {
api_manager.ManageBuckets(w, r, &clientManager, GIT_DEV)
})
r.HandleFunc("/storj_file_manager", func(w http.ResponseWriter, r *http.Request) {
api_manager.ManageFiles(w, r, &clientManager)
})
r.HandleFunc("/storj_worker_manager", func(w http.ResponseWriter, r *http.Request) {
api_manager.ManageWorkers(w, r, &clientManager, assetMapping)
})
r.HandleFunc("/ping_in", func(w http.ResponseWriter, r *http.Request) {
log.Println("incoming message", w)
pinger.IncomingMessageHandler(w, r)
})
if GIT_DEV {
r.HandleFunc("/storj_client_ini_upload", func(w http.ResponseWriter, r *http.Request) {
api_manager.Uploader(w, r, &clientManager)
})
pinger.PingWorker([]string{"http://127.0.0.1:8088/ping_in"}, 1)
} else {
pinger.PingWorker([]string{"https://data-polling.herokuapp.com/ping_in"}, 1)
}
port := os.Getenv("PORT")
if port == "" {
port = "8088" // Default port if not specified
}
http.ListenAndServe(":"+port, r)
}
| [
"\"GIT_DEV\"",
"\"GIT_DEV\"",
"\"PORT\""
]
| []
| [
"PORT",
"GIT_DEV"
]
| [] | ["PORT", "GIT_DEV"] | go | 2 | 0 | |
dkrz_cera/file_processing.py | #!/usr/bin/env python
# -*- coding utf-8 -*-
#
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Author: Markus Ritschel
# eMail: [email protected]
# Date: 18/06/2020
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#
import os
from pathlib import Path
from zipfile import ZipFile
__all__ = ["unzip_files"]
def unzip_files(path):
"""
Unzip recursively all files in a specific directory tree.
Parameters
----------
path : str
The directory which contains the zip files to be extracted.
"""
path = Path(path).expanduser()
count_zips = 0
count_netcdfs = 0
while True:
# all_zip_files = glob.glob('*.zip')
all_zip_files = [x for x in path.rglob('*.zip') if x.is_file()]
count_zips += len(all_zip_files)
if not all_zip_files:
print("No remaining zip files found.")
break
for zip_file in all_zip_files:
# TODO: maybe parallel processing?
netcdfs_in_zip = len([f for f in ZipFile(zip_file).namelist() if f.endswith('.nc')])
count_netcdfs += netcdfs_in_zip
print("Unpacking {} netCDF files from {}...".format(netcdfs_in_zip, zip_file))
ZipFile(zip_file).extractall(path=os.path.dirname(zip_file))
print("Unpacking successfully finished. Remove zip file.")
os.remove(zip_file)
print(f"{count_netcdfs} netCDF files out of {count_zips} zip files successfully extracted. "
f"Deleted zip files after extraction.")
return
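# Usage sketch (the path is illustrative): unzip_files("~/work/cmip5-download")
# The outer while-loop re-scans the tree after every pass, so zip files nested inside
# freshly extracted archives are picked up on a later iteration.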
if __name__ == '__main__':
DOWNLOAD_DIR = os.path.join(os.getenv('HOME'), "work/cmip5-download")
unzip_files(DOWNLOAD_DIR)
| []
| []
| [
"HOME"
]
| [] | ["HOME"] | python | 1 | 0 | |
cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hive.cli;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.IOException;
import java.io.PrintStream;
import java.io.UnsupportedEncodingException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Set;
import jline.ArgumentCompletor;
import jline.Completor;
import jline.ConsoleReader;
import jline.History;
import jline.SimpleCompletor;
import jline.ArgumentCompletor.AbstractArgumentDelimiter;
import jline.ArgumentCompletor.ArgumentDelimiter;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.common.HiveInterruptUtils;
import org.apache.hadoop.hive.common.LogUtils;
import org.apache.hadoop.hive.common.LogUtils.LogInitializationException;
import org.apache.hadoop.hive.common.io.CachingPrintStream;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.ql.CommandNeedRetryException;
import org.apache.hadoop.hive.ql.Driver;
import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
import org.apache.hadoop.hive.ql.exec.HadoopJobExecHelper;
import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.exec.Utilities.StreamPrinter;
import org.apache.hadoop.hive.ql.parse.ParseDriver;
import org.apache.hadoop.hive.ql.parse.VariableSubstitution;
import org.apache.hadoop.hive.ql.processors.CommandProcessor;
import org.apache.hadoop.hive.ql.processors.CommandProcessorFactory;
import org.apache.hadoop.hive.ql.session.SessionState;
import org.apache.hadoop.hive.ql.session.SessionState.LogHelper;
import org.apache.hadoop.hive.service.HiveClient;
import org.apache.hadoop.hive.service.HiveServerException;
import org.apache.hadoop.hive.shims.ShimLoader;
import org.apache.hadoop.io.IOUtils;
import org.apache.thrift.TException;
import sun.misc.Signal;
import sun.misc.SignalHandler;
/**
* CliDriver.
*
*/
public class CliDriver {
public static String prompt = "hive";
public static String prompt2 = " "; // when ';' is not yet seen
public static final int LINES_TO_FETCH = 40; // number of lines to fetch in batch from remote hive server
public static final String HIVERCFILE = ".hiverc";
private final LogHelper console;
private Configuration conf;
public CliDriver() {
SessionState ss = SessionState.get();
conf = (ss != null) ? ss.getConf() : new Configuration();
Log LOG = LogFactory.getLog("CliDriver");
console = new LogHelper(LOG);
}
public int processCmd(String cmd) {
CliSessionState ss = (CliSessionState) SessionState.get();
// Flush the print stream, so it doesn't include output from the last command
ss.err.flush();
String cmd_trimmed = cmd.trim();
String[] tokens = tokenizeCmd(cmd_trimmed);
int ret = 0;
if (cmd_trimmed.toLowerCase().equals("quit") || cmd_trimmed.toLowerCase().equals("exit")) {
// If we have come this far, either the previous commands
// were all successful or this was entered on the command line. In either case
// this counts as a successful run.
ss.close();
System.exit(0);
} else if (tokens[0].equalsIgnoreCase("source")) {
String cmd_1 = getFirstCmd(cmd_trimmed, tokens[0].length());
File sourceFile = new File(cmd_1);
if (! sourceFile.isFile()){
console.printError("File: "+ cmd_1 + " is not a file.");
ret = 1;
} else {
try {
this.processFile(cmd_1);
} catch (IOException e) {
console.printError("Failed processing file "+ cmd_1 +" "+ e.getLocalizedMessage(),
org.apache.hadoop.util.StringUtils.stringifyException(e));
ret = 1;
}
}
} else if (cmd_trimmed.startsWith("!")) {
String shell_cmd = cmd_trimmed.substring(1);
shell_cmd = new VariableSubstitution().substitute(ss.getConf(), shell_cmd);
// shell_cmd = "/bin/bash -c \'" + shell_cmd + "\'";
try {
Process executor = Runtime.getRuntime().exec(shell_cmd);
StreamPrinter outPrinter = new StreamPrinter(executor.getInputStream(), null, ss.out);
StreamPrinter errPrinter = new StreamPrinter(executor.getErrorStream(), null, ss.err);
outPrinter.start();
errPrinter.start();
ret = executor.waitFor();
if (ret != 0) {
console.printError("Command failed with exit code = " + ret);
}
} catch (Exception e) {
console.printError("Exception raised from Shell command " + e.getLocalizedMessage(),
org.apache.hadoop.util.StringUtils.stringifyException(e));
ret = 1;
}
} else if (tokens[0].toLowerCase().equals("list")) {
SessionState.ResourceType t;
if (tokens.length < 2 || (t = SessionState.find_resource_type(tokens[1])) == null) {
console.printError("Usage: list ["
+ StringUtils.join(SessionState.ResourceType.values(), "|") + "] [<value> [<value>]*]");
ret = 1;
} else {
List<String> filter = null;
if (tokens.length >= 3) {
System.arraycopy(tokens, 2, tokens, 0, tokens.length - 2);
filter = Arrays.asList(tokens);
}
Set<String> s = ss.list_resource(t, filter);
if (s != null && !s.isEmpty()) {
ss.out.println(StringUtils.join(s, "\n"));
}
}
} else if (ss.isRemoteMode()) { // remote mode -- connecting to remote hive server
HiveClient client = ss.getClient();
PrintStream out = ss.out;
PrintStream err = ss.err;
try {
client.execute(cmd_trimmed);
List<String> results;
do {
results = client.fetchN(LINES_TO_FETCH);
for (String line : results) {
out.println(line);
}
} while (results.size() == LINES_TO_FETCH);
} catch (HiveServerException e) {
ret = e.getErrorCode();
if (ret != 0) { // OK if ret == 0 -- reached the EOF
String errMsg = e.getMessage();
if (errMsg == null) {
errMsg = e.toString();
}
ret = e.getErrorCode();
err.println("[Hive Error]: " + errMsg);
}
} catch (TException e) {
String errMsg = e.getMessage();
if (errMsg == null) {
errMsg = e.toString();
}
ret = -10002;
err.println("[Thrift Error]: " + errMsg);
} finally {
try {
client.clean();
} catch (TException e) {
String errMsg = e.getMessage();
if (errMsg == null) {
errMsg = e.toString();
}
err.println("[Thrift Error]: Hive server is not cleaned due to thrift exception: "
+ errMsg);
}
}
} else { // local mode
CommandProcessor proc = CommandProcessorFactory.get(tokens[0], (HiveConf) conf);
ret = processLocalCmd(cmd, proc, ss);
}
return ret;
}
/**
* For testing purposes to inject Configuration dependency
* @param conf to replace default
*/
void setConf(Configuration conf) {
this.conf = conf;
}
/**
* Extract and clean up the first command in the input.
*/
private String getFirstCmd(String cmd, int length) {
return cmd.substring(length).trim();
}
private String[] tokenizeCmd(String cmd) {
return cmd.split("\\s+");
}
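/**
* Runs a single command locally. Commands handled by the query {@link Driver} have their
* results printed and timed; other {@link CommandProcessor}s are invoked directly. The
* command is retried whenever a {@link CommandNeedRetryException} is thrown.
*/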
int processLocalCmd(String cmd, CommandProcessor proc, CliSessionState ss) {
int tryCount = 0;
boolean needRetry;
int ret = 0;
do {
try {
needRetry = false;
if (proc != null) {
if (proc instanceof Driver) {
Driver qp = (Driver) proc;
PrintStream out = ss.out;
long start = System.currentTimeMillis();
if (ss.getIsVerbose()) {
out.println(cmd);
}
qp.setTryCount(tryCount);
ret = qp.run(cmd).getResponseCode();
if (ret != 0) {
qp.close();
return ret;
}
ArrayList<String> res = new ArrayList<String>();
printHeader(qp, out);
try {
while (qp.getResults(res)) {
for (String r : res) {
out.println(r);
}
res.clear();
if (out.checkError()) {
break;
}
}
} catch (IOException e) {
console.printError("Failed with exception " + e.getClass().getName() + ":"
+ e.getMessage(), "\n"
+ org.apache.hadoop.util.StringUtils.stringifyException(e));
ret = 1;
}
int cret = qp.close();
if (ret == 0) {
ret = cret;
}
long end = System.currentTimeMillis();
if (end > start) {
double timeTaken = (end - start) / 1000.0;
console.printInfo("Time taken: " + timeTaken + " seconds", null);
}
} else {
String firstToken = tokenizeCmd(cmd.trim())[0];
String cmd_1 = getFirstCmd(cmd.trim(), firstToken.length());
if (ss.getIsVerbose()) {
ss.out.println(firstToken + " " + cmd_1);
}
ret = proc.run(cmd_1).getResponseCode();
}
}
} catch (CommandNeedRetryException e) {
console.printInfo("Retry query with a different approach...");
tryCount++;
needRetry = true;
}
} while (needRetry);
return ret;
}
/**
* If enabled and applicable to this command, print the field headers
* for the output.
*
* @param qp Driver that executed the command
* @param out Printstream which to send output to
*/
private void printHeader(Driver qp, PrintStream out) {
List<FieldSchema> fieldSchemas = qp.getSchema().getFieldSchemas();
if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_CLI_PRINT_HEADER)
&& fieldSchemas != null) {
// Print the column names
boolean first_col = true;
for (FieldSchema fs : fieldSchemas) {
if (!first_col) {
out.print('\t');
}
out.print(fs.getName());
first_col = false;
}
out.println();
}
}
public int processLine(String line) {
return processLine(line, false);
}
/**
* Processes a line of semicolon separated commands
*
* @param line
* The commands to process
* @param allowInterupting
* When true the function will handle SIG_INT (Ctrl+C) by interrupting the processing and
* returning -1
* @return 0 if ok
*/
public int processLine(String line, boolean allowInterupting) {
SignalHandler oldSignal = null;
Signal interupSignal = null;
if (allowInterupting) {
// Remember all threads that were running at the time we started line processing.
// Hook up the custom Ctrl+C handler while processing this line
interupSignal = new Signal("INT");
oldSignal = Signal.handle(interupSignal, new SignalHandler() {
private final Thread cliThread = Thread.currentThread();
private boolean interruptRequested;
@Override
public void handle(Signal signal) {
boolean initialRequest = !interruptRequested;
interruptRequested = true;
// Kill the VM on second ctrl+c
if (!initialRequest) {
console.printInfo("Exiting the JVM");
System.exit(127);
}
// Interrupt the CLI thread to stop the current statement and return
// to prompt
console.printInfo("Interrupting... Be patient, this might take some time.");
console.printInfo("Press Ctrl+C again to kill JVM");
// First, kill any running MR jobs
HadoopJobExecHelper.killRunningJobs();
HiveInterruptUtils.interrupt();
this.cliThread.interrupt();
}
});
}
try {
int lastRet = 0, ret = 0;
String command = "";
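// The loop below splits the line on ';' while honouring backslash-escaped semicolons.
// For example (the statements are illustrative), "use mydb; select 1;" is run as two commands,
// whereas "select 'a\;b';" is reassembled into the single command "select 'a;b'".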
for (String oneCmd : line.split(";")) {
if (StringUtils.endsWith(oneCmd, "\\")) {
command += StringUtils.chop(oneCmd) + ";";
continue;
} else {
command += oneCmd;
}
if (StringUtils.isBlank(command)) {
continue;
}
ret = processCmd(command);
//wipe cli query state
SessionState ss = SessionState.get();
ss.setCommandType(null);
command = "";
lastRet = ret;
boolean ignoreErrors = HiveConf.getBoolVar(conf, HiveConf.ConfVars.CLIIGNOREERRORS);
if (ret != 0 && !ignoreErrors) {
CommandProcessorFactory.clean((HiveConf) conf);
return ret;
}
}
CommandProcessorFactory.clean((HiveConf) conf);
return lastRet;
} finally {
// Once we are done processing the line, restore the old handler
if (oldSignal != null && interupSignal != null) {
Signal.handle(interupSignal, oldSignal);
}
}
}
public int processReader(BufferedReader r) throws IOException {
String line;
StringBuilder qsb = new StringBuilder();
while ((line = r.readLine()) != null) {
// Skipping through comments
if (! line.startsWith("--")) {
qsb.append(line + "\n");
}
}
return (processLine(qsb.toString()));
}
public int processFile(String fileName) throws IOException {
FileReader fileReader = null;
BufferedReader bufferReader = null;
int rc = 0;
try {
fileReader = new FileReader(fileName);
bufferReader = new BufferedReader(fileReader);
rc = processReader(bufferReader);
bufferReader.close();
bufferReader = null;
} finally {
IOUtils.closeStream(bufferReader);
}
return rc;
}
public void processInitFiles(CliSessionState ss) throws IOException {
boolean saveSilent = ss.getIsSilent();
ss.setIsSilent(true);
for (String initFile : ss.initFiles) {
int rc = processFile(initFile);
if (rc != 0) {
System.exit(rc);
}
}
if (ss.initFiles.size() == 0) {
if (System.getenv("HIVE_HOME") != null) {
String hivercDefault = System.getenv("HIVE_HOME") + File.separator + "bin" + File.separator + HIVERCFILE;
if (new File(hivercDefault).exists()) {
int rc = processFile(hivercDefault);
if (rc != 0) {
System.exit(rc);
}
}
}
if (System.getProperty("user.home") != null) {
String hivercUser = System.getProperty("user.home") + File.separator + HIVERCFILE;
if (new File(hivercUser).exists()) {
int rc = processFile(hivercUser);
if (rc != 0) {
System.exit(rc);
}
}
}
}
ss.setIsSilent(saveSilent);
}
public static Completor getCommandCompletor () {
// SimpleCompletor matches against a pre-defined wordlist
// We start with an empty wordlist and build it up
SimpleCompletor sc = new SimpleCompletor(new String[0]);
// We add Hive function names
// For functions that aren't infix operators, we add an open
// parenthesis at the end.
for (String s : FunctionRegistry.getFunctionNames()) {
if (s.matches("[a-z_]+")) {
sc.addCandidateString(s + "(");
} else {
sc.addCandidateString(s);
}
}
// We add Hive keywords, including lower-cased versions
for (String s : ParseDriver.getKeywords()) {
sc.addCandidateString(s);
sc.addCandidateString(s.toLowerCase());
}
// Because we use parentheses in addition to whitespace
// as a keyword delimiter, we need to define a new ArgumentDelimiter
// that recognizes parenthesis as a delimiter.
ArgumentDelimiter delim = new AbstractArgumentDelimiter () {
@Override
public boolean isDelimiterChar (String buffer, int pos) {
char c = buffer.charAt(pos);
return (Character.isWhitespace(c) || c == '(' || c == ')' ||
c == '[' || c == ']');
}
};
// The ArgumentCompletor allows us to match multiple tokens
// in the same line.
final ArgumentCompletor ac = new ArgumentCompletor(sc, delim);
// By default ArgumentCompletor is in "strict" mode meaning
// a token is only auto-completed if all prior tokens
// match. We don't want that since there are valid tokens
// that are not in our wordlist (eg. table and column names)
ac.setStrict(false);
// ArgumentCompletor always adds a space after a matched token.
// This is undesirable for function names because a space after
// the opening parenthesis is unnecessary (and uncommon) in Hive.
// We stack a custom Completor on top of our ArgumentCompletor
// to reverse this.
Completor completor = new Completor () {
public int complete (String buffer, int offset, List completions) {
List<String> comp = (List<String>) completions;
int ret = ac.complete(buffer, offset, completions);
// ConsoleReader will do the substitution if and only if there
// is exactly one valid completion, so we ignore other cases.
if (completions.size() == 1) {
if (comp.get(0).endsWith("( ")) {
comp.set(0, comp.get(0).trim());
}
}
return ret;
}
};
return completor;
}
public static void main(String[] args) throws Exception {
int ret = run(args);
System.exit(ret);
}
public static int run(String[] args) throws Exception {
OptionsProcessor oproc = new OptionsProcessor();
if (!oproc.process_stage1(args)) {
return 1;
}
// NOTE: It is critical to do this here so that log4j is reinitialized
// before any of the other core hive classes are loaded
boolean logInitFailed = false;
String logInitDetailMessage;
try {
logInitDetailMessage = LogUtils.initHiveLog4j();
} catch (LogInitializationException e) {
logInitFailed = true;
logInitDetailMessage = e.getMessage();
}
CliSessionState ss = new CliSessionState(new HiveConf(SessionState.class));
ss.in = System.in;
try {
ss.out = new PrintStream(System.out, true, "UTF-8");
ss.info = new PrintStream(System.err, true, "UTF-8");
ss.err = new CachingPrintStream(System.err, true, "UTF-8");
} catch (UnsupportedEncodingException e) {
return 3;
}
if (!oproc.process_stage2(ss)) {
return 2;
}
if (!ss.getIsSilent()) {
if (logInitFailed) {
System.err.println(logInitDetailMessage);
} else {
SessionState.getConsole().printInfo(logInitDetailMessage);
}
}
// set all properties specified via command line
HiveConf conf = ss.getConf();
for (Map.Entry<Object, Object> item : ss.cmdProperties.entrySet()) {
conf.set((String) item.getKey(), (String) item.getValue());
ss.getOverriddenConfigurations().put((String) item.getKey(), (String) item.getValue());
}
SessionState.start(ss);
// connect to Hive Server
if (ss.getHost() != null) {
ss.connect();
if (ss.isRemoteMode()) {
prompt = "[" + ss.host + ':' + ss.port + "] " + prompt;
char[] spaces = new char[prompt.length()];
Arrays.fill(spaces, ' ');
prompt2 = new String(spaces);
}
}
// CLI remote mode is a thin client: only load auxJars in local mode
if (!ss.isRemoteMode() && !ShimLoader.getHadoopShims().usesJobShell()) {
// hadoop-20 and above - we need to augment classpath using hiveconf
// components
// see also: code in ExecDriver.java
ClassLoader loader = conf.getClassLoader();
String auxJars = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEAUXJARS);
if (StringUtils.isNotBlank(auxJars)) {
loader = Utilities.addToClassPath(loader, StringUtils.split(auxJars, ","));
}
conf.setClassLoader(loader);
Thread.currentThread().setContextClassLoader(loader);
}
CliDriver cli = new CliDriver();
cli.setHiveVariables(oproc.getHiveVariables());
// Execute -i init files (always in silent mode)
cli.processInitFiles(ss);
if (ss.execString != null) {
return cli.processLine(ss.execString);
}
try {
if (ss.fileName != null) {
return cli.processFile(ss.fileName);
}
} catch (FileNotFoundException e) {
System.err.println("Could not open input file for reading. (" + e.getMessage() + ")");
return 3;
}
ConsoleReader reader = new ConsoleReader();
reader.setBellEnabled(false);
// reader.setDebug(new PrintWriter(new FileWriter("writer.debug", true)));
reader.addCompletor(getCommandCompletor());
String line;
final String HISTORYFILE = ".hivehistory";
String historyDirectory = System.getProperty("user.home");
try {
if ((new File(historyDirectory)).exists()) {
String historyFile = historyDirectory + File.separator + HISTORYFILE;
reader.setHistory(new History(new File(historyFile)));
} else {
System.err.println("WARNING: Directory for Hive history file: " + historyDirectory +
" does not exist. History will not be available during this session.");
}
} catch (Exception e) {
System.err.println("WARNING: Encountered an error while trying to initialize Hive's " +
"history file. History will not be available during this session.");
System.err.println(e.getMessage());
}
int ret = 0;
String prefix = "";
String curDB = getFormattedDb(conf, ss);
String curPrompt = prompt + curDB;
String dbSpaces = spacesForString(curDB);
while ((line = reader.readLine(curPrompt + "> ")) != null) {
if (!prefix.equals("")) {
prefix += '\n';
}
if (line.trim().endsWith(";") && !line.trim().endsWith("\\;")) {
line = prefix + line;
ret = cli.processLine(line, true);
prefix = "";
curDB = getFormattedDb(conf, ss);
curPrompt = prompt + curDB;
dbSpaces = dbSpaces.length() == curDB.length() ? dbSpaces : spacesForString(curDB);
} else {
prefix = prefix + line;
curPrompt = prompt2 + dbSpaces;
continue;
}
}
ss.close();
return ret;
}
/**
* Retrieve the current database name string to display, based on the
* configuration value.
* @param conf storing whether or not to show current db
* @param ss CliSessionState to query for db name
* @return String to show user for current db value
*/
private static String getFormattedDb(HiveConf conf, CliSessionState ss) {
if (!HiveConf.getBoolVar(conf, HiveConf.ConfVars.CLIPRINTCURRENTDB)) {
return "";
}
String currDb = ss.getCurrentDbName();
if (currDb == null) {
return "";
}
return " (" + currDb + ")";
}
/**
* Generate a string of whitespace the same length as the parameter
*
* @param s String for which to generate equivalent whitespace
* @return Whitespace
*/
private static String spacesForString(String s) {
if (s == null || s.length() == 0) {
return "";
}
return String.format("%1$-" + s.length() +"s", "");
}
public void setHiveVariables(Map<String, String> hiveVariables) {
SessionState.get().setHiveVariables(hiveVariables);
}
}
| [
"\"HIVE_HOME\"",
"\"HIVE_HOME\""
]
| []
| [
"HIVE_HOME"
]
| [] | ["HIVE_HOME"] | java | 1 | 0 | |
main.go | package main
import (
"errors"
"html/template"
"io"
"log"
"os"
"regexp"
"strconv"
"github.com/candidatos-info/site/db"
"github.com/candidatos-info/site/email"
"github.com/candidatos-info/site/token"
"github.com/labstack/echo"
)
const (
instagramLogoURL = "https://logodownload.org/wp-content/uploads/2017/04/instagram-logo-9.png"
facebookLogoURL = "https://logodownload.org/wp-content/uploads/2014/09/facebook-logo-11.png"
twitterLogoURL = "https://help.twitter.com/content/dam/help-twitter/brand/logo.png"
websiteLogoURL = "https://i.pinimg.com/originals/4e/d3/5b/4ed35b1c1bb4a3ddef205a3bbbe7fc17.jpg"
whatsAppLogoURL = "https://i0.wp.com/cantinhodabrantes.com.br/wp-content/uploads/2017/08/whatsapp-logo-PNG-Transparent.png?fit=1000%2C1000&ssl=1"
searchCookieExpiration = 360 //in hours
searchCacheCookie = "searchCookie"
prodEnvironmentName = "standard"
)
var (
emailClient *email.Client
tokenService *token.Token
candidateRoles = []string{"vereador", "prefeito"} // available candidate roles
siteURL string
suportEmails = []string{"[email protected]"}
emailRegex = regexp.MustCompile("^[a-zA-Z0-9.!#$%&'*+\\/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$")
rolesMap = map[string]string{
"EM": "prefeito",
"LM": "vereador",
"VEM": "vice-prefeito",
}
allowedToUpdateProfile bool
tags = mustLoadTags()
)
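// candidateCard is the JSON payload describing a single candidate; a serialized value looks
// roughly like (all values are illustrative):
// {"transparency": 0.5, "picture_url": "...", "name": "...", "city": "...", "state": "...",
// "role": "vereador", "party": "...", "number": 123, "tags": ["..."], "sequential_id": "...",
// "gender": "..."}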
type candidateCard struct {
Transparency float64 `json:"transparency"`
Picture string `json:"picture_url"`
Name string `json:"name"`
City string `json:"city"`
State string `json:"state"`
Role string `json:"role"`
Party string `json:"party"`
Number int `json:"number"`
Tags []string `json:"tags"`
SequentialID string `json:"sequential_id"`
Gender string `json:"gender"`
}
// Shared **read-only** variable. Used by templates and other functions.
// Please keep it short and instantiated in the beginning of the main.
// Keep this struct close to templateRegistry, which is where it is used.
var globals struct {
Env string
Year int
}
type templateRegistry struct {
templates map[string]*template.Template
}
func (t *templateRegistry) Render(w io.Writer, name string, data interface{}, c echo.Context) error {
tmpl, ok := t.templates[name]
if !ok {
err := errors.New("template not found -> " + name)
return err
}
if data == nil {
data = make(map[string]interface{})
}
m := data.(map[string]interface{})
m["IsProdEnv"] = globals.Env == prodEnvironmentName
m["Env"] = globals.Env
m["Year"] = globals.Year
return tmpl.ExecuteTemplate(w, "layout.html", data)
}
func main() {
// #### Global Params ####
ey := os.Getenv("ELECTION_YEAR")
if ey == "" {
log.Fatal("missing ELECTION_YEAR environment variable")
}
electionYearAsInt, err := strconv.Atoi(ey)
if err != nil {
log.Fatalf("failed to parse environment variable ELECTION_YEAR with value [%s] to int, error %v", ey, err)
}
globals.Year = electionYearAsInt
globals.Env = os.Getenv("GAE_ENV") // should be correlated to prodEnvironmentName to be able to identify when the server is running in production.
// Other environment variables.
urlConnection := os.Getenv("DB_URL")
if urlConnection == "" {
log.Fatal("missing DB_URL environment variable")
}
dbName := os.Getenv("DB_NAME")
if dbName == "" {
log.Fatal("missing DN_NAME environment variable")
}
dbClient, err := db.NewMongoClient(urlConnection, dbName)
if err != nil {
log.Fatalf("failed to connect to database at URL [%s], error %v\n", urlConnection, err)
}
log.Println("connected to database")
emailAccount := os.Getenv("EMAIL")
if emailAccount == "" {
log.Fatal("missing EMAIL environment variable")
}
contactEmail := os.Getenv("FALE_CONOSCO_EMAIL")
if contactEmail == "" {
log.Fatal("missing FALE_CONOSCO_EMAIL environment variable")
}
password := os.Getenv("PASSWORD")
if password == "" {
log.Fatal("missing PASSWORD environment variable")
}
siteURL = os.Getenv("SITE_URL")
if siteURL == "" {
log.Fatal("missing SITE_URL environment variable")
}
emailClient = email.New(emailAccount, password)
authSecret := os.Getenv("SECRET")
if authSecret == "" {
log.Fatal("missing SECRET environment variable")
}
tokenService = token.New(authSecret)
updateProfile := os.Getenv("UPDATE_PROFILE")
if updateProfile == "" {
log.Fatal("missing UPDATE_PROFILE environment variable")
}
r, err := strconv.Atoi(updateProfile)
if err != nil {
log.Fatalf("failed to parte environment variable UPDATE_PROFILE with value [%s] to int, error %v", updateProfile, err)
}
allowedToUpdateProfile = r == 1
// Template registration.
// Template data MUST BE either nil or a map[string]interface{}.
templates := make(map[string]*template.Template)
templates["index.html"] = template.Must(template.ParseFiles("web/templates/index.html", "web/templates/layout.html"))
templates["sobre.html"] = template.Must(template.ParseFiles("web/templates/sobre.html", "web/templates/layout.html"))
templates["candidato.html"] = template.Must(template.ParseFiles("web/templates/candidato.html", "web/templates/layout.html"))
templates["sou-candidato.html"] = template.Must(template.ParseFiles("web/templates/sou-candidato.html", "web/templates/layout.html"))
templates["sou-candidato-success.html"] = template.Must(template.ParseFiles("web/templates/sou-candidato-success.html", "web/templates/layout.html"))
templates["aceitar-termo.html"] = template.Must(template.ParseFiles("web/templates/aceitar-termo.html", "web/templates/layout.html"))
templates["atualizar-candidato.html"] = template.Must(template.ParseFiles("web/templates/atualizar-candidato.html", "web/templates/layout.html"))
templates["atualizar-candidato-success.html"] = template.Must(template.ParseFiles("web/templates/atualizar-candidato-success.html", "web/templates/layout.html"))
templates["fale-conosco.html"] = template.Must(template.ParseFiles("web/templates/fale-conosco.html", "web/templates/layout.html"))
templates["fale-conosco-success.html"] = template.Must(template.ParseFiles("web/templates/fale-conosco-success.html", "web/templates/layout.html"))
e := echo.New()
e.Renderer = &templateRegistry{
templates: templates,
}
// Rotes.
e.Static("/", "web/public")
e.GET("/", newHomeHandler(dbClient))
e.GET("/c/:year/:id", newCandidateHandler(dbClient))
e.GET("/sobre", sobreHandler)
e.GET("/sou-candidato", souCandidatoGET)
e.POST("/sou-candidato", newSouCandidatoFormHandler(dbClient, tokenService, emailClient))
e.GET("/atualizar-candidatura", newAtualizarCandidaturaHandler(dbClient, tags))
e.POST("/atualizar-candidatura", newAtualizarCandidaturaFormHandler(dbClient))
e.POST("/aceitar-termo", newAceitarTermoFormHandler(dbClient))
e.GET("/fale-conosco", newFaleConoscoHandler())
e.POST("/fale-conosco", newFaleConoscoFormHandler(dbClient, tokenService, emailClient, contactEmail))
port := os.Getenv("PORT")
if port == "" {
log.Fatal("missing PORT environment variable")
}
log.Fatal(e.Start(":" + port))
}
| [
"\"ELECTION_YEAR\"",
"\"GAE_ENV\"",
"\"DB_URL\"",
"\"DB_NAME\"",
"\"EMAIL\"",
"\"FALE_CONOSCO_EMAIL\"",
"\"PASSWORD\"",
"\"SITE_URL\"",
"\"SECRET\"",
"\"UPDATE_PROFILE\"",
"\"PORT\""
]
| []
| [
"PORT",
"EMAIL",
"PASSWORD",
"UPDATE_PROFILE",
"SECRET",
"DB_NAME",
"FALE_CONOSCO_EMAIL",
"GAE_ENV",
"SITE_URL",
"ELECTION_YEAR",
"DB_URL"
]
| [] | ["PORT", "EMAIL", "PASSWORD", "UPDATE_PROFILE", "SECRET", "DB_NAME", "FALE_CONOSCO_EMAIL", "GAE_ENV", "SITE_URL", "ELECTION_YEAR", "DB_URL"] | go | 11 | 0 | |
website/ops.py | #!/usr/bin/python3
import datetime
import json
import os
import os.path
import sqlite3
import mysql.connector
import hashlib
import random
import string
import smtplib, ssl
import urllib
import jwt
import shutil
import markdown
import re
import secrets
from collections import defaultdict
from icu import Locale, Collator
import logging
import sys
siteconfig = json.load(open(os.environ.get("LEXONOMY_SITECONFIG",
"siteconfig.json"), encoding="utf-8"))
DB = siteconfig['db']
mainDB = None
linksDB = None
dictDB = {}
ques = '%s' if DB == 'mysql' else '?'
SQL_SEP = '`' if DB == 'mysql' else '['
SQL_SEP_C = '`' if DB == 'mysql' else ']'
i18n = json.load(open(os.environ.get("LEXONOMY_LANG",
"lang/" + siteconfig["lang"] + ".json"), encoding="utf-8"))
defaultDictConfig = {"editing": {"xonomyMode": "nerd", "xonomyTextEditor": "askString" },
"searchability": {"searchableElements": []},
"xema": {"elements": {}},
"titling": {"headwordAnnotations": []},
"flagging": {"flag_element": "", "flags": []}}
prohibitedDictIDs = ["login", "logout", "make", "signup", "forgotpwd", "changepwd", "users", "dicts", "oneclick", "recoverpwd", "createaccount", "consent", "userprofile", "dictionaries", "about", "list", "lemma", "json", "ontolex", "tei"];
# db management
def getDB(dictID):
if DB == 'mysql':
global dictDB
try:
if dictID not in dictDB or not dictDB[dictID].is_connected():
dictDB[dictID] = mysql.connector.connect(
host=os.environ['MYSQL_DB_HOST'],
user=os.environ['MYSQL_DB_USER'],
database="lexo_" + dictID,
password=os.environ['MYSQL_DB_PASSWORD'],
autocommit=True
)
conn = dictDB[dictID].cursor(dictionary=True, buffered=True)
except:
dictDB[dictID] = mysql.connector.connect(
host=os.environ['MYSQL_DB_HOST'],
user=os.environ['MYSQL_DB_USER'],
database="lexo_" + dictID,
password=os.environ['MYSQL_DB_PASSWORD'],
autocommit=True
)
conn = dictDB[dictID].cursor(dictionary=True, buffered=True)
return conn
elif os.path.isfile(os.path.join(siteconfig["dataDir"], "dicts/"+dictID+".sqlite")):
conn = sqlite3.connect(os.path.join(siteconfig["dataDir"], "dicts/"+dictID+".sqlite"))
conn.row_factory = sqlite3.Row
conn.executescript("PRAGMA journal_mode=WAL; PRAGMA foreign_keys=on")
return conn
else:
return None
def getMainDB():
if DB == 'mysql':
global mainDB
try:
if mainDB == None or not mainDB.is_connected():
mainDB = mysql.connector.connect(
host=os.environ['MYSQL_DB_HOST'],
user=os.environ['MYSQL_DB_USER'],
database="lexo",
password=os.environ['MYSQL_DB_PASSWORD'],
autocommit=True
)
conn = mainDB.cursor(dictionary=True, buffered=True)
except:
mainDB = mysql.connector.connect(
host=os.environ['MYSQL_DB_HOST'],
user=os.environ['MYSQL_DB_USER'],
database="lexo",
password=os.environ['MYSQL_DB_PASSWORD'],
autocommit=True
)
conn = mainDB.cursor(dictionary=True, buffered=True)
return conn
else:
conn = sqlite3.connect(os.path.join(
siteconfig["dataDir"], 'lexonomy.sqlite'))
conn.row_factory = sqlite3.Row
return conn
def getLinkDB():
if DB == 'mysql':
global linksDB
try:
if linksDB == None or not linksDB.is_connected():
linksDB = mysql.connector.connect(
host=os.environ['MYSQL_DB_HOST'],
user=os.environ['MYSQL_DB_USER'],
database="lexo_crossref",
password=os.environ['MYSQL_DB_PASSWORD'],
autocommit=True
)
conn = linksDB.cursor(dictionary=True)
except:
linksDB = mysql.connector.connect(
host=os.environ['MYSQL_DB_HOST'],
user=os.environ['MYSQL_DB_USER'],
database="lexo_crossref",
password=os.environ['MYSQL_DB_PASSWORD'],
autocommit=True
)
conn = linksDB.cursor(dictionary=True, buffered=True)
return conn
else:
conn = sqlite3.connect(os.path.join(siteconfig["dataDir"], 'crossref.sqlite'))
conn.row_factory = sqlite3.Row
return conn
# SMTP
# SMTP
if siteconfig["mailconfig"] and siteconfig["mailconfig"]["host"] and siteconfig["mailconfig"]["port"]:
message = "Subject: " + mailSubject + "\n\n" + mailText
if siteconfig["mailconfig"]["ttl"]:
context = ssl.create_default_context()
with smtplib.SMTP(siteconfig["mailconfig"]["host"], siteconfig["mailconfig"]["port"]) as server:
server.ehlo() # Can be omitted
server.starttls(context=context)
server.ehlo() # Can be omitted
server.login(siteconfig["mailconfig"]["from"], siteconfig["mailconfig"]["password"])
server.sendmail(siteconfig["mailconfig"]["from"], mailTo, message)
server.quit()
elif siteconfig["mailconfig"]["secure"]:
context = ssl.create_default_context()
server = smtplib.SMTP_SSL(siteconfig["mailconfig"]["host"], siteconfig["mailconfig"]["port"], context=context)
server.sendmail(siteconfig["mailconfig"]["from"], mailTo, message)
server.quit()
else:
server = smtplib.SMTP(siteconfig["mailconfig"]["host"], siteconfig["mailconfig"]["port"])
server.sendmail(siteconfig["mailconfig"]["from"], mailTo, message)
server.quit()
# config
def readDictConfigs(dictDB):
configs = {"siteconfig": siteconfig}
c = dictDB.execute("select * from configs")
c = c if c else dictDB
for r in c.fetchall() if c else []:
configs[r["id"]] = json.loads(r["json"])
for conf in ["ident", "publico", "users", "kex", "titling", "flagging",
"searchability", "xampl", "thes", "collx", "defo", "xema",
"xemplate", "editing", "subbing", "download", "links", "autonumber", "gapi"]:
if not conf in configs:
configs[conf] = defaultDictConfig.get(conf, {})
users = {}
for email in configs["users"]:
users[email.lower()] = configs["users"][email]
configs["users"] = users
for key in configs.keys():
if type(configs[key]) is dict:
configs[key] = defaultdict(lambda: None, configs[key])
return configs
def addSubentryParentTags(db, entryID, xml):
from xml.dom import minidom, Node
doc = minidom.parseString(xml)
els = []
_els = doc.getElementsByTagName("*")
els.append(_els[0])
for i in range(1, len(_els)):
if _els[i].getAttributeNS("http://www.lexonomy.eu/", "subentryID"):
els.append(_els[i])
for el in els:
subentryID = el.getAttributeNS("http://www.lexonomy.eu/", "subentryID")
if el.parentNode.nodeType != Node.ELEMENT_NODE:
subentryID = entryID
c = db.execute(f"select s.parent_id, e.title from sub as s inner join entries as e on e.id=s.parent_id where s.child_id={ques}", (subentryID,))
c = c if c else db
for r in c.fetchall() if c else []:
pel = doc.createElementNS("http://www.lexonomy.eu/", "lxnm:subentryParent")
pel.setAttribute("id", str(r["parent_id"]))
pel.setAttribute("title", r["title"])
el.appendChild(pel)
return doc.toxml()
def removeSubentryParentTags(xml):
return re.sub(r"<lxnm:subentryParent[^>]*>", "", xml)
# auth
def verifyLogin(email, sessionkey):
conn = getMainDB()
now = datetime.datetime.utcnow()
yesterday = now - datetime.timedelta(days=1)
email = email.lower()
c = conn.execute(f"select email, ske_apiKey, ske_username, apiKey, consent from users where email={ques} and sessionKey={ques} and sessionLast>={ques}", (email, sessionkey, yesterday))
c = c if c else conn
user = c.fetchone() if c else None
if not user:
return {"loggedin": False, "email": None}
conn.execute(f"update users set sessionLast={ques} where email={ques}", (now, email))
close_db(conn, shouldclose=False)
ret = {"loggedin": True, "email": email, "isAdmin": email in siteconfig["admins"],
"ske_username": user["ske_username"], "ske_apiKey": user["ske_apiKey"],
"apiKey": user["apiKey"], "consent": user["consent"] == 1}
return ret
def verifyLoginAndDictAccess(email, sessionkey, dictDB):
ret = verifyLogin(email, sessionkey)
configs = readDictConfigs(dictDB)
dictAccess = configs["users"].get(email)
if ret["loggedin"] == False or (not dictAccess and not ret["isAdmin"]):
return {"loggedin": ret["loggedin"], "email": email, "dictAccess": False, "isAdmin": False}, configs
ret["dictAccess"] = dictAccess
for r in ["canEdit", "canConfig", "canDownload", "canUpload"]:
ret[r] = ret.get("isAdmin") or (dictAccess and dictAccess[r])
return ret, configs
def deleteEntry(db, entryID, email):
# tell my parents that they need a refresh:
db.execute (f"update entries set needs_refresh=1 where id in (select parent_id from sub where child_id={ques})", (entryID,))
# delete me:
db.execute (f"delete from entries where id={ques}", (entryID,))
# tell history that I have been deleted:
db.execute (f"insert into history(entry_id, action, {SQL_SEP}when{SQL_SEP_C}, email, xml) values({ques},{ques},{ques},{ques},{ques})",
(entryID, "delete", datetime.datetime.utcnow(), email, None))
close_db(db)
def readEntry(db, configs, entryID):
c = db.execute(f"select * from entries where id={ques}", (entryID,))
c = c if c else db
row = c.fetchone() if c else None
if not row:
return 0, "", ""
xml = setHousekeepingAttributes(entryID, row["xml"], configs["subbing"])
if configs["subbing"]:
xml = addSubentryParentTags(db, entryID, xml)
if configs["links"]:
xml = updateEntryLinkables(db, entryID, xml, configs, False, False)
return entryID, xml, row["title"]
def createEntry(dictDB, configs, entryID, xml, email, historiography):
xml = setHousekeepingAttributes(entryID, xml, configs["subbing"])
xml = removeSubentryParentTags(xml)
title = getEntryTitle(xml, configs["titling"])
sortkey = getSortTitle(xml, configs["titling"])
doctype = getDoctype(xml)
needs_refac = 1 if len(list(configs["subbing"].keys())) > 0 else 0
needs_resave = 1 if configs["searchability"].get("searchableElements") and len(configs["searchability"].get("searchableElements")) > 0 else 0
# entry title already exists?
c = dictDB.execute(f"select id from entries where title = {ques} and id <> {ques}", (title, entryID))
c = c if c else dictDB
r = c.fetchone() if c else None
feedback = {"type": "saveFeedbackHeadwordExists", "info": r["id"]} if r else None
if entryID:
sql = f"insert into entries(id, xml, title, sortkey, needs_refac, needs_resave, doctype) values({ques}, {ques}, {ques}, {ques}, {ques}, {ques}, {ques})"
params = (entryID, xml, title, sortkey, needs_refac, needs_resave, doctype)
else:
sql = f"insert into entries(xml, title, sortkey, needs_refac, needs_resave, doctype) values({ques}, {ques}, {ques}, {ques}, {ques}, {ques})"
params = (xml, title, sortkey, needs_refac, needs_resave, doctype)
c = dictDB.execute(sql, params)
c = c if c else dictDB
entryID = c.lastrowid
dictDB.execute(f"insert into searchables(entry_id, txt, level) values({ques}, {ques}, {ques})", (entryID, getEntryTitle(xml, configs["titling"], True), 1))
dictDB.execute(f"insert into history(entry_id, action, {SQL_SEP}when{SQL_SEP_C}, email, xml, historiography) values({ques}, {ques}, {ques}, {ques}, {ques}, {ques})", (entryID, "create", str(datetime.datetime.utcnow()), email, xml, json.dumps(historiography)))
close_db(dictDB, shouldclose=False)
return entryID, xml, feedback
def updateEntry(dictDB, configs, entryID, xml, email, historiography):
c = dictDB.execute(f"select id, xml from entries where id={ques}", (entryID, ))
c = c if c else dictDB
row = c.fetchone() if c else None
xml = setHousekeepingAttributes(entryID, xml, configs["subbing"])
xml = removeSubentryParentTags(xml)
newxml = re.sub(r" xmlns:lxnm=[\"\']http:\/\/www\.lexonomy\.eu\/[\"\']", "", xml)
newxml = re.sub(r"(\=)\"([^\"]*)\"", r"\1'\2'", newxml)
newxml = re.sub(r" lxnm:(sub)?entryID='[0-9]+'", "", newxml)
newxml = re.sub(r" lxnm:linkable='[^']+'", "", newxml)
if not row:
adjustedEntryID, adjustedXml, feedback = createEntry(dictDB, configs, entryID, xml, email, historiography)
if configs["links"]:
adjustedXml = updateEntryLinkables(dictDB, adjustedEntryID, adjustedXml, configs, True, True)
return adjustedEntryID, adjustedXml, True, feedback
else:
oldxml = row["xml"]
oldxml = re.sub(r" xmlns:lxnm=[\"\']http:\/\/www\.lexonomy\.eu\/[\"\']", "", oldxml)
oldxml = re.sub(r"(\=)\"([^\"]*)\"", r"\1'\2'", oldxml)
oldxml = re.sub(r" lxnm:(sub)?entryID='[0-9]+'", "", oldxml)
oldxml = re.sub(r" lxnm:linkable='[^']+'", "", oldxml)
if oldxml == newxml:
return entryID, xml, False, None
else:
dictDB.execute(f"update entries set needs_refresh=1 where id in (select parent_id from sub where child_id={ques})", (entryID,))
title = getEntryTitle(xml, configs["titling"])
sortkey = getSortTitle(xml, configs["titling"])
doctype = getDoctype(xml)
needs_refac = 1 if len(list(configs["subbing"].keys())) > 0 else 0
needs_resave = 1 if configs["searchability"].get("searchableElements") and len(configs["searchability"].get("searchableElements")) > 0 else 0
# entry title already exists?
c = dictDB.execute(f"select id from entries where title = {ques} and id <> {ques}", (title, entryID))
c = c if c else dictDB
r = c.fetchone() if c else None
feedback = {"type": "saveFeedbackHeadwordExists", "info": r["id"]} if r else None
dictDB.execute(f"update entries set doctype={ques}, xml={ques}, title={ques}, sortkey={ques}, needs_refac={ques}, needs_resave={ques} where id={ques}", (doctype, xml, title, sortkey, needs_refac, needs_resave, entryID))
dictDB.execute(f"update searchables set txt={ques} where entry_id={ques} and level=1", (getEntryTitle(xml, configs["titling"], True), entryID))
dictDB.execute(f"insert into history(entry_id, action, {SQL_SEP}when{SQL_SEP_C}, email, xml, historiography) values({ques}, {ques}, {ques}, {ques}, {ques}, {ques})", (entryID, "update", str(datetime.datetime.utcnow()), email, xml, json.dumps(historiography)))
close_db(dictDB, shouldclose=False)
if configs["links"]:
xml = updateEntryLinkables(dictDB, entryID, xml, configs, True, True)
return entryID, xml, True, feedback
def getEntryTitle(xml, titling, plaintext=False):
if titling.get("headwordAnnotationsType") == "advanced" and not plaintext:
ret = titling["headwordAnnotationsAdvanced"]
advancedUsed = False
for el in re.findall(r"%\([^)]+\)", titling["headwordAnnotationsAdvanced"]):
text = ""
extract = extractText(xml, el[2:-1])
if len(extract) > 0:
text = extract[0]
advancedUsed = True
ret = ret.replace(el, text)
if advancedUsed:
return ret
ret = getEntryHeadword(xml, titling.get("headword"))
if not plaintext:
ret = "<span class='headword'>" + ret + "</span>"
if titling.get("headwordAnnotations"):
for hw in titling.get("headwordAnnotations"):
ret += " " if ret != "" else ""
ret += " ".join(extractText(xml, hw))
return ret
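# Illustrative example (hypothetical config values): with
#   titling = {"headwordAnnotationsType": "advanced",
#              "headwordAnnotationsAdvanced": "%(headword) (%(pos))"}
# and xml "<entry><headword>cat</headword><pos>noun</pos></entry>",
# getEntryTitle returns "cat (noun)". If none of the %(element) placeholders
# can be filled from the entry, the function falls back to the headword-based
# title (getEntryHeadword plus any plain headwordAnnotations).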
def getEntryTitleID(dictDB, configs, entry_id, plaintext=False):
eid, xml, title = readEntry(dictDB, configs, entry_id)
return getEntryTitle(xml, configs["titling"], plaintext)
def getEntryHeadword(xml, headword_elem):
ret = "?"
arr = extractText(xml, headword_elem)
if len(arr)>0:
ret = arr[0]
else:
ret = extractFirstText(xml)
if len(ret) > 255:
ret = ret[0:255]
return ret
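# Illustrative example: getEntryHeadword("<entry><headword>cat</headword></entry>", "headword")
# returns "cat"; when the configured headword element is missing, it falls back
# to the first non-empty text node and truncates the result to 255 characters.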
def getDoctype(xml):
pat = r"^<([^>\/\s]+)"
for match in re.findall(pat, xml):
return match
return ""
def getSortTitle(xml, titling):
if titling.get("headwordSorting"):
return getEntryHeadword(xml, titling.get("headwordSorting"))
return getEntryHeadword(xml, titling.get("headword"))
def generateKey(size=32):
return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(size))
def generateDictId(size=8):
return ''.join(random.choice("abcdefghijkmnpqrstuvwxy23456789") for _ in range(size))
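# Illustrative example: generateDictId() might return e.g. "k7rwm3pf"; the
# alphabet deliberately omits the easily confused characters 0, 1, l and o so
# generated dictionary IDs stay readable.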
def login(email, password):
if siteconfig["readonly"]:
return {"success": False}
conn = getMainDB()
passhash = hashlib.sha1(password.encode("utf-8")).hexdigest()
c = conn.execute(f"select email from users where email={ques} and passwordHash={ques}", (email.lower(), passhash))
c = c if c else conn
user = c.fetchone() if c else None
if not user:
return {"success": False}
key = generateKey()
now = datetime.datetime.utcnow()
conn.execute(f"update users set sessionKey={ques}, sessionLast={ques} where email={ques}", (key, now, email))
close_db(conn)
return {"success": True, "email": user["email"], "key": key}
def logout(user):
conn = getMainDB()
if DB == 'sqlite':
conn.execute(f"update users set sessionKey='', sessionLast='' where email={ques}", (user["email"],))
else:
conn.execute(f"update users set sessionKey='', sessionLast=NULL where email={ques}", (user["email"],))
close_db(conn)
return True
def sendSignupToken(email, remoteip):
if siteconfig["readonly"]:
return False
conn = getMainDB()
c = conn.execute(f"select email from users where email={ques}", (email.lower(),))
c = c if c else conn
user = c.fetchone() if c else None
if not user:
token = secrets.token_hex()
tokenurl = siteconfig["baseUrl"] + "createaccount/" + token
expireDate = datetime.datetime.now() + datetime.timedelta(days=2)
mailSubject = "Lexonomy signup"
mailText = "Dear Lexonomy user,\n\n"
mailText += "Somebody (hopefully you, from the address "+remoteip+") requested to create a new Lexonomy account. Please follow the link below to create your account:\n\n"
mailText += tokenurl + "\n\n"
mailText += "For security reasons this link is only valid for two days (until "+expireDate.isoformat()+"). If you did not request an account, you can safely ignore this message. \n\n"
mailText += "Yours,\nThe Lexonomy team"
conn.execute(f"insert into register_tokens (email, requestAddress, token, expiration) values ({ques}, {ques}, {ques}, {ques})", (email, remoteip, token, expireDate))
close_db(conn)
sendmail(email, mailSubject, mailText)
return True
else:
return False
def sendToken(email, remoteip):
if siteconfig["readonly"]:
return False
conn = getMainDB()
c = conn.execute(f"select email from users where email={ques}", (email.lower(),))
c = c if c else conn
user = c.fetchone() if c else None
if user:
token = secrets.token_hex()
tokenurl = siteconfig["baseUrl"] + "recoverpwd/" + token
expireDate = datetime.datetime.now() + datetime.timedelta(days=2)
mailSubject = "Lexonomy password reset"
mailText = "Dear Lexonomy user,\n\n"
mailText += "Somebody (hopefully you, from the address "+remoteip+") requested a new password for the Lexonomy account "+email+". You can reset your password by clicking the link below:\n\n"
mailText += tokenurl + "\n\n"
mailText += "For security reasons this link is only valid for two days (until "+expireDate.isoformat()+"). If you did not request a password reset, you can safely ignore this message. \n\n"
mailText += "Yours,\nThe Lexonomy team"
conn.execute(f"insert into recovery_tokens (email, requestAddress, token, expiration) values ({ques}, {ques}, {ques}, {ques})", (email, remoteip, token, expireDate))
close_db(conn)
sendmail(email, mailSubject, mailText)
return True
else:
return False
def verifyToken(token, tokenType):
conn = getMainDB()
c = conn.execute(f"select * from {tokenType}_tokens where token={ques} and expiration>=datetime('now') and usedDate is null", (token,))
c = c if c else conn
row = c.fetchone() if c else None
if row:
return True
else:
return False
def createAccount(token, password, remoteip):
conn = getMainDB()
c = conn.execute(f"select * from register_tokens where token={ques} and expiration>=datetime('now') and usedDate is null", (token,))
c = c if c else conn
row = c.fetchone() if c else None
if row:
c2 = conn.execute(f"select * from users where email={ques}", (row["email"],))
row2 = c2.fetchone() if c2 else None
if not row2:
passhash = hashlib.sha1(password.encode("utf-8")).hexdigest()
conn.execute(f"insert into users (email,passwordHash) values ({ques},{ques})", (row["email"], passhash))
conn.execute(f"update register_tokens set usedDate=datetime('now'), usedAddress={ques} where token={ques}", (remoteip, token))
close_db(conn)
return True
else:
return False
else:
return False
def resetPwd(token, password, remoteip):
conn = getMainDB()
c = conn.execute(f"select * from recovery_tokens where token={ques} and expiration>=datetime('now') and usedDate is null", (token,))
c = c if c else conn
row = c.fetchone() if c else None
if row:
passhash = hashlib.sha1(password.encode("utf-8")).hexdigest()
conn.execute(f"update users set passwordHash={ques} where email={ques}", (passhash, row["email"]))
conn.execute(f"update recovery_tokens set usedDate=datetime('now'), usedAddress={ques} where token={ques}", (remoteip, token))
close_db(conn)
return True
else:
return False
def setConsent(email, consent):
conn = getMainDB()
conn.execute(f"update users set consent={ques} where email={ques}", (consent, email))
close_db(conn)
return True
def changePwd(email, password):
conn = getMainDB()
passhash = hashlib.sha1(password.encode("utf-8")).hexdigest()
conn.execute(f"update users set passwordHash={ques} where email={ques}", (passhash, email))
close_db(conn)
return True
def changeSkeUserName(email, ske_userName):
conn = getMainDB()
conn.execute(f"update users set ske_username={ques} where email={ques}", (ske_userName, email))
close_db(conn)
return True
def changeSkeApiKey(email, ske_apiKey):
conn = getMainDB()
conn.execute(f"update users set ske_apiKey={ques} where email={ques}", (ske_apiKey, email))
close_db(conn)
return True
def updateUserApiKey(user, apiKey):
conn = getMainDB()
conn.execute(f"update users set apiKey={ques} where email={ques}", (apiKey, user["email"]))
close_db(conn)
sendApiKeyToSke(user, apiKey)
return True
def sendApiKeyToSke(user, apiKey):
if user["ske_username"] and user["ske_apiKey"]:
data = json.dumps({"options": {"settings_lexonomyApiKey": apiKey, "settings_lexonomyEmail": user["email"].lower()}})
queryData = urllib.parse.urlencode({ "username": user["ske_username"], "api_key": user["ske_apiKey"], "json": data })
url = "https://api.sketchengine.eu/bonito/run.cgi/set_user_options?" + queryData
res = urllib.request.urlopen(url)
return True
def prepareApiKeyForSke(email):
conn = getMainDB()
c = conn.execute(f"select * from users where email={ques}", (email,))
c = c if c else conn
row = c.fetchone() if c else None
if row:
if row["apiKey"] is None or row["apiKey"] == "":
lexapi = generateKey()
conn.execute(f"update users set apiKey={ques} where email={ques}", (lexapi, email))
close_db(conn)
else:
lexapi = row["apiKey"]
sendApiKeyToSke(row, lexapi)
return True
def processJWT(user, jwtdata):
conn = getMainDB()
c = conn.execute(f"select * from users where ske_id={ques}", (jwtdata["user"]["id"],))
c = c if c else conn
row = c.fetchone() if c else None
key = generateKey()
now = datetime.datetime.utcnow()
if row:
# SkE ID found in the database: log the user in
conn.execute(f"update users set sessionKey={ques}, sessionLast={ques} where email={ques}", (key, now, row["email"]))
close_db(conn)
prepareApiKeyForSke(row["email"])
return {"success": True, "email": row["email"], "key": key}
else:
if user["loggedin"]:
# user already logged in: store the SkE ID with their account
conn.execute(f"update users set ske_id={ques}, ske_username={ques}, ske_apiKey={ques}, sessionKey={ques}, sessionLast={ques} where email={ques}", (jwtdata["user"]["id"], jwtdata["user"]["username"], jwtdata["user"]["api_key"], key, now, user["email"]))
close_db(conn)
prepareApiKeyForSke(user["email"])
return {"success": True, "email": user["email"], "key": key}
else:
# user not logged in: register a new account and log them in
email = jwtdata["user"]["email"].lower()
c2 = conn.execute(f"select * from users where email={ques}", (email,))
row2 = c2.fetchone() if c2 else None
if not row2:
lexapi = generateKey()
conn.execute(f"insert into users (email, passwordHash, ske_id, ske_username, ske_apiKey, sessionKey, sessionLast, apiKey) values ({ques}, null, {ques}, {ques}, {ques}, {ques}, {ques}, {ques})", (email, jwtdata["user"]["id"], jwtdata["user"]["username"], jwtdata["user"]["api_key"], key, now, lexapi))
close_db(conn)
prepareApiKeyForSke(email)
return {"success": True, "email": email, "key": key}
else:
return {"success": False, "error": "user with email " + email + " already exists. Log-in and connect account to SkE."}
def dictExists(dictID):
if DB == 'sqlite':
return os.path.isfile(os.path.join(siteconfig["dataDir"], "dicts/" + dictID + ".sqlite"))
elif DB == 'mysql':
conn = mysql.connector.connect(
host=os.environ['MYSQL_DB_HOST'],
user=os.environ['MYSQL_DB_USER'],
password=os.environ['MYSQL_DB_PASSWORD']
)
mycursor = conn.cursor()
mycursor.execute("SHOW DATABASES LIKE %s", ('lexo_' + dictID,))
c = mycursor.fetchone()
conn.close()
return True if c else False
def suggestDictId():
dictid = generateDictId()
while dictid in prohibitedDictIDs or dictExists(dictid):
dictid = generateDictId()
return dictid
def makeDict(dictID, template, title, blurb, email):
if title == "":
title = "?"
if blurb == "":
blurb = i18n["Yet another Lexonomy dictionary."]
if dictID in prohibitedDictIDs or dictExists(dictID):
return False
if DB == 'sqlite':
if not template.startswith("/"):
template = "dictTemplates/" + template + ".sqlite"
shutil.copy(template, os.path.join(siteconfig["dataDir"], "dicts/" + dictID + ".sqlite"))
elif DB == 'mysql':
import subprocess
p = subprocess.call(["adminscripts/copyMysqlDb.sh", template, dictID], start_new_session=True, close_fds=True)
users = {email: {"canEdit": True, "canConfig": True, "canDownload": True, "canUpload": True}}
dictDB = getDB(dictID)
dictDB.execute(f"update configs set json={ques} where id='users'", (json.dumps(users),))
ident = {"title": title, "blurb": blurb}
dictDB.execute(f"update configs set json={ques} where id='ident'", (json.dumps(ident),))
close_db(dictDB, shouldclose=False)
attachDict(dictDB, dictID)
return True
def attachDict(dictDB, dictID):
configs = readDictConfigs(dictDB)
conn = getMainDB()
conn.execute(f"delete from dicts where id={ques}", (dictID,))
conn.execute(f"delete from user_dict where dict_id={ques}", (dictID,))
title = configs["ident"]["title"]
conn.execute(f"insert into dicts(id, title) values ({ques}, {ques})", (dictID, title))
for email in configs["users"]:
conn.execute(f"insert into user_dict(dict_id, user_email) values ({ques}, {ques})", (dictID, email.lower()))
close_db(conn)
def cloneDict(dictID, email):
newID = suggestDictId()
shutil.copy(os.path.join(siteconfig["dataDir"], "dicts/" + dictID + ".sqlite"), os.path.join(siteconfig["dataDir"], "dicts/" + newID + ".sqlite"))
newDB = getDB(newID)
res = newDB.execute("select json from configs where id='ident'")
row = res.fetchone() if res else None
ident = {"title": "?", "blurb": "?"}
if row:
ident = json.loads(row["json"])
ident["title"] = i18n["Clone of "] + ident["title"]
newDB.execute(f"update configs set json={ques} where id='ident'", (json.dumps(ident),))
close_db(newDB)
attachDict(newDB, newID)
return {"success": True, "dictID": newID, "title": ident["title"]}
def destroyDict(dictID):
conn = getMainDB()
conn.execute(f"delete from dicts where id={ques}", (dictID,))
conn.execute(f"delete from user_dict where dict_id={ques}", (dictID,))
close_db(conn)
os.remove(os.path.join(siteconfig["dataDir"], "dicts/" + dictID + ".sqlite"))
return True
def moveDict(oldID, newID):
if newID in prohibitedDictIDs or dictExists(newID):
return False
shutil.move(os.path.join(siteconfig["dataDir"], "dicts/" + oldID + ".sqlite"), os.path.join(siteconfig["dataDir"], "dicts/" + newID + ".sqlite"))
conn = getMainDB()
conn.execute(f"delete from dicts where id={ques}", (oldID,))
close_db(conn)
dictDB = getDB(newID)
attachDict(dictDB, newID)
return True
def getDoc(docID):
if os.path.isfile("docs/"+docID+".md"):
doc = {"id": docID, "title":"", "html": ""}
html = markdown.markdown(open("docs/"+docID+".md").read())
title = re.search('<h1>([^<]*)</h1>', html)
if title:
doc["title"] = re.sub(r'<\/?h1>', '', title.group(0))
doc["html"] = html
return doc
else:
return False
def getDictsByUser(email):
dicts = []
email = str(email).lower()
conn = getMainDB()
c = conn.execute(f"select d.id, d.title from dicts as d inner join user_dict as ud on ud.dict_id=d.id where ud.user_email={ques} order by d.title", (email,))
c = c if c else conn
for r in c.fetchall() if c else []:
info = {"id": r["id"], "title": r["title"], "hasLinks": False}
try:
configs = readDictConfigs(getDB(r["id"]))
if configs["users"][email] and configs["users"][email]["canConfig"]:
info["currentUserCanDelete"] = True
if configs["links"] and len(configs["links"])>0:
info["hasLinks"] = True
except:
info["broken"] = True
dicts.append(info)
return dicts
def getLangList():
langs = []
codes = get_iso639_1()
conn = getMainDB()
c = conn.execute("SELECT DISTINCT language FROM dicts WHERE language!='' ORDER BY language")
c = c if c else conn
for r in c.fetchall() if c else []:
lang = next((item for item in codes if item["code"] == r["language"]), {})
langs.append({"code": r["language"], "language": lang.get("lang")})
return langs
def getDictList(lang, withLinks):
dicts = []
conn = getMainDB()
if lang:
c = conn.execute(f"SELECT * FROM dicts WHERE language={ques} ORDER BY title", (lang, ))
c = c if c else conn
else:
c = conn.execute("SELECT * FROM dicts ORDER BY title")
c = c if c else conn
for r in c.fetchall() if c else []:
info = {"id": r["id"], "title": r["title"], "language": r["language"], "hasLinks": False}
try:
configs = readDictConfigs(getDB(r["id"]))
if configs["links"] and len(configs["links"])>0:
info["hasLinks"] = True
except:
info["broken"] = True
if not withLinks or (withLinks == True and info["hasLinks"] == True):
dicts.append(info)
return dicts
def getLinkList(headword, sourceLang, sourceDict, targetLang):
links = []
linkDB = getLinkDB()
if sourceDict and sourceDict != "":
dicts = [{"id": sourceDict}]
else:
dicts = getDictList(sourceLang, True)
for d in dicts:
dictDB = getDB(d["id"])
if dictDB:
query = f"SELECT DISTINCT l.entry_id AS entry_id, l.txt AS link_id, l.element AS link_el, s.txt AS hw FROM searchables AS s, linkables AS l WHERE s.entry_id=l.entry_id AND s.txt LIKE {ques} AND s.level=1"
c = dictDB.execute(query, (headword+"%", ))
c = c if c else dictDB
for entry in c.fetchall() if c else []:
info0 = {"sourceDict": d["id"], "sourceHeadword": entry["hw"]}
if entry["entry_id"] and entry["entry_id"] != "":
info0["sourceID"] = entry["entry_id"]
if entry["link_el"] == "sense" and "_" in entry["link_id"]:
lia = entry["link_id"].split("_")
info0["sourceSense"] = lia[1]
if not info0.get("sourceID"):
info0["sourceID"] = lia[0]
info0["sourceURL"] = siteconfig["baseUrl"] + info0["sourceDict"] + "/" + str(info0["sourceID"])
# first, find links with searched dict as source
if targetLang:
targetDicts = []
for td in getDictList(targetLang, True):
targetDicts.append(td["id"])
query2 = f"SELECT * FROM links WHERE source_dict={ques} AND source_id={ques} AND target_dict IN "+"('"+"','".join(targetDicts)+"')"
else:
query2 = f"SELECT * FROM links WHERE source_dict={ques} AND source_id={ques}"
data2 = (d["id"], entry["link_id"])
c2 = linkDB.execute(query2, data2)
for r2 in c2.fetchall() if c2 else []:
info = info0.copy()
info["targetDict"] = r2["target_dict"]
info["confidence"] = r2["confidence"]
targetDB = getDB(r2["target_dict"])
if targetDB:
info["targetLang"] = readDictConfigs(targetDB)['ident']['lang']
info["targetDictConcept"] = False
if r2["target_element"] == "sense" and "_" in r2["target_id"]:
lia = r2["target_id"].split("_")
info["targetSense"] = lia[1]
query3 = f"SELECT DISTINCT l.entry_id AS entry_id, l.txt AS link_id, l.element AS link_el, s.txt AS hw FROM searchables AS s, linkables AS l WHERE s.entry_id=l.entry_id AND l.txt={ques} AND s.level=1"
c3 = targetDB.execute(query3, (r2["target_id"],))
for r3 in c3.fetchall() if c3 else []:
info["targetHeadword"] = r3["hw"]
info["targetID"] = r3["entry_id"]
info["targetURL"] = siteconfig["baseUrl"] + info["targetDict"] + "/" + str(info["targetID"])
links.append(info)
else:
info["targetHeadword"] = r2["target_id"]
info["targetID"] = r2["target_id"]
info["targetDictConcept"] = True
info["targetURL"] = ""
info["targetSense"] = ""
info["targetLang"] = ""
links.append(info)
# second, find links with search dict as target
if targetLang:
query2 = f"SELECT * FROM links WHERE target_dict={ques} AND target_id={ques} AND source_dict IN "+"('"+"','".join(targetDicts)+"')"
else:
query2 = f"SELECT * FROM links WHERE target_dict={ques} AND target_id={ques}"
data2 = (d["id"], entry["link_id"])
c2 = linkDB.execute(query2, data2)
for r2 in c2.fetchall() if c2 else []:
info = info0.copy()
info["targetDict"] = r2["source_dict"]
info["confidence"] = r2["confidence"]
sourceDB = getDB(r2["source_dict"])
if sourceDB:
info["targetLang"] = readDictConfigs(sourceDB)['ident']['lang']
info["targetDictConcept"] = False
if r2["source_element"] == "sense" and "_" in r2["source_id"]:
lia = r2["source_id"].split("_")
info["targetSense"] = lia[1]
query3 = f"SELECT DISTINCT l.entry_id AS entry_id, l.txt AS link_id, l.element AS link_el, s.txt AS hw FROM searchables AS s, linkables AS l WHERE s.entry_id=l.entry_id AND l.txt={ques} AND s.level=1"
c3 = sourceDB.execute(query3, (r2["source_id"],))
for r3 in c3.fetchall():
info["targetHeadword"] = r3["hw"]
info["targetID"] = r3["entry_id"]
info["targetURL"] = siteconfig["baseUrl"] + info["targetDict"] + "/" + str(info["targetID"])
links.append(info)
else:
info["targetHeadword"] = r2["source_id"]
info["targetID"] = r2["source_id"]
info["targetDictConcept"] = True
info["targetURL"] = ""
info["targetSense"] = ""
info["targetLang"] = ""
links.append(info)
else:
# the source dictionary is a "concept" dictionary, so the headword itself serves as the link id
info0 = {"sourceDict": d["id"], "sourceHeadword": headword, "sourceID": headword, "sourceDictConcept": True, "sourceURL": "", "sourceSense": ""}
# first, find links with searched dict as source
if targetLang:
targetDicts = []
for td in getDictList(targetLang, True):
targetDicts.append(td["id"])
query2 = f"SELECT * FROM links WHERE source_dict={ques} AND source_id={ques} AND target_dict IN "+"('"+"','".join(targetDicts)+"')"
else:
query2 = f"SELECT * FROM links WHERE source_dict={ques} AND source_id={ques}"
data2 = (d["id"], headword)
c2 = linkDB.execute(query2, data2)
for r2 in c2.fetchall():
info = info0.copy()
info["targetDict"] = r2["target_dict"]
info["confidence"] = r2["confidence"]
targetDB = getDB(r2["target_dict"])
if targetDB:
info["targetLang"] = readDictConfigs(targetDB)['ident']['lang']
info["targetDictConcept"] = False
if r2["target_element"] == "sense" and "_" in r2["target_id"]:
lia = r2["target_id"].split("_")
info["targetSense"] = lia[1]
query3 = f"SELECT DISTINCT l.entry_id AS entry_id, l.txt AS link_id, l.element AS link_el, s.txt AS hw FROM searchables AS s, linkables AS l WHERE s.entry_id=l.entry_id AND l.txt={ques} AND s.level=1"
c3 = targetDB.execute(query3, (r2["target_id"],))
for r3 in c3.fetchall():
info["targetHeadword"] = r3["hw"]
info["targetID"] = r3["entry_id"]
info["targetURL"] = siteconfig["baseUrl"] + info["targetDict"] + "/" + str(info["targetID"])
links.append(info)
else:
info["targetHeadword"] = r2["target_id"]
info["targetID"] = r2["target_id"]
info["targetDictConcept"] = True
info["targetURL"] = ""
info["targetSense"] = ""
info["targetLang"] = ""
links.append(info)
# second, find links with search dict as target
if targetLang:
query2 = f"SELECT * FROM links WHERE target_dict={ques} AND target_id={ques} AND source_dict IN "+"('"+"','".join(targetDicts)+"')"
else:
query2 = f"SELECT * FROM links WHERE target_dict={ques} AND target_id={ques}"
data2 = (d["id"], headword)
c2 = linkDB.execute(query2, data2)
for r2 in c2.fetchall():
info = info0.copy()
info["targetDict"] = r2["source_dict"]
info["confidence"] = r2["confidence"]
sourceDB = getDB(r2["source_dict"])
if sourceDB:
info["targetLang"] = readDictConfigs(sourceDB)['ident']['lang']
info["targetDictConcept"] = False
if r2["source_element"] == "sense" and "_" in r2["source_id"]:
lia = r2["source_id"].split("_")
info["targetSense"] = lia[1]
query3 = f"SELECT DISTINCT l.entry_id AS entry_id, l.txt AS link_id, l.element AS link_el, s.txt AS hw FROM searchables AS s, linkables AS l WHERE s.entry_id=l.entry_id AND l.txt={ques} AND s.level=1"
c3 = sourceDB.execute(query3, (r2["source_id"],))
for r3 in c3.fetchall() if c3 else []:
info["targetHeadword"] = r3["hw"]
info["targetID"] = r3["entry_id"]
info["targetURL"] = siteconfig["baseUrl"] + info["targetDict"] + "/" + str(info["targetID"])
links.append(info)
else:
info["targetHeadword"] = r2["source_id"]
info["targetID"] = r2["source_id"]
info["targetDictConcept"] = True
info["targetURL"] = ""
info["targetSense"] = ""
info["targetLang"] = ""
links.append(info)
return links
def listUsers(searchtext, howmany):
conn = getMainDB()
c = conn.execute(f"select * from users where email like {ques} order by email limit {howmany}", ("%"+searchtext+"%",))
c = c if c else conn
users = []
for r in c.fetchall() if c else []:
users.append({"id": r["email"], "title": r["email"]})
c = conn.execute(f"select count(*) as total from users where email like {ques}", (f"%"+searchtext+"%", ))
c = c if c else conn
r = c.fetchone() if c else None
total = r["total"]
return {"entries":users, "total": total}
def createUser(xml):
from lxml import etree as ET
root = ET.fromstring(xml)
email = root.attrib["email"]
passhash = hashlib.sha1(root.attrib["password"].encode("utf-8")).hexdigest()
conn = getMainDB()
conn.execute(f"insert into users(email, passwordHash) values({ques}, {ques})", (email.lower(), passhash))
close_db(conn)
return {"entryID": email, "adjustedXml": readUser(email)["xml"]}
def updateUser(email, xml):
from lxml import etree as ET
root = ET.fromstring(xml)
if root.attrib['password']:
passhash = hashlib.sha1(root.attrib["password"].encode("utf-8")).hexdigest()
conn = getMainDB()
conn.execute(f"update users set passwordHash={ques} where email={ques}", (passhash, email.lower()))
close_db(conn)
return readUser(email)
def deleteUser(email):
conn = getMainDB()
conn.execute(f"delete from users where email={ques}", (email.lower(),))
close_db(conn)
return True
def readUser(email):
conn = getMainDB()
c = conn.execute(f"select * from users where email={ques}", (email.lower(), ))
c = c if c else conn
r = c.fetchone() if c else None
if r:
if r["sessionLast"]:
xml = "<user lastSeen='"+str(r["sessionLast"])+"'>"
else:
xml = "<user>"
c2 = conn.execute(f"select d.id, d.title from user_dict as ud inner join dicts as d on d.id=ud.dict_id where ud.user_email={ques} order by d.title", (r["email"], ))
for r2 in c2.fetchall() if c2 else []:
xml += "<dict id='" + r2["id"] + "' title='" + clean4xml(r2["title"]) + "'/>"
xml += "</user>"
return {"email": r["email"], "xml": xml}
else:
return {"email":"", "xml":""}
def listDicts(searchtext, howmany):
conn = getMainDB()
c = conn.execute(f"select * from dicts where id like {ques} or title like {ques} order by id limit {howmany}", (f"%"+searchtext+"%", "%"+searchtext+"%"))
c = c if c else conn
dicts = []
for r in c.fetchall() if c else []:
dicts.append({"id": r["id"], "title": r["title"]})
c = conn.execute(f"select count(*) as total from dicts where id like {ques} or title like {ques}", (f"%"+searchtext+"%", "%"+searchtext+"%"))
c = c if c else conn
r = c.fetchone() if c else None
total = r["total"]
return {"entries": dicts, "total": total}
def readDict(dictId):
conn = getMainDB()
c = conn.execute(f"select * from dicts where id={ques}", (dictId, ))
c = c if c else conn
r = c.fetchone() if c else None
if r:
xml = "<dict id='"+clean4xml(r["id"])+"' title='"+clean4xml(r["title"])+"'>"
c2 = conn.execute(f"select u.email from user_dict as ud inner join users as u on u.email=ud.user_email where ud.dict_id={ques} order by u.email", (r["id"], ))
for r2 in c2.fetchall() if c2 else []:
xml += "<user email='" + r2["email"] + "'/>"
xml += "</dict>"
return {"id": r["id"], "xml": xml}
else:
return {"id":"", "xml":""}
def clean4xml(text):
return text.replace("&", "&amp;").replace('"', "&quot;").replace("'", "&#39;").replace("<", "&lt;").replace(">", "&gt;")
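# Illustrative example: clean4xml('He said "hi" & left') returns
# 'He said &quot;hi&quot; &amp; left'; ampersands are escaped first so the
# entities produced by the later replacements are not double-escaped.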
def markdown_text(text):
return markdown.markdown(text).replace("<a href=\"http", "<a target=\"_blank\" href=\"http")
def setHousekeepingAttributes(entryID, xml, subbing):
entryID = str(entryID)
#delete any housekeeping attributes and elements that already exist in the XML
xml = re.sub(r"^(<[^>\/]*)\s+xmlns:lxnm=['\"]http:\/\/www\.lexonomy\.eu\/[\"']", r"\1", xml)
xml = re.sub(r"^(<[^>\/]*)\s+lxnm:entryID=['\"][^\"\']*[\"']", r"\1", xml)
xml = re.sub(r"^(<[^>\/]*)\s+lxnm:subentryID=['\"][^\"\']*[\"']", r"\1", xml)
xml = re.sub(r"^(<[^>\/]*)\s+lxnm:linkable=['\"][^\"\']*[\"']", r"\1", xml)
#get name of the top-level element
rootMatch = re.search(r"^<([^\s>\/]+)", xml, flags=re.M)
root = rootMatch.group(1) if rootMatch else ""
#set housekeeping attributes
if root in subbing:
xml = re.sub(r"^<([^\s>\/]+)", r"<\1 lxnm:subentryID='"+entryID+"'", xml)
else:
xml = re.sub(r"^<([^\s>\/]+)", r"<\1 lxnm:entryID='"+entryID+"'", xml)
xml = re.sub(r"^<([^\s>\/]+)", r"<\1 xmlns:lxnm='http://www.lexonomy.eu/'", xml)
return xml
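# Illustrative example (hypothetical values): setHousekeepingAttributes(7,
# "<entry><headword>cat</headword></entry>", {}) returns
# "<entry xmlns:lxnm='http://www.lexonomy.eu/' lxnm:entryID='7'><headword>cat</headword></entry>";
# when the top-level element is listed in the subbing config, lxnm:subentryID
# is written instead of lxnm:entryID.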
def exportEntryXml(dictDB, dictID, entryID, configs, baseUrl):
c = dictDB.execute(f"select * from entries where id={ques}", (entryID,))
c = c if c else dictDB
row = c.fetchone() if c else None
if row:
xml = setHousekeepingAttributes(entryID, row["xml"], configs["subbing"])
attribs = " this=\"" + baseUrl + dictID + "/" + str(row["id"]) + ".xml\""
c2 = dictDB.execute(f"select e1.id, e1.title from entries as e1 where e1.sortkey<(select sortkey from entries where id={ques}) order by e1.sortkey desc limit 1", (entryID, ))
r2 = c2.fetchone() if c2 else None
if r2:
attribs += " previous=\"" + baseUrl + dictID + "/" + str(r2["id"]) + ".xml\""
c2 = dictDB.execute(f"select e1.id, e1.title from entries as e1 where e1.sortkey>(select sortkey from entries where id={ques}) order by e1.sortkey asc limit 1", (entryID, ))
r2 = c2.fetchone() if c2 else None
if r2:
attribs += " next=\"" + baseUrl + dictID + "/" + str(r2["id"]) + ".xml\""
xml = "<lexonomy" + attribs + ">" + xml + "</lexonomy>"
return {"entryID": row["id"], "xml": xml}
else:
return {"entryID": 0, "xml": ""}
def readNabesByEntryID(dictDB, dictID, entryID, configs):
nabes_before = []
nabes_after = []
nabes = []
c = dictDB.execute(f"select e1.id, e1.title, e1.sortkey from entries as e1 where e1.doctype={ques} ", (configs["xema"]["root"],))
c = c if c else dictDB
for r in c.fetchall() if c else []:
nabes.append({"id": str(r["id"]), "title": r["title"], "sortkey": r["sortkey"]})
# sort by selected locale
collator = Collator.createInstance(Locale(getLocale(configs)))
nabes.sort(key=lambda x: collator.getSortKey(x['sortkey']))
#select before/after entries
entryID_seen = False
for n in nabes:
if not entryID_seen:
nabes_before.append(n)
else:
nabes_after.append(n)
if n["id"] == entryID:
entryID_seen = True
return nabes_before[-8:] + nabes_after[0:15]
def readNabesByText(dictDB, dictID, configs, text):
nabes_before = []
nabes_after = []
nabes = []
c = dictDB.execute(f"select e1.id, e1.title, e1.sortkey from entries as e1 where e1.doctype={ques} ", (configs["xema"]["root"],))
c = c if c else dictDB
for r in c.fetchall() if c else []:
nabes.append({"id": str(r["id"]), "title": r["title"], "sortkey": r["sortkey"]})
# sort by selected locale
collator = Collator.createInstance(Locale(getLocale(configs)))
nabes.sort(key=lambda x: collator.getSortKey(x['sortkey']))
#select before/after entries
for n in nabes:
if collator.getSortKey(n["sortkey"]) <= collator.getSortKey(text):
nabes_before.append(n)
else:
nabes_after.append(n)
return nabes_before[-8:] + nabes_after[0:15]
def readRandoms(dictDB):
configs = readDictConfigs(dictDB)
limit = 75
more = False
randoms = []
if DB == 'sqlite':
c = dictDB.execute(f"select id, title, sortkey from entries where doctype={ques} and id in (select id from entries order by random() limit {limit})", (configs["xema"]["root"],))
elif DB == 'mysql':
c = dictDB.execute(f"select id, title, sortkey from entries where doctype={ques} order by RAND() limit {limit}", (configs["xema"]["root"],))
c = c if c else dictDB
for r in c.fetchall() if c else []:
randoms.append({"id": r["id"], "title": r["title"], "sortkey": r["sortkey"]})
# sort by selected locale
collator = Collator.createInstance(Locale(getLocale(configs)))
randoms.sort(key=lambda x: collator.getSortKey(x['sortkey']))
c = dictDB.execute("select count(*) as total from entries")
c = c if c else dictDB
r = c.fetchone() if c else None
if r["total"] > limit:
more = True
return {"entries": randoms, "more": more}
def readRandomOne(dictDB, dictID, configs):
if DB == 'sqlite':
c = dictDB.execute(f"select id, title, xml from entries where id in (select id from entries where doctype={ques} order by random() limit 1)", (configs["xema"]["root"], ))
elif DB == 'mysql':
c = dictDB.execute(
f"select id, title, xml from entries where doctype={ques} order by RAND() limit 1", (configs["xema"]["root"], ))
c = c if c else dictDB
r = c.fetchone() if c else None
if r:
return {"id": r["id"], "title": r["title"], "xml": r["xml"]}
else:
return {"id": 0, "title": "", "xml": ""}
def download_xslt(configs):
if 'download' in configs and 'xslt' in configs['download'] and configs['download']['xslt'].strip() != "" and len(configs['download']['xslt']) > 0 and configs['download']['xslt'][0] == "<":
import lxml.etree as ET
try:
xslt_dom = ET.XML(configs["download"]["xslt"].encode("utf-8"))
xslt = ET.XSLT(xslt_dom)
except (ET.XSLTParseError, ET.XMLSyntaxError) as e:
return "Failed to parse XSL: {}".format(e), False
def transform(xml_txt):
try:
dom = ET.XML(xml_txt)
xml_transformed_dom = xslt(dom)
xml_transformed_byt = ET.tostring(xml_transformed_dom, xml_declaration=False, encoding="utf-8")
xml_transformed = xml_transformed_byt.decode('utf-8')
return xml_transformed, True
except ET.XMLSyntaxError as e:
return "Failed to parse content: {}".format(e), False
except ET.XSLTParseError as e:
return "Failed to use XSL: {}".format(e), False
else:
def transform(xml_text):
return re.sub("><",">\n<",xml_text), True
return transform
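# Illustrative usage (hypothetical configs): when no download XSL is configured,
# download_xslt returns the fallback pretty-printer, e.g.
#   transform = download_xslt({})
#   transform("<entry><headword>cat</headword></entry>")
#   -> ("<entry>\n<headword>cat</headword>\n</entry>", True)
# With a valid XSL stylesheet configured, the returned transform applies it and
# reports success in the second element of the tuple.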
def download(dictDB, dictID, configs):
rootname = dictID.lstrip(" 0123456789")
if rootname == "":
rootname = "lexonomy"
yield "<"+rootname+">\n"
c = dictDB.execute("select id, xml from entries")
c = c if c else dictDB
transform = download_xslt(configs)
for r in c.fetchall() if c else []:
xml = setHousekeepingAttributes(r["id"], r["xml"], configs["subbing"])
xml_xsl, success = transform(xml)
if not success:
return xml_xsl, 400
yield xml_xsl
yield "\n"
yield "</"+rootname+">\n"
def purge(dictDB, email, historiography):
dictDB.execute(f"insert into history(entry_id, action, {SQL_SEP}when{SQL_SEP_C}, email, xml, historiography) select id, 'purge', {ques}, {ques}, xml, {ques} from entries", (str(datetime.datetime.utcnow()), email, json.dumps(historiography)))
dictDB.execute("delete from entries")
close_db(dictDB, True)
if DB == 'sqlite':
dictDB.execute("vacuum")
close_db(dictDB)
return True
def showImportErrors(filename, truncate):
with open(filename+".err", "r") as content_file:
content = content_file.read()
if (truncate):
content = content[0:truncate].replace("<", "<")
return {"errorData": content, "truncated": truncate}
else:
return content
def importfile(dictID, filename, email):
import subprocess
pidfile = filename + ".pid"
errfile = filename + ".err"
if os.path.isfile(pidfile):
return checkImportStatus(pidfile, errfile)
pidfile_f = open(pidfile, "w")
errfile_f = open(errfile, "w")
if DB == 'sqlite':
dbpath = os.path.join(siteconfig["dataDir"], "dicts/"+dictID+".sqlite")
p = subprocess.Popen(["adminscripts/import.py", dbpath, filename, email], stdout=pidfile_f, stderr=errfile_f, start_new_session=True, close_fds=True)
else:
p = subprocess.Popen(["adminscripts/importMysql.py", dictID, filename, email], stdout=pidfile_f, stderr=errfile_f, start_new_session=True, close_fds=True)
return {"progressMessage": "Import started. Please wait...", "finished": False, "errors": False}
def checkImportStatus(pidfile, errfile):
content = ''
while content == '':
with open(pidfile, "r") as content_file:
content = content_file.read()
pid_data = re.split(r"[\n\r]", content)
finished = False
if len(pid_data) > 1:
if pid_data[-1] == "":
progress = pid_data[-2]
else:
progress = pid_data[-1]
if "100%" in progress:
finished = True
else:
progress = "Import started. Please wait..."
errors = False
if os.path.isfile(errfile) and os.stat(errfile).st_size:
errors = True
return {"progressMessage": progress, "finished": finished, "errors": errors}
def readDoctypesUsed(dictDB):
c = dictDB.execute("select doctype from entries group by doctype order by count(*) desc")
c = c if c else dictDB
doctypes = []
for r in c.fetchall() if c else []:
doctypes.append(r["doctype"])
return doctypes
def getLastEditedEntry(dictDB, email):
c = dictDB.execute(f"select entry_id from history where email={ques} order by {SQL_SEP}when{SQL_SEP_C} desc limit 1", (email, ))
c = c if c else dictDB
r = c.fetchone() if c else None
if r:
return str(r["entry_id"])
else:
return ""
def listEntriesById(dictDB, entryID, configs):
c = dictDB.execute(f"select e.id, e.title, e.xml from entries as e where e.id={ques}", (entryID,))
c = c if c else dictDB
entries = []
for r in c.fetchall() if c else []:
xml = setHousekeepingAttributes(r["id"], r["xml"], configs["subbing"])
entries.append({"id": r["id"], "title": r["title"], "xml": xml})
return entries
def listEntries(dictDB, dictID, configs, doctype, searchtext="", modifier="start", howmany=10, sortdesc=False, reverse=False, fullXML=False):
# fast initial loading, for large dictionaries without search
if searchtext == "":
sqlc = "select count(*) as total from entries"
cc = dictDB.execute(sqlc)
cc = cc if cc else dictDB
rc = cc.fetchone() if cc else None
if int(rc["total"]) > 1000:
sqlf = "select * from entries order by sortkey limit 200"
cf = dictDB.execute(sqlf)
cf = cf if cf else dictDB
entries = []
for rf in cf.fetchall() if cf else []:
item = {"id": rf["id"], "title": rf["title"], "sortkey": rf["sortkey"]}
entries.append(item)
return rc["total"], entries, True
lowertext = searchtext.lower()
if isinstance(sortdesc, str):
if sortdesc == "true":
sortdesc = True
else:
sortdesc = False
if "flag_element" in configs["flagging"] or fullXML:
entryXML = ", e.xml "
else:
entryXML = ""
if "headwordSortDesc" in configs["titling"]:
reverse = configs["titling"]["headwordSortDesc"]
if reverse:
sortdesc = not sortdesc
if modifier == "start":
sql1 = f"select s.txt, min(s.level) as level, e.id, e.sortkey, e.title" + entryXML + f" from searchables as s inner join entries as e on e.id=s.entry_id where doctype={ques} and (LOWER(s.txt) like {ques} or s.txt like {ques}) group by e.id order by s.level"
params1 = (doctype, lowertext+"%", searchtext+"%")
sql2 = f"select count(distinct s.entry_id) as total from searchables as s inner join entries as e on e.id=s.entry_id where doctype={ques} and (LOWER(s.txt) like {ques} or s.txt like {ques})"
params2 = (doctype, lowertext+"%", searchtext+"%")
elif modifier == "wordstart":
sql1 = f"select s.txt, min(s.level) as level, e.id, e.sortkey, e.title" + entryXML + f" from searchables as s inner join entries as e on e.id=s.entry_id where doctype={ques} and (LOWER(s.txt) like {ques} or LOWER(s.txt) like {ques} or s.txt like {ques} or s.txt like {ques}) group by e.id order by s.level"
params1 = (doctype, lowertext + "%", "% " + lowertext + "%", searchtext + "%", "% " + searchtext + "%")
sql2 = f"select count(distinct s.entry_id) as total from searchables as s inner join entries as e on e.id=s.entry_id where doctype={ques} and (LOWER(s.txt) like {ques} or LOWER(s.txt) like {ques} or s.txt like {ques} or s.txt like {ques})"
params2 = (doctype, lowertext + "%", "% " + lowertext + "%", searchtext + "%", "% " + searchtext + "%")
elif modifier == "substring":
sql1 = f"select s.txt, min(s.level) as level, e.id, e.sortkey, e.title" + entryXML + f" from searchables as s inner join entries as e on e.id=s.entry_id where doctype={ques} and (LOWER(s.txt) like {ques} or s.txt like {ques}) group by e.id order by s.level"
params1 = (doctype, "%" + lowertext + "%", "%" + searchtext + "%")
sql2 = f"select count(distinct s.entry_id) as total from searchables as s inner join entries as e on e.id=s.entry_id where doctype={ques} and (LOWER(s.txt) like {ques} or s.txt like {ques})"
params2 = (doctype, "%" + lowertext + "%", "%" + searchtext + "%")
elif modifier == "exact":
sql1 = "select s.txt, min(s.level) as level, e.id, e.sortkey, e.title" + entryXML + f" from searchables as s inner join entries as e on e.id=s.entry_id where doctype={ques} and s.txt={ques} group by e.id order by s.level"
params1 = (doctype, searchtext)
sql2 = f"select count(distinct s.entry_id) as total from searchables as s inner join entries as e on e.id=s.entry_id where doctype={ques} and s.txt={ques}"
params2 = (doctype, searchtext)
c1 = dictDB.execute(sql1, params1)
c1 = c1 if c1 else dictDB
entries = []
for r1 in c1.fetchall() if c1 else []:
item = {"id": r1["id"], "title": r1["title"], "sortkey": r1["sortkey"]}
if "flag_element" in configs["flagging"]:
item["flag"] = extractText(r1["xml"], configs["flagging"]["flag_element"])
if fullXML:
item["xml"] = setHousekeepingAttributes(r1["id"], r1["xml"], configs["subbing"])
if r1["level"] > 1:
item["title"] += " ← <span class='redirector'>" + r1["txt"] + "</span>"
entries.append(item)
# sort by selected locale
collator = Collator.createInstance(Locale(getLocale(configs)))
entries.sort(key=lambda x: collator.getSortKey(x['sortkey']), reverse=sortdesc)
# and limit
entries = entries[0:int(howmany)]
c2 = dictDB.execute(sql2, params2)
c2 = c2 if c2 else dictDB
r2 = c2.fetchone() if c2 else None
total = r2["total"]
return total, entries, False
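# Illustrative call (hypothetical arguments): listEntries(dictDB, dictID, configs,
# "entry", "ca", "start", 10) matches entries whose searchable text starts with
# "ca" (case-insensitively), sorts them with an ICU collator for the dictionary's
# locale and returns (total, first 10 entries, False). With an empty searchtext
# and more than 1000 entries it short-circuits to the first 200 entries by
# sortkey and returns True as the third element.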
def listEntriesPublic(dictDB, dictID, configs, searchtext):
howmany = 100
sql_list = f"select s.txt, min(s.level) as level, e.id, e.title, e.sortkey, case when s.txt={ques} then 1 else 2 end as priority from searchables as s inner join entries as e on e.id=s.entry_id where s.txt like {ques} and e.doctype={ques} group by e.id order by priority, level, s.level"
c1 = dictDB.execute(sql_list, ("%"+searchtext+"%", "%"+searchtext+"%", configs["xema"].get("root")))
entries = []
for r1 in c1.fetchall() if c1 else []:
item = {"id": r1["id"], "title": r1["title"], "sortkey": r1["sortkey"], "exactMatch": (r1["level"] == 1 and r1["priority"] == 1)}
if r1["level"] > 1:
item["title"] += " ← <span class='redirector'>" + r1["txt"] + "</span>"
entries.append(item)
# sort by selected locale
collator = Collator.createInstance(Locale(getLocale(configs)))
entries.sort(key=lambda x: collator.getSortKey(x['sortkey']))
# and limit
entries = entries[0:int(howmany)]
return entries
def extractText(xml, elName):
elName = str(elName)
if elName == "":
return []
pat = r"<" + elName + "[^>]*>([^<]*)</" + elName + ">"
return re.findall(pat, xml)
def extractFirstText(xml):
pat = r"<([^\s>]+)[^>]*>([^<>]*?)</([^\s>]+)>"
for match in re.findall(pat, xml):
if match[0] == match[2] and match[1].strip() != "":
return match[1].strip()
return ""
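# Illustrative examples:
#   extractText("<entry><headword>cat</headword></entry>", "headword")  -> ["cat"]
#   extractFirstText("<entry><headword>cat</headword></entry>")         -> "cat"
# Both are regex-based, so they only pick up elements whose content contains no
# nested markup.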
def getDictStats(dictDB):
res = {"entryCount": 0, "needResave": 0}
c = dictDB.execute("select count(*) as entryCount from entries")
c = c if c else dictDB
r = c.fetchone() if c else None
res["entryCount"] = r["entryCount"]
c = dictDB.execute("select count(*) as needResave from entries where needs_resave=1 or needs_refresh=1 or needs_refac=1")
c = c if c else dictDB
r = c.fetchone() if c else None
res["needResave"] = r["needResave"]
return res
def updateDictConfig(dictDB, dictID, configID, content):
dictDB.execute(f"delete from configs where id={ques}", (configID, ))
dictDB.execute(f"insert into configs(id, json) values({ques}, {ques})", (configID, json.dumps(content)))
close_db(dictDB, shouldclose=False)
if configID == "ident":
attachDict(dictDB, dictID)
if content.get('lang'):
lang = content.get('lang')
conn = getMainDB()
conn.execute(f"UPDATE dicts SET language={ques} WHERE id={ques}", (lang, dictID))
close_db(conn, shouldclose=False)
return content, False
elif configID == 'users':
attachDict(dictDB, dictID)
return content, False
elif configID == "titling" or configID == "searchability":
resaveNeeded = flagForResave(dictDB)
return content, resaveNeeded
elif configID == "links":
resaveNeeded = flagForResave(dictDB)
if DB == 'sqlite':
c = dictDB.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='linkables'")
if not c.fetchone():
dictDB.execute("CREATE TABLE linkables (id INTEGER PRIMARY KEY AUTOINCREMENT, entry_id INTEGER REFERENCES entries (id) ON DELETE CASCADE, txt TEXT, element TEXT, preview TEXT)")
dictDB.execute("CREATE INDEX link ON linkables (txt)")
return content, resaveNeeded
elif configID == "subbing":
refacNeeded = flagForRefac(dictDB)
return content, refacNeeded
else:
return content, False
def flagForResave(dictDB):
c = dictDB.execute("update entries set needs_resave=1")
c = c if c else dictDB
close_db(dictDB)
return (c.rowcount > 0)
def flagForRefac(dictDB):
c = dictDB.execute("update entries set needs_refac=1")
c = c if c else dictDB
close_db(dictDB)
return (c.rowcount > 0)
def makeQuery(lemma):
words = []
for w in lemma.split(" "):
if w != "":
words.append('[lc="'+w+'"+|+lemma_lc="'+w+'"]')
ret = re.sub(" ","+", lemma) + ";q=aword," + "".join(words) + ";q=p+0+0>0+1+[ws(\".*\",+\"definitions\",+\".*\")];exceptmethod=PREV-CONC"
return ret
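# Illustrative example: makeQuery("cat") returns (roughly)
#   'cat;q=aword,[lc="cat"+|+lemma_lc="cat"];q=p+0+0>0+1+[ws(".*",+"definitions",+".*")];exceptmethod=PREV-CONC'
# i.e. a query string meant to be embedded in a Sketch Engine concordance URL
# for the given lemma.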
def clearRefac(dictDB):
dictDB.execute("update entries set needs_refac=0, needs_refresh=0")
close_db(dictDB, shouldclose=False)
def refac(dictDB, dictID, configs):
from xml.dom import minidom, Node
if len(configs['subbing']) == 0:
return False
c = dictDB.execute(f"select e.id, e.xml, h.email from entries as e left outer join history as h on h.entry_id=e.id where e.needs_refac=1 order by h.{SQL_SEP}when{SQL_SEP_C} asc limit 1")
c = c if c else dictDB
r = c.fetchone() if c else None
if not r:
return False
entryID = r["id"]
xml = r["xml"]
email = r["email"] or ""
doc = minidom.parseString(xml)
doc.documentElement.setAttributeNS("http://www.lexonomy.eu/", "lxnm:entryID", str(entryID))
#in the current entry, remove all <lxnm:subentryParent>
_els = doc.getElementsByTagNameNS("http://www.lexonomy.eu/", "subentryParent")
for el in _els:
el.parentNode.removeChild(el)
# in the current entry, find elements which are subentries, and are not contained inside other subentries
els = []
for doctype in configs["subbing"]:
_els = doc.getElementsByTagName(doctype)
for el in _els:
if el.parentNode and el.parentNode.nodeType == 1:
isSubSub = False
p = el.parentNode
while p.parentNode and p.parentNode.nodeType == 1:
if p.tagName in configs["subbing"]:
isSubSub = True
p = p.parentNode
if not isSubSub:
els.append(el)
dictDB.execute(f"delete from sub where parent_id={ques}", (entryID, ))
# keep saving subentries of the current entry until there are no more subentries to save:
if len(els) > 0:
for el in els:
subentryID = el.getAttributeNS("http://www.lexonomy.eu/", "subentryID")
xml = el.toxml()
if subentryID:
subentryID, adjustedXml, changed, feedback = updateEntry(dictDB, configs, subentryID, xml, email.lower(), {"refactoredFrom":entryID})
el.setAttributeNS("http://www.lexonomy.eu/", "lxnm:subentryID", str(subentryID))
dictDB.execute(f"insert into sub(parent_id, child_id) values({ques},{ques})", (entryID, subentryID))
if changed:
dictDB.execute(f"update entries set needs_refresh=1 where id in (select parent_id from sub where child_id={ques}) and id<>{ques}", (subentryID, entryID))
else:
subentryID, adjustedXml, feedback = createEntry(dictDB, configs, None, xml, email.lower(), {"refactoredFrom":entryID})
el.setAttributeNS("http://www.lexonomy.eu/", "lxnm:subentryID", str(subentryID))
subentryID, adjustedXml, changed, feedback = updateEntry(dictDB, configs, subentryID, el.toxml(), email.lower(), {"refactoredFrom":entryID})
dictDB.execute(f"insert into sub(parent_id, child_id) values({ques},{ques})", (entryID, subentryID))
if DB == 'mysql':
dictDB.execute(f"update entries set needs_refresh=1 where id in (select parent_id from sub where child_id={subentryID})", multi=True )
else:
dictDB.execute(f"update entries set needs_refresh=1 where id in (select parent_id from sub where child_id={ques})", (subentryID, ))
xml = doc.toxml().replace('<?xml version="1.0" ?>', '').strip()
dictDB.execute(f"update entries set xml={ques}, needs_refac=0 where id={ques}", (xml, entryID))
close_db(dictDB, shouldclose= False)
def refresh(dictDB, dictID, configs):
from xml.dom import minidom, Node
if len(configs['subbing']) == 0:
return False
# takes one entry that needs refreshing and sucks into it the latest versions of its subentries
# get one entry that needs refreshing where none of its children needs refreshing
c = dictDB.execute("select pe.id, pe.xml from entries as pe left outer join sub as s on s.parent_id=pe.id left join entries as ce on ce.id=s.child_id where pe.needs_refresh=1 and (ce.needs_refresh is null or ce.needs_refresh=0) limit 1")
c = c if c else dictDB
r = c.fetchone() if c else None
if not r:
return False
parentID = r["id"]
parentXml = r["xml"]
if not "xmlns:lxnm" in parentXml:
parentXml = re.sub(r"<([^>^ ]*) ", r"<\1 xmlns:lxnm='http://www.lexonomy.eu/' ", parentXml)
parentDoc = minidom.parseString(parentXml)
# loop until every subentry element has been pulled in
while True:
# find an element which is a subentry and which we haven't sucked in yet:
el = None
for doctype in configs["subbing"]:
els = parentDoc.documentElement.getElementsByTagName(doctype)
for el in els:
if el and not el.hasAttributeNS("http://www.lexonomy.eu/", "subentryID"):
el = None
if el and el.hasAttributeNS("http://www.lexonomy.eu/", "done"):
el = None
if el:
break
if el:
break
if el: #if such an element exists
subentryID = el.getAttributeNS("http://www.lexonomy.eu/", "subentryID")
# get the subentry from the database and inject it into the parent's xml:
c = dictDB.execute(f"select xml from entries where id={ques}", (subentryID, ))
c = c if c else dictDB
r = c.fetchone() if c else None
if not r:
el.parentNode.removeChild(el)
else:
childXml = r["xml"]
childDoc = minidom.parseString(childXml)
elNew = childDoc.documentElement
el.parentNode.replaceChild(elNew, el)
elNew.setAttributeNS("http://www.lexonomy.eu/", "lxnm:subentryID", subentryID)
elNew.setAttributeNS("http://www.lexonomy.eu/", "lxnm:done", "1")
else: #if no such element exists: we are done
els = parentDoc.documentElement.getElementsByTagName("*")
for el in els:
if el.hasAttributeNS("http://www.lexonomy.eu/", "done"):
el.removeAttributeNS("http://www.lexonomy.eu/", "done")
parentXml = parentDoc.toxml().replace('<?xml version="1.0" ?>', '').strip()
# save the parent's xml (into which all subentries have been injected by now) and tell it that it needs a resave:
dictDB.execute(f"update entries set xml={ques}, needs_refresh=0, needs_resave=1 where id={ques}", (parentXml, parentID))
return True
def resave(dictDB, dictID, configs):
from xml.dom import minidom, Node
c = dictDB.execute("select id, xml from entries where needs_resave=1")
c = c if c else dictDB
for r in c.fetchall() if c else []:
entryID = r["id"]
xml = r["xml"]
xml = re.sub(r"\s+xmlns:lxnm=['\"]http:\/\/www\.lexonomy\.eu\/[\"']", "", xml)
xml = re.sub(r"^<([^>^ ]*) ", r"<\1 xmlns:lxnm='http://www.lexonomy.eu/' ", xml)
dictDB.execute(f"update entries set needs_resave=0, title={ques}, sortkey={ques} where id={ques}", (getEntryTitle(xml, configs["titling"]), getSortTitle(xml, configs["titling"]), entryID))
dictDB.execute(f"delete from searchables where entry_id={ques}", (entryID,))
dictDB.execute(f"insert into searchables(entry_id, txt, level) values({ques}, {ques}, {ques})", (entryID, getEntryTitle(xml, configs["titling"], True), 1))
dictDB.execute(f"insert into searchables(entry_id, txt, level) values({ques}, {ques}, {ques})", (entryID, getEntryTitle(xml, configs["titling"], True).lower(), 1))
headword = getEntryHeadword(xml, configs["titling"].get("headword"))
for searchable in getEntrySearchables(xml, configs):
if searchable != headword:
dictDB.execute(f"insert into searchables(entry_id, txt, level) values({ques},{ques},{ques})", (entryID, searchable, 2))
if configs["links"]:
updateEntryLinkables(dictDB, entryID, xml, configs, True, True)
close_db(dictDB, shouldclose=False)
return True
def getEntryLinks(dictDB, dictID, entryID):
ret = {"out": [], "in": []}
if DB == 'sqlite':
cl = dictDB.execute("SELECT count(*) as count FROM sqlite_master WHERE type='table' and name='linkables'")
rl = cl.fetchone() if cl else None
if DB != 'sqlite' or rl['count'] > 0:
c = dictDB.execute(f"SELECT * FROM linkables WHERE entry_id={ques}", (entryID,))
c = c if c else dictDB
conn = getLinkDB()
for r in c.fetchall() if c else []:
ret["out"] = ret["out"] + links_get(dictID, r["element"], r["txt"], "", "", "")
ret["in"] = ret["in"] + links_get("", "", "", dictID, r["element"], r["txt"])
return ret
def updateEntryLinkables(dictDB, entryID, xml, configs, save=True, save_xml=True):
from xml.dom import minidom, Node
doc = minidom.parseString(xml)
ret = []
# the linkables table may not exist in older dictionaries
if DB == 'sqlite':
c = dictDB.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='linkables'")
if not c.fetchone():
dictDB.execute("CREATE TABLE linkables (id INTEGER PRIMARY KEY AUTOINCREMENT, entry_id INTEGER REFERENCES entries (id) ON DELETE CASCADE, txt TEXT, element TEXT, preview TEXT)")
dictDB.execute("CREATE INDEX link ON linkables (txt)")
for linkref in configs["links"].values():
for el in doc.getElementsByTagName(linkref["linkElement"]):
identifier = linkref["identifier"]
for pattern in re.findall(r"%\([^)]+\)", linkref["identifier"]):
text = ""
extract = extractText(el.toxml(), pattern[2:-1])
extractfull = extractText(xml, pattern[2:-1])
if len(extract) > 0:
text = extract[0]
elif len(extractfull) > 0:
text = extractfull[0]
identifier = identifier.replace(pattern, text)
el.setAttribute('lxnm:linkable', identifier)
preview = linkref["preview"]
for pattern in re.findall(r"%\([^)]+\)", linkref["preview"]):
text = ""
extract = extractText(el.toxml(), pattern[2:-1])
extractfull = extractText(xml, pattern[2:-1])
if len(extract) > 0:
text = extract[0]
elif len(extractfull) > 0:
text = extractfull[0]
preview = preview.replace(pattern, text)
ret.append({'element': linkref["linkElement"], "identifier": identifier, "preview": preview})
xml = doc.toxml().replace('<?xml version="1.0" ?>', '').strip()
if save:
dictDB.execute(f"delete from linkables where entry_id={ques}", (entryID,))
for linkable in ret:
dictDB.execute(f"insert into linkables(entry_id, txt, element, preview) values({ques},{ques},{ques},{ques})", (entryID, linkable["identifier"], linkable["element"], linkable["preview"]))
if save_xml and len(ret)>0:
dictDB.execute(f"update entries set xml={ques} where id={ques}", (xml, entryID))
close_db(dictDB)
return xml
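# Illustrative example (hypothetical link config): with
#   configs["links"] = {"senseLink": {"linkElement": "sense",
#                                     "identifier": "%(headword)",
#                                     "preview": "%(headword): %(def)"}}
# every <sense> element gets an lxnm:linkable attribute and one row in the
# linkables table; each %(element) placeholder is replaced by the text of that
# element, taken from inside the <sense> if present, otherwise from anywhere in
# the entry.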
def getEntrySearchables(xml, configs):
ret = []
ret.append(getEntryHeadword(xml, configs["titling"].get("headword")))
if configs["searchability"].get("searchableElements"):
for sel in configs["searchability"].get("searchableElements"):
for txt in extractText(xml, sel):
if txt != "" and txt not in ret:
ret.append(txt)
return ret
def flagEntry(dictDB, dictID, configs, entryID, flag, email, historiography):
c = dictDB.execute(f"select id, xml from entries where id={ques}", (entryID,))
c = c if c else dictDB
row = c.fetchone() if c else None
xml = row["xml"] if row else ""
xml = re.sub(r" xmlns:lxnm=[\"\']http:\/\/www\.lexonomy\.eu\/[\"\']", "", xml)
xml = re.sub(r"\=\"([^\"]*)\"", r"='\1'", xml)
xml = re.sub(r" lxnm:(sub)?entryID='[0-9]+'", "", xml)
xml = addFlag(xml, flag, configs["flagging"], configs["xema"])
# tell my parents that they need a refresh:
dictDB.execute(f"update entries set needs_refresh=1 where id in (select parent_id from sub where child_id={ques})", (entryID, ))
# update me
needs_refac = 1 if len(list(configs["subbing"].keys())) > 0 else 0
needs_resave = 1 if configs["searchability"].get("searchableElements") and len(configs["searchability"].get("searchableElements")) > 0 else 0
dictDB.execute(f"update entries set doctype={ques}, xml={ques}, title={ques}, sortkey={ques}, needs_refac={ques}, needs_resave={ques} where id={ques}", (getDoctype(xml), xml, getEntryTitle(xml, configs["titling"]), getSortTitle(xml, configs["titling"]), needs_refac, needs_resave, entryID))
dictDB.execute(f"insert into history(entry_id, action, {SQL_SEP}when{SQL_SEP_C}, email, xml, historiography) values({ques}, {ques}, {ques}, {ques}, {ques}, {ques})", (entryID, "update", str(datetime.datetime.utcnow()), email, xml, json.dumps(historiography)))
close_db(dictDB)
return entryID
def addFlag(xml, flag, flagconfig, xemaconfig):
flag_element = flagconfig["flag_element"]
path = getFlagElementPath(xemaconfig, flag_element)
loc1, loc2 = getFlagElementInString(path, xml)
return "{0}<{1}>{2}</{1}>{3}".format(
xml[:loc1], flag_element, flag, xml[loc2:])
def getFlagElementPath(xema, flag_element):
result = getFlagElementPath_recursive(xema, flag_element, xema["root"])
if result is not None:
result.insert(0, xema["root"])
return result
def getFlagElementPath_recursive(xema, flag_element, current_element):
# try all children
for child_props in xema["elements"][current_element]["children"]:
next_el = child_props["name"]
# if we get to the flag element, return!
if next_el == flag_element:
return [flag_element]
# else, recursive search, depth first
path = getFlagElementPath_recursive(xema, flag_element, next_el)
# if returned is not None, then we found what we need, just prepend to the returned path
if path is not None:
return [next_el] + path
# nothing useful found, return None
return None
def getFlagElementInString(path, xml):
start_out, end_out = 0, len(xml)
start_in, end_in = 0, len(xml)
# find each element in path to flag element, start with outmost one
for path_element in path:
regex = re.compile("<{}[^>]*>([\s\S]*?)</{}>".format(path_element, path_element))
match = regex.search(xml, start_in, end_in)
# we can not find the element, just return to the beginning of outer element
if match is None:
return (start_in, start_in)
start_out = match.start(0)
end_out = match.end(0)
start_in = match.start(1)
end_in = match.end(1)
# we found it! Return the span where flag element exists in xml
return (start_out, end_out)
def readDictHistory(dictDB, dictID, configs, entryID):
history = []
c = dictDB.execute(f"select * from history where entry_id={ques} order by {SQL_SEP}when{SQL_SEP_C} desc", (entryID,))
c = c if c else dictDB
for row in c.fetchall() if c else []:
xml = row["xml"]
if row["xml"]:
xml = setHousekeepingAttributes(entryID, row["xml"], configs["subbing"])
history.append({"entry_id": row["entry_id"], "revision_id": row["id"], "content": xml, "action": row["action"], "when": row["when"], "email": row["email"] or "", "historiography": json.loads(row["historiography"])})
return history
def verifyUserApiKey(email, apikey):
conn = getMainDB()
if email == '':
c = conn.execute("select email from users where apiKey=?", (apikey,))
c = c if c else conn
row = c.fetchone()
else:
c = conn.execute(f"select email from users where email={ques} and apiKey={ques}", (email, apikey))
c = c if c else conn
row = c.fetchone()
if not row or siteconfig["readonly"]:
return {"valid": False}
else:
return {"valid": True, "email": email or ""}
def links_add(source_dict, source_el, source_id, target_dict, target_el, target_id, confidence=0, conn=None):
if not conn:
conn = getLinkDB()
c = conn.execute(f"SELECT * FROM links WHERE source_dict={ques} AND source_element={ques} AND source_id={ques} AND target_dict={ques} AND target_element={ques} AND target_id={ques}", (source_dict, source_el, source_id, target_dict, target_el, target_id))
c = c if c else conn
row = c.fetchone() if c else None
if not row:
conn.execute(f"INSERT INTO links (source_dict, source_element, source_id, target_dict, target_element, target_id, confidence) VALUES ({ques},{ques},{ques},{ques},{ques},{ques},{ques})", (source_dict, source_el, source_id, target_dict, target_el, target_id, confidence))
close_db(conn)
c = conn.execute(f"SELECT * FROM links WHERE source_dict={ques} AND source_element={ques} AND source_id={ques} AND target_dict={ques} AND target_element={ques} AND target_id={ques}", (source_dict, source_el, source_id, target_dict, target_el, target_id))
c = c if c else conn
row = c.fetchone() if c else None
return {"link_id": row["link_id"], "source_dict": row["source_dict"], "source_el": row["source_element"], "source_id": row["source_id"], "target_dict": row["target_dict"], "target_el": row["target_element"], "target_id": row["target_id"], "confidence": row["confidence"]}
def links_delete(dictID, linkID):
conn = getLinkDB()
conn.execute(f"DELETE FROM links WHERE source_dict={ques} AND link_id={ques}", (dictID, linkID))
close_db(conn)
c = conn.execute(f"select * from links where link_id={ques}", (linkID, ))
c = c if c else conn
rows = c.fetchall() if c else []
if len(rows) > 0:
return False
else:
return True
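# links_get assembles the WHERE clause from whichever filters are non-empty, then
# resolves entry ids and headwords for both ends of every link. The dictionaries
# involved are opened once and cached in dbs/dbconfigs so they are not reopened
# for each result row; CILI targets are passed through unchanged.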
def links_get(source_dict, source_el, source_id, target_dict, target_el, target_id):
params = []
where = []
if source_dict != "":
where.append(f"source_dict={ques}")
params.append(source_dict)
if source_el != "":
where.append(f"source_element={ques}")
params.append(source_el)
if source_id != "":
where.append(f"source_id={ques}")
params.append(source_id)
if target_dict != "":
where.append(f"target_dict={ques}")
params.append(target_dict)
if target_el != "":
where.append(f"target_element={ques}")
params.append(target_el)
if target_id != "":
where.append(f"target_id={ques}")
params.append(target_id)
query = "SELECT * FROM links"
if len(where) > 0:
query += " WHERE " + " AND ".join(where)
conn = getLinkDB()
c = conn.execute(query, tuple(params))
c = c if c else conn
res = []
#first, get all dictionaries in results
dbs = {}
dbconfigs = {}
for row in c.fetchall() if c else []:
if not row["source_dict"] in dbs:
dbs[row["source_dict"]] = getDB(row["source_dict"])
dbconfigs[row["source_dict"]] = readDictConfigs(dbs[row["source_dict"]])
if not row["target_dict"] in dbs:
try:
dbs[row["target_dict"]] = getDB(row["target_dict"])
dbconfigs[row["target_dict"]] = readDictConfigs(dbs[row["target_dict"]])
except:
dbconfigs[row["target_dict"]] = None
#now the actual results
c = conn.execute(query, tuple(params))
c = c if c else conn
for row in c.fetchall() if c else []:
sourceDB = dbs[row["source_dict"]]
sourceConfig = dbconfigs[row["source_dict"]]
targetDB = dbs[row["target_dict"]]
targetConfig = dbconfigs[row["target_dict"]]
source_entry = ""
source_hw = ""
try:
# test if source DB has linkables tables
ress = sourceDB.execute(f"SELECT entry_id FROM linkables WHERE txt={ques}", (row["source_id"],))
rows = ress.fetchone() if ress else None
if rows:
source_entry = rows["entry_id"]
except:
source_entry = ""
# fallback for ontolex ids
if source_entry == "" and re.match(r"^[0-9]+_[0-9]+$", row["source_id"]):
source_entry = row["source_id"].split("_")[0]
if source_entry != "":
source_hw = getEntryTitleID(sourceDB, sourceConfig, source_entry, True)
target_entry = ""
target_hw = ""
try:
# test if target DB has linkables tables
rest = targetDB.execute(f"SELECT entry_id FROM linkables WHERE txt={ques}", (row["target_id"],))
rowt = rest.fetchone() if rest else None
if rowt:
target_entry = rowt["entry_id"]
except:
target_entry = ""
# fallback for ontolex ids and CILI
if target_entry == "" and re.match(r"^[0-9]+_[0-9]+$", row["target_id"]):
target_entry = row["target_id"].split("_")[0]
if target_entry != "":
target_hw = getEntryTitleID(targetDB, targetConfig, target_entry, True)
if target_dict == "CILI":
target_entry = row["target_id"]
target_hw = row["target_id"]
res.append({"link_id": row["link_id"], "source_dict": row["source_dict"], "source_entry": str(source_entry), "source_hw": source_hw, "source_el": row["source_element"], "source_id": row["source_id"], "target_dict": row["target_dict"], "target_entry": str(target_entry), "target_hw": target_hw, "target_el": row["target_element"], "target_id": row["target_id"], "confidence": row["confidence"]})
return res
def getDictLinkables(dictDB):
ret = []
if DB == 'sqlite':
cl = dictDB.execute("SELECT count(*) as count FROM sqlite_master WHERE type='table' and name='linkables'")
rl = cl.fetchone() if cl else None
if DB != 'sqlite' or rl['count'] > 0:
c = dictDB.execute("SELECT * FROM linkables ORDER BY entry_id, element, txt")
c = c if c else dictDB
for r in c.fetchall() if c else []:
ret.append({"element": r["element"], "link": r["txt"], "entry": r["entry_id"], "preview": r["preview"]})
return ret
def isrunning(dictDB, bgjob, pid=None):
if not pid:
c = dictDB.execute(f"SELECT pid FROM bgjobs WHERE id={ques}", (bgjob,))
c = c if c else dictDB
job = c.fetchone() if c else None
if not job:
return False
pid = job["pid"]
if pid < 0:
return False
try:
os.kill(pid, 0)
except OSError:
return False
else:
return True
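# linkNAISC registers a background job in the bgjobs table and launches
# adminscripts/linkNAISC.sh detached from the request (start_new_session=True),
# with stdout/stderr redirected to /tmp so getNAISCstatus can inspect them later.
# The recorded pid is what isrunning() probes with os.kill(pid, 0).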
def linkNAISC(dictDB, dictID, configs, otherdictDB, otherdictID, otherconfigs):
import subprocess
res = isLinking(dictDB)
if "otherdictID" in res:
return res
c = dictDB.execute(f"INSERT INTO bgjobs (type, data) VALUES ('naisc-local', {ques})", (otherdictID,))
c = c if c else dictDB
close_db(dictDB)
jobid = c.lastrowid
errfile = open("/tmp/linkNAISC-%s-%s.err" % (dictID, otherdictID), "w")
outfile = open("/tmp/linkNAISC-%s-%s.out" % (dictID, otherdictID), "w")
bgjob = subprocess.Popen(['adminscripts/linkNAISC.sh', siteconfig["dataDir"], dictID, otherdictID, siteconfig["naiscCmd"], str(jobid)],
start_new_session=True, close_fds=True, stderr=errfile, stdout=outfile, stdin=subprocess.DEVNULL)
dictDB.execute(f"UPDATE bgjobs SET pid={ques} WHERE id={ques}", (bgjob.pid, jobid))
close_db(dictDB)
return {"bgjob": jobid}
def autoImage(dictDB, dictID, configs, addElem, addNumber):
import subprocess
res = isAutoImage(dictDB)
if res["bgjob"] and res["bgjob"] > 0:
return res
c = dictDB.execute("INSERT INTO bgjobs (type, data) VALUES ('autoimage', 'autoimage')")
c = c if c else dictDB
close_db(dictDB)
jobid = c.lastrowid
errfile = open("/tmp/autoImage-%s.err" % (dictID), "w")
outfile = open("/tmp/autoImage-%s.out" % (dictID), "w")
bgjob = subprocess.Popen(['adminscripts/autoImage.py', siteconfig["dataDir"], dictID, addElem, str(addNumber), str(jobid)],
start_new_session=True, close_fds=True, stderr=errfile, stdout=outfile, stdin=subprocess.DEVNULL)
dictDB.execute(f"UPDATE bgjobs SET pid={ques} WHERE id={ques}", (bgjob.pid, jobid))
close_db(dictDB)
return {"bgjob": jobid}
def isLinking(dictDB):
c = dictDB.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='bgjobs'")
c = c if c else dictDB
cc = c.fetchone() if c else None
if not cc:
dictDB.execute("CREATE TABLE bgjobs (id INTEGER PRIMARY KEY AUTOINCREMENT, type TEXT, data TEXT, finished INTEGER DEFAULT -1, pid DEFAULT -1)")
close_db(dictDB)
c = dictDB.execute("SELECT * FROM bgjobs WHERE finished=-1")
c = c if c else dictDB
job = c.fetchone() if c else None
if job:
pid = job["pid"]
if isrunning(dictDB, job["id"], pid):
return {"bgjob": job["id"], "otherdictID": job["data"]}
else: # mark as dead
c = dictDB.execute(f"UPDATE bgjobs SET finished=-2 WHERE pid={ques}", (pid,))
c = c if c else dictDB
return {"bgjob": -1}
def isAutoImage(dictDB):
c = dictDB.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='bgjobs'")
c = c if c else dictDB
cc = c.fetchone() if c else None
if not cc:
dictDB.execute("CREATE TABLE bgjobs (id INTEGER PRIMARY KEY AUTOINCREMENT, type TEXT, data TEXT, finished INTEGER DEFAULT -1, pid DEFAULT -1)")
close_db(dictDB)
c = dictDB.execute("SELECT * FROM bgjobs WHERE finished=-1 AND data='autoimage'")
c = c if c else dictDB
job = c.fetchone() if c else None
if job:
pid = job["pid"]
if isrunning(dictDB, job["id"], pid):
return {"bgjob": job["id"]}
else: # mark as dead
c = dictDB.execute(f"UPDATE bgjobs SET finished=-2 WHERE pid={ques}", (pid,))
c = c if c else dictDB
return {"bgjob": -1}
def getNAISCstatus(dictDB, dictID, otherdictID, bgjob):
try:
err = open("/tmp/linkNAISC-%s-%s.err" % (dictID, otherdictID))
except:
return None
if "[COMPLETED] Done\n" in err.readlines():
return {"status": "finished"}
if isrunning(dictDB, bgjob):
return {"status": "linking"}
else:
return {"status": "failed"}
def autoImageStatus(dictDB, dictID, bgjob):
try:
out = open("/tmp/autoImage-%s.out" % (dictID))
except:
return None
if "COMPLETED\n" in out.readlines():
return {"status": "finished"}
if isrunning(dictDB, bgjob):
return {"status": "working"}
else:
return {"status": "failed"}
def addAutoNumbers(dictDB, dictID, countElem, storeElem):
from xml.dom import minidom, Node
isAttr = False
if storeElem[0] == '@':
isAttr = True
storeElem = storeElem[1:]
c = dictDB.execute("select id, xml from entries")
c = c if c else dictDB
process = 0
for r in c.fetchall() if c else []:
entryID = r["id"]
xml = r["xml"]
doc = minidom.parseString(xml)
allEmpty = True
for el in doc.getElementsByTagName(countElem):
if isAttr:
if el.getAttribute(storeElem) != "":
allEmpty = False
else:
for sel in el.getElementsByTagName(storeElem):
if sel.firstChild != None and sel.firstChild.nodeValue != "":
allEmpty = False
if allEmpty:
count = 0
for el in doc.getElementsByTagName(countElem):
count += 1
if isAttr:
el.setAttribute(storeElem, str(count))
else:
for sel in el.getElementsByTagName(storeElem):
el.removeChild(sel)
n_elem = doc.createElement(storeElem)
el.appendChild(n_elem)
n_elem.appendChild(doc.createTextNode(str(count)))
process += 1
xml = doc.toxml().replace('<?xml version="1.0" ?>', '').strip()
dictDB.execute(f"update entries set xml={ques}, needs_refac=0 where id={ques}", (xml, entryID))
close_db(dictDB)
return process
def get_iso639_1():
codes = []
for line in open("libs/iso-639-3.tab").readlines():
la = line.split("\t")
if la[3] != "" and la[3] != "Part1":
codes.append({'code':la[3], 'lang':la[6]})
return codes
def get_locales():
codes = []
for code in Locale().getAvailableLocales():
codes.append({'code': code, 'lang': Locale(code).getDisplayName()})
return codes
def getLocale(configs):
locale = 'en'
if "locale" in configs["titling"] and configs["titling"]["locale"] != "":
locale = configs["titling"]["locale"]
return locale
def preprocessLex0(entryXml):
from xml.dom import minidom, Node
doc = minidom.parseString(entryXml)
headword = None
for el in doc.getElementsByTagName("form"):
if el.getAttribute("type") == "lemma":
for el2 in el.getElementsByTagName("orth"):
headword = el2.firstChild.nodeValue
if headword and headword != "":
he = doc.createElement("headword")
het = doc.createTextNode(headword)
doc.documentElement.appendChild(he)
he.appendChild(het)
return doc.toxml().replace('<?xml version="1.0" ?>', '').strip()
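# listOntolexEntries streams the dictionary as N-Triples, one triple per yielded
# line: a LexicalEntry plus rdfs:label for every entry, and a skos:definition for
# each sense/meaning/def element found (falling back to the headword when no
# sense is detected). Writing it as a generator lets the HTTP layer stream the
# output instead of building the whole document in memory.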
def listOntolexEntries(dictDB, dictID, configs, doctype, searchtext=""):
from lxml import etree as ET
if searchtext == "":
sql = f"select id, title, sortkey, xml from entries where doctype={ques} order by id"
params = (doctype, )
else:
sql = f"select s.txt, min(s.level) as level, e.id, e.sortkey, e.title, e.xml from searchables as s inner join entries as e on e.id=s.entry_id where doctype={ques} and s.txt like {ques} group by e.id order by e.id"
params = (doctype, searchtext+"%")
c = dictDB.execute(sql, params)
c = c if c else dictDB
for r in c.fetchall() if c else []:
headword = getEntryHeadword(r["xml"], configs["titling"].get("headword"))
headword = headword.replace('"', "'")
item = {"id": r["id"], "title": headword}
if configs["ident"].get("lang"):
lang = configs["ident"].get("lang")
else:
lang = siteconfig["lang"] if siteconfig["lang"] else "en";
entryId = re.sub("[\W_]", "", headword) + "_" + str(r["id"])
line = "<" + siteconfig["baseUrl"] + dictID + "#" + entryId + "> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://www.w3.org/ns/lemon/ontolex#LexicalEntry> ."
yield line; yield "\n"
line = "<" + siteconfig["baseUrl"] + dictID + "#" + entryId + "> <http://www.w3.org/2000/01/rdf-schema#label> \"" + headword + "\"@" + lang + " ."
yield line; yield "\n"
#just guessing and hoping
root = ET.fromstring(r["xml"])
num = 0
for sense in root.findall("sense"):
senseDef = sense.find("def")
if senseDef != None and senseDef.text:
defText = re.sub(r'[\r\n]', ' ', senseDef.text)
elif sense.text:
defText = re.sub(r'[\r\n]', ' ', sense.text)
else:
defText = ""
if defText != "":
num += 1
defText = defText.replace('"', "'")
senseId = 'sense:' + str(r["id"]) + "_" + str(num)
line = "<" + siteconfig["baseUrl"] + dictID + "#" + entryId + "> <http://www.w3.org/ns/lemon/ontolex#sense> <" + siteconfig["baseUrl"] + dictID + "#" + senseId + "> ."
yield line; yield "\n"
line = "<" + siteconfig["baseUrl"] + dictID + "#" + senseId + "> <http://www.w3.org/2004/02/skos/core#definition> \"" + defText + "\"@" + lang + " ."
yield line; yield "\n"
for sense in root.findall("meaning"):
senseDef = sense.find("def")
senseDesc = sense.find("semDescription")
if senseDef != None and senseDef.text:
defText = re.sub(r'[\r\n]', ' ', senseDef.text)
elif senseDesc != None and senseDesc.text:
defText = re.sub(r'[\r\n]', ' ', senseDesc.text)
elif sense.text:
defText = re.sub(r'[\r\n]', ' ', sense.text)
else:
defText = ""
if defText != "":
num += 1
defText = defText.replace('"', "'")
senseId = 'meaning:' + str(r["id"]) + "_" + str(num)
line = "<" + siteconfig["baseUrl"] + dictID + "#" + entryId + "> <http://www.w3.org/ns/lemon/ontolex#sense> <" + siteconfig["baseUrl"] + dictID + "#" + senseId + "> ."
yield line; yield "\n"
line = "<" + siteconfig["baseUrl"] + dictID + "#" + senseId + "> <http://www.w3.org/2004/02/skos/core#definition> \"" + defText + "\"@" + lang + " ."
yield line; yield "\n"
for sense in root.findall("def"):
if sense.text:
num += 1
defText = re.sub(r'[\r\n]', ' ', sense.text)
defText = defText.replace('"', "'")
senseId = 'def:' + str(r["id"]) + "_" + str(num)
line = "<" + siteconfig["baseUrl"] + dictID + "#" + entryId + "> <http://www.w3.org/ns/lemon/ontolex#sense> <" + siteconfig["baseUrl"] + dictID + "#" + senseId + "> ."
yield line; yield "\n"
line = "<" + siteconfig["baseUrl"] + dictID + "#" + senseId + "> <http://www.w3.org/2004/02/skos/core#definition> \"" + defText + "\"@" + lang + " ."
yield line; yield "\n"
# no sense detected, copy headword
if num == 0:
defText = re.sub(r'[\r\n]', ' ', headword)
defText = defText.replace('"', "'")
senseId = 'entry:' + str(r["id"]) + "_1"
line = "<" + siteconfig["baseUrl"] + dictID + "#" + entryId + "> <http://www.w3.org/ns/lemon/ontolex#sense> <" + siteconfig["baseUrl"] + dictID + "#" + senseId + "> ."
yield line; yield "\n"
line = "<" + siteconfig["baseUrl"] + dictID + "#" + senseId + "> <http://www.w3.org/2004/02/skos/core#definition> \"" + defText + "\"@" + lang + " ."
yield line; yield "\n"
def close_db(db, shouldclose = True):
if DB == 'sqlite':
db.commit()
elif DB == 'mysql' and shouldclose:
db.close()
def elexisDictAbout(dictID):
dictDB = getDB(dictID)
if dictDB:
info = {"id": dictID}
configs = readDictConfigs(dictDB)
info["sourceLang"] = configs['ident'].get('lang')
if configs["publico"]["public"]:
info["release"] = "PUBLIC"
info["license"] = configs["publico"]["licence"]
if siteconfig["licences"][configs["publico"]["licence"]]:
info["license"] = siteconfig["licences"][configs["publico"]["licence"]]["url"]
else:
info["release"] = "PRIVATE"
info["creator"] = []
for user in configs["users"]:
info["creator"].append({"email": user})
return info
else:
return None
def elexisLemmaList(dictID, limit=None, offset=0):
dictDB = getDB(dictID)
if dictDB:
info = {"language": "", "release": "PRIVATE"}
configs = readDictConfigs(dictDB)
info["language"] = configs['ident'].get('lang')
if configs["publico"]["public"]:
info["release"] = "PUBLIC"
lemmas = []
query = "SELECT id, xml FROM entries"
if limit != None and limit != "":
query += " LIMIT "+str(int(limit))
if offset != "" and int(offset) > 0:
query += " OFFSET "+str(int(offset))
c = dictDB.execute(query)
for r in c.fetchall():
lemma = {"release": info["release"], "language": info["language"], "formats": ["tei"]}
lemma["id"] = str(r["id"])
lemma["lemma"] = getEntryHeadword(r["xml"], configs["titling"].get("headword"))
pos = elexisGuessPOS(r["xml"])
if pos != "":
lemma["partOfSpeech"] = [pos]
lemmas.append(lemma)
return lemmas
else:
return None
def elexisGetLemma(dictID, headword, limit=None, offset=0):
dictDB = getDB(dictID)
if dictDB:
info = {"language": "", "release": "PRIVATE"}
configs = readDictConfigs(dictDB)
info["language"] = configs['ident'].get('lang')
if configs["publico"]["public"]:
info["release"] = "PUBLIC"
lemmas = []
query = "SELECT e.id, e.xml FROM searchables AS s INNER JOIN entries AS e on e.id=s.entry_id WHERE doctype=? AND s.txt=? GROUP BY e.id ORDER by s.level"
params = (configs["xema"]["root"], headword)
if limit != None and limit != "":
query += " LIMIT "+str(int(limit))
if offset != "" and int(offset) > 0:
query += " OFFSET "+str(int(offset))
c = dictDB.execute(query, params)
for r in c.fetchall():
lemma = {"release": info["release"], "language": info["language"], "formats": ["tei"]}
lemma["id"] = str(r["id"])
lemma["lemma"] = getEntryHeadword(r["xml"], configs["titling"].get("headword"))
pos = elexisGuessPOS(r["xml"])
if pos != "":
lemma["partOfSpeech"] = [pos]
lemmas.append(lemma)
return lemmas
else:
return None
def elexisGuessPOS(xml):
# try to guess frequent PoS element
pos = ""
if "</pos>" in xml:
arr = extractText(xml, "pos")
if arr and arr[0] != "":
pos = arr[0]
if "<partOfSpeech>" in xml:
arr = extractText(xml, "partOfSpeech")
if arr and arr[0] != "":
pos = arr[0]
if 'type="pos"' in xml:
pat = r'<gram[^>]*type="pos"[^>]*>([^<]*)</gram>'
arr = re.findall(pat, xml)
if arr and arr[0] != "":
pos = arr[0]
return pos
def elexisGetEntry(dictID, entryID):
dictDB = getDB(dictID)
if dictDB:
query = "SELECT id, xml FROM entries WHERE id=?"
c = dictDB.execute(query, (entryID, ))
r = c.fetchone()
if not r:
return None
else:
return r["xml"]
else:
return None
| []
| []
| [
"LEXONOMY_LANG",
"MYSQL_DB_PASSWORD",
"MYSQL_DB_USER",
"LEXONOMY_SITECONFIG",
"MYSQL_DB_HOST"
]
| [] | ["LEXONOMY_LANG", "MYSQL_DB_PASSWORD", "MYSQL_DB_USER", "LEXONOMY_SITECONFIG", "MYSQL_DB_HOST"] | python | 5 | 0 | |
src/command_modules/azure-cli-acs/azure/cli/command_modules/acs/_params.py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long,too-many-statements
import os.path
import platform
from argcomplete.completers import FilesCompleter
from azure.cli.core.commands.parameters import (
file_type, get_enum_type, get_resource_name_completion_list, name_type, tags_type)
from azure.cli.core.commands.validators import validate_file_or_dict
from ._completers import (
get_vm_size_completion_list, get_k8s_versions_completion_list, get_k8s_upgrades_completion_list)
from ._validators import (
validate_create_parameters, validate_k8s_client_version, validate_k8s_version, validate_linux_host_name,
validate_list_of_integers, validate_ssh_key, validate_connector_name, validate_max_pods, validate_nodepool_name)
aci_connector_os_type = ['Windows', 'Linux', 'Both']
aci_connector_chart_url = 'https://github.com/virtual-kubelet/virtual-kubelet/raw/master/charts/virtual-kubelet-for-aks-latest.tgz'
orchestrator_types = ["Custom", "DCOS", "Kubernetes", "Swarm", "DockerCE"]
regions_in_preview = [
"canadacentral",
"canadaeast",
"centralindia",
"koreasouth",
"koreacentral",
"southindia",
"uksouth",
"ukwest",
"westcentralus",
"westindia",
"westus2",
]
regions_in_prod = [
"australiaeast",
"australiasoutheast",
"brazilsouth",
"centralus",
"eastasia",
"eastus",
"eastus2",
"japaneast",
"japanwest",
"northcentralus",
"northeurope",
"southcentralus",
"southeastasia",
"westeurope",
"westus",
]
storage_profile_types = ["StorageAccount", "ManagedDisks"]
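# load_arguments registers the CLI flags for the acs/aks/openshift command groups.
# A broad argument_context such as 'aks' applies to every command with that
# prefix, while narrower contexts like 'aks create' add or override flags for a
# single command only.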
def load_arguments(self, _):
# ACS command argument configuration
with self.argument_context('acs') as c:
c.argument('resource_name', name_type,
completer=get_resource_name_completion_list('Microsoft.ContainerService/ContainerServices'),
help='Name of the container service. You can configure the default using `az configure --defaults acs=<name>`')
c.argument('name', name_type,
completer=get_resource_name_completion_list('Microsoft.ContainerService/ContainerServices'),
help='Name of the container service. You can configure the default using `az configure --defaults acs=<name>`')
c.argument('container_service_name', name_type, help='Name of the container service. You can configure the default using `az configure --defaults acs=<name>`',
completer=get_resource_name_completion_list('Microsoft.ContainerService/ContainerServices'))
c.argument('admin_username', options_list=['--admin-username', '-u'], default='azureuser')
c.argument('api_version',
help=_get_feature_in_preview_message() + 'Use API version of ACS to perform az acs operations. Available options: 2017-01-31, 2017-07-01. Default: the latest version for the location')
c.argument('dns_name_prefix', options_list=['--dns-prefix', '-d'])
c.argument('orchestrator_type', get_enum_type(orchestrator_types), options_list=['--orchestrator-type', '-t'])
c.argument('ssh_key_value', required=False, type=file_type, default=os.path.join('~', '.ssh', 'id_rsa.pub'),
completer=FilesCompleter(), validator=validate_ssh_key)
c.argument('tags', tags_type)
c.argument('disable_browser', help='Do not open browser after opening a proxy to the cluster web user interface')
with self.argument_context('acs create') as c:
c.argument('ssh_key_value', required=False, type=file_type, default=os.path.join('~', '.ssh', 'id_rsa.pub'),
completer=FilesCompleter(), validator=validate_ssh_key)
c.argument('master_profile', options_list=['--master-profile', '-m'], type=validate_file_or_dict,
help=_get_feature_in_preview_message() + 'The file or dictionary representation of the master profile. Note it will override any master settings once set')
c.argument('master_vm_size', completer=get_vm_size_completion_list,
help=_get_feature_in_preview_message())
c.argument('agent_count', type=int)
c.argument('generate_ssh_keys', action='store_true', validator=validate_create_parameters,
help='Generate SSH public and private key files if missing')
c.argument('master_osdisk_size', type=int,
help=_get_feature_in_preview_message() + 'The disk size for master pool vms. Unit in GB. Default: corresponding vmsize disk size')
c.argument('master_vnet_subnet_id', type=str,
help=_get_feature_in_preview_message() + 'The custom vnet subnet id. Note that agent pools need to use the same vnet if the master vnet is set. Default: ""')
c.argument('master_first_consecutive_static_ip', type=str,
help=_get_feature_in_preview_message() + 'The first consecutive ip used to specify static ip block.')
c.argument('master_storage_profile', get_enum_type(storage_profile_types),
help=_get_feature_in_preview_message() + 'Default: varies based on Orchestrator')
c.argument('agent_profiles', options_list=['--agent-profiles', '-a'], type=validate_file_or_dict,
help=_get_feature_in_preview_message() + 'The file or dictionary representation of the agent profiles. Note it will override any agent settings once set')
c.argument('agent_vm_size', completer=get_vm_size_completion_list,
help='Set the default size for agent pools vms.')
c.argument('agent_osdisk_size', type=int,
help=_get_feature_in_preview_message() + 'Set the default disk size for agent pools vms. Unit in GB. Default: corresponding vmsize disk size')
c.argument('agent_vnet_subnet_id', type=str,
help=_get_feature_in_preview_message() + 'Set the default custom vnet subnet id for agent pools. Note that agent pools need to use the same vnet if the master vnet is set. Default: ""')
c.argument('agent_ports', type=validate_list_of_integers,
help=_get_feature_in_preview_message() + 'Set the default ports exposed on the agent pools. Only usable for non-Kubernetes. Default: 8080,4000,80')
c.argument('agent_storage_profile', get_enum_type(storage_profile_types),
help=_get_feature_in_preview_message() + 'Set default storage profile for agent pools. Default: varies based on Orchestrator')
c.argument('windows', action='store_true',
help='If true, set the default osType of agent pools to be Windows.')
c.argument('validate', action='store_true',
help='Generate and validate the ARM template without creating any resources')
c.argument('orchestrator_version', help=_get_feature_in_preview_message() + 'Use Orchestrator Version to specify the semantic version for your choice of orchestrator.')
with self.argument_context('acs scale') as c:
c.argument('new_agent_count', type=int)
with self.argument_context('acs dcos browse') as c:
c.argument('ssh_key_file', required=False, type=file_type, default=os.path.join('~', '.ssh', 'id_rsa'),
completer=FilesCompleter(), help='Path to an SSH key file to use.')
with self.argument_context('acs dcos install-cli') as c:
c.argument('install_location', default=_get_default_install_location('dcos'))
with self.argument_context('acs kubernetes get-credentials') as c:
c.argument('path', options_list=['--file', '-f'])
with self.argument_context('acs kubernetes install-cli') as c:
c.argument('install_location', type=file_type, completer=FilesCompleter(),
default=_get_default_install_location('kubectl'))
c.argument('ssh_key_file', required=False, type=file_type, default=os.path.join('~', '.ssh', 'id_rsa'),
completer=FilesCompleter(), help='Path to an SSH key file to use.')
# AKS command argument configuration
with self.argument_context('aks') as c:
c.argument('resource_name', name_type, help='Name of the managed cluster.',
completer=get_resource_name_completion_list('Microsoft.ContainerService/ManagedClusters'))
c.argument('name', name_type, help='Name of the managed cluster.',
completer=get_resource_name_completion_list('Microsoft.ContainerService/ManagedClusters'))
c.argument('kubernetes_version', options_list=['--kubernetes-version', '-k'], validator=validate_k8s_version)
c.argument('node_count', options_list=['--node-count', '-c'], type=int)
c.argument('tags', tags_type)
with self.argument_context('aks create') as c:
c.argument('name', validator=validate_linux_host_name)
c.argument('kubernetes_version', completer=get_k8s_versions_completion_list)
c.argument('admin_username', options_list=['--admin-username', '-u'], default='azureuser')
c.argument('dns_name_prefix', options_list=['--dns-name-prefix', '-p'])
c.argument('generate_ssh_keys', action='store_true', validator=validate_create_parameters)
c.argument('node_vm_size', options_list=['--node-vm-size', '-s'], completer=get_vm_size_completion_list)
c.argument('nodepool_name', type=str, default='nodepool1',
help='Node pool name, up to 12 alphanumeric characters', validator=validate_nodepool_name)
c.argument('ssh_key_value', required=False, type=file_type, default=os.path.join('~', '.ssh', 'id_rsa.pub'),
completer=FilesCompleter(), validator=validate_ssh_key)
c.argument('aad_client_app_id')
c.argument('aad_server_app_id')
c.argument('aad_server_app_secret')
c.argument('aad_tenant_id')
c.argument('dns_service_ip')
c.argument('docker_bridge_address')
c.argument('enable_addons', options_list=['--enable-addons', '-a'])
c.argument('disable_rbac', action='store_true')
c.argument('enable_rbac', action='store_true', options_list=['--enable-rbac', '-r'],
deprecate_info=c.deprecate(redirect="--disable-rbac", hide="2.0.45"))
c.argument('max_pods', type=int, options_list=['--max-pods', '-m'], validator=validate_max_pods)
c.argument('network_plugin')
c.argument('network_policy')
c.argument('no_ssh_key', options_list=['--no-ssh-key', '-x'])
c.argument('pod_cidr')
c.argument('service_cidr')
c.argument('vnet_subnet_id')
c.argument('workspace_resource_id')
c.argument('skip_subnet_role_assignment', action='store_true')
with self.argument_context('aks disable-addons') as c:
c.argument('addons', options_list=['--addons', '-a'])
with self.argument_context('aks enable-addons') as c:
c.argument('addons', options_list=['--addons', '-a'])
c.argument('subnet_name', options_list=['--subnet-name', '-s'])
with self.argument_context('aks get-credentials') as c:
c.argument('admin', options_list=['--admin', '-a'], default=False)
c.argument('path', options_list=['--file', '-f'], type=file_type, completer=FilesCompleter(),
default=os.path.join(os.path.expanduser('~'), '.kube', 'config'))
with self.argument_context('aks install-cli') as c:
c.argument('client_version', validator=validate_k8s_client_version)
c.argument('install_location', default=_get_default_install_location('kubectl'))
with self.argument_context('aks install-connector') as c:
c.argument('aci_resource_group', help='The resource group to create the ACI container groups')
c.argument('chart_url', default=aci_connector_chart_url, help='URL to the chart')
c.argument('client_secret', help='Client secret to use with the service principal for making calls to Azure APIs')
c.argument('connector_name', default='aci-connector', help='The name for the ACI Connector', validator=validate_connector_name)
c.argument('image_tag', help='The image tag of the virtual kubelet')
c.argument('location', help='The location to create the ACI container groups')
c.argument('os_type', get_enum_type(aci_connector_os_type), help='The OS type of the connector')
c.argument('service_principal',
help='Service principal for making calls into Azure APIs. If not set, auto generate a new service principal of Contributor role, and save it locally for reusing')
with self.argument_context('aks remove-connector') as c:
c.argument('connector_name', default='aci-connector',
help='The name for the ACI Connector', validator=validate_connector_name)
c.argument('graceful', action='store_true',
help='Mention if you want to drain/uncordon your aci-connector to move your applications')
c.argument('os_type', get_enum_type(aci_connector_os_type),
help='The OS type of the connector')
with self.argument_context('aks update-credentials') as c:
c.argument('reset_service_principal', action='store_true')
c.argument('service_principal')
c.argument('client_secret')
with self.argument_context('aks upgrade') as c:
c.argument('kubernetes_version', completer=get_k8s_upgrades_completion_list)
with self.argument_context('aks scale') as c:
c.argument('nodepool_name', type=str,
help='Node pool name, up to 12 alphanumeric characters', validator=validate_nodepool_name)
with self.argument_context('aks upgrade-connector') as c:
c.argument('aci_resource_group')
c.argument('chart_url', default=aci_connector_chart_url)
c.argument('client_secret')
c.argument('connector_name', default='aci-connector', validator=validate_connector_name)
c.argument('image_tag')
c.argument('location')
c.argument('os_type', get_enum_type(aci_connector_os_type))
c.argument('service_principal')
with self.argument_context('aks use-dev-spaces') as c:
c.argument('update', options_list=['--update'], action='store_true')
c.argument('space_name', options_list=['--space', '-s'])
c.argument('prompt', options_list=['--yes', '-y'], action='store_true', help='Do not prompt for confirmation. Requires --space.')
with self.argument_context('aks remove-dev-spaces') as c:
c.argument('prompt', options_list=['--yes', '-y'], action='store_true', help='Do not prompt for confirmation')
# OpenShift command argument configuration
with self.argument_context('openshift') as c:
c.argument('resource_name', name_type, help='Name of the managed OpenShift cluster.',
completer=get_resource_name_completion_list('Microsoft.ContainerService/OpenShiftManagedClusters'))
c.argument('name', name_type, help='Name of the managed OpenShift cluster.',
completer=get_resource_name_completion_list('Microsoft.ContainerService/OpenShiftManagedClusters'))
c.argument('compute_count', options_list=['--compute-count', '-c'], type=int, default=4)
c.argument('tags', tags_type)
with self.argument_context('openshift create') as c:
c.argument('name', validator=validate_linux_host_name)
c.argument('compute_vm_size', options_list=['--compute-vm-size', '-s'])
def _get_default_install_location(exe_name):
system = platform.system()
if system == 'Windows':
home_dir = os.environ.get('USERPROFILE')
if not home_dir:
return None
install_location = os.path.join(home_dir, r'.azure-{0}\{0}.exe'.format(exe_name))
elif system == 'Linux' or system == 'Darwin':
install_location = '/usr/local/bin/{}'.format(exe_name)
else:
install_location = None
return install_location
def _get_feature_in_preview_message():
return "Feature in preview, only in " + ", ".join(regions_in_preview) + ". "
| []
| []
| [
"USERPROFILE"
]
| [] | ["USERPROFILE"] | python | 1 | 0 | |
configcenter/config_center_test.go | package configcenter_test
import (
_ "github.com/go-chassis/go-chassis/initiator"
"github.com/go-chassis/go-chassis/configcenter"
"github.com/go-chassis/go-chassis/core/config"
"github.com/go-chassis/go-chassis/core/config/model"
_ "github.com/go-chassis/go-chassis/core/registry/servicecenter"
"github.com/go-chassis/go-archaius"
"github.com/go-chassis/go-archaius/core"
"github.com/go-chassis/go-chassis/core/registry"
"github.com/stretchr/testify/assert"
"os"
"testing"
)
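// These tests point CHASSIS_HOME at the example server configuration under
// GOPATH and then exercise InitConfigCenter with a series of mostly invalid
// client settings (empty or invalid server URIs, missing tenant, oversized
// service name). They mainly check that initialization degrades gracefully
// instead of panicking; only the invalid-name case asserts on the returned error.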
func TestInitConfigCenter(t *testing.T) {
t.Log("Testing InitConfigCenter function")
gopath := os.Getenv("GOPATH")
os.Setenv("CHASSIS_HOME", gopath+"/src/github.com/go-chassis/go-chassis/examples/discovery/server/")
err := config.Init()
registry.Enable()
config.GlobalDefinition = &model.GlobalCfg{}
config.GlobalDefinition.Cse.Config.Client.ServerURI = ""
err = configcenter.InitConfigCenter()
t.Log("HEllo", err)
}
func TestInitConfigCenterWithTenantEmpty(t *testing.T) {
t.Log("Testing InitConfigCenter function with autodiscovery true and tenant name empty")
gopath := os.Getenv("GOPATH")
os.Setenv("CHASSIS_HOME", gopath+"/src/github.com/go-chassis/go-chassis/examples/discovery/server/")
err := config.Init()
config.GlobalDefinition = &model.GlobalCfg{}
config.GlobalDefinition.Cse.Config.Client.Autodiscovery = true
config.GlobalDefinition.Cse.Config.Client.TenantName = ""
err = configcenter.InitConfigCenter()
t.Log("HEllo", err)
}
func TestInitConfigCenterWithEmptyURI(t *testing.T) {
t.Log("Testing InitConfigCenter function with empty ServerURI")
gopath := os.Getenv("GOPATH")
os.Setenv("CHASSIS_HOME", gopath+"/src/github.com/go-chassis/go-chassis/examples/discovery/server/")
err := config.Init()
config.GlobalDefinition = &model.GlobalCfg{}
config.GlobalDefinition.Cse.Config.Client.ServerURI = ""
err = configcenter.InitConfigCenter()
t.Log("HEllo", err)
}
func TestInitConfigCenterWithEmptyMicroservice(t *testing.T) {
t.Log("Testing InitConfigCenter function with empty microservice definition")
gopath := os.Getenv("GOPATH")
os.Setenv("CHASSIS_HOME", gopath+"/src/github.com/go-chassis/go-chassis/examples/discovery/server/")
err := config.Init()
config.MicroserviceDefinition = &model.MicroserviceCfg{}
err = configcenter.InitConfigCenter()
t.Log("HEllo", err)
}
func TestInitConfigCenterWithEnableSSl(t *testing.T) {
t.Log("Testing InitConfigCenter function without initializing any parameter")
gopath := os.Getenv("GOPATH")
os.Setenv("CHASSIS_HOME", gopath+"/src/github.com/go-chassis/go-chassis/examples/discovery/server/")
err := config.Init()
err = configcenter.InitConfigCenter()
t.Log("HEllo", err)
}
func TestInitConfigCenterWithInvalidURI(t *testing.T) {
t.Log("Testing InitConfigCenter function with Invalid URI")
gopath := os.Getenv("GOPATH")
os.Setenv("CHASSIS_HOME", gopath+"/src/github.com/go-chassis/go-chassis/examples/discovery/server/")
err := config.Init()
config.GlobalDefinition = &model.GlobalCfg{}
config.GlobalDefinition.Cse.Config.Client.ServerURI = "hdhhhd:njdj"
config.GlobalDefinition.Cse.Config.Client.Type = "config_center"
err = configcenter.InitConfigCenter()
t.Log("HEllo", err)
}
func TestInitConfigCenterWithSSL(t *testing.T) {
t.Log("Testing InitConfigCenter function with ServerURI https://127.0.0.1:8787")
gopath := os.Getenv("GOPATH")
os.Setenv("CHASSIS_HOME", gopath+"/src/github.com/go-chassis/go-chassis/examples/discovery/server/")
err := config.Init()
config.GlobalDefinition = &model.GlobalCfg{}
config.GlobalDefinition.Cse.Config.Client.ServerURI = "https://127.0.0.1:8787"
config.GlobalDefinition.Cse.Config.Client.Type = "config_center"
err = configcenter.InitConfigCenter()
t.Log("HEllo", err)
}
func TestInitConfigCenterWithInvalidName(t *testing.T) {
t.Log("Testing InitConfigCenter function with serverURI and microservice definition")
gopath := os.Getenv("GOPATH")
os.Setenv("CHASSIS_HOME", gopath+"/src/github.com/go-chassis/go-chassis/examples/discovery/server/")
err := config.Init()
config.GlobalDefinition = &model.GlobalCfg{}
name := model.MicServiceStruct{Name: "qwertyuiopasdfghjklgsgdfsgdgafdggsahhhhh"}
config.GlobalDefinition.Cse.Config.Client.ServerURI = "https://127.0.0.1:8787"
config.MicroserviceDefinition = &model.MicroserviceCfg{ServiceDescription: name}
config.GlobalDefinition.Cse.Config.Client.Type = "config_center"
err = configcenter.InitConfigCenter()
assert.NoError(t, err)
t.Log("HEllo", err)
}
func TestEvent(t *testing.T) {
t.Log("Testing EventListener function")
factoryObj, _ := archaius.NewConfigFactory()
factoryObj.Init()
gopath := os.Getenv("GOPATH")
os.Setenv("CHASSIS_HOME", gopath+"/src/github.com/go-chassis/go-chassis/examples/discovery/server/")
config.Init()
eventValue := &core.Event{Key: "refreshMode", Value: 6}
evt := archaius.EventListener{Name: "EventHandler", Factory: factoryObj}
evt.Event(eventValue)
}
| [
"\"GOPATH\"",
"\"GOPATH\"",
"\"GOPATH\"",
"\"GOPATH\"",
"\"GOPATH\"",
"\"GOPATH\"",
"\"GOPATH\"",
"\"GOPATH\"",
"\"GOPATH\""
]
| []
| [
"GOPATH"
]
| [] | ["GOPATH"] | go | 1 | 0 | |
git/add.go | package git
import (
"bytes"
"fmt"
"io/ioutil"
"os"
)
type AddOptions struct {
Verbose bool
DryRun bool
Force bool
Interactive bool
Patch bool
Edit bool
Update bool
All bool
IgnoreRemoval bool
IntentToAdd bool
Refresh bool
IgnoreErrors bool
IgnoreMissing bool
NoWarnEmbeddedRepo bool
Chmod BitSetter
}
// Add implements the "git add" plumbing command.
func Add(c *Client, opts AddOptions, files []File) (*Index, error) {
if opts.Patch {
diffs, err := DiffFiles(c, DiffFilesOptions{}, files)
if err != nil {
return nil, err
}
var patchbuf bytes.Buffer
if err := GeneratePatch(c, DiffCommonOptions{Patch: true}, diffs, &patchbuf); err != nil {
return nil, err
}
hunks, err := splitPatch(patchbuf.String(), false)
if err != nil {
return nil, err
}
hunks, err = filterHunks("stage this hunk", hunks)
if err == userAborted {
return nil, nil
} else if err != nil {
return nil, err
}
patch, err := ioutil.TempFile("", "addpatch")
if err != nil {
return nil, err
}
defer os.Remove(patch.Name())
recombinePatch(patch, hunks)
if !opts.DryRun {
if err := Apply(c, ApplyOptions{Cached: true}, []File{File(patch.Name())}); err != nil {
return nil, err
}
}
return c.GitDir.ReadIndex()
}
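// Non-patch path: expand the requested paths with ls-files (honouring the
// standard excludes unless --force), stage them through UpdateIndex, and, unless
// this is a dry run, write the updated index back to GIT_INDEX_FILE if that is
// set, or to the repository's index file otherwise.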
if len(files) == 0 {
if !opts.All && !opts.Update {
return nil, fmt.Errorf("Nothing to add. Did you mean \"git add .\"")
}
if opts.Update || opts.All {
// LsFiles by default only shows things from under the
// current directory, but -u is supposed to update the
// whole repo.
files = []File{File(c.WorkDir)}
}
}
// Start by using ls-files to convert directories to files, and
// ignore .gitignore
lsOpts := LsFilesOptions{
Deleted: true,
Modified: true,
Others: true,
}
if !opts.Force {
lsOpts.ExcludeStandard = true
}
if opts.Update {
lsOpts.Others = false
}
if opts.IgnoreRemoval {
lsOpts.Deleted = false
}
fileIdxs, err := LsFiles(c, lsOpts, files)
if err != nil {
return nil, err
}
fles := make([]File, len(fileIdxs), len(fileIdxs))
for i, f := range fileIdxs {
file, err := f.PathName.FilePath(c)
if err != nil {
return nil, err
}
fles[i] = file
}
updateIndexOpts := UpdateIndexOptions{
Add: true,
Remove: true,
Replace: true,
Verbose: opts.Verbose,
Refresh: opts.Refresh,
Chmod: opts.Chmod,
correctRemoveMsg: true,
}
idx, err := c.GitDir.ReadIndex()
if err != nil {
return nil, err
}
newidx, err := UpdateIndex(c, idx, updateIndexOpts, fles)
if err != nil {
return nil, err
}
if !opts.DryRun {
var f *os.File
var err error
if ifile := os.Getenv("GIT_INDEX_FILE"); ifile != "" {
f, err = os.Create(ifile)
} else {
f, err = c.GitDir.Create("index")
}
if err != nil {
return nil, err
}
defer f.Close()
return newidx, newidx.WriteIndex(f)
}
return newidx, nil
}
| [
"\"GIT_INDEX_FILE\""
]
| []
| [
"GIT_INDEX_FILE"
]
| [] | ["GIT_INDEX_FILE"] | go | 1 | 0 | |
src/settings.py | """
Django settings for data_transform project.
Generated by 'django-admin startproject' using Django 2.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ')$&j_jd!vnscd87t_%a9766399cfkf9x1@+dvz4g2=%1bf0q&k'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.environ.get('DEBUG') == 'True'
DEMO_MODE = os.environ.get('DEMO_MODE') == 'True'
ALLOWED_HOSTS = ['*']
LOGS_DIR = os.environ.get(
'DJANGO_LOGS_DIR',
os.path.join(
BASE_DIR,
'logs'
)
)
# Application definition
INSTALLED_APPS = [
'rest_framework',
'django_filters',
'drf_queryfields',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'projects',
'core',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'core.middleware.cors.CorsMiddleware',
]
ROOT_URLCONF = 'urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': os.environ.get('DB_NAME', 'data_transform'),
'USER': os.environ.get('DB_USER', 'postgres'),
'PASSWORD': os.environ.get('DB_PASSWORD', 'Eir4Ooquae'),
'HOST': os.environ.get('DB_HOST', 'localhost'),
'PORT': os.environ.get('DB_PORT', '5432')
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
APPEND_SLASH = False
API_URL = 'api/'
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
STATIC_ROOT = os.path.join(
BASE_DIR,
'static'
)
MEDIA_ROOT = os.path.join(
BASE_DIR,
'media'
)
STATIC_APP_DIR = os.path.join(
BASE_DIR,
'static'
)
REST_FRAMEWORK = {
'DEFAULT_PAGINATION_CLASS': 'core.pagination.CustomPagination',
'DEFAULT_AUTHENTICATION_CLASSES': [],
'DEFAULT_PERMISSION_CLASSES': [],
'PAGE_SIZE': 100,
'EXCEPTION_HANDLER': 'core.middleware.exceptions_handler.custom_exception_handler',
'DEFAULT_FILTER_BACKENDS': (
'django_filters.rest_framework.DjangoFilterBackend',
)
}
PAGE_SIZE_QUERY_PARAM = 'page_size'
CELERY_BROKER_URL = os.environ.get(
'CELERY_BROKER_URL',
'redis://127.0.0.1:6379/0'
)
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(asctime)s %(message)s'
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
},
'exceptions-log': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': LOGS_DIR + '/exceptions.log',
'formatter': 'verbose',
},
'common-log': {
'level': 'INFO',
'class': 'logging.FileHandler',
'filename': LOGS_DIR + '/common.log',
'formatter': 'verbose',
},
'django.db.backends-log': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': LOGS_DIR + '/django.db.backends.log',
'formatter': 'verbose',
},
# 'sentry': {
# 'level': 'ERROR',
# 'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',
# 'formatter': 'verbose',
# },
'celery-log': {
'level': 'INFO',
'class': 'logging.FileHandler',
'filename': LOGS_DIR + '/celery.log',
'formatter': 'verbose',
},
},
'loggers': {
'exceptions': {
'handlers': ['exceptions-log'],
'level': 'DEBUG',
'propagate': False,
},
'common': {
'handlers': ['common-log'],
'level': 'INFO',
'propagate': False,
},
'celery.tasks': {
'handlers': ['celery-log'],
'level': 'INFO',
'propagate': False,
},
'django.db.backends': {
'handlers': ['django.db.backends-log'],
'level': 'DEBUG',
'propagate': False,
},
'django.request': {
'handlers': ['exceptions-log'],
'level': 'ERROR',
'propagate': True,
},
},
}
| []
| []
| [
"DJANGO_LOGS_DIR",
"DB_PASSWORD",
"DB_HOST",
"DB_PORT",
"DB_NAME",
"CELERY_BROKER_URL",
"DEBUG",
"DEMO_MODE",
"DB_USER"
]
| [] | ["DJANGO_LOGS_DIR", "DB_PASSWORD", "DB_HOST", "DB_PORT", "DB_NAME", "CELERY_BROKER_URL", "DEBUG", "DEMO_MODE", "DB_USER"] | python | 9 | 0 | |
scripts/webapp.py | from __future__ import print_function
import os
import sys
import json
import traceback
import styles_distribution as updater
from cgi import parse_qs
def server(environ, start_response):
if environ.get("HTTP_AUTHORIZATION") == os.getenv("AUTHORIZATION"):
status = "200 OK"
else:
status = "400 Bad Request"
request_body_size = int(environ.get('CONTENT_LENGTH', 0))
request_body = environ['wsgi.input'].read(request_body_size)
if request_body:
try:
payload = json.loads(parse_qs(request_body)["payload"][0])
environ["type"] = payload["type"]
environ["commit_hash"] = payload["commit"]
environ["build_status"] = payload["status"]
environ["branch"] = payload["branch"]
except ValueError:
status = "400 Bad Request"
else:
environ["commit_hash"] = "HEAD"
environ["build_status"] = 0
data = "\n"
environ["response_status"] = status
start_response(status, [
("Content-Type", "text/plain"),
("Content-Length", str(len(data)))
])
return iter([data])
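# update_styles runs after the response has been sent (see ExecuteOnCompletion
# below). It only proceeds for authorized requests that report a successful build
# on master, then re-runs the styles distribution script at the reported commit.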
def update_styles(environ):
if (environ["response_status"][0:3] != "200"
or environ["build_status"] != 0
or environ["branch"] != "master"
or environ["type"] == "pull_request"):
return
try:
print("Updating styles to {0}".format(environ["commit_hash"]))
# Styles directories are in ../styles/
styles_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, 'styles'))
updater.ORIGINAL_STYLES_DIRECTORY = os.path.join(styles_dir, 'original')
updater.DISTRIBUTION_STYLES_DIRECTORY = os.path.join(styles_dir, 'distribution')
updater.main(False, environ["commit_hash"])
except:
traceback.print_exc()
# Run code after request completion
#
# Adapted from http://code.google.com/p/modwsgi/wiki/RegisteringCleanupCode
class Generator:
def __init__(self, iterable, callback, environ):
self.__iterable = iterable
self.__callback = callback
self.__environ = environ
def __iter__(self):
for item in self.__iterable:
yield item
def close(self):
try:
if hasattr(self.__iterable, 'close'):
self.__iterable.close()
finally:
self.__callback(self.__environ)
class ExecuteOnCompletion:
def __init__(self, application, callback):
self.__application = application
self.__callback = callback
def __call__(self, environ, start_response):
result = self.__application(environ, start_response)
return Generator(result, self.__callback, environ)
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0) # make stdout unbuffered
application = ExecuteOnCompletion(server, update_styles)
| []
| []
| [
"AUTHORIZATION"
]
| [] | ["AUTHORIZATION"] | python | 1 | 0 | |
ocfweb/main/templatetags/staff_hours.py | from django import template
register = template.Library()
@register.filter
def gravatar(staffer, size):
return staffer.gravatar(size)
| []
| []
| []
| [] | [] | python | null | null | null |
lib/python2.7/site-packages/pygments/lexers/_lua_builtins.py | # -*- coding: utf-8 -*-
"""
pygments.lexers._lua_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This file contains the names and modules of lua functions
It is able to re-generate itself, but for adding new functions you
probably have to add some callbacks (see function module_callbacks).
Do not edit the MODULES dict by hand.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
MODULES = {'basic': ('_G',
'_VERSION',
'assert',
'collectgarbage',
'dofile',
'error',
'getfenv',
'getmetatable',
'ipairs',
'load',
'loadfile',
'loadstring',
'next',
'pairs',
'pcall',
'print',
'rawequal',
'rawget',
'rawset',
'select',
'setfenv',
'setmetatable',
'tonumber',
'tostring',
'type',
'unpack',
'xpcall'),
'coroutine': ('coroutine.create',
'coroutine.resume',
'coroutine.running',
'coroutine.status',
'coroutine.wrap',
'coroutine.yield'),
'debug': ('debug.debug',
'debug.getfenv',
'debug.gethook',
'debug.getinfo',
'debug.getlocal',
'debug.getmetatable',
'debug.getregistry',
'debug.getupvalue',
'debug.setfenv',
'debug.sethook',
'debug.setlocal',
'debug.setmetatable',
'debug.setupvalue',
'debug.traceback'),
'io': ('io.close',
'io.flush',
'io.input',
'io.lines',
'io.open',
'io.output',
'io.popen',
'io.read',
'io.tmpfile',
'io.type',
'io.write'),
'math': ('math.abs',
'math.acos',
'math.asin',
'math.atan2',
'math.atan',
'math.ceil',
'math.cosh',
'math.cos',
'math.deg',
'math.exp',
'math.floor',
'math.fmod',
'math.frexp',
'math.huge',
'math.ldexp',
'math.log10',
'math.log',
'math.max',
'math.min',
'math.modf',
'math.pi',
'math.pow',
'math.rad',
'math.random',
'math.randomseed',
'math.sinh',
'math.sin',
'math.sqrt',
'math.tanh',
'math.tan'),
'modules': ('module',
'require',
'package.cpath',
'package.loaded',
'package.loadlib',
'package.path',
'package.preload',
'package.seeall'),
'os': ('os.clock',
'os.date',
'os.difftime',
'os.execute',
'os.exit',
'os.getenv',
'os.remove',
'os.rename',
'os.setlocale',
'os.time',
'os.tmpname'),
'string': ('string.byte',
'string.char',
'string.dump',
'string.find',
'string.format',
'string.gmatch',
'string.gsub',
'string.len',
'string.lower',
'string.match',
'string.rep',
'string.reverse',
'string.sub',
'string.upper'),
'table': ('table.concat',
'table.insert',
'table.maxn',
'table.remove',
'table.sort')}
if __name__ == '__main__':
import re
try:
from urllib import urlopen
except ImportError:
from urllib.request import urlopen
import pprint
# you can't generally find out what module a function belongs to if you
# have only its name. Because of this, here are some callback functions
# that recognize if a given function belongs to a specific module
def module_callbacks():
def is_in_coroutine_module(name):
return name.startswith('coroutine.')
def is_in_modules_module(name):
if name in ['require', 'module'] or name.startswith('package'):
return True
else:
return False
def is_in_string_module(name):
return name.startswith('string.')
def is_in_table_module(name):
return name.startswith('table.')
def is_in_math_module(name):
return name.startswith('math')
def is_in_io_module(name):
return name.startswith('io.')
def is_in_os_module(name):
return name.startswith('os.')
def is_in_debug_module(name):
return name.startswith('debug.')
return {'coroutine': is_in_coroutine_module,
'modules': is_in_modules_module,
'string': is_in_string_module,
'table': is_in_table_module,
'math': is_in_math_module,
'io': is_in_io_module,
'os': is_in_os_module,
'debug': is_in_debug_module}
def get_newest_version():
f = urlopen('http://www.lua.org/manual/')
r = re.compile(r'^<A HREF="(\d\.\d)/">Lua \1</A>')
for line in f:
m = r.match(line)
if m is not None:
return m.groups()[0]
def get_lua_functions(version):
f = urlopen('http://www.lua.org/manual/%s/' % version)
r = re.compile(r'^<A HREF="manual.html#pdf-(.+)">\1</A>')
functions = []
for line in f:
m = r.match(line)
if m is not None:
functions.append(m.groups()[0])
return functions
def get_function_module(name):
for mod, cb in module_callbacks().items():
if cb(name):
return mod
if '.' in name:
return name.split('.')[0]
else:
return 'basic'
def regenerate(filename, modules):
with open(filename) as fp:
content = fp.read()
header = content[:content.find('MODULES = {')]
footer = content[content.find("if __name__ == '__main__':"):]
with open(filename, 'w') as fp:
fp.write(header)
fp.write('MODULES = %s\n\n' % pprint.pformat(modules))
fp.write(footer)
def run():
version = get_newest_version()
print('> Downloading function index for Lua %s' % version)
functions = get_lua_functions(version)
print('> %d functions found:' % len(functions))
modules = {}
for full_function_name in functions:
print('>> %s' % full_function_name)
m = get_function_module(full_function_name)
modules.setdefault(m, []).append(full_function_name)
regenerate(__file__, modules)
run()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
init2winit/mt_eval/main.py | # coding=utf-8
# Copyright 2021 The init2winit Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Used to evaluate MT model (BLEU/cross_entropy_loss/log_perplexity).
"""
import json
import os
import sys
from absl import app
from absl import flags
from absl import logging
from init2winit import hyperparameters
from init2winit.dataset_lib import datasets
from init2winit.model_lib import models
from init2winit.mt_eval import bleu_evaluator
import jax
import tensorflow.compat.v2 as tf
# Enable flax xprof trace labelling.
os.environ['FLAX_PROFILE'] = 'true'
flags.DEFINE_string('checkpoint_dir', '', 'Path to the checkpoint to evaluate.')
flags.DEFINE_integer('seed', 0, 'seed used to initialize the computation.')
flags.DEFINE_integer('worker_id', 1,
'Client id for hparam sweeps and tuning studies.')
flags.DEFINE_string('experiment_config_filename', None,
'Path to the config.json file for this experiment.')
flags.DEFINE_string(
'model', '', 'Name of the model used to evaluate (not'
    'needed if experiment_config_filename is provided).')
flags.DEFINE_string(
'dataset', '', 'Name of the dataset used to evaluate (not'
    'needed if experiment_config_filename is provided).')
flags.DEFINE_string(
'hparam_overrides', '', 'json representation of a flattened dict of hparam '
'overrides. For nested dictionaries, the override key '
'should be specified as lr_hparams.initial_value.')
flags.DEFINE_string(
'trial_hparams_filename', None,
'Path to the hparams.json file for the trial we want to run inference on.')
flags.DEFINE_string('mt_eval_config', '',
'Json representation of the mt evaluation config.')
FLAGS = flags.FLAGS
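# Example invocation (a sketch, not part of the original module; the paths and
# the mt_eval_config keys below are hypothetical and depend on the experiment):
#   python -m init2winit.mt_eval.main \
#     --checkpoint_dir=/tmp/mt_experiment/checkpoints \
#     --experiment_config_filename=/tmp/mt_experiment/config.json \
#     --trial_hparams_filename=/tmp/mt_experiment/trial_1/hparams.json \
#     --mt_eval_config='{"eval_split": "test", "eval_batch_size": 16}'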
def main(unused_argv):
# Necessary to use the tfds loader.
tf.enable_v2_behavior()
if jax.process_count() > 1:
# TODO(ankugarg): Add support for multihost inference.
raise NotImplementedError('BLEU eval does not support multihost inference.')
rng = jax.random.PRNGKey(FLAGS.seed)
mt_eval_config = json.loads(FLAGS.mt_eval_config)
if FLAGS.experiment_config_filename:
with tf.io.gfile.GFile(FLAGS.experiment_config_filename) as f:
experiment_config = json.load(f)
if jax.process_index() == 0:
logging.info('experiment_config: %r', experiment_config)
dataset_name = experiment_config['dataset']
model_name = experiment_config['model']
else:
assert FLAGS.dataset and FLAGS.model
dataset_name = FLAGS.dataset
model_name = FLAGS.model
if jax.process_index() == 0:
logging.info('argv:\n%s', ' '.join(sys.argv))
logging.info('device_count: %d', jax.device_count())
logging.info('num_hosts : %d', jax.host_count())
logging.info('host_id : %d', jax.host_id())
model_class = models.get_model(model_name)
dataset_builder = datasets.get_dataset(dataset_name)
dataset_meta_data = datasets.get_dataset_meta_data(dataset_name)
hparam_overrides = None
if FLAGS.hparam_overrides:
if isinstance(FLAGS.hparam_overrides, str):
hparam_overrides = json.loads(FLAGS.hparam_overrides)
merged_hps = hyperparameters.build_hparams(
model_name=model_name,
initializer_name=experiment_config['initializer'],
dataset_name=dataset_name,
hparam_file=FLAGS.trial_hparams_filename,
hparam_overrides=hparam_overrides)
if jax.process_index() == 0:
logging.info('Merged hps are: %s', json.dumps(merged_hps.to_json()))
evaluator = bleu_evaluator.BLEUEvaluator(FLAGS.checkpoint_dir, merged_hps,
rng,
model_class, dataset_builder,
dataset_meta_data,
mt_eval_config)
evaluator.translate_and_calculate_bleu()
if __name__ == '__main__':
app.run(main)
| []
| []
| [
"FLAX_PROFILE"
]
| [] | ["FLAX_PROFILE"] | python | 1 | 0 | |
testbedadapter/src/main/java/eu/driver/adapter/properties/ProducerProperties.java | package eu.driver.adapter.properties;
import java.io.FileInputStream;
import java.io.IOException;
import java.util.Properties;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import eu.driver.adapter.core.CISAdapter;
/**
 * Properties object that extends the standard Kafka properties with
* properties used specifically by the Producers. Sets default values for the
* local test-bed upon creation.
*
* @author hameetepa
*
*/
public class ProducerProperties extends KafkaProperties {
private static final long serialVersionUID = -7988826780301880736L;
// Configuration Keys for Kafka Producers
public static final String KEY_SERIALIZER = ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG;
public static final String VALUE_SERIALIZER = ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG;
public static final String COMPRESSION_TYPE = ProducerConfig.COMPRESSION_TYPE_CONFIG;
private static final Logger logger = LoggerFactory.getLogger(ProducerProperties.class);
private static ProducerProperties instance;
private static Boolean secured = false;
/**
*
	 * @return The Singleton Producer Properties object containing all Kafka producer
* related configuration.
*/
public static ProducerProperties getInstance(Boolean secured) {
if (ProducerProperties.instance == null || ProducerProperties.secured != secured) {
ProducerProperties.instance = new ProducerProperties(secured);
}
return ProducerProperties.instance;
}
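	// Usage sketch (illustrative only, not part of the original adapter; the
	// generic types and the 'false' argument are assumptions):
	//   Properties props = ProducerProperties.getInstance(false);
	//   KafkaProducer<Object, Object> producer = new KafkaProducer<>(props);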
private ProducerProperties(Boolean secured) {
super();
setDefaults();
loadConfigFile();
if(secured) {
loadSSLConfigFile();
}
if (System.getenv().get("KAFKA_BROKER_URL") != null) {
setProperty("bootstrap.servers", System.getenv().get("KAFKA_BROKER_URL"));
logger.info("Using KAFKA_BROKER_URL from ENV!");
} else if (System.getProperty("KAFKA_BROKER_URL") != null) {
setProperty("bootstrap.servers", System.getProperty("KAFKA_BROKER_URL"));
logger.info("Using KAFKA_BROKER_URL from PROP!");
}
if (System.getenv().get("SCHEMA_REGISTRY_URL") != null) {
setProperty("schema.registry.url", System.getenv().get("SCHEMA_REGISTRY_URL"));
logger.info("Using SCHEMA_REGISTRY_URL from ENV!");
} else if (System.getProperty("SCHEMA_REGISTRY_URL") != null) {
setProperty("schema.registry.url", System.getProperty("SCHEMA_REGISTRY_URL"));
logger.info("Using SCHEMA_REGISTRY_URL from PROP!");
}
Properties systemProp = System.getProperties();
if (systemProp != null) {
if (systemProp.get("KAFKA_BROKER_URL")!= null) {
setProperty("bootstrap.servers", systemProp.get("KAFKA_BROKER_URL").toString());
}
if (systemProp.get("SCHEMA_REGISTRY_URL")!= null) {
setProperty("schema.registry.url", systemProp.get("SCHEMA_REGISTRY_URL").toString());
}
}
}
private void loadConfigFile() {
try {
FileInputStream fis = null;
if (CISAdapter.globalConfigPath != null) {
logger.error("Loading producer.properties from: " + CISAdapter.globalConfigPath + "/producer.properties");
fis = new FileInputStream(CISAdapter.globalConfigPath + "/producer.properties");
} else {
logger.error("Loading producer.properties from: config/client.properties");
fis = new FileInputStream("config/producer.properties");
}
load(fis);
fis.close();
} catch (Exception e) {
logger.error("Could not read producer Properties file producer.properties in config folder", e);
logger.error(e.getStackTrace().toString());
}
}
private void loadSSLConfigFile() {
try {
FileInputStream fis = null;
if (CISAdapter.globalConfigPath != null) {
fis = new FileInputStream(CISAdapter.globalConfigPath + "/ssl.properties");
} else {
fis = new FileInputStream("config/ssl.properties");
}
load(fis);
fis.close();
} catch (IOException e) {
logger.error("Could not read Client Properties file client.properties in config folder");
}
}
private void setDefaults() {
setProperty(KEY_SERIALIZER, "io.confluent.kafka.serializers.KafkaAvroSerializer");
setProperty(VALUE_SERIALIZER, "io.confluent.kafka.serializers.KafkaAvroSerializer");
setProperty(COMPRESSION_TYPE, "none");
}
}
| []
| []
| []
| [] | [] | java | 0 | 0 | |
preload_test.go | package gorm_test
import (
"database/sql"
"encoding/json"
"os"
"reflect"
"testing"
"github.com/zdxie/gorm"
)
func getPreloadUser(name string) *User {
return getPreparedUser(name, "Preload")
}
func checkUserHasPreloadData(user User, t *testing.T) {
u := getPreloadUser(user.Name)
if user.BillingAddress.Address1 != u.BillingAddress.Address1 {
t.Error("Failed to preload user's BillingAddress")
}
if user.ShippingAddress.Address1 != u.ShippingAddress.Address1 {
t.Error("Failed to preload user's ShippingAddress")
}
if user.CreditCard.Number != u.CreditCard.Number {
t.Error("Failed to preload user's CreditCard")
}
if user.Company.Name != u.Company.Name {
t.Error("Failed to preload user's Company")
}
if len(user.Emails) != len(u.Emails) {
t.Error("Failed to preload user's Emails")
} else {
var found int
for _, e1 := range u.Emails {
for _, e2 := range user.Emails {
if e1.Email == e2.Email {
found++
break
}
}
}
if found != len(u.Emails) {
t.Error("Failed to preload user's email details")
}
}
}
func TestPreload(t *testing.T) {
user1 := getPreloadUser("user1")
DB.Save(user1)
preloadDB := DB.Where("role = ?", "Preload").Preload("BillingAddress").Preload("ShippingAddress").
Preload("CreditCard").Preload("Emails").Preload("Company")
var user User
preloadDB.Find(&user)
checkUserHasPreloadData(user, t)
user2 := getPreloadUser("user2")
DB.Save(user2)
user3 := getPreloadUser("user3")
DB.Save(user3)
var users []User
preloadDB.Find(&users)
for _, user := range users {
checkUserHasPreloadData(user, t)
}
var users2 []*User
preloadDB.Find(&users2)
for _, user := range users2 {
checkUserHasPreloadData(*user, t)
}
var users3 []*User
preloadDB.Preload("Emails", "email = ?", user3.Emails[0].Email).Find(&users3)
for _, user := range users3 {
if user.Name == user3.Name {
if len(user.Emails) != 1 {
t.Errorf("should only preload one emails for user3 when with condition")
}
} else if len(user.Emails) != 0 {
t.Errorf("should not preload any emails for other users when with condition")
} else if user.Emails == nil {
t.Errorf("should return an empty slice to indicate zero results")
}
}
}
func TestAutoPreload(t *testing.T) {
user1 := getPreloadUser("auto_user1")
DB.Save(user1)
preloadDB := DB.Set("gorm:auto_preload", true).Where("role = ?", "Preload")
var user User
preloadDB.Find(&user)
checkUserHasPreloadData(user, t)
user2 := getPreloadUser("auto_user2")
DB.Save(user2)
var users []User
preloadDB.Find(&users)
for _, user := range users {
checkUserHasPreloadData(user, t)
}
var users2 []*User
preloadDB.Find(&users2)
for _, user := range users2 {
checkUserHasPreloadData(*user, t)
}
}
func TestAutoPreloadFalseDoesntPreload(t *testing.T) {
user1 := getPreloadUser("auto_user1")
DB.Save(user1)
preloadDB := DB.Set("gorm:auto_preload", false).Where("role = ?", "Preload")
var user User
preloadDB.Find(&user)
if user.BillingAddress.Address1 != "" {
t.Error("AutoPreload was set to fasle, but still fetched data")
}
user2 := getPreloadUser("auto_user2")
DB.Save(user2)
var users []User
preloadDB.Find(&users)
for _, user := range users {
if user.BillingAddress.Address1 != "" {
t.Error("AutoPreload was set to fasle, but still fetched data")
}
}
}
func TestNestedPreload1(t *testing.T) {
type (
Level1 struct {
ID uint
Value string
Level2ID uint
}
Level2 struct {
ID uint
Level1 Level1
Level3ID uint
}
Level3 struct {
ID uint
Name string
Level2 Level2
}
)
DB.DropTableIfExists(&Level3{})
DB.DropTableIfExists(&Level2{})
DB.DropTableIfExists(&Level1{})
if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil {
t.Error(err)
}
want := Level3{Level2: Level2{Level1: Level1{Value: "value"}}}
if err := DB.Create(&want).Error; err != nil {
t.Error(err)
}
var got Level3
if err := DB.Preload("Level2").Preload("Level2.Level1").Find(&got).Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
}
if err := DB.Preload("Level2").Preload("Level2.Level1").Find(&got, "name = ?", "not_found").Error; err != gorm.ErrRecordNotFound {
t.Error(err)
}
}
func TestNestedPreload2(t *testing.T) {
type (
Level1 struct {
ID uint
Value string
Level2ID uint
}
Level2 struct {
ID uint
Level1s []*Level1
Level3ID uint
}
Level3 struct {
ID uint
Name string
Level2s []Level2
}
)
DB.DropTableIfExists(&Level3{})
DB.DropTableIfExists(&Level2{})
DB.DropTableIfExists(&Level1{})
if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil {
t.Error(err)
}
want := Level3{
Level2s: []Level2{
{
Level1s: []*Level1{
{Value: "value1"},
{Value: "value2"},
},
},
{
Level1s: []*Level1{
{Value: "value3"},
},
},
},
}
if err := DB.Create(&want).Error; err != nil {
t.Error(err)
}
var got Level3
if err := DB.Preload("Level2s.Level1s").Find(&got).Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
}
}
func TestNestedPreload3(t *testing.T) {
type (
Level1 struct {
ID uint
Value string
Level2ID uint
}
Level2 struct {
ID uint
Level1 Level1
Level3ID uint
}
Level3 struct {
Name string
ID uint
Level2s []Level2
}
)
DB.DropTableIfExists(&Level3{})
DB.DropTableIfExists(&Level2{})
DB.DropTableIfExists(&Level1{})
if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil {
t.Error(err)
}
want := Level3{
Level2s: []Level2{
{Level1: Level1{Value: "value1"}},
{Level1: Level1{Value: "value2"}},
},
}
if err := DB.Create(&want).Error; err != nil {
t.Error(err)
}
var got Level3
if err := DB.Preload("Level2s.Level1").Find(&got).Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
}
}
func TestNestedPreload4(t *testing.T) {
type (
Level1 struct {
ID uint
Value string
Level2ID uint
}
Level2 struct {
ID uint
Level1s []Level1
Level3ID uint
}
Level3 struct {
ID uint
Name string
Level2 Level2
}
)
DB.DropTableIfExists(&Level3{})
DB.DropTableIfExists(&Level2{})
DB.DropTableIfExists(&Level1{})
if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil {
t.Error(err)
}
want := Level3{
Level2: Level2{
Level1s: []Level1{
{Value: "value1"},
{Value: "value2"},
},
},
}
if err := DB.Create(&want).Error; err != nil {
t.Error(err)
}
var got Level3
if err := DB.Preload("Level2.Level1s").Find(&got).Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
}
}
// Slice: []Level3
func TestNestedPreload5(t *testing.T) {
type (
Level1 struct {
ID uint
Value string
Level2ID uint
}
Level2 struct {
ID uint
Level1 Level1
Level3ID uint
}
Level3 struct {
ID uint
Name string
Level2 Level2
}
)
DB.DropTableIfExists(&Level3{})
DB.DropTableIfExists(&Level2{})
DB.DropTableIfExists(&Level1{})
if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil {
t.Error(err)
}
want := make([]Level3, 2)
want[0] = Level3{Level2: Level2{Level1: Level1{Value: "value"}}}
if err := DB.Create(&want[0]).Error; err != nil {
t.Error(err)
}
want[1] = Level3{Level2: Level2{Level1: Level1{Value: "value2"}}}
if err := DB.Create(&want[1]).Error; err != nil {
t.Error(err)
}
var got []Level3
if err := DB.Preload("Level2").Preload("Level2.Level1").Find(&got).Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
}
}
func TestNestedPreload6(t *testing.T) {
type (
Level1 struct {
ID uint
Value string
Level2ID uint
}
Level2 struct {
ID uint
Level1s []Level1
Level3ID uint
}
Level3 struct {
ID uint
Name string
Level2s []Level2
}
)
DB.DropTableIfExists(&Level3{})
DB.DropTableIfExists(&Level2{})
DB.DropTableIfExists(&Level1{})
if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil {
t.Error(err)
}
want := make([]Level3, 2)
want[0] = Level3{
Level2s: []Level2{
{
Level1s: []Level1{
{Value: "value1"},
{Value: "value2"},
},
},
{
Level1s: []Level1{
{Value: "value3"},
},
},
},
}
if err := DB.Create(&want[0]).Error; err != nil {
t.Error(err)
}
want[1] = Level3{
Level2s: []Level2{
{
Level1s: []Level1{
{Value: "value3"},
{Value: "value4"},
},
},
{
Level1s: []Level1{
{Value: "value5"},
},
},
},
}
if err := DB.Create(&want[1]).Error; err != nil {
t.Error(err)
}
var got []Level3
if err := DB.Preload("Level2s.Level1s").Find(&got).Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
}
}
func TestNestedPreload7(t *testing.T) {
type (
Level1 struct {
ID uint
Value string
Level2ID uint
}
Level2 struct {
ID uint
Level1 Level1
Level3ID uint
}
Level3 struct {
ID uint
Name string
Level2s []Level2
}
)
DB.DropTableIfExists(&Level3{})
DB.DropTableIfExists(&Level2{})
DB.DropTableIfExists(&Level1{})
if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil {
t.Error(err)
}
want := make([]Level3, 2)
want[0] = Level3{
Level2s: []Level2{
{Level1: Level1{Value: "value1"}},
{Level1: Level1{Value: "value2"}},
},
}
if err := DB.Create(&want[0]).Error; err != nil {
t.Error(err)
}
want[1] = Level3{
Level2s: []Level2{
{Level1: Level1{Value: "value3"}},
{Level1: Level1{Value: "value4"}},
},
}
if err := DB.Create(&want[1]).Error; err != nil {
t.Error(err)
}
var got []Level3
if err := DB.Preload("Level2s.Level1").Find(&got).Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
}
}
func TestNestedPreload8(t *testing.T) {
type (
Level1 struct {
ID uint
Value string
Level2ID uint
}
Level2 struct {
ID uint
Level1s []Level1
Level3ID uint
}
Level3 struct {
ID uint
Name string
Level2 Level2
}
)
DB.DropTableIfExists(&Level3{})
DB.DropTableIfExists(&Level2{})
DB.DropTableIfExists(&Level1{})
if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil {
t.Error(err)
}
want := make([]Level3, 2)
want[0] = Level3{
Level2: Level2{
Level1s: []Level1{
{Value: "value1"},
{Value: "value2"},
},
},
}
if err := DB.Create(&want[0]).Error; err != nil {
t.Error(err)
}
want[1] = Level3{
Level2: Level2{
Level1s: []Level1{
{Value: "value3"},
{Value: "value4"},
},
},
}
if err := DB.Create(&want[1]).Error; err != nil {
t.Error(err)
}
var got []Level3
if err := DB.Preload("Level2.Level1s").Find(&got).Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
}
}
func TestNestedPreload9(t *testing.T) {
type (
Level0 struct {
ID uint
Value string
Level1ID uint
}
Level1 struct {
ID uint
Value string
Level2ID uint
Level2_1ID uint
Level0s []Level0
}
Level2 struct {
ID uint
Level1s []Level1
Level3ID uint
}
Level2_1 struct {
ID uint
Level1s []Level1
Level3ID uint
}
Level3 struct {
ID uint
Name string
Level2 Level2
Level2_1 Level2_1
}
)
DB.DropTableIfExists(&Level3{})
DB.DropTableIfExists(&Level2{})
DB.DropTableIfExists(&Level2_1{})
DB.DropTableIfExists(&Level1{})
DB.DropTableIfExists(&Level0{})
if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}, &Level2_1{}, &Level0{}).Error; err != nil {
t.Error(err)
}
want := make([]Level3, 2)
want[0] = Level3{
Level2: Level2{
Level1s: []Level1{
{Value: "value1"},
{Value: "value2"},
},
},
Level2_1: Level2_1{
Level1s: []Level1{
{
Value: "value1-1",
Level0s: []Level0{{Value: "Level0-1"}},
},
{
Value: "value2-2",
Level0s: []Level0{{Value: "Level0-2"}},
},
},
},
}
if err := DB.Create(&want[0]).Error; err != nil {
t.Error(err)
}
want[1] = Level3{
Level2: Level2{
Level1s: []Level1{
{Value: "value3"},
{Value: "value4"},
},
},
Level2_1: Level2_1{
Level1s: []Level1{
{
Value: "value3-3",
Level0s: []Level0{},
},
{
Value: "value4-4",
Level0s: []Level0{},
},
},
},
}
if err := DB.Create(&want[1]).Error; err != nil {
t.Error(err)
}
var got []Level3
if err := DB.Preload("Level2").Preload("Level2.Level1s").Preload("Level2_1").Preload("Level2_1.Level1s").Preload("Level2_1.Level1s.Level0s").Find(&got).Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
}
}
type LevelA1 struct {
ID uint
Value string
}
type LevelA2 struct {
ID uint
Value string
LevelA3s []*LevelA3
}
type LevelA3 struct {
ID uint
Value string
LevelA1ID sql.NullInt64
LevelA1 *LevelA1
LevelA2ID sql.NullInt64
LevelA2 *LevelA2
}
func TestNestedPreload10(t *testing.T) {
DB.DropTableIfExists(&LevelA3{})
DB.DropTableIfExists(&LevelA2{})
DB.DropTableIfExists(&LevelA1{})
if err := DB.AutoMigrate(&LevelA1{}, &LevelA2{}, &LevelA3{}).Error; err != nil {
t.Error(err)
}
levelA1 := &LevelA1{Value: "foo"}
if err := DB.Save(levelA1).Error; err != nil {
t.Error(err)
}
want := []*LevelA2{
{
Value: "bar",
LevelA3s: []*LevelA3{
{
Value: "qux",
LevelA1: levelA1,
},
},
},
{
Value: "bar 2",
LevelA3s: []*LevelA3{},
},
}
for _, levelA2 := range want {
if err := DB.Save(levelA2).Error; err != nil {
t.Error(err)
}
}
var got []*LevelA2
if err := DB.Preload("LevelA3s.LevelA1").Find(&got).Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
}
}
type LevelB1 struct {
ID uint
Value string
LevelB3s []*LevelB3
}
type LevelB2 struct {
ID uint
Value string
}
type LevelB3 struct {
ID uint
Value string
LevelB1ID sql.NullInt64
LevelB1 *LevelB1
LevelB2s []*LevelB2 `gorm:"many2many:levelb1_levelb3_levelb2s"`
}
func TestNestedPreload11(t *testing.T) {
DB.DropTableIfExists(&LevelB2{})
DB.DropTableIfExists(&LevelB3{})
DB.DropTableIfExists(&LevelB1{})
if err := DB.AutoMigrate(&LevelB1{}, &LevelB2{}, &LevelB3{}).Error; err != nil {
t.Error(err)
}
levelB1 := &LevelB1{Value: "foo"}
if err := DB.Create(levelB1).Error; err != nil {
t.Error(err)
}
levelB3 := &LevelB3{
Value: "bar",
LevelB1ID: sql.NullInt64{Valid: true, Int64: int64(levelB1.ID)},
LevelB2s: []*LevelB2{},
}
if err := DB.Create(levelB3).Error; err != nil {
t.Error(err)
}
levelB1.LevelB3s = []*LevelB3{levelB3}
want := []*LevelB1{levelB1}
var got []*LevelB1
if err := DB.Preload("LevelB3s.LevelB2s").Find(&got).Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
}
}
type LevelC1 struct {
ID uint
Value string
LevelC2ID uint
}
type LevelC2 struct {
ID uint
Value string
LevelC1 LevelC1
}
type LevelC3 struct {
ID uint
Value string
LevelC2ID uint
LevelC2 LevelC2
}
func TestNestedPreload12(t *testing.T) {
DB.DropTableIfExists(&LevelC2{})
DB.DropTableIfExists(&LevelC3{})
DB.DropTableIfExists(&LevelC1{})
if err := DB.AutoMigrate(&LevelC1{}, &LevelC2{}, &LevelC3{}).Error; err != nil {
t.Error(err)
}
level2 := LevelC2{
Value: "c2",
LevelC1: LevelC1{
Value: "c1",
},
}
DB.Create(&level2)
want := []LevelC3{
{
Value: "c3-1",
LevelC2: level2,
}, {
Value: "c3-2",
LevelC2: level2,
},
}
for i := range want {
if err := DB.Create(&want[i]).Error; err != nil {
t.Error(err)
}
}
var got []LevelC3
if err := DB.Preload("LevelC2").Preload("LevelC2.LevelC1").Find(&got).Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
}
}
func TestManyToManyPreloadWithMultiPrimaryKeys(t *testing.T) {
if dialect := os.Getenv("GORM_DIALECT"); dialect == "" || dialect == "sqlite" || dialect == "mssql" {
return
}
type (
Level1 struct {
ID uint `gorm:"primary_key;"`
LanguageCode string `gorm:"primary_key"`
Value string
}
Level2 struct {
ID uint `gorm:"primary_key;"`
LanguageCode string `gorm:"primary_key"`
Value string
Level1s []Level1 `gorm:"many2many:levels;"`
}
)
DB.DropTableIfExists(&Level2{})
DB.DropTableIfExists(&Level1{})
DB.DropTableIfExists("levels")
if err := DB.AutoMigrate(&Level2{}, &Level1{}).Error; err != nil {
t.Error(err)
}
want := Level2{Value: "Bob", LanguageCode: "ru", Level1s: []Level1{
{Value: "ru", LanguageCode: "ru"},
{Value: "en", LanguageCode: "en"},
}}
if err := DB.Save(&want).Error; err != nil {
t.Error(err)
}
want2 := Level2{Value: "Tom", LanguageCode: "zh", Level1s: []Level1{
{Value: "zh", LanguageCode: "zh"},
{Value: "de", LanguageCode: "de"},
}}
if err := DB.Save(&want2).Error; err != nil {
t.Error(err)
}
var got Level2
if err := DB.Preload("Level1s").Find(&got, "value = ?", "Bob").Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
}
var got2 Level2
if err := DB.Preload("Level1s").Find(&got2, "value = ?", "Tom").Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got2, want2) {
t.Errorf("got %s; want %s", toJSONString(got2), toJSONString(want2))
}
var got3 []Level2
if err := DB.Preload("Level1s").Find(&got3, "value IN (?)", []string{"Bob", "Tom"}).Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got3, []Level2{got, got2}) {
t.Errorf("got %s; want %s", toJSONString(got3), toJSONString([]Level2{got, got2}))
}
var got4 []Level2
if err := DB.Preload("Level1s", "value IN (?)", []string{"zh", "ru"}).Find(&got4, "value IN (?)", []string{"Bob", "Tom"}).Error; err != nil {
t.Error(err)
}
var ruLevel1 Level1
var zhLevel1 Level1
DB.First(&ruLevel1, "value = ?", "ru")
DB.First(&zhLevel1, "value = ?", "zh")
got.Level1s = []Level1{ruLevel1}
got2.Level1s = []Level1{zhLevel1}
if !reflect.DeepEqual(got4, []Level2{got, got2}) {
t.Errorf("got %s; want %s", toJSONString(got4), toJSONString([]Level2{got, got2}))
}
if err := DB.Preload("Level1s").Find(&got4, "value IN (?)", []string{"non-existing"}).Error; err != nil {
t.Error(err)
}
}
func TestManyToManyPreloadForNestedPointer(t *testing.T) {
type (
Level1 struct {
ID uint
Value string
}
Level2 struct {
ID uint
Value string
Level1s []*Level1 `gorm:"many2many:levels;"`
}
Level3 struct {
ID uint
Value string
Level2ID sql.NullInt64
Level2 *Level2
}
)
DB.DropTableIfExists(&Level3{})
DB.DropTableIfExists(&Level2{})
DB.DropTableIfExists(&Level1{})
DB.DropTableIfExists("levels")
if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil {
t.Error(err)
}
want := Level3{
Value: "Bob",
Level2: &Level2{
Value: "Foo",
Level1s: []*Level1{
{Value: "ru"},
{Value: "en"},
},
},
}
if err := DB.Save(&want).Error; err != nil {
t.Error(err)
}
want2 := Level3{
Value: "Tom",
Level2: &Level2{
Value: "Bar",
Level1s: []*Level1{
{Value: "zh"},
{Value: "de"},
},
},
}
if err := DB.Save(&want2).Error; err != nil {
t.Error(err)
}
var got Level3
if err := DB.Preload("Level2.Level1s").Find(&got, "value = ?", "Bob").Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
}
var got2 Level3
if err := DB.Preload("Level2.Level1s").Find(&got2, "value = ?", "Tom").Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got2, want2) {
t.Errorf("got %s; want %s", toJSONString(got2), toJSONString(want2))
}
var got3 []Level3
if err := DB.Preload("Level2.Level1s").Find(&got3, "value IN (?)", []string{"Bob", "Tom"}).Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got3, []Level3{got, got2}) {
t.Errorf("got %s; want %s", toJSONString(got3), toJSONString([]Level3{got, got2}))
}
var got4 []Level3
if err := DB.Preload("Level2.Level1s", "value IN (?)", []string{"zh", "ru"}).Find(&got4, "value IN (?)", []string{"Bob", "Tom"}).Error; err != nil {
t.Error(err)
}
var got5 Level3
DB.Preload("Level2.Level1s").Find(&got5, "value = ?", "bogus")
var ruLevel1 Level1
var zhLevel1 Level1
DB.First(&ruLevel1, "value = ?", "ru")
DB.First(&zhLevel1, "value = ?", "zh")
got.Level2.Level1s = []*Level1{&ruLevel1}
got2.Level2.Level1s = []*Level1{&zhLevel1}
if !reflect.DeepEqual(got4, []Level3{got, got2}) {
t.Errorf("got %s; want %s", toJSONString(got4), toJSONString([]Level3{got, got2}))
}
}
func TestNestedManyToManyPreload(t *testing.T) {
type (
Level1 struct {
ID uint
Value string
}
Level2 struct {
ID uint
Value string
Level1s []*Level1 `gorm:"many2many:level1_level2;"`
}
Level3 struct {
ID uint
Value string
Level2s []Level2 `gorm:"many2many:level2_level3;"`
}
)
DB.DropTableIfExists(&Level1{})
DB.DropTableIfExists(&Level2{})
DB.DropTableIfExists(&Level3{})
DB.DropTableIfExists("level1_level2")
DB.DropTableIfExists("level2_level3")
if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil {
t.Error(err)
}
want := Level3{
Value: "Level3",
Level2s: []Level2{
{
Value: "Bob",
Level1s: []*Level1{
{Value: "ru"},
{Value: "en"},
},
}, {
Value: "Tom",
Level1s: []*Level1{
{Value: "zh"},
{Value: "de"},
},
},
},
}
if err := DB.Save(&want).Error; err != nil {
t.Error(err)
}
var got Level3
if err := DB.Preload("Level2s").Preload("Level2s.Level1s").Find(&got, "value = ?", "Level3").Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
}
if err := DB.Preload("Level2s.Level1s").Find(&got, "value = ?", "not_found").Error; err != gorm.ErrRecordNotFound {
t.Error(err)
}
}
func TestNestedManyToManyPreload2(t *testing.T) {
type (
Level1 struct {
ID uint
Value string
}
Level2 struct {
ID uint
Value string
Level1s []*Level1 `gorm:"many2many:level1_level2;"`
}
Level3 struct {
ID uint
Value string
Level2ID sql.NullInt64
Level2 *Level2
}
)
DB.DropTableIfExists(&Level1{})
DB.DropTableIfExists(&Level2{})
DB.DropTableIfExists(&Level3{})
DB.DropTableIfExists("level1_level2")
if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil {
t.Error(err)
}
want := Level3{
Value: "Level3",
Level2: &Level2{
Value: "Bob",
Level1s: []*Level1{
{Value: "ru"},
{Value: "en"},
},
},
}
if err := DB.Save(&want).Error; err != nil {
t.Error(err)
}
var got Level3
if err := DB.Preload("Level2.Level1s").Find(&got, "value = ?", "Level3").Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
}
if err := DB.Preload("Level2.Level1s").Find(&got, "value = ?", "not_found").Error; err != gorm.ErrRecordNotFound {
t.Error(err)
}
}
func TestNestedManyToManyPreload3(t *testing.T) {
type (
Level1 struct {
ID uint
Value string
}
Level2 struct {
ID uint
Value string
Level1s []*Level1 `gorm:"many2many:level1_level2;"`
}
Level3 struct {
ID uint
Value string
Level2ID sql.NullInt64
Level2 *Level2
}
)
DB.DropTableIfExists(&Level1{})
DB.DropTableIfExists(&Level2{})
DB.DropTableIfExists(&Level3{})
DB.DropTableIfExists("level1_level2")
if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil {
t.Error(err)
}
level1Zh := &Level1{Value: "zh"}
level1Ru := &Level1{Value: "ru"}
level1En := &Level1{Value: "en"}
level21 := &Level2{
Value: "Level2-1",
Level1s: []*Level1{level1Zh, level1Ru},
}
level22 := &Level2{
Value: "Level2-2",
Level1s: []*Level1{level1Zh, level1En},
}
wants := []*Level3{
{
Value: "Level3-1",
Level2: level21,
},
{
Value: "Level3-2",
Level2: level22,
},
{
Value: "Level3-3",
Level2: level21,
},
}
for _, want := range wants {
if err := DB.Save(&want).Error; err != nil {
t.Error(err)
}
}
var gots []*Level3
if err := DB.Preload("Level2.Level1s", func(db *gorm.DB) *gorm.DB {
return db.Order("level1.id ASC")
}).Find(&gots).Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(gots, wants) {
t.Errorf("got %s; want %s", toJSONString(gots), toJSONString(wants))
}
}
func TestNestedManyToManyPreload3ForStruct(t *testing.T) {
type (
Level1 struct {
ID uint
Value string
}
Level2 struct {
ID uint
Value string
Level1s []Level1 `gorm:"many2many:level1_level2;"`
}
Level3 struct {
ID uint
Value string
Level2ID sql.NullInt64
Level2 Level2
}
)
DB.DropTableIfExists(&Level1{})
DB.DropTableIfExists(&Level2{})
DB.DropTableIfExists(&Level3{})
DB.DropTableIfExists("level1_level2")
if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil {
t.Error(err)
}
level1Zh := Level1{Value: "zh"}
level1Ru := Level1{Value: "ru"}
level1En := Level1{Value: "en"}
level21 := Level2{
Value: "Level2-1",
Level1s: []Level1{level1Zh, level1Ru},
}
level22 := Level2{
Value: "Level2-2",
Level1s: []Level1{level1Zh, level1En},
}
wants := []*Level3{
{
Value: "Level3-1",
Level2: level21,
},
{
Value: "Level3-2",
Level2: level22,
},
{
Value: "Level3-3",
Level2: level21,
},
}
for _, want := range wants {
if err := DB.Save(&want).Error; err != nil {
t.Error(err)
}
}
var gots []*Level3
if err := DB.Preload("Level2.Level1s", func(db *gorm.DB) *gorm.DB {
return db.Order("level1.id ASC")
}).Find(&gots).Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(gots, wants) {
t.Errorf("got %s; want %s", toJSONString(gots), toJSONString(wants))
}
}
func TestNestedManyToManyPreload4(t *testing.T) {
type (
Level4 struct {
ID uint
Value string
Level3ID uint
}
Level3 struct {
ID uint
Value string
Level4s []*Level4
}
Level2 struct {
ID uint
Value string
Level3s []*Level3 `gorm:"many2many:level2_level3;"`
}
Level1 struct {
ID uint
Value string
Level2s []*Level2 `gorm:"many2many:level1_level2;"`
}
)
DB.DropTableIfExists(&Level1{})
DB.DropTableIfExists(&Level2{})
DB.DropTableIfExists(&Level3{})
DB.DropTableIfExists(&Level4{})
DB.DropTableIfExists("level1_level2")
DB.DropTableIfExists("level2_level3")
dummy := Level1{
Value: "Level1",
Level2s: []*Level2{{
Value: "Level2",
Level3s: []*Level3{{
Value: "Level3",
Level4s: []*Level4{{
Value: "Level4",
}},
}},
}},
}
if err := DB.AutoMigrate(&Level4{}, &Level3{}, &Level2{}, &Level1{}).Error; err != nil {
t.Error(err)
}
if err := DB.Save(&dummy).Error; err != nil {
t.Error(err)
}
var level1 Level1
if err := DB.Preload("Level2s").Preload("Level2s.Level3s").Preload("Level2s.Level3s.Level4s").First(&level1).Error; err != nil {
t.Error(err)
}
}
func TestManyToManyPreloadForPointer(t *testing.T) {
type (
Level1 struct {
ID uint
Value string
}
Level2 struct {
ID uint
Value string
Level1s []*Level1 `gorm:"many2many:levels;"`
}
)
DB.DropTableIfExists(&Level2{})
DB.DropTableIfExists(&Level1{})
DB.DropTableIfExists("levels")
if err := DB.AutoMigrate(&Level2{}, &Level1{}).Error; err != nil {
t.Error(err)
}
want := Level2{Value: "Bob", Level1s: []*Level1{
{Value: "ru"},
{Value: "en"},
}}
if err := DB.Save(&want).Error; err != nil {
t.Error(err)
}
want2 := Level2{Value: "Tom", Level1s: []*Level1{
{Value: "zh"},
{Value: "de"},
}}
if err := DB.Save(&want2).Error; err != nil {
t.Error(err)
}
var got Level2
if err := DB.Preload("Level1s").Find(&got, "value = ?", "Bob").Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
}
var got2 Level2
if err := DB.Preload("Level1s").Find(&got2, "value = ?", "Tom").Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got2, want2) {
t.Errorf("got %s; want %s", toJSONString(got2), toJSONString(want2))
}
var got3 []Level2
if err := DB.Preload("Level1s").Find(&got3, "value IN (?)", []string{"Bob", "Tom"}).Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got3, []Level2{got, got2}) {
t.Errorf("got %s; want %s", toJSONString(got3), toJSONString([]Level2{got, got2}))
}
var got4 []Level2
if err := DB.Preload("Level1s", "value IN (?)", []string{"zh", "ru"}).Find(&got4, "value IN (?)", []string{"Bob", "Tom"}).Error; err != nil {
t.Error(err)
}
var got5 Level2
DB.Preload("Level1s").First(&got5, "value = ?", "bogus")
var ruLevel1 Level1
var zhLevel1 Level1
DB.First(&ruLevel1, "value = ?", "ru")
DB.First(&zhLevel1, "value = ?", "zh")
got.Level1s = []*Level1{&ruLevel1}
got2.Level1s = []*Level1{&zhLevel1}
if !reflect.DeepEqual(got4, []Level2{got, got2}) {
t.Errorf("got %s; want %s", toJSONString(got4), toJSONString([]Level2{got, got2}))
}
}
func TestNilPointerSlice(t *testing.T) {
type (
Level3 struct {
ID uint
Value string
}
Level2 struct {
ID uint
Value string
Level3ID uint
Level3 *Level3
}
Level1 struct {
ID uint
Value string
Level2ID uint
Level2 *Level2
}
)
DB.DropTableIfExists(&Level3{})
DB.DropTableIfExists(&Level2{})
DB.DropTableIfExists(&Level1{})
if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil {
t.Error(err)
}
want := Level1{
Value: "Bob",
Level2: &Level2{
Value: "en",
Level3: &Level3{
Value: "native",
},
},
}
if err := DB.Save(&want).Error; err != nil {
t.Error(err)
}
want2 := Level1{
Value: "Tom",
Level2: nil,
}
if err := DB.Save(&want2).Error; err != nil {
t.Error(err)
}
var got []Level1
if err := DB.Preload("Level2").Preload("Level2.Level3").Find(&got).Error; err != nil {
t.Error(err)
}
if len(got) != 2 {
t.Errorf("got %v items, expected 2", len(got))
}
if !reflect.DeepEqual(got[0], want) && !reflect.DeepEqual(got[1], want) {
t.Errorf("got %s; want array containing %s", toJSONString(got), toJSONString(want))
}
if !reflect.DeepEqual(got[0], want2) && !reflect.DeepEqual(got[1], want2) {
t.Errorf("got %s; want array containing %s", toJSONString(got), toJSONString(want2))
}
}
func TestNilPointerSlice2(t *testing.T) {
type (
Level4 struct {
ID uint
}
Level3 struct {
ID uint
Level4ID sql.NullInt64 `sql:"index"`
Level4 *Level4
}
Level2 struct {
ID uint
Level3s []*Level3 `gorm:"many2many:level2_level3s"`
}
Level1 struct {
ID uint
Level2ID sql.NullInt64 `sql:"index"`
Level2 *Level2
}
)
DB.DropTableIfExists(new(Level4))
DB.DropTableIfExists(new(Level3))
DB.DropTableIfExists(new(Level2))
DB.DropTableIfExists(new(Level1))
if err := DB.AutoMigrate(new(Level4), new(Level3), new(Level2), new(Level1)).Error; err != nil {
t.Error(err)
}
want := new(Level1)
if err := DB.Save(want).Error; err != nil {
t.Error(err)
}
got := new(Level1)
err := DB.Preload("Level2.Level3s.Level4").Last(&got).Error
if err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
}
}
func TestPrefixedPreloadDuplication(t *testing.T) {
type (
Level4 struct {
ID uint
Name string
Level3ID uint
}
Level3 struct {
ID uint
Name string
Level4s []*Level4
}
Level2 struct {
ID uint
Name string
Level3ID sql.NullInt64 `sql:"index"`
Level3 *Level3
}
Level1 struct {
ID uint
Name string
Level2ID sql.NullInt64 `sql:"index"`
Level2 *Level2
}
)
DB.DropTableIfExists(new(Level3))
DB.DropTableIfExists(new(Level4))
DB.DropTableIfExists(new(Level2))
DB.DropTableIfExists(new(Level1))
if err := DB.AutoMigrate(new(Level3), new(Level4), new(Level2), new(Level1)).Error; err != nil {
t.Error(err)
}
lvl := &Level3{}
if err := DB.Save(lvl).Error; err != nil {
t.Error(err)
}
sublvl1 := &Level4{Level3ID: lvl.ID}
if err := DB.Save(sublvl1).Error; err != nil {
t.Error(err)
}
sublvl2 := &Level4{Level3ID: lvl.ID}
if err := DB.Save(sublvl2).Error; err != nil {
t.Error(err)
}
lvl.Level4s = []*Level4{sublvl1, sublvl2}
want1 := Level1{
Level2: &Level2{
Level3: lvl,
},
}
if err := DB.Save(&want1).Error; err != nil {
t.Error(err)
}
want2 := Level1{
Level2: &Level2{
Level3: lvl,
},
}
if err := DB.Save(&want2).Error; err != nil {
t.Error(err)
}
want := []Level1{want1, want2}
var got []Level1
err := DB.Preload("Level2.Level3.Level4s").Find(&got).Error
if err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
}
}
func TestPreloadManyToManyCallbacks(t *testing.T) {
type (
Level2 struct {
ID uint
Name string
}
Level1 struct {
ID uint
Name string
Level2s []Level2 `gorm:"many2many:level1_level2s;AssociationForeignKey:ID;ForeignKey:ID"`
}
)
DB.DropTableIfExists("level1_level2s")
DB.DropTableIfExists(new(Level1))
DB.DropTableIfExists(new(Level2))
if err := DB.AutoMigrate(new(Level1), new(Level2)).Error; err != nil {
t.Error(err)
}
lvl := Level1{
Name: "l1",
Level2s: []Level2{
{Name: "l2-1"}, {Name: "l2-2"},
},
}
DB.Save(&lvl)
called := 0
DB.Callback().Query().After("gorm:query").Register("TestPreloadManyToManyCallbacks", func(scope *gorm.Scope) {
called = called + 1
})
DB.Preload("Level2s").First(&Level1{}, "id = ?", lvl.ID)
if called != 3 {
t.Errorf("Wanted callback to be called 3 times but got %d", called)
}
}
func toJSONString(v interface{}) []byte {
r, _ := json.MarshalIndent(v, "", " ")
return r
}
| [
"\"GORM_DIALECT\""
]
| []
| [
"GORM_DIALECT"
]
| [] | ["GORM_DIALECT"] | go | 1 | 0 | |
src/cmd/dist/buildgo.go | // Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"bytes"
"fmt"
"os"
"sort"
)
/*
* Helpers for building cmd/go and cmd/cgo.
*/
// mkzdefaultcc writes zdefaultcc.go:
//
// package main
// const defaultCC = <defaultcc>
// const defaultCXX = <defaultcxx>
// const defaultPkgConfig = <defaultpkgconfig>
//
// It is invoked to write cmd/go/internal/cfg/zdefaultcc.go
// but we also write cmd/cgo/zdefaultcc.go
func mkzdefaultcc(dir, file string) {
outGo := fmt.Sprintf(
"// Code generated by go tool dist; DO NOT EDIT.\n"+
"\n"+
"package cfg\n"+
"\n"+
"const DefaultCC = `%s`\n"+
"const DefaultCXX = `%s`\n"+
"const DefaultPkgConfig = `%s`\n",
defaultcctarget, defaultcxxtarget, defaultpkgconfigtarget)
writefile(outGo, file, writeSkipSame)
// Convert file name to replace: turn go/internal/cfg into cgo.
outCgo := fmt.Sprintf(
"// Code generated by go tool dist; DO NOT EDIT.\n"+
"\n"+
"package main\n"+
"\n"+
"const defaultCC = `%s`\n"+
"const defaultCXX = `%s`\n"+
"const defaultPkgConfig = `%s`\n",
defaultcctarget, defaultcxxtarget, defaultpkgconfigtarget)
i := len(file) - len("go/internal/cfg/zdefaultcc.go")
file = file[:i] + "cgo/zdefaultcc.go"
writefile(outCgo, file, writeSkipSame)
}
// mkzosarch writes zosarch.go for cmd/go.
func mkzosarch(dir, file string) {
// sort for deterministic zosarch.go file
var list []string
for plat := range cgoEnabled {
list = append(list, plat)
}
sort.Strings(list)
var buf bytes.Buffer
buf.WriteString("// Code generated by go tool dist; DO NOT EDIT.\n\n")
buf.WriteString("package cfg\n\n")
fmt.Fprintf(&buf, "var OSArchSupportsCgo = map[string]bool{\n")
for _, plat := range list {
fmt.Fprintf(&buf, "\t%q: %v,\n", plat, cgoEnabled[plat])
}
fmt.Fprintf(&buf, "}\n")
writefile(buf.String(), file, writeSkipSame)
}
// mkzcgo writes zcgo.go for the go/build package:
//
// package build
// var cgoEnabled = map[string]bool{}
//
// It is invoked to write go/build/zcgo.go.
func mkzcgo(dir, file string) {
// sort for deterministic zcgo.go file
var list []string
for plat, hasCgo := range cgoEnabled {
if hasCgo {
list = append(list, plat)
}
}
sort.Strings(list)
var buf bytes.Buffer
fmt.Fprintf(&buf,
"// Code generated by go tool dist; DO NOT EDIT.\n"+
"\n"+
"package build\n"+
"\n"+
"const defaultCGO_ENABLED = %q\n\n"+
"var cgoEnabled = map[string]bool{\n", os.Getenv("CGO_ENABLED"))
for _, plat := range list {
fmt.Fprintf(&buf, "\t%q: true,\n", plat)
}
fmt.Fprintf(&buf, "}\n")
writefile(buf.String(), file, writeSkipSame)
}
| [
"\"CGO_ENABLED\""
]
| []
| [
"CGO_ENABLED"
]
| [] | ["CGO_ENABLED"] | go | 1 | 0 | |
example/example.go | package main
import (
"context"
"os"
vault "github.com/ONSdigital/dp-vault"
"github.com/ONSdigital/log.go/v2/log"
)
const maxRetries = 3
func main() {
log.Namespace = "vault-example"
devAddress := os.Getenv("VAULT_ADDR")
token := os.Getenv("VAULT_TOKEN")
ctx := context.Background()
client, err := vault.CreateClient(token, devAddress, maxRetries)
// In production no tokens should be logged
logData := log.Data{"address": devAddress, "token": token}
log.Info(ctx, "Created vault client", logData)
if err != nil {
log.Error(ctx, "failed to connect to vault", err, logData)
}
err = client.WriteKey("secret/shared/datasets/CPIH-0000", "PK-Key", "098980474948463874535354")
if err != nil {
log.Error(ctx, "failed to write to vault", err, logData)
}
PKKey, err := client.ReadKey("secret/shared/datasets/CPIH-0000", "PK-Key")
if err != nil {
log.Error(ctx, "failed to read PK Key from vault", err, logData)
}
logData["pk-key"] = PKKey
log.Info(ctx, "successfully written and read a key from vault", logData)
}
| [
"\"VAULT_ADDR\"",
"\"VAULT_TOKEN\""
]
| []
| [
"VAULT_ADDR",
"VAULT_TOKEN"
]
| [] | ["VAULT_ADDR", "VAULT_TOKEN"] | go | 2 | 0 | |
gorm/association_test.go | package gorm_test
import (
"fmt"
"os"
"reflect"
"sort"
"testing"
"github.com/gongxianjin/xcent-common/gorm"
)
func TestBelongsTo(t *testing.T) {
post := Post{
Title: "post belongs to",
Body: "body belongs to",
Category: Category{Name: "Category 1"},
MainCategory: Category{Name: "Main Category 1"},
}
if err := DB.Save(&post).Error; err != nil {
t.Error("Got errors when save post", err)
}
if post.Category.ID == 0 || post.MainCategory.ID == 0 {
t.Errorf("Category's primary key should be updated")
}
if post.CategoryId.Int64 == 0 || post.MainCategoryId == 0 {
t.Errorf("post's foreign key should be updated")
}
// Query
var category1 Category
DB.Model(&post).Association("Category").Find(&category1)
if category1.Name != "Category 1" {
t.Errorf("Query belongs to relations with Association")
}
var mainCategory1 Category
DB.Model(&post).Association("MainCategory").Find(&mainCategory1)
if mainCategory1.Name != "Main Category 1" {
t.Errorf("Query belongs to relations with Association")
}
var category11 Category
DB.Model(&post).Related(&category11)
if category11.Name != "Category 1" {
t.Errorf("Query belongs to relations with Related")
}
if DB.Model(&post).Association("Category").Count() != 1 {
t.Errorf("Post's category count should be 1")
}
if DB.Model(&post).Association("MainCategory").Count() != 1 {
t.Errorf("Post's main category count should be 1")
}
// Append
var category2 = Category{
Name: "Category 2",
}
DB.Model(&post).Association("Category").Append(&category2)
if category2.ID == 0 {
t.Errorf("Category should has ID when created with Append")
}
var category21 Category
DB.Model(&post).Related(&category21)
if category21.Name != "Category 2" {
t.Errorf("Category should be updated with Append")
}
if DB.Model(&post).Association("Category").Count() != 1 {
t.Errorf("Post's category count should be 1")
}
// Replace
var category3 = Category{
Name: "Category 3",
}
DB.Model(&post).Association("Category").Replace(&category3)
if category3.ID == 0 {
t.Errorf("Category should has ID when created with Replace")
}
var category31 Category
DB.Model(&post).Related(&category31)
if category31.Name != "Category 3" {
t.Errorf("Category should be updated with Replace")
}
if DB.Model(&post).Association("Category").Count() != 1 {
t.Errorf("Post's category count should be 1")
}
// Delete
DB.Model(&post).Association("Category").Delete(&category2)
if DB.Model(&post).Related(&Category{}).RecordNotFound() {
t.Errorf("Should not delete any category when Delete a unrelated Category")
}
if post.Category.Name == "" {
t.Errorf("Post's category should not be reseted when Delete a unrelated Category")
}
DB.Model(&post).Association("Category").Delete(&category3)
if post.Category.Name != "" {
t.Errorf("Post's category should be reseted after Delete")
}
var category41 Category
DB.Model(&post).Related(&category41)
if category41.Name != "" {
t.Errorf("Category should be deleted with Delete")
}
if count := DB.Model(&post).Association("Category").Count(); count != 0 {
t.Errorf("Post's category count should be 0 after Delete, but got %v", count)
}
// Clear
DB.Model(&post).Association("Category").Append(&Category{
Name: "Category 2",
})
if DB.Model(&post).Related(&Category{}).RecordNotFound() {
t.Errorf("Should find category after append")
}
if post.Category.Name == "" {
t.Errorf("Post's category should has value after Append")
}
DB.Model(&post).Association("Category").Clear()
if post.Category.Name != "" {
t.Errorf("Post's category should be cleared after Clear")
}
if !DB.Model(&post).Related(&Category{}).RecordNotFound() {
t.Errorf("Should not find any category after Clear")
}
if count := DB.Model(&post).Association("Category").Count(); count != 0 {
t.Errorf("Post's category count should be 0 after Clear, but got %v", count)
}
// Check Association mode with soft delete
category6 := Category{
Name: "Category 6",
}
DB.Model(&post).Association("Category").Append(&category6)
if count := DB.Model(&post).Association("Category").Count(); count != 1 {
t.Errorf("Post's category count should be 1 after Append, but got %v", count)
}
DB.Delete(&category6)
if count := DB.Model(&post).Association("Category").Count(); count != 0 {
t.Errorf("Post's category count should be 0 after the category has been deleted, but got %v", count)
}
if err := DB.Model(&post).Association("Category").Find(&Category{}).Error; err == nil {
t.Errorf("Post's category is not findable after Delete")
}
if count := DB.Unscoped().Model(&post).Association("Category").Count(); count != 1 {
t.Errorf("Post's category count should be 1 when query with Unscoped, but got %v", count)
}
if err := DB.Unscoped().Model(&post).Association("Category").Find(&Category{}).Error; err != nil {
t.Errorf("Post's category should be findable when query with Unscoped, got %v", err)
}
}
func TestBelongsToOverrideForeignKey1(t *testing.T) {
type Profile struct {
gorm.Model
Name string
}
type User struct {
gorm.Model
Profile Profile `gorm:"ForeignKey:ProfileRefer"`
ProfileRefer int
}
if relation, ok := DB.NewScope(&User{}).FieldByName("Profile"); ok {
if relation.Relationship.Kind != "belongs_to" ||
!reflect.DeepEqual(relation.Relationship.ForeignFieldNames, []string{"ProfileRefer"}) ||
!reflect.DeepEqual(relation.Relationship.AssociationForeignFieldNames, []string{"ID"}) {
t.Errorf("Override belongs to foreign key with tag")
}
}
}
func TestBelongsToOverrideForeignKey2(t *testing.T) {
type Profile struct {
gorm.Model
Refer string
Name string
}
type User struct {
gorm.Model
Profile Profile `gorm:"ForeignKey:ProfileID;AssociationForeignKey:Refer"`
ProfileID int
}
if relation, ok := DB.NewScope(&User{}).FieldByName("Profile"); ok {
if relation.Relationship.Kind != "belongs_to" ||
!reflect.DeepEqual(relation.Relationship.ForeignFieldNames, []string{"ProfileID"}) ||
!reflect.DeepEqual(relation.Relationship.AssociationForeignFieldNames, []string{"Refer"}) {
t.Errorf("Override belongs to foreign key with tag")
}
}
}
func TestHasOne(t *testing.T) {
user := User{
Name: "has one",
CreditCard: CreditCard{Number: "411111111111"},
}
if err := DB.Save(&user).Error; err != nil {
t.Error("Got errors when save user", err.Error())
}
if user.CreditCard.UserId.Int64 == 0 {
t.Errorf("CreditCard's foreign key should be updated")
}
// Query
var creditCard1 CreditCard
DB.Model(&user).Related(&creditCard1)
if creditCard1.Number != "411111111111" {
t.Errorf("Query has one relations with Related")
}
var creditCard11 CreditCard
DB.Model(&user).Association("CreditCard").Find(&creditCard11)
if creditCard11.Number != "411111111111" {
t.Errorf("Query has one relations with Related")
}
if DB.Model(&user).Association("CreditCard").Count() != 1 {
t.Errorf("User's credit card count should be 1")
}
// Append
var creditcard2 = CreditCard{
Number: "411111111112",
}
DB.Model(&user).Association("CreditCard").Append(&creditcard2)
if creditcard2.ID == 0 {
t.Errorf("Creditcard should has ID when created with Append")
}
var creditcard21 CreditCard
DB.Model(&user).Related(&creditcard21)
if creditcard21.Number != "411111111112" {
t.Errorf("CreditCard should be updated with Append")
}
if DB.Model(&user).Association("CreditCard").Count() != 1 {
t.Errorf("User's credit card count should be 1")
}
// Replace
var creditcard3 = CreditCard{
Number: "411111111113",
}
DB.Model(&user).Association("CreditCard").Replace(&creditcard3)
if creditcard3.ID == 0 {
t.Errorf("Creditcard should has ID when created with Replace")
}
var creditcard31 CreditCard
DB.Model(&user).Related(&creditcard31)
if creditcard31.Number != "411111111113" {
t.Errorf("CreditCard should be updated with Replace")
}
if DB.Model(&user).Association("CreditCard").Count() != 1 {
t.Errorf("User's credit card count should be 1")
}
// Delete
DB.Model(&user).Association("CreditCard").Delete(&creditcard2)
var creditcard4 CreditCard
DB.Model(&user).Related(&creditcard4)
if creditcard4.Number != "411111111113" {
t.Errorf("Should not delete credit card when Delete a unrelated CreditCard")
}
if DB.Model(&user).Association("CreditCard").Count() != 1 {
t.Errorf("User's credit card count should be 1")
}
DB.Model(&user).Association("CreditCard").Delete(&creditcard3)
if !DB.Model(&user).Related(&CreditCard{}).RecordNotFound() {
t.Errorf("Should delete credit card with Delete")
}
if DB.Model(&user).Association("CreditCard").Count() != 0 {
t.Errorf("User's credit card count should be 0 after Delete")
}
// Clear
var creditcard5 = CreditCard{
Number: "411111111115",
}
DB.Model(&user).Association("CreditCard").Append(&creditcard5)
if DB.Model(&user).Related(&CreditCard{}).RecordNotFound() {
t.Errorf("Should added credit card with Append")
}
if DB.Model(&user).Association("CreditCard").Count() != 1 {
t.Errorf("User's credit card count should be 1")
}
DB.Model(&user).Association("CreditCard").Clear()
if !DB.Model(&user).Related(&CreditCard{}).RecordNotFound() {
t.Errorf("Credit card should be deleted with Clear")
}
if DB.Model(&user).Association("CreditCard").Count() != 0 {
t.Errorf("User's credit card count should be 0 after Clear")
}
// Check Association mode with soft delete
var creditcard6 = CreditCard{
Number: "411111111116",
}
DB.Model(&user).Association("CreditCard").Append(&creditcard6)
if count := DB.Model(&user).Association("CreditCard").Count(); count != 1 {
t.Errorf("User's credit card count should be 1 after Append, but got %v", count)
}
DB.Delete(&creditcard6)
if count := DB.Model(&user).Association("CreditCard").Count(); count != 0 {
t.Errorf("User's credit card count should be 0 after credit card deleted, but got %v", count)
}
if err := DB.Model(&user).Association("CreditCard").Find(&CreditCard{}).Error; err == nil {
t.Errorf("User's creditcard is not findable after Delete")
}
if count := DB.Unscoped().Model(&user).Association("CreditCard").Count(); count != 1 {
t.Errorf("User's credit card count should be 1 when query with Unscoped, but got %v", count)
}
if err := DB.Unscoped().Model(&user).Association("CreditCard").Find(&CreditCard{}).Error; err != nil {
t.Errorf("User's creditcard should be findable when query with Unscoped, got %v", err)
}
}
func TestHasOneOverrideForeignKey1(t *testing.T) {
type Profile struct {
gorm.Model
Name string
UserRefer uint
}
type User struct {
gorm.Model
Profile Profile `gorm:"ForeignKey:UserRefer"`
}
if relation, ok := DB.NewScope(&User{}).FieldByName("Profile"); ok {
if relation.Relationship.Kind != "has_one" ||
!reflect.DeepEqual(relation.Relationship.ForeignFieldNames, []string{"UserRefer"}) ||
!reflect.DeepEqual(relation.Relationship.AssociationForeignFieldNames, []string{"ID"}) {
t.Errorf("Override belongs to foreign key with tag")
}
}
}
func TestHasOneOverrideForeignKey2(t *testing.T) {
type Profile struct {
gorm.Model
Name string
UserID uint
}
type User struct {
gorm.Model
Refer string
Profile Profile `gorm:"ForeignKey:UserID;AssociationForeignKey:Refer"`
}
if relation, ok := DB.NewScope(&User{}).FieldByName("Profile"); ok {
if relation.Relationship.Kind != "has_one" ||
!reflect.DeepEqual(relation.Relationship.ForeignFieldNames, []string{"UserID"}) ||
!reflect.DeepEqual(relation.Relationship.AssociationForeignFieldNames, []string{"Refer"}) {
t.Errorf("Override belongs to foreign key with tag")
}
}
}
func TestHasMany(t *testing.T) {
post := Post{
Title: "post has many",
Body: "body has many",
Comments: []*Comment{{Content: "Comment 1"}, {Content: "Comment 2"}},
}
if err := DB.Save(&post).Error; err != nil {
t.Error("Got errors when save post", err)
}
for _, comment := range post.Comments {
if comment.PostId == 0 {
t.Errorf("comment's PostID should be updated")
}
}
var compareComments = func(comments []Comment, contents []string) bool {
var commentContents []string
for _, comment := range comments {
commentContents = append(commentContents, comment.Content)
}
sort.Strings(commentContents)
sort.Strings(contents)
return reflect.DeepEqual(commentContents, contents)
}
// Query
if DB.First(&Comment{}, "content = ?", "Comment 1").Error != nil {
t.Errorf("Comment 1 should be saved")
}
var comments1 []Comment
DB.Model(&post).Association("Comments").Find(&comments1)
if !compareComments(comments1, []string{"Comment 1", "Comment 2"}) {
t.Errorf("Query has many relations with Association")
}
var comments11 []Comment
DB.Model(&post).Related(&comments11)
if !compareComments(comments11, []string{"Comment 1", "Comment 2"}) {
t.Errorf("Query has many relations with Related")
}
if DB.Model(&post).Association("Comments").Count() != 2 {
t.Errorf("Post's comments count should be 2")
}
// Append
DB.Model(&post).Association("Comments").Append(&Comment{Content: "Comment 3"})
var comments2 []Comment
DB.Model(&post).Related(&comments2)
if !compareComments(comments2, []string{"Comment 1", "Comment 2", "Comment 3"}) {
t.Errorf("Append new record to has many relations")
}
if DB.Model(&post).Association("Comments").Count() != 3 {
t.Errorf("Post's comments count should be 3 after Append")
}
// Delete
DB.Model(&post).Association("Comments").Delete(comments11)
var comments3 []Comment
DB.Model(&post).Related(&comments3)
if !compareComments(comments3, []string{"Comment 3"}) {
t.Errorf("Delete an existing resource for has many relations")
}
if DB.Model(&post).Association("Comments").Count() != 1 {
t.Errorf("Post's comments count should be 1 after Delete 2")
}
// Replace
DB.Model(&Post{Id: 999}).Association("Comments").Replace()
var comments4 []Comment
DB.Model(&post).Related(&comments4)
if len(comments4) == 0 {
t.Errorf("Replace for other resource should not clear all comments")
}
DB.Model(&post).Association("Comments").Replace(&Comment{Content: "Comment 4"}, &Comment{Content: "Comment 5"})
var comments41 []Comment
DB.Model(&post).Related(&comments41)
if !compareComments(comments41, []string{"Comment 4", "Comment 5"}) {
t.Errorf("Replace has many relations")
}
// Clear
DB.Model(&Post{Id: 999}).Association("Comments").Clear()
var comments5 []Comment
DB.Model(&post).Related(&comments5)
if len(comments5) == 0 {
t.Errorf("Clear should not clear all comments")
}
DB.Model(&post).Association("Comments").Clear()
var comments51 []Comment
DB.Model(&post).Related(&comments51)
if len(comments51) != 0 {
t.Errorf("Clear has many relations")
}
// Check Association mode with soft delete
var comment6 = Comment{
Content: "comment 6",
}
DB.Model(&post).Association("Comments").Append(&comment6)
if count := DB.Model(&post).Association("Comments").Count(); count != 1 {
t.Errorf("post's comments count should be 1 after Append, but got %v", count)
}
DB.Delete(&comment6)
if count := DB.Model(&post).Association("Comments").Count(); count != 0 {
t.Errorf("post's comments count should be 0 after comment been deleted, but got %v", count)
}
var comments6 []Comment
if DB.Model(&post).Association("Comments").Find(&comments6); len(comments6) != 0 {
t.Errorf("post's comments count should be 0 when find with Find, but got %v", len(comments6))
}
if count := DB.Unscoped().Model(&post).Association("Comments").Count(); count != 1 {
t.Errorf("post's comments count should be 1 when query with Unscoped, but got %v", count)
}
var comments61 []Comment
if DB.Unscoped().Model(&post).Association("Comments").Find(&comments61); len(comments61) != 1 {
t.Errorf("post's comments count should be 1 when query with Unscoped, but got %v", len(comments61))
}
}
func TestHasManyOverrideForeignKey1(t *testing.T) {
type Profile struct {
gorm.Model
Name string
UserRefer uint
}
type User struct {
gorm.Model
Profile []Profile `gorm:"ForeignKey:UserRefer"`
}
if relation, ok := DB.NewScope(&User{}).FieldByName("Profile"); ok {
if relation.Relationship.Kind != "has_many" ||
!reflect.DeepEqual(relation.Relationship.ForeignFieldNames, []string{"UserRefer"}) ||
!reflect.DeepEqual(relation.Relationship.AssociationForeignFieldNames, []string{"ID"}) {
t.Errorf("Override belongs to foreign key with tag")
}
}
}
func TestHasManyOverrideForeignKey2(t *testing.T) {
type Profile struct {
gorm.Model
Name string
UserID uint
}
type User struct {
gorm.Model
Refer string
Profile []Profile `gorm:"ForeignKey:UserID;AssociationForeignKey:Refer"`
}
if relation, ok := DB.NewScope(&User{}).FieldByName("Profile"); ok {
if relation.Relationship.Kind != "has_many" ||
!reflect.DeepEqual(relation.Relationship.ForeignFieldNames, []string{"UserID"}) ||
!reflect.DeepEqual(relation.Relationship.AssociationForeignFieldNames, []string{"Refer"}) {
t.Errorf("Override belongs to foreign key with tag")
}
}
}
func TestManyToMany(t *testing.T) {
DB.Raw("delete from languages")
var languages = []Language{{Name: "ZH"}, {Name: "EN"}}
user := User{Name: "Many2Many", Languages: languages}
DB.Save(&user)
// Query
var newLanguages []Language
DB.Model(&user).Related(&newLanguages, "Languages")
if len(newLanguages) != len([]string{"ZH", "EN"}) {
t.Errorf("Query many to many relations")
}
DB.Model(&user).Association("Languages").Find(&newLanguages)
if len(newLanguages) != len([]string{"ZH", "EN"}) {
t.Errorf("Should be able to find many to many relations")
}
if DB.Model(&user).Association("Languages").Count() != len([]string{"ZH", "EN"}) {
t.Errorf("Count should return correct result")
}
// Append
DB.Model(&user).Association("Languages").Append(&Language{Name: "DE"})
if DB.Where("name = ?", "DE").First(&Language{}).RecordNotFound() {
t.Errorf("New record should be saved when append")
}
languageA := Language{Name: "AA"}
DB.Save(&languageA)
DB.Model(&User{Id: user.Id}).Association("Languages").Append(&languageA)
languageC := Language{Name: "CC"}
DB.Save(&languageC)
DB.Model(&user).Association("Languages").Append(&[]Language{{Name: "BB"}, languageC})
DB.Model(&User{Id: user.Id}).Association("Languages").Append(&[]Language{{Name: "DD"}, {Name: "EE"}})
totalLanguages := []string{"ZH", "EN", "DE", "AA", "BB", "CC", "DD", "EE"}
if DB.Model(&user).Association("Languages").Count() != len(totalLanguages) {
t.Errorf("All appended languages should be saved")
}
// Delete
user.Languages = []Language{}
DB.Model(&user).Association("Languages").Find(&user.Languages)
var language Language
DB.Where("name = ?", "EE").First(&language)
DB.Model(&user).Association("Languages").Delete(language, &language)
if DB.Model(&user).Association("Languages").Count() != len(totalLanguages)-1 || len(user.Languages) != len(totalLanguages)-1 {
t.Errorf("Relations should be deleted with Delete")
}
if DB.Where("name = ?", "EE").First(&Language{}).RecordNotFound() {
t.Errorf("Language EE should not be deleted")
}
DB.Where("name IN (?)", []string{"CC", "DD"}).Find(&languages)
user2 := User{Name: "Many2Many_User2", Languages: languages}
DB.Save(&user2)
DB.Model(&user).Association("Languages").Delete(languages, &languages)
if DB.Model(&user).Association("Languages").Count() != len(totalLanguages)-3 || len(user.Languages) != len(totalLanguages)-3 {
t.Errorf("Relations should be deleted with Delete")
}
if DB.Model(&user2).Association("Languages").Count() == 0 {
t.Errorf("Other user's relations should not be deleted")
}
// Replace
var languageB Language
DB.Where("name = ?", "BB").First(&languageB)
DB.Model(&user).Association("Languages").Replace(languageB)
if len(user.Languages) != 1 || DB.Model(&user).Association("Languages").Count() != 1 {
t.Errorf("Relations should be replaced")
}
DB.Model(&user).Association("Languages").Replace()
if len(user.Languages) != 0 || DB.Model(&user).Association("Languages").Count() != 0 {
t.Errorf("Relations should be replaced with empty")
}
DB.Model(&user).Association("Languages").Replace(&[]Language{{Name: "FF"}, {Name: "JJ"}})
if len(user.Languages) != 2 || DB.Model(&user).Association("Languages").Count() != len([]string{"FF", "JJ"}) {
t.Errorf("Relations should be replaced")
}
// Clear
DB.Model(&user).Association("Languages").Clear()
if len(user.Languages) != 0 || DB.Model(&user).Association("Languages").Count() != 0 {
t.Errorf("Relations should be cleared")
}
// Check Association mode with soft delete
var language6 = Language{
Name: "language 6",
}
DB.Model(&user).Association("Languages").Append(&language6)
if count := DB.Model(&user).Association("Languages").Count(); count != 1 {
t.Errorf("user's languages count should be 1 after Append, but got %v", count)
}
DB.Delete(&language6)
if count := DB.Model(&user).Association("Languages").Count(); count != 0 {
t.Errorf("user's languages count should be 0 after language been deleted, but got %v", count)
}
var languages6 []Language
if DB.Model(&user).Association("Languages").Find(&languages6); len(languages6) != 0 {
t.Errorf("user's languages count should be 0 when find with Find, but got %v", len(languages6))
}
if count := DB.Unscoped().Model(&user).Association("Languages").Count(); count != 1 {
t.Errorf("user's languages count should be 1 when query with Unscoped, but got %v", count)
}
var languages61 []Language
if DB.Unscoped().Model(&user).Association("Languages").Find(&languages61); len(languages61) != 1 {
t.Errorf("user's languages count should be 1 when query with Unscoped, but got %v", len(languages61))
}
}
func TestRelated(t *testing.T) {
user := User{
Name: "jinzhu",
BillingAddress: Address{Address1: "Billing Address - Address 1"},
ShippingAddress: Address{Address1: "Shipping Address - Address 1"},
Emails: []Email{{Email: "[email protected]"}, {Email: "jinzhu-2@[email protected]"}},
CreditCard: CreditCard{Number: "1234567890"},
Company: Company{Name: "company1"},
}
if err := DB.Save(&user).Error; err != nil {
t.Errorf("No error should happen when saving user")
}
if user.CreditCard.ID == 0 {
t.Errorf("After user save, credit card should have id")
}
if user.BillingAddress.ID == 0 {
t.Errorf("After user save, billing address should have id")
}
if user.Emails[0].Id == 0 {
t.Errorf("After user save, billing address should have id")
}
var emails []Email
DB.Model(&user).Related(&emails)
if len(emails) != 2 {
t.Errorf("Should have two emails")
}
var emails2 []Email
DB.Model(&user).Where("email = ?", "[email protected]").Related(&emails2)
if len(emails2) != 1 {
t.Errorf("Should have two emails")
}
var emails3 []*Email
DB.Model(&user).Related(&emails3)
if len(emails3) != 2 {
t.Errorf("Should have two emails")
}
var user1 User
DB.Model(&user).Related(&user1.Emails)
if len(user1.Emails) != 2 {
t.Errorf("Should have only one email match related condition")
}
var address1 Address
DB.Model(&user).Related(&address1, "BillingAddressId")
if address1.Address1 != "Billing Address - Address 1" {
t.Errorf("Should get billing address from user correctly")
}
user1 = User{}
DB.Model(&address1).Related(&user1, "BillingAddressId")
if DB.NewRecord(user1) {
t.Errorf("Should get user from address correctly")
}
var user2 User
DB.Model(&emails[0]).Related(&user2)
if user2.Id != user.Id || user2.Name != user.Name {
t.Errorf("Should get user from email correctly")
}
var creditcard CreditCard
var user3 User
DB.First(&creditcard, "number = ?", "1234567890")
DB.Model(&creditcard).Related(&user3)
if user3.Id != user.Id || user3.Name != user.Name {
t.Errorf("Should get user from credit card correctly")
}
if !DB.Model(&CreditCard{}).Related(&User{}).RecordNotFound() {
t.Errorf("RecordNotFound for Related")
}
var company Company
if DB.Model(&user).Related(&company, "Company").RecordNotFound() || company.Name != "company1" {
t.Errorf("RecordNotFound for Related")
}
}
func TestForeignKey(t *testing.T) {
for _, structField := range DB.NewScope(&User{}).GetStructFields() {
for _, foreignKey := range []string{"BillingAddressID", "ShippingAddressId", "CompanyID"} {
if structField.Name == foreignKey && !structField.IsForeignKey {
t.Errorf(fmt.Sprintf("%v should be foreign key", foreignKey))
}
}
}
for _, structField := range DB.NewScope(&Email{}).GetStructFields() {
for _, foreignKey := range []string{"UserId"} {
if structField.Name == foreignKey && !structField.IsForeignKey {
t.Errorf(fmt.Sprintf("%v should be foreign key", foreignKey))
}
}
}
for _, structField := range DB.NewScope(&Post{}).GetStructFields() {
for _, foreignKey := range []string{"CategoryId", "MainCategoryId"} {
if structField.Name == foreignKey && !structField.IsForeignKey {
t.Errorf(fmt.Sprintf("%v should be foreign key", foreignKey))
}
}
}
for _, structField := range DB.NewScope(&Comment{}).GetStructFields() {
for _, foreignKey := range []string{"PostId"} {
if structField.Name == foreignKey && !structField.IsForeignKey {
t.Errorf(fmt.Sprintf("%v should be foreign key", foreignKey))
}
}
}
}
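// testForeignKey adds a CASCADE foreign key from source's sourceFieldName column to
// target's targetFieldName column; it returns early on sqlite, which does not support
// ADD CONSTRAINT in ALTER TABLE.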
func testForeignKey(t *testing.T, source interface{}, sourceFieldName string, target interface{}, targetFieldName string) {
if dialect := os.Getenv("GORM_DIALECT"); dialect == "" || dialect == "sqlite" {
// sqlite does not support ADD CONSTRAINT in ALTER TABLE
return
}
targetScope := DB.NewScope(target)
targetTableName := targetScope.TableName()
modelScope := DB.NewScope(source)
modelField, ok := modelScope.FieldByName(sourceFieldName)
if !ok {
t.Fatalf(fmt.Sprintf("Failed to get field by name: %v", sourceFieldName))
}
targetField, ok := targetScope.FieldByName(targetFieldName)
if !ok {
t.Fatalf(fmt.Sprintf("Failed to get field by name: %v", targetFieldName))
}
dest := fmt.Sprintf("%v(%v)", targetTableName, targetField.DBName)
err := DB.Model(source).AddForeignKey(modelField.DBName, dest, "CASCADE", "CASCADE").Error
if err != nil {
t.Fatalf(fmt.Sprintf("Failed to create foreign key: %v", err))
}
}
func TestLongForeignKey(t *testing.T) {
testForeignKey(t, &NotSoLongTableName{}, "ReallyLongThingID", &ReallyLongTableNameToTestMySQLNameLengthLimit{}, "ID")
}
func TestLongForeignKeyWithShortDest(t *testing.T) {
testForeignKey(t, &ReallyLongThingThatReferencesShort{}, "ShortID", &Short{}, "ID")
}
func TestHasManyChildrenWithOneStruct(t *testing.T) {
category := Category{
Name: "main",
Categories: []Category{
{Name: "sub1"},
{Name: "sub2"},
},
}
DB.Save(&category)
}
func TestAutoSaveBelongsToAssociation(t *testing.T) {
type Company struct {
gorm.Model
Name string
}
type User struct {
gorm.Model
Name string
CompanyID uint
Company Company `gorm:"association_autoupdate:false;association_autocreate:false;"`
}
DB.Where("name = ?", "auto_save_association").Delete(&Company{})
DB.AutoMigrate(&Company{}, &User{})
DB.Save(&User{Name: "jinzhu", Company: Company{Name: "auto_save_association"}})
if !DB.Where("name = ?", "auto_save_association").First(&Company{}).RecordNotFound() {
t.Errorf("Company auto_save_association should not have been saved when autosave is false")
}
// if foreign key is set, this should be saved even if association isn't
company := Company{Name: "auto_save_association"}
DB.Save(&company)
company.Name = "auto_save_association_new_name"
user := User{Name: "jinzhu", Company: company}
DB.Save(&user)
if !DB.Where("name = ?", "auto_save_association_new_name").First(&Company{}).RecordNotFound() {
t.Errorf("Company should not have been updated")
}
if DB.Where("id = ? AND company_id = ?", user.ID, company.ID).First(&User{}).RecordNotFound() {
t.Errorf("User's foreign key should have been saved")
}
user2 := User{Name: "jinzhu_2", Company: Company{Name: "auto_save_association_2"}}
DB.Set("gorm:association_autocreate", true).Save(&user2)
if DB.Where("name = ?", "auto_save_association_2").First(&Company{}).RecordNotFound() {
t.Errorf("Company auto_save_association_2 should been created when autocreate is true")
}
user2.Company.Name = "auto_save_association_2_newname"
DB.Set("gorm:association_autoupdate", true).Save(&user2)
if DB.Where("name = ?", "auto_save_association_2_newname").First(&Company{}).RecordNotFound() {
t.Errorf("Company should been updated")
}
}
func TestAutoSaveHasOneAssociation(t *testing.T) {
type Company struct {
gorm.Model
UserID uint
Name string
}
type User struct {
gorm.Model
Name string
Company Company `gorm:"association_autoupdate:false;association_autocreate:false;"`
}
DB.Where("name = ?", "auto_save_has_one_association").Delete(&Company{})
DB.AutoMigrate(&Company{}, &User{})
DB.Save(&User{Name: "jinzhu", Company: Company{Name: "auto_save_has_one_association"}})
if !DB.Where("name = ?", "auto_save_has_one_association").First(&Company{}).RecordNotFound() {
t.Errorf("Company auto_save_has_one_association should not have been saved when autosave is false")
}
company := Company{Name: "auto_save_has_one_association"}
DB.Save(&company)
company.Name = "auto_save_has_one_association_new_name"
user := User{Name: "jinzhu", Company: company}
DB.Save(&user)
if !DB.Where("name = ?", "auto_save_has_one_association_new_name").First(&Company{}).RecordNotFound() {
t.Errorf("Company should not have been updated")
}
if !DB.Where("name = ? AND user_id = ?", "auto_save_has_one_association", user.ID).First(&Company{}).RecordNotFound() {
t.Errorf("Company should not have been updated")
}
if user.Company.UserID == 0 {
t.Errorf("UserID should be assigned")
}
company.Name = "auto_save_has_one_association_2_new_name"
DB.Set("gorm:association_autoupdate", true).Save(&user)
if DB.Where("name = ? AND user_id = ?", "auto_save_has_one_association_new_name", user.ID).First(&Company{}).RecordNotFound() {
t.Errorf("Company should been updated")
}
user2 := User{Name: "jinzhu_2", Company: Company{Name: "auto_save_has_one_association_2"}}
DB.Set("gorm:association_autocreate", true).Save(&user2)
if DB.Where("name = ?", "auto_save_has_one_association_2").First(&Company{}).RecordNotFound() {
t.Errorf("Company auto_save_has_one_association_2 should been created when autocreate is true")
}
}
func TestAutoSaveMany2ManyAssociation(t *testing.T) {
type Company struct {
gorm.Model
Name string
}
type User struct {
gorm.Model
Name string
Companies []Company `gorm:"many2many:user_companies;association_autoupdate:false;association_autocreate:false;"`
}
DB.AutoMigrate(&Company{}, &User{})
DB.Save(&User{Name: "jinzhu", Companies: []Company{{Name: "auto_save_m2m_association"}}})
if !DB.Where("name = ?", "auto_save_m2m_association").First(&Company{}).RecordNotFound() {
t.Errorf("Company auto_save_m2m_association should not have been saved when autosave is false")
}
company := Company{Name: "auto_save_m2m_association"}
DB.Save(&company)
company.Name = "auto_save_m2m_association_new_name"
user := User{Name: "jinzhu", Companies: []Company{company, {Name: "auto_save_m2m_association_new_name_2"}}}
DB.Save(&user)
if !DB.Where("name = ?", "auto_save_m2m_association_new_name").First(&Company{}).RecordNotFound() {
t.Errorf("Company should not have been updated")
}
if !DB.Where("name = ?", "auto_save_m2m_association_new_name_2").First(&Company{}).RecordNotFound() {
t.Errorf("Company should not been created")
}
if DB.Model(&user).Association("Companies").Count() != 1 {
t.Errorf("Relationship should been saved")
}
DB.Set("gorm:association_autoupdate", true).Set("gorm:association_autocreate", true).Save(&user)
if DB.Where("name = ?", "auto_save_m2m_association_new_name").First(&Company{}).RecordNotFound() {
t.Errorf("Company should been updated")
}
if DB.Where("name = ?", "auto_save_m2m_association_new_name_2").First(&Company{}).RecordNotFound() {
t.Errorf("Company should been created")
}
if DB.Model(&user).Association("Companies").Count() != 2 {
t.Errorf("Relationship should been updated")
}
}
| ["\"GORM_DIALECT\""] | [] | ["GORM_DIALECT"] | [] | ["GORM_DIALECT"] | go | 1 | 0 | |
napari/_tests/test_viewer.py | import os
import numpy as np
import pytest
from napari import Viewer, layers
from napari._tests.utils import (
add_layer_by_type,
check_view_transform_consistency,
check_viewer_functioning,
layer_test_data,
)
from napari.utils._tests.test_naming import eval_with_filename
def _get_all_keybinding_methods(type_):
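"""Return the set of methods reachable through type_'s class keymap, including those inherited from its parent."""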
obj_methods = set(super(type_, type_).class_keymap.values())
obj_methods.update(type_.class_keymap.values())
return obj_methods
viewer_methods = _get_all_keybinding_methods(Viewer)
EXPECTED_NUMBER_OF_VIEWER_METHODS = 19
def test_len_methods_viewer():
"""
Make sure we do find all the methods attached to a viewer via keybindings
"""
assert len(viewer_methods) == EXPECTED_NUMBER_OF_VIEWER_METHODS
@pytest.mark.xfail
def test_non_existing_bindings():
"""
These are conditions tested in the next unittest, but they do not exist; this
is likely due to an oversight somewhere.
"""
assert 'play' in [x.__name__ for x in viewer_methods]
assert 'toggle_fullscreen' in [x.__name__ for x in viewer_methods]
@pytest.mark.parametrize('func', viewer_methods)
def test_viewer_methods(make_napari_viewer, func):
"""Test instantiating viewer."""
viewer = make_napari_viewer()
if func.__name__ == 'toggle_fullscreen' and not os.getenv("CI"):
pytest.skip("Fullscreen cannot be tested in CI")
if func.__name__ == 'play':
pytest.skip("Play cannot be tested with Pytest")
func(viewer)
def test_viewer(make_napari_viewer):
"""Test instantiating viewer."""
viewer = make_napari_viewer()
view = viewer.window.qt_viewer
assert viewer.title == 'napari'
assert view.viewer == viewer
assert len(viewer.layers) == 0
assert view.layers.model().rowCount() == 0
assert viewer.dims.ndim == 2
assert view.dims.nsliders == viewer.dims.ndim
assert np.sum(view.dims._displayed_sliders) == 0
# Switch to 3D rendering mode and back to 2D rendering mode
viewer.dims.ndisplay = 3
assert viewer.dims.ndisplay == 3
viewer.dims.ndisplay = 2
assert viewer.dims.ndisplay == 2
EXPECTED_NUMBER_OF_LAYER_METHODS = {
'Image': 0,
'Vectors': 0,
'Surface': 0,
'Tracks': 0,
'Points': 8,
'Labels': 14,
'Shapes': 17,
}
# We unroll the layer data together with all the methods of the layer that we
# are going to test, so that if one method fails we know which one, and to
# avoid potential issues triggered by calling methods one after another.
unrolled_layer_data = []
for layer_class, data, ndim in layer_test_data:
methods = _get_all_keybinding_methods(layer_class)
for func in methods:
unrolled_layer_data.append(
(layer_class, data, ndim, func, len(methods))
)
@pytest.mark.parametrize(
'layer_class, data, ndim, func, Nmeth', unrolled_layer_data
)
@pytest.mark.parametrize('visible', [True, False])
def test_add_layer(
make_napari_viewer, layer_class, data, ndim, func, Nmeth, visible
):
viewer = make_napari_viewer()
layer = add_layer_by_type(viewer, layer_class, data, visible=visible)
check_viewer_functioning(viewer, viewer.window.qt_viewer, data, ndim)
func(layer)
assert Nmeth == EXPECTED_NUMBER_OF_LAYER_METHODS[layer_class.__name__]
@pytest.mark.parametrize('layer_class, a_unique_name, ndim', layer_test_data)
def test_add_layer_magic_name(
make_napari_viewer, layer_class, a_unique_name, ndim
):
"""Test magic_name works when using add_* for layers"""
# Tests for issue #1709
viewer = make_napari_viewer() # noqa: F841
layer = eval_with_filename(
"add_layer_by_type(viewer, layer_class, a_unique_name)",
"somefile.py",
)
assert layer.name == "a_unique_name"
def test_screenshot(make_napari_viewer):
"""Test taking a screenshot."""
viewer = make_napari_viewer()
np.random.seed(0)
# Add image
data = np.random.random((10, 15))
viewer.add_image(data)
# Add labels
data = np.random.randint(20, size=(10, 15))
viewer.add_labels(data)
# Add points
data = 20 * np.random.random((10, 2))
viewer.add_points(data)
# Add vectors
data = 20 * np.random.random((10, 2, 2))
viewer.add_vectors(data)
# Add shapes
data = 20 * np.random.random((10, 4, 2))
viewer.add_shapes(data)
# Take screenshot of the image canvas only
screenshot = viewer.screenshot(canvas_only=True)
assert screenshot.ndim == 3
# Take screenshot with the viewer included
screenshot = viewer.screenshot(canvas_only=False)
assert screenshot.ndim == 3
def test_changing_theme(make_napari_viewer):
"""Test changing the theme updates the full window."""
viewer = make_napari_viewer(show=False)
viewer.window.qt_viewer.set_welcome_visible(False)
viewer.add_points(data=None)
size = viewer.window.qt_viewer.size()
viewer.window.qt_viewer.setFixedSize(size)
assert viewer.theme == 'dark'
screenshot_dark = viewer.screenshot(canvas_only=False)
viewer.theme = 'light'
assert viewer.theme == 'light'
screenshot_light = viewer.screenshot(canvas_only=False)
equal = (screenshot_dark == screenshot_light).min(-1)
# more than 95% of the pixels should have changed between themes
assert (np.count_nonzero(equal) / equal.size) < 0.05, "Themes too similar"
with pytest.raises(ValueError):
viewer.theme = 'nonexistent_theme'
@pytest.mark.parametrize('layer_class, data, ndim', layer_test_data)
def test_roll_transpose_update(make_napari_viewer, layer_class, data, ndim):
"""Check that transpose and roll preserve correct transform sequence."""
viewer = make_napari_viewer()
np.random.seed(0)
layer = add_layer_by_type(viewer, layer_class, data)
# Set translations and scalings (match type of visual layer storing):
transf_dict = {
'translate': np.random.randint(0, 10, ndim).astype(np.float32),
'scale': np.random.rand(ndim).astype(np.float32),
}
for k, val in transf_dict.items():
setattr(layer, k, val)
if layer_class in [layers.Image, layers.Labels]:
transf_dict['translate'] -= transf_dict['scale'] / 2
# Check consistency:
check_view_transform_consistency(layer, viewer, transf_dict)
# Roll dims and check again:
viewer.dims._roll()
check_view_transform_consistency(layer, viewer, transf_dict)
# Transpose and check again:
viewer.dims._transpose()
check_view_transform_consistency(layer, viewer, transf_dict)
def test_toggling_axes(make_napari_viewer):
"""Test toggling axes."""
viewer = make_napari_viewer()
# Check axes are not visible
assert not viewer.axes.visible
# Make axes visible
viewer.axes.visible = True
assert viewer.axes.visible
# Enter 3D rendering and check axes still visible
viewer.dims.ndisplay = 3
assert viewer.axes.visible
# Make axes not visible
viewer.axes.visible = False
assert not viewer.axes.visible
def test_toggling_scale_bar(make_napari_viewer):
"""Test toggling scale bar."""
viewer = make_napari_viewer()
# Check scale bar is not visible
assert not viewer.scale_bar.visible
# Make scale bar visible
viewer.scale_bar.visible = True
assert viewer.scale_bar.visible
# Enter 3D rendering and check scale bar is still visible
viewer.dims.ndisplay = 3
assert viewer.scale_bar.visible
# Make scale bar not visible
viewer.scale_bar.visible = False
assert not viewer.scale_bar.visible
def test_removing_points_data(make_napari_viewer):
viewer = make_napari_viewer()
points = np.random.random((4, 2)) * 4
pts_layer = viewer.add_points(points)
pts_layer.data = np.zeros([0, 2])
assert len(pts_layer.data) == 0
def test_deleting_points(make_napari_viewer):
viewer = make_napari_viewer()
points = np.random.random((4, 2)) * 4
pts_layer = viewer.add_points(points)
pts_layer.selected_data = {0}
pts_layer.remove_selected()
assert len(pts_layer.data) == 3
| [] | [] | ["CI"] | [] | ["CI"] | python | 1 | 0 | |
cmd/zoekt-mirror-github/main.go | // Copyright 2016 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// This binary fetches all repos of a user or organization and clones
// them. It is strongly recommended to get a personal API token from
// https://github.com/settings/tokens, save the token in a file, and
// point the --token option to it.
package main
import (
"context"
"flag"
"fmt"
"io/ioutil"
"log"
"net/url"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/google/go-github/github"
"golang.org/x/oauth2"
"github.com/google/zoekt/gitindex"
)
func main() {
dest := flag.String("dest", "", "destination directory")
githubURL := flag.String("url", "", "GitHub Enterprise url. If not set github.com will be used as the host.")
org := flag.String("org", "", "organization to mirror")
user := flag.String("user", "", "user to mirror")
token := flag.String("token",
filepath.Join(os.Getenv("HOME"), ".github-token"),
"file holding API token.")
forks := flag.Bool("forks", false, "also mirror forks.")
deleteRepos := flag.Bool("delete", false, "delete missing repos")
namePattern := flag.String("name", "", "only clone repos whose name matches the given regexp.")
excludePattern := flag.String("exclude", "", "don't mirror repos whose names match this regexp.")
flag.Parse()
if *dest == "" {
log.Fatal("must set --dest")
}
if (*org == "") == (*user == "") {
log.Fatal("must set either --org or --user")
}
var host string
var apiBaseURL string
var client *github.Client
if *githubURL != "" {
rootURL, err := url.Parse(*githubURL)
if err != nil {
log.Fatal(err)
}
host = rootURL.Host
apiPath, err := url.Parse("/api/v3/")
if err != nil {
log.Fatal(err)
}
apiBaseURL = rootURL.ResolveReference(apiPath).String()
client, err = github.NewEnterpriseClient(apiBaseURL, apiBaseURL, nil)
if err != nil {
log.Fatal(err)
}
} else {
host = "github.com"
apiBaseURL = "https://github.com/"
client = github.NewClient(nil)
}
destDir := filepath.Join(*dest, host)
if err := os.MkdirAll(destDir, 0755); err != nil {
log.Fatal(err)
}
if *token != "" {
content, err := ioutil.ReadFile(*token)
if err != nil {
log.Fatal(err)
}
ts := oauth2.StaticTokenSource(
&oauth2.Token{
AccessToken: strings.TrimSpace(string(content)),
})
tc := oauth2.NewClient(context.Background(), ts)
if *githubURL != "" {
client, err = github.NewEnterpriseClient(apiBaseURL, apiBaseURL, tc)
if err != nil {
log.Fatal(err)
}
} else {
client = github.NewClient(tc)
}
}
var repos []*github.Repository
var err error
if *org != "" {
repos, err = getOrgRepos(client, *org)
} else if *user != "" {
repos, err = getUserRepos(client, *user)
}
if err != nil {
log.Fatal(err)
}
if !*forks {
trimmed := repos[:0]
for _, r := range repos {
if r.Fork == nil || !*r.Fork {
trimmed = append(trimmed, r)
}
}
repos = trimmed
}
filter, err := gitindex.NewFilter(*namePattern, *excludePattern)
if err != nil {
log.Fatal(err)
}
{
trimmed := repos[:0]
for _, r := range repos {
if filter.Include(*r.Name) {
trimmed = append(trimmed, r)
}
}
repos = trimmed
}
if err := cloneRepos(destDir, repos); err != nil {
log.Fatalf("cloneRepos: %v", err)
}
if *deleteRepos {
if err := deleteStaleRepos(*dest, filter, repos, *org+*user); err != nil {
log.Fatalf("deleteStaleRepos: %v", err)
}
}
}
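// deleteStaleRepos removes mirrored repositories under destDir that match the name
// filter but are no longer returned by the GitHub API for the given user or org.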
func deleteStaleRepos(destDir string, filter *gitindex.Filter, repos []*github.Repository, user string) error {
var baseURL string
if len(repos) > 0 {
baseURL = *repos[0].HTMLURL
} else {
return nil
}
u, err := url.Parse(baseURL + user)
if err != nil {
return err
}
paths, err := gitindex.ListRepos(destDir, u)
if err != nil {
return err
}
names := map[string]bool{}
for _, r := range repos {
u, err := url.Parse(*r.HTMLURL)
if err != nil {
return err
}
names[filepath.Join(u.Host, u.Path+".git")] = true
}
var toDelete []string
for _, p := range paths {
if filter.Include(p) && !names[p] {
toDelete = append(toDelete, p)
}
}
if len(toDelete) > 0 {
log.Printf("deleting repos %v", toDelete)
}
var errs []string
for _, d := range toDelete {
if err := os.RemoveAll(filepath.Join(destDir, d)); err != nil {
errs = append(errs, err.Error())
}
}
if len(errs) > 0 {
return fmt.Errorf("errors: %v", errs)
}
return nil
}
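// getOrgRepos pages through and returns all repositories of a GitHub organization.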
func getOrgRepos(client *github.Client, org string) ([]*github.Repository, error) {
var allRepos []*github.Repository
opt := &github.RepositoryListByOrgOptions{}
for {
repos, resp, err := client.Repositories.ListByOrg(context.Background(), org, opt)
if err != nil {
return nil, err
}
if len(repos) == 0 {
break
}
opt.Page = resp.NextPage
allRepos = append(allRepos, repos...)
if resp.NextPage == 0 {
break
}
}
return allRepos, nil
}
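// getUserRepos pages through and returns all repositories owned by a GitHub user.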
func getUserRepos(client *github.Client, user string) ([]*github.Repository, error) {
var allRepos []*github.Repository
opt := &github.RepositoryListOptions{}
for {
repos, resp, err := client.Repositories.List(context.Background(), user, opt)
if err != nil {
return nil, err
}
if len(repos) == 0 {
break
}
opt.Page = resp.NextPage
allRepos = append(allRepos, repos...)
if resp.NextPage == 0 {
break
}
}
return allRepos, nil
}
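// itoa formats an optional int pointer, returning the empty string when it is nil.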
func itoa(p *int) string {
if p != nil {
return strconv.Itoa(*p)
}
return ""
}
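// cloneRepos mirrors each repository into destDir with zoekt-specific git config
// entries and prints any non-empty destination reported by gitindex.CloneRepo.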
func cloneRepos(destDir string, repos []*github.Repository) error {
for _, r := range repos {
host, err := url.Parse(*r.HTMLURL)
if err != nil {
return err
}
config := map[string]string{
"zoekt.web-url-type": "github",
"zoekt.web-url": *r.HTMLURL,
"zoekt.name": filepath.Join(host.Hostname(), *r.FullName),
"zoekt.github-stars": itoa(r.StargazersCount),
"zoekt.github-watchers": itoa(r.WatchersCount),
"zoekt.github-subscribers": itoa(r.SubscribersCount),
"zoekt.github-forks": itoa(r.ForksCount),
}
dest, err := gitindex.CloneRepo(destDir, *r.FullName, *r.CloneURL, config)
if err != nil {
return err
}
if dest != "" {
fmt.Println(dest)
}
}
return nil
}
| ["\"HOME\""] | [] | ["HOME"] | [] | ["HOME"] | go | 1 | 0 | |
train.py | import argparse
import logging
import math
import os
import random
import time
from copy import deepcopy
from pathlib import Path
from threading import Thread
import numpy as np
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import torch.utils.data
import yaml
from torch.cuda import amp
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
import torch
import predict # import predict.py to get mAP after each epoch
from models.experimental import attempt_load
from models.yolo import Model
from utils.autoanchor import check_anchors
from utils.datasets import create_dataloader
from utils.general import labels_to_class_weights, increment_path, labels_to_image_weights, init_seeds, \
fitness, strip_optimizer, get_latest_run, check_dataset, check_file, check_git_status, check_img_size, \
check_requirements, print_mutation, set_logging, one_cycle, colorstr
from utils.google_utils import attempt_download
from utils.loss import ComputeLoss
from utils.plots import plot_images, plot_labels, plot_results, plot_evolution
from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first, de_parallel
from utils.wandb_logging.wandb_utils import WandbLogger, check_wandb_resume
import json
from PIL import Image
import shutil
from os import path
import sys
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from utils.general import xyxy2xywh
logger = logging.getLogger(__name__)
def train(hyp, opt, device, tb_writer=None):
logger.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items()))
save_dir, epochs, batch_size, total_batch_size, weights, rank = \
Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank
# Directories
wdir = save_dir / 'weights'
wdir.mkdir(parents=True, exist_ok=True) # make dir
last = wdir / 'last.pt'
best = wdir / 'best.pt'
results_file = save_dir / 'results.txt'
# Save run settings
with open(save_dir / 'hyp.yaml', 'w') as f:
yaml.safe_dump(hyp, f, sort_keys=False)
with open(save_dir / 'opt.yaml', 'w') as f:
yaml.safe_dump(vars(opt), f, sort_keys=False)
# Configure
# plots = not opt.evolve # create plots
plots = True # create plots
cuda = device.type != 'cpu'
init_seeds(1 + rank)
with open(opt.data) as f:
data_dict = yaml.safe_load(f) # data dict
# Logging- Doing this before checking the dataset. Might update data_dict
loggers = {'wandb': None} # loggers dict
if rank in [-1, 0]:
opt.hyp = hyp # add hyperparameters
run_id = torch.load(weights).get('wandb_id') if weights.endswith('.pt') and os.path.isfile(weights) else None
wandb_logger = WandbLogger(opt, save_dir.stem, run_id, data_dict)
loggers['wandb'] = wandb_logger.wandb
data_dict = wandb_logger.data_dict
if wandb_logger.wandb:
weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp # WandbLogger might update weights, epochs if resuming
nc = 1 if opt.single_cls else int(data_dict['nc']) # number of classes
names = ['item'] if opt.single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names
assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data) # check
is_coco = opt.data.endswith('coco.yaml') and nc == 80 # COCO dataset
# Model
pretrained = weights.endswith('.pt')
if pretrained:
# with torch_distributed_zero_first(rank):
# weights = attempt_download(weights) # download if not found locally
ckpt = torch.load(weights, map_location=device) # load checkpoint
model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create
exclude = ['anchor'] if (opt.cfg or hyp.get('anchors')) and not opt.resume else [] # exclude keys
state_dict = ckpt['model'].float().state_dict() # to FP32
state_dict = intersect_dicts(state_dict, model.state_dict(), exclude=exclude) # intersect
model.load_state_dict(state_dict, strict=False) # load
logger.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights)) # report
else:
model = Model(opt.cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create
with torch_distributed_zero_first(rank):
check_dataset(data_dict) # check
train_path = data_dict['train']
test_path = data_dict['val']
# Freeze
freeze = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11'] # parameter names to freeze (full or partial)
freeze = ['model.' + number + '.' for number in freeze]
for k, v in model.named_parameters():
v.requires_grad = True # train all layers
if any(x in k for x in freeze) and opt.fine_tune is True:
print('freezing %s' % k)
v.requires_grad = False
# Optimizer
nbs = 64 # nominal batch size
accumulate = max(round(nbs / total_batch_size), 1) # accumulate loss before optimizing
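# e.g. with total_batch_size=16 this accumulates gradients over 4 batches, matching the nominal batch size of 64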
hyp['weight_decay'] *= total_batch_size * accumulate / nbs # scale weight_decay
logger.info(f"Scaled weight_decay = {hyp['weight_decay']}")
pg0, pg1, pg2 = [], [], [] # optimizer parameter groups
for k, v in model.named_modules():
if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter):
pg2.append(v.bias) # biases
if isinstance(v, nn.BatchNorm2d):
pg0.append(v.weight) # no decay
elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter):
pg1.append(v.weight) # apply decay
if opt.adam:
optimizer = optim.Adam(pg0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999)) # adjust beta1 to momentum
else:
optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)
optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']}) # add pg1 with weight_decay
optimizer.add_param_group({'params': pg2}) # add pg2 (biases)
logger.info('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0)))
del pg0, pg1, pg2
# Scheduler https://arxiv.org/pdf/1812.01187.pdf
# https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR
if opt.linear_lr:
lf = lambda x: (1 - x / (epochs - 1)) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear
else:
lf = one_cycle(1, hyp['lrf'], epochs) # cosine 1->hyp['lrf']
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
# plot_lr_scheduler(optimizer, scheduler, epochs)
# EMA
ema = ModelEMA(model) if rank in [-1, 0] else None
# Resume
start_epoch, best_fitness = 0, 0.0
if pretrained:
# Optimizer
if ckpt['optimizer'] is not None:
optimizer.load_state_dict(ckpt['optimizer'])
best_fitness = ckpt['best_fitness']
# EMA
if ema and ckpt.get('ema'):
ema.ema.load_state_dict(ckpt['ema'].float().state_dict())
ema.updates = ckpt['updates']
# Results
if ckpt.get('training_results') is not None:
results_file.write_text(ckpt['training_results']) # write results.txt
# Epochs
start_epoch = ckpt['epoch'] + 1
if opt.resume:
assert start_epoch > 0, '%s training to %g epochs is finished, nothing to resume.' % (weights, epochs)
if epochs < start_epoch:
logger.info('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' %
(weights, ckpt['epoch'], epochs))
epochs += ckpt['epoch'] # finetune additional epochs
del ckpt, state_dict
# Image sizes
gs = max(int(model.stride.max()), 32) # grid size (max stride)
nl = model.model[-1].nl # number of detection layers (used for scaling hyp['obj'])
imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size] # verify imgsz are gs-multiples
# DP mode
if cuda and rank == -1 and torch.cuda.device_count() > 1:
model = torch.nn.DataParallel(model)
# SyncBatchNorm
if opt.sync_bn and cuda and rank != -1:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
logger.info('Using SyncBatchNorm()')
# Trainloader
dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt,
hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=rank,
world_size=opt.world_size, workers=opt.workers,
image_weights=opt.image_weights, quad=opt.quad, prefix=colorstr('train: '), task='train', epoch_parts=opt.epoch_parts)
mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class
nb = len(dataloader) # number of batches
assert mlc < nc, 'Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g' % (mlc, nc, opt.data, nc - 1)
# Process 0
if rank in [-1, 0]:
testloader = create_dataloader(test_path, imgsz_test, batch_size * 2, gs, opt, # testloader
hyp=hyp, cache=opt.cache_images and not opt.notest, rect=True, rank=-1,
world_size=opt.world_size, workers=opt.workers,
pad=0.5, prefix=colorstr('val: '))[0]
if not opt.resume:
labels = np.concatenate(dataset.labels, 0)
c = torch.tensor(labels[:, 0]) # classes
# cf = torch.bincount(c.long(), minlength=nc) + 1. # frequency
# model._initialize_biases(cf.to(device))
if plots:
plot_labels(labels, names, save_dir, loggers)
if tb_writer:
tb_writer.add_histogram('classes', c, 0)
# Anchors
if not opt.noautoanchor:
check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)
model.half().float() # pre-reduce anchor precision
# DDP mode
if cuda and rank != -1:
model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank,
# nn.MultiheadAttention incompatibility with DDP https://github.com/pytorch/pytorch/issues/26698
find_unused_parameters=any(isinstance(layer, nn.MultiheadAttention) for layer in model.modules()))
# Model parameters
hyp['box'] *= 3. / nl # scale to layers
hyp['cls'] *= nc / 80. * 3. / nl # scale to classes and layers
hyp['obj'] *= (imgsz / 640) ** 2 * 3. / nl # scale to image size and layers
hyp['label_smoothing'] = opt.label_smoothing
model.nc = nc # attach number of classes to model
model.hyp = hyp # attach hyperparameters to model
model.gr = 1.0 # iou loss ratio (obj_loss = 1.0 or iou)
model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc # attach class weights
model.names = names
# Start training
t0 = time.time()
nw = max(round(hyp['warmup_epochs'] * nb), 1000) # number of warmup iterations, max(3 epochs, 1k iterations)
# nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training
maps = np.zeros(nc) # mAP per class
results = (0, 0, 0, 0, 0, 0, 0) # P, R, mAP@0.5, mAP@0.5:0.95, val_loss(box, obj, cls)
scheduler.last_epoch = start_epoch - 1 # do not move
scaler = amp.GradScaler(enabled=cuda)
compute_loss = ComputeLoss(model) # init loss class
logger.info(f'Image sizes {imgsz} train, {imgsz_test} test\n'
f'Using {dataloader.num_workers} dataloader workers\n'
f'Logging results to {save_dir}\n'
f'Starting training for {epochs} epochs...')
for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------
model.train()
# Update image weights (optional)
if opt.image_weights:
# Generate indices
if rank in [-1, 0]:
cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc # class weights
iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw) # image weights
dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n) # rand weighted idx
# Broadcast if DDP
if rank != -1:
indices = (torch.tensor(dataset.indices) if rank == 0 else torch.zeros(dataset.n)).int()
dist.broadcast(indices, 0)
if rank != 0:
dataset.indices = indices.cpu().numpy()
# Update mosaic border
# b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
# dataset.mosaic_border = [b - imgsz, -b] # height, width borders
mloss = torch.zeros(4, device=device) # mean losses
if rank != -1:
dataloader.sampler.set_epoch(epoch)
pbar = enumerate(dataloader)
logger.info(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'total', 'labels', 'img_size'))
if rank in [-1, 0]:
pbar = tqdm(pbar, total=nb) # progress bar
optimizer.zero_grad()
for i, (imgs, targets, paths, _) in pbar: # batch -------------------------------------------------------------
ni = i + nb * epoch # number integrated batches (since train start)
imgs = imgs.to(device, non_blocking=True).float() / 255.0 # uint8 to float32, 0-255 to 0.0-1.0
# Warmup
if ni <= nw:
xi = [0, nw] # x interp
# model.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou)
accumulate = max(1, np.interp(ni, xi, [1, nbs / total_batch_size]).round())
for j, x in enumerate(optimizer.param_groups):
# bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 2 else 0.0, x['initial_lr'] * lf(epoch)])
if 'momentum' in x:
x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']])
# Multi-scale
if opt.multi_scale:
sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs # size
sf = sz / max(imgs.shape[2:]) # scale factor
if sf != 1:
ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to gs-multiple)
imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)
# Forward
with amp.autocast(enabled=cuda):
pred = model(imgs) # forward
loss, loss_items = compute_loss(pred, targets.to(device)) # loss scaled by batch_size
if rank != -1:
loss *= opt.world_size # gradient averaged between devices in DDP mode
if opt.quad:
loss *= 4.
# Backward
scaler.scale(loss).backward()
# Optimize
if ni % accumulate == 0:
scaler.step(optimizer) # optimizer.step
scaler.update()
optimizer.zero_grad()
if ema:
ema.update(model)
# Print
if rank in [-1, 0]:
mloss = (mloss * i + loss_items) / (i + 1) # update mean losses
mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0) # (GB)
s = ('%10s' * 2 + '%10.6g' * 6) % (
'%g/%g' % (epoch, epochs - 1), mem, *mloss, targets.shape[0], imgs.shape[-1])
pbar.set_description(s)
# Plot
if plots and ni < 3:
f = save_dir / f'train_batch{ni}.jpg' # filename
Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start()
if tb_writer:
tb_writer.add_graph(torch.jit.trace(de_parallel(model), imgs, strict=False), []) # model graph
# tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch)
elif plots and ni == 10 and wandb_logger.wandb:
wandb_logger.log({"Mosaics": [wandb_logger.wandb.Image(str(x), caption=x.name) for x in
save_dir.glob('train*.jpg') if x.exists()]})
# end batch ------------------------------------------------------------------------------------------------
# end epoch ----------------------------------------------------------------------------------------------------
# Scheduler
lr = [x['lr'] for x in optimizer.param_groups] # for tensorboard
scheduler.step()
# DDP process 0 or single-GPU
if rank in [-1, 0]:
# mAP
ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'gr', 'names', 'stride', 'class_weights'])
final_epoch = epoch + 1 == epochs
if (epoch+1) % opt.save_period != 0:
wandb_logger.current_epoch = epoch + 1
# Log
tags = ['train/box_loss', 'train/obj_loss', 'train/cls_loss', # train loss
'x/lr0', 'x/lr1', 'x/lr2'] # params
for x, tag in zip(list(mloss[:-1]) + lr, tags):
if tb_writer:
tb_writer.add_scalar(tag, x, epoch) # tensorboard
if wandb_logger.wandb:
wandb_logger.log({tag: x}) # W&B
wandb_logger.end_epoch()
# Write
with open(results_file, 'a') as f:
f.write(s + '\n') # append metrics, val_loss
else:
if not opt.notest or final_epoch: # Calculate mAP
wandb_logger.current_epoch = epoch + 1
results, maps, times = predict.test(data_dict,
batch_size=batch_size * 2,
imgsz=imgsz_test,
model=ema.ema,
single_cls=opt.single_cls,
dataloader=testloader,
save_dir=save_dir,
save_json=is_coco and final_epoch,
verbose=nc < 50,
plots=plots and final_epoch,
wandb_logger=wandb_logger,
compute_loss=compute_loss,
is_coco=is_coco)
# Write
with open(results_file, 'a') as f:
f.write(s + '%10.4g' * 8 % results + '\n') # append metrics, val_loss
# Log
tags = ['train/box_loss', 'train/obj_loss', 'train/cls_loss', # train loss
'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.75', 'metrics/mAP_0.5:0.95',
'val/box_loss', 'val/obj_loss', 'val/cls_loss', # val loss
'x/lr0', 'x/lr1', 'x/lr2'] # params
for x, tag in zip(list(mloss[:-1]) + list(results) + lr, tags):
if tb_writer:
tb_writer.add_scalar(tag, x, epoch) # tensorboard
if wandb_logger.wandb:
wandb_logger.log({tag: x}) # W&B
# Update best mAP
fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@0.5, mAP@0.75, mAP@0.5:0.95]
if fi > best_fitness:
best_fitness = fi
wandb_logger.end_epoch(best_result=best_fitness == fi)
# Save model
if (not opt.nosave) or (final_epoch and not opt.evolve): # if save
ckpt = {'epoch': epoch,
'best_fitness': best_fitness,
'training_results': results_file.read_text(),
'model': deepcopy(de_parallel(model)).half(),
'ema': deepcopy(ema.ema).half(),
'updates': ema.updates,
'optimizer': optimizer.state_dict(),
'wandb_id': wandb_logger.wandb_run.id if wandb_logger.wandb else None}
# Save last, best and delete
torch.save(ckpt, last)
if best_fitness == fi:
torch.save(ckpt, best)
if wandb_logger.wandb:
if ((epoch + 1) % opt.save_period == 0 and not final_epoch) and opt.save_period != -1:
wandb_logger.log_model(
last.parent, opt, epoch, fi, best_model=best_fitness == fi)
del ckpt
# end epoch ----------------------------------------------------------------------------------------------------
# end training
if rank in [-1, 0]:
logger.info(f'{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.\n')
if plots:
plot_results(save_dir=save_dir) # save as results.png
if wandb_logger.wandb:
files = ['results.png', 'confusion_matrix.png', *[f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R')]]
wandb_logger.log({"Results": [wandb_logger.wandb.Image(str(save_dir / f), caption=f) for f in files
if (save_dir / f).exists()]})
if not opt.evolve:
if is_coco: # COCO dataset
for m in [last, best] if best.exists() else [last]: # speed, mAP tests
results, _, _ = predict.test(opt.data,
batch_size=batch_size * 2,
imgsz=imgsz_test,
conf_thres=0.001,
iou_thres=0.7,
model=attempt_load(m, device).half(),
single_cls=opt.single_cls,
dataloader=testloader,
save_dir=save_dir,
save_json=True,
plots=False,
is_coco=is_coco)
# Strip optimizers
for f in last, best:
if f.exists():
strip_optimizer(f) # strip optimizers
if wandb_logger.wandb: # Log the stripped model
wandb_logger.wandb.log_artifact(str(best if best.exists() else last), type='model',
name='run_' + wandb_logger.wandb_run.id + '_model',
aliases=['latest', 'best', 'stripped'])
wandb_logger.finish_run()
else:
dist.destroy_process_group()
torch.cuda.empty_cache()
return results
def data_prepare():
random.seed(100)
names = ['eye_opened', 'eye_closed', 'mouth_opened', 'mouth_closed', 'face', 'phone', 'cigar']
path_train_dir = '/DATA/Final_DATA/task03_train'
new_dir = '../drowsy_face'
# generate raw_train.json, raw_val.json
generate_raw_json = True
if generate_raw_json == True:
print('generate raw_train.json, raw_val.json')
if os.path.exists(new_dir):
shutil.rmtree(new_dir)
os.makedirs(new_dir + '/images/train')
os.makedirs(new_dir + '/images/val')
os.makedirs(new_dir + '/labels/train')
os.makedirs(new_dir + '/labels/val')
with open(path_train_dir + '/labels.json') as f:
json_data = json.load(f)
json_anno = json_data["annotations"]
num_data = len(json_anno) # 273224
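# hold out a fixed random subset of 20000 images for validation (seeded above for reproducibility)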
val_idx = random.sample(list(range(num_data)), 20000)
json_anno_val = []
json_anno_train = []
for idx, json_img in enumerate(tqdm(json_anno)):
if idx in val_idx:
json_anno_val.append(json_img)
else:
json_anno_train.append(json_img)
json_data_val = {}
json_data_val['annotations'] = json_anno_val
json_data_train = {}
json_data_train['annotations'] = json_anno_train
if os.path.isfile(new_dir + '/raw_val.json'):
os.remove(new_dir + '/raw_val.json')
if os.path.isfile(new_dir + '/raw_train.json'):
os.remove(new_dir + '/raw_train.json')
with open(new_dir + '/raw_val.json', 'w') as f_val:
json.dump(json_data_val, f_val)
with open(new_dir + '/raw_train.json', 'w') as f_train:
json.dump(json_data_train, f_train)
# generate drowsy_face/train, drowsy_face/val
generate_drowsy_face = True
if generate_drowsy_face == True:
print('generate drowsy_face/train, drowsy_face/val')
with open(new_dir + '/raw_val.json') as f:
json_data = json.load(f)
json_anno = json_data["annotations"]
for json_img in tqdm(json_anno):
img_id = json_img['file_name']
txt_dir = new_dir + '/labels/val/' + img_id.split('.')[0] + '.txt'
img_dir = new_dir + '/images/val/' + img_id
f_txt = open(txt_dir, 'w')
img_ = Image.open(path_train_dir + '/images/' + img_id)
img_size = img_.size
objects_yolo = ''
for img_obj in json_img['objects']:
class_id = str(names.index(img_obj['class']))
img_pos = img_obj['position']
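# positions are absolute pixel xyxy; normalize by image width/height and convert to YOLO xywh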
xywh = xyxy2xywh(np.array([[img_pos[0]/img_size[0], img_pos[1]/img_size[1], img_pos[2]/img_size[0], img_pos[3]/img_size[1]]]))[0]
f_txt.write(f"{class_id} {xywh[0]:.5f} {xywh[1]:.5f} {xywh[2]:.5f} {xywh[3]:.5f}\n") # write label
f_txt.close()
shutil.copy(path_train_dir + '/images/' + img_id, img_dir)
with open(new_dir + '/raw_train.json') as f:
json_data = json.load(f)
json_anno = json_data["annotations"]
for json_img in tqdm(json_anno):
img_id = json_img['file_name']
txt_dir = new_dir + '/labels/train/' + img_id.split('.')[0] + '.txt'
img_dir = new_dir + '/images/train/' + img_id
f_txt = open(txt_dir, 'w')
img_ = Image.open(path_train_dir + '/images/' + img_id)
img_size = img_.size
objects_yolo = ''
for img_obj in json_img['objects']:
class_id = str(names.index(img_obj['class']))
img_pos = img_obj['position']
xywh = xyxy2xywh(np.array([[img_pos[0]/img_size[0], img_pos[1]/img_size[1], img_pos[2]/img_size[0], img_pos[3]/img_size[1]]]))[0]
f_txt.write(f"{class_id} {xywh[0]:.5f} {xywh[1]:.5f} {xywh[2]:.5f} {xywh[3]:.5f}\n") # write label
f_txt.close()
shutil.copy(path_train_dir + '/images/' + img_id, img_dir)
# generate diet_train.json
generate_diet_json = True
if generate_diet_json == True:
print('generate diet_train.json')
json_anno_diet = []
with open(path_train_dir + '/labels.json') as f:
json_data = json.load(f)
json_anno = json_data["annotations"]
fidx = 0
for img_info in tqdm(json_anno):
file_name = img_info['file_name']
cigar_check = 0
phone_check = 0
eye_closed_check = 0
mouth_closed_check = 0
mouth_opened_check = 0
for annotation_info in img_info['objects']:
if annotation_info['class'] == 'cigar':
cigar_check = 1
elif annotation_info['class'] == 'phone':
phone_check = 1
elif annotation_info['class'] == 'eye_closed':
eye_closed_check = 1
elif annotation_info['class'] == 'mouth_closed':
mouth_closed_check = 1
elif annotation_info['class'] == 'mouth_opened':
mouth_opened_check = 1
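# keep images with phone/cigar, or with eye_closed plus a mouth label; keep only every third of the remaining mouth_opened images to rebalance the dataset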
if cigar_check or phone_check:
json_anno_diet.append(img_info)
elif eye_closed_check and mouth_closed_check:
json_anno_diet.append(img_info)
elif eye_closed_check and mouth_opened_check:
json_anno_diet.append(img_info)
elif mouth_opened_check:
fidx = fidx + 1
if fidx % 3 == 0:
json_anno_diet.append(img_info)
json_data_diet = {}
json_data_diet['annotations'] = json_anno_diet
if os.path.isfile(new_dir + '/diet_train.json'):
os.remove(new_dir + '/diet_train.json')
with open(new_dir + '/diet_train.json', 'w') as f_diet:
json.dump(json_data_diet, f_diet)
# generate drowsy_face_diet/train
generate_drowsy_face_diet = True
if generate_drowsy_face_diet == True:
print('generate drowsy_face_diet/train')
new_dir_diet = '../drowsy_face_diet'
if os.path.exists(new_dir_diet):
shutil.rmtree(new_dir_diet)
os.makedirs(new_dir_diet + '/images/train')
os.makedirs(new_dir_diet + '/labels/train')
with open(new_dir + '/diet_train.json') as f:
json_data = json.load(f)
json_anno = json_data["annotations"]
for json_img in tqdm(json_anno):
img_id = json_img['file_name']
txt_dir = new_dir_diet + '/labels/train/' + img_id.split('.')[0] + '.txt'
img_dir = new_dir_diet + '/images/train/' + img_id
f_txt = open(txt_dir, 'w')
img_ = Image.open(path_train_dir + '/images/' + img_id)
img_size = img_.size
objects_yolo = ''
for img_obj in json_img['objects']:
class_id = str(names.index(img_obj['class']))
img_pos = img_obj['position']
xywh = xyxy2xywh(np.array([[img_pos[0]/img_size[0], img_pos[1]/img_size[1], img_pos[2]/img_size[0], img_pos[3]/img_size[1]]]))[0]
f_txt.write(f"{class_id} {xywh[0]:.5f} {xywh[1]:.5f} {xywh[2]:.5f} {xywh[3]:.5f}\n") # write label
f_txt.close()
shutil.copy(path_train_dir + '/images/' + img_id, img_dir)
# count classes
def count_classes(annotations):
class_dict = {
'eye_opened': 0,
'eye_closed': 0,
'mouth_opened': 0,
'mouth_closed': 0,
'face': 0,
'phone': 0,
'cigar': 0
}
for img_info in tqdm(annotations):
for annotation_info in img_info['objects']:
class_dict[annotation_info['class']] = class_dict[annotation_info['class']] + 1
print(class_dict)
count_jsons = True
if count_jsons == True:
print('count classes')
with open(new_dir + '/diet_train.json', 'r') as annotation_file:
annotations = json.load(annotation_file)
annotations = annotations['annotations']
print('diet_train.json')
count_classes(annotations)
with open(new_dir + '/raw_train.json', 'r') as annotation_file:
annotations = json.load(annotation_file)
annotations = annotations['annotations']
print('raw_train.json')
count_classes(annotations)
with open(new_dir + '/raw_val.json', 'r') as annotation_file:
annotations = json.load(annotation_file)
annotations = annotations['annotations']
print('raw_val.json')
count_classes(annotations)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--random_seed', type=int, default=0, help='')
    parser.add_argument('--weights', type=str, default='', help='initial weights path')
    parser.add_argument('--cfg', type=str, default='models/hub/yolov5l6.yaml', help='model.yaml path')
    parser.add_argument('--data', type=str, default='data/drowsy_face.yaml', help='data.yaml path')
    parser.add_argument('--hyp', type=str, default='data/hyp.scratch-p6.yaml', help='hyperparameters path')
    parser.add_argument('--batch-size', type=int, default=4, help='total batch size for all GPUs')
    parser.add_argument('--img-size', nargs='+', type=int, default=[1280, 1280], help='[train, test] image sizes')
    parser.add_argument('--rect', action='store_true', help='rectangular training')
    parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training')
    parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
    parser.add_argument('--notest', action='store_true', help='only test final epoch')
    parser.add_argument('--noautoanchor', action='store_true', help='disable autoanchor check')
    parser.add_argument('--evolve', action='store_true', help='evolve hyperparameters')
    parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')
    parser.add_argument('--cache-images', default='', action='store_true', help='cache images for faster training')
    parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training')
    parser.add_argument('--device', default='0', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%')
    parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class')
    parser.add_argument('--adam', action='store_true', help='use torch.optim.Adam() optimizer')
    parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')
    parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify')
    parser.add_argument('--workers', type=int, default=8, help='maximum number of dataloader workers')
    parser.add_argument('--project', default='runs/train', help='save to project/name')
    parser.add_argument('--entity', default=None, help='W&B entity')
    parser.add_argument('--name', default='final', help='save to project/name')
    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    parser.add_argument('--quad', action='store_true', help='quad dataloader')
    parser.add_argument('--linear-lr', action='store_true', help='linear LR')
    parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon')
    parser.add_argument('--upload_dataset', action='store_true', help='Upload dataset as W&B artifact table')
    parser.add_argument('--bbox_interval', type=int, default=300, help='Set bounding-box image logging interval for W&B')
    parser.add_argument('--artifact_alias', type=str, default="latest", help='version of dataset artifact to be used')
    ## for baseline training
    parser.add_argument('--no_data_prepare', action='store_true')
    parser.add_argument('--epochs', type=int, default=300)
    parser.add_argument('--epoch_parts', type=int, default=15, help='Log model after every "save_period" epoch')
    parser.add_argument('--save_period', type=int, default=300, help='Log model after every "save_period" epoch')
    ## for fine-tuning
    parser.add_argument('--fine_tune', action='store_true', help='fine_tune')
    parser.add_argument('--epochs_tune', type=int, default=50)
    parser.add_argument('--epoch_parts_tune', type=int, default=50, help='Log model after every "save_period" epoch')
    parser.add_argument('--save_period_tune', type=int, default=50, help='Log model after every "save_period" epoch')
    opt = parser.parse_args()
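    # Rebuild the dataset (diet JSON plus YOLO-format image/label folders, see above)
    # before training, unless --no_data_prepare is given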
    if not opt.no_data_prepare:
        data_prepare()

    # Reproducibility
    torch.manual_seed(opt.random_seed)
    torch.cuda.manual_seed(opt.random_seed)
    torch.cuda.manual_seed_all(opt.random_seed)  # if use multi-GPU
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    np.random.seed(opt.random_seed)
    random.seed(opt.random_seed)

    # Set DDP variables
    opt.world_size = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1
    opt.global_rank = int(os.environ['RANK']) if 'RANK' in os.environ else -1
    set_logging(opt.global_rank)
    if opt.global_rank in [-1, 0]:
        check_requirements(exclude=('pycocotools', 'thop'))

    # Resume
    wandb_run = check_wandb_resume(opt)
    if opt.resume and not wandb_run:  # resume an interrupted run
        ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run()  # specified or most recent path
        assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist'
        apriori = opt.global_rank, opt.local_rank
        with open(Path(ckpt).parent.parent / 'opt.yaml') as f:
            opt = argparse.Namespace(**yaml.safe_load(f))  # replace
        opt.cfg, opt.weights, opt.resume, opt.batch_size, opt.global_rank, opt.local_rank = \
            '', ckpt, True, opt.total_batch_size, *apriori  # reinstate
        logger.info('Resuming training from %s' % ckpt)
    else:
        # opt.hyp = opt.hyp or ('hyp.finetune.yaml' if opt.weights else 'hyp.scratch.yaml')
        opt.data, opt.cfg, opt.hyp = check_file(opt.data), check_file(opt.cfg), check_file(opt.hyp)  # check files
        assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified'
        opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size)))  # extend to 2 sizes (train, test)
        opt.name = 'evolve' if opt.evolve else opt.name
        opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok | opt.evolve))
    # DDP mode
    opt.total_batch_size = opt.batch_size
    device = select_device(opt.device, batch_size=opt.batch_size)
    if opt.local_rank != -1:
        assert torch.cuda.device_count() > opt.local_rank
        torch.cuda.set_device(opt.local_rank)
        device = torch.device('cuda', opt.local_rank)
        dist.init_process_group(backend='nccl', init_method='env://')  # distributed backend
        assert opt.batch_size % opt.world_size == 0, '--batch-size must be multiple of CUDA device count'
        assert not opt.image_weights, '--image-weights argument is not compatible with DDP training'
        opt.batch_size = opt.total_batch_size // opt.world_size

    # Hyperparameters
    with open(opt.hyp) as f:
        hyp = yaml.safe_load(f)  # load hyps

    # Train
    logger.info(opt)
    if not opt.evolve:
        tb_writer = None  # init loggers
        if opt.global_rank in [-1, 0]:
            prefix = colorstr('tensorboard: ')
            logger.info(f"{prefix}Start with 'tensorboard --logdir {opt.project}', view at http://localhost:6006/")
            tb_writer = SummaryWriter(opt.save_dir)  # Tensorboard
        train(hyp, opt, device, tb_writer)

    print("### base train completed")
    print("### fine-tuning start")
    opt.fine_tune = True
    opt.weights = opt.save_dir + '/weights/last.pt'
    opt.data = 'data/drowsy_face_tuning.yaml'
    opt.hyp = 'data/hyp.finetune-simple.yaml'
    opt.epochs = opt.epochs_tune
    opt.epoch_parts = opt.epoch_parts_tune
    opt.save_period = opt.save_period_tune
    opt.data, opt.cfg, opt.hyp = check_file(opt.data), check_file(opt.cfg), check_file(opt.hyp)  # check files
    assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified'
    opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size)))  # extend to 2 sizes (train, test)
    opt.name = 'evolve' if opt.evolve else opt.name
    opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok | opt.evolve))

    # Hyperparameters
    with open(opt.hyp) as f:
        hyp = yaml.safe_load(f)  # load hyps

    # Train
    logger.info(opt)
    if not opt.evolve:
        tb_writer = None  # init loggers
        if opt.global_rank in [-1, 0]:
            prefix = colorstr('tensorboard: ')
            logger.info(f"{prefix}Start with 'tensorboard --logdir {opt.project}', view at http://localhost:6006/")
            tb_writer = SummaryWriter(opt.save_dir)  # Tensorboard
        train(hyp, opt, device, tb_writer)
| [] | [] | ["RANK", "WORLD_SIZE"] | [] | ["RANK", "WORLD_SIZE"] | python | 2 | 0 |
cmd/web/main.go | // Copyright elipZis GmbH 2022
// All Rights Reserved
package main
import (
    "context"
    "github.com/aws/aws-lambda-go/events"
    "github.com/aws/aws-lambda-go/lambda"
    fiberAdapter "github.com/awslabs/aws-lambda-go-api-proxy/fiber"
    "github.com/elipzis/go-serverless/web/router"
    _ "github.com/joho/godotenv/autoload"
    "log"
    "os"
    "os/signal"
)
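// fiberLambda adapts the Fiber app to the AWS Lambda / API Gateway proxy model;
// r holds the HTTP router used both locally and inside the Lambda handler.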
var fiberLambda *fiberAdapter.FiberLambda
var r *router.Router
// init sets up some general routes and requirements per environment
func init() {
    // Init router
    r = router.NewRouter()
    r.Register(r.App.Group(""))

    // For AWS Lambda we need a wrapper to proxy the requests
    if os.Getenv("SERVER_ENV") == "AWS" {
        fiberLambda = fiberAdapter.New(r.App)
    }
}
// Handler to proxy AWS Lambda requests/responses
func Handler(ctx context.Context, req events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {
    // Hand the API Gateway request to the Fiber app and translate its response back
    return fiberLambda.ProxyWithContext(ctx, req)
}
// main starts the application, either as an AWS Lambda handler or as a local web server.
// @title Go Serverless! Example App
// @version 1.0
// @description The Example App for the Conf42: Golang 2022 Presentation "Go Serverless!"
// @contact.name Savas Ziplies
// @contact.web elipZis.com
// @contact.email [email protected]
// @BasePath /web
func main() {
    env := os.Getenv("SERVER_ENV")
    log.Printf("Starting on %s environment", env)
    if env == "AWS" {
        lambda.Start(Handler)
    } else {
        r.Run()

        // Block until an interrupt signal arrives, then exit
        quit := make(chan os.Signal, 1)
        signal.Notify(quit, os.Interrupt)
        <-quit
    }
}
| ["\"SERVER_ENV\"", "\"SERVER_ENV\""] | [] | ["SERVER_ENV"] | [] | ["SERVER_ENV"] | go | 1 | 0 |
services/rms/v1/model/model_show_tracker_config_response.go | package model
import (
"github.com/huaweicloud/huaweicloud-sdk-go-v3/core/utils"
"strings"
)
// Response Object
type ShowTrackerConfigResponse struct {
Channel *ChannelConfigBody `json:"channel,omitempty"`
Selector *SelectorConfigBody `json:"selector,omitempty"`
// IAM委托名称
AgencyName *string `json:"agency_name,omitempty"`
HttpStatusCode int `json:"-"`
}
func (o ShowTrackerConfigResponse) String() string {
data, err := utils.Marshal(o)
if err != nil {
return "ShowTrackerConfigResponse struct{}"
}
return strings.Join([]string{"ShowTrackerConfigResponse", string(data)}, " ")
}
| [] | [] | [] | [] | [] | go | null | null |
tests/integration/init/test_init_command.py | from unittest import TestCase
from subprocess import Popen
import os
from backports import tempfile
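# Smoke test: run "sam init -o <tmp dir>" end to end and check that the scaffolded project directory exists.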
class TestBasicInitCommand(TestCase):
    def test_init_command_passes_and_dir_created(self):
        with tempfile.TemporaryDirectory() as temp:
            process = Popen([TestBasicInitCommand._get_command(), "init", "-o", temp])
            return_code = process.wait()

            self.assertEqual(return_code, 0)
            self.assertTrue(os.path.isdir(temp + "/sam-app"))

    @staticmethod
    def _get_command():
        command = "sam"
        if os.getenv("SAM_CLI_DEV"):
            command = "samdev"

        return command
| [] | [] | ["SAM_CLI_DEV"] | [] | ["SAM_CLI_DEV"] | python | 1 | 0 |
oneflow/python/test/ops/test_cpu_only_user_op.py | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow as flow
import numpy as np
import oneflow.typing as oft
import unittest
import os
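# The helpers below build a user op backed by the CPU-only "cpu_only_relu_test" kernel and
# check which device OneFlow places its output on under cpu/gpu placement scopes.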
def _cpu_only_relu(x):
    op = (
        flow.user_op_builder("CpuOnlyRelu")
        .Op("cpu_only_relu_test")
        .Input("in", [x])
        .Output("out")
        .Build()
    )
    return op.InferAndTryRun().SoleOutputBlob()


def _check_cpu_only_relu_device(test_case, verbose=False):
    flow.clear_default_session()
    func_config = flow.FunctionConfig()
    func_config.default_data_type(flow.float)
    func_config.default_placement_scope(flow.scope.placement("cpu", "0:0"))

    @flow.global_function(function_config=func_config)
    def cpu_only_relu_job(x_def: oft.Numpy.Placeholder(shape=(2, 5), dtype=flow.float)):
        y = _cpu_only_relu(x_def)
        if verbose:
            print("cpu_only_relu output device", y.parallel_conf.device_tag)
        test_case.assertTrue("cpu" in y.parallel_conf.device_tag)
        return y

    cpu_only_relu_job(np.random.rand(2, 5).astype(np.single)).get()


def _check_non_cpu_only_relu_device(test_case):
    flow.clear_default_session()
    func_config = flow.FunctionConfig()
    func_config.default_data_type(flow.float)
    func_config.default_placement_scope(flow.scope.placement("gpu", "0:0"))

    @flow.global_function(function_config=func_config)
    def relu_job(x_def: oft.Numpy.Placeholder(shape=(2, 5), dtype=flow.float)):
        with flow.scope.placement("gpu", "0:0"):
            y = flow.math.relu(x_def)
        test_case.assertTrue("gpu" in y.parallel_conf.device_tag)
        return y

    relu_job(np.random.rand(2, 5).astype(np.single)).get()


@flow.unittest.skip_unless_1n1d()
class TestCpuOnlyUserOp(flow.unittest.TestCase):
    def test_cpu_only_user_op(test_case):
        _check_cpu_only_relu_device(test_case)

    @unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
    def test_non_cpu_only_user_op(test_case):
        _check_non_cpu_only_relu_device(test_case)


if __name__ == "__main__":
    unittest.main()
| [] | [] | ["ONEFLOW_TEST_CPU_ONLY"] | [] | ["ONEFLOW_TEST_CPU_ONLY"] | python | 1 | 0 |